repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
RocketQA | RocketQA-main/examples/jina_example/rocketqa_reranker/executor.py | import numpy as np
import rocketqa
from jina import Executor, Document, DocumentArray, requests
from jina.types.score import NamedScore
class RocketQAReranker(Executor):
    """
    Re-rank each Document's `matches` by their relevance to the question held
    in the Document's `text` field, scored with a RocketQA matching model.
    """

    def __init__(self, model, use_cuda=False, device_id=0, batch_size=1, *args, **kwargs):
        """
        :param model: A model name return by `rocketqa.available_models()` or the path of an user-specified checkpoint config
        :param use_cuda: Set to `True` (default: `False`) to use GPU
        :param device_id: The GPU device id to load the model. Set to integers starting from 0 to `N`, where `N` is the number of GPUs minus 1.
        :param batch_size: the batch size during inference.
        """
        super().__init__(*args, **kwargs)
        # Cross-encoder used to score (query, passage, title) triples.
        self.encoder = rocketqa.load_model(model=model, use_cuda=use_cuda, device_id=device_id, batch_size=batch_size)
        self.b_s = batch_size

    @requests(on='/search')
    def rank(self, docs, **kwargs):
        # Score and re-order the matches of every incoming document.
        for doc in docs:
            query = doc.text
            # Traverse this doc's matches ('m' path) in model-sized batches.
            batches = (DocumentArray([doc])
                       .traverse_flat(traversal_paths='m')
                       .batch(batch_size=self.b_s))
            all_scores = []
            flat_matches = DocumentArray()
            for batch in batches:
                titles, paras = batch.get_attributes('tags__title', 'tags__para')
                batch_scores = self.encoder.matching(
                    query=[query] * len(paras), para=paras, title=titles)
                all_scores.extend(list(batch_scores))
                flat_matches += list(batch)
            # Highest relevance score first.
            order = np.argsort(all_scores).tolist()
            order.reverse()
            reranked = DocumentArray()
            for idx in order:
                match = Document(
                    id=flat_matches[idx].id,
                    tags={
                        'title': flat_matches[idx].tags['title'],
                        'para': flat_matches[idx].tags['para']
                    }
                )
                match.scores['relevance'] = NamedScore(value=all_scores[idx])
                reranked.append(match)
            doc.matches = reranked
| 2,516 | 43.946429 | 143 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/metric/generate_candrank.py | import csv
import sys
from collections import defaultdict
# Usage: generate_candrank.py <score_file> <qid_pid_file>
score_f = sys.argv[1]
id_f = sys.argv[2]
#id_f = 'marco_joint_qtp/qtp.test.id'
#id_f = 'dev.es_1000.id'
outputf = 'metric/ranking_res'
scores = []
q_ids = []
p_ids = []
q_dic = defaultdict(list)
# One reranker score per line, aligned line-for-line with the id file below.
with open(score_f, 'r') as f:
    for line in f:
        scores.append(float(line.strip()))
# Each id line is "<query_id>\t<passage_id>".
with open(id_f, 'r') as f:
    for line in f:
        v = line.strip().split('\t')
        q_ids.append(int(v[0]))
        p_ids.append(int(v[1]))
# Group (score, passage_id) candidates under their query id.
for q, p, s in zip(q_ids, p_ids, scores):
    q_dic[q].append((s, p))
output = []
for q in q_dic:
    rank = 0
    cands = q_dic[q]
    # Highest score first; emit at most 50 ranked rows per query.
    cands.sort(reverse=True)
    for cand in cands:
        rank += 1
        output.append([q, cand[1], rank])
        #print str(q) + '\t' + str(cand[1]) + '\t' + str(rank)
        if rank > 49:
            break
# TSV rows: query_id, passage_id, rank — the format msmarco_eval.py consumes.
with open(outputf, 'w') as f:
    writer = csv.writer(f, delimiter= '\t')
    writer.writerows(output)
| 950 | 20.133333 | 62 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/metric/tokenizers.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Most of the tokenizers code here is copied from DrQA codebase to avoid adding extra dependency
"""
import copy
import logging
import regex
#import spacy
import unicodedata
logger = logging.getLogger(__name__)
class Tokens(object):
    """Container for tokenized text; each entry is a tuple of token fields."""
    # Field indices within each token tuple.
    TEXT = 0
    TEXT_WS = 1
    SPAN = 2
    POS = 3
    LEMMA = 4
    NER = 5

    def __init__(self, data, annotators, opts=None):
        self.data = data
        self.annotators = annotators
        self.opts = opts or {}

    def __len__(self):
        """Number of tokens held."""
        return len(self.data)

    def slice(self, i=None, j=None):
        """Shallow-copied view restricted to tokens [i, j)."""
        view = copy.copy(self)
        view.data = self.data[i: j]
        return view

    def untokenize(self):
        """Rebuild the original text from the token+trailing-whitespace fields."""
        return ''.join(tok[self.TEXT_WS] for tok in self.data).strip()

    def words(self, uncased=False):
        """Token strings, optionally lower-cased.

        Args:
            uncased: lower cases text
        """
        if uncased:
            return [tok[self.TEXT].lower() for tok in self.data]
        return [tok[self.TEXT] for tok in self.data]

    def offsets(self):
        """[start, end) character offsets for each token."""
        return [tok[self.SPAN] for tok in self.data]

    def pos(self):
        """POS tags per token, or None if that annotator was not run."""
        if 'pos' not in self.annotators:
            return None
        return [tok[self.POS] for tok in self.data]

    def lemmas(self):
        """Lemmas per token, or None if that annotator was not run."""
        if 'lemma' not in self.annotators:
            return None
        return [tok[self.LEMMA] for tok in self.data]

    def entities(self):
        """NER tags per token, or None if that annotator was not run."""
        if 'ner' not in self.annotators:
            return None
        return [tok[self.NER] for tok in self.data]

    def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):
        """All ngrams of length 1..n.

        Args:
            n: upper limit of ngram length
            uncased: lower cases text
            filter_fn: predicate over a token list; truthy result drops the ngram
            as_strings: join each ngram's tokens with spaces instead of spans
        """
        def _skip(gram):
            return filter_fn(gram) if filter_fn else False

        words = self.words(uncased)
        spans = [(s, e + 1)
                 for s in range(len(words))
                 for e in range(s, min(s + n, len(words)))
                 if not _skip(words[s:e + 1])]
        if as_strings:
            # Concatenate each span into a space-joined string.
            return ['{}'.format(' '.join(words[s:e])) for (s, e) in spans]
        return spans

    def entity_groups(self):
        """Merge consecutive tokens sharing a non-background NER tag."""
        entities = self.entities()
        if not entities:
            return None
        non_ent = self.opts.get('non_ent', 'O')
        groups = []
        idx = 0
        while idx < len(entities):
            tag = entities[idx]
            if tag == non_ent:
                idx += 1
                continue
            # Extend over the whole run of identical tags.
            start = idx
            while idx < len(entities) and entities[idx] == tag:
                idx += 1
            groups.append((self.slice(start, idx).untokenize(), tag))
        return groups
class Tokenizer(object):
    """Abstract base for tokenizers; subclasses must return a Tokens object."""

    def tokenize(self, text):
        """Tokenize *text*; must be overridden by subclasses."""
        raise NotImplementedError

    def shutdown(self):
        """Release any held resources; the default is a no-op."""
        pass

    def __del__(self):
        # Best-effort cleanup when the tokenizer is garbage collected.
        self.shutdown()
class SimpleTokenizer(Tokenizer):
    """Regex tokenizer: runs of alphanumerics, else single non-space chars."""
    ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'
    NON_WS = r'[^\p{Z}\p{C}]'

    def __init__(self, **kwargs):
        """
        Args:
            annotators: None or empty set (only tokenizes).
        """
        pattern = '(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS)
        self._regexp = regex.compile(
            pattern,
            flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
        )
        # This tokenizer cannot annotate; warn if annotators were requested.
        if len(kwargs.get('annotators', {})) > 0:
            logger.warning('%s only tokenizes! Skipping annotators: %s' %
                           (type(self).__name__, kwargs.get('annotators')))
        self.annotators = set()

    def tokenize(self, text):
        """Tokenize *text*, recording each token's trailing whitespace and span."""
        found = list(self._regexp.finditer(text))
        total = len(found)
        data = []
        for i, m in enumerate(found):
            token = m.group()
            span = m.span()
            start_ws = span[0]
            # Trailing whitespace extends to the next token's start (or token end).
            end_ws = found[i + 1].span()[0] if i + 1 < total else span[1]
            data.append((
                token,
                text[start_ws: end_ws],
                span,
            ))
        return Tokens(data, self.annotators)
class SpacyTokenizer(Tokenizer):
    # NOTE(review): `import spacy` at the top of this file is commented out,
    # so instantiating this class raises NameError as-is — confirm intent.
    def __init__(self, **kwargs):
        """
        Args:
            annotators: set that can include pos, lemma, and ner.
            model: spaCy model to use (either path, or keyword like 'en').
        """
        model = kwargs.get('model', 'en')
        self.annotators = copy.deepcopy(kwargs.get('annotators', set()))
        nlp_kwargs = {'parser': False}
        # Only enable the tagger/entity pipes when an annotator needs them.
        if not any([p in self.annotators for p in ['lemma', 'pos', 'ner']]):
            nlp_kwargs['tagger'] = False
        if 'ner' not in self.annotators:
            nlp_kwargs['entity'] = False
        self.nlp = spacy.load(model, **nlp_kwargs)
    def tokenize(self, text):
        # We don't treat new lines as tokens.
        clean_text = text.replace('\n', ' ')
        tokens = self.nlp.tokenizer(clean_text)
        if any([p in self.annotators for p in ['lemma', 'pos', 'ner']]):
            self.nlp.tagger(tokens)
        if 'ner' in self.annotators:
            self.nlp.entity(tokens)
        data = []
        for i in range(len(tokens)):
            # Get whitespace
            start_ws = tokens[i].idx
            if i + 1 < len(tokens):
                end_ws = tokens[i + 1].idx
            else:
                end_ws = tokens[i].idx + len(tokens[i].text)
            # Tuple layout matches Tokens: TEXT, TEXT_WS, SPAN, POS, LEMMA, NER.
            data.append((
                tokens[i].text,
                text[start_ws: end_ws],
                (tokens[i].idx, tokens[i].idx + len(tokens[i].text)),
                tokens[i].tag_,
                tokens[i].lemma_,
                tokens[i].ent_type_,
            ))
        # Set special option for non-entity tag: '' vs 'O' in spaCy
        return Tokens(data, self.annotators, opts={'non_ent': ''})
def _normalize(text):
return unicodedata.normalize('NFD', unicode(text, 'utf-8'))
def has_answer(text, answers, match_type='string'):
    """Return True iff any of *answers* occurs in *text* as an uncased token
    subsequence. Only 'string' matching is implemented; any other
    *match_type* yields False."""
    tok_opts = {}
    tokenizer = SimpleTokenizer(**tok_opts)
    text = _normalize(text)
    if match_type == 'string':
        # Answer is a list of possible strings.
        doc_words = tokenizer.tokenize(text).words(uncased=True)
        for candidate in answers:
            cand_words = tokenizer.tokenize(_normalize(candidate)).words(uncased=True)
            width = len(cand_words)
            # Slide the answer window across the passage tokens.
            for start in range(0, len(doc_words) - width + 1):
                if doc_words[start: start + width] == cand_words:
                    return True
    return False
| 8,194 | 29.692884 | 94 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/metric/nq_eval_rerank.py | import numpy as np
from tqdm import tqdm
import random
import sys
from tokenizers import SimpleTokenizer
from tokenizers import has_answer
f_answer = open('corpus/nq/test.answers.txt', 'r')
#f_answer = open('nq.58812.ans', 'r')
print('reading text')
# Passage texts indexed by global passage id, concatenated over 8 shards.
p_text = []
for i in range(8):
    f_p_text = open('corpus/nq/para_8part/part-0%d' % i, 'r')
    for line in f_p_text:
        line = line.strip('\n').split('\t')
        p_text.append(line[2].strip())
print('reading query-para-score')
para = {}
scores = []
score = {}
# Usage: nq_eval_rerank.py <score_file> <recall_id_file>
score_name = sys.argv[1]
recall_name = sys.argv[2]
f_s = open(score_name, 'r')
for line in f_s:
    scores.append(float(line.strip()))
f_s.close()
#f_qp = open('nq.58812.qtp.id', 'r')
f_qp = open(recall_name, 'r')
# The i-th "<qid>\t<pid>" line pairs with the i-th score line.
for i, line in enumerate(f_qp):
    line = line.strip('\n').split('\t')
    q = line[0]
    p = line[1]
    if q not in para and q not in score:
        para[q] = []
        score[q] = []
    para[q].append(p)
    score[q].append(scores[i])
f_qp.close()
print('calculating acc')
right_num_r20 = 0.0
right_num_r5 = 0.0
query_num = 0.0
MRR = 0.0
for qid, line in enumerate(f_answer):
    query_num += 1
    line = line.strip('\n').split('\t')
    answer = line[1:]
    #q = str(int(line[0])+1)
    # NOTE(review): qids in the id file appear to be 1-based line numbers
    # of the answers file — confirm against the recall-file producer.
    q = str(qid+1)
    # Sort this query's candidates by reranker score, descending; keep top 50.
    data = list(zip(score[q], para[q]))
    data.sort()
    data.reverse()
    data = data[:50]
    # random.shuffle(data)
    # Recall@20: any answer-bearing passage within the top 20.
    for i in range(20):
        if has_answer(p_text[int(data[i][1])], answer):
            right_num_r20 += 1
            break
    # Recall@5.
    for i in range(5):
        if has_answer(p_text[int(data[i][1])], answer):
            right_num_r5 += 1
            break
    flag = 0
    # MRR@10: reciprocal rank of the first answer-bearing passage.
    for i in range(10):
        if has_answer(p_text[int(data[i][1])], answer):
            MRR += 1.0 / (i+1)
            break
    query_num = qid + 1
r20 = right_num_r20 / query_num
r5 = right_num_r5 / query_num
MRR = MRR / query_num
print('recall@20: ' + str(r20))
print('recall@5: ' + str(r5))
print('MRR@10: ' + str(MRR))
| 1,972 | 21.94186 | 61 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/metric/msmarco_eval.py | """
This module computes evaluation metrics for MSMARCO dataset on the ranking task.
Command line:
python msmarco_eval_ranking.py <path_to_reference_file> <path_to_candidate_file>
Creation Date : 06/12/2018
Last Modified : 1/21/2019
Authors : Daniel Campos <dacamp@microsoft.com>, Rutger van Haasteren <ruvanh@microsoft.com>
"""
import sys
from collections import Counter
MaxMRRRank = 10
def load_reference_from_stream(f):
    """Load Reference reference relevant passages
    Args:f (stream): stream to load.
    Returns:qids_to_relevant_passageids (dict): dictionary mapping from query_id (int) to relevant passages (list of ints).
    """
    qids_to_relevant_passageids = {}
    for l in f:
        try:
            l = l.strip().split('\t')
            qid = int(l[0])
            # Accumulate every relevant pid under its qid.
            qids_to_relevant_passageids.setdefault(qid, []).append(int(l[1]))
        except:
            raise IOError('\"%s\" is not valid format' % l)
    return qids_to_relevant_passageids
def load_reference(path_to_reference):
    """Load Reference reference relevant passages
    Args:path_to_reference (str): path to a file to load.
    Returns:qids_to_relevant_passageids (dict): dictionary mapping from query_id (int) to relevant passages (list of ints).
    """
    # Thin file wrapper over the stream loader.
    with open(path_to_reference, 'r') as fh:
        return load_reference_from_stream(fh)
def load_candidate_from_stream(f):
    """Load candidate data from a stream.
    Args:f (stream): stream to load.
    Returns:qid_to_ranked_candidate_passages (dict): dictionary mapping from query_id (int) to a list of 1000 passage ids(int) ranked by relevance and importance
    """
    qid_to_ranked_candidate_passages = {}
    for l in f:
        try:
            l = l.strip().split('\t')
            qid, pid, rank = int(l[0]), int(l[1]), int(l[2])
            if qid not in qid_to_ranked_candidate_passages:
                # By default, all PIDs in the list of 1000 are 0. Only override those that are given
                qid_to_ranked_candidate_passages[qid] = [0] * 1000
            # Ranks are 1-based in the input file.
            qid_to_ranked_candidate_passages[qid][rank - 1] = pid
        except:
            raise IOError('\"%s\" is not valid format' % l)
    return qid_to_ranked_candidate_passages
def load_candidate(path_to_candidate):
    """Load candidate data from a file.
    Args:path_to_candidate (str): path to file to load.
    Returns:qid_to_ranked_candidate_passages (dict): dictionary mapping from query_id (int) to a list of 1000 passage ids(int) ranked by relevance and importance
    """
    # Thin file wrapper over the stream loader.
    with open(path_to_candidate, 'r') as fh:
        return load_candidate_from_stream(fh)
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Perform quality checks on the dictionaries

    Args:
        p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping
            Dict as read in with load_reference or load_reference_from_stream
        p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates
    Returns:
        bool,str: Boolean whether allowed, message to be shown in case of a problem

    Fix: the original also built `candidate_set` and `ref_set` from the two
    dicts' keys but never used them — dead work removed.
    """
    message = ''
    allowed = True
    # A passage may be ranked at most once per query; the 0 padding entries
    # (unfilled ranks) are exempt from the duplicate check.
    for qid in qids_to_ranked_candidate_passages:
        duplicate_pids = set(
            [item for item, count in Counter(qids_to_ranked_candidate_passages[qid]).items() if count > 1])
        if len(duplicate_pids - set([0])) > 0:
            message = "Cannot rank a passage multiple times for a single query. QID={qid}, PID={pid}".format(
                qid=qid, pid=list(duplicate_pids)[0])
            allowed = False
    return allowed, message
def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Compute MRR metric
    Args:
        p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping
            Dict as read in with load_reference or load_reference_from_stream
        p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates
    Returns:
        dict: dictionary of metrics {'MRR': <MRR Score>}
    """
    all_scores = {}
    MRR = 0
    qids_with_relevant_passages = 0
    ranking = []
    recall_q_top1 = set()
    recall_q_top50 = set()
    recall_q_all = set()
    for qid, candidate_pid in qids_to_ranked_candidate_passages.items():
        # Only queries present in the reference contribute to the metrics.
        if qid not in qids_to_relevant_passageids:
            continue
        target_pid = qids_to_relevant_passageids[qid]
        # Rank 0 means "no hit within the MRR cutoff" for this query.
        ranking.append(0)
        for i in range(MaxMRRRank):
            if candidate_pid[i] in target_pid:
                MRR += 1.0 / (i + 1)
                ranking[-1] = i + 1
                break
        # First hit anywhere in the candidate list drives the recall sets.
        for i, pid in enumerate(candidate_pid):
            if pid in target_pid:
                recall_q_all.add(qid)
                if i < 50:
                    recall_q_top50.add(qid)
                if i == 0:
                    recall_q_top1.add(qid)
                break
    if len(ranking) == 0:
        raise IOError("No matching QIDs found. Are you sure you are scoring the evaluation set?")
    denom = len(qids_to_relevant_passageids)
    MRR = MRR / denom
    all_scores['MRR @10'] = MRR
    all_scores["recall@1"] = len(recall_q_top1) * 1.0 / denom
    all_scores["recall@50"] = len(recall_q_top50) * 1.0 / denom
    all_scores["recall@all"] = len(recall_q_all) * 1.0 / denom
    all_scores['QueriesRanked'] = len(qids_to_ranked_candidate_passages)
    return all_scores
def compute_metrics_from_files(path_to_reference, path_to_candidate, perform_checks=True):
    """Compute MRR metric
    Args:
    p_path_to_reference_file (str): path to reference file.
        Reference file should contain lines in the following format:
            QUERYID\tPASSAGEID
            Where PASSAGEID is a relevant passage for a query. Note QUERYID can repeat on different lines with different PASSAGEIDs
    p_path_to_candidate_file (str): path to candidate file.
        Candidate file sould contain lines in the following format:
            QUERYID\tPASSAGEID1\tRank
    Returns:
        dict: dictionary of metrics {'MRR': <MRR Score>}
    """
    qrels = load_reference(path_to_reference)
    candidates = load_candidate(path_to_candidate)
    if perform_checks:
        # Surface (but do not fail on) duplicate-passage problems.
        allowed, message = quality_checks_qids(qrels, candidates)
        if message != '':
            print(message)
    return compute_metrics(qrels, candidates)
def main():
    """Command line:
    python msmarco_eval_ranking.py <path_to_reference_file> <path_to_candidate_file>
    """
    if len(sys.argv) == 3:
        path_to_reference = sys.argv[1]
        path_to_candidate = sys.argv[2]
    else:
        # Without explicit arguments, fall back to the repo's default paths.
        path_to_reference = 'metric/qp_reference.all.tsv'
        path_to_candidate = 'metric/ranking_res'
    metrics = compute_metrics_from_files(path_to_reference, path_to_candidate)
    print('#####################')
    for name in sorted(metrics):
        print('{}: {}'.format(name, metrics[name]))
    print('#####################')


if __name__ == '__main__':
    main()
| 8,406 | 38.843602 | 161 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/metric/nq_eval.py | import sys
import numpy as np
sys.path.append('data_process/')
sys.path.append('metric/')
from dpr.utils.tokenizers import SimpleTokenizer
import utils
import unicodedata
# Usage: nq_eval.py <recall_candidates_file>
recall_cands_file = sys.argv[1]
topk = 100
# Gold answers and corpus text come from project-local `utils` helpers.
answers = utils.load_answers('test')
q_text, p_text, p_title = utils.load_corpus(corpus='nq', q_type='test')
cand_qp_all, train_qids = utils.load_candidates(recall_cands_file, col=4)
def has_answer(answers, text, tokenizer, match_type):
    # Return 1 if any gold answer appears as an uncased, NFD-normalized
    # token subsequence of `text`; only 'string' matching is implemented.
    text = unicodedata.normalize('NFD', text)
    if match_type == 'string':
        text = tokenizer.tokenize(text).words(uncased=True)
        for single_answer in answers:
            single_answer = unicodedata.normalize('NFD', single_answer)
            single_answer = tokenizer.tokenize(single_answer)
            single_answer = single_answer.words(uncased=True)
            # Slide the answer window across the passage tokens.
            for i in range(0, len(text) - len(single_answer) + 1):
                if single_answer == text[i: i+ len(single_answer)]:
                    return 1
    return 0
print('calculating acc')
# qids whose first answer-bearing passage falls within each cutoff.
right_top100 = set()
right_top50 = set()
right_top20 = set()
right_top10 = set()
right_top5 = set()
tok_opts = {}
tokenizer = SimpleTokenizer(**tok_opts)
for qid, pids in cand_qp_all.items():
    answer = answers[qid]
    for i, pid in enumerate(pids):
        if has_answer(answer, p_text[pid], tokenizer, 'string'):
            # i is the 0-based rank of the first hit; credit every cutoff it clears.
            if i < 100:
                right_top100.add(qid)
            if i < 50:
                right_top50.add(qid)
            if i < 20:
                right_top20.add(qid)
            if i < 10:
                right_top10.add(qid)
            if i < 5:
                right_top5.add(qid)
            break
query_num = len(cand_qp_all)
print(query_num)
print(len(right_top100))
r100 = len(right_top100) * 1.0 / query_num
r50 = len(right_top50) * 1.0 / query_num
r20 = len(right_top20) * 1.0 / query_num
r10 = len(right_top10) * 1.0 / query_num
r5 = len(right_top5) * 1.0 / query_num
print('recall@100: ' + str(r100))
print('recall@50: ' + str(r50))
print('recall@20: ' + str(r20))
print('recall@10: ' + str(r10))
print('recall@5: ' + str(r5))
| 2,204 | 31.426471 | 73 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/metric/dpr/utils/tokenizers.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Most of the tokenizers code here is copied from DrQA codebase to avoid adding extra dependency
"""
import copy
import logging
import regex
logger = logging.getLogger(__name__)
class Tokens(object):
    """Container for tokenized text; each entry is a tuple of token fields."""
    # Field indices within each token tuple.
    TEXT = 0
    TEXT_WS = 1
    SPAN = 2
    POS = 3
    LEMMA = 4
    NER = 5

    def __init__(self, data, annotators, opts=None):
        self.data = data
        self.annotators = annotators
        self.opts = opts or {}

    def __len__(self):
        """Number of tokens held."""
        return len(self.data)

    def slice(self, i=None, j=None):
        """Shallow-copied view restricted to tokens [i, j)."""
        view = copy.copy(self)
        view.data = self.data[i: j]
        return view

    def untokenize(self):
        """Rebuild the original text from the token+trailing-whitespace fields."""
        return ''.join(tok[self.TEXT_WS] for tok in self.data).strip()

    def words(self, uncased=False):
        """Token strings, optionally lower-cased.

        Args:
            uncased: lower cases text
        """
        if uncased:
            return [tok[self.TEXT].lower() for tok in self.data]
        return [tok[self.TEXT] for tok in self.data]

    def offsets(self):
        """[start, end) character offsets for each token."""
        return [tok[self.SPAN] for tok in self.data]

    def pos(self):
        """POS tags per token, or None if that annotator was not run."""
        if 'pos' not in self.annotators:
            return None
        return [tok[self.POS] for tok in self.data]

    def lemmas(self):
        """Lemmas per token, or None if that annotator was not run."""
        if 'lemma' not in self.annotators:
            return None
        return [tok[self.LEMMA] for tok in self.data]

    def entities(self):
        """NER tags per token, or None if that annotator was not run."""
        if 'ner' not in self.annotators:
            return None
        return [tok[self.NER] for tok in self.data]

    def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):
        """All ngrams of length 1..n.

        Args:
            n: upper limit of ngram length
            uncased: lower cases text
            filter_fn: predicate over a token list; truthy result drops the ngram
            as_strings: join each ngram's tokens with spaces instead of spans
        """
        def _skip(gram):
            return filter_fn(gram) if filter_fn else False

        words = self.words(uncased)
        spans = [(s, e + 1)
                 for s in range(len(words))
                 for e in range(s, min(s + n, len(words)))
                 if not _skip(words[s:e + 1])]
        if as_strings:
            # Concatenate each span into a space-joined string.
            return ['{}'.format(' '.join(words[s:e])) for (s, e) in spans]
        return spans

    def entity_groups(self):
        """Merge consecutive tokens sharing a non-background NER tag."""
        entities = self.entities()
        if not entities:
            return None
        non_ent = self.opts.get('non_ent', 'O')
        groups = []
        idx = 0
        while idx < len(entities):
            tag = entities[idx]
            if tag == non_ent:
                idx += 1
                continue
            # Extend over the whole run of identical tags.
            start = idx
            while idx < len(entities) and entities[idx] == tag:
                idx += 1
            groups.append((self.slice(start, idx).untokenize(), tag))
        return groups
class Tokenizer(object):
    """Abstract base for tokenizers; subclasses must return a Tokens object."""

    def tokenize(self, text):
        """Tokenize *text*; must be overridden by subclasses."""
        raise NotImplementedError

    def shutdown(self):
        """Release any held resources; the default is a no-op."""
        pass

    def __del__(self):
        # Best-effort cleanup when the tokenizer is garbage collected.
        self.shutdown()
class SimpleTokenizer(Tokenizer):
    """Regex tokenizer: runs of alphanumerics, else single non-space chars."""
    ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'
    NON_WS = r'[^\p{Z}\p{C}]'

    def __init__(self, **kwargs):
        """
        Args:
            annotators: None or empty set (only tokenizes).
        """
        pattern = '(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS)
        self._regexp = regex.compile(
            pattern,
            flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
        )
        # This tokenizer cannot annotate; warn if annotators were requested.
        if len(kwargs.get('annotators', {})) > 0:
            logger.warning('%s only tokenizes! Skipping annotators: %s' %
                           (type(self).__name__, kwargs.get('annotators')))
        self.annotators = set()

    def tokenize(self, text):
        """Tokenize *text*, recording each token's trailing whitespace and span."""
        found = list(self._regexp.finditer(text))
        total = len(found)
        data = []
        for i, m in enumerate(found):
            token = m.group()
            span = m.span()
            start_ws = span[0]
            # Trailing whitespace extends to the next token's start (or token end).
            end_ws = found[i + 1].span()[0] if i + 1 < total else span[1]
            data.append((
                token,
                text[start_ws: end_ws],
                span,
            ))
        return Tokens(data, self.annotators)
| 5,679 | 28.278351 | 94 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/optimization.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimization and learning rate scheduling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
import paddle.fluid as fluid
from utils.fp16 import create_master_params_grads, master_param_to_train_param, apply_dynamic_loss_scaling
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
def linear_warmup_decay(learning_rate, warmup_steps, num_train_steps):
    """ Applies linear warmup of learning rate from 0 and decay to 0."""
    with fluid.default_main_program()._lr_schedule_guard():
        # Persistent scalar variable holding the current LR, updated each step.
        lr = fluid.layers.tensor.create_global_var(
            shape=[1],
            value=0.0,
            dtype='float32',
            persistable=True,
            name="scheduled_learning_rate")
        global_step = fluid.layers.learning_rate_scheduler._decay_step_counter()
        with fluid.layers.control_flow.Switch() as switch:
            with switch.case(global_step < warmup_steps):
                # Warmup: LR ramps linearly 0 -> learning_rate over warmup_steps.
                warmup_lr = learning_rate * (global_step / warmup_steps)
                fluid.layers.tensor.assign(warmup_lr, lr)
            with switch.default():
                # Afterwards: linear (power=1) decay to 0 at num_train_steps.
                decayed_lr = fluid.layers.learning_rate_scheduler.polynomial_decay(
                    learning_rate=learning_rate,
                    decay_steps=num_train_steps,
                    end_learning_rate=0.0,
                    power=1.0,
                    cycle=False)
                fluid.layers.tensor.assign(decayed_lr, lr)
        return lr
def optimization(loss,
                 warmup_steps,
                 num_train_steps,
                 learning_rate,
                 train_program,
                 startup_prog,
                 weight_decay,
                 scheduler='linear_warmup_decay',
                 use_fp16=False,
                 use_dynamic_loss_scaling=False,
                 init_loss_scaling=1.0,
                 incr_every_n_steps=1000,
                 decr_every_n_nan_or_inf=2,
                 incr_ratio=2.0,
                 decr_ratio=0.8,
                 dist_strategy=None):
    """Build an Adam optimizer with LR scheduling, global-norm gradient
    clipping, decoupled weight decay, optional FP16 loss scaling, and
    optional fleet distributed training. Returns (scheduled_lr, loss_scaling).
    """
    if warmup_steps > 0:
        if scheduler == 'noam_decay':
            scheduled_lr = fluid.layers.learning_rate_scheduler\
                .noam_decay(1/(warmup_steps *(learning_rate ** 2)),
                            warmup_steps)
        elif scheduler == 'linear_warmup_decay':
            scheduled_lr = linear_warmup_decay(learning_rate, warmup_steps,
                                               num_train_steps)
        else:
            raise ValueError("Unkown learning rate scheduler, should be "
                             "'noam_decay' or 'linear_warmup_decay'")
        optimizer = fluid.optimizer.Adam(learning_rate=scheduled_lr)
    else:
        # No warmup: hold a constant LR in a persistable global variable so
        # the weight-decay step below can still scale by it.
        scheduled_lr = fluid.layers.create_global_var(
            name=fluid.unique_name.generate("learning_rate"),
            shape=[1],
            value=learning_rate,
            dtype='float32',
            persistable=True)
        optimizer = fluid.optimizer.Adam(learning_rate=scheduled_lr)
        optimizer._learning_rate_map[fluid.default_main_program(
        )] = scheduled_lr
    # Clip all gradients by global norm 1.0.
    fluid.clip.set_gradient_clip(
        clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0))
    def exclude_from_weight_decay(name):
        # LayerNorm parameters and biases are exempt from weight decay.
        if name.find("layer_norm") > -1:
            return True
        bias_suffix = ["_bias", "_b", ".b_0"]
        for suffix in bias_suffix:
            if name.endswith(suffix):
                return True
        return False
    param_list = dict()
    loss_scaling = fluid.layers.create_global_var(
        name=fluid.unique_name.generate("loss_scaling"),
        shape=[1],
        value=init_loss_scaling,
        dtype='float32',
        persistable=True)
    if use_fp16:
        # Scale the loss so FP16 gradients don't underflow; FP32 master
        # copies of the parameters receive the actual updates.
        loss *= loss_scaling
        param_grads = optimizer.backward(loss)
        master_param_grads = create_master_params_grads(
            param_grads, train_program, startup_prog, loss_scaling)
        # Snapshot pre-update parameter values for decoupled weight decay.
        for param, _ in master_param_grads:
            param_list[param.name] = param * 1.0
            param_list[param.name].stop_gradient = True
        if use_dynamic_loss_scaling:
            apply_dynamic_loss_scaling(
                loss_scaling, master_param_grads, incr_every_n_steps,
                decr_every_n_nan_or_inf, incr_ratio, decr_ratio)
        optimizer.apply_gradients(master_param_grads)
        # Decoupled weight decay: param -= old_param * decay * lr.
        if weight_decay > 0:
            for param, grad in master_param_grads:
                if exclude_from_weight_decay(param.name.rstrip(".master")):
                    continue
                with param.block.program._optimized_guard(
                    [param, grad]), fluid.framework.name_scope("weight_decay"):
                    updated_param = param - param_list[
                        param.name] * weight_decay * scheduled_lr
                    fluid.layers.assign(output=param, input=updated_param)
        master_param_to_train_param(master_param_grads, param_grads,
                                    train_program)
    else:
        # FP32 path: snapshot all trainable params before the Adam update.
        for param in train_program.global_block().all_parameters():
            param_list[param.name] = param * 1.0
            param_list[param.name].stop_gradient = True
        if dist_strategy is not None:
            # use fleet api
            optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)
        _, param_grads = optimizer.minimize(loss)
        # Decoupled weight decay applied after the Adam step.
        if weight_decay > 0:
            for param, grad in param_grads:
                if exclude_from_weight_decay(param.name):
                    continue
                with param.block.program._optimized_guard(
                    [param, grad]), fluid.framework.name_scope("weight_decay"):
                    updated_param = param - param_list[
                        param.name] * weight_decay * scheduled_lr
                    fluid.layers.assign(output=param, input=updated_param)
    return scheduled_lr, loss_scaling
| 6,674 | 38.497041 | 106 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/tokenization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from io import open
import collections
import unicodedata
import six
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    # Guard-clause form of the original Py2/Py3 dispatch via `six`.
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        if isinstance(text, unicode):
            return text
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
    """Coerce *text* to the native `str` type suitable for print/logging.

    Python 3 wants unicode `str`; Python 2 wants a utf-8 encoded byte `str`.
    """
    if six.PY3:
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        if isinstance(text, str):
            return text
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        if isinstance(text, unicode):
            return text.encode("utf-8")
        if isinstance(text, str):
            return text
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
    """Read a vocab file into an OrderedDict mapping token -> integer id.

    Each line is either "token" (id defaults to the line number) or
    "token<TAB>id".  Reading stops at the first line that has more than
    two tab-separated fields.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, encoding='utf8') as reader:
        for line_no, raw_line in enumerate(reader):
            fields = convert_to_unicode(raw_line.strip()).split("\t")
            if len(fields) > 2:
                break
            if len(fields) == 2:
                token, index = fields[0], fields[1]
            else:
                token, index = fields[0], line_no
            vocab[token.strip()] = int(index)
    return vocab
def convert_by_vocab(vocab, items):
    """Map every element of *items* through the *vocab* lookup table.

    Works in both directions (token->id and id->token) depending on the
    mapping passed in; raises KeyError on out-of-vocabulary items.
    """
    return [vocab[item] for item in items]
def convert_tokens_to_ids(vocab, tokens):
    # Convenience wrapper: map token strings to integer ids via `vocab`.
    return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
    # Convenience wrapper: map integer ids back to tokens via `inv_vocab`.
    return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
    """Split *text* on runs of whitespace after trimming; [] for blank input."""
    stripped = text.strip()
    return stripped.split() if stripped else []
class FullTokenizer(object):
    """End-to-end tokenizer: basic (case/punctuation) pass, then WordPiece."""

    def __init__(self, vocab_file, do_lower_case=True):
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {idx: tok for tok, idx in self.vocab.items()}
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        """Split *text* into vocabulary word pieces."""
        pieces = []
        for word in self.basic_tokenizer.tokenize(text):
            pieces.extend(self.wordpiece_tokenizer.tokenize(word))
        return pieces

    def convert_tokens_to_ids(self, tokens):
        """Translate token strings into their integer ids."""
        return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        """Translate integer ids back into token strings."""
        return convert_by_vocab(self.inv_vocab, ids)
class CharTokenizer(object):
    """Tokenizer that lowercases, splits on single spaces, then WordPiece.

    NOTE: `do_lower_case` is accepted for interface parity but tokenize()
    always lowercases, matching the original behavior.
    """

    def __init__(self, vocab_file, do_lower_case=True):
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {idx: tok for tok, idx in self.vocab.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        """Lowercase *text*, split on single spaces, wordpiece each chunk."""
        pieces = []
        # split(" ") keeps empty chunks for runs of spaces; WordpieceTokenizer
        # yields nothing for them, so the output is unaffected.
        for chunk in text.lower().split(" "):
            pieces.extend(self.wordpiece_tokenizer.tokenize(chunk))
        return pieces

    def convert_tokens_to_ids(self, tokens):
        """Translate token strings into their integer ids."""
        return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        """Translate integer ids back into token strings."""
        return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
    """Runs basic tokenization: cleanup, whitespace/punctuation splitting,
    optional lower-casing and accent stripping."""

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.

        Args:
            do_lower_case: Whether to lower case (and strip accents from)
                the input.
        """
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Clean, whitespace-split, and punctuation-split a piece of text."""
        text = self._clean_text(convert_to_unicode(text))
        # Pad CJK ideographs with spaces so each becomes its own token.
        # (Added November 2018 for the multilingual/Chinese models; harmless
        # for the English models, whose training data contains essentially
        # no CJK text.)
        text = self._tokenize_chinese_chars(text)
        pieces = []
        for word in whitespace_tokenize(text):
            if self.do_lower_case:
                word = self._run_strip_accents(word.lower())
            pieces.extend(self._run_split_on_punc(word))
        return whitespace_tokenize(" ".join(pieces))

    def _run_strip_accents(self, text):
        """Drop combining marks (Unicode category Mn) after NFD decomposition."""
        decomposed = unicodedata.normalize("NFD", text)
        return "".join(
            ch for ch in decomposed if unicodedata.category(ch) != "Mn")

    def _run_split_on_punc(self, text):
        """Split *text* into runs, isolating every punctuation character."""
        pieces = []
        word_open = False
        for ch in text:
            if _is_punctuation(ch):
                # Punctuation always forms its own one-char piece.
                pieces.append([ch])
                word_open = False
            else:
                if not word_open:
                    pieces.append([])
                    word_open = True
                pieces[-1].append(ch)
        return ["".join(piece) for piece in pieces]

    def _tokenize_chinese_chars(self, text):
        """Surround every CJK ideograph in *text* with spaces."""
        out = []
        for ch in text:
            if self._is_chinese_char(ord(ch)):
                out.append(" " + ch + " ")
            else:
                out.append(ch)
        return "".join(out)

    def _is_chinese_char(self, cp):
        """True if codepoint *cp* lies in a CJK-ideograph Unicode block.

        Covers CJK Unified Ideographs, the extension blocks and the
        compatibility blocks.  Hangul, Hiragana and Katakana are NOT
        included: those scripts use space-separated words and need no
        per-character treatment.
        """
        cjk_ranges = (
            (0x4E00, 0x9FFF), (0x3400, 0x4DBF), (0x20000, 0x2A6DF),
            (0x2A700, 0x2B73F), (0x2B740, 0x2B81F), (0x2B820, 0x2CEAF),
            (0xF900, 0xFAFF), (0x2F800, 0x2FA1F),
        )
        return any(lo <= cp <= hi for lo, hi in cjk_ranges)

    def _clean_text(self, text):
        """Drop NUL/replacement/control chars; canonicalize whitespace to ' '."""
        out = []
        for ch in text:
            code = ord(ch)
            if code == 0 or code == 0xfffd or _is_control(ch):
                continue
            out.append(" " if _is_whitespace(ch) else ch)
        return "".join(out)
class WordpieceTokenizer(object):
    """Greedy longest-match-first WordPiece tokenizer."""

    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        Uses a greedy longest-match-first algorithm against the vocabulary,
        e.g. "unaffable" -> ["un", "##aff", "##able"].  *text* should already
        have been passed through `BasicTokenizer`.  Words with no vocabulary
        cover, or longer than `max_input_chars_per_word`, map to `unk_token`.

        Returns:
            A list of wordpiece tokens.
        """
        output_tokens = []
        for word in whitespace_tokenize(convert_to_unicode(text)):
            chars = list(word)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            pieces = []
            start = 0
            failed = False
            while start < len(chars):
                # Find the longest vocab entry beginning at `start`;
                # non-initial pieces carry the "##" continuation prefix.
                end = len(chars)
                match = None
                while start < end:
                    candidate = "".join(chars[start:end])
                    if start > 0:
                        candidate = "##" + candidate
                    if candidate in self.vocab:
                        match = candidate
                        break
                    end -= 1
                if match is None:
                    failed = True
                    break
                pieces.append(match)
                start = end
            output_tokens.extend([self.unk_token] if failed else pieces)
        return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def tokenize_chinese_chars(text):
    """Split *text* into chunks, isolating CJK characters and whitespace.

    Runs of non-CJK, non-whitespace characters are emitted as single
    chunks; every CJK ideograph and every whitespace character becomes
    its own one-character chunk.
    """

    def _is_chinese_char(cp):
        """True if codepoint *cp* lies in a CJK-ideograph Unicode block."""
        # CJK Unified Ideographs + extension blocks + compatibility blocks.
        # Hangul/Hiragana/Katakana are excluded: those scripts use
        # space-separated words and need no per-character splitting.
        cjk_ranges = (
            (0x4E00, 0x9FFF), (0x3400, 0x4DBF), (0x20000, 0x2A6DF),
            (0x2A700, 0x2B73F), (0x2B740, 0x2B81F), (0x2B820, 0x2CEAF),
            (0xF900, 0xFAFF), (0x2F800, 0x2FA1F),
        )
        return any(lo <= cp <= hi for lo, hi in cjk_ranges)

    def _is_whitespace(c):
        # Narrow whitespace set: space, tab, CR, LF, NARROW NO-BREAK SPACE.
        return c in (" ", "\t", "\r", "\n") or ord(c) == 0x202F

    chunks = []
    pending = ""
    for ch in text:
        if _is_chinese_char(ord(ch)) or _is_whitespace(ch):
            # Flush the buffered run, then emit the delimiter char itself.
            if pending:
                chunks.append(pending)
                pending = ""
            chunks.append(ch)
        else:
            pending += ch
    if pending:
        chunks.append(pending)
    return chunks
| 14,348 | 32.921986 | 84 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/inference_de.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning on classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import time
import logging
import multiprocessing
import numpy as np
# NOTE(paddle-dev): All of these flags should be
# set before `import paddle`. Otherwise, it would
# not take any effect.
os.environ['FLAGS_eager_delete_tensor_gb'] = '0' # enable gc
import paddle.fluid as fluid
import reader.reader_de_infer as reader_de_infer
from model.ernie import ErnieConfig
from finetune.dual_encoder_infer import create_model, predict
from utils.args import print_arguments, check_cuda, prepare_logger
from utils.init import init_pretraining_params, init_checkpoint
from finetune_args import parser
args = parser.parse_args()
log = logging.getLogger()
def main(args):
    """Build the dual-encoder inference program, restore a checkpoint, and
    run prediction over every file listed in args.test_set.

    Requires args.init_checkpoint and args.test_save to be set; outputs are
    produced by finetune.dual_encoder_infer.predict.
    """
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()
    # Pick the execution place: first GPU when CUDA is enabled, else CPU
    # with CPU_NUM (or the machine core count) as the device count.
    if args.use_cuda:
        dev_list = fluid.cuda_places()
        place = dev_list[0]
        dev_count = len(dev_list)
    else:
        place = fluid.CPUPlace()
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
    exe = fluid.Executor(place)
    reader = reader_de_infer.ClassifyReader(
        vocab_path=args.vocab_path,
        label_map_config=args.label_map_config,
        q_max_seq_len=args.q_max_seq_len,
        p_max_seq_len=args.p_max_seq_len,
        do_lower_case=args.do_lower_case,
        in_tokens=args.in_tokens,
        random_seed=args.random_seed,
        tokenizer=args.tokenizer,
        for_cn=args.for_cn,
        task_id=args.task_id)
    assert args.test_save is not None
    # Build the inference graph in a dedicated program and freeze it.
    startup_prog = fluid.Program()
    test_prog = fluid.Program()
    with fluid.program_guard(test_prog, startup_prog):
        with fluid.unique_name.guard():
            test_pyreader, graph_vars = create_model(
                args,
                pyreader_name='test_reader',
                ernie_config=ernie_config,
                batch_size=args.batch_size,
                is_prediction=True)
    test_prog = test_prog.clone(for_test=True)
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    # Inference is meaningless without trained weights.
    if not args.init_checkpoint:
        raise ValueError("args 'init_checkpoint' should be set if"
                         "only doing validation or testing!")
    init_checkpoint(
        exe,
        args.init_checkpoint,
        main_program=startup_prog)
    # test_set/test_save are parallel comma-separated lists.
    test_sets = args.test_set.split(',')
    save_dirs = args.test_save.split(',')
    assert len(test_sets) == len(save_dirs)
    batch_size = args.batch_size if args.predict_batch_size is None else args.predict_batch_size
    for test_f, save_f in zip(test_sets, save_dirs):
        test_pyreader.decorate_tensor_provider(
            reader.data_generator(
                test_f,
                batch_size=batch_size,
                epoch=1,
                dev_count=1,
                shuffle=False))
        save_path = save_f
        log.info("testing {}, save to {}".format(test_f, save_path))
        predict(
            args,
            exe,
            test_prog,
            test_pyreader,
            graph_vars,
            output_item=args.output_item,
            output_file_name=args.output_file_name,
            hidden_size=ernie_config['hidden_size'])
if __name__ == '__main__':
    # Script entry point: configure logging, echo the parsed args,
    # sanity-check the CUDA setting, then run inference.
    prepare_logger(log)
    print_arguments(args)
    check_cuda(args.use_cuda)
    main(args)
| 4,126 | 31.496063 | 96 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/finetune_args.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import time
import argparse
from utils.args import ArgumentGroup
# yapf: disable
# Command-line interface shared by the fine-tuning/inference scripts.
# Arguments are grouped (model / training / logging / data / run_type)
# via utils.args.ArgumentGroup.
parser = argparse.ArgumentParser(__doc__)
# --- model configuration and checkpoint paths ---
model_g = ArgumentGroup(parser, "model", "model configuration and paths.")
model_g.add_arg("ernie_config_path", str, None, "Path to the json file for ernie model config.")
model_g.add_arg("init_checkpoint", str, None, "Init checkpoint to resume training from.")
model_g.add_arg("init_pretraining_params", str, None,
                "Init pre-training params which preforms fine-tuning from. If the "
                "arg 'init_checkpoint' has been set, this argument wouldn't be valid.")
model_g.add_arg("checkpoints", str, "checkpoints", "Path to save checkpoints.")
model_g.add_arg("is_classify", bool, True, "is_classify")
model_g.add_arg("is_regression", bool, False, "is_regression")
model_g.add_arg("task_id", int, 0, "task id")
# --- optimization / schedule / mixed-precision options ---
train_g = ArgumentGroup(parser, "training", "training options.")
train_g.add_arg("epoch", int, 3, "Number of epoches for fine-tuning.")
train_g.add_arg("learning_rate", float, 5e-5, "Learning rate used to train with warmup.")
train_g.add_arg("lr_scheduler", str, "linear_warmup_decay",
                "scheduler of learning rate.", choices=['linear_warmup_decay', 'noam_decay'])
train_g.add_arg("weight_decay", float, 0.01, "Weight decay rate for L2 regularizer.")
train_g.add_arg("warmup_proportion", float, 0.1,
                "Proportion of training steps to perform linear learning rate warmup for.")
train_g.add_arg("save_steps", int, 10000, "The steps interval to save checkpoints.")
train_g.add_arg("validation_steps", int, 1000, "The steps interval to evaluate model performance.")
train_g.add_arg("use_fp16", bool, False, "Whether to use fp16 mixed precision training.")
train_g.add_arg("use_recompute", bool, False, "Whether to use recompute optimizer for training.")
train_g.add_arg("use_mix_precision", bool, False, "Whether to use mix-precision optimizer for training.")
train_g.add_arg("use_dynamic_loss_scaling", bool, True, "Whether to use dynamic loss scaling.")
train_g.add_arg("init_loss_scaling", float, 102400,
                "Loss scaling factor for mixed precision training, only valid when use_fp16 is enabled.")
train_g.add_arg("test_save", str, "./checkpoints/test_result", "test_save")
train_g.add_arg("metric", str, "simple_accuracy", "metric")
train_g.add_arg("incr_every_n_steps", int, 100, "Increases loss scaling every n consecutive.")
train_g.add_arg("decr_every_n_nan_or_inf", int, 2,
                "Decreases loss scaling every n accumulated steps with nan or inf gradients.")
train_g.add_arg("incr_ratio", float, 2.0,
                "The multiplier to use when increasing the loss scaling.")
train_g.add_arg("decr_ratio", float, 0.8,
                "The less-than-one-multiplier to use when decreasing.")
# --- logging verbosity ---
log_g = ArgumentGroup(parser, "logging", "logging related.")
log_g.add_arg("skip_steps", int, 10, "The steps interval to print loss.")
log_g.add_arg("verbose", bool, False, "Whether to output verbose log.")
# --- dataset paths, tokenization and batching ---
data_g = ArgumentGroup(parser, "data", "Data paths, vocab paths and data processing options")
data_g.add_arg("tokenizer", str, "FullTokenizer",
               "ATTENTION: the INPUT must be splited by Word with blank while using SentencepieceTokenizer or WordsegTokenizer")
data_g.add_arg("train_set", str, None, "Path to training data.")
data_g.add_arg("test_set", str, None, "Path to test data.")
data_g.add_arg("dev_set", str, None, "Path to validation data.")
data_g.add_arg("vocab_path", str, None, "Vocabulary path.")
data_g.add_arg("q_max_seq_len", int, 512, "Number of words of the longest seqence.")
data_g.add_arg("p_max_seq_len", int, 512, "Number of words of the longest seqence.")
data_g.add_arg("train_data_size", int, 0, "Number of training data's total examples. Set for distribute.")
data_g.add_arg("p_cnt_per_q", int, 1, "Number of training data's total examples. Set for distribute.")
data_g.add_arg("batch_size", int, 32, "Total examples' number in batch for training. see also --in_tokens.")
data_g.add_arg("predict_batch_size", int, None, "Total examples' number in batch for predict. see also --in_tokens.")
data_g.add_arg("in_tokens", bool, False,
               "If set, the batch size will be the maximum number of tokens in one batch. "
               "Otherwise, it will be the maximum number of examples in one batch.")
data_g.add_arg("do_lower_case", bool, True,
               "Whether to lower case the input text. Should be True for uncased models and False for cased models.")
data_g.add_arg("random_seed", int, None, "Random seed.")
data_g.add_arg("label_map_config", str, None, "label_map_path.")
data_g.add_arg("num_labels", int, 2, "label number")
data_g.add_arg("diagnostic", str, None, "GLUE Diagnostic Dataset")
data_g.add_arg("diagnostic_save", str, None, "GLUE Diagnostic save f")
data_g.add_arg("max_query_length", int, 64, "Max query length.")
data_g.add_arg("max_answer_length", int, 100, "Max answer length.")
data_g.add_arg("doc_stride", int, 128,
               "When splitting up a long document into chunks, how much stride to take between chunks.")
data_g.add_arg("n_best_size", int, 20,
               "The total number of n-best predictions to generate in the nbest_predictions.json output file.")
data_g.add_arg("chunk_scheme", type=str, default="IOB", choices=["IO", "IOB", "IOE", "IOBES"], help="chunk scheme")
# --- execution mode flags (device, distribution, which phases to run) ---
run_type_g = ArgumentGroup(parser, "run_type", "running type options.")
run_type_g.add_arg("use_cuda", bool, True, "If set, use GPU for training.")
run_type_g.add_arg("is_distributed", bool, False, "If set, then start distributed training.")
run_type_g.add_arg("use_fast_executor", bool, False, "If set, use fast parallel executor (in experiment).")
run_type_g.add_arg("num_iteration_per_drop_scope", int, 10, "Iteration intervals to drop scope.")
run_type_g.add_arg("do_train", bool, True, "Whether to perform training.")
run_type_g.add_arg("do_val", bool, True, "Whether to perform evaluation on dev data set.")
run_type_g.add_arg("do_test", bool, True, "Whether to perform evaluation on test data set.")
run_type_g.add_arg("use_multi_gpu_test", bool, False, "Whether to perform evaluation using multiple gpu cards")
run_type_g.add_arg("metrics", bool, True, "Whether to perform evaluation on test data set.")
run_type_g.add_arg("shuffle", bool, True, "")
run_type_g.add_arg("for_cn", bool, True, "model train for cn or for other langs.")
parser.add_argument("--enable_ce", action='store_true', help="The flag indicating whether to run the task for continuous evaluation.")
# yapf: enable
| 8,173 | 67.116667 | 134 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/merge.py | import sys
# Merge per-shard retrieval results ("res.top<top>-part<p>") into one ranked
# list per query.  Each part file holds rows "qid \t local_passage_id \t ...
# \t score" in the same query order; passage ids are offset by `shift`*part
# to recover global ids.  Usage: merge.py <shift> <top>
total_part = 8
shift = int(sys.argv[1])
top = int(sys.argv[2])
f_list = []
for part in range(total_part):
    f0 = open('res.top%s-part%s' % (top, part))
    f_list.append(f0)
# Read the files in lockstep, one line from every part per iteration.
line_list = []
for part in range(total_part):
    line = f_list[part].readline()
    line_list.append(line)
out = open('output/res.top%s' % top, 'w')
last_q = ''
ans_list = {}
while line_list[-1]:
    cur_list = []
    for line in line_list:
        sub = line.strip().split('\t')
        cur_list.append(sub)
    if last_q == '':
        last_q = cur_list[0][0]
    if cur_list[0][0] != last_q:
        # Query changed: flush the previous query's top-k by descending score.
        rank = sorted(ans_list.items(), key = lambda a:a[1], reverse=True)
        for i in range(top):
            out.write("%s\t%s\t%s\t%s\n" % (last_q, rank[i][0], i+1, rank[i][1]))
        ans_list = {}
    # Accumulate this row from every part, shifting local ids to global ids.
    for i, sub in enumerate(cur_list):
        ans_list[int(sub[1]) + shift*i] = float(sub[-1])
    last_q = cur_list[0][0]
    line_list = []
    for f0 in f_list:
        line = f0.readline()
        line_list.append(line)
# Flush the final query after the files are exhausted.
rank = sorted(ans_list.items(), key = lambda a:a[1], reverse=True)
for i in range(top):
    out.write("%s\t%s\t%s\t%s\n" % (last_q, rank[i][0], i+1, rank[i][1]))
out.close()
print('output/res.top%s' % top)
| 1,230 | 24.645833 | 81 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/batching.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mask, padding and batching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange
def pad_batch_data(insts,
                   pad_idx=0,
                   return_pos=False,
                   return_input_mask=False,
                   return_max_len=False,
                   return_num_token=False,
                   return_seq_lens=False):
    """
    Pad every instance in the batch to the longest sequence length, and
    optionally also return position ids, an attention input mask, the max
    length, the total token count, and per-instance sequence lengths.
    """
    outputs = []
    max_len = max(len(inst) for inst in insts)
    # The padding token choice is irrelevant: padded positions are masked
    # out of the loss, so they never influence parameter gradients.
    padded = np.array(
        [list(inst) + [pad_idx] * (max_len - len(inst)) for inst in insts])
    outputs.append(padded.astype("int64").reshape([-1, max_len, 1]))
    if return_pos:
        # Position ids 0..len-1, with pad_idx filling the padded tail.
        positions = np.array(
            [list(range(len(inst))) + [pad_idx] * (max_len - len(inst))
             for inst in insts])
        outputs.append(positions.astype("int64").reshape([-1, max_len, 1]))
    if return_input_mask:
        # 1.0 over real tokens, 0.0 over padding; used to block attention
        # to padded positions.
        mask = np.array(
            [[1] * len(inst) + [0] * (max_len - len(inst)) for inst in insts])
        outputs.append(np.expand_dims(mask, axis=-1).astype("float32"))
    if return_max_len:
        outputs.append(max_len)
    if return_num_token:
        outputs.append(sum(len(inst) for inst in insts))
    if return_seq_lens:
        seq_lens = np.array([len(inst) for inst in insts])
        outputs.append(seq_lens.astype("int64").reshape([-1, 1]))
    return outputs if len(outputs) > 1 else outputs[0]
if __name__ == "__main__":
    # This module has no standalone behavior; it is import-only.
    pass
| 2,683 | 33.410256 | 78 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/train_je.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning on classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import time
import logging
import multiprocessing
# NOTE(paddle-dev): All of these flags should be
# set before `import paddle`. Otherwise, it would
# not take any effect.
os.environ['FLAGS_eager_delete_tensor_gb'] = '0' # enable gc
import paddle.fluid as fluid
import reader.task_reader_shuffle as task_reader
from model.ernie import ErnieConfig
from finetune.joint_encoder import create_model, evaluate, predict
from optimization import optimization
from utils.args import print_arguments, check_cuda, prepare_logger
from utils.init import init_pretraining_params, init_checkpoint
from utils.cards import get_cards
from finetune_args import parser
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
args = parser.parse_args()
log = logging.getLogger()
def main(args):
    """Joint-encoder fine-tuning driver: build train/test programs with
    Fleet collective training, restore parameters, run the training loop
    with periodic checkpointing/validation, then final eval/predict.

    Behavior is controlled by args.do_train / args.do_val / args.do_test;
    at least one must be True.
    """
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()
    # Pick the execution place: first GPU when CUDA is enabled, else CPU.
    if args.use_cuda:
        dev_list = fluid.cuda_places()
        place = dev_list[0]
        dev_count = len(dev_list)
    else:
        place = fluid.CPUPlace()
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
    exe = fluid.Executor(place)
    reader = task_reader.ClassifyReader(
        vocab_path=args.vocab_path,
        label_map_config=args.label_map_config,
        q_max_seq_len=args.q_max_seq_len,
        p_max_seq_len=args.p_max_seq_len,
        total_num=args.train_data_size,
        do_lower_case=args.do_lower_case,
        in_tokens=args.in_tokens,
        random_seed=args.random_seed,
        tokenizer=args.tokenizer,
        is_classify=args.is_classify,
        is_regression=args.is_regression,
        for_cn=args.for_cn,
        task_id=args.task_id)
    if not (args.do_train or args.do_val or args.do_test):
        raise ValueError("For args `do_train`, `do_val` and `do_test`, at "
                         "least one of them must be True.")
    if args.do_test:
        assert args.test_save is not None
    startup_prog = fluid.Program()
    if args.random_seed is not None:
        startup_prog.random_seed = args.random_seed
    if args.predict_batch_size == None:
        args.predict_batch_size = args.batch_size
    if args.do_train:
        # Initialize Fleet collective training; device count becomes the
        # number of distributed workers.
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        dev_count = fleet.worker_num()
        train_data_generator = reader.data_generator(
            input_file=args.train_set,
            batch_size=args.batch_size,
            epoch=args.epoch,
            dev_count=1,
            trainer_id=fleet.worker_index(),
            trainer_num=fleet.worker_num(),
            shuffle=True,
            p_cnt_per_q=args.p_cnt_per_q,
            phase="train")
        num_train_examples = reader.get_num_examples(args.train_set)
        # Derive the total step budget and the warmup portion from it.
        if args.in_tokens:
            max_train_steps = args.epoch * num_train_examples // (
                args.batch_size // args.max_seq_len) // dev_count
        else:
            max_train_steps = args.epoch * num_train_examples // args.batch_size // dev_count
        warmup_steps = int(max_train_steps * args.warmup_proportion)
        log.info("Device count: %d" % dev_count)
        log.info("Num train examples: %d" % num_train_examples)
        log.info("Max train steps: %d" % max_train_steps)
        log.info("Num warmup steps: %d" % warmup_steps)
        train_program = fluid.Program()
        if args.random_seed is not None and args.enable_ce:
            train_program.random_seed = args.random_seed
        # use fleet api
        exec_strategy = fluid.ExecutionStrategy()
        if args.use_fast_executor:
            exec_strategy.use_experimental_executor = True
        exec_strategy.num_threads = dev_count
        if args.is_distributed:
            exec_strategy.num_threads = 3
        exec_strategy.num_iteration_per_drop_scope = args.num_iteration_per_drop_scope
        dist_strategy = DistributedStrategy()
        dist_strategy.exec_strategy = exec_strategy
        dist_strategy.nccl_comm_num = 1
        if args.is_distributed:
            dist_strategy.nccl_comm_num = 2
            dist_strategy.use_hierarchical_allreduce = True
        if args.use_recompute:
            dist_strategy.forward_recompute = True
            dist_strategy.enable_sequential_execution = True
        if args.use_mix_precision:
            dist_strategy.use_amp = True
        # Build the training graph and attach the optimizer inside the
        # program/unique-name guards.
        with fluid.program_guard(train_program, startup_prog):
            with fluid.unique_name.guard():
                train_pyreader, graph_vars, checkpoints = create_model(
                    args,
                    pyreader_name='train_reader',
                    ernie_config=ernie_config,
                    is_classify=args.is_classify,
                    is_regression=args.is_regression,
                    p_cnt_per_q=args.p_cnt_per_q)
                if args.use_recompute:
                    dist_strategy.recompute_checkpoints=checkpoints
                scheduled_lr, loss_scaling = optimization(
                    loss=graph_vars["loss"],
                    warmup_steps=warmup_steps,
                    num_train_steps=max_train_steps,
                    learning_rate=args.learning_rate,
                    train_program=train_program,
                    startup_prog=startup_prog,
                    weight_decay=args.weight_decay,
                    scheduler=args.lr_scheduler,
                    use_fp16=args.use_fp16,
                    use_dynamic_loss_scaling=args.use_dynamic_loss_scaling,
                    init_loss_scaling=args.init_loss_scaling,
                    incr_every_n_steps=args.incr_every_n_steps,
                    decr_every_n_nan_or_inf=args.decr_every_n_nan_or_inf,
                    incr_ratio=args.incr_ratio,
                    decr_ratio=args.decr_ratio,
                    dist_strategy = dist_strategy)
        if args.verbose:
            if args.in_tokens:
                lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
                    program=train_program,
                    batch_size=args.batch_size // args.max_seq_len)
            else:
                lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
                    program=train_program, batch_size=args.batch_size)
            log.info("Theoretical memory usage in training: %.3f - %.3f %s" %
                     (lower_mem, upper_mem, unit))
    if args.do_val or args.do_test:
        # Separate frozen program for evaluation/prediction (single passage
        # per query: p_cnt_per_q=1).
        test_prog = fluid.Program()
        with fluid.program_guard(test_prog, startup_prog):
            with fluid.unique_name.guard():
                test_pyreader, graph_vars, cp = create_model(
                    args,
                    pyreader_name='test_reader',
                    ernie_config=ernie_config,
                    is_classify=args.is_classify,
                    is_regression=args.is_regression,
                    p_cnt_per_q=1)
        test_prog = test_prog.clone(for_test=True)
    train_program = fleet.main_program
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    # Parameter initialization: checkpoint wins over pretraining params.
    if args.do_train:
        if args.init_checkpoint and args.init_pretraining_params:
            log.warning(
                "WARNING: args 'init_checkpoint' and 'init_pretraining_params' "
                "both are set! Only arg 'init_checkpoint' is made valid.")
        if args.init_checkpoint:
            init_checkpoint(
                exe,
                args.init_checkpoint,
                main_program=startup_prog,
                use_fp16=args.use_fp16)
        elif args.init_pretraining_params:
            init_pretraining_params(
                exe,
                args.init_pretraining_params,
                main_program=startup_prog,
                use_fp16=args.use_fp16)
    elif args.do_val or args.do_test:
        if not args.init_checkpoint:
            raise ValueError("args 'init_checkpoint' should be set if"
                             "only doing validation or testing!")
        init_checkpoint(
            exe,
            args.init_checkpoint,
            main_program=startup_prog,
            use_fp16=args.use_fp16)
    if args.do_train:
        train_exe = exe
        train_pyreader.decorate_tensor_provider(train_data_generator)
    else:
        train_exe = None
    test_exe = exe
    # if args.do_val or args.do_test:
    #     if args.use_multi_gpu_test:
    #         test_exe = fluid.ParallelExecutor(
    #             use_cuda=args.use_cuda,
    #             main_program=test_prog,
    #             share_vars_from=train_exe)
    current_epoch = 0
    steps = 0
    if args.do_train:
        # Training loop: run silently for skip_steps-1 steps, then fetch
        # metrics on the skip_steps-th; checkpoint/validate on their own
        # intervals.  Terminates when the pyreader raises EOFException.
        train_pyreader.start()
        if warmup_steps > 0:
            graph_vars["learning_rate"] = scheduled_lr
        ce_info = []
        time_begin = time.time()
        last_epoch = 0
        while True:
            try:
                steps += 1
                # log.info("step: %d" % steps)
                # if fleet.worker_index() != 0:
                #     train_exe.run(fetch_list=[], program=train_program)
                #     continue
                if steps % args.skip_steps != 0:
                    train_exe.run(fetch_list=[], program=train_program)
                else:
                    outputs = evaluate(
                        train_exe,
                        train_program,
                        train_pyreader,
                        graph_vars,
                        "train",
                        metric=args.metric,
                        is_classify=args.is_classify,
                        is_regression=args.is_regression)
                    if args.verbose:
                        verbose = "train pyreader queue size: %d, " % train_pyreader.queue.size(
                        )
                        verbose += "learning rate: %f" % (
                            outputs["learning_rate"]
                            if warmup_steps > 0 else args.learning_rate)
                        log.info(verbose)
                    current_example, current_epoch = reader.get_train_progress()
                    time_end = time.time()
                    used_time = time_end - time_begin
                    if args.is_classify:
                        log.info(
                            "epoch: %d, progress: %d/%d, step: %d, ave loss: %f, "
                            "ave acc: %f, speed: %f steps/s" %
                            (current_epoch, current_example * dev_count, num_train_examples,
                             steps, outputs["loss"], outputs["accuracy"],
                             args.skip_steps / used_time))
                        ce_info.append(
                            [outputs["loss"], outputs["accuracy"], used_time])
                    if args.is_regression:
                        log.info(
                            "epoch: %d, progress: %d/%d, step: %d, ave loss: %f, "
                            " speed: %f steps/s" %
                            (current_epoch, current_example * dev_count, num_train_examples,
                             steps, outputs["loss"],
                             args.skip_steps / used_time))
                    time_begin = time.time()
                if steps % args.save_steps == 0:
                    save_path = os.path.join(args.checkpoints,
                                             "step_" + str(steps))
                    fluid.io.save_persistables(exe, save_path, fleet._origin_program)
                # if steps % args.validation_steps == 0 or last_epoch != current_epoch:
                if steps % args.validation_steps == 0:
                    # evaluate dev set
                    if args.do_val:
                        evaluate_wrapper(args, reader, exe, test_prog,
                                         test_pyreader, graph_vars,
                                         current_epoch, steps)
                    if args.do_test:
                        predict_wrapper(args, reader, exe, test_prog,
                                        test_pyreader, graph_vars,
                                        current_epoch, steps)
                if last_epoch != current_epoch:
                    last_epoch = current_epoch
            except fluid.core.EOFException:
                # Data exhausted: save a final checkpoint and stop.
                save_path = os.path.join(args.checkpoints, "step_" + str(steps))
                fluid.io.save_persistables(exe, save_path, fleet._origin_program)
                train_pyreader.reset()
                break
        if args.enable_ce:
            # Continuous-evaluation KPI reporting (second-to-last record).
            card_num = get_cards()
            ce_loss = 0
            ce_acc = 0
            ce_time = 0
            try:
                ce_loss = ce_info[-2][0]
                ce_acc = ce_info[-2][1]
                ce_time = ce_info[-2][2]
            except:
                log.info("ce info error")
            log.info("kpis\ttrain_duration_card%s\t%s" % (card_num, ce_time))
            log.info("kpis\ttrain_loss_card%s\t%f" % (card_num, ce_loss))
            log.info("kpis\ttrain_acc_card%s\t%f" % (card_num, ce_acc))
    # final eval on dev set
    if args.do_val:
        evaluate_wrapper(args, reader, exe, test_prog, test_pyreader,
                         graph_vars, current_epoch, steps)
    # final eval on test set
    if args.do_test:
        predict_wrapper(args, reader, exe, test_prog, test_pyreader, graph_vars,
                        current_epoch, steps)
    # final eval on dianostic, hack for glue-ax
    if args.diagnostic:
        test_pyreader.decorate_tensor_provider(
            reader.data_generator(
                args.diagnostic,
                batch_size=args.batch_size,
                epoch=1,
                dev_count=1,
                shuffle=False))
        log.info("Final diagnostic")
        qids, preds, probs = predict(
            test_exe,
            test_prog,
            test_pyreader,
            graph_vars,
            is_classify=args.is_classify,
            is_regression=args.is_regression)
        assert len(qids) == len(preds), '{} v.s. {}'.format(
            len(qids), len(preds))
        with open(args.diagnostic_save, 'w') as f:
            for id, s, p in zip(qids, preds, probs):
                f.write('{}\t{}\t{}\n'.format(id, s, p))
        log.info("Done final diagnostic, saving to {}".format(
            args.diagnostic_save))
def evaluate_wrapper(args, reader, exe, test_prog, test_pyreader, graph_vars,
                     epoch, steps):
    """Evaluate every dev dataset listed (comma separated) in args.dev_set.

    Each dataset is fed through the test pyreader for a single epoch with
    shuffling disabled, and the summary string returned by evaluate() is
    logged together with the dataset name, epoch and global step.
    """
    for dataset in args.dev_set.split(','):
        data_gen = reader.data_generator(
            dataset,
            batch_size=args.predict_batch_size,
            epoch=1,
            dev_count=1,
            shuffle=False)
        test_pyreader.decorate_tensor_provider(data_gen)
        log.info("validation result of dataset {}:".format(dataset))
        summary = evaluate(
            exe,
            test_prog,
            test_pyreader,
            graph_vars,
            "dev",
            metric=args.metric,
            is_classify=args.is_classify,
            is_regression=args.is_regression)
        log.info(summary + ', file: {}, epoch: {}, steps: {}'.format(
            dataset, epoch, steps))
def predict_wrapper(args, reader, exe, test_prog, test_pyreader, graph_vars,
                    epoch, steps):
    """Run inference on every test set and dump per-example scores.

    args.test_set and args.test_save are parallel comma-separated lists.
    For each pair, predictions are written to ``<save_f>.<epoch>.<steps>``
    with one probability (first column of the model output) per line.

    Fixes vs. the original: the log message no longer falsely claims the
    save will be skipped when the directory already exists (the file is
    always written below), and ``os.makedirs('')`` is no longer attempted
    when the save path has no directory component.
    """
    test_sets = args.test_set.split(',')
    save_dirs = args.test_save.split(',')
    assert len(test_sets) == len(save_dirs)
    for test_f, save_f in zip(test_sets, save_dirs):
        test_pyreader.decorate_tensor_provider(
            reader.data_generator(
                test_f,
                batch_size=args.predict_batch_size,
                epoch=1,
                dev_count=1,
                shuffle=False))
        save_path = save_f + '.' + str(epoch) + '.' + str(steps)
        log.info("testing {}, save to {}".format(test_f, save_path))
        qids, preds, probs = predict(
            exe,
            test_prog,
            test_pyreader,
            graph_vars,
            is_classify=args.is_classify,
            is_regression=args.is_regression)
        save_dir = os.path.dirname(save_path)
        # Only create a directory when the save path actually has one;
        # os.makedirs('') raises FileNotFoundError.
        if save_dir and not os.path.exists(save_dir):
            os.makedirs(save_dir)
        elif save_dir:
            log.warning('save dir exists: %s, writing into it' % save_dir)
        with open(save_path, 'w') as f:
            # for id, s, p in zip(qids, preds, probs):
            #     f.write('{}\t{}\t{}\n'.format(id, s, p))
            for p in probs:
                f.write('{}\n'.format(p[0]))
if __name__ == '__main__':
    # Script entry: configure logging, echo the parsed CLI arguments,
    # validate the CUDA flag against the environment, then run training.
    prepare_logger(log)
    print_arguments(args)
    check_cuda(args.use_cuda)
    main(args)
| 17,415 | 36.86087 | 96 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/index_search.py | import sys
import time
import faiss
import math
import numpy as np
def read_embed(file_name, dim=768, bs=3000):
    """Yield batches of at most *bs* embedding vectors from *file_name*.

    Two on-disk formats are supported: a ``.npy`` matrix (batches are
    numpy slices of the loaded array) or a space-separated text file
    (batches are lists of float lists, each checked against *dim*).
    """
    if file_name.endswith('npy'):
        matrix = np.load(file_name)
        for start in range(0, len(matrix), bs):
            yield matrix[start:start + bs]
    else:
        batch = []
        with open(file_name) as inp:
            for line in inp:
                tokens = line.strip().split(' ')
                vector = [float(tok) for tok in tokens]
                assert len(vector) == dim
                batch.append(vector)
                if len(batch) == bs:
                    yield batch
                    batch = []
        # Flush the trailing partial batch, if any.
        if batch:
            yield batch
def load_qid(file_name):
    """Return the first tab-separated field of every line in *file_name*."""
    with open(file_name) as inp:
        return [line.strip().split('\t')[0] for line in inp]
def search(index, emb_file, qid_list, outfile, top_k):
    """Retrieve *top_k* passages per query and dump a TSV run file.

    Query embeddings are streamed from *emb_file* via read_embed and
    matched positionally against *qid_list*. Each output line is
    ``qid<TAB>pid<TAB>rank<TAB>score`` with rank starting at 1.
    """
    cursor = 0
    with open(outfile, 'w') as out:
        for batch in read_embed(emb_file):
            queries = np.array(batch)
            dists, pids = index.search(queries.astype('float32'), top_k)
            for row in range(len(queries)):
                qid = qid_list[cursor]
                cursor += 1
                for rank in range(1, top_k + 1):
                    pid = pids[row][rank - 1]
                    score = dists[row][rank - 1]
                    out.write('%s\t%s\t%s\t%s\n' % (qid, pid, rank, score))
def main():
    """CLI entry: argv = [part, topk, query_text_file].

    Loads the FAISS shard ``para.index.part<part>`` plus the query
    embeddings in ``query.emb.npy`` and writes the retrieval run to
    ``res.top<topk>-part<part>``.
    """
    part = sys.argv[1]
    topk = int(sys.argv[2])
    q_text_file = sys.argv[3]
    result_file = 'res.top%s-part%s' % (topk, part)
    qids = load_qid(q_text_file)
    index = faiss.read_index("para.index.part%s" % part)
    search(index, 'query.emb.npy', qids, result_file, topk)
if __name__ == "__main__":
    # Script entry point: expects argv = [part, topk, query_text_file].
    main()
| 1,948 | 27.661765 | 84 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/finetune/dual_encoder_infer.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import time
import logging
import numpy as np
import faiss
from scipy.stats import pearsonr, spearmanr
from six.moves import xrange
import paddle.fluid as fluid
from model.ernie import ErnieModel
log = logging.getLogger(__name__)
def create_model(args,
                 pyreader_name,
                 ernie_config,
                 batch_size=16,
                 is_prediction=False,
                 task_name=""):
    """Build the dual-encoder graph used for embedding inference.

    Two ERNIE towers encode queries and title+paragraph passages; both
    are built with the same parameter prefix ('titlepara_'), so they
    share weights. Returns (pyreader, graph_vars); graph_vars exposes
    the pooled query/passage features ("q_rep"/"p_rep") that the driver
    dumps to disk, plus an in-batch-negative loss and accuracy.
    """
    # Feed layout: 5 query tensors, 5 passage tensors, then labels, qids.
    pyreader = fluid.layers.py_reader(
        capacity=50,
        shapes=[[batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],
            [batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],
            [batch_size, args.q_max_seq_len, 1],
            [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
            [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
            [batch_size, args.p_max_seq_len, 1],
            [batch_size, 1], [batch_size, 1]],
        dtypes=['int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64'],
        lod_levels=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        name=pyreader_name,
        use_double_buffer=True)
    (src_ids_q, sent_ids_q, pos_ids_q, task_ids_q, input_mask_q,
     src_ids_p, sent_ids_p, pos_ids_p, task_ids_p, input_mask_p,
     labels, qids) = fluid.layers.read_file(pyreader)
    # Query tower (shares 'titlepara_' parameters with the passage tower).
    ernie_q = ErnieModel(
        src_ids=src_ids_q,
        position_ids=pos_ids_q,
        sentence_ids=sent_ids_q,
        task_ids=task_ids_q,
        input_mask=input_mask_q,
        config=ernie_config,
        model_name='titlepara_')
    ## pos para
    # Passage (title + paragraph) tower.
    ernie_p = ErnieModel(
        src_ids=src_ids_p,
        position_ids=pos_ids_p,
        sentence_ids=sent_ids_p,
        task_ids=task_ids_p,
        input_mask=input_mask_p,
        config=ernie_config,
        model_name='titlepara_')
    q_cls_feats = ernie_q.get_pooled_output()
    p_cls_feats = ernie_p.get_pooled_output()
    #p_cls_feats = fluid.layers.concat([pos_cls_feats, neg_cls_feats], axis=0)
    #src_ids_p = fluid.layers.Print(src_ids_p, message='p: ')
    #p_cls_feats = fluid.layers.Print(p_cls_feats, message='p: ')
    #multiply
    # Similarity matrix: every query against every passage in the batch.
    logits = fluid.layers.matmul(q_cls_feats, p_cls_feats, transpose_x=False, transpose_y=True)
    probs = logits
    #fluid.layers.Print(probs, message='probs: ')
    #logits2 = fluid.layers.elementwise_mul(x=q_rep, y=p_rep)
    #fluid.layers.Print(logits2, message='logits2: ')
    #probs2 = fluid.layers.reduce_sum(logits, dim=-1)
    #fluid.layers.Print(probs2, message='probs2: ')
    # In-batch negatives: the diagonal holds each query's positive passage.
    matrix_labels = fluid.layers.eye(batch_size, batch_size, dtype='float32')
    matrix_labels.stop_gradient=True
    #print('DEBUG:\tstart loss')
    ce_loss, _ = fluid.layers.softmax_with_cross_entropy(
        logits=logits, label=matrix_labels, soft_label=True, return_softmax=True)
    loss = fluid.layers.mean(x=ce_loss)
    #print('DEBUG:\tloss done')
    # Convert one-hot diagonal labels to class indices for the accuracy op.
    matrix_labels = fluid.layers.argmax(matrix_labels, axis=-1)
    matrix_labels = fluid.layers.reshape(x=matrix_labels, shape=[batch_size, 1])
    num_seqs = fluid.layers.create_tensor(dtype='int64')
    accuracy = fluid.layers.accuracy(input=probs, label=matrix_labels, total=num_seqs)
    #ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
    #    logits=logits, label=labels, return_softmax=True)
    #loss = fluid.layers.mean(x=ce_loss)
    #accuracy = fluid.layers.accuracy(
    #    input=probs, label=labels, total=num_seqs)
    graph_vars = {
        "loss": loss,
        "probs": probs,
        "accuracy": accuracy,
        "labels": labels,
        "num_seqs": num_seqs,
        "qids": qids,
        "q_rep": q_cls_feats,
        "p_rep": p_cls_feats
    }
    return pyreader, graph_vars
def build_engine(para_emb_list, dim):
    """Pack paragraph embeddings into a FAISS flat inner-product index."""
    engine = faiss.IndexFlatIP(dim)
    matrix = np.asarray(para_emb_list)
    engine.add(matrix.astype('float32'))
    #print ("insert done", file=sys.stderr)
    return engine
def predict(args,
            exe,
            test_program,
            test_pyreader,
            graph_vars,
            dev_count=1,
            output_item=0,
            output_file_name='emb',
            hidden_size=768):
    """Embed every example from the pyreader and persist the result.

    output_item selects the tower: 0 keeps query embeddings and saves
    them as ``<output_file_name>.npy``; 1 keeps passage embeddings and
    writes them into a FAISS flat-IP index at *output_file_name*. The
    collected embeddings are truncated to args.test_data_cnt to drop the
    padding examples appended by the reader.
    """
    test_pyreader.start()
    fetches = [graph_vars["q_rep"].name, graph_vars["p_rep"].name]
    embeddings = []
    batch_no = 0
    while True:
        try:
            batch_no += 1
            if batch_no % 500 == 0:
                log.info("complete batch %s" % batch_no)
            q_rep, p_rep = exe.run(program=test_program, fetch_list=fetches)
            if output_item == 0:
                chosen = q_rep
            elif output_item == 1:
                chosen = p_rep
            else:
                chosen = []
            for row in chosen:
                embeddings.append(np.array(row, dtype='float32'))
        except fluid.core.EOFException:
            test_pyreader.reset()
            break
    log.info("predict embs cnt: %s" % len(embeddings))
    embeddings = embeddings[:args.test_data_cnt]
    log.info("cut embs cnt: %s" % len(embeddings))
    if output_item == 1:
        index = build_engine(embeddings, hidden_size)
        faiss.write_index(index, output_file_name)
        log.info("create index done!")
    else:
        np.save(output_file_name + '.npy', np.asarray(embeddings))
        log.info("save to npy file!")
| 6,365 | 34.366667 | 95 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/finetune/joint_encoder.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import time
import logging
import numpy as np
from scipy.stats import pearsonr, spearmanr
from six.moves import xrange
import paddle.fluid as fluid
from model.ernie import ErnieModel
log = logging.getLogger(__name__)
def create_model(args,
                 pyreader_name,
                 ernie_config,
                 is_prediction=False,
                 task_name="",
                 is_classify=False,
                 is_regression=False,
                 ernie_version="1.0",
                 p_cnt_per_q=1):
    """Build the joint (dual-encoder + cross-encoder) training graph.

    Three ERNIE towers are created: a query tower ('query_'), a
    title+paragraph tower ('titlepara_'), and a cross-encoder over the
    concatenated query+passage input ('qtp_'). The cross-encoder score
    drives a listwise cross-entropy loss over p_cnt_per_q candidates per
    query (candidate 0 is treated as the positive), and a KL term pulls
    the dual-encoder score distribution toward the cross-encoder's
    softmax — the joint distillation objective. Returns
    (pyreader, graph_vars, checkpoints).
    """
    if is_classify:
        # Feed layout: 5 query tensors, 5 title+para tensors,
        # 5 concatenated query+title+para tensors, labels, qids.
        pyreader = fluid.layers.py_reader(
            capacity=50,
            shapes=[[-1, args.q_max_seq_len, 1], [-1, args.q_max_seq_len, 1],
                [-1, args.q_max_seq_len, 1], [-1, args.q_max_seq_len, 1],
                [-1, args.q_max_seq_len, 1],
                [-1, args.p_max_seq_len, 1], [-1, args.p_max_seq_len, 1],
                [-1, args.p_max_seq_len, 1], [-1, args.p_max_seq_len, 1],
                [-1, args.p_max_seq_len, 1],
                [-1, args.q_max_seq_len+args.p_max_seq_len, 1], [-1, args.q_max_seq_len+args.p_max_seq_len, 1],
                [-1, args.q_max_seq_len+args.p_max_seq_len, 1], [-1, args.q_max_seq_len+args.p_max_seq_len, 1],
                [-1, args.q_max_seq_len+args.p_max_seq_len, 1],
                [-1, 1], [-1, 1]],
            dtypes=[
                'int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64'
            ],
            lod_levels=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            name=task_name + "_" + pyreader_name,
            use_double_buffer=True)
    elif is_regression:
        pyreader = fluid.layers.py_reader(
            capacity=50,
            shapes=[[-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
                    [-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
                    [-1, args.max_seq_len, 1], [-1, 1], [-1, 1]],
            dtypes=[
                'int64', 'int64', 'int64', 'int64', 'float32', 'float32',
                'int64'
            ],
            lod_levels=[0, 0, 0, 0, 0, 0, 0],
            name=task_name + "_" + pyreader_name,
            use_double_buffer=True)
    (src_ids_q, sent_ids_q, pos_ids_q, task_ids_q, input_mask_q,
     src_ids_tp, sent_ids_tp, pos_ids_tp, task_ids_tp, input_mask_tp,
     src_ids_qtp, sent_ids_qtp, pos_ids_qtp, task_ids_qtp, input_mask_qtp,
     labels, qids) = fluid.layers.read_file(pyreader)
    # Dual-encoder query tower.
    ernie_q = ErnieModel(
        model_name='query_',
        src_ids=src_ids_q,
        position_ids=pos_ids_q,
        sentence_ids=sent_ids_q,
        task_ids=task_ids_q,
        input_mask=input_mask_q,
        config=ernie_config,
        use_fp16=args.use_fp16)
    ## titlepara
    # Dual-encoder passage (title + paragraph) tower.
    ernie_tp = ErnieModel(
        model_name='titlepara_',
        src_ids=src_ids_tp,
        position_ids=pos_ids_tp,
        sentence_ids=sent_ids_tp,
        task_ids=task_ids_tp,
        input_mask=input_mask_tp,
        config=ernie_config,
        use_fp16=args.use_fp16)
    ## qtp
    # Cross-encoder over the concatenated query + title + paragraph input.
    ernie_qtp = ErnieModel(
        model_name='qtp_',
        src_ids=src_ids_qtp,
        position_ids=pos_ids_qtp,
        sentence_ids=sent_ids_qtp,
        task_ids=task_ids_qtp,
        input_mask=input_mask_qtp,
        config=ernie_config,
        use_fp16=args.use_fp16)
    q_cls_feats = ernie_q.get_pooled_output("query_")
    p_cls_feats = ernie_tp.get_pooled_output("titlepara_")
    qtp_cls_feats = ernie_qtp.get_pooled_output("qtp_")
    # q_cls_feats = fluid.layers.reshape(x=q_cls_feats, shape=[-1, p_cnt_per_q, 768])
    # q_cls_feats = fluid.layers.slice(input=q_cls_feats, axes=[1], starts=[0], ends=[1])
    # q_cls_feats = fluid.layers.expand(q_cls_feats, [1, p_cnt_per_q, 1])
    # q_cls_feats = fluid.layers.reshape(x=q_cls_feats, shape=[-1, 768])
    qtp_cls_feats = fluid.layers.dropout(
        x=qtp_cls_feats,
        dropout_prob=0.1,
        dropout_implementation="upscale_in_train")
    # Dual-encoder score: dot product of query and passage features,
    # log-softmax-normalized over the p_cnt_per_q candidates of each query.
    twin_logits = fluid.layers.elementwise_mul(x=q_cls_feats, y=p_cls_feats)
    twin_logits = fluid.layers.reduce_sum(twin_logits, dim=-1, keep_dim=True)
    twin_probs = fluid.layers.reshape(x=twin_logits, shape=[-1, p_cnt_per_q])
    twin_probs = fluid.layers.log_softmax(twin_probs)
    # fluid.layers.Print(twin_probs, message='twin_probs')
    # Cross-encoder relevance score (single logit per query-passage pair).
    logits = fluid.layers.fc(
        input=qtp_cls_feats,
        size=1,
        param_attr=fluid.ParamAttr(
            name=task_name + "qtp__cls_out_w",
            initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
        bias_attr=fluid.ParamAttr(
            name=task_name + "qtp__cls_out_b",
            initializer=fluid.initializer.Constant(0.)))
    if is_prediction:
        probs = logits
        # NOTE(review): src_ids / sent_ids / pos_ids / input_mask / task_ids
        # are not defined in this scope (only the _q/_tp/_qtp variants are),
        # so this branch raises NameError when is_prediction=True — confirm
        # which tower's feed names were intended before using it.
        feed_targets_name = [
            src_ids.name, sent_ids.name, pos_ids.name, input_mask.name
        ]
        if ernie_version == "2.0":
            feed_targets_name += [task_ids.name]
        return pyreader, probs, feed_targets_name
    assert is_classify != is_regression, 'is_classify or is_regression must be true and only one of them can be true'
    num_seqs = fluid.layers.create_tensor(dtype='int64')
    # Listwise loss: candidates are grouped per query; index 0 is the
    # positive, so the hard label for every group is class 0.
    listwise_score = fluid.layers.reshape(x=logits, shape=[-1, p_cnt_per_q])
    listwise_labels = fluid.layers.fill_constant_batch_size_like(
        input=listwise_score, dtype='int64', shape=[-1, 1], value=0)
    ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
        logits=listwise_score, label=listwise_labels, return_softmax=True)
    # fluid.layers.Print(probs, message='probs')
    # Distillation: KL(dual-encoder log-probs || cross-encoder probs).
    kl_loss = fluid.layers.kldiv_loss(x=twin_probs, target=probs) # mean
    loss = fluid.layers.mean(x=ce_loss) + kl_loss
    accuracy = fluid.layers.accuracy(
        input=probs, label=listwise_labels, total=num_seqs)
    graph_vars = {
        "loss": loss,
        "probs": logits,
        "accuracy": accuracy,
        "labels": listwise_labels,
        "num_seqs": num_seqs,
        "qids": qids
    }
    # Recompute checkpoints from all three towers (for gradient checkpointing).
    cp = []
    cp.extend(ernie_q.checkpoints)
    cp.extend(ernie_tp.checkpoints)
    cp.extend(ernie_qtp.checkpoints)
    return pyreader, graph_vars, cp
def evaluate_mrr(preds):
    """Mean reciprocal rank over (qid, score, label) triples.

    *preds* must be grouped by qid and sorted by descending score within
    each group; only the first relevant (label != 0) hit of each query
    contributes 1/rank.
    """
    total = 0.0
    n_queries = 0.0
    prev_qid = None
    rank = 0.0
    found = False
    for qid, _score, label in preds:
        if qid != prev_qid:
            # New query group: reset the per-query state.
            prev_qid = qid
            n_queries += 1
            rank = 0.0
            found = False
        rank += 1
        if label != 0 and not found:
            total += 1.0 / rank
            found = True
    return total / n_queries
def evaluate_map(preds):
    """Mean average precision over (qid, score, label) triples.

    *preds* must be grouped by qid and sorted by descending score within
    each group; relevance is ``int(label) != 0``. A query with no
    relevant passages contributes 0.0.

    Improvements vs. the original: uses the builtin range() instead of
    six's py2-compat xrange, and returns 0.0 for empty input instead of
    raising ZeroDivisionError.
    """
    def _average_precision(start, end):
        # AP for the query occupying preds[start:end].
        hits = 0.0
        precision_sum = 0.0
        for pos in range(start, end):
            if int(preds[pos][2]) != 0:
                hits += 1
                precision_sum += hits / (pos - start + 1)
        return precision_sum / hits if hits else 0.0

    total = 0.0
    n_queries = 0.0
    prev_qid = None
    start = 0
    for i in range(len(preds)):
        qid = preds[i][0]
        if qid != prev_qid:
            n_queries += 1
            if prev_qid is not None:
                total += _average_precision(start, i)
            start = i
            prev_qid = qid
    if n_queries == 0:
        # No queries at all: MAP is trivially 0.
        return 0.0
    total += _average_precision(start, len(preds))
    return total / n_queries
def evaluate_classify(exe,
                      test_program,
                      test_pyreader,
                      graph_vars,
                      eval_phase,
                      use_multi_gpu_test=False,
                      metric='simple_accuracy',
                      is_classify=False,
                      is_regression=False):
    """Run one classification evaluation pass.

    For ``eval_phase == "train"`` a single fetch on the running program
    is performed and a {"loss", "accuracy"[, "learning_rate"]} dict is
    returned. Otherwise the pyreader is drained and a formatted summary
    string for the requested *metric* is returned.

    Fix: the 'pearson_and_spearman' branch previously indexed
    ret['spearman'], but pearson_and_spearman() returns the key
    'spearmanr', so that branch always raised KeyError.
    """
    train_fetch_list = [
        graph_vars["loss"].name, graph_vars["accuracy"].name,
        graph_vars["num_seqs"].name
    ]
    if eval_phase == "train":
        if "learning_rate" in graph_vars:
            train_fetch_list.append(graph_vars["learning_rate"].name)
        outputs = exe.run(fetch_list=train_fetch_list, program=test_program)
        ret = {"loss": np.mean(outputs[0]), "accuracy": np.mean(outputs[1])}
        if "learning_rate" in graph_vars:
            # learning_rate was appended as the 4th fetch above.
            ret["learning_rate"] = float(outputs[3][0])
        return ret
    test_pyreader.start()
    total_cost, total_acc, total_num_seqs, total_label_pos_num, total_pred_pos_num, total_correct_num = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    qids, labels, scores, preds = [], [], [], []
    time_begin = time.time()
    fetch_list = [
        graph_vars["loss"].name, graph_vars["accuracy"].name,
        graph_vars["probs"].name, graph_vars["labels"].name,
        graph_vars["num_seqs"].name, graph_vars["qids"].name
    ]
    while True:
        try:
            if use_multi_gpu_test:
                np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids = exe.run(
                    fetch_list=fetch_list)
            else:
                np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids = exe.run(
                    program=test_program, fetch_list=fetch_list)
            # Loss/accuracy are per-batch means; weight by batch size.
            total_cost += np.sum(np_loss * np_num_seqs)
            total_acc += np.sum(np_acc * np_num_seqs)
            total_num_seqs += np.sum(np_num_seqs)
            labels.extend(np_labels.reshape((-1)).tolist())
            if np_qids is None:
                np_qids = np.array([])
            qids.extend(np_qids.reshape(-1).tolist())
            # Positive-class probability column drives ranking metrics.
            scores.extend(np_probs[:, 1].reshape(-1).tolist())
            np_preds = np.argmax(np_probs, axis=1).astype(np.float32)
            preds.extend(np_preds)
            total_label_pos_num += np.sum(np_labels)
            total_pred_pos_num += np.sum(np_preds)
            total_correct_num += np.sum(np.dot(np_preds, np_labels))
        except fluid.core.EOFException:
            test_pyreader.reset()
            break
    time_end = time.time()
    cost = total_cost / total_num_seqs
    elapsed_time = time_end - time_begin
    evaluate_info = ""
    if metric == 'acc_and_f1':
        ret = acc_and_f1(preds, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, ave_acc: %f, f1: %f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret['acc'], ret['f1'], total_num_seqs, elapsed_time)
    elif metric == 'matthews_corrcoef':
        ret = matthews_corrcoef(preds, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, matthews_corrcoef: %f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret, total_num_seqs, elapsed_time)
    elif metric == 'pearson_and_spearman':
        ret = pearson_and_spearman(scores, labels)
        # Fixed key: pearson_and_spearman() returns 'spearmanr'.
        evaluate_info = "[%s evaluation] ave loss: %f, pearson:%f, spearman:%f, corr:%f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret['pearson'], ret['spearmanr'], ret['corr'], total_num_seqs, elapsed_time)
    elif metric == 'simple_accuracy':
        ret = simple_accuracy(preds, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, acc:%f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret, total_num_seqs, elapsed_time)
    elif metric == "acc_and_f1_and_mrr":
        ret_a = acc_and_f1(preds, labels)
        preds = sorted(
            zip(qids, scores, labels), key=lambda elem: (elem[0], -elem[1]))
        ret_b = evaluate_mrr(preds)
        evaluate_info = "[%s evaluation] ave loss: %f, acc: %f, f1: %f, mrr: %f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret_a['acc'], ret_a['f1'], ret_b, total_num_seqs, elapsed_time)
    else:
        raise ValueError('unsupported metric {}'.format(metric))
    return evaluate_info
def evaluate_regression(exe,
                        test_program,
                        test_pyreader,
                        graph_vars,
                        eval_phase,
                        use_multi_gpu_test=False,
                        metric='pearson_and_spearman'):
    """Run one regression evaluation pass.

    For eval_phase == "train" a single fetch on the current program is
    done and a {"loss"[, "learning_rate"]} dict is returned; otherwise
    the pyreader is drained and a pearson/spearman summary string is
    returned (the loss slot in the string is fixed at 0.0).
    """
    if eval_phase == "train":
        fetches = [graph_vars["loss"].name]
        if "learning_rate" in graph_vars:
            fetches.append(graph_vars["learning_rate"].name)
        outs = exe.run(fetch_list=fetches)
        result = {"loss": np.mean(outs[0])}
        if "learning_rate" in graph_vars:
            result["learning_rate"] = float(outs[1][0])
        return result
    test_pyreader.start()
    qids, labels, scores = [], [], []
    fetches = [
        graph_vars["loss"].name, graph_vars["probs"].name,
        graph_vars["labels"].name, graph_vars["qids"].name
    ]
    started = time.time()
    while True:
        try:
            if use_multi_gpu_test:
                np_loss, np_probs, np_labels, np_qids = exe.run(
                    fetch_list=fetches)
            else:
                np_loss, np_probs, np_labels, np_qids = exe.run(
                    program=test_program, fetch_list=fetches)
            labels.extend(np_labels.reshape((-1)).tolist())
            if np_qids is None:
                np_qids = np.array([])
            qids.extend(np_qids.reshape(-1).tolist())
            scores.extend(np_probs.reshape(-1).tolist())
        except fluid.core.EOFException:
            test_pyreader.reset()
            break
    elapsed = time.time() - started
    if metric != 'pearson_and_spearman':
        raise ValueError('unsupported metric {}'.format(metric))
    ret = pearson_and_spearman(scores, labels)
    evaluate_info = "[%s evaluation] ave loss: %f, pearson:%f, spearman:%f, corr:%f, elapsed time: %f s" \
        % (eval_phase, 0.0, ret['pearson'], ret['spearmanr'], ret['corr'], elapsed)
    return evaluate_info
def evaluate(exe,
             test_program,
             test_pyreader,
             graph_vars,
             eval_phase,
             use_multi_gpu_test=False,
             metric='simple_accuracy',
             is_classify=False,
             is_regression=False):
    """Dispatch to the classification or regression evaluation loop.

    is_classify selects evaluate_classify; anything else falls through
    to evaluate_regression (matching the original if/else behavior).
    """
    runner = evaluate_classify if is_classify else evaluate_regression
    return runner(
        exe,
        test_program,
        test_pyreader,
        graph_vars,
        eval_phase,
        use_multi_gpu_test=use_multi_gpu_test,
        metric=metric)
def matthews_corrcoef(preds, labels):
    """Matthews correlation coefficient for binary predictions.

    Fix: returns 0.0 instead of NaN (a 0/0 division with a runtime
    warning) when any confusion-matrix marginal is empty — the common
    convention (e.g. sklearn). The denominator product is computed in
    float to avoid integer overflow on very large inputs.
    """
    preds = np.array(preds)
    labels = np.array(labels)
    tp = np.sum((labels == 1) & (preds == 1))
    tn = np.sum((labels == 0) & (preds == 0))
    fp = np.sum((labels == 0) & (preds == 1))
    fn = np.sum((labels == 1) & (preds == 0))
    denom = np.sqrt(
        float(tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    if denom == 0:
        # Degenerate confusion matrix: MCC is undefined; report 0.0.
        return 0.0
    return ((tp * tn) - (fp * fn)) / denom
def f1_score(preds, labels):
    """Binary F1 score.

    Fix: returns 0.0 instead of NaN when precision or recall is
    undefined (no predicted positives or no actual positives). The
    unused true-negative count was dropped; the original 1e-8 smoothing
    in the F1 denominator is preserved for numerical compatibility.
    """
    preds = np.array(preds)
    labels = np.array(labels)
    tp = np.sum((labels == 1) & (preds == 1))
    fp = np.sum((labels == 0) & (preds == 1))
    fn = np.sum((labels == 1) & (preds == 0))
    if tp + fp == 0 or tp + fn == 0:
        # Precision or recall undefined: report F1 = 0.0 rather than NaN.
        return 0.0
    p = tp / (tp + fp)
    r = tp / (tp + fn)
    f1 = (2 * p * r) / (p + r + 1e-8)
    return f1
def pearson_and_spearman(preds, labels):
    """Pearson r, Spearman rho, and their average, as a dict.

    Keys: "pearson", "spearmanr", "corr".
    """
    x = np.array(preds)
    y = np.array(labels)
    pearson = pearsonr(x, y)[0]
    spearman = spearmanr(x, y)[0]
    return {
        "pearson": pearson,
        "spearmanr": spearman,
        "corr": (pearson + spearman) / 2,
    }
def acc_and_f1(preds, labels):
    """Bundle accuracy, F1, and their mean into one dict."""
    y_pred = np.array(preds)
    y_true = np.array(labels)
    acc = simple_accuracy(y_pred, y_true)
    f1 = f1_score(y_pred, y_true)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def simple_accuracy(preds, labels):
    """Fraction of positions where preds equals labels."""
    matches = np.array(preds) == np.array(labels)
    return matches.mean()
def predict(exe,
            test_program,
            test_pyreader,
            graph_vars,
            dev_count=1,
            is_classify=False,
            is_regression=False):
    """Drain the pyreader and return (qids, preds, probs).

    preds holds argmax class ids (classification) or flattened raw
    scores (regression); probs is the stacked raw model output reshaped
    to one row per prediction.
    """
    test_pyreader.start()
    qids = []
    preds = []
    prob_chunks = []
    fetches = [graph_vars["probs"].name, graph_vars["qids"].name]
    while True:
        try:
            if dev_count == 1:
                np_probs, np_qids = exe.run(program=test_program,
                                            fetch_list=fetches)
            else:
                np_probs, np_qids = exe.run(fetch_list=fetches)
            if np_qids is None:
                np_qids = np.array([])
            qids.extend(np_qids.reshape(-1).tolist())
            if is_classify:
                preds.extend(np.argmax(np_probs, axis=1).astype(np.float32))
            elif is_regression:
                preds.extend(np_probs.reshape(-1))
            prob_chunks.append(np_probs)
        except fluid.core.EOFException:
            test_pyreader.reset()
            break
    probs = np.concatenate(prob_chunks, axis=0).reshape([len(preds), -1])
    return qids, preds, probs
| 18,127 | 34.475538 | 132 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/reader/reader_de_infer.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
import os
import json
import random
import logging
import numpy as np
import six
from io import open
from collections import namedtuple
import tokenization
from batching import pad_batch_data
log = logging.getLogger(__name__)
if six.PY3:
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def csv_reader(fd, delimiter='\t'):
    """Lazily split each line of *fd* on *delimiter* (trailing newline stripped)."""
    return (line.rstrip('\n').split(delimiter) for line in fd)
class BaseReader(object):
    """Base TSV reader for dual-encoder data.

    Tokenizes query and title/paragraph fields into separate fixed-length
    ERNIE inputs (query capped at q_max_seq_len, title+para at
    p_max_seq_len) and yields padded batches through data_generator().
    """

    def __init__(self,
                 vocab_path,
                 label_map_config=None,
                 q_max_seq_len=128,
                 p_max_seq_len=512,
                 do_lower_case=True,
                 in_tokens=False,
                 is_inference=False,
                 random_seed=None,
                 tokenizer="FullTokenizer",
                 for_cn=True,
                 task_id=0):
        # NOTE(review): the `tokenizer` argument is ignored — a
        # FullTokenizer is always constructed below.
        self.q_max_seq_len = q_max_seq_len
        self.p_max_seq_len = p_max_seq_len
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_path, do_lower_case=do_lower_case)
        self.vocab = self.tokenizer.vocab
        self.pad_id = self.vocab["[PAD]"]
        self.cls_id = self.vocab["[CLS]"]
        self.sep_id = self.vocab["[SEP]"]
        # in_tokens: interpret batch_size as a token budget, not a row count.
        self.in_tokens = in_tokens
        self.is_inference = is_inference
        self.for_cn = for_cn
        self.task_id = task_id
        # Seeds numpy's *global* RNG (affects shuffling in data_generator).
        np.random.seed(random_seed)
        self.current_example = 0
        self.current_epoch = 0
        self.num_examples = 0
        if label_map_config:
            with open(label_map_config, encoding='utf8') as f:
                self.label_map = json.load(f)
        else:
            self.label_map = None

    def get_train_progress(self):
        """Gets progress for training phase."""
        return self.current_example, self.current_epoch

    def _read_tsv(self, input_file, batch_size=16, quotechar=None):
        """Reads a tab separated value file.

        The first line is treated as a header naming the Example fields.
        """
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            headers = next(reader)
            Example = namedtuple('Example', headers)
            examples = []
            for line in reader:
                example = Example(*line)
                examples.append(example)
            return examples

    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()

    def _convert_example_to_record(self, example, q_max_seq_length, p_max_seq_length, tokenizer):
        """Converts a single `Example` into a single `Record`.

        Produces two independent inputs: a query sequence
        ([CLS] query [SEP], all type 0) and a passage sequence
        ([CLS] title [SEP] para [SEP], title type 0, para type 1).
        """
        query = tokenization.convert_to_unicode(example.query)
        tokens_query = tokenizer.tokenize(query)
        # -2 reserves room for [CLS] and [SEP].
        self._truncate_seq_pair([], tokens_query, q_max_seq_length - 2)

        # title
        title = tokenization.convert_to_unicode(example.title)
        tokens_title = tokenizer.tokenize(title)
        # para
        para = tokenization.convert_to_unicode(example.para)
        tokens_para = tokenizer.tokenize(para)
        # -3 reserves room for [CLS] and two [SEP]s.
        self._truncate_seq_pair(tokens_title, tokens_para, p_max_seq_length - 3)

        tokens_q = []
        text_type_ids_q = []
        tokens_q.append("[CLS]")
        text_type_ids_q.append(0)
        for token in tokens_query:
            tokens_q.append(token)
            text_type_ids_q.append(0)
        tokens_q.append("[SEP]")
        text_type_ids_q.append(0)

        token_ids_q = tokenizer.convert_tokens_to_ids(tokens_q)
        position_ids_q = list(range(len(token_ids_q)))

        ### para
        tokens_p = []
        text_type_ids_p = []
        tokens_p.append("[CLS]")
        text_type_ids_p.append(0)
        for token in tokens_title:
            tokens_p.append(token)
            text_type_ids_p.append(0)
        tokens_p.append("[SEP]")
        text_type_ids_p.append(0)
        for token in tokens_para:
            tokens_p.append(token)
            text_type_ids_p.append(1)
        tokens_p.append("[SEP]")
        text_type_ids_p.append(1)

        token_ids_p = tokenizer.convert_tokens_to_ids(tokens_p)
        position_ids_p = list(range(len(token_ids_p)))

        if self.is_inference:
            # Inference records carry no label/qid.
            Record = namedtuple('Record',
                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                 'token_ids_p', 'text_type_ids_p', 'position_ids_p'])
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p=token_ids_p,
                text_type_ids_p=text_type_ids_p,
                position_ids_p=position_ids_p)
        else:
            if self.label_map:
                label_id = self.label_map[example.label]
            else:
                label_id = example.label
            Record = namedtuple('Record',
                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                 'token_ids_p', 'text_type_ids_p', 'position_ids_p', \
                 'label_id', 'qid'
                ])
            qid = None
            if "qid" in example._fields:
                qid = example.qid
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p=token_ids_p,
                text_type_ids_p=text_type_ids_p,
                position_ids_p=position_ids_p,
                label_id=label_id,
                qid=qid)
        return record

    def _prepare_batch_data(self, examples, batch_size, phase=None, read_id=False):
        """generate batch records

        Batches by row count, or — when self.in_tokens is set — by an
        approximate token budget using the longest passage seen so far.
        NOTE(review): with read_id=True this calls
        _convert_example_id_to_record, which is not defined in this
        class; presumably a subclass provides it — confirm before use.
        """
        batch_records, max_len = [], 0
        for index, example in enumerate(examples):
            if phase == "train":
                self.current_example = index
            if read_id is False:
                record = self._convert_example_to_record(example, self.q_max_seq_len,
                                                     self.p_max_seq_len, self.tokenizer)
            else:
                record = self._convert_example_id_to_record(example, self.q_max_seq_len,
                                                     self.p_max_seq_len, self.tokenizer)
            max_len = max(max_len, len(record.token_ids_p))
            if self.in_tokens:
                to_append = (len(batch_records) + 1) * max_len <= batch_size
            else:
                to_append = len(batch_records) < batch_size
            if to_append:
                batch_records.append(record)
            else:
                # Current batch is full: flush it and start a new one with
                # the record that did not fit.
                yield self._pad_batch_records(batch_records)
                max_len = len(record.token_ids_p)
                batch_records = [record]

        if batch_records:
            yield self._pad_batch_records(batch_records)

    def get_num_examples(self, input_file):
        """Return the number of examples parsed from *input_file*."""
        examples = self._read_tsv(input_file)
        return len(examples)

    def data_generator(self,
                       input_file,
                       batch_size,
                       epoch,
                       dev_count=1,
                       shuffle=True,
                       phase=None,
                       read_id=False):
        """Return a zero-arg generator factory yielding padded batches.

        Batches are released in groups of *dev_count* (one per device);
        a trailing group smaller than dev_count is dropped.
        """
        examples = self._read_tsv(input_file, batch_size)

        def wrapper():
            all_dev_batches = []
            for epoch_index in range(epoch):
                if phase == "train":
                    self.current_example = 0
                    self.current_epoch = epoch_index
                if shuffle:
                    # In-place shuffle via numpy's global RNG (seeded in __init__).
                    np.random.shuffle(examples)

                for batch_data in self._prepare_batch_data(
                        examples, batch_size, phase=phase, read_id=read_id):
                    if len(all_dev_batches) < dev_count:
                        all_dev_batches.append(batch_data)
                    if len(all_dev_batches) == dev_count:
                        for batch in all_dev_batches:
                            yield batch
                        all_dev_batches = []
        def f():
            # NOTE(review): any exception inside wrapper() is printed and
            # swallowed, so the feed silently ends early on error.
            try:
                for i in wrapper():
                    yield i
            except Exception as e:
                import traceback
                traceback.print_exc()

        return f
class ClassifyReader(BaseReader):
    """Reader for headerless query/title/para/label TSV files.

    Overrides _read_tsv to supply a fixed header and pad the example
    list to a multiple of batch_size, and _pad_batch_records to emit the
    padded query/passage tensors consumed by the dual-encoder graph.
    """

    def _read_tsv(self, input_file, batch_size=16, quotechar=None):
        """Reads a tab separated value file.

        Unlike the base class, the file has no header row: columns are
        assumed to be query, title, para, label. When for_cn is set all
        text columns have spaces stripped. The example list is padded by
        repeating the last row until its length is a multiple of
        batch_size (so fixed-shape batches always fill).
        NOTE(review): an empty input file leaves `example` unbound and
        raises NameError in the padding loop — confirm inputs are
        always non-empty.
        """
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            #headers = next(reader)
            headers = 'query\ttitle\tpara\tlabel'.split('\t')
            text_indices = [
                index for index, h in enumerate(headers) if h != "label"
            ]
            Example = namedtuple('Example', headers)

            examples = []
            for line in reader:
                for index, text in enumerate(line):
                    if index in text_indices:
                        if self.for_cn:
                            line[index] = text.replace(' ', '')
                        else:
                            line[index] = text
                example = Example(*line)
                examples.append(example)
            while len(examples) % batch_size != 0:
                examples.append(example)
            return examples

    def _pad_batch_records(self, batch_records):
        """Pad a list of Records into fixed-shape numpy feed tensors.

        Returns query tensors (ids/type/pos/task/mask), then passage
        tensors, then — outside inference — labels and qids.
        """
        batch_token_ids_q = [record.token_ids_q for record in batch_records]
        batch_text_type_ids_q = [record.text_type_ids_q for record in batch_records]
        batch_position_ids_q = [record.position_ids_q for record in batch_records]
        batch_token_ids_p = [record.token_ids_p for record in batch_records]
        batch_text_type_ids_p = [record.text_type_ids_p for record in batch_records]
        batch_position_ids_p = [record.position_ids_p for record in batch_records]

        if not self.is_inference:
            batch_labels = [record.label_id for record in batch_records]
            batch_labels = np.array(batch_labels).astype("int64").reshape(
                [-1, 1])
            # NOTE(review): a falsy first qid (None or 0) disables qid
            # collection for the whole batch.
            if batch_records[0].qid:
                batch_qids = [record.qid for record in batch_records]
                batch_qids = np.array(batch_qids).astype("int64").reshape(
                    [-1, 1])
            else:
                batch_qids = np.array([]).astype("int64").reshape([-1, 1])

        # padding
        padded_token_ids_q, input_mask_q = pad_batch_data(
            batch_token_ids_q, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_q = pad_batch_data(
            batch_text_type_ids_q, pad_idx=self.pad_id)
        padded_position_ids_q = pad_batch_data(
            batch_position_ids_q, pad_idx=self.pad_id)
        # Task ids are constant per reader (self.task_id).
        padded_task_ids_q = np.ones_like(padded_token_ids_q, dtype="int64") * self.task_id

        padded_token_ids_p, input_mask_p = pad_batch_data(
            batch_token_ids_p, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_p = pad_batch_data(
            batch_text_type_ids_p, pad_idx=self.pad_id)
        padded_position_ids_p = pad_batch_data(
            batch_position_ids_p, pad_idx=self.pad_id)
        padded_task_ids_p = np.ones_like(padded_token_ids_p, dtype="int64") * self.task_id

        return_list = [
            padded_token_ids_q, padded_text_type_ids_q, padded_position_ids_q, padded_task_ids_q,
            input_mask_q,
            padded_token_ids_p, padded_text_type_ids_p, padded_position_ids_p, padded_task_ids_p,
            input_mask_p,
        ]
        if not self.is_inference:
            return_list += [batch_labels, batch_qids]

        return return_list
# Library-only module: the entry-point guard does nothing when run directly.
if __name__ == '__main__':
    pass
| 13,641 | 36.581267 | 97 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/reader/task_reader_shuffle.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
import os
import json
import random
import logging
import numpy as np
import six
from io import open
from collections import namedtuple
import tokenization
from batching import pad_batch_data
log = logging.getLogger(__name__)
# Under Python 3, rewrap stdout/stderr so all printed text is encoded UTF-8.
if six.PY3:
    import io
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def csv_reader(fd, delimiter='\t', trainer_id=0, num_examples_per_node=0):
    """Yield delimiter-split rows from the line iterable *fd*.

    When num_examples_per_node > 0, only the contiguous chunk of lines whose
    index // num_examples_per_node equals trainer_id is yielded (simple
    per-trainer sharding).  A row with a single field is yielded as a
    one-element tuple wrapping the field list; multi-field rows are yielded
    as plain lists.
    """
    def gen():
        for line_idx, raw_line in enumerate(fd):
            # Skip lines that belong to another trainer's shard.
            if num_examples_per_node != 0 and line_idx // num_examples_per_node != trainer_id:
                continue
            fields = raw_line.rstrip('\n').split(delimiter)
            yield (fields,) if len(fields) == 1 else fields
    return gen()
class BaseReader(object):
    """Base dataset reader: turns TSV examples into padded id batches.

    Holds tokenizer/vocab state and the generic example -> record -> batch
    pipeline.  Subclasses (e.g. ClassifyReader below) override `_read_tsv`
    and `_pad_batch_records`.
    """
    def __init__(self,
                 vocab_path,
                 label_map_config=None,
                 q_max_seq_len=128,
                 p_max_seq_len=512,
                 total_num=0,
                 do_lower_case=True,
                 in_tokens=False,
                 is_inference=False,
                 random_seed=None,
                 tokenizer="FullTokenizer",
                 is_classify=True,
                 is_regression=False,
                 for_cn=True,
                 task_id=0):
        self.q_max_seq_len = q_max_seq_len
        self.p_max_seq_len = p_max_seq_len
        # NOTE: the `tokenizer` argument is ignored; FullTokenizer is always used.
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_path, do_lower_case=do_lower_case)
        self.vocab = self.tokenizer.vocab
        self.pad_id = self.vocab["[PAD]"]
        self.cls_id = self.vocab["[CLS]"]
        self.sep_id = self.vocab["[SEP]"]
        self.in_tokens = in_tokens
        self.is_inference = is_inference
        self.for_cn = for_cn
        self.task_id = task_id
        np.random.seed(random_seed)
        self.is_classify = is_classify
        self.is_regression = is_regression
        self.current_example = 0
        self.current_epoch = 0
        self.num_examples = 0
        self.total_num = total_num
        if label_map_config:
            with open(label_map_config, encoding='utf8') as f:
                self.label_map = json.load(f)
        else:
            self.label_map = None
    def get_train_progress(self):
        """Gets progress for training phase: (current example index, current epoch)."""
        return self.current_example, self.current_epoch
    def _read_tsv(self, input_file, quotechar=None):
        """Read a tab separated value file whose first line is the header row."""
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            headers = next(reader)
            Example = namedtuple('Example', headers)
            examples = []
            for line in reader:
                example = Example(*line)
                examples.append(example)
            return examples
    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length.

        This is a simple heuristic which will always truncate the longer
        sequence one token at a time.  This makes more sense than truncating
        an equal percent of tokens from each, since if one sequence is very
        short then each token that's truncated likely contains more
        information than a longer sequence.
        """
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()
    def _convert_example_to_record(self, example, q_max_seq_length, p_max_seq_length, tokenizer):
        """Converts a single `Example` into a single `Record`.

        Builds three encodings: the query alone (``*_q``), the title+para
        pair (``*_tp``) and the joint query+title+para pair (``*_qtp``).
        """
        query = tokenization.convert_to_unicode(example.query)
        tokens_query = tokenizer.tokenize(query)
        # Reserve 2 positions for [CLS]/[SEP] around the query.
        self._truncate_seq_pair([], tokens_query, q_max_seq_length - 2)
        para = tokenization.convert_to_unicode(example.para)
        tokens_para = tokenizer.tokenize(para)
        title = tokenization.convert_to_unicode(example.title)
        tokens_title = tokenizer.tokenize(title)
        # Reserve 3 positions for [CLS] and the two [SEP]s of the pair.
        self._truncate_seq_pair(tokens_title, tokens_para, p_max_seq_length - 3)
        tokens_title_para = tokens_title[:]
        tokens_title_para.extend(tokens_para)
        # The convention in BERT/ERNIE is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0     0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens_q = []
        text_type_ids_q = []
        tokens_q.append("[CLS]")
        text_type_ids_q.append(0)
        for token in tokens_query:
            tokens_q.append(token)
            text_type_ids_q.append(0)
        tokens_q.append("[SEP]")
        text_type_ids_q.append(0)
        token_ids_q = tokenizer.convert_tokens_to_ids(tokens_q)
        position_ids_q = list(range(len(token_ids_q)))
        ### tp: [CLS] title [SEP] para [SEP]
        tokens_tp = []
        text_type_ids_tp = []
        tokens_tp.append("[CLS]")
        text_type_ids_tp.append(0)
        for token in tokens_title:
            tokens_tp.append(token)
            text_type_ids_tp.append(0)
        tokens_tp.append("[SEP]")
        text_type_ids_tp.append(0)
        for token in tokens_para:
            tokens_tp.append(token)
            text_type_ids_tp.append(1)
        tokens_tp.append("[SEP]")
        text_type_ids_tp.append(1)
        token_ids_tp = tokenizer.convert_tokens_to_ids(tokens_tp)
        position_ids_tp = list(range(len(token_ids_tp)))
        ### qtp: [CLS] query [SEP] title para [SEP]
        tokens_qtp = []
        text_type_ids_qtp = []
        tokens_qtp.append("[CLS]")
        text_type_ids_qtp.append(0)
        for token in tokens_query:
            tokens_qtp.append(token)
            text_type_ids_qtp.append(0)
        tokens_qtp.append("[SEP]")
        text_type_ids_qtp.append(0)
        for token in tokens_title_para:
            tokens_qtp.append(token)
            text_type_ids_qtp.append(1)
        tokens_qtp.append("[SEP]")
        text_type_ids_qtp.append(1)
        token_ids_qtp = tokenizer.convert_tokens_to_ids(tokens_qtp)
        position_ids_qtp = list(range(len(token_ids_qtp)))
        if self.is_inference:
            # BUGFIX: this branch previously referenced undefined names
            # token_ids/text_type_ids/position_ids and raised NameError.
            # Inference scores the joint query+title+para sequence, so the
            # *_qtp encoding is returned here.
            Record = namedtuple('Record',
                                ['token_ids', 'text_type_ids', 'position_ids'])
            record = Record(
                token_ids=token_ids_qtp,
                text_type_ids=text_type_ids_qtp,
                position_ids=position_ids_qtp)
        else:
            if self.label_map:
                label_id = self.label_map[example.label]
            else:
                label_id = example.label
            Record = namedtuple('Record', [
                'token_ids_q', 'text_type_ids_q', 'position_ids_q',
                'token_ids_tp', 'text_type_ids_tp', 'position_ids_tp',
                'token_ids_qtp', 'text_type_ids_qtp', 'position_ids_qtp',
                'label_id', 'qid'
            ])
            qid = None
            if "qid" in example._fields:
                qid = example.qid
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_tp=token_ids_tp,
                text_type_ids_tp=text_type_ids_tp,
                position_ids_tp=position_ids_tp,
                token_ids_qtp=token_ids_qtp,
                text_type_ids_qtp=text_type_ids_qtp,
                position_ids_qtp=position_ids_qtp,
                label_id=label_id,
                qid=qid)
        return record
    def _prepare_batch_data(self, examples, batch_size, shuffled_index, phase=None):
        """Generate padded batch records, visiting examples in shuffled_index order.

        When `in_tokens` is set, `batch_size` is a token budget based on the
        longest tp/qtp sequence seen; otherwise it is an example count.
        """
        batch_records, max_len = [], 0
        for index in range(len(examples)):
            example = examples[shuffled_index[index]]
            if phase == "train":
                self.current_example = index
            record = self._convert_example_to_record(example, self.q_max_seq_len,
                                                     self.p_max_seq_len, self.tokenizer)
            max_len = max(max_len, len(record.token_ids_tp))
            max_len = max(max_len, len(record.token_ids_qtp))
            if self.in_tokens:
                to_append = (len(batch_records) + 1) * max_len <= batch_size
            else:
                to_append = len(batch_records) < batch_size
            if to_append:
                batch_records.append(record)
            else:
                yield self._pad_batch_records(batch_records)
                max_len = max(len(record.token_ids_qtp), len(record.token_ids_tp))
                batch_records = [record]
        if batch_records:
            yield self._pad_batch_records(batch_records)
    def get_num_examples(self, input_file):
        """Return the sharded example count computed by data_generator.

        NOTE: *input_file* is ignored; the count comes from `total_num` and
        the trainer sharding done in data_generator.
        """
        # examples = self._read_tsv(input_file)
        # return len(examples)
        return self.num_examples
    def data_generator(self,
                       input_file,
                       batch_size,
                       epoch,
                       dev_count=1,
                       trainer_id=0,
                       trainer_num=1,
                       shuffle=True,
                       p_cnt_per_q=1,
                       phase=None):
        """Return a zero-argument generator factory yielding padded batches.

        In the train phase the file is sharded across trainers and the
        example count is rounded down to a multiple of `p_cnt_per_q` so that
        groups of paragraphs belonging to one query can be shuffled as a
        unit.  NOTE: the keyword arguments passed to `_read_tsv` here match
        the ClassifyReader override, not the base `_read_tsv`.
        """
        if phase == 'train':
            # examples = examples[trainer_id: (len(examples) //trainer_num) * trainer_num : trainer_num]
            self.num_examples_per_node = self.total_num // trainer_num // p_cnt_per_q * p_cnt_per_q
            self.num_examples = self.num_examples_per_node * trainer_num
            examples = self._read_tsv(input_file, trainer_id=trainer_id, trainer_num=trainer_num, num_examples=self.num_examples_per_node)
            log.info('apply sharding %d/%d' % (trainer_id, trainer_num))
        else:
            examples = self._read_tsv(input_file)
        def wrapper():
            all_dev_batches = []
            for epoch_index in range(epoch):
                if phase == "train":
                    self.current_example = 0
                    self.current_epoch = epoch_index
                shuffled_index = np.arange(0, len(examples))
                if shuffle:
                    # Shuffle groups of p_cnt_per_q consecutive examples as
                    # units (requires len(examples) % p_cnt_per_q == 0).
                    shuffled_index = np.reshape(shuffled_index, (-1, p_cnt_per_q))
                    np.random.shuffle(shuffled_index)
                    shuffled_index = np.reshape(shuffled_index, (-1))
                for batch_data in self._prepare_batch_data(
                        examples, batch_size, shuffled_index, phase=phase):
                    if len(all_dev_batches) < dev_count:
                        all_dev_batches.append(batch_data)
                    if len(all_dev_batches) == dev_count:
                        for batch in all_dev_batches:
                            yield batch
                        all_dev_batches = []
        def f():
            # Defensive wrapper: exceptions are printed and swallowed,
            # silently ending the stream.
            try:
                for i in wrapper():
                    yield i
            except Exception as e:
                import traceback
                traceback.print_exc()
        return f
class ClassifyReader(BaseReader):
    """Reader for header-less query/title/para/label TSVs with trainer sharding.

    Produces feed lists with three input streams: query (`_q`), title+para
    (`_tp`) and joint query+title+para (`_qtp`).
    """
    def _read_tsv(self, input_file, quotechar=None, trainer_id=0, trainer_num=1, num_examples=0):
        """Reads a tab separated value file (this trainer's shard only)."""
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f, trainer_id=trainer_id, num_examples_per_node=num_examples)
            # headers = next(reader)
            # The file carries no header row; the column layout is fixed.
            headers = 'query\ttitle\tpara\tlabel'.split('\t')
            text_indices = [
                index for index, h in enumerate(headers) if h != "label"
            ]
            Example = namedtuple('Example', headers)
            examples = []
            for cnt, line in enumerate(reader):
                # Cap at num_examples per node (0 means "no cap").
                if num_examples != 0 and cnt == num_examples:
                    break
                for index, text in enumerate(line):
                    if index in text_indices:
                        if self.for_cn:
                            # Chinese text: inter-token spaces are artifacts.
                            line[index] = text.replace(' ', '')
                        else:
                            line[index] = text
                example = Example(*line)
                examples.append(example)
            return examples
    def _pad_batch_records(self, batch_records):
        """Pad one batch of records into the model's feed list.

        The ordering of `return_list` must match the model's feed order —
        do not reorder.
        """
        batch_token_ids_q = [record.token_ids_q for record in batch_records]
        batch_text_type_ids_q = [record.text_type_ids_q for record in batch_records]
        batch_position_ids_q = [record.position_ids_q for record in batch_records]
        batch_token_ids_tp = [record.token_ids_tp for record in batch_records]
        batch_text_type_ids_tp = [record.text_type_ids_tp for record in batch_records]
        batch_position_ids_tp = [record.position_ids_tp for record in batch_records]
        batch_token_ids_qtp = [record.token_ids_qtp for record in batch_records]
        batch_text_type_ids_qtp = [record.text_type_ids_qtp for record in batch_records]
        batch_position_ids_qtp = [record.position_ids_qtp for record in batch_records]
        if not self.is_inference:
            batch_labels = [record.label_id for record in batch_records]
            if self.is_classify:
                batch_labels = np.array(batch_labels).astype("int64").reshape(
                    [-1, 1])
            elif self.is_regression:
                batch_labels = np.array(batch_labels).astype("float32").reshape(
                    [-1, 1])
            # NOTE(review): truthiness check means a qid of 0 (or None) is
            # treated as "no qid" — confirm qids are 1-based or None.
            if batch_records[0].qid:
                batch_qids = [record.qid for record in batch_records]
                batch_qids = np.array(batch_qids).astype("int64").reshape(
                    [-1, 1])
            else:
                batch_qids = np.array([]).astype("int64").reshape([-1, 1])
        # padding
        padded_token_ids_q, input_mask_q = pad_batch_data(
            batch_token_ids_q, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_q = pad_batch_data(
            batch_text_type_ids_q, pad_idx=self.pad_id)
        padded_position_ids_q = pad_batch_data(
            batch_position_ids_q, pad_idx=self.pad_id)
        padded_task_ids_q = np.ones_like(padded_token_ids_q, dtype="int64") * self.task_id
        padded_token_ids_tp, input_mask_tp = pad_batch_data(
            batch_token_ids_tp, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_tp = pad_batch_data(
            batch_text_type_ids_tp, pad_idx=self.pad_id)
        padded_position_ids_tp = pad_batch_data(
            batch_position_ids_tp, pad_idx=self.pad_id)
        padded_task_ids_tp = np.ones_like(padded_token_ids_tp, dtype="int64") * self.task_id
        padded_token_ids_qtp, input_mask_qtp = pad_batch_data(
            batch_token_ids_qtp, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_qtp = pad_batch_data(
            batch_text_type_ids_qtp, pad_idx=self.pad_id)
        padded_position_ids_qtp = pad_batch_data(
            batch_position_ids_qtp, pad_idx=self.pad_id)
        padded_task_ids_qtp = np.ones_like(padded_token_ids_qtp, dtype="int64") * self.task_id
        return_list = [
            padded_token_ids_q, padded_text_type_ids_q, padded_position_ids_q,
            padded_task_ids_q, input_mask_q,
            padded_token_ids_tp, padded_text_type_ids_tp, padded_position_ids_tp,
            padded_task_ids_tp, input_mask_tp,
            padded_token_ids_qtp, padded_text_type_ids_qtp, padded_position_ids_qtp,
            padded_task_ids_qtp, input_mask_qtp,
        ]
        if not self.is_inference:
            return_list += [batch_labels, batch_qids]
        return return_list
| 17,374 | 39.595794 | 138 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/utils/cards.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
def get_cards():
    """Return the number of GPUs named in CUDA_VISIBLE_DEVICES (0 if unset or empty)."""
    visible = os.environ.get('CUDA_VISIBLE_DEVICES', '')
    return len(visible.split(",")) if visible else 0
| 1,008 | 31.548387 | 74 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/utils/args.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Arguments for configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import six
import os
import sys
import argparse
import logging
import paddle.fluid as fluid
log = logging.getLogger(__name__)
def prepare_logger(logger, debug=False, save_to_file=None):
    """Attach a console handler (and optionally a file handler) to *logger*.

    The logger is forced to DEBUG level and propagation is disabled.
    NOTE: `debug` is accepted for interface compatibility but unused.
    """
    fmt = logging.Formatter(fmt='[%(levelname)s] %(asctime)s [%(filename)12s:%(lineno)5d]:\t%(message)s')
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(fmt)
    logger.addHandler(stream_handler)
    # A file handler is only added when the target file does not exist yet.
    if save_to_file is not None and not os.path.exists(save_to_file):
        file_handler = logging.FileHandler(save_to_file)
        file_handler.setFormatter(fmt)
        logger.addHandler(file_handler)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
def str2bool(v):
    """Parse an argparse boolean string: "true"/"t"/"1" (any case) -> True."""
    # argparse does not parse "True"/"False" strings as python booleans
    # directly, so CLI bool flags are routed through this helper.
    normalized = v.lower()
    return normalized in {"true", "t", "1"}
class ArgumentGroup(object):
    """Thin wrapper over an argparse argument group with uniform defaults."""
    def __init__(self, parser, title, des):
        self._group = parser.add_argument_group(title=title, description=des)
    def add_arg(self, name, type, default, help, positional_arg=False, **kwargs):
        """Register one argument; bool types are parsed through str2bool."""
        flag = name if positional_arg else "--" + name
        if type == bool:
            type = str2bool
        self._group.add_argument(
            flag,
            default=default,
            type=type,
            help=help + ' Default: %(default)s.',
            **kwargs)
def print_arguments(args):
    """Log every attribute of *args* as 'name: value', sorted by name."""
    log.info('----------- Configuration Arguments -----------')
    arg_dict = vars(args)
    for key in sorted(arg_dict):
        log.info('%s: %s' % (key, arg_dict[key]))
    log.info('------------------------------------------------')
def check_cuda(use_cuda, err = \
    "\nYou can not set use_cuda = True in the model because you are using paddlepaddle-cpu.\n \
     Please: 1. Install paddlepaddle-gpu to run your models on GPU or 2. Set use_cuda = False to run models on CPU.\n"
    ):
    """Exit with an error when use_cuda is requested on a CPU-only paddle build."""
    try:
        if use_cuda == True and fluid.is_compiled_with_cuda() == False:
            log.error(err)
            sys.exit(1)
    except Exception:
        # Best effort: if paddle itself cannot answer, skip the check.
        pass
| 2,996 | 34.678571 | 119 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/utils/init.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import six
import ast
import copy
import logging
import numpy as np
import paddle.fluid as fluid
log = logging.getLogger(__name__)
def cast_fp32_to_fp16(exe, main_program):
    """Cast eligible parameters of *main_program* to float16 in place.

    Only "encoder_layer*" parameters that are not layer_norm are cast;
    ".master" copies are skipped, and when a ".master" twin exists it is
    filled with the original fp32 values.
    """
    log.info("Cast parameters to float16 data format.")
    for param in main_program.global_block().all_parameters():
        if not param.name.endswith(".master"):
            param_t = fluid.global_scope().find_var(param.name).get_tensor()
            data = np.array(param_t)
            if param.name.startswith("encoder_layer") \
                    and "layer_norm" not in param.name:
                # fp16 tensors are stored via a uint16 view of the raw bits.
                param_t.set(np.float16(data).view(np.uint16), exe.place)
            #load fp32
            master_param_var = fluid.global_scope().find_var(param.name +
                                                             ".master")
            if master_param_var is not None:
                master_param_var.get_tensor().set(data, exe.place)
def init_checkpoint(exe, init_checkpoint_path, main_program, use_fp16=False):
    """Load persistable variables of *main_program* from a checkpoint directory.

    Variables without a matching file on disk are reported and skipped.
    When use_fp16 is set, parameters are cast to fp16 after loading.
    """
    assert os.path.exists(
        init_checkpoint_path), "[%s] cann't be found." % init_checkpoint_path
    def existed_persitables(var):
        # Only persistable vars that actually exist on disk are loaded.
        if not fluid.io.is_persistable(var):
            return False
        var_path = os.path.join(init_checkpoint_path, var.name)
        var_on_disk = os.path.exists(var_path)
        if not var_on_disk:
            print ("Var not exists: [%s]\t%s" % (var.name, var_path))
        #else:
        #    print ("Var exists: [%s]" % (var.name))
        return var_on_disk
    fluid.io.load_vars(
        exe,
        init_checkpoint_path,
        main_program=main_program,
        predicate=existed_persitables)
    log.info("Load model from {}".format(init_checkpoint_path))
    if use_fp16:
        cast_fp32_to_fp16(exe, main_program)
def init_pretraining_params(exe,
                            pretraining_params_path,
                            main_program,
                            use_fp16=False):
    """Load pretrained parameter values for *main_program* from a directory.

    Unlike init_checkpoint, only fluid Parameters (not all persistables)
    are loaded; parameters without a file on disk are reported and skipped.
    """
    assert os.path.exists(pretraining_params_path
                          ), "[%s] cann't be found." % pretraining_params_path
    def existed_params(var):
        # Only real Parameters with a matching file on disk are loaded.
        if not isinstance(var, fluid.framework.Parameter):
            return False
        var_path = os.path.join(pretraining_params_path, var.name)
        var_on_disk = os.path.exists(var_path)
        if not var_on_disk:
            print ("Var not exists: [%s]\t%s" % (var.name, var_path))
        #else:
        #    print ("Var exists: [%s]" % (var.name))
        return var_on_disk
    fluid.io.load_vars(
        exe,
        pretraining_params_path,
        main_program=main_program,
        predicate=existed_params)
    log.info("Load pretraining parameters from {}.".format(
        pretraining_params_path))
    if use_fp16:
        cast_fp32_to_fp16(exe, main_program)
| 3,632 | 35.33 | 108 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/utils/fp16.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
import paddle.fluid as fluid
def append_cast_op(i, o, prog):
    """
    Append a cast op in a given Program to cast input `i` to data type `o.dtype`.
    Args:
        i (Variable): The input Variable.
        o (Variable): The output Variable.
        prog (Program): The Program to append cast op.
    """
    cast_attrs = {"in_dtype": i.dtype, "out_dtype": o.dtype}
    prog.global_block().append_op(
        type="cast",
        inputs={"X": i},
        outputs={"Out": o},
        attrs=cast_attrs)
def copy_to_master_param(p, block):
    """Create and return an fp32 ".master" twin of parameter *p* in *block*.

    Raises ValueError when *p*'s name is not present in the block.
    """
    v = block.vars.get(p.name, None)
    if v is None:
        raise ValueError("no param name %s found!" % p.name)
    # The master copy mirrors every attribute of the source var except dtype.
    new_p = fluid.framework.Parameter(
        block=block,
        shape=v.shape,
        dtype=fluid.core.VarDesc.VarType.FP32,
        type=v.type,
        lod_level=v.lod_level,
        stop_gradient=p.stop_gradient,
        trainable=p.trainable,
        optimize_attr=p.optimize_attr,
        regularizer=p.regularizer,
        gradient_clip_attr=p.gradient_clip_attr,
        error_clip=p.error_clip,
        name=v.name + ".master")
    return new_p
def apply_dynamic_loss_scaling(loss_scaling, master_params_grads,
                               incr_every_n_steps, decr_every_n_nan_or_inf,
                               incr_ratio, decr_ratio):
    """Insert dynamic loss-scaling update ops into the current program.

    Checks whether the summed master gradients are all finite, lets
    `update_loss_scaling` adjust *loss_scaling* accordingly, and zeroes
    every gradient for the step when an inf/nan was found.
    """
    _incr_every_n_steps = fluid.layers.fill_constant(
        shape=[1], dtype='int32', value=incr_every_n_steps)
    _decr_every_n_nan_or_inf = fluid.layers.fill_constant(
        shape=[1], dtype='int32', value=decr_every_n_nan_or_inf)
    _num_good_steps = fluid.layers.create_global_var(
        name=fluid.unique_name.generate("num_good_steps"),
        shape=[1],
        value=0,
        dtype='int32',
        persistable=True)
    _num_bad_steps = fluid.layers.create_global_var(
        name=fluid.unique_name.generate("num_bad_steps"),
        shape=[1],
        value=0,
        dtype='int32',
        persistable=True)
    # Reduce each grad to a scalar, then sum; one non-finite grad makes the
    # total non-finite.
    grads = [fluid.layers.reduce_sum(g) for [_, g] in master_params_grads]
    all_grads = fluid.layers.concat(grads)
    all_grads_sum = fluid.layers.reduce_sum(all_grads)
    is_overall_finite = fluid.layers.isfinite(all_grads_sum)
    update_loss_scaling(is_overall_finite, loss_scaling, _num_good_steps,
                        _num_bad_steps, _incr_every_n_steps,
                        _decr_every_n_nan_or_inf, incr_ratio, decr_ratio)
    # apply_gradient append all ops in global block, thus we shouldn't
    # apply gradient in the switch branch.
    with fluid.layers.Switch() as switch:
        with switch.case(is_overall_finite):
            pass
        with switch.default():
            # Inf/nan step: zero every gradient so the update is a no-op.
            for _, g in master_params_grads:
                fluid.layers.assign(fluid.layers.zeros_like(g), g)
def create_master_params_grads(params_grads, main_prog, startup_prog,
                               loss_scaling):
    """Build fp32 master (param, grad) pairs for an fp16 training program.

    Each fp16 parameter gets an fp32 ".master" twin (initialized via a cast
    in the startup program); gradients are cast to fp32 and unscaled by
    *loss_scaling*.  layer_norm params stay as-is and only get unscaled.
    """
    master_params_grads = []
    for p, g in params_grads:
        with main_prog._optimized_guard([p, g]):
            # create master parameters
            master_param = copy_to_master_param(p, main_prog.global_block())
            startup_master_param = startup_prog.global_block()._clone_variable(
                master_param)
            startup_p = startup_prog.global_block().var(p.name)
            append_cast_op(startup_p, startup_master_param, startup_prog)
            # cast fp16 gradients to fp32 before apply gradients
            if g.name.find("layer_norm") > -1:
                # layer_norm grads need no cast; only unscale them.
                scaled_g = g / loss_scaling
                master_params_grads.append([p, scaled_g])
                continue
            master_grad = fluid.layers.cast(g, "float32")
            master_grad = master_grad / loss_scaling
            master_params_grads.append([master_param, master_grad])
    return master_params_grads
def master_param_to_train_param(master_params_grads, params_grads, main_prog):
    """Cast updated fp32 master params back into their fp16 training params."""
    for pair_idx, master_pair in enumerate(master_params_grads):
        train_p, _ = params_grads[pair_idx]
        # layer_norm params were never cast to fp16; nothing to copy back.
        if "layer_norm" in train_p.name:
            continue
        with main_prog._optimized_guard([master_pair[0], master_pair[1]]):
            append_cast_op(master_pair[0], train_p, main_prog)
def update_loss_scaling(is_overall_finite, prev_loss_scaling, num_good_steps,
                        num_bad_steps, incr_every_n_steps,
                        decr_every_n_nan_or_inf, incr_ratio, decr_ratio):
    """
    Update loss scaling according to overall gradients. If all gradients is
    finite after incr_every_n_steps, loss scaling will increase by incr_ratio.
    Otherwisw, loss scaling will decrease by decr_ratio after
    decr_every_n_nan_or_inf steps and each step some gradients are infinite.
    Args:
        is_overall_finite (Variable): A boolean variable indicates whether
                                     all gradients are finite.
        prev_loss_scaling (Variable): Previous loss scaling.
        num_good_steps (Variable): A variable accumulates good steps in which
                                   all gradients are finite.
        num_bad_steps (Variable): A variable accumulates bad steps in which
                                  some gradients are infinite.
        incr_every_n_steps (Variable): A variable represents increasing loss
                                       scaling every n consecutive steps with
                                       finite gradients.
        decr_every_n_nan_or_inf (Variable): A variable represents decreasing
                                            loss scaling every n accumulated
                                            steps with nan or inf gradients.
        incr_ratio(float): The multiplier to use when increasing the loss
                           scaling.
        decr_ratio(float): The less-than-one-multiplier to use when decreasing
                           loss scaling.
    """
    zero_steps = fluid.layers.fill_constant(shape=[1], dtype='int32', value=0)
    with fluid.layers.Switch() as switch:
        with switch.case(is_overall_finite):
            # Finite step: after enough consecutive good steps, try to grow
            # the scaling (only if the grown value is itself finite).
            should_incr_loss_scaling = fluid.layers.less_than(
                incr_every_n_steps, num_good_steps + 1)
            with fluid.layers.Switch() as switch1:
                with switch1.case(should_incr_loss_scaling):
                    new_loss_scaling = prev_loss_scaling * incr_ratio
                    loss_scaling_is_finite = fluid.layers.isfinite(
                        new_loss_scaling)
                    with fluid.layers.Switch() as switch2:
                        with switch2.case(loss_scaling_is_finite):
                            fluid.layers.assign(new_loss_scaling,
                                                prev_loss_scaling)
                        with switch2.default():
                            pass
                    fluid.layers.assign(zero_steps, num_good_steps)
                    fluid.layers.assign(zero_steps, num_bad_steps)
                with switch1.default():
                    fluid.layers.increment(num_good_steps)
                    fluid.layers.assign(zero_steps, num_bad_steps)
        with switch.default():
            # Non-finite step: after enough accumulated bad steps, shrink
            # the scaling, flooring it at 1.0.
            should_decr_loss_scaling = fluid.layers.less_than(
                decr_every_n_nan_or_inf, num_bad_steps + 1)
            with fluid.layers.Switch() as switch3:
                with switch3.case(should_decr_loss_scaling):
                    new_loss_scaling = prev_loss_scaling * decr_ratio
                    static_loss_scaling = \
                        fluid.layers.fill_constant(shape=[1],
                                                   dtype='float32',
                                                   value=1.0)
                    less_than_one = fluid.layers.less_than(new_loss_scaling,
                                                           static_loss_scaling)
                    with fluid.layers.Switch() as switch4:
                        with switch4.case(less_than_one):
                            fluid.layers.assign(static_loss_scaling,
                                                prev_loss_scaling)
                        with switch4.default():
                            fluid.layers.assign(new_loss_scaling,
                                                prev_loss_scaling)
                    fluid.layers.assign(zero_steps, num_good_steps)
                    fluid.layers.assign(zero_steps, num_bad_steps)
                with switch3.default():
                    fluid.layers.assign(zero_steps, num_good_steps)
                    fluid.layers.increment(num_bad_steps)
| 9,177 | 44.661692 | 81 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/model/transformer_encoder.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer encoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import paddle.fluid as fluid
import paddle.fluid.layers as layers
def multi_head_attention(queries,
                         keys,
                         values,
                         attn_bias,
                         d_key,
                         d_value,
                         d_model,
                         n_head=1,
                         dropout_rate=0.,
                         cache=None,
                         param_initializer=None,
                         name='multi_head_att'):
    """
    Multi-Head Attention. Note that attn_bias is added to the logit before
    computing softmax activation to mask certain selected positions so that
    they will not considered in attention weights.

    When `keys`/`values` are None they default to `queries` (self-attention).
    `cache` (a dict with "k"/"v" entries) enables incremental decoding: new
    keys/values are concatenated onto the cached ones in place.  All three
    inputs must be 3-D (batch, seq, hidden) tensors.
    """
    keys = queries if keys is None else keys
    values = keys if values is None else values
    if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
        raise ValueError(
            "Inputs: quries, keys and values should all be 3-D tensors.")
    def __compute_qkv(queries, keys, values, n_head, d_key, d_value):
        """
        Add linear projection to queries, keys, and values.
        """
        q = layers.fc(input=queries,
                      size=d_key * n_head,
                      num_flatten_dims=2,
                      param_attr=fluid.ParamAttr(
                          name=name + '_query_fc.w_0',
                          initializer=param_initializer),
                      bias_attr=name + '_query_fc.b_0')
        k = layers.fc(input=keys,
                      size=d_key * n_head,
                      num_flatten_dims=2,
                      param_attr=fluid.ParamAttr(
                          name=name + '_key_fc.w_0',
                          initializer=param_initializer),
                      bias_attr=name + '_key_fc.b_0')
        v = layers.fc(input=values,
                      size=d_value * n_head,
                      num_flatten_dims=2,
                      param_attr=fluid.ParamAttr(
                          name=name + '_value_fc.w_0',
                          initializer=param_initializer),
                      bias_attr=name + '_value_fc.b_0')
        return q, k, v
    def __split_heads(x, n_head):
        """
        Reshape the last dimension of inpunt tensor x so that it becomes two
        dimensions and then transpose. Specifically, input a tensor with shape
        [bs, max_sequence_length, n_head * hidden_dim] then output a tensor
        with shape [bs, n_head, max_sequence_length, hidden_dim].
        """
        hidden_size = x.shape[-1]
        # The value 0 in shape attr means copying the corresponding dimension
        # size of the input as the output dimension size.
        reshaped = layers.reshape(
            x=x, shape=[0, 0, n_head, hidden_size // n_head], inplace=True)
        # permuate the dimensions into:
        # [batch_size, n_head, max_sequence_len, hidden_size_per_head]
        return layers.transpose(x=reshaped, perm=[0, 2, 1, 3])
    def __combine_heads(x):
        """
        Transpose and then reshape the last two dimensions of inpunt tensor x
        so that it becomes one dimension, which is reverse to __split_heads.
        """
        if len(x.shape) == 3: return x
        if len(x.shape) != 4:
            raise ValueError("Input(x) should be a 4-D Tensor.")
        trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
        # The value 0 in shape attr means copying the corresponding dimension
        # size of the input as the output dimension size.
        return layers.reshape(
            x=trans_x,
            shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],
            inplace=True)
    def scaled_dot_product_attention(q, k, v, attn_bias, d_key, dropout_rate):
        """
        Scaled Dot-Product Attention
        """
        scaled_q = layers.scale(x=q, scale=d_key**-0.5)
        product = layers.matmul(x=scaled_q, y=k, transpose_y=True)
        if attn_bias:
            product += attn_bias
        weights = layers.softmax(product)
        if dropout_rate:
            weights = layers.dropout(
                weights,
                dropout_prob=dropout_rate,
                dropout_implementation="upscale_in_train",
                is_test=False)
        out = layers.matmul(weights, v)
        return out
    q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)
    if cache is not None:  # use cache and concat time steps
        # Since the inplace reshape in __split_heads changes the shape of k and
        # v, which is the cache input for next time step, reshape the cache
        # input from the previous time step first.
        k = cache["k"] = layers.concat(
            [layers.reshape(
                cache["k"], shape=[0, 0, d_model]), k], axis=1)
        v = cache["v"] = layers.concat(
            [layers.reshape(
                cache["v"], shape=[0, 0, d_model]), v], axis=1)
    q = __split_heads(q, n_head)
    k = __split_heads(k, n_head)
    v = __split_heads(v, n_head)
    ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_key,
                                                  dropout_rate)
    out = __combine_heads(ctx_multiheads)
    # Project back to the model size.
    proj_out = layers.fc(input=out,
                         size=d_model,
                         num_flatten_dims=2,
                         param_attr=fluid.ParamAttr(
                             name=name + '_output_fc.w_0',
                             initializer=param_initializer),
                         bias_attr=name + '_output_fc.b_0')
    return proj_out
def positionwise_feed_forward(x,
                              d_inner_hid,
                              d_hid,
                              dropout_rate,
                              hidden_act,
                              param_initializer=None,
                              name='ffn'):
    """
    Position-wise feed-forward network: two linear projections with an
    activation in between, applied to each position separately and
    identically.  Dropout is optionally applied after the first projection.
    """
    inner = layers.fc(input=x,
                      size=d_inner_hid,
                      num_flatten_dims=2,
                      act=hidden_act,
                      param_attr=fluid.ParamAttr(
                          name=name + '_fc_0.w_0',
                          initializer=param_initializer),
                      bias_attr=name + '_fc_0.b_0')
    if dropout_rate:
        inner = layers.dropout(
            inner,
            dropout_prob=dropout_rate,
            dropout_implementation="upscale_in_train",
            is_test=False)
    # Project back down to the model hidden size (no activation).
    projected = layers.fc(input=inner,
                          size=d_hid,
                          num_flatten_dims=2,
                          param_attr=fluid.ParamAttr(
                              name=name + '_fc_1.w_0',
                              initializer=param_initializer),
                          bias_attr=name + '_fc_1.b_0')
    return projected
def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.,
                           name=''):
    """
    Add residual connection, layer normalization and dropout to the out
    tensor, optionally, according to the value of process_cmd.  This is used
    before or after multi-head attention and position-wise feed-forward
    networks.

    Args:
        prev_out: residual input, or None to skip the "a" (add) command.
        out: the tensor being processed.
        process_cmd: string of command chars: "a" add residual, "n" layer
            norm, "d" dropout; applied in the given order.
        dropout_rate: dropout probability; 0. makes "d" a no-op.
        name: prefix for the layer-norm parameter names.
    """
    for cmd in process_cmd:
        if cmd == "a":  # add residual connection
            # Fixed: compare against None explicitly instead of relying on
            # the truthiness of a static-graph Variable, which is undefined
            # (and raises in newer paddle versions).
            out = out + prev_out if prev_out is not None else out
        elif cmd == "n":  # add layer normalization
            out_dtype = out.dtype
            # layer_norm is computed in fp32 for numerical stability and
            # cast back to fp16 afterwards if needed.
            if out_dtype == fluid.core.VarDesc.VarType.FP16:
                out = layers.cast(x=out, dtype="float32")
            out = layers.layer_norm(
                out,
                begin_norm_axis=len(out.shape) - 1,
                param_attr=fluid.ParamAttr(
                    name=name + '_layer_norm_scale',
                    initializer=fluid.initializer.Constant(1.)),
                bias_attr=fluid.ParamAttr(
                    name=name + '_layer_norm_bias',
                    initializer=fluid.initializer.Constant(0.)))
            if out_dtype == fluid.core.VarDesc.VarType.FP16:
                out = layers.cast(x=out, dtype="float16")
        elif cmd == "d":  # add dropout
            if dropout_rate:
                out = layers.dropout(
                    out,
                    dropout_prob=dropout_rate,
                    dropout_implementation="upscale_in_train",
                    is_test=False)
    return out
# Pre-processing never has a residual input, so bind prev_out=None;
# post-processing is the same function with the residual supplied.
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
def encoder_layer(enc_input,
                  attn_bias,
                  n_head,
                  d_key,
                  d_value,
                  d_model,
                  d_inner_hid,
                  prepostprocess_dropout,
                  attention_dropout,
                  relu_dropout,
                  hidden_act,
                  preprocess_cmd="n",
                  postprocess_cmd="da",
                  param_initializer=None,
                  name=''):
    """The encoder layers that can be stacked to form a deep encoder.
    This module consists of a multi-head (self) attention followed by
    position-wise feed-forward networks, and both components are accompanied
    by the post_process_layer to add residual connection, layer normalization
    and dropout.

    Returns a (layer_output, ffn_output) tuple; the second element is
    collected by the caller as a recompute checkpoint.
    """
    # Self-attention sub-layer (keys/values None -> attend over enc_input).
    attn_output = multi_head_attention(
        pre_process_layer(
            enc_input,
            preprocess_cmd,
            prepostprocess_dropout,
            name=name + '_pre_att'),
        None,
        None,
        attn_bias,
        d_key,
        d_value,
        d_model,
        n_head,
        attention_dropout,
        param_initializer=param_initializer,
        name=name + '_multi_head_att')
    attn_output = post_process_layer(
        enc_input,
        attn_output,
        postprocess_cmd,
        prepostprocess_dropout,
        name=name + '_post_att')
    # Position-wise feed-forward sub-layer.
    ffd_output = positionwise_feed_forward(
        pre_process_layer(
            attn_output,
            preprocess_cmd,
            prepostprocess_dropout,
            name=name + '_pre_ffn'),
        d_inner_hid,
        d_model,
        relu_dropout,
        hidden_act,
        param_initializer=param_initializer,
        name=name + '_ffn')
    return post_process_layer(
        attn_output,
        ffd_output,
        postprocess_cmd,
        prepostprocess_dropout,
        name=name + '_post_ffn'), ffd_output
def encoder(enc_input,
            attn_bias,
            n_layer,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            prepostprocess_dropout,
            attention_dropout,
            relu_dropout,
            hidden_act,
            preprocess_cmd="n",
            postprocess_cmd="da",
            param_initializer=None,
            model_name='',
            name=''):
    """
    The encoder is composed of a stack of identical layers returned by calling
    encoder_layer.

    Returns:
        (enc_output, checkpoints): final hidden states plus the per-layer
        FFN outputs used as recompute checkpoints.
    """
    checkpoints = []
    for i in range(n_layer):
        enc_output, cp = encoder_layer(
            enc_input,
            attn_bias,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            prepostprocess_dropout,
            attention_dropout,
            relu_dropout,
            hidden_act,
            preprocess_cmd,
            postprocess_cmd,
            param_initializer=param_initializer,
            name=name + '_layer_' + str(i))
        checkpoints.append(cp)
        # Output of layer i feeds layer i + 1.
        enc_input = enc_output
    # Final normalization of the stack output (cmd from preprocess_cmd).
    enc_output = pre_process_layer(
        enc_output, preprocess_cmd, prepostprocess_dropout, name=model_name+"post_encoder")
    return enc_output, checkpoints
| 12,649 | 35.666667 | 91 | py |
RocketQA | RocketQA-main/research/RocketQAv2_EMNLP2021/model/src/model/ernie.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ernie model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import json
import six
import logging
import paddle.fluid as fluid
from io import open
from model.transformer_encoder import encoder, pre_process_layer
log = logging.getLogger(__name__)
class ErnieConfig(object):
    """Loads an ERNIE model configuration from a JSON file and exposes it
    with dict-style access (missing keys yield None)."""

    def __init__(self, config_path):
        self._config_dict = self._parse(config_path)

    def _parse(self, config_path):
        """Read and decode the JSON config; wrap any failure in IOError."""
        try:
            with open(config_path, 'r', encoding='utf8') as json_file:
                parsed = json.load(json_file)
        except Exception:
            raise IOError("Error in parsing Ernie model config file '%s'" %
                          config_path)
        return parsed

    def __getitem__(self, key):
        # Missing keys resolve to None rather than raising KeyError.
        return self._config_dict.get(key, None)

    def print_config(self):
        """Log every config entry, sorted by key."""
        for key in sorted(self._config_dict):
            log.info('%s: %s' % (key, self._config_dict[key]))
        log.info('------------------------------------------------')
class ErnieModel(object):
    """ERNIE transformer encoder tower.

    Builds the token/position/sentence(/task) embedding lookups, the n-head
    self-attention mask and the stacked transformer encoder.  Every parameter
    name is prefixed with ``model_name`` so multiple towers (e.g. query and
    paragraph encoders) can coexist in one program.

    Fixes versus the previous revision:
      * ``get_pooled_output`` had a commented-out ``if`` branch that left a
        dangling ``else:`` behind -- a SyntaxError.  The squeeze is now
        unconditional (the disabled branch is kept as a comment).
      * ``get_lm_output`` called ``get_pooled_output()`` with no argument
        while the method required one; ``model_name`` now defaults to ''.
    """

    def __init__(self,
                 model_name,
                 src_ids,
                 position_ids,
                 sentence_ids,
                 task_ids,
                 input_mask,
                 config,
                 weight_sharing=True,
                 use_fp16=False):
        """
        Args:
            model_name: prefix for all parameter names of this tower.
            src_ids/position_ids/sentence_ids/task_ids: int id tensors.
            input_mask: float mask tensor, nonzero for real tokens.
            config: ErnieConfig-like mapping of hyper-parameters.
            weight_sharing: share the word embedding with the MLM output.
            use_fp16: run the encoder in float16.
        """
        self._emb_size = config['hidden_size']
        self._n_layer = config['num_hidden_layers']
        self._n_head = config['num_attention_heads']
        self._voc_size = config['vocab_size']
        self._max_position_seq_len = config['max_position_embeddings']
        # Prefer the dedicated sentence-type vocab size when present.
        if config['sent_type_vocab_size']:
            self._sent_types = config['sent_type_vocab_size']
        else:
            self._sent_types = config['type_vocab_size']
        self._use_task_id = config['use_task_id']
        if self._use_task_id:
            self._task_types = config['task_type_vocab_size']
        self._hidden_act = config['hidden_act']
        self._prepostprocess_dropout = config['hidden_dropout_prob']
        self._attention_dropout = config['attention_probs_dropout_prob']
        self._weight_sharing = weight_sharing
        self.checkpoints = []
        self._word_emb_name = "word_embedding"
        self._pos_emb_name = "pos_embedding"
        self._sent_emb_name = "sent_embedding"
        self._task_emb_name = "task_embedding"
        self._dtype = "float16" if use_fp16 else "float32"
        # Embedding lookups always run in fp32; cast happens before encoding.
        self._emb_dtype = "float32"
        # Initialize all weights by truncated normal initializer; all biases
        # are initialized to constant zero by default.
        self._param_initializer = fluid.initializer.TruncatedNormal(
            scale=config['initializer_range'])
        self._build_model(model_name, src_ids, position_ids, sentence_ids,
                          task_ids, input_mask)

    def _build_model(self, model_name, src_ids, position_ids, sentence_ids,
                     task_ids, input_mask):
        """Construct embeddings, attention mask and the encoder stack."""
        # Padding id in vocabulary must be set to 0.
        emb_out = fluid.layers.embedding(
            input=src_ids,
            size=[self._voc_size, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=model_name + self._word_emb_name,
                initializer=self._param_initializer),
            is_sparse=False)
        position_emb_out = fluid.layers.embedding(
            input=position_ids,
            size=[self._max_position_seq_len, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=model_name + self._pos_emb_name,
                initializer=self._param_initializer))
        sent_emb_out = fluid.layers.embedding(
            sentence_ids,
            size=[self._sent_types, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=model_name + self._sent_emb_name,
                initializer=self._param_initializer))
        # Sum of word + position + sentence (+ optional task) embeddings.
        emb_out = emb_out + position_emb_out
        emb_out = emb_out + sent_emb_out
        if self._use_task_id:
            task_emb_out = fluid.layers.embedding(
                task_ids,
                size=[self._task_types, self._emb_size],
                dtype=self._emb_dtype,
                param_attr=fluid.ParamAttr(
                    name=model_name + self._task_emb_name,
                    initializer=self._param_initializer))
            emb_out = emb_out + task_emb_out
        # 'nd' = layer norm then dropout on the summed embeddings.
        emb_out = pre_process_layer(
            emb_out, 'nd', self._prepostprocess_dropout,
            name=model_name + 'pre_encoder')
        if self._dtype == "float16":
            emb_out = fluid.layers.cast(x=emb_out, dtype=self._dtype)
            input_mask = fluid.layers.cast(x=input_mask, dtype=self._dtype)
        # Outer product of the mask with itself gives the pairwise
        # attendable positions; scale so masked logits become -10000.
        self_attn_mask = fluid.layers.matmul(
            x=input_mask, y=input_mask, transpose_y=True)
        self_attn_mask = fluid.layers.scale(
            x=self_attn_mask, scale=10000.0, bias=-1.0, bias_after_scale=False)
        n_head_self_attn_mask = fluid.layers.stack(
            x=[self_attn_mask] * self._n_head, axis=1)
        n_head_self_attn_mask.stop_gradient = True
        self._enc_out, self.checkpoints = encoder(
            enc_input=emb_out,
            attn_bias=n_head_self_attn_mask,
            n_layer=self._n_layer,
            n_head=self._n_head,
            d_key=self._emb_size // self._n_head,
            d_value=self._emb_size // self._n_head,
            d_model=self._emb_size,
            d_inner_hid=self._emb_size * 4,
            prepostprocess_dropout=self._prepostprocess_dropout,
            attention_dropout=self._attention_dropout,
            relu_dropout=0,
            hidden_act=self._hidden_act,
            preprocess_cmd="",
            postprocess_cmd="dan",
            param_initializer=self._param_initializer,
            model_name=model_name,
            name=model_name + 'encoder')
        if self._dtype == "float16":
            self._enc_out = fluid.layers.cast(
                x=self._enc_out, dtype=self._emb_dtype)

    def get_sequence_output(self):
        """Return the final hidden states [batch, seq_len, hidden]."""
        return self._enc_out

    def get_pooled_output(self, model_name=''):
        """Get the first feature of each sequence for classification.

        ``model_name`` is kept (now with a default so callers that pass no
        argument keep working); the fc-pooling branch it selected is
        currently disabled.
        """
        next_sent_feat = fluid.layers.slice(
            input=self._enc_out, axes=[1], starts=[0], ends=[1])
        # Disabled fc pooling head for the 'qtp_' tower; all towers return
        # the raw [CLS] feature.
        # if model_name == 'qtp_':
        #     next_sent_feat = fluid.layers.fc(
        #         input=next_sent_feat,
        #         size=self._emb_size,
        #         act="tanh",
        #         param_attr=fluid.ParamAttr(
        #             name=model_name+"pooled_fc.w_0", initializer=self._param_initializer),
        #         bias_attr=model_name+"pooled_fc.b_0")
        next_sent_feat = fluid.layers.squeeze(next_sent_feat, axes=[1])
        return next_sent_feat

    def get_lm_output(self, mask_label, mask_pos):
        """Get the loss & accuracy for pretraining (masked LM head)."""
        mask_pos = fluid.layers.cast(x=mask_pos, dtype='int32')
        # Extract the first token feature in each sentence.
        self.next_sent_feat = self.get_pooled_output()
        reshaped_emb_out = fluid.layers.reshape(
            x=self._enc_out, shape=[-1, self._emb_size])
        # Extract masked tokens' features.
        mask_feat = fluid.layers.gather(input=reshaped_emb_out, index=mask_pos)
        # Transform: fc.
        mask_trans_feat = fluid.layers.fc(
            input=mask_feat,
            size=self._emb_size,
            act=self._hidden_act,
            param_attr=fluid.ParamAttr(
                name='mask_lm_trans_fc.w_0',
                initializer=self._param_initializer),
            bias_attr=fluid.ParamAttr(name='mask_lm_trans_fc.b_0'))
        # Transform: layer norm.
        # NOTE(review): the layer-norm bias is initialized to 1.0 here;
        # 0.0 is the usual choice -- confirm against released checkpoints.
        mask_trans_feat = fluid.layers.layer_norm(
            mask_trans_feat,
            begin_norm_axis=len(mask_trans_feat.shape) - 1,
            param_attr=fluid.ParamAttr(
                name='mask_lm_trans_layer_norm_scale',
                initializer=fluid.initializer.Constant(1.)),
            bias_attr=fluid.ParamAttr(
                name='mask_lm_trans_layer_norm_bias',
                initializer=fluid.initializer.Constant(1.)))
        #mask_trans_feat = pre_process_layer(
        #    mask_trans_feat, 'n', name='mask_lm_trans')
        mask_lm_out_bias_attr = fluid.ParamAttr(
            name="mask_lm_out_fc.b_0",
            initializer=fluid.initializer.Constant(value=0.0))
        if self._weight_sharing:
            # Tie the output projection to the word embedding.
            # NOTE(review): the lookup uses the unprefixed embedding name;
            # _build_model creates it under model_name + prefix -- confirm
            # this path is only used with an empty model_name.
            fc_out = fluid.layers.matmul(
                x=mask_trans_feat,
                y=fluid.default_main_program().global_block().var(
                    self._word_emb_name),
                transpose_y=True)
            fc_out += fluid.layers.create_parameter(
                shape=[self._voc_size],
                dtype=self._emb_dtype,
                attr=mask_lm_out_bias_attr,
                is_bias=True)
        else:
            fc_out = fluid.layers.fc(input=mask_trans_feat,
                                     size=self._voc_size,
                                     param_attr=fluid.ParamAttr(
                                         name="mask_lm_out_fc.w_0",
                                         initializer=self._param_initializer),
                                     bias_attr=mask_lm_out_bias_attr)
        mask_lm_loss = fluid.layers.softmax_with_cross_entropy(
            logits=fc_out, label=mask_label)
        mean_mask_lm_loss = fluid.layers.mean(mask_lm_loss)
        return mean_mask_lm_loss

    def get_task_output(self, task, task_labels):
        """Classification head over the pooled [CLS] feature.

        Args:
            task: dict with "num_labels" and "task_name".
            task_labels: int label tensor.
        Returns:
            (mean_loss, accuracy) pair.
        """
        task_fc_out = fluid.layers.fc(input=self.next_sent_feat,
                                      size=task["num_labels"],
                                      param_attr=fluid.ParamAttr(
                                          name=task["task_name"] + "_fc.w_0",
                                          initializer=self._param_initializer),
                                      bias_attr=task["task_name"] + "_fc.b_0")
        task_loss, task_softmax = fluid.layers.softmax_with_cross_entropy(
            logits=task_fc_out, label=task_labels, return_softmax=True)
        task_acc = fluid.layers.accuracy(input=task_softmax, label=task_labels)
        mean_task_loss = fluid.layers.mean(task_loss)
        return mean_task_loss, task_acc
| 11,151 | 39.70073 | 92 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/metric/msmarco_eval.py | """
This module computes evaluation metrics for MSMARCO dataset on the ranking task.
Command line:
python msmarco_eval_ranking.py <path_to_reference_file> <path_to_candidate_file>
Creation Date : 06/12/2018
Last Modified : 1/21/2019
Authors : Daniel Campos <dacamp@microsoft.com>, Rutger van Haasteren <ruvanh@microsoft.com>
"""
import sys
from collections import Counter
MaxMRRRank = 10
def load_reference_from_stream(f):
    """Load Reference reference relevant passages
    Args:
        f (stream): stream to load; one "qid\\tpid" pair per line.
    Returns:
        qids_to_relevant_passageids (dict): dictionary mapping from query_id
        (int) to relevant passages (list of ints).
    Raises:
        IOError: if a line cannot be parsed as two tab-separated ints.
    """
    qids_to_relevant_passageids = {}
    for l in f:
        try:
            parts = l.strip().split('\t')
            qid = int(parts[0])
            pid = int(parts[1])
        except (ValueError, IndexError):
            # Narrowed from a bare except so programming errors and
            # KeyboardInterrupt are no longer swallowed.
            raise IOError('\"%s\" is not valid format' % l)
        # setdefault replaces the check-then-append two-step.
        qids_to_relevant_passageids.setdefault(qid, []).append(pid)
    return qids_to_relevant_passageids
def load_reference(path_to_reference):
    """Load Reference reference relevant passages
    Args:
        path_to_reference (str): path to a file to load.
    Returns:
        qids_to_relevant_passageids (dict): dictionary mapping from query_id
        (int) to relevant passages (list of ints).
    """
    with open(path_to_reference, 'r') as reference_stream:
        return load_reference_from_stream(reference_stream)
def load_candidate_from_stream(f):
    """Load candidate data from a stream.
    Args:
        f (stream): stream to load; one "qid\\tpid\\trank" triple per line.
    Returns:
        qid_to_ranked_candidate_passages (dict): dictionary mapping from
        query_id (int) to a list of 1000 passage ids (int) ranked by
        relevance; unfilled ranks stay 0.
    Raises:
        IOError: if a line cannot be parsed as three tab-separated ints.
    """
    qid_to_ranked_candidate_passages = {}
    for l in f:
        try:
            parts = l.strip().split('\t')
            qid = int(parts[0])
            pid = int(parts[1])
            rank = int(parts[2])
        except (ValueError, IndexError):
            # Narrowed from a bare except so unrelated errors propagate.
            raise IOError('\"%s\" is not valid format' % l)
        # By default, all PIDs in the list of 1000 are 0. Only override
        # those that are given; setdefault creates the slate once per qid.
        ranked = qid_to_ranked_candidate_passages.setdefault(qid, [0] * 1000)
        ranked[rank - 1] = pid
    return qid_to_ranked_candidate_passages
def load_candidate(path_to_candidate):
    """Load candidate data from a file.
    Args:
        path_to_candidate (str): path to file to load.
    Returns:
        qid_to_ranked_candidate_passages (dict): dictionary mapping from
        query_id (int) to a list of 1000 passage ids (int) ranked by
        relevance and importance.
    """
    with open(path_to_candidate, 'r') as candidate_stream:
        return load_candidate_from_stream(candidate_stream)
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Perform quality checks on the dictionaries
    Args:
        qids_to_relevant_passageids (dict): dictionary of query-passage
            mapping, as read with load_reference(_from_stream).
        qids_to_ranked_candidate_passages (dict): dictionary of query-passage
            candidates, as read with load_candidate(_from_stream).
    Returns:
        bool, str: Boolean whether allowed, message to be shown in case of a
        problem (the message describes the last offending query found).
    """
    message = ''
    allowed = True
    # Check that we do not have multiple passages per query; the 0 padding
    # entries are expected duplicates and are ignored.
    for qid, pids in qids_to_ranked_candidate_passages.items():
        duplicate_pids = {pid for pid, count in Counter(pids).items()
                          if count > 1} - {0}
        if duplicate_pids:
            message = ("Cannot rank a passage multiple times for a single "
                       "query. QID={qid}, PID={pid}").format(
                           qid=qid, pid=list(duplicate_pids)[0])
            allowed = False
    return allowed, message
def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Compute MRR metric
    Args:
        qids_to_relevant_passageids (dict): dictionary of query-passage
            mapping, as read with load_reference(_from_stream).
        qids_to_ranked_candidate_passages (dict): dictionary of query-passage
            candidates.
    Returns:
        dict: {'MRR @10', 'recall@1', 'recall@50', 'recall@all',
        'QueriesRanked'}.
    Raises:
        IOError: if no candidate qid has a reference entry.
    """
    all_scores = {}
    MRR = 0
    qids_with_relevant_passages = 0  # NOTE(review): never updated or used below
    ranking = []
    # Sets of qids whose first relevant passage appears at rank 1 / top-50 /
    # anywhere in the candidate list.
    recall_q_top1 = set()
    recall_q_top50 = set()
    recall_q_all = set()
    for qid in qids_to_ranked_candidate_passages:
        if qid in qids_to_relevant_passageids:
            ranking.append(0)
            target_pid = qids_to_relevant_passageids[qid]
            candidate_pid = qids_to_ranked_candidate_passages[qid]
            # MRR@10: reciprocal rank of the first relevant passage within
            # the top MaxMRRRank candidates (0 contribution otherwise).
            for i in range(0, MaxMRRRank):
                if candidate_pid[i] in target_pid:
                    MRR += 1.0 / (i + 1)
                    ranking.pop()
                    ranking.append(i + 1)
                    break
            # Recall buckets: stop at the first relevant passage found.
            for i, pid in enumerate(candidate_pid):
                if pid in target_pid:
                    recall_q_all.add(qid)
                    if i < 50:
                        recall_q_top50.add(qid)
                    if i == 0:
                        recall_q_top1.add(qid)
                    break
    if len(ranking) == 0:
        raise IOError("No matching QIDs found. Are you sure you are scoring the evaluation set?")
    # Denominators use the number of *labeled* queries, not candidates.
    MRR = MRR / len(qids_to_relevant_passageids)
    recall_top1 = len(recall_q_top1) * 1.0 / len(qids_to_relevant_passageids)
    recall_top50 = len(recall_q_top50) * 1.0 / len(qids_to_relevant_passageids)
    recall_all = len(recall_q_all) * 1.0 / len(qids_to_relevant_passageids)
    all_scores['MRR @10'] = MRR
    all_scores["recall@1"] = recall_top1
    all_scores["recall@50"] = recall_top50
    all_scores["recall@all"] = recall_all
    all_scores['QueriesRanked'] = len(qids_to_ranked_candidate_passages)
    return all_scores
def compute_metrics_from_files(path_to_reference, path_to_candidate, perform_checks=True):
    """Compute MRR metric
    Args:
    p_path_to_reference_file (str): path to reference file.
        Reference file should contain lines in the following format:
            QUERYID\tPASSAGEID
        Where PASSAGEID is a relevant passage for a query. Note QUERYID can repeat on different lines with different PASSAGEIDs
    p_path_to_candidate_file (str): path to candidate file.
        Candidate file should contain lines in the following format:
            QUERYID\tPASSAGEID1\tRank
        If a user wishes to use the TREC format please run the script with a -t flag at the end. If this flag is used the expected format is
            QUERYID\tITER\tDOCNO\tRANK\tSIM\tRUNID
        Where the values are separated by tabs and ranked in order of relevance
    Returns:
        dict: dictionary of metrics {'MRR': <MRR Score>}
    """
    qids_to_relevant_passageids = load_reference(path_to_reference)
    qids_to_ranked_candidate_passages = load_candidate(path_to_candidate)
    if perform_checks:
        # Duplicate-passage sanity check; a non-empty message is a problem.
        allowed, message = quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages)
        if message != '': print(message)
    return compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages)
def main():
    """Command line:
    python msmarco_eval_ranking.py <path_to_reference_file> <path_to_candidate_file>
    """
    if len(sys.argv) == 3:
        path_to_reference, path_to_candidate = sys.argv[1], sys.argv[2]
    else:
        # No (or wrong number of) arguments: fall back to the default files.
        path_to_reference = 'metric/qp_reference.all.tsv'
        path_to_candidate = 'metric/ranking_res'
        #print('Usage: msmarco_eval_ranking.py <reference ranking> <candidate ranking>')
        #exit()
    scores = compute_metrics_from_files(path_to_reference, path_to_candidate)
    print('#####################')
    for name in sorted(scores):
        print('{}: {}'.format(name, scores[name]))
    print('#####################')
if __name__ == '__main__':
    main()
| 8,406 | 38.843602 | 161 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/metric/nq_eval.py | import sys
import numpy as np
sys.path.append('data_process/')
sys.path.append('metric/')
from dpr.utils.tokenizers import SimpleTokenizer
import utils
import unicodedata
# Command line: recall candidate file (qid \t pid \t rank \t score).
recall_cands_file = sys.argv[1]
topk = 100  # NOTE(review): unused below -- the loop walks the full list
answers = utils.load_answers('test')
q_text, p_text, p_title = utils.load_corpus(corpus='nq', q_type='test')
cand_qp_all, train_qids = utils.load_candidates(recall_cands_file, col=4)
def has_answer(answers, text, tokenizer, match_type):
    """Return 1 if any gold answer occurs as a token sub-sequence of `text`.

    Both the passage and the answers are NFD-normalized and compared as
    lowercase token lists (DPR-style "string" matching).
    NOTE(review): only match_type == 'string' performs any matching; any
    other value returns 0 -- confirm callers always pass 'string'.
    """
    text = unicodedata.normalize('NFD', text)
    if match_type == 'string':
        text = tokenizer.tokenize(text).words(uncased=True)
        for single_answer in answers:
            single_answer = unicodedata.normalize('NFD', single_answer)
            single_answer = tokenizer.tokenize(single_answer)
            single_answer = single_answer.words(uncased=True)
            # Slide an answer-length window over the passage tokens.
            for i in range(0, len(text) - len(single_answer) + 1):
                if single_answer == text[i: i+ len(single_answer)]:
                    return 1
    return 0
print('calculating acc')
# Sets of queries that have an answer-bearing passage within the top k.
right_top100 = set()
right_top50 = set()
right_top20 = set()
right_top10 = set()
right_top5 = set()
tok_opts = {}
tokenizer = SimpleTokenizer(**tok_opts)
for qid, pids in cand_qp_all.items():
    answer = answers[qid]
    # Find the rank of the first answer-bearing passage for this query.
    for i, pid in enumerate(pids):
        if has_answer(answer, p_text[pid], tokenizer, 'string'):
            if i < 100:
                right_top100.add(qid)
            if i < 50:
                right_top50.add(qid)
            if i < 20:
                right_top20.add(qid)
            if i < 10:
                right_top10.add(qid)
            if i < 5:
                right_top5.add(qid)
            break
query_num = len(cand_qp_all)
print(query_num)
print(len(right_top100))
# Top-k retrieval accuracy: fraction of queries with an answer-bearing
# passage among the top k candidates.
r100 = len(right_top100) * 1.0 / query_num
r50 = len(right_top50) * 1.0 / query_num
r20 = len(right_top20) * 1.0 / query_num
r10 = len(right_top10) * 1.0 / query_num
r5 = len(right_top5) * 1.0 / query_num
print('recall@100: ' + str(r100))
print('recall@50: ' + str(r50))
print('recall@20: ' + str(r20))
print('recall@10: ' + str(r10))
print('recall@5: ' + str(r5))
| 2,204 | 31.426471 | 73 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/metric/dpr/__init__.py | 0 | 0 | 0 | py | |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/metric/dpr/utils/tokenizers.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Most of the tokenizers code here is copied from DrQA codebase to avoid adding extra dependency
"""
import copy
import logging
import regex
logger = logging.getLogger(__name__)
class Tokens(object):
    """A class to represent a list of tokenized text.

    Each element of `data` is a tuple indexed by the class constants below:
    raw token text, token text plus trailing whitespace, (start, end)
    character span, and optional POS / lemma / NER annotations.
    """
    TEXT = 0
    TEXT_WS = 1
    SPAN = 2
    POS = 3
    LEMMA = 4
    NER = 5
    def __init__(self, data, annotators, opts=None):
        self.data = data
        self.annotators = annotators
        self.opts = opts or {}
    def __len__(self):
        """The number of tokens."""
        return len(self.data)
    def slice(self, i=None, j=None):
        """Return a view of the list of tokens from [i, j)."""
        new_tokens = copy.copy(self)
        new_tokens.data = self.data[i: j]
        return new_tokens
    def untokenize(self):
        """Returns the original text (with whitespace reinserted)."""
        return ''.join([t[self.TEXT_WS] for t in self.data]).strip()
    def words(self, uncased=False):
        """Returns a list of the text of each token
        Args:
            uncased: lower cases text
        """
        if uncased:
            return [t[self.TEXT].lower() for t in self.data]
        else:
            return [t[self.TEXT] for t in self.data]
    def offsets(self):
        """Returns a list of [start, end) character offsets of each token."""
        return [t[self.SPAN] for t in self.data]
    def pos(self):
        """Returns a list of part-of-speech tags of each token.
        Returns None if this annotation was not included.
        """
        if 'pos' not in self.annotators:
            return None
        return [t[self.POS] for t in self.data]
    def lemmas(self):
        """Returns a list of the lemmatized text of each token.
        Returns None if this annotation was not included.
        """
        if 'lemma' not in self.annotators:
            return None
        return [t[self.LEMMA] for t in self.data]
    def entities(self):
        """Returns a list of named-entity-recognition tags of each token.
        Returns None if this annotation was not included.
        """
        if 'ner' not in self.annotators:
            return None
        return [t[self.NER] for t in self.data]
    def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):
        """Returns a list of all ngrams from length 1 to n.
        Args:
            n: upper limit of ngram length
            uncased: lower cases text
            filter_fn: user function that takes in an ngram list and returns
              True or False to keep or not keep the ngram
            as_string: return the ngram as a string vs list
        """
        def _skip(gram):
            if not filter_fn:
                return False
            return filter_fn(gram)
        words = self.words(uncased)
        # Spans are stored as (start, end_exclusive).
        ngrams = [(s, e + 1)
                  for s in range(len(words))
                  for e in range(s, min(s + n, len(words)))
                  if not _skip(words[s:e + 1])]
        # Concatenate into strings
        if as_strings:
            ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]
        return ngrams
    def entity_groups(self):
        """Group consecutive entity tokens with the same NER tag."""
        entities = self.entities()
        if not entities:
            return None
        non_ent = self.opts.get('non_ent', 'O')
        groups = []
        idx = 0
        while idx < len(entities):
            ner_tag = entities[idx]
            # Check for entity tag
            if ner_tag != non_ent:
                # Chomp the sequence
                start = idx
                while (idx < len(entities) and entities[idx] == ner_tag):
                    idx += 1
                groups.append((self.slice(start, idx).untokenize(), ner_tag))
            else:
                idx += 1
        return groups
class Tokenizer(object):
    """Base tokenizer class.
    Tokenizers implement tokenize, which should return a Tokens class.
    """
    def tokenize(self, text):
        # Subclasses must return a Tokens instance for `text`.
        raise NotImplementedError
    def shutdown(self):
        # Hook for tokenizers holding external resources; default no-op.
        pass
    def __del__(self):
        self.shutdown()
class SimpleTokenizer(Tokenizer):
    # A token is either a run of letters/digits/marks, or any single
    # non-whitespace, non-control character (punctuation etc.).
    ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'
    NON_WS = r'[^\p{Z}\p{C}]'
    def __init__(self, **kwargs):
        """
        Args:
            annotators: None or empty set (only tokenizes).
        """
        self._regexp = regex.compile(
            '(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS),
            flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
        )
        if len(kwargs.get('annotators', {})) > 0:
            logger.warning('%s only tokenizes! Skipping annotators: %s' %
                           (type(self).__name__, kwargs.get('annotators')))
        self.annotators = set()
    def tokenize(self, text):
        """Split `text` into Tokens, recording each token's raw text,
        its text plus trailing whitespace, and its character span."""
        data = []
        matches = [m for m in self._regexp.finditer(text)]
        for i in range(len(matches)):
            # Get text
            token = matches[i].group()
            # Get whitespace: everything up to the next token's start.
            span = matches[i].span()
            start_ws = span[0]
            if i + 1 < len(matches):
                end_ws = matches[i + 1].span()[0]
            else:
                end_ws = span[1]
            # Format data
            data.append((
                token,
                text[start_ws: end_ws],
                span,
            ))
        return Tokens(data, self.annotators)
| 5,679 | 28.278351 | 94 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/metric/dpr/utils/__init__.py | 0 | 0 | 0 | py | |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/data_process/construct_marco_train_ce.py | import sys
import random
sys.path.append('data_process/')
import utils
# Build cross-encoder training data: positives from qrels, negatives
# randomly sampled from the dense-retrieval candidates.
recall_cands_file = sys.argv[1]
outfile = sys.argv[2]
random_seed = 111
rng = random.Random(random_seed)  # fixed seed for reproducible sampling
neg_cnt = 4  # negatives sampled per query
q_text, p_text, p_title = utils.load_corpus(corpus='marco', q_type='train')
pos_qp, pos_qp_new = utils.load_pos_examples(p_text)
cand_qp_all, train_qids = utils.load_candidates(recall_cands_file, col=4)
# neg examples
neg_qp = {}
for qid, pids in cand_qp_all.items():
    select_pid = []
    # Rejection-sample candidates that are not (known) positives.
    # NOTE(review): loops forever if a query has fewer than neg_cnt
    # non-positive candidates -- confirm candidate lists are large enough.
    while len(select_pid) < neg_cnt:
        _pid = rng.choice(pids)
        if _pid in pos_qp[qid] or _pid in select_pid or _pid in pos_qp_new.get(qid, []):
            continue
        select_pid.append(_pid)
    neg_qp[qid] = select_pid
# Output format: query \t title \t para \t label (1 positive, 0 negative).
with open(outfile, 'w') as out:
    for qid in pos_qp:
        for pid in pos_qp[qid]:
            out.write('%s\t%s\t%s\t1\n' % (q_text[qid], p_title.get(pid, ''), p_text[pid]))
    for qid in neg_qp:
        for pid in neg_qp[qid]:
            out.write('%s\t%s\t%s\t0\n' % (q_text[qid], p_title.get(pid, ''), p_text[pid]))
| 1,050 | 26.657895 | 91 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/data_process/construct_marco_train_de.py | import sys
import random
sys.path.append('data_process/')
import utils
# Build dual-encoder training data with denoised hard negatives: top-50
# candidates the cross-encoder scores below ce_threshold_neg, topped up
# with random candidates beyond rank 50.
recall_cands_file = sys.argv[1]
ce_score_file = sys.argv[2]
outfile = sys.argv[3]
random_seed = 111
rng = random.Random(random_seed)  # fixed seed for reproducible sampling
neg_cnt = 4  # negatives per positive
ce_threshold_neg = 0.1  # below this CE score a candidate counts as negative
ce_threshold_pos = 0.9  # NOTE(review): unused in this script
q_text, p_text, p_title = utils.load_corpus(corpus='marco', q_type='train')
pos_qp, pos_qp_add = utils.load_pos_examples(p_text)
cand_qp_all, train_qids = utils.load_candidates(recall_cands_file, col=4)
ce_score = utils.load_ce_score(ce_score_file, train_qids)
# neg examples
neg_qp = {}
for qid, pids in cand_qp_all.items():
    if qid not in pos_qp:
        continue
    select_pid = []
    pos_cnt = len(pos_qp[qid])
    # Denoised hard negatives from the top 50 candidates.
    for index in range(50):
        _pid = pids[index]
        if len(select_pid) == neg_cnt * pos_cnt:
            break
        if _pid in pos_qp[qid] or _pid in select_pid or _pid in pos_qp_add.get(qid, []):
            continue
        if ce_score[qid][index] < ce_threshold_neg:
            select_pid.append(_pid)
    # Top up with random candidates beyond rank 50.
    # NOTE(review): loops forever if pids[50:] has too few non-positives.
    while len(select_pid) < neg_cnt * pos_cnt:
        _pid = rng.choice(pids[50:])
        if _pid in pos_qp[qid] or _pid in select_pid or _pid in pos_qp_add.get(qid, []):
            continue
        select_pid.append(_pid)
    neg_qp[qid] = select_pid
# Output format: query \t pos_title \t pos_para \t neg_title \t neg_para \t 0
# ('-' stands in for a missing title).
with open(outfile, 'w') as out:
    for qid in pos_qp:
        # NOTE(review): KeyError if a positive qid has no candidates --
        # confirm every qrels query appears in the recall file.
        neg_pids = neg_qp[qid]
        for i in range(neg_cnt):
            for pos_pid in pos_qp[qid]:
                neg_pid = neg_pids.pop()
                out.write('%s\t%s\t%s\t%s\t%s\t0\n' % (q_text[qid],
                        p_title.get(pos_pid, '-'), p_text[pos_pid],
                        p_title.get(neg_pid, '-'), p_text[neg_pid]))
| 1,665 | 29.290909 | 88 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/data_process/construct_nq_train_de.py | import sys
import random
import utils
# Build NQ dual-encoder training pairs: the positive is the first top-100
# candidate containing a gold answer; the negative is the first answer-free
# candidate the cross-encoder scores below ce_threshold_neg.
recall_cands_file = sys.argv[1]
ce_score_file = sys.argv[2]
outfile = sys.argv[3]
neg_cnt = 4  # NOTE(review): unused -- one pair is emitted per query
ce_threshold_neg = 0.1  # below this CE score a candidate counts as negative
ce_threshold_pos = 0.9  # NOTE(review): unused in this script
q_text, p_text, p_title = utils.load_corpus(corpus='nq', q_type='train')
answers = utils.load_answers(q_type='train')
cand_qp_all, train_qids = utils.load_candidates(recall_cands_file, col=4)
ce_score = utils.load_ce_score(ce_score_file, train_qids, topk=100)
out = open(outfile, 'w')
for qid, pids in cand_qp_all.items():
    pos_pid = ''
    neg_pid = ''
    for index in range(100):
        _pid = pids[index]
        # Answer match in passage text or title marks a positive.
        if utils.has_answer(p_text[_pid], answers[qid]) or utils.has_answer(p_title[_pid], answers[qid]):
            if not pos_pid:
                pos_pid = _pid
        else:
            if not neg_pid and ce_score[qid][index] < ce_threshold_neg:
                neg_pid = _pid
        # Emit one (positive, negative) pair per query, then stop.
        if pos_pid and neg_pid:
            out.write('%s\t%s\t%s\t%s\t%s\t0\n' % (q_text[qid],
                    p_title.get(pos_pid, '-'), p_text[pos_pid],
                    p_title.get(neg_pid, '-'), p_text[neg_pid]))
            break
out.close()
| 1,132 | 30.472222 | 105 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/data_process/utils.py | import sys
import os
# Absolute directory of this module.
# NOTE(review): `cur_path` appears unused in this file -- confirm before removing.
cur_path = os.path.dirname(os.path.realpath(__file__))
# Root of all corpus/data files; relative, so it resolves against the CWD.
corpus_path = 'corpus/'
def load_id_text(file_name):
    """Load a two-column ``id<TAB>text`` tsv file into a dict.

    :param file_name: path to the tsv file, one record per line.
    :return: dict mapping the id string to its text.
    """
    id_text = {}
    with open(file_name) as inp:
        for line in inp:
            line = line.strip()
            # Split only on the FIRST tab so text that itself contains tabs is
            # kept intact (the original unbounded split crashed on such lines).
            # `key` also avoids shadowing the builtin `id`.
            key, text = line.split('\t', 1)
            id_text[key] = text
    return id_text
def load_corpus(corpus='marco', q_type='train', unlabel=False):
    """Load queries, passages and passage titles for a corpus.

    Labeled queries live under the corpus directory; unlabeled queries come
    from the shared ``augment`` directory (one file for marco, one for the
    rest).  Returns ``(q_text, p_text, p_title)``, each an id -> text dict.
    """
    if not unlabel:
        query_file = os.path.join(corpus_path, corpus, '%s.query.txt' % q_type)
    else:
        augment_name = ('augment/orcas_yahoo_nq.query.txt'
                        if corpus == 'marco' else 'augment/mrqa.query.txt')
        query_file = os.path.join(corpus_path, augment_name)
    q_text = load_id_text(query_file)
    p_text = load_id_text(os.path.join(corpus_path, corpus, 'para.txt'))
    p_title = load_id_text(os.path.join(corpus_path, corpus, 'para.title.txt'))
    print('load all corpus done!')
    return q_text, p_text, p_title
def load_answers(q_type='train'):
    """Load gold answers for NQ questions.

    Each line of ``nq/<q_type>.answers.txt`` is ``qid<TAB>ans1<TAB>ans2...``.
    Answers are lower-cased after stripping leading/trailing dots.
    Returns a dict mapping qid -> list of normalized answer strings.
    """
    answers_path = os.path.join(corpus_path, 'nq/%s.answers.txt' % q_type)
    qid_answers = {}
    with open(answers_path) as fin:
        for raw_line in fin:
            fields = raw_line.strip().split('\t')
            qid_answers[fields[0]] = [ans.strip('.').lower()
                                      for ans in fields[1:]]
    print('has answer qids: %s' % len(qid_answers))
    return qid_answers
def has_answer(text, answers):
    """Return True if any normalized answer occurs in the normalized text.

    Normalization lower-cases and removes all spaces (after stripping
    surrounding whitespace), so matching ignores case and spacing.

    :param text: passage (or title) text to search in.
    :param answers: iterable of candidate answer strings.
    :return: True if some answer is a substring of the text, else False.
    """
    # Normalizing `text` is loop-invariant -- the original recomputed it for
    # every answer.  Do it once, then use `in` instead of `find(...) != -1`.
    norm_text = text.strip().lower().replace(' ', '')
    return any(
        answer.strip().lower().replace(' ', '') in norm_text
        for answer in answers
    )
def load_pos_examples(p_text):
    """Load positive (relevant) qid->pid mappings (only for MSMARCO).

    Reads the official qrels plus an additional qrels file collected by
    literal match, and returns ``(pos_qp, pos_qp_add)``, both dicts mapping
    qid -> list of positive pids.
    NOTE(review): the ``p_text`` argument is never used in this function --
    presumably kept for signature symmetry with other loaders; confirm.
    """
    pos_qp = {}
    file = os.path.join(corpus_path, 'marco/qrels.train.tsv')
    with open(file) as inp:
        for line in inp:
            line = line.strip()
            qid, pid = line.split('\t')
            if qid not in pos_qp:
                pos_qp[qid] = []
            pos_qp[qid].append(pid)
    print('positive qids: %s' % len(pos_qp))
    # additional positive examples(collect by literal match)
    pos_qp_add = {}
    file_add = os.path.join(corpus_path, 'marco/qrels.train.addition.tsv')
    with open(file_add) as inp:
        for line in inp:
            qid, pid = line.strip().split('\t')
            if qid not in pos_qp_add:
                pos_qp_add[qid] = []
            pos_qp_add[qid].append(pid)
    return pos_qp, pos_qp_add
def load_candidates(filename, col=4, topk=0):
    """Load retrieved candidate passages per query.

    Each line is ``qid<TAB>pid<TAB>rank`` (3 columns) or
    ``qid<TAB>pid<TAB>rank<TAB>score`` (4 columns, the default).
    Candidates ranked beyond ``topk`` are dropped when ``topk`` > 0.

    :return: ``(cand_qp_all, train_qids)`` -- a qid -> [pid, ...] dict and
        the list of qids in first-seen order.
    """
    cand_qp_all = {}
    train_qids = []
    with open(filename) as fin:
        for raw in fin:
            fields = raw.strip()
            if col == 4:
                qid, pid, rank, _score = fields.split('\t')
            else:
                qid, pid, rank = fields.split('\t')
            if topk > 0 and int(rank) > topk:
                continue
            if qid not in cand_qp_all:
                cand_qp_all[qid] = []
                train_qids.append(qid)
            cand_qp_all[qid].append(pid)
    print('load candidate qids: %s' % len(cand_qp_all))
    return cand_qp_all, train_qids
def load_ce_score(filename, train_qids, topk=50):
    """Load cross-encoder scores, one float per line.

    The file is assumed to hold exactly ``topk`` consecutive scores for each
    qid in ``train_qids``, in the same order as the candidate file.

    :return: dict mapping qid -> list of float scores.
    """
    ce_score = {}
    with open(filename) as fin:
        for line_no, raw in enumerate(fin):
            # Integer division maps blocks of `topk` lines onto one qid.
            qid = train_qids[line_no // topk]
            ce_score.setdefault(qid, []).append(float(raw.strip()))
    print('load cross_encoder score: %s' % len(ce_score))
    return ce_score
if __name__ == '__main__':
    # Manual smoke test: exercises the corpus paths by loading NQ answers.
    load_answers()
| 3,845 | 31.05 | 78 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/data_process/construct_nq_train_ce.py | import sys
import random
sys.path.append('data_process/')
import utils
# Build cross-encoder (CE) training examples for NQ from recall candidates.
# Usage: construct_nq_train_ce.py <recall_cands_file> <outfile>
recall_cands_file = sys.argv[1]
outfile = sys.argv[2]
random_seed = 111
rng = random.Random(random_seed)
q_text, p_text, p_title = utils.load_corpus(corpus='nq', q_type='train')
answers = utils.load_answers(q_type='train')
cand_qp_all, train_qids = utils.load_candidates(recall_cands_file, col=4)
out = open(outfile, 'w')
for qid, pids in cand_qp_all.items():
    pos_pid = ''
    neg_pid_cand = []
    # First answer-bearing candidate is the positive; every answer-free
    # candidate in the top-100 is a potential negative.
    for index in range(100):
        _pid = pids[index]
        if utils.has_answer(p_text[_pid], answers[qid]) or utils.has_answer(p_title[_pid], answers[qid]):
            if not pos_pid:
                pos_pid = _pid
        else:
            neg_pid_cand.append(_pid)
    if pos_pid:
        out.write('%s\t%s\t%s\t1\n' % (q_text[qid], p_title.get(pos_pid, ''), p_text[pos_pid]))
        if neg_pid_cand:
            # BUGFIX: use the seeded `rng` (created above for reproducibility)
            # instead of the unseeded module-level `random.choice`, which made
            # the sampled negatives non-deterministic despite the fixed seed.
            neg_pid = rng.choice(neg_pid_cand)
            out.write('%s\t%s\t%s\t0\n' % (q_text[qid], p_title.get(neg_pid, ''), p_text[neg_pid]))
out.close()
| 1,034 | 30.363636 | 105 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/data_process/construct_unlabel_train_de.py | import sys
import random
sys.path.append('data_process/')
import utils
# Build dual-encoder training pairs from UNLABELED queries: a candidate with
# a very high cross-encoder score becomes a pseudo positive, candidates with
# very low scores become negatives.
# Usage: construct_unlabel_train_de.py <recall_cands> <ce_scores> <outfile> <corpus>
recall_cands_file = sys.argv[1]
ce_score_file = sys.argv[2]
outfile = sys.argv[3]
corpus = sys.argv[4]
random_seed = 111
# NOTE(review): `rng` is created but never used in this script -- presumably
# kept for symmetry with the labeled-data scripts; confirm.
rng = random.Random(random_seed)
neg_cnt = 1
if corpus == 'marco':
    neg_cnt = 4
ce_threshold_neg = 0.1
ce_threshold_pos = 0.9
q_text, p_text, p_title = utils.load_corpus(corpus=corpus, q_type='train', unlabel=True)
cand_qp_all, train_qids = utils.load_candidates(recall_cands_file, col=4)
ce_score = utils.load_ce_score(ce_score_file, train_qids)
# pseudo pos examples: the arg-max-scored candidate, if above the threshold.
pos_qp = {}
for qid, pids in cand_qp_all.items():
    # `select_pid` is [best_pid, best_score]; seeding the list with the
    # threshold makes the first comparison act as the acceptance bar.
    select_pid = [ce_threshold_pos]
    for index in range(50):
        _pid = pids[index]
        score = ce_score[qid][index]
        if score > select_pid[-1]:
            select_pid = [_pid, score]
    if select_pid[-1] > ce_threshold_pos:
        pos_qp[qid] = select_pid[0]
# neg examples: up to neg_cnt low-scored candidates per query.
neg_qp = {}
for qid, pids in cand_qp_all.items():
    if qid not in pos_qp:
        continue
    select_pid = []
    pos_cnt = 1
    for index in range(50):
        _pid = pids[index]
        if len(select_pid) == neg_cnt * pos_cnt:
            break
        # BUGFIX: pos_qp[qid] is a single pid STRING, so the original
        # `_pid in pos_qp[qid]` did SUBSTRING matching (pid "12" would
        # wrongly match stored pid "312"); equality is the intended check.
        if _pid == pos_qp[qid] or _pid in select_pid:
            continue
        if ce_score[qid][index] < ce_threshold_neg:
            select_pid.append(_pid)
    neg_qp[qid] = select_pid
with open(outfile, 'w') as out:
    for qid, neg_pids in neg_qp.items():
        pos_pid = pos_qp[qid]
        for neg_pid in neg_pids:
            # Output: query, pos_title, pos_text, neg_title, neg_text, label(0)
            out.write('%s\t%s\t%s\t%s\t%s\t0\n' % (q_text[qid],
                    p_title.get(pos_pid, '-'), p_text[pos_pid],
                    p_title.get(neg_pid, '-'), p_text[neg_pid]))
| 1,692 | 26.754098 | 88 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/train_de.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning on classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import time
import logging
import multiprocessing
import numpy as np
# NOTE(paddle-dev): All of these flags should be
# set before `import paddle`. Otherwise, it would
# not take any effect.
os.environ['FLAGS_eager_delete_tensor_gb'] = '0' # enable gc
import paddle.fluid as fluid
import reader.reader_de as reader_de
from model.ernie import ErnieConfig
from finetune.dual_encoder import create_model, evaluate, predict
from optimization import optimization
from utils.args import print_arguments, check_cuda, prepare_logger
from utils.init import init_pretraining_params, init_checkpoint
from finetune_args import parser
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
args = parser.parse_args()
log = logging.getLogger()
def main(args):
    """End-to-end entry point for dual-encoder fine-tuning.

    Builds the train/test Paddle programs, restores parameters from a
    checkpoint or pretrained weights, then runs collective (fleet)
    distributed training and/or evaluation/prediction depending on the
    ``do_train`` / ``do_val`` / ``do_test`` flags in ``args``.
    """
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()
    # Pick the device: first GPU when CUDA is enabled, otherwise CPU with
    # CPU_NUM (or the machine's core count) as the device count.
    if args.use_cuda:
        dev_list = fluid.cuda_places()
        place = dev_list[0]
        dev_count = len(dev_list)
    else:
        place = fluid.CPUPlace()
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
    exe = fluid.Executor(place)
    reader = reader_de.ClassifyReader(
        vocab_path=args.vocab_path,
        label_map_config=args.label_map_config,
        q_max_seq_len=args.q_max_seq_len,
        p_max_seq_len=args.p_max_seq_len,
        total_num=args.train_data_size,
        do_lower_case=args.do_lower_case,
        in_tokens=args.in_tokens,
        random_seed=args.random_seed,
        tokenizer=args.tokenizer,
        for_cn=args.for_cn,
        task_id=args.task_id)
    if not (args.do_train or args.do_val or args.do_test):
        raise ValueError("For args `do_train`, `do_val` and `do_test`, at "
                         "least one of them must be True.")
    if args.do_test:
        assert args.test_save is not None
    startup_prog = fluid.Program()
    if args.random_seed is not None:
        startup_prog.random_seed = args.random_seed
    if args.predict_batch_size == None:
        args.predict_batch_size = args.batch_size
    if args.do_train:
        # Collective (all-reduce) distributed training via the fleet API;
        # each worker feeds its own shard of the training data.
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        dev_count = fleet.worker_num()
        train_data_generator = reader.data_generator(
            input_file=args.train_set,
            batch_size=args.batch_size,
            epoch=args.epoch,
            dev_count=1,
            trainer_id=fleet.worker_index(),
            trainer_num=fleet.worker_num(),
            shuffle=True,
            phase="train")
        num_train_examples = reader.get_num_examples(args.train_set)
        # In token-budget mode the effective batch count is batch_size/max_seq_len.
        if args.in_tokens:
            if args.batch_size < args.max_seq_len:
                raise ValueError('if in_tokens=True, batch_size should greater than max_sqelen, got batch_size:%d seqlen:%d' % (args.batch_size, args.max_seq_len))
            max_train_steps = args.epoch * num_train_examples // (
                args.batch_size // args.max_seq_len) // dev_count
        else:
            max_train_steps = args.epoch * num_train_examples // args.batch_size // dev_count
        warmup_steps = int(max_train_steps * args.warmup_proportion)
        log.info("Device count: %d" % dev_count)
        log.info("Num train examples: %d" % num_train_examples)
        log.info("Max train steps: %d" % max_train_steps)
        log.info("Num warmup steps: %d" % warmup_steps)
        train_program = fluid.Program()
        # use fleet api
        exec_strategy = fluid.ExecutionStrategy()
        if args.use_fast_executor:
            exec_strategy.use_experimental_executor = True
        exec_strategy.num_threads = dev_count
        if args.is_distributed:
            exec_strategy.num_threads = 3
        exec_strategy.num_iteration_per_drop_scope = args.num_iteration_per_drop_scope
        dist_strategy = DistributedStrategy()
        dist_strategy.exec_strategy = exec_strategy
        dist_strategy.nccl_comm_num = 1
        if args.is_distributed:
            dist_strategy.nccl_comm_num = 2
            dist_strategy.use_hierarchical_allreduce = True
        if args.use_recompute:
            dist_strategy.forward_recompute = True
            dist_strategy.enable_sequential_execution = True
        if args.use_mix_precision:
            dist_strategy.use_amp = True
        with fluid.program_guard(train_program, startup_prog):
            with fluid.unique_name.guard():
                train_pyreader, graph_vars, checkpoints = create_model(
                    args,
                    pyreader_name='train_reader',
                    ernie_config=ernie_config,
                    batch_size=args.batch_size,
                    fleet_handle=fleet)
                if args.use_recompute:
                    dist_strategy.recompute_checkpoints=checkpoints
                scheduled_lr = optimization(
                    loss=graph_vars["loss"],
                    warmup_steps=warmup_steps,
                    num_train_steps=max_train_steps,
                    learning_rate=args.learning_rate,
                    train_program=train_program,
                    startup_prog=startup_prog,
                    weight_decay=args.weight_decay,
                    scheduler=args.lr_scheduler,
                    use_dynamic_loss_scaling=args.use_dynamic_loss_scaling,
                    incr_every_n_steps=args.incr_every_n_steps,
                    decr_every_n_nan_or_inf=args.decr_every_n_nan_or_inf,
                    incr_ratio=args.incr_ratio,
                    decr_ratio=args.decr_ratio,
                    dist_strategy=dist_strategy,
                    use_lamb=args.use_lamb)
    if args.do_val or args.do_test:
        # A separate (cloned, inference-only) program shares parameters with
        # the training program through the common startup program.
        test_prog = fluid.Program()
        with fluid.program_guard(test_prog, startup_prog):
            with fluid.unique_name.guard():
                test_pyreader, test_graph_vars = create_model(
                    args,
                    pyreader_name='test_reader',
                    ernie_config=ernie_config,
                    batch_size=args.predict_batch_size,
                    is_prediction=True)
        test_prog = test_prog.clone(for_test=True)
    # NOTE(review): this is executed unconditionally, but `fleet` is only
    # initialized inside the do_train branch above -- confirm eval/test-only
    # runs are expected to work.
    train_program = fleet.main_program
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    if args.do_train:
        if args.init_checkpoint and args.init_pretraining_params:
            log.warning(
                "WARNING: args 'init_checkpoint' and 'init_pretraining_params' "
                "both are set! Only arg 'init_checkpoint' is made valid.")
        if args.init_checkpoint:
            init_checkpoint(
                exe,
                args.init_checkpoint,
                main_program=startup_prog)
        elif args.init_pretraining_params:
            init_pretraining_params(
                exe,
                args.init_pretraining_params,
                main_program=startup_prog)
    elif args.do_val or args.do_test:
        if not args.init_checkpoint:
            raise ValueError("args 'init_checkpoint' should be set if"
                             "only doing validation or testing!")
        init_checkpoint(
            exe,
            args.init_checkpoint,
            main_program=startup_prog)
    if args.do_train:
        train_exe = exe
        train_pyreader.decorate_tensor_provider(train_data_generator)
    else:
        train_exe = None
    test_exe = exe
    if args.do_train:
        train_pyreader.start()
        steps = 0
        if warmup_steps > 0:
            graph_vars["learning_rate"] = scheduled_lr
        ce_info = []
        time_begin = time.time()
        last_epoch = 0
        current_epoch = 0
        total_loss = []
        # Train until the data generator is exhausted (EOFException).
        # Only worker 0 logs metrics, saves checkpoints and runs validation.
        while True:
            try:
                steps += 1
                if fleet.worker_index() != 0:
                    train_exe.run(fetch_list=[], program=train_program)
                    continue
                if steps % args.skip_steps != 0:
                    train_exe.run(fetch_list=[], program=train_program)
                else:
                    # Every `skip_steps` steps, fetch loss/accuracy for logging.
                    outputs = evaluate(
                        train_exe,
                        train_program,
                        train_pyreader,
                        graph_vars,
                        "train",
                        metric=args.metric)
                    if args.verbose:
                        verbose = "train pyreader queue size: %d, " % train_pyreader.queue.size(
                        )
                        verbose += "learning rate: %f" % (
                            outputs["learning_rate"]
                            if warmup_steps > 0 else args.learning_rate)
                        log.info(verbose)
                    current_example, current_epoch = reader.get_train_progress()
                    time_end = time.time()
                    used_time = time_end - time_begin
                    total_loss.append(outputs["loss"])
                    log.info(
                        "epoch: %d, progress: %d/%d, step: %d, ave loss: %f, "
                        "ave acc: %f, speed: %f steps/s" %
                        (current_epoch, current_example * dev_count, num_train_examples,
                         steps, np.mean(total_loss), outputs["accuracy"],
                         args.skip_steps / used_time))
                    ce_info.append(
                        [outputs["loss"], outputs["accuracy"], used_time])
                    time_begin = time.time()
                if steps % args.save_steps == 0:
                    save_path = os.path.join(args.checkpoints,
                                             "step_" + str(steps))
                    fluid.io.save_persistables(exe, save_path, fleet._origin_program)
                # if steps % args.validation_steps == 0 or last_epoch != current_epoch:
                if steps % args.validation_steps == 0:
                    # evaluate dev set
                    if args.do_val:
                        evaluate_wrapper(args, reader, exe, test_prog,
                                         test_pyreader, test_graph_vars,
                                         current_epoch, steps)
                    if args.do_test:
                        predict_wrapper(args, reader, exe, test_prog,
                                        test_pyreader, test_graph_vars,
                                        current_epoch, steps)
                if last_epoch != current_epoch:
                    last_epoch = current_epoch
            except fluid.core.EOFException:
                # Data exhausted: save the final checkpoint and stop.
                save_path = os.path.join(args.checkpoints, "step_" + str(steps))
                fluid.io.save_persistables(exe, save_path, fleet._origin_program)
                train_pyreader.reset()
                break
    # final eval on dev set
    if args.do_val:
        evaluate_wrapper(args, reader, exe, test_prog, test_pyreader,
                         test_graph_vars, current_epoch, steps)
    # final eval on test set
    if args.do_test:
        predict_wrapper(args, reader, exe, test_prog, test_pyreader, test_graph_vars,
                        current_epoch, steps)
def evaluate_wrapper(args, reader, exe, test_prog, test_pyreader, graph_vars,
                     epoch, steps):
    """Run evaluation on every comma-separated dev set and log the metrics."""
    dev_files = args.dev_set.split(',')
    for dev_file in dev_files:
        data_gen = reader.data_generator(
            dev_file,
            batch_size=args.predict_batch_size,
            epoch=1,
            dev_count=1,
            shuffle=False)
        test_pyreader.decorate_tensor_provider(data_gen)
        log.info("validation result of dataset {}:".format(dev_file))
        evaluate_info = evaluate(
            exe,
            test_prog,
            test_pyreader,
            graph_vars,
            "dev",
            metric=args.metric)
        log.info(evaluate_info + ', file: {}, epoch: {}, steps: {}'.format(
            dev_file, epoch, steps))
def predict_wrapper(args, reader, exe, test_prog, test_pyreader, graph_vars,
                    epoch, steps):
    """Run prediction on each test set and dump one probability per line.

    ``args.test_set`` and ``args.test_save`` are parallel comma-separated
    lists; predictions for ``test_sets[i]`` are written to
    ``save_dirs[i].<epoch>.<steps>``.
    """
    test_sets = args.test_set.split(',')
    save_dirs = args.test_save.split(',')
    assert len(test_sets) == len(save_dirs)
    for test_f, save_f in zip(test_sets, save_dirs):
        test_pyreader.decorate_tensor_provider(
            reader.data_generator(
                test_f,
                batch_size=args.predict_batch_size,
                epoch=1,
                dev_count=1,
                shuffle=False))
        save_path = save_f + '.' + str(epoch) + '.' + str(steps)
        log.info("testing {}, save to {}".format(test_f, save_path))
        # qids/preds are unused here; only the raw probabilities are saved.
        qids, preds, probs = predict(
            exe,
            test_prog,
            test_pyreader,
            graph_vars)
        save_dir = os.path.dirname(save_path)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        else:
            # BUGFIX: the old message claimed saving would be skipped, but the
            # file was (and still is) written below -- say what really happens.
            log.warning('save dir exists: %s, existing files may be overwritten' % save_dir)
        # Replaced a leftover `print("DEBUG:...")` with a proper log line.
        log.info('number of predicted probabilities: %d' % len(probs))
        with open(save_path, 'w') as f:
            for p in probs:
                f.write('{}\n'.format(p))
if __name__ == '__main__':
    # Wire up logging, echo the parsed CLI arguments, verify CUDA is usable
    # when requested, then launch training / evaluation.
    prepare_logger(log)
    print_arguments(args)
    check_cuda(args.use_cuda)
    main(args)
| 14,023 | 36.198939 | 163 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/optimization.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimization and learning rate scheduling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
def linear_warmup_decay(learning_rate, warmup_steps, num_train_steps):
    """ Applies linear warmup of learning rate from 0 and decay to 0.

    Builds a scheduled-LR variable in the default main program: the LR rises
    linearly from 0 to ``learning_rate`` over ``warmup_steps``, then decays
    linearly (polynomial decay with power=1) to 0 at ``num_train_steps``.
    Returns the LR variable to be fed to the optimizer.
    """
    with fluid.default_main_program()._lr_schedule_guard():
        lr = fluid.layers.tensor.create_global_var(
            shape=[1],
            value=0.0,
            dtype='float32',
            persistable=True,
            name="scheduled_learning_rate")
        global_step = fluid.layers.learning_rate_scheduler._decay_step_counter()
        # Graph-side branch: warmup phase vs. decay phase, selected at runtime
        # by comparing the global step counter against warmup_steps.
        with fluid.layers.control_flow.Switch() as switch:
            with switch.case(global_step < warmup_steps):
                warmup_lr = learning_rate * (global_step / warmup_steps)
                fluid.layers.tensor.assign(warmup_lr, lr)
            with switch.default():
                decayed_lr = fluid.layers.learning_rate_scheduler.polynomial_decay(
                    learning_rate=learning_rate,
                    decay_steps=num_train_steps,
                    end_learning_rate=0.0,
                    power=1.0,
                    cycle=False)
                fluid.layers.tensor.assign(decayed_lr, lr)
        return lr
def optimization(loss,
                 warmup_steps,
                 num_train_steps,
                 learning_rate,
                 train_program,
                 startup_prog,
                 weight_decay,
                 scheduler='linear_warmup_decay',
                 use_dynamic_loss_scaling=False,
                 incr_every_n_steps=1000,
                 decr_every_n_nan_or_inf=2,
                 incr_ratio=2.0,
                 decr_ratio=0.8,
                 dist_strategy=None,
                 use_lamb=False):
    """Attach an optimizer (Adam or LAMB) with LR scheduling, global-norm
    gradient clipping and decoupled weight decay to ``train_program``.

    Returns the scheduled learning-rate variable.
    NOTE(review): use_dynamic_loss_scaling / incr_every_n_steps /
    decr_every_n_nan_or_inf / incr_ratio / decr_ratio are accepted but not
    referenced in this body -- presumably consumed by an AMP path elsewhere;
    confirm before relying on them.
    """
    if warmup_steps > 0:
        # Warmup enabled: build the requested LR schedule.
        if scheduler == 'noam_decay':
            scheduled_lr = fluid.layers.learning_rate_scheduler\
                .noam_decay(1/(warmup_steps *(learning_rate ** 2)),
                            warmup_steps)
        elif scheduler == 'linear_warmup_decay':
            scheduled_lr = linear_warmup_decay(learning_rate, warmup_steps,
                                               num_train_steps)
        else:
            raise ValueError("Unkown learning rate scheduler, should be "
                             "'noam_decay' or 'linear_warmup_decay'")
        if use_lamb:
            optimizer = fluid.optimizer.LambOptimizer(learning_rate=scheduled_lr)
        else:
            optimizer = fluid.optimizer.Adam(learning_rate=scheduled_lr)
    else:
        # No warmup: constant LR stored in a global variable so it can still
        # be fetched/overwritten like a scheduled one.
        scheduled_lr = fluid.layers.create_global_var(
            name=fluid.unique_name.generate("learning_rate"),
            shape=[1],
            value=learning_rate,
            dtype='float32',
            persistable=True)
        if use_lamb:
            optimizer = fluid.optimizer.LambOptimizer(learning_rate=scheduled_lr)
        else:
            optimizer = fluid.optimizer.Adam(learning_rate=scheduled_lr)
        optimizer._learning_rate_map[fluid.default_main_program(
        )] = scheduled_lr
    fluid.clip.set_gradient_clip(
        clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0))
    def exclude_from_weight_decay(name):
        # Skip decay for layer-norm parameters and all bias-like parameters.
        if name.find("layer_norm") > -1:
            return True
        bias_suffix = ["_bias", "_b", ".b_0"]
        for suffix in bias_suffix:
            if name.endswith(suffix):
                return True
        return False
    # Snapshot each parameter (param * 1.0, stop_gradient) so decoupled weight
    # decay below uses the pre-update value.
    param_list = dict()
    for param in train_program.global_block().all_parameters():
        param_list[param.name] = param * 1.0
        param_list[param.name].stop_gradient = True
    if dist_strategy is not None:
        # use fleet api
        optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)
    _, param_grads = optimizer.minimize(loss)
    if weight_decay > 0:
        # Decoupled weight decay: param -= old_param * weight_decay * lr,
        # applied after the optimizer step, excluding norm/bias parameters.
        for param, grad in param_grads:
            if exclude_from_weight_decay(param.name):
                continue
            with param.block.program._optimized_guard(
                [param, grad]), fluid.framework.name_scope("weight_decay"):
                updated_param = param - param_list[
                    param.name] * weight_decay * scheduled_lr
                fluid.layers.assign(output=param, input=updated_param)
    return scheduled_lr
| 5,185 | 37.701493 | 83 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/tokenization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from io import open
import collections
import unicodedata
import six
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input.

    Accepts `str`/`bytes` on Python 3 (and `str`/`unicode` on Python 2);
    byte strings are decoded as utf-8 with undecodable bytes ignored.
    Raises ValueError for any other type.
    """
    if six.PY3:
        if isinstance(text, str):
            return text
        elif isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    elif six.PY2:
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        elif isinstance(text, unicode):
            return text
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    else:
        raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
    """Returns text encoded in a way suitable for print or `tf.logging`.

    Unlike convert_to_unicode, this returns the *native* `str` type of the
    running interpreter (unicode on Py3, utf-8 bytes on Py2).
    """
    # These functions want `str` for both Python2 and Python3, but in one case
    # it's a Unicode string and in the other it's a byte string.
    if six.PY3:
        if isinstance(text, str):
            return text
        elif isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    elif six.PY2:
        if isinstance(text, str):
            return text
        elif isinstance(text, unicode):
            return text.encode("utf-8")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    else:
        raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary.

    Each line is either ``token`` (id = line number) or ``token<TAB>id``.
    Returns an OrderedDict mapping token -> int id.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, encoding='utf8') as fin:
        for num, line in enumerate(fin):
            items = convert_to_unicode(line.strip()).split("\t")
            if len(items) > 2:
                # NOTE(review): a line with more than two fields stops loading
                # entirely (break, not continue) -- confirm this is intended.
                break
            token = items[0]
            index = items[1] if len(items) == 2 else num
            token = token.strip()
            vocab[token] = int(index)
    return vocab
def convert_by_vocab(vocab, items):
    """Map each element of `items` through the `vocab` lookup table.

    Works for both token->id and id->token dictionaries; raises KeyError on
    unknown items, exactly like a plain dict lookup.
    """
    return [vocab[item] for item in items]
def convert_tokens_to_ids(vocab, tokens):
    # Convenience wrapper over `convert_by_vocab` for token -> id lookup.
    return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
    # Convenience wrapper over `convert_by_vocab` for id -> token lookup.
    return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
    """Split `text` on runs of whitespace, dropping surrounding whitespace.

    Returns an empty list for empty or all-whitespace input.
    """
    stripped = text.strip()
    return stripped.split() if stripped else []
class FullTokenizer(object):
    """Runs end-to-end tokenization: basic (punctuation/whitespace/CJK)
    splitting followed by WordPiece sub-word splitting."""
    def __init__(self, vocab_file, do_lower_case=True):
        # vocab: token -> id; inv_vocab: id -> token (inverse lookup).
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {v: k for k, v in self.vocab.items()}
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
    def tokenize(self, text):
        """Split `text` into a flat list of WordPiece tokens."""
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens
    def convert_tokens_to_ids(self, tokens):
        """Map tokens to their integer vocabulary ids."""
        return convert_by_vocab(self.vocab, tokens)
    def convert_ids_to_tokens(self, ids):
        """Map integer vocabulary ids back to tokens."""
        return convert_by_vocab(self.inv_vocab, ids)
class CharTokenizer(object):
    """Runs end-to-end tokenization by lower-casing, splitting on single
    spaces, then WordPiece-splitting each piece (no punctuation/accent/CJK
    handling, unlike FullTokenizer)."""
    def __init__(self, vocab_file, do_lower_case=True):
        # NOTE(review): `do_lower_case` is accepted but never stored;
        # tokenize() always lower-cases -- confirm the flag is intentionally
        # ignored here.
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {v: k for k, v in self.vocab.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
    def tokenize(self, text):
        """Split lower-cased `text` on spaces and WordPiece each piece."""
        split_tokens = []
        for token in text.lower().split(" "):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens
    def convert_tokens_to_ids(self, tokens):
        """Map tokens to their integer vocabulary ids."""
        return convert_by_vocab(self.vocab, tokens)
    def convert_ids_to_tokens(self, ids):
        """Map integer vocabulary ids back to tokens."""
        return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.
        Args:
            do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case
    def tokenize(self, text):
        """Tokenizes a piece of text.

        Pipeline: clean invalid chars -> isolate CJK chars -> whitespace
        split -> (optionally) lower-case + strip accents -> split on
        punctuation -> final whitespace re-split.
        """
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            # Accents are only stripped when lower-casing is enabled.
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))
        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens
    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD decomposition separates base characters from combining marks
        # (category "Mn"), which are then dropped.
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text.

        Each punctuation character becomes its own token; runs of
        non-punctuation characters stay together.
        """
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1
        return ["".join(x) for x in output]
    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
                (cp >= 0x3400 and cp <= 0x4DBF) or #
                (cp >= 0x20000 and cp <= 0x2A6DF) or #
                (cp >= 0x2A700 and cp <= 0x2B73F) or #
                (cp >= 0x2B740 and cp <= 0x2B81F) or #
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or #
                (cp >= 0x2F800 and cp <= 0x2FA1F)): #
            return True
        return False
    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the replacement character and control characters;
            # canonicalize every whitespace character to a single space.
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization (greedy longest-match-first)."""
    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
        # vocab: token -> id mapping; only membership is used here.
        self.vocab = vocab
        self.unk_token = unk_token
        # Words longer than this are mapped straight to `unk_token`.
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.
        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.
        For example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]
        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through `BasicTokenizer.
        Returns:
            A list of wordpiece tokens.
        """
        text = convert_to_unicode(text)
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            is_bad = False
            start = 0
            sub_tokens = []
            # Greedily take the longest vocab entry starting at `start`;
            # non-initial pieces carry the "##" continuation prefix.
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    # No piece of the remaining word is in the vocab: the
                    # whole word becomes a single `unk_token`.
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end
            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def tokenize_chinese_chars(text):
    """Split `text` into a list where every CJK character and every
    whitespace character is its own element, and all other characters are
    grouped into contiguous runs."""
    cjk_ranges = (
        (0x4E00, 0x9FFF), (0x3400, 0x4DBF), (0x20000, 0x2A6DF),
        (0x2A700, 0x2B73F), (0x2B740, 0x2B81F), (0x2B820, 0x2CEAF),
        (0xF900, 0xFAFF), (0x2F800, 0x2FA1F),
    )
    def _is_chinese_char(cp):
        # CJK Unified Ideographs blocks only -- Hangul, Hiragana and Katakana
        # are space-separated scripts and are intentionally not included.
        return any(lo <= cp <= hi for lo, hi in cjk_ranges)
    def _is_whitespace(c):
        return c in (" ", "\t", "\r", "\n") or ord(c) == 0x202F
    output = []
    buff = ""
    for char in text:
        if _is_chinese_char(ord(char)) or _is_whitespace(char):
            # Flush the pending run, then emit the CJK/whitespace char alone.
            if buff:
                output.append(buff)
                buff = ""
            output.append(char)
        else:
            buff += char
    if buff:
        output.append(buff)
    return output
| 14,348 | 32.921986 | 84 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/inference_de.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning on classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import time
import logging
import multiprocessing
import numpy as np
# NOTE(paddle-dev): All of these flags should be
# set before `import paddle`. Otherwise, it would
# not take any effect.
os.environ['FLAGS_eager_delete_tensor_gb'] = '0' # enable gc
import paddle.fluid as fluid
import reader.reader_de_infer as reader_de_infer
from model.ernie import ErnieConfig
from finetune.dual_encoder_infer import create_model, predict
from utils.args import print_arguments, check_cuda, prepare_logger
from utils.init import init_pretraining_params, init_checkpoint
from finetune_args import parser
args = parser.parse_args()
log = logging.getLogger()
def main(args):
    """Build the dual-encoder inference program and dump embeddings.

    Requires ``args.init_checkpoint``; every file listed in ``args.test_set``
    (comma-separated, paired with ``args.test_save``) is streamed through the
    dual encoder, and :func:`finetune.dual_encoder_infer.predict` writes the
    resulting query/passage embeddings or a FAISS index, depending on
    ``args.output_item``.
    """
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()

    if args.use_cuda:
        dev_list = fluid.cuda_places()
        place = dev_list[0]
        dev_count = len(dev_list)
    else:
        place = fluid.CPUPlace()
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
    exe = fluid.Executor(place)

    reader = reader_de_infer.ClassifyReader(
        vocab_path=args.vocab_path,
        label_map_config=args.label_map_config,
        q_max_seq_len=args.q_max_seq_len,
        p_max_seq_len=args.p_max_seq_len,
        do_lower_case=args.do_lower_case,
        in_tokens=args.in_tokens,
        random_seed=args.random_seed,
        tokenizer=args.tokenizer,
        for_cn=args.for_cn,
        task_id=args.task_id)

    assert args.test_save is not None

    # Inference-only graph: one pass, no optimizer.
    startup_prog = fluid.Program()
    test_prog = fluid.Program()
    with fluid.program_guard(test_prog, startup_prog):
        with fluid.unique_name.guard():
            test_pyreader, graph_vars = create_model(
                args,
                pyreader_name='test_reader',
                ernie_config=ernie_config,
                batch_size=args.batch_size,
                is_prediction=True)

    test_prog = test_prog.clone(for_test=True)

    # NOTE(review): an Executor on the same place was already created above;
    # this second construction looks redundant.
    exe = fluid.Executor(place)
    exe.run(startup_prog)

    if not args.init_checkpoint:
        # NOTE(review): missing space between the two string pieces — the
        # message renders as "...should be set ifonly doing...".
        raise ValueError("args 'init_checkpoint' should be set if"
                         "only doing validation or testing!")
    init_checkpoint(
        exe,
        args.init_checkpoint,
        main_program=startup_prog)

    # test_set / test_save are parallel comma-separated lists.
    test_sets = args.test_set.split(',')
    save_dirs = args.test_save.split(',')
    assert len(test_sets) == len(save_dirs)
    batch_size = args.batch_size if args.predict_batch_size is None else args.predict_batch_size

    for test_f, save_f in zip(test_sets, save_dirs):
        test_pyreader.decorate_tensor_provider(
            reader.data_generator(
                test_f,
                batch_size=batch_size,
                epoch=1,
                dev_count=1,
                shuffle=False))

        save_path = save_f
        log.info("testing {}, save to {}".format(test_f, save_path))
        predict(
            args,
            exe,
            test_prog,
            test_pyreader,
            graph_vars,
            output_item=args.output_item,
            output_file_name=args.output_file_name,
            hidden_size=ernie_config['hidden_size'])


if __name__ == '__main__':
    prepare_logger(log)
    print_arguments(args)
    check_cuda(args.use_cuda)
    main(args)
| 4,126 | 31.496063 | 96 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/finetune_args.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import time
import argparse
from utils.args import ArgumentGroup
# yapf: disable
parser = argparse.ArgumentParser(__doc__)

# --- model: ERNIE config / checkpoint paths and task framing ---------------
model_g = ArgumentGroup(parser, "model", "model configuration and paths.")
model_g.add_arg("ernie_config_path", str, None, "Path to the json file for ernie model config.")
model_g.add_arg("init_checkpoint", str, None, "Init checkpoint to resume training from.")
model_g.add_arg("init_pretraining_params", str, None,
                "Init pre-training params which preforms fine-tuning from. If the "
                "arg 'init_checkpoint' has been set, this argument wouldn't be valid.")
model_g.add_arg("checkpoints", str, "checkpoints", "Path to save checkpoints.")
model_g.add_arg("is_classify", bool, True, "is_classify")
model_g.add_arg("is_regression", bool, False, "is_regression")
model_g.add_arg("task_id", int, 0, "task id")

# --- training: optimisation schedule, AMP/loss-scaling and distribution ----
train_g = ArgumentGroup(parser, "training", "training options.")
train_g.add_arg("epoch", int, 3, "Number of epoches for fine-tuning.")
train_g.add_arg("learning_rate", float, 5e-5, "Learning rate used to train with warmup.")
train_g.add_arg("lr_scheduler", str, "linear_warmup_decay",
                "scheduler of learning rate.", choices=['linear_warmup_decay', 'noam_decay'])
train_g.add_arg("weight_decay", float, 0.01, "Weight decay rate for L2 regularizer.")
train_g.add_arg("warmup_proportion", float, 0.1,
                "Proportion of training steps to perform linear learning rate warmup for.")
train_g.add_arg("save_steps", int, 10000, "The steps interval to save checkpoints.")
train_g.add_arg("validation_steps", int, 1000, "The steps interval to evaluate model performance.")
train_g.add_arg("use_recompute", bool, False, "Whether to use recompute optimizer for training.")
train_g.add_arg("use_mix_precision", bool, False, "Whether to use mix-precision optimizer for training.")
train_g.add_arg("use_cross_batch", bool, False, "Whether to use cross-batch for training.")
train_g.add_arg("use_lamb", bool, False, "Whether to use LambOptimizer for training.")
train_g.add_arg("use_dynamic_loss_scaling", bool, True, "Whether to use dynamic loss scaling.")
train_g.add_arg("test_save", str, "./checkpoints/test_result", "test_save")
train_g.add_arg("metric", str, "simple_accuracy", "metric")
train_g.add_arg("incr_every_n_steps", int, 100, "Increases loss scaling every n consecutive.")
train_g.add_arg("decr_every_n_nan_or_inf", int, 2,
                "Decreases loss scaling every n accumulated steps with nan or inf gradients.")
train_g.add_arg("incr_ratio", float, 2.0,
                "The multiplier to use when increasing the loss scaling.")
train_g.add_arg("decr_ratio", float, 0.8,
                "The less-than-one-multiplier to use when decreasing.")

# --- logging ----------------------------------------------------------------
log_g = ArgumentGroup(parser, "logging", "logging related.")
log_g.add_arg("skip_steps", int, 10, "The steps interval to print loss.")
log_g.add_arg("verbose", bool, False, "Whether to output verbose log.")

# --- data: file locations, tokenisation and sequence-length limits ----------
data_g = ArgumentGroup(parser, "data", "Data paths, vocab paths and data processing options")
data_g.add_arg("tokenizer", str, "FullTokenizer",
               "ATTENTION: the INPUT must be splited by Word with blank while using SentencepieceTokenizer or WordsegTokenizer")
data_g.add_arg("train_set", str, None, "Path to training data.")
data_g.add_arg("test_set", str, None, "Path to test data.")
data_g.add_arg("dev_set", str, None, "Path to validation data.")
data_g.add_arg("vocab_path", str, None, "Vocabulary path.")
data_g.add_arg("max_seq_len", int, 512, "Number of words of the longest seqence.")
data_g.add_arg("q_max_seq_len", int, 32, "Number of words of the longest seqence.")
data_g.add_arg("p_max_seq_len", int, 256, "Number of words of the longest seqence.")
data_g.add_arg("train_data_size", int, 0, "Number of training data's total examples. Set for distribute.")
data_g.add_arg("batch_size", int, 32, "Total examples' number in batch for training. see also --in_tokens.")
data_g.add_arg("predict_batch_size", int, None, "Total examples' number in batch for predict. see also --in_tokens.")
data_g.add_arg("in_tokens", bool, False,
               "If set, the batch size will be the maximum number of tokens in one batch. "
               "Otherwise, it will be the maximum number of examples in one batch.")
data_g.add_arg("do_lower_case", bool, True,
               "Whether to lower case the input text. Should be True for uncased models and False for cased models.")
data_g.add_arg("random_seed", int, None, "Random seed.")
data_g.add_arg("label_map_config", str, None, "label_map_path.")
data_g.add_arg("num_labels", int, 2, "label number")
data_g.add_arg("diagnostic", str, None, "GLUE Diagnostic Dataset")
data_g.add_arg("diagnostic_save", str, None, "GLUE Diagnostic save f")
data_g.add_arg("max_query_length", int, 64, "Max query length.")
data_g.add_arg("max_answer_length", int, 100, "Max answer length.")
data_g.add_arg("doc_stride", int, 128,
               "When splitting up a long document into chunks, how much stride to take between chunks.")
data_g.add_arg("n_best_size", int, 20,
               "The total number of n-best predictions to generate in the nbest_predictions.json output file.")
# NOTE(review): unlike the positional add_arg calls above, this one passes
# argparse-style keywords (type=/default=/help=) — presumably
# ArgumentGroup.add_arg forwards **kwargs; confirm against utils/args.py.
data_g.add_arg("chunk_scheme", type=str, default="IOB", choices=["IO", "IOB", "IOE", "IOBES"], help="chunk scheme")

# --- run_type: device selection and which phases (train/val/test) to run ----
run_type_g = ArgumentGroup(parser, "run_type", "running type options.")
run_type_g.add_arg("use_cuda", bool, True, "If set, use GPU for training.")
run_type_g.add_arg("is_distributed", bool, False, "If set, then start distributed training.")
run_type_g.add_arg("use_fast_executor", bool, False, "If set, use fast parallel executor (in experiment).")
run_type_g.add_arg("num_iteration_per_drop_scope", int, 10, "Iteration intervals to drop scope.")
run_type_g.add_arg("do_train", bool, True, "Whether to perform training.")
run_type_g.add_arg("do_val", bool, True, "Whether to perform evaluation on dev data set.")
run_type_g.add_arg("do_test", bool, True, "Whether to perform evaluation on test data set.")
run_type_g.add_arg("output_item", int, 3, "Test output format.")
run_type_g.add_arg("output_file_name", str, None, "Test output file name")
run_type_g.add_arg("test_data_cnt", int, 1110000 , "total cnt of testset")
run_type_g.add_arg("use_multi_gpu_test", bool, False, "Whether to perform evaluation using multiple gpu cards")
run_type_g.add_arg("metrics", bool, True, "Whether to perform evaluation on test data set.")
run_type_g.add_arg("shuffle", bool, True, "")
run_type_g.add_arg("for_cn", bool, False, "model train for cn or for other langs.")
| 8,197 | 67.890756 | 127 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/merge.py | import sys
# Merge the per-shard retrieval results produced by index_search.py.
#
# Each of the 8 shard files ("res.top<K>-part<I>") holds rows of
# "qid \t local_pid \t rank \t score" in the SAME query order, so the files
# can be read line-by-line in lockstep.  Shard-local passage ids are mapped
# to global ids as "pid + shift * shard_index", scores for one query are
# pooled across all shards, and the global top-K is written to
# "output/res.top<K>".
total_part = 8
# argv[1]: id offset between consecutive shards; argv[2]: K (hits per query).
shift = int(sys.argv[1])
top = int(sys.argv[2])

# Open every shard file and prime one line from each.
f_list = []
for part in range(total_part):
    f0 = open('res.top%s-part%s' % (top, part))
    f_list.append(f0)

line_list = []
for part in range(total_part):
    line = f_list[part].readline()
    line_list.append(line)

out = open('output/res.top%s' % top, 'w')
last_q = ''
ans_list = {}
# Sweep all shards in lockstep; flush the pooled candidates whenever the
# query id changes.  The loop stops once the last shard file is exhausted.
while line_list[-1]:
    cur_list = []
    for line in line_list:
        sub = line.strip().split('\t')
        cur_list.append(sub)
    if last_q == '':
        last_q = cur_list[0][0]
    if cur_list[0][0] != last_q:
        # A new query started: rank and emit the previous query's pool.
        rank = sorted(ans_list.items(), key = lambda a:a[1], reverse=True)
        for i in range(top):
            out.write("%s\t%s\t%s\t%s\n" % (last_q, rank[i][0], i+1, rank[i][1]))
        ans_list = {}
    for i, sub in enumerate(cur_list):
        # Map the shard-local passage id of shard i to a global id.
        ans_list[int(sub[1]) + shift*i] = float(sub[-1])
    last_q = cur_list[0][0]
    line_list = []
    for f0 in f_list:
        line = f0.readline()
        line_list.append(line)
# Flush the final query (never emitted inside the loop).
rank = sorted(ans_list.items(), key = lambda a:a[1], reverse=True)
for i in range(top):
    out.write("%s\t%s\t%s\t%s\n" % (last_q, rank[i][0], i+1, rank[i][1]))
out.close()
print('output/res.top%s' % top)
| 1,230 | 24.645833 | 81 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/batching.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mask, padding and batching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange
def pad_batch_data(insts,
                   pad_idx=0,
                   return_pos=False,
                   return_input_mask=False,
                   return_max_len=False,
                   return_num_token=False,
                   return_seq_lens=False):
    """Pad every instance in the batch to the longest sequence length.

    Always produces the padded token ids shaped ``[batch, max_len, 1]``
    (int64); the optional flags append, in order: position ids, a float32
    attention mask (1 over real tokens, 0 over padding), the max length,
    the total token count, and per-instance lengths ``[batch, 1]``.  When
    only one item is produced it is returned bare instead of in a list.
    """
    outputs = []
    max_len = max(len(inst) for inst in insts)

    # The padding token choice is irrelevant: losses over pads are masked
    # out by weights and contribute nothing to parameter gradients.
    padded_ids = np.array(
        [inst + [pad_idx] * (max_len - len(inst)) for inst in insts],
        dtype="int64").reshape([-1, max_len, 1])
    outputs.append(padded_ids)

    if return_pos:
        positions = np.array(
            [list(range(len(inst))) + [pad_idx] * (max_len - len(inst))
             for inst in insts],
            dtype="int64").reshape([-1, max_len, 1])
        outputs.append(positions)

    if return_input_mask:
        # Zeros over the padded tail prevent attention to padding positions.
        mask = np.array(
            [[1] * len(inst) + [0] * (max_len - len(inst)) for inst in insts],
            dtype="float32")
        outputs.append(mask[:, :, np.newaxis])

    if return_max_len:
        outputs.append(max_len)

    if return_num_token:
        outputs.append(sum(len(inst) for inst in insts))

    if return_seq_lens:
        seq_lens = np.array([len(inst) for inst in insts], dtype="int64")
        outputs.append(seq_lens.reshape([-1, 1]))

    return outputs if len(outputs) > 1 else outputs[0]
if __name__ == "__main__":
    # No CLI behaviour: this module only provides batching helpers.
    pass
| 2,683 | 33.410256 | 78 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/index_search.py | import sys
import time
import faiss
import math
import numpy as np
def read_embed(file_name, dim=768, bs=3000):
    """Yield batches of at most *bs* embedding vectors from *file_name*.

    A ``*.npy`` file is loaded with numpy and yielded as array slices
    (*dim* is not checked on this path).  Any other file is parsed as
    space-separated floats, one vector per line, each asserted to have
    exactly *dim* components, and yielded as lists of lists.
    """
    if file_name.endswith('npy'):
        embeddings = np.load(file_name)
        for start in range(0, len(embeddings), bs):
            yield embeddings[start:start + bs]
    else:
        batch = []
        with open(file_name) as inp:
            for line in inp:
                vector = [float(tok) for tok in line.strip().split(' ')]
                assert len(vector) == dim
                batch.append(vector)
                if len(batch) == bs:
                    yield batch
                    batch = []
        # Trailing partial batch.
        if batch:
            yield batch
def load_qid(file_name):
    """Return the first tab-separated field of every line in *file_name*."""
    with open(file_name) as inp:
        return [line.strip().split('\t')[0] for line in inp]
def search(index, emb_file, qid_list, outfile, top_k):
    """Retrieve the *top_k* nearest passages for every query embedding.

    Query embeddings are streamed in batches from *emb_file* (see
    ``read_embed``); *qid_list* must be aligned with that order.  One TSV
    row per (query, hit) is written to *outfile*: qid, passage id,
    1-based rank and similarity score.
    """
    query_no = 0
    with open(outfile, 'w') as out:
        for batch in read_embed(emb_file):
            queries = np.array(batch)
            scores, pids = index.search(queries.astype('float32'), top_k)
            for row in range(len(queries)):
                qid = qid_list[query_no]
                query_no += 1
                for rank in range(1, top_k + 1):
                    out.write('%s\t%s\t%s\t%s\n' %
                              (qid, pids[row][rank - 1], rank,
                               scores[row][rank - 1]))
def main():
    """CLI entry: query one FAISS index shard for the top-k passages.

    argv: 1) shard id, 2) top_k, 3) query text file (qid is the first
    tab-separated column).  Reads query embeddings from ``query.emb.npy``
    and writes ``res.top<k>-part<shard>``, later combined by merge.py.
    """
    part = sys.argv[1]
    topk = int(sys.argv[2])
    q_text_file = sys.argv[3]

    outfile = 'res.top%s-part%s' % (topk, part)
    qid_list = load_qid(q_text_file)
    engine = faiss.read_index("para.index.part%s" % part)
    emb_file = 'query.emb.npy'
    search(engine, emb_file, qid_list, outfile, topk)


if __name__ == "__main__":
    main()
| 1,948 | 27.661765 | 84 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/train_ce.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning on classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import time
import logging
import multiprocessing
# NOTE(paddle-dev): All of these flags should be
# set before `import paddle`. Otherwise, it would
# not take any effect.
os.environ['FLAGS_eager_delete_tensor_gb'] = '0' # enable gc
import paddle.fluid as fluid
import reader.reader_ce as reader_ce
from model.ernie import ErnieConfig
from finetune.cross_encoder import create_model, evaluate, predict
from optimization import optimization
from utils.args import print_arguments, check_cuda, prepare_logger
from utils.init import init_pretraining_params, init_checkpoint
from finetune_args import parser
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
args = parser.parse_args()
log = logging.getLogger()
def main(args):
    """End-to-end fine-tuning driver for the cross-encoder ranker.

    Builds the train/test programs, initialises parameters from a
    checkpoint or pre-trained weights, runs the collective (fleet)
    training loop with periodic checkpointing and validation, and finally
    evaluates on the dev/test (and optional GLUE-diagnostic) sets.
    """
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()

    if args.use_cuda:
        dev_list = fluid.cuda_places()
        place = dev_list[0]
        dev_count = len(dev_list)
    else:
        place = fluid.CPUPlace()
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
    exe = fluid.Executor(place)

    reader = reader_ce.ClassifyReader(
        vocab_path=args.vocab_path,
        label_map_config=args.label_map_config,
        max_seq_len=args.max_seq_len,
        total_num=args.train_data_size,
        do_lower_case=args.do_lower_case,
        in_tokens=args.in_tokens,
        random_seed=args.random_seed,
        tokenizer=args.tokenizer,
        for_cn=args.for_cn,
        task_id=args.task_id)

    if not (args.do_train or args.do_val or args.do_test):
        raise ValueError("For args `do_train`, `do_val` and `do_test`, at "
                         "least one of them must be True.")

    if args.do_test:
        assert args.test_save is not None

    startup_prog = fluid.Program()
    if args.random_seed is not None:
        startup_prog.random_seed = args.random_seed
    if args.predict_batch_size == None:
        args.predict_batch_size = args.batch_size

    if args.do_train:
        # Collective multi-GPU training via fleet: every worker runs this
        # script and is distinguished by its worker index.
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        dev_count = fleet.worker_num()

        train_data_generator = reader.data_generator(
            input_file=args.train_set,
            batch_size=args.batch_size,
            epoch=args.epoch,
            dev_count=1,
            trainer_id=fleet.worker_index(),
            trainer_num=fleet.worker_num(),
            shuffle=True,
            phase="train")

        num_train_examples = reader.get_num_examples(args.train_set)

        if args.in_tokens:
            max_train_steps = args.epoch * num_train_examples // (
                args.batch_size // args.max_seq_len) // dev_count
        else:
            max_train_steps = args.epoch * num_train_examples // args.batch_size // dev_count

        warmup_steps = int(max_train_steps * args.warmup_proportion)
        log.info("Device count: %d" % dev_count)
        log.info("Num train examples: %d" % num_train_examples)
        log.info("Max train steps: %d" % max_train_steps)
        log.info("Num warmup steps: %d" % warmup_steps)

        train_program = fluid.Program()

        # use fleet api
        exec_strategy = fluid.ExecutionStrategy()
        if args.use_fast_executor:
            exec_strategy.use_experimental_executor = True
        exec_strategy.num_threads = dev_count
        if args.is_distributed:
            exec_strategy.num_threads = 3

        exec_strategy.num_iteration_per_drop_scope = args.num_iteration_per_drop_scope
        dist_strategy = DistributedStrategy()
        dist_strategy.exec_strategy = exec_strategy
        dist_strategy.nccl_comm_num = 1
        if args.is_distributed:
            dist_strategy.nccl_comm_num = 2
            dist_strategy.use_hierarchical_allreduce = True
        if args.use_mix_precision:
            dist_strategy.use_amp = True

        with fluid.program_guard(train_program, startup_prog):
            with fluid.unique_name.guard():
                train_pyreader, graph_vars = create_model(
                    args,
                    pyreader_name='train_reader',
                    ernie_config=ernie_config)
                scheduled_lr = optimization(
                    loss=graph_vars["loss"],
                    warmup_steps=warmup_steps,
                    num_train_steps=max_train_steps,
                    learning_rate=args.learning_rate,
                    train_program=train_program,
                    startup_prog=startup_prog,
                    weight_decay=args.weight_decay,
                    scheduler=args.lr_scheduler,
                    use_dynamic_loss_scaling=args.use_dynamic_loss_scaling,
                    incr_every_n_steps=args.incr_every_n_steps,
                    decr_every_n_nan_or_inf=args.decr_every_n_nan_or_inf,
                    incr_ratio=args.incr_ratio,
                    decr_ratio=args.decr_ratio,
                    dist_strategy = dist_strategy)

        if args.verbose:
            if args.in_tokens:
                lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
                    program=train_program,
                    batch_size=args.batch_size // args.max_seq_len)
            else:
                lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
                    program=train_program, batch_size=args.batch_size)
            log.info("Theoretical memory usage in training: %.3f - %.3f %s" %
                     (lower_mem, upper_mem, unit))

    if args.do_val or args.do_test:
        test_prog = fluid.Program()
        with fluid.program_guard(test_prog, startup_prog):
            with fluid.unique_name.guard():
                test_pyreader, graph_vars = create_model(
                    args,
                    pyreader_name='test_reader',
                    ernie_config=ernie_config,
                    is_prediction=True)
        test_prog = test_prog.clone(for_test=True)

    # NOTE(review): fleet.main_program presupposes fleet.init ran above,
    # i.e. do_train — confirm behaviour for eval-only invocations.
    train_program = fleet.main_program

    exe = fluid.Executor(place)
    exe.run(startup_prog)

    if args.do_train:
        if args.init_checkpoint and args.init_pretraining_params:
            log.warning(
                "WARNING: args 'init_checkpoint' and 'init_pretraining_params' "
                "both are set! Only arg 'init_checkpoint' is made valid.")
        if args.init_checkpoint:
            init_checkpoint(
                exe,
                args.init_checkpoint,
                main_program=startup_prog)
        elif args.init_pretraining_params:
            init_pretraining_params(
                exe,
                args.init_pretraining_params,
                main_program=startup_prog)
    elif args.do_val or args.do_test:
        if not args.init_checkpoint:
            raise ValueError("args 'init_checkpoint' should be set if"
                             "only doing validation or testing!")
        init_checkpoint(
            exe,
            args.init_checkpoint,
            main_program=startup_prog)

    if args.do_train:
        train_exe = exe
        train_pyreader.decorate_tensor_provider(train_data_generator)
    else:
        train_exe = None

    test_exe = exe
    # if args.do_val or args.do_test:
    #     if args.use_multi_gpu_test:
    #         test_exe = fluid.ParallelExecutor(
    #             use_cuda=args.use_cuda,
    #             main_program=test_prog,
    #             share_vars_from=train_exe)

    current_epoch = 0
    steps = 0
    if args.do_train:
        train_pyreader.start()
        if warmup_steps > 0:
            graph_vars["learning_rate"] = scheduled_lr

        ce_info = []
        time_begin = time.time()
        last_epoch = 0
        while True:
            try:
                steps += 1
                # log.info("step: %d" % steps)

                # Only worker 0 logs, validates and saves; the other
                # workers just keep stepping the optimizer.
                if fleet.worker_index() != 0:
                    train_exe.run(fetch_list=[], program=train_program)
                    continue

                if steps % args.skip_steps != 0:
                    train_exe.run(fetch_list=[], program=train_program)

                else:
                    # Every skip_steps-th step fetch metrics for logging.
                    outputs = evaluate(
                        train_exe,
                        train_program,
                        train_pyreader,
                        graph_vars,
                        "train",
                        metric=args.metric)

                    if args.verbose:
                        verbose = "train pyreader queue size: %d, " % train_pyreader.queue.size(
                        )
                        verbose += "learning rate: %f" % (
                            outputs["learning_rate"]
                            if warmup_steps > 0 else args.learning_rate)
                        log.info(verbose)

                    current_example, current_epoch = reader.get_train_progress()
                    time_end = time.time()
                    used_time = time_end - time_begin

                    log.info(
                        "epoch: %d, progress: %d/%d, step: %d, ave loss: %f, "
                        "ave acc: %f, speed: %f steps/s" %
                        (current_epoch, current_example * dev_count, num_train_examples,
                         steps, outputs["loss"], outputs["accuracy"],
                         args.skip_steps / used_time))
                    ce_info.append(
                        [outputs["loss"], outputs["accuracy"], used_time])
                    time_begin = time.time()

                if steps % args.save_steps == 0:
                    save_path = os.path.join(args.checkpoints,
                                             "step_" + str(steps))
                    fluid.io.save_persistables(exe, save_path, fleet._origin_program)

                # if steps % args.validation_steps == 0 or last_epoch != current_epoch:
                if steps % args.validation_steps == 0:
                    # evaluate dev set
                    if args.do_val:
                        evaluate_wrapper(args, reader, exe, test_prog,
                                         test_pyreader, graph_vars,
                                         current_epoch, steps)
                    if args.do_test:
                        predict_wrapper(args, reader, exe, test_prog,
                                        test_pyreader, graph_vars,
                                        current_epoch, steps)

                if last_epoch != current_epoch:
                    last_epoch = current_epoch

            except fluid.core.EOFException:
                # Training data exhausted: save a final checkpoint and stop.
                save_path = os.path.join(args.checkpoints, "step_" + str(steps))
                fluid.io.save_persistables(exe, save_path, fleet._origin_program)
                train_pyreader.reset()
                break

    # final eval on dev set
    if args.do_val:
        evaluate_wrapper(args, reader, exe, test_prog, test_pyreader,
                         graph_vars, current_epoch, steps)

    # final eval on test set
    if args.do_test:
        predict_wrapper(args, reader, exe, test_prog, test_pyreader, graph_vars,
                        current_epoch, steps)

    # final eval on diagnostic, hack for glue-ax
    if args.diagnostic:
        test_pyreader.decorate_tensor_provider(
            reader.data_generator(
                args.diagnostic,
                batch_size=args.batch_size,
                epoch=1,
                dev_count=1,
                shuffle=False))

        log.info("Final diagnostic")
        qids, preds, probs = predict(
            test_exe,
            test_prog,
            test_pyreader,
            graph_vars)
        assert len(qids) == len(preds), '{} v.s. {}'.format(
            len(qids), len(preds))
        with open(args.diagnostic_save, 'w') as f:
            for id, s, p in zip(qids, preds, probs):
                f.write('{}\t{}\t{}\n'.format(id, s, p))

        log.info("Done final diagnostic, saving to {}".format(
            args.diagnostic_save))
def evaluate_wrapper(args, reader, exe, test_prog, test_pyreader, graph_vars,
                     epoch, steps):
    """Evaluate the model on every dev file listed in ``args.dev_set``.

    Each comma-separated file is streamed through *test_pyreader* for one
    pass; the metric summary is logged together with the current epoch and
    step counters.
    """
    dev_files = args.dev_set.split(',')
    for dev_file in dev_files:
        generator = reader.data_generator(
            dev_file,
            batch_size=args.predict_batch_size,
            epoch=1,
            dev_count=1,
            shuffle=False)
        test_pyreader.decorate_tensor_provider(generator)
        log.info("validation result of dataset {}:".format(dev_file))
        summary = evaluate(
            exe,
            test_prog,
            test_pyreader,
            graph_vars,
            "dev",
            metric=args.metric)
        log.info(summary + ', file: {}, epoch: {}, steps: {}'.format(
            dev_file, epoch, steps))
def predict_wrapper(args, reader, exe, test_prog, test_pyreader, graph_vars,
                    epoch, steps):
    """Run prediction over every test file and dump positive-class scores.

    ``args.test_set`` and ``args.test_save`` are parallel comma-separated
    lists; each result file gets a ``.<epoch>.<steps>`` suffix and holds
    one class-1 probability per input example, one per line.
    """
    test_sets = args.test_set.split(',')
    save_dirs = args.test_save.split(',')
    assert len(test_sets) == len(save_dirs)

    for test_f, save_f in zip(test_sets, save_dirs):
        test_pyreader.decorate_tensor_provider(
            reader.data_generator(
                test_f,
                batch_size=args.predict_batch_size,
                epoch=1,
                dev_count=1,
                shuffle=False))

        save_path = save_f + '.' + str(epoch) + '.' + str(steps)
        log.info("testing {}, save to {}".format(test_f, save_path))
        qids, preds, probs = predict(
            exe,
            test_prog,
            test_pyreader,
            graph_vars)

        save_dir = os.path.dirname(save_path)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        else:
            # NOTE(review): despite this message, the file IS written below —
            # either the warning text or the control flow is stale.
            log.warning('save dir exsits: %s, will skip saving' % save_dir)

        with open(save_path, 'w') as f:
            # for id, s, p in zip(qids, preds, probs):
            #     f.write('{}\t{}\t{}\n'.format(id, s, p))
            for p in probs:
                f.write('{}\n'.format(p[1]))


if __name__ == '__main__':
    prepare_logger(log)
    print_arguments(args)
    check_cuda(args.use_cuda)
    main(args)
| 14,978 | 35.713235 | 96 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/finetune/dual_encoder_infer.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import time
import logging
import numpy as np
import faiss
from scipy.stats import pearsonr, spearmanr
from six.moves import xrange
import paddle.fluid as fluid
from model.ernie import ErnieModel
log = logging.getLogger(__name__)
def create_model(args,
                 pyreader_name,
                 ernie_config,
                 batch_size=16,
                 is_prediction=False,
                 task_name=""):
    """Build the dual-encoder inference graph (query tower + passage tower).

    Feeds fixed-shape batches of (query, passage) features through two
    ERNIE encoders with distinct parameter prefixes ('query_' /
    'titlepara_'), scores all in-batch pairs via a dot-product matrix, and
    attaches an in-batch-negative softmax loss/accuracy (the diagonal of
    the score matrix is the positive pair).  Returns the py_reader and a
    dict of graph variables, including the raw CLS representations
    ('q_rep' / 'p_rep') that inference actually consumes.
    """
    # 12 feed slots: 5 query tensors, 5 passage tensors, labels, qids.
    pyreader = fluid.layers.py_reader(
        capacity=50,
        shapes=[[batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1],
                [batch_size, 1], [batch_size, 1]],
        dtypes=['int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64'],
        lod_levels=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        name=pyreader_name,
        use_double_buffer=True)

    (src_ids_q, sent_ids_q, pos_ids_q, task_ids_q, input_mask_q,
     src_ids_p, sent_ids_p, pos_ids_p, task_ids_p, input_mask_p,
     labels, qids) = fluid.layers.read_file(pyreader)

    # Query tower.
    ernie_q = ErnieModel(
        src_ids=src_ids_q,
        position_ids=pos_ids_q,
        sentence_ids=sent_ids_q,
        task_ids=task_ids_q,
        input_mask=input_mask_q,
        config=ernie_config,
        model_name='query_')
    ## pos para
    # Passage (title+para) tower with its own parameter prefix.
    ernie_p = ErnieModel(
        src_ids=src_ids_p,
        position_ids=pos_ids_p,
        sentence_ids=sent_ids_p,
        task_ids=task_ids_p,
        input_mask=input_mask_p,
        config=ernie_config,
        model_name='titlepara_')

    q_cls_feats = ernie_q.get_cls_output()
    p_cls_feats = ernie_p.get_cls_output()

    #p_cls_feats = fluid.layers.concat([pos_cls_feats, neg_cls_feats], axis=0)
    #src_ids_p = fluid.layers.Print(src_ids_p, message='p: ')
    #p_cls_feats = fluid.layers.Print(p_cls_feats, message='p: ')

    #multiply
    # [batch, batch] similarity matrix: every query vs every passage.
    logits = fluid.layers.matmul(q_cls_feats, p_cls_feats, transpose_x=False, transpose_y=True)
    probs = logits
    #fluid.layers.Print(probs, message='probs: ')
    #logits2 = fluid.layers.elementwise_mul(x=q_rep, y=p_rep)
    #fluid.layers.Print(logits2, message='logits2: ')
    #probs2 = fluid.layers.reduce_sum(logits, dim=-1)
    #fluid.layers.Print(probs2, message='probs2: ')

    # Identity matrix as soft labels: row i's positive passage is column i
    # (in-batch negatives).
    matrix_labels = fluid.layers.eye(batch_size, batch_size, dtype='float32')
    matrix_labels.stop_gradient=True

    #print('DEBUG:\tstart loss')
    ce_loss, _ = fluid.layers.softmax_with_cross_entropy(
        logits=logits, label=matrix_labels, soft_label=True, return_softmax=True)
    loss = fluid.layers.mean(x=ce_loss)
    #print('DEBUG:\tloss done')

    # Hard label indices (the diagonal) for the accuracy metric.
    matrix_labels = fluid.layers.argmax(matrix_labels, axis=-1)
    matrix_labels = fluid.layers.reshape(x=matrix_labels, shape=[batch_size, 1])

    num_seqs = fluid.layers.create_tensor(dtype='int64')
    accuracy = fluid.layers.accuracy(input=probs, label=matrix_labels, total=num_seqs)

    #ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
    #    logits=logits, label=labels, return_softmax=True)
    #loss = fluid.layers.mean(x=ce_loss)
    #accuracy = fluid.layers.accuracy(
    #    input=probs, label=labels, total=num_seqs)

    graph_vars = {
        "loss": loss,
        "probs": probs,
        "accuracy": accuracy,
        "labels": labels,
        "num_seqs": num_seqs,
        "qids": qids,
        "q_rep": q_cls_feats,
        "p_rep": p_cls_feats
    }

    return pyreader, graph_vars
def build_engine(para_emb_list, dim):
    """Build a flat inner-product FAISS index over *para_emb_list*.

    *dim* must match the dimensionality of the embeddings; vectors are
    inserted in list order, so FAISS ids equal list positions.
    """
    matrix = np.asarray(para_emb_list).astype('float32')
    engine = faiss.IndexFlatIP(dim)
    engine.add(matrix)
    return engine
def predict(args,
            exe,
            test_program,
            test_pyreader,
            graph_vars,
            dev_count=1,
            output_item=0,
            output_file_name='emb',
            hidden_size=768):
    """Run the dual-encoder graph over the whole pyreader stream and save.

    output_item == 0: collect query representations and save them as
    ``<output_file_name>.npy``.  output_item == 1: collect passage
    representations, build a flat-IP FAISS index and write it to
    *output_file_name*.  Collected embeddings are truncated to
    ``args.test_data_cnt`` (the reader may pad the final batch).
    """
    test_pyreader.start()

    fetch_list = [graph_vars["q_rep"].name, graph_vars["p_rep"].name,]

    para_embs = []
    batch_id = 0
    while True:
        try:
            batch_id += 1
            if batch_id % 500 == 0:
                log.info("complete batch %s" % batch_id)
            q_rep, p_rep = exe.run(program=test_program,
                                   fetch_list=fetch_list)
            if output_item == 0:
                for item in q_rep:
                    para_embs.append(np.array(item, dtype='float32'))
            elif output_item == 1:
                for item in p_rep:
                    para_embs.append(np.array(item, dtype='float32'))
        except fluid.core.EOFException:
            # Stream exhausted — normal termination of the py_reader loop.
            test_pyreader.reset()
            break
    log.info("predict embs cnt: %s" % len(para_embs))
    # Drop padding rows beyond the real example count.
    para_embs = para_embs[:args.test_data_cnt]
    log.info("cut embs cnt: %s" % len(para_embs))

    if output_item == 1:
        engine = build_engine(para_embs, hidden_size)
        faiss.write_index(engine, output_file_name)
        log.info("create index done!")
    else:
        emb_matrix = np.asarray(para_embs)
        np.save(output_file_name + '.npy', emb_matrix)
        log.info("save to npy file!")
| 6,355 | 34.311111 | 95 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/finetune/dual_encoder.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import time
import logging
import numpy as np
from scipy.stats import pearsonr, spearmanr
from six.moves import xrange
import paddle.fluid as fluid
from model.ernie import ErnieModel
log = logging.getLogger(__name__)
def create_model(args,
                 pyreader_name,
                 ernie_config,
                 batch_size=16,
                 is_prediction=False,
                 task_name="",
                 fleet_handle=None):
    """Build the dual-encoder (query / para) graph.

    Three ERNIE towers are fed from a single py_reader: the query tower and
    two paragraph towers (positive and hard-negative) that share parameters
    via the ``titlepara_`` name prefix. Training uses in-batch negatives:
    the loss is a softmax cross-entropy over the query x paragraph
    dot-product matrix, optionally gathered across workers
    ("cross-batch negatives") when ``args.use_cross_batch`` is set.

    Returns ``(pyreader, graph_vars)`` when ``is_prediction`` is True,
    otherwise ``(pyreader, graph_vars, checkpoints)``.
    """
    print ("DEBUG:\tclassify")
    # 17 input slots: 5 per text (token/sentence/position/task ids + float
    # attention mask) for query, positive para and negative para, followed
    # by labels and qids.
    pyreader = fluid.layers.py_reader(
        capacity=50,
        shapes=[[batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1],
                [batch_size, 1], [batch_size, 1]],
        dtypes=['int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64'],
        lod_levels=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        name=task_name + "_" + pyreader_name,
        use_double_buffer=True)
    (src_ids_q, sent_ids_q, pos_ids_q, task_ids_q, input_mask_q,
     src_ids_p_pos, sent_ids_p_pos, pos_ids_p_pos, task_ids_p_pos, input_mask_p_pos,
     src_ids_p_neg, sent_ids_p_neg, pos_ids_p_neg, task_ids_p_neg, input_mask_p_neg,
     labels, qids) = fluid.layers.read_file(pyreader)
    # Query tower (parameters prefixed 'query_').
    ernie_q = ErnieModel(
        src_ids=src_ids_q,
        position_ids=pos_ids_q,
        sentence_ids=sent_ids_q,
        task_ids=task_ids_q,
        input_mask=input_mask_q,
        config=ernie_config,
        model_name='query_')
    ## pos para
    # Positive-paragraph tower (shares 'titlepara_' parameters with the
    # negative tower below).
    ernie_pos = ErnieModel(
        src_ids=src_ids_p_pos,
        position_ids=pos_ids_p_pos,
        sentence_ids=sent_ids_p_pos,
        task_ids=task_ids_p_pos,
        input_mask=input_mask_p_pos,
        config=ernie_config,
        model_name='titlepara_')
    ## neg para
    ernie_neg = ErnieModel(
        src_ids=src_ids_p_neg,
        position_ids=pos_ids_p_neg,
        sentence_ids=sent_ids_p_neg,
        task_ids=task_ids_p_neg,
        input_mask=input_mask_p_neg,
        config=ernie_config,
        model_name='titlepara_')
    q_cls_feats = ernie_q.get_cls_output()
    pos_cls_feats = ernie_pos.get_cls_output()
    neg_cls_feats = ernie_neg.get_cls_output()
    #src_ids_p_pos = fluid.layers.Print(src_ids_p_pos, message='pos: ')
    #pos_cls_feats = fluid.layers.Print(pos_cls_feats, message='pos: ')
    # Stack positives then negatives: rows [0, bs) are positive paras,
    # rows [bs, 2*bs) are negatives.
    p_cls_feats = fluid.layers.concat([pos_cls_feats, neg_cls_feats], axis=0)
    if is_prediction:
        # Inference scores only the (query, positive-para) pairs: keep the
        # positive rows and take the row-wise dot product.
        p_cls_feats = fluid.layers.slice(p_cls_feats, axes=[0], starts=[0], ends=[batch_size])
        multi = fluid.layers.elementwise_mul(q_cls_feats, p_cls_feats)
        probs = fluid.layers.reduce_sum(multi, dim=-1)
        graph_vars = {
            "probs": probs,
            "qids": qids,
            "q_rep": q_cls_feats,
            "p_rep": p_cls_feats
        }
        return pyreader, graph_vars
    if args.use_cross_batch and fleet_handle is not None:
        # Cross-batch negatives: gather every worker's paragraph reps so
        # each query is scored against worker_num * 2 * batch_size paras.
        print("worker num is: {}".format(fleet_handle.worker_num()))
        all_p_cls_feats = fluid.layers.collective._c_allgather(
            p_cls_feats, fleet_handle.worker_num(), use_calc_stream=True)
        #multiply
        logits = fluid.layers.matmul(q_cls_feats, all_p_cls_feats, transpose_x=False, transpose_y=True)
        worker_id = fleet_handle.worker_index()
    else:
        logits = fluid.layers.matmul(q_cls_feats, p_cls_feats, transpose_x=False, transpose_y=True)
        worker_id = 0
    probs = logits
    # Gold column for local query j is its own positive paragraph, located
    # at offset batch_size * 2 * worker_id + j in the gathered matrix.
    all_labels = np.array(range(batch_size * worker_id * 2, batch_size * (worker_id * 2 + 1)), dtype='int64')
    matrix_labels = fluid.layers.assign(all_labels)
    matrix_labels = fluid.layers.unsqueeze(matrix_labels, axes=1)
    matrix_labels.stop_gradient=True
    # fluid.layers.Print(matrix_labels, message='matrix_labels')
    #print('DEBUG:\tstart loss')
    ce_loss = fluid.layers.softmax_with_cross_entropy(
        logits=logits, label=matrix_labels)
    loss = fluid.layers.mean(x=ce_loss)
    #print('DEBUG:\tloss done')
    num_seqs = fluid.layers.create_tensor(dtype='int64')
    accuracy = fluid.layers.accuracy(
        input=probs, label=matrix_labels)
    graph_vars = {
        "loss": loss,
        "probs": probs,
        "accuracy": accuracy,
        "labels": labels,
        "num_seqs": num_seqs,
        "qids": qids,
        "q_rep": q_cls_feats,
        "p_rep": p_cls_feats
    }
    # Recompute checkpoints of all three towers (used for gradient
    # checkpointing by the caller).
    cp = []
    cp.extend(ernie_q.checkpoints)
    cp.extend(ernie_pos.checkpoints)
    cp.extend(ernie_neg.checkpoints)
    return pyreader, graph_vars, cp
def evaluate_mrr(preds):
    """Compute Mean Reciprocal Rank over ``(qid, score, label)`` triples.

    ``preds`` must be grouped by qid and already rank-ordered within each
    group (e.g. sorted by ``(qid, -score)``). For each query only the first
    relevant row (``label != 0``) contributes ``1 / rank``.

    Returns 0.0 for empty input (the original raised ZeroDivisionError).
    """
    if not preds:
        # Guard: no rows means no queries to average over.
        return 0.0
    last_qid = None
    total_mrr = 0.0
    qnum = 0.0
    rank = 0.0
    correct = False
    for qid, score, label in preds:
        if qid != last_qid:
            # New query group: reset the per-query rank counter.
            rank = 0.0
            qnum += 1
            correct = False
            last_qid = qid
        rank += 1
        if not correct and label != 0:
            # Only the first relevant hit counts toward MRR.
            total_mrr += 1.0 / rank
            correct = True
    return total_mrr / qnum
def evaluate_map(preds):
    """Compute Mean Average Precision over ``(qid, score, label)`` triples.

    ``preds`` must be grouped by qid and already rank-ordered within each
    group. A query with no relevant rows contributes 0.0.

    Returns 0.0 for empty input (the original raised ZeroDivisionError).
    Uses the builtin ``range`` (works on both Py2/Py3) instead of six's
    ``xrange``.
    """
    def _single_map(st, en):
        # Average precision for one query's rows preds[st:en].
        total_p = 0.0
        correct_num = 0.0
        for index in range(st, en):
            if int(preds[index][2]) != 0:
                correct_num += 1
                # Precision accumulates only at relevant positions.
                total_p += correct_num / (index - st + 1)
        if int(correct_num) == 0:
            return 0.0
        return total_p / correct_num
    if not preds:
        # Guard: no rows means no queries to average over.
        return 0.0
    last_qid = None
    total_map = 0.0
    qnum = 0.0
    st = 0
    for i in range(len(preds)):
        qid = preds[i][0]
        if qid != last_qid:
            qnum += 1
            if last_qid is not None:
                # Close the previous query's group.
                total_map += _single_map(st, i)
            st = i
            last_qid = qid
    # Close the final group.
    total_map += _single_map(st, len(preds))
    return total_map / qnum
def evaluate(exe,
             test_program,
             test_pyreader,
             graph_vars,
             eval_phase,
             use_multi_gpu_test=False,
             metric='simple_accuracy'):
    """Evaluate the dual-encoder graph.

    For ``eval_phase == "train"`` this fetches loss/accuracy from the
    already-running program and returns them as a dict. For other phases it
    drains the pyreader, accumulating loss, accuracy and per-example
    diagonal scores.

    NOTE(review): the non-train path ends with ``return None`` right after
    the fetch loop, so all metric-formatting code below that point is dead.
    """
    train_fetch_list = [
        graph_vars["loss"].name, graph_vars["accuracy"].name,
        graph_vars["num_seqs"].name
    ]
    if eval_phase == "train":
        if "learning_rate" in graph_vars:
            train_fetch_list.append(graph_vars["learning_rate"].name)
        outputs = exe.run(fetch_list=train_fetch_list, program=test_program)
        ret = {"loss": np.mean(outputs[0]), "accuracy": np.mean(outputs[1])}
        if "learning_rate" in graph_vars:
            ret["learning_rate"] = float(outputs[3][0])
        return ret
    test_pyreader.start()
    total_cost, total_acc, total_num_seqs, total_label_pos_num, total_pred_pos_num, total_correct_num = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    qids, labels, scores, preds = [], [], [], []
    time_begin = time.time()
    fetch_list = [
        graph_vars["loss"].name, graph_vars["accuracy"].name,
        graph_vars["probs"].name, graph_vars["labels"].name,
        graph_vars["num_seqs"].name, graph_vars["qids"].name,
        graph_vars["q_rep"].name, graph_vars["p_rep"].name
    ]
    #emb_file = open('emb_qp', 'w')
    while True:
        try:
            if use_multi_gpu_test:
                np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids, q_rep, p_rep = exe.run(
                    fetch_list=fetch_list)
            else:
                np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids, q_rep, p_rep = exe.run(
                    program=test_program, fetch_list=fetch_list)
            total_cost += np.sum(np_loss * np_num_seqs)
            total_acc += np.sum(np_acc * np_num_seqs)
            total_num_seqs += np.sum(np_num_seqs)
            labels.extend(np_labels.reshape((-1)).tolist())
            if np_qids is None:
                np_qids = np.array([])
            qids.extend(np_qids.reshape(-1).tolist())
            # Diagonal of the in-batch logits matrix — presumably each
            # query's score on its own positive paragraph; confirm against
            # create_model's matmul layout.
            batch_scores = np.diag(np_probs).reshape(-1).tolist()
            scores.extend(batch_scores)
            #for item in list(zip(q_rep, p_rep, batch_scores)):
            #    _left = ' '.join([str(each) for each in item[0]])
            #    _right = ' '.join([str(each) for each in item[1]])
            #    emb_file.write(_left + '\t' + _right + '\t' + str(item[2]) + '\n')
            #scores.extend(np_probs[:, 1].reshape(-1).tolist())
            #np_preds = np.argmax(np_probs, axis=1).astype(np.float32)
            #preds.extend(np_preds)
            #total_label_pos_num += np.sum(np_labels)
            #total_pred_pos_num += np.sum(np_preds)
            #total_correct_num += np.sum(np.dot(np_preds, np_labels))
        except fluid.core.EOFException:
            test_pyreader.reset()
            break
    #for score in np_preds:
    #    print (score)
    #print ('---------------------')
    #time_end = time.time()
    #cost = total_cost / total_num_seqs
    #elapsed_time = time_end - time_begin
    #emb_file.close()
    return None
    # NOTE(review): everything below is unreachable due to the early return
    # above; the dead code also references `cost` / `elapsed_time`, whose
    # assignments are commented out.
    evaluate_info = ""
    if metric == 'acc_and_f1':
        ret = acc_and_f1(preds, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, ave_acc: %f, f1: %f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret['acc'], ret['f1'], total_num_seqs, elapsed_time)
    elif metric == 'matthews_corrcoef':
        ret = matthews_corrcoef(preds, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, matthews_corrcoef: %f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret, total_num_seqs, elapsed_time)
    elif metric == 'pearson_and_spearman':
        ret = pearson_and_spearman(scores, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, pearson:%f, spearman:%f, corr:%f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret['pearson'], ret['spearman'], ret['corr'], total_num_seqs, elapsed_time)
    elif metric == 'simple_accuracy':
        ret = simple_accuracy(preds, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, acc:%f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret, total_num_seqs, elapsed_time)
    elif metric == "acc_and_f1_and_mrr":
        ret_a = acc_and_f1(preds, labels)
        preds = sorted(
            zip(qids, scores, labels), key=lambda elem: (elem[0], -elem[1]))
        ret_b = evaluate_mrr(preds)
        evaluate_info = "[%s evaluation] ave loss: %f, acc: %f, f1: %f, mrr: %f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret_a['acc'], ret_a['f1'], ret_b, total_num_seqs, elapsed_time)
    else:
        raise ValueError('unsupported metric {}'.format(metric))
    return evaluate_info
def matthews_corrcoef(preds, labels):
    """Matthews correlation coefficient for binary predictions."""
    pred_arr = np.array(preds)
    label_arr = np.array(labels)
    pos_pred = pred_arr == 1
    neg_pred = pred_arr == 0
    pos_label = label_arr == 1
    neg_label = label_arr == 0
    tp = np.sum(pos_label & pos_pred)
    tn = np.sum(neg_label & neg_pred)
    fp = np.sum(neg_label & pos_pred)
    fn = np.sum(pos_label & neg_pred)
    numerator = tp * tn - fp * fn
    denominator = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return numerator / denominator
def f1_score(preds, labels):
    """F1 score of binary `preds` against binary `labels` (positive == 1)."""
    pred_arr = np.array(preds)
    label_arr = np.array(labels)
    true_pos = np.sum((label_arr == 1) & (pred_arr == 1))
    false_pos = np.sum((label_arr == 0) & (pred_arr == 1))
    false_neg = np.sum((label_arr == 1) & (pred_arr == 0))
    precision = true_pos / (true_pos + false_pos)
    recall = true_pos / (true_pos + false_neg)
    # Small epsilon in the denominator matches the original's smoothing.
    return (2 * precision * recall) / (precision + recall + 1e-8)
def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlation between scores and labels.

    Returns a dict with keys "pearson", "spearman", "spearmanr" and "corr"
    (the average of the two coefficients).

    Fix: the original exposed only the key "spearmanr", while `evaluate`
    reads ``ret['spearman']`` — a guaranteed KeyError. Both keys are now
    provided so old and new callers keep working.
    """
    preds = np.array(preds)
    labels = np.array(labels)
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearman": spearman_corr,
        # Keep the old key for backward compatibility.
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def acc_and_f1(preds, labels):
    """Bundle accuracy and F1 (plus their average) into one dict."""
    pred_arr = np.array(preds)
    label_arr = np.array(labels)
    accuracy = simple_accuracy(pred_arr, label_arr)
    f1 = f1_score(pred_arr, label_arr)
    return {
        "acc": accuracy,
        "f1": f1,
        "acc_and_f1": (accuracy + f1) / 2,
    }
def simple_accuracy(preds, labels):
    """Fraction of positions where `preds` equals `labels`."""
    return np.mean(np.array(preds) == np.array(labels))
def predict(exe,
            test_program,
            test_pyreader,
            graph_vars,
            dev_count=1):
    """Run dual-encoder inference, writing per-pair scores to 'emb_qp'.

    Drains the pyreader, collecting qids and the flattened probability
    scores; each score is also appended (one per line) to the local file
    'emb_qp'. Returns ``(qids, preds, probs)`` — note ``preds`` is never
    filled here and is always returned empty.
    """
    test_pyreader.start()
    qids, scores, probs = [], [], []
    preds = []
    fetch_list = [graph_vars["probs"].name, graph_vars["qids"].name, \
                  graph_vars["q_rep"].name, graph_vars["p_rep"].name,]
    # Side effect: overwrites 'emb_qp' in the working directory.
    emb_file = open('emb_qp', 'w')
    while True:
        try:
            if dev_count == 1:
                np_probs, np_qids, q_rep, p_rep = exe.run(program=test_program,
                                                          fetch_list=fetch_list)
            else:
                np_probs, np_qids, q_rep, p_rep = exe.run(fetch_list=fetch_list)
            if np_qids is None:
                np_qids = np.array([])
            qids.extend(np_qids.reshape(-1).tolist())
            batch_scores = np_probs.reshape(-1).tolist()
            # _left/_right (the embeddings as text) are computed but only
            # the score is actually written — the embedding dumps are
            # commented out below.
            for item in list(zip(q_rep, p_rep, batch_scores)):
                _left = ' '.join([str(each) for each in item[0]])
                _right = ' '.join([str(each) for each in item[1]])
                #emb_file.write(_left + '\t' + _right + '\t' + str(item[2]) + '\n')
                #emb_file.write(_right + '\n')
                emb_file.write(str(item[2]) + '\n')
            #for score in batch_scores:
            #    print (score)
            #print ('--------')
            #if is_classify:
            #    np_preds = np.argmax(np_probs, axis=1).astype(np.float32)
            #    preds.extend(np_preds)
            #elif is_regression:
            #    preds.extend(np_probs.reshape(-1))
            probs.extend(batch_scores)
        except fluid.core.EOFException:
            test_pyreader.reset()
            break
    emb_file.close()
    #probs = np.concatenate(probs, axis=0).reshape([len(preds), -1])
    return qids, preds, probs
| 15,329 | 35.327014 | 132 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/finetune/__init__.py | 0 | 0 | 0 | py | |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/finetune/cross_encoder.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import time
import logging
import numpy as np
from scipy.stats import pearsonr, spearmanr
from six.moves import xrange
import paddle.fluid as fluid
from model.ernie import ErnieModel
log = logging.getLogger(__name__)
def create_model(args,
                 pyreader_name,
                 ernie_config,
                 is_prediction=False,
                 task_name=""):
    """Build the cross-encoder classification graph.

    A single ERNIE tower encodes the concatenated (query, para) pair and a
    task-specific FC head produces ``args.num_labels`` logits. In training
    mode the word-embedding table is perturbed along its loss gradient
    (scaled, normalized) before the real forward pass — an adversarial-style
    regularization — and restored afterwards.

    Returns ``(pyreader, graph_vars)``.
    """
    # 7 input slots: token/sentence/position/task ids, float attention
    # mask, labels and qids. -1 first dim = variable batch size.
    pyreader = fluid.layers.py_reader(
        capacity=50,
        shapes=[[-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
                [-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
                [-1, args.max_seq_len, 1], [-1, 1], [-1, 1]],
        dtypes=[
            'int64', 'int64', 'int64', 'int64', 'float32', 'int64', 'int64'
        ],
        lod_levels=[0, 0, 0, 0, 0, 0, 0],
        name=task_name + "_" + pyreader_name,
        use_double_buffer=True)
    (src_ids, sent_ids, pos_ids, task_ids, input_mask, labels,
     qids) = fluid.layers.read_file(pyreader)
    def _model(is_noise=False):
        # Build one forward pass; `is_noise` is forwarded to ErnieModel and
        # also disables dropout on the pooled features for the noise pass.
        ernie = ErnieModel(
            src_ids=src_ids,
            position_ids=pos_ids,
            sentence_ids=sent_ids,
            task_ids=task_ids,
            input_mask=input_mask,
            config=ernie_config,
            is_noise=is_noise)
        cls_feats = ernie.get_pooled_output()
        if not is_noise:
            cls_feats = fluid.layers.dropout(
                x=cls_feats,
                dropout_prob=0.1,
                dropout_implementation="upscale_in_train")
        logits = fluid.layers.fc(
            input=cls_feats,
            size=args.num_labels,
            param_attr=fluid.ParamAttr(
                name=task_name + "_cls_out_w",
                initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
            bias_attr=fluid.ParamAttr(
                name=task_name + "_cls_out_b",
                initializer=fluid.initializer.Constant(0.)))
        """
        if is_prediction:
            probs = fluid.layers.softmax(logits)
            feed_targets_name = [
                src_ids.name, sent_ids.name, pos_ids.name, input_mask.name
            ]
            if ernie_version == "2.0":
                feed_targets_name += [task_ids.name]
            return pyreader, probs, feed_targets_name
        """
        num_seqs = fluid.layers.create_tensor(dtype='int64')
        ## add focal loss
        ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
            logits=logits, label=labels, return_softmax=True)
        loss = fluid.layers.mean(x=ce_loss)
        accuracy = fluid.layers.accuracy(
            input=probs, label=labels, total=num_seqs)
        graph_vars = {
            "loss": loss,
            "probs": probs,
            "accuracy": accuracy,
            "labels": labels,
            "num_seqs": num_seqs,
            "qids": qids
        }
        return graph_vars
    if not is_prediction:
        # First (noise) pass: compute the loss gradient w.r.t. the shared
        # word-embedding table.
        graph_vars = _model(is_noise=True)
        old_loss = graph_vars["loss"]
        token_emb = fluid.default_main_program().global_block().var("word_embedding")
        # print(token_emb)
        token_emb.stop_gradient = False
        token_gradient = fluid.gradients(old_loss, token_emb)[0]
        token_gradient.stop_gradient = False
        epsilon = 1e-8
        norm = (fluid.layers.sqrt(
            fluid.layers.reduce_sum(fluid.layers.square(token_gradient)) + epsilon))
        # Normalized, scaled perturbation along the gradient direction.
        gp = (0.01 * token_gradient) / norm
        gp.stop_gradient = True
        # Perturb embeddings, run the real forward pass, then restore.
        fluid.layers.assign(token_emb + gp, token_emb)
        graph_vars = _model()
        fluid.layers.assign(token_emb - gp, token_emb)
    else:
        graph_vars = _model()
    return pyreader, graph_vars
def evaluate_mrr(preds):
    """Compute Mean Reciprocal Rank over ``(qid, score, label)`` triples.

    ``preds`` must be grouped by qid and already rank-ordered within each
    group (e.g. sorted by ``(qid, -score)``). For each query only the first
    relevant row (``label != 0``) contributes ``1 / rank``.

    Returns 0.0 for empty input (the original raised ZeroDivisionError).
    """
    if not preds:
        # Guard: no rows means no queries to average over.
        return 0.0
    last_qid = None
    total_mrr = 0.0
    qnum = 0.0
    rank = 0.0
    correct = False
    for qid, score, label in preds:
        if qid != last_qid:
            # New query group: reset the per-query rank counter.
            rank = 0.0
            qnum += 1
            correct = False
            last_qid = qid
        rank += 1
        if not correct and label != 0:
            # Only the first relevant hit counts toward MRR.
            total_mrr += 1.0 / rank
            correct = True
    return total_mrr / qnum
def evaluate(exe,
             test_program,
             test_pyreader,
             graph_vars,
             eval_phase,
             use_multi_gpu_test=False,
             metric='simple_accuracy'):
    """Evaluate the cross-encoder graph and format a result string.

    For ``eval_phase == "train"`` this fetches loss/accuracy from the
    already-running program and returns them as a dict. For other phases it
    drains the pyreader, accumulates metrics, and returns a formatted
    ``evaluate_info`` string for the chosen ``metric``.
    """
    train_fetch_list = [
        graph_vars["loss"].name, graph_vars["accuracy"].name,
        graph_vars["num_seqs"].name
    ]
    if eval_phase == "train":
        if "learning_rate" in graph_vars:
            train_fetch_list.append(graph_vars["learning_rate"].name)
        outputs = exe.run(fetch_list=train_fetch_list, program=test_program)
        ret = {"loss": np.mean(outputs[0]), "accuracy": np.mean(outputs[1])}
        if "learning_rate" in graph_vars:
            ret["learning_rate"] = float(outputs[3][0])
        return ret
    test_pyreader.start()
    total_cost, total_acc, total_num_seqs, total_label_pos_num, total_pred_pos_num, total_correct_num = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    qids, labels, scores, preds = [], [], [], []
    time_begin = time.time()
    fetch_list = [
        graph_vars["loss"].name, graph_vars["accuracy"].name,
        graph_vars["probs"].name, graph_vars["labels"].name,
        graph_vars["num_seqs"].name, graph_vars["qids"].name
    ]
    while True:
        try:
            if use_multi_gpu_test:
                np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids = exe.run(
                    fetch_list=fetch_list)
            else:
                np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids = exe.run(
                    program=test_program, fetch_list=fetch_list)
            # Weight each batch's loss/accuracy by its sequence count.
            total_cost += np.sum(np_loss * np_num_seqs)
            total_acc += np.sum(np_acc * np_num_seqs)
            total_num_seqs += np.sum(np_num_seqs)
            labels.extend(np_labels.reshape((-1)).tolist())
            if np_qids is None:
                np_qids = np.array([])
            qids.extend(np_qids.reshape(-1).tolist())
            # Column 1 of the softmax output = positive-class probability.
            scores.extend(np_probs[:, 1].reshape(-1).tolist())
            np_preds = np.argmax(np_probs, axis=1).astype(np.float32)
            preds.extend(np_preds)
            total_label_pos_num += np.sum(np_labels)
            total_pred_pos_num += np.sum(np_preds)
            total_correct_num += np.sum(np.dot(np_preds, np_labels))
        except fluid.core.EOFException:
            test_pyreader.reset()
            break
    time_end = time.time()
    cost = total_cost / total_num_seqs
    elapsed_time = time_end - time_begin
    evaluate_info = ""
    if metric == 'acc_and_f1':
        ret = acc_and_f1(preds, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, ave_acc: %f, f1: %f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret['acc'], ret['f1'], total_num_seqs, elapsed_time)
    elif metric == 'matthews_corrcoef':
        ret = matthews_corrcoef(preds, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, matthews_corrcoef: %f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret, total_num_seqs, elapsed_time)
    elif metric == 'pearson_and_spearman':
        # NOTE(review): verify pearson_and_spearman exposes a 'spearman'
        # key — historically it returned only 'spearmanr', which would make
        # this lookup raise KeyError.
        ret = pearson_and_spearman(scores, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, pearson:%f, spearman:%f, corr:%f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret['pearson'], ret['spearman'], ret['corr'], total_num_seqs, elapsed_time)
    elif metric == 'simple_accuracy':
        ret = simple_accuracy(preds, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, acc:%f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret, total_num_seqs, elapsed_time)
    elif metric == "acc_and_f1_and_mrr":
        ret_a = acc_and_f1(preds, labels)
        # Sort by (qid, descending score) so evaluate_mrr sees rank order.
        preds = sorted(
            zip(qids, scores, labels), key=lambda elem: (elem[0], -elem[1]))
        ret_b = evaluate_mrr(preds)
        evaluate_info = "[%s evaluation] ave loss: %f, acc: %f, f1: %f, mrr: %f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret_a['acc'], ret_a['f1'], ret_b, total_num_seqs, elapsed_time)
    else:
        raise ValueError('unsupported metric {}'.format(metric))
    return evaluate_info
def matthews_corrcoef(preds, labels):
    """Matthews correlation coefficient for binary predictions."""
    pred_arr = np.array(preds)
    label_arr = np.array(labels)
    tp = np.sum((label_arr == 1) & (pred_arr == 1))
    tn = np.sum((label_arr == 0) & (pred_arr == 0))
    fp = np.sum((label_arr == 0) & (pred_arr == 1))
    fn = np.sum((label_arr == 1) & (pred_arr == 0))
    numerator = tp * tn - fp * fn
    denominator = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return numerator / denominator
def f1_score(preds, labels):
    """F1 score of binary `preds` against binary `labels` (positive == 1)."""
    pred_arr = np.array(preds)
    label_arr = np.array(labels)
    true_pos = np.sum((label_arr == 1) & (pred_arr == 1))
    false_pos = np.sum((label_arr == 0) & (pred_arr == 1))
    false_neg = np.sum((label_arr == 1) & (pred_arr == 0))
    precision = true_pos / (true_pos + false_pos)
    recall = true_pos / (true_pos + false_neg)
    # Small epsilon in the denominator matches the original's smoothing.
    return (2 * precision * recall) / (precision + recall + 1e-8)
def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlation between scores and labels.

    Returns a dict with keys "pearson", "spearman", "spearmanr" and "corr"
    (the average of the two coefficients).

    Fix: the original exposed only the key "spearmanr", while `evaluate`
    reads ``ret['spearman']`` — a guaranteed KeyError. Both keys are now
    provided so old and new callers keep working.
    """
    preds = np.array(preds)
    labels = np.array(labels)
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearman": spearman_corr,
        # Keep the old key for backward compatibility.
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def acc_and_f1(preds, labels):
    """Bundle accuracy and F1 (plus their average) into one dict."""
    pred_arr = np.array(preds)
    label_arr = np.array(labels)
    accuracy = simple_accuracy(pred_arr, label_arr)
    f1 = f1_score(pred_arr, label_arr)
    return {
        "acc": accuracy,
        "f1": f1,
        "acc_and_f1": (accuracy + f1) / 2,
    }
def simple_accuracy(preds, labels):
    """Fraction of positions where `preds` equals `labels`."""
    return np.mean(np.array(preds) == np.array(labels))
def predict(exe,
            test_program,
            test_pyreader,
            graph_vars,
            dev_count=1):
    """Run cross-encoder inference over the pyreader feed.

    Drains the reader and collects qids, argmax class predictions and the
    full probability matrix. Returns ``(qids, preds, probs)`` where `probs`
    has shape [num_examples, num_labels].
    """
    test_pyreader.start()
    qids, scores, probs = [], [], []
    preds = []
    fetch_list = [graph_vars["probs"].name, graph_vars["qids"].name]
    while True:
        try:
            if dev_count == 1:
                np_probs, np_qids = exe.run(program=test_program,
                                            fetch_list=fetch_list)
            else:
                # Multi-device run: program is bound to the executor.
                np_probs, np_qids = exe.run(fetch_list=fetch_list)
            if np_qids is None:
                np_qids = np.array([])
            qids.extend(np_qids.reshape(-1).tolist())
            # Hard class decision = argmax over label probabilities.
            np_preds = np.argmax(np_probs, axis=1).astype(np.float32)
            preds.extend(np_preds)
            probs.append(np_probs)
        except fluid.core.EOFException:
            test_pyreader.reset()
            break
    # Stack per-batch probability matrices into one [N, num_labels] array.
    probs = np.concatenate(probs, axis=0).reshape([len(preds), -1])
    return qids, preds, probs
| 11,546 | 33.885196 | 132 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/reader/reader_de.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
import os
import json
import random
import logging
import numpy as np
import six
from io import open
from collections import namedtuple
import tokenization
from batching import pad_batch_data
log = logging.getLogger(__name__)
if six.PY3:
    import io
    # Re-wrap stdout/stderr with an explicit UTF-8 encoder so printing
    # non-ASCII text cannot raise UnicodeEncodeError under Python 3 when
    # the process locale is not UTF-8.
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def csv_reader(fd, delimiter='\t', trainer_id=0, trainer_num=1):
    """Yield delimiter-split rows from `fd`, sharded across trainers.

    Only every `trainer_num`-th line (at offset `trainer_id`) is emitted.
    A single-column row is yielded as a 1-tuple wrapping the field list;
    a multi-column row is yielded as a plain list of fields.
    """
    def _rows():
        for line_no, raw in enumerate(fd):
            if line_no % trainer_num != trainer_id:
                continue
            fields = raw.rstrip('\n').split(delimiter)
            yield (fields,) if len(fields) == 1 else fields
    return _rows()
class BaseReader(object):
    """Base reader for dual-encoder training data.

    Tokenizes (query, positive para, negative para) triples into padded id
    batches for the py_reader. Subclasses override `_read_tsv` /
    `_pad_batch_records` for their specific file format.
    """
    def __init__(self,
                 vocab_path,
                 label_map_config=None,
                 q_max_seq_len=128,
                 p_max_seq_len=512,
                 total_num=0,
                 do_lower_case=True,
                 in_tokens=False,
                 is_inference=False,
                 random_seed=None,
                 tokenizer="FullTokenizer",
                 for_cn=True,
                 task_id=0):
        # NOTE(review): the `tokenizer` and `random_seed` parameters are
        # accepted but unused — a FullTokenizer is always constructed and
        # the seeding line is commented out.
        self.q_max_seq_len = q_max_seq_len
        self.p_max_seq_len = p_max_seq_len
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_path, do_lower_case=do_lower_case)
        self.vocab = self.tokenizer.vocab
        # Cache special-token ids used for padding/sequence framing.
        self.pad_id = self.vocab["[PAD]"]
        self.cls_id = self.vocab["[CLS]"]
        self.sep_id = self.vocab["[SEP]"]
        self.in_tokens = in_tokens
        self.is_inference = is_inference
        self.for_cn = for_cn
        self.task_id = task_id
        # np.random.seed(random_seed)
        self.current_example = 0
        self.current_epoch = 0
        self.num_examples = 0
        self.total_num = total_num
        if label_map_config:
            # Optional string-label -> integer-id mapping.
            with open(label_map_config, encoding='utf8') as f:
                self.label_map = json.load(f)
        else:
            self.label_map = None
    def get_train_progress(self):
        """Gets progress for training phase."""
        return self.current_example, self.current_epoch
    def _read_tsv(self, input_file, batch_size=16, quotechar=None):
        """Reads a tab separated value file (first line = header row)."""
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            headers = next(reader)
            Example = namedtuple('Example', headers)
            examples = []
            for line in reader:
                example = Example(*line)
                examples.append(example)
            return examples
    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()
    def _convert_example_to_record(self, example, q_max_seq_length, p_max_seq_length, tokenizer):
        """Converts a single `Example` into a single `Record`.

        Produces three token-id sequences: the query ([CLS] q [SEP]) and
        two title+para pairs ([CLS] title [SEP] para [SEP]) for the
        positive and negative paragraphs.
        """
        query = tokenization.convert_to_unicode(example.query)
        tokens_query = tokenizer.tokenize(query)
        # -2 reserves room for [CLS] and [SEP].
        self._truncate_seq_pair([], tokens_query, q_max_seq_length - 2)
        # pos title
        title_pos = tokenization.convert_to_unicode(example.title_pos)
        tokens_title_pos = tokenizer.tokenize(title_pos)
        # pos para
        para_pos = tokenization.convert_to_unicode(example.para_pos)
        tokens_para_pos = tokenizer.tokenize(para_pos)
        # -3 reserves room for [CLS], [SEP], [SEP].
        self._truncate_seq_pair(tokens_title_pos, tokens_para_pos, p_max_seq_length - 3)
        # neg title
        title_neg = tokenization.convert_to_unicode(example.title_neg)
        tokens_title_neg = tokenizer.tokenize(title_neg)
        # neg para
        para_neg = tokenization.convert_to_unicode(example.para_neg)
        tokens_para_neg = tokenizer.tokenize(para_neg)
        self._truncate_seq_pair(tokens_title_neg, tokens_para_neg, p_max_seq_length - 3)
        # Query sequence: [CLS] query [SEP], all segment id 0.
        tokens_q = []
        text_type_ids_q = []
        tokens_q.append("[CLS]")
        text_type_ids_q.append(0)
        for token in tokens_query:
            tokens_q.append(token)
            text_type_ids_q.append(0)
        tokens_q.append("[SEP]")
        text_type_ids_q.append(0)
        token_ids_q = tokenizer.convert_tokens_to_ids(tokens_q)
        position_ids_q = list(range(len(token_ids_q)))
        #f = open('tid', 'a')
        #for tid in range(len(token_ids_q)):
        #    f.write(str(token_ids_q[tid]) + ' ' + str(tokens_q[tid]) + '\n')
        ### pos_para
        # Positive para: [CLS] title [SEP] (seg 0) para [SEP] (seg 1).
        tokens_p_pos = []
        text_type_ids_p_pos = []
        tokens_p_pos.append("[CLS]")
        text_type_ids_p_pos.append(0)
        for token in tokens_title_pos:
            tokens_p_pos.append(token)
            text_type_ids_p_pos.append(0)
        tokens_p_pos.append("[SEP]")
        text_type_ids_p_pos.append(0)
        for token in tokens_para_pos:
            tokens_p_pos.append(token)
            text_type_ids_p_pos.append(1)
        tokens_p_pos.append("[SEP]")
        text_type_ids_p_pos.append(1)
        token_ids_p_pos = tokenizer.convert_tokens_to_ids(tokens_p_pos)
        position_ids_p_pos = list(range(len(token_ids_p_pos)))
        #for tid in range(len(token_ids_p_pos)):
        #    f.write(str(token_ids_p_pos[tid]) + ' ' + str(tokens_p_pos[tid]) + '\n')
        #f.close()
        ### neg_para
        # Negative para: same layout as the positive one.
        tokens_p_neg = []
        text_type_ids_p_neg = []
        tokens_p_neg.append("[CLS]")
        text_type_ids_p_neg.append(0)
        for token in tokens_title_neg:
            tokens_p_neg.append(token)
            text_type_ids_p_neg.append(0)
        tokens_p_neg.append("[SEP]")
        text_type_ids_p_neg.append(0)
        for token in tokens_para_neg:
            tokens_p_neg.append(token)
            text_type_ids_p_neg.append(1)
        tokens_p_neg.append("[SEP]")
        text_type_ids_p_neg.append(1)
        token_ids_p_neg = tokenizer.convert_tokens_to_ids(tokens_p_neg)
        position_ids_p_neg = list(range(len(token_ids_p_neg)))
        if self.is_inference:
            # Inference records carry no label/qid.
            Record = namedtuple('Record',
                                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                                 'token_ids_p_pos', 'text_type_ids_p_pos', 'position_ids_p_pos', \
                                 'token_ids_p_neg', 'text_type_ids_p_neg', 'position_ids_p_neg'])
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p_pos=token_ids_p_pos,
                text_type_ids_p_pos=text_type_ids_p_pos,
                position_ids_p_pos=position_ids_p_pos,
                token_ids_p_neg=token_ids_p_neg,
                text_type_ids_p_neg=text_type_ids_p_neg,
                position_ids_p_neg=position_ids_p_neg)
        else:
            if self.label_map:
                label_id = self.label_map[example.label]
            else:
                label_id = example.label
            Record = namedtuple('Record',
                                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                                 'token_ids_p_pos', 'text_type_ids_p_pos', 'position_ids_p_pos', \
                                 'token_ids_p_neg', 'text_type_ids_p_neg', 'position_ids_p_neg',
                                 'label_id', 'qid'
                                ])
            qid = None
            if "qid" in example._fields:
                qid = example.qid
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p_pos=token_ids_p_pos,
                text_type_ids_p_pos=text_type_ids_p_pos,
                position_ids_p_pos=position_ids_p_pos,
                token_ids_p_neg=token_ids_p_neg,
                text_type_ids_p_neg=text_type_ids_p_neg,
                position_ids_p_neg=position_ids_p_neg,
                label_id=label_id,
                qid=qid)
        return record
    def _prepare_batch_data(self, examples, batch_size, phase=None):
        """generate batch records"""
        batch_records, max_len = [], 0
        for index, example in enumerate(examples):
            if phase == "train":
                self.current_example = index
            record = self._convert_example_to_record(example, self.q_max_seq_len,
                                                     self.p_max_seq_len, self.tokenizer)
            max_len = max(max_len, len(record.token_ids_p_pos))
            max_len = max(max_len, len(record.token_ids_p_neg))
            if self.in_tokens:
                # Token-count batching: batch_size bounds tokens, not rows.
                to_append = (len(batch_records) + 1) * max_len <= batch_size
            else:
                to_append = len(batch_records) < batch_size
            if to_append:
                batch_records.append(record)
            else:
                yield self._pad_batch_records(batch_records)
                max_len = max(len(record.token_ids_p_neg), len(record.token_ids_p_pos))
                batch_records = [record]
        if batch_records:
            # Flush the trailing partial batch.
            yield self._pad_batch_records(batch_records)
    def get_num_examples(self, input_file):
        # examples = self._read_tsv(input_file)
        # return len(examples)
        # Returns the count computed by data_generator(phase='train');
        # `input_file` is ignored.
        return self.num_examples
    def data_generator(self,
                       input_file,
                       batch_size,
                       epoch,
                       dev_count=1,
                       trainer_id=0,
                       trainer_num=1,
                       shuffle=True,
                       phase=None):
        """Build a generator factory yielding padded batches for `epoch` passes.

        NOTE(review): the train branch calls `_read_tsv` with
        trainer_id/trainer_num/num_examples keyword args — that matches the
        ClassifyReader override's signature, not BaseReader._read_tsv;
        confirm this class is only used via ClassifyReader for training.
        """
        if phase == 'train':
            # examples = examples[trainer_id: (len(examples) //trainer_num) * trainer_num : trainer_num]
            # Shard the dataset evenly across trainers.
            self.num_examples_per_node = self.total_num // trainer_num
            self.num_examples = self.num_examples_per_node * trainer_num
            examples = self._read_tsv(input_file, batch_size=batch_size, trainer_id=trainer_id, trainer_num=trainer_num, num_examples=self.num_examples_per_node)
            log.info('apply sharding %d/%d' % (trainer_id, trainer_num))
        else:
            examples = self._read_tsv(input_file, batch_size=batch_size)
        def wrapper():
            # Batches are grouped per device; a trailing group smaller than
            # dev_count is dropped.
            all_dev_batches = []
            for epoch_index in range(epoch):
                if phase == "train":
                    self.current_example = 0
                    self.current_epoch = epoch_index
                if shuffle:
                    np.random.shuffle(examples)
                for batch_data in self._prepare_batch_data(
                        examples, batch_size, phase=phase):
                    if len(all_dev_batches) < dev_count:
                        all_dev_batches.append(batch_data)
                    if len(all_dev_batches) == dev_count:
                        for batch in all_dev_batches:
                            yield batch
                        all_dev_batches = []
        def f():
            # Wrap the generator so worker exceptions are printed instead
            # of silently killing the feeding thread.
            try:
                for i in wrapper():
                    yield i
            except Exception as e:
                import traceback
                traceback.print_exc()
        return f
class ClassifyReader(BaseReader):
    def _read_tsv(self, input_file, batch_size=16, quotechar=None, trainer_id=0, trainer_num=1, num_examples=0):
        """Read a headerless TSV training file into Example namedtuples.

        Column names are hard-coded (query / title_pos / para_pos /
        title_neg / para_neg / label). Lines are sharded across trainers by
        `csv_reader`, truncated to `num_examples` when non-zero, and the
        last example is repeated until the total is a multiple of
        `batch_size` so every batch is full.
        """
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f, trainer_id=trainer_id, trainer_num=trainer_num)
            # headers = next(reader)
            #headers = 'query\tpara_pos\tpara_neg\tlabel'.split('\t')
            headers = 'query\ttitle_pos\tpara_pos\ttitle_neg\tpara_neg\tlabel'.split('\t')
            text_indices = [
                index for index, h in enumerate(headers) if h != "label"
            ]
            Example = namedtuple('Example', headers)
            examples = []
            for cnt, line in enumerate(reader):
                if num_examples != 0 and cnt == num_examples:
                    # Stop at this shard's quota.
                    break
                for index, text in enumerate(line):
                    if index in text_indices:
                        if self.for_cn:
                            # Chinese corpora: drop the spaces inserted by
                            # upstream word segmentation.
                            line[index] = text.replace(' ', '')
                        else:
                            line[index] = text
                example = Example(*line)
                examples.append(example)
            # Pad to a full final batch by repeating the last example.
            while len(examples) % batch_size != 0:
                examples.append(example)
            return examples
def _pad_batch_records(self, batch_records):
batch_token_ids_q = [record.token_ids_q for record in batch_records]
batch_text_type_ids_q = [record.text_type_ids_q for record in batch_records]
batch_position_ids_q = [record.position_ids_q for record in batch_records]
batch_token_ids_p_pos = [record.token_ids_p_pos for record in batch_records]
batch_text_type_ids_p_pos = [record.text_type_ids_p_pos for record in batch_records]
batch_position_ids_p_pos = [record.position_ids_p_pos for record in batch_records]
batch_token_ids_p_neg = [record.token_ids_p_neg for record in batch_records]
batch_text_type_ids_p_neg = [record.text_type_ids_p_neg for record in batch_records]
batch_position_ids_p_neg = [record.position_ids_p_neg for record in batch_records]
if not self.is_inference:
batch_labels = [record.label_id for record in batch_records]
batch_labels = np.array(batch_labels).astype("int64").reshape(
[-1, 1])
if batch_records[0].qid:
batch_qids = [record.qid for record in batch_records]
batch_qids = np.array(batch_qids).astype("int64").reshape(
[-1, 1])
else:
batch_qids = np.array([]).astype("int64").reshape([-1, 1])
# padding
padded_token_ids_q, input_mask_q = pad_batch_data(
batch_token_ids_q, pad_idx=self.pad_id, return_input_mask=True)
padded_text_type_ids_q = pad_batch_data(
batch_text_type_ids_q, pad_idx=self.pad_id)
padded_position_ids_q = pad_batch_data(
batch_position_ids_q, pad_idx=self.pad_id)
padded_task_ids_q = np.ones_like(padded_token_ids_q, dtype="int64") * self.task_id
padded_token_ids_p_pos, input_mask_p_pos = pad_batch_data(
batch_token_ids_p_pos, pad_idx=self.pad_id, return_input_mask=True)
padded_text_type_ids_p_pos = pad_batch_data(
batch_text_type_ids_p_pos, pad_idx=self.pad_id)
padded_position_ids_p_pos = pad_batch_data(
batch_position_ids_p_pos, pad_idx=self.pad_id)
padded_task_ids_p_pos = np.ones_like(padded_token_ids_p_pos, dtype="int64") * self.task_id
padded_token_ids_p_neg, input_mask_p_neg = pad_batch_data(
batch_token_ids_p_neg, pad_idx=self.pad_id, return_input_mask=True)
padded_text_type_ids_p_neg = pad_batch_data(
batch_text_type_ids_p_neg, pad_idx=self.pad_id)
padded_position_ids_p_neg = pad_batch_data(
batch_position_ids_p_neg, pad_idx=self.pad_id)
padded_task_ids_p_neg = np.ones_like(padded_token_ids_p_neg, dtype="int64") * self.task_id
return_list = [
padded_token_ids_q, padded_text_type_ids_q, padded_position_ids_q, padded_task_ids_q,
input_mask_q,
padded_token_ids_p_pos, padded_text_type_ids_p_pos, padded_position_ids_p_pos, padded_task_ids_p_pos,
input_mask_p_pos,
padded_token_ids_p_neg, padded_text_type_ids_p_neg, padded_position_ids_p_neg, padded_task_ids_p_neg,
input_mask_p_neg
]
if not self.is_inference:
return_list += [batch_labels, batch_qids]
return return_list
if __name__ == '__main__':
    # Library-only module: no standalone CLI behavior.
    pass
| 17,266 | 39.437939 | 161 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/reader/reader_de_infer.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
import os
import json
import random
import logging
import numpy as np
import six
from io import open
from collections import namedtuple
import tokenization
from batching import pad_batch_data
log = logging.getLogger(__name__)
if six.PY3:
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def csv_reader(fd, delimiter='\t'):
    """Lazily yield one list of fields per line of *fd*.

    Only the trailing newline is removed before splitting, so empty
    columns are preserved exactly as written.
    """
    def _records():
        for raw_line in fd:
            yield raw_line.rstrip('\n').split(delimiter)
    return _records()
class BaseReader(object):
    """Base reader for dual-encoder inference.

    Turns query / title / para TSV rows into separate query and passage
    token-id sequences; the subclass supplies the batch padding logic.
    """

    def __init__(self,
                 vocab_path,
                 label_map_config=None,
                 q_max_seq_len=128,
                 p_max_seq_len=512,
                 do_lower_case=True,
                 in_tokens=False,
                 is_inference=False,
                 random_seed=None,
                 tokenizer="FullTokenizer",
                 for_cn=True,
                 task_id=0):
        # Queries and passages go through separate towers, hence two max lengths.
        self.q_max_seq_len = q_max_seq_len
        self.p_max_seq_len = p_max_seq_len
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_path, do_lower_case=do_lower_case)
        self.vocab = self.tokenizer.vocab
        self.pad_id = self.vocab["[PAD]"]
        self.cls_id = self.vocab["[CLS]"]
        self.sep_id = self.vocab["[SEP]"]
        # in_tokens: interpret batch_size as a token budget instead of an example count.
        self.in_tokens = in_tokens
        self.is_inference = is_inference
        self.for_cn = for_cn
        self.task_id = task_id
        np.random.seed(random_seed)
        self.current_example = 0
        self.current_epoch = 0
        self.num_examples = 0
        if label_map_config:
            with open(label_map_config, encoding='utf8') as f:
                self.label_map = json.load(f)
        else:
            self.label_map = None

    def get_train_progress(self):
        """Gets progress for training phase."""
        return self.current_example, self.current_epoch

    def _read_tsv(self, input_file, batch_size=16, quotechar=None):
        """Reads a tab separated value file.

        The first row is a header naming the Example fields. `batch_size` is
        unused here; subclasses use it to pad the example list to a multiple.
        """
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            headers = next(reader)
            Example = namedtuple('Example', headers)
            examples = []
            for line in reader:
                example = Example(*line)
                examples.append(example)
            return examples

    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()

    def _convert_example_to_record(self, example, q_max_seq_length, p_max_seq_length, tokenizer):
        """Converts a single `Example` into a single `Record`.

        Query and (title, para) passage are tokenized independently:
        query  -> [CLS] query [SEP]                (all text_type 0)
        passage-> [CLS] title [SEP] para [SEP]     (title segment 0, para segment 1)
        """
        query = tokenization.convert_to_unicode(example.query)
        tokens_query = tokenizer.tokenize(query)
        # Empty first list means only the query gets truncated; 2 slots reserved
        # for [CLS]/[SEP].
        self._truncate_seq_pair([], tokens_query, q_max_seq_length - 2)
        # title
        title = tokenization.convert_to_unicode(example.title)
        tokens_title = tokenizer.tokenize(title)
        # para
        para = tokenization.convert_to_unicode(example.para)
        tokens_para = tokenizer.tokenize(para)
        # 3 slots reserved for [CLS] and the two [SEP]s on the passage side.
        self._truncate_seq_pair(tokens_title, tokens_para, p_max_seq_length - 3)
        tokens_q = []
        text_type_ids_q = []
        tokens_q.append("[CLS]")
        text_type_ids_q.append(0)
        for token in tokens_query:
            tokens_q.append(token)
            text_type_ids_q.append(0)
        tokens_q.append("[SEP]")
        text_type_ids_q.append(0)
        token_ids_q = tokenizer.convert_tokens_to_ids(tokens_q)
        position_ids_q = list(range(len(token_ids_q)))
        #f = open('tid', 'a')
        #for tid in range(len(token_ids_q)):
        #    f.write(str(token_ids_q[tid]) + '\t' + tokens_q[tid] + '\n')
        #    f.write(str(token_ids_q[tid]) + ' ')
        #f.write('\t')
        ### para
        tokens_p = []
        text_type_ids_p = []
        tokens_p.append("[CLS]")
        text_type_ids_p.append(0)
        for token in tokens_title:
            tokens_p.append(token)
            text_type_ids_p.append(0)
        tokens_p.append("[SEP]")
        text_type_ids_p.append(0)
        for token in tokens_para:
            tokens_p.append(token)
            text_type_ids_p.append(1)
        tokens_p.append("[SEP]")
        text_type_ids_p.append(1)
        token_ids_p = tokenizer.convert_tokens_to_ids(tokens_p)
        position_ids_p = list(range(len(token_ids_p)))
        #for tid in range(len(token_ids_p)):
        #    f.write(str(token_ids_p[tid]) + '\t' + tokens_p[tid] + '\n')
        #    f.write(str(token_ids_p[tid]) + ' ')
        #f.write('\n')
        #f.close()
        if self.is_inference:
            Record = namedtuple('Record',
                                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                                 'token_ids_p', 'text_type_ids_p', 'position_ids_p'])
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p=token_ids_p,
                text_type_ids_p=text_type_ids_p,
                position_ids_p=position_ids_p)
        else:
            if self.label_map:
                label_id = self.label_map[example.label]
            else:
                label_id = example.label
            Record = namedtuple('Record',
                                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                                 'token_ids_p', 'text_type_ids_p', 'position_ids_p', \
                                 'label_id', 'qid'
                                 ])
            qid = None
            if "qid" in example._fields:
                qid = example.qid
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p=token_ids_p,
                text_type_ids_p=text_type_ids_p,
                position_ids_p=position_ids_p,
                label_id=label_id,
                qid=qid)
        return record

    def _prepare_batch_data(self, examples, batch_size, phase=None, read_id=False):
        """generate batch records"""
        batch_records, max_len = [], 0
        for index, example in enumerate(examples):
            if phase == "train":
                self.current_example = index
            if read_id is False:
                record = self._convert_example_to_record(example, self.q_max_seq_len,
                                                         self.p_max_seq_len, self.tokenizer)
            else:
                # NOTE(review): `_convert_example_id_to_record` is not defined in this
                # class; read_id=True raises AttributeError unless a subclass adds it.
                record = self._convert_example_id_to_record(example, self.q_max_seq_len,
                                                            self.p_max_seq_len, self.tokenizer)
            max_len = max(max_len, len(record.token_ids_p))
            if self.in_tokens:
                # Token-budget batching: passage max length drives the estimate.
                to_append = (len(batch_records) + 1) * max_len <= batch_size
            else:
                to_append = len(batch_records) < batch_size
            if to_append:
                batch_records.append(record)
            else:
                yield self._pad_batch_records(batch_records)
                max_len = len(record.token_ids_p)
                batch_records = [record]
        if batch_records:
            yield self._pad_batch_records(batch_records)

    def get_num_examples(self, input_file):
        """Return the number of examples in *input_file* (reads the whole file)."""
        examples = self._read_tsv(input_file)
        return len(examples)

    def data_generator(self,
                       input_file,
                       batch_size,
                       epoch,
                       dev_count=1,
                       shuffle=True,
                       phase=None,
                       read_id=False):
        """Build a generator factory that yields padded batches for `epoch` passes.

        Batches are emitted in groups of `dev_count` (one per device); a
        trailing group smaller than `dev_count` is dropped at epoch end.
        """
        examples = self._read_tsv(input_file, batch_size)
        def wrapper():
            all_dev_batches = []
            for epoch_index in range(epoch):
                if phase == "train":
                    self.current_example = 0
                    self.current_epoch = epoch_index
                if shuffle:
                    np.random.shuffle(examples)
                for batch_data in self._prepare_batch_data(
                        examples, batch_size, phase=phase, read_id=read_id):
                    if len(all_dev_batches) < dev_count:
                        all_dev_batches.append(batch_data)
                    if len(all_dev_batches) == dev_count:
                        for batch in all_dev_batches:
                            yield batch
                        all_dev_batches = []
        def f():
            # Logs and swallows any exception, so a data error ends the stream
            # instead of crashing the consumer.
            try:
                for i in wrapper():
                    yield i
            except Exception as e:
                import traceback
                traceback.print_exc()
        return f
class ClassifyReader(BaseReader):
    """Dual-encoder inference reader for headerless `query\\ttitle\\tpara\\tlabel` TSV."""

    def _read_tsv(self, input_file, batch_size=16, quotechar=None):
        """Reads a tab separated value file."""
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            #headers = next(reader)
            # No header row in the data file; the column layout is fixed here.
            headers = 'query\ttitle\tpara\tlabel'.split('\t')
            text_indices = [
                index for index, h in enumerate(headers) if h != "label"
            ]
            Example = namedtuple('Example', headers)
            examples = []
            for line in reader:
                for index, text in enumerate(line):
                    if index in text_indices:
                        if self.for_cn:
                            # Chinese text: strip inner spaces left by pre-tokenization.
                            line[index] = text.replace(' ', '')
                        else:
                            line[index] = text
                example = Example(*line)
                examples.append(example)
            # Replicate the last example so len(examples) divides batch_size,
            # keeping every batch full for the fixed-shape inference program.
            # NOTE(review): `example` is unbound if the file is empty.
            while len(examples) % batch_size != 0:
                examples.append(example)
            return examples

    def _pad_batch_records(self, batch_records):
        """Pad one batch and assemble the feed list: query tensors, then passage tensors."""
        batch_token_ids_q = [record.token_ids_q for record in batch_records]
        batch_text_type_ids_q = [record.text_type_ids_q for record in batch_records]
        batch_position_ids_q = [record.position_ids_q for record in batch_records]
        batch_token_ids_p = [record.token_ids_p for record in batch_records]
        batch_text_type_ids_p = [record.text_type_ids_p for record in batch_records]
        batch_position_ids_p = [record.position_ids_p for record in batch_records]
        if not self.is_inference:
            batch_labels = [record.label_id for record in batch_records]
            batch_labels = np.array(batch_labels).astype("int64").reshape(
                [-1, 1])
            # qid is optional; fall back to an empty tensor when absent.
            if batch_records[0].qid:
                batch_qids = [record.qid for record in batch_records]
                batch_qids = np.array(batch_qids).astype("int64").reshape(
                    [-1, 1])
            else:
                batch_qids = np.array([]).astype("int64").reshape([-1, 1])
        # padding
        padded_token_ids_q, input_mask_q = pad_batch_data(
            batch_token_ids_q, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_q = pad_batch_data(
            batch_text_type_ids_q, pad_idx=self.pad_id)
        padded_position_ids_q = pad_batch_data(
            batch_position_ids_q, pad_idx=self.pad_id)
        # Task id is constant per model; broadcast it to the token-grid shape.
        padded_task_ids_q = np.ones_like(padded_token_ids_q, dtype="int64") * self.task_id
        padded_token_ids_p, input_mask_p = pad_batch_data(
            batch_token_ids_p, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_p = pad_batch_data(
            batch_text_type_ids_p, pad_idx=self.pad_id)
        padded_position_ids_p = pad_batch_data(
            batch_position_ids_p, pad_idx=self.pad_id)
        padded_task_ids_p = np.ones_like(padded_token_ids_p, dtype="int64") * self.task_id
        return_list = [
            padded_token_ids_q, padded_text_type_ids_q, padded_position_ids_q, padded_task_ids_q,
            input_mask_q,
            padded_token_ids_p, padded_text_type_ids_p, padded_position_ids_p, padded_task_ids_p,
            input_mask_p,
        ]
        if not self.is_inference:
            return_list += [batch_labels, batch_qids]
        return return_list
if __name__ == '__main__':
    # Library-only module: no standalone CLI behavior.
    pass
| 13,641 | 36.581267 | 97 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/reader/reader_ce.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
import os
import json
import random
import logging
import numpy as np
import six
from io import open
from collections import namedtuple
import tokenization
from batching import pad_batch_data
log = logging.getLogger(__name__)
if six.PY3:
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def csv_reader(fd, delimiter='\t', trainer_id=0, trainer_num=1):
    """Lazily yield this trainer's share of the rows in *fd*.

    Lines are sharded round-robin: line i belongs to trainer ``i % trainer_num``.
    Only the trailing newline is stripped before splitting on *delimiter*.
    Single-column rows are wrapped in a 1-tuple (historical behavior kept
    for callers that rely on it).
    """
    def _sharded_records():
        for line_no, raw_line in enumerate(fd):
            if line_no % trainer_num != trainer_id:
                continue  # this line belongs to another trainer
            fields = raw_line.rstrip('\n').split(delimiter)
            if len(fields) == 1:
                yield (fields,)
            else:
                yield fields
    return _sharded_records()
class BaseReader(object):
    """Base reader for the cross-encoder: one joint [CLS] q [SEP] title+para [SEP] input."""

    def __init__(self,
                 vocab_path,
                 label_map_config=None,
                 max_seq_len=512,
                 total_num=0,
                 do_lower_case=True,
                 in_tokens=False,
                 is_inference=False,
                 random_seed=None,
                 tokenizer="FullTokenizer",
                 for_cn=True,
                 task_id=0):
        self.max_seq_len = max_seq_len
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_path, do_lower_case=do_lower_case)
        self.vocab = self.tokenizer.vocab
        self.pad_id = self.vocab["[PAD]"]
        self.cls_id = self.vocab["[CLS]"]
        self.sep_id = self.vocab["[SEP]"]
        # in_tokens: interpret batch_size as a token budget instead of an example count.
        self.in_tokens = in_tokens
        self.is_inference = is_inference
        self.for_cn = for_cn
        self.task_id = task_id
        np.random.seed(random_seed)
        self.current_example = 0
        self.current_epoch = 0
        self.num_examples = 0
        # total_num: externally supplied dataset size, used to compute the
        # per-trainer shard without scanning the file.
        self.total_num = total_num
        if label_map_config:
            with open(label_map_config, encoding='utf8') as f:
                self.label_map = json.load(f)
        else:
            self.label_map = None

    def get_train_progress(self):
        """Gets progress for training phase."""
        return self.current_example, self.current_epoch

    def _read_tsv(self, input_file, quotechar=None):
        """Reads a tab separated value file (first row is the header)."""
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            headers = next(reader)
            Example = namedtuple('Example', headers)
            examples = []
            for line in reader:
                example = Example(*line)
                examples.append(example)
            return examples

    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()

    def _convert_example_to_record(self, example, max_seq_length, tokenizer):
        """Converts a single `Example` into a single `Record`.

        Segment A is the query; segment B is title and para concatenated.
        """
        query = tokenization.convert_to_unicode(example.query)
        tokens_a = tokenizer.tokenize(query)
        tokens_b = None
        title = tokenization.convert_to_unicode(example.title)
        tokens_b = tokenizer.tokenize(title)
        para = tokenization.convert_to_unicode(example.para)
        tokens_para = tokenizer.tokenize(para)
        tokens_b.extend(tokens_para)
        # 3 slots reserved for [CLS] and the two [SEP]s.
        self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        # The convention in BERT/ERNIE is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0     0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = []
        text_type_ids = []
        tokens.append("[CLS]")
        text_type_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            text_type_ids.append(0)
        tokens.append("[SEP]")
        text_type_ids.append(0)
        if tokens_b:
            for token in tokens_b:
                tokens.append(token)
                text_type_ids.append(1)
            tokens.append("[SEP]")
            text_type_ids.append(1)
        token_ids = tokenizer.convert_tokens_to_ids(tokens)
        position_ids = list(range(len(token_ids)))
        if self.is_inference:
            Record = namedtuple('Record',
                                ['token_ids', 'text_type_ids', 'position_ids'])
            record = Record(
                token_ids=token_ids,
                text_type_ids=text_type_ids,
                position_ids=position_ids)
        else:
            if self.label_map:
                label_id = self.label_map[example.label]
            else:
                label_id = example.label
            Record = namedtuple('Record', [
                'token_ids', 'text_type_ids', 'position_ids', 'label_id', 'qid'
            ])
            qid = None
            if "qid" in example._fields:
                qid = example.qid
            record = Record(
                token_ids=token_ids,
                text_type_ids=text_type_ids,
                position_ids=position_ids,
                label_id=label_id,
                qid=qid)
        return record

    def _prepare_batch_data(self, examples, batch_size, phase=None):
        """generate batch records"""
        batch_records, max_len = [], 0
        for index, example in enumerate(examples):
            if phase == "train":
                self.current_example = index
            record = self._convert_example_to_record(example, self.max_seq_len,
                                                     self.tokenizer)
            max_len = max(max_len, len(record.token_ids))
            if self.in_tokens:
                # Token-budget batching based on the running max length.
                to_append = (len(batch_records) + 1) * max_len <= batch_size
            else:
                to_append = len(batch_records) < batch_size
            if to_append:
                batch_records.append(record)
            else:
                yield self._pad_batch_records(batch_records)
                batch_records, max_len = [record], len(record.token_ids)
        if batch_records:
            yield self._pad_batch_records(batch_records)

    def get_num_examples(self, input_file):
        """Return the precomputed example count (set by data_generator in train phase)."""
        # examples = self._read_tsv(input_file)
        # return len(examples)
        return self.num_examples

    def data_generator(self,
                       input_file,
                       batch_size,
                       epoch,
                       dev_count=1,
                       trainer_id=0,
                       trainer_num=1,
                       shuffle=True,
                       phase=None):
        """Build a generator factory yielding padded batches, sharded per trainer.

        Batches are emitted in groups of `dev_count` (one per device); a
        trailing group smaller than `dev_count` is dropped at epoch end.
        """
        if phase == 'train':
            # examples = examples[trainer_id: (len(examples) //trainer_num) * trainer_num : trainer_num]
            # Even per-trainer share computed from the externally supplied total_num.
            self.num_examples_per_node = self.total_num // trainer_num
            self.num_examples = self.num_examples_per_node * trainer_num
            # NOTE(review): the trainer_id/trainer_num/num_examples kwargs are only
            # accepted by ClassifyReader._read_tsv, not by the base version above.
            examples = self._read_tsv(input_file, trainer_id=trainer_id, trainer_num=trainer_num, num_examples=self.num_examples_per_node)
            log.info('apply sharding %d/%d' % (trainer_id, trainer_num))
        else:
            examples = self._read_tsv(input_file)
        def wrapper():
            all_dev_batches = []
            for epoch_index in range(epoch):
                if phase == "train":
                    self.current_example = 0
                    self.current_epoch = epoch_index
                if shuffle:
                    np.random.shuffle(examples)
                for batch_data in self._prepare_batch_data(
                        examples, batch_size, phase=phase):
                    if len(all_dev_batches) < dev_count:
                        all_dev_batches.append(batch_data)
                    if len(all_dev_batches) == dev_count:
                        for batch in all_dev_batches:
                            yield batch
                        all_dev_batches = []
        def f():
            # Logs and swallows any exception, so a data error ends the stream
            # instead of crashing the consumer.
            try:
                for i in wrapper():
                    yield i
            except Exception as e:
                import traceback
                traceback.print_exc()
        return f
class ClassifyReader(BaseReader):
    """Cross-encoder reader for headerless `query\\ttitle\\tpara\\tlabel` TSV files."""

    def _read_tsv(self, input_file, quotechar=None, trainer_id=0, trainer_num=1, num_examples=0):
        """Reads a tab separated value file."""
        with open(input_file, 'r', encoding='utf8') as f:
            # csv_reader shards lines across trainers (round-robin by line index).
            reader = csv_reader(f, trainer_id=trainer_id, trainer_num=trainer_num)
            # headers = next(reader)
            # No header row in the data file; the column layout is fixed here.
            headers = 'query\ttitle\tpara\tlabel'.split('\t')
            text_indices = [
                index for index, h in enumerate(headers) if h != "label"
            ]
            Example = namedtuple('Example', headers)
            examples = []
            for cnt, line in enumerate(reader):
                # Stop once this trainer's share of examples has been consumed.
                if num_examples != 0 and cnt == num_examples:
                    break
                for index, text in enumerate(line):
                    if index in text_indices:
                        if self.for_cn:
                            # Chinese text: strip inner spaces left by pre-tokenization.
                            line[index] = text.replace(' ', '')
                        else:
                            line[index] = text
                example = Example(*line)
                examples.append(example)
            return examples

    def _pad_batch_records(self, batch_records):
        """Pad one batch of joint query+passage records into the feed list."""
        batch_token_ids = [record.token_ids for record in batch_records]
        batch_text_type_ids = [record.text_type_ids for record in batch_records]
        batch_position_ids = [record.position_ids for record in batch_records]
        if not self.is_inference:
            batch_labels = [record.label_id for record in batch_records]
            batch_labels = np.array(batch_labels).astype("int64").reshape(
                [-1, 1])
            # qid is optional; fall back to an empty tensor when absent.
            if batch_records[0].qid:
                batch_qids = [record.qid for record in batch_records]
                batch_qids = np.array(batch_qids).astype("int64").reshape(
                    [-1, 1])
            else:
                batch_qids = np.array([]).astype("int64").reshape([-1, 1])
        # padding
        padded_token_ids, input_mask = pad_batch_data(
            batch_token_ids, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids = pad_batch_data(
            batch_text_type_ids, pad_idx=self.pad_id)
        padded_position_ids = pad_batch_data(
            batch_position_ids, pad_idx=self.pad_id)
        # Task id is constant per model; broadcast it to the token-grid shape.
        padded_task_ids = np.ones_like(
            padded_token_ids, dtype="int64") * self.task_id
        return_list = [
            padded_token_ids, padded_text_type_ids, padded_position_ids,
            padded_task_ids, input_mask
        ]
        if not self.is_inference:
            return_list += [batch_labels, batch_qids]
        return return_list
| 12,992 | 36.770349 | 138 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/reader/__init__.py | 0 | 0 | 0 | py | |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/utils/args.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Arguments for configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import six
import os
import sys
import argparse
import logging
import paddle.fluid as fluid
log = logging.getLogger(__name__)
def prepare_logger(logger, debug=False, save_to_file=None):
    """Attach a console handler (and an optional file handler) to *logger*.

    The file handler is only added when *save_to_file* is given and the file
    does not exist yet. The logger is set to DEBUG and stops propagating to
    ancestor loggers. *debug* is currently unused (kept for API compatibility).
    """
    formatter = logging.Formatter(fmt='[%(levelname)s] %(asctime)s [%(filename)12s:%(lineno)5d]:\t%(message)s')
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    if save_to_file is not None and not os.path.exists(save_to_file):
        file_handler = logging.FileHandler(save_to_file)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
def str2bool(v):
    """Parse an argparse string flag into a bool ("true"/"t"/"1" => True)."""
    # argparse does not coerce "True"/"False" strings to booleans itself,
    # so this is used as the `type=` callable for boolean options.
    normalized = v.lower()
    return normalized in {"true", "t", "1"}
class ArgumentGroup(object):
    """Thin wrapper that registers related CLI options under one argparse group."""

    def __init__(self, parser, title, des):
        self._group = parser.add_argument_group(title=title, description=des)

    def add_arg(self, name, type, default, help, positional_arg=False, **kwargs):
        """Register a single option; bool options are parsed via str2bool."""
        flag = name if positional_arg else "--" + name
        parse_type = str2bool if type == bool else type
        self._group.add_argument(
            flag,
            default=default,
            type=parse_type,
            help=help + ' Default: %(default)s.',
            **kwargs)
def print_arguments(args):
    """Log every parsed CLI argument, one per line, sorted by name."""
    log.info('----------- Configuration Arguments -----------')
    for arg_name, arg_value in sorted(vars(args).items()):
        log.info('%s: %s' % (arg_name, arg_value))
    log.info('------------------------------------------------')
def check_cuda(use_cuda, err = \
    "\nYou can not set use_cuda = True in the model because you are using paddlepaddle-cpu.\n \
    Please: 1. Install paddlepaddle-gpu to run your models on GPU or 2. Set use_cuda = False to run models on CPU.\n"
    ):
    """Exit with an explanatory error when use_cuda=True on a CPU-only Paddle build."""
    try:
        # `use_cuda == True` short-circuits, so fluid is only queried when needed.
        if use_cuda == True and fluid.is_compiled_with_cuda() == False:
            log.error(err)
            sys.exit(1)
    except Exception as e:
        # Best-effort probe: any failure here is deliberately ignored.
        pass
| 2,996 | 34.678571 | 119 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/utils/init.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import six
import ast
import copy
import logging
import numpy as np
import paddle.fluid as fluid
log = logging.getLogger(__name__)
def init_checkpoint(exe, init_checkpoint_path, main_program):
    """Restore all persistable variables of *main_program* from a checkpoint dir.

    Variables whose files are missing from the directory are reported on
    stdout and simply skipped.
    """
    assert os.path.exists(
        init_checkpoint_path), "[%s] cann't be found." % init_checkpoint_path

    def _should_load(var):
        # Only persistable variables are checkpointed; skip everything else.
        if not fluid.io.is_persistable(var):
            return False
        var_path = os.path.join(init_checkpoint_path, var.name)
        found = os.path.exists(var_path)
        if not found:
            print("Var not exists: [%s]\t%s" % (var.name, var_path))
        return found

    fluid.io.load_vars(
        exe,
        init_checkpoint_path,
        main_program=main_program,
        predicate=_should_load)
    log.info("Load model from {}".format(init_checkpoint_path))
def init_pretraining_params(exe,
                            pretraining_params_path,
                            main_program):
    """Load pre-trained parameter values into *main_program*.

    Unlike init_checkpoint, only trainable Parameters are restored — optimizer
    state (moments, steps, ...) is left at its fresh initialization. Missing
    parameter files are reported on stdout and skipped.
    """
    assert os.path.exists(pretraining_params_path
                          ), "[%s] cann't be found." % pretraining_params_path

    def _param_on_disk(var):
        # Restrict to Parameters; other persistables are not part of a
        # pre-trained release.
        if not isinstance(var, fluid.framework.Parameter):
            return False
        var_path = os.path.join(pretraining_params_path, var.name)
        present = os.path.exists(var_path)
        if not present:
            print("Var not exists: [%s]\t%s" % (var.name, var_path))
        return present

    fluid.io.load_vars(
        exe,
        pretraining_params_path,
        main_program=main_program,
        predicate=_param_on_disk)
    log.info("Load pretraining parameters from {}.".format(
        pretraining_params_path))
| 2,695 | 34.946667 | 108 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/utils/__init__.py | 0 | 0 | 0 | py | |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/model/transformer_encoder.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer encoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import paddle.fluid as fluid
import paddle.fluid.layers as layers
def multi_head_attention(queries,
keys,
values,
attn_bias,
d_key,
d_value,
d_model,
n_head=1,
dropout_rate=0.,
cache=None,
param_initializer=None,
name='multi_head_att'):
"""
Multi-Head Attention. Note that attn_bias is added to the logit before
computing softmax activiation to mask certain selected positions so that
they will not considered in attention weights.
"""
keys = queries if keys is None else keys
values = keys if values is None else values
if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
raise ValueError(
"Inputs: quries, keys and values should all be 3-D tensors.")
def __compute_qkv(queries, keys, values, n_head, d_key, d_value):
"""
Add linear projection to queries, keys, and values.
"""
q = layers.fc(input=queries,
size=d_key * n_head,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(
name=name + '_query_fc.w_0',
initializer=param_initializer),
bias_attr=name + '_query_fc.b_0')
k = layers.fc(input=keys,
size=d_key * n_head,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(
name=name + '_key_fc.w_0',
initializer=param_initializer),
bias_attr=name + '_key_fc.b_0')
v = layers.fc(input=values,
size=d_value * n_head,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(
name=name + '_value_fc.w_0',
initializer=param_initializer),
bias_attr=name + '_value_fc.b_0')
return q, k, v
def __split_heads(x, n_head):
"""
Reshape the last dimension of inpunt tensor x so that it becomes two
dimensions and then transpose. Specifically, input a tensor with shape
[bs, max_sequence_length, n_head * hidden_dim] then output a tensor
with shape [bs, n_head, max_sequence_length, hidden_dim].
"""
hidden_size = x.shape[-1]
# The value 0 in shape attr means copying the corresponding dimension
# size of the input as the output dimension size.
reshaped = layers.reshape(
x=x, shape=[0, 0, n_head, hidden_size // n_head], inplace=True)
# permuate the dimensions into:
# [batch_size, n_head, max_sequence_len, hidden_size_per_head]
return layers.transpose(x=reshaped, perm=[0, 2, 1, 3])
def __combine_heads(x):
"""
Transpose and then reshape the last two dimensions of inpunt tensor x
so that it becomes one dimension, which is reverse to __split_heads.
"""
if len(x.shape) == 3: return x
if len(x.shape) != 4:
raise ValueError("Input(x) should be a 4-D Tensor.")
trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
# The value 0 in shape attr means copying the corresponding dimension
# size of the input as the output dimension size.
return layers.reshape(
x=trans_x,
shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],
inplace=True)
    def scaled_dot_product_attention(q, k, v, attn_bias, d_key, dropout_rate):
        """
        Scaled Dot-Product Attention: softmax(q @ k^T / sqrt(d_key)) @ v.
        `attn_bias`, when given, is added to the raw scores before softmax
        (the caller passes large negative values to mask padding positions).
        """
        scaled_q = layers.scale(x=q, scale=d_key**-0.5)
        product = layers.matmul(x=scaled_q, y=k, transpose_y=True)
        if attn_bias:
            product += attn_bias
        weights = layers.softmax(product)
        if dropout_rate:
            # "upscale_in_train" keeps the expected activation scale so no
            # rescaling is needed at inference time.
            weights = layers.dropout(
                weights,
                dropout_prob=dropout_rate,
                dropout_implementation="upscale_in_train",
                is_test=False)
        out = layers.matmul(weights, v)
        return out
    # Project the inputs into per-head query/key/value spaces.
    q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)
    if cache is not None:  # use cache and concat time steps
        # Since the inplace reshape in __split_heads changes the shape of k and
        # v, which is the cache input for next time step, reshape the cache
        # input from the previous time step first.
        k = cache["k"] = layers.concat(
            [layers.reshape(
                cache["k"], shape=[0, 0, d_model]), k], axis=1)
        v = cache["v"] = layers.concat(
            [layers.reshape(
                cache["v"], shape=[0, 0, d_model]), v], axis=1)
    q = __split_heads(q, n_head)
    k = __split_heads(k, n_head)
    v = __split_heads(v, n_head)
    ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_key,
                                                  dropout_rate)
    # Merge the heads back into a single hidden dimension.
    out = __combine_heads(ctx_multiheads)
    # Project back to the model size.
    proj_out = layers.fc(input=out,
                         size=d_model,
                         num_flatten_dims=2,
                         param_attr=fluid.ParamAttr(
                             name=name + '_output_fc.w_0',
                             initializer=param_initializer),
                         bias_attr=name + '_output_fc.b_0')
    return proj_out
def positionwise_feed_forward(x,
                              d_inner_hid,
                              d_hid,
                              dropout_rate,
                              hidden_act,
                              param_initializer=None,
                              name='ffn'):
    """
    Position-wise Feed-Forward Networks.
    This module consists of two linear transformations with a ReLU activation
    in between, which is applied to each position separately and identically.

    Args:
        x: input tensor, transformed along its last dimension.
        d_inner_hid: width of the inner (expanded) layer.
        d_hid: width of the output layer (usually the model size).
        dropout_rate: dropout applied between the two fc layers; 0 disables.
        hidden_act: activation name for the inner layer.
        param_initializer: initializer for the fc weights.
        name: parameter-name prefix.
    """
    hidden = layers.fc(input=x,
                       size=d_inner_hid,
                       num_flatten_dims=2,
                       act=hidden_act,
                       param_attr=fluid.ParamAttr(
                           name=name + '_fc_0.w_0',
                           initializer=param_initializer),
                       bias_attr=name + '_fc_0.b_0')
    if dropout_rate:
        hidden = layers.dropout(
            hidden,
            dropout_prob=dropout_rate,
            dropout_implementation="upscale_in_train",
            is_test=False)
    out = layers.fc(input=hidden,
                    size=d_hid,
                    num_flatten_dims=2,
                    param_attr=fluid.ParamAttr(
                        name=name + '_fc_1.w_0', initializer=param_initializer),
                    bias_attr=name + '_fc_1.b_0')
    return out
def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.,
                           name=''):
    """
    Add residual connection, layer normalization and droput to the out tensor
    optionally according to the value of process_cmd.
    This will be used before or after multi-head attention and position-wise
    feed-forward networks.

    process_cmd is a string of single-letter commands applied in order:
    "a" = residual add, "n" = layer norm, "d" = dropout.
    """
    for cmd in process_cmd:
        if cmd == "a":  # add residual connection
            # NOTE(review): relies on `prev_out` being None for the
            # "no residual" case (pre_process_layer binds it to None) —
            # truthiness of a non-None fluid Variable here; confirm.
            out = out + prev_out if prev_out else out
        elif cmd == "n":  # add layer normalization
            out_dtype = out.dtype
            # layer_norm runs in fp32 and the result is cast back when the
            # input was fp16.
            if out_dtype == fluid.core.VarDesc.VarType.FP16:
                out = layers.cast(x=out, dtype="float32")
            out = layers.layer_norm(
                out,
                begin_norm_axis=len(out.shape) - 1,
                param_attr=fluid.ParamAttr(
                    name=name + '_layer_norm_scale',
                    initializer=fluid.initializer.Constant(1.)),
                bias_attr=fluid.ParamAttr(
                    name=name + '_layer_norm_bias',
                    initializer=fluid.initializer.Constant(0.)))
            if out_dtype == fluid.core.VarDesc.VarType.FP16:
                out = layers.cast(x=out, dtype="float16")
        elif cmd == "d":  # add dropout
            if dropout_rate:
                out = layers.dropout(
                    out,
                    dropout_prob=dropout_rate,
                    dropout_implementation="upscale_in_train",
                    is_test=False)
    return out
# pre_process_layer has no residual input (prev_out fixed to None);
# post_process_layer is the same function with prev_out supplied by callers.
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
def encoder_layer(enc_input,
                  attn_bias,
                  n_head,
                  d_key,
                  d_value,
                  d_model,
                  d_inner_hid,
                  prepostprocess_dropout,
                  attention_dropout,
                  relu_dropout,
                  hidden_act,
                  preprocess_cmd="n",
                  postprocess_cmd="da",
                  param_initializer=None,
                  name=''):
    """The encoder layers that can be stacked to form a deep encoder.
    This module consits of a multi-head (self) attention followed by
    position-wise feed-forward networks and both the two components companied
    with the post_process_layer to add residual connection, layer normalization
    and droput.

    Returns a tuple (layer_output, ffd_output); the second item is collected
    by `encoder` as a recompute checkpoint.
    """
    # NOTE(review): None keys/values presumably make multi_head_attention do
    # self-attention over enc_input — the defaulting happens in its (not
    # shown) header; confirm.
    attn_output = multi_head_attention(
        pre_process_layer(
            enc_input,
            preprocess_cmd,
            prepostprocess_dropout,
            name=name + '_pre_att'),
        None,
        None,
        attn_bias,
        d_key,
        d_value,
        d_model,
        n_head,
        attention_dropout,
        param_initializer=param_initializer,
        name=name + '_multi_head_att')
    attn_output = post_process_layer(
        enc_input,
        attn_output,
        postprocess_cmd,
        prepostprocess_dropout,
        name=name + '_post_att')
    ffd_output = positionwise_feed_forward(
        pre_process_layer(
            attn_output,
            preprocess_cmd,
            prepostprocess_dropout,
            name=name + '_pre_ffn'),
        d_inner_hid,
        d_model,
        relu_dropout,
        hidden_act,
        param_initializer=param_initializer,
        name=name + '_ffn')
    return post_process_layer(
        attn_output,
        ffd_output,
        postprocess_cmd,
        prepostprocess_dropout,
        name=name + '_post_ffn'), ffd_output
def encoder(enc_input,
            attn_bias,
            n_layer,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            prepostprocess_dropout,
            attention_dropout,
            relu_dropout,
            hidden_act,
            preprocess_cmd="n",
            postprocess_cmd="da",
            param_initializer=None,
            model_name='',
            name=''):
    """
    The encoder is composed of a stack of identical layers returned by calling
    encoder_layer.

    Returns:
        (enc_output, checkpoints): final hidden states after a closing
        pre_process_layer, plus the per-layer ffd outputs collected for
        gradient recomputation.
    """
    checkpoints = []
    for i in range(n_layer):
        enc_output, cp = encoder_layer(
            enc_input,
            attn_bias,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            prepostprocess_dropout,
            attention_dropout,
            relu_dropout,
            hidden_act,
            preprocess_cmd,
            postprocess_cmd,
            param_initializer=param_initializer,
            name=name + '_layer_' + str(i))
        checkpoints.append(cp)
        # Output of layer i feeds layer i + 1.
        enc_input = enc_output
    enc_output = pre_process_layer(
        enc_output, preprocess_cmd, prepostprocess_dropout, name=model_name+"post_encoder")
    return enc_output, checkpoints
| 12,649 | 35.666667 | 91 | py |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/model/__init__.py | 0 | 0 | 0 | py | |
RocketQA | RocketQA-main/research/RocketQA_NAACL2021/model/src/model/ernie.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ernie model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import json
import six
import logging
import paddle.fluid as fluid
from io import open
from model.transformer_encoder import encoder, pre_process_layer
log = logging.getLogger(__name__)
class ErnieConfig(object):
    """Parses and exposes an ERNIE model configuration stored as JSON.

    Missing keys read as ``None`` via ``__getitem__`` so callers can probe
    optional settings without try/except.
    """

    def __init__(self, config_path):
        """Load and parse the JSON config at ``config_path``."""
        self._config_dict = self._parse(config_path)

    def _parse(self, config_path):
        """Return the parsed config dict.

        Raises:
            IOError: if the file cannot be read or is not valid JSON; the
                underlying error is included in the message so the failure
                cause is not silently discarded.
        """
        try:
            with open(config_path, 'r', encoding='utf8') as json_file:
                config_dict = json.load(json_file)
        except Exception as e:
            raise IOError("Error in parsing Ernie model config file '%s': %s" %
                          (config_path, e))
        else:
            return config_dict

    def __getitem__(self, key):
        # Unknown keys yield None rather than raising KeyError.
        return self._config_dict.get(key, None)

    def print_config(self):
        """Log every config entry in sorted key order."""
        for arg, value in sorted(six.iteritems(self._config_dict)):
            log.info('%s: %s' % (arg, value))
        log.info('------------------------------------------------')
class ErnieModel(object):
    """ERNIE transformer encoder.

    Builds the embedding lookups, the pairwise self-attention mask and the
    stacked transformer encoder, and exposes helpers for sequence/pooled
    outputs plus the pre-training (masked-LM and auxiliary task) heads.
    """

    def __init__(self,
                 src_ids,
                 position_ids,
                 sentence_ids,
                 task_ids,
                 input_mask,
                 config,
                 weight_sharing=True,
                 model_name='',
                 is_noise=False):
        # Hyper-parameters come from an ErnieConfig; absent keys read as None.
        self._emb_size = config['hidden_size']
        self._n_layer = config['num_hidden_layers']
        self._n_head = config['num_attention_heads']
        self._voc_size = config['vocab_size']
        self._max_position_seq_len = config['max_position_embeddings']
        # Prefer the dedicated sentence-type vocab size when present.
        if config['sent_type_vocab_size']:
            self._sent_types = config['sent_type_vocab_size']
        else:
            self._sent_types = config['type_vocab_size']
        self._use_task_id = config['use_task_id']
        if self._use_task_id:
            self._task_types = config['task_type_vocab_size']
        self._hidden_act = config['hidden_act']
        self._prepostprocess_dropout = config['hidden_dropout_prob']
        self._attention_dropout = config['attention_probs_dropout_prob']
        # `is_noise` turns off all dropout so the forward pass is
        # deterministic.
        if is_noise:
            self._prepostprocess_dropout = 0
            self._attention_dropout = 0
        self._weight_sharing = weight_sharing
        self.checkpoints = []

        # Parameter-name stems; `model_name` prefixes them so several towers
        # can coexist in one program without name clashes.
        self._word_emb_name = "word_embedding"
        self._pos_emb_name = "pos_embedding"
        self._sent_emb_name = "sent_embedding"
        self._task_emb_name = "task_embedding"
        self._emb_dtype = "float32"

        # Initialize all weigths by truncated normal initializer, and all biases
        # will be initialized by constant zero by default.
        self._param_initializer = fluid.initializer.TruncatedNormal(
            scale=config['initializer_range'])

        self._build_model(model_name, src_ids, position_ids, sentence_ids, task_ids,
                          input_mask)

    def _build_model(self, model_name, src_ids, position_ids, sentence_ids, task_ids,
                     input_mask):
        """Assemble embeddings and the attention bias, then run the encoder."""
        # padding id in vocabulary must be set to 0
        emb_out = fluid.layers.embedding(
            input=src_ids,
            size=[self._voc_size, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=model_name + self._word_emb_name, initializer=self._param_initializer),
            is_sparse=False)

        position_emb_out = fluid.layers.embedding(
            input=position_ids,
            size=[self._max_position_seq_len, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=model_name + self._pos_emb_name, initializer=self._param_initializer))

        sent_emb_out = fluid.layers.embedding(
            sentence_ids,
            size=[self._sent_types, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=model_name + self._sent_emb_name, initializer=self._param_initializer))

        # Sum token + position + sentence-type embeddings.
        emb_out = emb_out + position_emb_out
        emb_out = emb_out + sent_emb_out

        if self._use_task_id:
            task_emb_out = fluid.layers.embedding(
                task_ids,
                size=[self._task_types, self._emb_size],
                dtype=self._emb_dtype,
                param_attr=fluid.ParamAttr(
                    name=model_name + self._task_emb_name,
                    initializer=self._param_initializer))

            emb_out = emb_out + task_emb_out

        # 'nd' = layer-norm then dropout on the summed embeddings.
        emb_out = pre_process_layer(
            emb_out, 'nd', self._prepostprocess_dropout, name=model_name + 'pre_encoder')

        # NOTE(review): input_mask is presumably (batch, seq, 1) with 1 for
        # real tokens; mask @ mask^T yields the pairwise visibility matrix,
        # then scale(10000, bias=-1) maps allowed pairs to 0 and masked pairs
        # to -10000 — confirm against the reader.
        self_attn_mask = fluid.layers.matmul(
            x=input_mask, y=input_mask, transpose_y=True)

        self_attn_mask = fluid.layers.scale(
            x=self_attn_mask, scale=10000.0, bias=-1.0, bias_after_scale=False)
        # Replicate the bias once per attention head.
        n_head_self_attn_mask = fluid.layers.stack(
            x=[self_attn_mask] * self._n_head, axis=1)
        n_head_self_attn_mask.stop_gradient = True

        self._enc_out, self.checkpoints = encoder(
            enc_input=emb_out,
            attn_bias=n_head_self_attn_mask,
            n_layer=self._n_layer,
            n_head=self._n_head,
            d_key=self._emb_size // self._n_head,
            d_value=self._emb_size // self._n_head,
            d_model=self._emb_size,
            d_inner_hid=self._emb_size * 4,
            prepostprocess_dropout=self._prepostprocess_dropout,
            attention_dropout=self._attention_dropout,
            relu_dropout=0,
            hidden_act=self._hidden_act,
            preprocess_cmd="",
            postprocess_cmd="dan",
            param_initializer=self._param_initializer,
            model_name=model_name,
            name=model_name+'encoder')

    def get_sequence_output(self):
        """Return the final per-token encoder states."""
        return self._enc_out

    def get_cls_output(self):
        """Get the first feature of each sequence for classification"""
        # Raw [CLS] state, without the pooled tanh projection below.
        cls_output = fluid.layers.slice(
            input=self._enc_out, axes=[1], starts=[0], ends=[1])
        cls_output = fluid.layers.squeeze(cls_output, axes=[1])
        return cls_output

    def get_pooled_output(self):
        """Get the first feature of each sequence for classification"""
        next_sent_feat = fluid.layers.slice(
            input=self._enc_out, axes=[1], starts=[0], ends=[1])
        # [CLS] state passed through a tanh fc ("pooler").
        next_sent_feat = fluid.layers.fc(
            input=next_sent_feat,
            size=self._emb_size,
            act="tanh",
            param_attr=fluid.ParamAttr(
                name="pooled_fc.w_0", initializer=self._param_initializer),
            bias_attr="pooled_fc.b_0")
        return next_sent_feat

    def get_lm_output(self, mask_label, mask_pos):
        """Get the loss & accuracy for pretraining"""
        mask_pos = fluid.layers.cast(x=mask_pos, dtype='int32')

        # extract the first token feature in each sentence
        self.next_sent_feat = self.get_pooled_output()
        reshaped_emb_out = fluid.layers.reshape(
            x=self._enc_out, shape=[-1, self._emb_size])
        # extract masked tokens' feature
        mask_feat = fluid.layers.gather(input=reshaped_emb_out, index=mask_pos)

        # transform: fc
        mask_trans_feat = fluid.layers.fc(
            input=mask_feat,
            size=self._emb_size,
            act=self._hidden_act,
            param_attr=fluid.ParamAttr(
                name='mask_lm_trans_fc.w_0',
                initializer=self._param_initializer),
            bias_attr=fluid.ParamAttr(name='mask_lm_trans_fc.b_0'))

        # transform: layer norm
        mask_trans_feat = fluid.layers.layer_norm(
            mask_trans_feat,
            begin_norm_axis=len(mask_trans_feat.shape) - 1,
            param_attr=fluid.ParamAttr(
                name='mask_lm_trans_layer_norm_scale',
                initializer=fluid.initializer.Constant(1.)),
            # NOTE(review): the norm bias is initialized to 1.0 rather than
            # the usual 0.0; kept as-is for checkpoint compatibility —
            # confirm intent.
            bias_attr=fluid.ParamAttr(
                name='mask_lm_trans_layer_norm_bias',
                initializer=fluid.initializer.Constant(1.)))
        # transform: layer norm
        #mask_trans_feat = pre_process_layer(
        #    mask_trans_feat, 'n', name='mask_lm_trans')

        mask_lm_out_bias_attr = fluid.ParamAttr(
            name="mask_lm_out_fc.b_0",
            initializer=fluid.initializer.Constant(value=0.0))
        if self._weight_sharing:
            # Tie the output projection to the (un-prefixed) word embedding
            # table and add a learned per-vocab bias.
            fc_out = fluid.layers.matmul(
                x=mask_trans_feat,
                y=fluid.default_main_program().global_block().var(
                    self._word_emb_name),
                transpose_y=True)
            fc_out += fluid.layers.create_parameter(
                shape=[self._voc_size],
                dtype=self._emb_dtype,
                attr=mask_lm_out_bias_attr,
                is_bias=True)

        else:
            fc_out = fluid.layers.fc(input=mask_trans_feat,
                                     size=self._voc_size,
                                     param_attr=fluid.ParamAttr(
                                         name="mask_lm_out_fc.w_0",
                                         initializer=self._param_initializer),
                                     bias_attr=mask_lm_out_bias_attr)

        mask_lm_loss = fluid.layers.softmax_with_cross_entropy(
            logits=fc_out, label=mask_label)
        mean_mask_lm_loss = fluid.layers.mean(mask_lm_loss)

        return mean_mask_lm_loss

    def get_task_output(self, task, task_labels):
        """Mean loss and accuracy for an auxiliary classification head.

        `task` is a dict with "task_name" (parameter prefix) and
        "num_labels" (number of classes).
        """
        task_fc_out = fluid.layers.fc(
            input=self.next_sent_feat,
            size=task["num_labels"],
            param_attr=fluid.ParamAttr(
                name=task["task_name"] + "_fc.w_0",
                initializer=self._param_initializer),
            bias_attr=task["task_name"] + "_fc.b_0")
        task_loss, task_softmax = fluid.layers.softmax_with_cross_entropy(
            logits=task_fc_out, label=task_labels, return_softmax=True)
        task_acc = fluid.layers.accuracy(input=task_softmax, label=task_labels)
        mean_task_loss = fluid.layers.mean(task_loss)
        return mean_task_loss, task_acc
| 10,858 | 38.631387 | 92 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/metric/tokenizers.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Most of the tokenizers code here is copied from DrQA codebase to avoid adding extra dependency
"""
import copy
import logging
import regex
logger = logging.getLogger(__name__)
class Tokens(object):
    """A list of tokens produced by a Tokenizer.

    Each element of ``data`` is a tuple whose fields are indexed by the
    class constants below (text, text-with-trailing-whitespace, character
    span, and optional POS/lemma/NER annotations).
    """
    TEXT = 0
    TEXT_WS = 1
    SPAN = 2
    POS = 3
    LEMMA = 4
    NER = 5

    def __init__(self, data, annotators, opts=None):
        self.data = data
        self.annotators = annotators
        self.opts = {} if not opts else opts

    def __len__(self):
        """Number of tokens."""
        return len(self.data)

    def slice(self, i=None, j=None):
        """Return a shallow view over tokens [i, j)."""
        view = copy.copy(self)
        view.data = self.data[i:j]
        return view

    def untokenize(self):
        """Reconstruct the original text (whitespace reinserted)."""
        pieces = [tok[self.TEXT_WS] for tok in self.data]
        return ''.join(pieces).strip()

    def words(self, uncased=False):
        """Token texts; lower-cased when ``uncased`` is True."""
        if uncased:
            return [tok[self.TEXT].lower() for tok in self.data]
        return [tok[self.TEXT] for tok in self.data]

    def offsets(self):
        """[start, end) character offsets of each token."""
        return [tok[self.SPAN] for tok in self.data]

    def pos(self):
        """Part-of-speech tags, or None when not annotated."""
        if 'pos' not in self.annotators:
            return None
        return [tok[self.POS] for tok in self.data]

    def lemmas(self):
        """Lemmatized texts, or None when not annotated."""
        if 'lemma' not in self.annotators:
            return None
        return [tok[self.LEMMA] for tok in self.data]

    def entities(self):
        """NER tags, or None when not annotated."""
        if 'ner' not in self.annotators:
            return None
        return [tok[self.NER] for tok in self.data]

    def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):
        """All ngrams of length 1..n.

        Args:
            n: upper limit of ngram length.
            uncased: lower-case the words first.
            filter_fn: optional predicate; ngrams for which it returns True
                are dropped.
            as_strings: return joined strings instead of (start, end) spans.
        """
        words = self.words(uncased)
        spans = []
        for start in range(len(words)):
            for stop in range(start, min(start + n, len(words))):
                gram = words[start:stop + 1]
                if filter_fn and filter_fn(gram):
                    continue
                spans.append((start, stop + 1))

        if as_strings:
            return [' '.join(words[s:e]) for (s, e) in spans]
        return spans

    def entity_groups(self):
        """Group consecutive tokens that share the same (non-background) NER tag."""
        entities = self.entities()
        if not entities:
            return None
        non_ent = self.opts.get('non_ent', 'O')
        groups = []
        idx = 0
        n_tokens = len(entities)
        while idx < n_tokens:
            tag = entities[idx]
            if tag == non_ent:
                idx += 1
                continue
            # Chomp the run of tokens carrying this tag.
            start = idx
            while idx < n_tokens and entities[idx] == tag:
                idx += 1
            groups.append((self.slice(start, idx).untokenize(), tag))
        return groups
class Tokenizer(object):
    """Base tokenizer class.
    Tokenizers implement tokenize, which should return a Tokens class.
    """
    def tokenize(self, text):
        # Subclasses must override and return a Tokens instance.
        raise NotImplementedError

    def shutdown(self):
        # Release any held resources; no-op by default.
        pass

    def __del__(self):
        # Best-effort cleanup when the tokenizer is garbage collected.
        self.shutdown()
class SimpleTokenizer(Tokenizer):
    """Regex-based tokenizer: words are runs of letters/digits/marks, and
    any other non-whitespace character is its own token."""
    ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'
    NON_WS = r'[^\p{Z}\p{C}]'

    def __init__(self, **kwargs):
        """
        Args:
            annotators: None or empty set (only tokenizes).
        """
        pattern = '(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS)
        self._regexp = regex.compile(
            pattern,
            flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
        )
        if len(kwargs.get('annotators', {})) > 0:
            logger.warning('%s only tokenizes! Skipping annotators: %s' %
                           (type(self).__name__, kwargs.get('annotators')))
        self.annotators = set()

    def tokenize(self, text):
        """Split ``text`` into a Tokens object.

        Each token records its text, the text plus trailing whitespace (up
        to the next token), and its character span.
        """
        data = []
        matches = list(self._regexp.finditer(text))
        total = len(matches)
        for idx, match in enumerate(matches):
            token = match.group()
            span = match.span()
            ws_start = span[0]
            # Trailing whitespace runs up to the start of the next token.
            if idx + 1 < total:
                ws_end = matches[idx + 1].span()[0]
            else:
                ws_end = span[1]
            data.append((token, text[ws_start:ws_end], span))
        return Tokens(data, self.annotators)
| 5,679 | 28.278351 | 94 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/metric/msmarco_eval.py | """
This module computes evaluation metrics for MSMARCO dataset on the ranking task.
Command line:
python msmarco_eval_ranking.py <path_to_reference_file> <path_to_candidate_file>
Creation Date : 06/12/2018
Last Modified : 1/21/2019
Authors : Daniel Campos <dacamp@microsoft.com>, Rutger van Haasteren <ruvanh@microsoft.com>
"""
import sys
from collections import Counter
MaxMRRRank = 10
def load_reference_from_stream(f):
    """Load Reference reference relevant passages
    Args:f (stream): stream to load.
    Returns:qids_to_relevant_passageids (dict): dictionary mapping from query_id (int) to relevant passages (list of ints).
    Raises:IOError: if a line is not tab-separated integer ids.
    """
    qids_to_relevant_passageids = {}
    for line in f:
        stripped = line.strip()
        try:
            parts = stripped.split('\t')
            qid = int(parts[0])
            pid = int(parts[1])
        except (ValueError, IndexError):
            # Report the offending input line itself (the original code
            # formatted the already-split list into the message).
            raise IOError('"%s" is not valid format' % stripped)
        qids_to_relevant_passageids.setdefault(qid, []).append(pid)
    return qids_to_relevant_passageids
def load_reference(path_to_reference):
    """Load the reference relevant passages from a file.

    Args:
        path_to_reference (str): path of the tsv file to load.
    Returns:
        dict mapping query_id (int) to a list of relevant passage ids (int).
    """
    with open(path_to_reference, 'r') as ref_in:
        return load_reference_from_stream(ref_in)
def load_candidate_from_stream(f):
    """Load candidate data from a stream.
    Args:f (stream): stream to load.
    Returns:qid_to_ranked_candidate_passages (dict): dictionary mapping from query_id (int) to a list of 1000 passage ids(int) ranked by relevance and importance
    Raises:IOError: if a line is not "qid<TAB>pid<TAB>rank" with a rank in [1, 1000].
    """
    qid_to_ranked_candidate_passages = {}
    for line in f:
        stripped = line.strip()
        try:
            parts = stripped.split('\t')
            qid = int(parts[0])
            pid = int(parts[1])
            rank = int(parts[2])
            if qid not in qid_to_ranked_candidate_passages:
                # By default, all PIDs in the list of 1000 are 0. Only override those that are given
                qid_to_ranked_candidate_passages[qid] = [0] * 1000
            qid_to_ranked_candidate_passages[qid][rank - 1] = pid
        except (ValueError, IndexError):
            # Report the offending input line itself (the original code
            # formatted the already-split list into the message).
            raise IOError('"%s" is not valid format' % stripped)
    return qid_to_ranked_candidate_passages
def load_candidate(path_to_candidate):
    """Load ranked candidate passages from a file.

    Args:
        path_to_candidate (str): path of the tsv file to load.
    Returns:
        dict mapping query_id (int) to a 1000-slot list of passage ids.
    """
    with open(path_to_candidate, 'r') as cand_in:
        return load_candidate_from_stream(cand_in)
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Perform quality checks on the dictionaries

    Args:
        qids_to_relevant_passageids (dict): reference query->passages mapping
            (currently only kept for interface symmetry).
        qids_to_ranked_candidate_passages (dict): candidate query->passages.
    Returns:
        bool,str: Boolean whether allowed, message to be shown in case of a problem
    """
    # Check that we do not have multiple passages per query.
    for qid, pids in qids_to_ranked_candidate_passages.items():
        # 0 is the padding value, so repeated zeros are expected and ignored.
        counts = Counter(pids)
        duplicate_pids = {pid for pid, cnt in counts.items()
                          if cnt > 1 and pid != 0}
        if duplicate_pids:
            # sorted() makes the reported PID deterministic.
            message = "Cannot rank a passage multiple times for a single query. QID={qid}, PID={pid}".format(
                qid=qid, pid=sorted(duplicate_pids)[0])
            return False, message
    return True, ''
def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Compute MRR metric
    Args:
        p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping
            Dict as read in with load_reference or load_reference_from_stream
        p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates
    Returns:
        dict: dictionary of metrics {'MRR': <MRR Score>}

    Note: all rates are divided by the number of *reference* queries, so
    candidate queries missing from the reference lower the scores.
    """
    all_scores = {}
    MRR = 0
    qids_with_relevant_passages = 0  # NOTE(review): never updated/used below
    ranking = []
    recall_q_top1 = set()
    recall_q_top50 = set()
    recall_q_all = set()

    for qid in qids_to_ranked_candidate_passages:
        if qid in qids_to_relevant_passageids:
            ranking.append(0)
            target_pid = qids_to_relevant_passageids[qid]
            candidate_pid = qids_to_ranked_candidate_passages[qid]
            # MRR@10: first hit within the top MaxMRRRank (=10) candidates.
            for i in range(0, MaxMRRRank):
                if candidate_pid[i] in target_pid:
                    MRR += 1.0 / (i + 1)
                    # Replace the 0 placeholder with the 1-based hit rank.
                    ranking.pop()
                    ranking.append(i + 1)
                    break
            # Recall sets: first hit anywhere in the (padded) candidate list.
            for i, pid in enumerate(candidate_pid):
                if pid in target_pid:
                    recall_q_all.add(qid)
                    if i < 50:
                        recall_q_top50.add(qid)
                    if i == 0:
                        recall_q_top1.add(qid)
                    break
    if len(ranking) == 0:
        raise IOError("No matching QIDs found. Are you sure you are scoring the evaluation set?")

    MRR = MRR / len(qids_to_relevant_passageids)
    recall_top1 = len(recall_q_top1) * 1.0 / len(qids_to_relevant_passageids)
    recall_top50 = len(recall_q_top50) * 1.0 / len(qids_to_relevant_passageids)
    recall_all = len(recall_q_all) * 1.0 / len(qids_to_relevant_passageids)
    all_scores['MRR @10'] = MRR
    all_scores["recall@1"] = recall_top1
    all_scores["recall@50"] = recall_top50
    all_scores["recall@all"] = recall_all
    all_scores['QueriesRanked'] = len(qids_to_ranked_candidate_passages)
    return all_scores
def compute_metrics_from_files(path_to_reference, path_to_candidate, perform_checks=True):
    """Compute ranking metrics from a reference file and a candidate file.

    Args:
        path_to_reference (str): tsv of ``QUERYID\\tPASSAGEID`` lines; a
            QUERYID may repeat with different relevant PASSAGEIDs.
        path_to_candidate (str): tsv of ``QUERYID\\tPASSAGEID\\tRANK`` lines
            ranked in order of relevance.
        perform_checks (bool): run duplicate-passage sanity checks first and
            print any problem message.
    Returns:
        dict: metric name -> value (MRR@10, recall@k, QueriesRanked).
    """
    reference = load_reference(path_to_reference)
    candidates = load_candidate(path_to_candidate)
    if perform_checks:
        _, message = quality_checks_qids(reference, candidates)
        if message != '':
            print(message)

    return compute_metrics(reference, candidates)
def main():
    """Command line:
    python msmarco_eval_ranking.py <path_to_reference_file> <path_to_candidate_file>
    """
    if len(sys.argv) == 3:
        path_to_reference, path_to_candidate = sys.argv[1], sys.argv[2]
    else:
        # Without explicit arguments, fall back to the default files
        # shipped under metric/.
        path_to_reference = 'metric/qp_reference.all.tsv'
        path_to_candidate = 'metric/ranking_res'

    metrics = compute_metrics_from_files(path_to_reference, path_to_candidate)
    print('#####################')
    for metric in sorted(metrics):
        print('{}: {}'.format(metric, metrics[metric]))
    print('#####################')


if __name__ == '__main__':
    main()
| 8,406 | 38.843602 | 161 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/metric/nq_eval.py | import sys
import numpy as np
sys.path.append('data_process/')
sys.path.append('metric/')
from tokenizers import SimpleTokenizer
import utils
import unicodedata
# Command-line argument: path of the retrieval candidate file to evaluate.
recall_cands_file = sys.argv[1]
topk = 100  # NOTE(review): declared top-k; the loop below hard-codes 100/50/...
# Gold answers and corpus texts for the NQ test split.
answers = utils.load_answers('test')
q_text, p_text, p_title = utils.load_corpus(corpus='nq', q_type='test')
# Candidate file has 4 columns: qid, pid, rank, score.
cand_qp_all, train_qids = utils.load_candidates(recall_cands_file, col=4)
def has_answer(answers, text, tokenizer, match_type):
    """Return 1 if any answer appears as a token subsequence of text, else 0.

    Both text and answers are NFD-normalized and compared on lower-cased
    tokens. Only match_type == 'string' is implemented; anything else
    yields 0.
    """
    text = unicodedata.normalize('NFD', text)
    if match_type == 'string':
        text = tokenizer.tokenize(text).words(uncased=True)
        for single_answer in answers:
            normalized = unicodedata.normalize('NFD', single_answer)
            answer_tokens = tokenizer.tokenize(normalized).words(uncased=True)
            width = len(answer_tokens)
            # Slide a window of the answer's length across the text tokens.
            for start in range(len(text) - width + 1):
                if text[start:start + width] == answer_tokens:
                    return 1
    return 0
print('calculating acc')
# Sets of query ids whose top-k candidates contain an answer string.
right_top100 = set()
right_top50 = set()
right_top20 = set()
right_top10 = set()
right_top5 = set()
tok_opts = {}
tokenizer = SimpleTokenizer(**tok_opts)
for qid, pids in cand_qp_all.items():
    answer = answers[qid]
    # Stop at the first (highest-ranked) candidate containing an answer;
    # its rank i determines which recall buckets the query falls into.
    for i, pid in enumerate(pids):
        if has_answer(answer, p_text[pid], tokenizer, 'string'):
            if i < 100:
                right_top100.add(qid)
            if i < 50:
                right_top50.add(qid)
            if i < 20:
                right_top20.add(qid)
            if i < 10:
                right_top10.add(qid)
            if i < 5:
                right_top5.add(qid)
            break

# Recall@k = fraction of evaluated queries answered within the top k.
query_num = len(cand_qp_all)
print(query_num)
print(len(right_top100))
r100 = len(right_top100) * 1.0 / query_num
r50 = len(right_top50) * 1.0 / query_num
r20 = len(right_top20) * 1.0 / query_num
r10 = len(right_top10) * 1.0 / query_num
r5 = len(right_top5) * 1.0 / query_num
print('recall@100: ' + str(r100))
print('recall@50: ' + str(r50))
print('recall@20: ' + str(r20))
print('recall@10: ' + str(r10))
print('recall@5: ' + str(r5))
| 2,194 | 31.279412 | 73 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/data_process/utils.py | import sys
import os
cur_path = os.path.dirname(os.path.realpath(__file__))
corpus_path = 'corpus/'
def load_id_text(file_name):
    """Load a two-column (id<TAB>text) tsv file.

    Args:
        file_name: path of the tsv file.
    Returns:
        dict mapping id (str) to text (str).
    """
    id_text = {}
    with open(file_name) as inp:
        for line in inp:
            # Split on the first tab only so passages that themselves
            # contain tab characters are kept intact instead of raising
            # ValueError.
            doc_id, text = line.strip().split('\t', 1)
            id_text[doc_id] = text
    return id_text
def load_corpus(corpus='marco', q_type='train', unlabel=False):
    """Load queries, passage texts and passage titles for a corpus.

    Args:
        corpus: corpus directory name under ``corpus_path`` ('marco' or 'nq').
        q_type: query split to load ('train', 'dev', 'test', ...).
        unlabel: when True, load the unlabeled augmentation queries instead
            of the split's labeled queries.
    Returns:
        (q_text, p_text, p_title): three id->text dicts.
    """
    if not unlabel:
        query_file = os.path.join(corpus_path, corpus, '%s.query.txt' % q_type)
    elif corpus == 'marco':
        query_file = os.path.join(corpus_path, 'augment/orcas_yahoo_nq.query.txt')
    else:
        query_file = os.path.join(corpus_path, 'augment/mrqa.query.txt')
    q_text = load_id_text(query_file)

    p_text = load_id_text(os.path.join(corpus_path, corpus, 'para.txt'))
    p_title = load_id_text(os.path.join(corpus_path, corpus, 'para.title.txt'))
    print('load all corpus done!')
    return q_text, p_text, p_title
def load_answers(q_type='train'):
    """Load the gold NQ answers for a split.

    Args:
        q_type: split name ('train', 'dev', 'test').
    Returns:
        dict mapping qid (str) to a list of normalized answer strings
        (trailing periods stripped, lower-cased).
    """
    qid_answers = {}
    answer_file = os.path.join(corpus_path, 'nq/%s.answers.txt' % q_type)
    with open(answer_file) as inp:
        for line in inp:
            fields = line.strip().split('\t')
            qid = fields[0]
            qid_answers[qid] = [ans.strip('.').lower() for ans in fields[1:]]
    print('has answer qids: %s' % len(qid_answers))
    return qid_answers
def has_answer(text, answers):
    """Return True if any answer string literally occurs in text.

    Matching is case-insensitive and ignores all spaces, mirroring the
    loose literal match used to mine additional positives.
    """
    # The text normalization is loop-invariant, so compute it once instead
    # of re-deriving it for every answer.
    norm_text = text.strip().lower().replace(' ', '')
    for answer in answers:
        norm_answer = answer.strip().lower().replace(' ', '')
        if norm_answer in norm_text:
            return True
    return False
def load_pos_examples(p_text):
    """Load MSMARCO positive qrels plus additional literal-match positives.

    Note: the ``p_text`` argument is currently unused; it is kept for
    interface compatibility with existing callers.

    Returns:
        (pos_qp, pos_qp_add): two dicts mapping qid -> list of positive pids.
    """
    pos_qp = {}
    with open(os.path.join(corpus_path, 'marco/qrels.train.tsv')) as inp:
        for line in inp:
            qid, pid = line.strip().split('\t')
            pos_qp.setdefault(qid, []).append(pid)
    print('positive qids: %s' % len(pos_qp))

    # additional positive examples(collect by literal match)
    pos_qp_add = {}
    with open(os.path.join(corpus_path, 'marco/qrels.train.addition.tsv')) as inp:
        for line in inp:
            qid, pid = line.strip().split('\t')
            pos_qp_add.setdefault(qid, []).append(pid)
    return pos_qp, pos_qp_add
def load_candidates(filename, col=4, topk=0):
    """Load retrieved candidate passages per query.

    Args:
        filename: tsv file; either qid/pid/rank/score (col=4) or
            qid/pid/rank (any other col value).
        col: number of columns per line.
        topk: when > 0, keep only candidates whose rank is <= topk.
    Returns:
        (cand_qp_all, train_qids): qid -> ordered pid list, plus the qids
        in first-seen order.
    """
    cand_qp_all = {}
    train_qids = []
    with open(filename) as inp:
        for line in inp:
            fields = line.strip().split('\t')
            if col == 4:
                qid, pid, idx, _score = fields
            else:
                qid, pid, idx = fields
            if topk > 0 and int(idx) > topk:
                continue
            if qid not in cand_qp_all:
                cand_qp_all[qid] = []
                train_qids.append(qid)
            cand_qp_all[qid].append(pid)
    print('load candidate qids: %s' % len(cand_qp_all))
    return cand_qp_all, train_qids
def load_ce_score(filename, train_qids, topk=50):
    """Load cross-encoder scores, grouping every ``topk`` lines per query.

    Args:
        filename: file with one float score per line, ordered so that the
            i-th line belongs to query ``train_qids[i // topk]``.
        train_qids: ordered qids matching the candidate file.
        topk: number of scored candidates per query.
    Returns:
        dict mapping qid -> list of float scores.
    """
    ce_score = {}
    with open(filename) as inp:
        for line_no, line in enumerate(inp):
            score = float(line.strip())
            qid = train_qids[line_no // topk]
            ce_score.setdefault(qid, []).append(score)
    print('load cross_encoder score: %s' % len(ce_score))
    return ce_score
if __name__ == '__main__':
load_answers() | 3,844 | 31.310924 | 78 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/model/src/train_de.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning on classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import time
import logging
import multiprocessing
import numpy as np
# NOTE(paddle-dev): All of these flags should be
# set before `import paddle`. Otherwise, it would
# not take any effect.
os.environ['FLAGS_eager_delete_tensor_gb'] = '0' # enable gc
import paddle.fluid as fluid
import reader.reader_de as reader_de
from model.ernie import ErnieConfig
from finetune.dual_encoder import create_model, evaluate, predict
from optimization import optimization
from utils.args import print_arguments, check_cuda, prepare_logger
from utils.init import init_pretraining_params, init_checkpoint
from finetune_args import parser
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
args = parser.parse_args()
log = logging.getLogger()
def main(args):
    """End-to-end fine-tuning driver for the dual-encoder retrieval model.

    Builds the train/eval/test Paddle static-graph programs, initializes
    parameters from a checkpoint or pretrained params, then runs the
    collective (fleet) training loop with periodic logging, checkpointing
    and validation.  Only worker 0 fetches/logs metrics; other workers just
    run the program.
    """
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()
    # Pick the execution place: first visible GPU when CUDA is on, else CPU.
    if args.use_cuda:
        dev_list = fluid.cuda_places()
        place = dev_list[0]
        dev_count = len(dev_list)
    else:
        place = fluid.CPUPlace()
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
    exe = fluid.Executor(place)
    reader = reader_de.ClassifyReader(
        vocab_path=args.vocab_path,
        label_map_config=args.label_map_config,
        q_max_seq_len=args.q_max_seq_len,
        p_max_seq_len=args.p_max_seq_len,
        total_num=args.train_data_size,
        do_lower_case=args.do_lower_case,
        in_tokens=args.in_tokens,
        random_seed=args.random_seed,
        tokenizer=args.tokenizer,
        for_cn=args.for_cn,
        task_id=args.task_id)
    if not (args.do_train or args.do_val or args.do_test):
        raise ValueError("For args `do_train`, `do_val` and `do_test`, at "
                        "least one of them must be True.")
    if args.do_test:
        assert args.test_save is not None
    startup_prog = fluid.Program()
    if args.random_seed is not None:
        startup_prog.random_seed = args.random_seed
    if args.predict_batch_size == None:
        args.predict_batch_size = args.batch_size
    if args.do_train:
        # Collective (all-reduce) distributed training via the fleet API.
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        dev_count = fleet.worker_num()
        train_data_generator = reader.data_generator(
            input_file=args.train_set,
            batch_size=args.batch_size,
            epoch=args.epoch,
            dev_count=1,
            trainer_id=fleet.worker_index(),
            trainer_num=fleet.worker_num(),
            shuffle=True,
            phase="train")
        num_train_examples = reader.get_num_examples(args.train_set)
        # In token-budget mode the effective batch count is batch_size tokens
        # divided by max_seq_len; otherwise batch_size is in examples.
        if args.in_tokens:
            if args.batch_size < args.max_seq_len:
                raise ValueError('if in_tokens=True, batch_size should greater than max_sqelen, got batch_size:%d seqlen:%d' % (args.batch_size, args.max_seq_len))
            max_train_steps = args.epoch * num_train_examples // (
                args.batch_size // args.max_seq_len) // dev_count
        else:
            max_train_steps = args.epoch * num_train_examples // args.batch_size // dev_count
        warmup_steps = int(max_train_steps * args.warmup_proportion)
        log.info("Device count: %d" % dev_count)
        log.info("Num train examples: %d" % num_train_examples)
        log.info("Max train steps: %d" % max_train_steps)
        log.info("Num warmup steps: %d" % warmup_steps)
        train_program = fluid.Program()
        # use fleet api
        exec_strategy = fluid.ExecutionStrategy()
        if args.use_fast_executor:
            exec_strategy.use_experimental_executor = True
        exec_strategy.num_threads = dev_count
        if args.is_distributed:
            exec_strategy.num_threads = 3
        exec_strategy.num_iteration_per_drop_scope = args.num_iteration_per_drop_scope
        dist_strategy = DistributedStrategy()
        dist_strategy.exec_strategy = exec_strategy
        dist_strategy.nccl_comm_num = 1
        if args.is_distributed:
            dist_strategy.nccl_comm_num = 2
            dist_strategy.use_hierarchical_allreduce = True
        if args.use_recompute:
            # Recompute trades compute for memory; sequential execution keeps
            # checkpoint recomputation deterministic.
            dist_strategy.forward_recompute = True
            dist_strategy.enable_sequential_execution = True
        if args.use_mix_precision:
            dist_strategy.use_amp = True
        with fluid.program_guard(train_program, startup_prog):
            with fluid.unique_name.guard():
                train_pyreader, graph_vars, checkpoints = create_model(
                    args,
                    pyreader_name='train_reader',
                    ernie_config=ernie_config,
                    batch_size=args.batch_size,
                    fleet_handle=fleet)
                if args.use_recompute:
                    dist_strategy.recompute_checkpoints=checkpoints
                scheduled_lr = optimization(
                    loss=graph_vars["loss"],
                    warmup_steps=warmup_steps,
                    num_train_steps=max_train_steps,
                    learning_rate=args.learning_rate,
                    train_program=train_program,
                    startup_prog=startup_prog,
                    weight_decay=args.weight_decay,
                    scheduler=args.lr_scheduler,
                    use_dynamic_loss_scaling=args.use_dynamic_loss_scaling,
                    incr_every_n_steps=args.incr_every_n_steps,
                    decr_every_n_nan_or_inf=args.decr_every_n_nan_or_inf,
                    incr_ratio=args.incr_ratio,
                    decr_ratio=args.decr_ratio,
                    dist_strategy=dist_strategy,
                    use_lamb=args.use_lamb)
    if args.do_val or args.do_test:
        # Separate test program sharing parameters with the train program
        # (same startup program / unique-name scope).
        test_prog = fluid.Program()
        with fluid.program_guard(test_prog, startup_prog):
            with fluid.unique_name.guard():
                test_pyreader, test_graph_vars = create_model(
                    args,
                    pyreader_name='test_reader',
                    ernie_config=ernie_config,
                    batch_size=args.predict_batch_size,
                    is_prediction=True)
        test_prog = test_prog.clone(for_test=True)
    # NOTE(review): fleet.main_program is only meaningful after fleet.init,
    # i.e. in the do_train path — presumably eval-only runs never reach the
    # code that uses train_program; confirm against usage below.
    train_program = fleet.main_program
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    if args.do_train:
        # Warm-start priority: full checkpoint wins over pretrained params.
        if args.init_checkpoint and args.init_pretraining_params:
            log.warning(
                "WARNING: args 'init_checkpoint' and 'init_pretraining_params' "
                "both are set! Only arg 'init_checkpoint' is made valid.")
        if args.init_checkpoint:
            init_checkpoint(
                exe,
                args.init_checkpoint,
                main_program=startup_prog)
        elif args.init_pretraining_params:
            init_pretraining_params(
                exe,
                args.init_pretraining_params,
                main_program=startup_prog)
    elif args.do_val or args.do_test:
        if not args.init_checkpoint:
            raise ValueError("args 'init_checkpoint' should be set if"
                            "only doing validation or testing!")
        init_checkpoint(
            exe,
            args.init_checkpoint,
            main_program=startup_prog)
    if args.do_train:
        train_exe = exe
        train_pyreader.decorate_tensor_provider(train_data_generator)
    else:
        train_exe = None
    test_exe = exe
    if args.do_train:
        train_pyreader.start()
        steps = 0
        if warmup_steps > 0:
            graph_vars["learning_rate"] = scheduled_lr
        ce_info = []
        time_begin = time.time()
        last_epoch = 0
        current_epoch = 0
        total_loss = []
        # Training loop: runs until the data generator is exhausted, which
        # surfaces as an EOFException from the pyreader.
        while True:
            try:
                steps += 1
                # Non-zero workers only execute the program; worker 0 also
                # fetches metrics every `skip_steps` steps.
                if fleet.worker_index() != 0:
                    train_exe.run(fetch_list=[], program=train_program)
                    continue
                if steps % args.skip_steps != 0:
                    train_exe.run(fetch_list=[], program=train_program)
                else:
                    outputs = evaluate(
                        train_exe,
                        train_program,
                        train_pyreader,
                        graph_vars,
                        "train",
                        metric=args.metric)
                    if args.verbose:
                        verbose = "train pyreader queue size: %d, " % train_pyreader.queue.size(
                        )
                        verbose += "learning rate: %f" % (
                            outputs["learning_rate"]
                            if warmup_steps > 0 else args.learning_rate)
                        log.info(verbose)
                    current_example, current_epoch = reader.get_train_progress()
                    time_end = time.time()
                    used_time = time_end - time_begin
                    total_loss.append(outputs["loss"])
                    log.info(
                        "epoch: %d, progress: %d/%d, step: %d, ave loss: %f, "
                        "ave acc: %f, speed: %f steps/s" %
                        (current_epoch, current_example * dev_count, num_train_examples,
                        steps, np.mean(total_loss), outputs["accuracy"],
                        args.skip_steps / used_time))
                    ce_info.append(
                        [outputs["loss"], outputs["accuracy"], used_time])
                    time_begin = time.time()
                if steps % args.save_steps == 0:
                    save_path = os.path.join(args.checkpoints,
                                            "step_" + str(steps))
                    fluid.io.save_persistables(exe, save_path, fleet._origin_program)
                # if steps % args.validation_steps == 0 or last_epoch != current_epoch:
                if steps % args.validation_steps == 0:
                    # evaluate dev set
                    if args.do_val:
                        evaluate_wrapper(args, reader, exe, test_prog,
                                        test_pyreader, test_graph_vars,
                                        current_epoch, steps)
                    if args.do_test:
                        predict_wrapper(args, reader, exe, test_prog,
                                        test_pyreader, test_graph_vars,
                                        current_epoch, steps)
                if last_epoch != current_epoch:
                    last_epoch = current_epoch
            except fluid.core.EOFException:
                # Data exhausted: save the final checkpoint and stop.
                save_path = os.path.join(args.checkpoints, "step_" + str(steps))
                fluid.io.save_persistables(exe, save_path, fleet._origin_program)
                train_pyreader.reset()
                break
    # final eval on dev set
    if args.do_val:
        evaluate_wrapper(args, reader, exe, test_prog, test_pyreader,
                        test_graph_vars, current_epoch, steps)
    # final eval on test set
    if args.do_test:
        predict_wrapper(args, reader, exe, test_prog, test_pyreader, test_graph_vars,
                        current_epoch, steps)
def evaluate_wrapper(args, reader, exe, test_prog, test_pyreader, graph_vars,
                     epoch, steps):
    """Evaluate each comma-separated dataset listed in args.dev_set."""
    for dev_file in args.dev_set.split(','):
        # Feed this dataset through the test pyreader, one pass, no shuffle.
        data_gen = reader.data_generator(
            dev_file,
            batch_size=args.predict_batch_size,
            epoch=1,
            dev_count=1,
            shuffle=False)
        test_pyreader.decorate_tensor_provider(data_gen)
        log.info("validation result of dataset {}:".format(dev_file))
        evaluate_info = evaluate(
            exe, test_prog, test_pyreader, graph_vars, "dev",
            metric=args.metric)
        log.info(evaluate_info + ', file: {}, epoch: {}, steps: {}'.format(
            dev_file, epoch, steps))
def predict_wrapper(args, reader, exe, test_prog, test_pyreader, graph_vars,
                    epoch, steps):
    """Run prediction on every test file and dump one probability per line.

    `args.test_set` and `args.test_save` are parallel comma-separated lists;
    predictions for test_sets[i] are written to "<save_dirs[i]>.<epoch>.<steps>".
    """
    test_sets = args.test_set.split(',')
    save_dirs = args.test_save.split(',')
    assert len(test_sets) == len(save_dirs), \
        'number of test files must match number of save paths'
    for test_f, save_f in zip(test_sets, save_dirs):
        test_pyreader.decorate_tensor_provider(
            reader.data_generator(
                test_f,
                batch_size=args.predict_batch_size,
                epoch=1,
                dev_count=1,
                shuffle=False))
        save_path = save_f + '.' + str(epoch) + '.' + str(steps)
        log.info("testing {}, save to {}".format(test_f, save_path))
        qids, preds, probs = predict(
            exe,
            test_prog,
            test_pyreader,
            graph_vars)
        save_dir = os.path.dirname(save_path)
        # BUG FIX: guard against save_dir == '' (save path in the current
        # directory) — os.makedirs('') raises FileNotFoundError.
        if save_dir and not os.path.exists(save_dir):
            os.makedirs(save_dir)
        elif save_dir:
            # BUG FIX: the old message claimed saving would be skipped, but
            # predictions are always written below (also fixes the "exsits"
            # typo).
            log.warning('save dir exists: %s, results will be overwritten' % save_dir)
        print ("DEBUG:\t" + str(len(probs)))
        with open(save_path, 'w') as f:
            for p in probs:
                f.write('{}\n'.format(p))
if __name__ == '__main__':
    # Script entry point: set up logging, echo parsed args, verify the CUDA
    # request is satisfiable, then run training/evaluation.
    prepare_logger(log)
    print_arguments(args)
    check_cuda(args.use_cuda)
    main(args)
| 14,023 | 36.198939 | 163 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/model/src/optimization.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimization and learning rate scheduling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
def linear_warmup_decay(learning_rate, warmup_steps, num_train_steps):
    """Build a scheduled learning-rate variable in the default main program.

    The LR ramps linearly from 0 to `learning_rate` over `warmup_steps`,
    then decays linearly (polynomial decay with power=1.0) to 0 at
    `num_train_steps`.  Returns the global variable holding the current LR.
    """
    with fluid.default_main_program()._lr_schedule_guard():
        # Persistable scalar updated in-graph each step via Switch below.
        lr = fluid.layers.tensor.create_global_var(
            shape=[1],
            value=0.0,
            dtype='float32',
            persistable=True,
            name="scheduled_learning_rate")
        global_step = fluid.layers.learning_rate_scheduler._decay_step_counter()
        with fluid.layers.control_flow.Switch() as switch:
            with switch.case(global_step < warmup_steps):
                # Warmup phase: fraction of the target LR.
                warmup_lr = learning_rate * (global_step / warmup_steps)
                fluid.layers.tensor.assign(warmup_lr, lr)
            with switch.default():
                # Decay phase: linear decay to 0 at num_train_steps.
                decayed_lr = fluid.layers.learning_rate_scheduler.polynomial_decay(
                    learning_rate=learning_rate,
                    decay_steps=num_train_steps,
                    end_learning_rate=0.0,
                    power=1.0,
                    cycle=False)
                fluid.layers.tensor.assign(decayed_lr, lr)
        return lr
def optimization(loss,
                 warmup_steps,
                 num_train_steps,
                 learning_rate,
                 train_program,
                 startup_prog,
                 weight_decay,
                 scheduler='linear_warmup_decay',
                 use_dynamic_loss_scaling=False,
                 incr_every_n_steps=1000,
                 decr_every_n_nan_or_inf=2,
                 incr_ratio=2.0,
                 decr_ratio=0.8,
                 dist_strategy=None,
                 use_lamb=False):
    """Attach optimizer ops for `loss` and return the scheduled-LR variable.

    Chooses Adam or Lamb, with either noam or linear-warmup LR scheduling
    (or a constant LR when warmup_steps == 0), clips gradients by global
    norm 1.0, optionally wraps the optimizer with fleet for distributed
    training, and applies decoupled (AdamW-style) weight decay manually
    after the minimize step.
    """
    if warmup_steps > 0:
        if scheduler == 'noam_decay':
            scheduled_lr = fluid.layers.learning_rate_scheduler\
                .noam_decay(1/(warmup_steps *(learning_rate ** 2)),
                            warmup_steps)
        elif scheduler == 'linear_warmup_decay':
            scheduled_lr = linear_warmup_decay(learning_rate, warmup_steps,
                                               num_train_steps)
        else:
            raise ValueError("Unkown learning rate scheduler, should be "
                             "'noam_decay' or 'linear_warmup_decay'")
        if use_lamb:
            optimizer = fluid.optimizer.LambOptimizer(learning_rate=scheduled_lr)
        else:
            optimizer = fluid.optimizer.Adam(learning_rate=scheduled_lr)
    else:
        # No warmup: constant learning rate held in a global variable.
        scheduled_lr = fluid.layers.create_global_var(
            name=fluid.unique_name.generate("learning_rate"),
            shape=[1],
            value=learning_rate,
            dtype='float32',
            persistable=True)
        if use_lamb:
            optimizer = fluid.optimizer.LambOptimizer(learning_rate=scheduled_lr)
        else:
            optimizer = fluid.optimizer.Adam(learning_rate=scheduled_lr)
        optimizer._learning_rate_map[fluid.default_main_program(
        )] = scheduled_lr
    fluid.clip.set_gradient_clip(
        clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0))
    def exclude_from_weight_decay(name):
        # Layer-norm scales and bias parameters are exempt from weight decay.
        if name.find("layer_norm") > -1:
            return True
        bias_suffix = ["_bias", "_b", ".b_0"]
        for suffix in bias_suffix:
            if name.endswith(suffix):
                return True
        return False
    # Snapshot every parameter's pre-update value (stop_gradient copies) so
    # decoupled weight decay below uses the value before the Adam/Lamb step.
    param_list = dict()
    for param in train_program.global_block().all_parameters():
        param_list[param.name] = param * 1.0
        param_list[param.name].stop_gradient = True
    if dist_strategy is not None:
        # use fleet api
        optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)
    _, param_grads = optimizer.minimize(loss)
    if weight_decay > 0:
        # Decoupled weight decay: param -= old_param * weight_decay * lr.
        for param, grad in param_grads:
            if exclude_from_weight_decay(param.name):
                continue
            with param.block.program._optimized_guard(
                [param, grad]), fluid.framework.name_scope("weight_decay"):
                updated_param = param - param_list[
                    param.name] * weight_decay * scheduled_lr
                fluid.layers.assign(output=param, input=updated_param)
    return scheduled_lr
| 5,185 | 37.701493 | 83 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/model/src/tokenization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from io import open
import collections
import unicodedata
import six
def convert_to_unicode(text):
    """Return `text` as a unicode string, decoding utf-8 bytes if needed."""
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        if isinstance(text, unicode):
            return text
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
    """Return `text` as the native `str` type, suitable for print/logging.

    On Python 3 that is unicode (bytes are utf-8-decoded); on Python 2 it
    is a byte string (unicode is utf-8-encoded).
    """
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        if isinstance(text, str):
            return text
        if isinstance(text, unicode):
            return text.encode("utf-8")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
    """Load a vocab file into an OrderedDict mapping token -> integer id.

    Each line is either "token\tid" or just "token" (the id then defaults
    to the line number).  A line with more than two tab-separated fields
    terminates the vocabulary.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, encoding='utf8') as fin:
        for line_no, line in enumerate(fin):
            fields = convert_to_unicode(line.strip()).split("\t")
            if len(fields) > 2:
                break
            token = fields[0].strip()
            index = fields[1] if len(fields) == 2 else line_no
            vocab[token] = int(index)
    return vocab
def convert_by_vocab(vocab, items):
    """Map each element of `items` through the `vocab` lookup table."""
    return [vocab[item] for item in items]
def convert_tokens_to_ids(vocab, tokens):
    """Map token strings to integer ids via `vocab` (token -> id)."""
    return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
    """Map integer ids back to token strings via `inv_vocab` (id -> token)."""
    return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
    """Split `text` on whitespace runs; returns [] for blank/empty input."""
    # str.split() with no argument collapses runs of whitespace and ignores
    # leading/trailing whitespace, so no explicit strip is needed.
    return text.split()
class FullTokenizer(object):
    """End-to-end tokenization: basic (clean/split) pass, then WordPiece."""

    def __init__(self, vocab_file, do_lower_case=True):
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {index: token for token, index in self.vocab.items()}
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        """Basic-tokenize `text`, then wordpiece-split every word."""
        return [
            piece
            for word in self.basic_tokenizer.tokenize(text)
            for piece in self.wordpiece_tokenizer.tokenize(word)
        ]

    def convert_tokens_to_ids(self, tokens):
        return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        return convert_by_vocab(self.inv_vocab, ids)
class CharTokenizer(object):
    """Tokenizer that lower-cases, splits on single spaces, then applies
    WordPiece to each piece (no punctuation/CJK handling)."""

    def __init__(self, vocab_file, do_lower_case=True):
        # NOTE: `do_lower_case` is accepted for interface parity but
        # tokenize() always lower-cases, matching the original behaviour.
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {index: token for token, index in self.vocab.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        return [
            piece
            for word in text.lower().split(" ")
            for piece in self.wordpiece_tokenizer.tokenize(word)
        ]

    def convert_tokens_to_ids(self, tokens):
        return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
    """Basic tokenizer: invalid-char cleaning, optional lower-casing with
    accent stripping, punctuation splitting and CJK-character isolation."""

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.

        Args:
            do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenize `text` into a list of word/punctuation tokens."""
        text = self._clean_text(convert_to_unicode(text))
        # CJK characters get spaces around them so each becomes its own
        # token.  This is applied to the English models too; it is harmless
        # since they were not trained on any meaningful amount of Chinese
        # data (the vocab merely contains some Chinese from Wikipedia).
        text = self._tokenize_chinese_chars(text)
        pieces = []
        for word in whitespace_tokenize(text):
            if self.do_lower_case:
                word = self._run_strip_accents(word.lower())
            pieces.extend(self._run_split_on_punc(word))
        return whitespace_tokenize(" ".join(pieces))

    def _run_strip_accents(self, text):
        """Remove accents by NFD-decomposing and dropping combining marks."""
        decomposed = unicodedata.normalize("NFD", text)
        return "".join(
            ch for ch in decomposed if unicodedata.category(ch) != "Mn")

    def _run_split_on_punc(self, text):
        """Split `text` so every punctuation character is its own piece."""
        groups = []
        word_open = False
        for ch in text:
            if _is_punctuation(ch):
                groups.append([ch])
                word_open = False
            else:
                if not word_open:
                    groups.append([])
                    word_open = True
                groups[-1].append(ch)
        return ["".join(g) for g in groups]

    def _tokenize_chinese_chars(self, text):
        """Add whitespace around every CJK character."""
        out = []
        for ch in text:
            if self._is_chinese_char(ord(ch)):
                out.extend((" ", ch, " "))
            else:
                out.append(ch)
        return "".join(out)

    def _is_chinese_char(self, cp):
        """True when `cp` lies in a CJK Unified Ideographs Unicode block.

        Hangul, Hiragana and Katakana are deliberately excluded: those
        scripts use space-separated words, so they are handled like any
        other language.
        """
        ranges = (
            (0x4E00, 0x9FFF), (0x3400, 0x4DBF), (0x20000, 0x2A6DF),
            (0x2A700, 0x2B73F), (0x2B740, 0x2B81F), (0x2B820, 0x2CEAF),
            (0xF900, 0xFAFF), (0x2F800, 0x2FA1F),
        )
        return any(lo <= cp <= hi for lo, hi in ranges)

    def _clean_text(self, text):
        """Drop NUL/replacement/control chars and normalize whitespace."""
        cleaned = []
        for ch in text:
            code = ord(ch)
            if code == 0 or code == 0xFFFD or _is_control(ch):
                continue
            cleaned.append(" " if _is_whitespace(ch) else ch)
        return "".join(cleaned)
class WordpieceTokenizer(object):
    """Greedy longest-match-first WordPiece tokenizer.

    Example: "unaffable" -> ["un", "##aff", "##able"].
    """

    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Split `text` into wordpiece tokens.

        `text` should already have been run through `BasicTokenizer`; each
        whitespace-separated token is split independently, and any token
        that cannot be fully covered by the vocab (or is too long) maps to
        the unknown token.

        Returns:
            A list of wordpiece tokens.
        """
        text = convert_to_unicode(text)
        output_tokens = []
        for token in whitespace_tokenize(text):
            pieces = self._split_word(token)
            if pieces is None:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(pieces)
        return output_tokens

    def _split_word(self, token):
        """Return wordpieces for one token, or None if it cannot be covered."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return None
        pieces = []
        start = 0
        while start < len(chars):
            match = None
            # Greedy: try the longest substring first, shrinking the end.
            for end in range(len(chars), start, -1):
                candidate = "".join(chars[start:end])
                if start > 0:
                    candidate = "##" + candidate
                if candidate in self.vocab:
                    match = candidate
                    start = end
                    break
            if match is None:
                return None
            pieces.append(match)
        return pieces
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def tokenize_chinese_chars(text):
    """Segment `text` so every CJK character and every whitespace character
    becomes its own list element, while runs of other characters stay
    joined together."""

    def _is_cjk(cp):
        # CJK Unified Ideographs blocks plus extensions and compatibility
        # ideographs.  Hangul, Hiragana and Katakana are intentionally not
        # included: those scripts use space-separated words and are handled
        # like the other languages.
        ranges = (
            (0x4E00, 0x9FFF), (0x3400, 0x4DBF), (0x20000, 0x2A6DF),
            (0x2A700, 0x2B73F), (0x2B740, 0x2B81F), (0x2B820, 0x2CEAF),
            (0xF900, 0xFAFF), (0x2F800, 0x2FA1F),
        )
        return any(lo <= cp <= hi for lo, hi in ranges)

    def _is_space(ch):
        return ch in (" ", "\t", "\r", "\n") or ord(ch) == 0x202F

    segments = []
    pending = ""
    for ch in text:
        if _is_cjk(ord(ch)) or _is_space(ch):
            # Flush the accumulated non-CJK run, then emit the char itself.
            if pending:
                segments.append(pending)
                pending = ""
            segments.append(ch)
        else:
            pending += ch
    if pending:
        segments.append(pending)
    return segments
| 14,348 | 32.921986 | 84 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/model/src/inference_de.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning on classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import time
import logging
import multiprocessing
import numpy as np
# NOTE(paddle-dev): All of these flags should be
# set before `import paddle`. Otherwise, it would
# not take any effect.
os.environ['FLAGS_eager_delete_tensor_gb'] = '0' # enable gc
import paddle.fluid as fluid
import reader.reader_de_infer as reader_de_infer
from model.ernie import ErnieConfig
from finetune.dual_encoder_infer import create_model, predict
from utils.args import print_arguments, check_cuda, prepare_logger
from utils.init import init_pretraining_params, init_checkpoint
from finetune_args import parser
args = parser.parse_args()
log = logging.getLogger()
def main(args):
    """Inference driver for the dual-encoder model.

    Builds the prediction program, restores parameters from
    `args.init_checkpoint`, then runs `predict` over each comma-separated
    test file, writing outputs per `args.test_save`.
    """
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()
    # Pick the execution place: first visible GPU when CUDA is on, else CPU.
    if args.use_cuda:
        dev_list = fluid.cuda_places()
        place = dev_list[0]
        dev_count = len(dev_list)
    else:
        place = fluid.CPUPlace()
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
    exe = fluid.Executor(place)
    reader = reader_de_infer.ClassifyReader(
        vocab_path=args.vocab_path,
        label_map_config=args.label_map_config,
        q_max_seq_len=args.q_max_seq_len,
        p_max_seq_len=args.p_max_seq_len,
        do_lower_case=args.do_lower_case,
        in_tokens=args.in_tokens,
        random_seed=args.random_seed,
        tokenizer=args.tokenizer,
        for_cn=args.for_cn,
        task_id=args.task_id)
    assert args.test_save is not None
    startup_prog = fluid.Program()
    test_prog = fluid.Program()
    with fluid.program_guard(test_prog, startup_prog):
        with fluid.unique_name.guard():
            test_pyreader, graph_vars = create_model(
                args,
                pyreader_name='test_reader',
                ernie_config=ernie_config,
                batch_size=args.batch_size,
                is_prediction=True)
    test_prog = test_prog.clone(for_test=True)
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    if not args.init_checkpoint:
        raise ValueError("args 'init_checkpoint' should be set if"
                         "only doing validation or testing!")
    init_checkpoint(
        exe,
        args.init_checkpoint,
        main_program=startup_prog)
    # test_set and test_save are parallel comma-separated lists.
    test_sets = args.test_set.split(',')
    save_dirs = args.test_save.split(',')
    assert len(test_sets) == len(save_dirs)
    batch_size = args.batch_size if args.predict_batch_size is None else args.predict_batch_size
    for test_f, save_f in zip(test_sets, save_dirs):
        # One pass over this file, no shuffling, single device.
        test_pyreader.decorate_tensor_provider(
            reader.data_generator(
                test_f,
                batch_size=batch_size,
                epoch=1,
                dev_count=1,
                shuffle=False))
        save_path = save_f
        log.info("testing {}, save to {}".format(test_f, save_path))
        predict(
            args,
            exe,
            test_prog,
            test_pyreader,
            graph_vars,
            output_item=args.output_item,
            output_file_name=args.output_file_name,
            hidden_size=ernie_config['hidden_size'])
if __name__ == '__main__':
    # Script entry point: set up logging, echo parsed args, verify the CUDA
    # request is satisfiable, then run inference.
    prepare_logger(log)
    print_arguments(args)
    check_cuda(args.use_cuda)
    main(args)
| 4,126 | 31.496063 | 96 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/model/src/finetune_args.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import time
import argparse
from utils.args import ArgumentGroup
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
# --- model group: config file, checkpoint paths and task-head switches ---
model_g = ArgumentGroup(parser, "model", "model configuration and paths.")
model_g.add_arg("ernie_config_path", str, None, "Path to the json file for ernie model config.")
model_g.add_arg("init_checkpoint", str, None, "Init checkpoint to resume training from.")
model_g.add_arg("init_pretraining_params", str, None,
                "Init pre-training params which preforms fine-tuning from. If the "
                "arg 'init_checkpoint' has been set, this argument wouldn't be valid.")
model_g.add_arg("checkpoints", str, "checkpoints", "Path to save checkpoints.")
model_g.add_arg("is_classify", bool, True, "is_classify")
model_g.add_arg("is_regression", bool, False, "is_regression")
model_g.add_arg("is_pretrain", bool, False, "is_pretrain")
model_g.add_arg("task_id", int, 0, "task id")
# --- training group: schedule, checkpoint cadence, distributed and
# mixed-precision switches ---
train_g = ArgumentGroup(parser, "training", "training options.")
train_g.add_arg("epoch", int, 3, "Number of epoches for fine-tuning.")
train_g.add_arg("learning_rate", float, 5e-5, "Learning rate used to train with warmup.")
train_g.add_arg("lr_scheduler", str, "linear_warmup_decay",
                "scheduler of learning rate.", choices=['linear_warmup_decay', 'noam_decay'])
train_g.add_arg("weight_decay", float, 0.01, "Weight decay rate for L2 regularizer.")
train_g.add_arg("warmup_proportion", float, 0.1,
                "Proportion of training steps to perform linear learning rate warmup for.")
train_g.add_arg("save_steps", int, 10000, "The steps interval to save checkpoints.")
train_g.add_arg("validation_steps", int, 1000, "The steps interval to evaluate model performance.")
train_g.add_arg("use_recompute", bool, False, "Whether to use recompute optimizer for training.")
train_g.add_arg("use_mix_precision", bool, False, "Whether to use mix-precision optimizer for training.")
train_g.add_arg("use_cross_batch", bool, False, "Whether to use cross-batch for training.")
train_g.add_arg("use_lamb", bool, False, "Whether to use LambOptimizer for training.")
train_g.add_arg("use_dynamic_loss_scaling", bool, True, "Whether to use dynamic loss scaling.")
train_g.add_arg("test_save", str, "./checkpoints/test_result", "test_save")
train_g.add_arg("metric", str, "simple_accuracy", "metric")
# The four loss-scaling knobs below only matter with use_dynamic_loss_scaling.
train_g.add_arg("incr_every_n_steps", int, 100, "Increases loss scaling every n consecutive.")
train_g.add_arg("decr_every_n_nan_or_inf", int, 2,
                "Decreases loss scaling every n accumulated steps with nan or inf gradients.")
train_g.add_arg("incr_ratio", float, 2.0,
                "The multiplier to use when increasing the loss scaling.")
train_g.add_arg("decr_ratio", float, 0.8,
                "The less-than-one-multiplier to use when decreasing.")
# --- logging group ---
log_g = ArgumentGroup(parser, "logging", "logging related.")
log_g.add_arg("skip_steps", int, 10, "The steps interval to print loss.")
log_g.add_arg("verbose", bool, False, "Whether to output verbose log.")
# --- data group: dataset paths, vocab and length limits; q_* applies to
# queries, p_* to passages (dual-encoder sides) ---
data_g = ArgumentGroup(parser, "data", "Data paths, vocab paths and data processing options")
data_g.add_arg("tokenizer", str, "FullTokenizer",
               "ATTENTION: the INPUT must be splited by Word with blank while using SentencepieceTokenizer or WordsegTokenizer")
data_g.add_arg("train_set", str, None, "Path to training data.")
data_g.add_arg("test_set", str, None, "Path to test data.")
data_g.add_arg("dev_set", str, None, "Path to validation data.")
data_g.add_arg("vocab_path", str, None, "Vocabulary path.")
data_g.add_arg("max_seq_len", int, 512, "Number of words of the longest seqence.")
data_g.add_arg("q_max_seq_len", int, 32, "Number of words of the longest seqence.")
data_g.add_arg("p_max_seq_len", int, 256, "Number of words of the longest seqence.")
data_g.add_arg("train_data_size", int, 0, "Number of training data's total examples. Set for distribute.")
data_g.add_arg("batch_size", int, 32, "Total examples' number in batch for training. see also --in_tokens.")
data_g.add_arg("predict_batch_size", int, None, "Total examples' number in batch for predict. see also --in_tokens.")
data_g.add_arg("in_tokens", bool, False,
               "If set, the batch size will be the maximum number of tokens in one batch. "
               "Otherwise, it will be the maximum number of examples in one batch.")
data_g.add_arg("do_lower_case", bool, True,
               "Whether to lower case the input text. Should be True for uncased models and False for cased models.")
data_g.add_arg("random_seed", int, None, "Random seed.")
data_g.add_arg("label_map_config", str, None, "label_map_path.")
data_g.add_arg("num_labels", int, 2, "label number")
data_g.add_arg("diagnostic", str, None, "GLUE Diagnostic Dataset")
data_g.add_arg("diagnostic_save", str, None, "GLUE Diagnostic save f")
data_g.add_arg("max_query_length", int, 64, "Max query length.")
data_g.add_arg("max_answer_length", int, 100, "Max answer length.")
data_g.add_arg("doc_stride", int, 128,
               "When splitting up a long document into chunks, how much stride to take between chunks.")
data_g.add_arg("n_best_size", int, 20,
               "The total number of n-best predictions to generate in the nbest_predictions.json output file.")
# NOTE(review): unlike every call above, this one uses argparse-style keyword
# arguments (type=/default=/help=) -- it relies on ArgumentGroup.add_arg
# forwarding them; confirm against utils/args.py.
data_g.add_arg("chunk_scheme", type=str, default="IOB", choices=["IO", "IOB", "IOE", "IOBES"], help="chunk scheme")
# --- run-type group: device placement and which phases to run ---
run_type_g = ArgumentGroup(parser, "run_type", "running type options.")
run_type_g.add_arg("use_cuda", bool, True, "If set, use GPU for training.")
run_type_g.add_arg("is_distributed", bool, False, "If set, then start distributed training.")
run_type_g.add_arg("use_fast_executor", bool, False, "If set, use fast parallel executor (in experiment).")
run_type_g.add_arg("num_iteration_per_drop_scope", int, 10, "Iteration intervals to drop scope.")
run_type_g.add_arg("do_train", bool, True, "Whether to perform training.")
run_type_g.add_arg("do_val", bool, True, "Whether to perform evaluation on dev data set.")
run_type_g.add_arg("do_test", bool, True, "Whether to perform evaluation on test data set.")
run_type_g.add_arg("output_item", int, 3, "Test output format.")
run_type_g.add_arg("output_file_name", str, None, "Test output file name")
run_type_g.add_arg("test_data_cnt", int, 1110000 , "total cnt of testset")
run_type_g.add_arg("use_multi_gpu_test", bool, False, "Whether to perform evaluation using multiple gpu cards")
run_type_g.add_arg("metrics", bool, True, "Whether to perform evaluation on test data set.")
run_type_g.add_arg("shuffle", bool, True, "")
run_type_g.add_arg("for_cn", bool, False, "model train for cn or for other langs.")
| 8,257 | 67.816667 | 127 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/model/src/merge.py | import sys
# Merge per-partition retrieval results ("res.top{K}-part{P}") into one
# globally ranked top-K file under output/.  Each partition file carries, per
# query, that partition's local top-K rows (qid \t local_pid \t rank \t score);
# the partitions are consumed line-by-line in lockstep so that all candidate
# rows of one query are collected before the query is ranked.
# Usage: python merge.py <shift> <topk>
#   shift: passages per partition index; maps a partition-local passage id
#          back to a global id (global = local + shift * part).
#   topk:  merged results kept per query.
total_part = 8  # number of index partitions produced upstream
shift = int(sys.argv[1])
top = int(sys.argv[2])

# One open result file per partition, read as parallel cursors.
f_list = []
for part in range(total_part):
    f_list.append(open('res.top%s-part%s' % (top, part)))


def _read_one_line_each(files):
    """Advance every partition cursor by one line; '' signals EOF."""
    return [f.readline() for f in files]


def _flush(out, qid, scores, top_k):
    """Write one query's best `top_k` (global_pid, score) pairs, best first.

    Guards against queries with fewer than top_k candidates -- the original
    code indexed rank[i] for i in range(top) and raised IndexError there.
    """
    ranked = sorted(scores.items(), key=lambda kv: kv[1], reverse=True)
    for i in range(min(top_k, len(ranked))):
        out.write("%s\t%s\t%s\t%s\n" % (qid, ranked[i][0], i + 1, ranked[i][1]))


line_list = _read_one_line_each(f_list)
out = open('output/res.top%s' % top, 'w')
last_q = ''
ans_list = {}
# Loop until the last partition file is exhausted (all partitions are expected
# to hold the same number of rows).
while line_list[-1]:
    cur_list = [line.strip().split('\t') for line in line_list]
    if last_q == '':
        last_q = cur_list[0][0]
    if cur_list[0][0] != last_q:
        # Query id changed: every candidate of the previous query was seen.
        _flush(out, last_q, ans_list, top)
        ans_list = {}
    for i, sub in enumerate(cur_list):
        # sub = [qid, local_pid, rank, score]; remap to the global passage id.
        ans_list[int(sub[1]) + shift * i] = float(sub[-1])
    last_q = cur_list[0][0]
    line_list = _read_one_line_each(f_list)
# Flush the final query.
_flush(out, last_q, ans_list, top)
out.close()
# Close the partition inputs (the original leaked these handles).
for f0 in f_list:
    f0.close()
print('output/res.top%s' % top)
| 1,230 | 24.645833 | 81 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/model/src/batching.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mask, padding and batching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange
def pad_batch_data(insts,
                   pad_idx=0,
                   return_pos=False,
                   return_input_mask=False,
                   return_max_len=False,
                   return_num_token=False,
                   return_seq_lens=False):
    """Pad every instance to the longest length in the batch.

    Always emits the padded token ids with shape [batch, max_len, 1]; the
    flags optionally append position ids, a float attention mask, the batch
    max length, the total real-token count, and per-instance lengths, in that
    order.  A single requested output is returned bare, otherwise a list.
    """
    outputs = []
    batch_max = max(len(inst) for inst in insts)
    # Padding may reuse any vocabulary id: padded positions are masked out by
    # weights later, so the pad value never influences gradients.
    padded_ids = np.array(
        [inst + [pad_idx] * (batch_max - len(inst)) for inst in insts])
    outputs.append(padded_ids.astype("int64").reshape([-1, batch_max, 1]))
    if return_pos:
        # 0..len-1 over real tokens, pad_idx over the padded tail.
        positions = np.array(
            [list(range(len(inst))) + [pad_idx] * (batch_max - len(inst))
             for inst in insts])
        outputs.append(positions.astype("int64").reshape([-1, batch_max, 1]))
    if return_input_mask:
        # 1.0 over real tokens, 0.0 over padding, shaped [batch, max_len, 1]
        # so attention never lands on padded slots.
        mask = np.array(
            [[1] * len(inst) + [0] * (batch_max - len(inst)) for inst in insts])
        outputs.append(np.expand_dims(mask, axis=-1).astype("float32"))
    if return_max_len:
        outputs.append(batch_max)
    if return_num_token:
        outputs.append(sum(len(inst) for inst in insts))
    if return_seq_lens:
        lengths = np.array([len(inst) for inst in insts])
        outputs.append(lengths.astype("int64").reshape([-1, 1]))
    return outputs if len(outputs) > 1 else outputs[0]
if __name__ == "__main__":
    # Module is import-only; no CLI behaviour.
    pass
| 2,683 | 33.410256 | 78 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/model/src/index_search.py | import sys
import time
import faiss
import math
import numpy as np
def read_embed(file_name, dim=768, bs=3000):
    """Yield batches of at most `bs` embedding vectors from `file_name`.

    Two formats are recognised:
      * ``*.npy``  -- a numpy matrix; batches are row slices of the array.
      * plain text -- one vector per line, `dim` space-separated floats
        (width is asserted); batches are lists of float lists.
    The final batch may be smaller than `bs`.
    """
    if file_name.endswith('npy'):
        matrix = np.load(file_name)
        for start in range(0, len(matrix), bs):
            yield matrix[start:start + bs]
    else:
        batch = []
        with open(file_name) as inp:
            for line in inp:
                vector = [float(tok) for tok in line.strip().split(' ')]
                assert len(vector) == dim
                batch.append(vector)
                if len(batch) == bs:
                    yield batch
                    batch = []
        if batch:
            yield batch
def load_qid(file_name):
    """Return the query ids (first tab-separated column) of `file_name`, in file order."""
    with open(file_name) as inp:
        return [line.strip().split('\t')[0] for line in inp]
def search(index, emb_file, qid_list, outfile, top_k):
    """ANN-search every query embedding in `emb_file` against `index`.

    Writes one row per hit to `outfile`: 'qid \\t pid \\t rank \\t score',
    with rank starting at 1.  `qid_list` must be aligned with the order of
    the embeddings stored in `emb_file`.
    """
    cursor = 0  # position in qid_list, advanced across batches
    with open(outfile, 'w') as out:
        for batch in read_embed(emb_file):
            queries = np.array(batch).astype('float32')
            dists, pids = index.search(queries, top_k)
            for row in range(len(queries)):
                qid = qid_list[cursor]
                for rank in range(top_k):
                    out.write('%s\t%s\t%s\t%s\n'
                              % (qid, pids[row][rank], rank + 1, dists[row][rank]))
                cursor += 1
def main():
    """CLI entry: search one index partition.

    argv: <part> <topk> <query_text_file>.  Reads the FAISS partition
    'para.index.part<part>' and the query matrix 'query.emb.npy', then writes
    the partition's top-k hits to 'res.top<topk>-part<part>'.
    """
    part, topk, q_text_file = sys.argv[1], int(sys.argv[2]), sys.argv[3]
    outfile = 'res.top%s-part%s' % (topk, part)
    qid_list = load_qid(q_text_file)
    engine = faiss.read_index("para.index.part%s" % part)
    search(engine, 'query.emb.npy', qid_list, outfile, topk)
if __name__ == "__main__":
    main()
| 1,948 | 27.661765 | 84 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/model/src/finetune/dual_encoder_infer.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import time
import logging
import numpy as np
import faiss
from scipy.stats import pearsonr, spearmanr
from six.moves import xrange
import paddle.fluid as fluid
from model.ernie import ErnieModel
log = logging.getLogger(__name__)
def create_model(args,
                 pyreader_name,
                 ernie_config,
                 batch_size=16,
                 is_prediction=False,
                 task_name=""):
    """Build the dual-encoder inference graph (query tower + passage tower).

    A py_reader feeds five query-side tensors and five passage-side tensors
    (token ids, sentence ids, position ids, task ids, input mask -- padded to
    q_max_seq_len resp. p_max_seq_len), plus labels and qids.  Each side runs
    through an ErnieModel constructed with the same model_name 'titlepara_'
    (presumably tying the two towers' parameters -- confirm in ErnieModel).
    Queries are scored against every passage in the batch by dot product.

    Returns (pyreader, graph_vars); graph_vars exposes the in-batch softmax
    loss/accuracy plus the raw q/p [CLS] vectors used for embedding dumps.
    """
    pyreader = fluid.layers.py_reader(
        capacity=50,
        # 5 query slots, 5 passage slots, then labels and qids.
        shapes=[[batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1],
                [batch_size, 1], [batch_size, 1]],
        dtypes=['int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64'],
        lod_levels=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        name=pyreader_name,
        use_double_buffer=True)
    (src_ids_q, sent_ids_q, pos_ids_q, task_ids_q, input_mask_q,
     src_ids_p, sent_ids_p, pos_ids_p, task_ids_p, input_mask_p,
     labels, qids) = fluid.layers.read_file(pyreader)
    # Query tower.
    ernie_q = ErnieModel(
        src_ids=src_ids_q,
        position_ids=pos_ids_q,
        sentence_ids=sent_ids_q,
        task_ids=task_ids_q,
        input_mask=input_mask_q,
        config=ernie_config,
        model_name='titlepara_')
    ## pos para
    # Passage tower; same model_name as the query tower.
    ernie_p = ErnieModel(
        src_ids=src_ids_p,
        position_ids=pos_ids_p,
        sentence_ids=sent_ids_p,
        task_ids=task_ids_p,
        input_mask=input_mask_p,
        config=ernie_config,
        model_name='titlepara_')
    q_cls_feats = ernie_q.get_cls_output()
    p_cls_feats = ernie_p.get_cls_output()
    #p_cls_feats = fluid.layers.concat([pos_cls_feats, neg_cls_feats], axis=0)
    #src_ids_p = fluid.layers.Print(src_ids_p, message='p: ')
    #p_cls_feats = fluid.layers.Print(p_cls_feats, message='p: ')
    #multiply
    # [batch, batch] similarity matrix: dot product of every query against
    # every passage in the batch.
    logits = fluid.layers.matmul(q_cls_feats, p_cls_feats, transpose_x=False, transpose_y=True)
    probs = logits
    #fluid.layers.Print(probs, message='probs: ')
    #logits2 = fluid.layers.elementwise_mul(x=q_rep, y=p_rep)
    #fluid.layers.Print(logits2, message='logits2: ')
    #probs2 = fluid.layers.reduce_sum(logits, dim=-1)
    #fluid.layers.Print(probs2, message='probs2: ')
    # In-batch negatives: the diagonal pairs the i-th query with its own
    # passage, so the identity matrix is the soft-label target.
    matrix_labels = fluid.layers.eye(batch_size, batch_size, dtype='float32')
    matrix_labels.stop_gradient=True
    #print('DEBUG:\tstart loss')
    ce_loss, _ = fluid.layers.softmax_with_cross_entropy(
        logits=logits, label=matrix_labels, soft_label=True, return_softmax=True)
    loss = fluid.layers.mean(x=ce_loss)
    #print('DEBUG:\tloss done')
    # Convert the one-hot target back to index labels for the accuracy op.
    matrix_labels = fluid.layers.argmax(matrix_labels, axis=-1)
    matrix_labels = fluid.layers.reshape(x=matrix_labels, shape=[batch_size, 1])
    num_seqs = fluid.layers.create_tensor(dtype='int64')
    accuracy = fluid.layers.accuracy(input=probs, label=matrix_labels, total=num_seqs)
    #ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
    #    logits=logits, label=labels, return_softmax=True)
    #loss = fluid.layers.mean(x=ce_loss)
    #accuracy = fluid.layers.accuracy(
    #    input=probs, label=labels, total=num_seqs)
    graph_vars = {
        "loss": loss,
        "probs": probs,
        "accuracy": accuracy,
        "labels": labels,
        "num_seqs": num_seqs,
        "qids": qids,
        "q_rep": q_cls_feats,
        "p_rep": p_cls_feats
    }
    return pyreader, graph_vars
def build_engine(para_emb_list, dim):
    """Build a flat inner-product FAISS index over the given paragraph embeddings."""
    matrix = np.asarray(para_emb_list).astype('float32')
    engine = faiss.IndexFlatIP(dim)
    engine.add(matrix)
    return engine
def predict(args,
            exe,
            test_program,
            test_pyreader,
            graph_vars,
            dev_count=1,
            output_item=0,
            output_file_name='emb',
            hidden_size=768):
    """Run the dual-encoder forward pass and dump embeddings.

    output_item selects the side: 0 collects query vectors and saves them to
    '<output_file_name>.npy'; 1 collects passage vectors and serialises them
    as a FAISS index at output_file_name.  Collected embeddings are truncated
    to args.test_data_cnt (padding batches can produce extras).
    """
    test_pyreader.start()
    fetch_list = [graph_vars["q_rep"].name, graph_vars["p_rep"].name,]
    para_embs = []
    batch_id = 0
    # Drain the reader until EOFException signals the dataset is exhausted.
    while True:
        try:
            batch_id += 1
            if batch_id % 500 == 0:
                log.info("complete batch %s" % batch_id)
            q_rep, p_rep = exe.run(program=test_program,
                                   fetch_list=fetch_list)
            if output_item == 0:
                for item in q_rep:
                    para_embs.append(np.array(item, dtype='float32'))
            elif output_item == 1:
                for item in p_rep:
                    para_embs.append(np.array(item, dtype='float32'))
        except fluid.core.EOFException:
            test_pyreader.reset()
            break
    log.info("predict embs cnt: %s" % len(para_embs))
    # Drop trailing padding rows beyond the true test-set size.
    para_embs = para_embs[:args.test_data_cnt]
    log.info("cut embs cnt: %s" % len(para_embs))
    if output_item == 1:
        engine = build_engine(para_embs, hidden_size)
        faiss.write_index(engine, output_file_name)
        log.info("create index done!")
    else:
        emb_matrix = np.asarray(para_embs)
        np.save(output_file_name + '.npy', emb_matrix)
        log.info("save to npy file!")
| 6,359 | 34.333333 | 95 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/model/src/finetune/dual_encoder.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import time
import logging
import numpy as np
from scipy.stats import pearsonr, spearmanr
from six.moves import xrange
import paddle.fluid as fluid
from model.ernie import ErnieModel
log = logging.getLogger(__name__)
def create_model(args,
                 pyreader_name,
                 ernie_config,
                 batch_size=16,
                 is_prediction=False,
                 task_name="",
                 fleet_handle=None):
    """Build the dual-encoder training graph with hard negatives.

    The reader feeds query tensors plus TWO passage groups per example (a
    positive and a hard negative), each as the usual five ERNIE inputs.  All
    three towers use model_name 'titlepara_' (presumably tying their
    parameters -- confirm in ErnieModel).  Positives and negatives are
    concatenated so every query is scored against 2*batch_size passages
    (in-batch negatives + hard negatives); with use_cross_batch the passage
    pool is all-gathered across fleet workers.

    In prediction mode, returns (pyreader, graph_vars) with only per-pair dot
    product scores; in training mode returns (pyreader, graph_vars, cp) where
    cp lists recompute checkpoints from the three towers.
    """
    print ("DEBUG:\tclassify")
    pyreader = fluid.layers.py_reader(
        capacity=50,
        # 5 query slots, 5 positive-passage slots, 5 negative-passage slots,
        # then labels and qids.
        shapes=[[batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1],
                [batch_size, 1], [batch_size, 1]],
        dtypes=['int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64'],
        lod_levels=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        name=task_name + "_" + pyreader_name,
        use_double_buffer=True)
    (src_ids_q, sent_ids_q, pos_ids_q, task_ids_q, input_mask_q,
     src_ids_p_pos, sent_ids_p_pos, pos_ids_p_pos, task_ids_p_pos, input_mask_p_pos,
     src_ids_p_neg, sent_ids_p_neg, pos_ids_p_neg, task_ids_p_neg, input_mask_p_neg,
     labels, qids) = fluid.layers.read_file(pyreader)
    # Query tower.
    ernie_q = ErnieModel(
        src_ids=src_ids_q,
        position_ids=pos_ids_q,
        sentence_ids=sent_ids_q,
        task_ids=task_ids_q,
        input_mask=input_mask_q,
        config=ernie_config,
        model_name='titlepara_')
    ## pos para
    ernie_pos = ErnieModel(
        src_ids=src_ids_p_pos,
        position_ids=pos_ids_p_pos,
        sentence_ids=sent_ids_p_pos,
        task_ids=task_ids_p_pos,
        input_mask=input_mask_p_pos,
        config=ernie_config,
        model_name='titlepara_')
    ## neg para
    ernie_neg = ErnieModel(
        src_ids=src_ids_p_neg,
        position_ids=pos_ids_p_neg,
        sentence_ids=sent_ids_p_neg,
        task_ids=task_ids_p_neg,
        input_mask=input_mask_p_neg,
        config=ernie_config,
        model_name='titlepara_')
    q_cls_feats = ernie_q.get_cls_output()
    pos_cls_feats = ernie_pos.get_cls_output()
    neg_cls_feats = ernie_neg.get_cls_output()
    #src_ids_p_pos = fluid.layers.Print(src_ids_p_pos, message='pos: ')
    #pos_cls_feats = fluid.layers.Print(pos_cls_feats, message='pos: ')
    # Candidate pool: batch_size positives followed by batch_size negatives.
    p_cls_feats = fluid.layers.concat([pos_cls_feats, neg_cls_feats], axis=0)
    if args.is_pretrain:
        # Passage-centric pool: queries followed by negatives (PAIR pretraining).
        qp_cls_feats = fluid.layers.concat([q_cls_feats, neg_cls_feats], axis=0)
    if is_prediction:
        # Prediction scores only the aligned (query_i, pos_passage_i) pairs.
        p_cls_feats = fluid.layers.slice(p_cls_feats, axes=[0], starts=[0], ends=[batch_size])
        multi = fluid.layers.elementwise_mul(q_cls_feats, p_cls_feats)
        probs = fluid.layers.reduce_sum(multi, dim=-1)
        graph_vars = {
            "probs": probs,
            "qids": qids,
            "q_rep": q_cls_feats,
            "p_rep": p_cls_feats
        }
        return pyreader, graph_vars
    if args.use_cross_batch and fleet_handle is not None:
        # Enlarge the candidate pool with every worker's passages.
        print("worker num is: {}".format(fleet_handle.worker_num()))
        all_p_cls_feats = fluid.layers.collective._c_allgather(
            p_cls_feats, fleet_handle.worker_num(), use_calc_stream=True)
        if args.is_pretrain:
            all_qp_cls_feats = fluid.layers.collective._c_allgather(
                qp_cls_feats, fleet_handle.worker_num(), use_calc_stream=True)
        #multiply
        logits = fluid.layers.matmul(q_cls_feats, all_p_cls_feats, transpose_x=False, transpose_y=True)
        if args.is_pretrain:
            logits_qp = fluid.layers.matmul(pos_cls_feats, all_qp_cls_feats, transpose_x=False, transpose_y=True)
        worker_id = fleet_handle.worker_index()
    else:
        logits = fluid.layers.matmul(q_cls_feats, p_cls_feats, transpose_x=False, transpose_y=True)
        if args.is_pretrain:
            logits_qp = fluid.layers.matmul(pos_cls_feats, qp_cls_feats, transpose_x=False, transpose_y=True)
        worker_id = 0
    probs = logits
    # Target index of query i's positive inside the gathered pool: each worker
    # contributes 2*batch_size passages, positives first, so this worker's
    # positives start at batch_size * worker_id * 2.
    all_labels = np.array(range(batch_size * worker_id * 2, batch_size * (worker_id * 2 + 1)), dtype='int64')
    matrix_labels = fluid.layers.assign(all_labels)
    matrix_labels = fluid.layers.unsqueeze(matrix_labels, axes=1)
    matrix_labels.stop_gradient=True
    # fluid.layers.Print(matrix_labels, message='matrix_labels')
    #print('DEBUG:\tstart loss')
    ce_loss = fluid.layers.softmax_with_cross_entropy(
        logits=logits, label=matrix_labels)
    if args.is_pretrain:
        # Blend query-centric and passage-centric losses (alpha weights the
        # passage-centric term).
        alpha = 0.1
        probs_qp = logits_qp
        p_centric_loss = fluid.layers.softmax_with_cross_entropy(
            logits=logits_qp, label=matrix_labels)
        loss = (1-alpha) * fluid.layers.mean(x=ce_loss) + alpha * fluid.layers.mean(x=p_centric_loss)
    else:
        loss = fluid.layers.mean(x=ce_loss)
    #print('DEBUG:\tloss done')
    num_seqs = fluid.layers.create_tensor(dtype='int64')
    accuracy = fluid.layers.accuracy(
        input=probs, label=matrix_labels)
    graph_vars = {
        "loss": loss,
        "probs": probs,
        "accuracy": accuracy,
        "labels": labels,
        "num_seqs": num_seqs,
        "qids": qids,
        "q_rep": q_cls_feats,
        "p_rep": p_cls_feats
    }
    # Recompute checkpoints for the use_recompute optimizer.
    cp = []
    cp.extend(ernie_q.checkpoints)
    cp.extend(ernie_pos.checkpoints)
    cp.extend(ernie_neg.checkpoints)
    return pyreader, graph_vars, cp
def evaluate_mrr(preds):
    """Mean reciprocal rank over (qid, score, label) triples.

    `preds` must already be grouped by qid and sorted by descending score
    within each group; any non-zero label counts as a hit, and only the
    first hit per query contributes.  A query with no hit contributes 0.
    """
    mrr_sum = 0.0
    query_count = 0.0
    current_qid = None
    position = 0
    found = False
    for qid, _score, label in preds:
        if qid != current_qid:
            current_qid = qid
            query_count += 1
            position = 0
            found = False
        position += 1
        if label != 0 and not found:
            found = True
            mrr_sum += 1.0 / position
    return mrr_sum / query_count
def evaluate_map(preds):
    """Mean average precision over (qid, score, label) triples.

    `preds` must be grouped by qid and sorted by descending score within each
    group.  A label is relevant when int(label) != 0 (labels may arrive as
    strings).  A query with no relevant result contributes 0 to the mean.
    """
    def _single_map(st, en):
        # Average precision of one query's ranked slice preds[st:en].
        total_p = 0.0
        correct_num = 0.0
        # `range` replaces the py2-era six.moves.xrange of the original.
        for index in range(st, en):
            if int(preds[index][2]) != 0:
                correct_num += 1
                # precision@(index - st + 1), accumulated at relevant hits only
                total_p += correct_num / (index - st + 1)
        if int(correct_num) == 0:
            return 0.0
        return total_p / correct_num

    last_qid = None
    total_map = 0.0
    qnum = 0.0
    st = 0
    for i in range(len(preds)):
        qid = preds[i][0]
        if qid != last_qid:
            qnum += 1
            if last_qid is not None:
                # Close out the previous query's slice.
                total_map += _single_map(st, i)
            st = i
            last_qid = qid
    # Final query's slice runs to the end of the list.
    total_map += _single_map(st, len(preds))
    return total_map / qnum
def evaluate(exe,
             test_program,
             test_pyreader,
             graph_vars,
             eval_phase,
             use_multi_gpu_test=False,
             metric='simple_accuracy'):
    """Run one evaluation pass over the graph.

    For eval_phase == "train", executes a single step and returns a dict of
    loss/accuracy (plus learning_rate when exposed).  For other phases it
    drains the pyreader accumulating loss, accuracy, labels, qids and the
    diagonal (aligned-pair) scores, then returns None -- the metric-reporting
    code after `return None` is unreachable and kept only for reference.
    """
    train_fetch_list = [
        graph_vars["loss"].name, graph_vars["accuracy"].name,
        graph_vars["num_seqs"].name
    ]
    if eval_phase == "train":
        if "learning_rate" in graph_vars:
            train_fetch_list.append(graph_vars["learning_rate"].name)
        outputs = exe.run(fetch_list=train_fetch_list, program=test_program)
        ret = {"loss": np.mean(outputs[0]), "accuracy": np.mean(outputs[1])}
        if "learning_rate" in graph_vars:
            ret["learning_rate"] = float(outputs[3][0])
        return ret
    test_pyreader.start()
    total_cost, total_acc, total_num_seqs, total_label_pos_num, total_pred_pos_num, total_correct_num = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    qids, labels, scores, preds = [], [], [], []
    time_begin = time.time()
    fetch_list = [
        graph_vars["loss"].name, graph_vars["accuracy"].name,
        graph_vars["probs"].name, graph_vars["labels"].name,
        graph_vars["num_seqs"].name, graph_vars["qids"].name,
        graph_vars["q_rep"].name, graph_vars["p_rep"].name
    ]
    #emb_file = open('emb_qp', 'w')
    # Drain batches until the reader raises EOFException.
    while True:
        try:
            if use_multi_gpu_test:
                np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids, q_rep, p_rep = exe.run(
                    fetch_list=fetch_list)
            else:
                np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids, q_rep, p_rep = exe.run(
                    program=test_program, fetch_list=fetch_list)
            # Sequence-count-weighted running sums.
            total_cost += np.sum(np_loss * np_num_seqs)
            total_acc += np.sum(np_acc * np_num_seqs)
            total_num_seqs += np.sum(np_num_seqs)
            labels.extend(np_labels.reshape((-1)).tolist())
            if np_qids is None:
                np_qids = np.array([])
            qids.extend(np_qids.reshape(-1).tolist())
            # probs is a [batch, batch] similarity matrix; the diagonal holds
            # each query's score against its own passage.
            batch_scores = np.diag(np_probs).reshape(-1).tolist()
            scores.extend(batch_scores)
            #for item in list(zip(q_rep, p_rep, batch_scores)):
            #    _left = ' '.join([str(each) for each in item[0]])
            #    _right = ' '.join([str(each) for each in item[1]])
            #    emb_file.write(_left + '\t' + _right + '\t' + str(item[2]) + '\n')
            #scores.extend(np_probs[:, 1].reshape(-1).tolist())
            #np_preds = np.argmax(np_probs, axis=1).astype(np.float32)
            #preds.extend(np_preds)
            #total_label_pos_num += np.sum(np_labels)
            #total_pred_pos_num += np.sum(np_preds)
            #total_correct_num += np.sum(np.dot(np_preds, np_labels))
        except fluid.core.EOFException:
            test_pyreader.reset()
            break
    #for score in np_preds:
    #    print (score)
    #print ('---------------------')
    #time_end = time.time()
    #cost = total_cost / total_num_seqs
    #elapsed_time = time_end - time_begin
    #emb_file.close()
    return None
    # NOTE(review): everything below is unreachable (dead code after the
    # unconditional return above) and references `cost` / `elapsed_time`,
    # which are only assigned in commented-out lines.
    evaluate_info = ""
    if metric == 'acc_and_f1':
        ret = acc_and_f1(preds, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, ave_acc: %f, f1: %f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret['acc'], ret['f1'], total_num_seqs, elapsed_time)
    elif metric == 'matthews_corrcoef':
        ret = matthews_corrcoef(preds, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, matthews_corrcoef: %f, data_num: %d, elapsed time: %f s" \
                  % (eval_phase, cost, ret, total_num_seqs, elapsed_time)
    elif metric == 'pearson_and_spearman':
        ret = pearson_and_spearman(scores, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, pearson:%f, spearman:%f, corr:%f, data_num: %d, elapsed time: %f s" \
                % (eval_phase, cost, ret['pearson'], ret['spearman'], ret['corr'], total_num_seqs, elapsed_time)
    elif metric == 'simple_accuracy':
        ret = simple_accuracy(preds, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, acc:%f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret, total_num_seqs, elapsed_time)
    elif metric == "acc_and_f1_and_mrr":
        ret_a = acc_and_f1(preds, labels)
        preds = sorted(
            zip(qids, scores, labels), key=lambda elem: (elem[0], -elem[1]))
        ret_b = evaluate_mrr(preds)
        evaluate_info = "[%s evaluation] ave loss: %f, acc: %f, f1: %f, mrr: %f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret_a['acc'], ret_a['f1'], ret_b, total_num_seqs, elapsed_time)
    else:
        raise ValueError('unsupported metric {}'.format(metric))
    return evaluate_info
def matthews_corrcoef(preds, labels):
    """Matthews correlation coefficient for binary (0/1) predictions.

    :param preds: iterable of 0/1 predictions
    :param labels: iterable of 0/1 gold labels
    :return: MCC in [-1, 1]; 0.0 when any confusion-matrix margin is empty
        (the original formula divided by zero there and produced NaN).
    """
    preds = np.array(preds)
    labels = np.array(labels)
    tp = np.sum((labels == 1) & (preds == 1))
    tn = np.sum((labels == 0) & (preds == 0))
    fp = np.sum((labels == 0) & (preds == 1))
    fn = np.sum((labels == 1) & (preds == 0))
    denom = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    if denom == 0:
        # Degenerate confusion matrix (e.g. all predictions identical):
        # MCC is defined as 0 by convention instead of dividing by zero.
        return 0.0
    return ((tp * tn) - (fp * fn)) / denom
def f1_score(preds, labels):
    """Binary F1 score for 0/1 predictions.

    :return: 2*p*r / (p + r + 1e-8); 0.0 when there are no predicted
        positives or no actual positives (the original computed precision or
        recall as x/0 there and raised a numpy divide warning / NaN).
    """
    preds = np.array(preds)
    labels = np.array(labels)
    tp = np.sum((labels == 1) & (preds == 1))
    fp = np.sum((labels == 0) & (preds == 1))
    fn = np.sum((labels == 1) & (preds == 0))
    if tp + fp == 0 or tp + fn == 0:
        # No predicted positives or no gold positives: F1 is 0 by convention.
        return 0.0
    p = tp / (tp + fp)
    r = tp / (tp + fn)
    # The 1e-8 in the denominator mirrors the original's guard against p=r=0.
    return (2 * p * r) / (p + r + 1e-8)
def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlation between predictions and labels.

    Returns a dict with 'pearson', 'spearman', legacy 'spearmanr' and their
    mean 'corr'.  BUG FIX: the original only emitted the misspelled
    'spearmanr' key, so the caller in evaluate(), which reads
    ret['spearman'], raised KeyError; both spellings are now provided for
    backward compatibility.
    """
    preds = np.array(preds)
    labels = np.array(labels)
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearman": spearman_corr,
        "spearmanr": spearman_corr,  # legacy key kept for old callers
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def acc_and_f1(preds, labels):
    """Accuracy, binary F1, and their arithmetic mean for 0/1 predictions."""
    pred_arr = np.array(preds)
    label_arr = np.array(labels)
    metrics = {
        "acc": simple_accuracy(pred_arr, label_arr),
        "f1": f1_score(pred_arr, label_arr),
    }
    metrics["acc_and_f1"] = (metrics["acc"] + metrics["f1"]) / 2
    return metrics
def simple_accuracy(preds, labels):
    """Fraction of positions where prediction equals label."""
    return (np.asarray(preds) == np.asarray(labels)).mean()
def predict(exe,
            test_program,
            test_pyreader,
            graph_vars,
            dev_count=1):
    """Run the prediction graph, dumping per-pair scores to file 'emb_qp'.

    Drains the pyreader, fetching probs/qids and the q/p representations.
    Side effect: writes one score per line to 'emb_qp' in the working
    directory.  Returns (qids, preds, probs); `preds` is always empty (its
    population is commented out below).
    """
    test_pyreader.start()
    qids, scores, probs = [], [], []
    preds = []
    fetch_list = [graph_vars["probs"].name, graph_vars["qids"].name, \
                  graph_vars["q_rep"].name, graph_vars["p_rep"].name,]
    emb_file = open('emb_qp', 'w')
    # Drain batches until the reader raises EOFException.
    while True:
        try:
            # With a single device the program must be passed explicitly.
            if dev_count == 1:
                np_probs, np_qids, q_rep, p_rep = exe.run(program=test_program,
                                                          fetch_list=fetch_list)
            else:
                np_probs, np_qids, q_rep, p_rep = exe.run(fetch_list=fetch_list)
            if np_qids is None:
                np_qids = np.array([])
            qids.extend(np_qids.reshape(-1).tolist())
            batch_scores = np_probs.reshape(-1).tolist()
            for item in list(zip(q_rep, p_rep, batch_scores)):
                _left = ' '.join([str(each) for each in item[0]])
                _right = ' '.join([str(each) for each in item[1]])
                #emb_file.write(_left + '\t' + _right + '\t' + str(item[2]) + '\n')
                #emb_file.write(_right + '\n')
                # Only the scalar score is written; the embedding dumps above
                # are disabled.
                emb_file.write(str(item[2]) + '\n')
            #for score in batch_scores:
            #    print (score)
            #print ('--------')
            #if is_classify:
            #    np_preds = np.argmax(np_probs, axis=1).astype(np.float32)
            #    preds.extend(np_preds)
            #elif is_regression:
            #    preds.extend(np_probs.reshape(-1))
            probs.extend(batch_scores)
        except fluid.core.EOFException:
            test_pyreader.reset()
            break
    emb_file.close()
    #probs = np.concatenate(probs, axis=0).reshape([len(preds), -1])
    return qids, preds, probs
| 16,226 | 35.963554 | 132 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/model/src/reader/reader_de.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
import os
import json
import random
import logging
import numpy as np
import six
from io import open
from collections import namedtuple
import tokenization
from batching import pad_batch_data
log = logging.getLogger(__name__)
if six.PY3:
    import io
    # Re-wrap the std streams so everything this module prints is UTF-8
    # encoded under Python 3, regardless of the inherited locale.
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def csv_reader(fd, delimiter='\t', trainer_id=0, trainer_num=1):
    """Iterate delimiter-separated records from `fd`, sharded across trainers.

    Only every `trainer_num`-th line (offset `trainer_id`) is yielded, so each
    distributed worker consumes a disjoint slice of the file.  A multi-column
    line yields the list of columns; a single-column line yields a 1-tuple
    wrapping that list (quirk preserved from the original implementation).
    """
    def _shard():
        for line_no, raw in enumerate(fd):
            if line_no % trainer_num != trainer_id:
                continue
            fields = raw.rstrip('\n').split(delimiter)
            if len(fields) == 1:
                yield (fields,)
            else:
                yield fields
    return _shard()
class BaseReader(object):
    """Base data reader for dual-encoder (query / passage) training.

    Converts TSV examples into token-id records holding a query, a positive
    passage and a negative passage, then groups the records into batches.
    Subclasses are expected to override `_read_tsv` and supply
    `_pad_batch_records` (see `ClassifyReader`).
    """
    def __init__(self,
                 vocab_path,
                 label_map_config=None,
                 q_max_seq_len=128,
                 p_max_seq_len=512,
                 total_num=0,
                 do_lower_case=True,
                 in_tokens=False,
                 is_inference=False,
                 random_seed=None,
                 tokenizer="FullTokenizer",
                 for_cn=True,
                 task_id=0):
        """Build the tokenizer and cache special-token ids.

        q_max_seq_len / p_max_seq_len bound query and passage lengths
        (special tokens included) independently.  `total_num` is the total
        number of training examples, used for sharding in `data_generator`.
        """
        self.q_max_seq_len = q_max_seq_len
        self.p_max_seq_len = p_max_seq_len
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_path, do_lower_case=do_lower_case)
        self.vocab = self.tokenizer.vocab
        self.pad_id = self.vocab["[PAD]"]
        self.cls_id = self.vocab["[CLS]"]
        self.sep_id = self.vocab["[SEP]"]
        self.in_tokens = in_tokens
        self.is_inference = is_inference
        self.for_cn = for_cn
        self.task_id = task_id
        # np.random.seed(random_seed)
        self.current_example = 0
        self.current_epoch = 0
        self.num_examples = 0
        self.total_num = total_num
        if label_map_config:
            with open(label_map_config, encoding='utf8') as f:
                self.label_map = json.load(f)
        else:
            self.label_map = None
    def get_train_progress(self):
        """Gets progress for training phase."""
        return self.current_example, self.current_epoch
    def _read_tsv(self, input_file, batch_size=16, quotechar=None):
        """Reads a tab separated value file.

        The first row is treated as the header and defines the Example
        fields.  Overridden by ClassifyReader with a hard-coded header.
        """
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            headers = next(reader)
            Example = namedtuple('Example', headers)
            examples = []
            for line in reader:
                example = Example(*line)
                examples.append(example)
            return examples
    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()
    def _convert_example_to_record(self, example, q_max_seq_length, p_max_seq_length, tokenizer):
        """Converts a single `Example` into a single `Record`.

        Builds three sequences: the query ([CLS] q [SEP], all segment 0) and
        one positive plus one negative passage, each laid out as
        [CLS] title [SEP] para [SEP] with the title in segment 0 and the
        paragraph in segment 1.
        """
        query = tokenization.convert_to_unicode(example.query)
        tokens_query = tokenizer.tokenize(query)
        # Reserve 2 slots for [CLS]/[SEP] around the query.
        self._truncate_seq_pair([], tokens_query, q_max_seq_length - 2)
        # pos title
        title_pos = tokenization.convert_to_unicode(example.title_pos)
        tokens_title_pos = tokenizer.tokenize(title_pos)
        # pos para
        para_pos = tokenization.convert_to_unicode(example.para_pos)
        tokens_para_pos = tokenizer.tokenize(para_pos)
        # Reserve 3 slots for [CLS] and the two [SEP]s of the passage layout.
        self._truncate_seq_pair(tokens_title_pos, tokens_para_pos, p_max_seq_length - 3)
        # neg title
        title_neg = tokenization.convert_to_unicode(example.title_neg)
        tokens_title_neg = tokenizer.tokenize(title_neg)
        # neg para
        para_neg = tokenization.convert_to_unicode(example.para_neg)
        tokens_para_neg = tokenizer.tokenize(para_neg)
        self._truncate_seq_pair(tokens_title_neg, tokens_para_neg, p_max_seq_length - 3)
        tokens_q = []
        text_type_ids_q = []
        tokens_q.append("[CLS]")
        text_type_ids_q.append(0)
        for token in tokens_query:
            tokens_q.append(token)
            text_type_ids_q.append(0)
        tokens_q.append("[SEP]")
        text_type_ids_q.append(0)
        token_ids_q = tokenizer.convert_tokens_to_ids(tokens_q)
        position_ids_q = list(range(len(token_ids_q)))
        ### pos_para
        tokens_p_pos = []
        text_type_ids_p_pos = []
        tokens_p_pos.append("[CLS]")
        text_type_ids_p_pos.append(0)
        for token in tokens_title_pos:
            tokens_p_pos.append(token)
            text_type_ids_p_pos.append(0)
        tokens_p_pos.append("[SEP]")
        text_type_ids_p_pos.append(0)
        for token in tokens_para_pos:
            tokens_p_pos.append(token)
            text_type_ids_p_pos.append(1)
        tokens_p_pos.append("[SEP]")
        text_type_ids_p_pos.append(1)
        token_ids_p_pos = tokenizer.convert_tokens_to_ids(tokens_p_pos)
        position_ids_p_pos = list(range(len(token_ids_p_pos)))
        ### neg_para
        tokens_p_neg = []
        text_type_ids_p_neg = []
        tokens_p_neg.append("[CLS]")
        text_type_ids_p_neg.append(0)
        for token in tokens_title_neg:
            tokens_p_neg.append(token)
            text_type_ids_p_neg.append(0)
        tokens_p_neg.append("[SEP]")
        text_type_ids_p_neg.append(0)
        for token in tokens_para_neg:
            tokens_p_neg.append(token)
            text_type_ids_p_neg.append(1)
        tokens_p_neg.append("[SEP]")
        text_type_ids_p_neg.append(1)
        token_ids_p_neg = tokenizer.convert_tokens_to_ids(tokens_p_neg)
        position_ids_p_neg = list(range(len(token_ids_p_neg)))
        if self.is_inference:
            Record = namedtuple('Record',
                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                 'token_ids_p_pos', 'text_type_ids_p_pos', 'position_ids_p_pos', \
                 'token_ids_p_neg', 'text_type_ids_p_neg', 'position_ids_p_neg'])
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p_pos=token_ids_p_pos,
                text_type_ids_p_pos=text_type_ids_p_pos,
                position_ids_p_pos=position_ids_p_pos,
                token_ids_p_neg=token_ids_p_neg,
                text_type_ids_p_neg=text_type_ids_p_neg,
                position_ids_p_neg=position_ids_p_neg)
        else:
            if self.label_map:
                label_id = self.label_map[example.label]
            else:
                label_id = example.label
            Record = namedtuple('Record',
                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                 'token_ids_p_pos', 'text_type_ids_p_pos', 'position_ids_p_pos', \
                 'token_ids_p_neg', 'text_type_ids_p_neg', 'position_ids_p_neg',
                 'label_id', 'qid'
                ])
            qid = None
            if "qid" in example._fields:
                qid = example.qid
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p_pos=token_ids_p_pos,
                text_type_ids_p_pos=text_type_ids_p_pos,
                position_ids_p_pos=position_ids_p_pos,
                token_ids_p_neg=token_ids_p_neg,
                text_type_ids_p_neg=text_type_ids_p_neg,
                position_ids_p_neg=position_ids_p_neg,
                label_id=label_id,
                qid=qid)
        return record
    def _prepare_batch_data(self, examples, batch_size, phase=None):
        """generate batch records

        When `self.in_tokens` is set, `batch_size` is interpreted as a token
        budget (batch size * longest passage so far); otherwise it is a
        plain example count.
        """
        batch_records, max_len = [], 0
        for index, example in enumerate(examples):
            if phase == "train":
                self.current_example = index
            record = self._convert_example_to_record(example, self.q_max_seq_len,
                                                     self.p_max_seq_len, self.tokenizer)
            max_len = max(max_len, len(record.token_ids_p_pos))
            max_len = max(max_len, len(record.token_ids_p_neg))
            if self.in_tokens:
                to_append = (len(batch_records) + 1) * max_len <= batch_size
            else:
                to_append = len(batch_records) < batch_size
            if to_append:
                batch_records.append(record)
            else:
                yield self._pad_batch_records(batch_records)
                max_len = max(len(record.token_ids_p_neg), len(record.token_ids_p_pos))
                batch_records = [record]
        if batch_records:
            yield self._pad_batch_records(batch_records)
    def get_num_examples(self, input_file):
        # Returns the cached count set by data_generator (train phase);
        # the file itself is not re-read.
        # examples = self._read_tsv(input_file)
        # return len(examples)
        return self.num_examples
    def data_generator(self,
                       input_file,
                       batch_size,
                       epoch,
                       dev_count=1,
                       trainer_id=0,
                       trainer_num=1,
                       shuffle=True,
                       phase=None):
        """Return a generator factory yielding padded batches.

        In the train phase the input is sharded across `trainer_num`
        workers.  Batches are grouped in `dev_count`-sized chunks so each
        device gets one per step; a trailing incomplete chunk is dropped.
        """
        if phase == 'train':
            # examples = examples[trainer_id: (len(examples) //trainer_num) * trainer_num : trainer_num]
            self.num_examples_per_node = self.total_num // trainer_num
            self.num_examples = self.num_examples_per_node * trainer_num
            # NOTE(review): these extra kwargs are only accepted by
            # ClassifyReader._read_tsv, not by BaseReader._read_tsv —
            # the base class cannot run the train phase on its own.
            examples = self._read_tsv(input_file, batch_size=batch_size, trainer_id=trainer_id, trainer_num=trainer_num, num_examples=self.num_examples_per_node)
            log.info('apply sharding %d/%d' % (trainer_id, trainer_num))
        else:
            examples = self._read_tsv(input_file, batch_size=batch_size)
        def wrapper():
            all_dev_batches = []
            for epoch_index in range(epoch):
                if phase == "train":
                    self.current_example = 0
                    self.current_epoch = epoch_index
                if shuffle:
                    np.random.shuffle(examples)
                for batch_data in self._prepare_batch_data(
                        examples, batch_size, phase=phase):
                    if len(all_dev_batches) < dev_count:
                        all_dev_batches.append(batch_data)
                    if len(all_dev_batches) == dev_count:
                        for batch in all_dev_batches:
                            yield batch
                        all_dev_batches = []
        def f():
            # Wrap the generator so any exception is printed instead of
            # silently killing the paddle feeding thread.
            try:
                for i in wrapper():
                    yield i
            except Exception as e:
                import traceback
                traceback.print_exc()
        return f
class ClassifyReader(BaseReader):
    """Reader for query / pos-passage / neg-passage TSV training files."""
    def _read_tsv(self, input_file, batch_size=16, quotechar=None, trainer_id=0, trainer_num=1, num_examples=0):
        """Reads a tab separated value file.

        The file has no header row; columns are fixed to
        query / title_pos / para_pos / title_neg / para_neg / label.
        Reading stops after `num_examples` rows (0 = read all), and the
        example list is padded by repeating the last example until its
        length is a multiple of `batch_size`.
        """
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f, trainer_id=trainer_id, trainer_num=trainer_num)
            # headers = next(reader)
            #headers = 'query\tpara_pos\tpara_neg\tlabel'.split('\t')
            headers = 'query\ttitle_pos\tpara_pos\ttitle_neg\tpara_neg\tlabel'.split('\t')
            text_indices = [
                index for index, h in enumerate(headers) if h != "label"
            ]
            Example = namedtuple('Example', headers)
            examples = []
            for cnt, line in enumerate(reader):
                if num_examples != 0 and cnt == num_examples:
                    break
                for index, text in enumerate(line):
                    if index in text_indices:
                        if self.for_cn:
                            # Chinese text: spaces are tokenization noise.
                            line[index] = text.replace(' ', '')
                        else:
                            line[index] = text
                example = Example(*line)
                examples.append(example)
            # NOTE(review): if the shard is empty, `example` is unbound here
            # and this loop raises NameError — confirm inputs are non-empty.
            while len(examples) % batch_size != 0:
                examples.append(example)
            return examples
    def _pad_batch_records(self, batch_records):
        """Pad a batch of records into the flat tensor list fed to paddle.

        Returns query then pos-passage then neg-passage groups, each as
        (token_ids, text_type_ids, position_ids, task_ids, input_mask),
        plus labels/qids when not in inference mode.
        """
        batch_token_ids_q = [record.token_ids_q for record in batch_records]
        batch_text_type_ids_q = [record.text_type_ids_q for record in batch_records]
        batch_position_ids_q = [record.position_ids_q for record in batch_records]
        batch_token_ids_p_pos = [record.token_ids_p_pos for record in batch_records]
        batch_text_type_ids_p_pos = [record.text_type_ids_p_pos for record in batch_records]
        batch_position_ids_p_pos = [record.position_ids_p_pos for record in batch_records]
        batch_token_ids_p_neg = [record.token_ids_p_neg for record in batch_records]
        batch_text_type_ids_p_neg = [record.text_type_ids_p_neg for record in batch_records]
        batch_position_ids_p_neg = [record.position_ids_p_neg for record in batch_records]
        if not self.is_inference:
            batch_labels = [record.label_id for record in batch_records]
            batch_labels = np.array(batch_labels).astype("int64").reshape(
                [-1, 1])
            # NOTE(review): a falsy qid (e.g. 0) in the first record makes
            # the whole batch emit an empty qid array — confirm qids are 1-based.
            if batch_records[0].qid:
                batch_qids = [record.qid for record in batch_records]
                batch_qids = np.array(batch_qids).astype("int64").reshape(
                    [-1, 1])
            else:
                batch_qids = np.array([]).astype("int64").reshape([-1, 1])
        # padding
        padded_token_ids_q, input_mask_q = pad_batch_data(
            batch_token_ids_q, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_q = pad_batch_data(
            batch_text_type_ids_q, pad_idx=self.pad_id)
        padded_position_ids_q = pad_batch_data(
            batch_position_ids_q, pad_idx=self.pad_id)
        padded_task_ids_q = np.ones_like(padded_token_ids_q, dtype="int64") * self.task_id
        padded_token_ids_p_pos, input_mask_p_pos = pad_batch_data(
            batch_token_ids_p_pos, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_p_pos = pad_batch_data(
            batch_text_type_ids_p_pos, pad_idx=self.pad_id)
        padded_position_ids_p_pos = pad_batch_data(
            batch_position_ids_p_pos, pad_idx=self.pad_id)
        padded_task_ids_p_pos = np.ones_like(padded_token_ids_p_pos, dtype="int64") * self.task_id
        padded_token_ids_p_neg, input_mask_p_neg = pad_batch_data(
            batch_token_ids_p_neg, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_p_neg = pad_batch_data(
            batch_text_type_ids_p_neg, pad_idx=self.pad_id)
        padded_position_ids_p_neg = pad_batch_data(
            batch_position_ids_p_neg, pad_idx=self.pad_id)
        padded_task_ids_p_neg = np.ones_like(padded_token_ids_p_neg, dtype="int64") * self.task_id
        return_list = [
            padded_token_ids_q, padded_text_type_ids_q, padded_position_ids_q, padded_task_ids_q,
            input_mask_q,
            padded_token_ids_p_pos, padded_text_type_ids_p_pos, padded_position_ids_p_pos, padded_task_ids_p_pos,
            input_mask_p_pos,
            padded_token_ids_p_neg, padded_text_type_ids_p_neg, padded_position_ids_p_neg, padded_task_ids_p_neg,
            input_mask_p_neg
        ]
        if not self.is_inference:
            return_list += [batch_labels, batch_qids]
        return return_list
if __name__ == '__main__':
    # Library module only; no standalone CLI behaviour.
    pass
| 17,266 | 39.437939 | 161 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/model/src/reader/reader_de_infer.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
import os
import json
import random
import logging
import numpy as np
import six
from io import open
from collections import namedtuple
import tokenization
from batching import pad_batch_data
log = logging.getLogger(__name__)
if six.PY3:
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def csv_reader(fd, delimiter='\t'):
    """Return a generator of rows from `fd`, one list of fields per line."""
    return (line.rstrip('\n').split(delimiter) for line in fd)
class BaseReader(object):
    """Inference-time reader for the dual encoder: one (query, passage) pair
    per example instead of the pos/neg training triple.
    """
    def __init__(self,
                 vocab_path,
                 label_map_config=None,
                 q_max_seq_len=128,
                 p_max_seq_len=512,
                 do_lower_case=True,
                 in_tokens=False,
                 is_inference=False,
                 random_seed=None,
                 tokenizer="FullTokenizer",
                 for_cn=True,
                 task_id=0):
        """Build the tokenizer, cache special-token ids and seed numpy."""
        self.q_max_seq_len = q_max_seq_len
        self.p_max_seq_len = p_max_seq_len
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_path, do_lower_case=do_lower_case)
        self.vocab = self.tokenizer.vocab
        self.pad_id = self.vocab["[PAD]"]
        self.cls_id = self.vocab["[CLS]"]
        self.sep_id = self.vocab["[SEP]"]
        self.in_tokens = in_tokens
        self.is_inference = is_inference
        self.for_cn = for_cn
        self.task_id = task_id
        np.random.seed(random_seed)
        self.current_example = 0
        self.current_epoch = 0
        self.num_examples = 0
        if label_map_config:
            with open(label_map_config, encoding='utf8') as f:
                self.label_map = json.load(f)
        else:
            self.label_map = None
    def get_train_progress(self):
        """Gets progress for training phase."""
        return self.current_example, self.current_epoch
    def _read_tsv(self, input_file, batch_size=16, quotechar=None):
        """Reads a tab separated value file (first row is the header)."""
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            headers = next(reader)
            Example = namedtuple('Example', headers)
            examples = []
            for line in reader:
                example = Example(*line)
                examples.append(example)
            return examples
    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()
    def _convert_example_to_record(self, example, q_max_seq_length, p_max_seq_length, tokenizer):
        """Converts a single `Example` into a single `Record`.

        Query layout: [CLS] q [SEP], all segment 0.  Passage layout:
        [CLS] title [SEP] para [SEP] with title in segment 0 and paragraph
        in segment 1.
        """
        query = tokenization.convert_to_unicode(example.query)
        tokens_query = tokenizer.tokenize(query)
        # Reserve 2 slots for [CLS]/[SEP] around the query.
        self._truncate_seq_pair([], tokens_query, q_max_seq_length - 2)
        # title
        title = tokenization.convert_to_unicode(example.title)
        tokens_title = tokenizer.tokenize(title)
        # para
        para = tokenization.convert_to_unicode(example.para)
        tokens_para = tokenizer.tokenize(para)
        # Reserve 3 slots for [CLS] and the two [SEP]s of the passage.
        self._truncate_seq_pair(tokens_title, tokens_para, p_max_seq_length - 3)
        tokens_q = []
        text_type_ids_q = []
        tokens_q.append("[CLS]")
        text_type_ids_q.append(0)
        for token in tokens_query:
            tokens_q.append(token)
            text_type_ids_q.append(0)
        tokens_q.append("[SEP]")
        text_type_ids_q.append(0)
        token_ids_q = tokenizer.convert_tokens_to_ids(tokens_q)
        position_ids_q = list(range(len(token_ids_q)))
        ### para
        tokens_p = []
        text_type_ids_p = []
        tokens_p.append("[CLS]")
        text_type_ids_p.append(0)
        for token in tokens_title:
            tokens_p.append(token)
            text_type_ids_p.append(0)
        tokens_p.append("[SEP]")
        text_type_ids_p.append(0)
        for token in tokens_para:
            tokens_p.append(token)
            text_type_ids_p.append(1)
        tokens_p.append("[SEP]")
        text_type_ids_p.append(1)
        token_ids_p = tokenizer.convert_tokens_to_ids(tokens_p)
        position_ids_p = list(range(len(token_ids_p)))
        if self.is_inference:
            Record = namedtuple('Record',
                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                 'token_ids_p', 'text_type_ids_p', 'position_ids_p'])
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p=token_ids_p,
                text_type_ids_p=text_type_ids_p,
                position_ids_p=position_ids_p)
        else:
            if self.label_map:
                label_id = self.label_map[example.label]
            else:
                label_id = example.label
            Record = namedtuple('Record',
                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                 'token_ids_p', 'text_type_ids_p', 'position_ids_p', \
                 'label_id', 'qid'
                ])
            qid = None
            if "qid" in example._fields:
                qid = example.qid
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p=token_ids_p,
                text_type_ids_p=text_type_ids_p,
                position_ids_p=position_ids_p,
                label_id=label_id,
                qid=qid)
        return record
    def _prepare_batch_data(self, examples, batch_size, phase=None, read_id=False):
        """generate batch records

        When `read_id` is set, examples already hold token ids and are
        converted via `_convert_example_id_to_record`.
        NOTE(review): that method is not defined in this file — confirm a
        subclass or caller provides it before using read_id=True.
        """
        batch_records, max_len = [], 0
        for index, example in enumerate(examples):
            if phase == "train":
                self.current_example = index
            if read_id is False:
                record = self._convert_example_to_record(example, self.q_max_seq_len,
                                                         self.p_max_seq_len, self.tokenizer)
            else:
                record = self._convert_example_id_to_record(example, self.q_max_seq_len,
                                                            self.p_max_seq_len, self.tokenizer)
            max_len = max(max_len, len(record.token_ids_p))
            if self.in_tokens:
                to_append = (len(batch_records) + 1) * max_len <= batch_size
            else:
                to_append = len(batch_records) < batch_size
            if to_append:
                batch_records.append(record)
            else:
                yield self._pad_batch_records(batch_records)
                max_len = len(record.token_ids_p)
                batch_records = [record]
        if batch_records:
            yield self._pad_batch_records(batch_records)
    def get_num_examples(self, input_file):
        """Count examples by fully reading the input file."""
        examples = self._read_tsv(input_file)
        return len(examples)
    def data_generator(self,
                       input_file,
                       batch_size,
                       epoch,
                       dev_count=1,
                       shuffle=True,
                       phase=None,
                       read_id=False):
        """Return a generator factory yielding padded batches.

        Batches are grouped in `dev_count`-sized chunks so each device gets
        one per step; a trailing incomplete chunk is dropped.
        """
        examples = self._read_tsv(input_file, batch_size)
        def wrapper():
            all_dev_batches = []
            for epoch_index in range(epoch):
                if phase == "train":
                    self.current_example = 0
                    self.current_epoch = epoch_index
                if shuffle:
                    np.random.shuffle(examples)
                for batch_data in self._prepare_batch_data(
                        examples, batch_size, phase=phase, read_id=read_id):
                    if len(all_dev_batches) < dev_count:
                        all_dev_batches.append(batch_data)
                    if len(all_dev_batches) == dev_count:
                        for batch in all_dev_batches:
                            yield batch
                        all_dev_batches = []
        def f():
            # Print rather than swallow exceptions raised in the feeder.
            try:
                for i in wrapper():
                    yield i
            except Exception as e:
                import traceback
                traceback.print_exc()
        return f
class ClassifyReader(BaseReader):
    """Reader for headerless query / title / para / label TSV files."""
    def _read_tsv(self, input_file, batch_size=16, quotechar=None):
        """Reads a tab separated value file.

        Columns are fixed to query / title / para / label; the example list
        is padded by repeating the last example until its length is a
        multiple of `batch_size`.
        """
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            #headers = next(reader)
            headers = 'query\ttitle\tpara\tlabel'.split('\t')
            text_indices = [
                index for index, h in enumerate(headers) if h != "label"
            ]
            Example = namedtuple('Example', headers)
            examples = []
            for line in reader:
                for index, text in enumerate(line):
                    if index in text_indices:
                        if self.for_cn:
                            # Chinese text: spaces are tokenization noise.
                            line[index] = text.replace(' ', '')
                        else:
                            line[index] = text
                example = Example(*line)
                examples.append(example)
            # NOTE(review): raises NameError on an empty input file because
            # `example` is unbound — confirm inputs are non-empty.
            while len(examples) % batch_size != 0:
                examples.append(example)
            return examples
    def _pad_batch_records(self, batch_records):
        """Pad a batch into the flat tensor list fed to paddle:
        query group then passage group, each as (token_ids, text_type_ids,
        position_ids, task_ids, input_mask), plus labels/qids when not in
        inference mode.
        """
        batch_token_ids_q = [record.token_ids_q for record in batch_records]
        batch_text_type_ids_q = [record.text_type_ids_q for record in batch_records]
        batch_position_ids_q = [record.position_ids_q for record in batch_records]
        batch_token_ids_p = [record.token_ids_p for record in batch_records]
        batch_text_type_ids_p = [record.text_type_ids_p for record in batch_records]
        batch_position_ids_p = [record.position_ids_p for record in batch_records]
        if not self.is_inference:
            batch_labels = [record.label_id for record in batch_records]
            batch_labels = np.array(batch_labels).astype("int64").reshape(
                [-1, 1])
            # NOTE(review): a falsy qid (e.g. 0) in the first record makes
            # the whole batch emit an empty qid array.
            if batch_records[0].qid:
                batch_qids = [record.qid for record in batch_records]
                batch_qids = np.array(batch_qids).astype("int64").reshape(
                    [-1, 1])
            else:
                batch_qids = np.array([]).astype("int64").reshape([-1, 1])
        # padding
        padded_token_ids_q, input_mask_q = pad_batch_data(
            batch_token_ids_q, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_q = pad_batch_data(
            batch_text_type_ids_q, pad_idx=self.pad_id)
        padded_position_ids_q = pad_batch_data(
            batch_position_ids_q, pad_idx=self.pad_id)
        padded_task_ids_q = np.ones_like(padded_token_ids_q, dtype="int64") * self.task_id
        padded_token_ids_p, input_mask_p = pad_batch_data(
            batch_token_ids_p, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_p = pad_batch_data(
            batch_text_type_ids_p, pad_idx=self.pad_id)
        padded_position_ids_p = pad_batch_data(
            batch_position_ids_p, pad_idx=self.pad_id)
        padded_task_ids_p = np.ones_like(padded_token_ids_p, dtype="int64") * self.task_id
        return_list = [
            padded_token_ids_q, padded_text_type_ids_q, padded_position_ids_q, padded_task_ids_q,
            input_mask_q,
            padded_token_ids_p, padded_text_type_ids_p, padded_position_ids_p, padded_task_ids_p,
            input_mask_p,
        ]
        if not self.is_inference:
            return_list += [batch_labels, batch_qids]
        return return_list
if __name__ == '__main__':
    # Library module only; no standalone CLI behaviour.
    pass
| 13,641 | 36.581267 | 97 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/model/src/utils/args.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Arguments for configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import six
import os
import sys
import argparse
import logging
import paddle.fluid as fluid
log = logging.getLogger(__name__)
def prepare_logger(logger, debug=False, save_to_file=None):
    """Attach a console handler (and optional file handler) to `logger`.

    The file handler is added only when `save_to_file` names a path that
    does not already exist.  The logger level is forced to DEBUG and
    propagation to ancestor loggers is disabled.
    """
    fmt = logging.Formatter(
        fmt='[%(levelname)s] %(asctime)s [%(filename)12s:%(lineno)5d]:\t%(message)s')
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(fmt)
    logger.addHandler(stream_handler)
    if save_to_file is not None and not os.path.exists(save_to_file):
        file_handler = logging.FileHandler(save_to_file)
        file_handler.setFormatter(fmt)
        logger.addHandler(file_handler)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
def str2bool(v):
    """Parse a command-line string as a boolean: "true"/"t"/"1" => True."""
    # argparse's bool() would treat any non-empty string as True, so
    # boolean flags are routed through this helper instead.
    return v.lower() in {"true", "t", "1"}
class ArgumentGroup(object):
    """Thin wrapper over an argparse argument group with shared defaults."""

    def __init__(self, parser, title, des):
        self._group = parser.add_argument_group(title=title, description=des)

    def add_arg(self, name, type, default, help, positional_arg=False, **kwargs):
        """Register one argument; bool arguments are parsed via `str2bool`."""
        flag = name if positional_arg else "--" + name
        parse_fn = str2bool if type == bool else type
        self._group.add_argument(
            flag,
            default=default,
            type=parse_fn,
            help=help + ' Default: %(default)s.',
            **kwargs)
def print_arguments(args):
    """Log every parsed argument as `name: value`, sorted by name."""
    log.info('----------- Configuration Arguments -----------')
    for name, val in sorted(six.iteritems(vars(args))):
        log.info('%s: %s' % (name, val))
    log.info('------------------------------------------------')
def check_cuda(use_cuda, err = \
    "\nYou can not set use_cuda = True in the model because you are using paddlepaddle-cpu.\n \
    Please: 1. Install paddlepaddle-gpu to run your models on GPU or 2. Set use_cuda = False to run models on CPU.\n"
    ):
    """Exit(1) with `err` if CUDA is requested but this Paddle build lacks it."""
    try:
        if use_cuda == True and fluid.is_compiled_with_cuda() == False:
            log.error(err)
            sys.exit(1)
    except Exception as e:
        # Best-effort check: presumably guards against Paddle versions
        # without is_compiled_with_cuda(); failures are deliberately ignored.
        pass
| 2,996 | 34.678571 | 119 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/model/src/utils/init.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import six
import ast
import copy
import logging
import numpy as np
import paddle.fluid as fluid
log = logging.getLogger(__name__)
def init_checkpoint(exe, init_checkpoint_path, main_program):
    """Load all persistable variables of `main_program` from a checkpoint dir.

    Variables whose files are missing under `init_checkpoint_path` are
    skipped (with a console message) rather than raising.
    """
    assert os.path.exists(
        init_checkpoint_path), "[%s] cann't be found." % init_checkpoint_path
    def existed_persitables(var):
        # Predicate for fluid.io.load_vars: keep only persistable variables
        # that actually have a saved file in the checkpoint directory.
        if not fluid.io.is_persistable(var):
            return False
        if not os.path.exists(os.path.join(init_checkpoint_path, var.name)):
            print ("Var not exists: [%s]\t%s" % (var.name, os.path.join(init_checkpoint_path, var.name)))
        #else:
        #    print ("Var exists: [%s]" % (var.name))
        return os.path.exists(os.path.join(init_checkpoint_path, var.name))
    fluid.io.load_vars(
        exe,
        init_checkpoint_path,
        main_program=main_program,
        predicate=existed_persitables)
    log.info("Load model from {}".format(init_checkpoint_path))
def init_pretraining_params(exe,
                            pretraining_params_path,
                            main_program):
    """Load only model Parameters (not optimizer state) from a pretrained dir.

    Parameters whose files are missing under `pretraining_params_path` are
    skipped (with a console message) rather than raising.
    """
    assert os.path.exists(pretraining_params_path
                          ), "[%s] cann't be found." % pretraining_params_path
    def existed_params(var):
        # Predicate for fluid.io.load_vars: keep only Parameter variables
        # that have a saved file in the pretrained-params directory.
        if not isinstance(var, fluid.framework.Parameter):
            return False
        if not os.path.exists(os.path.join(pretraining_params_path, var.name)):
            print ("Var not exists: [%s]\t%s" % (var.name, os.path.join(pretraining_params_path, var.name)))
        #else:
        #    print ("Var exists: [%s]" % (var.name))
        return os.path.exists(os.path.join(pretraining_params_path, var.name))
    fluid.io.load_vars(
        exe,
        pretraining_params_path,
        main_program=main_program,
        predicate=existed_params)
    log.info("Load pretraining parameters from {}.".format(
        pretraining_params_path))
| 2,695 | 34.946667 | 108 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/model/src/model/transformer_encoder.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer encoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import paddle.fluid as fluid
import paddle.fluid.layers as layers
def multi_head_attention(queries,
                         keys,
                         values,
                         attn_bias,
                         d_key,
                         d_value,
                         d_model,
                         n_head=1,
                         dropout_rate=0.,
                         cache=None,
                         param_initializer=None,
                         name='multi_head_att'):
    """
    Multi-Head Attention. Note that attn_bias is added to the logit before
    computing softmax activiation to mask certain selected positions so that
    they will not considered in attention weights.

    Inputs are 3-D [batch, seq_len, hidden] tensors; passing keys/values as
    None gives self-attention.  `cache` (a dict with "k"/"v") enables
    incremental decoding by concatenating new keys/values onto past steps.
    Returns a [batch, seq_len, d_model] tensor.
    """
    # Self-attention when keys/values are omitted.
    keys = queries if keys is None else keys
    values = keys if values is None else values
    if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
        raise ValueError(
            "Inputs: quries, keys and values should all be 3-D tensors.")
    def __compute_qkv(queries, keys, values, n_head, d_key, d_value):
        """
        Add linear projection to queries, keys, and values.
        """
        q = layers.fc(input=queries,
                      size=d_key * n_head,
                      num_flatten_dims=2,
                      param_attr=fluid.ParamAttr(
                          name=name + '_query_fc.w_0',
                          initializer=param_initializer),
                      bias_attr=name + '_query_fc.b_0')
        k = layers.fc(input=keys,
                      size=d_key * n_head,
                      num_flatten_dims=2,
                      param_attr=fluid.ParamAttr(
                          name=name + '_key_fc.w_0',
                          initializer=param_initializer),
                      bias_attr=name + '_key_fc.b_0')
        v = layers.fc(input=values,
                      size=d_value * n_head,
                      num_flatten_dims=2,
                      param_attr=fluid.ParamAttr(
                          name=name + '_value_fc.w_0',
                          initializer=param_initializer),
                      bias_attr=name + '_value_fc.b_0')
        return q, k, v
    def __split_heads(x, n_head):
        """
        Reshape the last dimension of inpunt tensor x so that it becomes two
        dimensions and then transpose. Specifically, input a tensor with shape
        [bs, max_sequence_length, n_head * hidden_dim] then output a tensor
        with shape [bs, n_head, max_sequence_length, hidden_dim].
        """
        hidden_size = x.shape[-1]
        # The value 0 in shape attr means copying the corresponding dimension
        # size of the input as the output dimension size.
        reshaped = layers.reshape(
            x=x, shape=[0, 0, n_head, hidden_size // n_head], inplace=True)
        # permuate the dimensions into:
        # [batch_size, n_head, max_sequence_len, hidden_size_per_head]
        return layers.transpose(x=reshaped, perm=[0, 2, 1, 3])
    def __combine_heads(x):
        """
        Transpose and then reshape the last two dimensions of inpunt tensor x
        so that it becomes one dimension, which is reverse to __split_heads.
        """
        if len(x.shape) == 3: return x
        if len(x.shape) != 4:
            raise ValueError("Input(x) should be a 4-D Tensor.")
        trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
        # The value 0 in shape attr means copying the corresponding dimension
        # size of the input as the output dimension size.
        return layers.reshape(
            x=trans_x,
            shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],
            inplace=True)
    def scaled_dot_product_attention(q, k, v, attn_bias, d_key, dropout_rate):
        """
        Scaled Dot-Product Attention: softmax(q·kᵀ/sqrt(d_key) + bias)·v.
        """
        scaled_q = layers.scale(x=q, scale=d_key**-0.5)
        product = layers.matmul(x=scaled_q, y=k, transpose_y=True)
        if attn_bias:
            product += attn_bias
        weights = layers.softmax(product)
        if dropout_rate:
            weights = layers.dropout(
                weights,
                dropout_prob=dropout_rate,
                dropout_implementation="upscale_in_train",
                is_test=False)
        out = layers.matmul(weights, v)
        return out
    q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)
    if cache is not None:  # use cache and concat time steps
        # Since the inplace reshape in __split_heads changes the shape of k and
        # v, which is the cache input for next time step, reshape the cache
        # input from the previous time step first.
        k = cache["k"] = layers.concat(
            [layers.reshape(
                cache["k"], shape=[0, 0, d_model]), k], axis=1)
        v = cache["v"] = layers.concat(
            [layers.reshape(
                cache["v"], shape=[0, 0, d_model]), v], axis=1)
    q = __split_heads(q, n_head)
    k = __split_heads(k, n_head)
    v = __split_heads(v, n_head)
    ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_key,
                                                  dropout_rate)
    out = __combine_heads(ctx_multiheads)
    # Project back to the model size.
    proj_out = layers.fc(input=out,
                         size=d_model,
                         num_flatten_dims=2,
                         param_attr=fluid.ParamAttr(
                             name=name + '_output_fc.w_0',
                             initializer=param_initializer),
                         bias_attr=name + '_output_fc.b_0')
    return proj_out
def positionwise_feed_forward(x,
                              d_inner_hid,
                              d_hid,
                              dropout_rate,
                              hidden_act,
                              param_initializer=None,
                              name='ffn'):
    """
    Position-wise Feed-Forward Networks.
    This module consists of two linear transformations with an activation
    (`hidden_act`) in between, which is applied to each position separately
    and identically. Parameter names are derived from `name` so weights can
    be matched against pretrained checkpoints.
    """
    # Expansion layer: d_hid -> d_inner_hid with non-linearity.
    hidden = layers.fc(input=x,
                       size=d_inner_hid,
                       num_flatten_dims=2,
                       act=hidden_act,
                       param_attr=fluid.ParamAttr(
                           name=name + '_fc_0.w_0',
                           initializer=param_initializer),
                       bias_attr=name + '_fc_0.b_0')
    if dropout_rate:
        hidden = layers.dropout(
            hidden,
            dropout_prob=dropout_rate,
            dropout_implementation="upscale_in_train",
            is_test=False)
    # Projection back to the model width (no activation).
    out = layers.fc(input=hidden,
                    size=d_hid,
                    num_flatten_dims=2,
                    param_attr=fluid.ParamAttr(
                        name=name + '_fc_1.w_0', initializer=param_initializer),
                    bias_attr=name + '_fc_1.b_0')
    return out
def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.,
                           name=''):
    """
    Add residual connection, layer normalization and dropout to the out tensor
    optionally according to the value of process_cmd ("a" = add residual,
    "n" = layer norm, "d" = dropout; commands are applied in order).
    This will be used before or after multi-head attention and position-wise
    feed-forward networks.
    """
    for cmd in process_cmd:
        if cmd == "a": # add residual connection
            # prev_out is None in the pre-process partial, which makes the
            # residual add a no-op there.
            out = out + prev_out if prev_out else out
        elif cmd == "n": # add layer normalization
            out_dtype = out.dtype
            # layer_norm is computed in fp32 for numerical stability, then
            # cast back if the input was fp16.
            if out_dtype == fluid.core.VarDesc.VarType.FP16:
                out = layers.cast(x=out, dtype="float32")
            out = layers.layer_norm(
                out,
                begin_norm_axis=len(out.shape) - 1,
                param_attr=fluid.ParamAttr(
                    name=name + '_layer_norm_scale',
                    initializer=fluid.initializer.Constant(1.)),
                bias_attr=fluid.ParamAttr(
                    name=name + '_layer_norm_bias',
                    initializer=fluid.initializer.Constant(0.)))
            if out_dtype == fluid.core.VarDesc.VarType.FP16:
                out = layers.cast(x=out, dtype="float16")
        elif cmd == "d": # add dropout
            if dropout_rate:
                out = layers.dropout(
                    out,
                    dropout_prob=dropout_rate,
                    dropout_implementation="upscale_in_train",
                    is_test=False)
    return out
# pre_process_layer fixes prev_out to None, so its "a" (residual) command is a
# no-op; post_process_layer receives the sub-layer input as prev_out.
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
def encoder_layer(enc_input,
                  attn_bias,
                  n_head,
                  d_key,
                  d_value,
                  d_model,
                  d_inner_hid,
                  prepostprocess_dropout,
                  attention_dropout,
                  relu_dropout,
                  hidden_act,
                  preprocess_cmd="n",
                  postprocess_cmd="da",
                  param_initializer=None,
                  name=''):
    """The encoder layers that can be stacked to form a deep encoder.
    This module consists of a multi-head (self) attention followed by
    position-wise feed-forward networks and both the two components companied
    with the post_process_layer to add residual connection, layer normalization
    and dropout.

    Returns a 2-tuple: (layer output, ffd_output) — the second element is the
    tensor recorded as a recompute checkpoint by the caller.
    """
    # Self-attention: queries/keys/values are all derived from enc_input
    # (keys/values passed as None).
    attn_output = multi_head_attention(
        pre_process_layer(
            enc_input,
            preprocess_cmd,
            prepostprocess_dropout,
            name=name + '_pre_att'),
        None,
        None,
        attn_bias,
        d_key,
        d_value,
        d_model,
        n_head,
        attention_dropout,
        param_initializer=param_initializer,
        name=name + '_multi_head_att')
    attn_output = post_process_layer(
        enc_input,
        attn_output,
        postprocess_cmd,
        prepostprocess_dropout,
        name=name + '_post_att')
    # Position-wise feed-forward sub-layer with its own pre/post processing.
    ffd_output = positionwise_feed_forward(
        pre_process_layer(
            attn_output,
            preprocess_cmd,
            prepostprocess_dropout,
            name=name + '_pre_ffn'),
        d_inner_hid,
        d_model,
        relu_dropout,
        hidden_act,
        param_initializer=param_initializer,
        name=name + '_ffn')
    return post_process_layer(
        attn_output,
        ffd_output,
        postprocess_cmd,
        prepostprocess_dropout,
        name=name + '_post_ffn'), ffd_output
def encoder(enc_input,
            attn_bias,
            n_layer,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            prepostprocess_dropout,
            attention_dropout,
            relu_dropout,
            hidden_act,
            preprocess_cmd="n",
            postprocess_cmd="da",
            param_initializer=None,
            model_name='',
            name=''):
    """
    The encoder is composed of a stack of identical layers returned by calling
    encoder_layer.

    Returns (enc_output, checkpoints) where checkpoints collects one tensor
    per layer for gradient recomputation.
    """
    checkpoints = []
    for i in range(n_layer):
        enc_output, cp = encoder_layer(
            enc_input,
            attn_bias,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            prepostprocess_dropout,
            attention_dropout,
            relu_dropout,
            hidden_act,
            preprocess_cmd,
            postprocess_cmd,
            param_initializer=param_initializer,
            name=name + '_layer_' + str(i))
        checkpoints.append(cp)
        # Feed each layer's output into the next layer.
        enc_input = enc_output
    # Final pre-process pass (e.g. layer norm) applied to the stack output.
    enc_output = pre_process_layer(
        enc_output, preprocess_cmd, prepostprocess_dropout, name=model_name+"post_encoder")
    return enc_output, checkpoints
| 12,649 | 35.666667 | 91 | py |
RocketQA | RocketQA-main/research/PAIR_ACL2021/model/src/model/ernie.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ernie model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import json
import six
import logging
import paddle.fluid as fluid
from io import open
from model.transformer_encoder import encoder, pre_process_layer
log = logging.getLogger(__name__)
class ErnieConfig(object):
    """Thin wrapper around the ERNIE JSON hyper-parameter file.

    Lookup via ``cfg[key]`` resolves missing keys to ``None`` instead of
    raising, so callers can probe optional settings.
    """

    def __init__(self, config_path):
        self._config_dict = self._parse(config_path)

    def _parse(self, config_path):
        """Read and decode the JSON config; wrap any failure in IOError."""
        try:
            with open(config_path, 'r', encoding='utf8') as json_file:
                parsed = json.load(json_file)
        except Exception:
            raise IOError("Error in parsing Ernie model config file '%s'" %
                          config_path)
        return parsed

    def __getitem__(self, key):
        """Dict-style access; unknown keys yield None."""
        return self._config_dict.get(key, None)

    def print_config(self):
        """Log every config entry in sorted key order."""
        for key in sorted(self._config_dict):
            log.info('%s: %s' % (key, self._config_dict[key]))
        log.info('------------------------------------------------')
class ErnieModel(object):
    """Builds the ERNIE transformer graph (fluid static graph).

    Embeds word/position/sentence (and optional task) ids, applies the shared
    `encoder` stack, and exposes sequence / pooled outputs plus pretraining
    heads. Parameter names are prefixed with `model_name` so multiple towers
    (e.g. query/para dual encoders) can coexist in one program.
    """
    def __init__(self,
                 src_ids,
                 position_ids,
                 sentence_ids,
                 task_ids,
                 input_mask,
                 config,
                 weight_sharing=True,
                 model_name='',
                 is_noise=False):
        # Hyper-parameters are read from an ErnieConfig-like mapping.
        self._emb_size = config['hidden_size']
        self._n_layer = config['num_hidden_layers']
        self._n_head = config['num_attention_heads']
        self._voc_size = config['vocab_size']
        self._max_position_seq_len = config['max_position_embeddings']
        # Fall back to BERT-style key when 'sent_type_vocab_size' is absent/None.
        if config['sent_type_vocab_size']:
            self._sent_types = config['sent_type_vocab_size']
        else:
            self._sent_types = config['type_vocab_size']
        self._use_task_id = config['use_task_id']
        if self._use_task_id:
            self._task_types = config['task_type_vocab_size']
        self._hidden_act = config['hidden_act']
        self._prepostprocess_dropout = config['hidden_dropout_prob']
        self._attention_dropout = config['attention_probs_dropout_prob']
        # is_noise=True disables all dropout (deterministic forward pass).
        if is_noise:
            self._prepostprocess_dropout = 0
            self._attention_dropout = 0
        self._weight_sharing = weight_sharing
        self.checkpoints = []
        # Embedding parameter names (prefixed with model_name in _build_model).
        self._word_emb_name = "word_embedding"
        self._pos_emb_name = "pos_embedding"
        self._sent_emb_name = "sent_embedding"
        self._task_emb_name = "task_embedding"
        self._emb_dtype = "float32"
        # Initialize all weigths by truncated normal initializer, and all biases
        # will be initialized by constant zero by default.
        self._param_initializer = fluid.initializer.TruncatedNormal(
            scale=config['initializer_range'])
        self._build_model(model_name, src_ids, position_ids, sentence_ids, task_ids,
                          input_mask)
    def _build_model(self, model_name, src_ids, position_ids, sentence_ids, task_ids,
                     input_mask):
        """Assemble embeddings + attention mask and run the encoder stack."""
        # padding id in vocabulary must be set to 0
        emb_out = fluid.layers.embedding(
            input=src_ids,
            size=[self._voc_size, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=model_name + self._word_emb_name, initializer=self._param_initializer),
            is_sparse=False)
        position_emb_out = fluid.layers.embedding(
            input=position_ids,
            size=[self._max_position_seq_len, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=model_name + self._pos_emb_name, initializer=self._param_initializer))
        sent_emb_out = fluid.layers.embedding(
            sentence_ids,
            size=[self._sent_types, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=model_name + self._sent_emb_name, initializer=self._param_initializer))
        # Sum the three (or four) embedding streams.
        emb_out = emb_out + position_emb_out
        emb_out = emb_out + sent_emb_out
        if self._use_task_id:
            task_emb_out = fluid.layers.embedding(
                task_ids,
                size=[self._task_types, self._emb_size],
                dtype=self._emb_dtype,
                param_attr=fluid.ParamAttr(
                    name=model_name + self._task_emb_name,
                    initializer=self._param_initializer))
            emb_out = emb_out + task_emb_out
        # 'nd' = layer norm then dropout on the summed embeddings.
        emb_out = pre_process_layer(
            emb_out, 'nd', self._prepostprocess_dropout, name=model_name + 'pre_encoder')
        # Build the additive attention bias from the padding mask:
        # outer product of the mask with itself, then scale so that padded
        # positions get a large negative bias ((m-1)*10000).
        self_attn_mask = fluid.layers.matmul(
            x=input_mask, y=input_mask, transpose_y=True)
        self_attn_mask = fluid.layers.scale(
            x=self_attn_mask, scale=10000.0, bias=-1.0, bias_after_scale=False)
        # Replicate the mask for every attention head.
        n_head_self_attn_mask = fluid.layers.stack(
            x=[self_attn_mask] * self._n_head, axis=1)
        n_head_self_attn_mask.stop_gradient = True
        self._enc_out, self.checkpoints = encoder(
            enc_input=emb_out,
            attn_bias=n_head_self_attn_mask,
            n_layer=self._n_layer,
            n_head=self._n_head,
            d_key=self._emb_size // self._n_head,
            d_value=self._emb_size // self._n_head,
            d_model=self._emb_size,
            d_inner_hid=self._emb_size * 4,
            prepostprocess_dropout=self._prepostprocess_dropout,
            attention_dropout=self._attention_dropout,
            relu_dropout=0,
            hidden_act=self._hidden_act,
            preprocess_cmd="",
            postprocess_cmd="dan",
            param_initializer=self._param_initializer,
            model_name=model_name,
            name=model_name+'encoder')
    def get_sequence_output(self):
        """Return the full encoder output (all token representations)."""
        return self._enc_out
    def get_cls_output(self):
        """Get the first feature of each sequence for classification"""
        cls_output = fluid.layers.slice(
            input=self._enc_out, axes=[1], starts=[0], ends=[1])
        cls_output = fluid.layers.squeeze(cls_output, axes=[1])
        return cls_output
    def get_pooled_output(self):
        """Get the first ([CLS]) feature of each sequence, passed through a
        tanh FC layer ("pooled_fc"), for classification."""
        next_sent_feat = fluid.layers.slice(
            input=self._enc_out, axes=[1], starts=[0], ends=[1])
        next_sent_feat = fluid.layers.fc(
            input=next_sent_feat,
            size=self._emb_size,
            act="tanh",
            param_attr=fluid.ParamAttr(
                name="pooled_fc.w_0", initializer=self._param_initializer),
            bias_attr="pooled_fc.b_0")
        return next_sent_feat
    def get_lm_output(self, mask_label, mask_pos):
        """Get the masked-LM loss for pretraining.

        Gathers the features at `mask_pos`, applies the transform head, and
        scores against the vocabulary (optionally sharing the input word
        embedding as the output projection).
        """
        mask_pos = fluid.layers.cast(x=mask_pos, dtype='int32')
        # extract the first token feature in each sentence
        self.next_sent_feat = self.get_pooled_output()
        reshaped_emb_out = fluid.layers.reshape(
            x=self._enc_out, shape=[-1, self._emb_size])
        # extract masked tokens' feature
        mask_feat = fluid.layers.gather(input=reshaped_emb_out, index=mask_pos)
        # transform: fc
        mask_trans_feat = fluid.layers.fc(
            input=mask_feat,
            size=self._emb_size,
            act=self._hidden_act,
            param_attr=fluid.ParamAttr(
                name='mask_lm_trans_fc.w_0',
                initializer=self._param_initializer),
            bias_attr=fluid.ParamAttr(name='mask_lm_trans_fc.b_0'))
        # transform: layer norm
        # NOTE(review): the bias here is initialized to Constant(1.) rather
        # than the usual 0. — looks suspicious, but only affects fresh
        # (non-checkpoint) initialization; confirm against released params.
        mask_trans_feat = fluid.layers.layer_norm(
            mask_trans_feat,
            begin_norm_axis=len(mask_trans_feat.shape) - 1,
            param_attr=fluid.ParamAttr(
                name='mask_lm_trans_layer_norm_scale',
                initializer=fluid.initializer.Constant(1.)),
            bias_attr=fluid.ParamAttr(
                name='mask_lm_trans_layer_norm_bias',
                initializer=fluid.initializer.Constant(1.)))
        # transform: layer norm
        #mask_trans_feat = pre_process_layer(
        #    mask_trans_feat, 'n', name='mask_lm_trans')
        mask_lm_out_bias_attr = fluid.ParamAttr(
            name="mask_lm_out_fc.b_0",
            initializer=fluid.initializer.Constant(value=0.0))
        if self._weight_sharing:
            # Reuse the word embedding matrix as the output projection.
            fc_out = fluid.layers.matmul(
                x=mask_trans_feat,
                y=fluid.default_main_program().global_block().var(
                    self._word_emb_name),
                transpose_y=True)
            fc_out += fluid.layers.create_parameter(
                shape=[self._voc_size],
                dtype=self._emb_dtype,
                attr=mask_lm_out_bias_attr,
                is_bias=True)
        else:
            fc_out = fluid.layers.fc(input=mask_trans_feat,
                                     size=self._voc_size,
                                     param_attr=fluid.ParamAttr(
                                         name="mask_lm_out_fc.w_0",
                                         initializer=self._param_initializer),
                                     bias_attr=mask_lm_out_bias_attr)
        mask_lm_loss = fluid.layers.softmax_with_cross_entropy(
            logits=fc_out, label=mask_label)
        mean_mask_lm_loss = fluid.layers.mean(mask_lm_loss)
        return mean_mask_lm_loss
    def get_task_output(self, task, task_labels):
        """Classification head for an auxiliary task.

        `task` is a dict with "num_labels" and "task_name"; returns
        (mean loss, accuracy). Requires get_lm_output() to have been called
        first (it sets self.next_sent_feat).
        """
        task_fc_out = fluid.layers.fc(
            input=self.next_sent_feat,
            size=task["num_labels"],
            param_attr=fluid.ParamAttr(
                name=task["task_name"] + "_fc.w_0",
                initializer=self._param_initializer),
            bias_attr=task["task_name"] + "_fc.b_0")
        task_loss, task_softmax = fluid.layers.softmax_with_cross_entropy(
            logits=task_fc_out, label=task_labels, return_softmax=True)
        task_acc = fluid.layers.accuracy(input=task_softmax, label=task_labels)
        mean_task_loss = fluid.layers.mean(task_loss)
        return mean_task_loss, task_acc
| 10,858 | 38.631387 | 92 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/metric/convert_recall_res_to_json.py | # Convert the retrieval output to standard json format
# loading files: para.map.json -> mapping from row id of para to pid in md5
# loading files: q2qid.dev.json -> mapping from query in Chinese to qid in md5
import hashlib
import json
import sys
from collections import defaultdict
# Convert dual-encoder recall rows into {qid: [top-50 pid list]} JSON, using
# the query-to-id and paragraph-row-to-id mapping files given on argv.
query_map_path = sys.argv[1]
para_map_path = sys.argv[2]
recall_path = sys.argv[3]
out_path = 'output/dual_res.json'

# query text (Chinese) -> md5 query id
with open(query_map_path, "r") as map_file:
    q2qid = json.load(map_file)

# paragraph row number -> md5 paragraph id
with open(para_map_path, "r") as map_file:
    pcid2pid = json.load(map_file)

ranked = defaultdict(list)
with open(recall_path, 'r') as recall_file:
    for row in recall_file:
        query, row_pid, _rank, _score = row.strip().split('\t')
        ranked[q2qid[query]].append(pcid2pid[row_pid])

# every query must come with exactly 50 recalled paragraphs
for qid in list(ranked.keys()):
    assert len(ranked[qid]) == 50

with open('output/dual_res.json', 'w', encoding='utf-8') as out_file:
    json.dump(ranked, out_file, ensure_ascii=False, indent='\t')
| 983 | 25.594595 | 78 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/metric/convert_rerank_res_to_json.py | import csv
import sys
import json
from collections import defaultdict
# Merge rerank scores with (qid, pid) rows, keep the top 50 per query and
# dump them as {qid: [pid ranked 1..50, 0-padded]} JSON.
score_path = sys.argv[1]
pair_path = sys.argv[2]
out_path = 'output/cross_res.json'

with open(score_path, 'r') as score_file:
    score_list = [float(row.strip()) for row in score_file]

pairs = []
with open(pair_path, 'r') as pair_file:
    for row in pair_file:
        fields = row.strip().split('\t')
        pairs.append((fields[0], fields[1]))

# Group candidate (score, pid) tuples by query id, in first-seen order.
per_query = defaultdict(list)
for (qid, pid), score in zip(pairs, score_list):
    per_query[qid].append((score, pid))

ranked = dict()
for qid, cands in per_query.items():
    # Highest score first; score ties fall back to pid comparison.
    cands.sort(reverse=True)
    pid_slots = [0] * 50
    for position, (_score, pid) in enumerate(cands[:50]):
        pid_slots[position] = pid
    ranked[qid] = pid_slots

with open('output/cross_res.json', 'w') as out_file:
    json.dump(ranked, out_file, ensure_ascii=False, indent='\t')
| 983 | 19.5 | 54 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/metric/evaluation.py | """
This module computes evaluation metrics for MSMARCO dataset on the ranking task.
Command line:
python msmarco_eval_ranking.py <path_to_reference_file> <path_to_candidate_file>
Creation Date : 06/12/2018
Last Modified : 1/21/2019
Authors : Daniel Campos <dacamp@microsoft.com>, Rutger van Haasteren <ruvanh@microsoft.com>
"""
import sys
import json
from collections import Counter
MaxMRRRank = 10
def load_reference_from_stream(f):
    """Parse reference relevance judgments from an iterable of JSON lines.

    Each line must decode to {"question_id": ..., "answer_paragraphs":
    [{"paragraph_id": ...}, ...]}. Lines for the same question_id accumulate.

    Args:
        f: iterable of strings (a file object or list of lines).
    Returns:
        dict mapping question_id to a list of relevant paragraph ids.
    Raises:
        IOError: if a line is not valid JSON or lacks the expected keys.
    """
    qids_to_relevant_passageids = {}
    for line in f:
        # Narrow exception types: a bare `except:` would also swallow
        # KeyboardInterrupt/SystemExit.
        try:
            sample = json.loads(line.strip())
            qid = sample["question_id"]
            pids = qids_to_relevant_passageids.setdefault(qid, [])
            for answer_paragraph in sample["answer_paragraphs"]:
                pids.append(answer_paragraph["paragraph_id"])
        except (ValueError, KeyError, TypeError) as exc:
            raise IOError('\"%s\" is not valid format' % line) from exc
    return qids_to_relevant_passageids
def load_reference(path_to_reference):
    """Load reference relevant passages from a JSON-lines file.

    Args:
        path_to_reference (str): path of the file to load.
    Returns:
        dict mapping query_id to its list of relevant passage ids.
    """
    with open(path_to_reference, 'r') as ref_file:
        return load_reference_from_stream(ref_file)
def load_candidate_from_stream(f):
    """Parse a submitted ranking from a JSON stream.

    The stream must decode to {qid: [pid, pid, ...]}. Each query's list is
    truncated/padded to exactly 50 slots, empty slots being the int 0.

    Args:
        f: readable file-like object containing one JSON document.
    Returns:
        dict mapping query_id to a fixed-length list of 50 passage ids.
    Raises:
        IOError: if the stream is not valid JSON with the expected shape.
    """
    qid_to_ranked_candidate_passages = {}
    # Narrow exception types instead of a bare `except:` that would also
    # swallow KeyboardInterrupt/SystemExit.
    try:
        preds = json.load(f)
        for qid, pids in preds.items():
            ranked = [0] * 50
            for rank, pid in enumerate(pids[:50]):
                ranked[rank] = pid
            qid_to_ranked_candidate_passages[qid] = ranked
    except (ValueError, AttributeError, TypeError) as exc:
        raise IOError('Submitted file is not valid format') from exc
    return qid_to_ranked_candidate_passages
def load_candidate(path_to_candidate):
    """Load a candidate ranking from a JSON file.

    Args:
        path_to_candidate (str): path of the file to load.
    Returns:
        dict mapping query_id to a 50-slot list of passage ids ranked by
        relevance (0 marks an empty slot).
    """
    with open(path_to_candidate, 'r') as cand_file:
        return load_candidate_from_stream(cand_file)
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Perform quality checks on the candidate ranking.

    A passage id may appear at most once per query (the 0 padding value is
    exempt). The reference dict is accepted for interface compatibility but
    is not inspected.

    Args:
        qids_to_relevant_passageids (dict): reference qid -> relevant pids.
        qids_to_ranked_candidate_passages (dict): qid -> ranked candidate pids.
    Returns:
        (bool, str): (True, '') when valid, otherwise (False, message) for
        the first offending query found.
    """
    # NOTE: the previous version also built candidate/reference qid sets that
    # were never used; that dead code is removed.
    for qid, candidate_pids in qids_to_ranked_candidate_passages.items():
        duplicate_pids = [pid for pid, count in Counter(candidate_pids).items()
                          if count > 1 and pid != 0]
        if duplicate_pids:
            message = "Cannot rank a passage multiple times for a single query. QID={qid}, PID={pid}".format(
                qid=qid, pid=duplicate_pids[0])
            return False, message
    return True, ''
def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Compute MRR@10 and recall metrics for a candidate ranking.

    Args:
        qids_to_relevant_passageids (dict): reference qid -> relevant pids
            (as read by load_reference / load_reference_from_stream).
        qids_to_ranked_candidate_passages (dict): qid -> ranked candidate
            pids, each list at least MaxMRRRank long (loaders pad to 50).
    Returns:
        dict with keys 'MRR@10', 'recall@1', 'recall@50', 'QueriesRanked'.
        All rates are normalized by the number of reference queries.
    Raises:
        IOError: if no candidate query id appears in the reference.
    """
    # NOTE: removed dead code from the previous version — an unused counter,
    # a `ranking` list used only as an emptiness flag, and a recall@all set
    # whose output line was already commented out.
    MRR = 0.0
    num_judged = 0  # queries present in both candidates and reference
    recall_q_top1 = set()
    recall_q_top50 = set()
    for qid, candidate_pid in qids_to_ranked_candidate_passages.items():
        if qid not in qids_to_relevant_passageids:
            continue
        num_judged += 1
        target_pid = qids_to_relevant_passageids[qid]
        # Reciprocal rank of the first relevant hit within the top MaxMRRRank.
        for i in range(MaxMRRRank):
            if candidate_pid[i] in target_pid:
                MRR += 1.0 / (i + 1)
                break
        # First relevant hit anywhere in the candidate list drives recall.
        for i, pid in enumerate(candidate_pid):
            if pid in target_pid:
                if i < 50:
                    recall_q_top50.add(qid)
                if i == 0:
                    recall_q_top1.add(qid)
                break
    if num_judged == 0:
        raise IOError("No matching QIDs found. Are you sure you are scoring the evaluation set?")
    num_refs = len(qids_to_relevant_passageids)
    all_scores = {}
    all_scores['MRR@10'] = MRR / num_refs
    all_scores["recall@1"] = len(recall_q_top1) * 1.0 / num_refs
    all_scores["recall@50"] = len(recall_q_top50) * 1.0 / num_refs
    all_scores['QueriesRanked'] = len(qids_to_ranked_candidate_passages)
    return all_scores
def compute_metrics_from_files(path_to_reference, path_to_candidate, perform_checks=True):
    """Load reference and candidate rankings from disk and score the latter.

    Args:
        path_to_reference (str): JSON-lines reference file.
        path_to_candidate (str): JSON candidate-ranking file.
        perform_checks (bool): run duplicate-passage sanity checks and print
            any resulting message before scoring.
    Returns:
        dict of metrics as produced by compute_metrics.
    """
    reference = load_reference(path_to_reference)
    candidates = load_candidate(path_to_candidate)
    if perform_checks:
        _, message = quality_checks_qids(reference, candidates)
        if message != '':
            print(message)
    return compute_metrics(reference, candidates)
def main():
    """Command line:
    python result_eval.py <path_to_reference_file> <path_to_candidate_file>
    """
    if len(sys.argv) != 3:
        print('Usage: result_eval.py <reference ranking> <candidate ranking>')
        exit()
    path_to_reference = sys.argv[1]
    path_to_candidate = sys.argv[2]
    metrics = compute_metrics_from_files(path_to_reference, path_to_candidate)
    # Emit the metrics as a single JSON object with sorted keys.
    result = {name: metrics[name] for name in sorted(metrics)}
    print(json.dumps(result))
# Script entry point.
if __name__ == '__main__':
    main()
| 6,849 | 36.845304 | 161 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/train_de.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning on classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import time
import logging
import multiprocessing
import numpy as np
# NOTE(paddle-dev): All of these flags should be
# set before `import paddle`. Otherwise, it would
# not take any effect.
os.environ['FLAGS_eager_delete_tensor_gb'] = '0' # enable gc
import paddle.fluid as fluid
import paddle
if hasattr(paddle, 'enable_static'):
paddle.enable_static()
import reader.reader_de as reader_de
from model.ernie import ErnieConfig
from finetune.dual_encoder import create_model, evaluate, predict
from optimization import optimization
from utils.args import print_arguments, check_cuda, prepare_logger
from utils.init import init_pretraining_params, init_checkpoint
from finetune_args import parser
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
# Flags are parsed once at import time and shared by main() and the wrappers.
args = parser.parse_args()
log = logging.getLogger()
def main(args):
    """End-to-end dual-encoder fine-tuning driver.

    Builds the train/test fluid programs, restores a checkpoint or pretrained
    params, runs the fleet distributed training loop with periodic
    checkpointing, and triggers validation/prediction via the wrappers below.
    """
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()
    # Pick the execution place; dev_count is later overridden by the fleet
    # worker count when training.
    if args.use_cuda:
        dev_list = fluid.cuda_places()
        place = dev_list[0]
        dev_count = len(dev_list)
    else:
        place = fluid.CPUPlace()
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
    exe = fluid.Executor(place)
    reader = reader_de.ClassifyReader(
        vocab_path=args.vocab_path,
        label_map_config=args.label_map_config,
        q_max_seq_len=args.q_max_seq_len,
        p_max_seq_len=args.p_max_seq_len,
        total_num=args.train_data_size,
        do_lower_case=args.do_lower_case,
        in_tokens=args.in_tokens,
        random_seed=args.random_seed,
        tokenizer=args.tokenizer,
        for_cn=args.for_cn,
        task_id=args.task_id)
    if not (args.do_train or args.do_val or args.do_test):
        raise ValueError("For args `do_train`, `do_val` and `do_test`, at "
                         "least one of them must be True.")
    if args.do_test:
        assert args.test_save is not None
    startup_prog = fluid.Program()
    if args.random_seed is not None:
        startup_prog.random_seed = args.random_seed
    if args.predict_batch_size == None:
        args.predict_batch_size = args.batch_size
    if args.do_train:
        # Collective (multi-GPU/multi-node) training via fleet.
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        dev_count = fleet.worker_num()
        train_data_generator = reader.data_generator(
            input_file=args.train_set,
            batch_size=args.batch_size,
            epoch=args.epoch,
            dev_count=1,
            trainer_id=fleet.worker_index(),
            trainer_num=fleet.worker_num(),
            shuffle=True,
            phase="train")
        num_train_examples = reader.get_num_examples(args.train_set)
        if args.in_tokens:
            if args.batch_size < args.max_seq_len:
                raise ValueError('if in_tokens=True, batch_size should greater than max_sqelen, got batch_size:%d seqlen:%d' % (args.batch_size, args.max_seq_len))
            max_train_steps = args.epoch * num_train_examples // (
                args.batch_size // args.max_seq_len) // dev_count
        else:
            max_train_steps = args.epoch * num_train_examples // args.batch_size // dev_count
        warmup_steps = int(max_train_steps * args.warmup_proportion)
        log.info("Device count: %d" % dev_count)
        log.info("Num train examples: %d" % num_train_examples)
        log.info("Max train steps: %d" % max_train_steps)
        log.info("Num warmup steps: %d" % warmup_steps)
        train_program = fluid.Program()
        # use fleet api
        exec_strategy = fluid.ExecutionStrategy()
        if args.use_fast_executor:
            exec_strategy.use_experimental_executor = True
        exec_strategy.num_threads = dev_count
        if args.is_distributed:
            exec_strategy.num_threads = 3
        exec_strategy.num_iteration_per_drop_scope = args.num_iteration_per_drop_scope
        # Distributed strategy: NCCL comms, optional recompute and AMP.
        dist_strategy = DistributedStrategy()
        dist_strategy.exec_strategy = exec_strategy
        dist_strategy.nccl_comm_num = 1
        if args.is_distributed:
            dist_strategy.nccl_comm_num = 2
            dist_strategy.use_hierarchical_allreduce = True
        if args.use_recompute:
            dist_strategy.forward_recompute = True
            dist_strategy.enable_sequential_execution = True
        if args.use_mix_precision:
            dist_strategy.use_amp = True
        with fluid.program_guard(train_program, startup_prog):
            with fluid.unique_name.guard():
                train_pyreader, graph_vars, checkpoints = create_model(
                    args,
                    pyreader_name='train_reader',
                    ernie_config=ernie_config,
                    batch_size=args.batch_size,
                    fleet_handle=fleet)
                if args.use_recompute:
                    dist_strategy.recompute_checkpoints=checkpoints
                scheduled_lr = optimization(
                    loss=graph_vars["loss"],
                    warmup_steps=warmup_steps,
                    num_train_steps=max_train_steps,
                    learning_rate=args.learning_rate,
                    train_program=train_program,
                    startup_prog=startup_prog,
                    weight_decay=args.weight_decay,
                    scheduler=args.lr_scheduler,
                    use_dynamic_loss_scaling=args.use_dynamic_loss_scaling,
                    incr_every_n_steps=args.incr_every_n_steps,
                    decr_every_n_nan_or_inf=args.decr_every_n_nan_or_inf,
                    incr_ratio=args.incr_ratio,
                    decr_ratio=args.decr_ratio,
                    dist_strategy=dist_strategy,
                    use_lamb=args.use_lamb)
    if args.do_val or args.do_test:
        # Separate inference program sharing parameters via unique_name.
        test_prog = fluid.Program()
        with fluid.program_guard(test_prog, startup_prog):
            with fluid.unique_name.guard():
                test_pyreader, test_graph_vars = create_model(
                    args,
                    pyreader_name='test_reader',
                    ernie_config=ernie_config,
                    batch_size=args.predict_batch_size,
                    is_prediction=True)
        test_prog = test_prog.clone(for_test=True)
    train_program = fleet.main_program
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    # Parameter initialization: checkpoint takes precedence over pretrained
    # params when both are supplied.
    if args.do_train:
        if args.init_checkpoint and args.init_pretraining_params:
            log.warning(
                "WARNING: args 'init_checkpoint' and 'init_pretraining_params' "
                "both are set! Only arg 'init_checkpoint' is made valid.")
        if args.init_checkpoint:
            init_checkpoint(
                exe,
                args.init_checkpoint,
                main_program=startup_prog)
        elif args.init_pretraining_params:
            init_pretraining_params(
                exe,
                args.init_pretraining_params,
                main_program=startup_prog)
    elif args.do_val or args.do_test:
        if not args.init_checkpoint:
            raise ValueError("args 'init_checkpoint' should be set if"
                             "only doing validation or testing!")
        init_checkpoint(
            exe,
            args.init_checkpoint,
            main_program=startup_prog)
    if args.do_train:
        train_exe = exe
        train_pyreader.decorate_tensor_provider(train_data_generator)
    else:
        train_exe = None
    test_exe = exe
    if args.do_train:
        # Main training loop; terminates on the pyreader's EOFException.
        train_pyreader.start()
        steps = 0
        if warmup_steps > 0:
            graph_vars["learning_rate"] = scheduled_lr
        ce_info = []
        time_begin = time.time()
        last_epoch = 0
        current_epoch = 0
        total_loss = []
        while True:
            try:
                steps += 1
                # Only worker 0 fetches metrics / logs / saves; others just run.
                if fleet.worker_index() != 0:
                    train_exe.run(fetch_list=[], program=train_program)
                    continue
                if steps % args.skip_steps != 0:
                    train_exe.run(fetch_list=[], program=train_program)
                else:
                    outputs = evaluate(
                        train_exe,
                        train_program,
                        train_pyreader,
                        graph_vars,
                        "train",
                        metric=args.metric)
                    if args.verbose:
                        verbose = "train pyreader queue size: %d, " % train_pyreader.queue.size(
                        )
                        verbose += "learning rate: %f" % (
                            outputs["learning_rate"]
                            if warmup_steps > 0 else args.learning_rate)
                        log.info(verbose)
                    current_example, current_epoch = reader.get_train_progress()
                    time_end = time.time()
                    used_time = time_end - time_begin
                    total_loss.append(outputs["loss"])
                    log.info(
                        "epoch: %d, progress: %d/%d, step: %d, ave loss: %f, "
                        "ave acc: %f, speed: %f steps/s" %
                        (current_epoch, current_example * dev_count, num_train_examples,
                         steps, np.mean(total_loss), outputs["accuracy"],
                         args.skip_steps / used_time))
                    ce_info.append(
                        [outputs["loss"], outputs["accuracy"], used_time])
                    time_begin = time.time()
                if steps % args.save_steps == 0:
                    save_path = os.path.join(args.checkpoints,
                                             "step_" + str(steps))
                    fluid.io.save_persistables(exe, save_path, fleet._origin_program)
                # if steps % args.validation_steps == 0 or last_epoch != current_epoch:
                if steps % args.validation_steps == 0:
                    # evaluate dev set
                    if args.do_val:
                        evaluate_wrapper(args, reader, exe, test_prog,
                                         test_pyreader, test_graph_vars,
                                         current_epoch, steps)
                    if args.do_test:
                        predict_wrapper(args, reader, exe, test_prog,
                                        test_pyreader, test_graph_vars,
                                        current_epoch, steps)
                if last_epoch != current_epoch:
                    last_epoch = current_epoch
            except fluid.core.EOFException:
                # Data exhausted: save a final checkpoint and stop.
                save_path = os.path.join(args.checkpoints, "step_" + str(steps))
                fluid.io.save_persistables(exe, save_path, fleet._origin_program)
                train_pyreader.reset()
                break
    # final eval on dev set
    if args.do_val:
        evaluate_wrapper(args, reader, exe, test_prog, test_pyreader,
                         test_graph_vars, current_epoch, steps)
    # final eval on test set
    if args.do_test:
        predict_wrapper(args, reader, exe, test_prog, test_pyreader, test_graph_vars,
                        current_epoch, steps)
def evaluate_wrapper(args, reader, exe, test_prog, test_pyreader, graph_vars,
                     epoch, steps):
    """Run evaluation over each comma-separated dataset in args.dev_set and
    log the resulting metrics together with the current epoch/step."""
    # evaluate dev set
    for ds in args.dev_set.split(','):
        test_pyreader.decorate_tensor_provider(
            reader.data_generator(
                ds,
                batch_size=args.predict_batch_size,
                epoch=1,
                dev_count=1,
                shuffle=False))
        log.info("validation result of dataset {}:".format(ds))
        evaluate_info = evaluate(
            exe,
            test_prog,
            test_pyreader,
            graph_vars,
            "dev",
            metric=args.metric)
        log.info(evaluate_info + ', file: {}, epoch: {}, steps: {}'.format(
            ds, epoch, steps))
def predict_wrapper(args, reader, exe, test_prog, test_pyreader, graph_vars,
                    epoch, steps):
    """Run prediction over each test set in args.test_set and write one
    probability per line to the matching args.test_save path, suffixed with
    the current epoch and step.

    Args:
        args: parsed flags (uses test_set, test_save, predict_batch_size).
        reader: dataset reader providing data_generator.
        exe / test_prog / test_pyreader / graph_vars: fluid handles built
            in main().
        epoch, steps: training progress, appended to each save path.
    """
    test_sets = args.test_set.split(',')
    save_dirs = args.test_save.split(',')
    assert len(test_sets) == len(save_dirs), \
        "test_set and test_save must list the same number of entries"
    for test_f, save_f in zip(test_sets, save_dirs):
        test_pyreader.decorate_tensor_provider(
            reader.data_generator(
                test_f,
                batch_size=args.predict_batch_size,
                epoch=1,
                dev_count=1,
                shuffle=False))
        save_path = save_f + '.' + str(epoch) + '.' + str(steps)
        log.info("testing {}, save to {}".format(test_f, save_path))
        qids, preds, probs = predict(
            exe,
            test_prog,
            test_pyreader,
            graph_vars)
        # BUG FIX: the old code warned "save dir exsits ... will skip saving"
        # when the directory existed but then saved anyway, and it crashed on
        # os.makedirs('') when save_path had no directory component.
        save_dir = os.path.dirname(save_path)
        if save_dir and not os.path.exists(save_dir):
            os.makedirs(save_dir)
        print ("DEBUG:\t" + str(len(probs)))
        with open(save_path, 'w') as f:
            for p in probs:
                f.write('{}\n'.format(p))
# Script entry point: set up logging, echo flags, verify CUDA, then train.
if __name__ == '__main__':
    prepare_logger(log)
    print_arguments(args)
    check_cuda(args.use_cuda)
    main(args)
| 14,101 | 36.110526 | 163 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/optimization.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimization and learning rate scheduling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
def linear_warmup_decay(learning_rate, warmup_steps, num_train_steps):
    """ Applies linear warmup of learning rate from 0 and decay to 0.

    Builds a static-graph LR schedule variable: for the first `warmup_steps`
    steps the LR ramps linearly from 0 to `learning_rate`; afterwards it
    decays linearly (polynomial decay with power=1.0) down to 0 at
    `num_train_steps`. Returns the global variable holding the scheduled LR.
    """
    with fluid.default_main_program()._lr_schedule_guard():
        # Persistable scalar that the Switch branches below write into.
        lr = fluid.layers.tensor.create_global_var(
            shape=[1],
            value=0.0,
            dtype='float32',
            persistable=True,
            name="scheduled_learning_rate")
        # Counter incremented once per optimizer step.
        global_step = fluid.layers.learning_rate_scheduler._decay_step_counter()
        with fluid.layers.control_flow.Switch() as switch:
            with switch.case(global_step < warmup_steps):
                # Warmup phase: linear ramp 0 -> learning_rate.
                warmup_lr = learning_rate * (global_step / warmup_steps)
                fluid.layers.tensor.assign(warmup_lr, lr)
            with switch.default():
                # Decay phase: linear decay learning_rate -> 0.
                decayed_lr = fluid.layers.learning_rate_scheduler.polynomial_decay(
                    learning_rate=learning_rate,
                    decay_steps=num_train_steps,
                    end_learning_rate=0.0,
                    power=1.0,
                    cycle=False)
                fluid.layers.tensor.assign(decayed_lr, lr)
        return lr
def optimization(loss,
                 warmup_steps,
                 num_train_steps,
                 learning_rate,
                 train_program,
                 startup_prog,
                 weight_decay,
                 scheduler='linear_warmup_decay',
                 use_dynamic_loss_scaling=False,
                 incr_every_n_steps=1000,
                 decr_every_n_nan_or_inf=2,
                 incr_ratio=2.0,
                 decr_ratio=0.8,
                 dist_strategy=None,
                 use_lamb=False):
    """Builds the optimizer (Adam or Lamb) with LR schedule, global-norm
    gradient clipping and decoupled L2 weight decay, then minimizes `loss`.

    Returns the scheduled learning-rate variable.

    NOTE(review): use_dynamic_loss_scaling, incr_every_n_steps,
    decr_every_n_nan_or_inf, incr_ratio and decr_ratio are accepted but not
    referenced anywhere in this body.
    """
    if warmup_steps > 0:
        # Warmup requested: pick one of the two supported schedules.
        if scheduler == 'noam_decay':
            scheduled_lr = fluid.layers.learning_rate_scheduler\
                .noam_decay(1/(warmup_steps *(learning_rate ** 2)),
                            warmup_steps)
        elif scheduler == 'linear_warmup_decay':
            scheduled_lr = linear_warmup_decay(learning_rate, warmup_steps,
                                               num_train_steps)
        else:
            raise ValueError("Unkown learning rate scheduler, should be "
                             "'noam_decay' or 'linear_warmup_decay'")
        if use_lamb:
            optimizer = fluid.optimizer.LambOptimizer(learning_rate=scheduled_lr)
        else:
            optimizer = fluid.optimizer.Adam(learning_rate=scheduled_lr)
    else:
        # No warmup: constant learning rate held in a global variable.
        scheduled_lr = fluid.layers.create_global_var(
            name=fluid.unique_name.generate("learning_rate"),
            shape=[1],
            value=learning_rate,
            dtype='float32',
            persistable=True)
        if use_lamb:
            optimizer = fluid.optimizer.LambOptimizer(learning_rate=scheduled_lr)
        else:
            optimizer = fluid.optimizer.Adam(learning_rate=scheduled_lr)
        # Register the constant LR with the default program so later lookups
        # resolve to this variable.
        optimizer._learning_rate_map[fluid.default_main_program(
        )] = scheduled_lr
    # Clip gradients by global norm 1.0 (applied process-wide).
    fluid.clip.set_gradient_clip(
        clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0))
    def exclude_from_weight_decay(name):
        # Layer-norm scales and bias parameters are exempt from weight decay.
        if name.find("layer_norm") > -1:
            return True
        bias_suffix = ["_bias", "_b", ".b_0"]
        for suffix in bias_suffix:
            if name.endswith(suffix):
                return True
        return False
    # Snapshot every parameter's pre-update value (stop_gradient so the copy
    # is not trained); used below for decoupled weight decay.
    param_list = dict()
    for param in train_program.global_block().all_parameters():
        param_list[param.name] = param * 1.0
        param_list[param.name].stop_gradient = True
    if dist_strategy is not None:
        # use fleet api
        optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)
    _, param_grads = optimizer.minimize(loss)
    if weight_decay > 0:
        # Decoupled L2 weight decay: param -= snapshot * weight_decay * lr,
        # applied after the optimizer update, skipping excluded params.
        for param, grad in param_grads:
            if exclude_from_weight_decay(param.name):
                continue
            with param.block.program._optimized_guard(
                [param, grad]), fluid.framework.name_scope("weight_decay"):
                updated_param = param - param_list[
                    param.name] * weight_decay * scheduled_lr
                fluid.layers.assign(output=param, input=updated_param)
    return scheduled_lr
| 5,185 | 37.701493 | 83 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/tokenization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from io import open
import collections
import unicodedata
import six
def convert_to_unicode(text):
    """Return `text` as a unicode string, decoding utf-8 bytes if needed."""
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        # On PY2 `str` is a byte string and `unicode` is the text type.
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        if isinstance(text, unicode):
            return text
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
    """Return text as the native `str` type, suitable for print/logging.

    Both Python versions want `str` here, but on PY3 that is unicode while
    on PY2 it is a utf-8 byte string.
    """
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        if isinstance(text, str):
            return text
        if isinstance(text, unicode):
            return text.encode("utf-8")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
    """Load a vocabulary file into an OrderedDict of token -> id.

    Each line is either "token\tid" or just "token" (then the line number is
    used as the id). Reading stops at the first line with more than two
    tab-separated fields.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, encoding='utf8') as fin:
        for line_no, raw in enumerate(fin):
            fields = convert_to_unicode(raw.strip()).split("\t")
            if len(fields) > 2:
                # Malformed line: stop reading (original behavior).
                break
            index = fields[1] if len(fields) == 2 else line_no
            vocab[fields[0].strip()] = int(index)
    return vocab
def convert_by_vocab(vocab, items):
    """Map each item (token or id) through `vocab`, returning a list."""
    return [vocab[item] for item in items]
def convert_tokens_to_ids(vocab, tokens):
    # Thin module-level wrapper: token strings -> integer ids via `vocab`.
    return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
    # Thin module-level wrapper: integer ids -> token strings via `inv_vocab`.
    return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
    """Split *text* on runs of whitespace, returning [] for blank input."""
    # str.split() with no argument already discards leading/trailing
    # whitespace, so whitespace-only input naturally yields [].
    return text.split() if text.strip() else []
class FullTokenizer(object):
    """End-to-end tokenization: basic (punctuation/case) then WordPiece."""

    def __init__(self, vocab_file, do_lower_case=True):
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {v: k for k, v in self.vocab.items()}
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        """Basic-tokenize `text`, then WordPiece each resulting word."""
        return [piece
                for word in self.basic_tokenizer.tokenize(text)
                for piece in self.wordpiece_tokenizer.tokenize(word)]

    def convert_tokens_to_ids(self, tokens):
        """Map token strings to integer ids via the loaded vocab."""
        return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        """Map integer ids back to token strings."""
        return convert_by_vocab(self.inv_vocab, ids)
class CharTokenizer(object):
    """End-to-end tokenization for pre-segmented text: split on single
    spaces, then WordPiece each piece."""

    def __init__(self, vocab_file, do_lower_case=True):
        # `do_lower_case` is accepted for interface parity, but lowering is
        # applied unconditionally in tokenize() (original behavior).
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {v: k for k, v in self.vocab.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        """Lower-case, split on single spaces, then apply WordPiece."""
        return [piece
                for word in text.lower().split(" ")
                for piece in self.wordpiece_tokenizer.tokenize(word)]

    def convert_tokens_to_ids(self, tokens):
        """Map token strings to integer ids via the loaded vocab."""
        return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        """Map integer ids back to token strings."""
        return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
    """Runs basic tokenization: cleaning, CJK isolation, optional lower
    casing with accent stripping, and punctuation splitting."""

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.

        Args:
            do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenizes a piece of text."""
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        # Surround every CJK ideograph with spaces so each becomes its own
        # token (kept from the multilingual/Chinese BERT release; harmless
        # for English since English models contain little Chinese data).
        text = self._tokenize_chinese_chars(text)
        pieces = []
        for tok in whitespace_tokenize(text):
            if self.do_lower_case:
                tok = self._run_strip_accents(tok.lower())
            pieces.extend(self._run_split_on_punc(tok))
        return whitespace_tokenize(" ".join(pieces))

    def _run_strip_accents(self, text):
        """Strips accents (combining marks) from a piece of text."""
        decomposed = unicodedata.normalize("NFD", text)
        return "".join(ch for ch in decomposed
                       if unicodedata.category(ch) != "Mn")

    def _run_split_on_punc(self, text):
        """Splits text into runs of non-punctuation and single punct chars."""
        pieces = []
        word_open = False
        for ch in text:
            if _is_punctuation(ch):
                # Punctuation always forms its own one-char piece.
                pieces.append([ch])
                word_open = False
            else:
                if not word_open:
                    pieces.append([])
                    word_open = True
                pieces[-1].append(ch)
        return ["".join(p) for p in pieces]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        out = []
        for ch in text:
            if self._is_chinese_char(ord(ch)):
                out.extend((" ", ch, " "))
            else:
                out.append(ch)
        return "".join(out)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # CJK Unified Ideographs blocks (base, extensions A-E, and the two
        # compatibility blocks). Hangul / Hiragana / Katakana are NOT here:
        # those scripts use spaces between words and are handled like other
        # space-delimited languages.
        ranges = ((0x4E00, 0x9FFF), (0x3400, 0x4DBF), (0x20000, 0x2A6DF),
                  (0x2A700, 0x2B73F), (0x2B740, 0x2B81F), (0x2B820, 0x2CEAF),
                  (0xF900, 0xFAFF), (0x2F800, 0x2FA1F))
        return any(lo <= cp <= hi for lo, hi in ranges)

    def _clean_text(self, text):
        """Removes invalid/control characters and normalizes whitespace."""
        cleaned = []
        for ch in text:
            code = ord(ch)
            if code == 0 or code == 0xfffd or _is_control(ch):
                continue
            cleaned.append(" " if _is_whitespace(ch) else ch)
        return "".join(cleaned)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization (greedy longest-match-first)."""

    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        Uses a greedy longest-match-first algorithm against the vocabulary.
        For example:
            input  = "unaffable"
            output = ["un", "##aff", "##able"]

        Args:
            text: A single token or whitespace separated tokens, already
                passed through `BasicTokenizer`.

        Returns:
            A list of wordpiece tokens.
        """
        text = convert_to_unicode(text)
        output = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                # Overlong token: emit a single [UNK].
                output.append(self.unk_token)
                continue
            pieces = []
            start = 0
            while start < len(chars):
                # Try the longest substring first, shrinking from the right.
                end = len(chars)
                match = None
                while end > start:
                    candidate = "".join(chars[start:end])
                    if start > 0:
                        # Non-initial pieces carry the continuation prefix.
                        candidate = "##" + candidate
                    if candidate in self.vocab:
                        match = candidate
                        break
                    end -= 1
                if match is None:
                    # No vocab entry covers this position: whole token -> UNK.
                    pieces = None
                    break
                pieces.append(match)
                start = end
            if pieces is None:
                output.append(self.unk_token)
            else:
                output.extend(pieces)
        return output
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def tokenize_chinese_chars(text):
    """Segment `text` into a list where each CJK character and each
    whitespace character is its own element, and maximal runs of other
    characters are kept together."""

    # CJK Unified Ideographs blocks (base, extensions A-E, compat blocks).
    # Hangul / Hiragana / Katakana are space-delimited scripts and are
    # deliberately not included.
    cjk_blocks = ((0x4E00, 0x9FFF), (0x3400, 0x4DBF), (0x20000, 0x2A6DF),
                  (0x2A700, 0x2B73F), (0x2B740, 0x2B81F), (0x2B820, 0x2CEAF),
                  (0xF900, 0xFAFF), (0x2F800, 0x2FA1F))

    def _is_chinese_char(cp):
        return any(lo <= cp <= hi for lo, hi in cjk_blocks)

    def _is_whitespace(c):
        # 0x202F is NARROW NO-BREAK SPACE (kept from original behavior).
        return c in (" ", "\t", "\r", "\n") or ord(c) == 0x202F

    segments = []
    pending = ""
    for ch in text:
        if _is_chinese_char(ord(ch)) or _is_whitespace(ch):
            # Flush any buffered non-CJK run, then emit this char alone.
            if pending:
                segments.append(pending)
                pending = ""
            segments.append(ch)
        else:
            pending += ch
    if pending:
        segments.append(pending)
    return segments
| 14,348 | 32.921986 | 84 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/inference_de.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning on classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import time
import logging
import multiprocessing
import numpy as np
# NOTE(paddle-dev): All of these flags should be
# set before `import paddle`. Otherwise, it would
# not take any effect.
os.environ['FLAGS_eager_delete_tensor_gb'] = '0' # enable gc
import paddle.fluid as fluid
import paddle
if hasattr(paddle, 'enable_static'):
paddle.enable_static()
import reader.reader_de_infer as reader_de_infer
from model.ernie import ErnieConfig
from finetune.dual_encoder_infer import create_model, predict
from utils.args import print_arguments, check_cuda, prepare_logger
from utils.init import init_pretraining_params, init_checkpoint
from finetune_args import parser
args = parser.parse_args()
log = logging.getLogger()
def main(args):
    """Build the dual-encoder inference program, restore a checkpoint, and
    run `predict` over each comma-separated test set in `args.test_set`."""
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()
    # Device selection: first CUDA place when requested, otherwise CPU with
    # CPU_NUM (default: number of cores) parallel instances.
    if args.use_cuda:
        dev_list = fluid.cuda_places()
        place = dev_list[0]
        dev_count = len(dev_list)
    else:
        place = fluid.CPUPlace()
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
    # NOTE(review): dev_count is computed but not used below; exe is also
    # re-created after program construction, shadowing this one.
    exe = fluid.Executor(place)
    reader = reader_de_infer.ClassifyReader(
        vocab_path=args.vocab_path,
        label_map_config=args.label_map_config,
        q_max_seq_len=args.q_max_seq_len,
        p_max_seq_len=args.p_max_seq_len,
        do_lower_case=args.do_lower_case,
        in_tokens=args.in_tokens,
        random_seed=args.random_seed,
        tokenizer=args.tokenizer,
        for_cn=args.for_cn,
        task_id=args.task_id)
    assert args.test_save is not None
    startup_prog = fluid.Program()
    test_prog = fluid.Program()
    # Build the inference graph inside its own program/name scope.
    with fluid.program_guard(test_prog, startup_prog):
        with fluid.unique_name.guard():
            test_pyreader, graph_vars = create_model(
                args,
                pyreader_name='test_reader',
                ernie_config=ernie_config,
                batch_size=args.batch_size,
                is_prediction=True)
    # Freeze the graph for inference (prunes train-only ops).
    test_prog = test_prog.clone(for_test=True)
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    if not args.init_checkpoint:
        raise ValueError("args 'init_checkpoint' should be set if"
                         "only doing validation or testing!")
    # Restore trained parameters into the startup program's scope.
    init_checkpoint(
        exe,
        args.init_checkpoint,
        main_program=startup_prog)
    # test_set and test_save are parallel comma-separated lists.
    test_sets = args.test_set.split(',')
    save_dirs = args.test_save.split(',')
    assert len(test_sets) == len(save_dirs)
    batch_size = args.batch_size if args.predict_batch_size is None else args.predict_batch_size
    for test_f, save_f in zip(test_sets, save_dirs):
        test_pyreader.decorate_tensor_provider(
            reader.data_generator(
                test_f,
                batch_size=batch_size,
                epoch=1,
                dev_count=1,
                shuffle=False))
        save_path = save_f
        log.info("testing {}, save to {}".format(test_f, save_path))
        predict(
            args,
            exe,
            test_prog,
            test_pyreader,
            graph_vars,
            output_item=args.output_item,
            output_file_name=args.output_file_name,
            hidden_size=ernie_config['hidden_size'])
if __name__ == '__main__':
    # Script entry point: configure logging, echo the parsed CLI arguments,
    # verify the CUDA request is satisfiable, then run inference.
    prepare_logger(log)
    print_arguments(args)
    check_cuda(args.use_cuda)
    main(args)
| 4,204 | 31.346154 | 96 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/finetune_args.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import time
import argparse
from utils.args import ArgumentGroup
# yapf: disable
# Shared command-line interface for the fine-tuning / inference scripts.
# Arguments are organized into ArgumentGroups; `args = parser.parse_args()`
# is performed by each importing script.
parser = argparse.ArgumentParser(__doc__)
# --- model: config/checkpoint paths and task identity ---
model_g = ArgumentGroup(parser, "model", "model configuration and paths.")
model_g.add_arg("ernie_config_path", str, None, "Path to the json file for ernie model config.")
model_g.add_arg("init_checkpoint", str, None, "Init checkpoint to resume training from.")
model_g.add_arg("init_pretraining_params", str, None,
                "Init pre-training params which preforms fine-tuning from. If the "
                "arg 'init_checkpoint' has been set, this argument wouldn't be valid.")
model_g.add_arg("checkpoints", str, "checkpoints", "Path to save checkpoints.")
model_g.add_arg("is_classify", bool, True, "is_classify")
model_g.add_arg("is_regression", bool, False, "is_regression")
model_g.add_arg("task_id", int, 0, "task id")
# --- training: schedule, optimizer and loss-scaling knobs ---
train_g = ArgumentGroup(parser, "training", "training options.")
train_g.add_arg("epoch", int, 3, "Number of epoches for fine-tuning.")
train_g.add_arg("learning_rate", float, 5e-5, "Learning rate used to train with warmup.")
train_g.add_arg("lr_scheduler", str, "linear_warmup_decay",
                "scheduler of learning rate.", choices=['linear_warmup_decay', 'noam_decay'])
train_g.add_arg("weight_decay", float, 0.01, "Weight decay rate for L2 regularizer.")
train_g.add_arg("warmup_proportion", float, 0.1,
                "Proportion of training steps to perform linear learning rate warmup for.")
train_g.add_arg("save_steps", int, 10000, "The steps interval to save checkpoints.")
train_g.add_arg("validation_steps", int, 1000, "The steps interval to evaluate model performance.")
train_g.add_arg("use_recompute", bool, False, "Whether to use recompute optimizer for training.")
train_g.add_arg("use_mix_precision", bool, False, "Whether to use mix-precision optimizer for training.")
train_g.add_arg("use_cross_batch", bool, False, "Whether to use cross-batch for training.")
train_g.add_arg("use_lamb", bool, False, "Whether to use LambOptimizer for training.")
train_g.add_arg("use_dynamic_loss_scaling", bool, True, "Whether to use dynamic loss scaling.")
train_g.add_arg("test_save", str, "./checkpoints/test_result", "test_save")
train_g.add_arg("metric", str, "simple_accuracy", "metric")
train_g.add_arg("incr_every_n_steps", int, 100, "Increases loss scaling every n consecutive.")
train_g.add_arg("decr_every_n_nan_or_inf", int, 2,
                "Decreases loss scaling every n accumulated steps with nan or inf gradients.")
train_g.add_arg("incr_ratio", float, 2.0,
                "The multiplier to use when increasing the loss scaling.")
train_g.add_arg("decr_ratio", float, 0.8,
                "The less-than-one-multiplier to use when decreasing.")
# --- logging ---
log_g = ArgumentGroup(parser, "logging", "logging related.")
log_g.add_arg("skip_steps", int, 10, "The steps interval to print loss.")
log_g.add_arg("verbose", bool, False, "Whether to output verbose log.")
# --- data: dataset paths, vocab, sequence lengths and batching ---
data_g = ArgumentGroup(parser, "data", "Data paths, vocab paths and data processing options")
data_g.add_arg("tokenizer", str, "FullTokenizer",
               "ATTENTION: the INPUT must be splited by Word with blank while using SentencepieceTokenizer or WordsegTokenizer")
data_g.add_arg("train_set", str, None, "Path to training data.")
data_g.add_arg("test_set", str, None, "Path to test data.")
data_g.add_arg("dev_set", str, None, "Path to validation data.")
data_g.add_arg("vocab_path", str, None, "Vocabulary path.")
data_g.add_arg("max_seq_len", int, 512, "Number of words of the longest seqence.")
data_g.add_arg("q_max_seq_len", int, 32, "Number of words of the longest seqence.")
data_g.add_arg("p_max_seq_len", int, 256, "Number of words of the longest seqence.")
data_g.add_arg("train_data_size", int, 0, "Number of training data's total examples. Set for distribute.")
data_g.add_arg("batch_size", int, 32, "Total examples' number in batch for training. see also --in_tokens.")
data_g.add_arg("predict_batch_size", int, None, "Total examples' number in batch for predict. see also --in_tokens.")
data_g.add_arg("in_tokens", bool, False,
               "If set, the batch size will be the maximum number of tokens in one batch. "
               "Otherwise, it will be the maximum number of examples in one batch.")
data_g.add_arg("do_lower_case", bool, True,
               "Whether to lower case the input text. Should be True for uncased models and False for cased models.")
data_g.add_arg("random_seed", int, None, "Random seed.")
data_g.add_arg("label_map_config", str, None, "label_map_path.")
data_g.add_arg("num_labels", int, 2, "label number")
data_g.add_arg("diagnostic", str, None, "GLUE Diagnostic Dataset")
data_g.add_arg("diagnostic_save", str, None, "GLUE Diagnostic save f")
data_g.add_arg("max_query_length", int, 64, "Max query length.")
data_g.add_arg("max_answer_length", int, 100, "Max answer length.")
data_g.add_arg("doc_stride", int, 128,
               "When splitting up a long document into chunks, how much stride to take between chunks.")
data_g.add_arg("n_best_size", int, 20,
               "The total number of n-best predictions to generate in the nbest_predictions.json output file.")
data_g.add_arg("chunk_scheme", type=str, default="IOB", choices=["IO", "IOB", "IOE", "IOBES"], help="chunk scheme")
# --- run type: device usage, phases to run and test-output options ---
run_type_g = ArgumentGroup(parser, "run_type", "running type options.")
run_type_g.add_arg("use_cuda", bool, True, "If set, use GPU for training.")
run_type_g.add_arg("is_distributed", bool, False, "If set, then start distributed training.")
run_type_g.add_arg("use_fast_executor", bool, False, "If set, use fast parallel executor (in experiment).")
run_type_g.add_arg("num_iteration_per_drop_scope", int, 10, "Iteration intervals to drop scope.")
run_type_g.add_arg("do_train", bool, True, "Whether to perform training.")
run_type_g.add_arg("do_val", bool, True, "Whether to perform evaluation on dev data set.")
run_type_g.add_arg("do_test", bool, True, "Whether to perform evaluation on test data set.")
run_type_g.add_arg("output_item", int, 3, "Test output format.")
run_type_g.add_arg("output_file_name", str, None, "Test output file name")
run_type_g.add_arg("test_data_cnt", int, 1110000 , "total cnt of testset")
run_type_g.add_arg("use_multi_gpu_test", bool, False, "Whether to perform evaluation using multiple gpu cards")
run_type_g.add_arg("metrics", bool, True, "Whether to perform evaluation on test data set.")
run_type_g.add_arg("shuffle", bool, True, "")
run_type_g.add_arg("for_cn", bool, False, "model train for cn or for other langs.")
| 8,197 | 67.890756 | 127 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/merge.py | import sys
# Merge per-shard retrieval results into one global top-k ranking per query.
# Usage: python merge.py <shift> <top> <total_part>
#   shift      : passage-id offset between consecutive index shards
#   top        : number of results per query to read and write
#   total_part : number of shard result files
shift = int(sys.argv[1])
top = int(sys.argv[2])
total_part = int(sys.argv[3])
# Open one result file per shard. Files are read in lock-step below, so all
# shards are assumed to list the same queries in the same order, one hit per
# line ("qid\tlocal_pid\trank\tscore").
f_list = []
for part in range(total_part):
    f0 = open('output/res.top%s-part%s' % (top, part))
    f_list.append(f0)
line_list = []
for part in range(total_part):
    line = f_list[part].readline()
    line_list.append(line)
out = open('output/dev.res.top%s' % top, 'w')
last_q = ''
ans_list = {}  # global passage id -> score, accumulated for the current query
# Iterate until the last shard's file is exhausted (readline() returns '').
while line_list[-1]:
    cur_list = []
    for line in line_list:
        sub = line.strip().split('\t')
        cur_list.append(sub)
    if last_q == '':
        last_q = cur_list[0][0]
    if cur_list[0][0] != last_q:
        # Query id changed: emit the merged top-k of the previous query.
        rank = sorted(ans_list.items(), key = lambda a:a[1], reverse=True)
        for i in range(top):
            out.write("%s\t%s\t%s\t%s\n" % (last_q, rank[i][0], i+1, rank[i][1]))
        ans_list = {}
    # Shard i numbers passages locally; adding shift*i maps them back to
    # global passage ids before ranking.
    for i, sub in enumerate(cur_list):
        ans_list[int(sub[1]) + shift*i] = float(sub[-1])
    last_q = cur_list[0][0]
    line_list = []
    for f0 in f_list:
        line = f0.readline()
        line_list.append(line)
# Flush the merged ranking for the final query.
rank = sorted(ans_list.items(), key = lambda a:a[1], reverse=True)
for i in range(top):
    out.write("%s\t%s\t%s\t%s\n" % (last_q, rank[i][0], i+1, rank[i][1]))
out.close()
print('output/dev.res.top%s' % top)
| 1,260 | 25.270833 | 81 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/batching.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mask, padding and batching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange
def pad_batch_data(insts,
                   pad_idx=0,
                   return_pos=False,
                   return_input_mask=False,
                   return_max_len=False,
                   return_num_token=False,
                   return_seq_lens=False):
    """Pad `insts` (list of token-id lists) to the batch's max length.

    Always returns the padded id tensor of shape [batch, max_len, 1]
    (int64); the optional flags append, in order: position ids, an input
    mask (1.0 for real tokens, 0.0 for padding), the max length, the total
    token count, and per-instance sequence lengths. If only the padded ids
    are requested, they are returned directly rather than in a list.
    """
    outputs = []
    max_len = max(len(inst) for inst in insts)
    # Any vocab id works as padding: pad positions are masked out by the
    # input mask / loss weights and never affect gradients.
    padded = np.array(
        [list(inst) + [pad_idx] * (max_len - len(inst)) for inst in insts])
    outputs.append(padded.astype("int64").reshape([-1, max_len, 1]))
    if return_pos:
        # Position ids 0..len-1, padded with pad_idx.
        pos = np.array(
            [list(range(0, len(inst))) + [pad_idx] * (max_len - len(inst))
             for inst in insts])
        outputs.append(pos.astype("int64").reshape([-1, max_len, 1]))
    if return_input_mask:
        # 1.0 over real tokens, 0.0 over padding; avoids attention on pads.
        mask = np.array(
            [[1] * len(inst) + [0] * (max_len - len(inst)) for inst in insts])
        outputs.append(np.expand_dims(mask, axis=-1).astype("float32"))
    if return_max_len:
        outputs.append(max_len)
    if return_num_token:
        outputs.append(sum(len(inst) for inst in insts))
    if return_seq_lens:
        seq_lens = np.array([len(inst) for inst in insts])
        outputs.append(seq_lens.astype("int64").reshape([-1, 1]))
    return outputs if len(outputs) > 1 else outputs[0]
if __name__ == "__main__":
    # No standalone behavior; this module is import-only.
    pass
| 2,683 | 33.410256 | 78 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/index_search.py | import sys
import time
import faiss
import math
import numpy as np
def read_embed(file_name, dim=768, bs=3000):
    """Yield embedding batches of at most `bs` vectors from `file_name`.

    A *.npy file is loaded with numpy and yielded as array slices; any other
    file is parsed as one space-separated float vector per line (each must
    have exactly `dim` values) and yielded as lists of lists.
    """
    if file_name.endswith('npy'):
        emb = np.load(file_name)
        for start in range(0, len(emb), bs):
            yield emb[start:start + bs]
    else:
        batch = []
        with open(file_name) as inp:
            for line in inp:
                vector = [float(v) for v in line.strip().split(' ')]
                assert len(vector) == dim
                batch.append(vector)
                if len(batch) == bs:
                    yield batch
                    batch = []
        # Trailing partial batch, if any.
        if batch:
            yield batch
def load_qid(file_name):
    """Return the first tab-separated field (qid) of every line in order."""
    qids = []
    with open(file_name) as inp:
        for raw in inp:
            qids.append(raw.strip().split('\t')[0])
    return qids
def search(index, emb_file, qid_list, outfile, top_k):
    """Run nearest-neighbor search for all query embeddings in `emb_file`
    against `index`, writing "qid\\tpid\\trank\\tscore" lines to `outfile`.

    `qid_list` must be aligned with the embedding order in `emb_file`.
    """
    q_idx = 0
    with open(outfile, 'w') as out:
        for batch in read_embed(emb_file):
            queries = np.array(batch)
            res_dist, res_p_id = index.search(queries.astype('float32'), top_k)
            for row in range(len(queries)):
                qid = qid_list[q_idx]
                for rank in range(top_k):
                    out.write('%s\t%s\t%s\t%s\n' % (
                        qid, res_p_id[row][rank], rank + 1,
                        res_dist[row][rank]))
                q_idx += 1
def main():
    """CLI: <part> <topk> <query_text_file> — search one FAISS index shard
    with the precomputed query embeddings and write its top-k results."""
    part = sys.argv[1]
    topk = int(sys.argv[2])
    q_text_file = sys.argv[3]
    # Per-shard output consumed later by merge.py.
    outfile = 'output/res.top%s-part%s' % (topk, part)
    qid_list = load_qid(q_text_file)
    engine = faiss.read_index("output/para.index.part%s" % part)
    emb_file = 'output/query.emb.npy'
    search(engine, emb_file, qid_list, outfile, topk)
if __name__ == "__main__":
    main()
| 1,969 | 27.970588 | 84 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/train_ce.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning on classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import time
import logging
import multiprocessing
# NOTE(paddle-dev): All of these flags should be
# set before `import paddle`. Otherwise, it would
# not take any effect.
os.environ['FLAGS_eager_delete_tensor_gb'] = '0' # enable gc
import paddle.fluid as fluid
import paddle
if hasattr(paddle, 'enable_static'):
paddle.enable_static()
import reader.reader_ce as reader_ce
from model.ernie import ErnieConfig
from finetune.cross_encoder import create_model, evaluate, predict
from optimization import optimization
from utils.args import print_arguments, check_cuda, prepare_logger
from utils.init import init_pretraining_params, init_checkpoint
from finetune_args import parser
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
args = parser.parse_args()
log = logging.getLogger()
def main(args):
    """Train / evaluate / predict driver for the cross-encoder re-ranker.

    The phase mix is selected by ``args.do_train`` / ``do_val`` / ``do_test``;
    distributed training goes through the fleet collective API.
    """
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()

    # Device selection: first visible GPU (fleet handles the rest), or CPU
    # with CPU_NUM (default: all cores) as the device count.
    if args.use_cuda:
        dev_list = fluid.cuda_places()
        place = dev_list[0]
        dev_count = len(dev_list)
    else:
        place = fluid.CPUPlace()
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
    exe = fluid.Executor(place)

    reader = reader_ce.ClassifyReader(
        vocab_path=args.vocab_path,
        label_map_config=args.label_map_config,
        max_seq_len=args.max_seq_len,
        total_num=args.train_data_size,
        do_lower_case=args.do_lower_case,
        in_tokens=args.in_tokens,
        random_seed=args.random_seed,
        tokenizer=args.tokenizer,
        for_cn=args.for_cn,
        task_id=args.task_id)

    if not (args.do_train or args.do_val or args.do_test):
        raise ValueError("For args `do_train`, `do_val` and `do_test`, at "
                        "least one of them must be True.")
    if args.do_test:
        assert args.test_save is not None
    startup_prog = fluid.Program()
    if args.random_seed is not None:
        startup_prog.random_seed = args.random_seed

    if args.predict_batch_size == None:
        args.predict_batch_size = args.batch_size

    if args.do_train:
        # Collective (all-reduce) distributed training; dev_count becomes the
        # number of fleet workers, not local devices.
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        dev_count = fleet.worker_num()

        train_data_generator = reader.data_generator(
            input_file=args.train_set,
            batch_size=args.batch_size,
            epoch=args.epoch,
            dev_count=1,
            trainer_id=fleet.worker_index(),
            trainer_num=fleet.worker_num(),
            shuffle=True,
            phase="train")

        num_train_examples = reader.get_num_examples(args.train_set)

        # In token-budget mode the effective example count per batch is
        # batch_size // max_seq_len.
        if args.in_tokens:
            max_train_steps = args.epoch * num_train_examples // (
                args.batch_size // args.max_seq_len) // dev_count
        else:
            max_train_steps = args.epoch * num_train_examples // args.batch_size // dev_count

        warmup_steps = int(max_train_steps * args.warmup_proportion)
        log.info("Device count: %d" % dev_count)
        log.info("Num train examples: %d" % num_train_examples)
        log.info("Max train steps: %d" % max_train_steps)
        log.info("Num warmup steps: %d" % warmup_steps)

        train_program = fluid.Program()

        # use fleet api
        exec_strategy = fluid.ExecutionStrategy()
        if args.use_fast_executor:
            exec_strategy.use_experimental_executor = True
        exec_strategy.num_threads = dev_count
        if args.is_distributed:
            exec_strategy.num_threads = 3

        exec_strategy.num_iteration_per_drop_scope = args.num_iteration_per_drop_scope

        dist_strategy = DistributedStrategy()
        dist_strategy.exec_strategy = exec_strategy
        dist_strategy.nccl_comm_num = 1
        if args.is_distributed:
            dist_strategy.nccl_comm_num = 2
            dist_strategy.use_hierarchical_allreduce = True

        if args.use_mix_precision:
            dist_strategy.use_amp = True

        with fluid.program_guard(train_program, startup_prog):
            with fluid.unique_name.guard():
                train_pyreader, graph_vars = create_model(
                    args,
                    pyreader_name='train_reader',
                    ernie_config=ernie_config)
                scheduled_lr = optimization(
                    loss=graph_vars["loss"],
                    warmup_steps=warmup_steps,
                    num_train_steps=max_train_steps,
                    learning_rate=args.learning_rate,
                    train_program=train_program,
                    startup_prog=startup_prog,
                    weight_decay=args.weight_decay,
                    scheduler=args.lr_scheduler,
                    use_dynamic_loss_scaling=args.use_dynamic_loss_scaling,
                    incr_every_n_steps=args.incr_every_n_steps,
                    decr_every_n_nan_or_inf=args.decr_every_n_nan_or_inf,
                    incr_ratio=args.incr_ratio,
                    decr_ratio=args.decr_ratio,
                    dist_strategy = dist_strategy)

        if args.verbose:
            if args.in_tokens:
                lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
                    program=train_program,
                    batch_size=args.batch_size // args.max_seq_len)
            else:
                lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
                    program=train_program, batch_size=args.batch_size)
            log.info("Theoretical memory usage in training: %.3f - %.3f %s" %
                     (lower_mem, upper_mem, unit))

    if args.do_val or args.do_test:
        test_prog = fluid.Program()
        with fluid.program_guard(test_prog, startup_prog):
            with fluid.unique_name.guard():
                test_pyreader, graph_vars = create_model(
                    args,
                    pyreader_name='test_reader',
                    ernie_config=ernie_config,
                    is_prediction=True)

        test_prog = test_prog.clone(for_test=True)

    # NOTE(review): unconditionally swaps in fleet's compiled program;
    # fleet.init() only runs under do_train — confirm eval-only runs.
    train_program = fleet.main_program
    exe = fluid.Executor(place)
    exe.run(startup_prog)

    # Parameter restore: full checkpoint wins over pretraining params.
    if args.do_train:
        if args.init_checkpoint and args.init_pretraining_params:
            log.warning(
                "WARNING: args 'init_checkpoint' and 'init_pretraining_params' "
                "both are set! Only arg 'init_checkpoint' is made valid.")
        if args.init_checkpoint:
            init_checkpoint(
                exe,
                args.init_checkpoint,
                main_program=startup_prog)
        elif args.init_pretraining_params:
            init_pretraining_params(
                exe,
                args.init_pretraining_params,
                main_program=startup_prog)
    elif args.do_val or args.do_test:
        if not args.init_checkpoint:
            raise ValueError("args 'init_checkpoint' should be set if"
                             "only doing validation or testing!")
        init_checkpoint(
            exe,
            args.init_checkpoint,
            main_program=startup_prog)

    if args.do_train:
        train_exe = exe
        train_pyreader.decorate_tensor_provider(train_data_generator)
    else:
        train_exe = None
    test_exe = exe
    # if args.do_val or args.do_test:
    #     if args.use_multi_gpu_test:
    #         test_exe = fluid.ParallelExecutor(
    #             use_cuda=args.use_cuda,
    #             main_program=test_prog,
    #             share_vars_from=train_exe)

    current_epoch = 0
    steps = 0
    if args.do_train:
        train_pyreader.start()
        if warmup_steps > 0:
            graph_vars["learning_rate"] = scheduled_lr

        ce_info = []
        time_begin = time.time()
        last_epoch = 0
        # Training loop; ends when the data generator is exhausted
        # (EOFException), at which point a final checkpoint is saved.
        while True:
            try:
                steps += 1
                # log.info("step: %d" % steps)

                # Only worker 0 fetches metrics / logs / saves; the others
                # just step the program.
                if fleet.worker_index() != 0:
                    train_exe.run(fetch_list=[], program=train_program)
                    continue

                if steps % args.skip_steps != 0:
                    train_exe.run(fetch_list=[], program=train_program)

                else:
                    outputs = evaluate(
                        train_exe,
                        train_program,
                        train_pyreader,
                        graph_vars,
                        "train",
                        metric=args.metric)

                    if args.verbose:
                        verbose = "train pyreader queue size: %d, " % train_pyreader.queue.size(
                        )
                        verbose += "learning rate: %f" % (
                            outputs["learning_rate"]
                            if warmup_steps > 0 else args.learning_rate)
                        log.info(verbose)

                    current_example, current_epoch = reader.get_train_progress()
                    time_end = time.time()
                    used_time = time_end - time_begin

                    log.info(
                        "epoch: %d, progress: %d/%d, step: %d, ave loss: %f, "
                        "ave acc: %f, speed: %f steps/s" %
                        (current_epoch, current_example * dev_count, num_train_examples,
                         steps, outputs["loss"], outputs["accuracy"],
                         args.skip_steps / used_time))
                    ce_info.append(
                        [outputs["loss"], outputs["accuracy"], used_time])
                    time_begin = time.time()

                if steps % args.save_steps == 0:
                    save_path = os.path.join(args.checkpoints,
                                         "step_" + str(steps))
                    fluid.io.save_persistables(exe, save_path, fleet._origin_program)

                # if steps % args.validation_steps == 0 or last_epoch != current_epoch:
                if steps % args.validation_steps == 0:
                    # evaluate dev set
                    if args.do_val:
                        evaluate_wrapper(args, reader, exe, test_prog,
                                         test_pyreader, graph_vars,
                                         current_epoch, steps)
                    if args.do_test:
                        predict_wrapper(args, reader, exe, test_prog,
                                        test_pyreader, graph_vars,
                                        current_epoch, steps)

                if last_epoch != current_epoch:
                    last_epoch = current_epoch

            except fluid.core.EOFException:
                save_path = os.path.join(args.checkpoints, "step_" + str(steps))
                fluid.io.save_persistables(exe, save_path, fleet._origin_program)
                train_pyreader.reset()
                break

    # final eval on dev set
    if args.do_val:
        evaluate_wrapper(args, reader, exe, test_prog, test_pyreader,
                         graph_vars, current_epoch, steps)

    # final eval on test set
    if args.do_test:
        predict_wrapper(args, reader, exe, test_prog, test_pyreader, graph_vars,
                        current_epoch, steps)

    # final eval on dianostic, hack for glue-ax
    if args.diagnostic:
        test_pyreader.decorate_tensor_provider(
            reader.data_generator(
                args.diagnostic,
                batch_size=args.batch_size,
                epoch=1,
                dev_count=1,
                shuffle=False))

        log.info("Final diagnostic")
        qids, preds, probs = predict(
            test_exe,
            test_prog,
            test_pyreader,
            graph_vars)
        assert len(qids) == len(preds), '{} v.s. {}'.format(
            len(qids), len(preds))
        with open(args.diagnostic_save, 'w') as f:
            for id, s, p in zip(qids, preds, probs):
                f.write('{}\t{}\t{}\n'.format(id, s, p))

        log.info("Done final diagnostic, saving to {}".format(
            args.diagnostic_save))
def evaluate_wrapper(args, reader, exe, test_prog, test_pyreader, graph_vars,
                     epoch, steps):
    """Evaluate every dev file listed (comma-separated) in args.dev_set and
    log each result together with the current epoch/step."""
    dev_files = args.dev_set.split(',')
    for dev_file in dev_files:
        data_gen = reader.data_generator(
            dev_file,
            batch_size=args.predict_batch_size,
            epoch=1,
            dev_count=1,
            shuffle=False)
        test_pyreader.decorate_tensor_provider(data_gen)
        log.info("validation result of dataset {}:".format(dev_file))
        info = evaluate(
            exe,
            test_prog,
            test_pyreader,
            graph_vars,
            "dev",
            metric=args.metric)
        log.info(info + ', file: {}, epoch: {}, steps: {}'.format(
            dev_file, epoch, steps))
def predict_wrapper(args, reader, exe, test_prog, test_pyreader, graph_vars,
                    epoch, steps):
    """Run prediction on each test set and write one positive-class
    probability per line to the matching save file.

    ``args.test_set`` and ``args.test_save`` are parallel comma-separated
    lists; each output path gets an ``.<epoch>.<steps>`` suffix so files
    from successive checkpoints do not clobber each other.
    """
    test_sets = args.test_set.split(',')
    save_dirs = args.test_save.split(',')
    assert len(test_sets) == len(save_dirs), \
        'number of test sets must match number of save paths'

    for test_f, save_f in zip(test_sets, save_dirs):
        test_pyreader.decorate_tensor_provider(
            reader.data_generator(
                test_f,
                batch_size=args.predict_batch_size,
                epoch=1,
                dev_count=1,
                shuffle=False))

        save_path = save_f + '.' + str(epoch) + '.' + str(steps)
        log.info("testing {}, save to {}".format(test_f, save_path))
        qids, preds, probs = predict(
            exe,
            test_prog,
            test_pyreader,
            graph_vars)

        # Ensure the output directory exists.  The old code warned that an
        # existing directory "will skip saving", which was false (it always
        # saved), and crashed with os.makedirs('') when the save path had no
        # directory component — guard both cases.
        save_dir = os.path.dirname(save_path)
        if save_dir and not os.path.exists(save_dir):
            os.makedirs(save_dir)

        with open(save_path, 'w') as f:
            # One probability of the positive class per input example,
            # preserving input order.
            for p in probs:
                f.write('{}\n'.format(p[1]))
if __name__ == '__main__':
prepare_logger(log)
print_arguments(args)
check_cuda(args.use_cuda)
main(args)
| 15,056 | 35.635036 | 96 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/finetune/dual_encoder_infer.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import time
import logging
import numpy as np
import faiss
from scipy.stats import pearsonr, spearmanr
from six.moves import xrange
import paddle.fluid as fluid
from model.ernie import ErnieModel
log = logging.getLogger(__name__)
def create_model(args,
                 pyreader_name,
                 ernie_config,
                 batch_size=16,
                 is_prediction=False,
                 task_name=""):
    """Build the dual-encoder inference graph.

    Two ERNIE towers ('query_' / 'titlepara_' parameter prefixes) encode
    queries and passages separately; in-batch dot products form the logits
    and the identity matrix is the soft label (query i matches passage i).
    Returns ``(pyreader, graph_vars)`` where graph_vars exposes the loss,
    probabilities, accuracy and the raw CLS representations of both towers.
    """
    # 12 input slots: 5 query tensors, 5 passage tensors, labels, qids.
    pyreader = fluid.layers.py_reader(
        capacity=50,
        shapes=[[batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1],
                [batch_size, 1], [batch_size, 1]],
        dtypes=['int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64'],
        lod_levels=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        name=pyreader_name,
        use_double_buffer=True)

    (src_ids_q, sent_ids_q, pos_ids_q, task_ids_q, input_mask_q,
     src_ids_p, sent_ids_p, pos_ids_p, task_ids_p, input_mask_p,
     labels, qids) = fluid.layers.read_file(pyreader)

    # Query tower.
    ernie_q = ErnieModel(
        src_ids=src_ids_q,
        position_ids=pos_ids_q,
        sentence_ids=sent_ids_q,
        task_ids=task_ids_q,
        input_mask=input_mask_q,
        config=ernie_config,
        model_name='query_')
    ## pos para
    # Passage (title+para) tower; shares nothing with the query tower.
    ernie_p = ErnieModel(
        src_ids=src_ids_p,
        position_ids=pos_ids_p,
        sentence_ids=sent_ids_p,
        task_ids=task_ids_p,
        input_mask=input_mask_p,
        config=ernie_config,
        model_name='titlepara_')

    q_cls_feats = ernie_q.get_cls_output()
    p_cls_feats = ernie_p.get_cls_output()
    #p_cls_feats = fluid.layers.concat([pos_cls_feats, neg_cls_feats], axis=0)
    #src_ids_p = fluid.layers.Print(src_ids_p, message='p: ')
    #p_cls_feats = fluid.layers.Print(p_cls_feats, message='p: ')

    #multiply
    # [batch, batch] matrix of query x passage dot products.
    logits = fluid.layers.matmul(q_cls_feats, p_cls_feats, transpose_x=False, transpose_y=True)
    probs = logits
    #fluid.layers.Print(probs, message='probs: ')
    #logits2 = fluid.layers.elementwise_mul(x=q_rep, y=p_rep)
    #fluid.layers.Print(logits2, message='logits2: ')
    #probs2 = fluid.layers.reduce_sum(logits, dim=-1)
    #fluid.layers.Print(probs2, message='probs2: ')

    # In-batch supervision: the diagonal (identity matrix) marks the true
    # query/passage pairs; used as a soft label for the listwise loss.
    matrix_labels = fluid.layers.eye(batch_size, batch_size, dtype='float32')
    matrix_labels.stop_gradient=True

    #print('DEBUG:\tstart loss')
    ce_loss, _ = fluid.layers.softmax_with_cross_entropy(
        logits=logits, label=matrix_labels, soft_label=True, return_softmax=True)
    loss = fluid.layers.mean(x=ce_loss)
    #print('DEBUG:\tloss done')

    # Convert the one-hot rows back to index labels for the accuracy op.
    matrix_labels = fluid.layers.argmax(matrix_labels, axis=-1)
    matrix_labels = fluid.layers.reshape(x=matrix_labels, shape=[batch_size, 1])
    num_seqs = fluid.layers.create_tensor(dtype='int64')
    accuracy = fluid.layers.accuracy(input=probs, label=matrix_labels, total=num_seqs)

    #ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
    #    logits=logits, label=labels, return_softmax=True)
    #loss = fluid.layers.mean(x=ce_loss)
    #accuracy = fluid.layers.accuracy(
    #    input=probs, label=labels, total=num_seqs)

    graph_vars = {
        "loss": loss,
        "probs": probs,
        "accuracy": accuracy,
        "labels": labels,
        "num_seqs": num_seqs,
        "qids": qids,
        "q_rep": q_cls_feats,
        "p_rep": p_cls_feats
    }

    return pyreader, graph_vars
def build_engine(para_emb_list, dim):
    """Build a flat inner-product FAISS index over the given paragraph
    embeddings (one vector per list entry, each of dimension `dim`)."""
    engine = faiss.IndexFlatIP(dim)
    matrix = np.asarray(para_emb_list)
    engine.add(matrix.astype('float32'))
    #print ("insert done", file=sys.stderr)
    return engine
def predict(args,
            exe,
            test_program,
            test_pyreader,
            graph_vars,
            dev_count=1,
            output_item=0,
            output_file_name='emb',
            hidden_size=768):
    """Drain the pyreader and persist embeddings.

    output_item == 0 collects query representations and saves them as a
    .npy matrix; output_item == 1 collects paragraph representations and
    writes a FAISS index to output_file_name.
    """
    test_pyreader.start()
    fetch_list = [graph_vars["q_rep"].name, graph_vars["p_rep"].name,]

    collected = []
    step = 0
    while True:
        try:
            step += 1
            if step % 500 == 0:
                log.info("complete batch %s" % step)
            q_rep, p_rep = exe.run(program=test_program,
                                   fetch_list=fetch_list)
            if output_item == 0:
                for row in q_rep:
                    collected.append(np.array(row, dtype='float32'))
            elif output_item == 1:
                for row in p_rep:
                    collected.append(np.array(row, dtype='float32'))
        except fluid.core.EOFException:
            test_pyreader.reset()
            break

    log.info("predict embs cnt: %s" % len(collected))
    # Padding batches can overshoot the real example count; trim back.
    collected = collected[:args.test_data_cnt]
    log.info("cut embs cnt: %s" % len(collected))

    if output_item == 1:
        engine = build_engine(collected, hidden_size)
        faiss.write_index(engine, output_file_name)
        log.info("create index done!")
    else:
        np.save(output_file_name + '.npy', np.asarray(collected))
        log.info("save to npy file!")
| 6,355 | 34.311111 | 95 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/finetune/dual_encoder.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import time
import logging
import numpy as np
from scipy.stats import pearsonr, spearmanr
from six.moves import xrange
import paddle.fluid as fluid
from model.ernie import ErnieModel
log = logging.getLogger(__name__)
def create_model(args,
                 pyreader_name,
                 ernie_config,
                 batch_size=16,
                 is_prediction=False,
                 task_name="",
                 fleet_handle=None):
    """Build the dual-encoder training graph with in-batch (and optional
    cross-batch) negatives.

    One ERNIE tower encodes queries ('query_' prefix); a second, shared
    tower ('titlepara_' prefix) encodes both positive and hard-negative
    passages.  When ``is_prediction`` is True only per-pair scores are
    produced and the function returns ``(pyreader, graph_vars)``; otherwise
    it returns ``(pyreader, graph_vars, checkpoints)`` for recompute.
    """
    print ("DEBUG:\tclassify")
    # 17 input slots: 5 query tensors, 5 positive-passage tensors,
    # 5 negative-passage tensors, labels, qids.
    pyreader = fluid.layers.py_reader(
        capacity=50,
        shapes=[[batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1],
                [batch_size, 1], [batch_size, 1]],
        dtypes=['int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64'],
        lod_levels=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        name=task_name + "_" + pyreader_name,
        use_double_buffer=True)

    (src_ids_q, sent_ids_q, pos_ids_q, task_ids_q, input_mask_q,
     src_ids_p_pos, sent_ids_p_pos, pos_ids_p_pos, task_ids_p_pos, input_mask_p_pos,
     src_ids_p_neg, sent_ids_p_neg, pos_ids_p_neg, task_ids_p_neg, input_mask_p_neg,
     labels, qids) = fluid.layers.read_file(pyreader)

    # Query tower.
    ernie_q = ErnieModel(
        src_ids=src_ids_q,
        position_ids=pos_ids_q,
        sentence_ids=sent_ids_q,
        task_ids=task_ids_q,
        input_mask=input_mask_q,
        config=ernie_config,
        model_name='query_')
    ## pos para
    ernie_pos = ErnieModel(
        src_ids=src_ids_p_pos,
        position_ids=pos_ids_p_pos,
        sentence_ids=sent_ids_p_pos,
        task_ids=task_ids_p_pos,
        input_mask=input_mask_p_pos,
        config=ernie_config,
        model_name='titlepara_')
    ## neg para
    # Same 'titlepara_' prefix as the positive tower, so parameters are shared.
    ernie_neg = ErnieModel(
        src_ids=src_ids_p_neg,
        position_ids=pos_ids_p_neg,
        sentence_ids=sent_ids_p_neg,
        task_ids=task_ids_p_neg,
        input_mask=input_mask_p_neg,
        config=ernie_config,
        model_name='titlepara_')

    q_cls_feats = ernie_q.get_cls_output()
    pos_cls_feats = ernie_pos.get_cls_output()
    neg_cls_feats = ernie_neg.get_cls_output()
    #src_ids_p_pos = fluid.layers.Print(src_ids_p_pos, message='pos: ')
    #pos_cls_feats = fluid.layers.Print(pos_cls_feats, message='pos: ')
    # Candidate pool: [2 * batch, hidden] = positives followed by negatives.
    p_cls_feats = fluid.layers.concat([pos_cls_feats, neg_cls_feats], axis=0)

    if is_prediction:
        # Prediction scores each query only against its own passage row.
        p_cls_feats = fluid.layers.slice(p_cls_feats, axes=[0], starts=[0], ends=[batch_size])
        multi = fluid.layers.elementwise_mul(q_cls_feats, p_cls_feats)
        probs = fluid.layers.reduce_sum(multi, dim=-1)
        graph_vars = {
            "probs": probs,
            "qids": qids,
            "q_rep": q_cls_feats,
            "p_rep": p_cls_feats
        }
        return pyreader, graph_vars

    if args.use_cross_batch and fleet_handle is not None:
        # Cross-batch negatives: gather every worker's passage pool so each
        # query is scored against 2 * batch * worker_num candidates.
        print("worker num is: {}".format(fleet_handle.worker_num()))
        all_p_cls_feats = fluid.layers.collective._c_allgather(
            p_cls_feats, fleet_handle.worker_num(), use_calc_stream=True)

        #multiply
        logits = fluid.layers.matmul(q_cls_feats, all_p_cls_feats, transpose_x=False, transpose_y=True)
        worker_id = fleet_handle.worker_index()
    else:
        logits = fluid.layers.matmul(q_cls_feats, p_cls_feats, transpose_x=False, transpose_y=True)
        worker_id = 0

    probs = logits
    # Gold index of query i is its own positive inside this worker's slice
    # of the gathered pool (each worker contributes 2 * batch rows).
    all_labels = np.array(range(batch_size * worker_id * 2, batch_size * (worker_id * 2 + 1)), dtype='int64')
    matrix_labels = fluid.layers.assign(all_labels)
    matrix_labels = fluid.layers.unsqueeze(matrix_labels, axes=1)
    matrix_labels.stop_gradient=True
    # fluid.layers.Print(matrix_labels, message='matrix_labels')

    #print('DEBUG:\tstart loss')
    ce_loss = fluid.layers.softmax_with_cross_entropy(
        logits=logits, label=matrix_labels)
    loss = fluid.layers.mean(x=ce_loss)
    #print('DEBUG:\tloss done')

    num_seqs = fluid.layers.create_tensor(dtype='int64')
    accuracy = fluid.layers.accuracy(
        input=probs, label=matrix_labels, total=num_seqs)

    graph_vars = {
        "loss": loss,
        "probs": probs,
        "accuracy": accuracy,
        "labels": labels,
        "num_seqs": num_seqs,
        "qids": qids,
        "q_rep": q_cls_feats,
        "p_rep": p_cls_feats
    }

    # Recompute checkpoints from all three towers.
    cp = []
    cp.extend(ernie_q.checkpoints)
    cp.extend(ernie_pos.checkpoints)
    cp.extend(ernie_neg.checkpoints)
    return pyreader, graph_vars, cp
def evaluate_mrr(preds):
    """Mean reciprocal rank over (qid, score, label) triples.

    Assumes `preds` is grouped by qid and sorted by descending score inside
    each group; only the first relevant hit per query contributes.
    """
    mrr_sum = 0.0
    query_count = 0.0
    current_qid = None
    position = 0.0
    found = False
    for qid, _score, label in preds:
        if qid != current_qid:
            current_qid = qid
            query_count += 1
            position = 0.0
            found = False
        position += 1
        if label != 0 and not found:
            found = True
            mrr_sum += 1.0 / position
    return mrr_sum / query_count
def evaluate_map(preds):
    """Mean average precision over (qid, score, label) triples.

    Assumes `preds` is grouped by qid and sorted by descending score inside
    each group.
    """
    def _average_precision(start, end):
        # AP over preds[start:end]; 0.0 when the slice has no relevant item.
        precision_sum = 0.0
        hits = 0.0
        for offset, idx in enumerate(range(start, end)):
            if int(preds[idx][2]) != 0:
                hits += 1
                precision_sum += hits / (offset + 1)
        if int(hits) == 0:
            return 0.0
        return precision_sum / hits

    total_ap = 0.0
    num_queries = 0.0
    group_start = 0
    prev_qid = None
    for idx in range(len(preds)):
        qid = preds[idx][0]
        if qid != prev_qid:
            num_queries += 1
            if prev_qid is not None:
                total_ap += _average_precision(group_start, idx)
            group_start = idx
            prev_qid = qid
    total_ap += _average_precision(group_start, len(preds))
    return total_ap / num_queries
def evaluate(exe,
             test_program,
             test_pyreader,
             graph_vars,
             eval_phase,
             use_multi_gpu_test=False,
             metric='simple_accuracy'):
    """Run one evaluation pass over the dual-encoder graph.

    For ``eval_phase == "train"`` this fetches the current loss / accuracy
    (and learning rate, when a warmup schedule is active) from the running
    program and returns them as a dict.

    For any other phase it drains the pyreader, accumulating loss/accuracy
    and collecting qids, labels and the diagonal of the in-batch score
    matrix, then returns ``None``: metric reporting for the dual encoder is
    done offline, so the ``metric`` argument is accepted for interface
    compatibility but unused on this path.  (The original implementation
    carried a large unreachable metric-formatting tail after the return
    that referenced undefined locals; it has been removed.)
    """
    train_fetch_list = [
        graph_vars["loss"], graph_vars["accuracy"],
        graph_vars["num_seqs"]
    ]

    if eval_phase == "train":
        if "learning_rate" in graph_vars:
            train_fetch_list.append(graph_vars["learning_rate"])
        outputs = exe.run(fetch_list=train_fetch_list, program=test_program)
        ret = {"loss": np.mean(outputs[0]), "accuracy": np.mean(outputs[1])}
        if "learning_rate" in graph_vars:
            # Learning rate sits right after the three fixed entries.
            ret["learning_rate"] = float(outputs[3][0])
        return ret

    test_pyreader.start()
    total_cost, total_acc, total_num_seqs = 0.0, 0.0, 0.0
    qids, labels, scores = [], [], []
    fetch_list = [
        graph_vars["loss"], graph_vars["accuracy"],
        graph_vars["probs"], graph_vars["labels"],
        graph_vars["num_seqs"], graph_vars["qids"],
        graph_vars["q_rep"], graph_vars["p_rep"]
    ]
    while True:
        try:
            if use_multi_gpu_test:
                np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids, q_rep, p_rep = exe.run(
                    fetch_list=fetch_list)
            else:
                np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids, q_rep, p_rep = exe.run(
                    program=test_program, fetch_list=fetch_list)
            # Weight batch means by their sequence counts.
            total_cost += np.sum(np_loss * np_num_seqs)
            total_acc += np.sum(np_acc * np_num_seqs)
            total_num_seqs += np.sum(np_num_seqs)
            labels.extend(np_labels.reshape((-1)).tolist())
            if np_qids is None:
                np_qids = np.array([])
            qids.extend(np_qids.reshape(-1).tolist())
            # In-batch negatives: only the diagonal of the q x p score matrix
            # pairs each query with its own positive paragraph.
            batch_scores = np.diag(np_probs).reshape(-1).tolist()
            scores.extend(batch_scores)
        except fluid.core.EOFException:
            test_pyreader.reset()
            break
    return None
def matthews_corrcoef(preds, labels):
    """Matthews correlation coefficient for binary predictions.

    Returns 0.0 for degenerate confusion matrices (any marginal count is
    zero), matching sklearn's convention; the textbook formula would
    otherwise divide by zero and produce NaN.
    """
    preds = np.array(preds)
    labels = np.array(labels)

    tp = np.sum((labels == 1) & (preds == 1))
    tn = np.sum((labels == 0) & (preds == 0))
    fp = np.sum((labels == 0) & (preds == 1))
    fn = np.sum((labels == 1) & (preds == 0))

    denom = np.sqrt(
        float((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)))
    if denom == 0:
        return 0.0
    return ((tp * tn) - (fp * fn)) / denom
def f1_score(preds, labels):
    """Binary F1 score.

    Returns 0.0 when precision or recall is undefined (no predicted
    positives or no actual positives) instead of propagating a 0/0 NaN.
    """
    preds = np.array(preds)
    labels = np.array(labels)

    tp = np.sum((labels == 1) & (preds == 1))
    fp = np.sum((labels == 0) & (preds == 1))
    fn = np.sum((labels == 1) & (preds == 0))

    if tp + fp == 0 or tp + fn == 0:
        # No predicted positives (precision undefined) or no true positives
        # in the gold labels (recall undefined): F1 is conventionally 0.
        return 0.0
    p = tp / (tp + fp)
    r = tp / (tp + fn)
    # Epsilon guards p == r == 0 (tp == 0 with both denominators > 0).
    f1 = (2 * p * r) / (p + r + 1e-8)
    return f1
def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlation between predicted scores and labels.

    The result key is "spearman" (not the old "spearmanr"): ``evaluate``
    reads ``ret['spearman']``, so the previous key raised KeyError whenever
    the pearson_and_spearman metric was selected.
    """
    preds = np.array(preds)
    labels = np.array(labels)

    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearman": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def acc_and_f1(preds, labels):
    """Accuracy, F1, and their arithmetic mean for binary predictions."""
    pred_arr = np.array(preds)
    label_arr = np.array(labels)
    acc = simple_accuracy(pred_arr, label_arr)
    f1 = f1_score(pred_arr, label_arr)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def simple_accuracy(preds, labels):
    """Fraction of positions where preds equals labels."""
    matches = np.array(preds) == np.array(labels)
    return matches.mean()
def predict(exe,
            test_program,
            test_pyreader,
            graph_vars,
            dev_count=1):
    """Drain the pyreader and score query/paragraph pairs.

    Writes one dot-product score per line to the side file ``emb_qp`` and
    returns ``(qids, preds, probs)``.  ``preds`` is always empty (the dual
    encoder emits scores, not class predictions) and ``probs`` is the flat
    list of scores in input order.

    Fixes: the output file is now opened with ``with`` so it is closed even
    if ``exe.run`` raises, and the dead per-item ``' '.join`` embedding
    stringification (whose results were never written) has been dropped.
    """
    test_pyreader.start()
    qids, probs = [], []
    preds = []
    fetch_list = [graph_vars["probs"], graph_vars["qids"],
                  graph_vars["q_rep"], graph_vars["p_rep"]]

    with open('emb_qp', 'w') as emb_file:
        while True:
            try:
                if dev_count == 1:
                    np_probs, np_qids, q_rep, p_rep = exe.run(
                        program=test_program, fetch_list=fetch_list)
                else:
                    np_probs, np_qids, q_rep, p_rep = exe.run(fetch_list=fetch_list)

                if np_qids is None:
                    np_qids = np.array([])
                qids.extend(np_qids.reshape(-1).tolist())
                batch_scores = np_probs.reshape(-1).tolist()
                # One score per line, preserving batch order.
                for score in batch_scores:
                    emb_file.write(str(score) + '\n')
                probs.extend(batch_scores)
            except fluid.core.EOFException:
                test_pyreader.reset()
                break
    return qids, preds, probs
| 15,265 | 35.175355 | 132 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/finetune/__init__.py | 0 | 0 | 0 | py | |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/finetune/cross_encoder.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import time
import logging
import numpy as np
from scipy.stats import pearsonr, spearmanr
from six.moves import xrange
import paddle.fluid as fluid
from model.ernie import ErnieModel
log = logging.getLogger(__name__)
def create_model(args,
                 pyreader_name,
                 ernie_config,
                 is_prediction=False,
                 task_name=""):
    """Build the cross-encoder classification graph.

    A single ERNIE encodes the concatenated query/passage pair; a linear
    head over the pooled output yields ``args.num_labels`` logits.  During
    training an adversarial perturbation of the word embedding table is
    applied before the loss-bearing forward pass (see the gradient-based
    ``gp`` update below).  Returns ``(pyreader, graph_vars)``.
    """
    pyreader = fluid.layers.py_reader(
        capacity=50,
        shapes=[[-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
                [-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
                [-1, args.max_seq_len, 1], [-1, 1], [-1, 1]],
        dtypes=[
            'int64', 'int64', 'int64', 'int64', 'float32', 'int64', 'int64'
        ],
        lod_levels=[0, 0, 0, 0, 0, 0, 0],
        name=task_name + "_" + pyreader_name,
        use_double_buffer=True)

    (src_ids, sent_ids, pos_ids, task_ids, input_mask, labels,
     qids) = fluid.layers.read_file(pyreader)

    def _model(is_noise=False):
        # One forward pass; is_noise=True skips dropout (used for the
        # gradient probe that builds the adversarial perturbation).
        ernie = ErnieModel(
            src_ids=src_ids,
            position_ids=pos_ids,
            sentence_ids=sent_ids,
            task_ids=task_ids,
            input_mask=input_mask,
            config=ernie_config,
            is_noise=is_noise)

        cls_feats = ernie.get_pooled_output()
        if not is_noise:
            cls_feats = fluid.layers.dropout(
                x=cls_feats,
                dropout_prob=0.1,
                dropout_implementation="upscale_in_train")
        logits = fluid.layers.fc(
            input=cls_feats,
            size=args.num_labels,
            param_attr=fluid.ParamAttr(
                name=task_name + "_cls_out_w",
                initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
            bias_attr=fluid.ParamAttr(
                name=task_name + "_cls_out_b",
                initializer=fluid.initializer.Constant(0.)))
        """
        if is_prediction:
            probs = fluid.layers.softmax(logits)
            feed_targets_name = [
                src_ids.name, sent_ids.name, pos_ids.name, input_mask.name
            ]
            if ernie_version == "2.0":
                feed_targets_name += [task_ids.name]
            return pyreader, probs, feed_targets_name
        """
        num_seqs = fluid.layers.create_tensor(dtype='int64')
        ## add focal loss
        ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
            logits=logits, label=labels, return_softmax=True)
        loss = fluid.layers.mean(x=ce_loss)
        accuracy = fluid.layers.accuracy(
            input=probs, label=labels, total=num_seqs)
        graph_vars = {
            "loss": loss,
            "probs": probs,
            "accuracy": accuracy,
            "labels": labels,
            "num_seqs": num_seqs,
            "qids": qids
        }
        return graph_vars

    if not is_prediction:
        # Adversarial training: probe pass -> embedding-gradient direction
        # -> perturb the word embedding table -> real (loss-bearing) pass
        # -> restore the table.
        graph_vars = _model(is_noise=True)
        old_loss = graph_vars["loss"]
        token_emb = fluid.default_main_program().global_block().var("word_embedding")
        # print(token_emb)
        token_emb.stop_gradient = False
        token_gradient = fluid.gradients(old_loss, token_emb)[0]
        token_gradient.stop_gradient = False
        epsilon = 1e-8
        # L2-normalize the gradient; epsilon avoids division by zero.
        norm = (fluid.layers.sqrt(
            fluid.layers.reduce_sum(fluid.layers.square(token_gradient)) + epsilon))
        gp = (0.01 * token_gradient) / norm
        gp.stop_gradient = True
        fluid.layers.assign(token_emb + gp, token_emb)
        graph_vars = _model()
        fluid.layers.assign(token_emb - gp, token_emb)
    else:
        graph_vars = _model()
    return pyreader, graph_vars
def evaluate_mrr(preds):
    """Compute Mean Reciprocal Rank over (qid, score, label) triples.

    ``preds`` must be pre-sorted by (qid, -score) so that, within each
    query, candidates appear in descending score order (the caller in
    evaluate() sorts exactly this way).  For every query, the reciprocal
    rank of the first positive (label != 0) candidate is accumulated;
    queries with no positive candidate contribute 0.

    Returns 0.0 for empty input instead of raising ZeroDivisionError.
    """
    last_qid = None
    total_mrr = 0.0
    qnum = 0.0
    rank = 0.0
    correct = False
    for qid, score, label in preds:
        if qid != last_qid:
            # new query group: restart rank counting
            rank = 0.0
            qnum += 1
            correct = False
            last_qid = qid
        rank += 1
        if not correct and label != 0:
            total_mrr += 1.0 / rank
            correct = True
    # guard the empty-input case (the original divided by zero here)
    return total_mrr / qnum if qnum > 0 else 0.0
def evaluate(exe,
             test_program,
             test_pyreader,
             graph_vars,
             eval_phase,
             use_multi_gpu_test=False,
             metric='simple_accuracy'):
    """Run one evaluation pass and return a formatted metrics string.

    For ``eval_phase == "train"`` a single fetch of the current batch's
    loss/accuracy (and learning rate, if present) is returned as a dict.
    Otherwise the whole pyreader stream is consumed and the metric named
    by ``metric`` is computed over all collected predictions.

    :param exe: paddle Executor (assumed to be a parallel executor when
        use_multi_gpu_test is True — confirm with caller).
    :param test_program: program passed to exe.run in single-device mode.
    :param test_pyreader: pyreader feeding the evaluation data.
    :param graph_vars: dict of graph variables built by the model fn.
    :param eval_phase: "train" or an arbitrary tag used in the log line.
    :param use_multi_gpu_test: run exe without an explicit program.
    :param metric: one of 'acc_and_f1', 'matthews_corrcoef',
        'pearson_and_spearman', 'simple_accuracy', 'acc_and_f1_and_mrr'.
    :raises ValueError: for an unsupported metric name.
    """
    train_fetch_list = [
        graph_vars["loss"].name, graph_vars["accuracy"].name,
        graph_vars["num_seqs"].name
    ]
    if eval_phase == "train":
        if "learning_rate" in graph_vars:
            train_fetch_list.append(graph_vars["learning_rate"].name)
        outputs = exe.run(fetch_list=train_fetch_list, program=test_program)
        ret = {"loss": np.mean(outputs[0]), "accuracy": np.mean(outputs[1])}
        if "learning_rate" in graph_vars:
            # learning rate was appended as the 4th fetch (index 3)
            ret["learning_rate"] = float(outputs[3][0])
        return ret
    test_pyreader.start()
    total_cost, total_acc, total_num_seqs, total_label_pos_num, total_pred_pos_num, total_correct_num = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    qids, labels, scores, preds = [], [], [], []
    time_begin = time.time()
    fetch_list = [
        graph_vars["loss"].name, graph_vars["accuracy"].name,
        graph_vars["probs"].name, graph_vars["labels"].name,
        graph_vars["num_seqs"].name, graph_vars["qids"].name
    ]
    while True:
        try:
            if use_multi_gpu_test:
                np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids = exe.run(
                    fetch_list=fetch_list)
            else:
                np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids = exe.run(
                    program=test_program, fetch_list=fetch_list)
            # weight per-batch stats by the number of sequences in the batch
            total_cost += np.sum(np_loss * np_num_seqs)
            total_acc += np.sum(np_acc * np_num_seqs)
            total_num_seqs += np.sum(np_num_seqs)
            labels.extend(np_labels.reshape((-1)).tolist())
            if np_qids is None:
                np_qids = np.array([])
            qids.extend(np_qids.reshape(-1).tolist())
            # scores = probability of the positive class (column 1)
            scores.extend(np_probs[:, 1].reshape(-1).tolist())
            np_preds = np.argmax(np_probs, axis=1).astype(np.float32)
            preds.extend(np_preds)
            total_label_pos_num += np.sum(np_labels)
            total_pred_pos_num += np.sum(np_preds)
            total_correct_num += np.sum(np.dot(np_preds, np_labels))
        except fluid.core.EOFException:
            # stream exhausted: reset the reader and stop the loop
            test_pyreader.reset()
            break
    time_end = time.time()
    cost = total_cost / total_num_seqs
    elapsed_time = time_end - time_begin
    evaluate_info = ""
    if metric == 'acc_and_f1':
        ret = acc_and_f1(preds, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, ave_acc: %f, f1: %f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret['acc'], ret['f1'], total_num_seqs, elapsed_time)
    elif metric == 'matthews_corrcoef':
        ret = matthews_corrcoef(preds, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, matthews_corrcoef: %f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret, total_num_seqs, elapsed_time)
    elif metric == 'pearson_and_spearman':
        ret = pearson_and_spearman(scores, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, pearson:%f, spearman:%f, corr:%f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret['pearson'], ret['spearman'], ret['corr'], total_num_seqs, elapsed_time)
    elif metric == 'simple_accuracy':
        ret = simple_accuracy(preds, labels)
        evaluate_info = "[%s evaluation] ave loss: %f, acc:%f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret, total_num_seqs, elapsed_time)
    elif metric == "acc_and_f1_and_mrr":
        ret_a = acc_and_f1(preds, labels)
        # sort by qid ascending / score descending, the order
        # evaluate_mrr requires
        preds = sorted(
            zip(qids, scores, labels), key=lambda elem: (elem[0], -elem[1]))
        ret_b = evaluate_mrr(preds)
        evaluate_info = "[%s evaluation] ave loss: %f, acc: %f, f1: %f, mrr: %f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret_a['acc'], ret_a['f1'], ret_b, total_num_seqs, elapsed_time)
    else:
        raise ValueError('unsupported metric {}'.format(metric))
    return evaluate_info
def matthews_corrcoef(preds, labels):
    """Matthews correlation coefficient for binary {0, 1} predictions.

    :param preds: predicted labels (0/1), any array-like.
    :param labels: gold labels (0/1), same length as preds.
    :return: MCC in [-1, 1]; 0.0 when any confusion-matrix margin is
        empty (the coefficient is undefined there — the original formula
        produced a NaN / divide-by-zero warning in that case, which is
        the convention sklearn also uses).
    """
    preds = np.array(preds)
    labels = np.array(labels)
    tp = np.sum((labels == 1) & (preds == 1))
    tn = np.sum((labels == 0) & (preds == 0))
    fp = np.sum((labels == 0) & (preds == 1))
    fn = np.sum((labels == 1) & (preds == 0))
    # float() also avoids int64 overflow of the product for huge counts
    denom = np.sqrt(float((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)))
    if denom == 0:
        return 0.0
    return ((tp * tn) - (fp * fn)) / denom
def f1_score(preds, labels):
    """Binary F1 where 1 is the positive class.

    :param preds: predicted labels (0/1), any array-like.
    :param labels: gold labels (0/1), same length as preds.
    :return: F1 score; 0.0 when there are no predicted or no actual
        positives.  The precision/recall denominators carry the same
        1e-8 guard the original already used for the F1 denominator, so
        degenerate inputs yield 0.0 instead of a NaN.
    """
    preds = np.array(preds)
    labels = np.array(labels)
    tp = np.sum((labels == 1) & (preds == 1))
    fp = np.sum((labels == 0) & (preds == 1))
    fn = np.sum((labels == 1) & (preds == 0))
    # guarded denominators: tp + fp or tp + fn may legitimately be 0
    p = tp / (tp + fp + 1e-8)
    r = tp / (tp + fn + 1e-8)
    f1 = (2 * p * r) / (p + r + 1e-8)
    return f1
def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlations plus their average.

    :param preds: predicted scores, any array-like.
    :param labels: gold scores/labels, same length as preds.
    :return: dict with keys "pearson", "spearman" and "corr".

    Bug fix: the Spearman value is now exposed under the key
    "spearman", which is what evaluate() reads (ret['spearman']); the
    previous "spearmanr" key made that lookup raise a KeyError.
    """
    preds = np.array(preds)
    labels = np.array(labels)
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearman": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def acc_and_f1(preds, labels):
    """Bundle accuracy and binary F1 (plus their mean) into one dict."""
    pred_arr = np.array(preds)
    label_arr = np.array(labels)
    acc = simple_accuracy(pred_arr, label_arr)
    f1 = f1_score(pred_arr, label_arr)
    return {"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2}
def simple_accuracy(preds, labels):
    """Fraction of positions where the prediction equals the label."""
    matches = np.equal(np.array(preds), np.array(labels))
    return matches.mean()
def predict(exe,
            test_program,
            test_pyreader,
            graph_vars,
            dev_count=1):
    """Consume the pyreader stream, collecting qids, argmax predictions
    and the full probability matrix.

    :param exe: paddle Executor (assumed to be a parallel executor when
        dev_count > 1 — confirm with caller).
    :param test_program: program passed to exe.run in single-device mode.
    :param test_pyreader: pyreader feeding the prediction data.
    :param graph_vars: dict exposing at least "probs" and "qids".
    :param dev_count: number of devices; controls how exe.run is called.
    :return: (qids, preds, probs) where probs has shape
        [len(preds), num_classes].
    """
    test_pyreader.start()
    qids, scores, probs = [], [], []
    preds = []
    fetch_list = [graph_vars["probs"].name, graph_vars["qids"].name]
    while True:
        try:
            if dev_count == 1:
                np_probs, np_qids = exe.run(program=test_program,
                                            fetch_list=fetch_list)
            else:
                np_probs, np_qids = exe.run(fetch_list=fetch_list)
            if np_qids is None:
                np_qids = np.array([])
            qids.extend(np_qids.reshape(-1).tolist())
            # hard prediction = argmax over class probabilities
            np_preds = np.argmax(np_probs, axis=1).astype(np.float32)
            preds.extend(np_preds)
            probs.append(np_probs)
        except fluid.core.EOFException:
            # stream exhausted: reset the reader and stop
            test_pyreader.reset()
            break
    probs = np.concatenate(probs, axis=0).reshape([len(preds), -1])
    return qids, preds, probs
| 11,546 | 33.885196 | 132 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/reader/reader_de.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
import os
import json
import random
import logging
import numpy as np
import six
from io import open
from collections import namedtuple
import tokenization
from batching import pad_batch_data
log = logging.getLogger(__name__)
if six.PY3:
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def csv_reader(fd, delimiter='\t', trainer_id=0, trainer_num=1):
    """Lazily split each line of `fd` on `delimiter`, keeping only the
    rows assigned to this trainer (row i belongs to the trainer with
    ``i % trainer_num == trainer_id``).

    Single-field rows are wrapped in a 1-tuple; multi-field rows are
    yielded as plain lists.
    """
    def gen():
        for row_idx, raw_line in enumerate(fd):
            if row_idx % trainer_num != trainer_id:
                continue
            fields = raw_line.rstrip('\n').split(delimiter)
            if len(fields) == 1:
                yield fields,
            else:
                yield fields
    return gen()
class BaseReader(object):
    """Turns (query, positive passage, negative passage) TSV triples into
    padded id batches for dual-encoder training.

    Subclasses override `_read_tsv` (see ClassifyReader below) and must
    provide `_pad_batch_records`.
    """
    def __init__(self,
                 vocab_path,
                 label_map_config=None,
                 q_max_seq_len=128,
                 p_max_seq_len=512,
                 total_num=0,
                 do_lower_case=True,
                 in_tokens=False,
                 is_inference=False,
                 random_seed=None,
                 tokenizer="FullTokenizer",
                 for_cn=True,
                 task_id=0):
        """Build the tokenizer and record reader configuration.

        :param vocab_path: vocab file for the FullTokenizer.
        :param label_map_config: optional JSON file mapping raw labels
            to ids.
        :param q_max_seq_len: token budget for the query tower.
        :param p_max_seq_len: token budget for each passage tower.
        :param total_num: total number of training examples; used to
            derive the per-trainer example count in data_generator.
        :param in_tokens: if True, batch_size is a token budget instead
            of an example count.
        :param for_cn: strip spaces from text fields (Chinese corpora).
        """
        self.q_max_seq_len = q_max_seq_len
        self.p_max_seq_len = p_max_seq_len
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_path, do_lower_case=do_lower_case)
        self.vocab = self.tokenizer.vocab
        self.pad_id = self.vocab["[PAD]"]
        self.cls_id = self.vocab["[CLS]"]
        self.sep_id = self.vocab["[SEP]"]
        self.in_tokens = in_tokens
        self.is_inference = is_inference
        self.for_cn = for_cn
        self.task_id = task_id
        # np.random.seed(random_seed)
        self.current_example = 0
        self.current_epoch = 0
        self.num_examples = 0
        self.total_num = total_num
        if label_map_config:
            with open(label_map_config, encoding='utf8') as f:
                self.label_map = json.load(f)
        else:
            self.label_map = None
    def get_train_progress(self):
        """Gets progress for training phase."""
        return self.current_example, self.current_epoch
    def _read_tsv(self, input_file, batch_size=16, quotechar=None):
        """Reads a tab separated value file."""
        # NOTE(review): this base version consumes the first row as a
        # header; ClassifyReader overrides it with hard-coded headers and
        # extra sharding parameters.
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            headers = next(reader)
            Example = namedtuple('Example', headers)
            examples = []
            for line in reader:
                example = Example(*line)
                examples.append(example)
            return examples
    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()
    def _convert_example_to_record(self, example, q_max_seq_length, p_max_seq_length, tokenizer):
        """Converts a single `Example` into a single `Record`.

        Builds three id sequences: the query ([CLS] query [SEP], all
        segment 0) and the positive/negative passages
        ([CLS] title [SEP] para [SEP]; title segment 0, para segment 1).
        """
        query = tokenization.convert_to_unicode(example.query)
        tokens_query = tokenizer.tokenize(query)
        # reserve 2 slots for [CLS]/[SEP] in the query sequence
        self._truncate_seq_pair([], tokens_query, q_max_seq_length - 2)
        # pos title
        title_pos = tokenization.convert_to_unicode(example.title_pos)
        tokens_title_pos = tokenizer.tokenize(title_pos)
        # pos para
        para_pos = tokenization.convert_to_unicode(example.para_pos)
        tokens_para_pos = tokenizer.tokenize(para_pos)
        # reserve 3 slots for [CLS] + 2x[SEP] in each passage sequence
        self._truncate_seq_pair(tokens_title_pos, tokens_para_pos, p_max_seq_length - 3)
        # neg title
        title_neg = tokenization.convert_to_unicode(example.title_neg)
        tokens_title_neg = tokenizer.tokenize(title_neg)
        # neg para
        para_neg = tokenization.convert_to_unicode(example.para_neg)
        tokens_para_neg = tokenizer.tokenize(para_neg)
        self._truncate_seq_pair(tokens_title_neg, tokens_para_neg, p_max_seq_length - 3)
        tokens_q = []
        text_type_ids_q = []
        tokens_q.append("[CLS]")
        text_type_ids_q.append(0)
        for token in tokens_query:
            tokens_q.append(token)
            text_type_ids_q.append(0)
        tokens_q.append("[SEP]")
        text_type_ids_q.append(0)
        token_ids_q = tokenizer.convert_tokens_to_ids(tokens_q)
        position_ids_q = list(range(len(token_ids_q)))
        # debugging dump of query token ids, kept for reference:
        #f = open('tid', 'a')
        #for tid in range(len(token_ids_q)):
        #    f.write(str(token_ids_q[tid]) + ' ' + str(tokens_q[tid]) + '\n')
        ### pos_para
        tokens_p_pos = []
        text_type_ids_p_pos = []
        tokens_p_pos.append("[CLS]")
        text_type_ids_p_pos.append(0)
        for token in tokens_title_pos:
            tokens_p_pos.append(token)
            text_type_ids_p_pos.append(0)
        tokens_p_pos.append("[SEP]")
        text_type_ids_p_pos.append(0)
        for token in tokens_para_pos:
            tokens_p_pos.append(token)
            text_type_ids_p_pos.append(1)
        tokens_p_pos.append("[SEP]")
        text_type_ids_p_pos.append(1)
        token_ids_p_pos = tokenizer.convert_tokens_to_ids(tokens_p_pos)
        position_ids_p_pos = list(range(len(token_ids_p_pos)))
        #for tid in range(len(token_ids_p_pos)):
        #    f.write(str(token_ids_p_pos[tid]) + ' ' + str(tokens_p_pos[tid]) + '\n')
        #f.close()
        ### neg_para (same layout as the positive passage)
        tokens_p_neg = []
        text_type_ids_p_neg = []
        tokens_p_neg.append("[CLS]")
        text_type_ids_p_neg.append(0)
        for token in tokens_title_neg:
            tokens_p_neg.append(token)
            text_type_ids_p_neg.append(0)
        tokens_p_neg.append("[SEP]")
        text_type_ids_p_neg.append(0)
        for token in tokens_para_neg:
            tokens_p_neg.append(token)
            text_type_ids_p_neg.append(1)
        tokens_p_neg.append("[SEP]")
        text_type_ids_p_neg.append(1)
        token_ids_p_neg = tokenizer.convert_tokens_to_ids(tokens_p_neg)
        position_ids_p_neg = list(range(len(token_ids_p_neg)))
        if self.is_inference:
            Record = namedtuple('Record',
                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                'token_ids_p_pos', 'text_type_ids_p_pos', 'position_ids_p_pos', \
                'token_ids_p_neg', 'text_type_ids_p_neg', 'position_ids_p_neg'])
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p_pos=token_ids_p_pos,
                text_type_ids_p_pos=text_type_ids_p_pos,
                position_ids_p_pos=position_ids_p_pos,
                token_ids_p_neg=token_ids_p_neg,
                text_type_ids_p_neg=text_type_ids_p_neg,
                position_ids_p_neg=position_ids_p_neg)
        else:
            if self.label_map:
                label_id = self.label_map[example.label]
            else:
                label_id = example.label
            Record = namedtuple('Record',
                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                'token_ids_p_pos', 'text_type_ids_p_pos', 'position_ids_p_pos', \
                'token_ids_p_neg', 'text_type_ids_p_neg', 'position_ids_p_neg',
                'label_id', 'qid'
                ])
            qid = None
            if "qid" in example._fields:
                qid = example.qid
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p_pos=token_ids_p_pos,
                text_type_ids_p_pos=text_type_ids_p_pos,
                position_ids_p_pos=position_ids_p_pos,
                token_ids_p_neg=token_ids_p_neg,
                text_type_ids_p_neg=text_type_ids_p_neg,
                position_ids_p_neg=position_ids_p_neg,
                label_id=label_id,
                qid=qid)
        return record
    def _prepare_batch_data(self, examples, batch_size, phase=None):
        """generate batch records"""
        batch_records, max_len = [], 0
        for index, example in enumerate(examples):
            if phase == "train":
                self.current_example = index
            record = self._convert_example_to_record(example, self.q_max_seq_len,
                                                     self.p_max_seq_len, self.tokenizer)
            # track the longest passage (pos or neg) seen in this batch
            max_len = max(max_len, len(record.token_ids_p_pos))
            max_len = max(max_len, len(record.token_ids_p_neg))
            if self.in_tokens:
                # token budget: would adding one more record overflow it?
                to_append = (len(batch_records) + 1) * max_len <= batch_size
            else:
                to_append = len(batch_records) < batch_size
            if to_append:
                batch_records.append(record)
            else:
                yield self._pad_batch_records(batch_records)
                max_len = max(len(record.token_ids_p_neg), len(record.token_ids_p_pos))
                batch_records = [record]
        if batch_records:
            yield self._pad_batch_records(batch_records)
    def get_num_examples(self, input_file):
        """Return the cached example count (set by data_generator), not a
        fresh count of input_file."""
        # examples = self._read_tsv(input_file)
        # return len(examples)
        return self.num_examples
    def data_generator(self,
                       input_file,
                       batch_size,
                       epoch,
                       dev_count=1,
                       trainer_id=0,
                       trainer_num=1,
                       shuffle=True,
                       phase=None):
        """Return a generator factory yielding padded batches.

        In the train phase the file is sharded across trainers and the
        per-node example count is derived from self.total_num.
        NOTE(review): the train branch passes trainer kwargs that only
        ClassifyReader._read_tsv accepts — confirm intended use with
        that subclass.
        """
        if phase == 'train':
            # examples = examples[trainer_id: (len(examples) //trainer_num) * trainer_num : trainer_num]
            self.num_examples_per_node = self.total_num // trainer_num
            self.num_examples = self.num_examples_per_node * trainer_num
            examples = self._read_tsv(input_file, batch_size=batch_size, trainer_id=trainer_id, trainer_num=trainer_num, num_examples=self.num_examples_per_node)
            log.info('apply sharding %d/%d' % (trainer_id, trainer_num))
        else:
            examples = self._read_tsv(input_file, batch_size=batch_size)
        def wrapper():
            # buffer dev_count batches so each device gets one per step
            all_dev_batches = []
            for epoch_index in range(epoch):
                if phase == "train":
                    self.current_example = 0
                    self.current_epoch = epoch_index
                if shuffle:
                    np.random.shuffle(examples)
                for batch_data in self._prepare_batch_data(
                        examples, batch_size, phase=phase):
                    if len(all_dev_batches) < dev_count:
                        all_dev_batches.append(batch_data)
                    if len(all_dev_batches) == dev_count:
                        for batch in all_dev_batches:
                            yield batch
                        all_dev_batches = []
        def f():
            # NOTE(review): exceptions are printed and swallowed, so the
            # generator simply ends early on error.
            try:
                for i in wrapper():
                    yield i
            except Exception as e:
                import traceback
                traceback.print_exc()
        return f
class ClassifyReader(BaseReader):
    """Concrete reader for headerless 6-column TSV
    (query, title_pos, para_pos, title_neg, para_neg, label) rows; pads
    query / positive / negative id tensors into fixed-size batches.
    """
    def _read_tsv(self, input_file, batch_size=16, quotechar=None, trainer_id=0, trainer_num=1, num_examples=0):
        """Reads a tab separated value file.

        Rows are round-robin sharded across trainers by csv_reader; at
        most `num_examples` rows are kept (0 means no limit), and the
        tail is padded by repeating the last example so that the total
        is a multiple of batch_size.
        """
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f, trainer_id=trainer_id, trainer_num=trainer_num)
            # headers = next(reader)
            #headers = 'query\tpara_pos\tpara_neg\tlabel'.split('\t')
            headers = 'query\ttitle_pos\tpara_pos\ttitle_neg\tpara_neg\tlabel'.split('\t')
            text_indices = [
                index for index, h in enumerate(headers) if h != "label"
            ]
            Example = namedtuple('Example', headers)
            examples = []
            for cnt, line in enumerate(reader):
                if num_examples != 0 and cnt == num_examples:
                    break
                for index, text in enumerate(line):
                    if index in text_indices:
                        if self.for_cn:
                            # Chinese corpora: drop intra-text spaces
                            line[index] = text.replace(' ', '')
                        else:
                            line[index] = text
                example = Example(*line)
                examples.append(example)
            # NOTE(review): raises NameError when the file is empty,
            # since `example` is then unbound.
            while len(examples) % batch_size != 0:
                examples.append(example)
            return examples
    def _pad_batch_records(self, batch_records):
        """Pad a list of Records into dense numpy inputs (ids, segment
        ids, position ids, task ids and input masks for the query, the
        positive passage and the negative passage, plus labels/qids when
        not in inference mode)."""
        batch_token_ids_q = [record.token_ids_q for record in batch_records]
        batch_text_type_ids_q = [record.text_type_ids_q for record in batch_records]
        batch_position_ids_q = [record.position_ids_q for record in batch_records]
        batch_token_ids_p_pos = [record.token_ids_p_pos for record in batch_records]
        batch_text_type_ids_p_pos = [record.text_type_ids_p_pos for record in batch_records]
        batch_position_ids_p_pos = [record.position_ids_p_pos for record in batch_records]
        batch_token_ids_p_neg = [record.token_ids_p_neg for record in batch_records]
        batch_text_type_ids_p_neg = [record.text_type_ids_p_neg for record in batch_records]
        batch_position_ids_p_neg = [record.position_ids_p_neg for record in batch_records]
        if not self.is_inference:
            batch_labels = [record.label_id for record in batch_records]
            batch_labels = np.array(batch_labels).astype("int64").reshape(
                [-1, 1])
            if batch_records[0].qid:
                batch_qids = [record.qid for record in batch_records]
                batch_qids = np.array(batch_qids).astype("int64").reshape(
                    [-1, 1])
            else:
                batch_qids = np.array([]).astype("int64").reshape([-1, 1])
        # padding
        padded_token_ids_q, input_mask_q = pad_batch_data(
            batch_token_ids_q, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_q = pad_batch_data(
            batch_text_type_ids_q, pad_idx=self.pad_id)
        padded_position_ids_q = pad_batch_data(
            batch_position_ids_q, pad_idx=self.pad_id)
        padded_task_ids_q = np.ones_like(padded_token_ids_q, dtype="int64") * self.task_id
        padded_token_ids_p_pos, input_mask_p_pos = pad_batch_data(
            batch_token_ids_p_pos, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_p_pos = pad_batch_data(
            batch_text_type_ids_p_pos, pad_idx=self.pad_id)
        padded_position_ids_p_pos = pad_batch_data(
            batch_position_ids_p_pos, pad_idx=self.pad_id)
        padded_task_ids_p_pos = np.ones_like(padded_token_ids_p_pos, dtype="int64") * self.task_id
        padded_token_ids_p_neg, input_mask_p_neg = pad_batch_data(
            batch_token_ids_p_neg, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_p_neg = pad_batch_data(
            batch_text_type_ids_p_neg, pad_idx=self.pad_id)
        padded_position_ids_p_neg = pad_batch_data(
            batch_position_ids_p_neg, pad_idx=self.pad_id)
        padded_task_ids_p_neg = np.ones_like(padded_token_ids_p_neg, dtype="int64") * self.task_id
        return_list = [
            padded_token_ids_q, padded_text_type_ids_q, padded_position_ids_q, padded_task_ids_q,
            input_mask_q,
            padded_token_ids_p_pos, padded_text_type_ids_p_pos, padded_position_ids_p_pos, padded_task_ids_p_pos,
            input_mask_p_pos,
            padded_token_ids_p_neg, padded_text_type_ids_p_neg, padded_position_ids_p_neg, padded_task_ids_p_neg,
            input_mask_p_neg
        ]
        if not self.is_inference:
            return_list += [batch_labels, batch_qids]
        return return_list
# This module is import-only; no CLI behavior is provided.
if __name__ == '__main__':
    pass
| 17,266 | 39.437939 | 161 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/reader/reader_de_infer.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
import os
import json
import random
import logging
import numpy as np
import six
from io import open
from collections import namedtuple
import tokenization
from batching import pad_batch_data
log = logging.getLogger(__name__)
if six.PY3:
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def csv_reader(fd, delimiter='\t'):
    """Yield each line of `fd` as a list of fields split on `delimiter`."""
    def gen():
        for raw_line in fd:
            yield raw_line.rstrip('\n').split(delimiter)
    return gen()
class BaseReader(object):
    """Turns (query, title, para[, label]) rows into padded id batches
    for dual-encoder inference.

    Subclasses override `_read_tsv` (see ClassifyReader below) and must
    provide `_pad_batch_records`.
    """
    def __init__(self,
                 vocab_path,
                 label_map_config=None,
                 q_max_seq_len=128,
                 p_max_seq_len=512,
                 do_lower_case=True,
                 in_tokens=False,
                 is_inference=False,
                 random_seed=None,
                 tokenizer="FullTokenizer",
                 for_cn=True,
                 task_id=0):
        """Build the tokenizer and record reader configuration.

        :param vocab_path: vocab file for the FullTokenizer.
        :param label_map_config: optional JSON file mapping raw labels
            to ids.
        :param q_max_seq_len: token budget for the query tower.
        :param p_max_seq_len: token budget for the passage tower.
        :param in_tokens: if True, batch_size is a token budget instead
            of an example count.
        :param for_cn: strip spaces from text fields (Chinese corpora).
        """
        self.q_max_seq_len = q_max_seq_len
        self.p_max_seq_len = p_max_seq_len
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_path, do_lower_case=do_lower_case)
        self.vocab = self.tokenizer.vocab
        self.pad_id = self.vocab["[PAD]"]
        self.cls_id = self.vocab["[CLS]"]
        self.sep_id = self.vocab["[SEP]"]
        self.in_tokens = in_tokens
        self.is_inference = is_inference
        self.for_cn = for_cn
        self.task_id = task_id
        np.random.seed(random_seed)
        self.current_example = 0
        self.current_epoch = 0
        self.num_examples = 0
        if label_map_config:
            with open(label_map_config, encoding='utf8') as f:
                self.label_map = json.load(f)
        else:
            self.label_map = None
    def get_train_progress(self):
        """Gets progress for training phase."""
        return self.current_example, self.current_epoch
    def _read_tsv(self, input_file, batch_size=16, quotechar=None):
        """Reads a tab separated value file."""
        # NOTE(review): this base version consumes the first row as a
        # header; ClassifyReader overrides it with hard-coded headers.
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            headers = next(reader)
            Example = namedtuple('Example', headers)
            examples = []
            for line in reader:
                example = Example(*line)
                examples.append(example)
            return examples
    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()
    def _convert_example_to_record(self, example, q_max_seq_length, p_max_seq_length, tokenizer):
        """Converts a single `Example` into a single `Record`.

        Builds two id sequences: the query ([CLS] query [SEP], all
        segment 0) and the passage ([CLS] title [SEP] para [SEP];
        title segment 0, para segment 1).
        """
        query = tokenization.convert_to_unicode(example.query)
        tokens_query = tokenizer.tokenize(query)
        # reserve 2 slots for [CLS]/[SEP] in the query sequence
        self._truncate_seq_pair([], tokens_query, q_max_seq_length - 2)
        # title
        title = tokenization.convert_to_unicode(example.title)
        tokens_title = tokenizer.tokenize(title)
        # para
        para = tokenization.convert_to_unicode(example.para)
        tokens_para = tokenizer.tokenize(para)
        # reserve 3 slots for [CLS] + 2x[SEP] in the passage sequence
        self._truncate_seq_pair(tokens_title, tokens_para, p_max_seq_length - 3)
        tokens_q = []
        text_type_ids_q = []
        tokens_q.append("[CLS]")
        text_type_ids_q.append(0)
        for token in tokens_query:
            tokens_q.append(token)
            text_type_ids_q.append(0)
        tokens_q.append("[SEP]")
        text_type_ids_q.append(0)
        token_ids_q = tokenizer.convert_tokens_to_ids(tokens_q)
        position_ids_q = list(range(len(token_ids_q)))
        # debugging dump of token ids, kept for reference:
        #f = open('tid', 'a')
        #for tid in range(len(token_ids_q)):
        #    f.write(str(token_ids_q[tid]) + '\t' + tokens_q[tid] + '\n')
        #f.write(str(token_ids_q[tid]) + ' ')
        #f.write('\t')
        ### para
        tokens_p = []
        text_type_ids_p = []
        tokens_p.append("[CLS]")
        text_type_ids_p.append(0)
        for token in tokens_title:
            tokens_p.append(token)
            text_type_ids_p.append(0)
        tokens_p.append("[SEP]")
        text_type_ids_p.append(0)
        for token in tokens_para:
            tokens_p.append(token)
            text_type_ids_p.append(1)
        tokens_p.append("[SEP]")
        text_type_ids_p.append(1)
        token_ids_p = tokenizer.convert_tokens_to_ids(tokens_p)
        position_ids_p = list(range(len(token_ids_p)))
        #for tid in range(len(token_ids_p)):
        #    f.write(str(token_ids_p[tid]) + '\t' + tokens_p[tid] + '\n')
        #f.write(str(token_ids_p[tid]) + ' ')
        #f.write('\n')
        #f.close()
        if self.is_inference:
            Record = namedtuple('Record',
                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                'token_ids_p', 'text_type_ids_p', 'position_ids_p'])
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p=token_ids_p,
                text_type_ids_p=text_type_ids_p,
                position_ids_p=position_ids_p)
        else:
            if self.label_map:
                label_id = self.label_map[example.label]
            else:
                label_id = example.label
            Record = namedtuple('Record',
                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                'token_ids_p', 'text_type_ids_p', 'position_ids_p', \
                'label_id', 'qid'
                ])
            qid = None
            if "qid" in example._fields:
                qid = example.qid
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p=token_ids_p,
                text_type_ids_p=text_type_ids_p,
                position_ids_p=position_ids_p,
                label_id=label_id,
                qid=qid)
        return record
    def _prepare_batch_data(self, examples, batch_size, phase=None, read_id=False):
        """generate batch records"""
        batch_records, max_len = [], 0
        for index, example in enumerate(examples):
            if phase == "train":
                self.current_example = index
            if read_id is False:
                record = self._convert_example_to_record(example, self.q_max_seq_len,
                                                         self.p_max_seq_len, self.tokenizer)
            else:
                # NOTE(review): _convert_example_id_to_record is not
                # defined in this class — presumably provided by a
                # subclass; verify before calling with read_id=True.
                record = self._convert_example_id_to_record(example, self.q_max_seq_len,
                                                            self.p_max_seq_len, self.tokenizer)
            max_len = max(max_len, len(record.token_ids_p))
            if self.in_tokens:
                # token budget: would adding one more record overflow it?
                to_append = (len(batch_records) + 1) * max_len <= batch_size
            else:
                to_append = len(batch_records) < batch_size
            if to_append:
                batch_records.append(record)
            else:
                yield self._pad_batch_records(batch_records)
                max_len = len(record.token_ids_p)
                batch_records = [record]
        if batch_records:
            yield self._pad_batch_records(batch_records)
    def get_num_examples(self, input_file):
        """Count the examples by reading the whole file."""
        examples = self._read_tsv(input_file)
        return len(examples)
    def data_generator(self,
                       input_file,
                       batch_size,
                       epoch,
                       dev_count=1,
                       shuffle=True,
                       phase=None,
                       read_id=False):
        """Return a generator factory yielding padded batches over
        `epoch` passes of `input_file`."""
        examples = self._read_tsv(input_file, batch_size)
        def wrapper():
            # buffer dev_count batches so each device gets one per step
            all_dev_batches = []
            for epoch_index in range(epoch):
                if phase == "train":
                    self.current_example = 0
                    self.current_epoch = epoch_index
                if shuffle:
                    np.random.shuffle(examples)
                for batch_data in self._prepare_batch_data(
                        examples, batch_size, phase=phase, read_id=read_id):
                    if len(all_dev_batches) < dev_count:
                        all_dev_batches.append(batch_data)
                    if len(all_dev_batches) == dev_count:
                        for batch in all_dev_batches:
                            yield batch
                        all_dev_batches = []
        def f():
            # NOTE(review): exceptions are printed and swallowed, so the
            # generator simply ends early on error.
            try:
                for i in wrapper():
                    yield i
            except Exception as e:
                import traceback
                traceback.print_exc()
        return f
class ClassifyReader(BaseReader):
    """Concrete reader for headerless 4-column TSV
    (query, title, para, label) rows; pads query and passage id tensors
    into fixed-size batches.
    """
    def _read_tsv(self, input_file, batch_size=16, quotechar=None):
        """Reads a tab separated value file.

        The tail is padded by repeating the last example so the total is
        a multiple of batch_size.
        """
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            #headers = next(reader)
            headers = 'query\ttitle\tpara\tlabel'.split('\t')
            text_indices = [
                index for index, h in enumerate(headers) if h != "label"
            ]
            Example = namedtuple('Example', headers)
            examples = []
            for line in reader:
                for index, text in enumerate(line):
                    if index in text_indices:
                        if self.for_cn:
                            # Chinese corpora: drop intra-text spaces
                            line[index] = text.replace(' ', '')
                        else:
                            line[index] = text
                example = Example(*line)
                examples.append(example)
            # NOTE(review): raises NameError when the file is empty,
            # since `example` is then unbound.
            while len(examples) % batch_size != 0:
                examples.append(example)
            return examples
    def _pad_batch_records(self, batch_records):
        """Pad a list of Records into dense numpy inputs (ids, segment
        ids, position ids, task ids and input masks for the query and
        the passage, plus labels/qids when not in inference mode)."""
        batch_token_ids_q = [record.token_ids_q for record in batch_records]
        batch_text_type_ids_q = [record.text_type_ids_q for record in batch_records]
        batch_position_ids_q = [record.position_ids_q for record in batch_records]
        batch_token_ids_p = [record.token_ids_p for record in batch_records]
        batch_text_type_ids_p = [record.text_type_ids_p for record in batch_records]
        batch_position_ids_p = [record.position_ids_p for record in batch_records]
        if not self.is_inference:
            batch_labels = [record.label_id for record in batch_records]
            batch_labels = np.array(batch_labels).astype("int64").reshape(
                [-1, 1])
            if batch_records[0].qid:
                batch_qids = [record.qid for record in batch_records]
                batch_qids = np.array(batch_qids).astype("int64").reshape(
                    [-1, 1])
            else:
                batch_qids = np.array([]).astype("int64").reshape([-1, 1])
        # padding
        padded_token_ids_q, input_mask_q = pad_batch_data(
            batch_token_ids_q, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_q = pad_batch_data(
            batch_text_type_ids_q, pad_idx=self.pad_id)
        padded_position_ids_q = pad_batch_data(
            batch_position_ids_q, pad_idx=self.pad_id)
        padded_task_ids_q = np.ones_like(padded_token_ids_q, dtype="int64") * self.task_id
        padded_token_ids_p, input_mask_p = pad_batch_data(
            batch_token_ids_p, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_p = pad_batch_data(
            batch_text_type_ids_p, pad_idx=self.pad_id)
        padded_position_ids_p = pad_batch_data(
            batch_position_ids_p, pad_idx=self.pad_id)
        padded_task_ids_p = np.ones_like(padded_token_ids_p, dtype="int64") * self.task_id
        return_list = [
            padded_token_ids_q, padded_text_type_ids_q, padded_position_ids_q, padded_task_ids_q,
            input_mask_q,
            padded_token_ids_p, padded_text_type_ids_p, padded_position_ids_p, padded_task_ids_p,
            input_mask_p,
        ]
        if not self.is_inference:
            return_list += [batch_labels, batch_qids]
        return return_list
# This module is import-only; no CLI behavior is provided.
if __name__ == '__main__':
    pass
| 13,641 | 36.581267 | 97 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/reader/reader_ce.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
import os
import json
import random
import logging
import numpy as np
import six
from io import open
from collections import namedtuple
import tokenization
from batching import pad_batch_data
log = logging.getLogger(__name__)
if six.PY3:
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def csv_reader(fd, delimiter='\t', trainer_id=0, trainer_num=1):
    """Split lines of `fd` on `delimiter`, yielding only every
    `trainer_num`-th row starting at `trainer_id` (round-robin
    sharding).  Single-field rows are wrapped in a 1-tuple; multi-field
    rows are yielded as plain lists.
    """
    def shard():
        for row_idx, raw_line in enumerate(fd):
            if row_idx % trainer_num != trainer_id:
                continue
            cells = raw_line.rstrip('\n').split(delimiter)
            yield (cells,) if len(cells) == 1 else cells
    return shard()
class BaseReader(object):
    """Base reader turning TSV examples into padded, id-encoded model batches.

    Subclasses provide `_pad_batch_records` (and may override `_read_tsv`)
    to produce the exact tensor layout the downstream network expects.
    """
    def __init__(self,
                 vocab_path,
                 label_map_config=None,
                 max_seq_len=512,
                 total_num=0,
                 do_lower_case=True,
                 in_tokens=False,
                 is_inference=False,
                 random_seed=None,
                 tokenizer="FullTokenizer",
                 for_cn=True,
                 task_id=0):
        """
        Args:
            vocab_path: vocabulary file for the WordPiece tokenizer.
            label_map_config: optional JSON file mapping label strings to ids.
            max_seq_len: hard cap on the tokenized query+passage length.
            total_num: total number of training examples (used to compute
                per-worker shard sizes in `data_generator`).
            do_lower_case: lowercase text before tokenization.
            in_tokens: if True, `batch_size` counts tokens, not examples.
            is_inference: if True, records carry no label/qid fields.
            random_seed: seed for numpy's RNG (used by epoch shuffling).
            tokenizer: unused here — FullTokenizer is hard-wired below.
            for_cn: if True, subclasses strip spaces inside text fields.
            task_id: constant broadcast into the `task_ids` input tensor.
        """
        self.max_seq_len = max_seq_len
        # NOTE: the `tokenizer` argument is ignored; FullTokenizer is hard-wired.
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_path, do_lower_case=do_lower_case)
        self.vocab = self.tokenizer.vocab
        self.pad_id = self.vocab["[PAD]"]
        self.cls_id = self.vocab["[CLS]"]
        self.sep_id = self.vocab["[SEP]"]
        self.in_tokens = in_tokens
        self.is_inference = is_inference
        self.for_cn = for_cn
        self.task_id = task_id
        # Seeds numpy's *global* RNG — this affects every other numpy user
        # in the process, not just this reader.
        np.random.seed(random_seed)
        self.current_example = 0
        self.current_epoch = 0
        self.num_examples = 0
        self.total_num = total_num
        if label_map_config:
            with open(label_map_config, encoding='utf8') as f:
                self.label_map = json.load(f)
        else:
            self.label_map = None
    def get_train_progress(self):
        """Gets progress for training phase: (example index, epoch index)."""
        return self.current_example, self.current_epoch
    def _read_tsv(self, input_file, quotechar=None):
        """Reads a tab separated value file.

        The first row is taken as the header and supplies the namedtuple
        field names for every following row. `quotechar` is unused.
        """
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            headers = next(reader)
            Example = namedtuple('Example', headers)
            examples = []
            for line in reader:
                example = Example(*line)
                examples.append(example)
            return examples
    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()
    def _convert_example_to_record(self, example, max_seq_length, tokenizer):
        """Converts a single `Example` into a single `Record`.

        The query becomes segment A; title + para are concatenated into
        segment B.  The pair is truncated to `max_seq_length - 3` to leave
        room for [CLS] and the two [SEP] markers.
        """
        query = tokenization.convert_to_unicode(example.query)
        tokens_a = tokenizer.tokenize(query)
        tokens_b = None
        title = tokenization.convert_to_unicode(example.title)
        tokens_b = tokenizer.tokenize(title)
        para = tokenization.convert_to_unicode(example.para)
        tokens_para = tokenizer.tokenize(para)
        tokens_b.extend(tokens_para)
        self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        # The convention in BERT/ERNIE is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0     0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = []
        text_type_ids = []
        tokens.append("[CLS]")
        text_type_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            text_type_ids.append(0)
        tokens.append("[SEP]")
        text_type_ids.append(0)
        if tokens_b:
            for token in tokens_b:
                tokens.append(token)
                text_type_ids.append(1)
            tokens.append("[SEP]")
            text_type_ids.append(1)
        token_ids = tokenizer.convert_tokens_to_ids(tokens)
        position_ids = list(range(len(token_ids)))
        if self.is_inference:
            # Inference records carry no supervision fields.
            Record = namedtuple('Record',
                                ['token_ids', 'text_type_ids', 'position_ids'])
            record = Record(
                token_ids=token_ids,
                text_type_ids=text_type_ids,
                position_ids=position_ids)
        else:
            if self.label_map:
                label_id = self.label_map[example.label]
            else:
                label_id = example.label
            Record = namedtuple('Record', [
                'token_ids', 'text_type_ids', 'position_ids', 'label_id', 'qid'
            ])
            # qid stays None when the source TSV has no `qid` column.
            qid = None
            if "qid" in example._fields:
                qid = example.qid
            record = Record(
                token_ids=token_ids,
                text_type_ids=text_type_ids,
                position_ids=position_ids,
                label_id=label_id,
                qid=qid)
        return record
    def _prepare_batch_data(self, examples, batch_size, phase=None):
        """generate batch records

        When `self.in_tokens` is set, `batch_size` is a token budget and the
        batch is closed once (records + 1) * longest-sequence would exceed it
        (i.e. the padded size estimate); otherwise it counts records.
        """
        batch_records, max_len = [], 0
        for index, example in enumerate(examples):
            if phase == "train":
                self.current_example = index
            record = self._convert_example_to_record(example, self.max_seq_len,
                                                     self.tokenizer)
            max_len = max(max_len, len(record.token_ids))
            if self.in_tokens:
                to_append = (len(batch_records) + 1) * max_len <= batch_size
            else:
                to_append = len(batch_records) < batch_size
            if to_append:
                batch_records.append(record)
            else:
                yield self._pad_batch_records(batch_records)
                # Start the next batch with the record that didn't fit.
                batch_records, max_len = [record], len(record.token_ids)
        if batch_records:
            yield self._pad_batch_records(batch_records)
    def get_num_examples(self, input_file):
        """Return the example count computed by `data_generator` (the
        `input_file` argument is ignored)."""
        # examples = self._read_tsv(input_file)
        # return len(examples)
        return self.num_examples
    def data_generator(self,
                       input_file,
                       batch_size,
                       epoch,
                       dev_count=1,
                       trainer_id=0,
                       trainer_num=1,
                       shuffle=True,
                       phase=None):
        """Build a generator factory that yields padded batches for `epoch` passes.

        Args:
            input_file: TSV file holding the examples.
            batch_size: records per batch (or tokens, if `self.in_tokens`).
            epoch: number of passes over the data.
            dev_count: batches are buffered and released in groups of this
                size (one per device); a trailing incomplete group is dropped.
            trainer_id / trainer_num: shard selection for distributed training.
            shuffle: reshuffle the in-memory example list each epoch.
            phase: 'train' enables sharding and progress bookkeeping.
        Returns:
            A zero-argument callable `f` returning the batch generator.
        """
        if phase == 'train':
            # examples = examples[trainer_id: (len(examples) //trainer_num) * trainer_num : trainer_num]
            self.num_examples_per_node = self.total_num // trainer_num
            self.num_examples = self.num_examples_per_node * trainer_num
            # NOTE(review): BaseReader._read_tsv does not accept these keyword
            # arguments — the train path relies on a subclass override that
            # does (e.g. ClassifyReader._read_tsv); confirm before reusing
            # BaseReader directly for training.
            examples = self._read_tsv(input_file, trainer_id=trainer_id, trainer_num=trainer_num, num_examples=self.num_examples_per_node)
            log.info('apply sharding %d/%d' % (trainer_id, trainer_num))
        else:
            examples = self._read_tsv(input_file)
        def wrapper():
            all_dev_batches = []
            for epoch_index in range(epoch):
                if phase == "train":
                    self.current_example = 0
                    self.current_epoch = epoch_index
                if shuffle:
                    np.random.shuffle(examples)
                for batch_data in self._prepare_batch_data(
                        examples, batch_size, phase=phase):
                    if len(all_dev_batches) < dev_count:
                        all_dev_batches.append(batch_data)
                    if len(all_dev_batches) == dev_count:
                        for batch in all_dev_batches:
                            yield batch
                        # NOTE: a final group smaller than dev_count is
                        # intentionally discarded so every device gets a batch.
                        all_dev_batches = []
        def f():
            # NOTE(review): any exception raised while iterating is printed
            # and then swallowed — callers only observe an exhausted
            # generator, which can silently end training early.
            try:
                for i in wrapper():
                    yield i
            except Exception as e:
                import traceback
                traceback.print_exc()
        return f
class ClassifyReader(BaseReader):
    """Reader for query/title/para classification (cross-encoder) data.

    Expects headerless TSV rows of the form ``query\\ttitle\\tpara\\tlabel``
    and pads every batch to the longest sequence it contains.
    """
    def _read_tsv(self, input_file, quotechar=None, trainer_id=0, trainer_num=1, num_examples=0):
        """Read a headerless TSV file, sharded across `trainer_num` workers.

        Args:
            input_file: path to the TSV file.
            quotechar: unused; kept for interface compatibility.
            trainer_id / trainer_num: select this worker's slice of lines.
            num_examples: if non-zero, stop after reading this many rows.
        Returns:
            list of ``Example(query, title, para, label)`` namedtuples.
        """
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f, trainer_id=trainer_id, trainer_num=trainer_num)
            # The file carries no header row; the schema is fixed.
            headers = 'query\ttitle\tpara\tlabel'.split('\t')
            text_indices = [
                index for index, h in enumerate(headers) if h != "label"
            ]
            Example = namedtuple('Example', headers)
            examples = []
            for cnt, line in enumerate(reader):
                if num_examples != 0 and cnt == num_examples:
                    break
                for index, text in enumerate(line):
                    # Chinese text is tokenized character-by-character, so
                    # spaces inside text columns are noise and get stripped.
                    if index in text_indices and self.for_cn:
                        line[index] = text.replace(' ', '')
                examples.append(Example(*line))
            return examples
    def _pad_batch_records(self, batch_records):
        """Pad a list of `Record`s into the tensor list fed to the network.

        Returns [token_ids, text_type_ids, position_ids, task_ids, input_mask]
        plus, when not in inference mode, [labels, qids].
        """
        batch_token_ids = [record.token_ids for record in batch_records]
        batch_text_type_ids = [record.text_type_ids for record in batch_records]
        batch_position_ids = [record.position_ids for record in batch_records]
        if not self.is_inference:
            batch_labels = [record.label_id for record in batch_records]
            batch_labels = np.array(batch_labels).astype("int64").reshape(
                [-1, 1])
            # BUGFIX: test presence with `is not None` — the previous
            # truthiness check (`if batch_records[0].qid:`) made a falsy qid
            # such as 0 or "" silently drop the qids of the whole batch.
            if batch_records[0].qid is not None:
                batch_qids = [record.qid for record in batch_records]
                batch_qids = np.array(batch_qids).astype("int64").reshape(
                    [-1, 1])
            else:
                batch_qids = np.array([]).astype("int64").reshape([-1, 1])
        # padding: every sequence is right-padded to the batch maximum.
        padded_token_ids, input_mask = pad_batch_data(
            batch_token_ids, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids = pad_batch_data(
            batch_text_type_ids, pad_idx=self.pad_id)
        padded_position_ids = pad_batch_data(
            batch_position_ids, pad_idx=self.pad_id)
        # The task id is constant across the batch.
        padded_task_ids = np.ones_like(
            padded_token_ids, dtype="int64") * self.task_id
        return_list = [
            padded_token_ids, padded_text_type_ids, padded_position_ids,
            padded_task_ids, input_mask
        ]
        if not self.is_inference:
            return_list += [batch_labels, batch_qids]
        return return_list
| 12,992 | 36.770349 | 138 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/reader/__init__.py | 0 | 0 | 0 | py | |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/utils/args.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Arguments for configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import six
import os
import sys
import argparse
import logging
import paddle.fluid as fluid
log = logging.getLogger(__name__)
def prepare_logger(logger, debug=False, save_to_file=None):
    """Attach a console handler (and optionally a file handler) to `logger`.

    The logger is set to DEBUG level and stops propagating to ancestors.
    NOTE: the file handler is only attached when `save_to_file` does NOT
    already exist — behavior kept as-is from the original.
    The `debug` flag is accepted but unused.
    """
    fmt = logging.Formatter(fmt='[%(levelname)s] %(asctime)s [%(filename)12s:%(lineno)5d]:\t%(message)s')
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(fmt)
    logger.addHandler(stream_handler)
    if save_to_file is not None and not os.path.exists(save_to_file):
        file_handler = logging.FileHandler(save_to_file)
        file_handler.setFormatter(fmt)
        logger.addHandler(file_handler)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
def str2bool(v):
    """Parse a CLI string as a boolean.

    argparse cannot parse "True"/"False" into booleans directly, so any of
    the spellings "true", "t" or "1" (case-insensitive) count as True and
    everything else as False.
    """
    return v.lower() in {"true", "t", "1"}
class ArgumentGroup(object):
    """Thin wrapper that registers related CLI options under one argparse group."""
    def __init__(self, parser, title, des):
        self._group = parser.add_argument_group(title=title, description=des)
    def add_arg(self, name, type, default, help, positional_arg=False, **kwargs):
        """Register one argument; boolean options are parsed via `str2bool`."""
        flag = name if positional_arg else "--" + name
        arg_type = str2bool if type == bool else type
        self._group.add_argument(
            flag,
            default=default,
            type=arg_type,
            help=help + ' Default: %(default)s.',
            **kwargs)
def print_arguments(args):
    """Log every parsed argument as ``name: value``, sorted by name.

    Uses plain ``vars(args).items()`` instead of ``six.iteritems`` — the two
    are interchangeable under both Python 2 and 3 when wrapped in ``sorted``,
    so the third-party ``six`` call is unnecessary here.

    Args:
        args: an ``argparse.Namespace`` (or any object with ``__dict__``).
    """
    log.info('----------- Configuration Arguments -----------')
    for arg, value in sorted(vars(args).items()):
        log.info('%s: %s' % (arg, value))
    log.info('------------------------------------------------')
def check_cuda(use_cuda, err = \
    "\nYou can not set use_cuda = True in the model because you are using paddlepaddle-cpu.\n \
     Please: 1. Install paddlepaddle-gpu to run your models on GPU or 2. Set use_cuda = False to run models on CPU.\n"
    ):
    """Exit the process if CUDA is requested but paddle was built CPU-only.

    Args:
        use_cuda: whether the caller intends to run on GPU.
        err: message logged before exiting when the check fails.
    """
    try:
        # SystemExit derives from BaseException, not Exception, so the
        # sys.exit(1) below is NOT swallowed by the handler.
        if use_cuda == True and fluid.is_compiled_with_cuda() == False:
            log.error(err)
            sys.exit(1)
    except Exception as e:
        # Best-effort probe: presumably guards against paddle builds lacking
        # is_compiled_with_cuda — the check is then silently skipped.
        # NOTE(review): confirm this broad swallow is intentional.
        pass
| 2,996 | 34.678571 | 119 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.