text stringlengths 1 93.6k |
|---|
"than this will be padded.")
|
# Cap on how many passage segments are kept per document when splitting
# long documents for BERT-style scoring.
flags.DEFINE_integer(
    "max_num_segments_perdoc", 8,
    "The maximum number of segments for each document")
|
class PointwiseInstance(object):
    """A single training instance (sentence pair).

    Holds one (query tokens, document-passage tokens) pair together with
    its example id and binary relevance label.
    """

    def __init__(self, exampleid, tokens_a, tokens_b_list, relation_label):
        # exampleid: identifier tying the instance back to its query/doc pair.
        self.exampleid = exampleid
        # tokens_a: tokenized query side of the pair.
        self.tokens_a = tokens_a
        # tokens_b_list: tokenized document/passage side of the pair.
        self.tokens_b_list = tokens_b_list
        # relation_label: relevance label (1 = relevant, 0 = not).
        self.relation_label = relation_label

    def __str__(self):
        """Render the instance as a human-readable multi-line string."""
        query_text = " ".join(
            tokenization.printable_text(tok) for tok in self.tokens_a)
        doc_text = " ".join(
            tokenization.printable_text(tok) for tok in self.tokens_b_list)
        parts = [
            "example id: %s\n" % self.exampleid,
            "tokens a: %s\n" % query_text,
            "tokens b: %s\n" % doc_text,
            "relation label: %s\n" % self.relation_label,
            "\n",
        ]
        return "".join(parts)

    def __repr__(self):
        return str(self)
|
def create_int_feature(values):
    """Wrap an iterable of ints in a tf.train.Feature holding an Int64List."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
|
def convert_data_pointwise(writer, tokenizer, qid_list, relevance_dict, corpus_dict, query_dict, is_eval=False):
    """Convert queries and documents into pointwise TF examples.

    For each query id, pairs the query with each document in its initial
    ranking (up to a per-query cap), labels the pair 1 if the document is
    in the judged-relevant set and 0 otherwise, and writes the resulting
    instances to `writer`. Training instances are buffered and shuffled
    before writing; eval instances are written in ranking order.

    Args:
        writer: TF example writer; closed by this function before returning.
        tokenizer: tokenizer passed through to instance creation/writing.
        qid_list: iterable of query ids to process.
        relevance_dict: maps qid -> relevance object exposing
            get_judged_docno_list() and get_supervised_docno_list().
        corpus_dict: maps docno -> raw document text.
        query_dict: maps qid -> raw query text.
        is_eval: if True, write instances immediately (capped at
            FLAGS.rerank_threshold per query) and pad the output; if False,
            buffer, shuffle, then write (capped at
            FLAGS.max_num_train_instance_perquery).
    """
    # Per-query cap differs between rerank eval and training.
    if is_eval:
        max_num_example = FLAGS.rerank_threshold
    else:
        max_num_example = FLAGS.max_num_train_instance_perquery
    instances = []
    idx = 0
    for qid in qid_list:
        tf.logging.info("Generating data for query {}".format(qid))
        relevance = relevance_dict.get(qid)
        judged_docno_list = relevance.get_judged_docno_list()
        supervised_docno_list = relevance.get_supervised_docno_list()  # initial ranking
        # training data from the judged docno, built from bm25 top1000 result
        relevant_docno_list = set()
        if judged_docno_list is not None:
            # NOTE(review): keys 1 and 2 are presumably relevance grades
            # (relevant / highly relevant) — confirm against the relevance
            # object's schema.
            relevant_docno_list = judged_docno_list[1] + judged_docno_list[2]
            relevant_docno_list = set(relevant_docno_list)
        for docno in supervised_docno_list[:max_num_example]:
            # Binary label: 1 iff the document was judged relevant.
            relation_label = 1 if docno in relevant_docno_list else 0
            query = query_dict[qid]
            doc = corpus_dict[docno]
            instance = create_instance_pointwise(tokenizer, FLAGS.max_seq_length, qid, docno, query, doc, relation_label)
            # append and shuffle on training set
            if not is_eval:
                instances.append(instance)
            else:
                # Eval: write immediately, preserving the initial ranking order.
                write_instance_to_example_files(writer, tokenizer, instance, idx)
                idx += 1
    # NOTE(review): on the eval path `instances` stays empty, so this logs 0.
    tf.logging.info("Totally {} examples".format(len(instances)))
    if not is_eval:
        # Shuffle buffered training instances so examples from the same
        # query are not written contiguously.
        random.shuffle(instances)
        for idx, instance in enumerate(instances):
            write_instance_to_example_files(writer, tokenizer, instance, idx)
    if is_eval:
        # Pad the eval output (presumably to a fixed number of examples
        # per query for batched reranking — confirm in the padding helper).
        write_padding_instance_to_example_files(writer)
    writer.close()
    # `stats` is a module-level mapping, presumably length -> count,
    # updated elsewhere (likely by write_instance_to_example_files) —
    # TODO confirm.
    print("Distribution of length. Key is length, Val is count.")
    for key, val in stats.items():
        print("{}\t{}".format(key, val))
|
def create_instance_pointwise(tokenizer, max_seq_length, qid, docno, query, doc, label):
|
query = tokenization.convert_to_unicode(query)
|
doc = tokenization.convert_to_unicode(doc)
|
passages = get_passages(doc, FLAGS.plen, FLAGS.overlap)
|
if len(passages) == 0:
|
tf.logging.warn("Passage length is 0 in qid {} docno {}".format(qid, docno))
|
query = tokenization.convert_to_bert_input(
|
text=query,
|
max_seq_length=64,
|
tokenizer=tokenizer,
|
add_cls=True,
|
convert_to_id=False
|
)
|
passages = [tokenization.convert_to_bert_input(
|
text=p,
|
max_seq_length=max_seq_length-len(query),
|
tokenizer=tokenizer,
|
add_cls=False,
|
convert_to_id=False
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.