# (dataset-viewer residue, not code: "text stringlengths 1 93.6k")
) for p in passages]
instance = PointwiseInstance(
exampleid="{}-{}".format(qid, docno),
tokens_a=query,
tokens_b_list=passages,
relation_label=label
)
return instance
def write_padding_instance_to_example_files(writer, num_examples=50):
    """Write `num_examples` identical all-zero padding examples to `writer`.

    Each padding example has zeroed input ids for every segment slot, a fixed
    `tokens_a_len` of 3 and per-segment length of 24 (placeholder values so the
    mask logic downstream stays well-formed), and label 0.

    Args:
        writer: an open tf.python_io.TFRecordWriter (or compatible) to write to.
        num_examples: how many copies of the padding example to emit.
    """
    # 1-d array covering max_seq_length ids for every segment slot.
    # np.int was a deprecated alias removed in NumPy 1.24; use the explicit
    # 64-bit type, which is also what tf.train int64 features expect.
    input_ids = np.zeros(
        (FLAGS.max_seq_length * FLAGS.max_num_segments_perdoc), dtype=np.int64)
    num_segments = FLAGS.max_num_segments_perdoc
    label = 0
    features = collections.OrderedDict()
    features["input_ids"] = create_int_feature(input_ids)
    features["tokens_a_len"] = create_int_feature([3])
    features["tokens_ids_lens"] = create_int_feature(
        [24] * FLAGS.max_num_segments_perdoc)
    features["num_segments"] = create_int_feature([num_segments])
    features["label"] = create_int_feature([label])
    # The example is identical for every copy, so serialize once and write N times.
    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    for _ in range(num_examples):
        writer.write(tf_example.SerializeToString())
    tf.logging.info("write {} padding instances successfully".format(num_examples))
def write_instance_to_example_files(writer, tokenizer, instance, instance_idx):
    """Serialize one PointwiseInstance into a tf.train.Example and write it.

    The query tokens (tokens_a) are prepended to each passage's tokens
    (tokens_b_list) to form one id sequence per segment; all segments are
    right-padded to FLAGS.max_seq_length and flattened into a single
    "input_ids" feature. Per-segment real lengths, the query length, segment
    count, and the relation label are written alongside so the model can
    rebuild masks and segment ids.

    Args:
        writer: an open TFRecord writer.
        tokenizer: tokenizer exposing convert_tokens_to_ids().
        instance: a PointwiseInstance with tokens_a, tokens_b_list and
            relation_label attributes.
        instance_idx: running index; the first 5 instances are logged verbosely.
    """
    def padding_2d(ids_list, num_tokens_per_segment, padding_value=0):
        # Right-pad each id list to a fixed width and flatten row-major.
        # np.int was removed in NumPy 1.24; use np.int64 (matches the
        # int64_list feature type written below).
        rows = len(ids_list)
        if padding_value == 0:
            matrix = np.zeros((rows, num_tokens_per_segment), dtype=np.int64)
        elif padding_value == 1:
            matrix = np.ones((rows, num_tokens_per_segment), dtype=np.int64)
        else:
            raise ValueError("Unsupported padding value")
        for i, ids in enumerate(ids_list):
            matrix[i, :len(ids)] = ids
        return matrix.flatten()

    tokens_a = instance.tokens_a
    tokens_b_list = instance.tokens_b_list
    tokens_a_ids = tokenizer.convert_tokens_to_ids(tokens_a)
    tokens_b_list = [tokenizer.convert_tokens_to_ids(p) for p in tokens_b_list]
    label = instance.relation_label
    assert len(tokens_b_list) <= FLAGS.max_num_segments_perdoc
    num_segments = len(tokens_b_list)
    # Each segment is query ids followed by one passage's ids.
    input_ids = [tokens_a_ids + tokens_b_passage_ids
                 for tokens_b_passage_ids in tokens_b_list]
    tokens_a_len = len(tokens_a_ids)  # helpful for segment ids
    input_ids_lens = [len(input_id) for input_id in input_ids]  # helpful for input mask
    # Pad the length list out to the full segment-slot count; unused slots
    # report max_seq_length (mirrors the padding-instance convention).
    input_ids_lens = input_ids_lens + [FLAGS.max_seq_length] * (
        FLAGS.max_num_segments_perdoc - len(input_ids_lens))
    input_ids = padding_2d(input_ids, FLAGS.max_seq_length, padding_value=0)

    # write to tfrecord
    features = collections.OrderedDict()
    features["input_ids"] = create_int_feature(input_ids)
    features["tokens_a_len"] = create_int_feature([tokens_a_len])
    features["tokens_ids_lens"] = create_int_feature(input_ids_lens)
    features["num_segments"] = create_int_feature([num_segments])
    features["label"] = create_int_feature([label])
    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(tf_example.SerializeToString())

    # Log the first few instances in full for eyeballing the pipeline output.
    if instance_idx < 5:
        tf.logging.info("*** Example ***")
        tf.logging.info("tokens_a: %s" % " ".join(
            [tokenization.printable_text(x) for x in instance.tokens_a]))
        tf.logging.info("tokens_b_list: {}".format(instance.tokens_b_list))
        for feature_name in features.keys():
            feature = features[feature_name]
            values = []
            if feature.int64_list.value:
                values = feature.int64_list.value
            elif feature.float_list.value:
                values = feature.float_list.value
            tf.logging.info(
                "%s: %s" % (feature_name, " ".join([str(x) for x in values])))
# Module-level counters keyed by stat name; defaultdict(int) lets callers
# increment without pre-initializing keys.
# NOTE(review): incrementing code is not visible in this chunk — presumably
# updated by the passage/instance generation below; confirm against full file.
stats = collections.defaultdict(int)
def get_passages(text, plen, overlap):
""" Modified from https://github.com/AdeDZY/SIGIR19-BERT-IR/blob/master/tools/gen_passages.py
:param text:
:param plen:
:param overlap:
:return:
"""
words = text.strip().split(' ')
s, e = 0, 0
passages = []
while s < len(words):
e = s + plen
if e >= len(words):
e = len(words)
# if the last one is shorter than 'overlap', it is already in the previous passage.