        if len(passages) > 0 and e - s <= overlap:
            break
        p = ' '.join(words[s:e])
        passages.append(p)
        s = s + plen - overlap
    if len(passages) > FLAGS.max_num_segments_perdoc:
        chosen_ids = sorted(random.sample(range(1, len(passages) - 1), FLAGS.max_num_segments_perdoc - 2))
        chosen_ids = [0] + chosen_ids + [len(passages) - 1]
        passages = [passages[idx] for idx in chosen_ids]
    global stats
    stats[len(passages)] += 1
    return passages

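# Illustration of the sliding-window split above, assuming (as in the elided start of
# the loop) that s starts at 0 and e = min(s + plen, len(words)): with plen=4,
# overlap=1 and 10 words, the windows are words[0:4], words[3:7] and words[6:10];
# the next step would leave e - s = 1 <= overlap, so the loop stops. Documents with
# more than FLAGS.max_num_segments_perdoc passages are then subsampled, always
# keeping the first and the last passage.
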

def main(_):
    # training config
    qid_list = FOLD_CONFIG_DICT[FLAGS.dataset]
    qid_list = collections.deque(qid_list)
    rotate = FLAGS.fold - 1
    # rotate the fold deque so that the chosen fold determines the train/test split
    qid_list.rotate(rotate)
    # Currently we only set up the train/test split; model selection on a held-out
    # validation fold is not supported yet.
    # train_qid_list, valid_qid_list, test_qid_list = qid_list[0] + qid_list[1] + qid_list[2], qid_list[3], qid_list[4]
    train_qid_list, test_qid_list = qid_list[0] + qid_list[1] + qid_list[2] + qid_list[3], qid_list[4]
    train_qid_list, test_qid_list = sorted(train_qid_list), sorted(test_qid_list)
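    # Example: with five folds [f1, f2, f3, f4, f5] and FLAGS.fold == 2, rotate == 1
    # turns the deque into [f5, f1, f2, f3, f4], so f5 + f1 + f2 + f3 are used for
    # training and f4 for testing.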
tf.logging.info("Running on dataset: {0}, on fold {1}".format(FLAGS.dataset, FLAGS.fold))
|
tf.logging.info("Traing on following qid: {0}\n".format(train_qid_list))
|
# tf.logging.info("Validating on following qid: {0}\n".format(valid_qid_list))
|
tf.logging.info("Testing on following qid: {0}\n".format(test_qid_list))
|
    relevance_dict = relevance_info.create_relevance(FLAGS.trec_run_filename, FLAGS.qrels_filename)
    corpus_dict = file_operation.key_value_from_file(FLAGS.corpus_filename)
    query_dict = file_operation.load_trec_topics(FLAGS.query_filename)[FLAGS.query_field]
    tokenizer = tokenization.FullTokenizer(
        vocab_file=FLAGS.vocab_filename,
        do_lower_case=FLAGS.do_lower_case
    )
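    # Illustrative (not executed here): the tokenizer is presumably applied to query
    # and passage text inside convert_data_pointwise, roughly like
    #   tokens = tokenizer.tokenize("information retrieval")
    #   token_ids = tokenizer.convert_tokens_to_ids(tokens)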

    # begin data conversion to TFRecord
    output_path = os.path.join(FLAGS.output_dir, "dataset_train.tfrecord")
    tf.logging.info("Writing data into {}".format(output_path))
    writer = tf.python_io.TFRecordWriter(output_path)
    convert_data_pointwise(
        writer=writer,
        tokenizer=tokenizer,
        qid_list=train_qid_list,
        relevance_dict=relevance_dict,
        corpus_dict=corpus_dict,
        query_dict=query_dict,
        is_eval=False
    )
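    # Sketch of what a pointwise conversion typically writes per (query, passage)
    # pair; the actual feature layout lives in convert_data_pointwise and the names
    # below are assumptions:
    #   feature = {
    #       "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=input_ids)),
    #       "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
    #   }
    #   example = tf.train.Example(features=tf.train.Features(feature=feature))
    #   writer.write(example.SerializeToString())
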
    output_path = os.path.join(FLAGS.output_dir, "dataset_test.tfrecord")
    tf.logging.info("Writing data into {}".format(output_path))
    writer = tf.python_io.TFRecordWriter(output_path)
    convert_data_pointwise(
        writer=writer,
        tokenizer=tokenizer,
        qid_list=test_qid_list,
        relevance_dict=relevance_dict,
        corpus_dict=corpus_dict,
        query_dict=query_dict,
        is_eval=True
    )

if __name__ == '__main__':
    flags.mark_flag_as_required("trec_run_filename")
    flags.mark_flag_as_required("qrels_filename")
    flags.mark_flag_as_required("query_field")
    flags.mark_flag_as_required("query_filename")
    flags.mark_flag_as_required("corpus_filename")
    flags.mark_flag_as_required("dataset")
    flags.mark_flag_as_required("fold")
    flags.mark_flag_as_required("vocab_filename")
    flags.mark_flag_as_required("output_dir")
    flags.mark_flag_as_required("plen")
    flags.mark_flag_as_required("overlap")
    tf.app.run()
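
# Illustrative invocation (the script name and all values are placeholders):
#   python generate_data.py \
#     --dataset=<dataset_name> --fold=1 \
#     --trec_run_filename=<trec_run_file> --qrels_filename=<qrels_file> \
#     --query_filename=<topics_file> --query_field=title \
#     --corpus_filename=<corpus_file> --vocab_filename=<bert_vocab_file> \
#     --output_dir=<output_dir> --plen=150 --overlap=50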

# <FILESEP>

"""ShutIt module. See http://shutit.tk
|
"""
|
from shutit_module import ShutItModule
|
import random
|
import string
|
class docker_101_tutorial(ShutItModule):
|
def build(self, shutit):
|
# Some useful API calls for reference. See shutit's docs for more info and options:
|
#
|
# ISSUING BASH COMMANDS
|
# shutit.send(send,expect=<default>) - Send a command, wait for expect (string or compiled regexp)
|
# to be seen before continuing. By default this is managed
|
# by ShutIt with shell prompts.
|
# shutit.multisend(send,send_dict) - Send a command, dict contains {expect1:response1,expect2:response2,...}
|
# shutit.send_and_get_output(send) - Returns the output of the sent command
|
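        # Minimal illustrative use of the calls listed above (commands and expected
        # prompts are placeholders, not part of the original tutorial):
        #   shutit.send('apt-get update')
        #   shutit.multisend('apt-get install -y postfix',
        #                    {'General type of mail configuration': 'Internet Site',
        #                     'System mail name': 'example.com'})
        #   hostname = shutit.send_and_get_output('hostname')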