text
stringlengths
1
93.6k
csv_file.to_csv(f'RESULTS/{crystal_property}_results.csv')
# <FILESEP>
import os
import random
import collections
import tensorflow.compat.v1 as tf
import numpy as np
from bert import tokenization
from utils import file_operation, relevance_info
from utils.fold_config import FOLD_CONFIG_DICT
# Fix both Python and TF graph-level RNG seeds so runs are reproducible.
random.seed(118)
tf.random.set_random_seed(118)
# Command-line flag machinery (TF1 flags is a thin wrapper over absl.flags).
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
# Initial-ranking run file in TREC format (e.g. BM25 output) to be re-ranked.
flags.DEFINE_string(
"trec_run_filename", None,
"where the trec run file (e.g. produced by BM25) is"
)
# Relevance judgments (qrels) used for building training labels / evaluation.
flags.DEFINE_string(
"qrels_filename", None,
"where the qrels file is"
)
# Which TREC topic field to use as the query text.
flags.DEFINE_string(
"query_field", 'title',
"None if no field, else title, desc, narr, question")
flags.DEFINE_string(
"query_filename", None,
"where the query file is. support TREC file now")
# Document collection; one document per line as "docno \t content".
flags.DEFINE_string(
"corpus_filename", None,
"where the corpus file is. format: docno \t content")
# Dataset name; selects the query-id fold split from FOLD_CONFIG_DICT.
flags.DEFINE_string(
"dataset", None,
"which dataset to run on. it would correspond to the fold config of qids"
)
# Cross-validation fold index to run.
flags.DEFINE_integer(
"fold", 3,
"run fold")
# Documents are split into passages of `plen` tokens ...
flags.DEFINE_integer(
"plen", 150,
"length of segmented passage"
)
# ... with `overlap` tokens shared between consecutive passages.
flags.DEFINE_integer(
"overlap", 50,
"overlap between continuous segmented passages"
)
# Cap on training instances taken per query from the initial ranking.
flags.DEFINE_integer(
"max_num_train_instance_perquery", 1000,
"The maximum number of training instances utilized from initial ranking"
)
# Only the top-k documents of the initial ranking are re-ranked.
flags.DEFINE_integer(
"rerank_threshold", 100,
"the maximum number of top documents to be reranked"
)
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
# Pre-trained BERT configuration (architecture hyper-parameters as JSON).
flags.DEFINE_string(
"bert_config_filename", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
# WordPiece vocabulary matching the pre-trained BERT checkpoint.
flags.DEFINE_string("vocab_filename", None,
"The vocabulary file that the BERT model was trained on.")
# Must match the casing of the chosen BERT checkpoint (uncased vs cased).
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "