# (extraction artifact: dataset-viewer table header removed; file starts mid-class)
return self.__isint(v) or v == '\n' or '-->' in v
|
def __process_sentences(self, v) -> List[str]:
    """Split the accumulated text *v* into sentences via NLTK's tokenizer."""
    return tokenize.sent_tokenize(v)
|
def save_data(self, save_path, sentences) -> None:
    """Write *sentences* to *save_path*, one sentence per line.

    Opens the file with an explicit UTF-8 encoding so output is
    platform-independent (the original relied on the locale default).
    """
    with open(save_path, 'w', encoding='utf-8') as f:
        for sentence in sentences:
            f.write("%s\n" % sentence)
|
def run(self) -> List[str]:
    """Clean every raw line and split the result into sentences.

    Skips lines flagged by __should_skip, strips '>' markers and
    newlines, and joins the survivors with single spaces before
    sentence-tokenizing.

    Improvement: collects fragments in a list and joins once instead
    of the original quadratic ``total += ' ' + cleaned`` string
    accumulation (which also left a leading space on the text).
    """
    parts: List[str] = []
    for data in self.all_data:
        if self.__should_skip(data):
            continue
        cleaned = data.replace('>', '').replace('\n', '').strip()
        if cleaned:
            parts.append(cleaned)
    return self.__process_sentences(' '.join(parts))
|
def convert_to_paragraphs(self) -> str:
    """Collapse the tokenized sentences from run() into one paragraph string."""
    stripped = (sentence.strip() for sentence in self.run())
    return ' '.join(stripped).strip()
|
@app.route('/summarize', methods=['POST'])
def convert_raw_text():
    """Summarize raw text POSTed in the request body.

    Query parameters:
        ratio (float, default 0.2): fraction of sentences to keep.
        min_length (int, default 25) / max_length (int, default 500):
            sentence-length bounds passed to the summarizer.

    Returns JSON ``{'summary': ...}``; responds 400 on an empty body
    or non-numeric query parameters (the original let ``float``/``int``
    raise ValueError, producing an opaque 500).
    """
    try:
        ratio = float(request.args.get('ratio', 0.2))
        min_length = int(request.args.get('min_length', 25))
        max_length = int(request.args.get('max_length', 500))
    except ValueError:
        abort(make_response(jsonify(message="ratio, min_length and max_length must be numeric"), 400))
    data = request.data
    if not data:
        abort(make_response(jsonify(message="Request must have raw text"), 400))
    # NOTE(review): request.data is bytes — presumably Parser decodes it;
    # verify against Parser.__init__.
    parsed = Parser(data).convert_to_paragraphs()
    summary = summarizer(parsed, ratio=ratio, min_length=min_length, max_length=max_length)
    return jsonify({
        'summary': summary
    })
|
if __name__ == '__main__':
    # CLI entry point: parse options, build the summarizer, start the HTTP server.
    parser = argparse.ArgumentParser(description='BERT extractive-summarization HTTP service')
    parser.add_argument('-model', dest='model', default='bert-base-multilingual-uncased',
                        help='The model to use')
    # type= on argparse moves the numeric casts (and their error reporting)
    # into the parser instead of ad-hoc float()/int() calls below.
    parser.add_argument('-greediness', dest='greediness', type=float, default=0.45,
                        help='Coreference greedyness passed to the summarizer')
    parser.add_argument('-reduce', dest='reduce', default='mean',
                        help='Pooling/reduce option for the summarizer')
    parser.add_argument('-hidden', dest='hidden', type=int, default=-2,
                        help='Hidden layer index to use for embeddings')
    parser.add_argument('-port', dest='port', type=int, default=5000,
                        help='Port to listen on')
    parser.add_argument('-host', dest='host', default='0.0.0.0',
                        help='Bind address')
    args = parser.parse_args()
    print(f"Using Model: {args.model}")
    summarizer = Summarizer(
        model=args.model,
        hidden=args.hidden,
        reduce_option=args.reduce,
        greedyness=args.greediness
    )
    # NOTE(review): debug=True enables the Werkzeug debugger and auto-reload;
    # with the default 0.0.0.0 bind this is unsafe outside local development.
    app.run(host=args.host, port=args.port, debug=True)
|
# <FILESEP>
|
# -*- coding:utf-8 -*-
|
import tensorflow as tf
|
import numpy as np
|
class Data_set(object):
|
def __init__(self, config, shuffle, name):
|
self.tfrecord_file = config.tfdata_path
|
self.batch_size = config.batch_size
|
self.min_after_dequeue = config.min_after_dequeue
|
self.capacity = config.capacity
|
self.actual_image_size = config.train_image_size
|
self.shuffle = shuffle
|
self.name = name
|
def read_processing_generate_image_label_batch(self):
|
if self.name.find('train') != -1:
|
# get filename list
|
tfrecord_filename = tf.gfile.Glob(self.tfrecord_file + '*%s*' % self.name)
|
print('tfrecord train filename', tfrecord_filename)
|
filename_queue = tf.train.string_input_producer(tfrecord_filename, num_epochs=None, shuffle=True)
|
# get tensor of image/label
|
image, label = read_tfrecord_and_decode_into_image_label_pair_tensors(filename_queue,
|
self.actual_image_size)
|
#image = channels_image_standardization(image)
|
image = image_standardization(image)
|
image = dataaugmentation(image)
|
image_batch, label_batch = tf.train.shuffle_batch([image, label],
|
batch_size=self.batch_size,
|
capacity=self.capacity,
|
num_threads=2,
|
min_after_dequeue=self.min_after_dequeue)
|
image_batch = tf.contrib.image.rotate(image_batch, tf.random_uniform(shape = (tf.shape(image_batch)[0], ), minval=-0.5, maxval=0.5, seed=37), interpolation='BILINEAR')
|
else:
|
# (extraction artifact: source truncated mid-method; trailing dataset-viewer
# prose removed. The else-branch of read_processing_generate_image_label_batch
# is missing from this dump.)