text stringlengths 1 93.6k |
|---|
global_step = tf.train.get_or_create_global_step()
|
lr_values = [params['learning_rate'] * decay for decay in params['lr_decay_factors']]
|
learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
|
[int(_) for _ in params['decay_boundaries']],
|
lr_values)
|
truncated_learning_rate = tf.maximum(learning_rate, tf.constant(params['end_learning_rate'], dtype=learning_rate.dtype))
|
# Create a tensor named learning_rate for logging purposes.
|
tf.identity(truncated_learning_rate, name='learning_rate')
|
tf.summary.scalar('learning_rate', truncated_learning_rate)
|
optimizer = tf.train.MomentumOptimizer(learning_rate=truncated_learning_rate,
|
momentum=params['momentum'])
|
# Batch norm requires update_ops to be added as a train_op dependency.
|
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
|
with tf.control_dependencies(update_ops):
|
train_op = optimizer.minimize(loss, global_step)
|
else:
|
train_op = None
|
cls_accuracy = tf.metrics.accuracy(glabels, predictions['classes'])
|
metrics = {'cls_accuracy': cls_accuracy}
|
# Create a tensor named train_accuracy for logging purposes.
|
tf.identity(cls_accuracy[1], name='cls_accuracy')
|
tf.summary.scalar('cls_accuracy', cls_accuracy[1])
|
return tf.estimator.EstimatorSpec(
|
mode=mode,
|
predictions=predictions,
|
loss=loss,
|
train_op=train_op,
|
eval_metric_ops=metrics,
|
scaffold = tf.train.Scaffold(init_fn=train_helper.get_init_fn_for_scaffold(FLAGS)))
|
def parse_comma_list(args):
    """Split a comma-separated string (e.g. '0.1, 0.01') into a list of floats."""
    pieces = args.split(',')
    return [float(piece.strip()) for piece in pieces]
|
def main(_):
    """Build session/run configuration, construct the estimator, and run one training cycle.

    Invoked by tf.app.run(); the single argument is the unused argv remainder.
    """
    # Using the Winograd non-fused algorithms provides a small performance boost.
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'

    gpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
    sess_config = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False,
        intra_op_parallelism_threads=FLAGS.num_cpu_threads,
        inter_op_parallelism_threads=FLAGS.num_cpu_threads,
        gpu_options=gpu_opts)

    # Set up a RunConfig to only save checkpoints once per training cycle.
    # A single replace() call with all properties is equivalent to chaining
    # one replace() per property; steps=None defers to the secs-based schedule.
    run_config = tf.estimator.RunConfig().replace(
        save_checkpoints_secs=FLAGS.save_checkpoints_secs,
        save_checkpoints_steps=None,
        save_summary_steps=FLAGS.save_summary_steps,
        keep_checkpoint_max=5,
        log_step_count_steps=FLAGS.log_every_n_steps,
        session_config=sess_config)

    # All hyperparameters are forwarded to the model_fn via `params`.
    xdetector = tf.estimator.Estimator(
        model_fn=xdet_model_fn,
        model_dir=FLAGS.model_dir,
        config=run_config,
        params={
            'resnet_size': FLAGS.resnet_size,
            'data_format': FLAGS.data_format,
            'model_scope': FLAGS.model_scope,
            'num_classes': FLAGS.num_classes,
            'negative_ratio': FLAGS.negative_ratio,
            'match_threshold': FLAGS.match_threshold,
            'neg_threshold': FLAGS.neg_threshold,
            'weight_decay': FLAGS.weight_decay,
            'momentum': FLAGS.momentum,
            'learning_rate': FLAGS.learning_rate,
            'end_learning_rate': FLAGS.end_learning_rate,
            'learning_rate_decay_factor': FLAGS.learning_rate_decay_factor,
            'decay_steps': FLAGS.decay_steps,
            'decay_boundaries': parse_comma_list(FLAGS.decay_boundaries),
            'lr_decay_factors': parse_comma_list(FLAGS.lr_decay_factors),
        })

    # Display name -> graph tensor name; the tensors were exposed via
    # tf.identity(...) inside the model_fn for exactly this purpose.
    tensors_to_log = {
        'learning_rate': 'learning_rate',
        'cross_entropy_loss': 'cross_entropy_loss',
        'location_loss': 'location_loss',
        'total_loss': 'total_loss',
        'cls_accuracy': 'cls_accuracy',
    }
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=FLAGS.log_every_n_steps)

    print('Starting a training cycle.')
    # NOTE(review): train() normally expects input_fn to be a callable; here the
    # result of input_pipeline() is passed, so input_pipeline presumably returns
    # the input_fn callable — confirm against its definition.
    xdetector.train(input_fn=input_pipeline(), hooks=[logging_hook])
|
if __name__ == '__main__':
    # Emit INFO-level logs so step counts and logged tensors are visible.
    tf.logging.set_verbosity(tf.logging.INFO)
    # tf.app.run() parses command-line flags and then invokes main(_).
    tf.app.run()
|
# <FILESEP>
|
from framework import get_init_file_path, release_addon
|
from main import ACTIVE_ADDON, IS_EXTENSION
|
# Please update the ACTIVE_ADDON parameter before releasing (发布前请修改ACTIVE_ADDON参数)
|
# The name of the addon to be released, this name is defined in the config.py of the addon as __addon_name__
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.