# NOTE(review): dataset-viewer table header ("text stringlengths 1 93.6k" / "|---|")
# replaced -- it is extraction chrome from a dataset dump, not part of the Python source.
glabels = labels['targets'][:num_feature_layers][0]
|
gtargets = labels['targets'][num_feature_layers : 2 * num_feature_layers][0]
|
#gtargets = tf.Print(gtargets, [gtargets], message='gtargets:', summarize=100)
|
gscores = labels['targets'][2 * num_feature_layers : 3 * num_feature_layers][0]
|
with tf.variable_scope(params['model_scope'], default_name = None, values = [features], reuse=tf.AUTO_REUSE):
|
backbone = xdet_body.xdet_resnet_v2(params['resnet_size'], params['data_format'])
|
multi_merged_feature = backbone(inputs=features, is_training=(mode == tf.estimator.ModeKeys.TRAIN))
|
cls_pred, location_pred = xdet_body.xdet_head(multi_merged_feature, params['num_classes'], num_anchors_list[0], (mode == tf.estimator.ModeKeys.TRAIN), data_format=params['data_format'])
|
if params['data_format'] == 'channels_first':
|
cls_pred = tf.transpose(cls_pred, [0, 2, 3, 1])
|
location_pred = tf.transpose(location_pred, [0, 2, 3, 1])
|
bboxes_pred = labels['decode_fn'](location_pred)#(tf.reshape(location_pred, tf.shape(location_pred).as_list()[0:-1] + [-1, 4]))
|
cls_pred = tf.reshape(cls_pred, [-1, params['num_classes']])
|
location_pred = tf.reshape(location_pred, [-1, 4])
|
glabels = tf.reshape(glabels, [-1])
|
gscores = tf.reshape(gscores, [-1])
|
gtargets = tf.reshape(gtargets, [-1, 4])
|
# raw mask for positive > 0.5, and for negetive < 0.3
|
# each positive examples has one label
|
positive_mask = glabels > 0#tf.logical_and(glabels > 0, gscores > params['match_threshold'])
|
fpositive_mask = tf.cast(positive_mask, tf.float32)
|
n_positives = tf.reduce_sum(fpositive_mask)
|
batch_glabels = tf.reshape(glabels, [tf.shape(features)[0], -1])
|
batch_n_positives = tf.count_nonzero(batch_glabels, -1)
|
batch_negtive_mask = tf.equal(batch_glabels, 0)
|
batch_n_negtives = tf.count_nonzero(batch_negtive_mask, -1)
|
batch_n_neg_select = tf.cast(params['negative_ratio'] * tf.cast(batch_n_positives, tf.float32), tf.int32)
|
batch_n_neg_select = tf.minimum(batch_n_neg_select, tf.cast(batch_n_negtives, tf.int32))
|
# hard negative mining for classification
|
predictions_for_bg = tf.nn.softmax(tf.reshape(cls_pred, [tf.shape(features)[0], -1, params['num_classes']]))[:, :, 0]
|
prob_for_negtives = tf.where(batch_negtive_mask,
|
0. - predictions_for_bg,
|
# ignore all the positives
|
0. - tf.ones_like(predictions_for_bg))
|
topk_prob_for_bg, _ = tf.nn.top_k(prob_for_negtives, k=tf.shape(prob_for_negtives)[1])
|
score_at_k = tf.gather_nd(topk_prob_for_bg, tf.stack([tf.range(tf.shape(features)[0]), batch_n_neg_select - 1], axis=-1))
|
selected_neg_mask = prob_for_negtives >= tf.expand_dims(score_at_k, axis=-1)
|
negtive_mask = tf.reshape(tf.logical_and(batch_negtive_mask, selected_neg_mask), [-1])#tf.logical_and(tf.equal(glabels, 0), gscores > 0.)
|
#negtive_mask = tf.logical_and(tf.logical_and(tf.logical_not(positive_mask), gscores < params['neg_threshold']), gscores > 0.)
|
#negtive_mask = tf.logical_and(gscores < params['neg_threshold'], tf.logical_not(positive_mask))
|
# # random select negtive examples for classification
|
# selected_neg_mask = tf.random_uniform(tf.shape(gscores), minval=0, maxval=1.) < tf.where(
|
# tf.greater(n_negtives, 0),
|
# tf.divide(tf.cast(n_neg_to_select, tf.float32), n_negtives),
|
# tf.zeros_like(tf.cast(n_neg_to_select, tf.float32)),
|
# name='rand_select_negtive')
|
# include both selected negtive and all positive examples
|
final_mask = tf.stop_gradient(tf.logical_or(negtive_mask, positive_mask))
|
total_examples = tf.reduce_sum(tf.cast(final_mask, tf.float32))
|
# add mask for glabels and cls_pred here
|
glabels = tf.boolean_mask(tf.clip_by_value(glabels, 0, FLAGS.num_classes), tf.stop_gradient(final_mask))
|
cls_pred = tf.boolean_mask(cls_pred, tf.stop_gradient(final_mask))
|
location_pred = tf.boolean_mask(location_pred, tf.stop_gradient(positive_mask))
|
gtargets = tf.boolean_mask(gtargets, tf.stop_gradient(positive_mask))
|
predictions = {
|
'classes': tf.argmax(cls_pred, axis=-1),
|
'probabilities': tf.reduce_max(tf.nn.softmax(cls_pred, name='softmax_tensor'), axis=-1),
|
'bboxes_predict': tf.reshape(bboxes_pred, [-1, 4]) }
|
if mode == tf.estimator.ModeKeys.PREDICT:
|
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
|
# Calculate loss, which includes softmax cross entropy and L2 regularization.
|
cross_entropy = tf.cond(n_positives > 0., lambda: tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred), lambda: 0.)
|
#cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred)
|
# Create a tensor named cross_entropy for logging purposes.
|
tf.identity(cross_entropy, name='cross_entropy_loss')
|
tf.summary.scalar('cross_entropy_loss', cross_entropy)
|
loc_loss = tf.cond(n_positives > 0., lambda: modified_smooth_l1(location_pred, tf.stop_gradient(gtargets), sigma=1.), lambda: tf.zeros_like(location_pred))
|
#loc_loss = modified_smooth_l1(location_pred, tf.stop_gradient(gtargets))
|
loc_loss = tf.reduce_mean(tf.reduce_sum(loc_loss, axis=-1))
|
loc_loss = tf.identity(loc_loss, name='location_loss')
|
tf.summary.scalar('location_loss', loc_loss)
|
tf.losses.add_loss(loc_loss)
|
# Add weight decay to the loss. We exclude the batch norm variables because
|
# doing so leads to a small improvement in accuracy.
|
loss = 1.2 * (cross_entropy + loc_loss) + params['weight_decay'] * tf.add_n(
|
[tf.nn.l2_loss(v) for v in tf.trainable_variables()
|
if 'batch_normalization' not in v.name])
|
total_loss = tf.identity(loss, name='total_loss')
|
if mode == tf.estimator.ModeKeys.TRAIN:
|
# NOTE(review): trailing dataset-viewer chrome ("Subsets and Splits", etc.)
# replaced -- it is not source code. The original model_fn is truncated here:
# the body of the `if mode == tf.estimator.ModeKeys.TRAIN:` branch above is missing.