# text
# stringlengths
# 1
# 93.6k
# The addon name, as defined by __addon_name__ in the addon's config.py file
if __name__ == '__main__':
    # Command-line entry point: parse packaging options and release the addon.
    # ACTIVE_ADDON, IS_EXTENSION, get_init_file_path and release_addon are
    # expected to be defined earlier in this file.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('addon', default=ACTIVE_ADDON, nargs='?', help='addon name')
    parser.add_argument(
        '--is_extension',
        default=IS_EXTENSION,
        action='store_true',
        help='If true, package the addon as extension, framework will convert '
             'absolute import to relative import for you and will take care of '
             'packaging the extension. Default is the value of IS_EXTENSION')
    parser.add_argument(
        '--disable_zip',
        default=False,
        action='store_true',
        help='If true, release the addon into a plain folder and do not zip it '
             'into an installable package, useful if you want to add more files '
             'and zip by yourself.')
    parser.add_argument(
        '--with_version',
        default=False,
        action='store_true',
        help='Append the addon version number (as specified in bl_info) to the '
             'released zip file name.')
    parser.add_argument(
        '--with_timestamp',
        default=False,
        action='store_true',
        help='Append a timestamp to the zip file name.')

    options = parser.parse_args()
    release_addon(
        target_init_file=get_init_file_path(options.addon),
        addon_name=options.addon,
        need_zip=not options.disable_zip,
        is_extension=options.is_extension,
        with_timestamp=options.with_timestamp,
        with_version=options.with_version,
    )
# <FILESEP>
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import math
from datasets import dataset_factory
from preprocessing import preprocessing_factory
# Shortcut to the TF1 global command-line flags object; the functions below
# read FLAGS.dataset_name, FLAGS.preprocessing and FLAGS.image_size from it.
FLAGS = tf.app.flags.FLAGS
def prepare_traindata(dataset_dir, batch_size):
    """Build a shuffled training input pipeline.

    Args:
        dataset_dir: directory containing the TF-slim dataset files.
        batch_size: number of examples per batch.

    Returns:
        A (images, labels) pair of batched tensors.
    """
    train_set = dataset_factory.get_dataset(FLAGS.dataset_name, 'train', dataset_dir)
    data_provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset=train_set, num_readers=4, shuffle=True)
    image, label = data_provider.get(['image', 'label'])
    preprocess = preprocessing_factory.get_preprocessing(FLAGS.preprocessing,
                                                         is_training=True)
    image = preprocess(image, FLAGS.image_size, FLAGS.image_size)
    # Keep a buffer of at least 4*batch_size examples so batches are well mixed.
    images, labels = tf.train.shuffle_batch(
        [image, label], batch_size=batch_size, num_threads=4,
        capacity=8 * batch_size, min_after_dequeue=4 * batch_size)
    return images, labels
def prepare_testdata(dataset_dir, batch_size):
    """Build a deterministic (non-shuffled) evaluation input pipeline.

    Args:
        dataset_dir: directory containing the TF-slim dataset files.
        batch_size: number of examples per batch.

    Returns:
        A (images, labels) pair of batched tensors.
    """
    test_set = dataset_factory.get_dataset(FLAGS.dataset_name, 'test', dataset_dir)
    data_provider = slim.dataset_data_provider.DatasetDataProvider(
        test_set, num_readers=1, shuffle=False)
    image, label = data_provider.get(['image', 'label'])
    preprocess = preprocessing_factory.get_preprocessing(FLAGS.preprocessing,
                                                         is_training=False)
    image = preprocess(image, FLAGS.image_size, FLAGS.image_size)
    # Single reader/thread and no smaller final batch keeps evaluation order stable.
    images, labels = tf.train.batch(
        [image, label], batch_size=batch_size, num_threads=1,
        capacity=4 * batch_size, allow_smaller_final_batch=False)
    return images, labels
def config_lr(max_steps):
    """Return the learning-rate schedule for the configured dataset.

    Args:
        max_steps: total number of training steps.

    Returns:
        boundaries: single-element list with the step at which decay begins.
        values: single-element list with the initial learning rate.

    Raises:
        ValueError: if FLAGS.dataset_name matches neither 'cifar' nor 'svhn'.
    """
    if 'cifar' in FLAGS.dataset_name:
        # Training presumably runs 500 epochs on cifar; start decaying at epoch 250.
        boundaries = [int(250.0 / 500.0 * max_steps)]
        values = [0.1]
    elif 'svhn' in FLAGS.dataset_name:
        # Start decaying immediately (step 0) on svhn.
        boundaries = [int(0 * max_steps)]
        values = [0.02]
    else:
        # Fail fast with a clear message instead of leaking an
        # UnboundLocalError from the return statement below.
        raise ValueError('Unsupported dataset for lr schedule: %s'
                         % FLAGS.dataset_name)
    return boundaries, values
def linear_decay_lr(step, boundaries, values, max_steps):
# decay learning rate linearly
if 'svhn' in FLAGS.dataset_name:
decayed_lr = (float(max_steps - (step + 1)) / float(max_steps)) * values[0]
else:
if step < boundaries[0]:
decayed_lr = values[0]
else:
ratio = (float(max_steps - (step + 1)) / float(max_steps - boundaries[0]))
decayed_lr = ratio * values[0]