# benchmarks-master/scripts/tf_cnn_benchmarks/benchmark_cnn.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow benchmark library.
See the README for more information.
"""
from __future__ import print_function
import argparse
from collections import namedtuple
import math
import multiprocessing
import os
import threading
import time
from absl import flags as absl_flags
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import debug as tf_debug
from tensorflow.python.client import timeline
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import gfile
from tensorflow.python.util import nest
import benchmark_storage
import cnn_util
import constants
import data_utils
import datasets
import flags
import variable_mgr
import variable_mgr_util
from cnn_util import log_fn
from models import model_config
from platforms import util as platforms_util
from power_logger import NvidiaPowerReader
_DEFAULT_NUM_BATCHES = 100
# TODO(reedwm): add upper_bound and lower_bound to appropriate integer and
# float flags, and change certain string flags to enum flags.
flags.DEFINE_string('model', 'trivial',
'Name of the model to run. The list of supported models '
'is defined in models/model.py.')
# The code first checks whether it is running in benchmarking mode or
# evaluation mode, depending on 'eval':
# In evaluation mode, this script reads a saved model and computes its
# accuracy against a validation dataset. Additional ops for accuracy and
# top_k predictors are only used in this mode.
# In benchmarking mode, the user can specify whether or not to use the
# forward-only option, which only computes the loss function.
# forward-only cannot be enabled together with eval.
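# Example flag combinations (values are illustrative):
#   --eval=True (requires --train_dir)         -> evaluation mode
#   --forward_only=True                        -> forward-only benchmarking
#   --eval=False --forward_only=False          -> full training benchmark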
flags.DEFINE_boolean('eval', False, 'whether to run evaluation or benchmarking')
flags.DEFINE_integer('eval_interval_secs', 0,
'How often to run eval on saved checkpoints. Usually the '
'same as save_model_secs from the corresponding training '
'run. Pass 0 to eval only once.')
flags.DEFINE_boolean('forward_only', True,
'whether to use forward-only or training for benchmarking')
flags.DEFINE_boolean('print_training_accuracy', False,
'whether to calculate and print training accuracy during '
'training')
flags.DEFINE_integer('batch_size', 20, 'batch size per compute device')
flags.DEFINE_integer('batch_group_size', 1,
'number of groups of batches processed in the image '
'producer.')
flags.DEFINE_integer('num_batches', 10000,
'number of batches to run, excluding warmup. If set to '
'0, this falls back to %d.' % _DEFAULT_NUM_BATCHES)
flags.DEFINE_float('num_epochs', None,
'number of epochs to run, excluding warmup. '
'This and --num_batches cannot both be specified.')
flags.DEFINE_integer('num_warmup_batches', None,
'number of batches to run before timing')
flags.DEFINE_integer('autotune_threshold', None,
'The autotune threshold for the models')
flags.DEFINE_integer('num_gpus', 1, 'the number of GPUs to run on')
flags.DEFINE_string('gpu_indices', '', 'indices of worker GPUs in ring order')
flags.DEFINE_integer('display_every', 500,
'Number of local steps after which progress is printed '
'out')
flags.DEFINE_string('data_dir', None,
'Path to dataset in TFRecord format (aka Example '
'protobufs). If not specified, synthetic data will be '
'used.')
flags.DEFINE_string('data_name', None,
'Name of dataset: imagenet or cifar10. If not specified, '
'it is automatically guessed based on data_dir.')
flags.DEFINE_string('resize_method', 'bilinear',
'Method for resizing input images: crop, nearest, '
'bilinear, bicubic, area, or round_robin. The `crop` mode '
'requires source images to be at least as large as the '
'network input size. The `round_robin` mode applies '
'different resize methods based on position in a batch in '
'a round-robin fashion. Other modes support any sizes and '
'apply random bbox distortions before resizing (even with '
'distortions=False).')
flags.DEFINE_boolean('distortions', True,
'Enable/disable distortions during image preprocessing. '
'These include bbox and color distortions.')
flags.DEFINE_boolean('use_datasets', True,
'Enable use of datasets for input pipeline')
flags.DEFINE_string('input_preprocessor', 'default',
'Name of input preprocessor. The list of supported input '
'preprocessors are defined in preprocessing.py.')
flags.DEFINE_string('gpu_thread_mode', 'gpu_private',
'Methods to assign GPU host work to threads. '
'global: all GPUs and CPUs share the same global threads; '
'gpu_private: a private threadpool for each GPU; '
'gpu_shared: all GPUs share the same threadpool.')
flags.DEFINE_integer('per_gpu_thread_count', 0,
'The number of threads to use for GPU. Only valid when '
'gpu_thread_mode is not global.')
flags.DEFINE_boolean('hierarchical_copy', False,
'Use hierarchical copies. Currently only optimized for '
'use on a DGX-1 with 8 GPUs and may perform poorly on '
'other hardware. Requires --num_gpus > 1, and only '
'recommended when --num_gpus=8')
# TODO(hinsu): Support auto-detection of the network topology while still
# retaining the ability to specify a particular topology for debugging.
flags.DEFINE_enum(
'network_topology', constants.NetworkTopology.DGX1,
(constants.NetworkTopology.DGX1, constants.NetworkTopology.GCP_V100),
'Network topology specifies the topology used to connect multiple devices. '
'Network topology is used to decide the hierarchy to use for the '
'hierarchical_copy.')
flags.DEFINE_integer('gradient_repacking', 0, 'Use gradient repacking. It '
'currently only works with replicated mode. At the end '
'of each step, it repacks the gradients for more '
'efficient cross-device transportation. A non-zero value '
'specifies the number of split packs that will be '
'formed.',
lower_bound=0)
flags.DEFINE_boolean('compact_gradient_transfer', True, 'Compact gradients '
'as much as possible for cross-device transfer and '
'aggregation.')
flags.DEFINE_enum('variable_consistency', 'strong', ('strong', 'relaxed'),
'The data consistency for trainable variables. With strong '
'consistency, the variables always have the updates from '
'the previous step. With relaxed consistency, all the '
'updates will eventually show up in the variables, likely '
'one step behind.')
flags.DEFINE_boolean('cache_data', False,
'Enable use of a special datasets pipeline that reads a '
'single TFRecord into memory and repeats it infinitely '
'many times. The purpose of this flag is to make it '
'possible to write regression tests that are not '
'bottlenecked by CNS throughput.')
flags.DEFINE_enum('local_parameter_device', 'gpu', ('cpu', 'gpu', 'CPU', 'GPU'),
'Device to use as parameter server: cpu or gpu. For '
'distributed training, it can affect where caching of '
'variables happens.')
flags.DEFINE_enum('device', 'gpu', ('cpu', 'gpu', 'CPU', 'GPU'),
'Device to use for computation: cpu or gpu')
flags.DEFINE_enum('data_format', 'NCHW', ('NHWC', 'NCHW'),
'Data layout to use: NHWC (TF native) or NCHW (cuDNN '
'native, requires GPU).')
flags.DEFINE_integer('num_intra_threads', 0,
'Number of threads to use for intra-op parallelism. If '
'set to 0, the system will pick an appropriate number.')
flags.DEFINE_integer('num_inter_threads', 0,
'Number of threads to use for inter-op parallelism. If '
'set to 0, the system will pick an appropriate number.')
flags.DEFINE_string('trace_file', '/home/sniper/tf_cnn_benchmarks/trace_data.ctf',
'Enable TensorFlow tracing and write trace to this file.')
flags.DEFINE_boolean('use_chrome_trace_format', True,
'If True, the trace_file, if specified, will be in a '
'Chrome trace format. If False, then it will be a '
'StepStats raw proto.')
_NUM_STEPS_TO_PROFILE = 10
_NUM_OPS_TO_PRINT = 20
flags.DEFINE_string('tfprof_file', None,
'If specified, write a tfprof ProfileProto to this file. '
'The performance and other aspects of the model can then '
'be analyzed with tfprof. See '
'https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/profiler/g3doc/command_line.md ' # pylint: disable=line-too-long
'for more info on how to do this. The first %d steps '
'are profiled. Additionally, the top %d most time '
'consuming ops will be printed.\n'
'Note: profiling with tfprof is very slow, but most of the '
'overhead is spent between steps. So, profiling results '
'are more accurate than the slowdown would suggest.' %
(_NUM_STEPS_TO_PROFILE, _NUM_OPS_TO_PRINT))
flags.DEFINE_string('graph_file', None,
'Write the model\'s graph definition to this file. '
'Defaults to binary format unless filename ends in "txt".')
flags.DEFINE_string('partitioned_graph_file_prefix', None,
'If specified, after the graph has been partitioned and '
'optimized, write out each partitioned graph to a file '
'with the given prefix.')
flags.DEFINE_enum('optimizer', 'sgd', ('momentum', 'sgd', 'rmsprop'),
'Optimizer to use: momentum or sgd or rmsprop')
flags.DEFINE_float('init_learning_rate', None,
'Initial learning rate for training.')
flags.DEFINE_string('piecewise_learning_rate_schedule', None,
'Specifies a piecewise learning rate schedule based on the '
'number of epochs. This is the form LR0;E1;LR1;...;En;LRn, '
'where each LRi is a learning rate and each Ei is an epoch '
'indexed from 0. The learning rate is LRi if '
'E(i-1) <= current_epoch < Ei. For example, if this '
'parameter is 0.3;10;0.2;25;0.1, the learning rate is 0.3 '
'for the first 10 epochs, then is 0.2 for the next 15 '
'epochs, then is 0.1 until training ends.')
flags.DEFINE_float('num_epochs_per_decay', 0,
'Epochs after which the learning rate decays. If 0, the '
'learning rate does not decay.')
flags.DEFINE_float('learning_rate_decay_factor', 0,
'Learning rate decay factor. Decay by this factor every '
'`num_epochs_per_decay` epochs. If 0, learning rate does '
'not decay.')
flags.DEFINE_float('num_learning_rate_warmup_epochs', 0,
'Linearly increase the learning rate from 0 to the '
'initial learning rate over the first '
'num_learning_rate_warmup_epochs epochs.')
flags.DEFINE_float('minimum_learning_rate', 0,
'The minimum learning rate. The learning rate will '
'never decay past this value. Requires '
'`init_learning_rate`, `num_epochs_per_decay` and '
'`learning_rate_decay_factor` to be set.')
flags.DEFINE_float('momentum', 0.9, 'Momentum for training.')
flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum in RMSProp.')
flags.DEFINE_float('rmsprop_epsilon', 1.0, 'Epsilon term for RMSProp.')
flags.DEFINE_float('gradient_clip', None,
'Gradient clipping magnitude. Disabled by default.')
flags.DEFINE_float('weight_decay', 0.00004,
'Weight decay factor for training.')
flags.DEFINE_float('gpu_memory_frac_for_testing', 0,
'If non-zero, the fraction of GPU memory that will be used. '
'Useful for testing the benchmark script, as this allows '
'distributed mode to be run on a single machine. For '
'example, if there are two tasks, each can be allocated '
'~40 percent of the memory on a single machine.',
lower_bound=0., upper_bound=1.)
flags.DEFINE_boolean('use_tf_layers', True,
'If True, use tf.layers for neural network layers. This '
'should not affect performance or accuracy in any way.')
flags.DEFINE_integer('tf_random_seed', 1234,
'The TensorFlow random seed. Useful for debugging NaNs, '
'as this can be set to various values to see if the NaNs '
'depend on the seed.')
flags.DEFINE_string('debugger', None,
'If set, use the TensorFlow debugger. If set to "cli", use '
'the local CLI debugger. Otherwise, this must be in the '
'form hostname:port (e.g., localhost:7007) in which case '
'the experimental TensorBoard debugger will be used')
flags.DEFINE_boolean('use_python32_barrier', False,
'When on, use threading.Barrier if running on Python '
'3.2 or newer.')
flags.DEFINE_boolean('datasets_use_prefetch', True,
'Enable use of prefetched datasets for input pipeline. '
'This option is meaningless if use_datasets=False.')
flags.DEFINE_integer('datasets_prefetch_buffer_size', 1,
'Prefetching op buffer size per compute device.')
flags.DEFINE_integer('datasets_num_private_threads', None,
'Number of threads for a private threadpool created for '
'all datasets computation. By default, we pick an '
'appropriate number. If set to 0, we use the default '
'tf-Compute threads for dataset operations.')
# Performance tuning parameters.
flags.DEFINE_boolean('winograd_nonfused', True,
'Enable/disable using the Winograd non-fused algorithms.')
flags.DEFINE_boolean(
'batchnorm_persistent', True,
'Enable/disable using the CUDNN_BATCHNORM_SPATIAL_PERSISTENT '
'mode for batchnorm.')
flags.DEFINE_boolean('sync_on_finish', False,
'Enable/disable whether the devices are synced after each '
'step.')
flags.DEFINE_boolean('staged_vars', False,
'whether the variables are staged from the main '
'computation')
flags.DEFINE_boolean('force_gpu_compatible', False,
'whether to enable force_gpu_compatible in GPU_Options')
flags.DEFINE_boolean('allow_growth', None,
'whether to enable allow_growth in GPU_Options')
flags.DEFINE_boolean('xla', False, 'whether to enable XLA')
flags.DEFINE_boolean('fuse_decode_and_crop', True,
'Fuse decode_and_crop for image preprocessing.')
flags.DEFINE_boolean('distort_color_in_yiq', True,
'Distort color of input images in YIQ space.')
flags.DEFINE_boolean('enable_layout_optimizer', False,
'whether to enable layout optimizer')
flags.DEFINE_string('rewriter_config', None,
'Config for graph optimizers, described as a '
'RewriterConfig proto buffer.')
flags.DEFINE_enum('loss_type_to_report', 'total_loss',
('base_loss', 'total_loss'),
'Which type of loss to output and to write summaries for. '
'The total loss includes L2 loss while the base loss does '
'not. Note that the total loss is always used while '
'computing gradients during training if weight_decay > 0, '
'but explicitly computing the total loss, instead of just '
'computing its gradients, can have a performance impact.')
flags.DEFINE_boolean('single_l2_loss_op', False,
'If True, instead of using an L2 loss op per variable, '
'concatenate the variables into a single tensor and do a '
'single L2 loss on the concatenated tensor.')
flags.DEFINE_boolean('use_resource_vars', False,
'Use resource variables instead of normal variables. '
'Resource variables are slower, but this option is useful '
'for debugging their performance.')
# Performance tuning specific to MKL.
flags.DEFINE_boolean('mkl', False, 'If true, set MKL environment variables.')
flags.DEFINE_integer('kmp_blocktime', 30,
'The time, in milliseconds, that a thread should wait, '
'after completing the execution of a parallel region, '
'before sleeping.')
flags.DEFINE_string('kmp_affinity', 'granularity=fine,verbose,compact,1,0',
'Restricts execution of certain threads (virtual execution '
'units) to a subset of the physical processing units in a '
'multiprocessor computer.')
flags.DEFINE_integer('kmp_settings', 1,
'If set to 1, MKL settings will be printed.')
# fp16 parameters. If use_fp16=False, no other fp16 parameters apply.
flags.DEFINE_boolean('use_fp16', False,
'Use 16-bit floats for certain tensors instead of 32-bit '
'floats. This is currently experimental.')
# TODO(reedwm): The default loss scale of 128 causes most models to diverge
# on the second step with synthetic data. Changing the tf.set_random_seed
# call to tf.set_random_seed(1235) or most other seed values causes the
# issue not to occur.
flags.DEFINE_float('fp16_loss_scale', None,
'If fp16 is enabled, the loss is multiplied by this amount '
'right before gradients are computed, then each gradient '
'is divided by this amount. Mathematically, this has no '
'effect, but it helps avoid fp16 underflow. Set to 1 to '
'effectively disable.')
flags.DEFINE_boolean('fp16_vars', False,
'If fp16 is enabled, also use fp16 for variables. If '
'False, the variables are stored in fp32 and casted to '
'fp16 when retrieved. Recommended to leave as False.')
flags.DEFINE_boolean('fp16_enable_auto_loss_scale', False,
'If True and use_fp16 is True, automatically adjust the '
'loss scale during training.')
flags.DEFINE_integer('fp16_inc_loss_scale_every_n', 1000,
'If fp16 is enabled and fp16_enable_auto_loss_scale is '
'True, increase the loss scale every n steps.')
# The method for managing variables:
# parameter_server: variables are stored on a parameter server that holds
# the master copy of the variable. In local execution, a local device
# acts as the parameter server for each variable; in distributed
# execution, the parameter servers are separate processes in the
# cluster.
# For each step, each tower gets a copy of the variables from the
# parameter server, and sends its gradients to the param server.
# replicated: each GPU has its own copy of the variables. To apply
# gradients, an all_reduce algorithm or regular cross-device
# aggregation is used to replicate the combined gradients to all
# towers (depending on the all_reduce_spec parameter setting).
# independent: each GPU has its own copy of the variables, and gradients
# are not shared between towers. This can be used to check performance
# when no data is moved between GPUs.
# distributed_replicated: Distributed training only. Each GPU has a copy
# of the variables, and updates its copy after the parameter servers
# are all updated with the gradients from all servers. Only works with
# cross_replica_sync=true. Unlike 'replicated', currently never uses
# nccl all-reduce for replicating within a server.
# distributed_all_reduce: Distributed training where all replicas run
# in a single session, using all-reduce to mutually reduce the
# gradients. Uses no parameter servers. When there is only one
# worker, this is the same as replicated.
# collective_all_reduce: Distributed training where all replicas run
# independently except for variable initialization and for
# gradient reduction, which is done via collective all-reduce.
# NOTE: collective_all_reduce in conjunction with use_fp16 can
# lead to NaNs in some models (resnet50). TODO(tucker): fix it.
# horovod: Distributed training using the Horovod library. Runs workers
# using an MPI framework (e.g. Open MPI). Each worker runs training on
# a single GPU, and averages gradients using NCCL or MPI all-reduce.
# See https://github.com/uber/horovod for more details.
flags.DEFINE_enum('variable_update', 'parameter_server',
('parameter_server', 'replicated', 'distributed_replicated',
'independent', 'distributed_all_reduce',
'collective_all_reduce', 'horovod'),
'The method for managing variables: parameter_server, '
'replicated, distributed_replicated, independent, '
'distributed_all_reduce, collective_all_reduce, horovod')
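# Illustrative invocations (hypothetical flag values; tf_cnn_benchmarks.py
# is the script that drives this library):
#   python tf_cnn_benchmarks.py --variable_update=replicated --num_gpus=4
#   python tf_cnn_benchmarks.py --variable_update=parameter_server \
#     --job_name=worker --worker_hosts=host1:2222,host2:2222 --task_index=0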
flags.DEFINE_string('all_reduce_spec', None,
'A specification of the all_reduce algorithm to be used '
'for reducing gradients. For more details, see '
'parse_all_reduce_spec in variable_mgr.py. An '
'all_reduce_spec has BNF form:\n'
'int ::= positive whole number\n'
'g_int ::= int[KkMGT]?\n'
'alg_spec ::= alg | alg#int\n'
'range_spec ::= alg_spec | alg_spec/alg_spec\n'
'spec ::= range_spec | range_spec:g_int:range_spec\n'
'NOTE: not all syntactically correct constructs are '
'supported.\n\n'
'Examples:\n '
'"xring" == use one global ring reduction for all '
'tensors\n'
'"pscpu" == use CPU at worker 0 to reduce all tensors\n'
'"nccl" == use NCCL to locally reduce all tensors. '
'Limited to 1 worker.\n'
'"nccl/xring" == locally (to one worker) reduce values '
'using NCCL then ring reduce across workers.\n'
'"pscpu:32k:xring" == use pscpu algorithm for tensors of '
'size up to 32kB, then xring for larger tensors.')
# If variable_update==distributed_all_reduce then it may be advantageous
# to aggregate small tensors into one prior to reduction. These parameters
# control that aggregation.
flags.DEFINE_integer('agg_small_grads_max_bytes', 0,
'If > 0, try to aggregate tensors of less than this '
'number of bytes prior to all-reduce.')
flags.DEFINE_integer('agg_small_grads_max_group', 10,
'When aggregating small tensors for all-reduce do not '
'aggregate more than this many into one new tensor.')
flags.DEFINE_integer('allreduce_merge_scope', 1,
'Establish a name scope around this many '
'gradients prior to creating the all-reduce operations. '
'It may affect the ability of the backend to merge '
'parallel ops.')
# Distributed training parameters.
flags.DEFINE_enum('job_name', '', ('ps', 'worker', 'controller', ''),
'One of "ps", "worker", "controller", "". Empty for local '
'training')
flags.DEFINE_string('ps_hosts', '', 'Comma-separated list of target hosts')
flags.DEFINE_string('worker_hosts', '', 'Comma-separated list of target hosts')
flags.DEFINE_string('controller_host', None, 'optional controller host')
flags.DEFINE_integer('task_index', 0, 'Index of task within the job')
flags.DEFINE_string('server_protocol', 'grpc', 'protocol for servers')
flags.DEFINE_boolean('cross_replica_sync', True, '')
flags.DEFINE_string('horovod_device', '', 'Device to do Horovod all-reduce on: '
'empty (default), cpu or gpu. The default will utilize '
'the GPU if Horovod was compiled with the '
'HOROVOD_GPU_ALLREDUCE option, and the CPU otherwise.')
# Summary and Save & load checkpoints.
flags.DEFINE_integer('summary_verbosity', 0, 'Verbosity level for summary ops. '
'level 0: disable any summary.\n'
'level 1: small and fast ops, e.g.: learning_rate, '
'total_loss.\n'
'level 2: medium-cost ops, e.g. histogram of all '
'gradients.\n'
'level 3: expensive ops: images and histogram of each '
'gradient.\n')
flags.DEFINE_integer('save_summaries_steps', 0,
'How often to save summaries for trained models. Pass 0 '
'to disable summaries.')
flags.DEFINE_integer('save_model_secs', 0,
'How often to save trained models. Pass 0 to disable '
'checkpoints.')
flags.DEFINE_string('train_dir', None,
'Path to session checkpoints. Pass None to disable saving '
'checkpoint at the end.')
flags.DEFINE_string('eval_dir', '/tmp/tf_cnn_benchmarks/eval',
'Directory where to write eval event logs.')
flags.DEFINE_string('result_storage', None,
'Specifies storage option for benchmark results. None '
'means results won\'t be stored. '
'`cbuild_benchmark_datastore` means results will be stored '
'in cbuild datastore (note: this option requires special '
'permissions and is meant to be used from cbuilds).')
# Benchmark logging for model garden metric
flags.DEFINE_string('benchmark_log_dir', None,
'The directory to place the log files containing the '
'results of the benchmark. The logs are created by '
'BenchmarkFileLogger. Requires the root of the TensorFlow '
'models repository to be in $PYTHONPATH.')
platforms_util.define_platform_params()
class GlobalStepWatcher(threading.Thread):
"""A helper class for global_step.
Polls for changes in the global_step of the model, and finishes when the
number of steps for the global run is done.
"""
def __init__(self, sess, global_step_op, start_at_global_step,
end_at_global_step):
threading.Thread.__init__(self)
self.sess = sess
self.global_step_op = global_step_op
self.start_at_global_step = start_at_global_step
self.end_at_global_step = end_at_global_step
self.start_time = 0
self.start_step = 0
self.finish_time = 0
self.finish_step = 0
def run(self):
while self.finish_time == 0:
time.sleep(.25)
global_step_val, = self.sess.run([self.global_step_op])
if self.start_time == 0 and global_step_val >= self.start_at_global_step:
# Use tf.logging.info instead of log_fn, since print (which is log_fn)
# is not thread safe and may interleave the outputs from two parallel
# calls to print, which can break tests.
tf.logging.info('Starting real work at step %s at time %s' %
(global_step_val, time.ctime()))
self.start_time = time.time()
self.start_step = global_step_val
if self.finish_time == 0 and global_step_val >= self.end_at_global_step:
tf.logging.info('Finishing real work at step %s at time %s' %
(global_step_val, time.ctime()))
self.finish_time = time.time()
self.finish_step = global_step_val
def done(self):
return self.finish_time > 0
def num_steps(self):
return self.finish_step - self.start_step
def elapsed_time(self):
return self.finish_time - self.start_time
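# Sketch of how GlobalStepWatcher is typically used (step counts are
# illustrative): start the watcher, drive training, then read the timing.
#   watcher = GlobalStepWatcher(sess, global_step_op,
#                               start_at_global_step=10,
#                               end_at_global_step=110)
#   watcher.start()
#   # ... run training steps ...
#   images_per_sec = watcher.num_steps() * batch_size / watcher.elapsed_time()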
class CheckpointNotFoundException(Exception):
pass
def get_data_type(params):
"""Returns BenchmarkCNN's data type as determined by use_fp16.
Args:
params: Params tuple, typically created by make_params or
make_params_from_flags.
"""
return tf.float16 if params.use_fp16 else tf.float32
# Note that we monkey patch this function in the unit tests. So if this is
# inlined or renamed, the unit tests must be updated.
def loss_function(logits, labels, aux_logits):
"""Loss function."""
with tf.name_scope('xentropy'):
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=logits, labels=labels)
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
if aux_logits is not None:
with tf.name_scope('aux_xentropy'):
aux_cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=aux_logits, labels=labels)
aux_loss = 0.4 * tf.reduce_mean(aux_cross_entropy, name='aux_loss')
loss = tf.add_n([loss, aux_loss])
return loss
def create_config_proto(params):
"""Returns session config proto.
Args:
params: Params tuple, typically created by make_params or
make_params_from_flags.
"""
config = tf.ConfigProto()
config.allow_soft_placement = True
config.intra_op_parallelism_threads = params.num_intra_threads
config.inter_op_parallelism_threads = params.num_inter_threads
# config.gpu_options.experimental.collective_group_leader = '/job:worker/replica:0/task:0'
config.gpu_options.force_gpu_compatible = params.force_gpu_compatible
if params.allow_growth is not None:
config.gpu_options.allow_growth = params.allow_growth
if params.gpu_memory_frac_for_testing > 0:
config.gpu_options.per_process_gpu_memory_fraction = (
params.gpu_memory_frac_for_testing)
if params.xla:
config.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_1)
if params.enable_layout_optimizer:
config.graph_options.rewrite_options.layout_optimizer = (
rewriter_config_pb2.RewriterConfig.ON)
if params.rewriter_config:
rewriter_config = rewriter_config_pb2.RewriterConfig()
text_format.Merge(params.rewriter_config, rewriter_config)
config.graph_options.rewrite_options.CopyFrom(rewriter_config)
if params.variable_update == 'horovod':
import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top
config.gpu_options.visible_device_list = str(hvd.local_rank())
return config
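# Typical usage (mirrors _eval_once below): build the session with
# tf.Session(config=create_config_proto(params)) so the thread counts, GPU
# options and graph-rewrite settings above take effect.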
def get_mode_from_params(params):
"""Returns the mode in which this script is running.
Args:
params: Params tuple, typically created by make_params or
make_params_from_flags.
Raises:
ValueError: Unsupported params settings.
"""
if params.forward_only and params.eval:
raise ValueError('Only one of forward_only and eval parameters may be true')
if params.eval:
return 'evaluation'
if params.forward_only:
return 'forward-only'
return 'training'
# How many digits to show for the loss and accuracies during training.
LOSS_AND_ACCURACY_DIGITS_TO_SHOW = 3
def benchmark_one_step(sess,
fetches,
step,
batch_size,
step_train_times,
trace_filename,
partitioned_graph_file_prefix,
profiler,
image_producer,
params,
summary_op=None,
show_images_per_sec=True,
benchmark_logger=None,
collective_graph_key=0):
"""Advance one step of benchmarking."""
should_profile = profiler and 0 <= step < _NUM_STEPS_TO_PROFILE
need_options_and_metadata = (
should_profile or collective_graph_key > 0 or
((trace_filename or partitioned_graph_file_prefix) and step == -2)
)
if need_options_and_metadata:
run_options = tf.RunOptions()
if (trace_filename and step == -2) or should_profile:
run_options.trace_level = tf.RunOptions.FULL_TRACE
if partitioned_graph_file_prefix and step == -2:
run_options.output_partition_graphs = True
if collective_graph_key > 0:
run_options.experimental.collective_graph_key = collective_graph_key
run_metadata = tf.RunMetadata()
else:
run_options = None
run_metadata = None
summary_str = None
start_time = time.time()
if summary_op is None:
results = sess.run(fetches, options=run_options, run_metadata=run_metadata)
else:
(results, summary_str) = sess.run(
[fetches, summary_op], options=run_options, run_metadata=run_metadata)
if not params.forward_only:
lossval = results['average_loss']
else:
lossval = 0.
if image_producer is not None:
image_producer.notify_image_consumption()
train_time = time.time() - start_time
step_train_times.append(train_time)
if (show_images_per_sec and step >= 0 and
(step == 0 or (step + 1) % params.display_every == 0)):
log_str = '%i\t%s\t%.*f' % (
step + 1, get_perf_timing_str(batch_size, step_train_times),
LOSS_AND_ACCURACY_DIGITS_TO_SHOW, lossval)
if 'top_1_accuracy' in results:
log_str += '\t%.*f\t%.*f' % (
LOSS_AND_ACCURACY_DIGITS_TO_SHOW, results['top_1_accuracy'],
LOSS_AND_ACCURACY_DIGITS_TO_SHOW, results['top_5_accuracy'])
log_fn(log_str)
if benchmark_logger:
# TODO(scottzhu): This might impact the benchmark speed since it writes
# the benchmark log to local directory.
benchmark_logger.log_evaluation_result(results)
if need_options_and_metadata:
if should_profile:
profiler.add_step(step, run_metadata)
if trace_filename and step == -2:
log_fn('Dumping trace to %s' % trace_filename)
trace_dir = os.path.dirname(trace_filename)
if not gfile.Exists(trace_dir):
gfile.MakeDirs(trace_dir)
with gfile.Open(trace_filename, 'w') as trace_file:
if params.use_chrome_trace_format:
trace = timeline.Timeline(step_stats=run_metadata.step_stats)
trace_file.write(trace.generate_chrome_trace_format(show_memory=True))
else:
trace_file.write(str(run_metadata.step_stats))
if partitioned_graph_file_prefix and step == -2:
path, filename = os.path.split(partitioned_graph_file_prefix)
if '.' in filename:
base_filename, ext = filename.rsplit('.', 1)
ext = '.' + ext
else:
base_filename, ext = filename, ''
as_text = filename.endswith('txt')
for graph_def in run_metadata.partition_graphs:
device = graph_def.node[0].device.replace('/', '_').replace(':', '_')
graph_filename = '%s%s%s' % (base_filename, device, ext)
log_fn('Writing partitioned GraphDef as %s to %s' % (
'text' if as_text else 'binary',
os.path.join(path, graph_filename)))
tf.train.write_graph(graph_def, path, graph_filename, as_text)
return summary_str
def get_perf_timing_str(batch_size, step_train_times, scale=1):
times = np.array(step_train_times)
speeds = batch_size / times
speed_mean = scale * batch_size / np.mean(times)
if scale == 1:
speed_uncertainty = np.std(speeds) / np.sqrt(float(len(speeds)))
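# Jitter is the median absolute deviation (MAD) of the per-step speeds;
# the 1.4826 factor scales MAD to match the standard deviation for
# normally distributed data while remaining robust to outliers.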
speed_madstd = 1.4826 * np.median(np.abs(speeds - np.median(speeds)))
speed_jitter = speed_madstd
return ('images/sec: %.1f +/- %.1f (jitter = %.1f)' %
(speed_mean, speed_uncertainty, speed_jitter))
else:
return 'images/sec: %.1f' % speed_mean
def load_checkpoint(saver, sess, ckpt_dir):
ckpt = tf.train.get_checkpoint_state(ckpt_dir)
if ckpt and ckpt.model_checkpoint_path:
if os.path.isabs(ckpt.model_checkpoint_path):
# Restores from checkpoint with absolute path.
model_checkpoint_path = ckpt.model_checkpoint_path
else:
# Restores from checkpoint with relative path.
model_checkpoint_path = os.path.join(ckpt_dir, ckpt.model_checkpoint_path)
# Assuming model_checkpoint_path looks something like:
# /my-favorite-path/imagenet_train/model.ckpt-0,
# extract global_step from it.
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
if not global_step.isdigit():
global_step = 0
else:
global_step = int(global_step)
saver.restore(sess, model_checkpoint_path)
log_fn('Successfully loaded model from %s.' % ckpt.model_checkpoint_path)
return global_step
else:
raise CheckpointNotFoundException('No checkpoint file found.')
# Params are passed to BenchmarkCNN's constructor. Params is a map from name
# to value, with one field per key in flags.param_specs.
#
# Call make_params() or make_params_from_flags() below to construct a Params
# tuple with default values from flags.param_specs, rather than constructing
# Params directly.
Params = namedtuple('Params', flags.param_specs.keys()) # pylint: disable=invalid-name
def validate_params(params):
"""Validates that the Params tuple had valid values.
When command-line flags are defined for each ParamSpec by calling
flags.define_flags(), calling this function is unnecessary because absl
already does flag validation. Otherwise, this function should be called.
Args:
params: A Params tuple.
Raises:
ValueError: An element of params had an invalid value.
"""
for name, value in params._asdict().items():
param_spec = flags.param_specs[name]
if param_spec.flag_type in ('integer', 'float'):
if (param_spec.kwargs['lower_bound'] is not None and
value < param_spec.kwargs['lower_bound']):
raise ValueError('Param %s value of %s is lower than the lower bound '
'of %s' %
(name, value, param_spec.kwargs['lower_bound']))
if (param_spec.kwargs['upper_bound'] is not None and
param_spec.kwargs['upper_bound'] < value):
raise ValueError('Param %s value of %s is higher than the upper bound '
'of %s' %
(name, value, param_spec.kwargs['upper_bound']))
elif (param_spec.flag_type == 'enum' and
value not in param_spec.kwargs['enum_values']):
raise ValueError('Param %s of value %s is not in %s'%
(name, value, param_spec.kwargs['enum_values']))
def make_params(**kwargs):
"""Create a Params tuple for BenchmarkCNN from kwargs.
Default values are filled in from flags.param_specs.
Args:
**kwargs: kwarg values will override the default values.
Returns:
Params namedtuple for constructing BenchmarkCNN.
"""
# Create a (name: default_value) map from flags.param_specs.
default_kwargs = {
name: flags.param_specs[name].default_value
for name in flags.param_specs
}
params = Params(**default_kwargs)._replace(**kwargs)
validate_params(params)
return params
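# Example (hypothetical values): override a few defaults and keep the rest
# from flags.param_specs:
#   params = make_params(model='resnet50', num_gpus=2, batch_size=64)
#   bench = BenchmarkCNN(params)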
def make_params_from_flags():
"""Create a Params tuple for BenchmarkCNN from absl_flags.FLAGS.
Returns:
Params namedtuple for constructing BenchmarkCNN.
"""
# Collect (name: value) pairs for absl_flags.FLAGS with matching names in
# flags.param_specs.
flag_values = {name: getattr(absl_flags.FLAGS, name)
for name in flags.param_specs.keys()}
return Params(**flag_values)
def get_num_batches_and_epochs(params, batch_size, num_examples_per_epoch):
"""Returns the number of batches and epochs to run for.
Args:
params: Params tuple, typically created by make_params or
make_params_from_flags.
batch_size: The number of images per step.
num_examples_per_epoch: The number of images in a single epoch.
Returns:
num_batches: The number of batches to run for.
num_epochs: The number of epochs to run for. This might be slightly
smaller than params.num_epochs if specified, because the number of batches
must be an integer.
Raises:
ValueError: Invalid or unsupported params.
"""
if params.num_batches and params.num_epochs:
raise ValueError('At most one of --num_batches and --num_epochs may be '
'specified.')
if params.num_epochs:
num_batches = int(float(params.num_epochs) * num_examples_per_epoch /
batch_size)
else:
num_batches = params.num_batches or _DEFAULT_NUM_BATCHES
num_epochs = num_batches * batch_size / float(num_examples_per_epoch)
return (num_batches, num_epochs)
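# Worked example (illustrative numbers): with num_epochs=1, batch_size=256
# and num_examples_per_epoch=50000, num_batches = int(1.0 * 50000 / 256) =
# 195, and num_epochs is recomputed as 195 * 256 / 50000.0 = 0.9984,
# slightly below the requested value because the number of batches must be
# an integer.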
def get_piecewise_learning_rate(piecewise_learning_rate_schedule,
global_step, num_batches_per_epoch):
"""Returns a piecewise learning rate tensor.
Args:
piecewise_learning_rate_schedule: The --piecewise_learning_rate_schedule
parameter
global_step: Scalar tensor representing the global step.
num_batches_per_epoch: float indicating the number of batches per epoch.
Returns:
A scalar float tensor, representing the learning rate.
Raises:
ValueError: piecewise_learning_rate_schedule is not formatted correctly.
"""
pieces = piecewise_learning_rate_schedule.split(';')
if len(pieces) % 2 == 0:
raise ValueError('--piecewise_learning_rate_schedule must have an odd '
'number of components')
values = []
boundaries = []
for i, piece in enumerate(pieces):
if i % 2 == 0:
try:
values.append(float(piece))
except ValueError:
raise ValueError('Invalid learning rate: ' + piece)
else:
try:
boundaries.append(int(int(piece) * num_batches_per_epoch) - 1)
except ValueError:
raise ValueError('Invalid epoch: ' + piece)
return tf.train.piecewise_constant(global_step, boundaries, values,
name='piecewise_learning_rate')
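# Worked example (illustrative numbers): with a schedule of
# '0.3;10;0.2;25;0.1' and num_batches_per_epoch=100, values become
# [0.3, 0.2, 0.1] and boundaries become [999, 2499], so the returned
# tensor evaluates to 0.3 for steps 0-999, 0.2 for steps 1000-2499, and
# 0.1 afterwards.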
def get_learning_rate(params, global_step, num_examples_per_epoch, model,
batch_size):
"""Returns a learning rate tensor based on global_step.
Args:
params: Params tuple, typically created by make_params or
make_params_from_flags.
global_step: Scalar tensor representing the global step.
num_examples_per_epoch: The number of examples per epoch.
model: The model.Model object to obtain the default learning rate from if no
learning rate is specified.
batch_size: Number of examples per step
Returns:
A scalar float tensor, representing the learning rate. When evaluated, the
learning rate depends on the current value of global_step.
Raises:
ValueError: Invalid or unsupported params.
"""
num_batches_per_epoch = (float(num_examples_per_epoch) / batch_size)
if params.piecewise_learning_rate_schedule:
if (params.init_learning_rate or params.learning_rate_decay_factor or
params.minimum_learning_rate or params.num_epochs_per_decay):
raise ValueError('No other learning rate-related flags can be specified '
'if --piecewise_learning_rate_schedule is specified')
learning_rate = get_piecewise_learning_rate(
params.piecewise_learning_rate_schedule,
global_step, num_batches_per_epoch)
elif params.init_learning_rate:
learning_rate = params.init_learning_rate
if (params.num_epochs_per_decay > 0 and
params.learning_rate_decay_factor > 0):
decay_steps = int(num_batches_per_epoch * params.num_epochs_per_decay)
# Decay the learning rate exponentially based on the number of steps.
learning_rate = tf.train.exponential_decay(
params.init_learning_rate,
global_step,
decay_steps,
params.learning_rate_decay_factor,
staircase=True)
if params.minimum_learning_rate != 0.:
learning_rate = tf.maximum(learning_rate,
params.minimum_learning_rate)
else:
learning_rate = model.get_learning_rate(global_step, batch_size)
if params.num_learning_rate_warmup_epochs > 0 and (
params.init_learning_rate or params.piecewise_learning_rate_schedule):
warmup_steps = int(num_batches_per_epoch *
params.num_learning_rate_warmup_epochs)
init_lr = (params.init_learning_rate or
float(params.piecewise_learning_rate_schedule.split(';')[0]))
warmup_lr = init_lr * tf.cast(global_step, tf.float32) / tf.cast(
warmup_steps, tf.float32)
learning_rate = tf.cond(global_step < warmup_steps,
lambda: warmup_lr, lambda: learning_rate)
return learning_rate
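# Example (illustrative numbers): with init_learning_rate=0.1 and
# warmup_steps=1000, the learning rate ramps linearly from 0 at step 0
# toward 0.1, and the underlying schedule takes over at step 1000.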
def get_optimizer(params, learning_rate):
"""Returns the optimizer that should be used based on params."""
if params.optimizer == 'momentum':
opt = tf.train.MomentumOptimizer(
learning_rate, params.momentum, use_nesterov=True)
elif params.optimizer == 'sgd':
opt = tf.train.GradientDescentOptimizer(learning_rate)
elif params.optimizer == 'rmsprop':
opt = tf.train.RMSPropOptimizer(
learning_rate,
params.rmsprop_decay,
momentum=params.rmsprop_momentum,
epsilon=params.rmsprop_epsilon)
else:
raise ValueError('Optimizer "%s" was not recognized' %
params.optimizer)
return opt
def generate_tfprof_profile(profiler, tfprof_file):
"""Generates a tfprof profile, writing it to a file and printing top ops.
Args:
profiler: A tf.profiler.Profiler. `profiler.add_step` must have already been
called.
tfprof_file: The filename to write the ProfileProto to.
"""
profile_proto = profiler.serialize_to_string()
log_fn('Dumping ProfileProto to %s' % tfprof_file)
with gfile.Open(tfprof_file, 'wb') as f:
f.write(profile_proto)
# Print out the execution times of the top operations. Note this
# information can also be obtained with the dumped ProfileProto, but
# printing it means tfprof doesn't have to be used if all the user wants
# is the top ops.
options = tf.profiler.ProfileOptionBuilder.time_and_memory()
options['max_depth'] = _NUM_OPS_TO_PRINT
options['order_by'] = 'accelerator_micros'
profiler.profile_operations(options)
class BenchmarkCNN(object):
"""Class for benchmarking a cnn network."""
def __init__(self, params, dataset=None, model=None):
"""Initialize BenchmarkCNN.
Args:
params: Params tuple, typically created by make_params or
make_params_from_flags.
dataset: If not None, the dataset to use. Otherwise, params is used to
obtain the dataset.
model: If not None, the model to use. Otherwise, params is used to obtain
the model.
Raises:
ValueError: Unsupported params settings.
"""
self.params = params
self.dataset = dataset or datasets.create_dataset(self.params.data_dir,
self.params.data_name)
self.model = model or model_config.get_model_config(self.params.model,
self.dataset)
self.trace_filename = self.params.trace_file
self.data_format = self.params.data_format
self.enable_layout_optimizer = self.params.enable_layout_optimizer
self.rewriter_config = self.params.rewriter_config
autotune_threshold = self.params.autotune_threshold if (
self.params.autotune_threshold) else 1
min_autotune_warmup = 5 * autotune_threshold * autotune_threshold
self.num_warmup_batches = self.params.num_warmup_batches if (
self.params.num_warmup_batches is not None) else max(
10, min_autotune_warmup)
self.graph_file = self.params.graph_file
self.resize_method = self.params.resize_method
self.sync_queue_counter = 0
self.num_gpus = self.params.num_gpus
if self.params.gpu_indices:
self.gpu_indices = [int(x) for x in self.params.gpu_indices.split(',')]
else:
self.gpu_indices = [x for x in range(self.num_gpus)]
self.use_synthetic_gpu_images = self.dataset.use_synthetic_gpu_images()
if (self.params.device == 'cpu' and self.params.data_format == 'NCHW' and
not self.params.mkl):
raise ValueError('device=cpu requires that data_format=NHWC')
if ((self.params.num_epochs_per_decay or
self.params.learning_rate_decay_factor) and
not (self.params.init_learning_rate and self.params.num_epochs_per_decay
and self.params.learning_rate_decay_factor)):
raise ValueError('If one of num_epochs_per_decay or '
'learning_rate_decay_factor is set, both must be set '
'and init_learning_rate must be set')
if (self.params.minimum_learning_rate and
not (self.params.init_learning_rate and self.params.num_epochs_per_decay
and self.params.learning_rate_decay_factor)):
raise ValueError('minimum_learning_rate requires init_learning_rate, '
'num_epochs_per_decay, and '
'learning_rate_decay_factor to be set')
if (self.params.use_fp16 and self.params.fp16_vars and
'replicated' in self.params.variable_update and
self.params.all_reduce_spec and 'nccl' in self.params.all_reduce_spec):
raise ValueError('fp16 variables are not supported with NCCL')
if (self.params.use_fp16 and self.params.fp16_vars and
self.params.gradient_repacking):
raise ValueError('--fp16_vars cannot be used with --gradient_repacking')
if self.params.variable_update == 'horovod' and self.params.num_gpus > 1:
raise ValueError('Horovod benchmarks require num_gpus=1 on each worker')
if self.params.variable_update == 'horovod' and self.params.job_name:
raise ValueError('job_name should not be specified for Horovod.')
if self.params.use_fp16 and self.params.fp16_enable_auto_loss_scale:
if self.params.all_reduce_spec and 'nccl' in self.params.all_reduce_spec:
raise ValueError('Automatic loss scaling is not supported with NCCL.')
if self.params.variable_update not in ('parameter_server', 'replicated',
'independent'):
raise ValueError('Automatic loss scaling is not supported with '
'variable_update=%s.' % self.params.variable_update)
if self.params.staged_vars:
raise ValueError('Automatic loss scaling is not supported with '
'staged_vars.')
if (self.params.debugger is not None and self.params.debugger != 'cli' and
':' not in self.params.debugger):
raise ValueError('--debugger must be "cli" or in the form '
'host:port')
if self.params.hierarchical_copy and self.params.num_gpus <= 1:
raise ValueError('--hierarchical_copy requires --num_gpus to be greater '
'than 1')
# Use the batch size from the command line if specified, otherwise use the
# model's default batch size. Scale the benchmark's batch size by the
# number of GPUs.
if self.params.batch_size > 0:
self.model.set_batch_size(self.params.batch_size)
self.batch_size = self.model.get_batch_size() * self.num_gpus
self.batch_group_size = self.params.batch_group_size
self.enable_auto_loss_scale = (
self.params.use_fp16 and self.params.fp16_enable_auto_loss_scale)
self.loss_scale = None
self.loss_scale_normal_steps = None
self.job_name = self.params.job_name # "" for local training
# PS server is used for distributed jobs not using all-reduce.
use_ps_server = self.job_name and (self.params.variable_update !=
'distributed_all_reduce' and
self.params.variable_update !=
'collective_all_reduce')
# controller is used for distributed_all_reduce with > 1 worker.
use_controller = (
self.params.variable_update == 'distributed_all_reduce' and
self.job_name)
if use_controller and not params.controller_host:
raise ValueError('When variable_update==distributed_all_reduce '
'controller_host must also be specified.')
# collective_all_reduce doesn't need a controller or ps
self.distributed_collective = (
self.params.variable_update == 'collective_all_reduce' and
self.job_name)
self.local_parameter_device_flag = self.params.local_parameter_device
if self.job_name:
self.task_index = self.params.task_index
self.cluster_manager = platforms_util.get_cluster_manager(
params, create_config_proto(params))
assert isinstance(self.cluster_manager, cnn_util.BaseClusterManager)
worker_prefix = '/job:worker/replica:0/task:%s' % self.task_index
if use_ps_server:
self.param_server_device = tf.train.replica_device_setter(
worker_device=worker_prefix + '/cpu:0',
cluster=self.cluster_manager.get_cluster_spec())
# The device on which the queues for managing synchronization between
# servers should be stored.
self.sync_queue_devices = [
'/job:ps/replica:0/task:%s/cpu:0' % i
for i in range(self.cluster_manager.num_ps())
]
else:
self.sync_queue_devices = ['/job:worker/replica:0/task:0/cpu:0']
else:
self.task_index = 0
self.cluster_manager = None
worker_prefix = ''
self.param_server_device = '/%s:0' % self.params.local_parameter_device
self.sync_queue_devices = [self.param_server_device]
if self.cluster_manager:
self.num_workers = self.cluster_manager.num_workers()
elif self.params.variable_update == 'horovod':
import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top
self.num_workers = hvd.size()
else:
self.num_workers = 1
self.num_ps = self.cluster_manager.num_ps() if self.cluster_manager else 0
if self.num_workers > 1 and self.params.all_reduce_spec == 'nccl':
raise ValueError('--all_reduce_spec=nccl is invalid in a '
'multi-worker job')
# Device to use for ops that need to always run on the local worker's CPU.
self.cpu_device = '%s/cpu:0' % worker_prefix
# Device to use for ops that need to always run on the local worker's
# compute device, and never on a parameter server device.
self.raw_devices = [
'%s/%s:%i' % (worker_prefix, self.params.device, i)
for i in xrange(self.num_gpus)
]
subset = 'validation' if params.eval else 'train'
self.num_batches, self.num_epochs = get_num_batches_and_epochs(
params, self.batch_size * self.num_workers,
self.dataset.num_examples_per_epoch(subset))
if (self.params.staged_vars and
self.params.variable_update != 'parameter_server'):
raise ValueError('staged_vars for now is only supported with '
'variable_update=parameter_server')
if self.params.variable_update == 'parameter_server':
if self.job_name:
if not self.params.staged_vars:
self.variable_mgr = variable_mgr.VariableMgrDistributedFetchFromPS(
self)
else:
self.variable_mgr = (
variable_mgr.VariableMgrDistributedFetchFromStagedPS(self))
else:
if not self.params.staged_vars:
self.variable_mgr = variable_mgr.VariableMgrLocalFetchFromPS(self)
else:
self.variable_mgr = variable_mgr.VariableMgrLocalFetchFromStagedPS(
self)
elif self.params.variable_update == 'replicated':
if self.job_name:
raise ValueError('Invalid variable_update in distributed mode: %s' %
self.params.variable_update)
self.variable_mgr = variable_mgr.VariableMgrLocalReplicated(
self, self.params.all_reduce_spec,
self.params.agg_small_grads_max_bytes,
self.params.agg_small_grads_max_group,
self.params.allreduce_merge_scope)
elif self.params.variable_update == 'distributed_all_reduce':
assert self.params.cross_replica_sync
self.variable_mgr = variable_mgr.VariableMgrDistributedAllReduce(
self, self.params.all_reduce_spec,
('worker' if self.num_workers > 1 else 'localhost'),
self.num_workers, self.params.agg_small_grads_max_bytes,
self.params.agg_small_grads_max_group,
self.params.allreduce_merge_scope)
elif self.params.variable_update == 'collective_all_reduce':
assert self.params.cross_replica_sync
if self.num_workers > 1:
raise ValueError('collective_all_reduce not yet supported for '
'num_workers > 1')
self.variable_mgr = variable_mgr.VariableMgrCollectiveAllReduce(
self, self.params.all_reduce_spec,
self.num_workers, self.num_gpus, self.task_index,
self.params.allreduce_merge_scope)
elif self.params.variable_update == 'distributed_replicated':
assert self.params.cross_replica_sync
if not self.job_name:
raise ValueError('Invalid variable_update in local mode: %s' %
self.params.variable_update)
self.variable_mgr = variable_mgr.VariableMgrDistributedReplicated(self)
elif self.params.variable_update in ('independent', 'horovod'):
if self.job_name:
raise ValueError('Invalid variable_update in distributed mode: %s' %
self.params.variable_update)
self.variable_mgr = variable_mgr.VariableMgrIndependent(self)
else:
raise ValueError(
'Invalid variable_update: %s' % self.params.variable_update)
# Device to use for running on the local worker's compute device, but
# with variables assigned to parameter server devices.
self.devices = self.variable_mgr.get_devices()
if self.job_name:
if use_ps_server:
self.global_step_device = self.param_server_device
else:
self.global_step_device = '/job:worker/replica:0/task:0/cpu:0'
else:
self.global_step_device = self.cpu_device
self.image_preprocessor = self.get_image_preprocessor()
self.datasets_use_prefetch = (
self.params.datasets_use_prefetch and
self.image_preprocessor.supports_datasets())
self.init_global_step = 0
self._config_benchmark_logger()
def _config_benchmark_logger(self):
"""Config the model garden benchmark logger."""
model_benchmark_logger = None
if self.params.benchmark_log_dir is not None:
try:
from official.utils.logs import logger as models_logger # pylint: disable=g-import-not-at-top
except ImportError:
tf.logging.fatal('Please include tensorflow/models to the PYTHONPATH '
'in order to use BenchmarkLogger. Configured '
'benchmark_log_dir: %s'
% self.params.benchmark_log_dir)
raise
model_benchmark_logger = models_logger.BenchmarkFileLogger(
self.params.benchmark_log_dir)
self.benchmark_logger = model_benchmark_logger
def reset_devices_for_task(self, task_num, is_local=False):
"""Used to imitate another task when building a distributed graph."""
worker_prefix = ('/job:localhost' if is_local else
'/job:worker/replica:0/task:%s' % task_num)
self.cpu_device = '%s/cpu:0' % worker_prefix
self.raw_devices = [
'%s/%s:%i' % (worker_prefix, self.params.device, i)
for i in xrange(self.num_gpus)
]
self.devices = self.variable_mgr.get_devices()
def raw_devices_across_tasks(self, is_local=False):
"""Returns list of raw device names across all tasks."""
if is_local:
assert self.num_workers == 1
return self.raw_devices
else:
return [
'/job:worker/replica:0/task:%s/%s:%i' % (t, self.params.device, i)
for t in xrange(self.num_workers)
for i in xrange(self.num_gpus)
]
def print_info(self):
"""Print basic information."""
benchmark_info = self._get_params_info()
log_fn('Model: %s' % self.model.get_model())
log_fn('Dataset: %s' % benchmark_info['dataset_name'])
log_fn('Mode: %s' % get_mode_from_params(self.params))
log_fn('SingleSess: %s' % benchmark_info['single_session'])
log_fn('Batch size: %s global' % (self.batch_size * self.num_workers))
log_fn(' %s per device' % (self.batch_size /
len(self.raw_devices)))
if self.batch_group_size > 1:
log_fn(' %d batches per preprocessing group' %
self.batch_group_size)
log_fn('Num batches: %d' % self.num_batches)
log_fn('Num epochs: %.2f' % self.num_epochs)
log_fn('Devices: %s' % benchmark_info['device_list'])
log_fn('Data format: %s' % self.data_format)
log_fn('Layout optimizer: %s' % self.enable_layout_optimizer)
if self.rewriter_config:
log_fn('RewriterConfig: %s' % self.rewriter_config)
log_fn('Optimizer: %s' % self.params.optimizer)
log_fn('Variables: %s' % self.params.variable_update)
if (self.params.variable_update == 'replicated' or
self.params.variable_update == 'distributed_all_reduce'
or self.params.variable_update == 'collective_all_reduce'):
log_fn('AllReduce: %s' % self.params.all_reduce_spec)
if self.job_name:
log_fn('Sync: %s' % self.params.cross_replica_sync)
if self.params.staged_vars:
log_fn('Staged vars: %s' % self.params.staged_vars)
if self.params.variable_update == 'horovod' and self.params.horovod_device:
log_fn('Horovod on: %s' % self.params.horovod_device)
log_fn('==========')
def _get_params_info(self):
"""Get the common parameters info for the benchmark run.
Returns:
A dict of processed parameters.
"""
dataset_name = self.dataset.name
if self.dataset.use_synthetic_gpu_images():
dataset_name += ' (synthetic)'
single_session = self.params.variable_update == 'distributed_all_reduce'
if single_session:
device_list = self.raw_devices_across_tasks()
elif self.params.variable_update == 'horovod':
device_list = ['horovod/%s:%d' % (self.params.device, idx)
for idx in range(self.num_workers)]
else:
device_list = self.raw_devices
return {
'dataset_name': dataset_name,
'single_session': single_session,
'device_list': device_list,}
def _log_benchmark_run(self):
"""Log the benchmark info to the logger.
The info logged here should be similar to print_info(), but in a structured
JSON format.
"""
if self.benchmark_logger:
benchmark_info = self._get_params_info()
run_param = {
'model': self.model.get_model(),
'dataset': benchmark_info['dataset_name'],
'mode': get_mode_from_params(self.params),
'single_sess': benchmark_info['single_session'],
'devices': benchmark_info['device_list'],
'batch_size': self.batch_size,
'batch_size_per_device': self.batch_size / len(self.raw_devices),
'num_batches': self.num_batches,
'num_epochs': self.num_epochs,
'data_format': self.data_format,
'layout_optimizer': self.enable_layout_optimizer,
'rewrite_config': self.rewriter_config,
'optimizer': self.params.optimizer,
}
# TODO(scottzhu): tf_cnn_benchmark might execute several times with
# different param setting on the same box. This will cause the run file to
# only contain the latest info. The benchmark_log_dir should be updated
# for every new run.
self.benchmark_logger.log_run_info(
self.model.get_model(), benchmark_info['dataset_name'], run_param)
def run(self):
"""Run the benchmark task assigned to this process.
Returns:
Dictionary of statistics for training or eval.
Raises:
ValueError: unrecognized job name.
"""
if self.params.job_name == 'ps':
log_fn('Running parameter server %s' % self.task_index)
self.cluster_manager.join_server()
return {}
# For distributed_all_reduce with multiple workers, drive
# from a separate controller process.
if self.params.variable_update == 'distributed_all_reduce':
if self.params.job_name == 'worker':
log_fn('Starting worker %s' % self.task_index)
self.cluster_manager.join_server()
return
elif self.params.job_name and self.params.job_name != 'controller':
raise ValueError('unrecognized job name: %s' % self.params.job_name)
with tf.Graph().as_default():
self._log_benchmark_run()
if self.params.eval:
return self._eval_cnn()
else:
return self._benchmark_cnn()
def _eval_cnn(self):
"""Evaluate a model every self.params.eval_interval_secs.
Returns:
Dictionary containing eval statistics. Currently returns an empty
dictionary.
"""
if self.datasets_use_prefetch:
(image_producer_ops, enqueue_ops, fetches) = (
self._build_model_with_dataset_prefetching())
else:
(image_producer_ops, enqueue_ops, fetches) = self._build_model()
saver = tf.train.Saver(self.variable_mgr.savable_variables())
summary_writer = tf.summary.FileWriter(self.params.eval_dir,
tf.get_default_graph())
target = ''
local_var_init_op = tf.local_variables_initializer()
table_init_ops = tf.tables_initializer()
variable_mgr_init_ops = [local_var_init_op]
if table_init_ops:
variable_mgr_init_ops.extend([table_init_ops])
with tf.control_dependencies([local_var_init_op]):
variable_mgr_init_ops.extend(self.variable_mgr.get_post_init_ops())
local_var_init_op_group = tf.group(*variable_mgr_init_ops)
summary_op = tf.summary.merge_all()
# TODO(huangyp): Check if checkpoints haven't updated for hours and abort.
while True:
self._eval_once(saver, summary_writer, target, local_var_init_op_group,
image_producer_ops, enqueue_ops, fetches, summary_op)
if self.params.eval_interval_secs <= 0:
break
time.sleep(self.params.eval_interval_secs)
return {}
def _eval_once(self, saver, summary_writer, target, local_var_init_op_group,
image_producer_ops, enqueue_ops, fetches, summary_op):
"""Evaluate the model from a checkpoint using validation dataset."""
with tf.Session(
target=target, config=create_config_proto(self.params)) as sess:
if self.params.train_dir is None:
raise ValueError('Trained model directory not specified')
try:
global_step = load_checkpoint(saver, sess, self.params.train_dir)
except CheckpointNotFoundException:
log_fn('Checkpoint not found in %s' % self.params.train_dir)
return
sess.run(local_var_init_op_group)
if self.dataset.queue_runner_required():
tf.train.start_queue_runners(sess=sess)
image_producer = None
if image_producer_ops is not None:
image_producer = cnn_util.ImageProducer(
sess, image_producer_ops, self.batch_group_size,
self.params.use_python32_barrier)
image_producer.start()
for i in xrange(len(enqueue_ops)):
sess.run(enqueue_ops[:(i + 1)])
image_producer.notify_image_consumption()
loop_start_time = start_time = time.time()
top_1_accuracy_sum = 0.0
top_5_accuracy_sum = 0.0
total_eval_count = self.num_batches * self.batch_size
for step in xrange(self.num_batches):
if (self.params.save_summaries_steps > 0 and
(step + 1) % self.params.save_summaries_steps == 0):
results, summary_str = sess.run([fetches, summary_op])
summary_writer.add_summary(summary_str)
else:
results = sess.run(fetches)
top_1_accuracy_sum += results['top_1_accuracy']
top_5_accuracy_sum += results['top_5_accuracy']
if (step + 1) % self.params.display_every == 0:
duration = time.time() - start_time
examples_per_sec = (
self.batch_size * self.params.display_every / duration)
log_fn('%i\t%.1f examples/sec' % (step + 1, examples_per_sec))
start_time = time.time()
if image_producer is not None:
image_producer.notify_image_consumption()
loop_end_time = time.time()
if image_producer is not None:
image_producer.done()
accuracy_at_1 = top_1_accuracy_sum / self.num_batches
accuracy_at_5 = top_5_accuracy_sum / self.num_batches
summary = tf.Summary()
summary.value.add(tag='eval/Accuracy@1', simple_value=accuracy_at_1)
summary.value.add(tag='eval/Accuracy@5', simple_value=accuracy_at_5)
summary_writer.add_summary(summary, global_step)
log_fn('Accuracy @ 1 = %.4f Accuracy @ 5 = %.4f [%d examples]' %
(accuracy_at_1, accuracy_at_5, total_eval_count))
elapsed_time = loop_end_time - loop_start_time
images_per_sec = (self.num_batches * self.batch_size / elapsed_time)
# Note that we compute the top 1 accuracy and top 5 accuracy for each
# batch, which will have a slight performance impact.
log_fn('-' * 64)
log_fn('total images/sec: %.2f' % images_per_sec)
log_fn('-' * 64)
      if self.benchmark_logger:
        eval_result = {
            'eval_top_1_accuracy': accuracy_at_1,
            'eval_top_5_accuracy': accuracy_at_5,
            'eval_average_examples_per_sec': images_per_sec,
            tf.GraphKeys.GLOBAL_STEP: global_step,
        }
        self.benchmark_logger.log_evaluation_result(eval_result)
def _benchmark_cnn(self):
"""Run cnn in benchmark mode. Skip the backward pass if forward_only is on.
Returns:
Dictionary containing training statistics (num_workers, num_steps,
average_wall_time, images_per_sec).
"""
if self.params.variable_update == 'distributed_all_reduce':
self.single_session = True
if self.datasets_use_prefetch:
(image_producer_ops, enqueue_ops, fetches) = (
self._build_model_single_session_with_dataset_prefetching())
else:
(image_producer_ops, enqueue_ops, fetches) = (
self._build_model_single_session())
else:
self.single_session = False
if self.datasets_use_prefetch:
(image_producer_ops, enqueue_ops, fetches) = (
self._build_model_with_dataset_prefetching())
else:
(image_producer_ops, enqueue_ops, fetches) = self._build_model()
fetches_list = nest.flatten(list(fetches.values()))
main_fetch_group = tf.group(*fetches_list)
execution_barrier = None
if (not self.single_session and self.job_name and
not self.params.cross_replica_sync):
execution_barrier = self.add_sync_queues_and_barrier(
'execution_barrier_', [])
global_step = tf.train.get_global_step()
with tf.device(self.global_step_device):
with tf.control_dependencies([main_fetch_group]):
fetches['inc_global_step'] = global_step.assign_add(1)
if ((not self.single_session) and (not self.distributed_collective) and
self.job_name and self.params.cross_replica_sync):
# Block all replicas until all replicas are ready for next step.
fetches['sync_queues'] = self.add_sync_queues_and_barrier(
'sync_queues_step_end_', [main_fetch_group])
local_var_init_op = tf.local_variables_initializer()
table_init_ops = tf.tables_initializer()
variable_mgr_init_ops = [local_var_init_op]
if table_init_ops:
variable_mgr_init_ops.extend([table_init_ops])
with tf.control_dependencies([local_var_init_op]):
variable_mgr_init_ops.extend(self.variable_mgr.get_post_init_ops())
if ((not self.single_session) and (not self.distributed_collective) and
self.job_name and self.params.cross_replica_sync):
# Ensure all workers execute variable_mgr_init_ops before they start
# executing the model.
variable_mgr_init_ops.append(
self.add_sync_queues_and_barrier('init_ops_end_',
variable_mgr_init_ops))
local_var_init_op_group = tf.group(*variable_mgr_init_ops)
if self.params.variable_update == 'horovod':
import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top
# First worker will be 'chief' - it will write summaries and
# save checkpoints.
is_chief = hvd.rank() == 0
else:
is_chief = (not self.job_name or self.task_index == 0)
summary_op = tf.summary.merge_all()
summary_writer = None
if (is_chief and self.params.summary_verbosity and self.params.train_dir and
self.params.save_summaries_steps > 0):
summary_writer = tf.summary.FileWriter(self.params.train_dir,
tf.get_default_graph())
    # We want to start the benchmark timer right after an image_producer
    # barrier, to avoid undesired waiting times on barriers.
if ((self.num_warmup_batches + len(enqueue_ops) - 1) %
self.batch_group_size) != 0:
self.num_warmup_batches = int(
math.ceil((self.num_warmup_batches + len(enqueue_ops) - 1.0) /
(self.batch_group_size
)) * self.batch_group_size - len(enqueue_ops) + 1)
log_fn('Round up warm up steps to %d to match batch_group_size' %
self.num_warmup_batches)
assert ((self.num_warmup_batches + len(enqueue_ops) - 1) %
self.batch_group_size) == 0
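    # Worked example (illustrative): with num_warmup_batches=5,
    # len(enqueue_ops)=1 and batch_group_size=4, (5 + 1 - 1) % 4 == 1, so the
    # warmup count is rounded up to ceil(5 / 4) * 4 - 1 + 1 == 8 and the timer
    # starts right on an image_producer barrier.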
# We run the summaries in the same thread as the training operations by
# passing in None for summary_op to avoid a summary_thread being started.
# Running summaries and training operations in parallel could run out of
# GPU memory.
if is_chief:
saver = tf.train.Saver(
self.variable_mgr.savable_variables(), save_relative_paths=True)
else:
saver = None
ready_for_local_init_op = None
if self.job_name and not self.single_session:
# In distributed mode, we don't want to run local_var_init_op_group until
# the global variables are initialized, because local_var_init_op_group
# may use global variables (such as in distributed replicated mode). We
# don't set this in non-distributed mode, because in non-distributed mode,
# local_var_init_op_group may itself initialize global variables (such as
# in replicated mode).
ready_for_local_init_op = tf.report_uninitialized_variables(
tf.global_variables())
if self.params.variable_update == 'horovod':
import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top
bcast_global_variables_op = hvd.broadcast_global_variables(0)
else:
bcast_global_variables_op = None
if self.params.variable_update == 'collective_all_reduce':
# It doesn't matter what this collective_graph_key value is,
# so long as it's > 0 and the same at every worker.
init_run_options = tf.RunOptions()
init_run_options.experimental.collective_graph_key = 6
else:
init_run_options = tf.RunOptions()
sv = tf.train.Supervisor(
# For the purpose of Supervisor, all Horovod workers are 'chiefs',
# since we want session to be initialized symmetrically on all the
# workers.
is_chief=is_chief or (self.params.variable_update == 'horovod'
or self.distributed_collective),
# Log dir should be unset on non-chief workers to prevent Horovod
# workers from corrupting each other's checkpoints.
logdir=self.params.train_dir if is_chief else None,
ready_for_local_init_op=ready_for_local_init_op,
local_init_op=local_var_init_op_group,
saver=saver,
global_step=global_step,
summary_op=None,
save_model_secs=self.params.save_model_secs,
summary_writer=summary_writer)
# local_init_run_options=init_run_options)
power_logger = NvidiaPowerReader()
power_logger.read_multi_asynch(30, delay=10)
step_train_times = []
start_standard_services = (
self.params.summary_verbosity >= 1 or
self.dataset.queue_runner_required())
target = self.cluster_manager.get_target() if self.cluster_manager else ''
with sv.managed_session(
master=target,
config=create_config_proto(self.params),
start_standard_services=start_standard_services) as sess:
if bcast_global_variables_op:
sess.run(bcast_global_variables_op)
image_producer = None
if image_producer_ops is not None:
image_producer = cnn_util.ImageProducer(
sess, image_producer_ops, self.batch_group_size,
self.params.use_python32_barrier)
image_producer.start()
for i in xrange(len(enqueue_ops)):
sess.run(enqueue_ops[:(i + 1)])
image_producer.notify_image_consumption()
self.init_global_step, = sess.run([global_step])
if self.job_name and not self.params.cross_replica_sync:
# TODO(zhengxq): Do we need to use a global step watcher at all?
global_step_watcher = GlobalStepWatcher(
sess, global_step,
self.num_workers * self.num_warmup_batches +
self.init_global_step,
self.num_workers * (self.num_warmup_batches + self.num_batches) - 1)
global_step_watcher.start()
else:
global_step_watcher = None
if self.graph_file is not None:
path, filename = os.path.split(self.graph_file)
as_text = filename.endswith('txt')
log_fn('Writing GraphDef as %s to %s' % ( # pyformat break
'text' if as_text else 'binary', self.graph_file))
tf.train.write_graph(sess.graph.as_graph_def(add_shapes=True), path,
filename, as_text)
log_fn('Running warm up')
local_step = -1 * self.num_warmup_batches
if not global_step_watcher:
# In cross-replica sync mode, all workers must run the same number of
# local steps, or else the workers running the extra step will block.
done_fn = lambda: local_step == self.num_batches
else:
done_fn = global_step_watcher.done
if self.params.debugger is not None:
if self.params.debugger == 'cli':
log_fn('The CLI TensorFlow debugger will be used.')
sess = tf_debug.LocalCLIDebugWrapperSession(sess)
else:
log_fn('The TensorBoard debugger plugin will be used.')
sess = tf_debug.TensorBoardDebugWrapperSession(sess,
self.params.debugger)
profiler = tf.profiler.Profiler() if self.params.tfprof_file else None
loop_start_time = time.time()
while not done_fn():
if local_step == 0:
log_fn('Done warm up')
if execution_barrier:
log_fn('Waiting for other replicas to finish warm up')
sess.run([execution_barrier])
header_str = ('Step\tImg/sec\t' +
self.params.loss_type_to_report.replace('/', ' '))
if self.params.print_training_accuracy or self.params.forward_only:
header_str += '\ttop_1_accuracy\ttop_5_accuracy'
log_fn(header_str)
assert len(step_train_times) == self.num_warmup_batches
# reset times to ignore warm up batch
step_train_times = []
loop_start_time = time.time()
if (summary_writer and
(local_step + 1) % self.params.save_summaries_steps == 0):
fetch_summary = summary_op
else:
fetch_summary = None
collective_graph_key = 7 if (
self.params.variable_update == 'collective_all_reduce') else 0
summary_str = benchmark_one_step(
sess, fetches, local_step,
self.batch_size * (self.num_workers
if self.single_session else 1), step_train_times,
self.trace_filename, self.params.partitioned_graph_file_prefix,
profiler, image_producer, self.params, fetch_summary,
benchmark_logger=self.benchmark_logger,
collective_graph_key=collective_graph_key)
if summary_str is not None and is_chief:
sv.summary_computed(sess, summary_str)
local_step += 1
loop_end_time = time.time()
# Waits for the global step to be done, regardless of done_fn.
if global_step_watcher:
while not global_step_watcher.done():
time.sleep(.25)
if not global_step_watcher:
elapsed_time = loop_end_time - loop_start_time
average_wall_time = elapsed_time / local_step if local_step > 0 else 0
images_per_sec = (self.num_workers * local_step * self.batch_size /
elapsed_time)
num_steps = local_step * self.num_workers
else:
# NOTE: Each worker independently increases the global step. So,
# num_steps will be the sum of the local_steps from each worker.
num_steps = global_step_watcher.num_steps()
elapsed_time = global_step_watcher.elapsed_time()
average_wall_time = (elapsed_time * self.num_workers / num_steps
if num_steps > 0 else 0)
images_per_sec = num_steps * self.batch_size / elapsed_time
log_fn('-' * 64)
log_fn('total images/sec: %.2f' % images_per_sec)
log_fn('-' * 64)
if image_producer is not None:
image_producer.done()
if is_chief:
store_benchmarks({'total_images_per_sec': images_per_sec}, self.params)
if self.benchmark_logger:
self.benchmark_logger.log_metric(
'average_examples_per_sec', images_per_sec, global_step=num_steps)
# Save the model checkpoint.
if self.params.train_dir is not None and is_chief:
checkpoint_path = os.path.join(self.params.train_dir, 'model.ckpt')
if not gfile.Exists(self.params.train_dir):
gfile.MakeDirs(self.params.train_dir)
sv.saver.save(sess, checkpoint_path, global_step)
if execution_barrier:
# Wait for other workers to reach the end, so this worker doesn't
# go away underneath them.
sess.run([execution_barrier])
sv.stop()
power_logger.stop()
power_logger.write_results_to_file(self.params.model)
m_p = power_logger.power_stats(power_logger.last_measurements)[0][0]
log_fn('mean power used: %f W' % m_p)
log_fn('final_metric: %f' % (1.0 / (images_per_sec * m_p)))
if profiler:
generate_tfprof_profile(profiler, self.params.tfprof_file)
return {
'num_workers': self.num_workers,
'num_steps': num_steps,
'average_wall_time': average_wall_time,
'images_per_sec': images_per_sec
}
def _build_image_processing(self, shift_ratio=0):
""""Build the image (pre)processing portion of the model graph."""
with tf.device(self.cpu_device):
if self.params.eval:
subset = 'validation'
else:
subset = 'train'
image_producer_ops = []
image_producer_stages = []
images_splits, labels_splits = self.image_preprocessor.minibatch(
self.dataset,
subset=subset,
use_datasets=self.params.use_datasets,
cache_data=self.params.cache_data,
shift_ratio=shift_ratio)
images_shape = images_splits[0].get_shape()
labels_shape = labels_splits[0].get_shape()
for device_num in range(len(self.devices)):
image_producer_stages.append(
data_flow_ops.StagingArea(
[images_splits[0].dtype, labels_splits[0].dtype],
shapes=[images_shape, labels_shape]))
for group_index in xrange(self.batch_group_size):
if not self.use_synthetic_gpu_images:
batch_index = group_index + device_num * self.batch_group_size
put_op = image_producer_stages[device_num].put(
[images_splits[batch_index], labels_splits[batch_index]])
image_producer_ops.append(put_op)
return (image_producer_ops, image_producer_stages)
def _build_model(self):
"""Build the TensorFlow graph."""
    # Adjust seed so different workers start reading different input files.
if self.params.variable_update == 'horovod':
import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top
seed_adjustment = hvd.rank()
else:
seed_adjustment = 0
tf.set_random_seed(self.params.tf_random_seed + seed_adjustment)
np.random.seed(4321 + seed_adjustment)
phase_train = not (self.params.eval or self.params.forward_only)
log_fn('Generating model')
losses = []
device_grads = []
all_logits = []
all_top_1_ops = []
all_top_5_ops = []
enqueue_ops = []
gpu_compute_stage_ops = []
gpu_grad_stage_ops = []
with tf.device(self.global_step_device):
global_step = tf.train.get_or_create_global_step()
if self.params.use_fp16:
init_loss_scale_val = float(self.params.fp16_loss_scale or
self.model.get_fp16_loss_scale())
if self.enable_auto_loss_scale or init_loss_scale_val != 1:
self.loss_scale = tf.get_variable(
name='loss_scale',
initializer=init_loss_scale_val,
dtype=tf.float32,
trainable=False)
self.loss_scale_normal_steps = tf.get_variable(
name='loss_scale_normal_steps', initializer=0, trainable=False)
else:
self.loss_scale = None
self.loss_scale_normal_steps = None
# Build the processing and model for the worker.
(image_producer_ops,
image_producer_stages) = self._build_image_processing(shift_ratio=0)
image_producer_ops = tf.group(*image_producer_ops)
update_ops = None
staging_delta_ops = []
for device_num in range(len(self.devices)):
with self.variable_mgr.create_outer_variable_scope(
device_num), tf.name_scope('tower_%i' % device_num) as name_scope:
results = self.add_forward_pass_and_gradients(
phase_train, device_num, device_num,
image_producer_stages[device_num], gpu_compute_stage_ops,
gpu_grad_stage_ops)
if phase_train:
losses.append(results['loss'])
device_grads.append(results['gradvars'])
else:
all_logits.append(results['logits'])
if not phase_train or self.params.print_training_accuracy:
all_top_1_ops.append(results['top_1_op'])
all_top_5_ops.append(results['top_5_op'])
if device_num == 0:
# Retain the Batch Normalization updates operations only from the
# first tower. These operations update the moving mean and moving
# variance variables, which are updated (but not used) during
# training, and used during evaluation. The moving mean and variance
# approximate the true mean and variance across all images in the
# dataset. Therefore, in replicated mode, these moving averages would
# be almost identical for each tower, and so we only update and save
# the moving averages for one tower. In parameter server mode, all
# towers share a copy of the variables so we also only need to update
# and save the moving averages once.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope)
staging_delta_ops = list(self.variable_mgr.staging_delta_ops)
if self.variable_mgr.supports_staged_vars():
for staging_ops in self.variable_mgr.staging_vars_on_devices:
gpu_compute_stage_ops.extend(
[put_op for _, (put_op, _) in six.iteritems(staging_ops)])
enqueue_ops.append(tf.group(*gpu_compute_stage_ops))
if gpu_grad_stage_ops:
staging_delta_ops += gpu_grad_stage_ops
if staging_delta_ops:
enqueue_ops.append(tf.group(*(staging_delta_ops)))
fetches = self._build_fetches(global_step, all_logits, losses, device_grads,
enqueue_ops, update_ops, all_top_1_ops,
all_top_5_ops, phase_train)
return (image_producer_ops, enqueue_ops, fetches)
# TODO(rohanj): Refactor this function and share with other code path.
def _build_model_with_dataset_prefetching(self):
"""Build the TensorFlow graph using datasets prefetching."""
assert not self.params.staged_vars
assert not self.variable_mgr.supports_staged_vars()
    # Adjust seed so different workers start reading different input files.
if self.params.variable_update == 'horovod':
import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top
seed_adjustment = hvd.rank()
else:
seed_adjustment = 0
tf.set_random_seed(self.params.tf_random_seed + seed_adjustment)
np.random.seed(4321 + seed_adjustment)
phase_train = not (self.params.eval or self.params.forward_only)
log_fn('Generating model')
losses = []
device_grads = []
all_logits = []
all_top_1_ops = []
all_top_5_ops = []
with tf.device(self.global_step_device):
global_step = tf.train.get_or_create_global_step()
if self.params.use_fp16:
init_loss_scale_val = float(self.params.fp16_loss_scale or
self.model.get_fp16_loss_scale())
if self.enable_auto_loss_scale or init_loss_scale_val != 1:
self.loss_scale = tf.get_variable(
name='loss_scale',
initializer=init_loss_scale_val,
dtype=tf.float32,
trainable=False)
self.loss_scale_normal_steps = tf.get_variable(
name='loss_scale_normal_steps', initializer=0, trainable=False)
else:
self.loss_scale = None
self.loss_scale_normal_steps = None
# Build the processing and model for the worker.
function_buffering_resources = data_utils.build_prefetch_image_processing(
self.model.get_image_size(), self.model.get_image_size(),
self.batch_size, len(
self.devices), self.image_preprocessor.parse_and_preprocess,
self.cpu_device, self.params, self.devices, self.dataset)
update_ops = None
for device_num in range(len(self.devices)):
with self.variable_mgr.create_outer_variable_scope(
device_num), tf.name_scope('tower_%i' % device_num) as name_scope:
function_buffering_resource = function_buffering_resources[device_num]
results = self.add_forward_pass_and_gradients(
phase_train, device_num, device_num, None, None, None,
function_buffering_resource)
if phase_train:
losses.append(results['loss'])
device_grads.append(results['gradvars'])
else:
all_logits.append(results['logits'])
if not phase_train or self.params.print_training_accuracy:
all_top_1_ops.append(results['top_1_op'])
all_top_5_ops.append(results['top_5_op'])
if device_num == 0:
# Retain the Batch Normalization updates operations only from the
# first tower. These operations update the moving mean and moving
# variance variables, which are updated (but not used) during
# training, and used during evaluation. The moving mean and variance
# approximate the true mean and variance across all images in the
# dataset. Therefore, in replicated mode, these moving averages would
# be almost identical for each tower, and so we only update and save
# the moving averages for one tower. In parameter server mode, all
# towers share a copy of the variables so we also only need to update
# and save the moving averages once.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope)
assert not self.variable_mgr.staging_delta_ops
fetches = self._build_fetches(global_step, all_logits, losses, device_grads,
None, update_ops, all_top_1_ops,
all_top_5_ops, phase_train)
return (None, [], fetches)
def _build_fetches(self, global_step, all_logits, losses, device_grads,
enqueue_ops, update_ops, all_top_1_ops, all_top_5_ops,
phase_train):
"""Complete construction of model graph, populating the fetches map."""
fetches = {}
if enqueue_ops:
fetches['enqueue_ops'] = enqueue_ops
if all_top_1_ops:
fetches['top_1_accuracy'] = tf.reduce_sum(all_top_1_ops) / self.batch_size
if self.task_index == 0 and self.params.summary_verbosity >= 1:
tf.summary.scalar('top_1_accuracy', fetches['top_1_accuracy'])
if all_top_5_ops:
fetches['top_5_accuracy'] = tf.reduce_sum(all_top_5_ops) / self.batch_size
if self.task_index == 0 and self.params.summary_verbosity >= 1:
tf.summary.scalar('top_5_accuracy', fetches['top_5_accuracy'])
if not phase_train:
if self.params.forward_only:
fetches['all_logits'] = tf.concat(all_logits, 0)
return fetches
apply_gradient_devices, gradient_state = (
self.variable_mgr.preprocess_device_grads(device_grads))
training_ops = []
for d, device in enumerate(apply_gradient_devices):
with tf.device(device):
average_loss = tf.reduce_mean(losses)
avg_grads = self.variable_mgr.get_gradients_to_apply(d, gradient_state)
gradient_clip = self.params.gradient_clip
learning_rate = get_learning_rate(self.params, global_step,
self.dataset.num_examples_per_epoch(),
self.model, self.batch_size)
if gradient_clip is not None:
clipped_grads = [(tf.clip_by_value(grad, -gradient_clip,
+gradient_clip), var)
for grad, var in avg_grads]
else:
clipped_grads = avg_grads
learning_rate = tf.identity(learning_rate, name='learning_rate')
opt = get_optimizer(self.params, learning_rate)
loss_scale_params = variable_mgr_util.AutoLossScaleParams(
enable_auto_loss_scale=self.enable_auto_loss_scale,
loss_scale=self.loss_scale,
loss_scale_normal_steps=self.loss_scale_normal_steps,
inc_loss_scale_every_n=self.params.fp16_inc_loss_scale_every_n,
is_chief=not self.job_name or self.task_index == 0)
self.variable_mgr.append_apply_gradients_ops(
gradient_state, opt, clipped_grads, training_ops, loss_scale_params)
train_op = tf.group(*(training_ops + update_ops))
with tf.device(self.cpu_device):
if self.task_index == 0 and self.params.summary_verbosity >= 1:
tf.summary.scalar('learning_rate', learning_rate)
tf.summary.scalar(self.params.loss_type_to_report, average_loss)
if self.loss_scale is not None:
tf.summary.scalar('loss_scale', self.loss_scale)
tf.summary.scalar('loss_scale_normal_steps',
self.loss_scale_normal_steps)
if self.params.summary_verbosity >= 2:
# Histogram of log values of all non-zero gradients.
all_grads = []
for grad, var in avg_grads:
all_grads.append(tf.reshape(grad, [-1]))
grads = tf.abs(tf.concat(all_grads, 0))
# exclude grads with zero values.
indices_for_non_zero_grads = tf.where(tf.not_equal(grads, 0))
log_grads = tf.reshape(
tf.log(tf.gather(grads, indices_for_non_zero_grads)), [-1])
tf.summary.histogram('log_gradients', log_grads)
if self.params.summary_verbosity >= 3:
for grad, var in avg_grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
fetches['train_op'] = train_op
fetches['average_loss'] = average_loss
return fetches
  def _build_model_single_session(self):
    """Build the TensorFlow graph for multiple replicas in a single_session.
    Single session runs multiple model replicas as part of one large
    distributed graph, whose global execution is always step-synchronized.
    Returns:
      image_producer_ops:
      enqueue_ops:
      fetches:
    Raises:
      ValueError: optimizer not recognized.
    """
# verify assumptions
assert self.params.task_index == 0
assert not self.params.eval
assert not self.params.forward_only
assert not self.params.staged_vars
tf.set_random_seed(self.params.tf_random_seed)
np.random.seed(4321)
phase_train = True
log_fn('Generating model')
losses = []
device_grads = []
all_logits = []
all_top_1_ops = []
all_top_5_ops = []
enqueue_ops = []
gpu_compute_stage_ops = []
gpu_grad_stage_ops = []
with tf.device(self.global_step_device):
global_step = tf.train.get_or_create_global_step()
update_ops = []
global_image_producer_ops = []
is_local = not self.job_name
if is_local:
assert self.num_workers == 1
for task_num in range(self.num_workers):
# Reset the devices that self.variable_mgr knows about to those
# belonging to the next worker (task).
self.reset_devices_for_task(task_num, is_local)
# Build the per-worker image processing
(image_producer_ops, image_producer_stages) = (
self._build_image_processing(
shift_ratio=(float(task_num) / self.num_workers)))
global_image_producer_ops.extend(image_producer_ops)
# Build the per-worker model replica.
for rel_device_num in range(len(self.devices)):
abs_device_num = task_num * len(self.devices) + rel_device_num
with self.variable_mgr.create_outer_variable_scope(
abs_device_num), tf.name_scope(
'task_%i_tower_%i' % (task_num, rel_device_num)) as name_scope:
task_results = self.add_forward_pass_and_gradients(
phase_train, rel_device_num, abs_device_num,
image_producer_stages[rel_device_num], gpu_compute_stage_ops,
gpu_grad_stage_ops)
if phase_train:
losses.append(task_results['loss'])
device_grads.append(task_results['gradvars'])
else:
all_logits.append(task_results['logits'])
if not phase_train or self.params.print_training_accuracy:
all_top_1_ops.append(task_results['top_1_op'])
all_top_5_ops.append(task_results['top_5_op'])
if rel_device_num == 0:
# Retain the Batch Normalization updates operations only
# from the first tower. These operations update the moving
# mean and moving variance variables, which are updated
# (but not used) during training, and used during
# evaluation. The moving mean and variance approximate the
# true mean and variance across all images in the
# dataset. Therefore, in replicated mode, these moving
# averages would be almost identical for each tower, and
# so we only update and save the moving averages for one
# tower. In parameter server mode, all towers share a copy
# of the variables so we also only need to update and save
# the moving averages once.
update_ops.extend(
tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope))
assert not self.variable_mgr.staging_delta_ops
enqueue_ops.append(tf.group(*gpu_compute_stage_ops))
assert not self.variable_mgr.supports_staged_vars()
assert not gpu_grad_stage_ops
fetches = self._build_fetches(global_step, all_logits, losses, device_grads,
enqueue_ops, update_ops, all_top_1_ops,
all_top_5_ops, phase_train)
global_image_producer_ops = tf.group(*global_image_producer_ops)
return (global_image_producer_ops, enqueue_ops, fetches)
# TODO(rohanj): Refactor this function and share with other code path.
  def _build_model_single_session_with_dataset_prefetching(self):
    """Build the TensorFlow graph for multiple replicas in a single_session.
    Single session runs multiple model replicas as part of one large
    distributed graph, whose global execution is always step-synchronized.
    Returns:
      image_producer_ops:
      enqueue_ops:
      fetches:
    Raises:
      ValueError: optimizer not recognized.
    """
# verify assumptions
assert self.params.task_index == 0
assert not self.params.eval
assert not self.params.forward_only
assert not self.params.staged_vars
tf.set_random_seed(self.params.tf_random_seed)
np.random.seed(4321)
phase_train = True
log_fn('Generating model')
losses = []
device_grads = []
all_logits = []
all_top_1_ops = []
all_top_5_ops = []
with tf.device(self.global_step_device):
global_step = tf.train.get_or_create_global_step()
update_ops = []
is_local = not self.job_name
if is_local:
assert self.num_workers == 1
for task_num in range(self.num_workers):
# Reset the devices that self.variable_mgr knows about to those
# belonging to the next worker (task).
self.reset_devices_for_task(task_num, is_local)
# Build the per-worker image processing
function_buffering_resources = data_utils.build_prefetch_image_processing(
self.model.get_image_size(), self.model.get_image_size(),
self.batch_size // len(self.devices), self.cpu_device, self.params,
self.devices, self.dataset)
# Build the per-worker model replica.
for rel_device_num in range(len(self.devices)):
abs_device_num = task_num * len(self.devices) + rel_device_num
with self.variable_mgr.create_outer_variable_scope(
abs_device_num), tf.name_scope(
'task_%i_tower_%i' % (task_num, rel_device_num)) as name_scope:
function_buffering_resource = (
function_buffering_resources[rel_device_num])
task_results = self.add_forward_pass_and_gradients(
phase_train, rel_device_num, abs_device_num, None, None, None,
function_buffering_resource)
if phase_train:
losses.append(task_results['loss'])
device_grads.append(task_results['gradvars'])
else:
all_logits.append(task_results['logits'])
if not phase_train or self.params.print_training_accuracy:
all_top_1_ops.append(task_results['top_1_op'])
all_top_5_ops.append(task_results['top_5_op'])
if rel_device_num == 0:
# Retain the Batch Normalization updates operations only
# from the first tower. These operations update the moving
# mean and moving variance variables, which are updated
# (but not used) during training, and used during
# evaluation. The moving mean and variance approximate the
# true mean and variance across all images in the
# dataset. Therefore, in replicated mode, these moving
# averages would be almost identical for each tower, and
# so we only update and save the moving averages for one
# tower. In parameter server mode, all towers share a copy
# of the variables so we also only need to update and save
# the moving averages once.
update_ops.extend(
tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope))
assert not self.variable_mgr.staging_delta_ops
assert not self.variable_mgr.supports_staged_vars()
fetches = self._build_fetches(global_step, all_logits, losses, device_grads,
None, update_ops, all_top_1_ops,
all_top_5_ops, phase_train)
return (None, [], fetches)
def add_forward_pass_and_gradients(self,
phase_train,
rel_device_num,
abs_device_num,
image_producer_stage,
gpu_compute_stage_ops,
gpu_grad_stage_ops,
function_buffering_resource=None):
"""Add ops for forward-pass and gradient computations."""
nclass = self.dataset.num_classes
data_type = get_data_type(self.params)
image_size = self.model.get_image_size()
if self.datasets_use_prefetch and function_buffering_resource is not None:
with tf.device(self.raw_devices[rel_device_num]):
images, labels = data_utils.get_images_and_labels(
function_buffering_resource, data_type)
images = tf.reshape(
images,
shape=[
self.batch_size // self.num_gpus, image_size, image_size,
self.dataset.depth
])
else:
if not self.use_synthetic_gpu_images:
with tf.device(self.cpu_device):
host_images, host_labels = image_producer_stage.get()
images_shape = host_images.get_shape()
labels_shape = host_labels.get_shape()
with tf.device(self.raw_devices[rel_device_num]):
if not self.use_synthetic_gpu_images:
gpu_compute_stage = data_flow_ops.StagingArea(
[host_images.dtype, host_labels.dtype],
shapes=[images_shape, labels_shape])
# The CPU-to-GPU copy is triggered here.
gpu_compute_stage_op = gpu_compute_stage.put(
[host_images, host_labels])
images, labels = gpu_compute_stage.get()
images = tf.reshape(images, shape=images_shape)
gpu_compute_stage_ops.append(gpu_compute_stage_op)
else:
# Minor hack to avoid H2D copy when using synthetic data
image_shape = [
self.batch_size // self.num_gpus, image_size, image_size,
self.dataset.depth
]
labels_shape = [self.batch_size // self.num_gpus]
# Synthetic image should be within [0, 255].
images = tf.truncated_normal(
image_shape,
dtype=data_type,
mean=127,
stddev=60,
name='synthetic_images')
images = tf.contrib.framework.local_variable(
images, name='gpu_cached_images')
labels = tf.random_uniform(
labels_shape,
minval=0,
maxval=nclass - 1,
dtype=tf.int32,
name='synthetic_labels')
with tf.device(self.devices[rel_device_num]):
logits, aux_logits = self.model.build_network(
images, phase_train, nclass, self.dataset.depth, data_type,
self.data_format, self.params.use_tf_layers, self.params.fp16_vars)
results = {} # The return value
if not phase_train or self.params.print_training_accuracy:
top_1_op = tf.reduce_sum(
tf.cast(tf.nn.in_top_k(logits, labels, 1), data_type))
top_5_op = tf.reduce_sum(
tf.cast(tf.nn.in_top_k(logits, labels, 5), data_type))
results['top_1_op'] = top_1_op
results['top_5_op'] = top_5_op
if not phase_train:
results['logits'] = logits
return results
loss_func = self.model.loss_function or loss_function
base_loss = loss_func(logits, labels, aux_logits=aux_logits)
params = self.variable_mgr.trainable_variables_on_device(
rel_device_num, abs_device_num)
fp32_params = params
if data_type == tf.float16 and self.params.fp16_vars:
# fp16 reductions are very slow on GPUs, so cast to fp32 before calling
# tf.nn.l2_loss and tf.add_n.
# TODO(b/36217816): Once the bug is fixed, investigate if we should do
# this reduction in fp16.
fp32_params = (tf.cast(p, tf.float32) for p in params)
total_loss = base_loss
if rel_device_num == len(self.devices) - 1:
# We compute the L2 loss for only one device instead of all of them,
# because the L2 loss for each device is the same. To adjust for this,
# we multiply the L2 loss by the number of devices. We choose the last
# device because for some reason, on a Volta DGX1, the first four
# GPUs take slightly longer to complete a step than the last four.
# TODO(reedwm): Shard the L2 loss computations across GPUs.
if self.params.single_l2_loss_op:
# TODO(reedwm): If faster, create a fused op that does the L2 loss on
# multiple tensors, and use that instead of concatenating tensors.
reshaped_params = [tf.reshape(p, (-1,)) for p in fp32_params]
l2_loss = tf.nn.l2_loss(tf.concat(reshaped_params, axis=0))
else:
l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in fp32_params])
weight_decay = self.params.weight_decay
if weight_decay is not None and weight_decay != 0.:
total_loss += len(self.devices) * weight_decay * l2_loss
aggmeth = tf.AggregationMethod.DEFAULT
scaled_loss = (total_loss if self.loss_scale is None
else total_loss * self.loss_scale)
grads = tf.gradients(scaled_loss, params, aggregation_method=aggmeth)
if self.loss_scale is not None:
# TODO(reedwm): If automatic loss scaling is not used, we could avoid
# these multiplications by directly modifying the learning rate instead.
# If this is done, care must be taken to ensure that this scaling method
# is correct, as some optimizers square gradients and do other
# operations which might not be compatible with modifying both the
# gradients and the learning rate.
grads = [
grad * tf.cast(1. / self.loss_scale, grad.dtype) for grad in grads
]
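      # Note on the rescaling above (illustrative reasoning, not from the
      # original comments): for a scale s, d(s * loss)/dw == s * d(loss)/dw,
      # so multiplying each gradient by 1 / s recovers the gradients of the
      # unscaled loss exactly, up to floating-point rounding. The scale exists
      # only to keep small fp16 gradients from underflowing to zero during
      # backprop.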
if self.params.variable_update == 'horovod':
import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top
if self.params.horovod_device:
horovod_device = '/%s:0' % self.params.horovod_device
else:
horovod_device = ''
# All-reduce gradients using Horovod.
grads = [hvd.allreduce(grad, average=False, device_dense=horovod_device)
for grad in grads]
if self.params.staged_vars:
grad_dtypes = [grad.dtype for grad in grads]
grad_shapes = [grad.shape for grad in grads]
grad_stage = data_flow_ops.StagingArea(grad_dtypes, grad_shapes)
grad_stage_op = grad_stage.put(grads)
# In general, this decouples the computation of the gradients and
# the updates of the weights.
# During the pipeline warm up, this runs enough training to produce
# the first set of gradients.
gpu_grad_stage_ops.append(grad_stage_op)
grads = grad_stage.get()
param_refs = self.variable_mgr.trainable_variables_on_device(
rel_device_num, abs_device_num, writable=True)
gradvars = list(zip(grads, param_refs))
if self.params.loss_type_to_report == 'total_loss':
results['loss'] = total_loss
else:
results['loss'] = base_loss
results['gradvars'] = gradvars
return results
def get_image_preprocessor(self):
"""Returns the image preprocessor to used, based on the model.
Returns:
The image preprocessor, or None if synthetic data should be used.
"""
image_size = self.model.get_image_size()
input_data_type = get_data_type(self.params)
shift_ratio = 0
if self.job_name:
# shift_ratio prevents multiple workers from processing the same batch
# during a step
shift_ratio = float(self.task_index) / self.num_workers
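    # Illustrative example: with num_workers=4, workers 0..3 use shift_ratio
    # 0.0, 0.25, 0.5 and 0.75 respectively, so each worker starts reading at a
    # different offset into the input and no two workers see the same batch in
    # the same step.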
processor_class = self.dataset.get_image_preprocessor(
self.params.input_preprocessor)
assert processor_class
return processor_class(
image_size,
image_size,
self.batch_size * self.batch_group_size,
len(self.devices) * self.batch_group_size,
dtype=input_data_type,
train=(not self.params.eval),
distortions=self.params.distortions,
resize_method=self.resize_method,
shift_ratio=shift_ratio,
summary_verbosity=self.params.summary_verbosity,
distort_color_in_yiq=self.params.distort_color_in_yiq,
fuse_decode_and_crop=self.params.fuse_decode_and_crop)
def add_sync_queues_and_barrier(self, name_prefix, enqueue_after_list):
"""Adds ops to enqueue on all worker queues.
Args:
      name_prefix: prefix used for the shared_name of ops.
enqueue_after_list: control dependency from ops.
Returns:
An op that should be used as control dependency before starting next step.
"""
self.sync_queue_counter += 1
with tf.device(self.sync_queue_devices[(
self.sync_queue_counter % len(self.sync_queue_devices))]):
sync_queues = [
tf.FIFOQueue(self.num_workers, [tf.bool], shapes=[[]],
shared_name='%s%s' % (name_prefix, i))
for i in range(self.num_workers)]
queue_ops = []
# For each other worker, add an entry in a queue, signaling that it can
# finish this step.
token = tf.constant(False)
with tf.control_dependencies(enqueue_after_list):
for i, q in enumerate(sync_queues):
if i == self.task_index:
queue_ops.append(tf.no_op())
else:
queue_ops.append(q.enqueue(token))
# Drain tokens off queue for this worker, one for each other worker.
queue_ops.append(
sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1))
return tf.group(*queue_ops)
def store_benchmarks(names_to_values, params):
if params.result_storage:
benchmark_storage.store_benchmark(names_to_values, params.result_storage)
def setup(params):
"""Sets up the environment that BenchmarkCNN should run in.
Args:
params: Params tuple, typically created by make_params or
make_params_from_flags.
Returns:
A potentially modified params.
Raises:
    ValueError: invalid params combination.
"""
if params.batchnorm_persistent:
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
else:
os.environ.pop('TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT', None)
if params.winograd_nonfused:
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
else:
os.environ.pop('TF_ENABLE_WINOGRAD_NONFUSED', None)
if params.autotune_threshold:
os.environ['TF_AUTOTUNE_THRESHOLD'] = str(params.autotune_threshold)
os.environ['TF_SYNC_ON_FINISH'] = str(int(params.sync_on_finish))
# Sets environment variables for MKL
if params.mkl:
os.environ['KMP_BLOCKTIME'] = str(params.kmp_blocktime)
os.environ['KMP_SETTINGS'] = str(params.kmp_settings)
os.environ['KMP_AFFINITY'] = params.kmp_affinity
if params.num_intra_threads > 0:
os.environ['OMP_NUM_THREADS'] = str(params.num_intra_threads)
# Sets GPU thread settings
params = params._replace(gpu_thread_mode=params.gpu_thread_mode.lower())
if params.gpu_thread_mode not in ['global', 'gpu_shared', 'gpu_private']:
raise ValueError('Invalid gpu_thread_mode: %s' % params.gpu_thread_mode)
os.environ['TF_GPU_THREAD_MODE'] = params.gpu_thread_mode
if params.per_gpu_thread_count and params.gpu_thread_mode == 'global':
raise ValueError(
'Invalid per_gpu_thread_count with gpu_thread_mode=global: %s' %
params.per_gpu_thread_count)
# Default to two threads. One for the device compute and the other for
# memory copies.
per_gpu_thread_count = params.per_gpu_thread_count or 2
total_gpu_thread_count = per_gpu_thread_count * params.num_gpus
if params.gpu_thread_mode == 'gpu_private':
os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
elif params.gpu_thread_mode == 'gpu_shared':
os.environ['TF_GPU_THREAD_COUNT'] = str(total_gpu_thread_count)
cpu_count = multiprocessing.cpu_count()
if not params.num_inter_threads and params.gpu_thread_mode in [
'gpu_private', 'gpu_shared'
]:
main_thread_count = max(cpu_count - total_gpu_thread_count, 1)
params = params._replace(num_inter_threads=main_thread_count)
if (params.datasets_use_prefetch and
params.datasets_num_private_threads is None):
# From the total cpu thread count, subtract the total_gpu_thread_count,
# and then 2 threads per GPU device for event monitoring and sending /
# receiving tensors
num_monitoring_threads = 2 * params.num_gpus
num_private_threads = max(
cpu_count - total_gpu_thread_count - num_monitoring_threads, 1)
params = params._replace(datasets_num_private_threads=num_private_threads)
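  # Worked example (illustrative): on a 40-core host with num_gpus=8 and the
  # default per_gpu_thread_count=2, total_gpu_thread_count is 16, so
  # num_inter_threads defaults to max(40 - 16, 1) == 24 and, with dataset
  # prefetching, datasets_num_private_threads defaults to
  # max(40 - 16 - 2 * 8, 1) == 8.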
if params.variable_update == 'horovod':
import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top
hvd.init()
platforms_util.initialize(params, create_config_proto(params))
return params
benchmarks-master/scripts/tf_cnn_benchmarks/flags.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains functions to define flags and params.
Calling a DEFINE_* function will add a ParamSpec namedtuple to the param_specs
dict. The DEFINE_* arguments match those in absl. Calling define_flags() creates
a command-line flag for every ParamSpec defined by a DEFINE_* function.
The reason we don't use absl flags directly is that we want to be able to use
tf_cnn_benchmarks as a library. When using it as a library, we don't want to
define any flags, but instead pass parameters to the BenchmarkCNN constructor.
"""
from collections import namedtuple
from absl import flags as absl_flags
import six
FLAGS = absl_flags.FLAGS
# ParamSpec describes one of benchmark_cnn.BenchmarkCNN's parameters.
ParamSpec = namedtuple('_ParamSpec',
['flag_type', 'default_value', 'description',
'kwargs'])
# Maps from parameter name to its ParamSpec.
param_specs = {}
def DEFINE_string(name, default, help): # pylint: disable=invalid-name,redefined-builtin
param_specs[name] = ParamSpec('string', default, help, {})
def DEFINE_boolean(name, default, help): # pylint: disable=invalid-name,redefined-builtin
param_specs[name] = ParamSpec('boolean', default, help, {})
def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None): # pylint: disable=invalid-name,redefined-builtin
kwargs = {'lower_bound': lower_bound, 'upper_bound': upper_bound}
param_specs[name] = ParamSpec('integer', default, help, kwargs)
def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None): # pylint: disable=invalid-name,redefined-builtin
kwargs = {'lower_bound': lower_bound, 'upper_bound': upper_bound}
param_specs[name] = ParamSpec('float', default, help, kwargs)
def DEFINE_enum(name, default, enum_values, help): # pylint: disable=invalid-name,redefined-builtin
kwargs = {'enum_values': enum_values}
param_specs[name] = ParamSpec('enum', default, help, kwargs)
def DEFINE_list(name, default, help): # pylint: disable=invalid-name,redefined-builtin
param_specs[name] = ParamSpec('list', default, help, {})
def define_flags():
"""Define a command line flag for each ParamSpec in flags.param_specs."""
define_flag = {
'boolean': absl_flags.DEFINE_boolean,
'float': absl_flags.DEFINE_float,
'integer': absl_flags.DEFINE_integer,
'string': absl_flags.DEFINE_string,
'enum': absl_flags.DEFINE_enum,
'list': absl_flags.DEFINE_list
}
for name, param_spec in six.iteritems(param_specs):
if param_spec.flag_type not in define_flag:
raise ValueError('Unknown flag_type %s' % param_spec.flag_type)
else:
define_flag[param_spec.flag_type](name, param_spec.default_value,
help=param_spec.description,
**param_spec.kwargs)
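# Usage sketch (hypothetical caller, not part of this module): a script would
# typically declare its params and then materialize them as absl flags:
#
#   import flags
#   flags.DEFINE_integer('my_num_layers', 50, 'number of layers',
#                        lower_bound=1)
#   flags.define_flags()  # registers --my_num_layers as a command-line flag
#
# Library users skip define_flags() and instead read flags.param_specs to
# build a params namedtuple directly.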
benchmarks-master/scripts/tf_cnn_benchmarks/allreduce.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for allreduce."""
from __future__ import print_function
import collections as pycoll
import re
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.all_reduce.python import all_reduce
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import ops
from tensorflow.python.ops import collective_ops  # used by the collective ops below
AllReduceSpecTuple = pycoll.namedtuple('AllReduceSpecTuple', 'alg shards limit')
def parse_general_int(s):
  """Parse integer with power-of-2 suffix eg. 32k."""
  mo = re.match(r'(\d+)([KkMGT]?)$', s)
  if mo:
    i, suffix = mo.group(1, 2)
    v = int(i)
    if suffix:
      if suffix == 'K' or suffix == 'k':
        v *= 1024
      elif suffix == 'M':
        v *= (1024 * 1024)
      elif suffix == 'G':
        v *= (1024 * 1024 * 1024)
      elif suffix == 'T':
        v *= (1024 * 1024 * 1024 * 1024)
      else:
        raise ValueError('invalid integer string %s' % s)
    return v
  else:
    v = int(s)
  return v
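# Worked examples for parse_general_int (illustrative): '32k' parses to
# 32 * 1024 == 32768 and '2M' to 2 * 1024 * 1024, while a plain '100' parses
# to 100; a malformed string such as '12x' falls through to int() and raises
# ValueError.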
def parse_all_reduce_spec(all_reduce_spec):
  """Parse all_reduce_spec.
  Args:
    all_reduce_spec: a string specifying a combination of all-reduce
      algorithms to apply for gradient reduction.
  Returns:
    a list of AllReduceSpecTuple.
  Raises:
    ValueError: all_reduce_spec is not well-formed.
  An all_reduce_spec has BNF form:
    int ::= positive whole number
    g_int ::= int[KkMGT]?
    alg_spec ::= alg | alg#int
    range_spec ::= alg_spec | alg_spec/alg_spec
    spec ::= range_spec | range_spec:g_int:range_spec
  Not all syntactically correct specifications are supported.
  Examples of supported all_reduce_spec strings, with semantics explained:
    'collective' == apply tf.collective_reduce operator to all tensors.
    'xring' == apply ring all-reduce to all tensors
    'xring#2' == apply ring all-reduce to all tensors, using two simultaneous
        transfer rings, each operating on 1/2 of each tensor.
    'nccl' == apply NCCL all-reduce to all tensors (only works within
        a single worker process where all devices are GPUs)
    'nccl/xring' == apply NCCL all-reduce to all tensors within each worker
        to produce at least one full-reduced (locally) value,
        then apply ring all-reduce to one such value from each
        worker, then apply NCCL broadcast to propagate those globally
        reduced values back to every device within each worker.
    'pscpu' == Shuffle reduce using worker CPUs as the gather devices: each
        distributed tensor is reduced by copying all instances to
        one of the worker CPUs, computing the reduction there, then
        copying back to each participating device. Tensor reductions
        are assigned to specific CPUs round-robin.
    'psgpu#4' == Arrange all GPUs across all workers into groups of 4.
        Each distributed tensor is shuffle reduced against one
        such group of 4 GPUs, selected round-robin. That is, each
        tensor is split across 4 shards for the reduction.
    'pscpu:2k:pscpu#2:64k:xring' == Apply single-shard pscpu to
        tensors of size <= 2048 elements, apply 2-shard pscpu to
        tensors up to size 64k elements, apply xring to larger tensors.
    'pscpu/pscpu#2' == Use shuffle gather to locally reduce each tensor on
        the worker's CPU, then use 2-shard shuffle to reduce those
        locally reduced tensors across workers (on the worker CPUs), then
        scatter the globally reduced values locally from each worker CPU.
  """
  range_parts = all_reduce_spec.split(':') + ['-1']
  if len(range_parts) % 2:
    raise ValueError('all_reduce_spec not well formed: %s' % all_reduce_spec)
  limit = 0
  spec = []
  alg = None
  shards = 1
  for i, range_part in enumerate(range_parts):
    if i % 2 == 1:
      try:
        limit = parse_general_int(range_part)
        spec.append(AllReduceSpecTuple(alg=alg, shards=shards, limit=limit))
      except ValueError:
        raise ValueError('all_reduce_spec (%s) contains non-integer range %s' %
                         (all_reduce_spec, range_part))
    else:
      alg = range_part
      alg_parts = range_part.split('#')
      alg = alg_parts[0]
      if len(alg_parts) > 1:
        try:
          shards = int(alg_parts[1])
        except ValueError:
          raise ValueError('all_reduce_spec (%s) contains non-integer '
                           'shards %s' % (all_reduce_spec, alg_parts[1]))
      else:
        shards = 1
      if alg not in [
          'nccl', 'nccl/xring', 'nccl/rechd', 'nccl/pscpu', 'xring', 'pscpu',
          'psgpu', 'pscpu/pscpu', 'collective'
      ]:
        raise ValueError('all_reduce_spec (%s) contains invalid alg %s' %
                         (all_reduce_spec, alg))
  return spec
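# Worked example for parse_all_reduce_spec (illustrative):
# parse_all_reduce_spec('pscpu:2k:xring') returns
# [AllReduceSpecTuple(alg='pscpu', shards=1, limit=2048),
#  AllReduceSpecTuple(alg='xring', shards=1, limit=-1)],
# i.e. tensors of at most 2048 elements use single-shard pscpu and all larger
# tensors use ring all-reduce.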
def build_all_reduce_device_prefixes(job_name, num_tasks):
  """Build list of device prefix names for all_reduce.
  Args:
    job_name: 'worker', 'ps' or 'localhost'.
    num_tasks: number of jobs across which device names should be generated.
  Returns:
    A list of device name prefix strings. Each element spells out the full
    host name without adding the device.
    e.g. '/job:worker/task:0'
  """
  if job_name != 'localhost':
    return ['/job:%s/task:%d' % (job_name, d) for d in range(0, num_tasks)]
  else:
    assert num_tasks == 1
    return ['/job:%s' % job_name]
def group_device_names(devices, group_size):
  """Group device names into groups of group_size.
  Args:
    devices: list of strings naming devices.
    group_size: int >= 1
  Returns:
    list of lists of devices, where each inner list is group_size long,
    and each device appears at least once in an inner list. If
    len(devices) % group_size == 0 then each device will appear
    exactly once.
  Raises:
    ValueError: group_size > len(devices)
  """
  num_devices = len(devices)
  if group_size > num_devices:
    raise ValueError('only %d devices, but group_size=%d' % (num_devices,
                                                             group_size))
  num_groups = (
      num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
  groups = [[] for i in range(num_groups)]
  for i in range(0, num_groups * group_size):
    groups[i % num_groups].append(devices[i % num_devices])
  return groups
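# Worked example for group_device_names (illustrative):
# group_device_names(['a', 'b', 'c'], 2) yields [['a', 'c'], ['b', 'a']]: two
# groups of two, with 'a' reused to pad the second group since 3 % 2 != 0.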
def split_grads_by_size(threshold_size, device_grads):
  """Break gradients into two sets according to tensor size.
  Args:
    threshold_size: int size cutoff for small vs large tensor.
    device_grads: List of lists of (gradient, variable) tuples. The outer
        list is over devices. The inner list is over individual gradients.
  Returns:
    small_grads: Subset of device_grads where shape is <= threshold_size
       elements.
    large_grads: Subset of device_grads where shape is > threshold_size
       elements.
  """
  small_grads = []
  large_grads = []
  for dl in device_grads:
    small_dl = []
    large_dl = []
    for (g, v) in dl:
      tensor_size = g.get_shape().num_elements()
      if tensor_size <= threshold_size:
        small_dl.append([g, v])
      else:
        large_dl.append([g, v])
    if small_dl:
      small_grads.append(small_dl)
    if large_dl:
      large_grads.append(large_dl)
  return small_grads, large_grads
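# Worked example for split_grads_by_size (illustrative): with
# threshold_size=1000, a tower holding a [10] bias gradient (10 elements) and
# a [1000, 1000] weight gradient (10**6 elements) contributes the bias pair to
# small_grads and the weight pair to large_grads, letting the two sets be
# reduced with different algorithms.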
_instance_key = 1
def new_collective_instance_key():
  """Returns a new instance key for use in defining a collective op."""
  global _instance_key
  v = _instance_key
  _instance_key += 1
  return v
_group_key = 1
_group_key_table = dict()
def collective_group_key(devices):
  """Returns a group key for the set of devices.
  Args:
    devices: list of strings naming devices in a collective group.
  Returns:
    int key uniquely identifying the set of device names.
  """
  global _group_key
  global _group_key_table
  parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
  names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
  concat = ','.join(names)
  if concat not in _group_key_table.keys():
    new_key = _group_key
    _group_key += 1
    _group_key_table[concat] = new_key
  rv = _group_key_table[concat]
  return rv
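# Illustrative note on collective_group_key: any two calls with the same
# device set, e.g. ['/gpu:0', '/gpu:1'] in either order, canonicalize to the
# sorted string 'GPU:0,GPU:1' and thus return the same group key, while a
# different device set gets a fresh key.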
# def build_collective_reduce(input_tensors, num_workers,
# red_op='Add', un_op='Id'):
# """Build a subgraph that does one full all-reduce, using the collective Op.
# Args:
# input_tensors: tensors within a single worker graph that are to be reduced
# together; must be one per device.
# num_workers: total number of workers with identical independent graphs that
# will be doing this same reduction. The reduction will actually include
# the corresponding tensors at all these workers.
# red_op: string naming the reduction op
# un_op: string naming the unary final op
# Returns:
# An array of final tensors, one per device, computed by the full reduction.
# Raises:
# ValueError: There must be at least two tensors over all the workers.
# """
# group_size = len(input_tensors) * num_workers
# if group_size < 2:
# raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
# devices = [t.device for t in input_tensors]
# num_devices = len(devices)
# group_key = collective_group_key(devices)
# instance_key = new_collective_instance_key()
# out_tensors = []
# subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
# for d in range(num_devices):
# with ops.device(devices[d]):
# reduce_op = collective_ops.all_reduce(input_tensors[d],
# group_size, group_key, instance_key,
# red_op, un_op,
# subdiv_offsets)
# out_tensors.append(reduce_op)
# return out_tensors
# def broadcast_send(t, shape, dtype, group_size, group_key, instance_key):
# return collective_ops.broadcast_send(t, shape, dtype, group_size, group_key,
# instance_key)
# def broadcast_recv(shape, dtype, group_size, group_key, instance_key):
# return collective_ops.broadcast_recv(shape, dtype, group_size, group_key,
# instance_key)
def sum_grad_and_var_all_reduce(single_session,
                                grad_and_vars,
                                num_workers,
                                alg,
                                gpu_indices,
                                aux_devices=None,
                                num_shards=1):
  """Apply all-reduce algorithm over specified gradient tensors."""
  scaled_grads = [g for g, _ in grad_and_vars]
  if alg == 'collective':
    assert not single_session
    summed_grads = build_collective_reduce(
        scaled_grads, num_workers, 'Add', 'Id')
  else:
    with tf.name_scope('allreduce'):
      # Note that each grad_and_vars looks like the following:
      #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
      if alg == 'nccl':
        summed_grads = all_reduce.build_nccl_all_reduce(scaled_grads, tf.add)
      elif alg == 'xring':
        summed_grads = all_reduce.build_ring_all_reduce(
            scaled_grads, num_workers, num_shards, gpu_indices, tf.add)
      elif alg == 'nccl/xring':
        summed_grads = all_reduce.build_nccl_then_ring(scaled_grads,
                                                       num_shards, tf.add)
      elif alg == 'nccl/rechd':
        summed_grads = all_reduce.build_nccl_then_recursive_hd(
            scaled_grads, tf.add)
      elif alg == 'nccl/pscpu':
        summed_grads = all_reduce.build_nccl_then_shuffle(
            scaled_grads, aux_devices, tf.add, tf.add_n)
      elif alg == 'pscpu/pscpu':
        summed_grads = all_reduce.build_shuffle_then_shuffle(
            scaled_grads,
            aux_devices,
            # TODO(tucker): devise a way of better specifying the device set
            # for the second level.
            [aux_devices[0]],
            tf.add_n)
      elif alg in ['pscpu', 'psgpu']:
        summed_grads = all_reduce.build_shuffle_all_reduce(
            scaled_grads, aux_devices, tf.add_n)
      else:
        raise ValueError('unsupported all_reduce alg: ', alg)
  result = []
  for (_, v), g in zip(grad_and_vars, summed_grads):
    result.append([g, v])
  return result
def contains_any(haystack, needles):
  """Tests if any needle is a substring of haystack.
  Args:
    haystack: a string
    needles: list of strings
  Returns:
    True if any element of needles is a substring of haystack,
    False otherwise.
  """
  for n in needles:
    if n in haystack:
      return True
  return False
def sum_gradients_all_reduce(single_session,
                             dev_prefixes,
                             tower_grads,
                             num_workers,
                             alg,
                             num_shards,
                             gpu_indices,
                             agg_small_grads_max_bytes=0,
                             agg_small_grads_max_group=10,
                             allreduce_merge_scope=1):
  """Apply all-reduce algorithm over specified gradient tensors.
  Args:
    single_session: true if reduction is applied to one graph across
      all workers, false if this application is to a single-worker graph only.
    dev_prefixes: list of prefix strings to use to generate PS device names.
    tower_grads: the gradients to reduce.
    num_workers: number of worker processes across entire job.
    alg: the all-reduce algorithm to apply.
    num_shards: alg-specific sharding factor.
    gpu_indices: indices of local GPUs in order usable for ring-reduce.
    agg_small_grads_max_bytes: largest tensor eligible for aggregation,
      in number of bytes.
    agg_small_grads_max_group: largest permitted aggregation of small
      tensors.
    allreduce_merge_scope: size of groups into which to partition consecutive
      gradients grouped under a common 'allreduce' name scope for application
      of ScopedAllocator optimization.
  Returns:
    list of reduced tensors
  """
  alg_contains_shuffle = contains_any(alg, ['pscpu', 'psgpu'])
  is_hierarchical = '/' in alg
  if 'pscpu' in alg:
    aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
  elif 'psgpu' in alg:
    aux_devices = [
        prefix + '/gpu:%d' % i
        for i in range(len(gpu_indices))
        for prefix in dev_prefixes
    ]
  else:
    aux_devices = ['/job:localhost/cpu:0']
  aux_device_groups = group_device_names(aux_devices, num_shards
                                         if alg_contains_shuffle else 1)
  group_index = 0
  if agg_small_grads_max_bytes > 0 and agg_small_grads_max_group > 0:
    tower_grads, packing = pack_small_tensors(
        tower_grads,
        max_bytes=agg_small_grads_max_bytes,
        max_group=agg_small_grads_max_group)
  else:
    packing = None
  reduced_gv_list = []
  gv = list(zip(*tower_grads))
  chunked_gv = [gv[x:x + allreduce_merge_scope]
                for x in xrange(0, len(gv), allreduce_merge_scope)]
  for chunk in chunked_gv:
    with tf.name_scope('allreduce'):
      for grad_and_vars in chunk:
        reduced_gv_list.append(sum_grad_and_var_all_reduce(
            single_session,
            grad_and_vars, num_workers, alg, gpu_indices,
            (aux_devices if is_hierarchical
             else aux_device_groups[group_index]),
            num_shards))
        group_index = (group_index + 1) % len(aux_device_groups)
  new_tower_grads = [list(x) for x in zip(*reduced_gv_list)]
  if packing:
    new_tower_grads = unpack_small_tensors(new_tower_grads, packing)
  return new_tower_grads
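# Added illustration (not part of the original file): with 6 gradients per
# tower and allreduce_merge_scope=4, gv is split as [gv[0:4], gv[4:6]], so two
# 'allreduce' name scopes are created and the ScopedAllocator graph optimizer
# can fuse the reductions within each scope into a single op.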
def extract_ranges(index_list, range_size_limit=32):
  """Extract consecutive ranges and singles from index_list.
  Args:
    index_list: List of monotone increasing non-negative integers.
    range_size_limit: Largest size range to return. If a larger
      consecutive range exists it will be returned as multiple
      ranges.
  Returns:
    ranges, singles where ranges is a list of [first, last] pairs of
      consecutive elements in index_list, and singles is all of the
      other elements, in original order.
  """
  if not index_list:
    return [], []
  first = index_list[0]
  last = first
  ranges = []
  singles = []
  for i in index_list[1:]:
    if i == last + 1 and (last - first) <= range_size_limit:
      last = i
    else:
      if last > first:
        ranges.append([first, last])
      else:
        singles.append(first)
      first = i
      last = i
  if last > first:
    ranges.append([first, last])
  else:
    singles.append(first)
  return ranges, singles
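# Added worked example (not in the original file):
#   extract_ranges([3, 4, 5, 9, 12, 13]) == ([[3, 5], [12, 13]], [9])
# 3..5 and 12..13 are consecutive runs, while 9 stands alone as a single.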
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
  """Form the concatenation of a specified range of gradient tensors.
  Args:
    key: Value under which to store meta-data in packing that will be used
      later to restore the grad_var list structure.
    packing: Dict holding data describing packed ranges of small tensors.
    grad_vars: List of (grad, var) pairs for one tower.
    rng: A pair of integers giving the first, last indices of a consecutive
      range of tensors to be packed.
  Returns:
    A tensor that is the concatenation of all the specified small tensors.
  """
  to_pack = grad_vars[rng[0]:rng[1] + 1]
  members = []
  variables = []
  restore_shapes = []
  with tf.name_scope('pack'):
    for g, v in to_pack:
      variables.append(v)
      restore_shapes.append(g.shape)
      with tf.device(g.device):
        members.append(tf.reshape(g, [-1]))
    packing[key] = GradPackTuple(
        indices=range(rng[0], rng[1] + 1),
        vars=variables,
        shapes=restore_shapes)
    with tf.device(members[0].device):
      return tf.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
  """Unpack a previously packed collection of gradient tensors.
  Args:
    gv: A (grad, var) pair to be unpacked.
    gpt: A GradPackTuple describing the packing operation that produced gv.
  Returns:
    A list of (grad, var) pairs corresponding to the values that were
    originally packed into gv, maybe following subsequent operations like
    reduction.
  """
  elt_widths = [x.num_elements() for x in gpt.shapes]
  with tf.device(gv[0][0].device):
    with tf.name_scope('unpack'):
      splits = tf.split(gv[0], elt_widths)
      unpacked_gv = []
      for idx, s in enumerate(splits):
        unpacked_gv.append((tf.reshape(s, gpt.shapes[idx]), gpt.vars[idx]))
  return unpacked_gv
def pack_small_tensors(tower_grads, max_bytes=0, max_group=0):
  """Concatenate small gradient tensors together for reduction.
  Args:
    tower_grads: List of lists of (gradient, variable) tuples.
    max_bytes: Int giving max number of bytes in a tensor that
      may be considered small.
    max_group: Int giving max number of small tensors that may be
      concatenated into one new tensor.
  Returns:
    new_tower_grads, packing where new_tower_grads is identical to
      tower_grads except that all feasible small_tensors have been removed
      from their places and concatenated into larger tensors that are
      now in the front of the list for each tower, and packing contains
      the data necessary to restore the tower_grads structure.
  Look through the first tower for gradients of the same type (float),
  and small size, that are all sequential. For each such group,
  replace by a new tensor that is a flattened concatenation. Note
  that the corresponding variable will be absent, which doesn't matter
  because it isn't used during all-reduce.
  Requires:
    Every gv_list in towers must have isomorphic structure including identical
      tensor sizes and types.
  """
  small_indices = []
  large_indices = []
  for idx, (g, _) in enumerate(tower_grads[0]):
    if g.dtype == tf.float32 and (4 * g.shape.num_elements()) <= max_bytes:
      small_indices.append(idx)
    else:
      large_indices.append(idx)
  small_ranges, small_singles = extract_ranges(
      small_indices, range_size_limit=max_group)
  large_indices = sorted(large_indices + small_singles)
  num_gv = len(tower_grads[0])
  packing = {}
  if small_ranges:
    new_tower_grads = []
    for dev_idx, gv_list in enumerate(tower_grads):
      assert len(gv_list) == num_gv
      new_gv_list = []
      for r in small_ranges:
        key = '%d:%d' % (dev_idx, len(new_gv_list))
        new_gv_list.append((pack_range(key, packing, gv_list, r),
                            'packing_var_placeholder'))
      for i in large_indices:
        new_gv_list.append(gv_list[i])
      new_tower_grads.append(new_gv_list)
    return new_tower_grads, packing
  else:
    return tower_grads, None
def unpack_small_tensors(tower_grads, packing):
  """Undo the structure alterations to tower_grads done by pack_small_tensors.
  Args:
    tower_grads: List of List of (grad, var) tuples.
    packing: A dict generated by pack_small_tensors describing the changes
      it made to tower_grads.
  Returns:
    new_tower_grads: identical to tower_grads except that concatenations
      of small tensors have been split apart and returned to their original
      positions, paired with their original variables.
  """
  if not packing:
    return tower_grads
  new_tower_grads = []
  num_devices = len(tower_grads)
  num_packed = len(packing.keys()) // num_devices
  for dev_idx, gv_list in enumerate(tower_grads):
    new_gv_list = gv_list[num_packed:]
    for i in xrange(0, num_packed):
      k = '%d:%d' % (dev_idx, i)
      gpt = packing[k]
      gv = unpack_grad_tuple(gv_list[i], gpt)
      for gi, idx in enumerate(gpt.indices):
        assert idx == gpt.indices[gi]
        new_gv_list.insert(idx, gv[gi])
    new_tower_grads.append(new_gv_list)
  return new_tower_grads
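def _pack_unpack_example():
  """Added sketch (not part of the original file): round-trips one tower of
  two small float32 gradients through pack_small_tensors and
  unpack_small_tensors; each restored gradient is paired with its original
  variable object again."""
  tower_grads = [[(tf.constant([1., 2.]), 'w0'),
                  (tf.constant([3., 4., 5.]), 'w1')]]
  packed, packing = pack_small_tensors(tower_grads, max_bytes=64, max_group=4)
  # packed[0] now holds one 5-element concatenation in place of the two
  # gradients; packing records how to split and reshape it back.
  return unpack_small_tensors(packed, packing)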
| 23,559
| 36.575758
| 82
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/benchmark_storage.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides ways to store benchmark output."""
def store_benchmark(data, storage_type=None):
"""Store benchmark data.
Args:
data: Dictionary mapping from string benchmark name to
numeric benchmark value.
storage_type: (string) Specifies where to store benchmark
result. If storage_type is
'cbuild_benchmark_datastore': store outputs in our continuous
build datastore. gcloud must be setup in current environment
pointing to the project where data will be added.
"""
if storage_type == 'cbuild_benchmark_datastore':
try:
# pylint: disable=g-import-not-at-top
import cbuild_benchmark_storage
# pylint: enable=g-import-not-at-top
except ImportError:
raise ImportError(
'Missing cbuild_benchmark_storage.py required for '
'benchmark_cloud_datastore option')
cbuild_benchmark_storage.upload_to_benchmark_datastore(data)
else:
    assert False, 'unknown storage_type: %s' % storage_type
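# Added usage sketch (not part of the original file):
#   store_benchmark({'total_images_per_sec': 1234.5},
#                   storage_type='cbuild_benchmark_datastore')
# Any other storage_type (including the default None) trips the assertion.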
| 1,679
| 39
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/cbuild_benchmark_storage.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides a way to store benchmark results in GCE Datastore.
Datastore client is initialized from current environment.
Data is stored using the format defined in:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/test/upload_test_benchmarks_index.yaml
"""
from datetime import datetime
import json
import os
import sys
from google.cloud import datastore
_TEST_NAME_ENV_VAR = 'TF_DIST_BENCHMARK_NAME'
def upload_to_benchmark_datastore(data, test_name=None, start_time=None):
"""Use a new datastore.Client to upload data to datastore.
Create the datastore Entities from that data and upload them to the
datastore in a batch using the client connection.
Args:
data: Map from benchmark names to values.
test_name: Name of this test. If not specified, name will be set either
from TF_DIST_BENCHMARK_NAME environment variable or to default name
'TestBenchmark'.
start_time: (datetime) Time to record for this test.
Raises:
ValueError: if test_name is not passed in and TF_DIST_BENCHMARK_NAME
is not set.
"""
client = datastore.Client()
if not test_name:
if _TEST_NAME_ENV_VAR in os.environ:
test_name = os.environ[_TEST_NAME_ENV_VAR]
else:
raise ValueError(
'No test name passed in for benchmarks. '
'Either pass a test_name to upload_to_benchmark_datastore or '
'set %s environment variable.' % _TEST_NAME_ENV_VAR)
test_name = unicode(test_name)
if not start_time:
start_time = datetime.now()
# Create one Entry Entity for each benchmark entry. The wall-clock timing is
# the attribute to be fetched and displayed. The full entry information is
# also stored as a non-indexed JSON blob.
entries = []
batch = []
for name, value in data.items():
e_key = client.key('Entry')
e_val = datastore.Entity(e_key, exclude_from_indexes=['info'])
entry_map = {'name': name, 'wallTime': value, 'iters': '1'}
entries.append(entry_map)
e_val.update({
'test': test_name,
'start': start_time,
'entry': unicode(name),
'timing': value,
'info': unicode(json.dumps(entry_map))
})
batch.append(e_val)
# Create the Test Entity containing all the test information as a
# non-indexed JSON blob.
test_result = json.dumps(
{'name': test_name,
'startTime': (start_time - datetime(1970, 1, 1)).total_seconds(),
'entries': {'entry': entries},
'runConfiguration': {'argument': sys.argv[1:]}})
t_key = client.key('Test')
t_val = datastore.Entity(t_key, exclude_from_indexes=['info'])
t_val.update({
'test': test_name,
'start': start_time,
'info': unicode(test_result)
})
batch.append(t_val)
# Put the whole batch of Entities in the datastore.
client.put_multi(batch)
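# Added illustration (not part of the original file): for
# data={'images_per_sec': 1500.0} one 'Entry' entity is created whose indexed
# fields are (test, start, entry, timing), plus a non-indexed 'info' JSON blob
#   {"name": "images_per_sec", "wallTime": 1500.0, "iters": "1"}
# and a single 'Test' entity summarizing the whole run.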
| 3,511
| 34.12
| 110
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/platforms/util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility code for a certain platform.
This file simply imports everything from the default platform. To switch to a
different platform, the import statement can be changed to point to a new
platform.
Creating a custom platform can be useful to, e.g., run some initialization code
required by the platform or register a platform-specific model.
"""
from platforms.default.util import * # pylint: disable=unused-import,wildcard-import
| 1,129
| 40.851852
| 85
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/platforms/__init__.py
| 0
| 0
| 0
|
py
|
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/platforms/default/util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility code for the default platform."""
import os
import sys
import tempfile
import cnn_util
_ROOT_PROJECT_DIR = os.path.dirname(cnn_util.__file__)
def define_platform_params():
"""Defines platform-specific parameters.
Currently there are no platform-specific parameters to be defined.
"""
pass
def get_cluster_manager(params, config_proto):
"""Returns the cluster manager to be used."""
return cnn_util.GrpcClusterManager(params, config_proto)
def get_command_to_run_python_module(module):
"""Returns a command to run a Python module."""
  python_interpreter = sys.executable
  if not python_interpreter:
    raise ValueError('Could not find Python interpreter')
  return [python_interpreter,
          os.path.join(_ROOT_PROJECT_DIR, module + '.py')]
def get_test_output_dir():
"""Returns a directory where test outputs should be placed."""
base_dir = os.environ.get('TEST_OUTPUTS_DIR',
'/tmp/tf_cnn_benchmarks_test_outputs')
if not os.path.exists(base_dir):
os.mkdir(base_dir)
return tempfile.mkdtemp(dir=base_dir)
def get_test_data_dir():
"""Returns the path to the test_data directory."""
return os.path.join(_ROOT_PROJECT_DIR, 'test_data')
def _initialize(params, config_proto):
# Currently, no platform initialization needs to be done.
del params, config_proto
_is_initialized = False
def initialize(params, config_proto):
  global _is_initialized
  if _is_initialized:
    return
  _is_initialized = True
  _initialize(params, config_proto)
| 2,226
| 27.551282
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/platforms/default/__init__.py
| 0
| 0
| 0
|
py
|
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/mobilenet_v2.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mobilenet V2 model, branched from slim models for fp16 performance study.
Architecture: https://arxiv.org/abs/1801.04381
The base model gives 72.2% accuracy on ImageNet, with 300M MAdds and
3.4M parameters.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
from models import mobilenet as lib
from models import mobilenet_conv_blocks as ops
from models import model
slim = tf.contrib.slim
op = lib.op
expand_input = ops.expand_input_by_factor
# pyformat: disable
# Architecture: https://arxiv.org/abs/1801.04381
V2_DEF = dict(
defaults={
# Note: these parameters of batch norm affect the architecture
# that's why they are here and not in training_scope.
(slim.batch_norm,): {'center': True, 'scale': True},
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6
},
(ops.expanded_conv,): {
'expansion_size': expand_input(6),
'split_expansion': 1,
'normalizer_fn': slim.batch_norm,
'residual': True
},
(slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'}
},
spec=[
op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
op(ops.expanded_conv,
expansion_size=expand_input(1, divisible_by=1),
num_outputs=16),
op(ops.expanded_conv, stride=2, num_outputs=24),
op(ops.expanded_conv, stride=1, num_outputs=24),
op(ops.expanded_conv, stride=2, num_outputs=32),
op(ops.expanded_conv, stride=1, num_outputs=32),
op(ops.expanded_conv, stride=1, num_outputs=32),
op(ops.expanded_conv, stride=2, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=96),
op(ops.expanded_conv, stride=1, num_outputs=96),
op(ops.expanded_conv, stride=1, num_outputs=96),
op(ops.expanded_conv, stride=2, num_outputs=160),
op(ops.expanded_conv, stride=1, num_outputs=160),
op(ops.expanded_conv, stride=1, num_outputs=160),
op(ops.expanded_conv, stride=1, num_outputs=320),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280)
],
)
# pyformat: enable
@slim.add_arg_scope
def mobilenet(input_tensor,
num_classes=1001,
depth_multiplier=1.0,
scope='MobilenetV2',
conv_defs=None,
finegrain_classification_mode=False,
min_depth=None,
divisible_by=None,
**kwargs):
"""Creates mobilenet V2 network.
Inference mode is created by default. To create training use training_scope
below.
with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
Args:
input_tensor: The input tensor
num_classes: number of classes
depth_multiplier: The multiplier applied to scale number of
channels in each layer. Note: this is called depth multiplier in the
paper but the name is kept for consistency with slim's model builder.
scope: Scope of the operator
conv_defs: Allows to override default conv def.
finegrain_classification_mode: When set to True, the model
will keep the last layer large even for small multipliers. Following
https://arxiv.org/abs/1801.04381
suggests that it improves performance for ImageNet-type of problems.
*Note* ignored if final_endpoint makes the builder exit earlier.
min_depth: If provided, will ensure that all layers will have that
many channels after application of depth multiplier.
divisible_by: If provided will ensure that all layers # channels
will be divisible by this number.
**kwargs: passed directly to mobilenet.mobilenet:
      prediction_fn: what prediction function to use.
      reuse: whether to reuse variables (if reuse is set to True, scope
        must be given).
Returns:
logits/endpoints pair
Raises:
ValueError: On invalid arguments
"""
if conv_defs is None:
conv_defs = V2_DEF
if 'multiplier' in kwargs:
raise ValueError('mobilenetv2 doesn\'t support generic '
'multiplier parameter use "depth_multiplier" instead.')
if finegrain_classification_mode:
conv_defs = copy.deepcopy(conv_defs)
if depth_multiplier < 1:
conv_defs['spec'][-1].params['num_outputs'] /= depth_multiplier
depth_args = {}
# NB: do not set depth_args unless they are provided to avoid overriding
# whatever default depth_multiplier might have thanks to arg_scope.
if min_depth is not None:
depth_args['min_depth'] = min_depth
if divisible_by is not None:
depth_args['divisible_by'] = divisible_by
with slim.arg_scope((lib.depth_multiplier,), **depth_args):
return lib.mobilenet(
input_tensor,
num_classes=num_classes,
conv_defs=conv_defs,
scope=scope,
multiplier=depth_multiplier,
**kwargs)
@slim.add_arg_scope
def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs):
"""Creates base of the mobilenet (no pooling and no logits) ."""
return mobilenet(input_tensor,
depth_multiplier=depth_multiplier,
base_only=True, **kwargs)
def training_scope(**kwargs):
"""Defines MobilenetV2 training scope.
Usage:
with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
Args:
**kwargs: Passed to mobilenet.training_scope. The following parameters
are supported:
weight_decay- The weight decay to use for regularizing the model.
stddev- Standard deviation for initialization, if negative uses xavier.
dropout_keep_prob- dropout keep probability
bn_decay- decay for the batch norm moving averages.
Returns:
An `arg_scope` to use for the mobilenet v2 model.
"""
return lib.training_scope(**kwargs)
class MobilenetModel(model.Model):
"""Mobilenet model configuration."""
def __init__(self):
super(MobilenetModel, self).__init__('mobilenet', 224, 32, 0.005)
def add_inference(self, cnn):
with tf.contrib.slim.arg_scope(training_scope(is_training=cnn.phase_train)):
cnn.top_layer, _ = mobilenet(cnn.top_layer, is_training=cnn.phase_train)
cnn.top_size = cnn.top_layer.shape[-1].value
| 7,317
| 35.59
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/nasnet_model.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model configurations for nasnet.
Paper: https://arxiv.org/abs/1707.07012
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from models import model
from models import nasnet_utils
arg_scope = tf.contrib.framework.arg_scope
slim = tf.contrib.slim
# Notes for training NASNet Cifar Model
# -------------------------------------
# batch_size: 32
# learning rate: 0.025
# cosine (single period) learning rate decay
# auxiliary head loss weighting: 0.4
# clip global norm of all gradients by 5
def _cifar_config(is_training=True, data_format=None, total_steps=None):
drop_path_keep_prob = 1.0 if not is_training else 0.6
return tf.contrib.training.HParams(
stem_multiplier=3.0,
drop_path_keep_prob=drop_path_keep_prob,
num_cells=18,
use_aux_head=1,
num_conv_filters=32,
dense_dropout_keep_prob=1.0,
filter_scaling_rate=2.0,
num_reduction_layers=2,
skip_reduction_layer_input=0,
data_format=data_format or 'NHWC',
# 600 epochs with a batch size of 32
# This is used for the drop path probabilities since it needs to increase
# the drop out probability over the course of training.
total_training_steps=total_steps or 937500,
)
# Notes for training large NASNet model on ImageNet
# -------------------------------------
# batch size (per replica): 16
# learning rate: 0.015 * 100
# learning rate decay factor: 0.97
# num epochs per decay: 2.4
# sync sgd with 100 replicas
# auxiliary head loss weighting: 0.4
# label smoothing: 0.1
# clip global norm of all gradients by 10
def _large_imagenet_config(is_training=True, data_format=None,
total_steps=None):
drop_path_keep_prob = 1.0 if not is_training else 0.7
return tf.contrib.training.HParams(
stem_multiplier=3.0,
dense_dropout_keep_prob=0.5,
num_cells=18,
filter_scaling_rate=2.0,
num_conv_filters=168,
drop_path_keep_prob=drop_path_keep_prob,
use_aux_head=1,
num_reduction_layers=2,
skip_reduction_layer_input=1,
data_format=data_format or 'NHWC',
total_training_steps=total_steps or 250000,
)
# Notes for training the mobile NASNet ImageNet model
# -------------------------------------
# batch size (per replica): 32
# learning rate: 0.04 * 50
# learning rate scaling factor: 0.97
# num epochs per decay: 2.4
# sync sgd with 50 replicas
# auxiliary head weighting: 0.4
# label smoothing: 0.1
# clip global norm of all gradients by 10
def _mobile_imagenet_config(data_format=None, total_steps=None):
return tf.contrib.training.HParams(
stem_multiplier=1.0,
dense_dropout_keep_prob=0.5,
num_cells=12,
filter_scaling_rate=2.0,
drop_path_keep_prob=1.0,
num_conv_filters=44,
use_aux_head=1,
num_reduction_layers=2,
skip_reduction_layer_input=0,
data_format=data_format or 'NHWC',
total_training_steps=total_steps or 250000,
)
def nasnet_cifar_arg_scope(weight_decay=5e-4,
batch_norm_decay=0.9,
batch_norm_epsilon=1e-5):
"""Defines the default arg scope for the NASNet-A Cifar model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Cifar Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
weights_initializer = tf.contrib.layers.variance_scaling_initializer(
mode='FAN_OUT')
with arg_scope(
[slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected], activation_fn=None, scope='FC'):
with arg_scope(
[slim.conv2d, slim.separable_conv2d],
activation_fn=None,
biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def nasnet_mobile_arg_scope(weight_decay=4e-5,
batch_norm_decay=0.9997,
batch_norm_epsilon=1e-3):
"""Defines the default arg scope for the NASNet-A Mobile ImageNet model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Mobile Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
weights_initializer = tf.contrib.layers.variance_scaling_initializer(
mode='FAN_OUT')
with arg_scope(
[slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected], activation_fn=None, scope='FC'):
with arg_scope(
[slim.conv2d, slim.separable_conv2d],
activation_fn=None,
biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def nasnet_large_arg_scope(weight_decay=5e-5,
batch_norm_decay=0.9997,
batch_norm_epsilon=1e-3):
"""Defines the default arg scope for the NASNet-A Large ImageNet model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Large Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
weights_initializer = tf.contrib.layers.variance_scaling_initializer(
mode='FAN_OUT')
with arg_scope(
[slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected], activation_fn=None, scope='FC'):
with arg_scope(
[slim.conv2d, slim.separable_conv2d],
activation_fn=None,
biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def _build_aux_head(net, end_points, num_classes, hparams, scope):
"""Auxiliary head used for all models across all datasets."""
with tf.variable_scope(scope):
aux_logits = tf.identity(net)
with tf.variable_scope('aux_logits'):
aux_logits = slim.avg_pool2d(
aux_logits, [5, 5], stride=3, padding='VALID')
aux_logits = slim.conv2d(aux_logits, 128, [1, 1], scope='proj')
aux_logits = slim.batch_norm(aux_logits, scope='aux_bn0')
aux_logits = tf.nn.relu(aux_logits)
# Shape of feature map before the final layer.
shape = aux_logits.shape
if hparams.data_format == 'NHWC':
shape = shape[1:3]
else:
shape = shape[2:4]
aux_logits = slim.conv2d(aux_logits, 768, shape, padding='VALID')
aux_logits = slim.batch_norm(aux_logits, scope='aux_bn1')
aux_logits = tf.nn.relu(aux_logits)
aux_logits = tf.contrib.layers.flatten(aux_logits)
aux_logits = slim.fully_connected(aux_logits, num_classes)
end_points['AuxLogits'] = aux_logits
def _imagenet_stem(inputs, hparams, stem_cell):
"""Stem used for models trained on ImageNet."""
num_stem_cells = 2
# 149 x 149 x 32
num_stem_filters = int(32 * hparams.stem_multiplier)
net = slim.conv2d(
inputs,
num_stem_filters, [3, 3],
stride=2,
scope='conv0',
padding='VALID')
net = slim.batch_norm(net, scope='conv0_bn')
# Run the reduction cells
cell_outputs = [None, net]
filter_scaling = 1.0 / (hparams.filter_scaling_rate**num_stem_cells)
for cell_num in range(num_stem_cells):
net = stem_cell(
net,
scope='cell_stem_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=2,
prev_layer=cell_outputs[-2],
cell_num=cell_num)
cell_outputs.append(net)
filter_scaling *= hparams.filter_scaling_rate
return net, cell_outputs
def _cifar_stem(inputs, hparams):
"""Stem used for models trained on Cifar."""
num_stem_filters = int(hparams.num_conv_filters * hparams.stem_multiplier)
net = slim.conv2d(inputs, num_stem_filters, 3, scope='l1_stem_3x3')
net = slim.batch_norm(net, scope='l1_stem_bn')
return net, [None, net]
def build_nasnet_cifar(images,
num_classes=None,
is_training=True,
data_format=None,
total_steps=None):
"""Build NASNet model for the Cifar Dataset."""
hparams = _cifar_config(
is_training=is_training, data_format=data_format, total_steps=total_steps)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps)
with arg_scope(
[slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope(
[
slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm,
slim.separable_conv2d, nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim
],
data_format=hparams.data_format):
return _build_nasnet_base(
images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='cifar')
build_nasnet_cifar.default_image_size = 32
def build_nasnet_mobile(images,
num_classes=None,
is_training=True,
data_format=None,
total_steps=None,
final_endpoint=None):
"""Build NASNet Mobile model for the ImageNet Dataset."""
hparams = _mobile_imagenet_config(
data_format=data_format, total_steps=total_steps)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
# If ImageNet, then add an additional two for the stem cells
total_num_cells += 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps)
with arg_scope(
[slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope(
[
slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm,
slim.separable_conv2d, nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim
],
data_format=hparams.data_format):
return _build_nasnet_base(
images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='imagenet',
final_endpoint=final_endpoint)
build_nasnet_mobile.default_image_size = 224
def build_nasnet_large(images,
num_classes=None,
is_training=True,
data_format=None,
total_steps=None,
final_endpoint=None):
"""Build NASNet Large model for the ImageNet Dataset."""
hparams = _large_imagenet_config(
is_training=is_training, data_format=data_format, total_steps=total_steps)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
# If ImageNet, then add an additional two for the stem cells
total_num_cells += 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps)
with arg_scope(
[slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope(
[
slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm,
slim.separable_conv2d, nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim
],
data_format=hparams.data_format):
return _build_nasnet_base(
images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='imagenet',
final_endpoint=final_endpoint)
build_nasnet_large.default_image_size = 331
def _build_nasnet_base(images,
normal_cell,
reduction_cell,
num_classes,
hparams,
is_training,
stem_type,
final_endpoint=None):
"""Constructs a NASNet image model."""
end_points = {}
def add_and_check_endpoint(endpoint_name, net):
end_points[endpoint_name] = net
return final_endpoint and (endpoint_name == final_endpoint)
# Find where to place the reduction cells or stride normal cells
reduction_indices = nasnet_utils.calc_reduction_layers(
hparams.num_cells, hparams.num_reduction_layers)
stem_cell = reduction_cell
if stem_type == 'imagenet':
stem = lambda: _imagenet_stem(images, hparams, stem_cell)
elif stem_type == 'cifar':
stem = lambda: _cifar_stem(images, hparams)
else:
raise ValueError('Unknown stem_type: ', stem_type)
net, cell_outputs = stem()
if add_and_check_endpoint('Stem', net):
return net, end_points
# Setup for building in the auxiliary head.
aux_head_cell_idxes = []
if len(reduction_indices) >= 2:
aux_head_cell_idxes.append(reduction_indices[1] - 1)
# Run the cells
filter_scaling = 1.0
# true_cell_num accounts for the stem cells
true_cell_num = 2 if stem_type == 'imagenet' else 0
for cell_num in range(hparams.num_cells):
stride = 1
if hparams.skip_reduction_layer_input:
prev_layer = cell_outputs[-2]
if cell_num in reduction_indices:
filter_scaling *= hparams.filter_scaling_rate
net = reduction_cell(
net,
scope='reduction_cell_{}'.format(reduction_indices.index(cell_num)),
filter_scaling=filter_scaling,
stride=2,
prev_layer=cell_outputs[-2],
cell_num=true_cell_num)
if add_and_check_endpoint('Reduction_Cell_{}'.format(
reduction_indices.index(cell_num)), net):
return net, end_points
true_cell_num += 1
cell_outputs.append(net)
if not hparams.skip_reduction_layer_input:
prev_layer = cell_outputs[-2]
net = normal_cell(
net,
scope='cell_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=stride,
prev_layer=prev_layer,
cell_num=true_cell_num)
if add_and_check_endpoint('Cell_{}'.format(cell_num), net):
return net, end_points
true_cell_num += 1
if (hparams.use_aux_head and cell_num in aux_head_cell_idxes and
num_classes and is_training):
aux_net = tf.nn.relu(net)
_build_aux_head(
aux_net,
end_points,
num_classes,
hparams,
scope='aux_{}'.format(cell_num))
cell_outputs.append(net)
# Final softmax layer
with tf.variable_scope('final_layer'):
net = tf.nn.relu(net)
net = nasnet_utils.global_avg_pool(net)
if add_and_check_endpoint('global_pool', net) or num_classes is None:
return net, end_points
net = slim.dropout(net, hparams.dense_dropout_keep_prob, scope='dropout')
logits = slim.fully_connected(net, num_classes)
if add_and_check_endpoint('Logits', logits):
return net, end_points
predictions = tf.nn.softmax(logits, name='predictions')
if add_and_check_endpoint('Predictions', predictions):
return net, end_points
return logits, end_points
class NasnetModel(model.Model):
"""Nasnet model configuration."""
def __init__(self):
super(NasnetModel, self).__init__('nasnet', 224, 32, 0.005)
def add_inference(self, cnn):
tf.logging.info('input_image_shape: {}'.format(cnn.top_layer.shape))
cnn.top_layer, _ = build_nasnet_mobile(
images=cnn.top_layer,
is_training=cnn.phase_train,
data_format=cnn.data_format)
cnn.top_size = cnn.top_layer.shape[-1].value
class NasnetLargeModel(model.Model):
"""Nasnet model configuration."""
def __init__(self):
super(NasnetLargeModel, self).__init__('nasnet', 331, 16, 0.005)
def add_inference(self, cnn):
tf.logging.info('input_image_shape: {}'.format(cnn.top_layer.shape))
cnn.top_layer, _ = build_nasnet_large(
images=cnn.top_layer,
is_training=cnn.phase_train,
data_format=cnn.data_format)
cnn.top_size = cnn.top_layer.shape[-1].value
class NasnetCifarModel(model.Model):
"""Nasnet cifar model configuration."""
def __init__(self):
super(NasnetCifarModel, self).__init__('nasnet', 32, 32, 0.025)
def add_inference(self, cnn):
tf.logging.info('input_image_shape: {}'.format(cnn.top_layer.shape))
cnn.top_layer, _ = build_nasnet_cifar(
images=cnn.top_layer,
is_training=cnn.phase_train,
data_format=cnn.data_format)
cnn.top_size = cnn.top_layer.shape[-1].value
| 20,624
| 34.683391
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/overfeat_model.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Overfeat model configuration.
References:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus,
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
"""
from models import model
class OverfeatModel(model.Model):
def __init__(self):
super(OverfeatModel, self).__init__('overfeat', 231, 32, 0.005)
def add_inference(self, cnn):
# Note: VALID requires padding the images by 3 in width and height
cnn.conv(96, 11, 11, 4, 4, mode='VALID')
cnn.mpool(2, 2)
cnn.conv(256, 5, 5, 1, 1, mode='VALID')
cnn.mpool(2, 2)
cnn.conv(512, 3, 3)
cnn.conv(1024, 3, 3)
cnn.conv(1024, 3, 3)
cnn.mpool(2, 2)
cnn.reshape([-1, 1024 * 6 * 6])
cnn.affine(3072)
cnn.dropout()
cnn.affine(4096)
cnn.dropout()
| 1,554
| 30.734694
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/vgg_model.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Vgg model configuration.
Includes multiple models: vgg11, vgg16, vgg19, corresponding to
model A, D, and E in Table 1 of [1].
References:
[1] Simonyan, Karen, Andrew Zisserman
Very Deep Convolutional Networks for Large-Scale Image Recognition
arXiv:1409.1556 (2014)
"""
from six.moves import xrange # pylint: disable=redefined-builtin
from models import model
def _construct_vgg(cnn, num_conv_layers):
"""Build vgg architecture from blocks."""
assert len(num_conv_layers) == 5
for _ in xrange(num_conv_layers[0]):
cnn.conv(64, 3, 3)
cnn.mpool(2, 2)
for _ in xrange(num_conv_layers[1]):
cnn.conv(128, 3, 3)
cnn.mpool(2, 2)
for _ in xrange(num_conv_layers[2]):
cnn.conv(256, 3, 3)
cnn.mpool(2, 2)
for _ in xrange(num_conv_layers[3]):
cnn.conv(512, 3, 3)
cnn.mpool(2, 2)
for _ in xrange(num_conv_layers[4]):
cnn.conv(512, 3, 3)
cnn.mpool(2, 2)
cnn.reshape([-1, 512 * 7 * 7])
cnn.affine(4096)
cnn.dropout()
cnn.affine(4096)
cnn.dropout()
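# Added note (not part of the original file): the block counts reproduce the
# conv-layer totals of Table 1 in [1]; e.g. [2, 2, 3, 3, 3] sums to 13 conv
# layers which, with the two affine layers above plus the final logits layer
# the benchmark harness appends, gives the 16 weight layers of VGG-16.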
class Vgg11Model(model.Model):
def __init__(self):
super(Vgg11Model, self).__init__('vgg11', 224, 64, 0.005)
def add_inference(self, cnn):
_construct_vgg(cnn, [1, 1, 2, 2, 2])
class Vgg16Model(model.Model):
def __init__(self):
super(Vgg16Model, self).__init__('vgg16', 224, 64, 0.005)
def add_inference(self, cnn):
_construct_vgg(cnn, [2, 2, 3, 3, 3])
class Vgg19Model(model.Model):
def __init__(self):
super(Vgg19Model, self).__init__('vgg19', 224, 64, 0.005)
def add_inference(self, cnn):
_construct_vgg(cnn, [2, 2, 4, 4, 4])
| 2,263
| 27.3
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/densenet_model.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Densenet model configuration.
References:
"Densely Connected Convolutional Networks": https://arxiv.org/pdf/1608.06993
"""
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from models import model as model_lib
class DensenetCifar10Model(model_lib.Model):
"""Densenet cnn network configuration."""
def __init__(self, model, layer_counts, growth_rate):
self.growth_rate = growth_rate
super(DensenetCifar10Model, self).__init__(model, 32, 64, 0.1,
layer_counts=layer_counts)
self.batch_norm_config = {'decay': 0.9, 'epsilon': 1e-5, 'scale': True}
def dense_block(self, cnn, growth_rate):
input_layer = cnn.top_layer
c = cnn.batch_norm(input_layer, **self.batch_norm_config)
c = tf.nn.relu(c)
c = cnn.conv(growth_rate, 3, 3, 1, 1, stddev=np.sqrt(2.0/9/growth_rate),
activation=None, input_layer=c)
channel_index = 3 if cnn.channel_pos == 'channels_last' else 1
cnn.top_layer = tf.concat([input_layer, c], channel_index)
cnn.top_size += growth_rate
def transition_layer(self, cnn):
in_size = cnn.top_size
cnn.batch_norm(**self.batch_norm_config)
cnn.top_layer = tf.nn.relu(cnn.top_layer)
cnn.conv(in_size, 1, 1, 1, 1, stddev=np.sqrt(2.0/9/in_size))
cnn.apool(2, 2, 2, 2)
def add_inference(self, cnn):
if self.layer_counts is None:
raise ValueError('Layer counts not specified for %s' % self.get_model())
if self.growth_rate is None:
raise ValueError('Growth rate not specified for %s' % self.get_model())
cnn.conv(16, 3, 3, 1, 1, activation=None)
# Block 1
for _ in xrange(self.layer_counts[0]):
self.dense_block(cnn, self.growth_rate)
self.transition_layer(cnn)
# Block 2
for _ in xrange(self.layer_counts[1]):
self.dense_block(cnn, self.growth_rate)
self.transition_layer(cnn)
# Block 3
for _ in xrange(self.layer_counts[2]):
self.dense_block(cnn, self.growth_rate)
cnn.batch_norm(**self.batch_norm_config)
cnn.top_layer = tf.nn.relu(cnn.top_layer)
channel_index = 3 if cnn.channel_pos == 'channels_last' else 1
cnn.top_size = cnn.top_layer.get_shape().as_list()[channel_index]
cnn.spatial_mean()
def get_learning_rate(self, global_step, batch_size):
num_batches_per_epoch = int(50000 / batch_size)
boundaries = num_batches_per_epoch * np.array([150, 225, 300],
dtype=np.int64)
boundaries = [x for x in boundaries]
values = [0.1, 0.01, 0.001, 0.0001]
return tf.train.piecewise_constant(global_step, boundaries, values)
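# Added worked example (not part of the original file): with batch_size=64 on
# the 50000-image CIFAR-10 train set, num_batches_per_epoch = 781, so the rate
# steps 0.1 -> 0.01 -> 0.001 -> 0.0001 at global steps 117150, 175725 and
# 234300 (epochs 150, 225 and 300).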
def create_densenet40_k12_model():
return DensenetCifar10Model('densenet40_k12', (12, 12, 12), 12)
def create_densenet100_k12_model():
return DensenetCifar10Model('densenet100_k12', (32, 32, 32), 12)
def create_densenet100_k24_model():
return DensenetCifar10Model('densenet100_k24', (32, 32, 32), 24)
| 3,693
| 37.479167
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/model_config.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model configurations for CNN benchmarks.
"""
from models import alexnet_model
from models import densenet_model
from models import googlenet_model
from models import inception_model
from models import lenet_model
from models import mobilenet_v2
from models import nasnet_model
from models import official_resnet_model
from models import overfeat_model
from models import resnet_model
from models import trivial_model
from models import vgg_model
from models import squeezenet_model
_model_name_to_imagenet_model = {
'vgg11': vgg_model.Vgg11Model,
'vgg16': vgg_model.Vgg16Model,
'vgg19': vgg_model.Vgg19Model,
'lenet': lenet_model.Lenet5Model,
'googlenet': googlenet_model.GooglenetModel,
'overfeat': overfeat_model.OverfeatModel,
'alexnet': alexnet_model.AlexnetModel,
'trivial': trivial_model.TrivialModel,
'inception3': inception_model.Inceptionv3Model,
'inception4': inception_model.Inceptionv4Model,
'official_resnet18_v2':
lambda: official_resnet_model.ImagenetResnetModel(18),
'official_resnet34_v2':
lambda: official_resnet_model.ImagenetResnetModel(34),
'official_resnet50_v2':
lambda: official_resnet_model.ImagenetResnetModel(50),
'official_resnet101_v2':
lambda: official_resnet_model.ImagenetResnetModel(101),
'official_resnet152_v2':
lambda: official_resnet_model.ImagenetResnetModel(152),
'official_resnet200_v2':
lambda: official_resnet_model.ImagenetResnetModel(200),
'official_resnet18':
lambda: official_resnet_model.ImagenetResnetModel(18, version=1),
'official_resnet34':
lambda: official_resnet_model.ImagenetResnetModel(34, version=1),
'official_resnet50':
lambda: official_resnet_model.ImagenetResnetModel(50, version=1),
'official_resnet101':
lambda: official_resnet_model.ImagenetResnetModel(101, version=1),
'official_resnet152':
lambda: official_resnet_model.ImagenetResnetModel(152, version=1),
'official_resnet200':
lambda: official_resnet_model.ImagenetResnetModel(200, version=1),
'resnet50': resnet_model.create_resnet50_model,
'resnet50_v2': resnet_model.create_resnet50_v2_model,
'resnet101': resnet_model.create_resnet101_model,
'resnet101_v2': resnet_model.create_resnet101_v2_model,
'resnet152': resnet_model.create_resnet152_model,
'resnet152_v2': resnet_model.create_resnet152_v2_model,
'nasnet': nasnet_model.NasnetModel,
'nasnetlarge': nasnet_model.NasnetLargeModel,
'mobilenet': mobilenet_v2.MobilenetModel,
    'squeezenet': squeezenet_model.SqueezenetModel,
}
_model_name_to_cifar_model = {
'alexnet': alexnet_model.AlexnetCifar10Model,
'resnet20': resnet_model.create_resnet20_cifar_model,
'resnet20_v2': resnet_model.create_resnet20_v2_cifar_model,
'resnet32': resnet_model.create_resnet32_cifar_model,
'resnet32_v2': resnet_model.create_resnet32_v2_cifar_model,
'resnet44': resnet_model.create_resnet44_cifar_model,
'resnet44_v2': resnet_model.create_resnet44_v2_cifar_model,
'resnet56': resnet_model.create_resnet56_cifar_model,
'resnet56_v2': resnet_model.create_resnet56_v2_cifar_model,
'resnet110': resnet_model.create_resnet110_cifar_model,
'resnet110_v2': resnet_model.create_resnet110_v2_cifar_model,
'trivial': trivial_model.TrivialCifar10Model,
'densenet40_k12': densenet_model.create_densenet40_k12_model,
'densenet100_k12': densenet_model.create_densenet100_k12_model,
'densenet100_k24': densenet_model.create_densenet100_k24_model,
'nasnet': nasnet_model.NasnetCifarModel,
'squeezenet': squeezenet_model.SqueezenetCifar10Model,
}
def _get_model_map(dataset_name):
if 'cifar10' == dataset_name:
return _model_name_to_cifar_model
elif dataset_name in ('imagenet', 'synthetic'):
return _model_name_to_imagenet_model
else:
raise ValueError('Invalid dataset name: %s' % dataset_name)
def get_model_config(model_name, dataset):
"""Map model name to model network configuration."""
model_map = _get_model_map(dataset.name)
if model_name not in model_map:
raise ValueError('Invalid model name \'%s\' for dataset \'%s\'' %
(model_name, dataset.name))
else:
return model_map[model_name]()
def register_model(model_name, dataset_name, model_func):
"""Register a new model that can be obtained with `get_model_config`."""
model_map = _get_model_map(dataset_name)
if model_name in model_map:
raise ValueError('Model "%s" is already registered for dataset "%s"' %
(model_name, dataset_name))
model_map[model_name] = model_func
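# Added usage sketch (not part of the original file); 'mymodel', MyModel and
# imagenet_dataset are hypothetical:
#   model_config.register_model('mymodel', 'imagenet', MyModel)
#   model = model_config.get_model_config('mymodel', imagenet_dataset)
# Re-registering the same (name, dataset) pair raises ValueError.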
| 5,300
| 40.093023
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/nasnet_utils.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A custom module for some common operations used by NASNet.
Functions exposed in this file:
- calc_reduction_layers
- get_channel_index
- get_channel_dim
- global_avg_pool
- factorized_reduction
- drop_path
Classes exposed in this file:
- NasNetABaseCell
- NasNetANormalCell
- NasNetAReductionCell
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
arg_scope = tf.contrib.framework.arg_scope
slim = tf.contrib.slim
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
INVALID = 'null'
def calc_reduction_layers(num_cells, num_reduction_layers):
"""Figure out what layers should have reductions."""
reduction_layers = []
for pool_num in range(1, num_reduction_layers + 1):
layer_num = (float(pool_num) / (num_reduction_layers + 1)) * num_cells
layer_num = int(layer_num)
reduction_layers.append(layer_num)
return reduction_layers
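# Worked example: reduction cells are spaced evenly through the network, so
# an 18-cell network with 2 reduction layers places them at one-third and
# two-thirds of the depth:
#
#   calc_reduction_layers(18, 2)  # -> [6, 12]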
@tf.contrib.framework.add_arg_scope
def get_channel_index(data_format=INVALID):
assert data_format != INVALID
axis = 3 if data_format == 'NHWC' else 1
return axis
@tf.contrib.framework.add_arg_scope
def get_channel_dim(shape, data_format=INVALID):
assert data_format != INVALID
assert len(shape) == 4
if data_format == 'NHWC':
return int(shape[3])
elif data_format == 'NCHW':
return int(shape[1])
else:
    raise ValueError('Not a valid data_format: %s' % data_format)
@tf.contrib.framework.add_arg_scope
def global_avg_pool(x, data_format=INVALID):
"""Average pool away the height and width spatial dimensions of x."""
assert data_format != INVALID
assert data_format in ['NHWC', 'NCHW']
assert x.shape.ndims == 4
if data_format == 'NHWC':
return tf.reduce_mean(x, [1, 2])
else:
return tf.reduce_mean(x, [2, 3])
@tf.contrib.framework.add_arg_scope
def factorized_reduction(net, output_filters, stride, data_format=INVALID):
"""Reduces the shape of net without information loss due to striding."""
assert output_filters % 2 == 0, (
'Need even number of filters when using this factorized reduction.')
assert data_format != INVALID
if stride == 1:
net = slim.conv2d(net, output_filters, 1, scope='path_conv')
net = slim.batch_norm(net, scope='path_bn')
return net
if data_format == 'NHWC':
stride_spec = [1, stride, stride, 1]
else:
stride_spec = [1, 1, stride, stride]
# Skip path 1
path1 = tf.nn.avg_pool(
net, [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format)
path1 = slim.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv')
# Skip path 2
# First pad with 0's on the right and bottom, then shift the filter to
# include those 0's that were added.
if data_format == 'NHWC':
pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
path2 = tf.pad(net, pad_arr)[:, 1:, 1:, :]
concat_axis = 3
else:
pad_arr = [[0, 0], [0, 0], [0, 1], [0, 1]]
path2 = tf.pad(net, pad_arr)[:, :, 1:, 1:]
concat_axis = 1
path2 = tf.nn.avg_pool(
path2, [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format)
path2 = slim.conv2d(path2, int(output_filters / 2), 1, scope='path2_conv')
# Concat and apply BN
final_path = tf.concat(values=[path1, path2], axis=concat_axis)
final_path = slim.batch_norm(final_path, scope='final_path_bn')
return final_path
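# Shape sketch (assuming NHWC input of [8, 32, 32, 64], output_filters=128,
# stride=2): each skip path average-pools with stride 2 and projects to
# 128 / 2 = 64 channels, the second path on an input shifted by one pixel so
# the two paths sample complementary pixels; the concatenated result is
# [8, 16, 16, 128].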
@tf.contrib.framework.add_arg_scope
def drop_path(net, keep_prob, is_training=True):
"""Drops out a whole example hiddenstate with the specified probability."""
if is_training:
batch_size = tf.shape(net)[0]
noise_shape = [batch_size, 1, 1, 1]
keep_prob = tf.cast(keep_prob, dtype=net.dtype)
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape, dtype=net.dtype)
binary_tensor = tf.floor(random_tensor)
net = tf.div(net, keep_prob) * binary_tensor
return net
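# Semantics sketch: with keep_prob=0.9, random_tensor is 0.9 + U[0, 1), so
# tf.floor yields 1 with probability 0.9 and 0 otherwise, independently per
# example in the batch. Survivors are scaled by 1 / 0.9, keeping the
# expected activation unchanged: E[net / keep_prob * binary_tensor] = net.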
def _operation_to_filter_shape(operation):
splitted_operation = operation.split('x')
filter_shape = int(splitted_operation[0][-1])
assert filter_shape == int(
splitted_operation[1][0]), 'Rectangular filters not supported.'
return filter_shape
def _operation_to_num_layers(operation):
splitted_operation = operation.split('_')
if 'x' in splitted_operation[-1]:
return 1
return int(splitted_operation[-1])
def _operation_to_info(operation):
"""Takes in operation name and returns meta information.
An example would be 'separable_3x3_4' -> (3, 4).
Args:
operation: String that corresponds to convolution operation.
Returns:
Tuple of (filter shape, num layers).
"""
num_layers = _operation_to_num_layers(operation)
filter_shape = _operation_to_filter_shape(operation)
return num_layers, filter_shape
def _stacked_separable_conv(net, stride, operation, filter_size):
"""Takes in an operations and parses it to the correct sep operation."""
num_layers, kernel_size = _operation_to_info(operation)
net_type = net.dtype
net = tf.cast(net, tf.float32) if net_type == tf.float16 else net
for layer_num in range(num_layers - 1):
net = tf.nn.relu(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),
stride=stride)
net = slim.batch_norm(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))
stride = 1
net = tf.nn.relu(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, num_layers),
stride=stride)
net = slim.batch_norm(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, num_layers))
net = tf.cast(net, net_type)
return net
def _operation_to_pooling_type(operation):
"""Takes in the operation string and returns the pooling type."""
splitted_operation = operation.split('_')
return splitted_operation[0]
def _operation_to_pooling_shape(operation):
"""Takes in the operation string and returns the pooling kernel shape."""
splitted_operation = operation.split('_')
shape = splitted_operation[-1]
assert 'x' in shape
filter_height, filter_width = shape.split('x')
assert filter_height == filter_width
return int(filter_height)
def _operation_to_pooling_info(operation):
"""Parses the pooling operation string to return its type and shape."""
pooling_type = _operation_to_pooling_type(operation)
pooling_shape = _operation_to_pooling_shape(operation)
return pooling_type, pooling_shape
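# Parsing examples:
#
#   _operation_to_pooling_info('avg_pool_3x3')  # -> ('avg', 3)
#   _operation_to_pooling_info('max_pool_3x3')  # -> ('max', 3)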
def _pooling(net, stride, operation):
"""Parses operation and performs the correct pooling operation on net."""
padding = 'SAME'
pooling_type, pooling_shape = _operation_to_pooling_info(operation)
if pooling_type == 'avg':
net = slim.avg_pool2d(net, pooling_shape, stride=stride, padding=padding)
elif pooling_type == 'max':
net = slim.max_pool2d(net, pooling_shape, stride=stride, padding=padding)
else:
    raise NotImplementedError('Unimplemented pooling type: %s' % pooling_type)
return net
class NasNetABaseCell(object):
"""NASNet Cell class that is used as a 'layer' in image architectures.
Args:
num_conv_filters: The number of filters for each convolution operation.
operations: List of operations that are performed in the NASNet Cell in
order.
used_hiddenstates: Binary array that signals if the hiddenstate was used
within the cell. This is used to determine what outputs of the cell
should be concatenated together.
hiddenstate_indices: Determines what hiddenstates should be combined
together with the specified operations to create the NASNet cell.
"""
def __init__(self, num_conv_filters, operations, used_hiddenstates,
hiddenstate_indices, drop_path_keep_prob, total_num_cells,
total_training_steps):
self._num_conv_filters = num_conv_filters
self._operations = operations
self._used_hiddenstates = used_hiddenstates
self._hiddenstate_indices = hiddenstate_indices
self._drop_path_keep_prob = drop_path_keep_prob
self._total_num_cells = total_num_cells
self._total_training_steps = total_training_steps
def _reduce_prev_layer(self, prev_layer, curr_layer):
"""Matches dimension of prev_layer to the curr_layer."""
    # Set the prev layer to the current layer if it is None.
if prev_layer is None:
return curr_layer
curr_num_filters = self._filter_size
prev_num_filters = get_channel_dim(prev_layer.shape)
curr_filter_shape = int(curr_layer.shape[2])
prev_filter_shape = int(prev_layer.shape[2])
if curr_filter_shape != prev_filter_shape:
prev_layer = tf.nn.relu(prev_layer)
prev_layer = factorized_reduction(prev_layer, curr_num_filters, stride=2)
elif curr_num_filters != prev_num_filters:
prev_layer = tf.nn.relu(prev_layer)
prev_layer = slim.conv2d(
prev_layer, curr_num_filters, 1, scope='prev_1x1')
prev_layer = slim.batch_norm(prev_layer, scope='prev_bn')
return prev_layer
def _cell_base(self, net, prev_layer):
"""Runs the beginning of the conv cell before the predicted ops are run."""
num_filters = self._filter_size
    # Make sure the previous layer's shape and depth match the current layer.
prev_layer = self._reduce_prev_layer(prev_layer, net)
net = tf.nn.relu(net)
net = slim.conv2d(net, num_filters, 1, scope='1x1')
net = slim.batch_norm(net, scope='beginning_bn')
split_axis = get_channel_index()
net = tf.split(axis=split_axis, num_or_size_splits=1, value=net)
for split in net:
      assert int(split.shape[split_axis]) == int(
          self._num_conv_filters * self._filter_scaling)
net.append(prev_layer)
return net
def __call__(self,
net,
scope=None,
filter_scaling=1,
stride=1,
prev_layer=None,
cell_num=-1):
"""Runs the conv cell."""
self._cell_num = cell_num
self._filter_scaling = filter_scaling
self._filter_size = int(self._num_conv_filters * filter_scaling)
i = 0
with tf.variable_scope(scope):
net = self._cell_base(net, prev_layer)
for iteration in range(5):
with tf.variable_scope('comb_iter_{}'.format(iteration)):
left_hiddenstate_idx, right_hiddenstate_idx = (
self._hiddenstate_indices[i], self._hiddenstate_indices[i + 1])
original_input_left = left_hiddenstate_idx < 2
original_input_right = right_hiddenstate_idx < 2
h1 = net[left_hiddenstate_idx]
h2 = net[right_hiddenstate_idx]
operation_left = self._operations[i]
operation_right = self._operations[i + 1]
i += 2
# Apply conv operations
with tf.variable_scope('left'):
h1 = self._apply_conv_operation(h1, operation_left, stride,
original_input_left)
with tf.variable_scope('right'):
h2 = self._apply_conv_operation(h2, operation_right, stride,
original_input_right)
# Combine hidden states using 'add'.
with tf.variable_scope('combine'):
h = h1 + h2
# Add hiddenstate to the list of hiddenstates we can choose from
net.append(h)
with tf.variable_scope('cell_output'):
net = self._combine_unused_states(net)
return net
def _apply_conv_operation(self, net, operation, stride,
is_from_original_input):
"""Applies the predicted conv operation to net."""
    # Don't stride if this is not one of the original hiddenstates.
if stride > 1 and not is_from_original_input:
stride = 1
input_filters = get_channel_dim(net.shape)
filter_size = self._filter_size
if 'separable' in operation:
net = _stacked_separable_conv(net, stride, operation, filter_size)
elif operation in ['none']:
# Check if a stride is needed, then use a strided 1x1 here
if stride > 1 or (input_filters != filter_size):
net = tf.nn.relu(net)
net = slim.conv2d(net, filter_size, 1, stride=stride, scope='1x1')
net = slim.batch_norm(net, scope='bn_1')
elif 'pool' in operation:
net = _pooling(net, stride, operation)
if input_filters != filter_size:
net = slim.conv2d(net, filter_size, 1, stride=1, scope='1x1')
net = slim.batch_norm(net, scope='bn_1')
else:
raise ValueError('Unimplemented operation', operation)
if operation != 'none':
net = self._apply_drop_path(net)
return net
def _combine_unused_states(self, net):
"""Concatenate the unused hidden states of the cell."""
used_hiddenstates = self._used_hiddenstates
final_height = int(net[-1].shape[2])
final_num_filters = get_channel_dim(net[-1].shape)
assert len(used_hiddenstates) == len(net)
for idx, used_h in enumerate(used_hiddenstates):
curr_height = int(net[idx].shape[2])
curr_num_filters = get_channel_dim(net[idx].shape)
# Determine if a reduction should be applied to make the number of
# filters match.
should_reduce = final_num_filters != curr_num_filters
should_reduce = (final_height != curr_height) or should_reduce
should_reduce = should_reduce and not used_h
if should_reduce:
stride = 2 if final_height != curr_height else 1
with tf.variable_scope('reduction_{}'.format(idx)):
net[idx] = factorized_reduction(net[idx], final_num_filters, stride)
states_to_combine = ([
h for h, is_used in zip(net, used_hiddenstates) if not is_used
])
# Return the concat of all the states
concat_axis = get_channel_index()
net = tf.concat(values=states_to_combine, axis=concat_axis)
return net
@tf.contrib.framework.add_arg_scope # No public API. For internal use only.
def _apply_drop_path(self,
net,
current_step=None,
use_summaries=True,
drop_connect_version='v3'):
"""Apply drop_path regularization.
Args:
net: the Tensor that gets drop_path regularization applied.
current_step: a float32 Tensor with the current global_step value,
to be divided by hparams.total_training_steps. Usually None, which
        defaults to tf.train.get_or_create_global_step() properly cast.
use_summaries: a Python boolean. If set to False, no summaries are output.
drop_connect_version: one of 'v1', 'v2', 'v3', controlling whether
the dropout rate is scaled by current_step (v1), layer (v2), or
both (v3, the default).
Returns:
The dropped-out value of `net`.
"""
drop_path_keep_prob = self._drop_path_keep_prob
if drop_path_keep_prob < 1.0:
assert drop_connect_version in ['v1', 'v2', 'v3']
if drop_connect_version in ['v2', 'v3']:
# Scale keep prob by layer number
assert self._cell_num != -1
        # self._total_num_cells already counts the 2 reduction cells.
num_cells = self._total_num_cells
layer_ratio = (self._cell_num + 1) / float(num_cells)
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('layer_ratio', layer_ratio)
drop_path_keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob)
if drop_connect_version in ['v1', 'v3']:
# Decrease the keep probability over time
        if current_step is None:
current_step = tf.cast(tf.train.get_or_create_global_step(),
tf.float32)
drop_path_burn_in_steps = self._total_training_steps
current_ratio = current_step / drop_path_burn_in_steps
current_ratio = tf.minimum(1.0, current_ratio)
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('current_ratio', current_ratio)
drop_path_keep_prob = (1 - current_ratio * (1 - drop_path_keep_prob))
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('drop_path_keep_prob', drop_path_keep_prob)
net = drop_path(net, drop_path_keep_prob)
return net
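  # Worked example of the default 'v3' schedule: with drop_path_keep_prob=0.6
  # for cell 5 (0-indexed) of 12 total cells, halfway through training:
  #   after layer scaling: 1 - (6 / 12) * (1 - 0.6) = 0.8
  #   after step scaling:  1 - 0.5 * (1 - 0.8) = 0.9
  # so earlier cells and earlier training steps drop less aggressively.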
class NasNetANormalCell(NasNetABaseCell):
"""NASNetA Normal Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps):
operations = [
'separable_5x5_2', 'separable_3x3_2', 'separable_5x5_2',
'separable_3x3_2', 'avg_pool_3x3', 'none', 'avg_pool_3x3',
'avg_pool_3x3', 'separable_3x3_2', 'none'
]
used_hiddenstates = [1, 0, 0, 0, 0, 0, 0]
hiddenstate_indices = [0, 1, 1, 1, 0, 1, 1, 1, 0, 0]
super(NasNetANormalCell, self).__init__(
num_conv_filters, operations, used_hiddenstates, hiddenstate_indices,
drop_path_keep_prob, total_num_cells, total_training_steps)
class NasNetAReductionCell(NasNetABaseCell):
"""NASNetA Reduction Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps):
operations = [
'separable_5x5_2', 'separable_7x7_2', 'max_pool_3x3', 'separable_7x7_2',
'avg_pool_3x3', 'separable_5x5_2', 'none', 'avg_pool_3x3',
'separable_3x3_2', 'max_pool_3x3'
]
used_hiddenstates = [1, 1, 1, 0, 0, 0, 0]
hiddenstate_indices = [0, 1, 0, 1, 0, 1, 3, 2, 2, 0]
super(NasNetAReductionCell, self).__init__(
num_conv_filters, operations, used_hiddenstates, hiddenstate_indices,
drop_path_keep_prob, total_num_cells, total_training_steps)
| 18,310
| 36.21748
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/lenet_model.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lenet model configuration.
References:
LeCun, Yann, Leon Bottou, Yoshua Bengio, and Patrick Haffner
Gradient-based learning applied to document recognition
Proceedings of the IEEE (1998)
"""
import model
class Lenet5Model(model.Model):
def __init__(self):
super(Lenet5Model, self).__init__('lenet5', 28, 32, 0.005)
def add_inference(self, cnn):
# Note: This matches TF's MNIST tutorial model
cnn.conv(32, 5, 5)
cnn.mpool(2, 2)
cnn.conv(64, 5, 5)
cnn.mpool(2, 2)
cnn.reshape([-1, 64 * 7 * 7])
cnn.affine(512)
| 1,249
| 30.25
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/mobilenet_conv_blocks.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convolution blocks for mobilenet."""
import contextlib
import functools
import tensorflow as tf
slim = tf.contrib.slim
def _fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],
[pad_beg[1], pad_end[1]], [0, 0]])
return padded_inputs
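# Worked example: a 3x3 kernel at rate=1 has effective size 3, so pad_total
# is 2 and one zero row/column is added on each side; at rate=2 the
# effective size is 3 + 2 * 1 = 5, giving two on each side:
#
#   _fixed_padding(x, [3, 3])          # [N, H, W, C] -> [N, H+2, W+2, C]
#   _fixed_padding(x, [3, 3], rate=2)  # [N, H, W, C] -> [N, H+4, W+4, C]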
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
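# Worked examples: round to the nearest multiple of `divisor`, but never
# below `min_value` and never losing more than ~10% of the original value:
#
#   _make_divisible(33, 8)  # -> 32 (nearest multiple of 8)
#   _make_divisible(10, 8)  # -> 16 (8 would lose more than 10%)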
def _split_divisible(num, num_ways, divisible_by=8):
"""Evenly splits num, num_ways so each piece is a multiple of divisible_by."""
assert num % divisible_by == 0
assert num / num_ways >= divisible_by
  # Note: we want to round down; each split is then adjusted to match the total.
base = num // num_ways // divisible_by * divisible_by
result = []
accumulated = 0
for i in range(num_ways):
r = base
while accumulated + r < num * (i + 1) / num_ways:
r += divisible_by
result.append(r)
accumulated += r
assert accumulated == num
return result
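# Worked example: splitting 64 channels 3 ways with divisible_by=8 starts
# each split at the rounded-down base of 16 and grows splits in steps of 8
# until the running total catches up with the even split point:
#
#   _split_divisible(64, 3)  # -> [24, 24, 16]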
@contextlib.contextmanager
def _v1_compatible_scope_naming(scope):
  if scope is None:  # Create uniquely named separable blocks.
with tf.variable_scope(None, default_name='separable') as s, \
tf.name_scope(s.original_name_scope):
yield ''
else:
    # We use scope_depthwise, scope_pointwise for compatibility with V1
    # checkpoints, which provide numbered scopes.
scope += '_'
yield scope
@slim.add_arg_scope
def split_separable_conv2d(input_tensor,
num_outputs,
scope=None,
normalizer_fn=None,
stride=1,
rate=1,
endpoints=None,
use_explicit_padding=False):
"""Separable mobilenet V1 style convolution.
  Depthwise convolution, with default non-linearity, followed by a 1x1
  pointwise convolution. This is similar to slim.separable_conv2d, but
  differs in that it also applies batch normalization and non-linearity
  to the depthwise convolution. This matches the basic building block of
  the MobileNet paper (https://arxiv.org/abs/1704.04861).
Args:
input_tensor: input
num_outputs: number of outputs
    scope: optional name of the scope. Note: if provided, it will use
      scope_depthwise for the depthwise op and scope_pointwise for the
      pointwise op.
normalizer_fn: which normalizer function to use for depthwise/pointwise
stride: stride
rate: output rate (also known as dilation rate)
endpoints: optional, if provided, will export additional tensors to it.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
Returns:
    output tensor.
"""
with _v1_compatible_scope_naming(scope) as scope:
dw_scope = scope + 'depthwise'
endpoints = endpoints if endpoints is not None else {}
kernel_size = [3, 3]
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
input_tensor = _fixed_padding(input_tensor, kernel_size, rate)
net = slim.separable_conv2d(
input_tensor,
None,
kernel_size,
depth_multiplier=1,
stride=stride,
rate=rate,
normalizer_fn=normalizer_fn,
padding=padding,
scope=dw_scope)
endpoints[dw_scope] = net
pw_scope = scope + 'pointwise'
net = slim.conv2d(
net,
num_outputs, [1, 1],
stride=1,
normalizer_fn=normalizer_fn,
scope=pw_scope)
endpoints[pw_scope] = net
return net
def expand_input_by_factor(n, divisible_by=8):
return lambda num_inputs, **_: _make_divisible(num_inputs * n, divisible_by)
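# Example: the returned callable scales an input depth by `n` and rounds the
# result to a multiple of `divisible_by`:
#
#   expand_input_by_factor(6)(num_inputs=16)  # -> _make_divisible(96, 8) = 96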
@slim.add_arg_scope
def expanded_conv(input_tensor,
num_outputs,
expansion_size=expand_input_by_factor(6),
stride=1,
rate=1,
kernel_size=(3, 3),
residual=True,
normalizer_fn=None,
split_projection=1,
split_expansion=1,
expansion_transform=None,
depthwise_location='expansion',
depthwise_channel_multiplier=1,
endpoints=None,
use_explicit_padding=False,
padding='SAME',
scope=None):
"""Depthwise Convolution Block with expansion.
Builds a composite convolution that has the following structure
expansion (1x1) -> depthwise (kernel_size) -> projection (1x1)
Args:
input_tensor: input
num_outputs: number of outputs in the final layer.
expansion_size: the size of expansion, could be a constant or a callable.
If latter it will be provided 'num_inputs' as an input. For forward
compatibility it should accept arbitrary keyword arguments.
Default will expand the input by factor of 6.
stride: depthwise stride
rate: depthwise rate
kernel_size: depthwise kernel
residual: whether to include residual connection between input
and output.
normalizer_fn: batchnorm or otherwise
split_projection: how many ways to split projection operator
(that is conv expansion->bottleneck)
split_expansion: how many ways to split expansion op
(that is conv bottleneck->expansion) ops will keep depth divisible
by this value.
expansion_transform: Optional function that takes expansion
as a single input and returns output.
    depthwise_location: where to put the depthwise convolutions; supported
      values are None, 'input', 'output', 'expansion'.
    depthwise_channel_multiplier: depthwise channel multiplier. Each input
      channel will be replicated (with different filters) that many times,
      so if the input had c channels the output will have
      c * depthwise_channel_multiplier channels.
endpoints: An optional dictionary into which intermediate endpoints are
placed. The keys "expansion_output", "depthwise_output",
"projection_output" and "expansion_transform" are always populated, even
if the corresponding functions are not invoked.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
padding: Padding type to use if `use_explicit_padding` is not set.
scope: optional scope.
Returns:
Tensor of depth num_outputs
Raises:
    TypeError: on an invalid `depthwise_location` or padding combination.
"""
with tf.variable_scope(scope, default_name='expanded_conv') as s, \
tf.name_scope(s.original_name_scope):
prev_depth = input_tensor.get_shape().as_list()[3]
if depthwise_location not in [None, 'input', 'output', 'expansion']:
raise TypeError('%r is unknown value for depthwise_location' %
depthwise_location)
if use_explicit_padding:
if padding != 'SAME':
raise TypeError('`use_explicit_padding` should only be used with '
'"SAME" padding.')
padding = 'VALID'
depthwise_func = functools.partial(
slim.separable_conv2d,
num_outputs=None,
kernel_size=kernel_size,
depth_multiplier=depthwise_channel_multiplier,
stride=stride,
rate=rate,
normalizer_fn=normalizer_fn,
padding=padding,
scope='depthwise')
# b1 -> b2 * r -> b2
# i -> (o * r) (bottleneck) -> o
input_tensor = tf.identity(input_tensor, 'input')
net = input_tensor
if depthwise_location == 'input':
if use_explicit_padding:
net = _fixed_padding(net, kernel_size, rate)
net = depthwise_func(net, activation_fn=None)
if callable(expansion_size):
inner_size = expansion_size(num_inputs=prev_depth)
else:
inner_size = expansion_size
if inner_size > net.shape[3]:
net = split_conv(
net,
inner_size,
num_ways=split_expansion,
scope='expand',
stride=1,
normalizer_fn=normalizer_fn)
net = tf.identity(net, 'expansion_output')
if endpoints is not None:
endpoints['expansion_output'] = net
if depthwise_location == 'expansion':
if use_explicit_padding:
net = _fixed_padding(net, kernel_size, rate)
net = depthwise_func(net)
net = tf.identity(net, name='depthwise_output')
if endpoints is not None:
endpoints['depthwise_output'] = net
if expansion_transform:
net = expansion_transform(expansion_tensor=net, input_tensor=input_tensor)
# Note in contrast with expansion, we always have
# projection to produce the desired output size.
net = split_conv(
net,
num_outputs,
num_ways=split_projection,
stride=1,
scope='project',
normalizer_fn=normalizer_fn,
activation_fn=tf.identity)
if endpoints is not None:
endpoints['projection_output'] = net
if depthwise_location == 'output':
if use_explicit_padding:
net = _fixed_padding(net, kernel_size, rate)
net = depthwise_func(net, activation_fn=None)
if callable(residual): # custom residual
net = residual(input_tensor=input_tensor, output_tensor=net)
elif (residual and
# stride check enforces that we don't add residuals when spatial
# dimensions are None
stride == 1 and
# Depth matches
net.get_shape().as_list()[3] ==
input_tensor.get_shape().as_list()[3]):
net += input_tensor
return tf.identity(net, name='output')
def split_conv(input_tensor,
num_outputs,
num_ways,
scope,
divisible_by=8,
**kwargs):
"""Creates a split convolution.
  Split convolution splits the input and output into `num_ways` blocks of
  approximately the same size each, and only connects the i-th input block
  to the i-th output block.
Args:
input_tensor: input tensor
num_outputs: number of output filters
num_ways: num blocks to split by.
scope: scope for all the operators.
    divisible_by: make sure that every part is divisible by this.
**kwargs: will be passed directly into conv2d operator
Returns:
tensor
"""
b = input_tensor.get_shape().as_list()[3]
if num_ways == 1 or min(b // num_ways,
num_outputs // num_ways) < divisible_by:
# Don't do any splitting if we end up with less than 8 filters
# on either side.
return slim.conv2d(input_tensor, num_outputs, [1, 1], scope=scope, **kwargs)
outs = []
input_splits = _split_divisible(b, num_ways, divisible_by=divisible_by)
output_splits = _split_divisible(
num_outputs, num_ways, divisible_by=divisible_by)
inputs = tf.split(input_tensor, input_splits, axis=3, name='split_' + scope)
base = scope
for i, (input_tensor, out_size) in enumerate(zip(inputs, output_splits)):
scope = base + '_part_%d' % (i,)
n = slim.conv2d(input_tensor, out_size, [1, 1], scope=scope, **kwargs)
n = tf.identity(n, scope + '_output')
outs.append(n)
return tf.concat(outs, 3, name=scope + '_concat')
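# Usage sketch (hypothetical shapes): projecting a 64-channel input to 96
# channels in 3 ways builds three independent 1x1 convs over channel blocks
# of sizes [24, 24, 16] -> [32, 32, 32] and concatenates the results:
#
#   net = split_conv(net, num_outputs=96, num_ways=3, scope='project')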
| 13,014
| 35.456583
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/model.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base model configuration for CNN benchmarks."""
import tensorflow as tf
import convnet_builder
from cnn_util import log_fn
class Model(object):
"""Base model configuration for CNN benchmarks."""
def __init__(self,
model,
image_size,
batch_size,
learning_rate,
layer_counts=None,
fp16_loss_scale=128):
self.model = model
self.image_size = image_size
self.batch_size = batch_size
self.default_batch_size = batch_size
self.learning_rate = learning_rate
self.layer_counts = layer_counts
# TODO(reedwm) Set custom loss scales for each model instead of using the
# default of 128.
self.fp16_loss_scale = fp16_loss_scale
def get_model(self):
return self.model
def get_image_size(self):
return self.image_size
def get_batch_size(self):
return self.batch_size
def set_batch_size(self, batch_size):
self.batch_size = batch_size
def get_default_batch_size(self):
return self.default_batch_size
def get_layer_counts(self):
return self.layer_counts
def get_fp16_loss_scale(self):
return self.fp16_loss_scale
def get_learning_rate(self, global_step, batch_size):
del global_step
del batch_size
return self.learning_rate
def add_inference(self, unused_cnn):
raise ValueError('Must be implemented in derived classes')
def skip_final_affine_layer(self):
"""Returns if the caller of this class should skip the final affine layer.
Normally, this class adds a final affine layer to the model after calling
self.add_inference(), to generate the logits. If a subclass override this
method to return True, the caller should not add the final affine layer.
This is useful for tests.
"""
return False
def build_network(self, images, phase_train=True, nclass=1001, image_depth=3,
data_type=tf.float32, data_format='NCHW',
use_tf_layers=True, fp16_vars=False):
"""Returns logits and aux_logits from images."""
if data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2])
var_type = tf.float32
if data_type == tf.float16 and fp16_vars:
var_type = tf.float16
network = convnet_builder.ConvNetBuilder(
images, image_depth, phase_train, use_tf_layers,
data_format, data_type, var_type)
with tf.variable_scope('cg', custom_getter=network.get_custom_getter()):
self.add_inference(network)
log_fn("Number of parameters: %d" % network.n_parameters)
# Add the final fully-connected class layer
logits = (network.affine(nclass, activation='linear')
if not self.skip_final_affine_layer()
else network.top_layer)
aux_logits = None
if network.aux_top_layer is not None:
with network.switch_to_aux_top_layer():
aux_logits = network.affine(
nclass, activation='linear', stddev=0.001)
if data_type == tf.float16:
# TODO(reedwm): Determine if we should do this cast here.
logits = tf.cast(logits, tf.float32)
if aux_logits is not None:
aux_logits = tf.cast(aux_logits, tf.float32)
return logits, aux_logits
# Subclasses can override this to define their own loss function. By default,
# benchmark_cnn.py defines its own loss function. If overridden, it must have
# the same signature as benchmark_cnn.loss_function.
loss_function = None
| 4,910
| 36.204545
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/squeezenet_model.py
|
# Copyright 2017 Ioannis Athanasiadis(supernlogn) one of the wanna be TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SqueezeNet model configuration.
References:
Iandola, Forrest N., et al. "SqueezeNet: AlexNet-level accuracy
with 50x fewer parameters and< 0.5 MB model size."
arXiv preprint arXiv:1602.07360 (2016).
"""
import model
def fire_module(cnn, squeeze_depth, expand_depth):
  """Adds a SqueezeNet fire module to `cnn`.
  A 1x1 'squeeze' layer is followed by parallel 1x1 and 3x3 'expand'
  layers whose outputs are concatenated along the channel dimension.
  """
  def squeeze(cnn, num_outputs):
    cnn.conv(num_outputs, 1, 1, 1, 1)
  def expand(cnn, num_outputs):
    input_layer = cnn.top_layer
    cnn.conv(num_outputs, 1, 1, 1, 1, input_layer=input_layer)
    e1x1 = cnn.top_layer
    cnn.conv(num_outputs, 3, 3, 1, 1, input_layer=input_layer)
    e3x3 = cnn.top_layer
    cnn.concat_layers(list_of_layers=[e1x1, e3x3])
  squeeze(cnn, squeeze_depth)
  expand(cnn, expand_depth)
class SqueezenetModel(model.Model):
  def __init__(self):
    image_size = 224
    batch_size = 32
    learning_rate = 1e-4
    self.num_classes = 1000
    super(SqueezenetModel, self).__init__(
        'squeezenet', image_size, batch_size, learning_rate)
  def set_batch_size(self, b_s):
    # Deliberately ignore overrides and keep the model's default batch size.
    pass
def add_inference(self, cnn):
"""Original squeezenet architecture for 224x224 images."""
cnn.conv(96, 7, 7, 2, 2)
cnn.mpool(3, 3, 2, 2)
fire_module(cnn, 16, 64)
fire_module(cnn, 16, 64)
fire_module(cnn, 32, 128)
cnn.mpool(3, 3, 2, 2)
fire_module(cnn, 32, 128)
fire_module(cnn, 48, 192)
fire_module(cnn, 48, 192)
fire_module(cnn, 64, 256)
cnn.mpool(3, 3, 2, 2)
fire_module(cnn, 64, 256)
cnn.conv(self.num_classes, 1, 1, 1, 1)
cnn.apool(13, 13, 1, 1)
cnn.spatial_mean()
class SqueezenetCifar10Model(model.Model):
  def __init__(self):
    self.num_classes = 10
    image_size = 32
    batch_size = 64
    learning_rate = 1e-4
    super(SqueezenetCifar10Model, self).__init__(
        'squeezenet', image_size, batch_size, learning_rate)
  def add_inference(self, cnn):
    """Modified version of SqueezeNet for CIFAR images."""
cnn.conv(96, 2, 2, 1, 1, activation=None)
cnn.mpool(2, 2, 1, 1)
fire_module(cnn, 16, 64)
fire_module(cnn, 16, 64)
fire_module(cnn, 32, 128)
cnn.mpool(2, 2, 1, 1)
fire_module(cnn, 32, 128)
fire_module(cnn, 48, 192)
fire_module(cnn, 48, 192)
fire_module(cnn, 64, 256)
cnn.mpool(2, 2, 1, 1)
fire_module(cnn, 64, 256)
    # Use global average pooling per 'Network in Network'.
    cnn.apool(4, 4, 1, 1)
    cnn.conv(self.num_classes, 1, 1, 1, 1)
cnn.spatial_mean()
| 3,937
| 32.372881
| 110
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/alexnet_model.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Alexnet model configuration.
References:
Krizhevsky, Alex, Ilya Sutskever, and Geoffrey E. Hinton
ImageNet Classification with Deep Convolutional Neural Networks
Advances in Neural Information Processing Systems. 2012
"""
import tensorflow as tf
import model
class AlexnetModel(model.Model):
"""Alexnet cnn model."""
def __init__(self):
super(AlexnetModel, self).__init__('alexnet', 224 + 3, 512, 0.005)
def add_inference(self, cnn):
# Note: VALID requires padding the images by 3 in width and height
cnn.conv(64, 11, 11, 4, 4, 'VALID')
cnn.mpool(3, 3, 2, 2)
cnn.conv(192, 5, 5)
cnn.mpool(3, 3, 2, 2)
cnn.conv(384, 3, 3)
cnn.conv(384, 3, 3)
cnn.conv(256, 3, 3)
cnn.mpool(3, 3, 2, 2)
cnn.reshape([-1, 256 * 6 * 6])
cnn.affine(4096)
cnn.dropout()
cnn.affine(4096)
cnn.dropout()
class AlexnetCifar10Model(model.Model):
"""Alexnet cnn model for cifar datasets.
The model architecture follows the one defined in the tensorflow tutorial
model.
Reference model: tensorflow/models/tutorials/image/cifar10/cifar10.py
Paper: http://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdf
"""
def __init__(self):
super(AlexnetCifar10Model, self).__init__('alexnet', 32, 128, 0.1)
def add_inference(self, cnn):
cnn.conv(64, 5, 5, 1, 1, 'SAME', stddev=5e-2)
cnn.mpool(3, 3, 2, 2, mode='SAME')
cnn.lrn(depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
cnn.conv(64, 5, 5, 1, 1, 'SAME', bias=0.1, stddev=5e-2)
cnn.lrn(depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
cnn.mpool(3, 3, 2, 2, mode='SAME')
shape = cnn.top_layer.get_shape().as_list()
flat_dim = shape[1] * shape[2] * shape[3]
cnn.reshape([-1, flat_dim])
cnn.affine(384, stddev=0.04, bias=0.1)
cnn.affine(192, stddev=0.04, bias=0.1)
def get_learning_rate(self, global_step=None, batch_size=None):
if global_step is None or batch_size is None:
return self.learning_rate
num_examples_per_epoch = 50000
num_epochs_per_decay = 100
decay_steps = int(num_epochs_per_decay * num_examples_per_epoch /
batch_size)
decay_factor = 0.1
return tf.train.exponential_decay(
self.learning_rate, global_step, decay_steps, decay_factor,
staircase=True)
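  # Worked example: with batch_size=128, decay_steps is
  # int(100 * 50000 / 128) = 39062, so the schedule is
  # 0.1 * 0.1 ** floor(global_step / 39062): one decade every 100 epochs.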
| 3,004
| 33.147727
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/googlenet_model.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Googlenet model configuration.
References:
Szegedy, Christian, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, and Andrew Rabinovich
Going deeper with convolutions
arXiv preprint arXiv:1409.4842 (2014)
"""
import model
class GooglenetModel(model.Model):
def __init__(self):
super(GooglenetModel, self).__init__('googlenet', 224, 32, 0.005)
def add_inference(self, cnn):
def inception_v1(cnn, k, l, m, n, p, q):
cols = [[('conv', k, 1, 1)], [('conv', l, 1, 1), ('conv', m, 3, 3)],
[('conv', n, 1, 1), ('conv', p, 5, 5)],
[('mpool', 3, 3, 1, 1, 'SAME'), ('conv', q, 1, 1)]]
cnn.inception_module('incept_v1', cols)
cnn.conv(64, 7, 7, 2, 2)
cnn.mpool(3, 3, 2, 2, mode='SAME')
cnn.conv(64, 1, 1)
cnn.conv(192, 3, 3)
cnn.mpool(3, 3, 2, 2, mode='SAME')
inception_v1(cnn, 64, 96, 128, 16, 32, 32)
inception_v1(cnn, 128, 128, 192, 32, 96, 64)
cnn.mpool(3, 3, 2, 2, mode='SAME')
inception_v1(cnn, 192, 96, 208, 16, 48, 64)
inception_v1(cnn, 160, 112, 224, 24, 64, 64)
inception_v1(cnn, 128, 128, 256, 24, 64, 64)
inception_v1(cnn, 112, 144, 288, 32, 64, 64)
inception_v1(cnn, 256, 160, 320, 32, 128, 128)
cnn.mpool(3, 3, 2, 2, mode='SAME')
inception_v1(cnn, 256, 160, 320, 32, 128, 128)
inception_v1(cnn, 384, 192, 384, 48, 128, 128)
cnn.apool(7, 7, 1, 1, mode='VALID')
cnn.reshape([-1, 1024])
| 2,173
| 36.482759
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/trivial_model.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trivial model configuration."""
from models import model
class TrivialModel(model.Model):
"""Trivial model configuration."""
def __init__(self):
super(TrivialModel, self).__init__('trivial', 224 + 3, 32, 0.005)
def add_inference(self, cnn):
cnn.reshape([-1, 227 * 227 * 3])
cnn.affine(1)
cnn.affine(4096)
class TrivialCifar10Model(model.Model):
"""Trivial cifar10 model configuration."""
def __init__(self):
super(TrivialCifar10Model, self).__init__('trivial', 32, 32, 0.005)
def add_inference(self, cnn):
cnn.reshape([-1, 32 * 32 * 3])
cnn.affine(1)
cnn.affine(4096)
| 1,312
| 31.02439
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/__init__.py
| 0
| 0
| 0
|
py
|
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/official_resnet_model.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import official resnet models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import datasets
from models import model as model_lib
class ImagenetResnetModel(model_lib.Model):
"""Official resnet models."""
def __init__(self, resnet_size, version=2):
"""These are the parameters that work for Imagenet data.
Args:
resnet_size: The number of convolutional layers needed in the model.
version: 1 or 2 for v1 or v2, respectively.
"""
default_batch_sizes = {
50: 128,
101: 32,
152: 32
}
batch_size = default_batch_sizes.get(resnet_size, 32)
default_learning_rate = 0.0125 * batch_size / 32
model_name = 'official_resnet_{}_v{}'.format(resnet_size, version)
super(ImagenetResnetModel, self).__init__(
model_name, 224, batch_size, default_learning_rate)
self.resnet_size = resnet_size
self.version = version
def get_learning_rate(self, global_step, batch_size):
num_batches_per_epoch = (
float(datasets.IMAGENET_NUM_TRAIN_IMAGES) / batch_size)
boundaries = [int(num_batches_per_epoch * x) for x in [30, 60, 80, 90]]
values = [1, 0.1, 0.01, 0.001, 0.0001]
adjusted_learning_rate = (
self.learning_rate / self.default_batch_size * batch_size)
values = [v * adjusted_learning_rate for v in values]
return tf.train.piecewise_constant(global_step, boundaries, values)
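  # Worked example (ImageNet has 1,281,167 training images): for resnet 50
  # at batch_size=256, the base rate 0.05 (= 0.0125 * 128 / 32) is rescaled
  # by 256 / 128 to 0.1, and the boundaries land near steps 150k, 300k,
  # 400k, and 450k (epochs 30, 60, 80, 90), giving rates
  # [0.1, 0.01, 0.001, 1e-4, 1e-5].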
def build_network(self, images, phase_train=True, nclass=1001, image_depth=3,
data_type=tf.float32, data_format='NCHW',
use_tf_layers=True, fp16_vars=False):
del image_depth
del data_format
del use_tf_layers
# pylint: disable=g-import-not-at-top
try:
from official.resnet.imagenet_main import ImagenetModel
except ImportError:
      tf.logging.fatal('Please add tensorflow/models to the PYTHONPATH.')
raise
images = tf.cast(images, data_type)
model_class = ImagenetModel(resnet_size=self.resnet_size,
resnet_version=self.version,
# The official model dtype seems to be ignored,
# as the dtype it uses is the dtype of the input
# images. Doesn't hurt to set it though.
dtype=data_type)
logits = model_class(images, phase_train)
logits = tf.cast(logits, tf.float32)
return logits, None
| 3,225
| 38.82716
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/inception_model.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inception model configuration.
Includes multiple models: inception3, inception4, inception-resnet2.
References:
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
Inception-v4, Inception-ResNet and the Impact of Residual Connections on
Learning
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich
Going Deeper with Convolutions
http://arxiv.org/pdf/1409.4842v1.pdf
Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
Zbigniew Wojna
Rethinking the Inception Architecture for Computer Vision
arXiv preprint arXiv:1512.00567 (2015)
Inception v3 model: http://arxiv.org/abs/1512.00567
Inception v4 and Resnet V2 architectures: http://arxiv.org/abs/1602.07261
"""
from six.moves import xrange # pylint: disable=redefined-builtin
import model
class Inceptionv3Model(model.Model):
def __init__(self, auxiliary=False):
self._auxiliary = auxiliary
super(Inceptionv3Model, self).__init__('inception3', 299, 32, 0.005)
def add_inference(self, cnn):
def inception_v3_a(cnn, n):
cols = [[('conv', 64, 1, 1)], [('conv', 48, 1, 1), ('conv', 64, 5, 5)],
[('conv', 64, 1, 1), ('conv', 96, 3, 3), ('conv', 96, 3, 3)],
[('apool', 3, 3, 1, 1, 'SAME'), ('conv', n, 1, 1)]]
cnn.inception_module('incept_v3_a', cols)
def inception_v3_b(cnn):
cols = [[('conv', 384, 3, 3, 2, 2, 'VALID')],
[('conv', 64, 1, 1),
('conv', 96, 3, 3),
('conv', 96, 3, 3, 2, 2, 'VALID')],
[('mpool', 3, 3, 2, 2, 'VALID')]]
cnn.inception_module('incept_v3_b', cols)
def inception_v3_c(cnn, n):
cols = [[('conv', 192, 1, 1)],
[('conv', n, 1, 1), ('conv', n, 1, 7), ('conv', 192, 7, 1)],
[('conv', n, 1, 1), ('conv', n, 7, 1), ('conv', n, 1, 7),
('conv', n, 7, 1), ('conv', 192, 1, 7)],
[('apool', 3, 3, 1, 1, 'SAME'), ('conv', 192, 1, 1)]]
cnn.inception_module('incept_v3_c', cols)
def inception_v3_d(cnn):
cols = [[('conv', 192, 1, 1), ('conv', 320, 3, 3, 2, 2, 'VALID')],
[('conv', 192, 1, 1), ('conv', 192, 1, 7), ('conv', 192, 7, 1),
('conv', 192, 3, 3, 2, 2, 'VALID')],
[('mpool', 3, 3, 2, 2, 'VALID')]]
cnn.inception_module('incept_v3_d', cols)
def inception_v3_e(cnn, pooltype):
cols = [[('conv', 320, 1, 1)], [('conv', 384, 1, 1), ('conv', 384, 1, 3)],
[('share',), ('conv', 384, 3, 1)],
[('conv', 448, 1, 1), ('conv', 384, 3, 3), ('conv', 384, 1, 3)],
[('share',), ('share',), ('conv', 384, 3, 1)],
[('mpool' if pooltype == 'max' else 'apool', 3, 3, 1, 1, 'SAME'),
('conv', 192, 1, 1)]]
cnn.inception_module('incept_v3_e', cols)
def incept_v3_aux(cnn):
assert cnn.aux_top_layer is None
cnn.aux_top_layer = cnn.top_layer
cnn.aux_top_size = cnn.top_size
with cnn.switch_to_aux_top_layer():
cnn.apool(5, 5, 3, 3, mode='VALID')
cnn.conv(128, 1, 1, mode='SAME')
cnn.conv(768, 5, 5, mode='VALID', stddev=0.01)
cnn.reshape([-1, 768])
cnn.use_batch_norm = True
cnn.conv(32, 3, 3, 2, 2, mode='VALID') # 299 x 299 x 3
cnn.conv(32, 3, 3, 1, 1, mode='VALID') # 149 x 149 x 32
cnn.conv(64, 3, 3, 1, 1, mode='SAME') # 147 x 147 x 64
cnn.mpool(3, 3, 2, 2, mode='VALID') # 147 x 147 x 64
cnn.conv(80, 1, 1, 1, 1, mode='VALID') # 73 x 73 x 80
cnn.conv(192, 3, 3, 1, 1, mode='VALID') # 71 x 71 x 192
cnn.mpool(3, 3, 2, 2, 'VALID') # 35 x 35 x 192
inception_v3_a(cnn, 32) # 35 x 35 x 256 mixed.
inception_v3_a(cnn, 64) # 35 x 35 x 288 mixed_1.
inception_v3_a(cnn, 64) # 35 x 35 x 288 mixed_2
inception_v3_b(cnn) # 17 x 17 x 768 mixed_3
inception_v3_c(cnn, 128) # 17 x 17 x 768 mixed_4
inception_v3_c(cnn, 160) # 17 x 17 x 768 mixed_5
inception_v3_c(cnn, 160) # 17 x 17 x 768 mixed_6
inception_v3_c(cnn, 192) # 17 x 17 x 768 mixed_7
if self._auxiliary:
      incept_v3_aux(cnn)  # Auxiliary head logits
inception_v3_d(cnn) # 17 x 17 x 1280 mixed_8
inception_v3_e(cnn, 'avg') # 8 x 8 x 2048 mixed_9
inception_v3_e(cnn, 'max') # 8 x 8 x 2048 mixed_10
cnn.apool(8, 8, 1, 1, 'VALID') # 8 x 8 x 2048
cnn.reshape([-1, 2048]) # 1 x 1 x 2048
# Stem functions
def inception_v4_sa(cnn):
cols = [[('mpool', 3, 3, 2, 2, 'VALID')], [('conv', 96, 3, 3, 2, 2, 'VALID')]]
cnn.inception_module('incept_v4_sa', cols)
def inception_v4_sb(cnn):
cols = [[('conv', 64, 1, 1), ('conv', 96, 3, 3, 1, 1, 'VALID')],
[('conv', 64, 1, 1), ('conv', 64, 7, 1), ('conv', 64, 1, 7),
('conv', 96, 3, 3, 1, 1, 'VALID')]]
cnn.inception_module('incept_v4_sb', cols)
def inception_v4_sc(cnn):
cols = [[('conv', 192, 3, 3, 2, 2, 'VALID')],
[('mpool', 3, 3, 2, 2, 'VALID')]]
cnn.inception_module('incept_v4_sc', cols)
# Reduction functions
def inception_v4_ra(cnn, k, l, m, n):
cols = [
[('mpool', 3, 3, 2, 2, 'VALID')], [('conv', n, 3, 3, 2, 2, 'VALID')],
[('conv', k, 1, 1), ('conv', l, 3, 3), ('conv', m, 3, 3, 2, 2, 'VALID')]
]
cnn.inception_module('incept_v4_ra', cols)
def inception_v4_rb(cnn):
cols = [[('mpool', 3, 3, 2, 2, 'VALID')],
[('conv', 192, 1, 1), ('conv', 192, 3, 3, 2, 2, 'VALID')],
[('conv', 256, 1, 1), ('conv', 256, 1, 7), ('conv', 320, 7, 1),
('conv', 320, 3, 3, 2, 2, 'VALID')]]
cnn.inception_module('incept_v4_rb', cols)
class Inceptionv4Model(model.Model):
def __init__(self):
super(Inceptionv4Model, self).__init__('inception4', 299, 32, 0.005)
def add_inference(self, cnn):
def inception_v4_a(cnn):
cols = [[('apool', 3, 3, 1, 1, 'SAME'), ('conv', 96, 1, 1)],
[('conv', 96, 1, 1)], [('conv', 64, 1, 1), ('conv', 96, 3, 3)],
[('conv', 64, 1, 1), ('conv', 96, 3, 3), ('conv', 96, 3, 3)]]
cnn.inception_module('incept_v4_a', cols)
def inception_v4_b(cnn):
cols = [[('apool', 3, 3, 1, 1, 'SAME'), ('conv', 128, 1, 1)],
[('conv', 384, 1, 1)],
[('conv', 192, 1, 1), ('conv', 224, 1, 7), ('conv', 256, 7, 1)],
[('conv', 192, 1, 1), ('conv', 192, 1, 7), ('conv', 224, 7, 1),
('conv', 224, 1, 7), ('conv', 256, 7, 1)]]
cnn.inception_module('incept_v4_b', cols)
def inception_v4_c(cnn):
cols = [[('apool', 3, 3, 1, 1, 'SAME'), ('conv', 256, 1, 1)],
[('conv', 256, 1, 1)], [('conv', 384, 1, 1), ('conv', 256, 1, 3)],
[('share',), ('conv', 256, 3, 1)],
[('conv', 384, 1, 1), ('conv', 448, 1, 3), ('conv', 512, 3, 1),
('conv', 256, 3, 1)], [('share',), ('share',), ('share',),
('conv', 256, 1, 3)]]
cnn.inception_module('incept_v4_c', cols)
cnn.use_batch_norm = True
cnn.conv(32, 3, 3, 2, 2, mode='VALID')
cnn.conv(32, 3, 3, 1, 1, mode='VALID')
cnn.conv(64, 3, 3)
inception_v4_sa(cnn)
inception_v4_sb(cnn)
inception_v4_sc(cnn)
for _ in xrange(4):
inception_v4_a(cnn)
inception_v4_ra(cnn, 192, 224, 256, 384)
for _ in xrange(7):
inception_v4_b(cnn)
inception_v4_rb(cnn)
for _ in xrange(3):
inception_v4_c(cnn)
cnn.spatial_mean()
cnn.dropout(0.8)
| 8,393
| 39.747573
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/mobilenet.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mobilenet Base Class, branched from slim for fp16 performance study."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import os
import tensorflow as tf
slim = tf.contrib.slim
@slim.add_arg_scope
def apply_activation(x, name=None, activation_fn=None):
return activation_fn(x, name=name) if activation_fn else x
def _fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],
[pad_beg[1], pad_end[1]], [0, 0]])
return padded_inputs
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
@contextlib.contextmanager
def _set_arg_scope_defaults(defaults):
"""Sets arg scope defaults for all items present in defaults.
Args:
defaults: dictionary/list of pairs, containing a mapping from
function to a dictionary of default args.
Yields:
context manager where all defaults are set.
"""
if hasattr(defaults, 'items'):
items = list(defaults.items())
else:
items = defaults
if not items:
yield
else:
func, default_arg = items[0]
with slim.arg_scope(func, **default_arg):
with _set_arg_scope_defaults(items[1:]):
yield
@slim.add_arg_scope
def depth_multiplier(output_params,
multiplier,
divisible_by=8,
min_depth=8,
**unused_kwargs):
if 'num_outputs' not in output_params:
return
d = output_params['num_outputs']
output_params['num_outputs'] = _make_divisible(d * multiplier, divisible_by,
min_depth)
_Op = collections.namedtuple('Op', ['op', 'params', 'multiplier_func'])
def op(opfunc, **params):
  multiplier = params.pop('multiplier_transform', depth_multiplier)
return _Op(opfunc, params=params, multiplier_func=multiplier)
class NoOpScope(object):
"""No-op context manager."""
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
return False
def safe_arg_scope(funcs, **kwargs):
"""Returns `slim.arg_scope` with all None arguments removed.
Arguments:
funcs: Functions to pass to `arg_scope`.
**kwargs: Arguments to pass to `arg_scope`.
Returns:
arg_scope or No-op context manager.
  Note: this can be useful if a None value should be interpreted as "do not
  overwrite this parameter's value".
"""
filtered_args = {name: value for name, value in kwargs.items()
if value is not None}
if filtered_args:
return slim.arg_scope(funcs, **filtered_args)
else:
return NoOpScope()
@slim.add_arg_scope
def mobilenet_base( # pylint: disable=invalid-name
inputs,
conv_defs,
multiplier=1.0,
final_endpoint=None,
output_stride=None,
use_explicit_padding=False,
scope=None,
is_training=False):
"""Mobilenet base network.
Constructs a network from inputs to the given final endpoint. By default
the network is constructed in inference mode. To create network
in training mode use:
with slim.arg_scope(mobilenet.training_scope()):
logits, endpoints = mobilenet_base(...)
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
conv_defs: A list of op(...) layers specifying the net architecture.
multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
final_endpoint: The name of the last layer, for early termination. For
V1-based networks the last layer is "layer_14"; for V2 it is "layer_20".
output_stride: An integer that specifies the requested ratio of input to
output spatial resolution. If not None, then we invoke atrous convolution
if necessary to prevent the network from reducing the spatial resolution
of the activation maps. Allowed values are 1 or any even number, excluding
zero. Typical values are 8 (accurate fully convolutional mode), 16
(fast fully convolutional mode), and 32 (classification mode).
NOTE: output_stride relies on all subsequent operators supporting dilated
convolution via the "rate" parameter. This might require wrapping non-conv
operators to operate properly.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
scope: optional variable scope.
is_training: How to set up batch_norm and other ops. Note: most of the time
this does not need to be set directly. Use mobilenet.training_scope() to set
up training instead. This parameter is here for backward compatibility
only. It is safe to set it to the value matching
training_scope(is_training=...). It is also safe to explicitly set
it to False, even if there is an outer training_scope set to training.
(The network will be built in inference mode.) If this is set to None,
no arg_scope is added for slim.batch_norm's is_training parameter.
Returns:
tensor_out: output tensor.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: depth_multiplier <= 0, or the target output_stride is not
allowed.
"""
if multiplier <= 0:
raise ValueError('multiplier is not greater than zero.')
# Set conv defs defaults and overrides.
conv_defs_defaults = conv_defs.get('defaults', {})
conv_defs_overrides = conv_defs.get('overrides', {})
if use_explicit_padding:
conv_defs_overrides = copy.deepcopy(conv_defs_overrides)
conv_defs_overrides[
(slim.conv2d, slim.separable_conv2d)] = {'padding': 'VALID'}
if output_stride is not None:
if output_stride == 0 or (output_stride > 1 and output_stride % 2):
raise ValueError('Output stride must be None, 1 or a multiple of 2.')
# a) Set the tensorflow scope
# b) set padding to default: note we might consider removing this
# since it is also set by mobilenet_scope
# c) set all defaults
# d) set all extra overrides.
with _scope_all(scope, default_scope='Mobilenet'), \
safe_arg_scope([slim.batch_norm], is_training=is_training), \
_set_arg_scope_defaults(conv_defs_defaults), \
_set_arg_scope_defaults(conv_defs_overrides):
# The current_stride variable keeps track of the output stride of the
# activations, i.e., the running product of convolution strides up to the
# current network layer. This allows us to invoke atrous convolution
# whenever applying the next convolution would result in the activations
# having output stride larger than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
net = inputs
# Insert default parameters before the base scope which includes
# any custom overrides set in mobilenet.
end_points = {}
scopes = {}
for i, opdef in enumerate(conv_defs['spec']):
params = dict(opdef.params)
opdef.multiplier_func(params, multiplier)
stride = params.get('stride', 1)
if output_stride is not None and current_stride == output_stride:
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
layer_stride = 1
layer_rate = rate
rate *= stride
else:
layer_stride = stride
layer_rate = 1
current_stride *= stride
# Update params.
params['stride'] = layer_stride
# Only insert rate to params if rate > 1.
if layer_rate > 1:
params['rate'] = layer_rate
# Set padding
if use_explicit_padding:
if 'kernel_size' in params:
net = _fixed_padding(net, params['kernel_size'], layer_rate)
else:
params['use_explicit_padding'] = True
end_point = 'layer_%d' % (i + 1)
try:
net = opdef.op(net, **params)
except Exception:
print('Failed to create op %i: %r params: %r' % (i, opdef, params))
raise
end_points[end_point] = net
scope = os.path.dirname(net.name)
scopes[scope] = end_point
if final_endpoint is not None and end_point == final_endpoint:
break
# Add all tensors that end with 'output' to
# endpoints
for t in net.graph.get_operations():
scope = os.path.dirname(t.name)
bn = os.path.basename(t.name)
if scope in scopes and t.name.endswith('output'):
end_points[scopes[scope] + '/' + bn] = t.outputs[0]
return net, end_points
@contextlib.contextmanager
def _scope_all(scope, default_scope=None):
with tf.variable_scope(scope, default_name=default_scope) as s,\
tf.name_scope(s.original_name_scope):
yield s
@slim.add_arg_scope
def mobilenet(inputs,
num_classes=1001,
prediction_fn=slim.softmax,
reuse=None,
scope='Mobilenet',
base_only=False,
**mobilenet_args):
"""Mobilenet model for classification, supports both V1 and V2.
Note: default mode is inference, use mobilenet.training_scope to create
training network.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
prediction_fn: a function to get predictions out of logits
(default softmax).
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
base_only: if True will only create the base of the network (no pooling
and no logits).
**mobilenet_args: passed to mobilenet_base verbatim.
- conv_defs: list of conv defs
- multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
- output_stride: will ensure that the last layer has at most this total
stride. If the architecture calls for more stride than that
(e.g. output_stride=16 but the architecture has 5 stride=2 operators),
the extra strides are replaced with atrous (dilated) convolutions.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation tensor.
Raises:
ValueError: Input rank is invalid.
"""
is_training = mobilenet_args.get('is_training', False)
input_shape = inputs.get_shape().as_list()
if len(input_shape) != 4:
raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))
with tf.variable_scope(scope, 'Mobilenet', reuse=reuse) as scope:
inputs = tf.identity(inputs, 'input')
net, end_points = mobilenet_base(inputs, scope=scope, **mobilenet_args)
if base_only:
return net, end_points
net = tf.identity(net, name='embedding')
with tf.variable_scope('Logits'):
net = global_pool(net)
end_points['global_pool'] = net
if not num_classes:
return net, end_points
net = slim.dropout(net, scope='Dropout', is_training=is_training)
# 1 x 1 x num_classes
# Note: legacy scope name.
logits = slim.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.zeros_initializer(),
scope='Conv2d_1c_1x1')
logits = tf.squeeze(logits, [1, 2])
logits = tf.identity(logits, name='output')
end_points['Logits'] = logits
if prediction_fn:
end_points['Predictions'] = prediction_fn(logits, 'Predictions')
return logits, end_points
def global_pool(input_tensor, pool_op=tf.nn.avg_pool):
"""Applies avg pool to produce 1x1 output.
NOTE: This function is functionally equivalent to reduce_mean, but it has
a baked-in average pool, which has better support across hardware.
Args:
input_tensor: input tensor
pool_op: pooling op (avg pool is default)
Returns:
a tensor batch_size x 1 x 1 x depth.
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size = tf.convert_to_tensor(
[1, tf.shape(input_tensor)[1],
tf.shape(input_tensor)[2], 1])
else:
kernel_size = [1, shape[1], shape[2], 1]
output = pool_op(
input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
# Recover output shape, for unknown shape.
output.set_shape([None, 1, 1, None])
return output
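# Shape sketch (illustrative): for a statically shaped NHWC input the pooling
# kernel spans the full spatial extent, e.g.
#   x = tf.zeros([8, 7, 7, 1280])
#   global_pool(x)  # -> shape [8, 1, 1, 1280]
# For unknown height/width the kernel is built from tf.shape() at run time.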
def training_scope(is_training=True,
weight_decay=0.00004,
stddev=0.09,
dropout_keep_prob=0.8,
bn_decay=0.997):
"""Defines Mobilenet training scope.
Usage:
with tf.contrib.slim.arg_scope(mobilenet.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
# the network created will be trainable with dropout/batch norm
# initialized appropriately.
Args:
is_training: if set to False this will ensure that all customizations are
set to non-training mode. This might be helpful for code that is reused
across both training/evaluation, but most of the time training_scope with
value False is not needed. If this is set to None, the parameter is not
added to the batch_norm arg_scope.
weight_decay: The weight decay to use for regularizing the model.
stddev: Standard deviation for initialization, if negative uses xavier.
dropout_keep_prob: dropout keep probability (not set if equals to None).
bn_decay: decay for the batch norm moving averages (not set if equals to
None).
Returns:
An argument scope to use via arg_scope.
"""
# Note: do not introduce parameters that would change the inference
# model here (for example whether to use bias), modify conv_def instead.
batch_norm_params = {
'decay': bn_decay,
'is_training': is_training
}
if stddev < 0:
weight_initializer = slim.initializers.xavier_initializer()
else:
weight_initializer = tf.truncated_normal_initializer(stddev=stddev)
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope(
[slim.conv2d, slim.fully_connected, slim.separable_conv2d],
weights_initializer=weight_initializer,
normalizer_fn=slim.batch_norm), \
slim.arg_scope([mobilenet_base, mobilenet], is_training=is_training),\
safe_arg_scope([slim.batch_norm], **batch_norm_params), \
safe_arg_scope([slim.dropout], is_training=is_training,
keep_prob=dropout_keep_prob), \
slim.arg_scope([slim.conv2d], \
weights_regularizer=slim.l2_regularizer(weight_decay)), \
slim.arg_scope([slim.separable_conv2d], weights_regularizer=None) as s:
return s
| 17,378
| 36.214133
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/models/resnet_model.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnet model configuration.
References:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition
arXiv:1512.03385 (2015)
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks
arXiv:1603.05027 (2016)
Liang-Chieh Chen, George Papandreou, Iasonas Kokkinos, Kevin Murphy,
Alan L. Yuille
DeepLab: Semantic Image Segmentation with Deep Convolutional Nets,
Atrous Convolution, and Fully Connected CRFs
arXiv:1606.00915 (2016)
"""
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import datasets
from models import model as model_lib
def bottleneck_block_v1(cnn, depth, depth_bottleneck, stride):
"""Bottleneck block with identity short-cut for ResNet v1.
Args:
cnn: the network to append bottleneck blocks.
depth: the number of output filters for this bottleneck block.
depth_bottleneck: the number of bottleneck filters for this block.
stride: Stride used in the first layer of the bottleneck block.
"""
input_layer = cnn.top_layer
in_size = cnn.top_size
name_key = 'resnet_v1'
name = name_key + str(cnn.counts[name_key])
cnn.counts[name_key] += 1
with tf.variable_scope(name):
if depth == in_size:
if stride == 1:
shortcut = input_layer
else:
shortcut = cnn.apool(
1, 1, stride, stride, input_layer=input_layer,
num_channels_in=in_size)
else:
shortcut = cnn.conv(
depth, 1, 1, stride, stride, activation=None,
use_batch_norm=True, input_layer=input_layer,
num_channels_in=in_size, bias=None)
cnn.conv(depth_bottleneck, 1, 1, stride, stride,
input_layer=input_layer, num_channels_in=in_size,
use_batch_norm=True, bias=None)
cnn.conv(depth_bottleneck, 3, 3, 1, 1, mode='SAME_RESNET',
use_batch_norm=True, bias=None)
res = cnn.conv(depth, 1, 1, 1, 1, activation=None,
use_batch_norm=True, bias=None)
output = tf.nn.relu(shortcut + res)
cnn.top_layer = output
cnn.top_size = depth
def bottleneck_block_v2(cnn, depth, depth_bottleneck, stride):
"""Bottleneck block with identity short-cut for ResNet v2.
The main difference from v1 is that a batch norm and relu are done at the
start of the block, instead of the end. This initial batch norm and relu is
collectively called a pre-activation.
Args:
cnn: the network to append bottleneck blocks.
depth: the number of output filters for this bottleneck block.
depth_bottleneck: the number of bottleneck filters for this block.
stride: Stride used in the first layer of the bottleneck block.
"""
input_layer = cnn.top_layer
in_size = cnn.top_size
name_key = 'resnet_v2'
name = name_key + str(cnn.counts[name_key])
cnn.counts[name_key] += 1
preact = cnn.batch_norm()
preact = tf.nn.relu(preact)
with tf.variable_scope(name):
if depth == in_size:
if stride == 1:
shortcut = input_layer
else:
shortcut = cnn.apool(
1, 1, stride, stride, input_layer=input_layer,
num_channels_in=in_size)
else:
shortcut = cnn.conv(
depth, 1, 1, stride, stride, activation=None, use_batch_norm=False,
input_layer=preact, num_channels_in=in_size, bias=None)
cnn.conv(depth_bottleneck, 1, 1, stride, stride,
input_layer=preact, num_channels_in=in_size,
use_batch_norm=True, bias=None)
cnn.conv(depth_bottleneck, 3, 3, 1, 1, mode='SAME_RESNET',
use_batch_norm=True, bias=None)
res = cnn.conv(depth, 1, 1, 1, 1, activation=None,
use_batch_norm=False, bias=None)
output = shortcut + res
cnn.top_layer = output
cnn.top_size = depth
def bottleneck_block(cnn, depth, depth_bottleneck, stride, pre_activation):
"""Bottleneck block with identity short-cut.
Args:
cnn: the network to append bottleneck blocks.
depth: the number of output filters for this bottleneck block.
depth_bottleneck: the number of bottleneck filters for this block.
stride: Stride used in the first layer of the bottleneck block.
pre_activation: use pre_activation structure used in v2 or not.
"""
if pre_activation:
bottleneck_block_v2(cnn, depth, depth_bottleneck, stride)
else:
bottleneck_block_v1(cnn, depth, depth_bottleneck, stride)
def residual_block(cnn, depth, stride, pre_activation):
"""Residual block with identity short-cut.
Args:
cnn: the network to append residual blocks.
depth: the number of output filters for this residual block.
stride: Stride used in the first layer of the residual block.
pre_activation: use pre_activation structure or not.
"""
input_layer = cnn.top_layer
in_size = cnn.top_size
if in_size != depth:
# Plan A of shortcut.
shortcut = cnn.apool(1, 1, stride, stride,
input_layer=input_layer,
num_channels_in=in_size)
padding = (depth - in_size) // 2
if cnn.channel_pos == 'channels_last':
shortcut = tf.pad(
shortcut, [[0, 0], [0, 0], [0, 0], [padding, padding]])
else:
shortcut = tf.pad(
shortcut, [[0, 0], [padding, padding], [0, 0], [0, 0]])
else:
shortcut = input_layer
if pre_activation:
res = cnn.batch_norm(input_layer)
res = tf.nn.relu(res)
else:
res = input_layer
cnn.conv(depth, 3, 3, stride, stride,
input_layer=res, num_channels_in=in_size,
use_batch_norm=True, bias=None)
if pre_activation:
res = cnn.conv(depth, 3, 3, 1, 1, activation=None,
use_batch_norm=False, bias=None)
output = shortcut + res
else:
res = cnn.conv(depth, 3, 3, 1, 1, activation=None,
use_batch_norm=True, bias=None)
output = tf.nn.relu(shortcut + res)
cnn.top_layer = output
cnn.top_size = depth
class ResnetModel(model_lib.Model):
"""Resnet cnn network configuration."""
def __init__(self, model, layer_counts):
default_batch_sizes = {
'resnet50': 64,
'resnet101': 32,
'resnet152': 32,
'resnet50_v2': 64,
'resnet101_v2': 32,
'resnet152_v2': 32,
}
batch_size = default_batch_sizes.get(model, 32)
super(ResnetModel, self).__init__(model, 224, batch_size, 0.004,
layer_counts)
self.pre_activation = 'v2' in model
def add_inference(self, cnn):
if self.layer_counts is None:
raise ValueError('Layer counts not specified for %s' % self.get_model())
cnn.use_batch_norm = True
cnn.batch_norm_config = {'decay': 0.9, 'epsilon': 1e-5, 'scale': True}
cnn.conv(64, 7, 7, 2, 2, mode='SAME_RESNET', use_batch_norm=True)
cnn.mpool(3, 3, 2, 2, mode='SAME')
for _ in xrange(self.layer_counts[0]):
bottleneck_block(cnn, 256, 64, 1, self.pre_activation)
for i in xrange(self.layer_counts[1]):
stride = 2 if i == 0 else 1
bottleneck_block(cnn, 512, 128, stride, self.pre_activation)
for i in xrange(self.layer_counts[2]):
stride = 2 if i == 0 else 1
bottleneck_block(cnn, 1024, 256, stride, self.pre_activation)
for i in xrange(self.layer_counts[3]):
stride = 2 if i == 0 else 1
bottleneck_block(cnn, 2048, 512, stride, self.pre_activation)
if self.pre_activation:
cnn.batch_norm()
cnn.top_layer = tf.nn.relu(cnn.top_layer)
cnn.spatial_mean()
def get_learning_rate(self, global_step, batch_size):
num_batches_per_epoch = (
float(datasets.IMAGENET_NUM_TRAIN_IMAGES) / batch_size)
boundaries = [int(num_batches_per_epoch * x) for x in [30, 60, 80, 90]]
rescaled_lr = self.learning_rate / self.default_batch_size * batch_size
values = [1, 0.1, 0.01, 0.001, 0.0001]
values = [rescaled_lr * v for v in values]
lr = tf.train.piecewise_constant(global_step, boundaries, values)
warmup_steps = int(num_batches_per_epoch * 5)
warmup_lr = (
rescaled_lr * tf.cast(global_step, tf.float32) / tf.cast(
warmup_steps, tf.float32))
return tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)
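# Worked example (illustrative, assuming the base Model class stores the
# constructor's batch size as default_batch_size): for resnet50 trained with a
# global batch size of 256, rescaled_lr = 0.004 / 64 * 256 = 0.016; the rate
# warms up linearly to 0.016 over the first 5 epochs, then drops by 10x at
# epochs 30, 60, 80 and 90.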
def create_resnet50_model():
return ResnetModel('resnet50', (3, 4, 6, 3))
def create_resnet50_v2_model():
return ResnetModel('resnet50_v2', (3, 4, 6, 3))
def create_resnet101_model():
return ResnetModel('resnet101', (3, 4, 23, 3))
def create_resnet101_v2_model():
return ResnetModel('resnet101_v2', (3, 4, 23, 3))
def create_resnet152_model():
return ResnetModel('resnet152', (3, 8, 36, 3))
def create_resnet152_v2_model():
return ResnetModel('resnet152_v2', (3, 8, 36, 3))
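# Naming arithmetic (illustrative): layer_counts gives bottleneck blocks per
# stage, each block contributing 3 convolutions, plus the stem conv and the
# final FC layer:
#   resnet50:  3 * (3 + 4 + 6 + 3) + 2 = 50
#   resnet152: 3 * (3 + 8 + 36 + 3) + 2 = 152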
class ResnetCifar10Model(model_lib.Model):
"""Resnet cnn network configuration for Cifar 10 dataset.
V1 model architecture follows the one defined in the paper:
https://arxiv.org/pdf/1512.03385.pdf.
V2 model architecture follows the one defined in the paper:
https://arxiv.org/pdf/1603.05027.pdf.
"""
def __init__(self, model, layer_counts):
self.pre_activation = 'v2' in model
super(ResnetCifar10Model, self).__init__(
model, 32, 128, 0.1, layer_counts)
def add_inference(self, cnn):
if self.layer_counts is None:
raise ValueError('Layer counts not specified for %s' % self.get_model())
cnn.use_batch_norm = True
cnn.batch_norm_config = {'decay': 0.9, 'epsilon': 1e-5, 'scale': True}
if self.pre_activation:
cnn.conv(16, 3, 3, 1, 1, use_batch_norm=True)
else:
cnn.conv(16, 3, 3, 1, 1, activation=None, use_batch_norm=True)
for i in xrange(self.layer_counts[0]):
# reshape to batch_size x 16 x 32 x 32
residual_block(cnn, 16, 1, self.pre_activation)
for i in xrange(self.layer_counts[1]):
# Subsampling is performed at the first convolution with a stride of 2
stride = 2 if i == 0 else 1
# reshape to batch_size x 32 x 16 x 16
residual_block(cnn, 32, stride, self.pre_activation)
for i in xrange(self.layer_counts[2]):
stride = 2 if i == 0 else 1
# reshape to batch_size x 64 x 8 x 8
residual_block(cnn, 64, stride, self.pre_activation)
if self.pre_activation:
cnn.batch_norm()
cnn.top_layer = tf.nn.relu(cnn.top_layer)
cnn.spatial_mean()
def get_learning_rate(self, global_step, batch_size):
num_batches_per_epoch = int(50000 / batch_size)
boundaries = num_batches_per_epoch * np.array([82, 123, 300],
dtype=np.int64)
boundaries = list(boundaries)
values = [0.1, 0.01, 0.001, 0.0002]
return tf.train.piecewise_constant(global_step, boundaries, values)
def create_resnet20_cifar_model():
return ResnetCifar10Model('resnet20', (3, 3, 3))
def create_resnet20_v2_cifar_model():
return ResnetCifar10Model('resnet20_v2', (3, 3, 3))
def create_resnet32_cifar_model():
return ResnetCifar10Model('resnet32', (5, 5, 5))
def create_resnet32_v2_cifar_model():
return ResnetCifar10Model('resnet32_v2', (5, 5, 5))
def create_resnet44_cifar_model():
return ResnetCifar10Model('resnet44', (7, 7, 7))
def create_resnet44_v2_cifar_model():
return ResnetCifar10Model('resnet44_v2', (7, 7, 7))
def create_resnet56_cifar_model():
return ResnetCifar10Model('resnet56', (9, 9, 9))
def create_resnet56_v2_cifar_model():
return ResnetCifar10Model('resnet56_v2', (9, 9, 9))
def create_resnet110_cifar_model():
return ResnetCifar10Model('resnet110', (18, 18, 18))
def create_resnet110_v2_cifar_model():
return ResnetCifar10Model('resnet110_v2', (18, 18, 18))
| 12,347
| 33.783099
| 80
|
py
|
tensorflow-mnist-MLP-batch_normalization-weight_initializers
|
tensorflow-mnist-MLP-batch_normalization-weight_initializers-master/run_main.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import urllib
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.layers.python.layers import batch_norm as batch_norm
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
DATA_DIRECTORY = "data"
LOGS_DIRECTORY = "logs/train"
# train params
training_epochs = 15
batch_size = 100
display_step = 50
# network params
n_input = 784
n_hidden_1 = 256
n_hidden_2 = 256
n_classes = 10
# Store layers weight & bias
with tf.name_scope('weight'):
normal_weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1]),name='w1_normal'),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]),name='w2_normal'),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]),name='wout_normal')
}
truncated_normal_weights = {
'h1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1],stddev=0.1),name='w1_truncated_normal'),
'h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2],stddev=0.1),name='w2_truncated_normal'),
'out': tf.Variable(tf.truncated_normal([n_hidden_2, n_classes],stddev=0.1),name='wout_truncated_normal')
}
xavier_weights = {
'h1': tf.get_variable('w1_xavier', [n_input, n_hidden_1],initializer=tf.contrib.layers.xavier_initializer()),
'h2': tf.get_variable('w2_xavier', [n_hidden_1, n_hidden_2],initializer=tf.contrib.layers.xavier_initializer()),
'out': tf.get_variable('wout_xavier',[n_hidden_2, n_classes],initializer=tf.contrib.layers.xavier_initializer())
}
he_weights = {
'h1': tf.get_variable('w1_he', [n_input, n_hidden_1],
initializer=tf.contrib.layers.variance_scaling_initializer()),
'h2': tf.get_variable('w2_he', [n_hidden_1, n_hidden_2],
initializer=tf.contrib.layers.variance_scaling_initializer()),
'out': tf.get_variable('wout_he', [n_hidden_2, n_classes],
initializer=tf.contrib.layers.variance_scaling_initializer())
}
with tf.name_scope('bias'):
normal_biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1]),name='b1_normal'),
'b2': tf.Variable(tf.random_normal([n_hidden_2]),name='b2_normal'),
'out': tf.Variable(tf.random_normal([n_classes]),name='bout_normal')
}
zero_biases = {
'b1': tf.Variable(tf.zeros([n_hidden_1]),name='b1_zero'),
'b2': tf.Variable(tf.zeros([n_hidden_2]),name='b2_zero'),
'out': tf.Variable(tf.zeros([n_classes]),name='bout_zero')
}
weight_initializer = {'normal':normal_weights, 'truncated_normal':truncated_normal_weights, 'xavier':xavier_weights, 'he':he_weights}
bias_initializer = {'normal':normal_biases, 'zero':zero_biases}
# user input
from argparse import ArgumentParser
WEIGHT_INIT = 'xavier'
BIAS_INIT = 'zero'
BATCH_NORM = True
def build_parser():
parser = ArgumentParser()
parser.add_argument('--weight-init',
dest='weight_initializer', help='weight initializer',
metavar='WEIGHT_INIT', required=True)
parser.add_argument('--bias-init',
dest='bias_initializer', help='bias initializer',
metavar='BIAS_INIT', required=True)
parser.add_argument('--batch-norm',
dest='batch_normalization', help='boolean for activation of batch normalization',
metavar='BATCH_NORM', required=True)
return parser
# Download the data from Yann's website, unless it's already here.
def maybe_download(filename):
if not tf.gfile.Exists(DATA_DIRECTORY):
tf.gfile.MakeDirs(DATA_DIRECTORY)
filepath = os.path.join(DATA_DIRECTORY, filename)
if not tf.gfile.Exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
# Batch normalization implementation
# from https://github.com/tensorflow/tensorflow/issues/1122
def batch_norm_layer(inputT, is_training=True, scope=None):
# Note: is_training is tf.placeholder(tf.bool) type
return tf.cond(is_training,
lambda: batch_norm(inputT, is_training=True,
center=True, scale=True, activation_fn=tf.nn.relu, decay=0.9, scope=scope),
lambda: batch_norm(inputT, is_training=False,
center=True, scale=True, activation_fn=tf.nn.relu, decay=0.9,
scope=scope, reuse = True))
# Create model of MLP with batch-normalization layer
def MLPwithBN(x, weights, biases, is_training=True):
with tf.name_scope('MLPwithBN'):
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = batch_norm_layer(layer_1,is_training=is_training, scope='layer_1_bn')
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = batch_norm_layer(layer_2, is_training=is_training, scope='layer_2_bn')
layer_2 = tf.nn.relu(layer_2)
# Output layer with linear activation
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
# Create model of MLP without batch-normalization layer
def MLPwoBN(x, weights, biases):
with tf.name_scope('MLPwoBN'):
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Output layer with linear activation
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
# main function
def main():
# Parse argument
parser = build_parser()
options = parser.parse_args()
weights = weight_initializer[options.weight_initializer]
biases = bias_initializer[options.bias_initializer]
batch_normalization = options.batch_normalization
# Import data
mnist = input_data.read_data_sets('data/', one_hot=True)
# Boolean for MODE of train or test
is_training = tf.placeholder(tf.bool, name='MODE')
# tf Graph input
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10]) #answer
# Predict
if batch_normalization=='True':
y = MLPwithBN(x,weights,biases,is_training)
else:
y = MLPwoBN(x, weights, biases)
# Get loss of model
with tf.name_scope("LOSS"):
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y,y_))
# Define optimizer
with tf.name_scope("ADAM"):
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
# moving_mean and moving_variance need to be updated
if batch_normalization == "True":
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if update_ops:
train_ops = [train_step] + update_ops
train_op_final = tf.group(*train_ops)
else:
train_op_final = train_step
# Get accuracy of model
with tf.name_scope("ACC"):
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Create a summary to monitor loss tensor
tf.scalar_summary('loss', loss)
# Create a summary to monitor accuracy tensor
tf.scalar_summary('acc', accuracy)
# Merge all summaries into a single op
merged_summary_op = tf.merge_all_summaries()
# Add ops to save and restore all the variables
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer(), feed_dict={is_training: True})
# Training cycle
total_batch = int(mnist.train.num_examples / batch_size)
# op to write logs to Tensorboard
summary_writer = tf.train.SummaryWriter(LOGS_DIRECTORY, graph=tf.get_default_graph())
# Loop for epoch
for epoch in range(training_epochs):
# Loop over all batches
for i in range(total_batch):
batch = mnist.train.next_batch(batch_size)
# Run optimization op (backprop), loss op (to get loss value)
# and summary nodes
_, train_accuracy, summary = sess.run([train_op_final, accuracy, merged_summary_op] , feed_dict={x: batch[0], y_: batch[1], is_training: True})
# Write logs at every iteration
summary_writer.add_summary(summary, epoch * total_batch + i)
# Display logs
if i % display_step == 0:
print("Epoch:", '%04d,' % (epoch + 1),
"batch_index %4d/%4d, training accuracy %.5f" % (i, total_batch, train_accuracy))
# Calculate accuracy for all mnist test images
print("test accuracy for the latest result: %g" % accuracy.eval(
feed_dict={x: mnist.test.images, y_: mnist.test.labels, is_training: False}))
if __name__ == '__main__':
main()
| 9,299
| 39.434783
| 155
|
py
|
Geometric_Transformation_CMR
|
Geometric_Transformation_CMR-main/dataloader.py
|
import random
import shutil
import cv2
import torch
from PIL import Image
from matplotlib import pylab as plt
import nibabel as nib
from nibabel import nifti1
import torchvision
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import os
import numpy as np
class MyData(Dataset):
def __init__(self, root_dir, transform=None):
self.root_dir = root_dir
self.img_path = os.listdir(self.root_dir)
self.transform = transform
self.classes2d = ['00', '01', '02', '03', '10', '11', '12', '13']
def __getitem__(self, idx):
img_name = self.img_path[idx]
label = img_name.split('@')[-1][0:-4]
label_tensor = torch.zeros(8)
label_tensor[self.classes2d.index(label)] = 1.0
img_item_path = os.path.join(self.root_dir, img_name)
img_idx = np.array(Image.open(img_item_path), dtype='uint8')
# Adaptive histogram equalization (CLAHE)
img_idx = img_idx.reshape(3,img_idx.shape[0],img_idx.shape[1])
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
img_res = np.zeros_like(img_idx)
for i in range(img_idx.shape[0]):
img_res[i,:, :] = clahe.apply(img_idx[i,:, :])
img_res = Image.fromarray(img_res.reshape(img_res.shape[1],img_res.shape[2],3))
# Apply the transform
if self.transform is not None:
img_res = self.transform(img_res)
return img_res, label_tensor
def __len__(self):
return len(self.img_path)
class GenericData:
def __init__(self, save_path, load_path, split_ratio, dim):
self.save_path = save_path
self.load_path = load_path
self.split_ratio = split_ratio #[0.8,0.2]
self.dim = dim
self.classes2d = ['00', '01', '02', '03', '10', '11', '12', '13']
def generic_data(self):
train_save_path = os.path.join(self.save_path, 'train')
test_save_path = os.path.join(self.save_path, 'test')
for path in [train_save_path, test_save_path]:
if os.path.exists(path):  # if the folder already exists, clear it first
shutil.rmtree(path)
os.makedirs(path)  # create a fresh empty folder
if self.dim == 2:
classes = self.classes2d
else:
raise ValueError("Should 3D images really be transformed?")
img_path = os.listdir(self.load_path)
img_path_all = dict()
for img_name in img_path:
img_allqueue = nib.load(os.path.join(self.load_path, img_name))
width, height, queue = img_allqueue.dataobj.shape
for i in range(queue):
img = img_allqueue.dataobj[:,:,i]
for k in range(self.dim * 4):
axis_flip = int(classes[k][0])
rotation = int(classes[k][1]) * 90
img_path_all[img_name + '@{}@{}'.format(i, classes[k])] = Geo_Transform_img(img, axis_flip,rotation)
img_train, img_test = self.dict_split_shuffle(img_path_all)
for key in img_train:
plt.imsave(os.path.join(train_save_path,f'{key}.jpg'), img_train[key], cmap='gray')
for key in img_test:
plt.imsave(os.path.join(test_save_path,f'{key}.jpg'), img_test[key], cmap='gray')
def dict_split_shuffle(self, img_path_all):
tr_size = int(len(img_path_all) * self.split_ratio[0])
keys = list(img_path_all.keys())
random.shuffle(keys)
img_train = dict([(i,img_path_all[i]) for i in keys[:tr_size]])
img_test = dict([(i, img_path_all[i]) for i in keys[tr_size:]])
return img_train, img_test
def show_img(path):
img = nib.load(path)
width, height, queue = img.dataobj.shape
num = 1
for i in range(queue):
img_arry = img.dataobj[:, :, i]
plt.subplot(2, 3, num)
plt.imshow(img_arry, cmap='gray')
num += 1
plt.show()
def rotate_img(img, rot, axes):
"""
:param img: Array of two or more dimensions.
:param rot: Degrees of the array is rotated.
:param axes: The array is rotated in the plane defined by the axes.
Axes must be different.(0,1),(1,2),(0,2)
"""
if rot in [0, 90, 180, 270]:
k = rot / 90
return np.rot90(img, k, axes)
else:
raise ValueError('rotation should be 0, 90, 180, or 270')
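# Illustrative mapping (not in the original source): rot is converted to the
# number of 90-degree turns handed to np.rot90, e.g.
#   rotate_img(img, 180, (0, 1))  # k = 2, i.e. np.rot90(img, 2, (0, 1))
#   rotate_img(img, 0, (0, 1))    # k = 0, returns the image unchanged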
def Geo_Transform_img(img, axis_flip, rotation):
"""
:param img: Array of two or three dimensions.
:param axis_flip: int, how many aixs should be fipped.
:param rotation: rotation degrees in [0,90,180,270]
:return:
"""
if axis_flip == 0:  # no axis flip
return rotate_img(img, rotation, (0, 1))
elif axis_flip == 1:  # one axis flipped (transpose)
img = np.transpose(img)
return rotate_img(img, rotation, (0, 1))
# elif axis_flip == 2:  # two axes flipped (the 3D case)
# img = np.transpose(img)
# return rotate_img(img, rotation, (0, 2))
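# Label encoding sketch (illustrative): each entry of classes2d packs the
# transform as two digits, transpose-digit then rotation-digit:
#   '00' -> no transpose, 0 deg        '12' -> transpose, 180 deg
#   '03' -> no transpose, 270 deg      '11' -> transpose, 90 deg
# which yields the 8 = 2 x 4 geometric classes the network predicts.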
# Set the paths of the datasets.
MyoPS_C0_dir = r'datasets\MyoPS\C0'
MyoPS_LGE_dir = r'datasets\MyoPS\LGE'
MyoPS_T2_dir = r'datasets\MyoPS\T2'
MyoPS_C0_split_dir = r'datasets\MyoPS\C0_split'
MyoPS_LGE_split_dir = r'datasets\MyoPS\LGE_split'
MyoPS_T2_split_dir = r'datasets\MyoPS\T2_split'
data_generate = GenericData(save_path=MyoPS_T2_split_dir,load_path=MyoPS_T2_dir,split_ratio=[0.8,0.2],dim=2)
data_generate.generic_data()
| 5,273
| 34.635135
| 120
|
py
|
Geometric_Transformation_CMR
|
Geometric_Transformation_CMR-main/GeoNet.py
|
import torch
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear, BatchNorm2d, ReLU, BatchNorm1d
class GeoNet(nn.Module):
def __init__(self):
super(GeoNet, self).__init__()
self.conv1 = Conv2d(1, 32, kernel_size=5, padding=2)
self.conv2 = Conv2d(32, 64, kernel_size=3, padding=1)
self.conv3 = Conv2d(64, 128, kernel_size=3, padding=1)
self.conv4 = Conv2d(128, 64, kernel_size=3, padding=1)
self.model = nn.Sequential(
self.conv1,
BatchNorm2d(32),
ReLU(),
MaxPool2d(kernel_size=2, stride=2),
self.conv2,
BatchNorm2d(64),
ReLU(),
MaxPool2d(kernel_size=2, stride=2),
self.conv3,
BatchNorm2d(128),
ReLU(),
self.conv4,
BatchNorm2d(64),
ReLU(),
MaxPool2d(kernel_size=2, stride=2),
Flatten(),
Linear(64 * 32 * 32, 256),
BatchNorm1d(256),
ReLU(),
Linear(256, 8)
)
def forward(self, x):
x = self.model(x)
return x
if __name__ == '__main__':  # customary place to sanity-check the model
model = GeoNet()
print(model)
input = torch.ones((16, 1, 256, 256))
output = model(input)
print(output.shape)
| 1,344
| 27.020833
| 99
|
py
|
Geometric_Transformation_CMR
|
Geometric_Transformation_CMR-main/OtherExperiment.py
|
from torchvision.transforms import transforms
from dataloader import *
from GeoNet import *
def predict(model):
model.eval()
total_LGE_accuracy = 0
total_C0_accuracy = 0
data_aug = transforms.Compose([
transforms.ToTensor(),
transforms.Grayscale(num_output_channels=1),
transforms.RandomRotation(10),
transforms.RandomResizedCrop((256, 256), scale=(0.7, 1), ratio=(0.8, 1.2))
])
image_datasets_LGE = MyData(os.path.join(r'datasets\MyoPS\LGE_split', 'train'), data_aug) + MyData(os.path.join(r'datasets\MyoPS\LGE_split', 'test'), data_aug)
image_datasets_C0 = MyData(os.path.join(r'datasets\MyoPS\C0_split', 'train'), data_aug) + MyData(os.path.join(r'datasets\MyoPS\C0_split', 'test'), data_aug)
data_loaders_LGE = torch.utils.data.DataLoader(image_datasets_LGE, batch_size=16, shuffle=True, num_workers=0,drop_last=True)
data_loaders_C0 = torch.utils.data.DataLoader(image_datasets_C0, batch_size=16, shuffle=True, num_workers=0,drop_last=True)
with torch.no_grad():
for data in data_loaders_LGE:
images1, targets1 = data
outputs = model(images1)
accuracy = (outputs.argmax(1) == targets1.argmax(1)).sum()
total_LGE_accuracy += accuracy
for data in data_loaders_C0:
images2, targets2 = data
outputs = model(images2)
accuracy = (outputs.argmax(1) == targets2.argmax(1)).sum()
total_C0_accuracy += accuracy
return total_LGE_accuracy,total_C0_accuracy
if __name__ == '__main__':
model = GeoNet()  # build the model skeleton first, then load the saved state dict
model.load_state_dict(torch.load("GeoNet_MyoPS_T2.pth"))
total_LGE_accuracy,total_C0_accuracy = predict(model)
print("LGE:{},C0:{}".format(total_LGE_accuracy/1392,total_C0_accuracy/1392))
| 1,827
| 44.7
| 158
|
py
|
Geometric_Transformation_CMR
|
Geometric_Transformation_CMR-main/train.py
|
import cv2
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from dataloader import *
from GeoNet import *
from d2l import torch as d2l
def train(image_datasets, data_loaders, epochs, learning_rate, wt_decay):
train_data_size = len(image_datasets['train'])
test_data_size = len(image_datasets['test'])
print(train_data_size,test_data_size)
train_dataloader = data_loaders['train']
test_dataloader = data_loaders['test']
# Instantiate the network model
model = GeoNet()
# Loss function
loss_fn = nn.CrossEntropyLoss()
# Optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=wt_decay)
# Set up some training parameters
# count of training iterations
total_train_step = 0
# Add a TensorBoard writer
writer = SummaryWriter("logs_train3")
for i in range(epochs):
print("----------第{}轮训练开始了---------".format(i + 1))
model.train()
total_train_loss = 0
# Training steps begin
for data in train_dataloader:
images, targets = data
outputs = model(images)
loss = loss_fn(outputs, targets)
total_train_loss += loss.item()
total_train_step += 1
# Optimizer updates the model
optimizer.zero_grad()
loss.backward()
optimizer.step()
if total_train_step % 5 == 0:
print("训练次数:{},loss:{}".format(total_train_step, loss.item()))
writer.add_scalar("train_loss", total_train_loss, i+1)
# Evaluation steps begin
model.eval()
total_train_accuracy = 0
total_test_accuracy = 0
total_test_loss = 0
with torch.no_grad():
for data in test_dataloader:
images1, targets1 = data
outputs = model(images1)
loss = loss_fn(outputs, targets1)
total_test_loss += loss.item()
accuracy = (outputs.argmax(1) == targets1.argmax(1)).sum()
total_test_accuracy += accuracy
print("在{}轮训练后,整体测试集合上的accuracy:{}".format(i+1, total_test_accuracy / test_data_size))
writer.add_scalar("test_accuracy", total_test_accuracy / test_data_size, i+1)
writer.add_scalar("test_loss", total_test_loss, i + 1)
for data in train_dataloader:
images2, targets2 = data
outputs = model(images2)
accuracy = (outputs.argmax(1) == targets2.argmax(1)).sum()
total_train_accuracy += accuracy
print("在{}轮训练后,整体训练集合上的accuracy:{}".format(i+1, total_train_accuracy / train_data_size))
writer.add_scalar("train_accuracy", total_train_accuracy / train_data_size, i+1)
writer.close()
return model
def main():
MyoPS_C0_split_dir = r'datasets\MyoPS\C0_split'
MyoPS_LGE_split_dir = r'datasets\MyoPS\LGE_split'
MyoPS_T2_split_dir = r'datasets\MyoPS\T2_split'
data_transforms = {
'train': transforms.Compose([
transforms.ToTensor(),
transforms.Grayscale(num_output_channels=1),
transforms.RandomRotation(10),
transforms.RandomResizedCrop((256, 256), scale=(0.7, 1), ratio=(0.8, 1.2))
]),
'test': transforms.Compose([
transforms.ToTensor(),
transforms.Grayscale(num_output_channels=1),
transforms.RandomRotation(10),
#transforms.Resize((256,256))
transforms.RandomResizedCrop((256, 256), scale=(0.7, 1), ratio=(0.8, 1.2))
])
}
image_datasets = {
x: MyData(os.path.join(MyoPS_T2_split_dir, x), data_transforms[x])
for x in ['train', 'test']
}
data_loaders = {
x: torch.utils.data.DataLoader(image_datasets[x], batch_size=16, shuffle=True, num_workers=0,drop_last=True)
for x in ['train', 'test']
}
model = train(image_datasets, data_loaders, epochs=32, learning_rate=0.01, wt_decay=0)
torch.save(model.state_dict(), "GeoNet_MyoPS_T2.pth")
if __name__ == '__main__':
main()
| 4,003
| 33.817391
| 116
|
py
|
chatgpt-failures
|
chatgpt-failures-main/scripts/kid1/main.py
|
import random
SEQUENCES = [['East', 'South', 'West', 'North'], ['Alpha', 'Beta', 'Gamma', 'Delta'], ['Qui', 'Quo', 'Qua'], ['Donald', 'Duck', 'Dunn'],
['Mambo #1', 'Mambo #2', 'Mambo #3', 'Mambo #4', 'Mambo #5'], ['Pippo', 'Pluto', 'Paperino'], ['Apple', 'Banana', 'Cherry'],
['Jesse Pinkman', 'Saul Goodman', 'Gustavo Fring', 'Walter White'],
['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'], ['S', 'M', 'L', 'XL', 'XXL', 'XXXL'],
['Thumb', 'Index Finger', 'Middle Finger', 'Ring Finger', 'Pinky'], ['Mars', 'Earth', 'Venus', 'Mercury', 'Jupiter'],
['Orange', 'Is', 'The', 'New', 'Black'], ['Red', 'Green', 'Blue'], ['One', 'Two', 'Three', 'Four', 'Five'],
['Alpha', 'Bravo', 'Charlie', 'Delta', 'Echo'], ['Little', 'Red', 'Riding', 'Hood'], ['Car', 'Bike', 'Scooter'],
['Sun', 'Moon', 'Star'], ['Ace', 'King', 'Queen', 'Jack'], ['Python', 'Java', 'C++'], ['Do', 'Re', 'Mi', 'Fa', 'Sol', 'La', 'Si'],
['January', 'February', 'March', 'April'], ['Rome', 'London', 'Paris'], ['September', 'October', 'November', 'December'],
['Harry Potter', 'Hermione Granger', 'Ron Weasley'], ['Aaa', 'Bbb', 'Ccc', 'Ddd', 'Eee'], ['Cable', 'Wire', 'Plug'],
['First', 'Second', 'Third', 'Fourth'], ['Eeny', 'Meeny', 'Miny', 'Moe']]
NAMES = ['John', 'Mary', 'Mike', 'Daisy', 'Jackie', 'Cindy', 'Sam', 'Nancy', 'Karen', 'Paul', 'Dave', 'Gina', 'Joe',
'Alice', 'Rob', 'Chris', 'Scott', 'Stephanie', 'Carol', 'Mikey', 'Bobby', 'Kathy', 'Molly', 'Diane', 'Linda']
FRACTIONS = ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth', 'ninth', 'tenth']
def make_kid1_question_answer():
seq = list(random.choice(SEQUENCES))  # copy so the shared sequence list is not mutated
random.shuffle(seq)
name = random.choice(NAMES)
ll = len(seq)
ss = ""
template = " One of them is called {}."
for nn in range(ll - 1):
ss += template.format(seq[nn])
question = "{}'s mom has {} kids. {} Who is the {} kid?".format(name, ll, ss, FRACTIONS[ll-1])
answer = name
return {"question": question, "answer": answer}
def main():
print(make_kid1_question_answer())
if __name__ == "__main__":
main()
| 2,231
| 54.8
| 144
|
py
|
ZeCon
|
ZeCon-main/main.py
|
from optimization.image_editor_zecon import ImageEditor
from optimization.arguments import get_arguments
if __name__ == "__main__":
args = get_arguments()
image_editor = ImageEditor(args)
image_editor.edit_image_by_prompt()
| 247
| 23.8
| 55
|
py
|
ZeCon
|
ZeCon-main/optimization/losses.py
|
# PatchNCE loss from https://github.com/taesungp/contrastive-unpaired-translation
from torch.nn import functional as F
import torch
import numpy as np
import torch.nn as nn
def d_clip_loss(x, y, use_cosine=False):
x = F.normalize(x, dim=-1)
y = F.normalize(y, dim=-1)
if use_cosine:
distance = 1 - (x @ y.t()).squeeze()
else:
distance = (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
return distance
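# Geometry note (illustrative): for unit-normalized x and y, ||x - y|| equals
# 2*sin(theta/2) where theta is the angle between them, so the non-cosine
# branch computes 2 * arcsin(||x - y|| / 2)**2 = theta**2 / 2, i.e. half the
# squared geodesic (great-circle) distance on the unit sphere.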
def d_clip_dir_loss(x_embd,y_embd,prompt_x_embd,prompt_y_embd):
d_img = x_embd - y_embd
d_txt = prompt_x_embd - prompt_y_embd
d_img = F.normalize(d_img, dim=-1)
d_txt = F.normalize(d_txt, dim=-1)
distance = 1 - (d_img @ d_txt.t()).squeeze()
return distance
def range_loss(input):
return (input - input.clamp(-1, 1)).pow(2).mean([1, 2, 3])
def mse_loss(x_in, y_in):
mse = torch.nn.MSELoss()
return mse(x_in,y_in)
def get_features(image, model, layers=None):
if layers is None:
layers = {'0': 'conv1_1',
'2': 'conv1_2',
'5': 'conv2_1',
'7': 'conv2_2',
'10': 'conv3_1',
'19': 'conv4_1',
'21': 'conv4_2',
'28': 'conv5_1',
'31': 'conv5_2'
}
features = {}
x = image
for name, layer in model._modules.items():
x = layer(x)
if name in layers:
features[layers[name]] = x
return features
class Normalize(nn.Module):
def __init__(self, power=2):
super(Normalize, self).__init__()
self.power = power
def forward(self, x):
norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
out = x.div(norm + 1e-7)
return out
def zecon_loss_direct(Unet, x_in, y_in,t):
total_loss = 0
nce_layers = [0,2,5,8,11]
num_patches=256
l2norm = Normalize(2)
feat_q = Unet.forward_enc(x_in,t, nce_layers)
feat_k = Unet.forward_enc(y_in,t, nce_layers)
patch_ids = []
feat_k_pool = []
feat_q_pool = []
for feat_id, feat in enumerate(feat_k):
feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2) # [B,ch,h,w] > [B,h*w,ch]
patch_id = np.random.permutation(feat_reshape.shape[1])
patch_id = patch_id[:int(min(num_patches, patch_id.shape[0]))] # .to(patch_ids.device)
patch_id = torch.tensor(patch_id, dtype=torch.long, device=feat.device)
x_sample = feat_reshape[:, patch_id, :].flatten(0, 1) # reshape(-1, x.shape[1])
patch_ids.append(patch_id)
x_sample = l2norm(x_sample)
feat_k_pool.append(x_sample)
for feat_id, feat in enumerate(feat_q):
feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2) # [B,ch,h,w] > [B,h*w,ch]
patch_id = patch_ids[feat_id]
patch_id = torch.tensor(patch_id, dtype=torch.long, device=feat.device)
x_sample = feat_reshape[:, patch_id, :].flatten(0, 1) # reshape(-1, x.shape[1])
x_sample = l2norm(x_sample)
feat_q_pool.append(x_sample)
for f_q, f_k in zip(feat_q_pool, feat_k_pool):
loss = PatchNCELoss(f_q, f_k)
total_loss += loss.mean()
return total_loss.mean()
def PatchNCELoss(feat_q, feat_k, batch_size=1, nce_T = 0.07):
# feat_q : n_patch x dim
# feat_k : n_patch x dim
cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')
mask_dtype = torch.bool
num_patches = feat_q.shape[0]
dim = feat_q.shape[1]
feat_k = feat_k.detach()
# pos logit
l_pos = torch.bmm(
feat_q.view(num_patches, 1, -1), feat_k.view(num_patches, -1, 1))
l_pos = l_pos.view(num_patches, 1)
# reshape features to batch size
feat_q = feat_q.view(batch_size, -1, dim)
feat_k = feat_k.view(batch_size, -1, dim)
npatches = feat_q.size(1)
l_neg_curbatch = torch.bmm(feat_q, feat_k.transpose(2, 1))
# diagonal entries are similarity between same features, and hence meaningless.
# just fill the diagonal with very small number, which is exp(-10) and almost zero
diagonal = torch.eye(npatches, device=feat_q.device, dtype=mask_dtype)[None, :, :]
l_neg_curbatch.masked_fill_(diagonal, -10.0)
l_neg = l_neg_curbatch.view(-1, npatches)
out = torch.cat((l_pos, l_neg), dim=1) / nce_T
loss = cross_entropy_loss(out, torch.zeros(out.size(0), dtype=torch.long,
device=feat_q.device))
return loss
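# Usage sketch (illustrative): with f_q, f_k of shape [n_patch, dim], patch i
# of feat_q is paired with patch i of feat_k as the positive, and every other
# patch acts as a negative:
#   f_q = torch.randn(256, 512)
#   f_k = torch.randn(256, 512)
#   PatchNCELoss(f_q, f_k)  # -> shape [256], one loss value per patch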
| 4,600
| 29.879195
| 95
|
py
|
ZeCon
|
ZeCon-main/optimization/arguments.py
|
import argparse
def get_arguments() -> argparse.Namespace:
parser = argparse.ArgumentParser()
# Inputs
parser.add_argument(
"-p_t", "--prompt_tgt", type=str, help="The prompt for the desired editing", required=False
)
parser.add_argument(
"-p_s", "--prompt_src", type=str, help="The prompt from the source", required=False
)
parser.add_argument(
"-i", "--init_image", type=str, help="The path to the source image input", required=False
)
parser.add_argument(
"-r", "--ref_image", type=str, help="The path to the reference image input", required=False
)
parser.add_argument("--mask", type=str, help="The path to the mask to edit with", default=None)
# Diffusion
parser.add_argument(
"--timestep_respacing",
type=str,
help="How to respace the intervals of the diffusion process (number between 1 and 1000).",
default="100",
)
parser.add_argument(
"--skip_timesteps",
type=int,
help="How many steps to skip during the diffusion.",
default=25,
)
parser.add_argument(
"--local_clip_guided_diffusion",
help="Indicator for using local CLIP guided diffusion (for baseline comparison)",
action="store_true",
dest="local_clip_guided_diffusion",
)
parser.add_argument(
"--data",
type=str,
default='imagenet',
help='imagenet;celeba;ffhq'
)
parser.add_argument(
"--enc",
type=str,
default='imagenet',
help='(DDIB) diffusion model for forward step'
)
parser.add_argument(
"--dec",
type=str,
default='imagenet',
help='(DDIB) diffusion model for reverse step'
)
parser.add_argument(
"--model_output_size",
type=int,
help="The resolution of the outputs of the diffusion model",
default=256,
choices=[256, 512],
)
# Augmentations
parser.add_argument("--aug_num", type=int, help="The number of augmentation", default=8)
parser.add_argument("--aug_prob", type=float, help="The probability of augmentation", default=1)
parser.add_argument("--n_patch", type=int, help="The number of patches", default=32)
parser.add_argument("--patch_min", type=float, help="Mininum patch scale", default=0.01)
parser.add_argument("--patch_max", type=float, help="Maximum patch scale", default=0.05)
# Loss
parser.add_argument(
"--l_clip_global",
type=float,
help="",
default=0,
)
parser.add_argument(
"--l_clip_global_patch",
type=float,
help="Controls how much the image should look like the prompt",
default=0,
)
parser.add_argument(
"--l_clip_dir",
type=float,
help="",
default=0,
)
parser.add_argument(
"--l_clip_dir_patch",
type=float,
help="",
default=1000,
)
parser.add_argument(
"--range_lambda",
type=float,
help="Controls how far out of range RGB values are allowed to be",
default=50,
)
parser.add_argument(
"--l_vgg",
type=float,
help="",
default=0,
)
parser.add_argument(
"--l_mse",
type=float,
help="",
default=0,
)
parser.add_argument(
"--l_zecon",
type=float,
help="",
default=0,
)
parser.add_argument(
"--diffusion_type",
type=str,
help="forward_backward",
default="ddim_ddpm",
)
parser.add_argument(
"--eta",
type=float,
help="stochasticity of DDIM",
default=0.0,
)
# Misc
parser.add_argument("--seed", type=int, help="The random seed", default=404)
parser.add_argument("--gpu_id", type=int, help="The GPU ID", default=0)
parser.add_argument("--output_path", type=str, default="output")
parser.add_argument(
"-o",
"--output_file",
type=str,
help="The filename to save, must be png",
default="output.png",
)
parser.add_argument("--iterations_num", type=int, help="The number of iterations", default=5)
parser.add_argument(
"--batch_size",
type=int,
help="The number number if images to sample each diffusion process",
default=1,
)
parser.add_argument(
"--vid",
help="Indicator for saving the video of the diffusion process",
action="store_true",
dest="save_video",
)
parser.add_argument(
"--export_assets",
help="Indicator for saving raw assets of the prediction",
action="store_true",
dest="export_assets",
)
args = parser.parse_args()
return args
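# Invocation sketch (illustrative; the flags exist above, but the file paths
# and prompts are made-up placeholders):
#   python main.py -i inputs/horse.png -p_s "a photo of a horse" \
#       -p_t "a photo of a zebra" --data imagenet --skip_timesteps 25 \
#       --l_zecon 500 --l_mse 1000 --output_path output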
| 4,838
| 27.298246
| 100
|
py
|
ZeCon
|
ZeCon-main/optimization/constants.py
|
ASSETS_DIR_NAME = "assets"
RANKED_RESULTS_DIR = "ranked"
| 56
| 27.5
| 29
|
py
|
ZeCon
|
ZeCon-main/optimization/augmentations.py
|
import torch
from torch import nn
import kornia.augmentation as K
class ImageAugmentations(nn.Module):
def __init__(self, output_size, aug_prob, p_min, p_max, patch=False):
super().__init__()
self.output_size = output_size
self.aug_prob = aug_prob
self.patch = patch
self.augmentations = nn.Sequential(
K.RandomAffine(degrees=15, translate=0.1, p=aug_prob, padding_mode="border"), # type: ignore
K.RandomPerspective(0.7, p=aug_prob),
)
self.random_patch = K.RandomResizedCrop(size=(128,128), scale=(p_min,p_max))
self.avg_pool = nn.AdaptiveAvgPool2d((self.output_size, self.output_size))
def forward(self, input, num_patch=None, is_global=False):
"""Extents the input batch with augmentations
If the input is consists of images [I1, I2] the extended augmented output
will be [I1_resized, I2_resized, I1_aug1, I2_aug1, I1_aug2, I2_aug2 ...]
Args:
input ([type]): input batch of shape [batch, C, H, W]
Returns:
updated batch: of shape [batch * augmentations_number, C, H, W]
"""
if self.patch:
if is_global:
input = input.repeat(num_patch,1,1,1)
else:
input_patches = []
for i in range(num_patch):
if self.aug_prob > 0.0:
tmp = self.augmentations(self.random_patch(input))
else:
tmp = self.random_patch(input)
input_patches.append(tmp)
input = torch.cat(input_patches,dim=0)
else:
input_patches = []
for i in range(num_patch):
input_patches.append(self.augmentations(input))
input = torch.cat(input_patches,dim=0)
resized_images = self.avg_pool(input)
return resized_images
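# Shape sketch (illustrative): with patch=True and num_patch=32, a
# [1, 3, 256, 256] input yields 32 random 128x128 crops (optionally warped by
# the affine/perspective augmentations), each adaptively pooled to the CLIP
# resolution, i.e. an output of shape [32, 3, 224, 224] when output_size=224.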
| 1,974
| 34.267857
| 105
|
py
|
ZeCon
|
ZeCon-main/optimization/image_editor_zecon.py
|
import os
from pathlib import Path
from optimization.constants import ASSETS_DIR_NAME
from utils.metrics_accumulator import MetricsAccumulator
from numpy import random
from optimization.augmentations import ImageAugmentations as ImageAugmentations
from PIL import Image
import torch
from torchvision import transforms
import torchvision.transforms.functional as F
from torchvision.transforms import functional as TF
from torch.nn.functional import mse_loss
from optimization.losses import range_loss, d_clip_loss, d_clip_dir_loss, mse_loss, get_features, zecon_loss_direct
import numpy as np
from CLIP import clip
from guided_diffusion.guided_diffusion.script_util import (
create_model_and_diffusion,
model_and_diffusion_defaults,
)
from torchvision import models
from utils.visualization import show_edited_masked_image
import matplotlib.pyplot as plt
class ImageEditor:
def __init__(self, args) -> None:
self.args = args
os.makedirs(self.args.output_path, exist_ok=True)
if self.args.export_assets:
self.assets_path = Path(os.path.join(self.args.output_path, ASSETS_DIR_NAME))
os.makedirs(self.assets_path, exist_ok=True)
if self.args.seed is not None:
torch.manual_seed(self.args.seed)
np.random.seed(self.args.seed)
random.seed(self.args.seed)
self.model_config = model_and_diffusion_defaults(self.args)
# Load models
self.device = torch.device(
f"cuda:{self.args.gpu_id}" if torch.cuda.is_available() else "cpu"
)
print("Using device:", self.device)
if self.args.data == 'imagenet':
self.model, self.diffusion = create_model_and_diffusion(**self.model_config)
self.model.load_state_dict(
torch.load(
"./ckpt/256x256_diffusion_uncond.pt",
map_location="cpu",
)
)
elif self.args.data == 'ffhq':
self.model_config.update(
{
"num_channels": 128,
"num_head_channels": 64,
"num_res_blocks":1,
"attention_resolutions": "16",
"resblock_updown": True,
"use_fp16": False,
}
)
self.model, self.diffusion = create_model_and_diffusion(**self.model_config)
self.model.load_state_dict(
torch.load(
# "./ckpt/ffhq_10m.pt",
"./ckpt/ffhq_baseline.pt",
map_location="cpu",
)
)
self.model.requires_grad_(False).eval().to(self.device)
for name, param in self.model.named_parameters():
if "qkv" in name or "norm" in name or "proj" in name:
param.requires_grad_()
if self.model_config["use_fp16"]:
self.model.convert_to_fp16()
self.clip_model = (
clip.load("ViT-B/16", device=self.device, jit=False)[0].eval().requires_grad_(False)
)
self.clip_size = self.clip_model.visual.input_resolution
self.clip_normalize = transforms.Normalize(
mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]
)
self.image_augmentations = ImageAugmentations(224, self.args.aug_prob, self.args.patch_min, self.args.patch_max, patch=False)
self.patch_augmentations = ImageAugmentations(224, self.args.aug_prob, self.args.patch_min, self.args.patch_max, patch=True)
self.metrics_accumulator = MetricsAccumulator()
if self.args.l_vgg > 0:
self.vgg = models.vgg19(pretrained=True).features
self.vgg.to(self.device)
self.vgg.eval().requires_grad_(False)
self.vgg_normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
def unscale_timestep(self, t):
unscaled_timestep = (t * (self.diffusion.num_timesteps / 1000)).long()
return unscaled_timestep
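# Worked example (illustrative, assuming timestep_respacing "100"): the
# respaced diffusion has num_timesteps = 100, so a model-scale timestep of
# t = 500 unscales to 500 * (100 / 1000) = 50, the matching index into the
# shortened schedule.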
def clip_global_loss(self,x_in,text_embed):
clip_loss = torch.tensor(0)
augmented_input = self.image_augmentations(x_in,num_patch=self.args.n_patch).add(1).div(2)
clip_in = self.clip_normalize(augmented_input)
image_embeds = self.clip_model.encode_image(clip_in).float()
dists = d_clip_loss(image_embeds, text_embed)
for i in range(self.args.batch_size):
clip_loss = clip_loss + dists[i :: self.args.batch_size].mean()
return clip_loss
    def clip_global_patch_loss(self, x_in, text_embed):
        clip_loss = torch.tensor(0)
        augmented_input = self.patch_augmentations(x_in, num_patch=self.args.n_patch).add(1).div(2)
        clip_in = self.clip_normalize(augmented_input)
        image_embeds = self.clip_model.encode_image(clip_in).float()
        dists = d_clip_loss(image_embeds, text_embed)
        for i in range(self.args.batch_size):
            clip_loss = clip_loss + dists[i :: self.args.batch_size].mean()
        return clip_loss
    def clip_dir_loss(self, x_in, y_in, text_embed, text_y_embed):
        clip_loss = torch.tensor(0)
        augmented_input_x = self.image_augmentations(x_in, num_patch=self.args.n_patch).add(1).div(2)
        augmented_input_y = self.image_augmentations(y_in, num_patch=self.args.n_patch).add(1).div(2)
        clip_in_x = self.clip_normalize(augmented_input_x)
        clip_in_y = self.clip_normalize(augmented_input_y)
        image_embeds_x = self.clip_model.encode_image(clip_in_x).float()
        image_embeds_y = self.clip_model.encode_image(clip_in_y).float()
        dists = d_clip_dir_loss(image_embeds_x, image_embeds_y, text_embed, text_y_embed)
        for i in range(self.args.batch_size):
            clip_loss = clip_loss + dists[i :: self.args.batch_size].mean()
        return clip_loss
    def clip_dir_patch_loss(self, x_in, y_in, text_embed, text_y_embed):
        clip_loss = torch.tensor(0)
        augmented_input_x = self.patch_augmentations(x_in, num_patch=self.args.n_patch).add(1).div(2)
        augmented_input_y = self.patch_augmentations(y_in, num_patch=self.args.n_patch, is_global=True).add(1).div(2)
        clip_in_x = self.clip_normalize(augmented_input_x)
        clip_in_y = self.clip_normalize(augmented_input_y)
        image_embeds_x = self.clip_model.encode_image(clip_in_x).float()
        image_embeds_y = self.clip_model.encode_image(clip_in_y).float()
        dists = d_clip_dir_loss(image_embeds_x, image_embeds_y, text_embed, text_y_embed)
        for i in range(self.args.batch_size):
            clip_loss = clip_loss + dists[i :: self.args.batch_size].mean()
        return clip_loss
    def zecon_loss(self, x_in, y_in, t):
        # The timestep handed to the feature extractor is zeroed out, so the
        # contrastive features are computed at the t=0 (least-noised)
        # conditioning regardless of the current sampling step.
        loss = zecon_loss_direct(self.model, x_in, y_in, torch.zeros_like(t, device=self.device))
        return loss.mean()
    def mse_loss(self, x_in, y_in):
        # mse_loss here resolves to the module-level import from
        # optimization.losses.
        loss = mse_loss(x_in, y_in)
        return loss.mean()
    def vgg_loss(self, x_in, y_in):
content_features = get_features(self.vgg_normalize(x_in), self.vgg)
target_features = get_features(self.vgg_normalize(y_in), self.vgg)
loss = 0
loss += torch.mean((target_features['conv1_1'] - content_features['conv1_1']) ** 2)
loss += torch.mean((target_features['conv2_1'] - content_features['conv2_1']) ** 2)
# loss += torch.mean((target_features['conv4_2'] - content_features['conv4_2']) ** 2)
# loss += torch.mean((target_features['conv5_2'] - content_features['conv5_2']) ** 2)
return loss.mean()
def edit_image_by_prompt(self):
text_embed = self.clip_model.encode_text(
clip.tokenize(self.args.prompt_tgt).to(self.device)
).float()
text_y_embed = self.clip_model.encode_text(
clip.tokenize(self.args.prompt_src).to(self.device)
).float()
self.image_size = (self.model_config["image_size"], self.model_config["image_size"])
self.init_image_pil = Image.open(self.args.init_image).convert("RGB")
self.init_image_pil = self.init_image_pil.resize(self.image_size, Image.LANCZOS) # type: ignore
self.init_image = (
TF.to_tensor(self.init_image_pil).to(self.device).unsqueeze(0).mul(2).sub(1)
)
        visualization_path = Path(
            os.path.join(self.args.output_path, self.args.output_file)
        )
def cond_fn(x, t, y=None):
if self.args.prompt_tgt == "":
return torch.zeros_like(x)
with torch.enable_grad():
x = x.detach().requires_grad_()
t = self.unscale_timestep(t)
out = self.diffusion.p_mean_variance(
self.model, x, t, clip_denoised=False, model_kwargs={"y": y}
)
fac = self.diffusion.sqrt_one_minus_alphas_cumprod[t[0].item()]
x_in = out["pred_xstart"] * fac + x * (1 - fac)
loss = torch.tensor(0)
if self.args.l_clip_global != 0:
clip_loss = self.clip_global_loss(x_in, text_embed) * self.args.l_clip_global
loss = loss + clip_loss
self.metrics_accumulator.update_metric("clip_loss", clip_loss.item())
if self.args.l_clip_global_patch != 0:
clip_patch_loss = self.clip_global_patch_loss(x_in, text_embed) * self.args.l_clip_global_patch
loss = loss + clip_patch_loss
self.metrics_accumulator.update_metric("clip_patch_loss", clip_patch_loss.item())
if self.args.l_clip_dir != 0:
                    y_t = self.diffusion.q_sample(self.init_image, t)
y_in = self.init_image * fac + y_t * (1 - fac)
clip_dir_loss = self.clip_dir_loss(x_in, y_in, text_embed, text_y_embed) * self.args.l_clip_dir
loss = loss + clip_dir_loss
self.metrics_accumulator.update_metric("clip_dir_loss", clip_dir_loss.item())
if self.args.l_clip_dir_patch != 0:
                    y_t = self.diffusion.q_sample(self.init_image, t)
y_in = self.init_image * fac + y_t * (1 - fac)
clip_dir_patch_loss = self.clip_dir_patch_loss(x_in, y_in, text_embed, text_y_embed) * self.args.l_clip_dir_patch
loss = loss + clip_dir_patch_loss
self.metrics_accumulator.update_metric("clip_dir_patch_loss", clip_dir_patch_loss.item())
if self.args.l_zecon != 0:
                    y_t = self.diffusion.q_sample(self.init_image, t)
y_in = self.init_image * fac + y_t * (1 - fac)
zecon_loss = self.zecon_loss(x_in, y_in,t) * self.args.l_zecon
loss = loss + zecon_loss
self.metrics_accumulator.update_metric("zecon_loss", zecon_loss.item())
if self.args.l_mse != 0 and t.item() < 700:
                    y_t = self.diffusion.q_sample(self.init_image, t)
y_in = self.init_image * fac + y_t * (1 - fac)
mse_loss = self.mse_loss(x_in, y_in) * self.args.l_mse
loss = loss + mse_loss
self.metrics_accumulator.update_metric("mse_loss", mse_loss.item())
if self.args.l_vgg != 0 and t.item() < 800:
                    y_t = self.diffusion.q_sample(self.init_image, t)
y_in = self.init_image * fac + y_t * (1 - fac)
vgg_loss = self.vgg_loss(x_in, y_in) * self.args.l_vgg
loss = loss + vgg_loss
self.metrics_accumulator.update_metric("vgg_loss", vgg_loss.item())
if self.args.range_lambda != 0:
r_loss = range_loss(out["pred_xstart"]).sum() * self.args.range_lambda
loss = loss + r_loss
self.metrics_accumulator.update_metric("range_loss", r_loss.item())
return -torch.autograd.grad(loss, x)[0]
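        # Note on cond_fn: it returns -grad_x(total loss). In guided_diffusion,
        # the DDPM path (condition_mean) shifts the posterior mean by
        # variance * cond_fn(x, t), and the DDIM path rescales the same output
        # through condition_score, so each denoising step moves downhill on the
        # weighted sum of the losses above.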
save_image_interval = self.diffusion.num_timesteps // 5
for iteration_number in range(self.args.iterations_num):
fw = self.args.diffusion_type.split('_')[0]
bk = self.args.diffusion_type.split('_')[-1]
# Forward DDIM
if fw == 'ddim':
print("Forward Process to noise")
noise = self.diffusion.ddim_reverse_sample_loop(
self.model,
self.init_image,
clip_denoised=False,
skip_timesteps=self.args.skip_timesteps,
)
# Forward DDPM
elif fw == 'ddpm':
init_image_batch = torch.tile(self.init_image, dims=(self.args.batch_size, 1, 1, 1))
noise = self.diffusion.q_sample(
x_start=init_image_batch,
t=torch.tensor(self.diffusion.num_timesteps-int(self.args.skip_timesteps), dtype=torch.long, device=self.device),
noise=torch.randn((self.args.batch_size,3,self.model_config["image_size"],self.model_config["image_size"]), device=self.device),
)
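                # q_sample jumps straight to the start step via the closed form
                # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
                # so no iterative forward pass is needed for the DDPM branch.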
else:
                raise ValueError(f"unsupported forward diffusion type: {fw}")
# Reverse DDPM
if bk == 'ddpm':
samples = self.diffusion.p_sample_loop_progressive(
self.model,
(
self.args.batch_size,
3,
self.model_config["image_size"],
self.model_config["image_size"],
),
                    noise=noise if fw == 'ddim' else None,
clip_denoised=False,
model_kwargs={},
cond_fn=cond_fn,
progress=True,
skip_timesteps=self.args.skip_timesteps,
init_image=self.init_image,
)
# Reverse DDIM
elif bk == 'ddim':
samples = self.diffusion.ddim_sample_loop_progressive(
self.model,
(
self.args.batch_size,
3,
self.model_config["image_size"],
self.model_config["image_size"],
),
                    noise=noise,
clip_denoised=False,
model_kwargs={},
cond_fn=cond_fn,
progress=True,
skip_timesteps=self.args.skip_timesteps,
eta=self.args.eta,
)
else:
                raise ValueError(f"unsupported reverse diffusion type: {bk}")
intermediate_samples = [[] for i in range(self.args.batch_size)]
total_steps = self.diffusion.num_timesteps - self.args.skip_timesteps - 1
for j, sample in enumerate(samples):
should_save_image = j % save_image_interval == 0 or j == total_steps
if should_save_image or self.args.save_video:
self.metrics_accumulator.print_average_metric()
for b in range(self.args.batch_size):
pred_image = sample["pred_xstart"][b]
pred_image = pred_image.add(1).div(2).clamp(0, 1)
pred_image_pil = TF.to_pil_image(pred_image)
filename = Path(self.args.init_image).stem
visualization_path = visualization_path.with_name(
f"{filename}_{self.args.prompt_tgt}_{iteration_number}{visualization_path.suffix}"
)
if self.args.export_assets:
pred_path = self.assets_path / visualization_path.name
pred_image_pil.save(pred_path)
intermediate_samples[b].append(pred_image_pil)
if should_save_image:
show_edited_masked_image(
title=self.args.prompt_tgt,
source_image=self.init_image_pil,
edited_image=pred_image_pil,
path=visualization_path,
)
visualization_path2 = str(visualization_path).replace('.png','_output.png')
pred_image_arr = np.array(pred_image_pil)
plt.imsave(visualization_path2, pred_image_arr)
| 17,010
| 43.648294
| 152
|
py
|
ZeCon
|
ZeCon-main/CLIP/setup.py
|
import os
import pkg_resources
from setuptools import setup, find_packages
setup(
name="clip",
py_modules=["clip"],
version="1.0",
description="",
author="OpenAI",
packages=find_packages(exclude=["tests*"]),
install_requires=[
str(r)
for r in pkg_resources.parse_requirements(
open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
)
],
include_package_data=True,
extras_require={'dev': ['pytest']},
)
| 491
| 21.363636
| 77
|
py
|
ZeCon
|
ZeCon-main/CLIP/clip/simple_tokenizer.py
|
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a signficant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
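# Hedged usage sketch (the local names below are illustrative): the table maps
# every raw byte to a printable unicode character, so any UTF-8 string
# round-trips losslessly through the byte encoder:
#   b2u = bytes_to_unicode()
#   u2b = {v: k for k, v in b2u.items()}
#   encoded = ''.join(b2u[b] for b in "café".encode("utf-8"))
#   bytes(u2b[c] for c in encoded).decode("utf-8")  # -> "café"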
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
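# For example, get_pairs(('h', 'e', 'l', 'l', 'o</w>')) returns
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}.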
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:  # `first` not found in the remainder of the word
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
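    # Illustrative walk-through (the exact merges depend on the ranks loaded
    # from bpe_simple_vocab_16e6.txt.gz): "lower" is first split into
    # ('l', 'o', 'w', 'e', 'r</w>'), then the lowest-ranked adjacent pair is
    # merged repeatedly, e.g. ('l', 'o', ...) -> ('lo', ...), until no ranked
    # pair remains; the final symbols are joined with spaces and cached.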
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
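    # Hedged round-trip sketch: encode lowercases and splits on the regex, and
    # decode maps ids back through byte_decoder, turning '</w>' into spaces:
    #   tok = SimpleTokenizer()
    #   tok.decode(tok.encode("Hello world!"))  # -> roughly "hello world ! "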
| 4,628
| 33.804511
| 144
|
py
|
ZeCon
|
ZeCon-main/CLIP/clip/clip.py
|
import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
# compare parsed versions (a plain string-list comparison mis-orders e.g. "1.10")
from pkg_resources import packaging

if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
    warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
"RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
}
def _download(url: str, root: str):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
        raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match")
return download_target
def _transform(n_px):
return Compose([
Resize(n_px, interpolation=BICUBIC),
CenterCrop(n_px),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit: bool = False, download_root: str = None):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model or more hackable non-JIT model (default).
download_root: str
path to download the model files; by default, it uses "~/.cache/clip"
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
model = build_model(state_dict or model.state_dict()).to(device)
if str(device) == "cpu":
model.float()
return model, _transform(model.visual.input_resolution)
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform(model.input_resolution.item())
def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> torch.LongTensor:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
truncate: bool
Whether to truncate the text in case its encoding is longer than the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate:
tokens = tokens[:context_length]
tokens[-1] = eot_token
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
return result
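# Example (assumes the default context_length of 77):
#   tokens = tokenize(["a diagram", "a dog"])  # LongTensor of shape [2, 77]
# Each row is [<|startoftext|>, ..., <|endoftext|>, 0, 0, ...], zero-padded.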
| 8,433
| 36.484444
| 149
|
py
|
ZeCon
|
ZeCon-main/CLIP/clip/model.py
|
from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
super().__init__()
self.output_dim = output_dim
self.input_resolution = input_resolution
# the 3-layer stem
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.avgpool = nn.AvgPool2d(2)
self.relu = nn.ReLU(inplace=True)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
def stem(x):
for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
x = self.relu(bn(conv(x)))
x = self.avgpool(x)
return x
x = x.type(self.conv1.weight.dtype)
x = stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor):
return self.resblocks(x)
class VisionTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if self.proj is not None:
x = x @ self.proj
return x
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
# vision
image_resolution: int,
vision_layers: Union[Tuple[int, int, int, int], int],
vision_width: int,
vision_patch_size: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int
):
super().__init__()
self.context_length = context_length
if isinstance(vision_layers, (tuple, list)):
vision_heads = vision_width * 32 // 64
self.visual = ModifiedResNet(
layers=vision_layers,
output_dim=embed_dim,
heads=vision_heads,
input_resolution=image_resolution,
width=vision_width
)
else:
vision_heads = vision_width // 64
self.visual = VisionTransformer(
input_resolution=image_resolution,
patch_size=vision_patch_size,
width=vision_width,
layers=vision_layers,
heads=vision_heads,
output_dim=embed_dim
)
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask()
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
if isinstance(self.visual, ModifiedResNet):
if self.visual.attnpool is not None:
std = self.visual.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
        mask.triu_(1)  # zero the diagonal and below; the remaining -inf entries block attention to future tokens
return mask
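        # For context_length == 3 the additive mask is (0 = attend, -inf = blocked):
        #   [[0., -inf, -inf],
        #    [0.,   0., -inf],
        #    [0.,   0.,   0.]]
        # i.e. text token i may attend only to tokens 0..i.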
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_image(self, image):
return self.visual(image.type(self.dtype))
def encode_text(self, text):
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
return x
def forward(self, image, text):
image_features = self.encode_image(image)
text_features = self.encode_text(text)
# normalized features
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logit_scale * text_features @ image_features.t()
# shape = [global_batch_size, global_batch_size]
return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def build_model(state_dict: dict):
vit = "visual.proj" in state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
    transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks")))
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
convert_weights(model)
model.load_state_dict(state_dict)
return model.eval()
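# Sketch of the shape inference above for the released ViT-B/16 weights:
# visual.conv1.weight has shape [768, 3, 16, 16] (patch size 16) and
# visual.positional_embedding has 14*14 + 1 = 197 rows, so
# image_resolution = 16 * 14 = 224.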
| 17,242
| 38.822171
| 178
|
py
|
ZeCon
|
ZeCon-main/CLIP/clip/__init__.py
|
from .clip import *
| 20
| 9.5
| 19
|
py
|
ZeCon
|
ZeCon-main/CLIP/tests/test_consistency.py
|
import numpy as np
import pytest
import torch
from PIL import Image
import clip
@pytest.mark.parametrize('model_name', clip.available_models())
def test_consistency(model_name):
device = "cpu"
jit_model, transform = clip.load(model_name, device=device, jit=True)
py_model, _ = clip.load(model_name, device=device, jit=False)
image = transform(Image.open("CLIP.png")).unsqueeze(0).to(device)
text = clip.tokenize(["a diagram", "a dog", "a cat"]).to(device)
with torch.no_grad():
logits_per_image, _ = jit_model(image, text)
jit_probs = logits_per_image.softmax(dim=-1).cpu().numpy()
logits_per_image, _ = py_model(image, text)
py_probs = logits_per_image.softmax(dim=-1).cpu().numpy()
assert np.allclose(jit_probs, py_probs, atol=0.01, rtol=0.1)
| 812
| 30.269231
| 73
|
py
|
ZeCon
|
ZeCon-main/guided_diffusion/setup.py
|
from setuptools import setup
setup(
name="guided-diffusion",
py_modules=["guided_diffusion"],
install_requires=["blobfile>=1.0.5", "torch", "tqdm"],
)
| 164
| 19.625
| 58
|
py
|
ZeCon
|
ZeCon-main/guided_diffusion/scripts/image_train.py
|
"""
Train a diffusion model on images.
"""
import argparse
from guided_diffusion import dist_util, logger
from guided_diffusion.image_datasets import load_data
from guided_diffusion.resample import create_named_schedule_sampler
from guided_diffusion.script_util import (
model_and_diffusion_defaults,
create_model_and_diffusion,
args_to_dict,
add_dict_to_argparser,
)
from guided_diffusion.train_util import TrainLoop
def main():
args = create_argparser().parse_args()
dist_util.setup_dist()
logger.configure()
logger.log("creating model and diffusion...")
model, diffusion = create_model_and_diffusion(
**args_to_dict(args, model_and_diffusion_defaults().keys())
)
model.to(dist_util.dev())
schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion)
logger.log("creating data loader...")
data = load_data(
data_dir=args.data_dir,
batch_size=args.batch_size,
image_size=args.image_size,
class_cond=args.class_cond,
)
logger.log("training...")
TrainLoop(
model=model,
diffusion=diffusion,
data=data,
batch_size=args.batch_size,
microbatch=args.microbatch,
lr=args.lr,
ema_rate=args.ema_rate,
log_interval=args.log_interval,
save_interval=args.save_interval,
resume_checkpoint=args.resume_checkpoint,
use_fp16=args.use_fp16,
fp16_scale_growth=args.fp16_scale_growth,
schedule_sampler=schedule_sampler,
weight_decay=args.weight_decay,
lr_anneal_steps=args.lr_anneal_steps,
).run_loop()
def create_argparser():
defaults = dict(
data_dir="",
schedule_sampler="uniform",
lr=1e-4,
weight_decay=0.0,
lr_anneal_steps=0,
batch_size=1,
microbatch=-1, # -1 disables microbatches
ema_rate="0.9999", # comma-separated list of EMA values
log_interval=10,
save_interval=10000,
resume_checkpoint="",
use_fp16=False,
fp16_scale_growth=1e-3,
)
defaults.update(model_and_diffusion_defaults())
parser = argparse.ArgumentParser()
add_dict_to_argparser(parser, defaults)
return parser
if __name__ == "__main__":
main()
| 2,298
| 26.369048
| 86
|
py
|
ZeCon
|
ZeCon-main/guided_diffusion/scripts/image_sample.py
|
"""
Generate a large batch of image samples from a model and save them as a large
numpy array. This can be used to produce samples for FID evaluation.
"""
import argparse
import os
import numpy as np
import torch as th
import torch.distributed as dist
from guided_diffusion import dist_util, logger
from guided_diffusion.script_util import (
NUM_CLASSES,
model_and_diffusion_defaults,
create_model_and_diffusion,
add_dict_to_argparser,
args_to_dict,
)
def main():
args = create_argparser().parse_args()
dist_util.setup_dist()
logger.configure()
logger.log("creating model and diffusion...")
model, diffusion = create_model_and_diffusion(
**args_to_dict(args, model_and_diffusion_defaults().keys())
)
model.load_state_dict(
dist_util.load_state_dict(args.model_path, map_location="cpu")
)
model.to(dist_util.dev())
if args.use_fp16:
model.convert_to_fp16()
model.eval()
logger.log("sampling...")
all_images = []
all_labels = []
while len(all_images) * args.batch_size < args.num_samples:
model_kwargs = {}
if args.class_cond:
classes = th.randint(
low=0, high=NUM_CLASSES, size=(args.batch_size,), device=dist_util.dev()
)
model_kwargs["y"] = classes
sample_fn = (
diffusion.p_sample_loop if not args.use_ddim else diffusion.ddim_sample_loop
)
sample = sample_fn(
model,
(args.batch_size, 3, args.image_size, args.image_size),
clip_denoised=args.clip_denoised,
model_kwargs=model_kwargs,
)
sample = ((sample + 1) * 127.5).clamp(0, 255).to(th.uint8)
sample = sample.permute(0, 2, 3, 1)
sample = sample.contiguous()
gathered_samples = [th.zeros_like(sample) for _ in range(dist.get_world_size())]
dist.all_gather(gathered_samples, sample) # gather not supported with NCCL
all_images.extend([sample.cpu().numpy() for sample in gathered_samples])
if args.class_cond:
gathered_labels = [
th.zeros_like(classes) for _ in range(dist.get_world_size())
]
dist.all_gather(gathered_labels, classes)
all_labels.extend([labels.cpu().numpy() for labels in gathered_labels])
logger.log(f"created {len(all_images) * args.batch_size} samples")
arr = np.concatenate(all_images, axis=0)
arr = arr[: args.num_samples]
if args.class_cond:
label_arr = np.concatenate(all_labels, axis=0)
label_arr = label_arr[: args.num_samples]
if dist.get_rank() == 0:
shape_str = "x".join([str(x) for x in arr.shape])
out_path = os.path.join(logger.get_dir(), f"samples_{shape_str}.npz")
logger.log(f"saving to {out_path}")
if args.class_cond:
np.savez(out_path, arr, label_arr)
else:
np.savez(out_path, arr)
dist.barrier()
logger.log("sampling complete")
def create_argparser():
defaults = dict(
clip_denoised=True,
num_samples=10000,
batch_size=16,
use_ddim=False,
model_path="",
)
defaults.update(model_and_diffusion_defaults())
parser = argparse.ArgumentParser()
add_dict_to_argparser(parser, defaults)
return parser
if __name__ == "__main__":
main()
| 3,398
| 30.183486
| 88
|
py
|
ZeCon
|
ZeCon-main/guided_diffusion/scripts/super_res_sample.py
|
"""
Generate a large batch of samples from a super resolution model, given a batch
of samples from a regular model from image_sample.py.
"""
import argparse
import os
import blobfile as bf
import numpy as np
import torch as th
import torch.distributed as dist
from guided_diffusion import dist_util, logger
from guided_diffusion.script_util import (
sr_model_and_diffusion_defaults,
sr_create_model_and_diffusion,
args_to_dict,
add_dict_to_argparser,
)
def main():
args = create_argparser().parse_args()
dist_util.setup_dist()
logger.configure()
logger.log("creating model...")
model, diffusion = sr_create_model_and_diffusion(
**args_to_dict(args, sr_model_and_diffusion_defaults().keys())
)
model.load_state_dict(
dist_util.load_state_dict(args.model_path, map_location="cpu")
)
model.to(dist_util.dev())
if args.use_fp16:
model.convert_to_fp16()
model.eval()
logger.log("loading data...")
data = load_data_for_worker(args.base_samples, args.batch_size, args.class_cond)
logger.log("creating samples...")
all_images = []
while len(all_images) * args.batch_size < args.num_samples:
model_kwargs = next(data)
model_kwargs = {k: v.to(dist_util.dev()) for k, v in model_kwargs.items()}
sample = diffusion.p_sample_loop(
model,
(args.batch_size, 3, args.large_size, args.large_size),
clip_denoised=args.clip_denoised,
model_kwargs=model_kwargs,
)
sample = ((sample + 1) * 127.5).clamp(0, 255).to(th.uint8)
sample = sample.permute(0, 2, 3, 1)
sample = sample.contiguous()
all_samples = [th.zeros_like(sample) for _ in range(dist.get_world_size())]
dist.all_gather(all_samples, sample) # gather not supported with NCCL
for sample in all_samples:
all_images.append(sample.cpu().numpy())
logger.log(f"created {len(all_images) * args.batch_size} samples")
arr = np.concatenate(all_images, axis=0)
arr = arr[: args.num_samples]
if dist.get_rank() == 0:
shape_str = "x".join([str(x) for x in arr.shape])
out_path = os.path.join(logger.get_dir(), f"samples_{shape_str}.npz")
logger.log(f"saving to {out_path}")
np.savez(out_path, arr)
dist.barrier()
logger.log("sampling complete")
def load_data_for_worker(base_samples, batch_size, class_cond):
with bf.BlobFile(base_samples, "rb") as f:
obj = np.load(f)
image_arr = obj["arr_0"]
if class_cond:
label_arr = obj["arr_1"]
rank = dist.get_rank()
num_ranks = dist.get_world_size()
buffer = []
label_buffer = []
while True:
for i in range(rank, len(image_arr), num_ranks):
buffer.append(image_arr[i])
if class_cond:
label_buffer.append(label_arr[i])
if len(buffer) == batch_size:
batch = th.from_numpy(np.stack(buffer)).float()
batch = batch / 127.5 - 1.0
batch = batch.permute(0, 3, 1, 2)
res = dict(low_res=batch)
if class_cond:
res["y"] = th.from_numpy(np.stack(label_buffer))
yield res
buffer, label_buffer = [], []
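# Each rank strides through the sample array (indices rank, rank + world_size,
# ...), so with two ranks, rank 0 yields samples 0, 2, 4, ... and rank 1
# yields 1, 3, 5, ..., grouped into batches of batch_size.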
def create_argparser():
defaults = dict(
clip_denoised=True,
num_samples=10000,
batch_size=16,
use_ddim=False,
base_samples="",
model_path="",
)
defaults.update(sr_model_and_diffusion_defaults())
parser = argparse.ArgumentParser()
add_dict_to_argparser(parser, defaults)
return parser
if __name__ == "__main__":
main()
| 3,725
| 30.05
| 84
|
py
|
ZeCon
|
ZeCon-main/guided_diffusion/scripts/classifier_sample.py
|
"""
Like image_sample.py, but use a noisy image classifier to guide the sampling
process towards more realistic images.
"""
import argparse
import os
import numpy as np
import torch as th
import torch.distributed as dist
import torch.nn.functional as F
from guided_diffusion import dist_util, logger
from guided_diffusion.script_util import (
NUM_CLASSES,
model_and_diffusion_defaults,
classifier_defaults,
create_model_and_diffusion,
create_classifier,
add_dict_to_argparser,
args_to_dict,
)
def main():
args = create_argparser().parse_args()
dist_util.setup_dist()
logger.configure()
logger.log("creating model and diffusion...")
model, diffusion = create_model_and_diffusion(
**args_to_dict(args, model_and_diffusion_defaults().keys())
)
model.load_state_dict(
dist_util.load_state_dict(args.model_path, map_location="cpu")
)
model.to(dist_util.dev())
if args.use_fp16:
model.convert_to_fp16()
model.eval()
logger.log("loading classifier...")
classifier = create_classifier(**args_to_dict(args, classifier_defaults().keys()))
classifier.load_state_dict(
dist_util.load_state_dict(args.classifier_path, map_location="cpu")
)
classifier.to(dist_util.dev())
if args.classifier_use_fp16:
classifier.convert_to_fp16()
classifier.eval()
def cond_fn(x, t, y=None):
assert y is not None
with th.enable_grad():
x_in = x.detach().requires_grad_(True)
logits = classifier(x_in, t)
log_probs = F.log_softmax(logits, dim=-1)
selected = log_probs[range(len(logits)), y.view(-1)]
return th.autograd.grad(selected.sum(), x_in)[0] * args.classifier_scale
def model_fn(x, t, y=None):
assert y is not None
return model(x, t, y if args.class_cond else None)
logger.log("sampling...")
all_images = []
all_labels = []
while len(all_images) * args.batch_size < args.num_samples:
model_kwargs = {}
classes = th.randint(
low=0, high=NUM_CLASSES, size=(args.batch_size,), device=dist_util.dev()
)
model_kwargs["y"] = classes
sample_fn = (
diffusion.p_sample_loop if not args.use_ddim else diffusion.ddim_sample_loop
)
sample = sample_fn(
model_fn,
(args.batch_size, 3, args.image_size, args.image_size),
clip_denoised=args.clip_denoised,
model_kwargs=model_kwargs,
cond_fn=cond_fn,
device=dist_util.dev(),
)
sample = ((sample + 1) * 127.5).clamp(0, 255).to(th.uint8)
sample = sample.permute(0, 2, 3, 1)
sample = sample.contiguous()
gathered_samples = [th.zeros_like(sample) for _ in range(dist.get_world_size())]
dist.all_gather(gathered_samples, sample) # gather not supported with NCCL
all_images.extend([sample.cpu().numpy() for sample in gathered_samples])
gathered_labels = [th.zeros_like(classes) for _ in range(dist.get_world_size())]
dist.all_gather(gathered_labels, classes)
all_labels.extend([labels.cpu().numpy() for labels in gathered_labels])
logger.log(f"created {len(all_images) * args.batch_size} samples")
arr = np.concatenate(all_images, axis=0)
arr = arr[: args.num_samples]
label_arr = np.concatenate(all_labels, axis=0)
label_arr = label_arr[: args.num_samples]
if dist.get_rank() == 0:
shape_str = "x".join([str(x) for x in arr.shape])
out_path = os.path.join(logger.get_dir(), f"samples_{shape_str}.npz")
logger.log(f"saving to {out_path}")
np.savez(out_path, arr, label_arr)
dist.barrier()
logger.log("sampling complete")
def create_argparser():
defaults = dict(
clip_denoised=True,
num_samples=10000,
batch_size=16,
use_ddim=False,
model_path="",
classifier_path="",
classifier_scale=1.0,
)
defaults.update(model_and_diffusion_defaults())
defaults.update(classifier_defaults())
parser = argparse.ArgumentParser()
add_dict_to_argparser(parser, defaults)
return parser
if __name__ == "__main__":
main()
| 4,266
| 31.325758
| 88
|
py
|
ZeCon
|
ZeCon-main/guided_diffusion/scripts/classifier_train.py
|
"""
Train a noised image classifier on ImageNet.
"""
import argparse
import os
import blobfile as bf
import torch as th
import torch.distributed as dist
import torch.nn.functional as F
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.optim import AdamW
from guided_diffusion import dist_util, logger
from guided_diffusion.fp16_util import MixedPrecisionTrainer
from guided_diffusion.image_datasets import load_data
from guided_diffusion.resample import create_named_schedule_sampler
from guided_diffusion.script_util import (
add_dict_to_argparser,
args_to_dict,
classifier_and_diffusion_defaults,
create_classifier_and_diffusion,
)
from guided_diffusion.train_util import parse_resume_step_from_filename, log_loss_dict
def main():
args = create_argparser().parse_args()
dist_util.setup_dist()
logger.configure()
logger.log("creating model and diffusion...")
model, diffusion = create_classifier_and_diffusion(
**args_to_dict(args, classifier_and_diffusion_defaults().keys())
)
model.to(dist_util.dev())
if args.noised:
schedule_sampler = create_named_schedule_sampler(
args.schedule_sampler, diffusion
)
resume_step = 0
if args.resume_checkpoint:
resume_step = parse_resume_step_from_filename(args.resume_checkpoint)
if dist.get_rank() == 0:
logger.log(
f"loading model from checkpoint: {args.resume_checkpoint}... at {resume_step} step"
)
model.load_state_dict(
dist_util.load_state_dict(
args.resume_checkpoint, map_location=dist_util.dev()
)
)
# Needed for creating correct EMAs and fp16 parameters.
dist_util.sync_params(model.parameters())
mp_trainer = MixedPrecisionTrainer(
model=model, use_fp16=args.classifier_use_fp16, initial_lg_loss_scale=16.0
)
model = DDP(
model,
device_ids=[dist_util.dev()],
output_device=dist_util.dev(),
broadcast_buffers=False,
bucket_cap_mb=128,
find_unused_parameters=False,
)
logger.log("creating data loader...")
data = load_data(
data_dir=args.data_dir,
batch_size=args.batch_size,
image_size=args.image_size,
class_cond=True,
random_crop=True,
)
if args.val_data_dir:
val_data = load_data(
data_dir=args.val_data_dir,
batch_size=args.batch_size,
image_size=args.image_size,
class_cond=True,
)
else:
val_data = None
    logger.log("creating optimizer...")
opt = AdamW(mp_trainer.master_params, lr=args.lr, weight_decay=args.weight_decay)
if args.resume_checkpoint:
opt_checkpoint = bf.join(
bf.dirname(args.resume_checkpoint), f"opt{resume_step:06}.pt"
)
logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
opt.load_state_dict(
dist_util.load_state_dict(opt_checkpoint, map_location=dist_util.dev())
)
logger.log("training classifier model...")
def forward_backward_log(data_loader, prefix="train"):
batch, extra = next(data_loader)
labels = extra["y"].to(dist_util.dev())
batch = batch.to(dist_util.dev())
# Noisy images
if args.noised:
t, _ = schedule_sampler.sample(batch.shape[0], dist_util.dev())
batch = diffusion.q_sample(batch, t)
else:
t = th.zeros(batch.shape[0], dtype=th.long, device=dist_util.dev())
for i, (sub_batch, sub_labels, sub_t) in enumerate(
split_microbatches(args.microbatch, batch, labels, t)
):
logits = model(sub_batch, timesteps=sub_t)
loss = F.cross_entropy(logits, sub_labels, reduction="none")
losses = {}
losses[f"{prefix}_loss"] = loss.detach()
losses[f"{prefix}_acc@1"] = compute_top_k(
logits, sub_labels, k=1, reduction="none"
)
losses[f"{prefix}_acc@5"] = compute_top_k(
logits, sub_labels, k=5, reduction="none"
)
log_loss_dict(diffusion, sub_t, losses)
del losses
loss = loss.mean()
if loss.requires_grad:
if i == 0:
mp_trainer.zero_grad()
mp_trainer.backward(loss * len(sub_batch) / len(batch))
for step in range(args.iterations - resume_step):
logger.logkv("step", step + resume_step)
logger.logkv(
"samples",
(step + resume_step + 1) * args.batch_size * dist.get_world_size(),
)
if args.anneal_lr:
set_annealed_lr(opt, args.lr, (step + resume_step) / args.iterations)
forward_backward_log(data)
mp_trainer.optimize(opt)
if val_data is not None and not step % args.eval_interval:
with th.no_grad():
with model.no_sync():
model.eval()
forward_backward_log(val_data, prefix="val")
model.train()
if not step % args.log_interval:
logger.dumpkvs()
if (
step
and dist.get_rank() == 0
and not (step + resume_step) % args.save_interval
):
logger.log("saving model...")
save_model(mp_trainer, opt, step + resume_step)
if dist.get_rank() == 0:
logger.log("saving model...")
save_model(mp_trainer, opt, step + resume_step)
dist.barrier()
def set_annealed_lr(opt, base_lr, frac_done):
lr = base_lr * (1 - frac_done)
for param_group in opt.param_groups:
param_group["lr"] = lr
def save_model(mp_trainer, opt, step):
if dist.get_rank() == 0:
th.save(
mp_trainer.master_params_to_state_dict(mp_trainer.master_params),
os.path.join(logger.get_dir(), f"model{step:06d}.pt"),
)
th.save(opt.state_dict(), os.path.join(logger.get_dir(), f"opt{step:06d}.pt"))
def compute_top_k(logits, labels, k, reduction="mean"):
_, top_ks = th.topk(logits, k, dim=-1)
if reduction == "mean":
return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item()
elif reduction == "none":
return (top_ks == labels[:, None]).float().sum(dim=-1)
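# For example, with logits [[0.1, 0.9], [0.8, 0.2]] and labels [1, 0],
# compute_top_k(logits, labels, k=1) returns 1.0: both top-1 predictions hit.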
def split_microbatches(microbatch, *args):
bs = len(args[0])
if microbatch == -1 or microbatch >= bs:
yield tuple(args)
else:
for i in range(0, bs, microbatch):
yield tuple(x[i : i + microbatch] if x is not None else None for x in args)
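# For example, split_microbatches(2, batch) on a batch of 5 yields chunks of
# sizes 2, 2, and 1, while microbatch=-1 (the "disabled" sentinel) yields the
# whole batch once.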
def create_argparser():
defaults = dict(
data_dir="",
val_data_dir="",
noised=True,
iterations=150000,
lr=3e-4,
weight_decay=0.0,
anneal_lr=False,
batch_size=4,
microbatch=-1,
schedule_sampler="uniform",
resume_checkpoint="",
log_interval=10,
eval_interval=5,
save_interval=10000,
)
defaults.update(classifier_and_diffusion_defaults())
parser = argparse.ArgumentParser()
add_dict_to_argparser(parser, defaults)
return parser
if __name__ == "__main__":
main()
| 7,313
| 31.220264
| 99
|
py
|
ZeCon
|
ZeCon-main/guided_diffusion/scripts/image_nll.py
|
"""
Approximate the bits/dimension for an image model.
"""
import argparse
import os
import numpy as np
import torch.distributed as dist
from guided_diffusion import dist_util, logger
from guided_diffusion.image_datasets import load_data
from guided_diffusion.script_util import (
model_and_diffusion_defaults,
create_model_and_diffusion,
add_dict_to_argparser,
args_to_dict,
)
def main():
args = create_argparser().parse_args()
dist_util.setup_dist()
logger.configure()
logger.log("creating model and diffusion...")
model, diffusion = create_model_and_diffusion(
**args_to_dict(args, model_and_diffusion_defaults().keys())
)
model.load_state_dict(
dist_util.load_state_dict(args.model_path, map_location="cpu")
)
model.to(dist_util.dev())
model.eval()
logger.log("creating data loader...")
data = load_data(
data_dir=args.data_dir,
batch_size=args.batch_size,
image_size=args.image_size,
class_cond=args.class_cond,
deterministic=True,
)
logger.log("evaluating...")
run_bpd_evaluation(model, diffusion, data, args.num_samples, args.clip_denoised)
def run_bpd_evaluation(model, diffusion, data, num_samples, clip_denoised):
all_bpd = []
all_metrics = {"vb": [], "mse": [], "xstart_mse": []}
num_complete = 0
while num_complete < num_samples:
batch, model_kwargs = next(data)
batch = batch.to(dist_util.dev())
model_kwargs = {k: v.to(dist_util.dev()) for k, v in model_kwargs.items()}
minibatch_metrics = diffusion.calc_bpd_loop(
model, batch, clip_denoised=clip_denoised, model_kwargs=model_kwargs
)
for key, term_list in all_metrics.items():
terms = minibatch_metrics[key].mean(dim=0) / dist.get_world_size()
dist.all_reduce(terms)
term_list.append(terms.detach().cpu().numpy())
total_bpd = minibatch_metrics["total_bpd"]
total_bpd = total_bpd.mean() / dist.get_world_size()
dist.all_reduce(total_bpd)
all_bpd.append(total_bpd.item())
num_complete += dist.get_world_size() * batch.shape[0]
logger.log(f"done {num_complete} samples: bpd={np.mean(all_bpd)}")
if dist.get_rank() == 0:
for name, terms in all_metrics.items():
out_path = os.path.join(logger.get_dir(), f"{name}_terms.npz")
logger.log(f"saving {name} terms to {out_path}")
np.savez(out_path, np.mean(np.stack(terms), axis=0))
dist.barrier()
logger.log("evaluation complete")
def create_argparser():
defaults = dict(
data_dir="", clip_denoised=True, num_samples=1000, batch_size=1, model_path=""
)
defaults.update(model_and_diffusion_defaults())
parser = argparse.ArgumentParser()
add_dict_to_argparser(parser, defaults)
return parser
if __name__ == "__main__":
main()
| 2,934
| 29.257732
| 86
|
py
|
ZeCon
|
ZeCon-main/guided_diffusion/scripts/super_res_train.py
|
"""
Train a super-resolution model.
"""
import argparse
import torch.nn.functional as F
from guided_diffusion import dist_util, logger
from guided_diffusion.image_datasets import load_data
from guided_diffusion.resample import create_named_schedule_sampler
from guided_diffusion.script_util import (
sr_model_and_diffusion_defaults,
sr_create_model_and_diffusion,
args_to_dict,
add_dict_to_argparser,
)
from guided_diffusion.train_util import TrainLoop
def main():
args = create_argparser().parse_args()
dist_util.setup_dist()
logger.configure()
logger.log("creating model...")
model, diffusion = sr_create_model_and_diffusion(
**args_to_dict(args, sr_model_and_diffusion_defaults().keys())
)
model.to(dist_util.dev())
schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion)
logger.log("creating data loader...")
data = load_superres_data(
args.data_dir,
args.batch_size,
large_size=args.large_size,
small_size=args.small_size,
class_cond=args.class_cond,
)
logger.log("training...")
TrainLoop(
model=model,
diffusion=diffusion,
data=data,
batch_size=args.batch_size,
microbatch=args.microbatch,
lr=args.lr,
ema_rate=args.ema_rate,
log_interval=args.log_interval,
save_interval=args.save_interval,
resume_checkpoint=args.resume_checkpoint,
use_fp16=args.use_fp16,
fp16_scale_growth=args.fp16_scale_growth,
schedule_sampler=schedule_sampler,
weight_decay=args.weight_decay,
lr_anneal_steps=args.lr_anneal_steps,
).run_loop()
def load_superres_data(data_dir, batch_size, large_size, small_size, class_cond=False):
data = load_data(
data_dir=data_dir,
batch_size=batch_size,
image_size=large_size,
class_cond=class_cond,
)
for large_batch, model_kwargs in data:
model_kwargs["low_res"] = F.interpolate(large_batch, small_size, mode="area")
yield large_batch, model_kwargs
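# Illustrative shape check for the conditioning step above (batch and sizes
# are arbitrary): area interpolation shrinks each image to small_size, which
# forward passes later receive under the "low_res" key.
import torch

_large = torch.randn(4, 3, 256, 256)
_low_res = F.interpolate(_large, 64, mode="area")  # e.g. small_size = 64
assert _low_res.shape == (4, 3, 64, 64)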
def create_argparser():
defaults = dict(
data_dir="",
schedule_sampler="uniform",
lr=1e-4,
weight_decay=0.0,
lr_anneal_steps=0,
batch_size=1,
microbatch=-1,
ema_rate="0.9999",
log_interval=10,
save_interval=10000,
resume_checkpoint="",
use_fp16=False,
fp16_scale_growth=1e-3,
)
defaults.update(sr_model_and_diffusion_defaults())
parser = argparse.ArgumentParser()
add_dict_to_argparser(parser, defaults)
return parser
if __name__ == "__main__":
main()
| 2,695 | 26.232323 | 87 | py |
| ZeCon | ZeCon-main/guided_diffusion/datasets/lsun_bedroom.py |
"""
Convert an LSUN lmdb database into a directory of images.
"""
import argparse
import io
import os
from PIL import Image
import lmdb
import numpy as np
def read_images(lmdb_path, image_size):
env = lmdb.open(lmdb_path, map_size=1099511627776, max_readers=100, readonly=True)
with env.begin(write=False) as transaction:
cursor = transaction.cursor()
for _, webp_data in cursor:
img = Image.open(io.BytesIO(webp_data))
width, height = img.size
scale = image_size / min(width, height)
img = img.resize(
(int(round(scale * width)), int(round(scale * height))),
resample=Image.BOX,
)
arr = np.array(img)
h, w, _ = arr.shape
h_off = (h - image_size) // 2
w_off = (w - image_size) // 2
arr = arr[h_off : h_off + image_size, w_off : w_off + image_size]
yield arr
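# Illustrative sketch of the resize-and-center-crop arithmetic above, on a
# synthetic 640x480 image (hypothetical sizes): the shorter side is scaled to
# image_size, then a centered image_size x image_size square is cut out.
_img = Image.new("RGB", (640, 480))
_size = 256
_scale = _size / min(_img.size)  # scale so the shorter side becomes 256
_img = _img.resize(
    (int(round(_scale * _img.size[0])), int(round(_scale * _img.size[1]))),
    resample=Image.BOX,
)
_arr = np.array(_img)  # (256, 341, 3): rows x cols x channels
_h_off = (_arr.shape[0] - _size) // 2
_w_off = (_arr.shape[1] - _size) // 2
assert _arr[_h_off : _h_off + _size, _w_off : _w_off + _size].shape == (256, 256, 3)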
def dump_images(out_dir, images, prefix):
if not os.path.exists(out_dir):
os.mkdir(out_dir)
for i, img in enumerate(images):
Image.fromarray(img).save(os.path.join(out_dir, f"{prefix}_{i:07d}.png"))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--image-size", help="new image size", type=int, default=256)
parser.add_argument("--prefix", help="class name", type=str, default="bedroom")
parser.add_argument("lmdb_path", help="path to an LSUN lmdb database")
parser.add_argument("out_dir", help="path to output directory")
args = parser.parse_args()
images = read_images(args.lmdb_path, args.image_size)
dump_images(args.out_dir, images, args.prefix)
if __name__ == "__main__":
main()
| 1,722 | 30.327273 | 86 | py |
| ZeCon | ZeCon-main/guided_diffusion/guided_diffusion/resample.py |
from abc import ABC, abstractmethod
import numpy as np
import torch as th
import torch.distributed as dist
def create_named_schedule_sampler(name, diffusion):
"""
Create a ScheduleSampler from a library of pre-defined samplers.
:param name: the name of the sampler.
:param diffusion: the diffusion object to sample for.
"""
if name == "uniform":
return UniformSampler(diffusion)
elif name == "loss-second-moment":
return LossSecondMomentResampler(diffusion)
else:
raise NotImplementedError(f"unknown schedule sampler: {name}")
class ScheduleSampler(ABC):
"""
A distribution over timesteps in the diffusion process, intended to reduce
variance of the objective.
By default, samplers perform unbiased importance sampling, in which the
objective's mean is unchanged.
However, subclasses may override sample() to change how the resampled
terms are reweighted, allowing for actual changes in the objective.
"""
@abstractmethod
def weights(self):
"""
Get a numpy array of weights, one per diffusion step.
The weights needn't be normalized, but must be positive.
"""
def sample(self, batch_size, device):
"""
Importance-sample timesteps for a batch.
:param batch_size: the number of timesteps.
:param device: the torch device to save to.
:return: a tuple (timesteps, weights):
- timesteps: a tensor of timestep indices.
- weights: a tensor of weights to scale the resulting losses.
"""
w = self.weights()
p = w / np.sum(w)
indices_np = np.random.choice(len(p), size=(batch_size,), p=p)
indices = th.from_numpy(indices_np).long().to(device)
weights_np = 1 / (len(p) * p[indices_np])
weights = th.from_numpy(weights_np).float().to(device)
return indices, weights
class UniformSampler(ScheduleSampler):
def __init__(self, diffusion):
self.diffusion = diffusion
self._weights = np.ones([diffusion.num_timesteps])
def weights(self):
return self._weights
class LossAwareSampler(ScheduleSampler):
def update_with_local_losses(self, local_ts, local_losses):
"""
Update the reweighting using losses from a model.
Call this method from each rank with a batch of timesteps and the
corresponding losses for each of those timesteps.
This method will perform synchronization to make sure all of the ranks
maintain the exact same reweighting.
:param local_ts: an integer Tensor of timesteps.
:param local_losses: a 1D Tensor of losses.
"""
batch_sizes = [
th.tensor([0], dtype=th.int32, device=local_ts.device)
for _ in range(dist.get_world_size())
]
dist.all_gather(
batch_sizes,
th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
)
# Pad all_gather batches to be the maximum batch size.
batch_sizes = [x.item() for x in batch_sizes]
max_bs = max(batch_sizes)
        timestep_batches = [th.zeros(max_bs).to(local_ts) for _ in batch_sizes]
        loss_batches = [th.zeros(max_bs).to(local_losses) for _ in batch_sizes]
dist.all_gather(timestep_batches, local_ts)
dist.all_gather(loss_batches, local_losses)
timesteps = [
x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
]
losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
self.update_with_all_losses(timesteps, losses)
@abstractmethod
def update_with_all_losses(self, ts, losses):
"""
Update the reweighting using losses from a model.
Sub-classes should override this method to update the reweighting
using losses from the model.
This method directly updates the reweighting without synchronizing
between workers. It is called by update_with_local_losses from all
ranks with identical arguments. Thus, it should have deterministic
behavior to maintain state across workers.
:param ts: a list of int timesteps.
:param losses: a list of float losses, one per timestep.
"""
class LossSecondMomentResampler(LossAwareSampler):
def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
self.diffusion = diffusion
self.history_per_term = history_per_term
self.uniform_prob = uniform_prob
self._loss_history = np.zeros(
[diffusion.num_timesteps, history_per_term], dtype=np.float64
)
        # NumPy >= 1.24 removed the deprecated np.int alias; use a concrete dtype.
        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)
def weights(self):
if not self._warmed_up():
return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
weights /= np.sum(weights)
weights *= 1 - self.uniform_prob
weights += self.uniform_prob / len(weights)
return weights
def update_with_all_losses(self, ts, losses):
for t, loss in zip(ts, losses):
if self._loss_counts[t] == self.history_per_term:
# Shift out the oldest loss term.
self._loss_history[t, :-1] = self._loss_history[t, 1:]
self._loss_history[t, -1] = loss
else:
self._loss_history[t, self._loss_counts[t]] = loss
self._loss_counts[t] += 1
def _warmed_up(self):
return (self._loss_counts == self.history_per_term).all()
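# Illustrative sanity check for the sampler API above, using a dummy
# diffusion object (only num_timesteps is consulted here): uniform sampling
# implies unit importance weights, so the loss estimate is unchanged.
from types import SimpleNamespace

_sampler = UniformSampler(SimpleNamespace(num_timesteps=1000))
_ts, _ws = _sampler.sample(batch_size=8, device=th.device("cpu"))
assert _ts.shape == (8,) and th.allclose(_ws, th.ones(8))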
| 5,689 | 35.709677 | 87 | py |
| ZeCon | ZeCon-main/guided_diffusion/guided_diffusion/losses.py |
"""
Helpers for various likelihood-based losses. These are ported from the original
Ho et al. diffusion models codebase:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
"""
import numpy as np
import torch as th
def normal_kl(mean1, logvar1, mean2, logvar2):
"""
Compute the KL divergence between two gaussians.
Shapes are automatically broadcasted, so batches can be compared to
scalars, among other use cases.
"""
tensor = None
for obj in (mean1, logvar1, mean2, logvar2):
if isinstance(obj, th.Tensor):
tensor = obj
break
assert tensor is not None, "at least one argument must be a Tensor"
# Force variances to be Tensors. Broadcasting helps convert scalars to
# Tensors, but it does not work for th.exp().
logvar1, logvar2 = [
x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
for x in (logvar1, logvar2)
]
return 0.5 * (
-1.0
+ logvar2
- logvar1
+ th.exp(logvar1 - logvar2)
+ ((mean1 - mean2) ** 2) * th.exp(-logvar2)
)
def approx_standard_normal_cdf(x):
"""
A fast approximation of the cumulative distribution function of the
standard normal.
"""
return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))))
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
"""
Compute the log-likelihood of a Gaussian distribution discretizing to a
given image.
    :param x: the target images. It is assumed that these were uint8 values,
              rescaled to the range [-1, 1].
:param means: the Gaussian mean Tensor.
:param log_scales: the Gaussian log stddev Tensor.
:return: a tensor like x of log probabilities (in nats).
"""
assert x.shape == means.shape == log_scales.shape
centered_x = x - means
inv_stdv = th.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
cdf_plus = approx_standard_normal_cdf(plus_in)
min_in = inv_stdv * (centered_x - 1.0 / 255.0)
cdf_min = approx_standard_normal_cdf(min_in)
log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
cdf_delta = cdf_plus - cdf_min
log_probs = th.where(
x < -0.999,
log_cdf_plus,
th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
)
assert log_probs.shape == x.shape
return log_probs
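# Illustrative sanity check for normal_kl above (arbitrary parameters):
# normal_kl takes log-variances, while torch.distributions.Normal takes
# standard deviations (std = exp(0.5 * logvar)).
_m1, _lv1 = th.tensor(0.3), th.tensor(-0.5)
_m2, _lv2 = th.tensor(-0.1), th.tensor(0.2)
_kl_closed = normal_kl(_m1, _lv1, _m2, _lv2)
_kl_torch = th.distributions.kl_divergence(
    th.distributions.Normal(_m1, (0.5 * _lv1).exp()),
    th.distributions.Normal(_m2, (0.5 * _lv2).exp()),
)
assert th.allclose(_kl_closed, _kl_torch)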
| 2,534 | 31.5 | 109 | py |
| ZeCon | ZeCon-main/guided_diffusion/guided_diffusion/image_datasets.py |
import math
import random
from PIL import Image
import blobfile as bf
from mpi4py import MPI
import numpy as np
from torch.utils.data import DataLoader, Dataset
def load_data(
*,
data_dir,
batch_size,
image_size,
class_cond=False,
deterministic=False,
random_crop=False,
random_flip=True,
):
"""
For a dataset, create a generator over (images, kwargs) pairs.
    Each batch of images is an NCHW float tensor, and the kwargs dict contains
    zero or more keys, each of which maps to a batched Tensor of its own.
The kwargs dict can be used for class labels, in which case the key is "y"
and the values are integer tensors of class labels.
:param data_dir: a dataset directory.
:param batch_size: the batch size of each returned pair.
:param image_size: the size to which images are resized.
:param class_cond: if True, include a "y" key in returned dicts for class
label. If classes are not available and this is true, an
exception will be raised.
:param deterministic: if True, yield results in a deterministic order.
:param random_crop: if True, randomly crop the images for augmentation.
:param random_flip: if True, randomly flip the images for augmentation.
"""
if not data_dir:
raise ValueError("unspecified data directory")
all_files = _list_image_files_recursively(data_dir)
classes = None
if class_cond:
# Assume classes are the first part of the filename,
# before an underscore.
class_names = [bf.basename(path).split("_")[0] for path in all_files]
sorted_classes = {x: i for i, x in enumerate(sorted(set(class_names)))}
classes = [sorted_classes[x] for x in class_names]
dataset = ImageDataset(
image_size,
all_files,
classes=classes,
shard=MPI.COMM_WORLD.Get_rank(),
num_shards=MPI.COMM_WORLD.Get_size(),
random_crop=random_crop,
random_flip=random_flip,
)
if deterministic:
loader = DataLoader(
dataset, batch_size=batch_size, shuffle=False, num_workers=1, drop_last=True
)
else:
loader = DataLoader(
dataset, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=True
)
while True:
yield from loader
def _list_image_files_recursively(data_dir):
results = []
for entry in sorted(bf.listdir(data_dir)):
full_path = bf.join(data_dir, entry)
ext = entry.split(".")[-1]
if "." in entry and ext.lower() in ["jpg", "jpeg", "png", "gif"]:
results.append(full_path)
elif bf.isdir(full_path):
results.extend(_list_image_files_recursively(full_path))
return results
class ImageDataset(Dataset):
def __init__(
self,
resolution,
image_paths,
classes=None,
shard=0,
num_shards=1,
random_crop=False,
random_flip=True,
):
super().__init__()
self.resolution = resolution
self.local_images = image_paths[shard:][::num_shards]
self.local_classes = None if classes is None else classes[shard:][::num_shards]
self.random_crop = random_crop
self.random_flip = random_flip
def __len__(self):
return len(self.local_images)
def __getitem__(self, idx):
path = self.local_images[idx]
with bf.BlobFile(path, "rb") as f:
pil_image = Image.open(f)
pil_image.load()
pil_image = pil_image.convert("RGB")
if self.random_crop:
arr = random_crop_arr(pil_image, self.resolution)
else:
arr = center_crop_arr(pil_image, self.resolution)
if self.random_flip and random.random() < 0.5:
arr = arr[:, ::-1]
arr = arr.astype(np.float32) / 127.5 - 1
out_dict = {}
if self.local_classes is not None:
out_dict["y"] = np.array(self.local_classes[idx], dtype=np.int64)
return np.transpose(arr, [2, 0, 1]), out_dict
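# Illustrative sketch of the sharding above (hypothetical 10-file dataset):
# paths[shard:][::num_shards] gives rank r every num_shards-th file starting
# at index r, so the shards are disjoint and jointly cover the list.
_paths = [f"img_{i}.png" for i in range(10)]
_shards = [_paths[r:][::4] for r in range(4)]  # e.g. num_shards = 4
assert sorted(sum(_shards, [])) == sorted(_paths)  # disjoint and complete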
def center_crop_arr(pil_image, image_size):
# We are not on a new enough PIL to support the `reducing_gap`
# argument, which uses BOX downsampling at powers of two first.
# Thus, we do it by hand to improve downsample quality.
while min(*pil_image.size) >= 2 * image_size:
pil_image = pil_image.resize(
tuple(x // 2 for x in pil_image.size), resample=Image.BOX
)
scale = image_size / min(*pil_image.size)
pil_image = pil_image.resize(
tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
)
arr = np.array(pil_image)
crop_y = (arr.shape[0] - image_size) // 2
crop_x = (arr.shape[1] - image_size) // 2
return arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size]
def random_crop_arr(pil_image, image_size, min_crop_frac=0.8, max_crop_frac=1.0):
min_smaller_dim_size = math.ceil(image_size / max_crop_frac)
max_smaller_dim_size = math.ceil(image_size / min_crop_frac)
smaller_dim_size = random.randrange(min_smaller_dim_size, max_smaller_dim_size + 1)
# We are not on a new enough PIL to support the `reducing_gap`
# argument, which uses BOX downsampling at powers of two first.
# Thus, we do it by hand to improve downsample quality.
while min(*pil_image.size) >= 2 * smaller_dim_size:
pil_image = pil_image.resize(
tuple(x // 2 for x in pil_image.size), resample=Image.BOX
)
scale = smaller_dim_size / min(*pil_image.size)
pil_image = pil_image.resize(
tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
)
arr = np.array(pil_image)
crop_y = random.randrange(arr.shape[0] - image_size + 1)
crop_x = random.randrange(arr.shape[1] - image_size + 1)
return arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size]
| 5,930 | 34.303571 | 88 | py |
| ZeCon | ZeCon-main/guided_diffusion/guided_diffusion/logger.py |
"""
Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
"""
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
import warnings
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, "wt")
self.own_file = True
else:
assert hasattr(filename_or_file, "read"), (
"expected file or str, got %s" % filename_or_file
)
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if hasattr(val, "__float__"):
valstr = "%-8.3g" % val
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print("WARNING: tried to write empty key-value dict")
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = "-" * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
lines.append(
"| %s%s | %s%s |"
% (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
)
lines.append(dashes)
self.file.write("\n".join(lines) + "\n")
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
maxlen = 30
return s[: maxlen - 3] + "..." if len(s) > maxlen else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(" ")
self.file.write("\n")
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "wt")
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, "dtype"):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + "\n")
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "w+t")
self.keys = []
self.sep = ","
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = list(kvs.keys() - self.keys)
extra_keys.sort()
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
self.file.write(k)
self.file.write("\n")
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write("\n")
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write("\n")
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = "events"
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {"tag": k, "simple_value": float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = (
self.step
) # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=""):
os.makedirs(ev_dir, exist_ok=True)
if format == "stdout":
return HumanOutputFormat(sys.stdout)
elif format == "log":
return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
elif format == "json":
return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
elif format == "csv":
return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
elif format == "tensorboard":
return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
else:
raise ValueError("Unknown format specified: %s" % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
get_current().logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
"""
get_current().logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
return get_current().dumpkvs()
def getkvs():
return get_current().name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
get_current().log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
get_current().set_level(level)
def set_comm(comm):
get_current().set_comm(comm)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return get_current().get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
logkey = "wait_" + scopename
tstart = time.time()
try:
yield
finally:
get_current().name2val[logkey] += time.time() - tstart
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
if Logger.CURRENT is None:
_configure_default_logger()
return Logger.CURRENT
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats, comm=None):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
self.comm = comm
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
if self.comm is None:
d = self.name2val
else:
d = mpi_weighted_mean(
self.comm,
{
name: (val, self.name2cnt.get(name, 1))
for (name, val) in self.name2val.items()
},
)
if self.comm.rank != 0:
d["dummy"] = 1 # so we don't get a warning about empty dict
out = d.copy() # Return the dict for unit testing purposes
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(d)
self.name2val.clear()
self.name2cnt.clear()
return out
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def set_comm(self, comm):
self.comm = comm
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
# check environment variables here instead of importing mpi4py
# to avoid calling MPI_Init() when this module is imported
for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
if varname in os.environ:
return int(os.environ[varname])
return 0
def mpi_weighted_mean(comm, local_name2valcount):
"""
Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for (name, (val, count)) in n2vc.items():
try:
val = float(val)
except ValueError:
if comm.rank == 0:
warnings.warn(
"WARNING: tried to compute mean on non-float {}={}".format(
name, val
)
)
else:
name2sum[name] += val * count
name2count[name] += count
return {name: name2sum[name] / name2count[name] for name in name2sum}
else:
return {}
def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
"""
If comm is provided, average all numerical stats across that comm
"""
if dir is None:
dir = os.getenv("OPENAI_LOGDIR")
if dir is None:
dir = osp.join(
tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"),
)
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
else:
format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log("Logging to %s" % dir)
def _configure_default_logger():
configure()
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log("Reset logger")
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
prevlogger = Logger.CURRENT
configure(dir=dir, format_strs=format_strs, comm=comm)
try:
yield
finally:
Logger.CURRENT.close()
Logger.CURRENT = prevlogger
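# Illustrative usage sketch for the API above (directory and values are
# arbitrary): configure once, accumulate key/values each iteration, then
# dump them to every configured output format.
if __name__ == "__main__":
    configure(dir="/tmp/logger_demo", format_strs=["stdout", "csv"])
    for _step in range(3):
        logkv("step", _step)
        logkv_mean("loss", 1.0 / (_step + 1))  # averaged if logged repeatedly
        dumpkvs()
    log("demo finished", level=INFO)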
| 13,979 | 27.185484 | 132 | py |
| ZeCon | ZeCon-main/guided_diffusion/guided_diffusion/nn.py |
"""
Various utilities for neural networks.
"""
import math
import torch as th
import torch.nn as nn
# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
class SiLU(nn.Module):
def forward(self, x):
return x * th.sigmoid(x)
class GroupNorm32(nn.GroupNorm):
def forward(self, x):
return super().forward(x.float()).type(x.dtype)
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
def linear(*args, **kwargs):
"""
Create a linear module.
"""
return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1d(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2d(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
def update_ema(target_params, source_params, rate=0.99):
"""
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
"""
for targ, src in zip(target_params, source_params):
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
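# Illustrative sketch (toy tensors): one update_ema step with rate 0.9 moves
# each target value 10% of the way toward its source value.
_targ, _src = [th.tensor([0.0])], [th.tensor([1.0])]
update_ema(_targ, _src, rate=0.9)
assert th.allclose(_targ[0], th.tensor([0.1]))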
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
def scale_module(module, scale):
"""
Scale the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().mul_(scale)
return module
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(dim=list(range(1, len(tensor.shape))))
def normalization(channels):
"""
Make a standard normalization layer.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNorm32(32, channels)
def timestep_embedding(timesteps, dim, max_period=10000):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
half = dim // 2
freqs = th.exp(
-math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half
).to(device=timesteps.device)
args = timesteps[:, None].float() * freqs[None]
embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)
if dim % 2:
embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
return embedding
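# Illustrative shape check (arbitrary values): N timesteps map to an
# [N x dim] embedding whose first half holds cosines and second half sines
# of geometrically spaced frequencies, so t = 0 gives cos(0) = 1 up front.
_emb = timestep_embedding(th.tensor([0.0, 10.0, 999.0]), dim=128)
assert _emb.shape == (3, 128)
assert th.allclose(_emb[0, :64], th.ones(64))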
def checkpoint(func, inputs, params, flag):
"""
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.
:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
"""
if flag:
args = tuple(inputs) + tuple(params)
return CheckpointFunction.apply(func, len(inputs), *args)
else:
return func(*inputs)
class CheckpointFunction(th.autograd.Function):
@staticmethod
def forward(ctx, run_function, length, *args):
ctx.run_function = run_function
ctx.input_tensors = list(args[:length])
ctx.input_params = list(args[length:])
with th.no_grad():
output_tensors = ctx.run_function(*ctx.input_tensors)
return output_tensors
@staticmethod
def backward(ctx, *output_grads):
ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
with th.enable_grad():
# Fixes a bug where the first op in run_function modifies the
# Tensor storage in place, which is not allowed for detach()'d
# Tensors.
shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
output_tensors = ctx.run_function(*shallow_copies)
input_grads = th.autograd.grad(
output_tensors,
ctx.input_tensors + ctx.input_params,
output_grads,
allow_unused=True,
)
del ctx.input_tensors
del ctx.input_params
del output_tensors
return (None, None) + input_grads
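# Illustrative sketch for checkpoint() above (toy tensors): the output matches
# a direct call, but activations inside the function are recomputed during
# backward instead of being stored, and closed-over parameters still receive
# gradients via the params argument.
_x = th.randn(4, 8, requires_grad=True)
_w = th.randn(8, 8, requires_grad=True)
_y = checkpoint(lambda inp: th.tanh(inp @ _w), (_x,), (_w,), True)
_y.sum().backward()
assert _x.grad is not None and _w.grad is not None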
| 5,020 | 28.362573 | 88 | py |
| ZeCon | ZeCon-main/guided_diffusion/guided_diffusion/fp16_util.py |
"""
Helpers to train with 16-bit precision.
"""
import numpy as np
import torch as th
import torch.nn as nn
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from . import logger
INITIAL_LOG_LOSS_SCALE = 20.0
def convert_module_to_f16(l):
"""
Convert primitive modules to float16.
"""
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
def convert_module_to_f32(l):
"""
Convert primitive modules to float32, undoing convert_module_to_f16().
"""
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
l.weight.data = l.weight.data.float()
if l.bias is not None:
l.bias.data = l.bias.data.float()
def make_master_params(param_groups_and_shapes):
"""
Copy model parameters into a (differently-shaped) list of full-precision
parameters.
"""
master_params = []
for param_group, shape in param_groups_and_shapes:
master_param = nn.Parameter(
_flatten_dense_tensors(
[param.detach().float() for (_, param) in param_group]
).view(shape)
)
master_param.requires_grad = True
master_params.append(master_param)
return master_params
def model_grads_to_master_grads(param_groups_and_shapes, master_params):
"""
Copy the gradients from the model parameters into the master parameters
from make_master_params().
"""
for master_param, (param_group, shape) in zip(
master_params, param_groups_and_shapes
):
master_param.grad = _flatten_dense_tensors(
[param_grad_or_zeros(param) for (_, param) in param_group]
).view(shape)
def master_params_to_model_params(param_groups_and_shapes, master_params):
"""
Copy the master parameter data back into the model parameters.
"""
# Without copying to a list, if a generator is passed, this will
# silently not copy any parameters.
for master_param, (param_group, _) in zip(master_params, param_groups_and_shapes):
for (_, param), unflat_master_param in zip(
param_group, unflatten_master_params(param_group, master_param.view(-1))
):
param.detach().copy_(unflat_master_param)
def unflatten_master_params(param_group, master_param):
return _unflatten_dense_tensors(master_param, [param for (_, param) in param_group])
def get_param_groups_and_shapes(named_model_params):
named_model_params = list(named_model_params)
scalar_vector_named_params = (
[(n, p) for (n, p) in named_model_params if p.ndim <= 1],
(-1),
)
matrix_named_params = (
[(n, p) for (n, p) in named_model_params if p.ndim > 1],
(1, -1),
)
return [scalar_vector_named_params, matrix_named_params]
def master_params_to_state_dict(
model, param_groups_and_shapes, master_params, use_fp16
):
if use_fp16:
state_dict = model.state_dict()
for master_param, (param_group, _) in zip(
master_params, param_groups_and_shapes
):
for (name, _), unflat_master_param in zip(
param_group, unflatten_master_params(param_group, master_param.view(-1))
):
assert name in state_dict
state_dict[name] = unflat_master_param
else:
state_dict = model.state_dict()
for i, (name, _value) in enumerate(model.named_parameters()):
assert name in state_dict
state_dict[name] = master_params[i]
return state_dict
def state_dict_to_master_params(model, state_dict, use_fp16):
if use_fp16:
named_model_params = [
(name, state_dict[name]) for name, _ in model.named_parameters()
]
param_groups_and_shapes = get_param_groups_and_shapes(named_model_params)
master_params = make_master_params(param_groups_and_shapes)
else:
master_params = [state_dict[name] for name, _ in model.named_parameters()]
return master_params
def zero_master_grads(master_params):
for param in master_params:
param.grad = None
def zero_grad(model_params):
for param in model_params:
# Taken from https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.add_param_group
if param.grad is not None:
param.grad.detach_()
param.grad.zero_()
def param_grad_or_zeros(param):
if param.grad is not None:
return param.grad.data.detach()
else:
return th.zeros_like(param)
class MixedPrecisionTrainer:
def __init__(
self,
*,
model,
use_fp16=False,
fp16_scale_growth=1e-3,
initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE,
):
self.model = model
self.use_fp16 = use_fp16
self.fp16_scale_growth = fp16_scale_growth
self.model_params = list(self.model.parameters())
self.master_params = self.model_params
self.param_groups_and_shapes = None
self.lg_loss_scale = initial_lg_loss_scale
if self.use_fp16:
self.param_groups_and_shapes = get_param_groups_and_shapes(
self.model.named_parameters()
)
self.master_params = make_master_params(self.param_groups_and_shapes)
self.model.convert_to_fp16()
def zero_grad(self):
zero_grad(self.model_params)
def backward(self, loss: th.Tensor):
if self.use_fp16:
loss_scale = 2 ** self.lg_loss_scale
(loss * loss_scale).backward()
else:
loss.backward()
def optimize(self, opt: th.optim.Optimizer):
if self.use_fp16:
return self._optimize_fp16(opt)
else:
return self._optimize_normal(opt)
def _optimize_fp16(self, opt: th.optim.Optimizer):
logger.logkv_mean("lg_loss_scale", self.lg_loss_scale)
model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params)
grad_norm, param_norm = self._compute_norms(grad_scale=2 ** self.lg_loss_scale)
if check_overflow(grad_norm):
self.lg_loss_scale -= 1
logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}")
zero_master_grads(self.master_params)
return False
logger.logkv_mean("grad_norm", grad_norm)
logger.logkv_mean("param_norm", param_norm)
        # Un-scale gradients on every flattened master-parameter group before
        # stepping; there are two groups (scalar/vector and matrix), so scaling
        # only the first would leave the rest multiplied by the loss scale.
        for master_param in self.master_params:
            master_param.grad.mul_(1.0 / (2 ** self.lg_loss_scale))
opt.step()
zero_master_grads(self.master_params)
master_params_to_model_params(self.param_groups_and_shapes, self.master_params)
self.lg_loss_scale += self.fp16_scale_growth
return True
def _optimize_normal(self, opt: th.optim.Optimizer):
grad_norm, param_norm = self._compute_norms()
logger.logkv_mean("grad_norm", grad_norm)
logger.logkv_mean("param_norm", param_norm)
opt.step()
return True
def _compute_norms(self, grad_scale=1.0):
grad_norm = 0.0
param_norm = 0.0
for p in self.master_params:
with th.no_grad():
param_norm += th.norm(p, p=2, dtype=th.float32).item() ** 2
if p.grad is not None:
grad_norm += th.norm(p.grad, p=2, dtype=th.float32).item() ** 2
return np.sqrt(grad_norm) / grad_scale, np.sqrt(param_norm)
def master_params_to_state_dict(self, master_params):
return master_params_to_state_dict(
self.model, self.param_groups_and_shapes, master_params, self.use_fp16
)
def state_dict_to_master_params(self, state_dict):
return state_dict_to_master_params(self.model, state_dict, self.use_fp16)
def check_overflow(value):
return (value == float("inf")) or (value == -float("inf")) or (value != value)
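# Illustrative sketch of the dynamic loss-scaling policy above (pretend
# gradient norms): the loss is multiplied by 2 ** lg_loss_scale before
# backward; on overflow the log2 scale drops by 1, otherwise it creeps up
# by fp16_scale_growth per step.
_lg_loss_scale = INITIAL_LOG_LOSS_SCALE  # 20.0
for _grad_norm in [3.2, float("inf"), 2.9]:
    if check_overflow(_grad_norm):
        _lg_loss_scale -= 1  # back off quickly on inf/NaN
    else:
        _lg_loss_scale += 1e-3  # grow slowly while stable
assert abs(_lg_loss_scale - 19.002) < 1e-9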
| 7,941 | 32.510549 | 114 | py |
| ZeCon | ZeCon-main/guided_diffusion/guided_diffusion/unet.py |
from abc import abstractmethod
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from .fp16_util import convert_module_to_f16, convert_module_to_f32
from .nn import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(
th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5
)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=1
)
else:
assert self.channels == self.out_channels
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
def forward(self, x):
assert x.shape[1] == self.channels
return self.op(x)
class ResBlock(TimestepBlock):
"""
A residual block that can optionally change the number of channels.
:param channels: the number of input channels.
:param emb_channels: the number of timestep embedding channels.
:param dropout: the rate of dropout.
:param out_channels: if specified, the number of out channels.
:param use_conv: if True and out_channels is specified, use a spatial
convolution instead of a smaller 1x1 convolution to change the
channels in the skip connection.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param use_checkpoint: if True, use gradient checkpointing on this module.
:param up: if True, use this block for upsampling.
:param down: if True, use this block for downsampling.
"""
def __init__(
self,
channels,
emb_channels,
dropout,
out_channels=None,
use_conv=False,
use_scale_shift_norm=False,
dims=2,
use_checkpoint=False,
up=False,
down=False,
):
super().__init__()
self.channels = channels
self.emb_channels = emb_channels
self.dropout = dropout
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_checkpoint = use_checkpoint
self.use_scale_shift_norm = use_scale_shift_norm
self.in_layers = nn.Sequential(
normalization(channels),
nn.SiLU(),
conv_nd(dims, channels, self.out_channels, 3, padding=1),
)
self.updown = up or down
if up:
self.h_upd = Upsample(channels, False, dims)
self.x_upd = Upsample(channels, False, dims)
elif down:
self.h_upd = Downsample(channels, False, dims)
self.x_upd = Downsample(channels, False, dims)
else:
self.h_upd = self.x_upd = nn.Identity()
self.emb_layers = nn.Sequential(
nn.SiLU(),
linear(
emb_channels,
2 * self.out_channels if use_scale_shift_norm else self.out_channels,
),
)
self.out_layers = nn.Sequential(
normalization(self.out_channels),
nn.SiLU(),
nn.Dropout(p=dropout),
zero_module(
conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
),
)
if self.out_channels == channels:
self.skip_connection = nn.Identity()
elif use_conv:
self.skip_connection = conv_nd(
dims, channels, self.out_channels, 3, padding=1
)
else:
self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
def forward(self, x, emb):
"""
Apply the block to a Tensor, conditioned on a timestep embedding.
:param x: an [N x C x ...] Tensor of features.
:param emb: an [N x emb_channels] Tensor of timestep embeddings.
:return: an [N x C x ...] Tensor of outputs.
"""
return checkpoint(
self._forward, (x, emb), self.parameters(), self.use_checkpoint
)
def _forward(self, x, emb):
if self.updown:
in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
h = in_rest(x)
h = self.h_upd(h)
x = self.x_upd(x)
h = in_conv(h)
else:
h = self.in_layers(x)
emb_out = self.emb_layers(emb).type(h.dtype)
while len(emb_out.shape) < len(h.shape):
emb_out = emb_out[..., None]
if self.use_scale_shift_norm:
out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
scale, shift = th.chunk(emb_out, 2, dim=1)
h = out_norm(h) * (1 + scale) + shift
h = out_rest(h)
else:
h = h + emb_out
h = self.out_layers(h)
return self.skip_connection(x) + h
class AttentionBlock(nn.Module):
"""
An attention block that allows spatial positions to attend to each other.
Originally ported from here, but adapted to the N-d case.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
"""
def __init__(
self,
channels,
num_heads=1,
num_head_channels=-1,
use_checkpoint=False,
use_new_attention_order=False,
):
super().__init__()
self.channels = channels
if num_head_channels == -1:
self.num_heads = num_heads
else:
assert (
channels % num_head_channels == 0
), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
self.num_heads = channels // num_head_channels
self.use_checkpoint = use_checkpoint
self.norm = normalization(channels)
self.qkv = conv_nd(1, channels, channels * 3, 1)
if use_new_attention_order:
# split qkv before split heads
self.attention = QKVAttention(self.num_heads)
else:
# split heads before split qkv
self.attention = QKVAttentionLegacy(self.num_heads)
self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
def forward(self, x):
return checkpoint(self._forward, (x,), self.parameters(), True)
def _forward(self, x):
b, c, *spatial = x.shape
x = x.reshape(b, c, -1)
qkv = self.qkv(self.norm(x))
h = self.attention(qkv)
h = self.proj_out(h)
return (x + h).reshape(b, c, *spatial)
def count_flops_attn(model, _x, y):
"""
A counter for the `thop` package to count the operations in an
attention operation.
Meant to be used like:
macs, params = thop.profile(
model,
inputs=(inputs, timestamps),
custom_ops={QKVAttention: QKVAttention.count_flops},
)
"""
b, c, *spatial = y[0].shape
num_spatial = int(np.prod(spatial))
# We perform two matmuls with the same number of ops.
# The first computes the weight matrix, the second computes
# the combination of the value vectors.
matmul_ops = 2 * b * (num_spatial ** 2) * c
model.total_ops += th.DoubleTensor([matmul_ops])
class QKVAttentionLegacy(nn.Module):
"""
    A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum(
"bct,bcs->bts", q * scale, k * scale
) # More stable with f16 than dividing afterwards
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum("bts,bcs->bct", weight, v)
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
class QKVAttention(nn.Module):
"""
A module which performs QKV attention and splits in a different order.
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.chunk(3, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum(
"bct,bcs->bts",
(q * scale).view(bs * self.n_heads, ch, length),
(k * scale).view(bs * self.n_heads, ch, length),
) # More stable with f16 than dividing afterwards
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
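# Illustrative shape check for QKVAttention above (arbitrary sizes: batch 2,
# 4 heads of 8 channels, 16 tokens): a packed [N x (3 * H * C) x T] tensor
# comes back as [N x (H * C) x T] after attending over the T axis.
_qkv = th.randn(2, 3 * 4 * 8, 16)
_out = QKVAttention(n_heads=4)(_qkv)
assert _out.shape == (2, 4 * 8, 16)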
class UNetModel(nn.Module):
"""
The full UNet model with attention and timestep embedding.
:param in_channels: channels in the input Tensor.
:param model_channels: base channel count for the model.
:param out_channels: channels in the output Tensor.
:param num_res_blocks: number of residual blocks per downsample.
:param attention_resolutions: a collection of downsample rates at which
attention will take place. May be a set, list, or tuple.
For example, if this contains 4, then at 4x downsampling, attention
will be used.
:param dropout: the dropout probability.
:param channel_mult: channel multiplier for each level of the UNet.
:param conv_resample: if True, use learned convolutions for upsampling and
downsampling.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param num_classes: if specified (as an int), then this model will be
class-conditional with `num_classes` classes.
:param use_checkpoint: use gradient checkpointing to reduce memory usage.
:param num_heads: the number of attention heads in each attention layer.
    :param num_head_channels: if specified, ignore num_heads and instead use
a fixed channel width per attention head.
:param num_heads_upsample: works with num_heads to set a different number
of heads for upsampling. Deprecated.
:param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
:param resblock_updown: use residual blocks for up/downsampling.
:param use_new_attention_order: use a different attention pattern for potentially
increased efficiency.
"""
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
num_classes=None,
use_checkpoint=False,
use_fp16=False,
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
):
super().__init__()
if num_heads_upsample == -1:
num_heads_upsample = num_heads
self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.num_classes = num_classes
self.use_checkpoint = use_checkpoint
self.dtype = th.float16 if use_fp16 else th.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
if self.num_classes is not None:
self.label_emb = nn.Embedding(num_classes, time_embed_dim)
ch = input_ch = int(channel_mult[0] * model_channels)
self.input_blocks = nn.ModuleList(
[TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
)
self._feature_size = ch
input_block_chans = [ch]
ds = 1
for level, mult in enumerate(channel_mult):
for _ in range(num_res_blocks):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=int(mult * model_channels),
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = int(mult * model_channels)
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
out_ch = ch
self.input_blocks.append(
TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
)
if resblock_updown
else Downsample(
ch, conv_resample, dims=dims, out_channels=out_ch
)
)
)
ch = out_ch
input_block_chans.append(ch)
ds *= 2
self._feature_size += ch
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
self.output_blocks = nn.ModuleList([])
for level, mult in list(enumerate(channel_mult))[::-1]:
for i in range(num_res_blocks + 1):
ich = input_block_chans.pop()
layers = [
ResBlock(
ch + ich,
time_embed_dim,
dropout,
out_channels=int(model_channels * mult),
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = int(model_channels * mult)
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
)
)
if level and i == num_res_blocks:
out_ch = ch
layers.append(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
up=True,
)
if resblock_updown
else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
)
ds //= 2
self.output_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)),
)
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
self.output_blocks.apply(convert_module_to_f16)
def convert_to_fp32(self):
"""
Convert the torso of the model to float32.
"""
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
self.output_blocks.apply(convert_module_to_f32)
def forward(self, x, timesteps, y=None):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:param y: an [N] Tensor of labels, if class-conditional.
:return: an [N x C x ...] Tensor of outputs.
"""
assert (y is not None) == (
self.num_classes is not None
), "must specify y if and only if the model is class-conditional"
hs = []
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
if self.num_classes is not None:
assert y.shape == (x.shape[0],)
emb = emb + self.label_emb(y)
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb)
hs.append(h)
h = self.middle_block(h, emb)
for module in self.output_blocks:
h = th.cat([h, hs.pop()], dim=1)
h = module(h, emb)
h = h.type(x.dtype)
return self.out(h)
    # Encoder-only feature extraction for CUT-style contrastive (NCE) losses:
    # returns hidden states from the requested input blocks instead of the
    # final prediction.
    def forward_enc(self, x, timesteps, nce_layers, y=None):
hs = []
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
if self.num_classes is not None:
assert y.shape == (x.shape[0],)
emb = emb + self.label_emb(y)
feats = []
h = x.type(self.dtype)
for i, module in enumerate(self.input_blocks):
h = module(h, emb)
hs.append(h)
if i in nce_layers:
feats.append(h)
# h = self.middle_block(h, emb)
# for module in self.output_blocks:
# h = th.cat([h, hs.pop()], dim=1)
# h = module(h, emb)
h = h.type(x.dtype)
return feats
class SuperResModel(UNetModel):
"""
A UNetModel that performs super-resolution.
Expects an extra kwarg `low_res` to condition on a low-resolution image.
"""
def __init__(self, image_size, in_channels, *args, **kwargs):
super().__init__(image_size, in_channels * 2, *args, **kwargs)
def forward(self, x, timesteps, low_res=None, **kwargs):
_, _, new_height, new_width = x.shape
upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear")
x = th.cat([x, upsampled], dim=1)
return super().forward(x, timesteps, **kwargs)
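# Illustrative sketch of the conditioning step above (arbitrary sizes): the
# low-res image is bilinearly upsampled to the input resolution and stacked
# along the channel axis, which is why __init__ doubles in_channels.
_x = th.randn(2, 3, 256, 256)      # noisy high-res input
_lr = th.randn(2, 3, 64, 64)       # low-res conditioning image
_up = F.interpolate(_lr, (256, 256), mode="bilinear")
assert th.cat([_x, _up], dim=1).shape == (2, 6, 256, 256)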
class EncoderUNetModel(nn.Module):
"""
The half UNet model with attention and timestep embedding.
For usage, see UNet.
"""
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
use_checkpoint=False,
use_fp16=False,
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
pool="adaptive",
):
super().__init__()
if num_heads_upsample == -1:
num_heads_upsample = num_heads
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.use_checkpoint = use_checkpoint
self.dtype = th.float16 if use_fp16 else th.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
ch = int(channel_mult[0] * model_channels)
self.input_blocks = nn.ModuleList(
[TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
)
self._feature_size = ch
input_block_chans = [ch]
ds = 1
for level, mult in enumerate(channel_mult):
for _ in range(num_res_blocks):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=int(mult * model_channels),
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = int(mult * model_channels)
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
out_ch = ch
self.input_blocks.append(
TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
)
if resblock_updown
else Downsample(
ch, conv_resample, dims=dims, out_channels=out_ch
)
)
)
ch = out_ch
input_block_chans.append(ch)
ds *= 2
self._feature_size += ch
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
self.pool = pool
if pool == "adaptive":
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
nn.AdaptiveAvgPool2d((1, 1)),
zero_module(conv_nd(dims, ch, out_channels, 1)),
nn.Flatten(),
)
elif pool == "attention":
assert num_head_channels != -1
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
AttentionPool2d(
(image_size // ds), ch, num_head_channels, out_channels
),
)
elif pool == "spatial":
self.out = nn.Sequential(
nn.Linear(self._feature_size, 2048),
nn.ReLU(),
nn.Linear(2048, self.out_channels),
)
elif pool == "spatial_v2":
self.out = nn.Sequential(
nn.Linear(self._feature_size, 2048),
normalization(2048),
nn.SiLU(),
nn.Linear(2048, self.out_channels),
)
else:
raise NotImplementedError(f"Unexpected {pool} pooling")
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
def convert_to_fp32(self):
"""
Convert the torso of the model to float32.
"""
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
def forward(self, x, timesteps):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:return: an [N x K] Tensor of outputs.
"""
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
results = []
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb)
if self.pool.startswith("spatial"):
results.append(h.type(x.dtype).mean(dim=(2, 3)))
h = self.middle_block(h, emb)
if self.pool.startswith("spatial"):
results.append(h.type(x.dtype).mean(dim=(2, 3)))
            h = th.cat(results, dim=-1)
return self.out(h)
else:
h = h.type(x.dtype)
return self.out(h)
# ----- end of previous file (32,001 bytes, avg line length 33.8, max line length 124, py) -----
# ===== ZeCon-main/guided_diffusion/guided_diffusion/script_util.py (repo: ZeCon) =====
import argparse
import inspect
from . import gaussian_diffusion as gd
from .respace import SpacedDiffusion, space_timesteps
from .unet import SuperResModel, UNetModel, EncoderUNetModel
NUM_CLASSES = 1000
# NOTE: relative to the upstream guided-diffusion defaults (learn_sigma=False,
# rescale_timesteps=False, timestep_respacing=""), this fork enables
# learn_sigma and rescale_timesteps and drops timestep_respacing from the
# defaults dict; see diffusion_defaults() below.
def diffusion_defaults():
"""
Defaults for image and classifier training.
"""
return dict(
learn_sigma=True,
diffusion_steps=1000,
noise_schedule="linear",
# timestep_respacing="100",
use_kl=False,
predict_xstart=False,
rescale_timesteps=True,
rescale_learned_sigmas=False,
)
def classifier_defaults():
"""
Defaults for classifier models.
"""
return dict(
image_size=64,
classifier_use_fp16=False,
classifier_width=128,
classifier_depth=2,
classifier_attention_resolutions="32,16,8", # 16
classifier_use_scale_shift_norm=True, # False
classifier_resblock_updown=True, # False
classifier_pool="attention",
)
def model_and_diffusion_defaults(args):
"""
Defaults for image training.
"""
    res = dict(
        image_size=args.model_output_size,
        num_channels=256,
        num_res_blocks=2,
        num_heads=4,
        num_heads_upsample=-1,
        num_head_channels=64,
        attention_resolutions="32,16,8",
        channel_mult="",
        dropout=0.0,
        class_cond=args.model_output_size == 512,
        use_checkpoint=False,
        use_scale_shift_norm=True,
        resblock_updown=True,
        use_fp16=True,
        use_new_attention_order=False,
        diffusion_steps=1000,
        rescale_timesteps=True,
        timestep_respacing=args.timestep_respacing,
        learn_sigma=True,
        noise_schedule="linear",
    )
res.update(diffusion_defaults())
return res
def classifier_and_diffusion_defaults():
res = classifier_defaults()
res.update(diffusion_defaults())
return res
def create_model_and_diffusion(
image_size,
class_cond,
learn_sigma,
num_channels,
num_res_blocks,
channel_mult,
num_heads,
num_head_channels,
num_heads_upsample,
attention_resolutions,
dropout,
diffusion_steps,
noise_schedule,
timestep_respacing,
use_kl,
predict_xstart,
rescale_timesteps,
rescale_learned_sigmas,
use_checkpoint,
use_scale_shift_norm,
resblock_updown,
use_fp16,
use_new_attention_order,
):
model = create_model(
image_size,
num_channels,
num_res_blocks,
channel_mult=channel_mult,
learn_sigma=learn_sigma,
class_cond=class_cond,
use_checkpoint=use_checkpoint,
attention_resolutions=attention_resolutions,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
dropout=dropout,
resblock_updown=resblock_updown,
use_fp16=use_fp16,
use_new_attention_order=use_new_attention_order,
)
diffusion = create_gaussian_diffusion(
steps=diffusion_steps,
learn_sigma=learn_sigma,
noise_schedule=noise_schedule,
use_kl=use_kl,
predict_xstart=predict_xstart,
rescale_timesteps=rescale_timesteps,
rescale_learned_sigmas=rescale_learned_sigmas,
timestep_respacing=timestep_respacing,
)
return model, diffusion
def create_model(
image_size,
num_channels,
num_res_blocks,
channel_mult="",
learn_sigma=False,
class_cond=False,
use_checkpoint=False,
attention_resolutions="16",
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
dropout=0,
resblock_updown=False,
use_fp16=False,
use_new_attention_order=False,
):
if channel_mult == "":
if image_size == 512:
channel_mult = (0.5, 1, 1, 2, 2, 4, 4)
elif image_size == 256:
channel_mult = (1, 1, 2, 2, 4, 4)
elif image_size == 128:
channel_mult = (1, 1, 2, 3, 4)
elif image_size == 64:
channel_mult = (1, 2, 3, 4)
else:
raise ValueError(f"unsupported image size: {image_size}")
else:
channel_mult = tuple(int(ch_mult) for ch_mult in channel_mult.split(","))
attention_ds = []
for res in attention_resolutions.split(","):
attention_ds.append(image_size // int(res))
return UNetModel(
image_size=image_size,
in_channels=3,
model_channels=num_channels,
out_channels=(3 if not learn_sigma else 6),
num_res_blocks=num_res_blocks,
attention_resolutions=tuple(attention_ds),
dropout=dropout,
channel_mult=channel_mult,
num_classes=(NUM_CLASSES if class_cond else None),
use_checkpoint=use_checkpoint,
use_fp16=use_fp16,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
resblock_updown=resblock_updown,
use_new_attention_order=use_new_attention_order,
)
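# Worked example (illustrative, not in the original file): with image_size=256
# and attention_resolutions="32,16,8", attention is inserted at downsample
# factors 256//32=8, 256//16=16 and 256//8=32, i.e. at the 32x32, 16x16 and
# 8x8 feature maps.
def _attention_ds_demo():
    image_size, attention_resolutions = 256, "32,16,8"
    ds = tuple(image_size // int(r) for r in attention_resolutions.split(","))
    assert ds == (8, 16, 32)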
def create_classifier_and_diffusion(
image_size,
classifier_use_fp16,
classifier_width,
classifier_depth,
classifier_attention_resolutions,
classifier_use_scale_shift_norm,
classifier_resblock_updown,
classifier_pool,
learn_sigma,
diffusion_steps,
noise_schedule,
timestep_respacing,
use_kl,
predict_xstart,
rescale_timesteps,
rescale_learned_sigmas,
):
classifier = create_classifier(
image_size,
classifier_use_fp16,
classifier_width,
classifier_depth,
classifier_attention_resolutions,
classifier_use_scale_shift_norm,
classifier_resblock_updown,
classifier_pool,
)
diffusion = create_gaussian_diffusion(
steps=diffusion_steps,
learn_sigma=learn_sigma,
noise_schedule=noise_schedule,
use_kl=use_kl,
predict_xstart=predict_xstart,
rescale_timesteps=rescale_timesteps,
rescale_learned_sigmas=rescale_learned_sigmas,
timestep_respacing=timestep_respacing,
)
return classifier, diffusion
def create_classifier(
image_size,
classifier_use_fp16,
classifier_width,
classifier_depth,
classifier_attention_resolutions,
classifier_use_scale_shift_norm,
classifier_resblock_updown,
classifier_pool,
):
if image_size == 512:
channel_mult = (0.5, 1, 1, 2, 2, 4, 4)
elif image_size == 256:
channel_mult = (1, 1, 2, 2, 4, 4)
elif image_size == 128:
channel_mult = (1, 1, 2, 3, 4)
elif image_size == 64:
channel_mult = (1, 2, 3, 4)
else:
raise ValueError(f"unsupported image size: {image_size}")
attention_ds = []
for res in classifier_attention_resolutions.split(","):
attention_ds.append(image_size // int(res))
return EncoderUNetModel(
image_size=image_size,
in_channels=3,
model_channels=classifier_width,
out_channels=1000,
num_res_blocks=classifier_depth,
attention_resolutions=tuple(attention_ds),
channel_mult=channel_mult,
use_fp16=classifier_use_fp16,
num_head_channels=64,
use_scale_shift_norm=classifier_use_scale_shift_norm,
resblock_updown=classifier_resblock_updown,
pool=classifier_pool,
)
def sr_model_and_diffusion_defaults(args):
    # model_and_diffusion_defaults requires `args` in this fork, so it is
    # threaded through here as well.
    res = model_and_diffusion_defaults(args)
res["large_size"] = 256
res["small_size"] = 64
arg_names = inspect.getfullargspec(sr_create_model_and_diffusion)[0]
for k in res.copy().keys():
if k not in arg_names:
del res[k]
return res
def sr_create_model_and_diffusion(
large_size,
small_size,
class_cond,
learn_sigma,
num_channels,
num_res_blocks,
num_heads,
num_head_channels,
num_heads_upsample,
attention_resolutions,
dropout,
diffusion_steps,
noise_schedule,
timestep_respacing,
use_kl,
predict_xstart,
rescale_timesteps,
rescale_learned_sigmas,
use_checkpoint,
use_scale_shift_norm,
resblock_updown,
use_fp16,
):
model = sr_create_model(
large_size,
small_size,
num_channels,
num_res_blocks,
learn_sigma=learn_sigma,
class_cond=class_cond,
use_checkpoint=use_checkpoint,
attention_resolutions=attention_resolutions,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
dropout=dropout,
resblock_updown=resblock_updown,
use_fp16=use_fp16,
)
diffusion = create_gaussian_diffusion(
steps=diffusion_steps,
learn_sigma=learn_sigma,
noise_schedule=noise_schedule,
use_kl=use_kl,
predict_xstart=predict_xstart,
rescale_timesteps=rescale_timesteps,
rescale_learned_sigmas=rescale_learned_sigmas,
timestep_respacing=timestep_respacing,
)
return model, diffusion
def sr_create_model(
large_size,
small_size,
num_channels,
num_res_blocks,
learn_sigma,
class_cond,
use_checkpoint,
attention_resolutions,
num_heads,
num_head_channels,
num_heads_upsample,
use_scale_shift_norm,
dropout,
resblock_updown,
use_fp16,
):
    _ = small_size  # unused; kept for signature compatibility
if large_size == 512:
channel_mult = (1, 1, 2, 2, 4, 4)
elif large_size == 256:
channel_mult = (1, 1, 2, 2, 4, 4)
elif large_size == 64:
channel_mult = (1, 2, 3, 4)
else:
raise ValueError(f"unsupported large size: {large_size}")
attention_ds = []
for res in attention_resolutions.split(","):
attention_ds.append(large_size // int(res))
return SuperResModel(
image_size=large_size,
in_channels=3,
model_channels=num_channels,
out_channels=(3 if not learn_sigma else 6),
num_res_blocks=num_res_blocks,
attention_resolutions=tuple(attention_ds),
dropout=dropout,
channel_mult=channel_mult,
num_classes=(NUM_CLASSES if class_cond else None),
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
resblock_updown=resblock_updown,
use_fp16=use_fp16,
)
def create_gaussian_diffusion(
*,
steps=1000,
learn_sigma=False,
sigma_small=False,
noise_schedule="linear",
use_kl=False,
predict_xstart=False,
rescale_timesteps=False,
rescale_learned_sigmas=False,
timestep_respacing="",
):
betas = gd.get_named_beta_schedule(noise_schedule, steps)
if use_kl:
loss_type = gd.LossType.RESCALED_KL
elif rescale_learned_sigmas:
loss_type = gd.LossType.RESCALED_MSE
else:
loss_type = gd.LossType.MSE
if not timestep_respacing:
timestep_respacing = [steps]
return SpacedDiffusion(
use_timesteps=space_timesteps(steps, timestep_respacing),
betas=betas,
model_mean_type=(
gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X
),
model_var_type=(
(
gd.ModelVarType.FIXED_LARGE
if not sigma_small
else gd.ModelVarType.FIXED_SMALL
)
if not learn_sigma
else gd.ModelVarType.LEARNED_RANGE
),
loss_type=loss_type,
rescale_timesteps=rescale_timesteps,
)
def add_dict_to_argparser(parser, default_dict):
for k, v in default_dict.items():
v_type = type(v)
if v is None:
v_type = str
elif isinstance(v, bool):
v_type = str2bool
parser.add_argument(f"--{k}", default=v, type=v_type)
def args_to_dict(args, keys):
return {k: getattr(args, k) for k in keys}
def str2bool(v):
"""
https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
"""
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected")
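# Hedged usage sketch (not part of the original file): wiring the default
# dicts above into an argparse CLI. Every name used here is defined in this
# module; parse_args([]) just materializes the defaults for demonstration.
def _demo_cli():
    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, diffusion_defaults())
    args = parser.parse_args([])
    return create_gaussian_diffusion(
        steps=args.diffusion_steps,
        learn_sigma=args.learn_sigma,
        noise_schedule=args.noise_schedule,
        use_kl=args.use_kl,
        predict_xstart=args.predict_xstart,
        rescale_timesteps=args.rescale_timesteps,
        rescale_learned_sigmas=args.rescale_learned_sigmas,
        timestep_respacing="",  # empty string = use all diffusion steps
    )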
# ----- end of script_util.py (12,997 bytes, avg line length 26.5, max line length 88, py) -----
# ===== ZeCon-main/guided_diffusion/guided_diffusion/gaussian_diffusion.py (repo: ZeCon) =====
"""
This code started out as a PyTorch port of Ho et al's diffusion models:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
"""
import enum
import math
import numpy as np
import torch as th
from .nn import mean_flat
from .losses import normal_kl, discretized_gaussian_log_likelihood
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
"""
Get a pre-defined beta schedule for the given name.
The beta schedule library consists of beta schedules which remain similar
in the limit of num_diffusion_timesteps.
Beta schedules may be added, but should not be removed or changed once
they are committed to maintain backwards compatibility.
"""
if schedule_name == "linear":
# Linear schedule from Ho et al, extended to work for any number of
# diffusion steps.
scale = 1000 / num_diffusion_timesteps
beta_start = scale * 0.0001
beta_end = scale * 0.02
return np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64)
elif schedule_name == "cosine":
return betas_for_alpha_bar(
num_diffusion_timesteps, lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
)
else:
raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].
:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
produces the cumulative product of (1-beta) up to that
part of the diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
prevent singularities.
"""
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return np.array(betas)
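# Illustrative sanity check (not in the original file): both named schedules
# return a 1-D array of length T with betas in (0, 1], which is exactly what
# GaussianDiffusion.__init__ asserts below.
def _beta_schedule_demo(T=1000):
    for name in ("linear", "cosine"):
        betas = get_named_beta_schedule(name, T)
        assert betas.shape == (T,)
        assert (betas > 0).all() and (betas <= 1).all()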
class ModelMeanType(enum.Enum):
"""
Which type of output the model predicts.
"""
PREVIOUS_X = enum.auto() # the model predicts x_{t-1}
START_X = enum.auto() # the model predicts x_0
EPSILON = enum.auto() # the model predicts epsilon
class ModelVarType(enum.Enum):
"""
What is used as the model's output variance.
The LEARNED_RANGE option has been added to allow the model to predict
values between FIXED_SMALL and FIXED_LARGE, making its job easier.
"""
LEARNED = enum.auto()
FIXED_SMALL = enum.auto()
FIXED_LARGE = enum.auto()
LEARNED_RANGE = enum.auto()
class LossType(enum.Enum):
MSE = enum.auto() # use raw MSE loss (and KL when learning variances)
RESCALED_MSE = enum.auto() # use raw MSE loss (with RESCALED_KL when learning variances)
KL = enum.auto() # use the variational lower-bound
RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB
def is_vb(self):
return self == LossType.KL or self == LossType.RESCALED_KL
class GaussianDiffusion:
"""
Utilities for training and sampling diffusion models.
Ported directly from here, and then adapted over time to further experimentation.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
:param betas: a 1-D numpy array of betas for each diffusion timestep,
starting at T and going to 1.
:param model_mean_type: a ModelMeanType determining what the model outputs.
:param model_var_type: a ModelVarType determining how variance is output.
:param loss_type: a LossType determining the loss function to use.
:param rescale_timesteps: if True, pass floating point timesteps into the
model so that they are always scaled like in the
original paper (0 to 1000).
"""
def __init__(
self, *, betas, model_mean_type, model_var_type, loss_type, rescale_timesteps=False,
):
self.model_mean_type = model_mean_type
self.model_var_type = model_var_type
self.loss_type = loss_type
self.rescale_timesteps = rescale_timesteps
# Use float64 for accuracy.
betas = np.array(betas, dtype=np.float64)
self.betas = betas
assert len(betas.shape) == 1, "betas must be 1-D"
assert (betas > 0).all() and (betas <= 1).all()
self.num_timesteps = int(betas.shape[0])
alphas = 1.0 - betas
self.alphas_cumprod = np.cumprod(alphas, axis=0)
self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
# calculations for posterior q(x_{t-1} | x_t, x_0)
self.posterior_variance = (
betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
# log calculation clipped because the posterior variance is 0 at the
# beginning of the diffusion chain.
self.posterior_log_variance_clipped = np.log(
np.append(self.posterior_variance[1], self.posterior_variance[1:])
)
self.posterior_mean_coef1 = (
betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
self.posterior_mean_coef2 = (
(1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - self.alphas_cumprod)
)
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return mean, variance, log_variance
def q_sample(self, x_start, t, noise=None):
"""
Diffuse the data for a given number of diffusion steps.
In other words, sample from q(x_t | x_0).
:param x_start: the initial data batch.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:param noise: if specified, the split-out normal noise.
:return: A noisy version of x_start.
"""
if noise is None:
noise = th.randn_like(x_start)
assert noise.shape == x_start.shape
return (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+ _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
def q_posterior_mean_variance(self, x_start, x_t, t):
"""
Compute the mean and variance of the diffusion posterior:
q(x_{t-1} | x_t, x_0)
"""
assert x_start.shape == x_t.shape
posterior_mean = (
_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
+ _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x_t.shape
)
assert (
posterior_mean.shape[0]
== posterior_variance.shape[0]
== posterior_log_variance_clipped.shape[0]
== x_start.shape[0]
)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
"""
Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
the initial x, x_0.
:param model: the model, which takes a signal and a batch of timesteps
as input.
:param x: the [N x C x ...] tensor at time t.
:param t: a 1-D Tensor of timesteps.
:param clip_denoised: if True, clip the denoised signal into [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample. Applies before
clip_denoised.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict with the following keys:
- 'mean': the model mean output.
- 'variance': the model variance output.
- 'log_variance': the log of 'variance'.
- 'pred_xstart': the prediction for x_0.
"""
if model_kwargs is None:
model_kwargs = {}
B, C = x.shape[:2]
assert t.shape == (B,)
model_output = model(x, self._scale_timesteps(t), **model_kwargs)
if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
assert model_output.shape == (B, C * 2, *x.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
if self.model_var_type == ModelVarType.LEARNED:
model_log_variance = model_var_values
model_variance = th.exp(model_log_variance)
else:
min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape)
max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
# The model_var_values is [-1, 1] for [min_var, max_var].
frac = (model_var_values + 1) / 2
model_log_variance = frac * max_log + (1 - frac) * min_log
model_variance = th.exp(model_log_variance)
else:
model_variance, model_log_variance = {
# for fixedlarge, we set the initial (log-)variance like so
# to get a better decoder log likelihood.
ModelVarType.FIXED_LARGE: (
np.append(self.posterior_variance[1], self.betas[1:]),
np.log(np.append(self.posterior_variance[1], self.betas[1:])),
),
ModelVarType.FIXED_SMALL: (
self.posterior_variance,
self.posterior_log_variance_clipped,
),
}[self.model_var_type]
model_variance = _extract_into_tensor(model_variance, t, x.shape)
model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
def process_xstart(x):
if denoised_fn is not None:
x = denoised_fn(x)
if clip_denoised:
return x.clamp(-1, 1)
return x
if self.model_mean_type == ModelMeanType.PREVIOUS_X:
pred_xstart = process_xstart(
self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
)
model_mean = model_output
elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:
if self.model_mean_type == ModelMeanType.START_X:
pred_xstart = process_xstart(model_output)
else:
pred_xstart = process_xstart(
self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
)
model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)
else:
raise NotImplementedError(self.model_mean_type)
assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
return {
"mean": model_mean,
"variance": model_variance,
"log_variance": model_log_variance,
"pred_xstart": pred_xstart,
}
def _predict_xstart_from_eps(self, x_t, t, eps):
assert x_t.shape == eps.shape
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
)
def _predict_xstart_from_xprev(self, x_t, t, xprev):
assert x_t.shape == xprev.shape
return ( # (xprev - coef2*x_t) / coef1
_extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
- _extract_into_tensor(
self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
)
* x_t
)
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
def _scale_timesteps(self, t):
if self.rescale_timesteps:
return t.float() * (1000.0 / self.num_timesteps)
return t
def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
new_mean = p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
return new_mean
def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, self._scale_timesteps(t), **model_kwargs)
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(x_start=out["pred_xstart"], x_t=x, t=t)
return out
def p_sample(
self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None,
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
:param x: the current tensor at x_{t-1}.
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
noise = th.randn_like(x)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def p_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
randomize_class=False,
):
"""
Generate samples from the model.
:param model: the model module.
:param shape: the shape of the samples, (N, C, H, W).
:param noise: if specified, the noise from the encoder to sample.
Should be of the same shape as `shape`.
:param clip_denoised: if True, clip x_start predictions to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param device: if specified, the device to create the samples on.
If not specified, use a model parameter's device.
:param progress: if True, show a tqdm progress bar.
:return: a non-differentiable batch of samples.
"""
final = None
for sample in self.p_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
skip_timesteps=skip_timesteps,
init_image=init_image,
randomize_class=randomize_class,
):
final = sample
return final["sample"]
def p_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
        randomize_class=False,  # accepted but unused; class randomization below is disabled
    ):
"""
Generate samples from the model and yield intermediate samples from
each timestep of diffusion.
Arguments are the same as p_sample_loop().
Returns a generator over dicts, where each dict is the return value of
p_sample().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
if skip_timesteps and init_image is None:
init_image = th.zeros_like(img)
indices = list(range(self.num_timesteps - skip_timesteps))[::-1]
        if init_image is not None:
            batch_size = shape[0]
            init_image_batch = th.tile(init_image, dims=(batch_size, 1, 1, 1))
            img = self.q_sample(
                x_start=init_image_batch,
                t=th.tensor(indices[0], dtype=th.long, device=device),
                noise=img,
            )
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
# if randomize_class and "y" in model_kwargs:
# model_kwargs["y"] = th.randint(
# low=0,
# high=model.num_classes,
# size=model_kwargs["y"].shape,
# device=model_kwargs["y"].device,
# )
with th.no_grad():
out = self.p_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
                    cond_fn=cond_fn,
model_kwargs=model_kwargs,
)
yield out
img = out["sample"]
def ddim_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t-1} from the model using DDIM.
Same usage as p_sample().
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = (
eta
* th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
* th.sqrt(1 - alpha_bar / alpha_bar_prev)
)
# Equation 12.
noise = th.randn_like(x)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev)
+ th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
sample = mean_pred + nonzero_mask * sigma * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def ddim_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
skip_timesteps=0,
init_image=None,
randomize_class=False,
):
"""
Generate samples from the model using DDIM.
Same usage as p_sample_loop().
"""
final = None
for sample in self.ddim_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
eta=eta,
skip_timesteps=skip_timesteps,
init_image=init_image,
randomize_class=randomize_class,
):
final = sample
return final["sample"]
def ddim_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
skip_timesteps=0,
init_image=None,
randomize_class=False,
):
"""
Use DDIM to sample from the model and yield intermediate samples from
each timestep of DDIM.
Same usage as p_sample_loop_progressive().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
        indices = list(range(self.num_timesteps - skip_timesteps))[::-1]
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
# if randomize_class and "y" in model_kwargs:
# model_kwargs["y"] = th.randint(
# low=0,
# high=model.num_classes,
# size=model_kwargs["y"].shape,
# device=model_kwargs["y"].device,
# )
with th.no_grad():
out = self.ddim_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
                    cond_fn=cond_fn,
model_kwargs=model_kwargs,
eta=eta,
)
yield out
img = out["sample"]
def ddim_reverse_sample(
self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, eta=0.0,
):
"""
Sample x_{t+1} from the model using DDIM reverse ODE.
"""
assert eta == 0.0, "Reverse ODE only for deterministic path"
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
- out["pred_xstart"]
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
# Equation 12. reversed
mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_next) + th.sqrt(1 - alpha_bar_next) * eps
return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
def ddim_reverse_sample_loop(self,
model,
x,
clip_denoised=True,
denoised_fn=None,
model_kwargs=None,
eta=0.0,
skip_timesteps=0,
device=None):
if device is None:
device = next(model.parameters()).device
img = x
        indices = list(range(self.num_timesteps - skip_timesteps))
        # Lazy import so that we don't depend on tqdm (mirrors the samplers above).
        from tqdm.auto import tqdm
        indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * x.shape[0], device=device)
with th.no_grad():
out = self.ddim_reverse_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
eta=eta,
)
img = out["sample"]
return img
def _vb_terms_bpd(self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None):
"""
Get a term for the variational lower-bound.
The resulting units are bits (rather than nats, as one might expect).
This allows for comparison to other papers.
:return: a dict with the following keys:
- 'output': a shape [N] tensor of NLLs or KLs.
- 'pred_xstart': the x_0 predictions.
"""
true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)
out = self.p_mean_variance(
model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
)
kl = normal_kl(true_mean, true_log_variance_clipped, out["mean"], out["log_variance"])
kl = mean_flat(kl) / np.log(2.0)
decoder_nll = -discretized_gaussian_log_likelihood(
x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
)
assert decoder_nll.shape == x_start.shape
decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
# At the first timestep return the decoder NLL,
# otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
output = th.where((t == 0), decoder_nll, kl)
return {"output": output, "pred_xstart": out["pred_xstart"]}
def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
"""
Compute training losses for a single timestep.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param t: a batch of timestep indices.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param noise: if specified, the specific Gaussian noise to try to remove.
:return: a dict with the key "loss" containing a tensor of shape [N].
Some mean or variance settings may also have other keys.
"""
if model_kwargs is None:
model_kwargs = {}
if noise is None:
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start, t, noise=noise)
terms = {}
if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
terms["loss"] = self._vb_terms_bpd(
model=model,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
model_kwargs=model_kwargs,
)["output"]
if self.loss_type == LossType.RESCALED_KL:
terms["loss"] *= self.num_timesteps
elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)
if self.model_var_type in [
ModelVarType.LEARNED,
ModelVarType.LEARNED_RANGE,
]:
B, C = x_t.shape[:2]
assert model_output.shape == (B, C * 2, *x_t.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
# Learn the variance using the variational bound, but don't let
# it affect our mean prediction.
frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
terms["vb"] = self._vb_terms_bpd(
model=lambda *args, r=frozen_out: r,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
)["output"]
if self.loss_type == LossType.RESCALED_MSE:
# Divide by 1000 for equivalence with initial implementation.
# Without a factor of 1/1000, the VB term hurts the MSE term.
terms["vb"] *= self.num_timesteps / 1000.0
target = {
ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)[0],
ModelMeanType.START_X: x_start,
ModelMeanType.EPSILON: noise,
}[self.model_mean_type]
assert model_output.shape == target.shape == x_start.shape
terms["mse"] = mean_flat((target - model_output) ** 2)
if "vb" in terms:
terms["loss"] = terms["mse"] + terms["vb"]
else:
terms["loss"] = terms["mse"]
else:
raise NotImplementedError(self.loss_type)
return terms
def _prior_bpd(self, x_start):
"""
Get the prior KL term for the variational lower-bound, measured in
bits-per-dim.
This term can't be optimized, as it only depends on the encoder.
:param x_start: the [N x C x ...] tensor of inputs.
:return: a batch of [N] KL values (in bits), one per batch element.
"""
batch_size = x_start.shape[0]
t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
return mean_flat(kl_prior) / np.log(2.0)
def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
"""
Compute the entire variational lower-bound, measured in bits-per-dim,
as well as other related quantities.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param clip_denoised: if True, clip denoised samples.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- total_bpd: the total variational lower-bound, per batch element.
- prior_bpd: the prior term in the lower-bound.
- vb: an [N x T] tensor of terms in the lower-bound.
- xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
- mse: an [N x T] tensor of epsilon MSEs for each timestep.
"""
device = x_start.device
batch_size = x_start.shape[0]
vb = []
xstart_mse = []
mse = []
for t in list(range(self.num_timesteps))[::-1]:
t_batch = th.tensor([t] * batch_size, device=device)
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
# Calculate VLB term at the current timestep
with th.no_grad():
out = self._vb_terms_bpd(
model,
x_start=x_start,
x_t=x_t,
t=t_batch,
clip_denoised=clip_denoised,
model_kwargs=model_kwargs,
)
vb.append(out["output"])
xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
mse.append(mean_flat((eps - noise) ** 2))
vb = th.stack(vb, dim=1)
xstart_mse = th.stack(xstart_mse, dim=1)
mse = th.stack(mse, dim=1)
prior_bpd = self._prior_bpd(x_start)
total_bpd = vb.sum(dim=1) + prior_bpd
return {
"total_bpd": total_bpd,
"prior_bpd": prior_bpd,
"vb": vb,
"xstart_mse": xstart_mse,
"mse": mse,
}
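# Hedged sketch (not part of the original file): with eta=0, DDIM inversion
# (ddim_reverse_sample_loop) followed by DDIM sampling from the recovered
# latent approximately reconstructs the input; this is the encode/decode
# pattern that editing pipelines build on.
def _ddim_roundtrip(diffusion, model, x0, skip_timesteps=0):
    latent = diffusion.ddim_reverse_sample_loop(
        model, x0, clip_denoised=False, skip_timesteps=skip_timesteps
    )
    recon = diffusion.ddim_sample_loop(
        model,
        x0.shape,
        noise=latent,
        clip_denoised=False,
        eta=0.0,
        skip_timesteps=skip_timesteps,
    )
    return latent, recon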
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
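# Illustrative check (not in the original file): q_sample implements the
# closed form x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# so with fixed noise it can be verified directly against the schedule
# arrays (CPU tensors assumed for simplicity).
def _q_sample_demo(diffusion, x_start):
    t = th.full((x_start.shape[0],), diffusion.num_timesteps - 1, dtype=th.long)
    eps = th.randn_like(x_start)
    x_t = diffusion.q_sample(x_start, t, noise=eps)
    a = float(diffusion.sqrt_alphas_cumprod[-1])
    b = float(diffusion.sqrt_one_minus_alphas_cumprod[-1])
    assert th.allclose(x_t, a * x_start + b * eps, atol=1e-5)
    return x_t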
# ----- end of gaussian_diffusion.py (36,586 bytes, avg line length 38.1, max line length 129, py) -----
# ===== ZeCon-main/guided_diffusion/guided_diffusion/__init__.py (repo: ZeCon) =====
"""
Codebase for "Improved Denoising Diffusion Probabilistic Models".
"""
# ----- end of __init__.py (74 bytes, avg line length 17.8, max line length 65, py) -----
# ===== ZeCon-main/guided_diffusion/guided_diffusion/train_util.py (repo: ZeCon) =====
import copy
import functools
import os
import blobfile as bf
import torch as th
import torch.distributed as dist
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.optim import AdamW
from . import dist_util, logger
from .fp16_util import MixedPrecisionTrainer
from .nn import update_ema
from .resample import LossAwareSampler, UniformSampler
# For ImageNet experiments, this was a good default value.
# We found that the lg_loss_scale quickly climbed to
# 20-21 within the first ~1K steps of training.
INITIAL_LOG_LOSS_SCALE = 20.0
class TrainLoop:
def __init__(
self,
*,
model,
diffusion,
data,
batch_size,
microbatch,
lr,
ema_rate,
log_interval,
save_interval,
resume_checkpoint,
use_fp16=False,
fp16_scale_growth=1e-3,
schedule_sampler=None,
weight_decay=0.0,
lr_anneal_steps=0,
):
self.model = model
self.diffusion = diffusion
self.data = data
self.batch_size = batch_size
self.microbatch = microbatch if microbatch > 0 else batch_size
self.lr = lr
self.ema_rate = (
[ema_rate]
if isinstance(ema_rate, float)
else [float(x) for x in ema_rate.split(",")]
)
self.log_interval = log_interval
self.save_interval = save_interval
self.resume_checkpoint = resume_checkpoint
self.use_fp16 = use_fp16
self.fp16_scale_growth = fp16_scale_growth
self.schedule_sampler = schedule_sampler or UniformSampler(diffusion)
self.weight_decay = weight_decay
self.lr_anneal_steps = lr_anneal_steps
self.step = 0
self.resume_step = 0
self.global_batch = self.batch_size * dist.get_world_size()
self.sync_cuda = th.cuda.is_available()
self._load_and_sync_parameters()
self.mp_trainer = MixedPrecisionTrainer(
model=self.model,
use_fp16=self.use_fp16,
fp16_scale_growth=fp16_scale_growth,
)
self.opt = AdamW(
self.mp_trainer.master_params, lr=self.lr, weight_decay=self.weight_decay
)
if self.resume_step:
self._load_optimizer_state()
# Model was resumed, either due to a restart or a checkpoint
# being specified at the command line.
self.ema_params = [
self._load_ema_parameters(rate) for rate in self.ema_rate
]
else:
self.ema_params = [
copy.deepcopy(self.mp_trainer.master_params)
for _ in range(len(self.ema_rate))
]
if th.cuda.is_available():
self.use_ddp = True
self.ddp_model = DDP(
self.model,
device_ids=[dist_util.dev()],
output_device=dist_util.dev(),
broadcast_buffers=False,
bucket_cap_mb=128,
find_unused_parameters=False,
)
else:
if dist.get_world_size() > 1:
logger.warn(
"Distributed training requires CUDA. "
"Gradients will not be synchronized properly!"
)
self.use_ddp = False
self.ddp_model = self.model
def _load_and_sync_parameters(self):
resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
if resume_checkpoint:
self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
if dist.get_rank() == 0:
logger.log(f"loading model from checkpoint: {resume_checkpoint}...")
self.model.load_state_dict(
dist_util.load_state_dict(
resume_checkpoint, map_location=dist_util.dev()
)
)
dist_util.sync_params(self.model.parameters())
def _load_ema_parameters(self, rate):
ema_params = copy.deepcopy(self.mp_trainer.master_params)
main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
if ema_checkpoint:
if dist.get_rank() == 0:
logger.log(f"loading EMA from checkpoint: {ema_checkpoint}...")
state_dict = dist_util.load_state_dict(
ema_checkpoint, map_location=dist_util.dev()
)
ema_params = self.mp_trainer.state_dict_to_master_params(state_dict)
dist_util.sync_params(ema_params)
return ema_params
def _load_optimizer_state(self):
main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
opt_checkpoint = bf.join(
bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt"
)
if bf.exists(opt_checkpoint):
logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
state_dict = dist_util.load_state_dict(
opt_checkpoint, map_location=dist_util.dev()
)
self.opt.load_state_dict(state_dict)
def run_loop(self):
while (
not self.lr_anneal_steps
or self.step + self.resume_step < self.lr_anneal_steps
):
batch, cond = next(self.data)
self.run_step(batch, cond)
if self.step % self.log_interval == 0:
logger.dumpkvs()
if self.step % self.save_interval == 0:
self.save()
# Run for a finite amount of time in integration tests.
if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
return
self.step += 1
# Save the last checkpoint if it wasn't already saved.
if (self.step - 1) % self.save_interval != 0:
self.save()
def run_step(self, batch, cond):
self.forward_backward(batch, cond)
took_step = self.mp_trainer.optimize(self.opt)
if took_step:
self._update_ema()
self._anneal_lr()
self.log_step()
def forward_backward(self, batch, cond):
self.mp_trainer.zero_grad()
for i in range(0, batch.shape[0], self.microbatch):
micro = batch[i : i + self.microbatch].to(dist_util.dev())
micro_cond = {
k: v[i : i + self.microbatch].to(dist_util.dev())
for k, v in cond.items()
}
last_batch = (i + self.microbatch) >= batch.shape[0]
t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
compute_losses = functools.partial(
self.diffusion.training_losses,
self.ddp_model,
micro,
t,
model_kwargs=micro_cond,
)
if last_batch or not self.use_ddp:
losses = compute_losses()
else:
with self.ddp_model.no_sync():
losses = compute_losses()
if isinstance(self.schedule_sampler, LossAwareSampler):
self.schedule_sampler.update_with_local_losses(
t, losses["loss"].detach()
)
loss = (losses["loss"] * weights).mean()
log_loss_dict(
self.diffusion, t, {k: v * weights for k, v in losses.items()}
)
self.mp_trainer.backward(loss)
def _update_ema(self):
for rate, params in zip(self.ema_rate, self.ema_params):
update_ema(params, self.mp_trainer.master_params, rate=rate)
def _anneal_lr(self):
if not self.lr_anneal_steps:
return
frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
lr = self.lr * (1 - frac_done)
for param_group in self.opt.param_groups:
param_group["lr"] = lr
def log_step(self):
logger.logkv("step", self.step + self.resume_step)
logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch)
def save(self):
def save_checkpoint(rate, params):
state_dict = self.mp_trainer.master_params_to_state_dict(params)
if dist.get_rank() == 0:
logger.log(f"saving model {rate}...")
if not rate:
filename = f"model{(self.step+self.resume_step):06d}.pt"
else:
filename = f"ema_{rate}_{(self.step+self.resume_step):06d}.pt"
with bf.BlobFile(bf.join(get_blob_logdir(), filename), "wb") as f:
th.save(state_dict, f)
save_checkpoint(0, self.mp_trainer.master_params)
for rate, params in zip(self.ema_rate, self.ema_params):
save_checkpoint(rate, params)
if dist.get_rank() == 0:
with bf.BlobFile(
bf.join(get_blob_logdir(), f"opt{(self.step+self.resume_step):06d}.pt"),
"wb",
) as f:
th.save(self.opt.state_dict(), f)
dist.barrier()
def parse_resume_step_from_filename(filename):
"""
Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
checkpoint's number of steps.
"""
split = filename.split("model")
if len(split) < 2:
return 0
split1 = split[-1].split(".")[0]
try:
return int(split1)
except ValueError:
return 0
def get_blob_logdir():
# You can change this to be a separate path to save checkpoints to
# a blobstore or some external drive.
return logger.get_dir()
def find_resume_checkpoint():
# On your infrastructure, you may want to override this to automatically
# discover the latest checkpoint on your blob storage, etc.
return None
def find_ema_checkpoint(main_checkpoint, step, rate):
if main_checkpoint is None:
return None
filename = f"ema_{rate}_{(step):06d}.pt"
path = bf.join(bf.dirname(main_checkpoint), filename)
if bf.exists(path):
return path
return None
def log_loss_dict(diffusion, ts, losses):
for key, values in losses.items():
logger.logkv_mean(key, values.mean().item())
# Log the quantiles (four quartiles, in particular).
for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
quartile = int(4 * sub_t / diffusion.num_timesteps)
logger.logkv_mean(f"{key}_q{quartile}", sub_loss)
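# Illustrative note (not in the original file): quartile = int(4 * t / T)
# buckets timesteps into four bins, so the per-quartile keys make it easy to
# see whether early or late timesteps dominate the loss.
def _quartile_demo(num_timesteps=1000):
    assert int(4 * 250 / num_timesteps) == 1  # t=250 lands in "<key>_q1"
    assert int(4 * 999 / num_timesteps) == 3  # t=999 lands in "<key>_q3"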
# ----- end of train_util.py (10,604 bytes, avg line length 34.1, max line length 88, py) -----
# ===== ZeCon-main/guided_diffusion/guided_diffusion/respace.py (repo: ZeCon) =====
import numpy as np
import torch as th
from .gaussian_diffusion import GaussianDiffusion
def space_timesteps(num_timesteps, section_counts):
"""
Create a list of timesteps to use from an original diffusion process,
given the number of timesteps we want to take from equally-sized portions
of the original process.
    For example, if there are 300 timesteps and the section counts are [10,15,20]
then the first 100 timesteps are strided to be 10 timesteps, the second 100
are strided to be 15 timesteps, and the final 100 are strided to be 20.
If the stride is a string starting with "ddim", then the fixed striding
from the DDIM paper is used, and only one section is allowed.
:param num_timesteps: the number of diffusion steps in the original
process to divide up.
:param section_counts: either a list of numbers, or a string containing
comma-separated numbers, indicating the step count
per section. As a special case, use "ddimN" where N
is a number of steps to use the striding from the
DDIM paper.
:return: a set of diffusion steps from the original process to use.
"""
if isinstance(section_counts, str):
if section_counts.startswith("ddim"):
desired_count = int(section_counts[len("ddim") :])
for i in range(1, num_timesteps):
if len(range(0, num_timesteps, i)) == desired_count:
return set(range(0, num_timesteps, i))
            raise ValueError(
                f"cannot create exactly {desired_count} steps with an integer stride"
            )
section_counts = [int(x) for x in section_counts.split(",")]
size_per = num_timesteps // len(section_counts)
extra = num_timesteps % len(section_counts)
start_idx = 0
all_steps = []
for i, section_count in enumerate(section_counts):
size = size_per + (1 if i < extra else 0)
if size < section_count:
raise ValueError(
f"cannot divide section of {size} steps into {section_count}"
)
if section_count <= 1:
frac_stride = 1
else:
frac_stride = (size - 1) / (section_count - 1)
cur_idx = 0.0
taken_steps = []
for _ in range(section_count):
taken_steps.append(start_idx + round(cur_idx))
cur_idx += frac_stride
all_steps += taken_steps
start_idx += size
return set(all_steps)
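# Worked example (illustrative, not in the original file): reproducing the
# docstring's 300-step case and the DDIM-style striding.
def _space_timesteps_demo():
    steps = space_timesteps(300, [10, 15, 20])
    assert len(steps) == 45  # 10 + 15 + 20 retained timesteps
    ddim_steps = space_timesteps(1000, "ddim50")
    assert len(ddim_steps) == 50 and max(ddim_steps) < 1000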
class SpacedDiffusion(GaussianDiffusion):
"""
A diffusion process which can skip steps in a base diffusion process.
:param use_timesteps: a collection (sequence or set) of timesteps from the
original diffusion process to retain.
:param kwargs: the kwargs to create the base diffusion process.
"""
def __init__(self, use_timesteps, **kwargs):
self.use_timesteps = set(use_timesteps)
self.timestep_map = []
self.original_num_steps = len(kwargs["betas"])
base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa
last_alpha_cumprod = 1.0
new_betas = []
for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
if i in self.use_timesteps:
new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
last_alpha_cumprod = alpha_cumprod
self.timestep_map.append(i)
kwargs["betas"] = np.array(new_betas)
super().__init__(**kwargs)
def p_mean_variance(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
def training_losses(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().training_losses(self._wrap_model(model), *args, **kwargs)
def condition_mean(self, cond_fn, *args, **kwargs):
return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
def condition_score(self, cond_fn, *args, **kwargs):
return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
def _wrap_model(self, model):
if isinstance(model, _WrappedModel):
return model
return _WrappedModel(
model, self.timestep_map, self.rescale_timesteps, self.original_num_steps
)
def _scale_timesteps(self, t):
# Scaling is done by the wrapped model.
return t
class _WrappedModel:
def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
self.model = model
self.timestep_map = timestep_map
self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, ts, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
if self.rescale_timesteps:
new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
return self.model(x, new_ts, **kwargs)
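# Hedged usage sketch (not part of the original file): respacing a 1000-step
# linear-schedule diffusion down to 50 DDIM steps. This mirrors what
# create_gaussian_diffusion in script_util.py builds with learn_sigma=False
# and sigma_small=True.
def _build_respaced_diffusion():
    from . import gaussian_diffusion as gd
    return SpacedDiffusion(
        use_timesteps=space_timesteps(1000, "ddim50"),
        betas=gd.get_named_beta_schedule("linear", 1000),
        model_mean_type=gd.ModelMeanType.EPSILON,
        model_var_type=gd.ModelVarType.FIXED_SMALL,
        loss_type=gd.LossType.MSE,
        rescale_timesteps=True,
    )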
# ----- end of respace.py (5,193 bytes, avg line length 39.3, max line length 85, py) -----
# ===== ZeCon-main/guided_diffusion/guided_diffusion/dist_util.py (repo: ZeCon) =====
"""
Helpers for distributed training.
"""
import io
import os
import socket
import blobfile as bf
from mpi4py import MPI
import torch as th
import torch.distributed as dist
# Change this to reflect your cluster layout.
# The GPU for a given rank is (rank % GPUS_PER_NODE).
GPUS_PER_NODE = 8
SETUP_RETRY_COUNT = 3
def setup_dist():
"""
Setup a distributed process group.
"""
if dist.is_initialized():
return
os.environ["CUDA_VISIBLE_DEVICES"] = f"{MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE}"
comm = MPI.COMM_WORLD
backend = "gloo" if not th.cuda.is_available() else "nccl"
if backend == "gloo":
hostname = "localhost"
else:
hostname = socket.gethostbyname(socket.getfqdn())
os.environ["MASTER_ADDR"] = comm.bcast(hostname, root=0)
os.environ["RANK"] = str(comm.rank)
os.environ["WORLD_SIZE"] = str(comm.size)
port = comm.bcast(_find_free_port(), root=0)
os.environ["MASTER_PORT"] = str(port)
dist.init_process_group(backend=backend, init_method="env://")
def dev():
"""
Get the device to use for torch.distributed.
"""
if th.cuda.is_available():
        return th.device("cuda")
return th.device("cpu")
def load_state_dict(path, **kwargs):
"""
Load a PyTorch file without redundant fetches across MPI ranks.
"""
chunk_size = 2 ** 30 # MPI has a relatively small size limit
if MPI.COMM_WORLD.Get_rank() == 0:
with bf.BlobFile(path, "rb") as f:
data = f.read()
num_chunks = len(data) // chunk_size
if len(data) % chunk_size:
num_chunks += 1
MPI.COMM_WORLD.bcast(num_chunks)
for i in range(0, len(data), chunk_size):
MPI.COMM_WORLD.bcast(data[i : i + chunk_size])
else:
num_chunks = MPI.COMM_WORLD.bcast(None)
data = bytes()
for _ in range(num_chunks):
data += MPI.COMM_WORLD.bcast(None)
return th.load(io.BytesIO(data), **kwargs)
def sync_params(params):
"""
Synchronize a sequence of Tensors across ranks from rank 0.
"""
for p in params:
with th.no_grad():
dist.broadcast(p, 0)
def _find_free_port():
    # create the socket outside the try block so `s` is always bound in `finally`
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
finally:
s.close()
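# Usage sketch (hypothetical): typical flow when launched under MPI,
# e.g. `mpiexec -n 4 python train.py`.
#   import dist_util
#   dist_util.setup_dist()
#   state = dist_util.load_state_dict("checkpoints/model.pt", map_location="cpu")
#   model.load_state_dict(state)
#   model.to(dist_util.dev())
#   dist_util.sync_params(model.parameters())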
| 2,424
| 24.797872
| 87
|
py
|
ZeCon
|
ZeCon-main/utils/visualization.py
|
from pathlib import Path
import matplotlib.pyplot as plt
from typing import Optional, Union
from PIL.Image import Image
def show_edited_masked_image(
title: str,
source_image: Image,
edited_image: Image,
mask: Optional[Image] = None,
path: Optional[Union[str, Path]] = None,
distance: Optional[str] = None,
):
fig_idx = 1
rows = 1
cols = 3 if mask is not None else 2
fig = plt.figure(figsize=(12, 5))
figure_title = f'Prompt: "{title}"'
if distance is not None:
figure_title += f" ({distance})"
plt.title(figure_title)
plt.axis("off")
fig.add_subplot(rows, cols, fig_idx)
fig_idx += 1
_set_image_plot_name("Source Image")
plt.imshow(source_image)
if mask is not None:
fig.add_subplot(rows, cols, fig_idx)
_set_image_plot_name("Mask")
plt.imshow(mask)
plt.gray()
fig_idx += 1
fig.add_subplot(rows, cols, fig_idx)
_set_image_plot_name("Edited Image")
plt.imshow(edited_image)
if path is not None:
plt.savefig(path, bbox_inches="tight")
else:
plt.show(block=True)
plt.close()
def _set_image_plot_name(name):
plt.title(name)
plt.xticks([])
plt.yticks([])
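# Usage sketch (hypothetical file names):
#   from PIL import Image
#   src = Image.open("source.png")
#   out = Image.open("edited.png")
#   show_edited_masked_image("a watercolor painting", src, out, path="result.png")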
| 1,237
| 21.925926
| 46
|
py
|
ZeCon
|
ZeCon-main/utils/metrics_accumulator.py
|
from collections import defaultdict
import numpy as np
class MetricsAccumulator:
def __init__(self) -> None:
        self.accumulator = defaultdict(list)
def update_metric(self, metric_name, metric_value):
self.accumulator[metric_name].append(metric_value)
def print_average_metric(self):
for k, v in self.accumulator.items():
average_v = np.array(v).mean()
print(f"{k} - {average_v:.2f}")
self.__init__()
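# Usage sketch:
#   acc = MetricsAccumulator()
#   acc.update_metric("clip_loss", 0.42)
#   acc.update_metric("clip_loss", 0.38)
#   acc.print_average_metric()  # prints "clip_loss - 0.40", then resets itself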
| 478
| 24.210526
| 58
|
py
|
Few-NERD
|
Few-NERD-main/run_supervised.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for named entity recognition on CoNLL-2003 (Bert). """
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from seqeval.metrics import precision_score, recall_score, f1_score
# from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from util.supervised_util import convert_examples_to_features, get_labels, read_examples_from_file
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import WEIGHTS_NAME, BertConfig, BertForTokenClassification, BertTokenizer
from util.metric import Metrics
logger = logging.getLogger(__name__)
# ALL_MODELS = sum(
# (tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, )),
# ())
MODEL_CLASSES = {
"bert": (BertConfig, BertForTokenClassification, BertTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
""" Train the model """
# if args.local_rank in [-1, 0]:
# tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
# scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss = 0.0
best_metric = 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2] if args.model_type in ["bert", "xlnet"] else None,
                      # XLM and RoBERTa don't use segment_ids
"labels": batch[3]}
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
# model.zero_grad()
optimizer.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev")
if results["f1"] > best_metric:
best_metric = results["f1"]
output_dir = os.path.join(args.output_dir, "checkpoint-best")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, "module") else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
tokenizer.save_pretrained(output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, "module") else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
# if args.local_rank in [-1, 0]:
# tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation %s *****", prefix)
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2] if args.model_type in ["bert", "xlnet"] else None,
                      # XLM and RoBERTa don't use segment_ids
"labels": batch[3]}
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[0], outputs[1]
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
# memory management
del outputs, tmp_eval_loss, logits
torch.cuda.empty_cache()
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
metric = Metrics()
p, r, f = metric.metrics_by_entity(preds_list, out_label_list)
results = {
"precision": p,
"recall": r,
"f1": f
}
logger.info("***** Eval results %s *****", prefix)
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
return results, preds_list
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode):
    if args.local_rank not in [-1, 0] and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Load data features from cache or dataset file
cached_features_file = os.path.join(args.data_dir, "cached_{}_{}_{}".format(mode,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length)))
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
examples = read_examples_from_file(args.data_dir, mode)
features = convert_examples_to_features(examples, labels, args.max_seq_length, tokenizer,
cls_token_at_end=bool(args.model_type in ["xlnet"]),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(args.model_type in ["roberta"]),
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left=bool(args.model_type in ["xlnet"]),
# pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
pad_token_label_id=pad_token_label_id
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
    if args.local_rank == 0 and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.")
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--labels", default="", type=str,
help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.")
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action="store_true",
help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true",
help="Whether to run eval on the dev set.")
parser.add_argument("--do_predict", action="store_true",
help="Whether to run predictions on the test set.")
parser.add_argument("--evaluate_during_training", action="store_true",
help="Whether to run evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action="store_true",
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument("--gradient_accumulation_steps", type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=50,
help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=50,
help="Save checkpoint every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action="store_true",
help="Avoid using CUDA when available")
parser.add_argument("--overwrite_output_dir", action="store_true",
help="Overwrite the content of the output directory")
parser.add_argument("--overwrite_cache", action="store_true",
help="Overwrite the cached training and evaluation sets")
parser.add_argument("--seed", type=int, default=42,
help="random seed for initialization")
parser.add_argument("--fp16", action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument("--fp16_opt_level", type=str, default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(
args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Prepare CONLL-2003 task
labels = get_labels(args.labels)
num_labels = len(labels)
# Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later
pad_token_label_id = CrossEntropyLoss().ignore_index
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train")
global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
best_checkpoint = os.path.join(args.output_dir, "checkpoint-best")
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0) and not os.path.exists(best_checkpoint):
os.makedirs(best_checkpoint)
logger.info("Saving model checkpoint to %s", best_checkpoint)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model, "module") else model # Take care of distributed/parallel training
model_to_save.save_pretrained(best_checkpoint)
tokenizer.save_pretrained(best_checkpoint)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(best_checkpoint, "training_args.bin"))
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(best_checkpoint, do_lower_case=args.do_lower_case)
checkpoints = [best_checkpoint]
if args.eval_all_checkpoints:
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)))
logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step)
if global_step:
result = {"{}_{}".format(global_step, k): v for k, v in result.items()}
results.update(result)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
if args.do_predict and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(best_checkpoint, do_lower_case=args.do_lower_case)
model = model_class.from_pretrained(best_checkpoint)
model.to(args.device)
result, predictions = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="test")
# Save results
output_test_results_file = os.path.join(best_checkpoint, "test_results.txt")
with open(output_test_results_file, "w") as writer:
for key in sorted(result.keys()):
writer.write("{} = {}\n".format(key, str(result[key])))
# Save predictions
output_test_predictions_file = os.path.join(best_checkpoint, "test_predictions.txt")
with open(output_test_predictions_file, "w") as writer:
with open(os.path.join(args.data_dir, "test.txt"), "r") as f:
example_id = 0
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
writer.write(line)
if not predictions[example_id]:
example_id += 1
elif predictions[example_id]:
output_line = line.split()[0] + " " + predictions[example_id].pop(0) + "\n"
writer.write(output_line)
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
return results
if __name__ == "__main__":
main()
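# Example invocation (hypothetical paths):
#   python run_supervised.py --data_dir data/supervised --model_type bert \
#       --model_name_or_path bert-base-uncased --output_dir output \
#       --do_train --do_eval --evaluate_during_training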
| 28,048
| 52.940385
| 184
|
py
|
Few-NERD
|
Few-NERD-main/train_demo.py
|
from transformers import BertTokenizer
from util.data_loader import get_loader
from util.framework import FewShotNERFramework
from util.word_encoder import BERTWordEncoder
from model.proto import Proto
from model.nnshot import NNShot
import sys
import torch
from torch import optim, nn
import numpy as np
import json
import argparse
import os
import random
def set_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--mode', default='inter',
help='training mode, must be in [inter, intra]')
parser.add_argument('--trainN', default=2, type=int,
help='N in train')
parser.add_argument('--N', default=2, type=int,
help='N way')
parser.add_argument('--K', default=2, type=int,
help='K shot')
parser.add_argument('--Q', default=3, type=int,
help='Num of query per class')
parser.add_argument('--batch_size', default=4, type=int,
help='batch size')
parser.add_argument('--train_iter', default=600, type=int,
help='num of iters in training')
parser.add_argument('--val_iter', default=100, type=int,
help='num of iters in validation')
parser.add_argument('--test_iter', default=500, type=int,
help='num of iters in testing')
parser.add_argument('--val_step', default=20, type=int,
help='val after training how many iters')
parser.add_argument('--model', default='proto',
help='model name, must be proto, nnshot, or structshot')
parser.add_argument('--max_length', default=100, type=int,
help='max length')
parser.add_argument('--lr', default=1e-4, type=float,
help='learning rate')
parser.add_argument('--grad_iter', default=1, type=int,
help='accumulate gradient every x iterations')
parser.add_argument('--load_ckpt', default=None,
help='load ckpt')
parser.add_argument('--save_ckpt', default=None,
help='save ckpt')
parser.add_argument('--fp16', action='store_true',
help='use nvidia apex fp16')
parser.add_argument('--only_test', action='store_true',
help='only test')
parser.add_argument('--ckpt_name', type=str, default='',
help='checkpoint name.')
parser.add_argument('--seed', type=int, default=0,
help='random seed')
parser.add_argument('--ignore_index', type=int, default=-1,
help='label index to ignore when calculating loss and metrics')
parser.add_argument('--use_sampled_data', action='store_true',
help='use released sampled data, the data should be stored at "data/episode-data/" ')
# only for bert / roberta
parser.add_argument('--pretrain_ckpt', default=None,
help='bert / roberta pre-trained checkpoint')
# only for prototypical networks
parser.add_argument('--dot', action='store_true',
help='use dot instead of L2 distance for proto')
# only for structshot
parser.add_argument('--tau', default=0.05, type=float,
help='StructShot parameter to re-normalizes the transition probabilities')
# experiment
parser.add_argument('--use_sgd_for_bert', action='store_true',
help='use SGD instead of AdamW for BERT.')
opt = parser.parse_args()
trainN = opt.trainN
N = opt.N
K = opt.K
Q = opt.Q
batch_size = opt.batch_size
model_name = opt.model
max_length = opt.max_length
print("{}-way-{}-shot Few-Shot NER".format(N, K))
print("model: {}".format(model_name))
print("max_length: {}".format(max_length))
print('mode: {}'.format(opt.mode))
set_seed(opt.seed)
print('loading model and tokenizer...')
pretrain_ckpt = opt.pretrain_ckpt or 'bert-base-uncased'
word_encoder = BERTWordEncoder(
pretrain_ckpt)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
print('loading data...')
if not opt.use_sampled_data:
opt.train = f'data/{opt.mode}/train.txt'
opt.test = f'data/{opt.mode}/test.txt'
opt.dev = f'data/{opt.mode}/dev.txt'
if not (os.path.exists(opt.train) and os.path.exists(opt.dev) and os.path.exists(opt.test)):
os.system(f'bash data/download.sh {opt.mode}')
else:
opt.train = f'data/episode-data/{opt.mode}/train_{opt.N}_{opt.K}.jsonl'
opt.test = f'data/episode-data/{opt.mode}/test_{opt.N}_{opt.K}.jsonl'
opt.dev = f'data/episode-data/{opt.mode}/dev_{opt.N}_{opt.K}.jsonl'
if not (os.path.exists(opt.train) and os.path.exists(opt.dev) and os.path.exists(opt.test)):
            os.system('bash data/download.sh episode-data')
os.system('unzip -d data/ data/episode-data.zip')
if opt.mode == "supervised":
print("Warning: you are running few-shot learning methods on `supervised` dataset, if it is not expected, please change to `--mode inter` or `--mode intra`.")
train_data_loader = get_loader(opt.train, tokenizer,
N=trainN, K=K, Q=Q, batch_size=batch_size, max_length=max_length, ignore_index=opt.ignore_index, use_sampled_data=opt.use_sampled_data)
val_data_loader = get_loader(opt.dev, tokenizer,
N=N, K=K, Q=Q, batch_size=batch_size, max_length=max_length, ignore_index=opt.ignore_index, use_sampled_data=opt.use_sampled_data)
test_data_loader = get_loader(opt.test, tokenizer,
N=N, K=K, Q=Q, batch_size=batch_size, max_length=max_length, ignore_index=opt.ignore_index, use_sampled_data=opt.use_sampled_data)
prefix = '-'.join([model_name, opt.mode, str(N), str(K), 'seed'+str(opt.seed)])
if opt.dot:
prefix += '-dot'
if len(opt.ckpt_name) > 0:
prefix += '-' + opt.ckpt_name
if model_name == 'proto':
print('use proto')
model = Proto(word_encoder, dot=opt.dot, ignore_index=opt.ignore_index)
framework = FewShotNERFramework(train_data_loader, val_data_loader, test_data_loader, use_sampled_data=opt.use_sampled_data)
elif model_name == 'nnshot':
print('use nnshot')
model = NNShot(word_encoder, dot=opt.dot, ignore_index=opt.ignore_index)
framework = FewShotNERFramework(train_data_loader, val_data_loader, test_data_loader, use_sampled_data=opt.use_sampled_data)
elif model_name == 'structshot':
print('use structshot')
model = NNShot(word_encoder, dot=opt.dot, ignore_index=opt.ignore_index)
framework = FewShotNERFramework(train_data_loader, val_data_loader, test_data_loader, N=opt.N, tau=opt.tau, train_fname=opt.train, viterbi=True, use_sampled_data=opt.use_sampled_data)
else:
raise NotImplementedError
if not os.path.exists('checkpoint'):
os.mkdir('checkpoint')
ckpt = 'checkpoint/{}.pth.tar'.format(prefix)
if opt.save_ckpt:
ckpt = opt.save_ckpt
print('model-save-path:', ckpt)
if torch.cuda.is_available():
model.cuda()
if not opt.only_test:
if opt.lr == -1:
opt.lr = 2e-5
framework.train(model, prefix,
load_ckpt=opt.load_ckpt, save_ckpt=ckpt,
val_step=opt.val_step, fp16=opt.fp16,
train_iter=opt.train_iter, warmup_step=int(opt.train_iter * 0.1), val_iter=opt.val_iter, learning_rate=opt.lr, use_sgd_for_bert=opt.use_sgd_for_bert)
else:
ckpt = opt.load_ckpt
if ckpt is None:
print("Warning: --load_ckpt is not specified. Will load Hugginface pre-trained checkpoint.")
ckpt = 'none'
# test
precision, recall, f1, fp, fn, within, outer = framework.eval(model, opt.test_iter, ckpt=ckpt)
print("RESULT: precision: %.4f, recall: %.4f, f1:%.4f" % (precision, recall, f1))
print('ERROR ANALYSIS: fp: %.4f, fn: %.4f, within:%.4f, outer: %.4f'%(fp, fn, within, outer))
if __name__ == "__main__":
main()
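# Example invocation (hypothetical): 5-way 1-shot prototypical network on the INTER split.
#   python train_demo.py --mode inter --model proto --trainN 5 --N 5 --K 1 --Q 1 \
#       --train_iter 10000 --val_iter 500 --batch_size 2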
| 8,044
| 42.02139
| 191
|
py
|
Few-NERD
|
Few-NERD-main/util/fewshotsampler.py
|
import random
class FewshotSampleBase:
'''
Abstract Class
DO NOT USE
Build your own Sample class and inherit from this class
'''
def __init__(self):
self.class_count = {}
def get_class_count(self):
'''
return a dictionary of {class_name:count} in format {any : int}
'''
return self.class_count
class FewshotSampler:
'''
sample one support set and one query set
'''
def __init__(self, N, K, Q, samples, classes=None, random_state=0):
'''
N: int, how many types in each set
K: int, how many instances for each type in support set
Q: int, how many instances for each type in query set
samples: List[Sample], Sample class must have `get_class_count` attribute
classes[Optional]: List[any], all unique classes in samples. If not given, the classes will be got from samples.get_class_count()
random_state[Optional]: int, the random seed
'''
self.K = K
self.N = N
self.Q = Q
self.samples = samples
self.__check__() # check if samples have correct types
if classes:
self.classes = classes
else:
self.classes = self.__get_all_classes__()
random.seed(random_state)
def __get_all_classes__(self):
classes = []
for sample in self.samples:
classes += list(sample.get_class_count().keys())
return list(set(classes))
def __check__(self):
for idx, sample in enumerate(self.samples):
            if not hasattr(sample, 'get_class_count'):
                raise ValueError(f'samples in self.samples expected to have `get_class_count` attribute, but self.samples[{idx}] does not')
def __additem__(self, index, set_class):
class_count = self.samples[index].get_class_count()
for class_name in class_count:
if class_name in set_class:
set_class[class_name] += class_count[class_name]
else:
set_class[class_name] = class_count[class_name]
def __valid_sample__(self, sample, set_class, target_classes):
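        # a sample is valid only if all of its classes are target classes,
        # adding it keeps every class count at most 2*k, and it still helps
        # fill at least one class that is below k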
threshold = 2 * set_class['k']
class_count = sample.get_class_count()
if not class_count:
return False
isvalid = False
for class_name in class_count:
if class_name not in target_classes:
return False
if class_count[class_name] + set_class.get(class_name, 0) > threshold:
return False
if set_class.get(class_name, 0) < set_class['k']:
isvalid = True
return isvalid
def __finish__(self, set_class):
if len(set_class) < self.N+1:
return False
for k in set_class:
if set_class[k] < set_class['k']:
return False
return True
def __get_candidates__(self, target_classes):
return [idx for idx, sample in enumerate(self.samples) if sample.valid(target_classes)]
def __next__(self):
'''
randomly sample one support set and one query set
return:
target_classes: List[any]
support_idx: List[int], sample index in support set in samples list
            query_idx: List[int], sample index in query set in samples list
'''
support_class = {'k':self.K}
support_idx = []
query_class = {'k':self.Q}
query_idx = []
target_classes = random.sample(self.classes, self.N)
candidates = self.__get_candidates__(target_classes)
while not candidates:
target_classes = random.sample(self.classes, self.N)
candidates = self.__get_candidates__(target_classes)
# greedy search for support set
while not self.__finish__(support_class):
index = random.choice(candidates)
if index not in support_idx:
if self.__valid_sample__(self.samples[index], support_class, target_classes):
self.__additem__(index, support_class)
support_idx.append(index)
# same for query set
while not self.__finish__(query_class):
index = random.choice(candidates)
if index not in query_idx and index not in support_idx:
if self.__valid_sample__(self.samples[index], query_class, target_classes):
self.__additem__(index, query_class)
query_idx.append(index)
return target_classes, support_idx, query_idx
def __iter__(self):
return self
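# Usage sketch (hypothetical): Sample objects such as those in util/data_loader.py
# provide both `get_class_count` and `valid`, which the sampler relies on.
#   sampler = FewshotSampler(N=5, K=1, Q=1, samples=samples)
#   target_classes, support_idx, query_idx = next(sampler)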
| 4,636
| 36.395161
| 137
|
py
|
Few-NERD
|
Few-NERD-main/util/word_encoder.py
|
import torch
import torch.nn as nn
from transformers import BertModel
class BERTWordEncoder(nn.Module):
def __init__(self, pretrain_path):
nn.Module.__init__(self)
self.bert = BertModel.from_pretrained(pretrain_path)
def forward(self, words, masks):
outputs = self.bert(words, attention_mask=masks, output_hidden_states=True, return_dict=True)
#outputs = self.bert(inputs['word'], attention_mask=inputs['mask'], output_hidden_states=True, return_dict=True)
# use the sum of the last 4 layers
last_four_hidden_states = torch.cat([hidden_state.unsqueeze(0) for hidden_state in outputs['hidden_states'][-4:]], 0)
del outputs
word_embeddings = torch.sum(last_four_hidden_states, 0) # [num_sent, number_of_tokens, 768]
return word_embeddings
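# Usage sketch (hypothetical tensors):
#   encoder = BERTWordEncoder('bert-base-uncased')
#   # words: (num_sent, seq_len) token ids, masks: (num_sent, seq_len) attention mask
#   embeddings = encoder(words, masks)  # (num_sent, seq_len, 768)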
| 1,047
| 42.666667
| 163
|
py
|
Few-NERD
|
Few-NERD-main/util/data_loader.py
|
import torch
import torch.utils.data as data
import os
from .fewshotsampler import FewshotSampler, FewshotSampleBase
import numpy as np
import json
def get_class_name(rawtag):
# get (finegrained) class name
if rawtag.startswith('B-') or rawtag.startswith('I-'):
return rawtag[2:]
else:
return rawtag
class Sample(FewshotSampleBase):
def __init__(self, filelines):
filelines = [line.split('\t') for line in filelines]
self.words, self.tags = zip(*filelines)
self.words = [word.lower() for word in self.words]
# strip B-, I-
self.normalized_tags = list(map(get_class_name, self.tags))
self.class_count = {}
def __count_entities__(self):
current_tag = self.normalized_tags[0]
for tag in self.normalized_tags[1:]:
if tag == current_tag:
continue
else:
if current_tag != 'O':
if current_tag in self.class_count:
self.class_count[current_tag] += 1
else:
self.class_count[current_tag] = 1
current_tag = tag
if current_tag != 'O':
if current_tag in self.class_count:
self.class_count[current_tag] += 1
else:
self.class_count[current_tag] = 1
def get_class_count(self):
if self.class_count:
return self.class_count
else:
self.__count_entities__()
return self.class_count
def get_tag_class(self):
# strip 'B' 'I'
tag_class = list(set(self.normalized_tags))
if 'O' in tag_class:
tag_class.remove('O')
return tag_class
def valid(self, target_classes):
return (set(self.get_class_count().keys()).intersection(set(target_classes))) and not (set(self.get_class_count().keys()).difference(set(target_classes)))
def __str__(self):
newlines = zip(self.words, self.tags)
return '\n'.join(['\t'.join(line) for line in newlines])
class FewShotNERDatasetWithRandomSampling(data.Dataset):
"""
Fewshot NER Dataset
"""
def __init__(self, filepath, tokenizer, N, K, Q, max_length, ignore_label_id=-1):
        if not os.path.exists(filepath):
            raise FileNotFoundError(f"[ERROR] Data file {filepath} does not exist!")
self.class2sampleid = {}
self.N = N
self.K = K
self.Q = Q
self.tokenizer = tokenizer
self.samples, self.classes = self.__load_data_from_file__(filepath)
self.max_length = max_length
self.sampler = FewshotSampler(N, K, Q, self.samples, classes=self.classes)
self.ignore_label_id = ignore_label_id
def __insert_sample__(self, index, sample_classes):
for item in sample_classes:
if item in self.class2sampleid:
self.class2sampleid[item].append(index)
else:
self.class2sampleid[item] = [index]
def __load_data_from_file__(self, filepath):
samples = []
classes = []
        with open(filepath, 'r', encoding='utf-8') as f:
lines = f.readlines()
samplelines = []
index = 0
for line in lines:
line = line.strip()
if line:
samplelines.append(line)
else:
sample = Sample(samplelines)
samples.append(sample)
sample_classes = sample.get_tag_class()
self.__insert_sample__(index, sample_classes)
classes += sample_classes
samplelines = []
index += 1
if samplelines:
sample = Sample(samplelines)
samples.append(sample)
sample_classes = sample.get_tag_class()
self.__insert_sample__(index, sample_classes)
classes += sample_classes
samplelines = []
index += 1
classes = list(set(classes))
return samples, classes
def __get_token_label_list__(self, sample):
tokens = []
labels = []
for word, tag in zip(sample.words, sample.normalized_tags):
word_tokens = self.tokenizer.tokenize(word)
if word_tokens:
tokens.extend(word_tokens)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
word_labels = [self.tag2label[tag]] + [self.ignore_label_id] * (len(word_tokens) - 1)
labels.extend(word_labels)
return tokens, labels
def __getraw__(self, tokens, labels):
# get tokenized word list, attention mask, text mask (mask [CLS], [SEP] as well), tags
# split into chunks of length (max_length-2)
# 2 is for special tokens [CLS] and [SEP]
tokens_list = []
labels_list = []
while len(tokens) > self.max_length - 2:
tokens_list.append(tokens[:self.max_length-2])
tokens = tokens[self.max_length-2:]
labels_list.append(labels[:self.max_length-2])
labels = labels[self.max_length-2:]
if tokens:
tokens_list.append(tokens)
labels_list.append(labels)
# add special tokens and get masks
indexed_tokens_list = []
mask_list = []
text_mask_list = []
for i, tokens in enumerate(tokens_list):
# token -> ids
tokens = ['[CLS]'] + tokens + ['[SEP]']
indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokens)
# padding
while len(indexed_tokens) < self.max_length:
indexed_tokens.append(0)
indexed_tokens_list.append(indexed_tokens)
# mask
mask = np.zeros((self.max_length), dtype=np.int32)
mask[:len(tokens)] = 1
mask_list.append(mask)
# text mask, also mask [CLS] and [SEP]
text_mask = np.zeros((self.max_length), dtype=np.int32)
text_mask[1:len(tokens)-1] = 1
text_mask_list.append(text_mask)
assert len(labels_list[i]) == len(tokens) - 2, print(labels_list[i], tokens)
return indexed_tokens_list, mask_list, text_mask_list, labels_list
def __additem__(self, index, d, word, mask, text_mask, label):
d['index'].append(index)
d['word'] += word
d['mask'] += mask
d['label'] += label
d['text_mask'] += text_mask
def __populate__(self, idx_list, savelabeldic=False):
'''
populate samples into data dict
set savelabeldic=True if you want to save label2tag dict
'index': sample_index
'word': tokenized word ids
'mask': attention mask in BERT
'label': NER labels
'sentence_num': number of sentences in this set (a batch contains multiple sets)
'text_mask': 0 for special tokens and paddings, 1 for real text
'''
dataset = {'index':[], 'word': [], 'mask': [], 'label':[], 'sentence_num':[], 'text_mask':[] }
for idx in idx_list:
tokens, labels = self.__get_token_label_list__(self.samples[idx])
word, mask, text_mask, label = self.__getraw__(tokens, labels)
word = torch.tensor(word).long()
mask = torch.tensor(np.array(mask)).long()
text_mask = torch.tensor(np.array(text_mask)).long()
self.__additem__(idx, dataset, word, mask, text_mask, label)
dataset['sentence_num'] = [len(dataset['word'])]
if savelabeldic:
dataset['label2tag'] = [self.label2tag]
return dataset
def __getitem__(self, index):
target_classes, support_idx, query_idx = self.sampler.__next__()
# add 'O' and make sure 'O' is labeled 0
distinct_tags = ['O'] + target_classes
self.tag2label = {tag:idx for idx, tag in enumerate(distinct_tags)}
self.label2tag = {idx:tag for idx, tag in enumerate(distinct_tags)}
support_set = self.__populate__(support_idx)
query_set = self.__populate__(query_idx, savelabeldic=True)
return support_set, query_set
def __len__(self):
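        # episodes are sampled on the fly, so any large epoch length works here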
return 100000
class FewShotNERDataset(FewShotNERDatasetWithRandomSampling):
def __init__(self, filepath, tokenizer, max_length, ignore_label_id=-1):
        if not os.path.exists(filepath):
            raise FileNotFoundError(f"[ERROR] Data file {filepath} does not exist!")
self.class2sampleid = {}
self.tokenizer = tokenizer
self.samples = self.__load_data_from_file__(filepath)
self.max_length = max_length
self.ignore_label_id = ignore_label_id
def __load_data_from_file__(self, filepath):
        with open(filepath) as f:
lines = f.readlines()
for i in range(len(lines)):
lines[i] = json.loads(lines[i].strip())
return lines
def __additem__(self, d, word, mask, text_mask, label):
d['word'] += word
d['mask'] += mask
d['label'] += label
d['text_mask'] += text_mask
def __get_token_label_list__(self, words, tags):
tokens = []
labels = []
for word, tag in zip(words, tags):
word_tokens = self.tokenizer.tokenize(word)
if word_tokens:
tokens.extend(word_tokens)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
word_labels = [self.tag2label[tag]] + [self.ignore_label_id] * (len(word_tokens) - 1)
labels.extend(word_labels)
return tokens, labels
def __populate__(self, data, savelabeldic=False):
'''
populate samples into data dict
set savelabeldic=True if you want to save label2tag dict
'word': tokenized word ids
'mask': attention mask in BERT
'label': NER labels
'sentence_num': number of sentences in this set (a batch contains multiple sets)
'text_mask': 0 for special tokens and paddings, 1 for real text
'''
dataset = {'word': [], 'mask': [], 'label':[], 'sentence_num':[], 'text_mask':[] }
for i in range(len(data['word'])):
tokens, labels = self.__get_token_label_list__(data['word'][i], data['label'][i])
word, mask, text_mask, label = self.__getraw__(tokens, labels)
word = torch.tensor(word).long()
mask = torch.tensor(mask).long()
text_mask = torch.tensor(text_mask).long()
self.__additem__(dataset, word, mask, text_mask, label)
dataset['sentence_num'] = [len(dataset['word'])]
if savelabeldic:
dataset['label2tag'] = [self.label2tag]
return dataset
def __getitem__(self, index):
sample = self.samples[index]
target_classes = sample['types']
support = sample['support']
query = sample['query']
# add 'O' and make sure 'O' is labeled 0
distinct_tags = ['O'] + target_classes
self.tag2label = {tag:idx for idx, tag in enumerate(distinct_tags)}
self.label2tag = {idx:tag for idx, tag in enumerate(distinct_tags)}
support_set = self.__populate__(support)
query_set = self.__populate__(query, savelabeldic=True)
return support_set, query_set
def __len__(self):
return len(self.samples)
def collate_fn(data):
batch_support = {'word': [], 'mask': [], 'label':[], 'sentence_num':[], 'text_mask':[]}
batch_query = {'word': [], 'mask': [], 'label':[], 'sentence_num':[], 'label2tag':[], 'text_mask':[]}
support_sets, query_sets = zip(*data)
for i in range(len(support_sets)):
for k in batch_support:
batch_support[k] += support_sets[i][k]
for k in batch_query:
batch_query[k] += query_sets[i][k]
for k in batch_support:
if k != 'label' and k != 'sentence_num':
batch_support[k] = torch.stack(batch_support[k], 0)
for k in batch_query:
if k !='label' and k != 'sentence_num' and k!= 'label2tag':
batch_query[k] = torch.stack(batch_query[k], 0)
batch_support['label'] = [torch.tensor(tag_list).long() for tag_list in batch_support['label']]
batch_query['label'] = [torch.tensor(tag_list).long() for tag_list in batch_query['label']]
return batch_support, batch_query
def get_loader(filepath, tokenizer, N, K, Q, batch_size, max_length,
num_workers=8, collate_fn=collate_fn, ignore_index=-1, use_sampled_data=True):
if not use_sampled_data:
dataset = FewShotNERDatasetWithRandomSampling(filepath, tokenizer, N, K, Q, max_length, ignore_label_id=ignore_index)
else:
dataset = FewShotNERDataset(filepath, tokenizer, max_length, ignore_label_id=ignore_index)
data_loader = data.DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=True,
pin_memory=True,
num_workers=num_workers,
collate_fn=collate_fn)
return data_loader
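# Usage sketch (hypothetical path):
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   loader = get_loader('data/inter/train.txt', tokenizer, N=5, K=1, Q=1,
#                       batch_size=2, max_length=100, use_sampled_data=False)
#   support, query = next(iter(loader))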
| 13,114
| 39.353846
| 162
|
py
|
Few-NERD
|
Few-NERD-main/util/viterbi.py
|
import torch
import torch.nn as nn
START_ID = 0
O_ID = 1
class ViterbiDecoder:
"""
Generalized Viterbi decoding
"""
def __init__(self, n_tag, abstract_transitions, tau):
"""
We assume the batch size is 1, so no need to worry about PAD for now
n_tag: START, O, and I_Xs
"""
super().__init__()
self.transitions = self.project_target_transitions(n_tag, abstract_transitions, tau)
@staticmethod
def project_target_transitions(n_tag, abstract_transitions, tau):
s_o, s_i, o_o, o_i, i_o, i_i, x_y = abstract_transitions
# self transitions for I-X tags
a = torch.eye(n_tag) * i_i
# transitions from I-X to I-Y
b = torch.ones(n_tag, n_tag) * x_y / (n_tag - 3)
c = torch.eye(n_tag) * x_y / (n_tag - 3)
transitions = a + b - c
# transition from START to O
transitions[START_ID, O_ID] = s_o
# transitions from START to I-X
transitions[START_ID, O_ID+1:] = s_i / (n_tag - 2)
# transition from O to O
transitions[O_ID, O_ID] = o_o
# transitions from O to I-X
transitions[O_ID, O_ID+1:] = o_i / (n_tag - 2)
# transitions from I-X to O
transitions[O_ID+1:, O_ID] = i_o
# no transitions to START
transitions[:, START_ID] = 0.
powered = torch.pow(transitions, tau)
summed = powered.sum(dim=1)
transitions = powered / summed.view(n_tag, 1)
        transitions = torch.where(transitions > 0, transitions, torch.tensor(1e-6))
#print(transitions)
#print(torch.sum(transitions, dim=1))
return torch.log(transitions)
def forward(self, scores: torch.Tensor) -> torch.Tensor: # type: ignore
"""
Take the emission scores calculated by NERModel, and return a tensor of CRF features,
which is the sum of transition scores and emission scores.
:param scores: emission scores calculated by NERModel.
shape: (batch_size, sentence_length, ntags)
:return: a tensor containing the CRF features whose shape is
(batch_size, sentence_len, ntags, ntags). F[b, t, i, j] represents
emission[t, j] + transition[i, j] for the b'th sentence in this batch.
"""
batch_size, sentence_len, _ = scores.size()
# expand the transition matrix batch-wise as well as sentence-wise
transitions = self.transitions.expand(batch_size, sentence_len, -1, -1)
# add another dimension for the "from" state, then expand to match
# the dimensions of the expanded transition matrix above
emissions = scores.unsqueeze(2).expand_as(transitions)
# add them up
return transitions + emissions
@staticmethod
def viterbi(features: torch.Tensor) -> torch.Tensor:
"""
Decode the most probable sequence of tags.
Note that the delta values are calculated in the log space.
:param features: the feature matrix from the forward method of CRF.
shaped (batch_size, sentence_len, ntags, ntags)
:return: a tensor containing the most probable sequences for the batch.
shaped (batch_size, sentence_len)
"""
batch_size, sentence_len, ntags, _ = features.size()
# initialize the deltas
delta_t = features[:, 0, START_ID, :]
deltas = [delta_t]
# use dynamic programming to iteratively calculate the delta values
for t in range(1, sentence_len):
f_t = features[:, t]
delta_t, _ = torch.max(f_t + delta_t.unsqueeze(2).expand_as(f_t), 1)
deltas.append(delta_t)
# now iterate backward to figure out the most probable tags
sequences = [torch.argmax(deltas[-1], 1, keepdim=True)]
for t in reversed(range(sentence_len - 1)):
f_prev = features[:, t + 1].gather(
2, sequences[-1].unsqueeze(2).expand(batch_size, ntags, 1)).squeeze(2)
sequences.append(torch.argmax(f_prev + deltas[t], 1, keepdim=True))
sequences.reverse()
return torch.cat(sequences, dim=1)
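# Usage sketch (hypothetical inputs): decode a batch of emission scores for a
# 5-way episode (n_tag = 5 entity types + O + START = 7).
#   trans = get_abstract_transitions('data/inter/train.txt')  # from util.framework
#   decoder = ViterbiDecoder(n_tag=7, abstract_transitions=trans, tau=0.05)
#   feats = decoder.forward(scores)        # scores: (batch, seq_len, 7)
#   paths = ViterbiDecoder.viterbi(feats)  # (batch, seq_len)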
| 4,150
| 38.533333
| 93
|
py
|
Few-NERD
|
Few-NERD-main/util/supervised_util.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Named entity recognition fine-tuning: utilities to work with CoNLL-2003 task. """
from __future__ import absolute_import, division, print_function
import logging
import os
from io import open
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for token classification."""
def __init__(self, guid, words, labels):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
words: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.words = words
self.labels = labels
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
def read_examples_from_file(data_dir, mode):
file_path = os.path.join(data_dir, "{}.txt".format(mode))
guid_index = 1
examples = []
with open(file_path, encoding="utf-8") as f:
words = []
labels = []
for line in f:
if line.startswith("-DOCSTART-") or not line.strip():
if words:
examples.append(InputExample(guid="{}-{}".format(mode, guid_index),
words=words,
labels=labels))
guid_index += 1
words = []
labels = []
else:
splits = line.split("\t")
if splits[0].strip():
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(InputExample(guid="%s-%d".format(mode, guid_index),
words=words,
labels=labels))
return examples
def convert_examples_to_features(examples,
label_list,
max_seq_length,
tokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
pad_token_label_id=-1,
sequence_a_segment_id=0,
mask_padding_with_zero=True):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
tokens = []
label_ids = []
for word, label in zip(example.words, example.labels):
word_tokens = tokenizer.tokenize(word)
if word_tokens:
tokens.extend(word_tokens)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[:(max_seq_length - special_tokens_count)]
label_ids = label_ids[:(max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += ([pad_token] * padding_length)
input_mask += ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids += ([pad_token_segment_id] * padding_length)
label_ids += ([pad_token_label_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
        assert len(label_ids) == max_seq_length, "label_ids length %d != max_seq_length %d" % (len(label_ids), max_seq_length)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s", example.guid)
logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_ids=label_ids))
return features
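# --- Illustrative sketch (not part of the original file): the core label
# alignment performed above, shown with a toy hand-written tokenizer. Only the
# first wordpiece of each word keeps the real label id; the remaining pieces
# get pad_token_label_id so they are ignored by the loss.
def _toy_label_alignment():
    label_map = {"O": 0, "B-LOC": 1}
    pad_token_label_id = -1
    toy_pieces = {"visit": ["visit"], "jacksonville": ["jack", "##son", "##ville"]}
    tokens, label_ids = [], []
    for word, label in [("visit", "O"), ("jacksonville", "B-LOC")]:
        pieces = toy_pieces[word]
        tokens.extend(pieces)
        label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(pieces) - 1))
    assert tokens == ["visit", "jack", "##son", "##ville"]
    assert label_ids == [0, 1, -1, -1]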
def get_labels(path):
if path:
with open(path, "r") as f:
labels = f.read().splitlines()
print(len(labels), labels)
if "O" not in labels:
labels = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
| 9,352
| 42.300926
| 113
|
py
|
Few-NERD
|
Few-NERD-main/util/framework.py
|
import os
import sklearn.metrics
import numpy as np
import sys
import time
from . import word_encoder
from . import data_loader
import torch
from torch import autograd, optim, nn
from torch.autograd import Variable
from torch.nn import functional as F
# from pytorch_pretrained_bert import BertAdam
from transformers import AdamW, get_linear_schedule_with_warmup
from torch.nn.parallel import DistributedDataParallel as DDP
from .viterbi import ViterbiDecoder
def get_abstract_transitions(train_fname, use_sampled_data=True):
"""
Compute abstract transitions on the training dataset for StructShot
"""
if use_sampled_data:
samples = data_loader.FewShotNERDataset(train_fname, None, 1).samples
tag_lists = []
for sample in samples:
tag_lists += sample['support']['label'] + sample['query']['label']
else:
samples = data_loader.FewShotNERDatasetWithRandomSampling(train_fname, None, 1, 1, 1, 1).samples
tag_lists = [sample.tags for sample in samples]
s_o, s_i = 0., 0.
o_o, o_i = 0., 0.
i_o, i_i, x_y = 0., 0., 0.
for tags in tag_lists:
if tags[0] == 'O': s_o += 1
else: s_i += 1
for i in range(len(tags)-1):
p, n = tags[i], tags[i+1]
if p == 'O':
if n == 'O': o_o += 1
else: o_i += 1
else:
if n == 'O':
i_o += 1
elif p != n:
x_y += 1
else:
i_i += 1
trans = []
trans.append(s_o / (s_o + s_i))
trans.append(s_i / (s_o + s_i))
trans.append(o_o / (o_o + o_i))
trans.append(o_i / (o_o + o_i))
trans.append(i_o / (i_o + i_i + x_y))
trans.append(i_i / (i_o + i_i + x_y))
trans.append(x_y / (i_o + i_i + x_y))
return trans
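# --- Illustrative sketch (not part of the original file): the counting above on
# a toy tag list, to make the 7-element `trans` vector concrete. Order:
# [start->O, start->entity, O->O, O->entity, entity->O, entity->same, entity->other].
def _toy_abstract_transitions(tag_lists):
    s_o = s_i = o_o = o_i = i_o = i_i = x_y = 0.0
    for tags in tag_lists:
        if tags[0] == 'O': s_o += 1
        else: s_i += 1
        for p, n in zip(tags, tags[1:]):
            if p == 'O':
                if n == 'O': o_o += 1
                else: o_i += 1
            elif n == 'O': i_o += 1
            elif p != n: x_y += 1
            else: i_i += 1
    return [s_o / (s_o + s_i), s_i / (s_o + s_i),
            o_o / (o_o + o_i), o_i / (o_o + o_i),
            i_o / (i_o + i_i + x_y), i_i / (i_o + i_i + x_y),
            x_y / (i_o + i_i + x_y)]
# _toy_abstract_transitions([['O', 'per', 'per', 'O'], ['loc', 'loc']])
# -> [0.5, 0.5, 0.0, 1.0, 0.333..., 0.667..., 0.0]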
def warmup_linear(global_step, warmup_step):
if global_step < warmup_step:
return global_step / warmup_step
else:
return 1.0
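# For example, with warmup_step=300 the learning-rate multiplier ramps linearly:
# warmup_linear(0, 300) == 0.0, warmup_linear(150, 300) == 0.5, and any
# global_step >= 300 returns 1.0.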
class FewShotNERModel(nn.Module):
def __init__(self, my_word_encoder, ignore_index=-1):
'''
word_encoder: Sentence encoder
You need to set self.cost as your own loss function.
'''
nn.Module.__init__(self)
self.ignore_index = ignore_index
self.word_encoder = nn.DataParallel(my_word_encoder)
self.cost = nn.CrossEntropyLoss(ignore_index=ignore_index)
def forward(self, support, query, N, K, Q):
'''
support: Inputs of the support set.
query: Inputs of the query set.
N: Num of classes
K: Num of instances for each class in the support set
Q: Num of instances for each class in the query set
return: logits, pred
'''
raise NotImplementedError
def loss(self, logits, label):
'''
logits: Logits with the size (..., class_num)
label: Label with whatever size.
return: [Loss] (A single value)
'''
N = logits.size(-1)
return self.cost(logits.view(-1, N), label.view(-1))
def __delete_ignore_index(self, pred, label):
pred = pred[label != self.ignore_index]
label = label[label != self.ignore_index]
assert pred.shape[0] == label.shape[0]
return pred, label
def accuracy(self, pred, label):
'''
pred: Prediction results with whatever size
label: Label with whatever size
return: [Accuracy] (A single value)
'''
pred, label = self.__delete_ignore_index(pred, label)
return torch.mean((pred.view(-1) == label.view(-1)).type(torch.FloatTensor))
def __get_class_span_dict__(self, label, is_string=False):
'''
return a dictionary of each class label/tag corresponding to the entity positions in the sentence
{label:[(start_pos, end_pos), ...]}
'''
class_span = {}
current_label = None
i = 0
if not is_string:
# having labels in [0, num_of_class]
while i < len(label):
if label[i] > 0:
start = i
current_label = label[i]
i += 1
while i < len(label) and label[i] == current_label:
i += 1
if current_label in class_span:
class_span[current_label].append((start, i))
else:
class_span[current_label] = [(start, i)]
else:
assert label[i] == 0
i += 1
else:
# having tags in string format ['O', 'O', 'person-xxx', ..]
while i < len(label):
if label[i] != 'O':
start = i
current_label = label[i]
i += 1
while i < len(label) and label[i] == current_label:
i += 1
if current_label in class_span:
class_span[current_label].append((start, i))
else:
class_span[current_label] = [(start, i)]
else:
i += 1
return class_span
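    # Illustrative example: for string tags ['O', 'person-actor', 'person-actor', 'O', 'loc-city'],
    # __get_class_span_dict__ returns {'person-actor': [(1, 3)], 'loc-city': [(4, 5)]};
    # spans are half-open (start, end) token index pairs grouped by tag.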
def __get_intersect_by_entity__(self, pred_class_span, label_class_span):
'''
return the count of correct entity
'''
cnt = 0
for label in label_class_span:
cnt += len(list(set(label_class_span[label]).intersection(set(pred_class_span.get(label,[])))))
return cnt
def __get_cnt__(self, label_class_span):
'''
return the count of entities
'''
cnt = 0
for label in label_class_span:
cnt += len(label_class_span[label])
return cnt
def __transform_label_to_tag__(self, pred, query):
'''
flatten labels and transform them to string tags
'''
pred_tag = []
label_tag = []
current_sent_idx = 0 # record sentence index in the batch data
current_token_idx = 0 # record token index in the batch data
assert len(query['sentence_num']) == len(query['label2tag'])
# iterate by each query set
for idx, num in enumerate(query['sentence_num']):
true_label = torch.cat(query['label'][current_sent_idx:current_sent_idx+num], 0)
# drop ignore index
true_label = true_label[true_label!=self.ignore_index]
true_label = true_label.cpu().numpy().tolist()
set_token_length = len(true_label)
# use the idx-th label2tag dict
pred_tag += [query['label2tag'][idx][label] for label in pred[current_token_idx:current_token_idx + set_token_length]]
label_tag += [query['label2tag'][idx][label] for label in true_label]
# update sentence and token index
current_sent_idx += num
current_token_idx += set_token_length
assert len(pred_tag) == len(label_tag)
assert len(pred_tag) == len(pred)
return pred_tag, label_tag
def __get_correct_span__(self, pred_span, label_span):
'''
return count of correct entity spans
'''
pred_span_list = []
label_span_list = []
for pred in pred_span:
pred_span_list += pred_span[pred]
for label in label_span:
label_span_list += label_span[label]
return len(list(set(pred_span_list).intersection(set(label_span_list))))
def __get_wrong_within_span__(self, pred_span, label_span):
'''
return count of entities with correct span, correct coarse type but wrong finegrained type
'''
cnt = 0
for label in label_span:
coarse = label.split('-')[0]
within_pred_span = []
for pred in pred_span:
if pred != label and pred.split('-')[0] == coarse:
within_pred_span += pred_span[pred]
cnt += len(list(set(label_span[label]).intersection(set(within_pred_span))))
return cnt
def __get_wrong_outer_span__(self, pred_span, label_span):
'''
return count of entities with correct span but wrong coarse type
'''
cnt = 0
for label in label_span:
coarse = label.split('-')[0]
outer_pred_span = []
for pred in pred_span:
if pred != label and pred.split('-')[0] != coarse:
outer_pred_span += pred_span[pred]
cnt += len(list(set(label_span[label]).intersection(set(outer_pred_span))))
return cnt
def __get_type_error__(self, pred, label, query):
'''
return finegrained type error cnt, coarse type error cnt and total correct span count
'''
pred_tag, label_tag = self.__transform_label_to_tag__(pred, query)
pred_span = self.__get_class_span_dict__(pred_tag, is_string=True)
label_span = self.__get_class_span_dict__(label_tag, is_string=True)
total_correct_span = self.__get_correct_span__(pred_span, label_span) + 1e-6
wrong_within_span = self.__get_wrong_within_span__(pred_span, label_span)
wrong_outer_span = self.__get_wrong_outer_span__(pred_span, label_span)
return wrong_within_span, wrong_outer_span, total_correct_span
def metrics_by_entity(self, pred, label):
'''
return entity level count of total prediction, true labels, and correct prediction
'''
pred = pred.view(-1)
label = label.view(-1)
pred, label = self.__delete_ignore_index(pred, label)
pred = pred.cpu().numpy().tolist()
label = label.cpu().numpy().tolist()
pred_class_span = self.__get_class_span_dict__(pred)
label_class_span = self.__get_class_span_dict__(label)
pred_cnt = self.__get_cnt__(pred_class_span)
label_cnt = self.__get_cnt__(label_class_span)
correct_cnt = self.__get_intersect_by_entity__(pred_class_span, label_class_span)
return pred_cnt, label_cnt, correct_cnt
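    # Illustrative example: if metrics_by_entity returns pred_cnt=8, label_cnt=10
    # and correct_cnt=6, the caller computes precision = 6/8 = 0.75 and
    # recall = 6/10 = 0.60, hence entity-level F1 ~ 0.667.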
def error_analysis(self, pred, label, query):
'''
return
token level false positive rate and false negative rate
entity level within error and outer error
'''
pred = pred.view(-1)
label = label.view(-1)
pred, label = self.__delete_ignore_index(pred, label)
fp = torch.sum(((pred > 0) & (label == 0)).type(torch.FloatTensor))
fn = torch.sum(((pred == 0) & (label > 0)).type(torch.FloatTensor))
pred = pred.cpu().numpy().tolist()
label = label.cpu().numpy().tolist()
within, outer, total_span = self.__get_type_error__(pred, label, query)
return fp, fn, len(pred), within, outer, total_span
class FewShotNERFramework:
def __init__(self, train_data_loader, val_data_loader, test_data_loader, viterbi=False, N=None, train_fname=None, tau=0.05, use_sampled_data=True):
'''
train_data_loader: DataLoader for training.
val_data_loader: DataLoader for validating.
test_data_loader: DataLoader for testing.
'''
self.train_data_loader = train_data_loader
self.val_data_loader = val_data_loader
self.test_data_loader = test_data_loader
self.viterbi = viterbi
if viterbi:
abstract_transitions = get_abstract_transitions(train_fname, use_sampled_data=use_sampled_data)
self.viterbi_decoder = ViterbiDecoder(N+2, abstract_transitions, tau)
def __load_model__(self, ckpt):
'''
ckpt: Path of the checkpoint
return: Checkpoint dict
'''
if os.path.isfile(ckpt):
checkpoint = torch.load(ckpt)
print("Successfully loaded checkpoint '%s'" % ckpt)
return checkpoint
else:
raise Exception("No checkpoint found at '%s'" % ckpt)
def item(self, x):
        '''
        Return a Python scalar from x, compatible with PyTorch before and after 0.4
        '''
torch_version = torch.__version__.split('.')
if int(torch_version[0]) == 0 and int(torch_version[1]) < 4:
return x[0]
else:
return x.item()
def train(self,
model,
model_name,
learning_rate=1e-1,
train_iter=30000,
val_iter=1000,
val_step=2000,
load_ckpt=None,
save_ckpt=None,
warmup_step=300,
grad_iter=1,
fp16=False,
use_sgd_for_bert=False):
'''
        model: a FewShotNERModel instance
        model_name: Name of the model
        learning_rate: Initial learning rate
        train_iter: Num of iterations of training
        val_iter: Num of iterations of validating
        val_step: Validate every val_step steps
        load_ckpt / save_ckpt: Optional paths for loading / saving checkpoints
        warmup_step: Steps of linear learning-rate warmup
        grad_iter: Accumulate gradients over this many iterations
'''
print("Start training...")
# Init optimizer
print('Use bert optim!')
parameters_to_optimize = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
parameters_to_optimize = [
{'params': [p for n, p in parameters_to_optimize
if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in parameters_to_optimize
if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if use_sgd_for_bert:
optimizer = torch.optim.SGD(parameters_to_optimize, lr=learning_rate)
else:
optimizer = AdamW(parameters_to_optimize, lr=learning_rate, correct_bias=False)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_step, num_training_steps=train_iter)
# load model
if load_ckpt:
state_dict = self.__load_model__(load_ckpt)['state_dict']
own_state = model.state_dict()
for name, param in state_dict.items():
if name not in own_state:
print('ignore {}'.format(name))
continue
print('load {} from {}'.format(name, load_ckpt))
own_state[name].copy_(param)
if fp16:
from apex import amp
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
model.train()
# Training
best_f1 = 0.0
iter_loss = 0.0
iter_sample = 0
pred_cnt = 0
label_cnt = 0
correct_cnt = 0
it = 0
while it + 1 < train_iter:
for _, (support, query) in enumerate(self.train_data_loader):
label = torch.cat(query['label'], 0)
if torch.cuda.is_available():
for k in support:
if k != 'label' and k != 'sentence_num':
support[k] = support[k].cuda()
query[k] = query[k].cuda()
label = label.cuda()
logits, pred = model(support, query)
                assert logits.shape[0] == label.shape[0], (logits.shape, label.shape)
loss = model.loss(logits, label) / float(grad_iter)
tmp_pred_cnt, tmp_label_cnt, correct = model.metrics_by_entity(pred, label)
if fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if it % grad_iter == 0:
optimizer.step()
scheduler.step()
optimizer.zero_grad()
iter_loss += self.item(loss.data)
#iter_right += self.item(right.data)
pred_cnt += tmp_pred_cnt
label_cnt += tmp_label_cnt
correct_cnt += correct
iter_sample += 1
if (it + 1) % 100 == 0 or (it + 1) % val_step == 0:
                    precision = correct_cnt / (pred_cnt + 1e-8)
                    recall = correct_cnt / (label_cnt + 1e-8)
                    f1 = 2 * precision * recall / (precision + recall + 1e-8)
sys.stdout.write('step: {0:4} | loss: {1:2.6f} | [ENTITY] precision: {2:3.4f}, recall: {3:3.4f}, f1: {4:3.4f}'\
.format(it + 1, iter_loss/ iter_sample, precision, recall, f1) + '\r')
sys.stdout.flush()
if (it + 1) % val_step == 0:
_, _, f1, _, _, _, _ = self.eval(model, val_iter)
model.train()
if f1 > best_f1:
print('Best checkpoint')
torch.save({'state_dict': model.state_dict()}, save_ckpt)
best_f1 = f1
iter_loss = 0.
iter_sample = 0.
pred_cnt = 0
label_cnt = 0
correct_cnt = 0
if (it + 1) == train_iter:
break
it += 1
print("\n####################\n")
print("Finish training " + model_name)
    def __get_emissions__(self, logits, tags_list):
        # split [num_of_query_tokens, num_class] into [[num_of_token_in_sent, num_class], ...]
        emissions = []
        current_idx = 0
        for tags in tags_list:
            emissions.append(logits[current_idx:current_idx+len(tags)])
            current_idx += len(tags)
        assert current_idx == logits.size()[0]
        return emissions
    def viterbi_decode(self, logits, query_tags):
        emissions_list = self.__get_emissions__(logits, query_tags)
pred = []
for i in range(len(query_tags)):
sent_scores = emissions_list[i].cpu()
sent_len, n_label = sent_scores.shape
sent_probs = F.softmax(sent_scores, dim=1)
start_probs = torch.zeros(sent_len) + 1e-6
sent_probs = torch.cat((start_probs.view(sent_len, 1), sent_probs), 1)
feats = self.viterbi_decoder.forward(torch.log(sent_probs).view(1, sent_len, n_label+1))
vit_labels = self.viterbi_decoder.viterbi(feats)
vit_labels = vit_labels.view(sent_len)
vit_labels = vit_labels.detach().cpu().numpy().tolist()
for label in vit_labels:
pred.append(label-1)
return torch.tensor(pred).cuda()
def eval(self,
model,
eval_iter,
ckpt=None):
'''
        model: a FewShotNERModel instance
        eval_iter: Num of iterations
        ckpt: Checkpoint path. Set as None if using current model parameters.
        return: precision, recall, f1, fp_error, fn_error, within_error, outer_error
'''
print("")
model.eval()
if ckpt is None:
print("Use val dataset")
eval_dataset = self.val_data_loader
else:
print("Use test dataset")
if ckpt != 'none':
state_dict = self.__load_model__(ckpt)['state_dict']
own_state = model.state_dict()
for name, param in state_dict.items():
if name not in own_state:
continue
own_state[name].copy_(param)
eval_dataset = self.test_data_loader
pred_cnt = 0 # pred entity cnt
label_cnt = 0 # true label entity cnt
correct_cnt = 0 # correct predicted entity cnt
fp_cnt = 0 # misclassify O as I-
fn_cnt = 0 # misclassify I- as O
total_token_cnt = 0 # total token cnt
within_cnt = 0 # span correct but of wrong fine-grained type
outer_cnt = 0 # span correct but of wrong coarse-grained type
total_span_cnt = 0 # span correct
eval_iter = min(eval_iter, len(eval_dataset))
with torch.no_grad():
it = 0
while it + 1 < eval_iter:
for _, (support, query) in enumerate(eval_dataset):
label = torch.cat(query['label'], 0)
if torch.cuda.is_available():
for k in support:
if k != 'label' and k != 'sentence_num':
support[k] = support[k].cuda()
query[k] = query[k].cuda()
label = label.cuda()
logits, pred = model(support, query)
if self.viterbi:
pred = self.viterbi_decode(logits, query['label'])
tmp_pred_cnt, tmp_label_cnt, correct = model.metrics_by_entity(pred, label)
fp, fn, token_cnt, within, outer, total_span = model.error_analysis(pred, label, query)
pred_cnt += tmp_pred_cnt
label_cnt += tmp_label_cnt
correct_cnt += correct
fn_cnt += self.item(fn.data)
fp_cnt += self.item(fp.data)
total_token_cnt += token_cnt
outer_cnt += outer
within_cnt += within
total_span_cnt += total_span
if it + 1 == eval_iter:
break
it += 1
epsilon = 1e-6
precision = correct_cnt / (pred_cnt + epsilon)
recall = correct_cnt / (label_cnt + epsilon)
f1 = 2 * precision * recall / (precision + recall + epsilon)
fp_error = fp_cnt / total_token_cnt
fn_error = fn_cnt / total_token_cnt
within_error = within_cnt / (total_span_cnt + epsilon)
outer_error = outer_cnt / (total_span_cnt + epsilon)
sys.stdout.write('[EVAL] step: {0:4} | [ENTITY] precision: {1:3.4f}, recall: {2:3.4f}, f1: {3:3.4f}'.format(it + 1, precision, recall, f1) + '\r')
sys.stdout.flush()
print("")
return precision, recall, f1, fp_error, fn_error, within_error, outer_error
| 22,526
| 38.59051
| 158
|
py
|
Few-NERD
|
Few-NERD-main/util/metric.py
|
class Metrics():
def __init__(self, ignore_index=-100):
        '''
        ignore_index: label index that is skipped when computing entity-level metrics
        '''
self.ignore_index = ignore_index
def __get_class_span_dict__(self, label, is_string=False):
'''
return a dictionary of each class label/tag corresponding to the entity positions in the sentence
{label:[(start_pos, end_pos), ...]}
'''
class_span = {}
current_label = None
i = 0
if not is_string:
# having labels in [0, num_of_class]
while i < len(label):
if label[i] > 0:
start = i
current_label = label[i]
i += 1
while i < len(label) and label[i] == current_label:
i += 1
if current_label in class_span:
class_span[current_label].append((start, i))
else:
class_span[current_label] = [(start, i)]
else:
assert label[i] == 0
i += 1
else:
# having tags in string format ['O', 'O', 'person-xxx', ..]
while i < len(label):
if label[i] != 'O':
start = i
current_label = label[i]
i += 1
while i < len(label) and label[i] == current_label:
i += 1
if current_label in class_span:
class_span[current_label].append((start, i))
else:
class_span[current_label] = [(start, i)]
else:
i += 1
return class_span
def __get_intersect_by_entity__(self, pred_class_span, label_class_span):
'''
return the count of correct entity
'''
cnt = 0
for label in label_class_span:
cnt += len(list(set(label_class_span[label]).intersection(set(pred_class_span.get(label,[])))))
return cnt
def __get_cnt__(self, label_class_span):
'''
return the count of entities
'''
cnt = 0
for label in label_class_span:
cnt += len(label_class_span[label])
return cnt
def __get_correct_span__(self, pred_span, label_span):
'''
return count of correct entity spans
'''
pred_span_list = []
label_span_list = []
for pred in pred_span:
pred_span_list += pred_span[pred]
for label in label_span:
label_span_list += label_span[label]
return len(list(set(pred_span_list).intersection(set(label_span_list))))
def __get_wrong_within_span__(self, pred_span, label_span):
'''
return count of entities with correct span, correct coarse type but wrong finegrained type
'''
cnt = 0
for label in label_span:
coarse = label.split('-')[0]
within_pred_span = []
for pred in pred_span:
if pred != label and pred.split('-')[0] == coarse:
within_pred_span += pred_span[pred]
cnt += len(list(set(label_span[label]).intersection(set(within_pred_span))))
return cnt
def __get_wrong_outer_span__(self, pred_span, label_span):
'''
return count of entities with correct span but wrong coarse type
'''
cnt = 0
for label in label_span:
coarse = label.split('-')[0]
outer_pred_span = []
for pred in pred_span:
if pred != label and pred.split('-')[0] != coarse:
outer_pred_span += pred_span[pred]
cnt += len(list(set(label_span[label]).intersection(set(outer_pred_span))))
return cnt
def __get_type_error__(self, pred, label, query):
'''
return finegrained type error cnt, coarse type error cnt and total correct span count
'''
pred_tag, label_tag = self.__transform_label_to_tag__(pred, query)
pred_span = self.__get_class_span_dict__(pred_tag, is_string=True)
label_span = self.__get_class_span_dict__(label_tag, is_string=True)
total_correct_span = self.__get_correct_span__(pred_span, label_span) + 1e-6
wrong_within_span = self.__get_wrong_within_span__(pred_span, label_span)
wrong_outer_span = self.__get_wrong_outer_span__(pred_span, label_span)
return wrong_within_span, wrong_outer_span, total_correct_span
def metrics_by_entity_(self, pred, label):
'''
return entity level count of total prediction, true labels, and correct prediction
'''
pred_class_span = self.__get_class_span_dict__(pred, is_string=True)
label_class_span = self.__get_class_span_dict__(label, is_string=True)
pred_cnt = self.__get_cnt__(pred_class_span)
label_cnt = self.__get_cnt__(label_class_span)
correct_cnt = self.__get_intersect_by_entity__(pred_class_span, label_class_span)
return pred_cnt, label_cnt, correct_cnt
def metrics_by_entity(self, pred, label):
pred_cnt = 0
label_cnt = 0
correct_cnt = 0
for i in range(len(pred)):
p_cnt, l_cnt, c_cnt = self.metrics_by_entity_(pred[i], label[i])
pred_cnt += p_cnt
label_cnt += l_cnt
correct_cnt += c_cnt
precision = correct_cnt / (pred_cnt + 1e-8)
recall = correct_cnt / (label_cnt + 1e-8)
        f1 = 2 * precision * recall / (precision + recall + 1e-8)
return precision, recall, f1
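# --- Illustrative usage sketch (not part of the original file), assuming
# sentence-level lists of string tags as input.
_m = Metrics()
_p, _r, _f1 = _m.metrics_by_entity([['O', 'loc', 'loc', 'O', 'per']],
                                   [['O', 'loc', 'loc', 'O', 'O']])
# One of two predicted entities is correct and the single gold entity is found:
assert round(_p, 2) == 0.5 and round(_r, 2) == 1.0 and round(_f1, 2) == 0.67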
| 5,764
| 38.486301
| 107
|
py
|
Few-NERD
|
Few-NERD-main/model/nnshot.py
|
import sys
sys.path.append('..')
import util
import torch
from torch import autograd, optim, nn
from torch.autograd import Variable
from torch.nn import functional as F
class NNShot(util.framework.FewShotNERModel):
    def __init__(self, word_encoder, dot=False, ignore_index=-1):
util.framework.FewShotNERModel.__init__(self, word_encoder, ignore_index=ignore_index)
self.drop = nn.Dropout()
self.dot = dot
def __dist__(self, x, y, dim):
if self.dot:
return (x * y).sum(dim)
else:
return -(torch.pow(x - y, 2)).sum(dim)
def __batch_dist__(self, S, Q, q_mask):
# S [class, embed_dim], Q [num_of_sent, num_of_tokens, embed_dim]
assert Q.size()[:2] == q_mask.size()
Q = Q[q_mask==1].view(-1, Q.size(-1))
return self.__dist__(S.unsqueeze(0), Q.unsqueeze(1), 2)
def __get_nearest_dist__(self, embedding, tag, mask, query, q_mask):
nearest_dist = []
S = embedding[mask==1].view(-1, embedding.size(-1))
tag = torch.cat(tag, 0)
assert tag.size(0) == S.size(0)
dist = self.__batch_dist__(S, query, q_mask) # [num_of_query_tokens, num_of_support_tokens]
for label in range(torch.max(tag)+1):
nearest_dist.append(torch.max(dist[:,tag==label], 1)[0])
nearest_dist = torch.stack(nearest_dist, dim=1) # [num_of_query_tokens, class_num]
return nearest_dist
def forward(self, support, query):
'''
support: Inputs of the support set.
query: Inputs of the query set.
N: Num of classes
K: Num of instances for each class in the support set
Q: Num of instances in the query set
'''
support_emb = self.word_encoder(support['word'], support['mask']) # [num_sent, number_of_tokens, 768]
query_emb = self.word_encoder(query['word'], query['mask']) # [num_sent, number_of_tokens, 768]
support_emb = self.drop(support_emb)
query_emb = self.drop(query_emb)
logits = []
current_support_num = 0
current_query_num = 0
assert support_emb.size()[:2] == support['mask'].size()
assert query_emb.size()[:2] == query['mask'].size()
for i, sent_support_num in enumerate(support['sentence_num']):
sent_query_num = query['sentence_num'][i]
# Calculate nearest distance to single entity in each class in support set
logits.append(self.__get_nearest_dist__(support_emb[current_support_num:current_support_num+sent_support_num],
support['label'][current_support_num:current_support_num+sent_support_num],
support['text_mask'][current_support_num: current_support_num+sent_support_num],
query_emb[current_query_num:current_query_num+sent_query_num],
query['text_mask'][current_query_num: current_query_num+sent_query_num]))
current_query_num += sent_query_num
current_support_num += sent_support_num
logits = torch.cat(logits, 0)
_, pred = torch.max(logits, 1)
return logits, pred
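# --- Illustrative sketch (not part of the original file): the nearest-neighbor
# rule used in __get_nearest_dist__ on toy tensors. Each query token is scored,
# per class, by the (negative squared euclidean) distance to its closest
# support token of that class.
def _toy_nnshot_demo():
    S = torch.tensor([[0., 0.], [1., 1.], [4., 4.]])  # 3 support token embeddings
    tag = torch.tensor([0, 0, 1])                     # their token-level labels
    q = torch.tensor([[0.9, 0.9]])                    # 1 query token embedding
    dist = -((S.unsqueeze(0) - q.unsqueeze(1)) ** 2).sum(-1)  # [1 query, 3 support]
    logits = torch.stack([dist[:, tag == c].max(1)[0] for c in range(2)], dim=1)
    assert logits.argmax(1).item() == 0  # closest support token carries class 0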
| 3,140
| 40.88
| 123
|
py
|
Few-NERD
|
Few-NERD-main/model/proto.py
|
import sys
sys.path.append('..')
import util
import torch
from torch import autograd, optim, nn
from torch.autograd import Variable
from torch.nn import functional as F
class Proto(util.framework.FewShotNERModel):
    def __init__(self, word_encoder, dot=False, ignore_index=-1):
util.framework.FewShotNERModel.__init__(self, word_encoder, ignore_index=ignore_index)
self.drop = nn.Dropout()
self.dot = dot
def __dist__(self, x, y, dim):
if self.dot:
return (x * y).sum(dim)
else:
return -(torch.pow(x - y, 2)).sum(dim)
def __batch_dist__(self, S, Q, q_mask):
# S [class, embed_dim], Q [num_of_sent, num_of_tokens, embed_dim]
assert Q.size()[:2] == q_mask.size()
Q = Q[q_mask==1].view(-1, Q.size(-1)) # [num_of_all_text_tokens, embed_dim]
return self.__dist__(S.unsqueeze(0), Q.unsqueeze(1), 2)
def __get_proto__(self, embedding, tag, mask):
proto = []
embedding = embedding[mask==1].view(-1, embedding.size(-1))
tag = torch.cat(tag, 0)
assert tag.size(0) == embedding.size(0)
for label in range(torch.max(tag)+1):
proto.append(torch.mean(embedding[tag==label], 0))
proto = torch.stack(proto)
return proto
def forward(self, support, query):
'''
support: Inputs of the support set.
query: Inputs of the query set.
N: Num of classes
K: Num of instances for each class in the support set
Q: Num of instances in the query set
'''
support_emb = self.word_encoder(support['word'], support['mask']) # [num_sent, number_of_tokens, 768]
query_emb = self.word_encoder(query['word'], query['mask']) # [num_sent, number_of_tokens, 768]
support_emb = self.drop(support_emb)
query_emb = self.drop(query_emb)
# Prototypical Networks
logits = []
current_support_num = 0
current_query_num = 0
assert support_emb.size()[:2] == support['mask'].size()
assert query_emb.size()[:2] == query['mask'].size()
for i, sent_support_num in enumerate(support['sentence_num']):
sent_query_num = query['sentence_num'][i]
# Calculate prototype for each class
support_proto = self.__get_proto__(
support_emb[current_support_num:current_support_num+sent_support_num],
support['label'][current_support_num:current_support_num+sent_support_num],
support['text_mask'][current_support_num: current_support_num+sent_support_num])
# calculate distance to each prototype
logits.append(self.__batch_dist__(
support_proto,
query_emb[current_query_num:current_query_num+sent_query_num],
query['text_mask'][current_query_num: current_query_num+sent_query_num])) # [num_of_query_tokens, class_num]
current_query_num += sent_query_num
current_support_num += sent_support_num
logits = torch.cat(logits, 0)
_, pred = torch.max(logits, 1)
return logits, pred
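# --- Illustrative sketch (not part of the original file): prototype
# classification as in __get_proto__/__batch_dist__ on toy tensors. Prototypes
# are per-class means of support token embeddings; a query token takes the
# class of the nearest prototype under negative squared euclidean distance.
def _toy_proto_demo():
    emb = torch.tensor([[0., 0.], [0., 2.], [4., 4.]])  # support token embeddings
    tag = torch.tensor([0, 0, 1])
    proto = torch.stack([emb[tag == c].mean(0) for c in range(2)])  # [[0, 1], [4, 4]]
    q = torch.tensor([[0.5, 1.0]])
    logits = -((proto.unsqueeze(0) - q.unsqueeze(1)) ** 2).sum(-1)
    assert logits.argmax(1).item() == 0  # nearest prototype is class 0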
| 3,166
| 39.088608
| 124
|
py
|
pycbc
|
pycbc-master/setup.py
|
#!/usr/bin/env python
# Copyright (C) 2012 Alex Nitz, Duncan Brown, Andrew Miller, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
setup.py file for PyCBC package
"""
import sys
import os, subprocess, shutil
import platform
from distutils.command.clean import clean as _clean
from setuptools import Extension, setup, Command
from setuptools.command.build_ext import build_ext as _build_ext
from setuptools import find_packages
requires = []
setup_requires = ['numpy>=1.16.0']
install_requires = setup_requires + [
'cython>=0.29',
'numpy>=1.16.0,!=1.19.0',
'scipy>=0.16.0',
'astropy>=2.0.3,!=4.2.1,!=4.0.5',
'matplotlib>=1.5.1',
'mpld3>=0.3',
'pillow',
'h5py>=3.0.0,!=3.7.0',
'jinja2',
'Mako>=1.0.1',
'beautifulsoup4>=4.6.0',
'tqdm',
'setuptools',
'gwdatafind',
'pegasus-wms.api >= 5.0.3',
'python-ligo-lw >= 1.7.0',
'ligo-segments',
'lalsuite!=7.2',
'lscsoft-glue>=1.59.3',
'pykerr',
]
def find_files(dirname, relpath=None):
def find_paths(dirname):
items = []
for fname in os.listdir(dirname):
path = os.path.join(dirname, fname)
if os.path.isdir(path):
items += find_paths(path)
elif not path.endswith(".py") and not path.endswith(".pyc"):
items.append(path)
return items
items = find_paths(dirname)
if relpath is None:
relpath = dirname
return [os.path.relpath(path, relpath) for path in items]
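# For example, find_files('pycbc/results') returns every non-.py/.pyc file under
# pycbc/results (recursively), with paths expressed relative to that directory,
# which is how the package_data entries below are populated.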
class cbuild_ext(_build_ext):
def run(self):
import pkg_resources
# At this point we can be sure pip has already installed numpy
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
for ext in self.extensions:
if (hasattr(ext, 'include_dirs') and
numpy_incl not in ext.include_dirs):
ext.include_dirs.append(numpy_incl)
_build_ext.run(self)
# Add swig-generated files to the list of things to clean, so they
# get regenerated each time.
class clean(_clean):
def finalize_options (self):
_clean.finalize_options(self)
self.clean_files = []
self.clean_folders = ['docs/_build']
def run(self):
_clean.run(self)
for f in self.clean_files:
try:
os.unlink(f)
print('removed ' + f)
except:
pass
for fol in self.clean_folders:
shutil.rmtree(fol, ignore_errors=True)
print('removed ' + fol)
def get_version_info():
"""Get VCS info and write version info to version.py.
"""
from pycbc import _version_helper
class vdummy(object):
def __getattr__(self, attr):
return ''
# If this is a pycbc git repo always populate version information using GIT
try:
vinfo = _version_helper.generate_git_version_info()
except:
vinfo = vdummy()
vinfo.version = '2.2.dev1'
vinfo.release = 'False'
version_script = f"""# coding: utf-8
# Generated by setup.py for PyCBC on {vinfo.build_date}.
# general info
version = '{vinfo.version}'
date = '{vinfo.date}'
release = '{vinfo.release}'
last_release = '{vinfo.last_release}'
# git info
git_hash = '{vinfo.hash}'
git_branch = '{vinfo.branch}'
git_tag = '{vinfo.tag}'
git_author = '{vinfo.author}'
git_committer = '{vinfo.committer}'
git_status = '{vinfo.status}'
git_builder = '{vinfo.builder}'
git_build_date = '{vinfo.build_date}'
git_verbose_msg = \"\"\"Version: {vinfo.version}
Branch: {vinfo.branch}
Tag: {vinfo.tag}
Id: {vinfo.hash}
Builder: {vinfo.builder}
Build date: {vinfo.build_date}
Repository status is {vinfo.status}\"\"\"
from pycbc._version import *
"""
with open('pycbc/version.py', 'wb') as f:
f.write(version_script.encode('utf-8'))
from pycbc import version
return version.version
class build_docs(Command):
user_options = []
description = "Build the documentation pages"
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
cmd = (
"cd docs; "
"cp Makefile.std Makefile; "
"sphinx-apidoc -o ./ -f -A 'PyCBC dev team' -V '0.1' ../pycbc "
"&& make html"
)
subprocess.check_call(cmd, stderr=subprocess.STDOUT, shell=True)
class build_gh_pages(Command):
user_options = []
description = "Build the documentation pages for GitHub"
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
cmd = (
"mkdir -p _gh-pages/latest "
"&& touch _gh-pages/.nojekyll "
"&& cd docs; "
"cp Makefile.gh_pages Makefile; "
"sphinx-apidoc -o ./ -f -A 'PyCBC dev team' -V '0.1' ../pycbc "
"&& make html"
)
subprocess.check_call(cmd, stderr=subprocess.STDOUT, shell=True)
cmdclass = {
'build_docs': build_docs,
'build_gh_pages': build_gh_pages,
'clean': clean,
'build_ext': cbuild_ext
}
extras_require = {
'cuda': [
'pycuda>=2015.1',
'scikit-cuda',
],
'igwn': [
'ciecplib>=0.4.4',
],
}
# do the actual work of building the package
VERSION = get_version_info()
cythonext = ['waveform.spa_tmplt',
'waveform.utils',
'types.array',
'filter.matchedfilter',
'vetoes.chisq']
ext = []
cython_compile_args = ['-O3', '-w', '-ffast-math',
'-ffinite-math-only']
if platform.machine() == 'x86_64':
cython_compile_args.append('-msse4.2')
cython_link_args = []
# Mac's clang compiler doesn't have openMP support by default. Therefore
# disable openmp builds on MacOSX. Optimization should never really be a
# concern on that OS, and this line can be commented out if needed anyway.
# Mac's also alias gcc and can run into troubles getting libc correctly
if not sys.platform == 'darwin':
cython_compile_args += ['-fopenmp']
cython_link_args += ['-fopenmp']
else:
cython_compile_args += ["-stdlib=libc++"]
cython_link_args += ["-stdlib=libc++"]
for name in cythonext:
e = Extension("pycbc.%s_cpu" % name,
["pycbc/%s_cpu.pyx" % name.replace('.', '/')],
extra_compile_args=cython_compile_args,
extra_link_args=cython_link_args,
compiler_directives={'embedsignature': True})
ext.append(e)
# Not all modules work like this:
e = Extension("pycbc.fft.fftw_pruned_cython",
["pycbc/fft/fftw_pruned_cython.pyx"],
extra_compile_args=cython_compile_args,
extra_link_args=cython_link_args,
compiler_directives={'embedsignature': True})
ext.append(e)
e = Extension("pycbc.events.eventmgr_cython",
["pycbc/events/eventmgr_cython.pyx"],
extra_compile_args=cython_compile_args,
extra_link_args=cython_link_args,
compiler_directives={'embedsignature': True})
ext.append(e)
e = Extension("pycbc.events.simd_threshold_cython",
["pycbc/events/simd_threshold_cython.pyx"],
language='c++',
extra_compile_args=cython_compile_args,
extra_link_args=cython_link_args,
compiler_directives={'embedsignature': True})
ext.append(e)
e = Extension("pycbc.filter.simd_correlate_cython",
["pycbc/filter/simd_correlate_cython.pyx"],
language='c++',
extra_compile_args=cython_compile_args,
extra_link_args=cython_link_args,
compiler_directives={'embedsignature': True})
ext.append(e)
e = Extension("pycbc.waveform.decompress_cpu_cython",
["pycbc/waveform/decompress_cpu_cython.pyx"],
language='c++',
extra_compile_args=cython_compile_args,
extra_link_args=cython_link_args,
compiler_directives={'embedsignature': True})
ext.append(e)
e = Extension("pycbc.inference.models.relbin_cpu",
["pycbc/inference/models/relbin_cpu.pyx"],
language='c++',
extra_compile_args=cython_compile_args,
extra_link_args=cython_link_args,
compiler_directives={'embedsignature': True})
ext.append(e)
setup(
name = 'PyCBC',
version = VERSION,
description = 'Core library to analyze gravitational-wave data, find signals, and study their parameters.',
long_description = open('README.md').read(),
long_description_content_type='text/markdown',
author = 'The PyCBC team',
author_email = 'alex.nitz@gmail.org',
url = 'http://www.pycbc.org/',
download_url = f'https://github.com/gwastro/pycbc/tarball/v{VERSION}',
keywords = [
'ligo',
'physics',
'gravity',
'signal processing',
'gravitational waves'
],
cmdclass = cmdclass,
setup_requires = setup_requires,
extras_require = extras_require,
install_requires = install_requires,
scripts = find_files('bin', relpath='./'),
packages = find_packages(),
package_data = {
'pycbc.workflow': find_files('pycbc/workflow'),
'pycbc.results': find_files('pycbc/results'),
'pycbc.neutron_stars': find_files('pycbc/neutron_stars')
},
ext_modules = ext,
python_requires='>=3.7',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Physics',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
],
)
| 10,715
| 31.18018
| 111
|
py
|
pycbc
|
pycbc-master/tools/benchmarking/absolute_times.py
|
#!/usr/bin/env python
import sys
try:
tottime = float(sys.argv[1])
except:
print("usage: %s total_time_in_seconds" % sys.argv[0])
print()
print("Typical use case is: ")
print(" gprof2dot.py -f pstats profile_file | %s total_time_in_seconds | dot -Tpng -o output.png" % sys.argv[0])
sys.exit(0)
for line in sys.stdin:
newtokens = []
for token in line.strip().split('\\n'):
if token[-1] == '%':
try:
value = float(token[:-1]) / 100.0
token = '%s [%.2e]' % (token, value*tottime)
except:
pass
elif token[-2:] == '%)':
value = float(token[1:-2]) / 100.0
token = '(%s [%.2e])' % (token[1:-2], value*tottime)
newtokens.append(token)
print('\\n'.join(newtokens))
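# Example: with total_time_in_seconds = 80, a dot-graph label token "12.50%"
# becomes "12.50% [1.00e+01]", i.e. the relative time annotated with the
# corresponding absolute time of 10 seconds.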
| 822
| 23.939394
| 118
|
py
|
pycbc
|
pycbc-master/tools/timing/fft_perf.py
|
#!/usr/bin/python
from pycbc.scheme import *
from pycbc.types import *
from pycbc.fft import *
import pycbc
from optparse import OptionParser
import gc
parser = OptionParser()
parser.add_option('--scheme','-s', type = 'choice',
choices = ('cpu','cuda','opencl'),
default = 'cpu', dest = 'scheme',
help = 'specifies processing scheme, can be cpu [default], cuda, or opencl')
parser.add_option('--device-num','-d', action='store', type = 'int',
dest = 'devicenum', default=0,
help = 'specifies a GPU device to use for CUDA or OpenCL, 0 by default')
parser.add_option('--size',type=int, help='FFT size')
parser.add_option('--iterations', type=int, help='Number of iterations to perform')
parser.add_option('--measure-level', type=int, help='Set the measure level (only applies to FFTW- cpu scheme)', default=1)
parser.add_option('--backend', type=str, help='set the backend type for this scheme')
parser.add_option('--num-threads', type=int, help='set the number of threads to use', default=1)
parser.add_option('--import-float-wisdom', type=str, help='import an FFTW float wisdom file')
parser.add_option('--export-float-wisdom', type=str, help='export an FFTW float wisdom file')
(options, args) = parser.parse_args()
#Changing the optvalues to a dict makes them easier to read
_options = vars(options)
if _options['scheme'] == 'cpu':
ctx = CPUScheme(num_threads=options.num_threads)
if options.backend == 'fftw':
from pycbc.fft.fftw import set_measure_level, set_threads_backend
with ctx:
set_measure_level(options.measure_level)
if options.num_threads != 1:
set_threads_backend('openmp')
if _options['scheme'] == 'cuda':
ctx = CUDAScheme(device_num=_options['devicenum'])
if _options['scheme'] == 'opencl':
ctx = OpenCLScheme(device_num=_options['devicenum'])
niter = options.iterations
if type(ctx) is CUDAScheme:
print("RUNNING ON ", ctx.device.name())
else:
print("RUNNING ON CPU")
print(type(ctx))
N = 2**options.size
vecin = zeros(N, dtype=complex64)
vecout = zeros(N, dtype=complex64)
print("ALIGNMENT:", check_aligned(vecin.data))
if options.import_float_wisdom:
print("Loading a wisdom file")
fftw.import_single_wisdom_from_filename(options.import_float_wisdom)
print("Making the plan")
with ctx:
ifft(vecin, vecout, backend=options.backend)
print("Planning done")
if options.export_float_wisdom:
assert(_options['scheme'] == 'cpu' and options.backend == 'fftw')
fftw.export_single_wisdom_to_filename(options.export_float_wisdom)
def tifft():
with ctx:
for i in range(0, niter):
ifft(vecin, vecout, backend=options.backend)
sync = vecout[0]
import timeit
gt = timeit.Timer(tifft)
t = (1000 * gt.timeit(number=1)/niter)
print("C2C iFFT %.2f msec" % t, " %5.1f /min " % (1000 *60 /t))
| 2,935
| 33.139535
| 122
|
py
|
pycbc
|
pycbc-master/tools/timing/wav_perf.py
|
#!/usr/bin/python
from pycbc.scheme import *
from pycbc.types import *
from pycbc.waveform import *
import pycbc
from optparse import OptionParser
import gc
parser = OptionParser()
parser.add_option('--scheme','-s', type = 'choice',
choices = ('cpu','cuda','opencl'),
default = 'cpu', dest = 'scheme',
help = 'specifies processing scheme, can be cpu [default], cuda, or opencl')
parser.add_option('--device-num','-d', action='store', type = 'int',
dest = 'devicenum', default=0,
help = 'specifies a GPU device to use for CUDA or OpenCL, 0 by default')
parser.add_option('--approximant', type=str, default="TaylorF2")
parser.add_option('--deltaf',type=float, help='frequency step')
parser.add_option('--iterations', type=int, help='Number of iterations to perform')
(options, args) = parser.parse_args()
#Changing the optvalues to a dict makes them easier to read
_options = vars(options)
if _options['scheme'] == 'cpu':
ctx = CPUScheme()
if _options['scheme'] == 'cuda':
ctx = CUDAScheme(device_num=_options['devicenum'])
if _options['scheme'] == 'opencl':
ctx = OpenCLScheme(device_num=_options['devicenum'])
niter = options.iterations
if type(ctx) is CUDAScheme:
print("RUNNING ON ", ctx.device.name())
else:
print("RUNNING ON CPU")
with ctx:
wf_taylor = get_fd_waveform(mass1=1, mass2=1, f_lower=14,
approximant=options.approximant, delta_f=options.deltaf)
def taylorf2():
with ctx:
for i in range(0,niter):
wf_taylor = get_fd_waveform(mass1=1, mass2=1, f_lower=14,
approximant=options.approximant, delta_f=options.deltaf)
import timeit
gt = timeit.Timer(taylorf2)
t = (1000 * gt.timeit(number=1)/niter)
print("Waveform Generation %.2f msec" % t, " %5.1f gen/min " % (1000 *60 /t))
if type(ctx) is CUDAScheme:
def SPAtmplt():
with ctx:
n = int(1.0 / options.deltaf * 4096)
out = zeros(n, dtype=complex64)
for i in range(0,niter):
wf_taylor = get_fd_waveform(mass1=1, mass2=1, f_lower=14,
approximant="SPAtmplt", delta_f=options.deltaf, out=out, amplitude_order=0)
gt = timeit.Timer(SPAtmplt)
t = (1000 * gt.timeit(number=1)/niter)
print("SPAtmplt Generation %.2f msec" % t, " %5.1f gen/min " % (1000 *60 /t))
| 2,472
| 31.973333
| 119
|
py
|
pycbc
|
pycbc-master/tools/timing/match_perf.py
|
#!/usr/bin/env python
from pycbc.scheme import *
from pycbc.types import *
from pycbc.filter import *
from pycbc.psd import *
import pycbc
from math import log
import numpy
import numpy.random
from optparse import OptionParser
import gc
parser = OptionParser()
import logging
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
parser.add_option('--scheme','-s', type = 'choice',
choices = ('cpu','cuda','opencl'),
default = 'cpu', dest = 'scheme',
help = 'specifies processing scheme, can be cpu [default], cuda, or opencl')
parser.add_option('--device-num','-d', action='store', type = 'int',
dest = 'devicenum', default=0,
help = 'specifies a GPU device to use for CUDA or OpenCL, 0 by default')
parser.add_option('--size', type=int, help='fft size in log2')
parser.add_option('--iterations', type=int, help='Number of iterations to perform')
(options, args) = parser.parse_args()
#Changing the optvalues to a dict makes them easier to read
_options = vars(options)
if _options['scheme'] == 'cpu':
ctx = CPUScheme()
if _options['scheme'] == 'cuda':
ctx = CUDAScheme(device_num=_options['devicenum'])
if _options['scheme'] == 'opencl':
ctx = OpenCLScheme(device_num=_options['devicenum'])
size = options.size
niter = options.iterations
if type(ctx) is CUDAScheme:
print("RUNNING ON ", ctx.device.name())
else:
print("RUNNING ON CPU")
N = 2**size
print(" SIZE ", int(log(N,2)))
n = N // 2 + 1
a = numpy.zeros(N) + 1000
noise = numpy.random.normal(a).astype(numpy.float32)
with ctx:
nplus2 = TimeSeries(noise,delta_t=1.0/4096,dtype=float32)
ntilde2 = make_frequency_series(nplus2)
psd2 = ntilde2.squared_norm()
o = match(ntilde2,ntilde2,psd=psd2)
o = match(ntilde2,ntilde2,psd=None, v1_norm=1, v2_norm=1)
o = matched_filter_core(ntilde2, ntilde2)
out=zeros(N,dtype=complex64)
o = overlap_cplx(ntilde2, ntilde2, normalized=False)
ntilde3 = ntilde2 +10j
def matcht():
with ctx:
for i in range(0,niter):
o,ind = match(ntilde2,ntilde2,psd=psd2)
def match_fast():
with ctx:
for i in range(0,niter):
o,ind = match(ntilde2,ntilde2,psd=None,v1_norm=1,v2_norm=1)
def ovlp():
with ctx:
for i in range(0,niter):
o = overlap_cplx(ntilde2,ntilde3, normalized=False)
def filter_fast():
with ctx:
for i in range(0,niter):
snr, corr, norm = matched_filter_core(ntilde2, ntilde2, psd=None, h_norm=1, out=out)
import timeit
gt = timeit.Timer(ovlp)
t = (1000 * gt.timeit(number=1)/niter)
print("Foverlap %.2f msec" % t, " %5.1f op/min " % (1000 *60 /t))
gt = timeit.Timer(matcht)
t = (1000 * gt.timeit(number=1)/niter)
print("MATCH %.2f msec" % t, " %5.1f op/min " % (1000 *60 /t))
gt = timeit.Timer(match_fast)
t = (1000 * gt.timeit(number=1)/niter)
print("MATCH FAST %.2f msec" % t, " %5.1f op/min " % (1000 *60 /t))
gt = timeit.Timer(filter_fast)
t = (1000 * gt.timeit(number=1)/niter)
print("FILTER FAST %.2f msec" % t, " %5.1f op/min " % (1000 *60 /t))
| 3,154
| 27.169643
| 96
|
py
|
pycbc
|
pycbc-master/tools/timing/arr_perf.py
|
#!/usr/bin/python
from pycbc.scheme import *
from pycbc.types import *
from pycbc.fft import *
from pycbc.events import *
import pycbc
from optparse import OptionParser
from math import sin, log
import gc
parser = OptionParser()
parser.add_option('--scheme','-s', type = 'choice',
choices = ('cpu','cuda','opencl'),
default = 'cpu', dest = 'scheme',
help = 'specifies processing scheme, can be cpu [default], cuda, or opencl')
parser.add_option('--device-num','-d', action='store', type = 'int',
dest = 'devicenum', default=0,
help = 'specifies a GPU device to use for CUDA or OpenCL, 0 by default')
parser.add_option('--size',type=float, help='FFT size')
parser.add_option('--iterations', type=int, help='Number of iterations to perform')
(options, args) = parser.parse_args()
#Changing the optvalues to a dict makes them easier to read
_options = vars(options)
if _options['scheme'] == 'cpu':
ctx = CPUScheme()
if _options['scheme'] == 'cuda':
ctx = CUDAScheme(device_num=_options['devicenum'])
if _options['scheme'] == 'opencl':
ctx = OpenCLScheme(device_num=_options['devicenum'])
niter = options.iterations
if type(ctx) is CUDAScheme:
print("RUNNING ON ", ctx.device.name())
else:
print("RUNNING ON CPU")
print(type(ctx))
N = 2**options.size
v = zeros(N, dtype=complex64) + 1
with ctx:
v+v
v*v
v.squared_norm()
def addc():
with ctx:
for i in range(0, niter):
v + 3
v[0]
def add():
with ctx:
for i in range(0, niter):
v+v
v[0]
def mul():
with ctx:
for i in range(0, niter):
v*v
v[0]
def sqnm():
with ctx:
for i in range(0, niter):
v.squared_norm()
v[0]
import timeit
gt = timeit.Timer(addc)
t = (1000 * gt.timeit(number=1)/niter)
print("ADDC %.2f msec" % t, " %5.1f /min " % (1000 *60 /t))
gt = timeit.Timer(add)
t = (1000 * gt.timeit(number=1)/niter)
print("ADD %.2f msec" % t, " %5.1f /min " % (1000 *60 /t))
gt = timeit.Timer(mul)
t = (1000 * gt.timeit(number=1)/niter)
print("MUL %.2f msec" % t, " %5.1f /min " % (1000 *60 /t))
gt = timeit.Timer(sqnm)
t = (1000 * gt.timeit(number=1)/niter)
print("SQNRM %.2f msec" % t, " %5.1f /min " % (1000 *60 /t))
| 2,286
| 22.10101
| 96
|
py
|
pycbc
|
pycbc-master/tools/timing/correlate_perf.py
|
from pycbc.filter import correlate
from pycbc.filter.matchedfilter import BatchCorrelator, Correlator
from pycbc.types import zeros, complex64, complex128, Array
from time import time
from numpy.random import uniform
niter = 2000
for N in [2**10, 2**15, 2**18]:
a = zeros(N, dtype=complex64)
a.data += uniform(-1, 1, size=len(a))
b = a * 0.5
c = a * 1.5
xs = []
zs = []
for i in range(50):
xs.append(a*i)
zs.append(c*i)
corr = BatchCorrelator(xs, zs, N)
t1 = time()
for i in range(niter):
corr.execute(b)
t2 = time()
print("Batch Correlate Perf Size:{} Time:{:3.3f}".format(
N, (t2-t1)*1000 / niter))
for dtp in [complex64, complex128]:
for N in [2**10, 2**15, 2**20]:
a = zeros(N, dtype=dtp)
a += Array(uniform(-1, 1, size=N) * (1 + -.5j), dtype=a.dtype)
b = zeros(N, dtype=dtp)
c = zeros(N, dtype=dtp)
correlate(a, b, c)
t1 = time()
for i in range(niter):
correlate(a, b, c)
t2 = time()
print("Correlate Perf Type:{} Size:{} Time:{:3.3f}".format(repr(dtp),
N, (t2-t1)*1000 / niter))
if dtp is complex64:
corr = Correlator(a, b, c)
t1 = time()
for i in range(niter):
corr.correlate()
t2 = time()
print("Correlator Perf Type:{} Size:{} Time:{:3.3f}".format(repr(dtp),
N, (t2-t1)*1000 / niter))
| 1,489
| 26.592593
| 82
|
py
|
pycbc
|
pycbc-master/tools/timing/banksim/banksim.py
|
#! /usr/bin/env python
# Copyright (C) 2012 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
import sys
from numpy import complex64,float32
from optparse import OptionParser
from ligo.lw import utils as ligolw_utils
from ligo.lw import table, lsctables
from pycbc.utils import mass1_mass2_to_mchirp_eta
from pycbc.waveform import get_td_waveform, get_fd_waveform, td_approximants, fd_approximants
from pycbc import DYN_RANGE_FAC
from pycbc.types import FrequencySeries, TimeSeries, zeros, real_same_precision_as, complex_same_precision_as
from pycbc.filter import match, sigmasq
from pycbc.scheme import DefaultScheme, CUDAScheme
from pycbc.fft import fft
from math import ceil, cos, log, sin
import pycbc.psd
def update_progress(progress):
    print('\r\r[{0}] {1:.2%}'.format('#'*(int(progress*100)//2)+' '*(50-int(progress*100)//2), progress), end=' ')
    if progress == 1:
print("Done")
sys.stdout.flush()
## Remove the need for these functions ########################################
def generate_fplus_fcross(latitude,longitude,polarization):
    f_plus = - (1.0/2.0) * (1.0 + cos(latitude)*cos(latitude)) * cos(2.0 * longitude) * cos(2.0 * polarization) - cos(latitude) * sin(2.0*longitude) * sin(2.0 * polarization)
    f_cross = (1.0/2.0) * (1.0 + cos(latitude)*cos(latitude)) * cos(2.0 * longitude) * sin(2.0 * polarization) - cos(latitude) * sin(2.0*longitude) * cos(2.0 * polarization)
return f_plus, f_cross
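# Sanity check: an overhead, optimally oriented source,
# generate_fplus_fcross(0, 0, 0), gives (f_plus, f_cross) = (-1.0, 0.0):
# full response in the plus polarization and none in cross.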
def generate_detector_strain(template_params, h_plus, h_cross):
latitude = 0
longitude = 0
polarization = 0
if hasattr(template_params, 'latitude'):
latitude = template_params.latitude
if hasattr(template_params, 'longitude'):
longitude = template_params.longitude
if hasattr(template_params, 'polarization'):
polarization = template_params.polarization
f_plus, f_cross = generate_fplus_fcross(latitude, longitude, polarization)
return (f_plus*h_plus+f_cross*h_cross)
def make_padded_frequency_series(vec,filter_N=None):
"""Pad a TimeSeries with a length of zeros greater than its length, such
that the total length is the closest power of 2. This prevents the effects
of wraparound.
"""
if filter_N is None:
power = ceil(log(len(vec),2))+1
N = 2 ** power
else:
N = filter_N
    n = N//2+1
if isinstance(vec,FrequencySeries):
vectilde = FrequencySeries(zeros(n, dtype=complex_same_precision_as(vec)),
delta_f=1.0,copy=False)
if len(vectilde) < len(vec):
cplen = len(vectilde)
else:
cplen = len(vec)
vectilde[0:cplen] = vec[0:cplen]
delta_f = vec.delta_f
if isinstance(vec,TimeSeries):
vec_pad = TimeSeries(zeros(N),delta_t=vec.delta_t,
dtype=real_same_precision_as(vec))
vec_pad[0:len(vec)] = vec
delta_f = 1.0/(vec.delta_t*N)
vectilde = FrequencySeries(zeros(n),delta_f=1.0,
dtype=complex_same_precision_as(vec))
fft(vec_pad,vectilde)
vectilde = FrequencySeries(vectilde * DYN_RANGE_FAC,delta_f=delta_f,dtype=complex64)
return vectilde
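# Example: when filter_N is None, a 3 s TimeSeries sampled at 4096 Hz
# (12288 samples) is padded to N = 2**(ceil(log2(12288)) + 1) = 32768 samples,
# and the returned FrequencySeries has N//2 + 1 = 16385 points.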
def get_waveform(approximant, phase_order, amplitude_order, template_params, start_frequency, sample_rate, length):
if approximant in td_approximants():
hplus,hcross = get_td_waveform(template_params, approximant=approximant,
phase_order=phase_order, delta_t=1.0 / sample_rate,
f_lower=start_frequency, amplitude_order=amplitude_order)
hvec = generate_detector_strain(template_params, hplus, hcross)
elif approximant in fd_approximants():
delta_f = sample_rate / length
hvec = get_fd_waveform(template_params, approximant=approximant,
phase_order=phase_order, delta_f=delta_f,
f_lower=start_frequency, amplitude_order=amplitude_order)
    htilde = make_padded_frequency_series(hvec, length)
return htilde
###############################################################################
#File output Settings
parser = OptionParser()
parser.add_option("--match-file", dest="out_file", help="file to output match results", metavar="FILE")
#PSD Settings
parser.add_option("--asd-file", dest="asd_file", help="two-column ASCII file containing ASD data", metavar="FILE")
parser.add_option("--psd", dest="psd", help="Analytic PSD model from LALSimulation", choices=pycbc.psd.get_lalsim_psd_list())
aprs = list(set(td_approximants() + fd_approximants()))
#Template Settings
parser.add_option("--template-file", dest="bank_file", help="SimInspiral or SnglInspiral XML file containing the template parameters.", metavar="FILE")
parser.add_option("--template-approximant",help="Template Approximant Name: " + str(aprs), choices = aprs)
parser.add_option("--template-phase-order",help="PN order to use for the phase",default=-1,type=int)
parser.add_option("--template-amplitude-order",help="PN order to use for the amplitude",default=-1,type=int)
parser.add_option("--template-start-frequency",help="Starting frequency for injections",type=float)
parser.add_option("--template-sample-rate",help="Starting frequency for injections",type=float)
#Signal Settings
parser.add_option("--signal-file", dest="sim_file", help="SimInspiral or SnglInspiral XML file containing the signal parameters.", metavar="FILE")
parser.add_option("--signal-approximant",help="Signal Approximant Name: " + str(aprs), choices = aprs)
parser.add_option("--signal-phase-order",help="PN order to use for the phase",default=-1,type=int)
parser.add_option("--signal-amplitude-order",help="PN order to use for the amplitude",default=-1,type=int)
parser.add_option("--signal-start-frequency",help="Starting frequency for templates",type=float)
parser.add_option("--signal-sample-rate",help="Starting frequency for templates",type=float)
#Filtering Settings
parser.add_option('--filter-low-frequency-cutoff', metavar='FREQ', help='low frequency cutoff of matched filter', type=float)
parser.add_option("--filter-sample-rate",help="Filter Sample Rate [Hz]",type=float)
parser.add_option("--filter-signal-length",help="Length of signal for filtering, shoud be longer than all waveforms and include some padding",type=int)
#Hardware support
parser.add_option("--use-cuda",action="store_true")
#Restricted maximization
parser.add_option("--mchirp-window",type=float)
(options, args) = parser.parse_args()
template_sample_rate = options.filter_sample_rate
signal_sample_rate = options.filter_sample_rate
if options.template_sample_rate:
template_sample_rate = options.template_sample_rate
if options.signal_sample_rate:
    signal_sample_rate = options.signal_sample_rate
if options.psd and options.asd_file:
parser.error("PSD and asd-file options are mututally exclusive")
if options.use_cuda:
ctx = CUDAScheme()
else:
ctx = DefaultScheme()
print("STARTING THE BANKSIM")
# Load in the template bank file
indoc = ligolw_utils.load_filename(options.bank_file, False)
try :
template_table = lsctables.SnglInspiralTable.get_table(indoc)
except ValueError:
template_table = lsctables.SimInspiralTable.get_table(indoc)
# open the output file where the max overlaps over the bank are stored
fout = open(options.out_file, "w")
fout2 = open(options.out_file+".found", "w")
print("Writing matches to " + options.out_file)
print("Writing recovered template in " + options.out_file+".found")
# Load in the simulation list
indoc = ligolw_utils.load_filename(options.sim_file, False)
try:
signal_table = lsctables.SimInspiralTable.get_table(indoc)
except ValueError:
signal_table = lsctables.SnglInspiralTable.get_table(indoc)
def outside_mchirp_window(template,signal,w):
template_mchirp,et = mass1_mass2_to_mchirp_eta(template.mass1,template.mass2)
signal_mchirp ,et = mass1_mass2_to_mchirp_eta(signal.mass1,signal.mass2)
    if abs(signal_mchirp - template_mchirp) > (w*signal_mchirp):
        return True
    else:
        return False
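# A minimal sketch of how the chirp-mass window above prunes template/signal
# pairs, using hypothetical masses (not drawn from any bank or injection
# file); mass1_mass2_to_mchirp_eta is the same pycbc.pnutils function already
# used in outside_mchirp_window.
def _sketch_mchirp_window():
    t_mc, _ = mass1_mass2_to_mchirp_eta(10.0, 10.0)   # template chirp mass
    s_mc, _ = mass1_mass2_to_mchirp_eta(10.0, 11.0)   # signal chirp mass
    # A 10% window keeps this pair; a 1% window would skip it.
    return [abs(s_mc - t_mc) > w * s_mc for w in (0.1, 0.01)]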
filter_N = int(options.filter_signal_length * options.filter_sample_rate)
filter_n = filter_N // 2 + 1
filter_delta_f = 1.0 / float(options.filter_signal_length)
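# Quick sanity sketch of the length relations just computed, with assumed
# example values (64 s at 4096 Hz) rather than the command-line options:
# N time-domain samples map to N // 2 + 1 one-sided frequency samples and a
# frequency resolution of 1 / duration.
def _sketch_fft_lengths(signal_length=64, sample_rate=4096):
    N = int(signal_length * sample_rate)   # 262144 time-domain samples
    n = N // 2 + 1                         # 131073 frequency samples
    delta_f = 1.0 / float(signal_length)   # 0.015625 Hz resolution
    return N, n, delta_f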
print("Number of Signal Waveforms: ",len(signal_table))
print("Number of Templates : ",len(template_table))
print("Reading and Interpolating PSD")
if options.asd_file:
psd = pycbc.psd.read.from_txt(options.asd_file, filter_n, filter_delta_f,
options.filter_low_frequency_cutoff)
elif options.psd:
psd = pycbc.psd.analytic.from_string(options.psd, filter_n, filter_delta_f,
options.filter_low_frequency_cutoff)
psd *= DYN_RANGE_FAC **2
psd = FrequencySeries(psd,delta_f=psd.delta_f,dtype=float32)
with ctx:
print("Pregenerating Signals")
signals = []
index = 0
for signal_params in signal_table:
index += 1
        update_progress(float(index)/len(signal_table))
stilde = get_waveform(options.signal_approximant,
options.signal_phase_order,
options.signal_amplitude_order,
signal_params,
options.signal_start_frequency,
options.filter_sample_rate,
filter_N)
s_norm = sigmasq(stilde, psd=psd,
low_frequency_cutoff=options.filter_low_frequency_cutoff)
stilde /= psd
signals.append( (stilde, s_norm, [], signal_params) )
print("Calculating Overlaps")
index = 0
# Calculate the overlaps
for template_params in template_table:
index += 1
update_progress(float(index)/len(template_table))
h_norm = htilde = None
for stilde, s_norm, matches, signal_params in signals:
# Check if we need to look at this template
if options.mchirp_window and outside_mchirp_window(template_params,
signal_params, options.mchirp_window):
matches.append(-1)
continue
# Generate htilde if we haven't already done so
if htilde is None:
htilde = get_waveform(options.template_approximant,
options.template_phase_order,
options.template_amplitude_order,
template_params,
options.template_start_frequency,
options.filter_sample_rate,
filter_N)
h_norm = sigmasq(htilde, psd=psd,
low_frequency_cutoff=options.filter_low_frequency_cutoff)
o,i = match(htilde, stilde, h_norm=h_norm, s_norm=s_norm,
low_frequency_cutoff=options.filter_low_frequency_cutoff)
matches.append(o)
#Find the maximum overlap in the bank and output to a file
for stilde, s_norm, matches, sim_template in signals:
match_str= "%5.5f \n" % (max(matches))
match_str2=" "+options.bank_file+" "+str(matches.index(max(matches)))+"\n"
fout.write(match_str)
fout2.write(match_str2)
| 12,160
| 40.64726
| 177
|
py
|
pycbc
|
pycbc-master/tools/einsteinathome/check_GW150914_detection.py
|
# Read a pycbc_inspiral HDF5 trigger file and check that it contains triggers
# compatible with GW150914
# 2016 Tito Dal Canton
import sys
import h5py
import numpy as np
# GW150914 params from my run
# https://www.atlas.aei.uni-hannover.de/~tito/LSC/er8/er8b_c00_1.2.0_run1
gw150914_time = 1126259462.4
gw150914_snr = {'H1': 19.71, 'L1': 13.28}
gw150914_chi2r = {'H1': 1.05, 'L1': 0.45}
f = h5py.File(sys.argv[1], 'r')
detector = tuple(f.keys())[0]
end_times = f[detector]['end_time'][:]
snrs = f[detector]['snr'][:]
chi2rs = f[detector]['chisq'][:] / (2 * f[detector]['chisq_dof'][:] - 2)
# search for trigs compatible with GW150914
mask = np.logical_and.reduce([abs(end_times - gw150914_time) < 0.1,
snrs > 0.8 * gw150914_snr[detector],
snrs < 1.2 * gw150914_snr[detector],
chi2rs > 0.8 * gw150914_chi2r[detector],
chi2rs < 1.2 * gw150914_chi2r[detector]])
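# The reduce above is equivalent to chaining five boolean masks with `&`.
# A small sketch of the same selection on hypothetical trigger arrays
# (np is the numpy import at the top of this script):
def _sketch_mask(times, snrs, chi2rs, t0, snr0, chi2r0):
    # keep triggers within 0.1 s of t0 and within 20% of the reference
    # SNR and reduced chi-squared
    return np.logical_and.reduce([abs(times - t0) < 0.1,
                                  snrs > 0.8 * snr0,
                                  snrs < 1.2 * snr0,
                                  chi2rs > 0.8 * chi2r0,
                                  chi2rs < 1.2 * chi2r0])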
if mask.any():
print('Pass: %d GW150914-like triggers' % sum(mask))
print('end_time snr reduced_chi2')
for t, s, c in zip(end_times[mask], snrs[mask], chi2rs[mask]):
print('%.3f %.3f %.3f' % (t, s, c))
sys.exit(0)
else:
print('Fail: no GW150914-like triggers')
sys.exit(1)
| 1,290
| 32.973684
| 77
|
py
|
pycbc
|
pycbc-master/tools/static/runtime-tkinter.py
|
import os, sys
d = os.path.join(sys._MEIPASS, 'tcl')
if not os.path.exists(d):
os.makedirs(d)
d = os.path.join(sys._MEIPASS, 'tk')
if not os.path.exists(d):
os.makedirs(d)
| 180
| 21.625
| 37
|
py
|
pycbc
|
pycbc-master/tools/static/runtime-scipy.py
|
import os, distutils.sysconfig, sys, os.path
import scipy.misc
import scipy, fnmatch
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
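# Usage sketch (hypothetical arguments): collect every shared object below
# the current directory with the helper above.
def _sketch_find():
    return find('*.so*', '.')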
# Handle stupid scipy imports (fixed in scipy 0.13)
scipy.factorial = scipy.misc.factorial
# handle python shared library name
basedir = sys._MEIPASS
print("Setting up scipy for temporary basedir %s" % basedir)
python_lib = find('libpython*.so.*', basedir)[0]
python_lib_dest = python_lib.split('.so')[0] + '.so'
os.symlink(python_lib, python_lib_dest)
print("LD_LIBRARY_PATH=%s" % os.environ['LD_LIBRARY_PATH'])
| 734
| 29.625
| 60
|
py
|
pycbc
|
pycbc-master/tools/static/hooks/hook-pycbc.py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
try:
from PyInstaller.utils.hooks import (collect_data_files, collect_submodules)
except ImportError:
from PyInstaller.hooks.hookutils import (collect_data_files, collect_submodules)
# Executables that need MKL
needs_mkl = ['pycbc_inspiral','pycbc_single_template']
# Some of our libraries are not being picked up automatically (needs
# investigation). In the meantime we can pull them in manually, in the same
# way we normally find them
from pycbc.libutils import get_libpath_from_dirlist, pkg_config_libdirs
def find_lib_path(libname, packages):
libdirs = []
if "LD_LIBRARY_PATH" in os.environ:
libdirs += os.environ["LD_LIBRARY_PATH"].split(":")
try:
libdirs += pkg_config_libdirs(packages)
except ValueError:
pass
path = get_libpath_from_dirlist(libname, libdirs)
if path is not None:
return [(path, '')]
else:
return []
# pull in the pycbc imports it can't find
hiddenimports = ['pycbc.fft.fft_cpu',
'pycbc.filter.matchedfilter_cpu',
'pycbc.vetoes.chisq_cpu',
'pycbc.waveform.spa_tmplt_cpu',
'pycbc.types.array_cpu',
'pycbc.fft.backend_cpu',
'pycbc.fft.backend_mkl',
'pycbc.fft.fftw',
'pycbc.fft.mkl',
'pycbc.fft.npfft',
'pycbc.fft.__init__',
'pycbc.events.threshold_cpu',
'scipy.linalg.cython_blas',
'scipy.linalg.cython_lapack',
'scipy.special._ufuncs_cxx',
'h5py',
'h5py._conv',
'h5py._stub',
'mpld3'
]
datas = []
# Add html assets to all executables
cwd = os.environ.get("PYCBC_HOOKS_DIR", os.getcwd())
basedir = cwd.replace('tools/static','')
rootdir = basedir + 'pycbc/results'
for root, subdirs, files in os.walk(rootdir):
for filename in files:
if not filename.endswith('.py') and not filename.endswith('.pyc'):
file_path = os.path.join(root, filename)
store_path = '/'.join(file_path.split('/')[:-1])
store_path = store_path.replace(basedir, '')
datas.append( (file_path, store_path) )
# Add em-bright data file
file_path = basedir + 'pycbc/tmpltbank/ns_sequences/equil_2H.dat'
store_path = '/'.join(file_path.split('/')[:-1])
store_path = store_path.replace(basedir, '')
datas.append( (file_path, store_path) )
if os.environ.get("NOW_BUILDING", None) in needs_mkl:
# pull in all the mkl .so files
datas += find_lib_path('mkl_rt', [])
datas += find_lib_path('mkl_core', [])
datas += find_lib_path('mkl_intel_thread', [])
datas += find_lib_path('mkl_intel_lp64', [])
datas += find_lib_path('mkl_avx2', [])
datas += find_lib_path('mkl_def', [])
datas += find_lib_path('iomp5', [])
datas += find_lib_path('mkl_mc3', [])
# try to pull in the openmp fftw files
#datas += find_lib_path('fftw3', ['fftw3'])
#datas += find_lib_path('fftw3f', ['fft3f'])
#datas += find_lib_path('fftw3f_omp', ['fftw3f'])
#datas += find_lib_path('fftw3_omp', ['fftw3'])
| 3,570
| 35.070707
| 84
|
py
|
pycbc
|
pycbc-master/pycbc/boundaries.py
|
# Copyright (C) 2016 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides utilities for manipulating parameter boundaries. Namely,
classes are offered that will map values to a specified domain using either
cyclic boundaries or reflected boundaries.
"""
import numpy
class _Bound(float):
"""Adds methods to float for boundary comparisons."""
name = None
def larger(self, other):
"""A function to determine whether or not `other` is larger
than the bound. This raises a NotImplementedError; classes that
inherit from this must define it.
"""
raise NotImplementedError("larger function not set")
def smaller(self, other):
"""A function to determine whether or not `other` is smaller
than the bound. This raises a NotImplementedError; classes that
inherit from this must define it.
"""
raise NotImplementedError("smaller function not set")
class OpenBound(_Bound):
"""Sets larger and smaller functions to be `>` and `<`, respectively."""
name = 'open'
def larger(self, other):
"""Returns True if `other` is `>`, False otherwise"""
return self > other
def smaller(self, other):
"""Returns True if `other` is `<`, False otherwise."""
return self < other
class ClosedBound(_Bound):
"""Sets larger and smaller functions to be `>=` and `<=`, respectively."""
name = 'closed'
def larger(self, other):
return self >= other
def smaller(self, other):
return self <= other
class ReflectedBound(ClosedBound):
"""Inherits from `ClosedBound`, adding reflection functions."""
name = 'reflected'
def reflect(self, value):
return 2*self - value
def reflect_left(self, value):
"""Only reflects the value if is > self."""
if value > self:
value = self.reflect(value)
return value
def reflect_right(self, value):
"""Only reflects the value if is < self."""
if value < self:
value = self.reflect(value)
return value
boundary_types = {
OpenBound.name: OpenBound,
ClosedBound.name: ClosedBound,
ReflectedBound.name: ReflectedBound
}
#
# Helper functions for applying conditions to boundaries
#
def apply_cyclic(value, bounds):
"""Given a value, applies cyclic boundary conditions between the minimum
and maximum bounds.
Parameters
----------
value : float
The value to apply the cyclic conditions to.
bounds : Bounds instance
Boundaries to use for applying cyclic conditions.
Returns
-------
float
The value after the cyclic bounds are applied.
"""
    return (value - bounds._min) % (bounds._max - bounds._min) + bounds._min
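# A doctest-style sketch of the wrapping above with hypothetical bounds,
# in the same >>> style used elsewhere in this module:
def _example_apply_cyclic():
    """Wrap values into the right-open interval [0, 2).

    >>> b = Bounds(0., 2.)
    >>> apply_cyclic(5., b)   # 5 wraps around twice to land at 1
    1.0
    >>> apply_cyclic(-0.5, b)
    1.5
    """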
def reflect_well(value, bounds):
"""Given some boundaries, reflects the value until it falls within both
boundaries. This is done iteratively, reflecting left off of the
    `bounds.max`, then right off of the `bounds.min`, etc.
Parameters
----------
value : float
The value to apply the reflected boundaries to.
bounds : Bounds instance
Boundaries to reflect between. Both `bounds.min` and `bounds.max` must
be instances of `ReflectedBound`, otherwise an AttributeError is
raised.
Returns
-------
float
The value after being reflected between the two bounds.
"""
while value not in bounds:
value = bounds._max.reflect_left(value)
value = bounds._min.reflect_right(value)
return value
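# A doctest-style sketch of the iterative reflection above, with hypothetical
# bounds (both ends must be ReflectedBound instances):
def _example_reflect_well():
    """Reflect a value into [0, 2].

    >>> b = Bounds(0., 2., btype_min='reflected', btype_max='reflected')
    >>> reflect_well(5., b)   # 5 -> -1 (off the max) -> 1 (off the min)
    1.0
    """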
def _pass(value):
"""Just return the given value."""
return value
#
# Bounds class
#
class Bounds(object):
"""Creates and stores bounds using the given values.
The type of boundaries used can be set using the `btype_(min|max)`
parameters. These arguments set what kind of boundary is used at the
minimum and maximum bounds. Specifically, if `btype_min` (`btype_max`) is
set to:
* "open": the minimum (maximum) boundary will be an instance of
`OpenBound`. This means that a value must be `>` (`<`) the bound
for it to be considered within the bounds.
* "closed": the minimum (maximum) boundary will be an instance of
`ClosedBound`. This means that a value must be `>=` (`<=`) the bound
for it to be considered within the bounds.
* "reflected": the minimum (maximum) boundary will be an isntance of
`ReflectedBound`. This means that a value will be reflected to the
right (left) if `apply_conditions` is used on the value. For more
details see `apply_conditions`.
If the `cyclic` keyword is set to True, then `apply_conditions` will cause
values to be wrapped around to the minimum (maximum) bound if the value
    is >= (<) the maximum (minimum) bound. For more details see
`apply_conditions`.
Values can be checked whether or not they occur within the bounds using
`in`; e.g., `6 in bounds`. This is done without applying any boundary
conditions. To apply conditions, then check whether the value is in
bounds, use the `contains_conditioned` method.
The default is for the minimum bound to be "closed" and the maximum bound
to be "open", i.e., a right-open interval.
Parameters
----------
min_bound : {-numpy.inf, float}
The value of the lower bound. Default is `-inf`.
max_bound : {numpy.inf, float}
The value of the upper bound. Default is `inf`.
btype_min : {'closed', string}
The type of the lower bound; options are "closed", "open", or
"reflected". Default is "closed".
    btype_max : {'open', string}
        The type of the upper bound; options are "closed", "open", or
"reflected". Default is "open".
cyclic : {False, bool}
Whether or not to make the bounds cyclic; default is False. If True,
both the minimum and maximum bounds must be finite.
Examples
--------
Create a right-open interval between -1 and 1 and test whether various
values are within them:
>>> bounds = Bounds(-1., 1.)
>>> -1 in bounds
True
>>> 0 in bounds
True
>>> 1 in bounds
False
Create an open interval between -1 and 1 and test the same values:
>>> bounds = Bounds(-1, 1, btype_min="open")
>>> -1 in bounds
False
>>> 0 in bounds
True
>>> 1 in bounds
False
Create cyclic bounds between -1 and 1 and plot the effect of conditioning
on points between -10 and 10:
>>> bounds = Bounds(-1, 1, cyclic=True)
>>> x = numpy.linspace(-10, 10, num=1000)
>>> conditioned_x = bounds.apply_conditions(x)
>>> fig = pyplot.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, x, c='b', lw=2, label='input')
>>> ax.plot(conditioned_x, x, c='r', lw=1)
>>> ax.vlines([-1., 1.], x.min(), x.max(), color='k', linestyle='--')
>>> ax.set_title('cyclic bounds between x=-1,1')
>>> fig.show()
Create a reflected bound at -1 and plot the effect of conditioning:
>>> bounds = Bounds(-1, 1, btype_min='reflected')
>>> x = numpy.linspace(-10, 10, num=1000)
>>> conditioned_x = bounds.apply_conditions(x)
>>> fig = pyplot.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, x, c='b', lw=2, label='input')
>>> ax.plot(conditioned_x, x, c='r', lw=1)
>>> ax.vlines([-1., 1.], x.min(), x.max(), color='k', linestyle='--')
>>> ax.set_title('reflected right at x=-1')
>>> fig.show()
Create a reflected bound at 1 and plot the effect of conditioning:
>>> bounds = Bounds(-1, 1, btype_max='reflected')
>>> x = numpy.linspace(-10, 10, num=1000)
>>> conditioned_x = bounds.apply_conditions(x)
>>> fig = pyplot.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, x, c='b', lw=2, label='input')
>>> ax.plot(conditioned_x, x, c='r', lw=1)
>>> ax.vlines([-1., 1.], x.min(), x.max(), color='k', linestyle='--')
>>> ax.set_title('reflected left at x=1')
>>> fig.show()
Create reflected bounds at -1 and 1 and plot the effect of conditioning:
>>> bounds = Bounds(-1, 1, btype_min='reflected', btype_max='reflected')
>>> x = numpy.linspace(-10, 10, num=1000)
>>> conditioned_x = bounds.apply_conditions(x)
>>> fig = pyplot.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, x, c='b', lw=2, label='input')
>>> ax.plot(conditioned_x, x, c='r', lw=1)
>>> ax.vlines([-1., 1.], x.min(), x.max(), color='k', linestyle='--')
    >>> ax.set_title('reflected between x=-1,1')
>>> fig.show()
"""
def __init__(self, min_bound=-numpy.inf, max_bound=numpy.inf,
btype_min='closed', btype_max='open', cyclic=False):
# check boundary values
if min_bound >= max_bound:
raise ValueError("min_bound must be < max_bound")
if cyclic and not (
numpy.isfinite(min_bound) and numpy.isfinite(max_bound)):
raise ValueError("if using cyclic, min and max bounds must both "
"be finite")
# store bounds
try:
self._min = boundary_types[btype_min](min_bound)
except KeyError:
raise ValueError("unrecognized btype_min {}".format(btype_min))
try:
self._max = boundary_types[btype_max](max_bound)
except KeyError:
raise ValueError("unrecognized btype_max {}".format(btype_max))
# store cyclic conditions
self._cyclic = bool(cyclic)
# store reflection conditions; we'll vectorize them here so that they
# can be used with arrays
if self._min.name == 'reflected' and self._max.name == 'reflected':
self._reflect = numpy.vectorize(self._reflect_well)
self.reflected = 'well'
elif self._min.name == 'reflected':
self._reflect = numpy.vectorize(self._min.reflect_right)
self.reflected = 'min'
elif self._max.name == 'reflected':
self._reflect = numpy.vectorize(self._max.reflect_left)
self.reflected = 'max'
else:
self._reflect = _pass
self.reflected = False
def __repr__(self):
return str(self.__class__)[:-1] + " " + " ".join(
map(str, ["min", self._min, "max", self._max,
"cyclic", self._cyclic])) + ">"
@property
def min(self):
"""_bounds instance: The minimum bound """
return self._min
@property
def max(self):
"""_bounds instance: The maximum bound """
return self._max
@property
def cyclic(self):
"""bool: Whether the bounds are cyclic or not.
"""
return self._cyclic
def __getitem__(self, ii):
if ii == 0:
return self._min
elif ii == 1:
return self._max
else:
raise IndexError("index {} out of range".format(ii))
def __abs__(self):
return abs(self._max - self._min)
def __contains__(self, value):
return self._min.smaller(value) & self._max.larger(value)
def _reflect_well(self, value):
"""Thin wrapper around `reflect_well` that passes self as the `bounds`.
"""
return reflect_well(value, self)
def _apply_cyclic(self, value):
"""Thin wrapper around `apply_cyclic` that passes self as the `bounds`.
"""
return apply_cyclic(value, self)
def apply_conditions(self, value):
"""Applies any boundary conditions to the given value.
        The value is manipulated based on the following conditions:
* If `self.cyclic` is True then `value` is wrapped around to the
minimum (maximum) bound if `value` is `>= self.max` (`< self.min`)
bound. For example, if the minimum and maximum bounds are `0, 2*pi`
and `value = 5*pi`, then the returned value will be `pi`.
* If `self.min` is a reflected boundary then `value` will be
reflected to the right if it is `< self.min`. For example, if
`self.min = 10` and `value = 3`, then the returned value will be
17.
* If `self.max` is a reflected boundary then `value` will be
reflected to the left if it is `> self.max`. For example, if
`self.max = 20` and `value = 27`, then the returned value will be
13.
* If `self.min` and `self.max` are both reflected boundaries, then
`value` will be reflected between the two boundaries until it
falls within the bounds. The first reflection occurs off of the
maximum boundary. For example, if `self.min = 10`, `self.max =
          20`, and `value = 42`, the returned value will be 18 (the first
reflection yields -2, the second 22, and the last 18).
* If neither bounds are reflected and cyclic is False, then the
value is just returned as-is.
Parameters
----------
value : float
The value to apply the conditions to.
Returns
-------
float
The value after the conditions are applied; see above for details.
"""
retval = value
if self._cyclic:
retval = apply_cyclic(value, self)
retval = self._reflect(retval)
if isinstance(retval, numpy.ndarray) and retval.size == 1:
try:
retval = retval[0]
except IndexError:
retval = float(retval)
return retval
def contains_conditioned(self, value):
"""Runs `apply_conditions` on the given value before testing whether it
is in bounds. Note that if `cyclic` is True, or both bounds
        are reflected, then this will always return True.
Parameters
----------
value : float
The value to test.
Returns
-------
bool
Whether or not the value is within the bounds after the boundary
conditions are applied.
"""
return self.apply_conditions(value) in self
| 15,026
| 34.357647
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/pnutils.py
|
# Copyright (C) 2012 Alex Nitz
#
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""This module contains convenience pN functions. This includes calculating conversions
between quantities.
"""
import lal
import numpy
from scipy.optimize import bisect, brentq, minimize
from pycbc import conversions, libutils
lalsimulation = libutils.import_optional('lalsimulation')
def nearest_larger_binary_number(input_len):
""" Return the nearest binary number larger than input_len.
"""
return int(2**numpy.ceil(numpy.log2(input_len)))
def chirp_distance(dist, mchirp, ref_mass=1.4):
return conversions.chirp_distance(dist, mchirp, ref_mass=ref_mass)
def mass1_mass2_to_mtotal_eta(mass1, mass2):
m_total = conversions.mtotal_from_mass1_mass2(mass1, mass2)
eta = conversions.eta_from_mass1_mass2(mass1, mass2)
return m_total,eta
def mtotal_eta_to_mass1_mass2(m_total, eta):
mass1 = conversions.mass1_from_mtotal_eta(m_total, eta)
mass2 = conversions.mass2_from_mtotal_eta(m_total, eta)
return mass1,mass2
def mass1_mass2_to_mchirp_eta(mass1, mass2):
m_chirp = conversions.mchirp_from_mass1_mass2(mass1, mass2)
eta = conversions.eta_from_mass1_mass2(mass1, mass2)
return m_chirp,eta
def mchirp_eta_to_mass1_mass2(m_chirp, eta):
mtotal = conversions.mtotal_from_mchirp_eta(m_chirp, eta)
mass1 = conversions.mass1_from_mtotal_eta(mtotal, eta)
mass2 = conversions.mass2_from_mtotal_eta(mtotal, eta)
return mass1, mass2
def mchirp_mass1_to_mass2(mchirp, mass1):
"""
This function takes a value of mchirp and one component mass and returns
the second component mass. As this is a cubic equation this requires
finding the roots and returning the one that is real.
Basically it can be shown that:
m2^3 - a(m2 + m1) = 0
where
a = Mc^5 / m1^3
this has 3 solutions but only one will be real.
"""
return conversions.mass2_from_mchirp_mass1(mchirp, mass1)
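# A numeric sketch (hypothetical masses) of the cubic inversion described
# above: an equal-mass binary has mchirp = M * (1/4)**(3/5), so inverting
# that chirp mass with m1 = 10 should recover m2 = 10.
def _sketch_mchirp_inversion():
    m1 = 10.0
    mchirp = 2.0 * m1 * 0.25 ** 0.6   # chirp mass of an equal-mass binary
    m2 = mchirp_mass1_to_mass2(mchirp, m1)
    assert abs(m2 - m1) < 1e-4
    return m2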
def eta_mass1_to_mass2(eta, mass1, return_mass_heavier=False, force_real=True):
"""
This function takes values for eta and one component mass and returns the
second component mass. Similar to mchirp_mass1_to_mass2 this requires
finding the roots of a quadratic equation. Basically:
    eta*m2^2 + (2*eta - 1)*m1*m2 + eta*m1^2 = 0
This has two solutions which correspond to mass1 being the heavier mass
or it being the lighter mass. By default the value corresponding to
mass1 > mass2 is returned. Use the return_mass_heavier kwarg to invert this
behaviour.
"""
return conversions.mass_from_knownmass_eta(mass1, eta,
known_is_secondary=return_mass_heavier, force_real=force_real)
def mchirp_q_to_mass1_mass2(mchirp, q):
""" This function takes a value of mchirp and the mass ratio
mass1/mass2 and returns the two component masses.
The map from q to eta is
eta = (mass1*mass2)/(mass1+mass2)**2 = (q)/(1+q)**2
Then we can map from (mchirp,eta) to (mass1,mass2).
"""
eta = conversions.eta_from_q(q)
mass1 = conversions.mass1_from_mchirp_eta(mchirp, eta)
mass2 = conversions.mass2_from_mchirp_eta(mchirp, eta)
return mass1, mass2
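# Sketch with hypothetical values: for q = m1/m2 = 2 the map above gives
# eta = 2 / (1 + 2)**2 = 2/9, and the recovered masses preserve the ratio.
def _sketch_mchirp_q():
    m1, m2 = mchirp_q_to_mass1_mass2(8.7, 2.0)
    assert abs(m1 / m2 - 2.0) < 1e-6
    return m1, m2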
def A0(f_lower):
"""used in calculating chirp times: see Cokelaer, arxiv.org:0706.4437
appendix 1, also lalinspiral/python/sbank/tau0tau3.py
"""
return conversions._a0(f_lower)
def A3(f_lower):
"""another parameter used for chirp times"""
return conversions._a3(f_lower)
def mass1_mass2_to_tau0_tau3(mass1, mass2, f_lower):
tau0 = conversions.tau0_from_mass1_mass2(mass1, mass2, f_lower)
tau3 = conversions.tau3_from_mass1_mass2(mass1, mass2, f_lower)
return tau0,tau3
def tau0_tau3_to_mtotal_eta(tau0, tau3, f_lower):
mtotal = conversions.mtotal_from_tau0_tau3(tau0, tau3, f_lower)
eta = conversions.eta_from_tau0_tau3(tau0, tau3, f_lower)
return mtotal, eta
def tau0_tau3_to_mass1_mass2(tau0, tau3, f_lower):
m_total,eta = tau0_tau3_to_mtotal_eta(tau0, tau3, f_lower)
return mtotal_eta_to_mass1_mass2(m_total, eta)
def mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(mass1, mass2,
spin1z, spin2z):
_, eta = mass1_mass2_to_mtotal_eta(mass1, mass2)
# get_beta_sigma_from_aligned_spins() takes
# the spin of the heaviest body first
heavy_spin = numpy.where(mass2 <= mass1, spin1z, spin2z)
light_spin = numpy.where(mass2 > mass1, spin1z, spin2z)
beta, sigma, gamma = get_beta_sigma_from_aligned_spins(
eta, heavy_spin, light_spin)
return beta, sigma, gamma
def get_beta_sigma_from_aligned_spins(eta, spin1z, spin2z):
"""
Calculate the various PN spin combinations from the masses and spins.
See <http://arxiv.org/pdf/0810.5336v3.pdf>.
Parameters
-----------
eta : float or numpy.array
Symmetric mass ratio of the input system(s)
spin1z : float or numpy.array
Spin(s) parallel to the orbit of the heaviest body(ies)
spin2z : float or numpy.array
Spin(s) parallel to the orbit of the smallest body(ies)
Returns
--------
beta : float or numpy.array
The 1.5PN spin combination
sigma : float or numpy.array
The 2PN spin combination
gamma : float or numpy.array
The 2.5PN spin combination
chis : float or numpy.array
(spin1z + spin2z) / 2.
"""
chiS = 0.5 * (spin1z + spin2z)
chiA = 0.5 * (spin1z - spin2z)
delta = (1 - 4 * eta) ** 0.5
spinspin = spin1z * spin2z
beta = (113. / 12. - 19. / 3. * eta) * chiS
beta += 113. / 12. * delta * chiA
sigma = eta / 48. * (474 * spinspin)
sigma += (1 - 2 * eta) * (81. / 16. * (chiS * chiS + chiA * chiA))
sigma += delta * (81. / 8. * (chiS * chiA))
gamma = (732985. / 2268. - 24260. / 81. * eta - \
340. / 9. * eta * eta) * chiS
gamma += (732985. / 2268. + 140. / 9. * eta) * delta * chiA
return beta, sigma, gamma
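# Sanity sketch: with both component spins zero, every PN spin combination
# above vanishes identically (eta = 0.25 is a hypothetical equal-mass value).
def _sketch_beta_sigma():
    beta, sigma, gamma = get_beta_sigma_from_aligned_spins(0.25, 0.0, 0.0)
    assert beta == sigma == gamma == 0.0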
def solar_mass_to_kg(solar_masses):
return solar_masses * lal.MSUN_SI
def parsecs_to_meters(distance):
return distance * lal.PC_SI
def megaparsecs_to_meters(distance):
return parsecs_to_meters(distance) * 1e6
def velocity_to_frequency(v, M):
return conversions.velocity_to_frequency(v, M)
def frequency_to_velocity(f, M):
return conversions.frequency_to_velocity(f, M)
def f_SchwarzISCO(M):
"""
Innermost stable circular orbit (ISCO) for a test particle
orbiting a Schwarzschild black hole
Parameters
----------
M : float or numpy.array
Total mass in solar mass units
Returns
-------
f : float or numpy.array
Frequency in Hz
"""
return conversions.f_schwarzchild_isco(M)
def f_BKLISCO(m1, m2):
"""
Mass ratio dependent ISCO derived from estimates of the final spin
of a merged black hole in a paper by Buonanno, Kidder, Lehner
(arXiv:0709.3839). See also arxiv:0801.4297v2 eq.(5)
Parameters
----------
m1 : float or numpy.array
First component mass in solar mass units
m2 : float or numpy.array
Second component mass in solar mass units
Returns
-------
f : float or numpy.array
Frequency in Hz
"""
# q is defined to be in [0,1] for this formula
q = numpy.minimum(m1/m2, m2/m1)
return f_SchwarzISCO(m1+m2) * ( 1 + 2.8*q - 2.6*q*q + 0.8*q*q*q )
def f_LightRing(M):
"""
Gravitational wave frequency corresponding to the light-ring orbit,
equal to 1/(3**(3/2) pi M) : see InspiralBankGeneration.c
Parameters
----------
M : float or numpy.array
Total mass in solar mass units
Returns
-------
f : float or numpy.array
Frequency in Hz
"""
return 1.0 / (3.0**(1.5) * lal.PI * M * lal.MTSUN_SI)
def f_ERD(M):
"""
Effective RingDown frequency studied in Pan et al. (arXiv:0704.1964)
found to give good fit between stationary-phase templates and
numerical relativity waveforms [NB equal-mass & nonspinning!]
Equal to 1.07*omega_220/2*pi
Parameters
----------
M : float or numpy.array
Total mass in solar mass units
Returns
-------
f : float or numpy.array
Frequency in Hz
"""
return 1.07 * 0.5326 / (2*lal.PI * 0.955 * M * lal.MTSUN_SI)
def f_FRD(m1, m2):
"""
Fundamental RingDown frequency calculated from the Berti, Cardoso and
Will (gr-qc/0512160) value for the omega_220 QNM frequency using
mass-ratio dependent fits to the final BH mass and spin from Buonanno
et al. (arXiv:0706.3732) : see also InspiralBankGeneration.c
Parameters
----------
m1 : float or numpy.array
First component mass in solar mass units
m2 : float or numpy.array
Second component mass in solar mass units
Returns
-------
f : float or numpy.array
Frequency in Hz
"""
m_total, eta = mass1_mass2_to_mtotal_eta(m1, m2)
tmp = ( (1. - 0.63*(1. - 3.4641016*eta + 2.9*eta**2)**(0.3)) /
(1. - 0.057191*eta - 0.498*eta**2) )
return tmp / (2.*lal.PI * m_total*lal.MTSUN_SI)
def f_LRD(m1, m2):
"""
Lorentzian RingDown frequency = 1.2*FRD which captures part of
the Lorentzian tail from the decay of the QNMs
Parameters
----------
m1 : float or numpy.array
First component mass in solar mass units
m2 : float or numpy.array
Second component mass in solar mass units
Returns
-------
f : float or numpy.array
Frequency in Hz
"""
return 1.2 * f_FRD(m1, m2)
def _get_freq(freqfunc, m1, m2, s1z, s2z):
"""
Wrapper of the LALSimulation function returning the frequency
for a given frequency function and template parameters.
Parameters
----------
freqfunc : lalsimulation FrequencyFunction wrapped object e.g.
lalsimulation.fEOBNRv2RD
m1 : float-ish, i.e. castable to float
First component mass in solar masses
m2 : float-ish
Second component mass in solar masses
s1z : float-ish
First component dimensionless spin S_1/m_1^2 projected onto L
s2z : float-ish
Second component dimensionless spin S_2/m_2^2 projected onto L
Returns
-------
f : float
Frequency in Hz
"""
# Convert to SI units for lalsimulation
m1kg = float(m1) * lal.MSUN_SI
m2kg = float(m2) * lal.MSUN_SI
return lalsimulation.SimInspiralGetFrequency(
m1kg, m2kg, 0, 0, float(s1z), 0, 0, float(s2z), int(freqfunc))
# vectorize to enable calls with numpy arrays
_vec_get_freq = numpy.vectorize(_get_freq)
def get_freq(freqfunc, m1, m2, s1z, s2z):
"""
Returns the LALSimulation function which evaluates the frequency
for the given frequency function and template parameters.
Parameters
----------
freqfunc : string
Name of the frequency function to use, e.g., 'fEOBNRv2RD'
m1 : float or numpy.array
First component mass in solar masses
m2 : float or numpy.array
Second component mass in solar masses
s1z : float or numpy.array
First component dimensionless spin S_1/m_1^2 projected onto L
s2z : float or numpy.array
Second component dimensionless spin S_2/m_2^2 projected onto L
Returns
-------
f : float or numpy.array
Frequency in Hz
"""
lalsim_ffunc = getattr(lalsimulation, freqfunc)
return _vec_get_freq(lalsim_ffunc, m1, m2, s1z, s2z)
def _get_final_freq(approx, m1, m2, s1z, s2z):
"""
Wrapper of the LALSimulation function returning the final (highest)
frequency for a given approximant an template parameters
Parameters
----------
approx : lalsimulation approximant wrapped object e.g.
lalsimulation.EOBNRv2
m1 : float-ish, i.e. castable to float
First component mass in solar masses
m2 : float-ish
Second component mass in solar masses
s1z : float-ish
First component dimensionless spin S_1/m_1^2 projected onto L
s2z : float-ish
Second component dimensionless spin S_2/m_2^2 projected onto L
Returns
-------
f : float
Frequency in Hz
"""
# Convert to SI units for lalsimulation
m1kg = float(m1) * lal.MSUN_SI
m2kg = float(m2) * lal.MSUN_SI
return lalsimulation.SimInspiralGetFinalFreq(
m1kg, m2kg, 0, 0, float(s1z), 0, 0, float(s2z), int(approx))
# vectorize to enable calls with numpy arrays
_vec_get_final_freq = numpy.vectorize(_get_final_freq)
def get_final_freq(approx, m1, m2, s1z, s2z):
"""
Returns the LALSimulation function which evaluates the final
(highest) frequency for a given approximant using given template
parameters.
NOTE: TaylorTx and TaylorFx are currently all given an ISCO cutoff !!
Parameters
----------
approx : string
Name of the approximant e.g. 'EOBNRv2'
m1 : float or numpy.array
First component mass in solar masses
m2 : float or numpy.array
Second component mass in solar masses
s1z : float or numpy.array
First component dimensionless spin S_1/m_1^2 projected onto L
s2z : float or numpy.array
Second component dimensionless spin S_2/m_2^2 projected onto L
Returns
-------
f : float or numpy.array
Frequency in Hz
"""
lalsim_approx = lalsimulation.GetApproximantFromString(approx)
return _vec_get_final_freq(lalsim_approx, m1, m2, s1z, s2z)
# Dictionary of functions with uniform API taking a
# parameter dict indexed on mass1, mass2, spin1z, spin2z
named_frequency_cutoffs = {
# functions depending on the total mass alone
"SchwarzISCO": lambda p: f_SchwarzISCO(p["mass1"]+p["mass2"]),
"LightRing" : lambda p: f_LightRing(p["mass1"]+p["mass2"]),
"ERD" : lambda p: f_ERD(p["mass1"]+p["mass2"]),
# functions depending on the 2 component masses
"BKLISCO" : lambda p: f_BKLISCO(p["mass1"], p["mass2"]),
"FRD" : lambda p: f_FRD(p["mass1"], p["mass2"]),
"LRD" : lambda p: f_LRD(p["mass1"], p["mass2"]),
# functions depending on 2 component masses and aligned spins
"MECO" : lambda p: meco_frequency(p["mass1"], p["mass2"],
p["spin1z"], p["spin2z"]),
"HybridMECO" : lambda p: hybrid_meco_frequency(
p["mass1"], p["mass2"], p["spin1z"], p["spin2z"], qm1=None, qm2=None),
"IMRPhenomBFinal": lambda p: get_freq("fIMRPhenomBFinal",
p["mass1"], p["mass2"],
p["spin1z"], p["spin2z"]),
"IMRPhenomCFinal": lambda p: get_freq("fIMRPhenomCFinal",
p["mass1"], p["mass2"],
p["spin1z"], p["spin2z"]),
"IMRPhenomDPeak": lambda p: get_freq("fIMRPhenomDPeak",
p["mass1"], p["mass2"],
p["spin1z"], p["spin2z"]),
"EOBNRv2RD" : lambda p: get_freq("fEOBNRv2RD", p["mass1"], p["mass2"],
p["spin1z"], p["spin2z"]),
"EOBNRv2HMRD" : lambda p: get_freq("fEOBNRv2HMRD", p["mass1"], p["mass2"],
p["spin1z"], p["spin2z"]),
"SEOBNRv1RD" : lambda p: get_freq("fSEOBNRv1RD", p["mass1"], p["mass2"],
p["spin1z"], p["spin2z"]),
"SEOBNRv1Peak": lambda p: get_freq("fSEOBNRv1Peak", p["mass1"], p["mass2"],
p["spin1z"], p["spin2z"]),
"SEOBNRv2RD": lambda p: get_freq("fSEOBNRv2RD", p["mass1"], p["mass2"],
p["spin1z"], p["spin2z"]),
"SEOBNRv2Peak": lambda p: get_freq("fSEOBNRv2Peak", p["mass1"], p["mass2"],
p["spin1z"], p["spin2z"]),
"SEOBNRv4RD": lambda p: get_freq("fSEOBNRv4RD", p["mass1"], p["mass2"],
p["spin1z"], p["spin2z"]),
"SEOBNRv4Peak": lambda p: get_freq("fSEOBNRv4Peak", p["mass1"], p["mass2"],
p["spin1z"], p["spin2z"]),
"SEOBNRv5Peak": lambda p: get_freq("fSEOBNRv5Peak", p["mass1"], p["mass2"],
p["spin1z"], p["spin2z"])
}
def frequency_cutoff_from_name(name, m1, m2, s1z, s2z):
"""
Returns the result of evaluating the frequency cutoff function
specified by 'name' on a template with given parameters.
Parameters
----------
name : string
Name of the cutoff function
m1 : float or numpy.array
First component mass in solar masses
m2 : float or numpy.array
Second component mass in solar masses
s1z : float or numpy.array
First component dimensionless spin S_1/m_1^2 projected onto L
s2z : float or numpy.array
Second component dimensionless spin S_2/m_2^2 projected onto L
Returns
-------
f : float or numpy.array
Frequency in Hz
"""
params = {"mass1":m1, "mass2":m2, "spin1z":s1z, "spin2z":s2z}
return named_frequency_cutoffs[name](params)
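# Usage sketch (hypothetical parameters): the same dispatch works for scalars
# or numpy arrays. For a 10+10 solar-mass non-spinning system the
# Schwarzschild ISCO lands near 220 Hz.
def _sketch_cutoff():
    return frequency_cutoff_from_name("SchwarzISCO", 10.0, 10.0, 0.0, 0.0)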
def _get_imr_duration(m1, m2, s1z, s2z, f_low, approximant="SEOBNRv4"):
"""Wrapper of lalsimulation template duration approximate formula"""
m1, m2, s1z, s2z, f_low = float(m1), float(m2), float(s1z), float(s2z),\
float(f_low)
if approximant == "SEOBNRv2":
chi = lalsimulation.SimIMRPhenomBComputeChi(m1, m2, s1z, s2z)
time_length = lalsimulation.SimIMRSEOBNRv2ChirpTimeSingleSpin(
m1 * lal.MSUN_SI, m2 * lal.MSUN_SI, chi, f_low)
elif approximant == "IMRPhenomD":
time_length = lalsimulation.SimIMRPhenomDChirpTime(
m1 * lal.MSUN_SI, m2 * lal.MSUN_SI, s1z, s2z, f_low)
elif approximant == "SEOBNRv4":
# NB for no clear reason this function has f_low as first argument
time_length = lalsimulation.SimIMRSEOBNRv4ROMTimeOfFrequency(
f_low, m1 * lal.MSUN_SI, m2 * lal.MSUN_SI, s1z, s2z)
elif approximant == 'SPAtmplt' or approximant == 'TaylorF2':
chi = lalsimulation.SimInspiralTaylorF2ReducedSpinComputeChi(
m1, m2, s1z, s2z
)
time_length = lalsimulation.SimInspiralTaylorF2ReducedSpinChirpTime(
f_low, m1 * lal.MSUN_SI, m2 * lal.MSUN_SI, chi, -1
)
else:
raise RuntimeError("I can't calculate a duration for %s" % approximant)
# FIXME Add an extra factor of 1.1 for 'safety' since the duration
# functions are approximate
return time_length * 1.1
get_imr_duration = numpy.vectorize(_get_imr_duration)
def get_inspiral_tf(tc, mass1, mass2, spin1, spin2, f_low, n_points=100,
pn_2order=7, approximant='TaylorF2'):
"""Compute the time-frequency evolution of an inspiral signal.
Return a tuple of time and frequency vectors tracking the evolution of an
inspiral signal in the time-frequency plane.
"""
# handle param-dependent approximant specification
class Params:
pass
params = Params()
params.mass1 = mass1
params.mass2 = mass2
params.spin1z = spin1
params.spin2z = spin2
try:
approximant = eval(approximant, {'__builtins__': None},
dict(params=params))
except (NameError, TypeError):
pass
if approximant in ['TaylorF2', 'SPAtmplt']:
from pycbc.waveform.spa_tmplt import findchirp_chirptime
# FIXME spins are not taken into account
f_high = f_SchwarzISCO(mass1 + mass2)
track_f = numpy.logspace(numpy.log10(f_low), numpy.log10(f_high),
n_points)
track_t = numpy.array([findchirp_chirptime(float(mass1), float(mass2),
float(f), pn_2order) for f in track_f])
elif approximant in ['SEOBNRv2', 'SEOBNRv2_ROM_DoubleSpin',
'SEOBNRv2_ROM_DoubleSpin_HI']:
f_high = get_final_freq('SEOBNRv2', mass1, mass2, spin1, spin2)
track_f = numpy.logspace(numpy.log10(f_low), numpy.log10(f_high),
n_points)
# use HI function as it has wider freq range validity
track_t = numpy.array([
lalsimulation.SimIMRSEOBNRv2ROMDoubleSpinHITimeOfFrequency(f,
solar_mass_to_kg(mass1), solar_mass_to_kg(mass2),
float(spin1), float(spin2)) for f in track_f])
elif approximant in ['SEOBNRv4', 'SEOBNRv4_ROM']:
f_high = get_final_freq('SEOBNRv4', mass1, mass2, spin1, spin2)
# use frequency below final freq in case of rounding error
track_f = numpy.logspace(numpy.log10(f_low), numpy.log10(0.999*f_high),
n_points)
track_t = numpy.array([
lalsimulation.SimIMRSEOBNRv4ROMTimeOfFrequency(
f, solar_mass_to_kg(mass1), solar_mass_to_kg(mass2),
float(spin1), float(spin2)) for f in track_f])
else:
raise ValueError('Approximant ' + approximant + ' not supported')
return (tc - track_t, track_f)
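# Usage sketch (hypothetical parameters): track a 30+30 solar-mass
# non-spinning inspiral from 20 Hz with coalescence at tc = 0; the returned
# times are negative (before merger) and the frequencies rise from f_low.
def _sketch_inspiral_tf():
    times, freqs = get_inspiral_tf(0., 30., 30., 0., 0., 20.)
    return times, freqs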
##############################This code was taken from Andy ###########
def _energy_coeffs(m1, m2, chi1, chi2):
""" Return the center-of-mass energy coefficients up to 3.0pN (2.5pN spin)
"""
mtot = m1 + m2
eta = m1*m2 / (mtot*mtot)
chi = (m1*chi1 + m2*chi2) / mtot
chisym = (chi1 + chi2) / 2.
beta = (113.*chi - 76.*eta*chisym)/12.
sigma12 = 79.*eta*chi1*chi2/8.
sigmaqm = 81.*m1*m1*chi1*chi1/(16.*mtot*mtot) \
+ 81.*m2*m2*chi2*chi2/(16.*mtot*mtot)
energy0 = -0.5*eta
energy2 = -0.75 - eta/12.
energy3 = 0.
energy4 = -3.375 + (19*eta)/8. - pow(eta,2)/24.
energy5 = 0.
energy6 = -10.546875 - (155*pow(eta,2))/96. - (35*pow(eta,3))/5184. \
+ eta*(59.80034722222222 - (205*pow(lal.PI,2))/96.)
energy3 += (32*beta)/113. + (52*chisym*eta)/113.
energy4 += (-16*sigma12)/79. - (16*sigmaqm)/81.
energy5 += (96*beta)/113. + ((-124*beta)/339. - (522*chisym)/113.)*eta \
- (710*chisym*pow(eta,2))/339.
return (energy0, energy2, energy3, energy4, energy5, energy6)
def meco_velocity(m1, m2, chi1, chi2):
"""
Returns the velocity of the minimum energy cutoff for 3.5pN (2.5pN spin)
Parameters
----------
m1 : float
First component mass in solar masses
m2 : float
Second component mass in solar masses
chi1 : float
First component dimensionless spin S_1/m_1^2 projected onto L
chi2 : float
Second component dimensionless spin S_2/m_2^2 projected onto L
Returns
-------
v : float
Velocity (dimensionless)
"""
_, energy2, energy3, energy4, energy5, energy6 = \
_energy_coeffs(m1, m2, chi1, chi2)
def eprime(v):
return 2. + v * v * (4.*energy2 + v * (5.*energy3 \
+ v * (6.*energy4
+ v * (7.*energy5 + 8.*energy6 * v))))
return bisect(eprime, 0.05, 1.0)
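# Sketch for a hypothetical equal-mass non-spinning binary: the MECO velocity
# is dimensionless and falls well inside the (0.05, 1.0) bracket that bisect
# searches above.
def _sketch_meco_velocity():
    v = meco_velocity(10.0, 10.0, 0.0, 0.0)
    assert 0.05 < v < 1.0
    return v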
def _meco_frequency(m1, m2, chi1, chi2):
"""Returns the frequency of the minimum energy cutoff for 3.5pN (2.5pN spin)
"""
return velocity_to_frequency(meco_velocity(m1, m2, chi1, chi2), m1+m2)
meco_frequency = numpy.vectorize(_meco_frequency)
def _dtdv_coeffs(m1, m2, chi1, chi2):
""" Returns the dt/dv coefficients up to 3.5pN (2.5pN spin)
"""
mtot = m1 + m2
eta = m1*m2 / (mtot*mtot)
chi = (m1*chi1 + m2*chi2) / mtot
chisym = (chi1 + chi2) / 2.
beta = (113.*chi - 76.*eta*chisym)/12.
sigma12 = 79.*eta*chi1*chi2/8.
sigmaqm = 81.*m1*m1*chi1*chi1/(16.*mtot*mtot) \
+ 81.*m2*m2*chi2*chi2/(16.*mtot*mtot)
dtdv0 = 1. # FIXME: Wrong but doesn't matter for now.
dtdv2 = (1./336.) * (743. + 924.*eta)
dtdv3 = -4. * lal.PI + beta
dtdv4 = (3058673. + 5472432.*eta + 4353552.*eta*eta)/1016064. - sigma12 - sigmaqm
dtdv5 = (1./672.) * lal.PI * (-7729. + 1092.*eta) + (146597.*beta/18984. + 42.*beta*eta/113. - 417307.*chisym*eta/18984. - 1389.*chisym*eta*eta/226.)
dtdv6 = 22.065 + 165.416*eta - 2.20067*eta*eta + 4.93152*eta*eta*eta
dtdv6log = 1712./315.
dtdv7 = (lal.PI/1016064.) * (-15419335. - 12718104.*eta + 4975824.*eta*eta)
return (dtdv0, dtdv2, dtdv3, dtdv4, dtdv5, dtdv6, dtdv6log, dtdv7)
def _dtdv_cutoff_velocity(m1, m2, chi1, chi2):
_, dtdv2, dtdv3, dtdv4, dtdv5, dtdv6, dtdv6log, dtdv7 = _dtdv_coeffs(m1, m2, chi1, chi2)
def dtdv_func(v):
x = dtdv7
x = v * x + dtdv6 + dtdv6log * 3. * numpy.log(v)
x = v * x + dtdv5
x = v * x + dtdv4
x = v * x + dtdv3
x = v * x + dtdv2
return v * v * x + 1.
if dtdv_func(1.0) < 0.:
return bisect(dtdv_func, 0.05, 1.0)
else:
return 1.0
def energy_coefficients(m1, m2, s1z=0, s2z=0, phase_order=-1, spin_order=-1):
""" Return the energy coefficients. This assumes that the system has aligned spins only.
"""
implemented_phase_order = 7
implemented_spin_order = 7
if phase_order > implemented_phase_order:
raise ValueError("pN coeffiecients of that order have not been implemented")
elif phase_order == -1:
phase_order = implemented_phase_order
if spin_order > implemented_spin_order:
raise ValueError("pN coeffiecients of that order have not been implemented")
elif spin_order == -1:
spin_order = implemented_spin_order
qmdef1 = 1.0
qmdef2 = 1.0
M = m1 + m2
dm = (m1-m2)/M
m1M = m1 / M
m2M = m2 / M
s1z = s1z * m1M * m1M
s2z = s2z * m2M * m2M
_, eta = mass1_mass2_to_mchirp_eta(m1, m2)
ecof = numpy.zeros(phase_order+1)
# Orbital terms
if phase_order >= 0:
ecof[0] = 1.0
if phase_order >= 1:
ecof[1] = 0
if phase_order >= 2:
ecof[2] = -(1.0/12.0) * (9.0 + eta)
if phase_order >= 3:
ecof[3] = 0
if phase_order >= 4:
ecof[4] = (-81.0 + 57.0*eta - eta*eta) / 24.0
if phase_order >= 5:
ecof[5] = 0
if phase_order >= 6:
ecof[6] = - 675.0/64.0 + ( 34445.0/576.0 \
- 205.0/96.0 * lal.PI * lal.PI ) * eta \
- (155.0/96.0) *eta * eta - 35.0/5184.0 * eta * eta
# Spin terms
ESO15s1 = 8.0/3.0 + 2.0*m2/m1
ESO15s2 = 8.0/3.0 + 2.0*m1/m2
ESS2 = 1.0 / eta
EQM2s1 = qmdef1/2.0/m1M/m1M
EQM2s1L = -qmdef1*3.0/2.0/m1M/m1M
#EQM2s2 = qmdef2/2.0/m2M/m2M
EQM2s2L = -qmdef2*3.0/2.0/m2M/m2M
ESO25s1 = 11.0 - 61.0*eta/9.0 + (dm/m1M) * (-3.0 + 10.*eta/3.0)
ESO25s2 = 11.0 - 61.0*eta/9.0 + (dm/m2M) * (3.0 - 10.*eta/3.0)
ESO35s1 = 135.0/4.0 - 367.0*eta/4.0 + 29.0*eta*eta/12.0 + (dm/m1M) * (-27.0/4.0 + 39.0*eta - 5.0*eta*eta/4.0)
ESO35s2 = 135.0/4.0 - 367.0*eta/4.0 + 29.0*eta*eta/12.0 - (dm/m2M) * (-27.0/4.0 + 39.0*eta - 5.0*eta*eta/4.0)
if spin_order >=3:
ecof[3] += ESO15s1 * s1z + ESO15s2 * s2z
if spin_order >=4:
ecof[4] += ESS2 * (s1z*s2z - 3.0*s1z*s2z)
ecof[4] += EQM2s1*s1z*s1z + EQM2s1*s2z*s2z + EQM2s1L*s1z*s1z + EQM2s2L*s2z*s2z
if spin_order >=5:
ecof[5] = ESO25s1*s1z + ESO25s2*s2z
if spin_order >=7:
ecof[7] += ESO35s1*s1z + ESO35s2*s2z
return ecof
def energy(v, mass1, mass2, s1z=0, s2z=0, phase_order=-1, spin_order=-1):
ecof = energy_coefficients(mass1, mass2, s1z, s2z, phase_order, spin_order)
_, eta = mass1_mass2_to_mchirp_eta(mass1, mass2)
amp = - (1.0/2.0) * eta
e = 0.0
for i in numpy.arange(0, len(ecof), 1):
e += v**(i+2.0) * ecof[i]
return e * amp
def meco2(m1, m2, s1z=0, s2z=0, phase_order=-1, spin_order=-1):
ecof = energy_coefficients(m1, m2, s1z, s2z, phase_order, spin_order)
def test(v):
de = 0
for i in numpy.arange(0, len(ecof), 1):
de += v**(i+1.0)* ecof[i] * (i + 2)
return de
return bisect(test, 0.001, 1.0)
def t2_cutoff_velocity(m1, m2, chi1, chi2):
return min(meco_velocity(m1,m2,chi1,chi2), _dtdv_cutoff_velocity(m1,m2,chi1,chi2))
def t2_cutoff_frequency(m1, m2, chi1, chi2):
return velocity_to_frequency(t2_cutoff_velocity(m1, m2, chi1, chi2), m1 + m2)
t4_cutoff_velocity = meco_velocity
t4_cutoff_frequency = meco_frequency
# Hybrid MECO in arXiv:1602.03134
# To obtain the MECO, find minimum in v of eq. (6)
def kerr_lightring(v, chi):
"""Return the function whose first root defines the Kerr light ring"""
return 1 + chi * v**3 - 3 * v**2 * (1 - chi * v**3)**(1./3)
def kerr_lightring_velocity(chi):
"""Return the velocity at the Kerr light ring"""
# If chi > 0.9996, the algorithm cannot solve the function
if chi >= 0.9996:
        return brentq(kerr_lightring, 0, 0.8, args=(0.9996,))
    else:
        return brentq(kerr_lightring, 0, 0.8, args=(chi,))
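# Sanity sketch: for chi = 0 the light-ring condition above reduces to
# 1 - 3*v**2 = 0, so the root is v = 1/sqrt(3), roughly 0.577.
def _sketch_lightring():
    v = kerr_lightring_velocity(0.0)
    assert abs(v - 3 ** -0.5) < 1e-6
    return v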
def hybridEnergy(v, m1, m2, chi1, chi2, qm1, qm2):
"""Return hybrid MECO energy.
Return the hybrid energy [eq. (6)] whose minimum defines the hybrid MECO
up to 3.5PN (including the 3PN spin-spin)
Parameters
----------
    v : float
        Post-Newtonian velocity parameter (dimensionless).
    m1 : float
Mass of the primary object in solar masses.
m2 : float
Mass of the secondary object in solar masses.
chi1: float
Dimensionless spin of the primary object.
chi2: float
Dimensionless spin of the secondary object.
qm1: float
Quadrupole-monopole term of the primary object (1 for black holes).
qm2: float
Quadrupole-monopole term of the secondary object (1 for black holes).
Returns
-------
h_E: float
The hybrid energy as a function of v
"""
pi_sq = numpy.pi**2
v2, v3, v4, v5, v6, v7 = v**2, v**3, v**4, v**5, v**6, v**7
chi1_sq, chi2_sq = chi1**2, chi2**2
m1, m2 = float(m1), float(m2)
M = float(m1 + m2)
M_2, M_4 = M**2, M**4
eta = m1 * m2 / M_2
eta2, eta3 = eta**2, eta**3
m1_2, m1_4 = m1**2, m1**4
m2_2, m2_4 = m2**2, m2**4
chi = (chi1 * m1 + chi2 * m2) / M
Kerr = -1. + (1. - 2. * v2 * (1. - chi * v3)**(1./3.)) / \
numpy.sqrt((1. - chi * v3) * (1. + chi * v3 - 3. * v2 * (1 - chi * v3)**(1./3.)))
h_E = Kerr - \
(v2 / 2.) * \
(
- eta * v2 / 12. - 2 * (chi1 + chi2) * eta * v3 / 3. +
(19. * eta / 8. - eta2 / 24. + chi1_sq * m1_2 * (1 - qm1) / M_2 +
chi2_sq * m2_2 * (1 - qm2) / M_2) * v4
- 1. / 9. * (120. * (chi1 + chi2) * eta2 +
(76. * chi1 + 45. * chi2) * m1_2 * eta / M_2 +
(45. * chi1 + 76. * chi2) * m2_2 * eta / M_2) * v5
+ (34445. * eta / 576. - 205. * pi_sq * eta / 96. - 155. * eta2 / 96. -
35. * eta3 / 5184. +
5. / 18. * (21. * chi1_sq * (1. - qm1) * m1_4 / M_4 +
21. * chi2_sq * (1. - qm2) * m2_4 / M_4 +
(chi1_sq * (56. - 27. * qm1) + 20. * chi1 * chi2) * eta * m1_2 / M_2 +
(chi2_sq * (56. - 27. * qm2) + 20. * chi1 * chi2) * eta * m2_2 / M_2 +
(chi1_sq * (31. - 9. * qm1) + 38. * chi1 * chi2 +
chi2_sq * (31. - 9. * qm2)) * eta2)) * v6
- eta / 12. * (3. * (292. * chi1 + 81. * chi2) * m1_4 / M_4 +
3. * (81. * chi1 + 292. * chi2) * m2_4 / M_4 +
4. * (673. * chi1 + 360. * chi2) * eta * m1_2 / M_2 +
4. * (360. * chi1 + 673. * chi2) * eta * m2_2 / M_2 +
3012. * eta2 * (chi1 + chi2)) * v7
)
return h_E
def hybrid_meco_velocity(m1, m2, chi1, chi2, qm1=None, qm2=None):
"""Return the velocity of the hybrid MECO
Parameters
----------
m1 : float
Mass of the primary object in solar masses.
m2 : float
Mass of the secondary object in solar masses.
chi1: float
Dimensionless spin of the primary object.
chi2: float
Dimensionless spin of the secondary object.
qm1: {None, float}, optional
Quadrupole-monopole term of the primary object (1 for black holes).
If None, will be set to qm1 = 1.
qm2: {None, float}, optional
Quadrupole-monopole term of the secondary object (1 for black holes).
If None, will be set to qm2 = 1.
Returns
-------
v: float
The velocity (dimensionless) of the hybrid MECO
"""
if qm1 is None:
qm1 = 1
if qm2 is None:
qm2 = 1
# Set bounds at 0.1 to skip v=0 and at the lightring velocity
chi = (chi1 * m1 + chi2 * m2) / (m1 + m2)
vmax = kerr_lightring_velocity(chi) - 0.01
return minimize(hybridEnergy, 0.2, args=(m1, m2, chi1, chi2, qm1, qm2),
bounds=[(0.1, vmax)]).x.item()
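# Usage sketch (hypothetical masses and spins): the minimizer is bracketed
# between v = 0.1 and just below the Kerr light-ring velocity, so the result
# is always a physically sensible PN velocity.
def _sketch_hybrid_meco():
    v = hybrid_meco_velocity(10.0, 5.0, 0.3, 0.1)
    assert 0.1 <= v < 1.0
    return v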
def hybrid_meco_frequency(m1, m2, chi1, chi2, qm1=None, qm2=None):
"""Return the frequency of the hybrid MECO
Parameters
----------
m1 : float
Mass of the primary object in solar masses.
m2 : float
Mass of the secondary object in solar masses.
chi1: float
Dimensionless spin of the primary object.
chi2: float
Dimensionless spin of the secondary object.
qm1: {None, float}, optional
Quadrupole-monopole term of the primary object (1 for black holes).
If None, will be set to qm1 = 1.
qm2: {None, float}, optional
Quadrupole-monopole term of the secondary object (1 for black holes).
If None, will be set to qm2 = 1.
Returns
-------
f: float
The frequency (in Hz) of the hybrid MECO
"""
if qm1 is None:
qm1 = 1
if qm2 is None:
qm2 = 1
return velocity_to_frequency(hybrid_meco_velocity(m1, m2, chi1, chi2, qm1, qm2), m1 + m2)
def jframe_to_l0frame(mass1, mass2, f_ref, phiref=0., thetajn=0., phijl=0.,
spin1_a=0., spin2_a=0.,
spin1_polar=0., spin2_polar=0.,
spin12_deltaphi=0.):
"""Converts J-frame parameters into L0 frame.
Parameters
----------
mass1 : float
The mass of the first component object in the
binary (in solar masses)
mass2 : float
The mass of the second component object in the
binary (in solar masses)
f_ref : float
The reference frequency.
thetajn : float
        Angle between the line of sight and the total angular momentum J.
phijl : float
Azimuthal angle of L on its cone about J.
spin1_a : float
The dimensionless spin magnitude :math:`|\\vec{{s}}_1/m^2_1|`.
spin2_a : float
The dimensionless spin magnitude :math:`|\\vec{{s}}_2/m^2_2|`.
spin1_polar : float
Angle between L and the spin magnitude of the larger object.
spin2_polar : float
        Angle between L and the spin magnitude of the smaller object.
spin12_deltaphi : float
Difference between the azimuthal angles of the spin of the larger
object (S1) and the spin of the smaller object (S2).
Returns
-------
dict :
Dictionary of:
* inclination : float
Inclination (rad), defined as the angle between
the orbital angular momentum L and the
line-of-sight at the reference frequency.
* spin1x : float
The x component of the first binary component's
dimensionless spin.
* spin1y : float
The y component of the first binary component's
dimensionless spin.
* spin1z : float
The z component of the first binary component's
dimensionless spin.
* spin2x : float
The x component of the second binary component's
dimensionless spin.
* spin2y : float
The y component of the second binary component's
dimensionless spin.
* spin2z : float
The z component of the second binary component's
dimensionless spin.
"""
inclination, spin1x, spin1y, spin1z, spin2x, spin2y, spin2z = \
lalsimulation.SimInspiralTransformPrecessingNewInitialConditions(
thetajn, phijl, spin1_polar, spin2_polar, spin12_deltaphi,
spin1_a, spin2_a, mass1*lal.MSUN_SI, mass2*lal.MSUN_SI, f_ref,
phiref)
out = {'inclination': inclination,
'spin1x': spin1x,
'spin1y': spin1y,
'spin1z': spin1z,
'spin2x': spin2x,
'spin2y': spin2y,
'spin2z': spin2z}
return out
def l0frame_to_jframe(mass1, mass2, f_ref, phiref=0., inclination=0.,
spin1x=0., spin1y=0., spin1z=0.,
spin2x=0., spin2y=0., spin2z=0.):
"""Converts L0-frame parameters to J-frame.
Parameters
----------
mass1 : float
The mass of the first component object in the
binary (in solar masses)
mass2 : float
The mass of the second component object in the
binary (in solar masses)
f_ref : float
The reference frequency.
phiref : float
The orbital phase at ``f_ref``.
inclination : float
Inclination (rad), defined as the angle between
the orbital angular momentum L and the
line-of-sight at the reference frequency.
spin1x : float
The x component of the first binary component's
dimensionless spin.
spin1y : float
The y component of the first binary component's
dimensionless spin.
spin1z : float
The z component of the first binary component's
dimensionless spin.
spin2x : float
The x component of the second binary component's
dimensionless spin.
spin2y : float
The y component of the second binary component's
dimensionless spin.
spin2z : float
The z component of the second binary component's
dimensionless spin.
Returns
-------
dict :
Dictionary of:
* thetajn : float
          Angle between the line of sight and the total angular momentum J.
* phijl : float
Azimuthal angle of L on its cone about J.
* spin1_a : float
The dimensionless spin magnitude :math:`|\\vec{{s}}_1/m^2_1|`.
* spin2_a : float
The dimensionless spin magnitude :math:`|\\vec{{s}}_2/m^2_2|`.
* spin1_polar : float
Angle between L and the spin magnitude of the larger object.
* spin2_polar : float
          Angle between L and the spin magnitude of the smaller object.
* spin12_deltaphi : float
Difference between the azimuthal angles of the spin of the larger
object (S1) and the spin of the smaller object (S2).
"""
# Note: unlike other LALSimulation functions, this one takes masses in
# solar masses
thetajn, phijl, s1pol, s2pol, s12_deltaphi, spin1_a, spin2_a = \
lalsimulation.SimInspiralTransformPrecessingWvf2PE(
inclination, spin1x, spin1y, spin1z, spin2x, spin2y, spin2z,
mass1, mass2, f_ref, phiref)
out = {'thetajn': thetajn,
'phijl': phijl,
'spin1_polar': s1pol,
'spin2_polar': s2pol,
'spin12_deltaphi': s12_deltaphi,
'spin1_a': spin1_a,
'spin2_a': spin2_a}
return out
| 40,056
| 34.990117
| 153
|
py
|
pycbc
|
pycbc-master/pycbc/_version_helper.py
|
# Based on generateGitID.sh by Reinhard Prix
#
# Copyright (C) 2009,2010, Adam Mercer <adam.mercer@ligo.org>
# Copyright (C) 2009,2010, Nickolas Fotopoulos <nvf@gravity.phys.uwm.edu>
# Copyright (C) 2008,2009, John T. Whelan <john.whelan@ligo.org>
# Copyright (C) 2008, Reinhard Prix <reinhard.ligo.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'Adam Mercer <adam.mercer@ligo.org>'
import os
import time
import subprocess
import re
import distutils.version
class GitInfo(object):
def __init__(self):
self.date = None
self.hash = None
self.branch = None
self.tag = None
self.author = None
self.committer = None
self.status = None
self.builder = None
self.build_date = None
class GitInvocationError(LookupError):
pass
def call(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
on_error='ignore', returncode=False):
"""Run the given command (with shell=False) and return the output as a
string.
Strips the output of enclosing whitespace.
If the return code is non-zero, throw GitInvocationError.
"""
# start external command process
    p = subprocess.Popen(command, stdout=stdout,
                         stderr=stderr)
# get outputs
out, _ = p.communicate()
# throw exception if process failed
if p.returncode != 0 and on_error == 'raise':
raise GitInvocationError('Failed to run "%s"' % " ".join(command))
out = out.decode('utf-8')
if returncode:
return out.strip(), p.returncode
else:
return out.strip()
def get_build_name(git_path='git'):
"""Find the username of the current builder
"""
    name, retcode = call((git_path, 'config', 'user.name'), returncode=True)
    if retcode:
        name = "Unknown User"
    email, retcode = call((git_path, 'config', 'user.email'), returncode=True)
if retcode:
email = ""
return "%s <%s>" % (name, email)
def get_build_date():
"""Returns the current datetime as the git build date
"""
return time.strftime('%Y-%m-%d %H:%M:%S +0000', time.gmtime())
def get_last_commit(git_path='git'):
"""Returns the details of the last git commit
Returns a tuple (hash, date, author name, author e-mail,
committer name, committer e-mail).
"""
hash_, udate, aname, amail, cname, cmail = (
call((git_path, 'log', '-1',
'--pretty=format:%H,%ct,%an,%ae,%cn,%ce')).split(","))
date = time.strftime('%Y-%m-%d %H:%M:%S +0000', time.gmtime(float(udate)))
author = '%s <%s>' % (aname, amail)
committer = '%s <%s>' % (cname, cmail)
return hash_, date, author, committer
def get_git_branch(git_path='git'):
"""Returns the name of the current git branch
"""
branch_match = call((git_path, 'rev-parse', '--symbolic-full-name', 'HEAD'))
if branch_match == "HEAD":
return None
else:
return os.path.basename(branch_match)
def get_git_tag(hash_, git_path='git'):
"""Returns the name of the current git tag
"""
tag, status = call((git_path, 'describe', '--exact-match',
'--tags', hash_), returncode=True)
if status == 0:
return tag
else:
return None
def get_num_commits():
return call(('git', 'rev-list', '--count', 'HEAD'))
def get_git_status(git_path='git'):
"""Returns the state of the git working copy
"""
status_output = subprocess.call((git_path, 'diff-files', '--quiet'))
if status_output != 0:
return 'UNCLEAN: Modified working tree'
else:
# check index for changes
status_output = subprocess.call((git_path, 'diff-index', '--cached',
'--quiet', 'HEAD'))
if status_output != 0:
return 'UNCLEAN: Modified index'
else:
return 'CLEAN: All modifications committed'
def determine_latest_release_version():
"""Query the git repository for the last released version of the code.
"""
git_path = call(('which', 'git'))
# Get all tags
tag_list = call((git_path, 'tag')).split('\n')
# Reduce to only versions
tag_list = [t[1:] for t in tag_list if t.startswith('v')]
# Determine if indeed a tag and store largest
latest_version = None
latest_version_string = None
    re_magic = re.compile(r"\d+\.\d+\.\d+$")
for tag in tag_list:
# Is this a version string
if re_magic.match(tag):
curr_version = distutils.version.StrictVersion(tag)
if latest_version is None or curr_version > latest_version:
latest_version = curr_version
latest_version_string = tag
return latest_version_string
def generate_git_version_info():
"""Query the git repository information to generate a version module.
"""
info = GitInfo()
git_path = call(('which', 'git'))
# get build info
info.builder = get_build_name()
info.build_date = get_build_date()
# parse git ID
info.hash, info.date, info.author, info.committer = (
get_last_commit(git_path))
# determine branch
info.branch = get_git_branch(git_path)
# determine tag
info.tag = get_git_tag(info.hash, git_path)
# determine version
if info.tag:
info.version = info.tag.strip('v')
info.release = not re.search('[a-z]', info.version.lower())
else:
info.version = '0.0a' + get_num_commits()
info.release = False
# Determine *last* stable release
info.last_release = determine_latest_release_version()
# refresh index
call((git_path, 'update-index', '-q', '--refresh'))
# check working copy for changes
info.status = get_git_status(git_path)
return info
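# Example (editor's sketch, not part of the upstream module): gathering the
# version info from inside a git checkout. The values depend on the state of
# the repository, so the calls are shown unevaluated.
#
#     >>> info = generate_git_version_info()
#     >>> info.hash, info.branch, info.status        # doctest: +SKIP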
| 6,380
| 29.5311
| 80
|
py
|
pycbc
|
pycbc-master/pycbc/conversions.py
|
# Copyright (C) 2017 Collin Capano, Christopher M. Biwer, Duncan Brown,
# and Steven Reyes
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides a library of functions that calculate waveform parameters
from other parameters. All exposed functions in this module's namespace return
one parameter given a set of inputs.
"""
import copy
import numpy
import lal
from pycbc.detector import Detector
import pycbc.cosmology
import pycbc.libutils
from .coordinates import (
spherical_to_cartesian as _spherical_to_cartesian,
cartesian_to_spherical as _cartesian_to_spherical)
from pycbc import neutron_stars as ns
pykerr = pycbc.libutils.import_optional('pykerr')
lalsim = pycbc.libutils.import_optional('lalsimulation')
#
# =============================================================================
#
# Helper functions
#
# =============================================================================
#
def ensurearray(*args):
"""Apply numpy's broadcast rules to the given arguments.
This will ensure that all of the arguments are numpy arrays and that they
all have the same shape. See ``numpy.broadcast_arrays`` for more details.
It also returns a boolean indicating whether any of the inputs were
originally arrays.
Parameters
----------
*args :
The arguments to check.
Returns
-------
list :
A list with length ``N+1`` where ``N`` is the number of given
        arguments. The first N values are the input arguments as ``ndarray``s.
The last value is a boolean indicating whether any of the
inputs was an array.
"""
input_is_array = any(isinstance(arg, numpy.ndarray) for arg in args)
    args = list(numpy.broadcast_arrays(*args))
    args.append(input_is_array)
return args
def formatreturn(arg, input_is_array=False):
"""If the given argument is a numpy array with shape (1,), just returns
that value."""
if not input_is_array and arg.size == 1:
arg = arg.item()
return arg
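# Example (editor's sketch): the ensurearray/formatreturn pair is the pattern
# used by nearly every function below -- broadcast the inputs to a common
# shape, compute, then hand back a plain scalar if the caller passed scalars.
#
#     >>> m1, m2, input_is_array = ensurearray(10., numpy.array([5., 20.]))
#     >>> formatreturn(m1 + m2, input_is_array)
#     array([15., 30.])
#     >>> m1, m2, input_is_array = ensurearray(10., 5.)
#     >>> formatreturn(m1 + m2, input_is_array)
#     15.0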
#
# =============================================================================
#
# Fundamental conversions
#
# =============================================================================
#
def sec_to_year(sec):
""" Converts number of seconds to number of years """
return sec / lal.YRJUL_SI
#
# =============================================================================
#
# CBC mass functions
#
# =============================================================================
#
def primary_mass(mass1, mass2):
"""Returns the larger of mass1 and mass2 (p = primary)."""
mass1, mass2, input_is_array = ensurearray(mass1, mass2)
if mass1.shape != mass2.shape:
raise ValueError("mass1 and mass2 must have same shape")
mp = copy.copy(mass1)
mask = mass1 < mass2
mp[mask] = mass2[mask]
return formatreturn(mp, input_is_array)
def secondary_mass(mass1, mass2):
"""Returns the smaller of mass1 and mass2 (s = secondary)."""
mass1, mass2, input_is_array = ensurearray(mass1, mass2)
if mass1.shape != mass2.shape:
raise ValueError("mass1 and mass2 must have same shape")
ms = copy.copy(mass2)
mask = mass1 < mass2
ms[mask] = mass1[mask]
return formatreturn(ms, input_is_array)
def mtotal_from_mass1_mass2(mass1, mass2):
"""Returns the total mass from mass1 and mass2."""
return mass1 + mass2
def q_from_mass1_mass2(mass1, mass2):
"""Returns the mass ratio m1/m2, where m1 >= m2."""
return primary_mass(mass1, mass2) / secondary_mass(mass1, mass2)
def invq_from_mass1_mass2(mass1, mass2):
"""Returns the inverse mass ratio m2/m1, where m1 >= m2."""
return secondary_mass(mass1, mass2) / primary_mass(mass1, mass2)
def eta_from_mass1_mass2(mass1, mass2):
"""Returns the symmetric mass ratio from mass1 and mass2."""
return mass1*mass2 / (mass1 + mass2)**2.
def mchirp_from_mass1_mass2(mass1, mass2):
"""Returns the chirp mass from mass1 and mass2."""
return eta_from_mass1_mass2(mass1, mass2)**(3./5) * (mass1 + mass2)
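# Example (editor's sketch): for an equal-mass 10+10 solar-mass binary,
# eta = 0.25, so the chirp mass is 0.25**(3./5) * 20 ~ 8.71 solar masses.
#
#     >>> round(mchirp_from_mass1_mass2(10., 10.), 2)
#     8.71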
def mass1_from_mtotal_q(mtotal, q):
"""Returns a component mass from the given total mass and mass ratio.
If the mass ratio q is >= 1, the returned mass will be the primary
(heavier) mass. If q < 1, the returned mass will be the secondary
(lighter) mass.
"""
return q*mtotal / (1. + q)
def mass2_from_mtotal_q(mtotal, q):
"""Returns a component mass from the given total mass and mass ratio.
If the mass ratio q is >= 1, the returned mass will be the secondary
(lighter) mass. If q < 1, the returned mass will be the primary (heavier)
mass.
"""
return mtotal / (1. + q)
def mass1_from_mtotal_eta(mtotal, eta):
"""Returns the primary mass from the total mass and symmetric mass
ratio.
"""
return 0.5 * mtotal * (1.0 + (1.0 - 4.0 * eta)**0.5)
def mass2_from_mtotal_eta(mtotal, eta):
"""Returns the secondary mass from the total mass and symmetric mass
ratio.
"""
return 0.5 * mtotal * (1.0 - (1.0 - 4.0 * eta)**0.5)
def mtotal_from_mchirp_eta(mchirp, eta):
"""Returns the total mass from the chirp mass and symmetric mass ratio.
"""
return mchirp / eta**(3./5.)
def mass1_from_mchirp_eta(mchirp, eta):
"""Returns the primary mass from the chirp mass and symmetric mass ratio.
"""
mtotal = mtotal_from_mchirp_eta(mchirp, eta)
return mass1_from_mtotal_eta(mtotal, eta)
def mass2_from_mchirp_eta(mchirp, eta):
"""Returns the primary mass from the chirp mass and symmetric mass ratio.
"""
mtotal = mtotal_from_mchirp_eta(mchirp, eta)
return mass2_from_mtotal_eta(mtotal, eta)
def _mass2_from_mchirp_mass1(mchirp, mass1):
r"""Returns the secondary mass from the chirp mass and primary mass.
As this is a cubic equation this requires finding the roots and returning
the one that is real. Basically it can be shown that:
.. math::
m_2^3 - a(m_2 + m_1) = 0,
where
.. math::
a = \frac{\mathcal{M}^5}{m_1^3}.
This has 3 solutions but only one will be real.
"""
a = mchirp**5 / mass1**3
roots = numpy.roots([1, 0, -a, -a * mass1])
# Find the real one
real_root = roots[(abs(roots - roots.real)).argmin()]
return real_root.real
mass2_from_mchirp_mass1 = numpy.vectorize(_mass2_from_mchirp_mass1)
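# Example (editor's sketch): the cubic inversion is consistent with
# mchirp_from_mass1_mass2 above, up to floating-point error.
#
#     >>> mc = mchirp_from_mass1_mass2(10., 5.)
#     >>> round(float(mass2_from_mchirp_mass1(mc, 10.)), 6)
#     5.0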
def _mass_from_knownmass_eta(known_mass, eta, known_is_secondary=False,
force_real=True):
r"""Returns the other component mass given one of the component masses
and the symmetric mass ratio.
This requires finding the roots of the quadratic equation:
.. math::
\eta m_2^2 + (2\eta - 1)m_1 m_2 + \eta m_1^2 = 0.
This has two solutions which correspond to :math:`m_1` being the heavier
mass or it being the lighter mass. By default, `known_mass` is assumed to
be the heavier (primary) mass, and the smaller solution is returned. Use
    `known_is_secondary=True` to invert.
Parameters
----------
known_mass : float
The known component mass.
eta : float
The symmetric mass ratio.
known_is_secondary : {False, bool}
Whether the known component mass is the primary or the secondary. If
True, `known_mass` is assumed to be the secondary (lighter) mass and
the larger solution is returned. Otherwise, the smaller solution is
returned. Default is False.
force_real : {True, bool}
Force the returned mass to be real.
Returns
-------
float
The other component mass.
"""
roots = numpy.roots([eta, (2*eta - 1) * known_mass, eta * known_mass**2.])
if force_real:
roots = numpy.real(roots)
if known_is_secondary:
return roots[roots.argmax()]
else:
return roots[roots.argmin()]
mass_from_knownmass_eta = numpy.vectorize(_mass_from_knownmass_eta)
def mass2_from_mass1_eta(mass1, eta, force_real=True):
"""Returns the secondary mass from the primary mass and symmetric mass
ratio.
"""
return mass_from_knownmass_eta(mass1, eta, known_is_secondary=False,
force_real=force_real)
def mass1_from_mass2_eta(mass2, eta, force_real=True):
"""Returns the primary mass from the secondary mass and symmetric mass
ratio.
"""
return mass_from_knownmass_eta(mass2, eta, known_is_secondary=True,
force_real=force_real)
def eta_from_q(q):
r"""Returns the symmetric mass ratio from the given mass ratio.
This is given by:
.. math::
\eta = \frac{q}{(1+q)^2}.
Note that the mass ratio may be either < 1 or > 1.
"""
return q / (1. + q)**2
def mass1_from_mchirp_q(mchirp, q):
"""Returns the primary mass from the given chirp mass and mass ratio."""
mass1 = q**(2./5.) * (1.0 + q)**(1./5.) * mchirp
return mass1
def mass2_from_mchirp_q(mchirp, q):
"""Returns the secondary mass from the given chirp mass and mass ratio."""
mass2 = q**(-3./5.) * (1.0 + q)**(1./5.) * mchirp
return mass2
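# Example (editor's sketch): (mchirp, q) is an invertible mass
# parametrization; with q = m1/m2 = 3 a (15, 5) binary is recovered.
#
#     >>> mc = mchirp_from_mass1_mass2(15., 5.)
#     >>> round(mass1_from_mchirp_q(mc, 3.), 6)
#     15.0
#     >>> round(mass2_from_mchirp_q(mc, 3.), 6)
#     5.0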
def _a0(f_lower):
"""Used in calculating chirp times: see Cokelaer, arxiv.org:0706.4437
appendix 1, also lalinspiral/python/sbank/tau0tau3.py.
"""
return 5. / (256. * (numpy.pi * f_lower)**(8./3.))
def _a3(f_lower):
"""Another parameter used for chirp times"""
return numpy.pi / (8. * (numpy.pi * f_lower)**(5./3.))
def tau0_from_mtotal_eta(mtotal, eta, f_lower):
r"""Returns :math:`\tau_0` from the total mass, symmetric mass ratio, and
the given frequency.
"""
# convert to seconds
mtotal = mtotal * lal.MTSUN_SI
# formulae from arxiv.org:0706.4437
return _a0(f_lower) / (mtotal**(5./3.) * eta)
def tau0_from_mchirp(mchirp, f_lower):
r"""Returns :math:`\tau_0` from the chirp mass and the given frequency.
"""
# convert to seconds
mchirp = mchirp * lal.MTSUN_SI
# formulae from arxiv.org:0706.4437
return _a0(f_lower) / mchirp ** (5./3.)
def tau3_from_mtotal_eta(mtotal, eta, f_lower):
r"""Returns :math:`\tau_0` from the total mass, symmetric mass ratio, and
the given frequency.
"""
# convert to seconds
mtotal = mtotal * lal.MTSUN_SI
# formulae from arxiv.org:0706.4437
return _a3(f_lower) / (mtotal**(2./3.) * eta)
def tau0_from_mass1_mass2(mass1, mass2, f_lower):
r"""Returns :math:`\tau_0` from the component masses and given frequency.
"""
mtotal = mass1 + mass2
eta = eta_from_mass1_mass2(mass1, mass2)
return tau0_from_mtotal_eta(mtotal, eta, f_lower)
def tau3_from_mass1_mass2(mass1, mass2, f_lower):
r"""Returns :math:`\tau_3` from the component masses and given frequency.
"""
mtotal = mass1 + mass2
eta = eta_from_mass1_mass2(mass1, mass2)
return tau3_from_mtotal_eta(mtotal, eta, f_lower)
def mchirp_from_tau0(tau0, f_lower):
r"""Returns chirp mass from :math:`\tau_0` and the given frequency.
"""
mchirp = (_a0(f_lower) / tau0) ** (3./5.) # in seconds
# convert back to solar mass units
return mchirp / lal.MTSUN_SI
def mtotal_from_tau0_tau3(tau0, tau3, f_lower,
in_seconds=False):
r"""Returns total mass from :math:`\tau_0, \tau_3`."""
mtotal = (tau3 / _a3(f_lower)) / (tau0 / _a0(f_lower))
if not in_seconds:
# convert back to solar mass units
mtotal /= lal.MTSUN_SI
return mtotal
def eta_from_tau0_tau3(tau0, tau3, f_lower):
r"""Returns symmetric mass ratio from :math:`\tau_0, \tau_3`."""
mtotal = mtotal_from_tau0_tau3(tau0, tau3, f_lower,
in_seconds=True)
eta = mtotal**(-2./3.) * (_a3(f_lower) / tau3)
return eta
def mass1_from_tau0_tau3(tau0, tau3, f_lower):
r"""Returns the primary mass from the given :math:`\tau_0, \tau_3`."""
mtotal = mtotal_from_tau0_tau3(tau0, tau3, f_lower)
eta = eta_from_tau0_tau3(tau0, tau3, f_lower)
return mass1_from_mtotal_eta(mtotal, eta)
def mass2_from_tau0_tau3(tau0, tau3, f_lower):
r"""Returns the secondary mass from the given :math:`\tau_0, \tau_3`."""
mtotal = mtotal_from_tau0_tau3(tau0, tau3, f_lower)
eta = eta_from_tau0_tau3(tau0, tau3, f_lower)
return mass2_from_mtotal_eta(mtotal, eta)
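# Example (editor's sketch): the chirp times (tau0, tau3) at a fixed f_lower
# form another invertible mass parametrization, so component masses
# round-trip through them.
#
#     >>> t0 = tau0_from_mass1_mass2(1.4, 1.3, 30.)
#     >>> t3 = tau3_from_mass1_mass2(1.4, 1.3, 30.)
#     >>> round(mass1_from_tau0_tau3(t0, t3, 30.), 6)
#     1.4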
def lambda_tilde(mass1, mass2, lambda1, lambda2):
""" The effective lambda parameter
The mass-weighted dominant effective lambda parameter defined in
https://journals.aps.org/prd/pdf/10.1103/PhysRevD.91.043002
"""
m1, m2, lambda1, lambda2, input_is_array = ensurearray(
mass1, mass2, lambda1, lambda2)
lsum = lambda1 + lambda2
ldiff, _ = ensurearray(lambda1 - lambda2)
mask = m1 < m2
ldiff[mask] = -ldiff[mask]
eta = eta_from_mass1_mass2(m1, m2)
p1 = (lsum) * (1 + 7. * eta - 31 * eta ** 2.0)
p2 = (1 - 4 * eta)**0.5 * (1 + 9 * eta - 11 * eta ** 2.0) * (ldiff)
return formatreturn(8.0 / 13.0 * (p1 + p2), input_is_array)
def lambda_from_mass_tov_file(mass, tov_file, distance=0.):
"""Return the lambda parameter(s) corresponding to the input mass(es)
interpolating from the mass-Lambda data for a particular EOS read in from
an ASCII file.
"""
data = numpy.loadtxt(tov_file)
mass_from_file = data[:, 0]
lambda_from_file = data[:, 1]
mass_src = mass/(1.0 + pycbc.cosmology.redshift(distance))
lambdav = numpy.interp(mass_src, mass_from_file, lambda_from_file)
return lambdav
def ensure_obj1_is_primary(mass1, mass2, *params):
"""
Enforce that the object labelled as 1 is the primary.
Parameters
----------
mass1 : float, numpy.array
Mass values labelled as 1.
mass2 : float, numpy.array
Mass values labelled as 2.
*params :
The binary parameters to be swapped around when mass1 < mass2.
The list must have length 2N and it must be organized so that
params[i] and params[i+1] are the same kind of quantity, but
        for object 1 and object 2, respectively.
E.g., spin1z, spin2z, lambda1, lambda2.
Returns
-------
list :
        A list with mass1, mass2, and params as arrays, with elements
        re-arranged so that object 1 is the primary.
"""
# Check params are 2N
if len(params) % 2 != 0:
raise ValueError("params must be 2N floats or arrays")
    input_properties = ensurearray(*((mass1, mass2) + params))
    input_is_array = input_properties.pop()
# Check inputs are all the same length
shapes = [par.shape for par in input_properties]
if len(set(shapes)) != 1:
raise ValueError("Individual masses and params must have same shape")
# What needs to be swapped
    mask = input_properties[0] < input_properties[1]
# Output containter
output_properties = []
for i in numpy.arange(0, len(shapes), 2):
# primary (p)
p = copy.copy(input_properties[i])
# secondary (s)
s = copy.copy(input_properties[i+1])
# Swap
p[mask] = input_properties[i+1][mask]
s[mask] = input_properties[i][mask]
# Format and include in output object
output_properties.append(formatreturn(p, input_is_array))
output_properties.append(formatreturn(s, input_is_array))
# Release output
return output_properties
def remnant_mass_from_mass1_mass2_spherical_spin_eos(
mass1, mass2, spin1_a=0.0, spin1_polar=0.0, eos='2H',
spin2_a=0.0, spin2_polar=0.0, swap_companions=False,
ns_bh_mass_boundary=None, extrapolate=False):
"""
Function that determines the remnant disk mass of an NS-BH system
using the fit to numerical-relativity results discussed in
Foucart, Hinderer & Nissanke, PRD 98, 081501(R) (2018).
The BH spin may be misaligned with the orbital angular momentum.
In such cases the ISSO is approximated following the approach of
Stone, Loeb & Berger, PRD 87, 084053 (2013), which was originally
devised for a previous NS-BH remnant mass fit of
Foucart, PRD 86, 124007 (2012).
Note: The NS spin does not play any role in this fit!
Parameters
-----------
mass1 : float
The mass of the black hole, in solar masses.
mass2 : float
The mass of the neutron star, in solar masses.
spin1_a : float, optional
The dimensionless magnitude of the spin of mass1. Default = 0.
spin1_polar : float, optional
        The tilt angle of the spin of mass1. Default = 0 (aligned with L).
eos : str, optional
Name of the equation of state being adopted. Default is '2H'.
spin2_a : float, optional
The dimensionless magnitude of the spin of mass2. Default = 0.
spin2_polar : float, optional
        The tilt angle of the spin of mass2. Default = 0 (aligned with L).
swap_companions : boolean, optional
If mass2 > mass1, swap mass and spin of object 1 and 2 prior
to applying the fitting formula (otherwise fail). Default is False.
ns_bh_mass_boundary : float, optional
If mass2 is greater than this value, the neutron star is effectively
treated as a black hole and the returned value is 0. For consistency
with the eos, set this to the maximum mass allowed by the eos; set
a lower value for a more stringent cut. Default is None.
extrapolate : boolean, optional
Invoke extrapolation of NS baryonic mass and NS compactness in
scipy.interpolate.interp1d at low masses. If ns_bh_mass_boundary is
provided, it is applied at high masses, otherwise the equation of
state prescribes the maximum possible mass2. Default is False.
Returns
----------
remnant_mass: float
The remnant mass in solar masses
"""
mass1, mass2, spin1_a, spin1_polar, spin2_a, spin2_polar, \
input_is_array = \
ensurearray(mass1, mass2, spin1_a, spin1_polar, spin2_a, spin2_polar)
# mass1 must be greater than mass2: swap the properties of 1 and 2 or fail
if swap_companions:
mass1, mass2, spin1_a, spin2_a, spin1_polar, spin2_polar = \
ensure_obj1_is_primary(mass1, mass2, spin1_a, spin2_a,
spin1_polar, spin2_polar)
else:
try:
if any(mass2 > mass1) and input_is_array:
                raise ValueError('Require mass1 >= mass2')
except TypeError:
if mass2 > mass1 and not input_is_array:
raise ValueError(f'Require mass1 >= mass2. {mass1} < {mass2}')
eta = eta_from_mass1_mass2(mass1, mass2)
# If a maximum NS mass is not provided, accept all values and
# let the EOS handle this (in ns.initialize_eos)
if ns_bh_mass_boundary is None:
        mask = numpy.ones(ensurearray(mass2)[0].size, dtype=bool)
# Otherwise perform the calculation only for small enough NS masses...
else:
mask = mass2 <= ns_bh_mass_boundary
# ...and return 0's otherwise
remnant_mass = numpy.zeros(ensurearray(mass2)[0].size)
ns_compactness, ns_b_mass = ns.initialize_eos(mass2[mask], eos,
extrapolate=extrapolate)
remnant_mass[mask] = ns.foucart18(
eta[mask], ns_compactness, ns_b_mass,
spin1_a[mask], spin1_polar[mask])
return formatreturn(remnant_mass, input_is_array)
def remnant_mass_from_mass1_mass2_cartesian_spin_eos(
mass1, mass2, spin1x=0.0, spin1y=0.0, spin1z=0.0, eos='2H',
spin2x=0.0, spin2y=0.0, spin2z=0.0, swap_companions=False,
ns_bh_mass_boundary=None, extrapolate=False):
"""
Function that determines the remnant disk mass of an NS-BH system
using the fit to numerical-relativity results discussed in
Foucart, Hinderer & Nissanke, PRD 98, 081501(R) (2018).
The BH spin may be misaligned with the orbital angular momentum.
In such cases the ISSO is approximated following the approach of
Stone, Loeb & Berger, PRD 87, 084053 (2013), which was originally
devised for a previous NS-BH remnant mass fit of
Foucart, PRD 86, 124007 (2012).
Note: NS spin is assumed to be 0!
Parameters
-----------
mass1 : float
The mass of the black hole, in solar masses.
mass2 : float
The mass of the neutron star, in solar masses.
spin1x : float, optional
The dimensionless x-component of the spin of mass1. Default = 0.
spin1y : float, optional
The dimensionless y-component of the spin of mass1. Default = 0.
spin1z : float, optional
The dimensionless z-component of the spin of mass1. Default = 0.
eos: str, optional
Name of the equation of state being adopted. Default is '2H'.
spin2x : float, optional
The dimensionless x-component of the spin of mass2. Default = 0.
spin2y : float, optional
The dimensionless y-component of the spin of mass2. Default = 0.
spin2z : float, optional
The dimensionless z-component of the spin of mass2. Default = 0.
swap_companions : boolean, optional
If mass2 > mass1, swap mass and spin of object 1 and 2 prior
to applying the fitting formula (otherwise fail). Default is False.
ns_bh_mass_boundary : float, optional
If mass2 is greater than this value, the neutron star is effectively
treated as a black hole and the returned value is 0. For consistency
with the eos, set this to the maximum mass allowed by the eos; set
a lower value for a more stringent cut. Default is None.
extrapolate : boolean, optional
Invoke extrapolation of NS baryonic mass and NS compactness in
scipy.interpolate.interp1d at low masses. If ns_bh_mass_boundary is
provided, it is applied at high masses, otherwise the equation of
state prescribes the maximum possible mass2. Default is False.
Returns
----------
remnant_mass: float
The remnant mass in solar masses
"""
spin1_a, _, spin1_polar = _cartesian_to_spherical(spin1x, spin1y, spin1z)
if swap_companions:
spin2_a, _, spin2_polar = _cartesian_to_spherical(spin2x,
spin2y, spin2z)
else:
size = ensurearray(spin1_a)[0].size
spin2_a = numpy.zeros(size)
spin2_polar = numpy.zeros(size)
return remnant_mass_from_mass1_mass2_spherical_spin_eos(
mass1, mass2, spin1_a=spin1_a, spin1_polar=spin1_polar, eos=eos,
spin2_a=spin2_a, spin2_polar=spin2_polar,
swap_companions=swap_companions,
ns_bh_mass_boundary=ns_bh_mass_boundary, extrapolate=extrapolate)
#
# =============================================================================
#
# CBC spin functions
#
# =============================================================================
#
def chi_eff(mass1, mass2, spin1z, spin2z):
"""Returns the effective spin from mass1, mass2, spin1z, and spin2z."""
return (spin1z * mass1 + spin2z * mass2) / (mass1 + mass2)
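# Example (editor's sketch): chi_eff is the mass-weighted aligned spin, so a
# 10+5 binary with spin1z = 0.9 and a non-spinning secondary has
# chi_eff = 0.9 * 10 / 15 = 0.6.
#
#     >>> chi_eff(10., 5., 0.9, 0.)
#     0.6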
def chi_a(mass1, mass2, spin1z, spin2z):
""" Returns the aligned mass-weighted spin difference from mass1, mass2,
spin1z, and spin2z.
"""
return (spin2z * mass2 - spin1z * mass1) / (mass2 + mass1)
def chi_p(mass1, mass2, spin1x, spin1y, spin2x, spin2y):
"""Returns the effective precession spin from mass1, mass2, spin1x,
spin1y, spin2x, and spin2y.
"""
xi1 = secondary_xi(mass1, mass2, spin1x, spin1y, spin2x, spin2y)
xi2 = primary_xi(mass1, mass2, spin1x, spin1y, spin2x, spin2y)
return chi_p_from_xi1_xi2(xi1, xi2)
def phi_a(mass1, mass2, spin1x, spin1y, spin2x, spin2y):
""" Returns the angle between the in-plane perpendicular spins."""
phi1 = phi_from_spinx_spiny(primary_spin(mass1, mass2, spin1x, spin2x),
primary_spin(mass1, mass2, spin1y, spin2y))
phi2 = phi_from_spinx_spiny(secondary_spin(mass1, mass2, spin1x, spin2x),
secondary_spin(mass1, mass2, spin1y, spin2y))
return (phi1 - phi2) % (2 * numpy.pi)
def phi_s(spin1x, spin1y, spin2x, spin2y):
""" Returns the sum of the in-plane perpendicular spins."""
phi1 = phi_from_spinx_spiny(spin1x, spin1y)
phi2 = phi_from_spinx_spiny(spin2x, spin2y)
return (phi1 + phi2) % (2 * numpy.pi)
def chi_eff_from_spherical(mass1, mass2, spin1_a, spin1_polar,
spin2_a, spin2_polar):
"""Returns the effective spin using spins in spherical coordinates."""
spin1z = spin1_a * numpy.cos(spin1_polar)
spin2z = spin2_a * numpy.cos(spin2_polar)
return chi_eff(mass1, mass2, spin1z, spin2z)
def chi_p_from_spherical(mass1, mass2, spin1_a, spin1_azimuthal, spin1_polar,
spin2_a, spin2_azimuthal, spin2_polar):
"""Returns the effective precession spin using spins in spherical
coordinates.
"""
spin1x, spin1y, _ = _spherical_to_cartesian(
spin1_a, spin1_azimuthal, spin1_polar)
spin2x, spin2y, _ = _spherical_to_cartesian(
spin2_a, spin2_azimuthal, spin2_polar)
return chi_p(mass1, mass2, spin1x, spin1y, spin2x, spin2y)
def primary_spin(mass1, mass2, spin1, spin2):
"""Returns the dimensionless spin of the primary mass."""
mass1, mass2, spin1, spin2, input_is_array = ensurearray(
mass1, mass2, spin1, spin2)
sp = copy.copy(spin1)
mask = mass1 < mass2
sp[mask] = spin2[mask]
return formatreturn(sp, input_is_array)
def secondary_spin(mass1, mass2, spin1, spin2):
"""Returns the dimensionless spin of the secondary mass."""
mass1, mass2, spin1, spin2, input_is_array = ensurearray(
mass1, mass2, spin1, spin2)
ss = copy.copy(spin2)
mask = mass1 < mass2
ss[mask] = spin1[mask]
return formatreturn(ss, input_is_array)
def primary_xi(mass1, mass2, spin1x, spin1y, spin2x, spin2y):
"""Returns the effective precession spin argument for the larger mass.
"""
spinx = primary_spin(mass1, mass2, spin1x, spin2x)
spiny = primary_spin(mass1, mass2, spin1y, spin2y)
return chi_perp_from_spinx_spiny(spinx, spiny)
def secondary_xi(mass1, mass2, spin1x, spin1y, spin2x, spin2y):
"""Returns the effective precession spin argument for the smaller mass.
"""
spinx = secondary_spin(mass1, mass2, spin1x, spin2x)
spiny = secondary_spin(mass1, mass2, spin1y, spin2y)
return xi2_from_mass1_mass2_spin2x_spin2y(mass1, mass2, spinx, spiny)
def xi1_from_spin1x_spin1y(spin1x, spin1y):
"""Returns the effective precession spin argument for the larger mass.
This function assumes it's given spins of the primary mass.
"""
return chi_perp_from_spinx_spiny(spin1x, spin1y)
def xi2_from_mass1_mass2_spin2x_spin2y(mass1, mass2, spin2x, spin2y):
"""Returns the effective precession spin argument for the smaller mass.
This function assumes it's given spins of the secondary mass.
"""
q = q_from_mass1_mass2(mass1, mass2)
a1 = 2 + 3 * q / 2
a2 = 2 + 3 / (2 * q)
return a1 / (q**2 * a2) * chi_perp_from_spinx_spiny(spin2x, spin2y)
def chi_perp_from_spinx_spiny(spinx, spiny):
"""Returns the in-plane spin from the x/y components of the spin.
"""
return numpy.sqrt(spinx**2 + spiny**2)
def chi_perp_from_mass1_mass2_xi2(mass1, mass2, xi2):
"""Returns the in-plane spin from mass1, mass2, and xi2 for the
secondary mass.
"""
q = q_from_mass1_mass2(mass1, mass2)
a1 = 2 + 3 * q / 2
a2 = 2 + 3 / (2 * q)
return q**2 * a2 / a1 * xi2
def chi_p_from_xi1_xi2(xi1, xi2):
"""Returns effective precession spin from xi1 and xi2.
"""
xi1, xi2, input_is_array = ensurearray(xi1, xi2)
chi_p = copy.copy(xi1)
mask = xi1 < xi2
chi_p[mask] = xi2[mask]
return formatreturn(chi_p, input_is_array)
def phi1_from_phi_a_phi_s(phi_a, phi_s):
"""Returns the angle between the x-component axis and the in-plane
spin for the primary mass from phi_s and phi_a.
"""
return (phi_s + phi_a) / 2.0
def phi2_from_phi_a_phi_s(phi_a, phi_s):
"""Returns the angle between the x-component axis and the in-plane
spin for the secondary mass from phi_s and phi_a.
"""
return (phi_s - phi_a) / 2.0
def phi_from_spinx_spiny(spinx, spiny):
"""Returns the angle between the x-component axis and the in-plane spin.
"""
phi = numpy.arctan2(spiny, spinx)
return phi % (2 * numpy.pi)
def spin1z_from_mass1_mass2_chi_eff_chi_a(mass1, mass2, chi_eff, chi_a):
"""Returns spin1z.
"""
return (mass1 + mass2) / (2.0 * mass1) * (chi_eff - chi_a)
def spin2z_from_mass1_mass2_chi_eff_chi_a(mass1, mass2, chi_eff, chi_a):
"""Returns spin2z.
"""
return (mass1 + mass2) / (2.0 * mass2) * (chi_eff + chi_a)
def spin1x_from_xi1_phi_a_phi_s(xi1, phi_a, phi_s):
"""Returns x-component spin for primary mass.
"""
phi1 = phi1_from_phi_a_phi_s(phi_a, phi_s)
return xi1 * numpy.cos(phi1)
def spin1y_from_xi1_phi_a_phi_s(xi1, phi_a, phi_s):
"""Returns y-component spin for primary mass.
"""
    phi1 = phi1_from_phi_a_phi_s(phi_a, phi_s)
return xi1 * numpy.sin(phi1)
def spin2x_from_mass1_mass2_xi2_phi_a_phi_s(mass1, mass2, xi2, phi_a, phi_s):
"""Returns x-component spin for secondary mass.
"""
chi_perp = chi_perp_from_mass1_mass2_xi2(mass1, mass2, xi2)
phi2 = phi2_from_phi_a_phi_s(phi_a, phi_s)
return chi_perp * numpy.cos(phi2)
def spin2y_from_mass1_mass2_xi2_phi_a_phi_s(mass1, mass2, xi2, phi_a, phi_s):
"""Returns y-component spin for secondary mass.
"""
chi_perp = chi_perp_from_mass1_mass2_xi2(mass1, mass2, xi2)
phi2 = phi2_from_phi_a_phi_s(phi_a, phi_s)
return chi_perp * numpy.sin(phi2)
def dquadmon_from_lambda(lambdav):
r"""Return the quadrupole moment of a neutron star given its lambda
We use the relations defined here. https://arxiv.org/pdf/1302.4499.pdf.
Note that the convention we use is that:
.. math::
\mathrm{dquadmon} = \bar{Q} - 1.
Where :math:`\bar{Q}` (dimensionless) is the reduced quadrupole moment.
"""
ll = numpy.log(lambdav)
ai = .194
bi = .0936
ci = 0.0474
di = -4.21 * 10**-3.0
ei = 1.23 * 10**-4.0
ln_quad_moment = ai + bi*ll + ci*ll**2.0 + di*ll**3.0 + ei*ll**4.0
return numpy.exp(ln_quad_moment) - 1
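# Example (editor's sketch): at lambda = 1, ln(lambda) = 0, so only the
# leading fit coefficient survives and dquadmon = exp(0.194) - 1 ~ 0.214.
#
#     >>> round(float(dquadmon_from_lambda(1.)), 3)
#     0.214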
def spin_from_pulsar_freq(mass, radius, freq):
"""Returns the dimensionless spin of a pulsar.
Assumes the pulsar is a solid sphere when computing the moment of inertia.
Parameters
----------
mass : float
The mass of the pulsar, in solar masses.
radius : float
The assumed radius of the pulsar, in kilometers.
freq : float
The spin frequency of the pulsar, in Hz.
"""
omega = 2 * numpy.pi * freq
mt = mass * lal.MTSUN_SI
mominert = (2/5.) * mt * (radius * 1000 / lal.C_SI)**2
return mominert * omega / mt**2
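# Example (editor's sketch): a canonical 1.4 solar-mass, 10 km pulsar
# spinning at 300 Hz has a dimensionless spin of roughly 0.12 under the
# solid-sphere assumption above.
#
#     >>> round(spin_from_pulsar_freq(1.4, 10., 300.), 2)
#     0.12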
#
# =============================================================================
#
# Extrinsic parameter functions
#
# =============================================================================
#
def chirp_distance(dist, mchirp, ref_mass=1.4):
"""Returns the chirp distance given the luminosity distance and chirp mass.
"""
return dist * (2.**(-1./5) * ref_mass / mchirp)**(5./6)
def distance_from_chirp_distance_mchirp(chirp_distance, mchirp, ref_mass=1.4):
"""Returns the luminosity distance given a chirp distance and chirp mass.
"""
return chirp_distance * (2.**(-1./5) * ref_mass / mchirp)**(-5./6)
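# Example (editor's sketch): chirp_distance and
# distance_from_chirp_distance_mchirp are inverses; for a 1.4+1.4 binary the
# chirp mass equals 2**(-1./5) * 1.4, so the scale factor is exactly one.
#
#     >>> mc = mchirp_from_mass1_mass2(1.4, 1.4)
#     >>> round(chirp_distance(100., mc), 6)
#     100.0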
_detector_cache = {}
def det_tc(detector_name, ra, dec, tc, ref_frame='geocentric', relative=False):
"""Returns the coalescence time of a signal in the given detector.
Parameters
----------
detector_name : string
The name of the detector, e.g., 'H1'.
ra : float
The right ascension of the signal, in radians.
dec : float
The declination of the signal, in radians.
tc : float
The GPS time of the coalescence of the signal in the `ref_frame`.
    ref_frame : {'geocentric', string}
        The reference frame that the given coalescence time is defined in.
        May specify 'geocentric', or a detector name; default is 'geocentric'.
    relative : bool, optional
        If True, return only the time delay from ``tc`` instead of the
        shifted coalescence time. Default is False.
Returns
-------
float :
The GPS time of the coalescence in detector `detector_name`.
"""
ref_time = tc
if relative:
tc = 0
if ref_frame == detector_name:
return tc
if detector_name not in _detector_cache:
_detector_cache[detector_name] = Detector(detector_name)
detector = _detector_cache[detector_name]
if ref_frame == 'geocentric':
return tc + detector.time_delay_from_earth_center(ra, dec, ref_time)
else:
other = Detector(ref_frame)
return tc + detector.time_delay_from_detector(other, ra, dec, ref_time)
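# Example (editor's sketch): shifting a geocentric coalescence time into the
# LIGO-Hanford frame. The correction is the light travel time from the
# geocenter, so the result differs from the input by at most ~21 ms. Shown
# unevaluated since it needs the detector data bundled with lal.
#
#     >>> det_tc('H1', ra=1.0, dec=0.5, tc=1126259462.0)  # doctest: +SKIP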
def optimal_orientation_from_detector(detector_name, tc):
""" Low-level function to be called from _optimal_dec_from_detector
and _optimal_ra_from_detector"""
d = Detector(detector_name)
ra, dec = d.optimal_orientation(tc)
return ra, dec
def optimal_dec_from_detector(detector_name, tc):
"""For a given detector and GPS time, return the optimal orientation
(directly overhead of the detector) in declination.
Parameters
----------
detector_name : string
The name of the detector, e.g., 'H1'.
tc : float
        The GPS time of the coalescence of the signal.
Returns
-------
float :
The declination of the signal, in radians.
"""
return optimal_orientation_from_detector(detector_name, tc)[1]
def optimal_ra_from_detector(detector_name, tc):
"""For a given detector and GPS time, return the optimal orientation
(directly overhead of the detector) in right ascension.
Parameters
----------
detector_name : string
The name of the detector, e.g., 'H1'.
tc : float
        The GPS time of the coalescence of the signal.
Returns
-------
float :
        The right ascension of the signal, in radians.
"""
return optimal_orientation_from_detector(detector_name, tc)[0]
#
# =============================================================================
#
# Likelihood statistic parameter functions
#
# =============================================================================
#
def snr_from_loglr(loglr):
"""Returns SNR computed from the given log likelihood ratio(s). This is
defined as `sqrt(2*loglr)`.If the log likelihood ratio is < 0, returns 0.
Parameters
----------
loglr : array or float
The log likelihood ratio(s) to evaluate.
Returns
-------
array or float
The SNRs computed from the log likelihood ratios.
"""
singleval = isinstance(loglr, float)
if singleval:
loglr = numpy.array([loglr])
# temporarily quiet sqrt(-1) warnings
with numpy.errstate(invalid="ignore"):
snrs = numpy.sqrt(2*loglr)
snrs[numpy.isnan(snrs)] = 0.
if singleval:
snrs = snrs[0]
return snrs
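# Example (editor's sketch): an SNR-8 signal corresponds to loglr = 32, and
# negative log likelihood ratios are clipped to an SNR of zero.
#
#     >>> float(snr_from_loglr(32.))
#     8.0
#     >>> float(snr_from_loglr(-1.))
#     0.0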
#
# =============================================================================
#
# BH Ringdown functions
#
# =============================================================================
#
def get_lm_f0tau(mass, spin, l, m, n=0, which='both'):
"""Return the f0 and the tau for one or more overtones of an l, m mode.
Parameters
----------
mass : float or array
Mass of the black hole (in solar masses).
spin : float or array
Dimensionless spin of the final black hole.
l : int or array
l-index of the harmonic.
m : int or array
m-index of the harmonic.
n : int or array
Overtone(s) to generate, where n=0 is the fundamental mode.
Default is 0.
which : {'both', 'f0', 'tau'}, optional
What to return; 'both' returns both frequency and tau, 'f0' just
frequency, 'tau' just tau. Default is 'both'.
Returns
-------
f0 : float or array
Returned if ``which`` is 'both' or 'f0'.
The frequency of the QNM(s), in Hz.
tau : float or array
Returned if ``which`` is 'both' or 'tau'.
The damping time of the QNM(s), in seconds.
"""
# convert to arrays
mass, spin, l, m, n, input_is_array = ensurearray(
mass, spin, l, m, n)
    # pykerr broadcasts over the inputs, so each parameter combination is
    # evaluated directly
getf0 = which == 'both' or which == 'f0'
gettau = which == 'both' or which == 'tau'
out = []
if getf0:
f0s = pykerr.qnmfreq(mass, spin, l, m, n)
out.append(formatreturn(f0s, input_is_array))
if gettau:
taus = pykerr.qnmtau(mass, spin, l, m, n)
out.append(formatreturn(taus, input_is_array))
if not (getf0 and gettau):
out = out[0]
return out
def get_lm_f0tau_allmodes(mass, spin, modes):
"""Returns a dictionary of all of the frequencies and damping times for the
requested modes.
Parameters
----------
mass : float or array
Mass of the black hole (in solar masses).
spin : float or array
Dimensionless spin of the final black hole.
modes : list of str
The modes to get. Each string in the list should be formatted
'lmN', where l (m) is the l (m) index of the harmonic and N is the
number of overtones to generate (note, N is not the index of the
overtone).
Returns
-------
f0 : dict
Dictionary mapping the modes to the frequencies. The dictionary keys
are 'lmn' string, where l (m) is the l (m) index of the harmonic and
n is the index of the overtone. For example, '220' is the l = m = 2
mode and the 0th overtone.
tau : dict
Dictionary mapping the modes to the damping times. The keys are the
same as ``f0``.
"""
f0, tau = {}, {}
for lmn in modes:
key = '{}{}{}'
l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2])
for n in range(nmodes):
tmp_f0, tmp_tau = get_lm_f0tau(mass, spin, l, m, n)
f0[key.format(l, abs(m), n)] = tmp_f0
tau[key.format(l, abs(m), n)] = tmp_tau
return f0, tau
def freq_from_final_mass_spin(final_mass, final_spin, l=2, m=2, n=0):
"""Returns QNM frequency for the given mass and spin and mode.
Parameters
----------
final_mass : float or array
Mass of the black hole (in solar masses).
final_spin : float or array
Dimensionless spin of the final black hole.
l : int or array, optional
l-index of the harmonic. Default is 2.
m : int or array, optional
m-index of the harmonic. Default is 2.
n : int or array
Overtone(s) to generate, where n=0 is the fundamental mode.
Default is 0.
Returns
-------
float or array
The frequency of the QNM(s), in Hz.
"""
return get_lm_f0tau(final_mass, final_spin, l, m, n=n, which='f0')
def tau_from_final_mass_spin(final_mass, final_spin, l=2, m=2, n=0):
"""Returns QNM damping time for the given mass and spin and mode.
Parameters
----------
final_mass : float or array
Mass of the black hole (in solar masses).
final_spin : float or array
Dimensionless spin of the final black hole.
l : int or array, optional
l-index of the harmonic. Default is 2.
m : int or array, optional
m-index of the harmonic. Default is 2.
n : int or array
Overtone(s) to generate, where n=0 is the fundamental mode.
Default is 0.
Returns
-------
float or array
The damping time of the QNM(s), in seconds.
"""
return get_lm_f0tau(final_mass, final_spin, l, m, n=n, which='tau')
# The following are from Table VIII, IX, X of Berti et al.,
# PRD 73 064030, arXiv:gr-qc/0512160 (2006).
# Keys are l,m (only n=0 supported). Constants are for converting from
# frequency and damping time to mass and spin.
_berti_spin_constants = {
(2, 2): (0.7, 1.4187, -0.4990),
(2, 1): (-0.3, 2.3561, -0.2277),
(3, 3): (0.9, 2.343, -0.4810),
(4, 4): (1.1929, 3.1191, -0.4825),
}
_berti_mass_constants = {
(2, 2): (1.5251, -1.1568, 0.1292),
(2, 1): (0.6, -0.2339, 0.4175),
(3, 3): (1.8956, -1.3043, 0.1818),
(4, 4): (2.3, -1.5056, 0.2244),
}
def final_spin_from_f0_tau(f0, tau, l=2, m=2):
"""Returns the final spin based on the given frequency and damping time.
.. note::
Currently, only (l,m) = (2,2), (3,3), (4,4), (2,1) are supported.
Any other indices will raise a ``KeyError``.
Parameters
----------
f0 : float or array
Frequency of the QNM (in Hz).
tau : float or array
Damping time of the QNM (in seconds).
l : int, optional
l-index of the harmonic. Default is 2.
m : int, optional
m-index of the harmonic. Default is 2.
Returns
-------
float or array
The spin of the final black hole. If the combination of frequency
and damping times give an unphysical result, ``numpy.nan`` will be
returned.
"""
f0, tau, input_is_array = ensurearray(f0, tau)
# from Berti et al. 2006
a, b, c = _berti_spin_constants[l,m]
origshape = f0.shape
# flatten inputs for storing results
f0 = f0.ravel()
tau = tau.ravel()
spins = numpy.zeros(f0.size)
for ii in range(spins.size):
Q = f0[ii] * tau[ii] * numpy.pi
try:
s = 1. - ((Q-a)/b)**(1./c)
except ValueError:
s = numpy.nan
spins[ii] = s
spins = spins.reshape(origshape)
return formatreturn(spins, input_is_array)
def final_mass_from_f0_tau(f0, tau, l=2, m=2):
"""Returns the final mass (in solar masses) based on the given frequency
and damping time.
.. note::
Currently, only (l,m) = (2,2), (3,3), (4,4), (2,1) are supported.
Any other indices will raise a ``KeyError``.
Parameters
----------
f0 : float or array
Frequency of the QNM (in Hz).
tau : float or array
Damping time of the QNM (in seconds).
l : int, optional
l-index of the harmonic. Default is 2.
m : int, optional
m-index of the harmonic. Default is 2.
Returns
-------
float or array
The mass of the final black hole. If the combination of frequency
and damping times give an unphysical result, ``numpy.nan`` will be
returned.
"""
# from Berti et al. 2006
spin = final_spin_from_f0_tau(f0, tau, l=l, m=m)
a, b, c = _berti_mass_constants[l,m]
return (a + b*(1-spin)**c)/(2*numpy.pi*f0*lal.MTSUN_SI)
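# Example (editor's sketch): the Berti et al. fits approximately invert the
# Kerr QNM spectrum, so the (f0, tau) of a given remnant map back to roughly
# the input mass and spin. Shown unevaluated since the forward direction
# needs the optional pykerr package.
#
#     >>> f0 = freq_from_final_mass_spin(62., 0.67)    # doctest: +SKIP
#     >>> tau = tau_from_final_mass_spin(62., 0.67)    # doctest: +SKIP
#     >>> final_mass_from_f0_tau(f0, tau)              # doctest: +SKIP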
def freqlmn_from_other_lmn(f0, tau, current_l, current_m, new_l, new_m):
"""Returns the QNM frequency (in Hz) of a chosen new (l,m) mode from the
given current (l,m) mode.
Parameters
----------
f0 : float or array
Frequency of the current QNM (in Hz).
tau : float or array
Damping time of the current QNM (in seconds).
    current_l : int
        l-index of the current QNM.
    current_m : int
        m-index of the current QNM.
    new_l : int
        l-index of the new QNM to convert to.
    new_m : int
        m-index of the new QNM to convert to.
Returns
-------
float or array
The frequency of the new (l, m) QNM mode. If the combination of
frequency and damping time provided for the current (l, m) QNM mode
correspond to an unphysical Kerr black hole mass and/or spin,
``numpy.nan`` will be returned.
"""
mass = final_mass_from_f0_tau(f0, tau, l=current_l, m=current_m)
spin = final_spin_from_f0_tau(f0, tau, l=current_l, m=current_m)
mass, spin, input_is_array = ensurearray(mass, spin)
mass[mass < 0] = numpy.nan
spin[numpy.abs(spin) > 0.9996] = numpy.nan
new_f0 = freq_from_final_mass_spin(mass, spin, l=new_l, m=new_m)
return formatreturn(new_f0, input_is_array)
def taulmn_from_other_lmn(f0, tau, current_l, current_m, new_l, new_m):
"""Returns the QNM damping time (in seconds) of a chosen new (l,m) mode
from the given current (l,m) mode.
Parameters
----------
f0 : float or array
Frequency of the current QNM (in Hz).
tau : float or array
Damping time of the current QNM (in seconds).
    current_l : int
        l-index of the current QNM.
    current_m : int
        m-index of the current QNM.
    new_l : int
        l-index of the new QNM to convert to.
    new_m : int
        m-index of the new QNM to convert to.
Returns
-------
float or array
        The damping time of the new (l, m) QNM mode. If the combination of
frequency and damping time provided for the current (l, m) QNM mode
correspond to an unphysical Kerr black hole mass and/or spin,
``numpy.nan`` will be returned.
"""
mass = final_mass_from_f0_tau(f0, tau, l=current_l, m=current_m)
spin = final_spin_from_f0_tau(f0, tau, l=current_l, m=current_m)
mass, spin, input_is_array = ensurearray(mass, spin)
mass[mass < 0] = numpy.nan
spin[numpy.abs(spin) > 0.9996] = numpy.nan
new_tau = tau_from_final_mass_spin(mass, spin, l=new_l, m=new_m)
return formatreturn(new_tau, input_is_array)
def get_final_from_initial(mass1, mass2, spin1x=0., spin1y=0., spin1z=0.,
spin2x=0., spin2y=0., spin2z=0.,
approximant='SEOBNRv4PHM', f_ref=-1):
"""Estimates the final mass and spin from the given initial parameters.
This uses the fits used by either the NRSur7dq4 or EOBNR models for
converting from initial parameters to final, depending on the
``approximant`` argument.
Parameters
----------
mass1 : float
The mass of one of the components, in solar masses.
mass2 : float
The mass of the other component, in solar masses.
spin1x : float, optional
The dimensionless x-component of the spin of mass1. Default is 0.
spin1y : float, optional
The dimensionless y-component of the spin of mass1. Default is 0.
spin1z : float, optional
The dimensionless z-component of the spin of mass1. Default is 0.
spin2x : float, optional
The dimensionless x-component of the spin of mass2. Default is 0.
spin2y : float, optional
The dimensionless y-component of the spin of mass2. Default is 0.
spin2z : float, optional
The dimensionless z-component of the spin of mass2. Default is 0.
approximant : str, optional
The waveform approximant to use for the fit function. If "NRSur7dq4",
the NRSur7dq4Remnant fit in lalsimulation will be used. If "SEOBNRv4",
the ``XLALSimIMREOBFinalMassSpin`` function in lalsimulation will be
used. Otherwise, ``XLALSimIMREOBFinalMassSpinPrec`` from lalsimulation
will be used, with the approximant name passed as the approximant
in that function ("SEOBNRv4PHM" will work with this function).
Default is "SEOBNRv4PHM".
f_ref : float, optional
The reference frequency for the spins. Only used by the NRSur7dq4
fit. Default (-1) will use the default reference frequency for the
approximant.
Returns
-------
final_mass : float
The final mass, in solar masses.
final_spin : float
The dimensionless final spin.
"""
args = (mass1, mass2, spin1x, spin1y, spin1z, spin2x, spin2y, spin2z)
args = ensurearray(*args)
input_is_array = args[-1]
origshape = args[0].shape
# flatten inputs for storing results
args = [a.ravel() for a in args[:-1]]
mass1, mass2, spin1x, spin1y, spin1z, spin2x, spin2y, spin2z = args
final_mass = numpy.full(mass1.shape, numpy.nan)
final_spin = numpy.full(mass1.shape, numpy.nan)
for ii in range(final_mass.size):
m1 = float(mass1[ii])
m2 = float(mass2[ii])
spin1 = list(map(float, [spin1x[ii], spin1y[ii], spin1z[ii]]))
spin2 = list(map(float, [spin2x[ii], spin2y[ii], spin2z[ii]]))
if approximant == 'NRSur7dq4':
from lalsimulation import nrfits
try:
res = nrfits.eval_nrfit(m1*lal.MSUN_SI,
m2*lal.MSUN_SI,
spin1, spin2, 'NRSur7dq4Remnant',
['FinalMass', 'FinalSpin'],
f_ref=f_ref)
except RuntimeError:
continue
final_mass[ii] = res['FinalMass'][0] / lal.MSUN_SI
sf = res['FinalSpin']
final_spin[ii] = (sf**2).sum()**0.5
if sf[-1] < 0:
final_spin[ii] *= -1
elif approximant == 'SEOBNRv4':
_, fm, fs = lalsim.SimIMREOBFinalMassSpin(
m1, m2, spin1, spin2, getattr(lalsim, approximant))
final_mass[ii] = fm * (m1 + m2)
final_spin[ii] = fs
else:
_, fm, fs = lalsim.SimIMREOBFinalMassSpinPrec(
m1, m2, spin1, spin2, getattr(lalsim, approximant))
final_mass[ii] = fm * (m1 + m2)
final_spin[ii] = fs
final_mass = final_mass.reshape(origshape)
final_spin = final_spin.reshape(origshape)
return (formatreturn(final_mass, input_is_array),
formatreturn(final_spin, input_is_array))
def final_mass_from_initial(mass1, mass2, spin1x=0., spin1y=0., spin1z=0.,
spin2x=0., spin2y=0., spin2z=0.,
approximant='SEOBNRv4PHM', f_ref=-1):
"""Estimates the final mass from the given initial parameters.
This uses the fits used by either the NRSur7dq4 or EOBNR models for
converting from initial parameters to final, depending on the
``approximant`` argument.
Parameters
----------
mass1 : float
The mass of one of the components, in solar masses.
mass2 : float
The mass of the other component, in solar masses.
spin1x : float, optional
The dimensionless x-component of the spin of mass1. Default is 0.
spin1y : float, optional
The dimensionless y-component of the spin of mass1. Default is 0.
spin1z : float, optional
The dimensionless z-component of the spin of mass1. Default is 0.
spin2x : float, optional
The dimensionless x-component of the spin of mass2. Default is 0.
spin2y : float, optional
The dimensionless y-component of the spin of mass2. Default is 0.
spin2z : float, optional
The dimensionless z-component of the spin of mass2. Default is 0.
approximant : str, optional
The waveform approximant to use for the fit function. If "NRSur7dq4",
the NRSur7dq4Remnant fit in lalsimulation will be used. If "SEOBNRv4",
the ``XLALSimIMREOBFinalMassSpin`` function in lalsimulation will be
used. Otherwise, ``XLALSimIMREOBFinalMassSpinPrec`` from lalsimulation
will be used, with the approximant name passed as the approximant
in that function ("SEOBNRv4PHM" will work with this function).
Default is "SEOBNRv4PHM".
f_ref : float, optional
The reference frequency for the spins. Only used by the NRSur7dq4
fit. Default (-1) will use the default reference frequency for the
approximant.
Returns
-------
float
The final mass, in solar masses.
"""
return get_final_from_initial(mass1, mass2, spin1x, spin1y, spin1z,
spin2x, spin2y, spin2z, approximant,
f_ref=f_ref)[0]
def final_spin_from_initial(mass1, mass2, spin1x=0., spin1y=0., spin1z=0.,
spin2x=0., spin2y=0., spin2z=0.,
approximant='SEOBNRv4PHM', f_ref=-1):
"""Estimates the final spin from the given initial parameters.
This uses the fits used by either the NRSur7dq4 or EOBNR models for
converting from initial parameters to final, depending on the
``approximant`` argument.
Parameters
----------
mass1 : float
The mass of one of the components, in solar masses.
mass2 : float
The mass of the other component, in solar masses.
spin1x : float, optional
The dimensionless x-component of the spin of mass1. Default is 0.
spin1y : float, optional
The dimensionless y-component of the spin of mass1. Default is 0.
spin1z : float, optional
The dimensionless z-component of the spin of mass1. Default is 0.
spin2x : float, optional
The dimensionless x-component of the spin of mass2. Default is 0.
spin2y : float, optional
The dimensionless y-component of the spin of mass2. Default is 0.
spin2z : float, optional
The dimensionless z-component of the spin of mass2. Default is 0.
approximant : str, optional
The waveform approximant to use for the fit function. If "NRSur7dq4",
the NRSur7dq4Remnant fit in lalsimulation will be used. If "SEOBNRv4",
the ``XLALSimIMREOBFinalMassSpin`` function in lalsimulation will be
used. Otherwise, ``XLALSimIMREOBFinalMassSpinPrec`` from lalsimulation
will be used, with the approximant name passed as the approximant
in that function ("SEOBNRv4PHM" will work with this function).
Default is "SEOBNRv4PHM".
f_ref : float, optional
The reference frequency for the spins. Only used by the NRSur7dq4
fit. Default (-1) will use the default reference frequency for the
approximant.
Returns
-------
float
The dimensionless final spin.
"""
return get_final_from_initial(mass1, mass2, spin1x, spin1y, spin1z,
spin2x, spin2y, spin2z, approximant,
f_ref=f_ref)[1]
#
# =============================================================================
#
# post-Newtonian functions
#
# =============================================================================
#
def velocity_to_frequency(v, M):
""" Calculate the gravitational-wave frequency from the
total mass and invariant velocity.
Parameters
----------
v : float
Invariant velocity
M : float
Binary total mass
Returns
-------
f : float
Gravitational-wave frequency
"""
return v**(3.0) / (M * lal.MTSUN_SI * lal.PI)
def frequency_to_velocity(f, M):
""" Calculate the invariant velocity from the total
mass and gravitational-wave frequency.
Parameters
----------
f: float
Gravitational-wave frequency
M: float
Binary total mass
Returns
-------
v : float or numpy.array
Invariant velocity
"""
return (lal.PI * M * lal.MTSUN_SI * f)**(1.0/3.0)
def f_schwarzchild_isco(M):
"""
Innermost stable circular orbit (ISCO) for a test particle
orbiting a Schwarzschild black hole
Parameters
----------
M : float or numpy.array
Total mass in solar mass units
Returns
-------
f : float or numpy.array
Frequency in Hz
"""
return velocity_to_frequency((1.0/6.0)**(0.5), M)
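# Example (editor's sketch): the Schwarzschild ISCO frequency scales as 1/M,
# roughly 4400 Hz * (Msun / M); for a 20 solar-mass total mass this is
# ~220 Hz.
#
#     >>> round(float(f_schwarzchild_isco(20.)), 0)
#     220.0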
#
# ============================================================================
#
# p-g mode non-linear tide functions
#
# ============================================================================
#
def nltides_coefs(amplitude, n, m1, m2):
"""Calculate the coefficents needed to compute the
shift in t(f) and phi(f) due to non-linear tides.
Parameters
----------
amplitude: float
Amplitude of effect
n: float
Growth dependence of effect
m1: float
Mass of component 1
m2: float
Mass of component 2
Returns
-------
f_ref : float
Reference frequency used to define A and n
t_of_f_factor: float
The constant factor needed to compute t(f)
phi_of_f_factor: float
The constant factor needed to compute phi(f)
"""
# Use 100.0 Hz as a reference frequency
f_ref = 100.0
# Calculate chirp mass
mc = mchirp_from_mass1_mass2(m1, m2)
mc *= lal.lal.MSUN_SI
# Calculate constants in phasing
a = (96./5.) * \
(lal.lal.G_SI * lal.lal.PI * mc * f_ref / lal.lal.C_SI**3.)**(5./3.)
b = 6. * amplitude
t_of_f_factor = -1./(lal.lal.PI*f_ref) * b/(a*a * (n-4.))
phi_of_f_factor = -2.*b / (a*a * (n-3.))
return f_ref, t_of_f_factor, phi_of_f_factor
def nltides_gw_phase_difference(f, f0, amplitude, n, m1, m2):
"""Calculate the gravitational-wave phase shift bwtween
f and f_coalescence = infinity due to non-linear tides.
To compute the phase shift between e.g. f_low and f_isco,
call this function twice and compute the difference.
Parameters
----------
f: float or numpy.array
Frequency from which to compute phase
f0: float or numpy.array
Frequency that NL effects switch on
amplitude: float or numpy.array
Amplitude of effect
n: float or numpy.array
Growth dependence of effect
m1: float or numpy.array
Mass of component 1
m2: float or numpy.array
Mass of component 2
Returns
-------
delta_phi: float or numpy.array
Phase in radians
"""
f, f0, amplitude, n, m1, m2, input_is_array = ensurearray(
f, f0, amplitude, n, m1, m2)
delta_phi = numpy.zeros(m1.shape)
f_ref, _, phi_of_f_factor = nltides_coefs(amplitude, n, m1, m2)
mask = f <= f0
delta_phi[mask] = - phi_of_f_factor[mask] * (f0[mask]/f_ref)**(n[mask]-3.)
mask = f > f0
delta_phi[mask] = - phi_of_f_factor[mask] * (f[mask]/f_ref)**(n[mask]-3.)
return formatreturn(delta_phi, input_is_array)
def nltides_gw_phase_diff_isco(f_low, f0, amplitude, n, m1, m2):
"""Calculate the gravitational-wave phase shift bwtween
f_low and f_isco due to non-linear tides.
Parameters
----------
f_low: float
Frequency from which to compute phase. If the other
arguments are passed as numpy arrays then the value
of f_low is duplicated for all elements in the array
f0: float or numpy.array
Frequency that NL effects switch on
amplitude: float or numpy.array
Amplitude of effect
n: float or numpy.array
Growth dependence of effect
m1: float or numpy.array
Mass of component 1
m2: float or numpy.array
Mass of component 2
Returns
-------
delta_phi: float or numpy.array
Phase in radians
"""
f0, amplitude, n, m1, m2, input_is_array = ensurearray(
f0, amplitude, n, m1, m2)
f_low = numpy.zeros(m1.shape) + f_low
phi_l = nltides_gw_phase_difference(
f_low, f0, amplitude, n, m1, m2)
f_isco = f_schwarzchild_isco(m1+m2)
phi_i = nltides_gw_phase_difference(
f_isco, f0, amplitude, n, m1, m2)
return formatreturn(phi_i - phi_l, input_is_array)
__all__ = ['dquadmon_from_lambda', 'lambda_tilde',
'lambda_from_mass_tov_file', 'primary_mass',
'secondary_mass', 'mtotal_from_mass1_mass2',
'q_from_mass1_mass2', 'invq_from_mass1_mass2',
'eta_from_mass1_mass2', 'mchirp_from_mass1_mass2',
'mass1_from_mtotal_q', 'mass2_from_mtotal_q',
'mass1_from_mtotal_eta', 'mass2_from_mtotal_eta',
'mtotal_from_mchirp_eta', 'mass1_from_mchirp_eta',
'mass2_from_mchirp_eta', 'mass2_from_mchirp_mass1',
'mass_from_knownmass_eta', 'mass2_from_mass1_eta',
'mass1_from_mass2_eta', 'eta_from_q', 'mass1_from_mchirp_q',
'mass2_from_mchirp_q', 'tau0_from_mtotal_eta',
'tau3_from_mtotal_eta', 'tau0_from_mass1_mass2',
'tau0_from_mchirp', 'mchirp_from_tau0',
'tau3_from_mass1_mass2', 'mtotal_from_tau0_tau3',
'eta_from_tau0_tau3', 'mass1_from_tau0_tau3',
'mass2_from_tau0_tau3', 'primary_spin', 'secondary_spin',
'chi_eff', 'chi_a', 'chi_p', 'phi_a', 'phi_s',
'primary_xi', 'secondary_xi',
'xi1_from_spin1x_spin1y', 'xi2_from_mass1_mass2_spin2x_spin2y',
'chi_perp_from_spinx_spiny', 'chi_perp_from_mass1_mass2_xi2',
'chi_p_from_xi1_xi2', 'phi_from_spinx_spiny',
'phi1_from_phi_a_phi_s', 'phi2_from_phi_a_phi_s',
'spin1z_from_mass1_mass2_chi_eff_chi_a',
'spin2z_from_mass1_mass2_chi_eff_chi_a',
'spin1x_from_xi1_phi_a_phi_s', 'spin1y_from_xi1_phi_a_phi_s',
'spin2x_from_mass1_mass2_xi2_phi_a_phi_s',
'spin2y_from_mass1_mass2_xi2_phi_a_phi_s',
'chirp_distance', 'det_tc', 'snr_from_loglr',
'freq_from_final_mass_spin', 'tau_from_final_mass_spin',
'final_spin_from_f0_tau', 'final_mass_from_f0_tau',
'final_mass_from_initial', 'final_spin_from_initial',
'optimal_dec_from_detector', 'optimal_ra_from_detector',
'chi_eff_from_spherical', 'chi_p_from_spherical',
'nltides_gw_phase_diff_isco', 'spin_from_pulsar_freq',
'freqlmn_from_other_lmn', 'taulmn_from_other_lmn',
'remnant_mass_from_mass1_mass2_spherical_spin_eos',
'remnant_mass_from_mass1_mass2_cartesian_spin_eos'
]
| 62,478
| 34.239143
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/cosmology.py
|
# Copyright (C) 2017 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides functions for computing cosmological quantities, such as
redshift. This is mostly a wrapper around ``astropy.cosmology``.
Note: in all functions, ``distance`` is shorthand for ``luminosity_distance``.
Any other distance measure is explicitly named; e.g., ``comoving_distance``.
"""
import logging
import numpy
from scipy import interpolate, integrate
import astropy.cosmology
from astropy import units
from astropy.cosmology.core import CosmologyError
from astropy.cosmology import parameters
import pycbc.conversions
DEFAULT_COSMOLOGY = 'Planck15'
def get_cosmology(cosmology=None, **kwargs):
r"""Gets an astropy cosmology class.
Parameters
----------
cosmology : str or astropy.cosmology.FlatLambdaCDM, optional
The name of the cosmology to use. For the list of options, see
:py:attr:`astropy.cosmology.parameters.available`. If None, and no
other keyword arguments are provided, will default to
:py:attr:`DEFAULT_COSMOLOGY`. If an instance of
:py:class:`astropy.cosmology.FlatLambdaCDM`, will just return that.
\**kwargs :
If any other keyword arguments are provided they will be passed to
:py:attr:`astropy.cosmology.FlatLambdaCDM` to create a custom
cosmology.
Returns
-------
astropy.cosmology.FlatLambdaCDM
The cosmology to use.
Examples
--------
Use the default:
>>> from pycbc.cosmology import get_cosmology
>>> get_cosmology()
FlatLambdaCDM(name="Planck15", H0=67.7 km / (Mpc s), Om0=0.307,
Tcmb0=2.725 K, Neff=3.05, m_nu=[0. 0. 0.06] eV,
Ob0=0.0486)
Use properties measured by WMAP instead:
>>> get_cosmology("WMAP9")
FlatLambdaCDM(name="WMAP9", H0=69.3 km / (Mpc s), Om0=0.286, Tcmb0=2.725 K,
Neff=3.04, m_nu=[0. 0. 0.] eV, Ob0=0.0463)
Create your own cosmology (see :py:class:`astropy.cosmology.FlatLambdaCDM`
for details on the default values used):
>>> get_cosmology(H0=70., Om0=0.3)
FlatLambdaCDM(H0=70 km / (Mpc s), Om0=0.3, Tcmb0=0 K, Neff=3.04, m_nu=None,
Ob0=None)
"""
if kwargs and cosmology is not None:
raise ValueError("if providing custom cosmological parameters, do "
"not provide a `cosmology` argument")
if isinstance(cosmology, astropy.cosmology.FlatLambdaCDM):
# just return
return cosmology
if kwargs:
cosmology = astropy.cosmology.FlatLambdaCDM(**kwargs)
else:
if cosmology is None:
cosmology = DEFAULT_COSMOLOGY
if cosmology not in parameters.available:
raise ValueError("unrecognized cosmology {}".format(cosmology))
cosmology = getattr(astropy.cosmology, cosmology)
return cosmology
def z_at_value(func, fval, unit, zmax=1000., **kwargs):
r"""Wrapper around astropy.cosmology.z_at_value to handle numpy arrays.
Getting a z for a cosmological quantity involves numerically inverting
``func``. The ``zmax`` argument sets how large of a z to guess (see
:py:func:`astropy.cosmology.z_at_value` for details). If a z is larger than
``zmax``, this will try a larger zmax up to ``zmax * 10**5``. If that still
is not large enough, will just return ``numpy.inf``.
Parameters
----------
func : function or method
A function that takes redshift as input.
fval : float
The value of ``func(z)``.
unit : astropy.unit
The unit of ``fval``.
zmax : float, optional
The initial maximum search limit for ``z``. Default is 1000.
\**kwargs :
All other keyword arguments are passed to
:py:func:`astropy.cosmology.z_at_value`.
Returns
-------
float
The redshift at the requested values.
"""
fval, input_is_array = pycbc.conversions.ensurearray(fval)
# make sure fval is at least 1D
if fval.size == 1 and fval.ndim == 0:
fval = fval.reshape(1)
zs = numpy.zeros(fval.shape, dtype=float) # the output array
if 'method' not in kwargs:
# workaround for https://github.com/astropy/astropy/issues/14249
# FIXME remove when fixed in astropy/scipy
kwargs['method'] = 'bounded'
for (ii, val) in enumerate(fval):
try:
zs[ii] = astropy.cosmology.z_at_value(func, val*unit, zmax=zmax,
**kwargs)
except CosmologyError:
if ii == len(zs)-1:
# `z_at_value` also raises a CosmologyError when zs[ii] is
# less than but very close to zmax, i.e. when it sits at the
# upper edge of [zmin, zmax]; see https://docs.astropy.org/
# en/stable/api/astropy.cosmology.z_at_value.html. To avoid
# bumping up zmax in that case, set zs[ii] to the previous
# value, assuming `func` is smooth
zs[ii] = zs[ii-1]
else:
# we'll get this if the z was larger than zmax; in that
# case we'll try bumping up zmax later to get a value
zs[ii] = numpy.inf
# check if there were any zs > zmax
replacemask = numpy.isinf(zs)
# try bumping up zmax to get a result
if replacemask.any():
# we'll keep bumping up the maxz until we can get a result
counter = 0 # to prevent running forever
while replacemask.any():
kwargs['zmin'] = zmax
zmax = 10 * zmax
idx = numpy.where(replacemask)[0]
for ii in idx:
val = fval[ii]
try:
zs[ii] = astropy.cosmology.z_at_value(
func, val*unit, zmax=zmax, **kwargs)
replacemask[ii] = False
except CosmologyError:
# didn't work, try on next loop
pass
counter += 1
if counter == 5:
# give up and warn the user
logging.warning("One or more values correspond to a "
"redshift > {0:.1e}. The redshift for these "
"have been set to inf. If you would like "
"better precision, call God.".format(zmax))
break
return pycbc.conversions.formatreturn(zs, input_is_array)
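# Example (illustrative sketch, not part of the original module): numerically
# inverting luminosity distance for an array of values. The exact outputs
# depend on the astropy version and cosmology, so they are only indicated
# roughly.
#
#   >>> from astropy import units
#   >>> cosmo = get_cosmology()
#   >>> z_at_value(cosmo.luminosity_distance, numpy.array([100., 1000.]),
#   ...            units.Mpc)
#   # -> array of redshifts, roughly [0.023, 0.2] for Planck15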
def _redshift(distance, **kwargs):
r"""Uses astropy to get redshift from the given luminosity distance.
Parameters
----------
distance : float
The luminosity distance, in Mpc.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float :
The redshift corresponding to the given luminosity distance.
"""
cosmology = get_cosmology(**kwargs)
return z_at_value(cosmology.luminosity_distance, distance, units.Mpc)
class DistToZ(object):
r"""Interpolates luminosity distance as a function of redshift to allow for
fast conversion.
The :mod:`astropy.cosmology` module provides methods for converting any
cosmological parameter (like luminosity distance) to redshift. This can be
very slow when operating on a large array, as it involves numerically
inverting :math:`z(D)` (where :math:`D` is the luminosity distance). This
class speeds that up by pre-interpolating :math:`D(z)`. It works by setting
up a dense grid of redshifts, then using linear interpolation to find the
inverse function. The interpolation uses a grid linear in z for z < 1, and
log in z for ``default_maxz`` > z > 1. The interpolator is set up the first
time `get_redshift` is called. If a distance is requested that results in
a z > ``default_maxz``, the class falls back to calling astropy directly.
Instances of this class can be called like a function on luminosity
distances, which will return the corresponding redshifts.
Parameters
----------
default_maxz : float, optional
The maximum z to interpolate up to before falling back to calling
astropy directly. Default is 1000.
numpoints : int, optional
The number of points to use in the linear interpolation between 0 to 1
and 1 to ``default_maxz``. Default is 10000.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
"""
def __init__(self, default_maxz=1000., numpoints=10000, **kwargs):
self.numpoints = int(numpoints)
self.default_maxz = default_maxz
self.cosmology = get_cosmology(**kwargs)
# the interpolating functions; we'll set them to None for now, then set
# them up when get_redshift is first called
self.nearby_d2z = None
self.faraway_d2z = None
self.default_maxdist = None
def setup_interpolant(self):
"""Initializes the z(d) interpolation."""
# for computing nearby (z < 1) redshifts
zs = numpy.linspace(0., 1., num=self.numpoints)
ds = self.cosmology.luminosity_distance(zs).value
self.nearby_d2z = interpolate.interp1d(ds, zs, kind='linear',
bounds_error=False)
# for computing far away (z > 1) redshifts
zs = numpy.logspace(0, numpy.log10(self.default_maxz),
num=self.numpoints)
ds = self.cosmology.luminosity_distance(zs).value
self.faraway_d2z = interpolate.interp1d(ds, zs, kind='linear',
bounds_error=False)
# store the default maximum distance
self.default_maxdist = ds.max()
def get_redshift(self, dist):
"""Returns the redshift for the given distance.
"""
dist, input_is_array = pycbc.conversions.ensurearray(dist)
try:
zs = self.nearby_d2z(dist)
except TypeError:
# interpolant hasn't been setup yet
self.setup_interpolant()
zs = self.nearby_d2z(dist)
# any points with redshifts beyond the nearby interpolant will be nan;
# replace using the faraway interpolation
replacemask = numpy.isnan(zs)
if replacemask.any():
zs[replacemask] = self.faraway_d2z(dist[replacemask])
replacemask = numpy.isnan(zs)
# if we still have nans, means that some distances are beyond our
# furthest default; fall back to using astropy
if replacemask.any():
# well... check that the distance is positive and finite first
if not ((dist > 0.) & numpy.isfinite(dist)).all():
raise ValueError("distance must be finite and > 0")
zs[replacemask] = _redshift(dist[replacemask],
cosmology=self.cosmology)
return pycbc.conversions.formatreturn(zs, input_is_array)
def __call__(self, dist):
return self.get_redshift(dist)
# set up D(z) interpolating classes for the standard cosmologies
_d2zs = {_c: DistToZ(cosmology=_c)
for _c in parameters.available}
def redshift(distance, **kwargs):
r"""Returns the redshift associated with the given luminosity distance.
If the requested cosmology is one of the pre-defined ones in
:py:attr:`astropy.cosmology.parameters.available`, :py:class:`DistToZ` is
used to provide a fast interpolation. This takes a few seconds to setup
on the first call.
Parameters
----------
distance : float
The luminosity distance, in Mpc.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float :
The redshift corresponding to the given distance.
"""
cosmology = get_cosmology(**kwargs)
try:
z = _d2zs[cosmology.name](distance)
except KeyError:
# not a standard cosmology, call the redshift function
z = _redshift(distance, cosmology=cosmology)
return z
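# Example (illustrative sketch, not part of the original module): the first
# call for a given cosmology pays the one-time cost of building the
# interpolant; later calls are fast even on large arrays.
#
#   >>> redshift(100.)                     # default Planck15; roughly 0.023
#   >>> redshift(100., cosmology='WMAP9')  # slightly different value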
class ComovingVolInterpolator(object):
r"""Interpolates comoving volume to distance or redshift.
The :mod:`astropy.cosmology` module provides methods for converting any
cosmological parameter (like luminosity distance) to redshift. This can be
very slow when operating on a large array, as it involves numerically
inverting :math:`z(D)` (where :math:`D` is the luminosity distance). This
class speeds that up by pre-interpolating :math:`D(z)`. It works by setting
up a dense grid of redshifts, then using linear interpolation to find the
inverse function. The interpolation uses a grid linear in z for z < 1, and
log in z for ``default_maxz`` > z > 1. This interpolater is setup the first
time `get_redshift` is called. If a distance is requested that results in
a z > ``default_maxz``, the class falls back to calling astropy directly.
Instances of this class can be called like a function on luminosity
distances, which will return the corresponding redshifts.
Parameters
----------
parameter : {'luminosity_distance', 'redshift'}
What parameter to interpolate.
default_maxz : float, optional
The maximum z to interpolate up to before falling back to calling
astropy directly. Default is 10.
numpoints : int, optional
The number of points to use in the linear interpolation between 0 to 1
and 1 to ``default_maxz``. Default is 1000.
vol_func: function, optional
Optionally set how the volume is calculated by providing a function
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
"""
def __init__(self, parameter, default_maxz=10., numpoints=1000,
vol_func=None, **kwargs):
self.parameter = parameter
self.numpoints = int(numpoints)
self.default_maxz = default_maxz
self.cosmology = get_cosmology(**kwargs)
# the interpolating functions; we'll set them to None for now, then set
# them up when get_redshift is first called
self.nearby_interp = None
self.faraway_interp = None
self.default_maxvol = None
if vol_func is not None:
self.vol_func = vol_func
else:
self.vol_func = self.cosmology.comoving_volume
self.vol_units = self.vol_func(0.5).unit
def _create_interpolant(self, minz, maxz):
minlogv = numpy.log(self.vol_func(minz).value)
maxlogv = numpy.log(self.vol_func(maxz).value)
logvs = numpy.linspace(minlogv, maxlogv, num=self.numpoints)
zs = z_at_value(self.vol_func, numpy.exp(logvs), self.vol_units, maxz)
if self.parameter != 'redshift':
ys = cosmological_quantity_from_redshift(zs, self.parameter)
else:
ys = zs
return interpolate.interp1d(logvs, ys, kind='linear',
bounds_error=False)
def setup_interpolant(self):
"""Initializes the z(d) interpolation."""
# get VC bounds
# for computing nearby (z < 1) redshifts
minz = 0.001
maxz = 1.
self.nearby_interp = self._create_interpolant(minz, maxz)
# for computing far away (z > 1) redshifts
minz = 1.
maxz = self.default_maxz
self.faraway_interp = self._create_interpolant(minz, maxz)
# store the default maximum volume
self.default_maxvol = numpy.log(self.vol_func(maxz).value)
def get_value_from_logv(self, logv):
"""Returns the redshift for the given distance.
"""
logv, input_is_array = pycbc.conversions.ensurearray(logv)
try:
vals = self.nearby_interp(logv)
except TypeError:
# interpolant hasn't been setup yet
self.setup_interpolant()
vals = self.nearby_interp(logv)
# any points with values beyond the nearby interpolant will be nan;
# replace using the faraway interpolation
replacemask = numpy.isnan(vals)
if replacemask.any():
vals[replacemask] = self.faraway_interp(logv[replacemask])
replacemask = numpy.isnan(vals)
# if we still have nans, means that some distances are beyond our
# furthest default; fall back to using astropy
if replacemask.any():
# well... check that the logv is finite first
if not numpy.isfinite(logv).all():
raise ValueError("comoving volume must be finite and > 0")
zs = z_at_value(self.vol_func,
numpy.exp(logv[replacemask]), self.vol_units)
if self.parameter == 'redshift':
vals[replacemask] = zs
else:
vals[replacemask] = \
getattr(self.cosmology, self.parameter)(zs).value
return pycbc.conversions.formatreturn(vals, input_is_array)
def get_value(self, volume):
return self.get_value_from_logv(numpy.log(volume))
def __call__(self, volume):
return self.get_value(volume)
# set up comoving-volume interpolating classes for the standard cosmologies
_v2ds = {_c: ComovingVolInterpolator('luminosity_distance', cosmology=_c)
for _c in parameters.available}
_v2zs = {_c: ComovingVolInterpolator('redshift', cosmology=_c)
for _c in parameters.available}
def redshift_from_comoving_volume(vc, interp=True, **kwargs):
r"""Returns the redshift from the given comoving volume.
Parameters
----------
vc : float
The comoving volume, in units of cubed Mpc.
interp : bool, optional
If true, this will setup an interpolator between redshift and comoving
volume the first time this function is called. This is useful when
making many successive calls to this function (and is necessary when
using this function in a transform when doing parameter estimation).
However, setting up the interpolator the first time takes on the order of
10 seconds. If you will only be making a single call to this function, or
will only run it on an array with < ~100000 elements, it is faster to
not use the interpolator (i.e., set ``interp=False``). Default is
``True``.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float :
The redshift at the given comoving volume.
"""
cosmology = get_cosmology(**kwargs)
lookup = _v2zs if interp else {}
try:
z = lookup[cosmology.name](vc)
except KeyError:
# not using interp or not a standard cosmology,
# call the redshift function directly
z = z_at_value(cosmology.comoving_volume, vc, units.Mpc**3)
return z
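# Example (illustrative sketch, not part of the original module): inverting a
# comoving volume of 1 Gpc**3, given in Mpc**3. Setting ``interp=False``
# skips building the interpolator, which is faster for a one-off call.
#
#   >>> redshift_from_comoving_volume(1e9, interp=False)
#   # -> roughly 0.15 for the default Planck15 cosmology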
def distance_from_comoving_volume(vc, interp=True, **kwargs):
r"""Returns the luminosity distance from the given comoving volume.
Parameters
----------
vc : float
The comoving volume, in units of cubed Mpc.
interp : bool, optional
If true, this will setup an interpolator between distance and comoving
volume the first time this function is called. This is useful when
making many successive calls to this function (such as when using this
function in a transform for parameter estimation). However, setting up
the interpolator the first time takes on the order of 10 seconds. If you will
only be making a single call to this function, or will only run it on
an array with < ~100000 elements, it is faster to not use the
interpolator (i.e., set ``interp=False``). Default is ``True``.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float :
The luminosity distance at the given comoving volume.
"""
cosmology = get_cosmology(**kwargs)
lookup = _v2ds if interp else {}
try:
dist = lookup[cosmology.name](vc)
except KeyError:
# not using interp or not a standard cosmology,
# call the redshift function directly
z = z_at_value(cosmology.comoving_volume, vc, units.Mpc**3)
dist = cosmology.luminosity_distance(z).value
return dist
def cosmological_quantity_from_redshift(z, quantity, strip_unit=True,
**kwargs):
r"""Returns the value of a cosmological quantity (e.g., age) at a redshift.
Parameters
----------
z : float
The redshift.
quantity : str
The name of the quantity to get. The name may be any attribute of
:py:class:`astropy.cosmology.FlatLambdaCDM`.
strip_unit : bool, optional
Just return the value of the quantity, sans units. Default is True.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float or astropy.units.quantity :
The value of the quantity at the requested value. If ``strip_unit`` is
``True``, will return the value. Otherwise, will return the value with
units.
"""
cosmology = get_cosmology(**kwargs)
val = getattr(cosmology, quantity)(z)
if strip_unit:
val = val.value
return val
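# Example (illustrative sketch, not part of the original module): any
# attribute of the cosmology object can be requested by name.
#
#   >>> cosmological_quantity_from_redshift(1., 'age')
#   # -> age of the universe at z = 1 in Gyr, roughly 5.9 for Planck15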
__all__ = ['redshift', 'redshift_from_comoving_volume',
'distance_from_comoving_volume',
'cosmological_quantity_from_redshift',
]
| 23,038
| 39.137631
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/rate.py
|
import numpy
import bisect
from . import bin_utils
def integral_element(mu, pdf):
'''
Returns an array of elements of the integrand dP = p(mu) dmu
for a density p(mu) defined at sample values mu ; samples need
not be equally spaced. Uses a simple trapezium rule.
The number of dP elements is (number of mu samples) - 1.
'''
dmu = mu[1:] - mu[:-1]
bin_mean = (pdf[1:] + pdf[:-1]) / 2.
return dmu * bin_mean
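# Example (illustrative sketch, not part of the original module): the
# integrand elements of a unit-normalized triangular pdf sum to 1. The
# sample values below are made up for demonstration.
#
#   >>> import numpy
#   >>> mu = numpy.linspace(0., 1., 5)
#   >>> pdf = numpy.array([0., 1., 2., 1., 0.])
#   >>> integral_element(mu, pdf).sum()
#   # -> 1.0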
def normalize_pdf(mu, pofmu):
"""
Takes a function pofmu defined at rate sample values mu and
normalizes it to be a suitable pdf. Both mu and pofmu must be
arrays or lists of the same length.
"""
if min(pofmu) < 0:
raise ValueError("Probabilities cannot be negative, don't ask me to "
"normalize a function with negative values!")
if min(mu) < 0:
raise ValueError("Rates cannot be negative, don't ask me to "
"normalize a function over a negative domain!")
dp = integral_element(mu, pofmu)
return mu, pofmu/sum(dp)
def compute_upper_limit(mu_in, post, alpha=0.9):
"""
Returns the upper limit mu_high of confidence level alpha for a
posterior distribution post on the given parameter mu.
The posterior need not be normalized.
"""
if 0 < alpha < 1:
dp = integral_element(mu_in, post)
high_idx = bisect.bisect_left(dp.cumsum() / dp.sum(), alpha)
# if alpha is in (0,1] and post is non-negative, bisect_left
# will always return an index in the range of mu since
# post.cumsum()/post.sum() will always begin at 0 and end at 1
mu_high = mu_in[high_idx]
elif alpha == 1:
mu_high = numpy.max(mu_in[post > 0])
else:
raise ValueError("Confidence level must be in (0,1].")
return mu_high
def compute_lower_limit(mu_in, post, alpha=0.9):
"""
Returns the lower limit mu_low of confidence level alpha for a
posterior distribution post on the given parameter mu.
The posterior need not be normalized.
"""
if 0 < alpha < 1:
dp = integral_element(mu_in, post)
low_idx = bisect.bisect_right(dp.cumsum() / dp.sum(), 1 - alpha)
# if alpha is in [0,1) and post is non-negative, bisect_right
# will always return an index in the range of mu since
# post.cumsum()/post.sum() will always begin at 0 and end at 1
mu_low = mu_in[low_idx]
elif alpha == 1:
mu_low = numpy.min(mu_in[post > 0])
else:
raise ValueError("Confidence level must be in (0,1].")
return mu_low
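# Example (illustrative sketch, not part of the original module): 90%
# one-sided limits for a flat (unnormalized) posterior on [0, 1].
#
#   >>> import numpy
#   >>> mu = numpy.linspace(0., 1., 101)
#   >>> post = numpy.ones_like(mu)
#   >>> compute_upper_limit(mu, post, alpha=0.9)  # -> approximately 0.9
#   >>> compute_lower_limit(mu, post, alpha=0.9)  # -> approximately 0.1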
def confidence_interval_min_width(mu, post, alpha=0.9):
'''
Returns the minimal-width confidence interval [mu_low, mu_high] of
confidence level alpha for a posterior distribution post on the parameter mu.
'''
if not 0 < alpha < 1:
raise ValueError("Confidence level must be in (0,1).")
# choose a step size for the sliding confidence window
alpha_step = 0.01
# initialize the lower and upper limits
mu_low = numpy.min(mu)
mu_high = numpy.max(mu)
# find the smallest window (by delta-mu) stepping by dalpha
for ai in numpy.arange(0, 1 - alpha, alpha_step):
ml = compute_lower_limit(mu, post, 1 - ai)
mh = compute_upper_limit(mu, post, alpha + ai)
if mh - ml < mu_high - mu_low:
mu_low = ml
mu_high = mh
return mu_low, mu_high
def hpd_coverage(mu, pdf, thresh):
'''
Integrates a pdf over mu taking only bins where
the mean over the bin is above a given threshold
This gives the coverage of the HPD interval for
the given threshold.
'''
dp = integral_element(mu, pdf)
bin_mean = (pdf[1:] + pdf[:-1]) / 2.
return dp[bin_mean > thresh].sum()
def hpd_threshold(mu_in, post, alpha, tol):
'''
For a PDF post over samples mu_in, find a density
threshold such that the region having higher density
has coverage of at least alpha, and less than alpha
plus a given tolerance.
'''
_, norm_post = normalize_pdf(mu_in, post)
# initialize bisection search
p_minus = 0.0
p_plus = max(post)
while abs(hpd_coverage(mu_in, norm_post, p_minus) -
hpd_coverage(mu_in, norm_post, p_plus)) >= tol:
p_test = (p_minus + p_plus) / 2.
if hpd_coverage(mu_in, norm_post, p_test) >= alpha:
# test value was too low or just right
p_minus = p_test
else:
# test value was too high
p_plus = p_test
# p_minus never goes above the required threshold and p_plus never goes below
# thus on exiting p_minus is at or below the required threshold and the
# difference in coverage is within tolerance
return p_minus
def hpd_credible_interval(mu_in, post, alpha=0.9, tolerance=1e-3):
'''
Returns the minimum and maximum rate values of the HPD
(Highest Posterior Density) credible interval for a posterior
post defined at the sample values mu_in. Samples need not be
uniformly spaced and posterior need not be normalized.
Will not return a correct credible interval if the posterior
is multimodal and the correct interval is not contiguous;
in this case will over-cover by including the whole range from
minimum to maximum mu.
'''
if alpha == 1:
nonzero_samples = mu_in[post > 0]
mu_low = numpy.min(nonzero_samples)
mu_high = numpy.max(nonzero_samples)
elif 0 < alpha < 1:
# determine the highest PDF for which the region with
# higher density has sufficient coverage
pthresh = hpd_threshold(mu_in, post, alpha, tol=tolerance)
samples_over_threshold = mu_in[post > pthresh]
mu_low = numpy.min(samples_over_threshold)
mu_high = numpy.max(samples_over_threshold)
return mu_low, mu_high
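# Example (illustrative sketch, not part of the original module): the 90% HPD
# interval of a Gaussian-shaped posterior centred at 2.5 with unit width
# should come out near [0.86, 4.14].
#
#   >>> import numpy
#   >>> mu = numpy.linspace(0., 5., 501)
#   >>> post = numpy.exp(-0.5 * (mu - 2.5)**2)
#   >>> hpd_credible_interval(mu, post, alpha=0.9)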
# Following functions are for the old pylal volume vs mass calculations
# These were replaced by 'imr_utils' functions now contained in sensitivity.py
# and bin_utils.py
def integrate_efficiency(dbins, eff, err=0, logbins=False):
if logbins:
logd = numpy.log(dbins)
dlogd = logd[1:] - logd[:-1]
# use log midpoint of bins
dreps = numpy.exp((numpy.log(dbins[1:]) + numpy.log(dbins[:-1])) / 2.)
vol = numpy.sum(4.*numpy.pi * dreps**3. * eff * dlogd)
# propagate errors in eff to errors in v
verr = numpy.sqrt(
numpy.sum((4.*numpy.pi * dreps**3. * err * dlogd)**2.)
)
else:
dd = dbins[1:] - dbins[:-1]
dreps = (dbins[1:] + dbins[:-1]) / 2.
vol = numpy.sum(4. * numpy.pi * dreps**2. * eff * dd)
# propagate errors
verr = numpy.sqrt(numpy.sum((4.*numpy.pi * dreps**2. * err * dd)**2.))
return vol, verr
def compute_efficiency(f_dist, m_dist, dbins):
'''
Compute the efficiency as a function of distance for the given sets of found
and missed injection distances.
Note that injections that do not fall into any dbin are silently dropped.
'''
efficiency = numpy.zeros(len(dbins) - 1)
error = numpy.zeros(len(dbins) - 1)
for j, dlow in enumerate(dbins[:-1]):
dhigh = dbins[j + 1]
found = numpy.sum((dlow <= f_dist) * (f_dist < dhigh))
missed = numpy.sum((dlow <= m_dist) * (m_dist < dhigh))
if found+missed == 0:
# avoid divide by 0 in empty bins
missed = 1.
efficiency[j] = float(found) / (found + missed)
error[j] = numpy.sqrt(efficiency[j] * (1 - efficiency[j]) /
(found + missed))
return efficiency, error
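# Example (illustrative sketch, not part of the original module):
# efficiencies in two distance bins from made-up found/missed injection
# distances.
#
#   >>> f_dist = numpy.array([10., 20., 30.])
#   >>> m_dist = numpy.array([35., 45.])
#   >>> dbins = numpy.array([0., 25., 50.])
#   >>> compute_efficiency(f_dist, m_dist, dbins)
#   # -> efficiencies of roughly [1.0, 0.33] with binomial errors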
def mean_efficiency_volume(found, missed, dbins):
if len(found) == 0:
# no efficiency here
return numpy.zeros(len(dbins) - 1), numpy.zeros(len(dbins) - 1), 0, 0
# only need distances
f_dist = numpy.array([l.distance for l in found])
m_dist = numpy.array([l.distance for l in missed])
# compute the efficiency and its variance
eff, err = compute_efficiency(f_dist, m_dist, dbins)
vol, verr = integrate_efficiency(dbins, eff, err)
return eff, err, vol, verr
def filter_injections_by_mass(injs, mbins, bin_num, bin_type, bin_num2=None):
'''
For a given set of injections (sim_inspiral rows), return the subset
of injections that fall within the given mass range.
'''
if bin_type == "Mass1_Mass2":
m1bins = numpy.concatenate((mbins.lower()[0],
numpy.array([mbins.upper()[0][-1]])))
m1lo = m1bins[bin_num]
m1hi = m1bins[bin_num + 1]
m2bins = numpy.concatenate((mbins.lower()[1],
numpy.array([mbins.upper()[1][-1]])))
m2lo = m2bins[bin_num2]
m2hi = m2bins[bin_num2 + 1]
newinjs = [l for l in injs if
((m1lo <= l.mass1 < m1hi and m2lo <= l.mass2 < m2hi) or
(m1lo <= l.mass2 < m1hi and m2lo <= l.mass1 < m2hi))]
return newinjs
mbins = numpy.concatenate((mbins.lower()[0],
numpy.array([mbins.upper()[0][-1]])))
mlow = mbins[bin_num]
mhigh = mbins[bin_num + 1]
if bin_type == "Chirp_Mass":
newinjs = [l for l in injs if (mlow <= l.mchirp < mhigh)]
elif bin_type == "Total_Mass":
newinjs = [l for l in injs if (mlow <= l.mass1 + l.mass2 < mhigh)]
elif bin_type == "Component_Mass":
# here it is assumed that m2 is fixed
newinjs = [l for l in injs if (mlow <= l.mass1 < mhigh)]
elif bin_type == "BNS_BBH":
if bin_num in [0, 2]:
# BNS/BBH case
newinjs = [l for l in injs if
(mlow <= l.mass1 < mhigh and mlow <= l.mass2 < mhigh)]
else:
# NSBH
newinjs = [l for l in injs if (mbins[0] <= l.mass1 < mbins[1] and
mbins[2] <= l.mass2 < mbins[3])]
# BHNS
newinjs += [l for l in injs if (mbins[0] <= l.mass2 < mbins[1] and
mbins[2] <= l.mass1 < mbins[3])]
return newinjs
def compute_volume_vs_mass(found, missed, mass_bins, bin_type, dbins=None):
"""
Compute the average luminosity an experiment was sensitive to.
Assumes that luminosity is uniformly distributed in space.
Input is the sets of found and missed injections.
"""
# mean and std estimate for luminosity
volArray = bin_utils.BinnedArray(mass_bins)
vol2Array = bin_utils.BinnedArray(mass_bins)
# found/missed stats
foundArray = bin_utils.BinnedArray(mass_bins)
missedArray = bin_utils.BinnedArray(mass_bins)
# compute the mean luminosity in each mass bin
effvmass = []
errvmass = []
# 2D case first
if bin_type == "Mass1_Mass2":
for j, mc1 in enumerate(mass_bins.centres()[0]):
for k, mc2 in enumerate(mass_bins.centres()[1]):
newfound = filter_injections_by_mass(
found, mass_bins, j, bin_type, k)
newmissed = filter_injections_by_mass(
missed, mass_bins, j, bin_type, k)
foundArray[(mc1, mc2)] = len(newfound)
missedArray[(mc1, mc2)] = len(newmissed)
# compute the volume using this injection set
meaneff, efferr, meanvol, volerr = mean_efficiency_volume(
newfound, newmissed, dbins)
effvmass.append(meaneff)
errvmass.append(efferr)
volArray[(mc1, mc2)] = meanvol
vol2Array[(mc1, mc2)] = volerr
return volArray, vol2Array, foundArray, missedArray, effvmass, errvmass
for j, mc in enumerate(mass_bins.centres()[0]):
# filter out injections not in this mass bin
newfound = filter_injections_by_mass(found, mass_bins, j, bin_type)
newmissed = filter_injections_by_mass(missed, mass_bins, j, bin_type)
foundArray[(mc, )] = len(newfound)
missedArray[(mc, )] = len(newmissed)
# compute the volume using this injection set
meaneff, efferr, meanvol, volerr = mean_efficiency_volume(
newfound, newmissed, dbins)
effvmass.append(meaneff)
errvmass.append(efferr)
volArray[(mc, )] = meanvol
vol2Array[(mc, )] = volerr
return volArray, vol2Array, foundArray, missedArray, effvmass, errvmass
| 12,540
| 35.9941
| 81
|
py
|
pycbc
|
pycbc-master/pycbc/mchirp_area.py
|
# Module with utilities for estimating candidate events source probabilities
# Initial code by A. Curiel Barroso, August 2019
# Modified by V. Villa-Ortega, January 2020, March 2021
"""Functions to compute the area corresponding to different CBC on the m1 & m2
plane when given a central mchirp value and uncertainty.
It also includes a function that calculates the source frame when given the
detector frame mass and redshift.
"""
import math
import numpy as np
from pycbc.conversions import mass2_from_mchirp_mass1 as m2mcm1
from scipy.integrate import quad
from pycbc.cosmology import _redshift
from astropy.cosmology import FlatLambdaCDM
def insert_args(parser):
mchirp_group = parser.add_argument_group("Arguments for estimating the "
"source probabilities of a "
"candidate event using the snr, "
"mchirp, and effective distance.")
mchirp_group.add_argument('--src-class-mass-limits', type=float, nargs=3,
metavar=('MIN_M2', 'MAX_NS', 'MAX_M1'),
default=[1.0, 3.0, 45.0],
help="Minimum and maximum values for the mass "
"of the binary components and maximum mass "
"of a neutron star, used as limits "
"when computing the area corresponding"
"to different CBC sources.")
mchirp_group.add_argument('--src-class-mass-gap-max', type=float,
metavar=('MAX_GAP'),
help="Upper limit of the mass gap, corresponding"
" to the minimum mass of a black hole. "
"Used as limit of integration of the "
"different CBC regions when considering "
"the MassGap category.")
mchirp_group.add_argument('--src-class-mchirp-to-delta', type=float,
metavar='m0', required=True,
help='Coefficient to estimate the value of the '
'mchirp uncertainty by mchirp_delta = '
'm0 * mchirp.')
mchirp_group.add_argument('--src-class-eff-to-lum-distance', type=float,
metavar='a0', required=True,
help='Coefficient to estimate the value of the '
'luminosity distance from the minimum '
'eff distance by D_lum = a0 * min(D_eff).')
mchirp_group.add_argument('--src-class-lum-distance-to-delta', type=float,
nargs=2, metavar=('b0', 'b1'), required=True,
help='Coefficients to estimate the value of the '
'uncertainty on the luminosity distance '
'from the estimated luminosity distance and'
' the coinc snr by delta_lum = D_lum * '
'exp(b0) * coinc_snr ** b1.')
mchirp_group.add_argument('--src-class-mass-gap-separate',
action='store_true',
help='Gives separate probabilities for each kind'
' of mass gap CBC sources: GNS, GG, BHG.')
mchirp_group.add_argument('--src-class-lal-cosmology',
action='store_true',
help='Uses the Planck15 cosmology defined in '
'lalsuite instead of the astropy Planck15 '
'default model.')
def from_cli(args, parser):
mass_limits_sorted = sorted(args.src_class_mass_limits)
if args.src_class_mass_gap_max:
if args.src_class_mass_gap_max < mass_limits_sorted[1]:
parser.error('MAX_GAP value cannot be lower than MAX_NS limit')
return {'mass_limits':
{'max_m1': mass_limits_sorted[2],
'min_m2': mass_limits_sorted[0]},
'mass_bdary':
{'ns_max': mass_limits_sorted[1],
'gap_max': args.src_class_mass_gap_max},
'estimation_coeff':
{'a0': args.src_class_eff_to_lum_distance,
'b0': args.src_class_lum_distance_to_delta[0],
'b1': args.src_class_lum_distance_to_delta[1],
'm0': args.src_class_mchirp_to_delta},
'mass_gap': True,
'mass_gap_separate': args.src_class_mass_gap_separate,
'lal_cosmology': args.src_class_lal_cosmology}
return {'mass_limits':
{'max_m1': mass_limits_sorted[2],
'min_m2': mass_limits_sorted[0]},
'mass_bdary':
{'ns_max': mass_limits_sorted[1],
'gap_max': mass_limits_sorted[1]},
'estimation_coeff':
{'a0': args.src_class_eff_to_lum_distance,
'b0': args.src_class_lum_distance_to_delta[0],
'b1': args.src_class_lum_distance_to_delta[1],
'm0': args.src_class_mchirp_to_delta},
'mass_gap': False,
'mass_gap_separate': args.src_class_mass_gap_separate,
'lal_cosmology': args.src_class_lal_cosmology}
def redshift_estimation(distance, distance_std, lal_cosmology):
"""Takes values of distance and its uncertainty and returns a
dictionary with estimates of the redshift and its uncertainty.
If the argument 'lal_cosmology' is True, it uses the Planck15 cosmology
model as defined in lalsuite instead of the astropy default.
Constants for lal_cosmology taken from Planck15_lal_cosmology() in
https://git.ligo.org/lscsoft/pesummary/-/blob/master/pesummary/gw/
cosmology.py.
"""
if lal_cosmology:
cosmology = FlatLambdaCDM(H0=67.90, Om0=0.3065)
else:
cosmology = None
z_estimation = _redshift(distance, cosmology=cosmology)
z_est_max = _redshift((distance + distance_std),
cosmology=cosmology)
z_est_min = _redshift((distance - distance_std),
cosmology=cosmology)
z_std_estimation = 0.5 * (z_est_max - z_est_min)
z = {'central': z_estimation, 'delta': z_std_estimation}
return z
def src_mass_from_z_det_mass(z, del_z, mdet, del_mdet):
"""Takes values of redshift, redshift uncertainty, detector mass and its
uncertainty and computes the source mass and its uncertainty.
"""
msrc = mdet / (1. + z)
del_msrc = msrc * ((del_mdet / mdet) ** 2.
+ (del_z / (1. + z)) ** 2.) ** 0.5
return (msrc, del_msrc)
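# Example (illustrative sketch, not part of the original module): a
# detector-frame mass of 1.4 at z = 0.1 maps to a source-frame value of
# about 1.27, with the uncertainties combined in quadrature.
#
#   >>> src_mass_from_z_det_mass(0.1, 0.01, 1.4, 0.05)
#   # -> (roughly 1.27, roughly 0.05)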
def intmc(mc, x_min, x_max):
"""Returns the integral of m2 over m1 between x_min and x_max,
assuming that mchirp is fixed.
"""
integral = quad(lambda x, mc: m2mcm1(mc, x), x_min, x_max, args=mc)
return integral[0]
def get_area(trig_mc, lim_h1, lim_h2, lim_v1, lim_v2):
"""Returns the area under the chirp mass contour in a region of the m1-m2
plane (m1 > m2).
Parameters
----------
trig_mc : sequence of two values
first represents central estimate of mchirp in source frame,
second its uncertainty
lim_h1, lim_h2 : floats or the string 'diagonal'
upper and lower horizontal limits of the region (limits on m2)
lim_v1, lim_v2 : floats
right and left vertical limits of the region (limits on m1)
Returns
-------
area : float
"""
mc_max = trig_mc[0] + trig_mc[1]
mc_min = trig_mc[0] - trig_mc[1]
# The point where the equal-mass line and a chirp mass
# curve intersect is m1 = m2 = 2**0.2 * mchirp
mi_max = (2.**0.2) * mc_max
mi_min = (2.**0.2) * mc_min
if lim_h1 == 'diagonal':
max_h1 = mi_max
min_h1 = mi_min
fun_sup = lambda x: x
else:
max_h1 = m2mcm1(mc_max, lim_h1)
min_h1 = m2mcm1(mc_min, lim_h1)
fun_sup = lambda x: lim_h1
max_h2 = m2mcm1(mc_max, lim_h2)
min_h2 = m2mcm1(mc_min, lim_h2)
fun_inf = lambda x: lim_h2
lim_max1 = np.clip(max_h1, lim_v1, lim_v2)
lim_max2 = np.clip(max_h2, lim_v1, lim_v2)
lim_min1 = np.clip(min_h1, lim_v1, lim_v2)
lim_min2 = np.clip(min_h2, lim_v1, lim_v2)
int_max = intmc(mc_max, lim_max1, lim_max2)
int_min = intmc(mc_min, lim_min1, lim_min2)
intline_sup = quad(fun_sup, lim_min1, lim_max1)[0]
intline_inf = quad(fun_inf, lim_min2, lim_max2)[0]
area = int_max + intline_sup - int_min - intline_inf
return area
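# Example (illustrative sketch, not part of the original module): the area of
# the BNS region (both masses between 1.0 and 3.0) for a source-frame chirp
# mass of 1.2 +/- 0.01, using the m1 = m2 line as the upper boundary.
#
#   >>> get_area((1.2, 0.01), 'diagonal', 1.0, 1.0, 3.0)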
def calc_areas(
trig_mc_det,
mass_limits,
mass_bdary,
z,
mass_gap,
mass_gap_separate):
"""Computes the area inside the lines of the second component mass as a
function of the first component mass for the two extreme values
of mchirp: mchirp +/- mchirp_uncertainty, for each region of the source
classifying diagram.
"""
trig_mc = src_mass_from_z_det_mass(z["central"], z["delta"],
trig_mc_det["central"],
trig_mc_det["delta"])
m2_min = mass_limits["min_m2"]
m1_max = mass_limits["max_m1"]
ns_max = mass_bdary["ns_max"]
gap_max = mass_bdary["gap_max"]
abbh = get_area(trig_mc, 'diagonal', gap_max, gap_max, m1_max)
abhg = get_area(trig_mc, gap_max, ns_max, gap_max, m1_max)
ansbh = get_area(trig_mc, ns_max, m2_min, gap_max, m1_max)
agg = get_area(trig_mc, 'diagonal', ns_max, ns_max, gap_max)
agns = get_area(trig_mc, ns_max, m2_min, ns_max, gap_max)
abns = get_area(trig_mc, 'diagonal', m2_min, m2_min, ns_max)
if mass_gap:
if mass_gap_separate:
return {
"BNS": abns,
"GNS": agns,
"NSBH": ansbh,
"GG": agg,
"BHG": abhg,
"BBH": abbh
}
return {
"BNS": abns,
"NSBH": ansbh,
"BBH": abbh,
"Mass Gap": agns + agg + abhg
}
return {
"BNS": abns,
"NSBH": ansbh,
"BBH": abbh
}
def calc_probabilities(mchirp, snr, eff_distance, src_args):
"""Computes the different probabilities that a candidate event belongs to
each CBC source category taking as arguments the chirp mass, the
coincident SNR and the effective distance, and estimating the
chirp mass uncertainty, the luminosity distance (and its uncertainty)
and the redshift (and its uncertainty). Probability is estimated to be
directly proportional to the area of the corresponding CBC region.
"""
mass_limits = src_args['mass_limits']
mass_bdary = src_args['mass_bdary']
coeff = src_args['estimation_coeff']
trig_mc_det = {'central': mchirp, 'delta': mchirp * coeff['m0']}
dist_estimation = coeff['a0'] * eff_distance
dist_std_estimation = (dist_estimation * math.exp(coeff['b0']) *
snr ** coeff['b1'])
z = redshift_estimation(dist_estimation, dist_std_estimation,
src_args['lal_cosmology'])
mass_gap = src_args['mass_gap']
mass_gap_separate = src_args['mass_gap_separate']
# If the mchirp is greater than the mchirp corresponding to two masses
# equal to the maximum mass, the probability for BBH is 100%.
# If it is less than the mchirp corresponding to two masses equal to the
# minimum mass, the probability for BNS is 100%.
mc_max = mass_limits['max_m1'] / (2 ** 0.2)
mc_min = mass_limits['min_m2'] / (2 ** 0.2)
if trig_mc_det['central'] > mc_max * (1 + z['central']):
if mass_gap:
if mass_gap_separate:
probabilities = {"BNS": 0.0, "GNS": 0.0, "NSBH": 0.0,
"GG": 0.0, "BHG": 0.0, "BBH": 1.0}
else:
probabilities = {"BNS": 0.0, "NSBH": 0.0, "BBH": 1.0,
"Mass Gap": 0.0}
else:
probabilities = {"BNS": 0.0, "NSBH": 0.0, "BBH": 1.0}
elif trig_mc_det['central'] < mc_min * (1 + z['central']):
if mass_gap:
if mass_gap_separate:
probabilities = {"BNS": 1.0, "GNS": 0.0, "NSBH": 0.0,
"GG": 0.0, "BHG": 0.0, "BBH": 0.0}
else:
probabilities = {"BNS": 1.0, "NSBH": 0.0, "BBH": 0.0,
"Mass Gap": 0.0}
else:
probabilities = {"BNS": 1.0, "NSBH": 0.0, "BBH": 0.0}
else:
areas = calc_areas(trig_mc_det, mass_limits, mass_bdary, z,
mass_gap, mass_gap_separate)
total_area = sum(areas.values())
probabilities = {key: areas[key] / total_area for key in areas}
return probabilities
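# Example (illustrative sketch, not part of the original module): source-class
# probabilities for a made-up candidate. Every coefficient in ``src_args``
# below is an arbitrary stand-in for values normally supplied on the command
# line, not a recommended setting.
#
#   >>> src_args = {'mass_limits': {'max_m1': 45., 'min_m2': 1.},
#   ...             'mass_bdary': {'ns_max': 3., 'gap_max': 3.},
#   ...             'estimation_coeff': {'a0': 0.75, 'b0': -7., 'b1': 1.,
#   ...                                  'm0': 0.01},
#   ...             'mass_gap': False, 'mass_gap_separate': False,
#   ...             'lal_cosmology': False}
#   >>> calc_probabilities(1.2, 10., 100., src_args)
#   # -> a dict of probabilities for 'BNS', 'NSBH' and 'BBH'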
| 13,070
| 42.57
| 79
|
py
|