repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
GANFingerprints | GANFingerprints-master/classifier_visNet/nets/mobilenet/mobilenet_v2.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Mobilenet V2.
Architecture: https://arxiv.org/abs/1801.04381
The base model gives 72.2% accuracy on ImageNet, with 300MMadds,
3.4 M parameters.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import tensorflow as tf
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet as lib
slim = tf.contrib.slim
op = lib.op
expand_input = ops.expand_input_by_factor
# pyformat: disable
# Architecture: https://arxiv.org/abs/1801.04381
# Architecture definition for MobilenetV2 (https://arxiv.org/abs/1801.04381):
# arg_scope defaults applied to every layer, plus the ordered layer spec.
V2_DEF = dict(
    defaults={
        # Note: these parameters of batch norm affect the architecture
        # that's why they are here and not in training_scope.
        (slim.batch_norm,): {'center': True, 'scale': True},
        (slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
            'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6
        },
        (ops.expanded_conv,): {
            'expansion_size': expand_input(6),
            'split_expansion': 1,
            'normalizer_fn': slim.batch_norm,
            'residual': True
        },
        (slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'}
    },
    spec=[
        # Stem convolution.
        op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
        # First bottleneck uses expansion factor 1 (no expansion), matching
        # Table 2 of the paper.
        op(ops.expanded_conv,
           expansion_size=expand_input(1, divisible_by=1),
           num_outputs=16),
        op(ops.expanded_conv, stride=2, num_outputs=24),
        op(ops.expanded_conv, stride=1, num_outputs=24),
        op(ops.expanded_conv, stride=2, num_outputs=32),
        op(ops.expanded_conv, stride=1, num_outputs=32),
        op(ops.expanded_conv, stride=1, num_outputs=32),
        op(ops.expanded_conv, stride=2, num_outputs=64),
        op(ops.expanded_conv, stride=1, num_outputs=64),
        op(ops.expanded_conv, stride=1, num_outputs=64),
        op(ops.expanded_conv, stride=1, num_outputs=64),
        op(ops.expanded_conv, stride=1, num_outputs=96),
        op(ops.expanded_conv, stride=1, num_outputs=96),
        op(ops.expanded_conv, stride=1, num_outputs=96),
        op(ops.expanded_conv, stride=2, num_outputs=160),
        op(ops.expanded_conv, stride=1, num_outputs=160),
        op(ops.expanded_conv, stride=1, num_outputs=160),
        op(ops.expanded_conv, stride=1, num_outputs=320),
        # Final 1x1 expansion to the 1280-wide embedding layer.
        op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280)
    ],
)
# pyformat: enable
@slim.add_arg_scope
def mobilenet(input_tensor,
              num_classes=1001,
              depth_multiplier=1.0,
              scope='MobilenetV2',
              conv_defs=None,
              finegrain_classification_mode=False,
              min_depth=None,
              divisible_by=None,
              activation_fn=None,
              **kwargs):
  """Creates mobilenet V2 network.

  Inference mode is created by default. To create training use training_scope
  below.

  with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()):
    logits, endpoints = mobilenet_v2.mobilenet(input_tensor)

  Args:
    input_tensor: The input tensor.
    num_classes: number of classes.
    depth_multiplier: The multiplier applied to scale number of
      channels in each layer. Note: this is called depth multiplier in the
      paper but the name is kept for consistency with slim's model builder.
    scope: Scope of the operator.
    conv_defs: Allows to override default conv def.
    finegrain_classification_mode: When set to True, the model
      will keep the last layer large even for small multipliers. Following
      https://arxiv.org/abs/1801.04381 this improves performance for
      ImageNet-type problems.
      *Note* ignored if final_endpoint makes the builder exit earlier.
    min_depth: If provided, will ensure that all layers will have that
      many channels after application of depth multiplier.
    divisible_by: If provided will ensure that all layers' number of channels
      will be divisible by this number.
    activation_fn: Activation function to use, defaults to tf.nn.relu6 if not
      specified.
    **kwargs: passed directly to mobilenet.mobilenet:
      prediction_fn - what prediction function to use.
      reuse - whether to reuse variables (if reuse set to true, scope
      must be given).

  Returns:
    logits/endpoints pair.

  Raises:
    ValueError: On invalid arguments.
  """
  if conv_defs is None:
    conv_defs = V2_DEF
  if 'multiplier' in kwargs:
    raise ValueError('mobilenetv2 doesn\'t support generic '
                     'multiplier parameter use "depth_multiplier" instead.')
  if finegrain_classification_mode:
    # Deep-copy before mutating so the shared V2_DEF is never modified.
    conv_defs = copy.deepcopy(conv_defs)
    if depth_multiplier < 1:
      # Undo the global multiplier for the final embedding layer so it keeps
      # its full width.
      # NOTE(review): true division makes 'num_outputs' a float under
      # Python 3 — presumably the layer builder coerces it; confirm.
      conv_defs['spec'][-1].params['num_outputs'] /= depth_multiplier
  if activation_fn:
    conv_defs = copy.deepcopy(conv_defs)
    defaults = conv_defs['defaults']
    conv_defaults = (
        defaults[(slim.conv2d, slim.fully_connected, slim.separable_conv2d)])
    conv_defaults['activation_fn'] = activation_fn

  depth_args = {}
  # NB: do not set depth_args unless they are provided to avoid overriding
  # whatever default depth_multiplier might have thanks to arg_scope.
  if min_depth is not None:
    depth_args['min_depth'] = min_depth
  if divisible_by is not None:
    depth_args['divisible_by'] = divisible_by

  with slim.arg_scope((lib.depth_multiplier,), **depth_args):
    return lib.mobilenet(
        input_tensor,
        num_classes=num_classes,
        conv_defs=conv_defs,
        scope=scope,
        multiplier=depth_multiplier,
        **kwargs)


# Canonical input resolution used by slim's model zoo tooling.
mobilenet.default_image_size = 224
def wrapped_partial(func, *args, **kwargs):
  """Like functools.partial, but carrying over func's metadata.

  functools.update_wrapper copies __name__, __doc__ etc. from `func` onto the
  partial object, so the wrapper remains introspectable (e.g. for scoping).
  """
  wrapper = functools.partial(func, *args, **kwargs)
  functools.update_wrapper(wrapper, func)
  return wrapper
# Wrappers for mobilenet v2 with fixed depth-multipliers. Note that
# 'finegrain_classification_mode' is set to True for the sub-1.0 variants,
# which means the final embedding layer will not be shrunk when given a
# depth-multiplier < 1.0.
mobilenet_v2_140 = wrapped_partial(mobilenet, depth_multiplier=1.4)
mobilenet_v2_050 = wrapped_partial(mobilenet, depth_multiplier=0.50,
                                   finegrain_classification_mode=True)
mobilenet_v2_035 = wrapped_partial(mobilenet, depth_multiplier=0.35,
                                   finegrain_classification_mode=True)
@slim.add_arg_scope
def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs):
  """Builds the MobilenetV2 feature extractor: no pooling and no logits head."""
  # Same builder as `mobilenet`, with the classification tail disabled.
  return mobilenet(
      input_tensor, base_only=True, depth_multiplier=depth_multiplier,
      **kwargs)
def training_scope(**kwargs):
  """Defines MobilenetV2 training scope.

  Usage:
    with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()):
      logits, endpoints = mobilenet_v2.mobilenet(input_tensor)

  Args:
    **kwargs: Passed to mobilenet.training_scope. The following parameters
      are supported:
        weight_decay - The weight decay to use for regularizing the model.
        stddev - Standard deviation for initialization, if negative uses
          xavier.
        dropout_keep_prob - dropout keep probability.
        bn_decay - decay for the batch norm moving averages.

  Returns:
    An `arg_scope` to use for the mobilenet v2 model.
  """
  # Delegates entirely to the shared mobilenet-library training scope.
  return lib.training_scope(**kwargs)
# Explicit public API of this module.
__all__ = ['training_scope', 'mobilenet_base', 'mobilenet', 'V2_DEF']
| 8,078 | 36.230415 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier_visNet/nets/mobilenet/conv_blocks.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convolution blocks for mobilenet."""
import contextlib
import functools
import tensorflow as tf
slim = tf.contrib.slim
def _fixed_padding(inputs, kernel_size, rate=1):
  """Pads the input along the spatial dimensions independently of input size.

  Pads the input such that if it was used in a convolution with 'VALID'
  padding, the output would have the same dimensions as if the unpadded input
  was used in a convolution with 'SAME' padding.

  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
    rate: An integer, rate for atrous convolution.

  Returns:
    output: A tensor of size [batch, height_out, width_out, channels] with the
      input, either intact (if kernel_size == 1) or padded (if
      kernel_size > 1).
  """
  # Effective kernel extent once atrous holes are inserted. Bug fix: the
  # width term previously reused kernel_size[0]; it must use kernel_size[1]
  # so non-square kernels pad correctly (square kernels are unaffected).
  kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
                           kernel_size[1] + (kernel_size[1] - 1) * (rate - 1)]
  pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
  pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
  pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
  padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],
                                  [pad_beg[1], pad_end[1]], [0, 0]])
  return padded_inputs
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def _split_divisible(num, num_ways, divisible_by=8):
"""Evenly splits num, num_ways so each piece is a multiple of divisible_by."""
assert num % divisible_by == 0
assert num / num_ways >= divisible_by
# Note: want to round down, we adjust each split to match the total.
base = num // num_ways // divisible_by * divisible_by
result = []
accumulated = 0
for i in range(num_ways):
r = base
while accumulated + r < num * (i + 1) / num_ways:
r += divisible_by
result.append(r)
accumulated += r
assert accumulated == num
return result
@contextlib.contextmanager
def _v1_compatible_scope_naming(scope):
  """Yields a scope-name prefix compatible with Mobilenet V1 checkpoints.

  With scope=None a uniquified 'separable' variable scope is opened and an
  empty prefix is yielded; otherwise 'scope' + '_' is yielded so callers build
  names like 'scope_depthwise' / 'scope_pointwise'.
  """
  if scope is None:  # Create uniqified separable blocks.
    with tf.variable_scope(None, default_name='separable') as s, \
         tf.name_scope(s.original_name_scope):
      yield ''
  else:
    # We use scope_depthwise, scope_pointwise for compatibility with V1 ckpts.
    # which provide numbered scopes.
    scope += '_'
    yield scope
@slim.add_arg_scope
def split_separable_conv2d(input_tensor,
                           num_outputs,
                           scope=None,
                           normalizer_fn=None,
                           stride=1,
                           rate=1,
                           endpoints=None,
                           use_explicit_padding=False):
  """Separable mobilenet V1 style convolution.

  Depthwise convolution, with default non-linearity,
  followed by 1x1 pointwise convolution. This is similar to
  slim.separable_conv2d, but differs in that it applies batch
  normalization and non-linearity to the depthwise stage. This matches
  the basic building block of the Mobilenet paper
  (https://arxiv.org/abs/1704.04861).

  Args:
    input_tensor: input
    num_outputs: number of outputs
    scope: optional name of the scope. Note if provided it will use
      scope_depthwise for depthwise, and scope_pointwise for pointwise.
    normalizer_fn: which normalizer function to use for depthwise/pointwise
    stride: stride
    rate: output rate (also known as dilation rate)
    endpoints: optional, if provided, will export additional tensors to it.
    use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
      inputs so that the output dimensions are the same as if 'SAME' padding
      were used.

  Returns:
    output tensor
  """
  with _v1_compatible_scope_naming(scope) as scope:
    dw_scope = scope + 'depthwise'
    endpoints = endpoints if endpoints is not None else {}
    kernel_size = [3, 3]
    padding = 'SAME'
    if use_explicit_padding:
      padding = 'VALID'
      input_tensor = _fixed_padding(input_tensor, kernel_size, rate)
    # Depthwise stage: num_outputs=None makes separable_conv2d skip its own
    # pointwise step, so only the depthwise convolution runs here.
    net = slim.separable_conv2d(
        input_tensor,
        None,
        kernel_size,
        depth_multiplier=1,
        stride=stride,
        rate=rate,
        normalizer_fn=normalizer_fn,
        padding=padding,
        scope=dw_scope)

    endpoints[dw_scope] = net

    pw_scope = scope + 'pointwise'
    net = slim.conv2d(
        net,
        num_outputs, [1, 1],
        stride=1,
        normalizer_fn=normalizer_fn,
        scope=pw_scope)
    endpoints[pw_scope] = net
    return net
def expand_input_by_factor(n, divisible_by=8):
  """Returns a callable computing an expansion size of `n` x num_inputs.

  The returned function accepts `num_inputs` (plus arbitrary ignored keyword
  arguments) and rounds the product via _make_divisible.
  """
  def _expansion_size(num_inputs, **_):
    return _make_divisible(num_inputs * n, divisible_by)
  return _expansion_size
@slim.add_arg_scope
def expanded_conv(input_tensor,
                  num_outputs,
                  expansion_size=expand_input_by_factor(6),
                  stride=1,
                  rate=1,
                  kernel_size=(3, 3),
                  residual=True,
                  normalizer_fn=None,
                  project_activation_fn=tf.identity,
                  split_projection=1,
                  split_expansion=1,
                  expansion_transform=None,
                  depthwise_location='expansion',
                  depthwise_channel_multiplier=1,
                  endpoints=None,
                  use_explicit_padding=False,
                  padding='SAME',
                  scope=None):
  """Depthwise Convolution Block with expansion.

  Builds a composite convolution that has the following structure
  expansion (1x1) -> depthwise (kernel_size) -> projection (1x1)

  Args:
    input_tensor: input
    num_outputs: number of outputs in the final layer.
    expansion_size: the size of expansion, could be a constant or a callable.
      If latter it will be provided 'num_inputs' as an input. For forward
      compatibility it should accept arbitrary keyword arguments.
      Default will expand the input by factor of 6.
    stride: depthwise stride
    rate: depthwise rate
    kernel_size: depthwise kernel
    residual: whether to include residual connection between input
      and output.
    normalizer_fn: batchnorm or otherwise
    project_activation_fn: activation function for the project layer
    split_projection: how many ways to split projection operator
      (that is conv expansion->bottleneck)
    split_expansion: how many ways to split expansion op
      (that is conv bottleneck->expansion) ops will keep depth divisible
      by this value.
    expansion_transform: Optional function that takes expansion
      as a single input and returns output.
    depthwise_location: where to put depthwise convolutions supported
      values None, 'input', 'output', 'expansion'
    depthwise_channel_multiplier: depthwise channel multiplier:
      each input will replicated (with different filters)
      that many times. So if input had c channels,
      output will have c x depthwise_channel_multiplier.
    endpoints: An optional dictionary into which intermediate endpoints are
      placed. The keys "expansion_output", "depthwise_output",
      "projection_output" and "expansion_transform" are always populated, even
      if the corresponding functions are not invoked.
    use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
      inputs so that the output dimensions are the same as if 'SAME' padding
      were used.
    padding: Padding type to use if `use_explicit_padding` is not set.
    scope: optional scope.

  Returns:
    Tensor of depth num_outputs

  Raises:
    TypeError: on invalid arguments
  """
  with tf.variable_scope(scope, default_name='expanded_conv') as s, \
       tf.name_scope(s.original_name_scope):
    prev_depth = input_tensor.get_shape().as_list()[3]
    if depthwise_location not in [None, 'input', 'output', 'expansion']:
      raise TypeError('%r is unknown value for depthwise_location' %
                      depthwise_location)
    if use_explicit_padding:
      if padding != 'SAME':
        raise TypeError('`use_explicit_padding` should only be used with '
                        '"SAME" padding.')
      padding = 'VALID'
    # Pre-bind the shared depthwise-conv arguments; call sites below only
    # decide whether to apply it and with which activation.
    depthwise_func = functools.partial(
        slim.separable_conv2d,
        num_outputs=None,
        kernel_size=kernel_size,
        depth_multiplier=depthwise_channel_multiplier,
        stride=stride,
        rate=rate,
        normalizer_fn=normalizer_fn,
        padding=padding,
        scope='depthwise')
    # b1 -> b2 * r -> b2
    #   i -> (o * r) (bottleneck) -> o
    input_tensor = tf.identity(input_tensor, 'input')
    net = input_tensor
    if depthwise_location == 'input':
      if use_explicit_padding:
        net = _fixed_padding(net, kernel_size, rate)
      net = depthwise_func(net, activation_fn=None)

    if callable(expansion_size):
      inner_size = expansion_size(num_inputs=prev_depth)
    else:
      inner_size = expansion_size

    # Only add the expansion 1x1 if it would actually widen the tensor.
    if inner_size > net.shape[3]:
      net = split_conv(
          net,
          inner_size,
          num_ways=split_expansion,
          scope='expand',
          stride=1,
          normalizer_fn=normalizer_fn)
      net = tf.identity(net, 'expansion_output')
    if endpoints is not None:
      endpoints['expansion_output'] = net

    if depthwise_location == 'expansion':
      if use_explicit_padding:
        net = _fixed_padding(net, kernel_size, rate)
      net = depthwise_func(net)

    net = tf.identity(net, name='depthwise_output')
    if endpoints is not None:
      endpoints['depthwise_output'] = net
    if expansion_transform:
      net = expansion_transform(expansion_tensor=net, input_tensor=input_tensor)
    # Note in contrast with expansion, we always have
    # projection to produce the desired output size.
    net = split_conv(
        net,
        num_outputs,
        num_ways=split_projection,
        stride=1,
        scope='project',
        normalizer_fn=normalizer_fn,
        activation_fn=project_activation_fn)
    if endpoints is not None:
      endpoints['projection_output'] = net
    if depthwise_location == 'output':
      if use_explicit_padding:
        net = _fixed_padding(net, kernel_size, rate)
      net = depthwise_func(net, activation_fn=None)

    if callable(residual):  # custom residual
      net = residual(input_tensor=input_tensor, output_tensor=net)
    elif (residual and
          # stride check enforces that we don't add residuals when spatial
          # dimensions are None
          stride == 1 and
          # Depth matches
          net.get_shape().as_list()[3] ==
          input_tensor.get_shape().as_list()[3]):
      net += input_tensor
    return tf.identity(net, name='output')
def split_conv(input_tensor,
               num_outputs,
               num_ways,
               scope,
               divisible_by=8,
               **kwargs):
  """Creates a split convolution.

  Split convolution splits the input and output into
  'num_ways' blocks of approximately the same size each,
  and only connects $i$-th input to $i$-th output.

  Args:
    input_tensor: input tensor
    num_outputs: number of output filters
    num_ways: num blocks to split by.
    scope: scope for all the operators.
    divisible_by: make sure that every part is divisible by this.
    **kwargs: will be passed directly into conv2d operator

  Returns:
    tensor
  """
  b = input_tensor.get_shape().as_list()[3]

  if num_ways == 1 or min(b // num_ways,
                          num_outputs // num_ways) < divisible_by:
    # Don't do any splitting if we end up with less than 8 filters
    # on either side.
    return slim.conv2d(input_tensor, num_outputs, [1, 1], scope=scope, **kwargs)

  outs = []
  input_splits = _split_divisible(b, num_ways, divisible_by=divisible_by)
  output_splits = _split_divisible(
      num_outputs, num_ways, divisible_by=divisible_by)
  inputs = tf.split(input_tensor, input_splits, axis=3, name='split_' + scope)
  base = scope
  # Run an independent 1x1 conv on each channel slice, then re-concatenate.
  for i, (input_tensor, out_size) in enumerate(zip(inputs, output_splits)):
    scope = base + '_part_%d' % (i,)
    n = slim.conv2d(input_tensor, out_size, [1, 1], scope=scope, **kwargs)
    n = tf.identity(n, scope + '_output')
    outs.append(n)
  return tf.concat(outs, 3, name=scope + '_concat')
| 13,146 | 35.62117 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier_visNet/nets/mobilenet/mobilenet_v2_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mobilenet_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v2
slim = tf.contrib.slim
def find_ops(optype):
  """Find ops of a given type in graphdef or a graph.

  Args:
    optype: operation type (e.g. Conv2D)
  Returns:
    List of operations.
  """
  matching = []
  for operation in tf.get_default_graph().get_operations():
    if operation.type == optype:
      matching.append(operation)
  return matching
class MobilenetV2Test(tf.test.TestCase):
  """Graph-construction tests for the MobilenetV2 builder."""

  def setUp(self):
    tf.reset_default_graph()

  def testCreation(self):
    spec = dict(mobilenet_v2.V2_DEF)
    _, ep = mobilenet.mobilenet(
        tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)
    num_convs = len(find_ops('Conv2D'))

    # This is mostly a sanity test. No deep reason for these particular
    # constants.
    #
    # All but first 2 and last one have two convolutions, and there is one
    # extra conv that is not in the spec. (logits)
    self.assertEqual(num_convs, len(spec['spec']) * 2 - 2)

    # Check that depthwise are exposed.
    for i in range(2, 17):
      self.assertIn('layer_%d/depthwise_output' % i, ep)

  def testCreationNoClasses(self):
    spec = copy.deepcopy(mobilenet_v2.V2_DEF)
    net, ep = mobilenet.mobilenet(
        tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec,
        num_classes=None)
    # With no classifier head, the returned net is the global pool endpoint.
    self.assertIs(net, ep['global_pool'])

  def testImageSizes(self):
    for input_size, output_size in [(224, 7), (192, 6), (160, 5),
                                    (128, 4), (96, 3)]:
      tf.reset_default_graph()
      _, ep = mobilenet_v2.mobilenet(
          tf.placeholder(tf.float32, (10, input_size, input_size, 3)))

      self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3],
                       [output_size] * 2)

  def testWithSplits(self):
    spec = copy.deepcopy(mobilenet_v2.V2_DEF)
    spec['overrides'] = {
        (ops.expanded_conv,): dict(split_expansion=2),
    }
    _, _ = mobilenet.mobilenet(
        tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)
    num_convs = len(find_ops('Conv2D'))
    # All but 3 ops have 3 conv operators, the remaining 3 have one,
    # and there is one unaccounted for.
    self.assertEqual(num_convs, len(spec['spec']) * 3 - 5)

  def testWithOutputStride8(self):
    out, _ = mobilenet.mobilenet_base(
        tf.placeholder(tf.float32, (10, 224, 224, 16)),
        conv_defs=mobilenet_v2.V2_DEF,
        output_stride=8,
        scope='MobilenetV2')
    self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])

  def testDivisibleBy(self):
    tf.reset_default_graph()
    mobilenet_v2.mobilenet(
        tf.placeholder(tf.float32, (10, 224, 224, 16)),
        conv_defs=mobilenet_v2.V2_DEF,
        divisible_by=16,
        min_depth=32)
    s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
    s = set(s)
    self.assertSameElements([32, 64, 96, 160, 192, 320, 384, 576, 960, 1280,
                             1001], s)

  def testDivisibleByWithArgScope(self):
    tf.reset_default_graph()
    # Verifies that depth_multiplier arg scope actually works
    # if no default min_depth is provided.
    with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
      mobilenet_v2.mobilenet(
          tf.placeholder(tf.float32, (10, 224, 224, 2)),
          conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)
      s = [op.outputs[0].get_shape().as_list()[-1]
           for op in find_ops('Conv2D')]
      s = set(s)
      self.assertSameElements(s, [32, 192, 128, 1001])

  def testFineGrained(self):
    tf.reset_default_graph()
    # Verifies that depth_multiplier arg scope actually works
    # if no default min_depth is provided.
    mobilenet_v2.mobilenet(
        tf.placeholder(tf.float32, (10, 224, 224, 2)),
        conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01,
        finegrain_classification_mode=True)
    s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
    s = set(s)
    # All convolutions will be 8->48, except for the last one.
    self.assertSameElements(s, [8, 48, 1001, 1280])

  def testMobilenetBase(self):
    tf.reset_default_graph()
    # Verifies that mobilenet_base returns pre-pooling layer.
    with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
      net, _ = mobilenet_v2.mobilenet_base(
          tf.placeholder(tf.float32, (10, 224, 224, 16)),
          conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)
      self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128])

  def testWithOutputStride16(self):
    tf.reset_default_graph()
    out, _ = mobilenet.mobilenet_base(
        tf.placeholder(tf.float32, (10, 224, 224, 16)),
        conv_defs=mobilenet_v2.V2_DEF,
        output_stride=16)
    self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])

  def testWithOutputStride8AndExplicitPadding(self):
    tf.reset_default_graph()
    out, _ = mobilenet.mobilenet_base(
        tf.placeholder(tf.float32, (10, 224, 224, 16)),
        conv_defs=mobilenet_v2.V2_DEF,
        output_stride=8,
        use_explicit_padding=True,
        scope='MobilenetV2')
    self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])

  def testWithOutputStride16AndExplicitPadding(self):
    tf.reset_default_graph()
    out, _ = mobilenet.mobilenet_base(
        tf.placeholder(tf.float32, (10, 224, 224, 16)),
        conv_defs=mobilenet_v2.V2_DEF,
        output_stride=16,
        use_explicit_padding=True)
    self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])

  def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self):
    sc = mobilenet.training_scope(is_training=None)
    self.assertNotIn('is_training', sc[slim.arg_scope_func_key(
        slim.batch_norm)])

  def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self):
    sc = mobilenet.training_scope(is_training=False)
    self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
    sc = mobilenet.training_scope(is_training=True)
    self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
    sc = mobilenet.training_scope()
    self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
# Run the test suite when executed directly.
if __name__ == '__main__':
  tf.test.main()
| 7,083 | 36.284211 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier_visNet/nets/mobilenet/__init__.py | 0 | 0 | 0 | py | |
GANFingerprints | GANFingerprints-master/classifier_visNet/nets/mobilenet/mobilenet.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mobilenet Base Class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import os
import tensorflow as tf
slim = tf.contrib.slim
@slim.add_arg_scope
def apply_activation(x, name=None, activation_fn=None):
  """Runs `activation_fn` on `x` when one is supplied, else passes `x` through."""
  if not activation_fn:
    return x
  return activation_fn(x, name=name)
def _fixed_padding(inputs, kernel_size, rate=1):
  """Pads the input along the spatial dimensions independently of input size.

  Pads the input such that if it was used in a convolution with 'VALID'
  padding, the output would have the same dimensions as if the unpadded input
  was used in a convolution with 'SAME' padding.

  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
    rate: An integer, rate for atrous convolution.

  Returns:
    output: A tensor of size [batch, height_out, width_out, channels] with the
      input, either intact (if kernel_size == 1) or padded (if
      kernel_size > 1).
  """
  # Effective kernel extent once atrous holes are inserted. Bug fix: the
  # width term previously reused kernel_size[0]; it must use kernel_size[1]
  # so non-square kernels pad correctly (square kernels are unaffected).
  kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
                           kernel_size[1] + (kernel_size[1] - 1) * (rate - 1)]
  pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
  pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
  pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
  padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],
                                  [pad_beg[1], pad_end[1]], [0, 0]])
  return padded_inputs
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
@contextlib.contextmanager
def _set_arg_scope_defaults(defaults):
  """Sets arg scope defaults for all items present in defaults.

  Args:
    defaults: dictionary/list of pairs, containing a mapping from
      function to a dictionary of default args.

  Yields:
    context manager where all defaults are set.
  """
  if hasattr(defaults, 'items'):
    items = list(defaults.items())
  else:
    items = defaults
  if not items:
    yield
  else:
    # Recursively nest one slim.arg_scope per (func, defaults) pair so that
    # every entry is active simultaneously inside the yielded context.
    func, default_arg = items[0]
    with slim.arg_scope(func, **default_arg):
      with _set_arg_scope_defaults(items[1:]):
        yield
@slim.add_arg_scope
def depth_multiplier(output_params,
                     multiplier,
                     divisible_by=8,
                     min_depth=8,
                     **unused_kwargs):
  """Scales 'num_outputs' in `output_params` in place by `multiplier`.

  Params without a 'num_outputs' key are left untouched. The scaled value is
  rounded with _make_divisible so it stays a multiple of `divisible_by` and
  at least `min_depth`.
  """
  if 'num_outputs' not in output_params:
    return
  scaled = output_params['num_outputs'] * multiplier
  output_params['num_outputs'] = _make_divisible(
      scaled, divisible_by, min_depth)
# Schema for one layer of a conv_def 'spec': the layer-building function, its
# keyword parameters, and the function used to scale its channel count.
_Op = collections.namedtuple('Op', ['op', 'params', 'multiplier_func'])


def op(opfunc, **params):
  """Wraps `opfunc` and its params into an _Op record for conv_def specs."""
  # NOTE(review): the keyword 'multiplier_transorm' is misspelled, but it is
  # part of the accepted API surface — renaming it would break callers.
  multiplier = params.pop('multiplier_transorm', depth_multiplier)
  return _Op(opfunc, params=params, multiplier_func=multiplier)
class NoOpScope(object):
  """Stand-in context manager: yields None and suppresses nothing."""

  def __enter__(self):
    return None

  def __exit__(self, *unused_exc_info):
    # Returning False propagates any exception raised in the body.
    return False
def safe_arg_scope(funcs, **kwargs):
  """Returns `slim.arg_scope` with all None arguments removed.

  Arguments:
    funcs: Functions to pass to `arg_scope`.
    **kwargs: Arguments to pass to `arg_scope`.

  Returns:
    arg_scope or No-op context manager.

  Note: can be useful if None value should be interpreted as "do not overwrite
    this parameter value".
  """
  # Drop entries whose value is None so arg_scope cannot override existing
  # defaults with None.
  filtered_args = {name: value for name, value in kwargs.items()
                   if value is not None}
  if filtered_args:
    return slim.arg_scope(funcs, **filtered_args)
  else:
    return NoOpScope()
@slim.add_arg_scope
def mobilenet_base(  # pylint: disable=invalid-name
    inputs,
    conv_defs,
    multiplier=1.0,
    final_endpoint=None,
    output_stride=None,
    use_explicit_padding=False,
    scope=None,
    is_training=False):
  """Mobilenet base network.
  Constructs a network from inputs to the given final endpoint. By default
  the network is constructed in inference mode. To create network
  in training mode use:
  with slim.arg_scope(mobilenet.training_scope()):
     logits, endpoints = mobilenet_base(...)
  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    conv_defs: A list of op(...) layers specifying the net architecture.
    multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    final_endpoint: The name of last layer, for early termination for
      for V1-based networks: last layer is "layer_14", for V2: "layer_20"
    output_stride: An integer that specifies the requested ratio of input to
      output spatial resolution. If not None, then we invoke atrous convolution
      if necessary to prevent the network from reducing the spatial resolution
      of the activation maps. Allowed values are 1 or any even number, excluding
      zero. Typical values are 8 (accurate fully convolutional mode), 16
      (fast fully convolutional mode), and 32 (classification mode).
      NOTE- output_stride relies on all consequent operators to support dilated
      operators via "rate" parameter. This might require wrapping non-conv
      operators to operate properly.
    use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
      inputs so that the output dimensions are the same as if 'SAME' padding
      were used.
    scope: optional variable scope.
    is_training: How to setup batch_norm and other ops. Note: most of the time
      this does not need be set directly. Use mobilenet.training_scope() to set
      up training instead. This parameter is here for backward compatibility
      only. It is safe to set it to the value matching
      training_scope(is_training=...). It is also safe to explicitly set
      it to False, even if there is outer training_scope set to to training.
      (The network will be built in inference mode). If this is set to None,
      no arg_scope is added for slim.batch_norm's is_training parameter.
  Returns:
    tensor_out: output tensor.
    end_points: a set of activations for external use, for example summaries or
                losses.
  Raises:
    ValueError: depth_multiplier <= 0, or the target output_stride is not
                allowed.
  """
  if multiplier <= 0:
    raise ValueError('multiplier is not greater than zero.')
  # Set conv defs defaults and overrides.
  conv_defs_defaults = conv_defs.get('defaults', {})
  conv_defs_overrides = conv_defs.get('overrides', {})
  if use_explicit_padding:
    # Deep-copy before mutating so the caller's conv_defs stays untouched.
    conv_defs_overrides = copy.deepcopy(conv_defs_overrides)
    conv_defs_overrides[
        (slim.conv2d, slim.separable_conv2d)] = {'padding': 'VALID'}
  if output_stride is not None:
    if output_stride == 0 or (output_stride > 1 and output_stride % 2):
      raise ValueError('Output stride must be None, 1 or a multiple of 2.')
  # a) Set the tensorflow scope
  # b) set padding to default: note we might consider removing this
  # since it is also set by mobilenet_scope
  # c) set all defaults
  # d) set all extra overrides.
  with _scope_all(scope, default_scope='Mobilenet'), \
      safe_arg_scope([slim.batch_norm], is_training=is_training), \
      _set_arg_scope_defaults(conv_defs_defaults), \
      _set_arg_scope_defaults(conv_defs_overrides):
    # The current_stride variable keeps track of the output stride of the
    # activations, i.e., the running product of convolution strides up to the
    # current network layer. This allows us to invoke atrous convolution
    # whenever applying the next convolution would result in the activations
    # having output stride larger than the target output_stride.
    current_stride = 1
    # The atrous convolution rate parameter.
    rate = 1
    net = inputs
    # Insert default parameters before the base scope which includes
    # any custom overrides set in mobilenet.
    end_points = {}
    scopes = {}
    for i, opdef in enumerate(conv_defs['spec']):
      # Copy the spec's params and apply the depth multiplier to them.
      params = dict(opdef.params)
      opdef.multiplier_func(params, multiplier)
      stride = params.get('stride', 1)
      if output_stride is not None and current_stride == output_stride:
        # If we have reached the target output_stride, then we need to employ
        # atrous convolution with stride=1 and multiply the atrous rate by the
        # current unit's stride for use in subsequent layers.
        layer_stride = 1
        layer_rate = rate
        rate *= stride
      else:
        layer_stride = stride
        layer_rate = 1
        current_stride *= stride
      # Update params.
      params['stride'] = layer_stride
      # Only insert rate to params if rate > 1.
      if layer_rate > 1:
        params['rate'] = layer_rate
      # Set padding
      if use_explicit_padding:
        if 'kernel_size' in params:
          # Pre-pad the input so VALID convolution matches SAME output size.
          net = _fixed_padding(net, params['kernel_size'], layer_rate)
        else:
          params['use_explicit_padding'] = True
      end_point = 'layer_%d' % (i + 1)
      try:
        net = opdef.op(net, **params)
      except Exception:
        # Report which layer failed (and with what params) before re-raising.
        print('Failed to create op %i: %r params: %r' % (i, opdef, params))
        raise
      end_points[end_point] = net
      # Remember which variable scope produced this endpoint so intermediate
      # tensors can be matched back to it below.
      scope = os.path.dirname(net.name)
      scopes[scope] = end_point
      if final_endpoint is not None and end_point == final_endpoint:
        break
    # Add all tensors that end with 'output' to
    # endpoints
    for t in net.graph.get_operations():
      scope = os.path.dirname(t.name)
      bn = os.path.basename(t.name)
      if scope in scopes and t.name.endswith('output'):
        end_points[scopes[scope] + '/' + bn] = t.outputs[0]
  return net, end_points
@contextlib.contextmanager
def _scope_all(scope, default_scope=None):
  """Opens a variable_scope together with its matching name_scope."""
  with tf.variable_scope(scope, default_name=default_scope) as s,\
      tf.name_scope(s.original_name_scope):
    yield s
@slim.add_arg_scope
def mobilenet(inputs,
              num_classes=1001,
              prediction_fn=slim.softmax,
              reuse=None,
              scope='Mobilenet',
              base_only=False,
              **mobilenet_args):
  """Mobilenet model for classification, supports both V1 and V2.
  Note: default mode is inference, use mobilenet.training_scope to create
  training network.
  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer
      is omitted and the input features to the logits layer (before dropout)
      are returned instead.
    prediction_fn: a function to get predictions out of logits
      (default softmax).
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.
    base_only: if True will only create the base of the network (no pooling
      and no logits).
    **mobilenet_args: passed to mobilenet_base verbatim.
      - conv_defs: list of conv defs
      - multiplier: Float multiplier for the depth (number of channels)
        for all convolution ops. The value must be greater than zero. Typical
        usage will be to set this value in (0, 1) to reduce the number of
        parameters or computation cost of the model.
      - output_stride: will ensure that the last layer has at most total stride.
        If the architecture calls for more stride than that provided
        (e.g. output_stride=16, but the architecture has 5 stride=2 operators),
        it will replace output_stride with fractional convolutions using Atrous
        Convolutions.
  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes]
    end_points: a dictionary from components of the network to the corresponding
      activation tensor.
  Raises:
    ValueError: Input rank is invalid.
  """
  is_training = mobilenet_args.get('is_training', False)
  input_shape = inputs.get_shape().as_list()
  if len(input_shape) != 4:
    raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))
  with tf.variable_scope(scope, 'Mobilenet', reuse=reuse) as scope:
    # Named tensor so the graph input can be located by name.
    inputs = tf.identity(inputs, 'input')
    net, end_points = mobilenet_base(inputs, scope=scope, **mobilenet_args)
    if base_only:
      return net, end_points
    # Named tensor exposing the final feature map as 'embedding'.
    net = tf.identity(net, name='embedding')
    with tf.variable_scope('Logits'):
      net = global_pool(net)
      end_points['global_pool'] = net
      if not num_classes:
        # Classification head disabled: return pooled features instead.
        return net, end_points
      net = slim.dropout(net, scope='Dropout', is_training=is_training)
      # 1 x 1 x num_classes
      # Note: legacy scope name.
      logits = slim.conv2d(
          net,
          num_classes, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          biases_initializer=tf.zeros_initializer(),
          scope='Conv2d_1c_1x1')
      # Drop the spatial 1x1 dims -> [batch_size, num_classes].
      logits = tf.squeeze(logits, [1, 2])
      logits = tf.identity(logits, name='output')
  end_points['Logits'] = logits
  if prediction_fn:
    end_points['Predictions'] = prediction_fn(logits, 'Predictions')
  return logits, end_points
def global_pool(input_tensor, pool_op=tf.nn.avg_pool):
  """Applies avg pool to produce 1x1 output.
  NOTE: This function is functionally equivalent to reduce_mean, but it has
  baked in average pool which has better support across hardware.
  Args:
    input_tensor: input tensor
    pool_op: pooling op (avg pool is default)
  Returns:
    a tensor batch_size x 1 x 1 x depth.
  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    # Spatial dims unknown at graph-construction time: build the kernel
    # size dynamically from the runtime tensor shape.
    kernel_size = tf.convert_to_tensor(
        [1, tf.shape(input_tensor)[1],
         tf.shape(input_tensor)[2], 1])
  else:
    kernel_size = [1, shape[1], shape[2], 1]
  output = pool_op(
      input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
  # Recover output shape, for unknown shape.
  output.set_shape([None, 1, 1, None])
  return output
def training_scope(is_training=True,
                   weight_decay=0.00004,
                   stddev=0.09,
                   dropout_keep_prob=0.8,
                   bn_decay=0.997):
  """Defines Mobilenet training scope.
  Usage:
  with tf.contrib.slim.arg_scope(mobilenet.training_scope()):
    logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
  # the network created will be trainble with dropout/batch norm
  # initialized appropriately.
  Args:
    is_training: if set to False this will ensure that all customizations are
      set to non-training mode. This might be helpful for code that is reused
      across both training/evaluation, but most of the time training_scope with
      value False is not needed. If this is set to None, the parameters is not
      added to the batch_norm arg_scope.
    weight_decay: The weight decay to use for regularizing the model.
    stddev: Standard deviation for initialization, if negative uses xavier.
    dropout_keep_prob: dropout keep probability (not set if equals to None).
    bn_decay: decay for the batch norm moving averages (not set if equals to
      None).
  Returns:
    An argument scope to use via arg_scope.
  """
  # Note: do not introduce parameters that would change the inference
  # model here (for example whether to use bias), modify conv_def instead.
  batch_norm_params = {
      'decay': bn_decay,
      'is_training': is_training
  }
  # NOTE(review): local name 'weight_intitializer' is misspelled; safe to
  # rename in a code-changing pass.
  if stddev < 0:
    weight_intitializer = slim.initializers.xavier_initializer()
  else:
    weight_intitializer = tf.truncated_normal_initializer(stddev=stddev)
  # Set weight_decay for weights in Conv and FC layers.
  # The scopes are chained in order; later arg_scopes can refine earlier
  # defaults (e.g. separable_conv2d gets no weight regularizer).
  with slim.arg_scope(
      [slim.conv2d, slim.fully_connected, slim.separable_conv2d],
      weights_initializer=weight_intitializer,
      normalizer_fn=slim.batch_norm), \
      slim.arg_scope([mobilenet_base, mobilenet], is_training=is_training),\
      safe_arg_scope([slim.batch_norm], **batch_norm_params), \
      safe_arg_scope([slim.dropout], is_training=is_training,
                     keep_prob=dropout_keep_prob), \
      slim.arg_scope([slim.conv2d], \
                     weights_regularizer=slim.l2_regularizer(weight_decay)), \
      slim.arg_scope([slim.separable_conv2d], weights_regularizer=None) as s:
    return s
| 17,332 | 36.036325 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier_visNet/metrics/sliced_wasserstein.py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import numpy as np
import scipy.ndimage
#----------------------------------------------------------------------------
def get_descriptors_for_minibatch(minibatch, nhood_size, nhoods_per_image):
    """Samples square pixel neighborhoods from an NCHW RGB minibatch.

    Draws `nhoods_per_image` random nhood_size x nhood_size patches per image
    and gathers them all with a single fancy-index into the flattened array.
    Returns an array of shape (num_images * nhoods_per_image, 3, nhood_size,
    nhood_size).
    """
    shape = minibatch.shape # (minibatch, channel, height, width)
    assert len(shape) == 4 and shape[1] == 3
    num = nhoods_per_image * shape[0]
    half = nhood_size // 2
    nhood, chan, x, y = np.ogrid[0:num, 0:3, -half:half+1, -half:half+1]
    img = nhood // nhoods_per_image
    # Random patch centers kept `half` pixels away from the borders.
    # NOTE: x is drawn before y, preserving the original RNG stream order.
    x = x + np.random.randint(half, shape[3] - half, size=(num, 1, 1, 1))
    y = y + np.random.randint(half, shape[2] - half, size=(num, 1, 1, 1))
    # Fold (img, chan, y, x) into linear indices into minibatch.flat.
    idx = ((img * shape[1] + chan) * shape[2] + y) * shape[3] + x
    return minibatch.flat[idx]
#----------------------------------------------------------------------------
def finalize_descriptors(desc):
    """Normalizes descriptors per channel and flattens them to 2-D.

    Accepts a single (N, C, H, W) array or a list of such arrays (which is
    concatenated along axis 0). Each channel is standardized in place to zero
    mean and unit std over all neighborhoods and spatial positions, then the
    result is reshaped to (N, C*H*W).
    """
    if isinstance(desc, list):
        desc = np.concatenate(desc, axis=0)
    assert desc.ndim == 4 # (neighborhood, channel, height, width)
    channel_axes = (0, 2, 3)
    desc -= np.mean(desc, axis=channel_axes, keepdims=True)
    desc /= np.std(desc, axis=channel_axes, keepdims=True)
    return desc.reshape(desc.shape[0], -1)
#----------------------------------------------------------------------------
def sliced_wasserstein(A, B, dir_repeats, dirs_per_repeat):
    """Approximates the sliced Wasserstein distance between descriptor sets.

    Repeatedly projects both sets onto random unit directions, sorts the
    projections per direction (1-D optimal transport), and averages the
    pointwise distances over directions, neighborhoods, and repeats.
    """
    assert A.ndim == 2 and A.shape == B.shape # (neighborhood, descriptor_component)
    repeat_means = []
    for _ in range(dir_repeats):
        dirs = np.random.randn(A.shape[1], dirs_per_repeat) # (component, direction)
        dirs /= np.sqrt(np.sum(np.square(dirs), axis=0, keepdims=True)) # unit-normalize each direction
        dirs = dirs.astype(np.float32)
        proj_a = np.sort(np.matmul(A, dirs), axis=0) # sorted projections per direction
        proj_b = np.sort(np.matmul(B, dirs), axis=0)
        # Pointwise 1-D Wasserstein distances, averaged over everything.
        repeat_means.append(np.mean(np.abs(proj_a - proj_b)))
    return np.mean(repeat_means) # average over repeats
#----------------------------------------------------------------------------
def downscale_minibatch(minibatch, lod):
    """Box-downsamples an NCHW uint8 minibatch by 2**lod; lod == 0 is a no-op."""
    if lod == 0:
        return minibatch
    acc = minibatch.astype(np.float32)
    for _ in range(lod):
        # Average each 2x2 pixel block.
        acc = (acc[:, :, 0::2, 0::2] + acc[:, :, 0::2, 1::2] +
               acc[:, :, 1::2, 0::2] + acc[:, :, 1::2, 1::2]) * 0.25
    return np.round(acc).clip(0, 255).astype(np.uint8)
#----------------------------------------------------------------------------
# 5x5 normalized binomial kernel (Gaussian approximation) summing to 1.
# This is the same kernel cv2.pyrDown()/pyrUp() use internally.
gaussian_filter = np.float32([
    [1, 4, 6, 4, 1],
    [4, 16, 24, 16, 4],
    [6, 24, 36, 24, 6],
    [4, 16, 24, 16, 4],
    [1, 4, 6, 4, 1]]) / 256.0
def pyr_down(minibatch): # matches cv2.pyrDown()
    """Blurs with the 5x5 Gaussian kernel, then drops every other row/column."""
    assert minibatch.ndim == 4
    return scipy.ndimage.convolve(minibatch, gaussian_filter[np.newaxis, np.newaxis, :, :], mode='mirror')[:, :, ::2, ::2]
def pyr_up(minibatch): # matches cv2.pyrUp()
    """Upsamples 2x by zero-insertion followed by a Gaussian blur."""
    assert minibatch.ndim == 4
    S = minibatch.shape
    res = np.zeros((S[0], S[1], S[2] * 2, S[3] * 2), minibatch.dtype)
    res[:, :, ::2, ::2] = minibatch
    # Kernel scaled by 4 to preserve overall brightness after zero-stuffing.
    return scipy.ndimage.convolve(res, gaussian_filter[np.newaxis, np.newaxis, :, :] * 4.0, mode='mirror')
def generate_laplacian_pyramid(minibatch, num_levels):
    """Decomposes an NCHW minibatch into a `num_levels` Laplacian pyramid.

    Each level stores the detail lost between successive blur/downsample
    steps; the last level is the remaining low-pass residual.
    """
    pyramid = [np.float32(minibatch)]
    for _ in range(num_levels - 1):
        coarser = pyr_down(pyramid[-1])
        # Replace the finer level by its detail band (residual vs. upsampled).
        pyramid[-1] = pyramid[-1] - pyr_up(coarser)
        pyramid.append(coarser)
    return pyramid
def reconstruct_laplacian_pyramid(pyramid):
    """Collapses a Laplacian pyramid back into the original-resolution batch."""
    image = pyramid[-1]
    # Walk from the coarsest residual up, adding back each detail band.
    for band in reversed(pyramid[:-1]):
        image = pyr_up(image) + band
    return image
#----------------------------------------------------------------------------
class API:
    """Sliced Wasserstein distance (SWD) metric, one value per pyramid level.

    Descriptors are 7x7 patches sampled from every Laplacian-pyramid level;
    end() returns one SWD (scaled by 1e3) per resolution plus their average.
    """
    def __init__(self, num_images, image_shape, image_dtype, minibatch_size, num_levels=None):
        # Patch-sampling / random-projection hyperparameters.
        self.nhood_size = 7
        self.nhoods_per_image = 128
        self.dir_repeats = 4
        self.dirs_per_repeat = 128
        self.resolutions = []
        res = image_shape[1]
        if num_levels is None:
            # Default: one level per octave down to 16x16.
            while res >= 16:
                self.resolutions.append(res)
                res //= 2
        else:
            for count in range(num_levels):
                self.resolutions.append(res)
                res //= 2
    def get_metric_names(self):
        # One name per resolution, plus the average across resolutions.
        return ['SWDx1e3_%d' % res for res in self.resolutions] + ['SWDx1e3_avg']
    def get_metric_formatting(self):
        return ['%-13.4f'] * len(self.get_metric_names())
    def begin(self, mode):
        assert mode in ['warmup', 'reals', 'fakes']
        # One descriptor accumulator per pyramid level.
        self.descriptors = [[] for res in self.resolutions]
    def feed(self, mode, minibatch):
        # Sample patch descriptors from every Laplacian-pyramid level.
        for lod, level in enumerate(generate_laplacian_pyramid(minibatch, len(self.resolutions))):
            desc = get_descriptors_for_minibatch(level, self.nhood_size, self.nhoods_per_image)
            self.descriptors[lod].append(desc)
    def end(self, mode):
        desc = [finalize_descriptors(d) for d in self.descriptors]
        del self.descriptors
        # For 'warmup'/'reals' the descriptors become the stored reference, so
        # the distances below are computed against themselves (~0); later
        # 'fakes' passes are compared to that stored reference.
        if mode in ['warmup', 'reals']:
            self.desc_real = desc
        dist = [sliced_wasserstein(dreal, dfake, self.dir_repeats, self.dirs_per_repeat) for dreal, dfake in zip(self.desc_real, desc)]
        del desc
        dist = [d * 1e3 for d in dist] # multiply by 10^3
        return dist + [np.mean(dist)]
#----------------------------------------------------------------------------
| 5,977 | 41.397163 | 135 | py |
GANFingerprints | GANFingerprints-master/classifier_visNet/metrics/frechet_inception_distance.py | #!/usr/bin/env python3
#
# Copyright 2017 Martin Heusel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from the original implementation by Martin Heusel.
# Source https://github.com/bioinf-jku/TTUR/blob/master/fid.py
''' Calculates the Frechet Inception Distance (FID) to evalulate GANs.
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.
When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).
The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectivly.
See --help to see further details.
'''
from __future__ import absolute_import, division, print_function
import numpy as np
import scipy as sp
import os
import gzip, pickle
import tensorflow as tf
from scipy.misc import imread
import pathlib
import urllib
class InvalidFIDException(Exception):
    """Raised when the FID calculation produces an invalid value (e.g. NaN)."""
    pass
def create_inception_graph(pth):
    """Creates a graph from saved GraphDef file."""
    # Creates graph from saved graph_def.pb and imports it into the default
    # graph under the 'FID_Inception_Net' name scope.
    with tf.gfile.FastGFile( pth, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString( f.read())
        _ = tf.import_graph_def( graph_def, name='FID_Inception_Net')
#-------------------------------------------------------------------------------
# code for handling inception net derived from
# https://github.com/openai/improved-gan/blob/master/inception_score/model.py
def _get_inception_layer(sess):
    """Prepares inception net for batched usage and returns pool_3 layer. """
    layername = 'FID_Inception_Net/pool_3:0'
    pool3 = sess.graph.get_tensor_by_name(layername)
    ops = pool3.graph.get_operations()
    # The imported Inception graph hard-codes batch size 1 in its static
    # shapes. Rewrite every output shape whose leading dim is 1 to None so
    # arbitrary batch sizes can be fed. This pokes at private attributes
    # (_dims / _shape / _shape_val) because there is no public API for it.
    for op_idx, op in enumerate(ops):
        for o in op.outputs:
            shape = o.get_shape()
            if shape._dims is not None:
                shape = [s.value for s in shape]
                new_shape = []
                for j, s in enumerate(shape):
                    if s == 1 and j == 0:
                        # Make the batch dimension unknown.
                        new_shape.append(None)
                    else:
                        new_shape.append(s)
                try:
                    o._shape = tf.TensorShape(new_shape)
                except ValueError:
                    o._shape_val = tf.TensorShape(new_shape) # EDIT: added for compatibility with tensorflow 1.6.0
    return pool3
#-------------------------------------------------------------------------------
def get_activations(images, sess, batch_size=50, verbose=False):
    """Calculates the activations of the pool_3 layer for all images.
    Params:
    -- images : Numpy array of dimension (n_images, hi, wi, 3). The values
                must lie between 0 and 256.
    -- sess : current session
    -- batch_size : the images numpy array is split into batches with batch size
                    batch_size. A reasonable batch size depends on the disposable hardware.
    -- verbose : If set to True and parameter out_step is given, the number of calculated
                 batches is reported.
    Returns:
    -- A numpy array of dimension (num images, 2048) that contains the
       activations of the given tensor when feeding inception with the query tensor.
    """
    inception_layer = _get_inception_layer(sess)
    d0 = images.shape[0]
    if batch_size > d0:
        print("warning: batch size is bigger than the data size. setting batch size to data size")
        batch_size = d0
    # NOTE: images that do not fill a complete batch are silently dropped.
    n_batches = d0//batch_size
    n_used_imgs = n_batches*batch_size
    pred_arr = np.empty((n_used_imgs,2048))
    for i in range(n_batches):
        if verbose:
            print("\rPropagating batch %d/%d" % (i+1, n_batches), end="", flush=True)
        start = i*batch_size
        end = start + batch_size
        batch = images[start:end]
        pred = sess.run(inception_layer, {'FID_Inception_Net/ExpandDims:0': batch})
        # pool_3 output is (batch, 1, 1, 2048); flatten to (batch, 2048).
        pred_arr[start:end] = pred.reshape(batch_size,-1)
    if verbose:
        print(" done")
    return pred_arr
#-------------------------------------------------------------------------------
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2):
    """Numpy implementation of the Frechet Distance.

    For two multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2)
    the squared Frechet distance is
        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).

    Params:
    -- mu1, sigma1 : mean / covariance of pool_3 activations for one set
       (e.g. generated samples).
    -- mu2, sigma2 : mean / covariance precalculated on a reference set.

    Returns:
    -- The (real part of the) Frechet Distance.
    """
    mean_term = np.square(mu1 - mu2).sum()
    # disp=False returns the error estimate instead of printing a warning
    # when the matrix square root is inaccurate; the result may be complex.
    covmean, _ = sp.linalg.sqrtm(np.dot(sigma1, sigma2), disp=False)
    dist = mean_term + np.trace(sigma1 + sigma2 - 2 * covmean)
    # Discard any spurious imaginary component from sqrtm.
    return np.real(dist)
#-------------------------------------------------------------------------------
def calculate_activation_statistics(images, sess, batch_size=50, verbose=False):
    """Calculation of the statistics used by the FID.
    Params:
    -- images : Numpy array of dimension (n_images, hi, wi, 3). The values
                must lie between 0 and 255.
    -- sess : current session
    -- batch_size : batch size used when propagating images through Inception.
    -- verbose : If True, reports the number of processed batches.
    Returns:
    -- mu : mean over samples of the pool_3 activations.
    -- sigma : covariance matrix of the pool_3 activations.
    """
    activations = get_activations(images, sess, batch_size, verbose)
    # Statistics over the sample axis; rowvar=False treats each row as one
    # observation of the 2048-dim activation vector.
    return np.mean(activations, axis=0), np.cov(activations, rowvar=False)
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# The following functions aren't needed for calculating the FID
# they're just here to make this module work as a stand-alone script
# for calculating FID scores
#-------------------------------------------------------------------------------
def check_or_download_inception(inception_path):
    ''' Checks if the path to the inception file is valid, or downloads
        the file if it is not present.

        Returns the path to classify_image_graph_def.pb as a string. '''
    INCEPTION_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
    if inception_path is None:
        # Default cache location when no directory is supplied.
        inception_path = '/tmp'
    inception_path = pathlib.Path(inception_path)
    model_file = inception_path / 'classify_image_graph_def.pb'
    if not model_file.exists():
        print("Downloading Inception model")
        from urllib import request
        import tarfile
        fn, _ = request.urlretrieve(INCEPTION_URL)
        with tarfile.open(fn, mode='r') as f:
            # Extract only the frozen GraphDef from the tarball.
            f.extract('classify_image_graph_def.pb', str(model_file.parent))
    return str(model_file)
def _handle_path(path, sess):
    """Returns (mu, sigma) activation statistics for `path`.

    `path` may be a precomputed .npz file containing 'mu'/'sigma' arrays, or
    a directory of .jpg/.png images whose statistics are computed on the fly.
    """
    if path.endswith('.npz'):
        f = np.load(path)
        m, s = f['mu'][:], f['sigma'][:]
        f.close()
    else:
        path = pathlib.Path(path)
        files = list(path.glob('*.jpg')) + list(path.glob('*.png'))
        # NOTE(review): loads every image into memory at once and assumes all
        # images share the same dimensions — verify for large folders.
        x = np.array([imread(str(fn)).astype(np.float32) for fn in files])
        m, s = calculate_activation_statistics(x, sess)
    return m, s
def calculate_fid_given_paths(paths, inception_path):
    ''' Calculates the FID of two paths.

        paths: pair of image directories and/or .npz statistics files.
        inception_path: where to find (or download) the Inception model. '''
    inception_path = check_or_download_inception(inception_path)
    for p in paths:
        if not os.path.exists(p):
            raise RuntimeError("Invalid path: %s" % p)
    create_inception_graph(str(inception_path))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        m1, s1 = _handle_path(paths[0], sess)
        m2, s2 = _handle_path(paths[1], sess)
        fid_value = calculate_frechet_distance(m1, s1, m2, s2)
        return fid_value
if __name__ == "__main__":
    # Stand-alone CLI: compute the FID between two paths (image folders
    # and/or precomputed .npz statistics files).
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("path", type=str, nargs=2,
        help='Path to the generated images or to .npz statistic files')
    parser.add_argument("-i", "--inception", type=str, default=None,
        help='Path to Inception model (will be downloaded if not provided)')
    parser.add_argument("--gpu", default="", type=str,
        help='GPU to use (leave blank for CPU only)')
    args = parser.parse_args()
    # Restrict TensorFlow to the requested GPU (empty string = CPU only).
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    fid_value = calculate_fid_given_paths(args.path, args.inception)
    print("FID: ", fid_value)
#----------------------------------------------------------------------------
# EDIT: added
class API:
    """Adapter exposing FID through the repository's common metric API."""
    def __init__(self, num_images, image_shape, image_dtype, minibatch_size):
        import config
        # The Inception snapshot is cached under the experiment result dir.
        self.network_dir = os.path.join(config.result_dir, '_inception_fid')
        self.network_file = check_or_download_inception(self.network_dir)
        self.sess = tf.get_default_session()
        create_inception_graph(self.network_file)
    def get_metric_names(self):
        # Names of the values returned by end(), in order.
        return ['FID']
    def get_metric_formatting(self):
        # printf-style format for each metric value.
        return ['%-10.4f']
    def begin(self, mode):
        # Start accumulating activations for a new pass over the data.
        assert mode in ['warmup', 'reals', 'fakes']
        self.activations = []
    def feed(self, mode, minibatch):
        # minibatch is NCHW; the Inception graph expects NHWC.
        act = get_activations(minibatch.transpose(0,2,3,1), self.sess, batch_size=minibatch.shape[0])
        self.activations.append(act)
    def end(self, mode):
        act = np.concatenate(self.activations)
        mu = np.mean(act, axis=0)
        sigma = np.cov(act, rowvar=False)
        # For 'warmup'/'reals' the statistics become the stored reference, so
        # the distance below is computed against itself (~0); later 'fakes'
        # passes are compared to that stored reference.
        if mode in ['warmup', 'reals']:
            self.mu_real = mu
            self.sigma_real = sigma
        fid = calculate_frechet_distance(mu, sigma, self.mu_real, self.sigma_real)
        return [fid]
| 11,441 | 39.574468 | 110 | py |
GANFingerprints | GANFingerprints-master/classifier_visNet/metrics/ms_ssim.py | #!/usr/bin/python
#
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Adapted from the original implementation by The TensorFlow Authors.
# Source: https://github.com/tensorflow/models/blob/master/research/compression/image_encoder/msssim.py
import numpy as np
from scipy import signal
from scipy.ndimage.filters import convolve
def _FSpecialGauss(size, sigma):
"""Function to mimic the 'fspecial' gaussian MATLAB function."""
radius = size // 2
offset = 0.0
start, stop = -radius, radius + 1
if size % 2 == 0:
offset = 0.5
stop -= 1
x, y = np.mgrid[offset + start:stop, offset + start:stop]
assert len(x) == size
g = np.exp(-((x**2 + y**2)/(2.0 * sigma**2)))
return g / g.sum()
def _SSIMForMultiScale(img1, img2, max_val=255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03):
    """Return the Structural Similarity Map between `img1` and `img2`.
    This function attempts to match the functionality of ssim_index_new.m by
    Zhou Wang: http://www.cns.nyu.edu/~lcv/ssim/msssim.zip
    Arguments:
    img1: Numpy array holding the first RGB image batch.
    img2: Numpy array holding the second RGB image batch.
    max_val: the dynamic range of the images (i.e., the difference between the
      maximum the and minimum allowed values).
    filter_size: Size of blur kernel to use (will be reduced for small images).
    filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
      for small images).
    k1: Constant used to maintain stability in the SSIM calculation (0.01 in
      the original paper).
    k2: Constant used to maintain stability in the SSIM calculation (0.03 in
      the original paper).
    Returns:
    Pair containing the mean SSIM and contrast sensitivity between `img1` and
    `img2`, one value per image in the batch.
    Raises:
    RuntimeError: If input images don't have the same shape or don't have four
      dimensions: [batch_size, height, width, depth].
    """
    if img1.shape != img2.shape:
        raise RuntimeError('Input images must have the same shape (%s vs. %s).' % (img1.shape, img2.shape))
    if img1.ndim != 4:
        raise RuntimeError('Input images must have four dimensions, not %d' % img1.ndim)
    img1 = img1.astype(np.float32)
    img2 = img2.astype(np.float32)
    _, height, width, _ = img1.shape
    # Filter size can't be larger than height or width of images.
    size = min(filter_size, height, width)
    # Scale down sigma if a smaller filter size is used.
    sigma = size * filter_sigma / filter_size if filter_size else 0
    if filter_size:
        window = np.reshape(_FSpecialGauss(size, sigma), (1, size, size, 1))
        # Local means and raw second moments via FFT-based convolution.
        mu1 = signal.fftconvolve(img1, window, mode='valid')
        mu2 = signal.fftconvolve(img2, window, mode='valid')
        sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid')
        sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid')
        sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid')
    else:
        # Empty blur kernel so no need to convolve.
        mu1, mu2 = img1, img2
        sigma11 = img1 * img1
        sigma22 = img2 * img2
        sigma12 = img1 * img2
    mu11 = mu1 * mu1
    mu22 = mu2 * mu2
    mu12 = mu1 * mu2
    # Convert raw second moments to (co)variances.
    sigma11 -= mu11
    sigma22 -= mu22
    sigma12 -= mu12
    # Calculate intermediate values used by both ssim and cs_map.
    c1 = (k1 * max_val) ** 2
    c2 = (k2 * max_val) ** 2
    v1 = 2.0 * sigma12 + c2
    v2 = sigma11 + sigma22 + c2
    ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)), axis=(1, 2, 3)) # Return for each image individually.
    cs = np.mean(v1 / v2, axis=(1, 2, 3))
    return ssim, cs
def _HoxDownsample(img):
return (img[:, 0::2, 0::2, :] + img[:, 1::2, 0::2, :] + img[:, 0::2, 1::2, :] + img[:, 1::2, 1::2, :]) * 0.25
def msssim(img1, img2, max_val=255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03, weights=None):
    """Return the MS-SSIM score between `img1` and `img2`.

    This function implements Multi-Scale Structural Similarity (MS-SSIM) Image
    Quality Assessment according to Zhou Wang's paper, "Multi-scale structural
    similarity for image quality assessment" (2003).
    Link: https://ece.uwaterloo.ca/~z70wang/publications/msssim.pdf
    Author's MATLAB implementation:
    http://www.cns.nyu.edu/~lcv/ssim/msssim.zip

    Arguments:
      img1: Numpy array holding the first RGB image batch.
      img2: Numpy array holding the second RGB image batch.
      max_val: the dynamic range of the images (i.e., the difference between the
        maximum the and minimum allowed values).
      filter_size: Size of blur kernel to use (will be reduced for small images).
      filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
        for small images).
      k1: Constant used to maintain stability in the SSIM calculation (0.01 in
        the original paper).
      k2: Constant used to maintain stability in the SSIM calculation (0.03 in
        the original paper).
      weights: List of weights for each level; if none, use five levels and the
        weights from the original paper.
    Returns:
      MS-SSIM score between `img1` and `img2`.
    Raises:
      RuntimeError: If input images don't have the same shape or don't have four
        dimensions: [batch_size, height, width, depth].
    """
    if img1.shape != img2.shape:
        raise RuntimeError('Input images must have the same shape (%s vs. %s).' % (img1.shape, img2.shape))
    if img1.ndim != 4:
        raise RuntimeError('Input images must have four dimensions, not %d' % img1.ndim)
    # Note: default weights don't sum to 1.0 but do match the paper / matlab code.
    # Use an explicit None check: the previous `weights if weights else ...`
    # raised "truth value of an array is ambiguous" when a numpy array was
    # passed, and silently discarded an (invalid) empty list.
    if weights is None:
        weights = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
    weights = np.asarray(weights)
    levels = weights.size
    im1, im2 = [x.astype(np.float32) for x in [img1, img2]]
    mssim = []
    mcs = []
    for _ in range(levels):
        ssim, cs = _SSIMForMultiScale(
            im1, im2, max_val=max_val, filter_size=filter_size,
            filter_sigma=filter_sigma, k1=k1, k2=k2)
        mssim.append(ssim)
        mcs.append(cs)
        # Move one scale down: 2x2 box-filter downsampling.
        im1, im2 = [_HoxDownsample(x) for x in [im1, im2]]
    # Clip to zero. Otherwise we get NaNs.
    mssim = np.clip(np.asarray(mssim), 0.0, np.inf)
    mcs = np.clip(np.asarray(mcs), 0.0, np.inf)
    # Average over images only at the end: contrast sensitivity at the coarse
    # levels, full SSIM only at the finest level, each raised to its weight.
    return np.mean(np.prod(mcs[:-1, :] ** weights[:-1, np.newaxis], axis=0) * (mssim[-1, :] ** weights[-1]))
#----------------------------------------------------------------------------
# EDIT: added
class API:
    """Metric plug-in that reports the mean MS-SSIM over consecutive image pairs."""

    def __init__(self, num_images, image_shape, image_dtype, minibatch_size):
        # Images are consumed two at a time, so both the total image count and
        # the minibatch size must be even.
        assert num_images % 2 == 0
        assert minibatch_size % 2 == 0
        self.num_pairs = num_images // 2

    def get_metric_names(self):
        return ['MS-SSIM']

    def get_metric_formatting(self):
        return ['%-10.4f']

    def begin(self, mode):
        assert mode in ['warmup', 'reals', 'fakes']
        self.sum = 0.0  # running pair-weighted sum of per-minibatch scores

    def feed(self, mode, minibatch):
        # NCHW -> NHWC, then pair up even- and odd-indexed images.
        nhwc = minibatch.transpose(0, 2, 3, 1)
        pairs_in_batch = nhwc.shape[0] // 2
        self.sum += msssim(nhwc[0::2], nhwc[1::2]) * pairs_in_batch

    def end(self, mode):
        return [self.sum / self.num_pairs]
#----------------------------------------------------------------------------
| 8,160 | 39.60199 | 128 | py |
GANFingerprints | GANFingerprints-master/classifier_visNet/metrics/inception_score.py | # Copyright 2016 Wojciech Zaremba
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from the original implementation by Wojciech Zaremba.
# Source: https://github.com/openai/improved-gan/blob/master/inception_score/model.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
import glob
import scipy.misc
import math
import sys
# Local cache directory for the downloaded Inception model (the API class
# below redirects this into the configured result directory).
MODEL_DIR = '/tmp/imagenet'
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# Softmax output tensor of the Inception graph; populated by _init_inception().
softmax = None
# Call this function with list of images. Each of elements should be a
# numpy array with values ranging from 0 to 255.
def get_inception_score(images, splits=10):
    """Compute the Inception Score of `images`.

    images: list of HWC numpy arrays (values nominally in [0, 255]).
    splits: number of disjoint subsets over which exp(mean KL) is averaged.
    Returns (mean, std) of the per-split scores. Requires the module-level
    `softmax` tensor to have been built by _init_inception().
    """
    assert(type(images) == list)
    assert(type(images[0]) == np.ndarray)
    assert(len(images[0].shape) == 3)
    #assert(np.max(images[0]) > 10) # EDIT: commented out
    #assert(np.min(images[0]) >= 0.0)
    # Give every image a leading batch dimension; batches are concatenated below.
    inps = []
    for img in images:
        img = img.astype(np.float32)
        inps.append(np.expand_dims(img, 0))
    bs = 100  # inference batch size
    with tf.Session() as sess:
        preds = []
        n_batches = int(math.ceil(float(len(inps)) / float(bs)))
        for i in range(n_batches):
            #sys.stdout.write(".") # EDIT: commented out
            #sys.stdout.flush()
            inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
            inp = np.concatenate(inp, 0)
            # Feed raw images into the graph's input node; collect class probs.
            pred = sess.run(softmax, {'ExpandDims:0': inp})
            preds.append(pred)
        preds = np.concatenate(preds, 0)
        scores = []
        for i in range(splits):
            part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
            # exp of the mean KL divergence between p(y|x) and the marginal p(y).
            kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
            kl = np.mean(np.sum(kl, 1))
            scores.append(np.exp(kl))
        return np.mean(scores), np.std(scores)
# This function is called automatically.
def _init_inception():
    """Download/extract the Inception graph and build the global `softmax`.

    Side effect: assigns the module-level `softmax` a softmax over Inception
    logits that accepts an arbitrary minibatch size.
    """
    global softmax
    if not os.path.exists(MODEL_DIR):
        os.makedirs(MODEL_DIR)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(MODEL_DIR, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (
                filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
        tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR) # EDIT: increased indent
    with tf.gfile.FastGFile(os.path.join(
            MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
    # Works with an arbitrary minibatch size.
    with tf.Session() as sess:
        pool3 = sess.graph.get_tensor_by_name('pool_3:0')
        ops = pool3.graph.get_operations()
        # Rewrite the static batch dimension (1) of every op output to None so
        # the imported graph accepts arbitrary batch sizes (touches private
        # Tensor attributes; TF1-specific).
        for op_idx, op in enumerate(ops):
            for o in op.outputs:
                shape = o.get_shape()
                shape = [s.value for s in shape]
                new_shape = []
                for j, s in enumerate(shape):
                    if s == 1 and j == 0:
                        new_shape.append(None)
                    else:
                        new_shape.append(s)
                try:
                    o._shape = tf.TensorShape(new_shape)
                except ValueError:
                    o._shape_val = tf.TensorShape(new_shape) # EDIT: added for compatibility with tensorflow 1.6.0
        # Rebuild the final matmul on top of pool_3 so logits/softmax also work
        # for arbitrary batch sizes.
        w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
        logits = tf.matmul(tf.squeeze(pool3), w)
        softmax = tf.nn.softmax(logits)
#----------------------------------------------------------------------------
# EDIT: added
class API:
    """Metric plug-in computing the Inception Score over all fed images."""

    def __init__(self, num_images, image_shape, image_dtype, minibatch_size):
        import config
        # Cache the downloaded Inception model under the result directory.
        globals()['MODEL_DIR'] = os.path.join(config.result_dir, '_inception')
        self.sess = tf.get_default_session()
        _init_inception()

    def get_metric_names(self):
        return ['IS_mean', 'IS_std']

    def get_metric_formatting(self):
        return ['%-10.4f', '%-10.4f']

    def begin(self, mode):
        assert mode in ['warmup', 'reals', 'fakes']
        self.images = []  # NHWC minibatches accumulated until end()

    def feed(self, mode, minibatch):
        # NCHW -> NHWC.
        self.images.append(minibatch.transpose(0, 2, 3, 1))

    def end(self, mode):
        batch = list(np.concatenate(self.images))
        with self.sess.as_default():
            mean, std = get_inception_score(batch)
        return [mean, std]
| 5,305 | 34.851351 | 110 | py |
GANFingerprints | GANFingerprints-master/classifier_visNet/metrics/__init__.py | # empty
| 8 | 3.5 | 7 | py |
GANFingerprints | GANFingerprints-master/classifier_visNet/tensorflow_vgg/vgg19_trainable.py | import tensorflow as tf
import numpy as np
from functools import reduce
VGG_MEAN = [103.939, 116.779, 123.68]
class Vgg19:
    """
    A trainable version VGG19.
    """

    def __init__(self, vgg19_npy_path=None, trainable=True, dropout=0.5):
        # vgg19_npy_path: optional .npy dict of pretrained weights, keyed by
        # layer name; when None, all variables are freshly initialized.
        if vgg19_npy_path is not None:
            self.data_dict = np.load(vgg19_npy_path, encoding='latin1').item()
        else:
            self.data_dict = None

        self.var_dict = {}  # (layer_name, idx) -> variable, filled by get_var()
        self.trainable = trainable  # tf.Variable vs. tf.constant in get_var()
        self.dropout = dropout  # keep probability passed to tf.nn.dropout

    def build(self, rgb, train_mode=None):
        """
        load variable from npy to build the VGG

        :param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
        :param train_mode: a bool tensor, usually a placeholder: if True, dropout will be turned on
        """
        rgb_scaled = rgb * 255.0

        # Convert RGB to BGR and subtract the per-channel mean, matching the
        # preprocessing of the original Caffe VGG model.
        red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)
        assert red.get_shape().as_list()[1:] == [224, 224, 1]
        assert green.get_shape().as_list()[1:] == [224, 224, 1]
        assert blue.get_shape().as_list()[1:] == [224, 224, 1]
        bgr = tf.concat(axis=3, values=[
            blue - VGG_MEAN[0],
            green - VGG_MEAN[1],
            red - VGG_MEAN[2],
        ])
        assert bgr.get_shape().as_list()[1:] == [224, 224, 3]

        # Five convolutional blocks (VGG19: 2-2-4-4-4 conv layers), each
        # followed by 2x2 max pooling.
        self.conv1_1 = self.conv_layer(bgr, 3, 64, "conv1_1")
        self.conv1_2 = self.conv_layer(self.conv1_1, 64, 64, "conv1_2")
        self.pool1 = self.max_pool(self.conv1_2, 'pool1')

        self.conv2_1 = self.conv_layer(self.pool1, 64, 128, "conv2_1")
        self.conv2_2 = self.conv_layer(self.conv2_1, 128, 128, "conv2_2")
        self.pool2 = self.max_pool(self.conv2_2, 'pool2')

        self.conv3_1 = self.conv_layer(self.pool2, 128, 256, "conv3_1")
        self.conv3_2 = self.conv_layer(self.conv3_1, 256, 256, "conv3_2")
        self.conv3_3 = self.conv_layer(self.conv3_2, 256, 256, "conv3_3")
        self.conv3_4 = self.conv_layer(self.conv3_3, 256, 256, "conv3_4")
        self.pool3 = self.max_pool(self.conv3_4, 'pool3')

        self.conv4_1 = self.conv_layer(self.pool3, 256, 512, "conv4_1")
        self.conv4_2 = self.conv_layer(self.conv4_1, 512, 512, "conv4_2")
        self.conv4_3 = self.conv_layer(self.conv4_2, 512, 512, "conv4_3")
        self.conv4_4 = self.conv_layer(self.conv4_3, 512, 512, "conv4_4")
        self.pool4 = self.max_pool(self.conv4_4, 'pool4')

        self.conv5_1 = self.conv_layer(self.pool4, 512, 512, "conv5_1")
        self.conv5_2 = self.conv_layer(self.conv5_1, 512, 512, "conv5_2")
        self.conv5_3 = self.conv_layer(self.conv5_2, 512, 512, "conv5_3")
        self.conv5_4 = self.conv_layer(self.conv5_3, 512, 512, "conv5_4")
        self.pool5 = self.max_pool(self.conv5_4, 'pool5')

        # Classifier head: fc6/fc7 with ReLU + (optional) dropout, fc8 logits.
        self.fc6 = self.fc_layer(self.pool5, 25088, 4096, "fc6")  # 25088 = ((224 // (2 ** 5)) ** 2) * 512
        self.relu6 = tf.nn.relu(self.fc6)
        # Dropout is gated at graph-run time by train_mode when given;
        # otherwise it is applied statically whenever the model is trainable.
        if train_mode is not None:
            self.relu6 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu6, self.dropout), lambda: self.relu6)
        elif self.trainable:
            self.relu6 = tf.nn.dropout(self.relu6, self.dropout)

        self.fc7 = self.fc_layer(self.relu6, 4096, 4096, "fc7")
        self.relu7 = tf.nn.relu(self.fc7)
        if train_mode is not None:
            self.relu7 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu7, self.dropout), lambda: self.relu7)
        elif self.trainable:
            self.relu7 = tf.nn.dropout(self.relu7, self.dropout)

        self.fc8 = self.fc_layer(self.relu7, 4096, 1000, "fc8")

        self.prob = tf.nn.softmax(self.fc8, name="prob")

        # Pretrained weights have been consumed; release the dict.
        self.data_dict = None

    def avg_pool(self, bottom, name):
        # 2x2 average pooling, stride 2.
        return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

    def max_pool(self, bottom, name):
        # 2x2 max pooling, stride 2.
        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

    def conv_layer(self, bottom, in_channels, out_channels, name):
        # 3x3 same-padded convolution + bias + ReLU.
        with tf.variable_scope(name):
            filt, conv_biases = self.get_conv_var(3, in_channels, out_channels, name)

            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
            bias = tf.nn.bias_add(conv, conv_biases)
            relu = tf.nn.relu(bias)

            return relu

    def fc_layer(self, bottom, in_size, out_size, name):
        # Flatten the input to [batch, in_size], then apply an affine layer.
        with tf.variable_scope(name):
            weights, biases = self.get_fc_var(in_size, out_size, name)

            x = tf.reshape(bottom, [-1, in_size])
            fc = tf.nn.bias_add(tf.matmul(x, weights), biases)

            return fc

    def get_conv_var(self, filter_size, in_channels, out_channels, name):
        # idx 0 = filters, idx 1 = biases (matches the .npy layout).
        initial_value = tf.truncated_normal([filter_size, filter_size, in_channels, out_channels], 0.0, 0.001)
        filters = self.get_var(initial_value, name, 0, name + "_filters")

        initial_value = tf.truncated_normal([out_channels], .0, .001)
        biases = self.get_var(initial_value, name, 1, name + "_biases")

        return filters, biases

    def get_fc_var(self, in_size, out_size, name):
        # idx 0 = weights, idx 1 = biases (matches the .npy layout).
        initial_value = tf.truncated_normal([in_size, out_size], 0.0, 0.001)
        weights = self.get_var(initial_value, name, 0, name + "_weights")

        initial_value = tf.truncated_normal([out_size], .0, .001)
        biases = self.get_var(initial_value, name, 1, name + "_biases")

        return weights, biases

    def get_var(self, initial_value, name, idx, var_name):
        # Prefer pretrained values when available, otherwise the random init.
        if self.data_dict is not None and name in self.data_dict:
            value = self.data_dict[name][idx]
        else:
            value = initial_value

        if self.trainable:
            var = tf.Variable(value, name=var_name)
        else:
            var = tf.constant(value, dtype=tf.float32, name=var_name)

        self.var_dict[(name, idx)] = var

        # print var_name, var.get_shape().as_list()
        assert var.get_shape() == initial_value.get_shape()

        return var

    def save_npy(self, sess, npy_path="./vgg19-save.npy"):
        """Evaluate all tracked variables and save them as a .npy weight dict."""
        assert isinstance(sess, tf.Session)

        data_dict = {}
        for (name, idx), var in list(self.var_dict.items()):
            var_out = sess.run(var)
            if name not in data_dict:
                data_dict[name] = {}
            data_dict[name][idx] = var_out

        np.save(npy_path, data_dict)
        print(("file saved", npy_path))
        return npy_path

    def get_var_count(self):
        """Return the total number of scalar parameters across all variables."""
        count = 0
        for v in list(self.var_dict.values()):
            count += reduce(lambda x, y: x * y, v.get_shape().as_list())
        return count
| 6,685 | 37.647399 | 113 | py |
GANFingerprints | GANFingerprints-master/classifier_visNet/tensorflow_vgg/test_vgg19.py | import numpy as np
import tensorflow as tf
import vgg19
import utils
# Load two sample images scaled to [0, 1] and center-cropped to 224x224.
img1 = utils.load_image("./test_data/tiger.jpeg")
img2 = utils.load_image("./test_data/puzzle.jpeg")

# Add a leading batch dimension and stack both images into one batch.
batch1 = img1.reshape((1, 224, 224, 3))
batch2 = img2.reshape((1, 224, 224, 3))
batch = np.concatenate((batch1, batch2), 0)

# with tf.Session(config=tf.ConfigProto(gpu_options=(tf.GPUOptions(per_process_gpu_memory_fraction=0.7)))) as sess:
with tf.device('/cpu:0'):
    with tf.Session() as sess:
        images = tf.placeholder("float", [2, 224, 224, 3])
        feed_dict = {images: batch}

        # Build the inference-only VGG19 graph on top of the placeholder.
        vgg = vgg19.Vgg19()
        with tf.name_scope("content_vgg"):
            vgg.build(images)

        # Forward pass; print class predictions for both images.
        prob = sess.run(vgg.prob, feed_dict=feed_dict)
        print(prob)
        utils.print_prob(prob[0], './synset.txt')
        utils.print_prob(prob[1], './synset.txt')
| 845 | 28.172414 | 115 | py |
GANFingerprints | GANFingerprints-master/classifier_visNet/tensorflow_vgg/vgg16.py | import inspect
import os
import numpy as np
import tensorflow as tf
import time
VGG_MEAN = [103.939, 116.779, 123.68]
class Vgg16:
    """Inference-only VGG16 built from a pretrained .npy weight file.

    All weights are materialized as tf.constant, so the network is frozen.
    """

    def __init__(self, vgg16_npy_path=None):
        # Default to "vgg16.npy" located next to this source file.
        if vgg16_npy_path is None:
            path = inspect.getfile(Vgg16)
            path = os.path.abspath(os.path.join(path, os.pardir))
            path = os.path.join(path, "vgg16.npy")
            vgg16_npy_path = path
            print(path)

        # Mapping: layer name -> [weights, biases].
        self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
        print("npy file loaded")

    def build(self, rgb):
        """
        load variable from npy to build the VGG

        :param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
        """
        start_time = time.time()
        print("build model started")
        rgb_scaled = rgb * 255.0

        # Convert RGB to BGR and subtract the per-channel mean, matching the
        # original Caffe preprocessing.
        red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)
        assert red.get_shape().as_list()[1:] == [224, 224, 1]
        assert green.get_shape().as_list()[1:] == [224, 224, 1]
        assert blue.get_shape().as_list()[1:] == [224, 224, 1]
        bgr = tf.concat(axis=3, values=[
            blue - VGG_MEAN[0],
            green - VGG_MEAN[1],
            red - VGG_MEAN[2],
        ])
        assert bgr.get_shape().as_list()[1:] == [224, 224, 3]

        # Five convolutional blocks (VGG16: 2-2-3-3-3 conv layers), each
        # followed by 2x2 max pooling.
        self.conv1_1 = self.conv_layer(bgr, "conv1_1")
        self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
        self.pool1 = self.max_pool(self.conv1_2, 'pool1')

        self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
        self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
        self.pool2 = self.max_pool(self.conv2_2, 'pool2')

        self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
        self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
        self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
        self.pool3 = self.max_pool(self.conv3_3, 'pool3')

        self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
        self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
        self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
        self.pool4 = self.max_pool(self.conv4_3, 'pool4')

        self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
        self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
        self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
        self.pool5 = self.max_pool(self.conv5_3, 'pool5')

        # Classifier head: fc6/fc7 with ReLU, fc8 logits, softmax probabilities.
        self.fc6 = self.fc_layer(self.pool5, "fc6")
        assert self.fc6.get_shape().as_list()[1:] == [4096]
        self.relu6 = tf.nn.relu(self.fc6)

        self.fc7 = self.fc_layer(self.relu6, "fc7")
        self.relu7 = tf.nn.relu(self.fc7)

        self.fc8 = self.fc_layer(self.relu7, "fc8")

        self.prob = tf.nn.softmax(self.fc8, name="prob")

        # Pretrained weights have been consumed; release the dict.
        self.data_dict = None
        print(("build model finished: %ds" % (time.time() - start_time)))

    def avg_pool(self, bottom, name):
        # 2x2 average pooling, stride 2.
        return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

    def max_pool(self, bottom, name):
        # 2x2 max pooling, stride 2.
        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

    def conv_layer(self, bottom, name):
        # 3x3 same-padded convolution + bias + ReLU, weights from the .npy dict.
        with tf.variable_scope(name):
            filt = self.get_conv_filter(name)

            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')

            conv_biases = self.get_bias(name)
            bias = tf.nn.bias_add(conv, conv_biases)

            relu = tf.nn.relu(bias)
            return relu

    def fc_layer(self, bottom, name):
        # Flatten all non-batch dimensions, then apply an affine layer.
        with tf.variable_scope(name):
            shape = bottom.get_shape().as_list()
            dim = 1
            for d in shape[1:]:
                dim *= d
            x = tf.reshape(bottom, [-1, dim])

            weights = self.get_fc_weight(name)
            biases = self.get_bias(name)

            # Fully connected layer. Note that the '+' operation automatically
            # broadcasts the biases.
            fc = tf.nn.bias_add(tf.matmul(x, weights), biases)

            return fc

    def get_conv_filter(self, name):
        return tf.constant(self.data_dict[name][0], name="filter")

    def get_bias(self, name):
        return tf.constant(self.data_dict[name][1], name="biases")

    def get_fc_weight(self, name):
        return tf.constant(self.data_dict[name][0], name="weights")
| 4,414 | 34.039683 | 106 | py |
GANFingerprints | GANFingerprints-master/classifier_visNet/tensorflow_vgg/utils.py | import skimage
import skimage.io
import skimage.transform
import numpy as np
# synset = [l.strip() for l in open('synset.txt').readlines()]
# returns image of shape [224, 224, 3]
# [height, width, depth]
def load_image(path):
    """Load an image, center-crop it to a square, and resize to 224x224.

    Pixel values are scaled to [0, 1]. Returns an array of shape
    (224, 224, 3) == (height, width, depth).
    """
    img = skimage.io.imread(path)
    img = img / 255.0
    assert (0 <= img).all() and (img <= 1.0).all()
    # print "Original Image Shape: ", img.shape
    # Crop the largest centered square so the aspect ratio is preserved
    # before resizing.
    side = min(img.shape[:2])
    top = int((img.shape[0] - side) / 2)
    left = int((img.shape[1] - side) / 2)
    square = img[top: top + side, left: left + side]
    return skimage.transform.resize(square, (224, 224))
# returns the top1 string
def print_prob(prob, file_path):
    """Print the top-1 and top-5 predictions and return the top-1 label.

    prob: 1-D array of class probabilities (must have at least 5 entries).
    file_path: path to a synset file with one class label per line.
    Returns the label string with the highest probability.
    """
    # Use a context manager so the synset file handle is closed
    # deterministically (previously it was left open until GC).
    with open(file_path) as f:
        synset = [l.strip() for l in f.readlines()]
    # Class indices sorted by descending probability.
    pred = np.argsort(prob)[::-1]
    # Get top1 label
    top1 = synset[pred[0]]
    print(("Top1: ", top1, prob[pred[0]]))
    # Get top5 label
    top5 = [(synset[pred[i]], prob[pred[i]]) for i in range(5)]
    print(("Top5: ", top5))
    return top1
def load_image2(path, height=None, width=None):
    """Load an image scaled to [0, 1] and resize it.

    If only one of height/width is given, the other is derived so the aspect
    ratio is preserved; if neither is given, the original size is kept.
    """
    # load image
    img = skimage.io.imread(path)
    img = img / 255.0
    if height is not None and width is not None:
        ny = height
        nx = width
    elif height is not None:
        ny = height
        # int(): under Python 3 true division yields a float, which is not a
        # valid output dimension for skimage.transform.resize.
        nx = int(img.shape[1] * ny / img.shape[0])
    elif width is not None:
        nx = width
        ny = int(img.shape[0] * nx / img.shape[1])
    else:
        ny = img.shape[0]
        nx = img.shape[1]
    return skimage.transform.resize(img, (ny, nx))
def test():
    """Smoke test: load an image, resize to height 300 keeping aspect, save it."""
    img = skimage.io.imread("./test_data/starry_night.jpg")
    ny = 300
    # int(): under Python 3 true division yields a float, which is not a valid
    # output dimension for skimage.transform.resize.
    nx = int(img.shape[1] * ny / img.shape[0])
    img = skimage.transform.resize(img, (ny, nx))
    skimage.io.imsave("./test_data/test/output.jpg", img)


if __name__ == "__main__":
    test()
| 1,921 | 25.328767 | 64 | py |
GANFingerprints | GANFingerprints-master/classifier_visNet/tensorflow_vgg/vgg19.py | import os
import tensorflow as tf
import numpy as np
import time
import inspect
VGG_MEAN = [103.939, 116.779, 123.68]
class Vgg19:
    """Inference-only VGG19 built from a pretrained .npy weight file.

    All weights are materialized as tf.constant, so the network is frozen.
    """

    def __init__(self, vgg19_npy_path=None):
        # Default to "vgg19.npy" located next to this source file.
        if vgg19_npy_path is None:
            path = inspect.getfile(Vgg19)
            path = os.path.abspath(os.path.join(path, os.pardir))
            path = os.path.join(path, "vgg19.npy")
            vgg19_npy_path = path
            print(vgg19_npy_path)

        # Mapping: layer name -> [weights, biases].
        self.data_dict = np.load(vgg19_npy_path, encoding='latin1').item()
        print("npy file loaded")

    def build(self, rgb):
        """
        load variable from npy to build the VGG

        :param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
        """
        start_time = time.time()
        print("build model started")
        rgb_scaled = rgb * 255.0

        # Convert RGB to BGR and subtract the per-channel mean, matching the
        # original Caffe preprocessing.
        red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)
        assert red.get_shape().as_list()[1:] == [224, 224, 1]
        assert green.get_shape().as_list()[1:] == [224, 224, 1]
        assert blue.get_shape().as_list()[1:] == [224, 224, 1]
        bgr = tf.concat(axis=3, values=[
            blue - VGG_MEAN[0],
            green - VGG_MEAN[1],
            red - VGG_MEAN[2],
        ])
        assert bgr.get_shape().as_list()[1:] == [224, 224, 3]

        # Five convolutional blocks (VGG19: 2-2-4-4-4 conv layers), each
        # followed by 2x2 max pooling.
        self.conv1_1 = self.conv_layer(bgr, "conv1_1")
        self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
        self.pool1 = self.max_pool(self.conv1_2, 'pool1')

        self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
        self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
        self.pool2 = self.max_pool(self.conv2_2, 'pool2')

        self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
        self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
        self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
        self.conv3_4 = self.conv_layer(self.conv3_3, "conv3_4")
        self.pool3 = self.max_pool(self.conv3_4, 'pool3')

        self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
        self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
        self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
        self.conv4_4 = self.conv_layer(self.conv4_3, "conv4_4")
        self.pool4 = self.max_pool(self.conv4_4, 'pool4')

        self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
        self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
        self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
        self.conv5_4 = self.conv_layer(self.conv5_3, "conv5_4")
        self.pool5 = self.max_pool(self.conv5_4, 'pool5')

        # Classifier head: fc6/fc7 with ReLU, fc8 logits, softmax probabilities.
        self.fc6 = self.fc_layer(self.pool5, "fc6")
        assert self.fc6.get_shape().as_list()[1:] == [4096]
        self.relu6 = tf.nn.relu(self.fc6)

        self.fc7 = self.fc_layer(self.relu6, "fc7")
        self.relu7 = tf.nn.relu(self.fc7)

        self.fc8 = self.fc_layer(self.relu7, "fc8")

        self.prob = tf.nn.softmax(self.fc8, name="prob")

        # Pretrained weights have been consumed; release the dict.
        self.data_dict = None
        print(("build model finished: %ds" % (time.time() - start_time)))

    def avg_pool(self, bottom, name):
        # 2x2 average pooling, stride 2.
        return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

    def max_pool(self, bottom, name):
        # 2x2 max pooling, stride 2.
        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

    def conv_layer(self, bottom, name):
        # 3x3 same-padded convolution + bias + ReLU, weights from the .npy dict.
        with tf.variable_scope(name):
            filt = self.get_conv_filter(name)

            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')

            conv_biases = self.get_bias(name)
            bias = tf.nn.bias_add(conv, conv_biases)

            relu = tf.nn.relu(bias)
            return relu

    def fc_layer(self, bottom, name):
        # Flatten all non-batch dimensions, then apply an affine layer.
        with tf.variable_scope(name):
            shape = bottom.get_shape().as_list()
            dim = 1
            for d in shape[1:]:
                dim *= d
            x = tf.reshape(bottom, [-1, dim])

            weights = self.get_fc_weight(name)
            biases = self.get_bias(name)

            # Fully connected layer. Note that the '+' operation automatically
            # broadcasts the biases.
            fc = tf.nn.bias_add(tf.matmul(x, weights), biases)

            return fc

    def get_conv_filter(self, name):
        return tf.constant(self.data_dict[name][0], name="filter")

    def get_bias(self, name):
        return tf.constant(self.data_dict[name][1], name="biases")

    def get_fc_weight(self, name):
        return tf.constant(self.data_dict[name][0], name="weights")
| 4,616 | 34.790698 | 106 | py |
GANFingerprints | GANFingerprints-master/classifier_visNet/tensorflow_vgg/test_vgg19_trainable.py | """
Simple tester for the vgg19_trainable
"""
import tensorflow as tf
import vgg19_trainable as vgg19
import utils
# Single training-step demo for the trainable VGG19.
img1 = utils.load_image("./test_data/tiger.jpeg")
img1_true_result = [1 if i == 292 else 0 for i in range(1000)]  # 1-hot result for tiger

batch1 = img1.reshape((1, 224, 224, 3))

with tf.device('/cpu:0'):
    sess = tf.Session()

    images = tf.placeholder(tf.float32, [1, 224, 224, 3])
    true_out = tf.placeholder(tf.float32, [1, 1000])
    train_mode = tf.placeholder(tf.bool)

    vgg = vgg19.Vgg19('./vgg19.npy')
    vgg.build(images, train_mode)

    # print number of variables used: 143667240 variables, i.e. ideal size = 548MB
    print(vgg.get_var_count())

    sess.run(tf.global_variables_initializer())

    # test classification
    prob = sess.run(vgg.prob, feed_dict={images: batch1, train_mode: False})
    utils.print_prob(prob[0], './synset.txt')

    # simple 1-step training
    cost = tf.reduce_sum((vgg.prob - true_out) ** 2)
    train = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
    sess.run(train, feed_dict={images: batch1, true_out: [img1_true_result], train_mode: True})

    # test classification again, should have a higher probability about tiger
    prob = sess.run(vgg.prob, feed_dict={images: batch1, train_mode: False})
    utils.print_prob(prob[0], './synset.txt')

    # test save
    vgg.save_npy(sess, './test-save.npy')
| 1,397 | 30.066667 | 95 | py |
GANFingerprints | GANFingerprints-master/classifier_visNet/tensorflow_vgg/test_vgg16.py | import numpy as np
import tensorflow as tf
import vgg16
import utils
# Load two sample images scaled to [0, 1] and center-cropped to 224x224.
img1 = utils.load_image("./test_data/tiger.jpeg")
img2 = utils.load_image("./test_data/puzzle.jpeg")

# Add a leading batch dimension and stack both images into one batch.
batch1 = img1.reshape((1, 224, 224, 3))
batch2 = img2.reshape((1, 224, 224, 3))
batch = np.concatenate((batch1, batch2), 0)

# with tf.Session(config=tf.ConfigProto(gpu_options=(tf.GPUOptions(per_process_gpu_memory_fraction=0.7)))) as sess:
with tf.device('/cpu:0'):
    with tf.Session() as sess:
        images = tf.placeholder("float", [2, 224, 224, 3])
        feed_dict = {images: batch}

        # Build the inference-only VGG16 graph on top of the placeholder.
        vgg = vgg16.Vgg16()
        with tf.name_scope("content_vgg"):
            vgg.build(images)

        # Forward pass; print class predictions for both images.
        prob = sess.run(vgg.prob, feed_dict=feed_dict)
        print(prob)
        utils.print_prob(prob[0], './synset.txt')
        utils.print_prob(prob[1], './synset.txt')
| 845 | 28.172414 | 115 | py |
GANFingerprints | GANFingerprints-master/classifier_visNet/tensorflow_vgg/__init__.py | 0 | 0 | 0 | py | |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/main.py | import sys
import numpy as np
import core
from utils.misc import pp, visualize
import tensorflow as tf
# Command-line flag definitions for MMD-GAN training / evaluation.
flags = tf.app.flags
flags.DEFINE_integer("max_iteration", 150000, "Epoch to train [150000]")
flags.DEFINE_float("learning_rate", .0001, "Learning rate [.0001]")
flags.DEFINE_float("learning_rate_D", -1, "Learning rate for discriminator, if negative same as generator [-1]")
flags.DEFINE_boolean("MMD_lr_scheduler", True, "Wheather to use lr scheduler based on 3-sample test")
flags.DEFINE_float("decay_rate", .5, "Decay rate [.5]")
flags.DEFINE_float("gp_decay_rate", 1.0, "Decay rate [1.0]")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_float("init", 0.1, "Initialization value [0.1]")
flags.DEFINE_integer("batch_size", 32, "The size of batch images [1000]")
flags.DEFINE_integer("real_batch_size", -1, "The size of batch images for real samples. If -1 then same as batch_size [-1]")
flags.DEFINE_integer("output_size", 128, "The size of the output images to produce [64]")
flags.DEFINE_integer("c_dim", 3, "Dimension of image color. [3]")
flags.DEFINE_string("dataset", "celebA", "The name of the model fro saving puposes")
flags.DEFINE_string("name", "mmd_test", "The name of dataset [celebA, mnist, lsun, cifar10]")
flags.DEFINE_string("checkpoint_dir", "checkpoint_mmd", "Directory name to save the checkpoints [checkpoint_mmd]")
flags.DEFINE_string("sample_dir", "samples_mmd", "Directory name to save the image samples [samples_mmd]")
flags.DEFINE_string("log_dir", "logs_mmd", "Directory name to save the image samples [logs_mmd]")
flags.DEFINE_string("data_dir", "./data", "Directory containing datasets [./data]")
flags.DEFINE_string("architecture", "dcgan", "The name of the architecture [dcgan, g-resnet5, dcgan5]")
flags.DEFINE_string("kernel", "", "The name of the architecture ['', 'mix_rbf', 'mix_rq', 'distance', 'dot', 'mix_rq_dot']")
flags.DEFINE_string("model", "mmd", "The model type [mmd, cramer, wgan_gp]")
flags.DEFINE_boolean("is_train", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("visualize", False, "True for visualizing, False for nothing [False]")
flags.DEFINE_boolean("is_demo", False, "For testing [False]")
flags.DEFINE_float("gradient_penalty", 0.0, "Use gradient penalty [0.0]")
flags.DEFINE_integer("threads", 64, "Upper limit for number of threads [np.inf]")
flags.DEFINE_integer("dsteps", 5, "Number of discriminator steps in a row [1] ")
flags.DEFINE_integer("gsteps", 1, "Number of generator steps in a row [1] ")
# Fix: the trailing "[1]" previously sat *outside* the string literal, so the
# help string was indexed with [1] and the flag's help text became the single
# character 'u'.
flags.DEFINE_integer("start_dsteps", 5, "Number of discrimintor steps in a row during first 20 steps and every 100th step [1]")
flags.DEFINE_integer("df_dim", 64, "Discriminator no of channels at first conv layer [64]")
flags.DEFINE_integer("dof_dim", 16, "No of discriminator output features [16]")
flags.DEFINE_integer("gf_dim", 64, "no of generator channels [64]")
flags.DEFINE_boolean("batch_norm", True, "Use of batch norm [False] (always False for discriminator if gradient_penalty > 0)")
flags.DEFINE_boolean("log", False, "Wheather to write log to a file in samples directory [True]")
flags.DEFINE_string("suffix", '', "For additional settings ['', '_tf_records']")
flags.DEFINE_boolean('compute_scores', False, "Compute scores [True]")
flags.DEFINE_float("gpu_mem", .9, "GPU memory fraction limit [0.9]")
flags.DEFINE_float("L2_discriminator_penalty", 0.0, "L2 penalty on discriminator features [0.0]")
flags.DEFINE_integer("no_of_samples", 100000, "number of samples to produce")
flags.DEFINE_boolean("print_pca", False, "")
flags.DEFINE_integer("save_layer_outputs", 0, "Wheather to save_layer_outputs. If == 2, saves outputs at exponential steps: 1, 2, 4, ..., 512 and every 1000. [0, 1, 2]")
flags.DEFINE_string("output_dir_of_test_samples", 'samples_mmd', "Output directory for testing samples")
flags.DEFINE_integer("random_seed", 0, "Random seed")
FLAGS = flags.FLAGS
class EasyDict(dict):
    """A dict whose entries are also reachable as attributes (d.key == d['key'])."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def __getattr__(self, name):
        # Attribute reads fall through to item lookup; a missing key raises
        # KeyError (not AttributeError), matching the original behavior.
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]
def create_session(config_dict=dict(), force_as_default=False):
    """Create a tf.Session configured from a dict of dotted ConfigProto keys.

    config_dict maps dotted paths (e.g. 'gpu_options.allow_growth') to values.
    NOTE(review): the mutable default argument is only ever read here, never
    mutated, so it is benign. If force_as_default, the session is entered as
    the process-wide default and left open (relies on private TF1 attributes).
    """
    config = tf.ConfigProto()
    # Walk each dotted key down to its parent message, then set the leaf field.
    for key, value in config_dict.items():
        fields = key.split('.')
        obj = config
        for field in fields[:-1]:
            obj = getattr(obj, field)
        setattr(obj, fields[-1], value)
    session = tf.Session(config=config)
    if force_as_default:
        # Keep the context manager alive on the session object so the default
        # session outlives this call; disable the nesting check so it is never
        # required to be exited in LIFO order (private TF API).
        session._default_session = session.as_default()
        session._default_session.enforce_nesting = False
        session._default_session.__enter__()
    return session
# One-time process setup at import: seed numpy/TF from the --random_seed flag
# and install a default session if none exists yet.
if tf.get_default_session() is None:
    np.random.seed(FLAGS.random_seed)
    tf.set_random_seed(np.random.randint(1 << 31))
    tf_config = EasyDict()  # TensorFlow session config, set by tfutil.init_tf().
    tf_config['graph_options.place_pruned_graph'] = True  # False (default) = Check that all ops are available on the designated device. True = Skip the check for ops that are not used.
    tf_config['gpu_options.allow_growth'] = False  # False (default) = Allocate all GPU memory at the beginning. True = Allocate only as much GPU memory as needed.
    create_session(tf_config, force_as_default=True)
def main(_):
    """Entry point: build the configured GAN model, then train or evaluate it."""
    pp.pprint(FLAGS.__flags)
    # Limit intra-op CPU threads and GPU memory only when a finite thread
    # count was requested; otherwise use TF defaults.
    if FLAGS.threads < np.inf:
        sess_config = tf.ConfigProto(intra_op_parallelism_threads=FLAGS.threads)
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_mem
    else:
        sess_config = tf.ConfigProto()
    # Import the model class lazily so only the selected backend is loaded.
    if 'mmd' in FLAGS.model:
        from core.model import MMD_GAN as Model
    elif FLAGS.model == 'wgan_gp':
        from core.wgan_gp import WGAN_GP as Model
    elif 'cramer' in FLAGS.model:
        from core.cramer import Cramer_GAN as Model
    else:
        # Previously fell through silently and crashed later with a NameError.
        raise ValueError('unknown model: %r' % FLAGS.model)
    with tf.Session(config=sess_config) as sess:
        # Dataset-specific image geometry; everything else comes from FLAGS.
        if FLAGS.dataset == 'mnist':
            gan = Model(sess, config=FLAGS, batch_size=FLAGS.batch_size,
                        output_size=28, c_dim=1, data_dir=FLAGS.data_dir)
        elif FLAGS.dataset == 'cifar10':
            gan = Model(sess, config=FLAGS, batch_size=FLAGS.batch_size,
                        output_size=32, c_dim=3, data_dir=FLAGS.data_dir)
        elif FLAGS.dataset in ['celebA', 'lsun']:
            gan = Model(sess, config=FLAGS, batch_size=FLAGS.batch_size,
                        output_size=FLAGS.output_size, c_dim=3,
                        data_dir=FLAGS.data_dir)
        else:
            gan = Model(sess, config=FLAGS, batch_size=FLAGS.batch_size,
                        output_size=FLAGS.output_size, c_dim=FLAGS.c_dim,
                        data_dir=FLAGS.data_dir)
        if FLAGS.is_train:
            gan.train()
        elif FLAGS.print_pca:
            gan.print_pca()
        elif FLAGS.visualize:
            gan.load_checkpoint()
            visualize(sess, gan, FLAGS, 2)
        else:
            gan.get_samples(FLAGS.no_of_samples, layers=[-1])
        if FLAGS.log:
            # Undo the stdout redirection installed by the model's logger.
            sys.stdout = gan.old_stdout
            gan.log_file.close()
        gan.sess.close()
if __name__ == '__main__':
    # Parse flags and dispatch to main() via the TF app runner.
    tf.app.run()
| 7,225 | 53.330827 | 191 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/compute_scores.py | from __future__ import division, print_function
import os.path, sys, tarfile
import numpy as np
from scipy import linalg
from six.moves import range, urllib
from sklearn.metrics.pairwise import polynomial_kernel
import tensorflow as tf
from tqdm import tqdm
# from tqdm docs: https://pypi.python.org/pypi/tqdm#hooks-and-callbacks
class TqdmUpTo(tqdm):
    """tqdm subclass whose update_to() matches the urlretrieve reporthook
    signature, so downloads get a progress bar."""
    def update_to(self, b=1, bsize=1, tsize=None):
        # b: blocks transferred so far; bsize: block size; tsize: total size.
        if tsize is not None:
            self.total = tsize
        self.update(b * bsize - self.n)  # also sets self.n = b * bsize
class Inception(object):
    """Wraps the frozen Inception (2015-12-05) graph for feature extraction.

    Downloads and extracts the graph on first use, then exposes:
      - self.softmax: 1008-way class probabilities (Inception score)
      - self.coder:   2048-d pool_3 features (FID / KID)
      - self.input:   name of the feed tensor ('ExpandDims:0')
    """
    def __init__(self):
        MODEL_DIR = '/tmp/imagenet'
        DATA_URL = ('http://download.tensorflow.org/models/image/imagenet/'
                    'inception-2015-12-05.tgz')
        self.softmax_dim = 1008
        self.coder_dim = 2048
        if not os.path.exists(MODEL_DIR):
            os.makedirs(MODEL_DIR)
        filename = DATA_URL.split('/')[-1]
        filepath = os.path.join(MODEL_DIR, filename)
        if not os.path.exists(filepath):
            # Download with a progress bar, then unpack the frozen graph.
            with TqdmUpTo(unit='B', unit_scale=True, miniters=1,
                          desc=filename) as t:
                filepath, _ = urllib.request.urlretrieve(
                    DATA_URL, filepath, reporthook=t.update_to)
            tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
        with tf.gfile.FastGFile(os.path.join(
                MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
        # Works with an arbitrary minibatch size.
        self.sess = sess = tf.Session()
        #with sess:
        pool3 = sess.graph.get_tensor_by_name('pool_3:0')
        ops = pool3.graph.get_operations()
        # Patch every op output whose static batch dim is 1 to None so the
        # imported graph accepts arbitrary batch sizes.
        for op_idx, op in enumerate(ops):
            for o in op.outputs:
                shape = [s.value for s in o.get_shape()]
                if len(shape) and shape[0] == 1:
                    shape[0] = None
                o._shape = tf.TensorShape(shape)
        # Rebuild the logits from pool_3 with the original softmax weights so
        # they track the patched (batched) shapes.
        w = sess.graph.get_operation_by_name(
            "softmax/logits/MatMul").inputs[1]
        self.coder = tf.squeeze(tf.squeeze(pool3, 2), 1)
        logits = tf.matmul(self.coder, w)
        self.softmax = tf.nn.softmax(logits)
        assert self.coder.get_shape()[1].value == self.coder_dim
        assert self.softmax.get_shape()[1].value == self.softmax_dim
        self.input = 'ExpandDims:0'
class LeNet(object):
    """Wraps a pre-trained LeNet saved_model (MNIST) for featurization.

    Exposes self.softmax (10-way predictions), self.coder (512-d features)
    and self.input (name of the feed placeholder).
    """
    def __init__(self):
        model_dir = 'lenet/saved_model'
        self.softmax_dim = 10
        self.coder_dim = 512
        self.sess = tf.Session()
        tf.saved_model.loader.load(
            self.sess, [tf.saved_model.tag_constants.TRAINING], model_dir)
        graph = self.sess.graph
        # Tensor names are fixed by how the saved_model was exported.
        self.softmax = graph.get_tensor_by_name('Softmax_1:0')
        self.coder = graph.get_tensor_by_name('Relu_5:0')
        assert self.coder.get_shape()[1].value == self.coder_dim
        assert self.softmax.get_shape()[1].value == self.softmax_dim
        self.input = 'Placeholder_2:0'
def featurize(images, model, batch_size=100, transformer=np.asarray,
              get_preds=True, get_codes=False, output=sys.stdout,
              out_preds=None, out_codes=None):
    '''Run `model` over `images` in minibatches, collecting predictions
    and/or code features.

    images: a list of numpy arrays with values in [0, 255]
    model: an Inception or LeNet wrapper instance
    transformer: applied to each raw minibatch before feeding the graph
    out_preds / out_codes: optional preallocated float32 output arrays
    Returns a tuple containing preds and/or codes, per the get_* flags.
    '''
    # Sanity-check value ranges on a small prefix before the full pass.
    sub = transformer(images[:10])
    assert(sub.ndim == 4)
    if isinstance(model, Inception):
        assert sub.shape[3] == 3
        if (sub.max() > 255) or (sub.min() < 0):
            print('WARNING! Inception min/max violated: min = %f, max = %f. Clipping values.' % (sub.min(), sub.max()))
            sub = sub.clip(0., 255.)
    elif isinstance(model, LeNet):
        # LeNet graph was exported with a fixed batch size of 64.
        batch_size = 64
        assert sub.shape[3] == 1
        if (sub.max() > .5) or (sub.min() < -.5):
            print('WARNING! LeNet min/max violated: min = %f, max = %f. Clipping values.' % (sub.min(), sub.max()))
            sub = sub.clip(-.5, .5)

    n = len(images)

    to_get = ()
    ret = ()
    if get_preds:
        to_get += (model.softmax,)
        if out_preds is not None:
            assert out_preds.shape == (n, model.softmax_dim)
            assert out_preds.dtype == np.float32
            preds = out_preds
        else:
            # NaN-fill so unwritten rows are detectable.
            preds = np.empty((n, model.softmax_dim), dtype=np.float32)
            preds.fill(np.nan)
        ret += (preds,)
    if get_codes:
        to_get += (model.coder,)
        if out_codes is not None:
            assert out_codes.shape == (n, model.coder_dim)
            assert out_codes.dtype == np.float32
            codes = out_codes
        else:
            codes = np.empty((n, model.coder_dim), dtype=np.float32)
            codes.fill(np.nan)
        ret += (codes,)

    # with model.sess:
    with TqdmUpTo(unit='img', unit_scale=True, total=n, file=output) as t:
        for start in range(0, n, batch_size):
            t.update_to(start)
            end = min(start + batch_size, n)
            inp = transformer(images[start:end])
            if end - start != batch_size:
                # Zero-pad the final short batch; only the first
                # (end - start) rows of the output are kept.
                pad = batch_size - (end - start)
                extra = np.zeros((pad,) + inp.shape[1:], dtype=inp.dtype)
                inp = np.r_[inp, extra]
                w = slice(0, end - start)
            else:
                w = slice(None)
            out = model.sess.run(to_get, {model.input: inp})
            if get_preds:
                preds[start:end] = out[0][w]
            if get_codes:
                # Codes tensor is the last fetched output when present.
                codes[start:end] = out[-1][w]
        t.update_to(n)
    return ret
def get_splits(n, splits=10, split_method='openai'):
    """Return index sets dividing n items into `splits` groups.

    'openai': contiguous, nearly equal-sized slices covering [0, n).
    'bootstrap': each group resamples all n indices with replacement.
    """
    if split_method == 'openai':
        bounds = [i * n // splits for i in range(splits + 1)]
        return [slice(lo, hi) for lo, hi in zip(bounds[:-1], bounds[1:])]
    if split_method == 'bootstrap':
        return [np.random.choice(n, n) for _ in range(splits)]
    raise ValueError("bad split_method {}".format(split_method))
def inception_score(preds, **split_args):
    """Inception score per split: exp(mean_x KL(p(y|x) || p(y))).

    preds: (n, num_classes) softmax outputs; split_args forwarded to
    get_splits. Returns one score per split.
    """
    split_inds = get_splits(preds.shape[0], **split_args)
    scores = np.zeros(len(split_inds))
    for i, inds in enumerate(split_inds):
        part = preds[inds]
        marginal = np.mean(part, 0, keepdims=True)
        kl_per_sample = np.sum(part * (np.log(part) - np.log(marginal)), 1)
        scores[i] = np.exp(np.mean(kl_per_sample))
    return scores
def fid_score(codes_g, codes_r, eps=1e-6, output=sys.stdout, **split_args):
    """Frechet Inception Distance between generated and reference codes,
    computed independently per split.

    codes_g / codes_r: (n, d) feature arrays; eps regularizes the covariance
    product when the matrix square root fails. Returns one FID per split.
    """
    splits_g = get_splits(codes_g.shape[0], **split_args)
    splits_r = get_splits(codes_r.shape[0], **split_args)
    assert len(splits_g) == len(splits_r)
    d = codes_g.shape[1]
    assert codes_r.shape[1] == d

    scores = np.zeros(len(splits_g))
    with tqdm(splits_g, desc='FID', file=output) as bar:
        for i, (w_g, w_r) in enumerate(zip(bar, splits_r)):
            part_g = codes_g[w_g]
            part_r = codes_r[w_r]

            mn_g = part_g.mean(axis=0)
            mn_r = part_r.mean(axis=0)
            cov_g = np.cov(part_g, rowvar=False)
            cov_r = np.cov(part_r, rowvar=False)

            covmean, _ = linalg.sqrtm(cov_g.dot(cov_r), disp=False)
            if not np.isfinite(covmean).all():
                # Near-singular product: regularize the diagonals and retry.
                cov_g[range(d), range(d)] += eps
                cov_r[range(d), range(d)] += eps
                covmean = linalg.sqrtm(cov_g.dot(cov_r))
            if np.iscomplexobj(covmean):
                # sqrtm can return tiny imaginary components from numerical
                # error; previously the complex trace was stuffed into a
                # float array. Keep only the real part.
                covmean = covmean.real

            scores[i] = np.sum((mn_g - mn_r) ** 2) + (
                np.trace(cov_g) + np.trace(cov_r) - 2 * np.trace(covmean))
            bar.set_postfix({'mean': scores[:i+1].mean()})
    return scores
def polynomial_mmd_averages(codes_g, codes_r, n_subsets=50, subset_size=1000,
                            ret_var=True, output=sys.stdout, **kernel_args):
    """Estimate MMD^2 (KID) over n_subsets random subsets of the codes.

    Each subset draws subset_size rows without replacement from each side;
    kernel_args are forwarded to polynomial_mmd. Returns the per-subset
    estimates, plus per-subset variance estimates when ret_var is True.
    """
    var_at_m = min(codes_g.shape[0], codes_r.shape[0])
    mmd_ests = np.zeros(n_subsets)
    var_ests = np.zeros(n_subsets) if ret_var else None
    pick = np.random.choice
    with tqdm(range(n_subsets), desc='MMD', file=output) as bar:
        for i in bar:
            sub_g = codes_g[pick(len(codes_g), subset_size, replace=False)]
            sub_r = codes_r[pick(len(codes_r), subset_size, replace=False)]
            result = polynomial_mmd(sub_g, sub_r, **kernel_args,
                                    var_at_m=var_at_m, ret_var=ret_var)
            if ret_var:
                mmd_ests[i], var_ests[i] = result
            else:
                mmd_ests[i] = result
            bar.set_postfix({'mean': mmd_ests[:i+1].mean()})
    if ret_var:
        return mmd_ests, var_ests
    return mmd_ests
def polynomial_mmd(codes_g, codes_r, degree=3, gamma=None, coef0=1,
                   var_at_m=None, ret_var=True):
    """MMD^2 under the polynomial kernel k(x, y) = (gamma <x, y> + coef0)^degree.

    gamma=None uses sklearn's default of 1 / dim. Delegates estimation to
    _mmd2_and_variance on the three kernel matrices.
    """
    kern = lambda *mats: polynomial_kernel(
        *mats, degree=degree, gamma=gamma, coef0=coef0)
    K_XX = kern(codes_g)
    K_YY = kern(codes_r)
    K_XY = kern(codes_g, codes_r)

    return _mmd2_and_variance(K_XX, K_XY, K_YY,
                              var_at_m=var_at_m, ret_var=ret_var)
def _sqn(arr):
flat = np.ravel(arr)
return flat.dot(flat)
def _mmd2_and_variance(K_XX, K_XY, K_YY, unit_diagonal=False,
                       mmd_est='unbiased', block_size=1024,
                       var_at_m=None, ret_var=True):
    """Compute an MMD^2 estimate and (optionally) its asymptotic variance
    from precomputed m x m kernel matrices.

    mmd_est: 'biased', 'unbiased' or 'u-statistic' selects the estimator.
    unit_diagonal: set True when k(x, x) == 1 to skip diagonal sums.
    var_at_m: sample size the variance estimate should be evaluated at
        (defaults to m). Returns mmd2, or (mmd2, var_est) if ret_var.
    """
    # based on
    # https://github.com/dougalsutherland/opt-mmd/blob/master/two_sample/mmd.py
    # but changed to not compute the full kernel matrix at once
    m = K_XX.shape[0]
    assert K_XX.shape == (m, m)
    assert K_XY.shape == (m, m)
    assert K_YY.shape == (m, m)
    if var_at_m is None:
        var_at_m = m

    # Get the various sums of kernels that we'll use
    # Kts drop the diagonal, but we don't need to compute them explicitly
    if unit_diagonal:
        diag_X = diag_Y = 1
        sum_diag_X = sum_diag_Y = m
        sum_diag2_X = sum_diag2_Y = m
    else:
        diag_X = np.diagonal(K_XX)
        diag_Y = np.diagonal(K_YY)

        sum_diag_X = diag_X.sum()
        sum_diag_Y = diag_Y.sum()

        sum_diag2_X = _sqn(diag_X)
        sum_diag2_Y = _sqn(diag_Y)

    # Row sums of the off-diagonal ("tilde") kernel matrices.
    Kt_XX_sums = K_XX.sum(axis=1) - diag_X
    Kt_YY_sums = K_YY.sum(axis=1) - diag_Y
    K_XY_sums_0 = K_XY.sum(axis=0)
    K_XY_sums_1 = K_XY.sum(axis=1)

    Kt_XX_sum = Kt_XX_sums.sum()
    Kt_YY_sum = Kt_YY_sums.sum()
    K_XY_sum = K_XY_sums_0.sum()

    if mmd_est == 'biased':
        mmd2 = ((Kt_XX_sum + sum_diag_X) / (m * m)
                + (Kt_YY_sum + sum_diag_Y) / (m * m)
                - 2 * K_XY_sum / (m * m))
    else:
        assert mmd_est in {'unbiased', 'u-statistic'}
        mmd2 = (Kt_XX_sum + Kt_YY_sum) / (m * (m-1))
        if mmd_est == 'unbiased':
            mmd2 -= 2 * K_XY_sum / (m * m)
        else:
            # U-statistic: also drop the K_XY diagonal.
            mmd2 -= 2 * (K_XY_sum - np.trace(K_XY)) / (m * (m-1))

    if not ret_var:
        return mmd2

    # Second-moment sums needed by the variance formulas.
    Kt_XX_2_sum = _sqn(K_XX) - sum_diag2_X
    Kt_YY_2_sum = _sqn(K_YY) - sum_diag2_Y
    K_XY_2_sum = _sqn(K_XY)

    dot_XX_XY = Kt_XX_sums.dot(K_XY_sums_1)
    dot_YY_YX = Kt_YY_sums.dot(K_XY_sums_0)

    m1 = m - 1
    m2 = m - 2
    # zeta1/zeta2 are the components of the U-statistic variance expansion.
    zeta1_est = (
        1 / (m * m1 * m2) * (
            _sqn(Kt_XX_sums) - Kt_XX_2_sum + _sqn(Kt_YY_sums) - Kt_YY_2_sum)
        - 1 / (m * m1)**2 * (Kt_XX_sum**2 + Kt_YY_sum**2)
        + 1 / (m * m * m1) * (
            _sqn(K_XY_sums_1) + _sqn(K_XY_sums_0) - 2 * K_XY_2_sum)
        - 2 / m**4 * K_XY_sum**2
        - 2 / (m * m * m1) * (dot_XX_XY + dot_YY_YX)
        + 2 / (m**3 * m1) * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum
    )
    zeta2_est = (
        1 / (m * m1) * (Kt_XX_2_sum + Kt_YY_2_sum)
        - 1 / (m * m1)**2 * (Kt_XX_sum**2 + Kt_YY_sum**2)
        + 2 / (m * m) * K_XY_2_sum
        - 2 / m**4 * K_XY_sum**2
        - 4 / (m * m * m1) * (dot_XX_XY + dot_YY_YX)
        + 4 / (m**3 * m1) * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum
    )
    var_est = (4 * (var_at_m - 2) / (var_at_m * (var_at_m - 1)) * zeta1_est
               + 2 / (var_at_m * (var_at_m - 1)) * zeta2_est)

    return mmd2, var_est
def main():
    """CLI entry point: load sample images, featurize them with Inception or
    LeNet, then compute the requested scores (Inception / FID / KID-MMD)."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('samples')
    parser.add_argument('reference_feats', nargs='?')
    parser.add_argument('--output', '-o')
    # Accepts Python slice syntax 'start:stop' for the reference features.
    parser.add_argument('--reference-subset', default=slice(None),
                        type=lambda x: slice(*(int(s) if s else None
                                               for s in x.split(':'))))
    parser.add_argument('--batch-size', type=int, default=128)
    parser.add_argument('--model', choices=['inception', 'lenet'],
                        default='inception')
    # Saving and loading features/predictions are mutually exclusive.
    g = parser.add_mutually_exclusive_group()
    g.add_argument('--save-codes')
    g.add_argument('--load-codes')
    g = parser.add_mutually_exclusive_group()
    g.add_argument('--save-preds')
    g.add_argument('--load-preds')
    g = parser.add_mutually_exclusive_group()
    g.add_argument('--do-inception', action='store_true', default=True)
    g.add_argument('--no-inception', action='store_false', dest='do_inception')
    g = parser.add_mutually_exclusive_group()
    g.add_argument('--do-fid', action='store_true', default=False)
    g.add_argument('--no-fid', action='store_false', dest='do_fid')
    g = parser.add_mutually_exclusive_group()
    g.add_argument('--do-mmd', action='store_true', default=False)
    g.add_argument('--no-mmd', action='store_false', dest='do_mmd')
    parser.add_argument('--mmd-degree', type=int, default=3)
    parser.add_argument('--mmd-gamma', type=float, default=None)
    parser.add_argument('--mmd-coef0', type=float, default=1)
    parser.add_argument('--mmd-subsets', type=int, default=100)
    parser.add_argument('--mmd-subset-size', type=int, default=1000)
    g = parser.add_mutually_exclusive_group()
    g.add_argument('--mmd-var', action='store_true', default=False)
    g.add_argument('--no-mmd-var', action='store_false', dest='mmd_var')
    parser.add_argument('--splits', type=int, default=10)
    parser.add_argument('--split-method', choices=['openai', 'bootstrap'],
                        default='bootstrap')
    args = parser.parse_args()

    if args.do_fid and args.reference_feats is None:
        parser.error("Need REFERENCE_FEATS if you're doing FID")

    def check_path(pth):
        # Refuse to overwrite existing outputs; create parent dirs as needed.
        if os.path.exists(pth):
            parser.error("Path {} already exists".format(pth))
        d = os.path.dirname(pth)
        if d and not os.path.exists(d):
            os.makedirs(d)

    if args.output:
        check_path(args.output)

    samples = np.load(args.samples, mmap_mode='r')

    # Pick the model and a value-range transformer matching the sample dtype.
    if args.model == 'inception':
        model = Inception()
        if samples.dtype == np.uint8:
            transformer = np.asarray
        elif samples.dtype == np.float32:
            m = samples[:10].max()
            assert .5 <= m <= 1
            transformer = lambda x: x * 255
        else:
            raise TypeError("don't know how to handle {}".format(samples.dtype))
    elif args.model == 'lenet':
        model = LeNet()
        if samples.dtype == np.uint8:
            def transformer(x):
                return (np.asarray(x, dtype=np.float32) - (255 / 2.)) / 255
        elif samples.dtype == np.float32:
            assert .8 <= samples[:10].max() <= 1
            assert 0 <= samples[:10].min() <= .3
            transformer = lambda x: x - .5
        else:
            raise TypeError("don't know how to handle {}".format(samples.dtype))
    else:
        raise ValueError("bad model {}".format(args.model))

    if args.reference_feats:
        ref_feats = np.load(args.reference_feats, mmap_mode='r')[
            args.reference_subset]

    # Optional memmapped output arrays, filled in-place by featurize().
    out_kw = {}
    if args.save_codes:
        check_path(args.save_codes)
        out_kw['out_codes'] = np.lib.format.open_memmap(
            args.save_codes, mode='w+', dtype=np.float32,
            shape=(samples.shape[0], model.coder_dim))
    if args.save_preds:
        check_path(args.save_preds)
        out_kw['out_preds'] = np.lib.format.open_memmap(
            args.save_preds, mode='w+', dtype=np.float32,
            shape=(samples.shape[0], model.softmax_dim))

    need_preds = args.do_inception or args.save_preds
    need_codes = args.do_fid or args.do_mmd or args.save_codes
    print('Transformer test: transformer([-1, 0, 10.]) = ' + repr(transformer(np.array([-1, 0, 10.]))))
    if args.load_codes or args.load_preds:
        if args.load_codes:
            codes = np.load(args.load_codes, mmap_mode='r')
            assert codes.ndim == 2
            assert codes.shape[0] == samples.shape[0]
            assert codes.shape[1] == model.coder_dim
        if args.load_preds:
            preds = np.load(args.load_preds, mmap_mode='r')
            assert preds.ndim == 2
            assert preds.shape[0] == samples.shape[0]
            assert preds.shape[1] == model.softmax_dim
        elif need_preds:
            # Loading codes alone cannot provide predictions.
            raise NotImplementedError()
    else:
        out = featurize(
            samples, model, batch_size=args.batch_size, transformer=transformer,
            get_preds=need_preds, get_codes=need_codes, **out_kw)
        if need_preds:
            preds = out[0]
        if need_codes:
            codes = out[-1]

    split_args = {'splits': args.splits, 'split_method': args.split_method}
    output = {'args': args}
    if args.do_inception:
        output['inception'] = scores = inception_score(preds, **split_args)
        print("Inception mean:", np.mean(scores))
        print("Inception std:", np.std(scores))
        print("Inception scores:", scores, sep='\n')
    if args.do_fid:
        output['fid'] = scores = fid_score(codes, ref_feats, **split_args)
        print("FID mean:", np.mean(scores))
        print("FID std:", np.std(scores))
        print("FID scores:", scores, sep='\n')
        print()
    if args.do_mmd:
        ret = polynomial_mmd_averages(
            codes, ref_feats, degree=args.mmd_degree, gamma=args.mmd_gamma,
            coef0=args.mmd_coef0, ret_var=args.mmd_var,
            n_subsets=args.mmd_subsets, subset_size=args.mmd_subset_size)
        if args.mmd_var:
            output['mmd2'], output['mmd2_var'] = mmd2s, vars = ret
        else:
            output['mmd2'] = mmd2s = ret
        print("mean MMD^2 estimate:", mmd2s.mean())
        print("std MMD^2 estimate:", mmd2s.std())
        print("MMD^2 estimates:", mmd2s, sep='\n')
        print()
        if args.mmd_var:
            print("mean Var[MMD^2] estimate:", vars.mean())
            print("std Var[MMD^2] estimate:", vars.std())
            print("Var[MMD^2] estimates:", vars, sep='\n')
            print()
    if args.output:
        np.savez(args.output, **output)
if __name__ == '__main__':
    # Script entry point.
    main()
| 18,512 | 35.087719 | 119 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/summarize.py | import argparse
import os
import numpy as np
# CLI: print an aligned summary table (Inception / FID / KID) from one or
# more results .npz files produced by compute_scores, optionally as LaTeX
# table rows (--tex).
parser = argparse.ArgumentParser()
parser.add_argument('files', nargs='+')
parser.add_argument('--tex', action='store_true')
args = parser.parse_args()

# Column separator / row terminator differ between plain and LaTeX output.
if args.tex:
    split = ' & '
    end = '\\\\\n'
else:
    split = ' '
    end = '\n'

print(' ' * 79 + 'Inception (std) FID (std) MMD^2 (std)')
# Header row, padded so columns line up with the per-file rows below.
print(' ' * (18 if args.tex else 87), end=split)
print('{:>9}'.format('Inception'), end=' ' if args.tex else ' ')
print('{:>7}'.format('' if args.tex else '(std)'), end=split)
print('{:>8}'.format('FID'), end=' ')
print('{:>7}'.format('' if args.tex else '(std)'), end=split)
print('{:>8}'.format('KID'), end=' ')
print('{:>8}'.format('' if args.tex else '(std)'), end=end)
if args.tex:
    print("\\hline")
for fn in sorted(args.files):
    with np.load(fn) as d:
        # Row label: two path components above the file, suffixes stripped.
        n = '/'.join(fn.split('/')[-3:-1])  #os.path.basename(fn)
        if n.endswith('.npz'):
            n = n[:-4]
        if n.endswith('-results'):
            n = n[:-len('-results')]
        if args.tex:
            n = n.replace('_', ' ')
        print('{:88}'.format(n), end=split)
        print('{:8.3f}'.format(d['inception'].mean()), end=' ')
        print('({:5.3f})'.format(d['inception'].std()), end=split)
        print('{:8.3f}'.format(d['fid'].mean()), end=' ')
        print('({:5.3f})'.format(d['fid'].std()), end=split)
        print('{:8.4f}'.format(d['mmd2'].mean()), end=' ')
        print('({:6.4f})'.format(d['mmd2'].std()), end=end)
| 1,506 | 30.395833 | 77 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/architecture.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 10 14:34:47 2018
@author: mikolajbinkowski
"""
import tensorflow as tf
from core.ops import batch_norm, conv2d, deconv2d, linear, lrelu
from utils.misc import conv_sizes
# Generators
class Generator:
    """Base class for generator networks.

    Holds architecture hyper-parameters and exposes six batch-norm layers
    as attributes g_bn0 .. g_bn5 (identity functions when use_batch_norm is
    False). Calling the instance builds the graph under a shared
    'generator' variable scope, reusing variables after the first call.
    """
    def __init__(self, dim, c_dim, output_size, use_batch_norm, prefix='g_'):
        self.used = False
        self.dim = dim
        self.c_dim = c_dim
        self.output_size = output_size
        self.prefix = prefix
        # Attribute names are always g_bn<k>; only the batch_norm variable
        # scope name carries the prefix.
        for k in range(6):
            if use_batch_norm:
                layer = batch_norm(name=prefix + 'bn%d' % k)
            else:
                layer = lambda x: x  # identity stand-in
            setattr(self, 'g_bn%d' % k, layer)

    def __call__(self, seed, batch_size):
        """Build (or rebuild with variable reuse) the network for `seed`."""
        with tf.variable_scope('generator') as scope:
            if self.used:
                scope.reuse_variables()
            self.used = True
            return self.network(seed, batch_size)

    def network(self, seed, batch_size):
        """Subclasses implement the actual architecture here."""
        pass
class DCGANGenerator(Generator):
    """4-layer DCGAN generator producing sigmoid-activated images."""
    def network(self, seed, batch_size):
        s1, s2, s4, s8, s16 = conv_sizes(self.output_size, layers=4, stride=2)
        # 64, 32, 16, 8, 4 - for self.output_size = 64
        # Project the noise seed and reshape to the smallest feature map.
        proj = linear(seed, self.dim * 8 * s16 * s16, self.prefix + 'h0_lin')
        net = tf.reshape(proj, [batch_size, s16, s16, self.dim * 8])
        net = tf.nn.relu(self.g_bn0(net))
        # Three intermediate stride-2 deconvolutions, halving depth each time.
        stages = [(s8, self.dim * 4, self.g_bn1, 'h1'),
                  (s4, self.dim * 2, self.g_bn2, 'h2'),
                  (s2, self.dim * 1, self.g_bn3, 'h3')]
        for size, depth, bn, tag in stages:
            net = deconv2d(net, [batch_size, size, size, depth],
                           name=self.prefix + tag)
            net = tf.nn.relu(bn(net))
        # Final deconvolution to image channels, squashed into [0, 1].
        net = deconv2d(net, [batch_size, s1, s1, self.c_dim],
                       name=self.prefix + 'h4')
        return tf.nn.sigmoid(net)
class DCGAN5Generator(Generator):
    """5-layer DCGAN generator (one extra upsampling stage vs DCGANGenerator)."""
    def network(self, seed, batch_size):
        s1, s2, s4, s8, s16, s32 = conv_sizes(self.output_size, layers=5, stride=2)
        # Project the noise vector and reshape to the smallest feature map.
        proj = linear(seed, self.dim * 16 * s32 * s32, self.prefix + 'h0_lin')
        net = tf.reshape(proj, [-1, s32, s32, self.dim * 16])
        net = tf.nn.relu(self.g_bn0(net))
        # Four stride-2 deconvolutions, halving depth each time.
        stages = [(s16, self.dim * 8, self.g_bn1, 'h1'),
                  (s8, self.dim * 4, self.g_bn2, 'h2'),
                  (s4, self.dim * 2, self.g_bn3, 'h3'),
                  (s2, self.dim, self.g_bn4, 'h4')]
        for size, depth, bn, tag in stages:
            net = deconv2d(net, [batch_size, size, size, depth],
                           name=self.prefix + tag)
            net = tf.nn.relu(bn(net))
        # Final deconvolution to image channels, squashed into [0, 1].
        net = deconv2d(net, [batch_size, s1, s1, self.c_dim],
                       name=self.prefix + 'h5')
        return tf.nn.sigmoid(net)
class ResNetGenerator(Generator):
    """Residual generator: linear projection, 4 upsampling residual blocks,
    batch-norm + ReLU, then a final deconvolution to image channels."""
    def network(self, seed, batch_size):
        from core.resnet import block, ops
        s1, s2, s4, s8, s16, s32 = conv_sizes(self.output_size, layers=5, stride=2)
        # Project the noise vector; residual blocks operate in NCHW format.
        proj = linear(seed, self.dim * 16 * s32 * s32, self.prefix + 'h0_lin')
        net = tf.reshape(proj, [-1, self.dim * 16, s32, s32])
        # Channel multipliers (in, out) for each upsampling residual block.
        stages = [(16, 8, 'res1'), (8, 4, 'res2'), (4, 2, 'res3'), (2, 1, 'res4')]
        for mult_in, mult_out, tag in stages:
            net = block.ResidualBlock(self.prefix + tag, mult_in * self.dim,
                                      mult_out * self.dim, 3, net, resample='up')
        net = ops.batchnorm.Batchnorm('g_h4', [0, 2, 3], net, fused=True)
        net = tf.nn.relu(net)
        # h5 = lib.ops.conv2d.Conv2D('g_h5', dim, 3, 3, h4)
        net = tf.transpose(net, [0, 2, 3, 1])  # NCHW to NHWC
        net = deconv2d(net, [batch_size, s1, s1, self.c_dim], name='g_h5')
        return tf.nn.sigmoid(net)
# Discriminator
class Discriminator:
    """Base class for discriminator networks.

    Holds hyper-parameters and exposes six batch-norm layers as attributes
    d_bn0 .. d_bn5 (identity functions when use_batch_norm is False).
    Calling the instance builds the graph under a shared 'discriminator'
    variable scope and returns either the final features ('hF') or the full
    layer dict.
    """
    def __init__(self, dim, o_dim, use_batch_norm, prefix='d_'):
        self.dim = dim
        self.o_dim = o_dim
        self.prefix = prefix
        self.used = False
        # Attribute names are always d_bn<k>; only the batch_norm variable
        # scope name carries the prefix.
        for k in range(6):
            if use_batch_norm:
                layer = batch_norm(name=prefix + 'bn%d' % k)
            else:
                layer = lambda x: x  # identity stand-in
            setattr(self, 'd_bn%d' % k, layer)

    def __call__(self, image, batch_size, return_layers=False):
        """Build (or rebuild with variable reuse) the network on `image`."""
        with tf.variable_scope("discriminator") as scope:
            if self.used:
                scope.reuse_variables()
            self.used = True
            layers = self.network(image, batch_size)
            if return_layers:
                return layers
            return layers['hF']

    def network(self, image, batch_size):
        """Subclasses implement the actual architecture here."""
        pass
class DCGANDiscriminator(Discriminator):
    """4 conv layers + linear head; returns all intermediate activations."""
    def network(self, image, batch_size):
        # Output width defaults to 8*dim when o_dim is not positive.
        out_dim = self.o_dim if (self.o_dim > 0) else 8 * self.dim
        layers = {}
        net = lrelu(conv2d(image, self.dim, name=self.prefix + 'h0_conv'))
        layers['h0'] = net
        # Stride-2 convolutions doubling depth, each with bn + leaky ReLU.
        stages = [(2, self.d_bn1, 'h1'), (4, self.d_bn2, 'h2'),
                  (8, self.d_bn3, 'h3')]
        for mult, bn, key in stages:
            net = lrelu(bn(conv2d(net, self.dim * mult,
                                  name=self.prefix + key + '_conv')))
            layers[key] = net
        layers['hF'] = linear(tf.reshape(net, [batch_size, -1]),
                              out_dim, self.prefix + 'h4_lin')
        return layers
class DCGAN5Discriminator(Discriminator):
    """5 conv layers + linear head; returns all intermediate activations."""
    def network(self, image, batch_size):
        # Output width defaults to 16*dim when o_dim is not positive.
        out_dim = self.o_dim if (self.o_dim > 0) else 16 * self.dim
        layers = {}
        net = lrelu(conv2d(image, self.dim, name=self.prefix + 'h0_conv'))
        layers['h0'] = net
        stages = [(2, self.d_bn1, 'h1'), (4, self.d_bn2, 'h2'),
                  (8, self.d_bn3, 'h3'), (16, self.d_bn4, 'h4')]
        for mult, bn, key in stages:
            net = lrelu(bn(conv2d(net, self.dim * mult,
                                  name=self.prefix + key + '_conv')))
            layers[key] = net
        # NOTE(review): the head scope is 'h6_lin' (skipping h5), as in the
        # original — do not rename, existing checkpoints may depend on it.
        layers['hF'] = linear(tf.reshape(net, [batch_size, -1]),
                              out_dim, self.prefix + 'h6_lin')
        return layers
class FullConvDiscriminator(Discriminator):
    """Fully convolutional discriminator: the head is a conv layer (then
    flattened) instead of a linear projection."""
    def network(self, image, batch_size):
        layers = {}
        net = lrelu(conv2d(image, self.dim, name=self.prefix + 'h0_conv'))
        layers['h0'] = net
        stages = [(self.dim * 2, self.d_bn1, 'h1'),
                  (self.dim * 4, self.d_bn2, 'h2'),
                  (self.dim * 8, self.d_bn3, 'h3')]
        for depth, bn, key in stages:
            net = lrelu(bn(conv2d(net, depth, name=self.prefix + key + '_conv')))
            layers[key] = net
        head = lrelu(self.d_bn4(conv2d(net, self.o_dim,
                                       name=self.prefix + 'hF_conv')))
        layers['hF'] = tf.reshape(head, [batch_size, -1])
        return layers
class ResNetDiscriminator(Discriminator):
    """Residual discriminator: conv stem, 4 downsampling residual blocks,
    then a linear head on the flattened 4x4 feature map."""
    def network(self, image, batch_size):
        from core.resnet import block, ops
        net = tf.transpose(image, [0, 3, 1, 2])  # NHWC to NCHW
        layers = {}
        net = lrelu(ops.conv2d.Conv2D(self.prefix + 'h0_conv', 3, self.dim,
                                      3, net, he_init=False))
        layers['h0'] = net
        # Channel multipliers (in, out) for each downsampling residual block.
        stages = [(1, 2, 'res1', 'h1'), (2, 4, 'res2', 'h2'),
                  (4, 8, 'res3', 'h3'), (8, 8, 'res4', 'h4')]
        for mult_in, mult_out, tag, key in stages:
            net = block.ResidualBlock(self.prefix + tag, mult_in * self.dim,
                                      mult_out * self.dim, 3, net,
                                      resample='down')
            layers[key] = net
        flat = tf.reshape(net, [-1, 4 * 4 * 8 * self.dim])
        layers['hF'] = linear(flat, self.o_dim, self.prefix + 'h5_lin')
        return layers
def get_networks(architecture):
    """Map an architecture name to its (Generator, Discriminator) classes.

    Raises ValueError for unrecognized names. Note 'g_resnet5' is matched
    by containment, the other names by equality.
    """
    if architecture == 'dcgan':
        return DCGANGenerator, DCGANDiscriminator
    if architecture == 'dcgan5':
        return DCGAN5Generator, DCGAN5Discriminator
    if 'g_resnet5' in architecture:
        return ResNetGenerator, DCGAN5Discriminator
    if architecture == 'resnet5':
        return ResNetGenerator, ResNetDiscriminator
    if architecture == 'd_fullconv5':
        return DCGAN5Generator, FullConvDiscriminator
    raise ValueError('Wrong architecture: "%s"' % architecture)
| 9,781 | 42.475556 | 115 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/cramer.py | from .model import MMD_GAN, tf, np
from .architecture import get_networks
from .ops import safer_norm
class Cramer_GAN(MMD_GAN):
    """Cramer GAN: MMD_GAN subclass using the energy-distance critic with a
    gradient penalty on real/fake interpolates."""
    def build_model(self):
        """Construct the training graph: generators (two independent fake
        batches, as the Cramer critic requires), discriminator, losses,
        summaries and saver."""
        self.global_step = tf.Variable(0, name="global_step", trainable=False)
        self.lr = tf.Variable(self.config.learning_rate, name='lr',
                              trainable=False, dtype=tf.float32)
        # Learning rate decays multiplicatively, floored at 1e-6.
        self.lr_decay_op = self.lr.assign(tf.maximum(self.lr * self.config.decay_rate, 1.e-6))
        with tf.variable_scope('loss'):
            if self.config.is_train and (self.config.gradient_penalty > 0):
                self.gp = tf.Variable(self.config.gradient_penalty,
                                      name='gradient_penalty',
                                      trainable=False, dtype=tf.float32)
                self.gp_decay_op = self.gp.assign(self.gp * self.config.gp_decay_rate)
        self.set_pipeline()
        # Fixed noise for periodic sampling, frozen as a graph constant.
        self.sample_z = tf.constant(np.random.uniform(-1, 1, size=(self.sample_size,
                                                                   self.z_dim)).astype(np.float32),
                                    dtype=tf.float32, name='sample_z')
        Generator, Discriminator = get_networks(self.config.architecture)
        generator = Generator(self.gf_dim, self.c_dim, self.output_size, self.config.batch_norm)
        # Disable discriminator batch norm when a gradient penalty is used.
        dbn = self.config.batch_norm & (self.config.gradient_penalty <= 0)
        self.discriminator = Discriminator(self.df_dim, self.dof_dim, dbn)
        self.G = generator(tf.random_uniform([self.batch_size, self.z_dim], minval=-1.,
                                             maxval=1., dtype=tf.float32, name='z'),
                           self.batch_size)
        self.G2 = generator(tf.random_uniform([self.batch_size, self.z_dim], minval=-1.,
                                              maxval=1., dtype=tf.float32, name='z2'),
                            self.batch_size)
        self.sampler = generator(self.sample_z, self.sample_size)
        self.d_images_layers = self.discriminator(self.images, self.real_batch_size, return_layers=True)
        self.d_G_layers = self.discriminator(self.G, self.batch_size, return_layers=True)
        self.d_images = self.d_images_layers['hF']
        self.d_G = self.d_G_layers['hF']
        G2 = self.discriminator(self.G2, self.batch_size)
        self.set_loss(self.d_G, G2, self.d_images)
        # Image summary grid edge: at most 8, bounded by both batch sizes.
        block = min(8, int(np.sqrt(self.real_batch_size)), int(np.sqrt(self.batch_size)))
        tf.summary.image("train/input image",
                         self.imageRearrange(tf.clip_by_value(self.images, 0, 1), block))
        tf.summary.image("train/gen image",
                         self.imageRearrange(tf.clip_by_value(self.G, 0, 1), block))
        # Split trainables by the d_/g_ naming convention of the networks.
        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]
        self.saver = tf.train.Saver(max_to_keep=2)

    def set_loss(self, G, G2, images):
        """Define generator/discriminator losses on discriminator features.

        G, G2: features of two independent fake batches; images: features of
        the real batch. Adds a gradient penalty computed on random
        interpolates between real and fake images.
        """
        bs = min([self.batch_size, self.real_batch_size])
        # Random interpolation coefficients, one per sample (WGAN-GP style).
        alpha = tf.random_uniform(shape=[bs])
        alpha = tf.reshape(alpha, [bs, 1, 1, 1])
        real_data = self.images[:bs]  # before discriminator
        fake_data = self.G[:bs]  # before discriminator
        x_hat_data = (1. - alpha) * real_data + alpha * fake_data
        x_hat = self.discriminator(x_hat_data, bs)
        # Cramer critic f(x) = ||h(x) - h(x')|| - ||h(x)|| on features.
        critic = lambda x, x_: safer_norm(x - x_, axis=1) - safer_norm(x, axis=1)
        with tf.variable_scope('loss'):
            if self.config.model == 'cramer':  # Cramer GAN paper
                self.g_loss = tf.reduce_mean(
                    - safer_norm(G - G2, axis=1) + safer_norm(G - images, axis=1) + safer_norm(G2 - images, axis=1))
                self.d_loss = -tf.reduce_mean(critic(images, G) - critic(G2, G))
                to_penalize = critic(x_hat, G)
            elif self.config.model == 'reddit_cramer':
                self.g_loss = tf.reduce_mean(critic(images, G) - critic(G, G2))
                self.d_loss = -self.g_loss
                to_penalize = critic(x_hat, G)
            else:
                raise(AttributeError('wrong model: %s' % self.config.model))
            # One-sided unit-gradient-norm penalty on the interpolates.
            gradients = tf.gradients(to_penalize, [x_hat_data])[0]
            penalty = tf.reduce_mean(tf.square(safer_norm(gradients, axis=1) - 1.0))
            self.gp = tf.get_variable('gradient_penalty', dtype=tf.float32,
                                      initializer=self.config.gradient_penalty)
            self.d_loss += penalty * self.gp
            self.optim_name = '%s gp %.1f' % (self.config.model, self.config.gradient_penalty)
            tf.summary.scalar(self.optim_name + ' G', self.g_loss)
            tf.summary.scalar(self.optim_name + ' D', self.d_loss)
            tf.summary.scalar('dx_penalty', penalty)
| 4,955 | 50.625 | 116 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/mmd.py | '''
MMD functions implemented in tensorflow.
'''
from __future__ import division
# Small shift used to keep sqrt inputs strictly away from zero.
_eps = 1.0e-5
import tensorflow as tf
import numpy as np
from .ops import dot, sq_sum
# Numerically safe sqrt: shift by _eps and clamp at 0 so tf.sqrt never sees a
# negative input (avoids NaNs in values and gradients).
mysqrt = lambda x: tf.sqrt(tf.maximum(x + _eps, 0.))
def _distance_kernel(X, Y, K_XY_only=False):
    """Energy-distance kernel built from pairwise Euclidean norms.

    k(a, b) = ||a|| + ||b|| - ||a - b||, computed via Gram matrices with
    mysqrt for numerical safety. Returns K_XY alone when K_XY_only, else
    (K_XX, K_XY, K_YY, False) — the final False matches the other kernels'
    "sum of weights" slot.
    """
    gram_xx = tf.matmul(X, X, transpose_b=True)
    gram_xy = tf.matmul(X, Y, transpose_b=True)
    gram_yy = tf.matmul(Y, Y, transpose_b=True)
    sq_x = tf.diag_part(gram_xx)
    sq_y = tf.diag_part(gram_yy)
    as_row = lambda v: tf.expand_dims(v, 0)
    as_col = lambda v: tf.expand_dims(v, 1)

    K_XY = as_col(mysqrt(sq_x)) + as_row(mysqrt(sq_y)) - mysqrt(-2 * gram_xy + as_col(sq_x) + as_row(sq_y))
    if K_XY_only:
        return K_XY

    K_XX = as_col(mysqrt(sq_x)) + as_row(mysqrt(sq_x)) - mysqrt(-2 * gram_xx + as_col(sq_x) + as_row(sq_x))
    K_YY = as_col(mysqrt(sq_y)) + as_row(mysqrt(sq_y)) - mysqrt(-2 * gram_yy + as_col(sq_y) + as_row(sq_y))
    return K_XX, K_XY, K_YY, False
def _tanh_distance_kernel(X, Y, K_XY_only=False):
    # Distance kernel applied to tanh-squashed (bounded) features.
    return _distance_kernel(tf.tanh(X), tf.tanh(Y), K_XY_only=K_XY_only)
def _dot_kernel(X, Y, K_XY_only=False):
    """Plain linear (dot-product) kernel.

    Returns K_XY alone when K_XY_only, else (K_XX, K_XY, K_YY, False).
    """
    cross = tf.matmul(X, Y, transpose_b=True)
    if K_XY_only:
        return cross
    return (tf.matmul(X, X, transpose_b=True), cross,
            tf.matmul(Y, Y, transpose_b=True), False)
def _mix_rbf_kernel(X, Y, sigmas=[2.0, 5.0, 10.0, 20.0, 40.0, 80.0], wts=None,
                    K_XY_only=False):
    """Mixture-of-Gaussians kernel: sum_k wts[k] * exp(-||a-b||^2 / (2 sigmas[k]^2)).

    Returns K_XY alone when K_XY_only, else (K_XX, K_XY, K_YY, sum(wts)).
    """
    if wts is None:
        wts = [1] * len(sigmas)

    gram_xx = tf.matmul(X, X, transpose_b=True)
    gram_xy = tf.matmul(X, Y, transpose_b=True)
    gram_yy = tf.matmul(Y, Y, transpose_b=True)
    sq_x = tf.diag_part(gram_xx)
    sq_y = tf.diag_part(gram_yy)
    as_row = lambda v: tf.expand_dims(v, 0)
    as_col = lambda v: tf.expand_dims(v, 1)

    K_XX, K_XY, K_YY = 0, 0, 0
    # Squared distances from Gram matrices: ||a-b||^2 = a.a - 2 a.b + b.b.
    d2_xy = -2 * gram_xy + as_col(sq_x) + as_row(sq_y)
    for sigma, wt in zip(sigmas, wts):
        gamma = 1 / (2 * sigma**2)
        K_XY += wt * tf.exp(-gamma * d2_xy)
    if K_XY_only:
        return K_XY

    d2_xx = -2 * gram_xx + as_col(sq_x) + as_row(sq_x)
    d2_yy = -2 * gram_yy + as_col(sq_y) + as_row(sq_y)
    for sigma, wt in zip(sigmas, wts):
        gamma = 1 / (2 * sigma**2)
        K_XX += wt * tf.exp(-gamma * d2_xx)
        K_YY += wt * tf.exp(-gamma * d2_yy)
    return K_XX, K_XY, K_YY, tf.reduce_sum(wts)
def _mix_rq_dot_kernel(X, Y, alphas=(.1, 1., 10.), wts=None, K_XY_only=False):
    """Mixed-RQ kernel plus a linear (dot-product) term with weight 0.1."""
    return _mix_rq_kernel(X, Y, alphas=alphas, wts=wts, K_XY_only=K_XY_only, add_dot=.1)
def _mix_rq_1dot_kernel(X, Y, alphas=(.1, 1., 10.), wts=None, K_XY_only=False):
    """Mixed-RQ kernel plus a linear term with weight 1."""
    return _mix_rq_kernel(X, Y, alphas=alphas, wts=wts, K_XY_only=K_XY_only, add_dot=1.)
def _mix_rq_10dot_kernel(X, Y, alphas=(.1, 1., 10.), wts=None, K_XY_only=False):
    """Mixed-RQ kernel plus a linear term with weight 10."""
    return _mix_rq_kernel(X, Y, alphas=alphas, wts=wts, K_XY_only=K_XY_only, add_dot=10.)
def _mix_rq_01dot_kernel(X, Y, alphas=(.1, 1., 10.), wts=None, K_XY_only=False):
    """Mixed-RQ kernel plus a linear term with weight 0.1.

    NOTE(review): identical to _mix_rq_dot_kernel (both use add_dot=.1);
    kept for name compatibility with existing configs.
    """
    return _mix_rq_kernel(X, Y, alphas=alphas, wts=wts, K_XY_only=K_XY_only, add_dot=.1)
def _mix_rq_001dot_kernel(X, Y, alphas=(.1, 1., 10.), wts=None, K_XY_only=False):
    """Mixed-RQ kernel plus a linear term with weight 0.01."""
    return _mix_rq_kernel(X, Y, alphas=alphas, wts=wts, K_XY_only=K_XY_only, add_dot=.01)
def _tanh_mix_rq_kernel(X, Y, K_XY_only=False):
    """Mixed-RQ kernel on tanh-squashed inputs (no linear term)."""
    return _mix_rq_kernel(tf.tanh(X), tf.tanh(Y), K_XY_only=K_XY_only)
def _mix_rq_kernel(X, Y, alphas=(.1, 1., 10.), wts=None, K_XY_only=False, add_dot=.0):
    """
    Rational quadratic kernel mixture, optionally plus a weighted dot product:
        k(x, y) = sum_i wts[i] * (1 + ||x-y||^2 / (2*alphas[i]))^(-alphas[i])
                  + add_dot * <x, y>
    http://www.cs.toronto.edu/~duvenaud/cookbook/index.html

    Defaults use immutable tuples (mutable list defaults are a Python footgun).
    Returns K_XY if K_XY_only, else (K_XX, K_XY, K_YY, sum(wts)).
    """
    if wts is None:
        wts = [1.] * len(alphas)
    XX = tf.matmul(X, X, transpose_b=True)
    XY = tf.matmul(X, Y, transpose_b=True)
    YY = tf.matmul(Y, Y, transpose_b=True)
    X_sqnorms = tf.diag_part(XX)
    Y_sqnorms = tf.diag_part(YY)
    r = lambda x: tf.expand_dims(x, 0)
    c = lambda x: tf.expand_dims(x, 1)
    K_XX, K_XY, K_YY = 0., 0., 0.
    # Clamp at 0: squared distances can go slightly negative numerically.
    XYsqnorm = tf.maximum(-2. * XY + c(X_sqnorms) + r(Y_sqnorms), 0.)
    for alpha, wt in zip(alphas, wts):
        # (1 + d/(2*alpha))^(-alpha), computed via log for numerical stability.
        logXY = tf.log(1. + XYsqnorm/(2.*alpha))
        K_XY += wt * tf.exp(-alpha * logXY)
    if add_dot > 0:
        K_XY += tf.cast(add_dot, tf.float32) * XY
    if K_XY_only:
        return K_XY
    XXsqnorm = tf.maximum(-2. * XX + c(X_sqnorms) + r(X_sqnorms), 0.)
    YYsqnorm = tf.maximum(-2. * YY + c(Y_sqnorms) + r(Y_sqnorms), 0.)
    for alpha, wt in zip(alphas, wts):
        logXX = tf.log(1. + XXsqnorm/(2.*alpha))
        logYY = tf.log(1. + YYsqnorm/(2.*alpha))
        K_XX += wt * tf.exp(-alpha * logXX)
        K_YY += wt * tf.exp(-alpha * logYY)
    if add_dot > 0:
        K_XX += tf.cast(add_dot, tf.float32) * XX
        K_YY += tf.cast(add_dot, tf.float32) * YY
    wts = tf.reduce_sum(tf.cast(wts, tf.float32))
    return K_XX, K_XY, K_YY, wts
def mmd2(K, biased=False):
    """Unpack a kernel tuple and compute the (biased or unbiased) MMD^2."""
    kxx, kxy, kyy, diag_const = K
    # numerics checked at _mmd2 return
    return _mmd2(kxx, kxy, kyy, diag_const, biased)
def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
    """MMD^2 estimate from precomputed kernel matrices.

    const_diagonal, when not False, is the known constant value on the
    diagonals of K_XX/K_YY (avoids computing the traces).
    """
    m = tf.cast(K_XX.get_shape()[0], tf.float32)
    n = tf.cast(K_YY.get_shape()[0], tf.float32)
    cross_term = 2 * tf.reduce_sum(K_XY) / (m * n)
    if biased:
        return (tf.reduce_sum(K_XX) / (m * m)
                + tf.reduce_sum(K_YY) / (n * n)
                - cross_term)
    if const_diagonal is False:
        trace_X = tf.trace(K_XX)
        trace_Y = tf.trace(K_YY)
    else:
        diag_val = tf.cast(const_diagonal, tf.float32)
        trace_X = m * diag_val
        trace_Y = n * diag_val
    return ((tf.reduce_sum(K_XX) - trace_X) / (m * (m - 1))
            + (tf.reduce_sum(K_YY) - trace_Y) / (n * (n - 1))
            - cross_term)
def mmd2_and_ratio(K, biased=False, min_var_est=_eps):
    """Unpack a kernel tuple; return (mmd2, t-ratio, variance estimate)."""
    kxx, kxy, kyy, diag_const = K
    return _mmd2_and_ratio(kxx, kxy, kyy, diag_const, biased, min_var_est)
def _mmd2_and_ratio(K_XX, K_XY, K_YY, const_diagonal=False, biased=False,
                    min_var_est=_eps):
    """MMD^2 together with the variance-normalized (t-statistic-like) ratio."""
    est, variance = _mmd2_and_variance(
        K_XX, K_XY, K_YY, const_diagonal=const_diagonal, biased=biased)
    # Floor the variance so the ratio never divides by ~0.
    denom = tf.sqrt(tf.maximum(variance, min_var_est))
    return est, est / denom, variance
def _mmd2_and_variance(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
    """MMD^2 estimate and an estimate of its variance from kernel matrices.

    All three matrices are assumed square of the same size m; const_diagonal,
    when not False, is the known constant diagonal value of K_XX/K_YY.
    Returns (mmd2, var_est).
    """
    m = tf.cast(K_XX.get_shape()[0], tf.float32) # Assumes X, Y are same shape
    ### Get the various sums of kernels that we'll use
    # Kts drop the diagonal, but we don't need to compute them explicitly
    if const_diagonal is not False:
        const_diagonal = tf.cast(const_diagonal, tf.float32)
        diag_X = diag_Y = const_diagonal
        sum_diag_X = sum_diag_Y = m * const_diagonal
        sum_diag2_X = sum_diag2_Y = m * const_diagonal**2
    else:
        diag_X = tf.diag_part(K_XX)
        diag_Y = tf.diag_part(K_YY)
        sum_diag_X = tf.reduce_sum(diag_X)
        sum_diag_Y = tf.reduce_sum(diag_Y)
        sum_diag2_X = sq_sum(diag_X)
        sum_diag2_Y = sq_sum(diag_Y)
    # Row sums of the kernel matrices with the diagonal removed ("Kt" = K-tilde).
    Kt_XX_sums = tf.reduce_sum(K_XX, 1) - diag_X
    Kt_YY_sums = tf.reduce_sum(K_YY, 1) - diag_Y
    K_XY_sums_0 = tf.reduce_sum(K_XY, 0)
    K_XY_sums_1 = tf.reduce_sum(K_XY, 1)
    Kt_XX_sum = tf.reduce_sum(Kt_XX_sums)
    Kt_YY_sum = tf.reduce_sum(Kt_YY_sums)
    K_XY_sum = tf.reduce_sum(K_XY_sums_0)
    # Sums of squared entries, again excluding the diagonals.
    Kt_XX_2_sum = sq_sum(K_XX) - sum_diag2_X
    Kt_YY_2_sum = sq_sum(K_YY) - sum_diag2_Y
    K_XY_2_sum  = sq_sum(K_XY)
    if biased:
        mmd2 = ((Kt_XX_sum + sum_diag_X) / (m * m)
              + (Kt_YY_sum + sum_diag_Y) / (m * m)
              - 2 * K_XY_sum / (m * m))
    else:
        # NOTE(review): this branch keeps sum_diag in the numerator while
        # dividing by m*(m-1); confirm against the reference estimator, which
        # drops the diagonal entirely in the unbiased case.
        mmd2 = ((Kt_XX_sum + sum_diag_X) / (m * (m-1))
              + (Kt_YY_sum + sum_diag_Y) / (m * (m-1))
              - 2 * K_XY_sum / (m * m))
    # Second-moment based variance estimate of the unbiased MMD^2 statistic.
    var_est = (
          2 / (m**2 * (m-1)**2) * (
              2 * sq_sum(Kt_XX_sums) - Kt_XX_2_sum
            + 2 * sq_sum(Kt_YY_sums) - Kt_YY_2_sum)
        - (4*m-6) / (m**3 * (m-1)**3) * (Kt_XX_sum**2 + Kt_YY_sum**2)
        + 4*(m-2) / (m**3 * (m-1)**2) * (
              sq_sum(K_XY_sums_1) + sq_sum(K_XY_sums_0))
        - 4 * (m-3) / (m**3 * (m-1)**2) * K_XY_2_sum
        - (8*m - 12) / (m**5 * (m-1)) * K_XY_sum**2
        + 8 / (m**3 * (m-1)) * (
              1/m * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum
            - dot(Kt_XX_sums, K_XY_sums_1)
            - dot(Kt_YY_sums, K_XY_sums_0))
    )
    return mmd2, var_est
def diff_polynomial_mmd2_and_ratio(X, Y, Z):
    """Difference of MMD^2(X, Y) and MMD^2(X, Z) under a cubic polynomial
    kernel, plus the variance-normalized ratio."""
    dim = tf.cast(X.get_shape()[1], tf.float32)

    def poly3(A, B):
        # Degree-3 polynomial kernel (<a, b>/dim + 1)^3.
        # TODO: could definitely do this faster
        return (tf.matmul(A, B, transpose_b=True) / dim + 1) ** 3

    return _diff_mmd2_and_ratio(poly3(X, Y), poly3(X, Z), poly3(Y, Y),
                                poly3(Z, Z), const_diagonal=False)
def diff_polynomial_mmd2_and_ratio_with_saving(X, Y, saved_sums_for_Z):
    """Like diff_polynomial_mmd2_and_ratio, but reuses precomputed kernel
    sums for Z and returns the freshly computed Y sums for caching."""
    dim = tf.cast(X.get_shape()[1], tf.float32)
    # TODO: could definitely do this faster
    K_XY = (tf.matmul(X, Y, transpose_b=True) / dim + 1) ** 3
    K_YY = (tf.matmul(Y, Y, transpose_b=True) / dim + 1) ** 3
    m = tf.cast(K_YY.get_shape()[0], tf.float32)
    sums_for_Y = _get_sums(K_XY, K_YY)
    mmd2_diff, ratio = _diff_mmd2_and_ratio_from_sums(sums_for_Y, saved_sums_for_Z, m)
    return mmd2_diff, ratio, sums_for_Y
def _diff_mmd2_and_ratio(K_XY, K_XZ, K_YY, K_ZZ, const_diagonal=False):
    """Reduce the four kernel matrices to sums, then compute the MMD^2
    difference and ratio. Assumes X, Y, Z have the same number of rows."""
    m = tf.cast(K_YY.get_shape()[0], tf.float32)
    sums_Y = _get_sums(K_XY, K_YY, const_diagonal)
    sums_Z = _get_sums(K_XZ, K_ZZ, const_diagonal)
    return _diff_mmd2_and_ratio_from_sums(sums_Y, sums_Z, m,
                                          const_diagonal=const_diagonal)
def _diff_mmd2_and_ratio_from_sums(Y_related_sums, Z_related_sums, m, const_diagonal=False):
    """Estimate MMD^2(X,Y) - MMD^2(X,Z) and its variance-normalized ratio
    from the precomputed kernel sums produced by _get_sums."""
    Kt_YY_sums, Kt_YY_2_sum, K_XY_sums_0, K_XY_sums_1, K_XY_2_sum = Y_related_sums
    Kt_ZZ_sums, Kt_ZZ_2_sum, K_XZ_sums_0, K_XZ_sums_1, K_XZ_2_sum = Z_related_sums
    Kt_YY_sum = tf.reduce_sum(Kt_YY_sums)
    Kt_ZZ_sum = tf.reduce_sum(Kt_ZZ_sums)
    K_XY_sum = tf.reduce_sum(K_XY_sums_0)
    K_XZ_sum = tf.reduce_sum(K_XZ_sums_0)
    # TODO: turn these into dot products?
    # should figure out if that's faster or not on GPU / with theano...
    ### Estimators for the various terms involved
    muY_muY = Kt_YY_sum / (m * (m-1))
    muZ_muZ = Kt_ZZ_sum / (m * (m-1))
    muX_muY = K_XY_sum / (m * m)
    muX_muZ = K_XZ_sum / (m * m)
    E_y_muY_sq = (sq_sum(Kt_YY_sums) - Kt_YY_2_sum) / (m*(m-1)*(m-2))
    E_z_muZ_sq = (sq_sum(Kt_ZZ_sums) - Kt_ZZ_2_sum) / (m*(m-1)*(m-2))
    E_x_muY_sq = (sq_sum(K_XY_sums_1) - K_XY_2_sum) / (m*m*(m-1))
    E_x_muZ_sq = (sq_sum(K_XZ_sums_1) - K_XZ_2_sum) / (m*m*(m-1))
    E_y_muX_sq = (sq_sum(K_XY_sums_0) - K_XY_2_sum) / (m*m*(m-1))
    E_z_muX_sq = (sq_sum(K_XZ_sums_0) - K_XZ_2_sum) / (m*m*(m-1))
    E_y_muY_y_muX = dot(Kt_YY_sums, K_XY_sums_0) / (m*m*(m-1))
    E_z_muZ_z_muX = dot(Kt_ZZ_sums, K_XZ_sums_0) / (m*m*(m-1))
    E_x_muY_x_muZ = dot(K_XY_sums_1, K_XZ_sums_1) / (m*m*m)
    E_kyy2 = Kt_YY_2_sum / (m * (m-1))
    E_kzz2 = Kt_ZZ_2_sum / (m * (m-1))
    E_kxy2 = K_XY_2_sum / (m * m)
    E_kxz2 = K_XZ_2_sum / (m * m)
    ### Combine into overall estimators
    mmd2_diff = muY_muY - 2 * muX_muY - muZ_muZ + 2 * muX_muZ
    # First- and second-order contributions to the variance of the difference.
    first_order = 4 * (m-2) / (m * (m-1)) * (
          E_y_muY_sq - muY_muY**2
        + E_x_muY_sq - muX_muY**2
        + E_y_muX_sq - muX_muY**2
        + E_z_muZ_sq - muZ_muZ**2
        + E_x_muZ_sq - muX_muZ**2
        + E_z_muX_sq - muX_muZ**2
        - 2 * E_y_muY_y_muX + 2 * muY_muY * muX_muY
        - 2 * E_x_muY_x_muZ + 2 * muX_muY * muX_muZ
        - 2 * E_z_muZ_z_muX + 2 * muZ_muZ * muX_muZ
    )
    second_order = 2 / (m * (m-1)) * (
          E_kyy2 - muY_muY**2
        + 2 * E_kxy2 - 2 * muX_muY**2
        + E_kzz2 - muZ_muZ**2
        + 2 * E_kxz2 - 2 * muX_muZ**2
        - 4 * E_y_muY_y_muX + 4 * muY_muY * muX_muY
        - 4 * E_x_muY_x_muZ + 4 * muX_muY * muX_muZ
        - 4 * E_z_muZ_z_muX + 4 * muZ_muZ * muX_muZ
    )
    var_est = first_order + second_order
    # Clamp the variance before the square root to avoid NaNs.
    ratio = mmd2_diff / mysqrt(tf.maximum(var_est, _eps))
    return mmd2_diff, ratio
def _get_sums(K_XY, K_YY, const_diagonal=False):
    """Precompute the kernel-sum statistics used by the ratio estimators.

    Returns (Kt_YY_sums, Kt_YY_2_sum, K_XY_sums_0, K_XY_sums_1, K_XY_2_sum),
    where the Kt_* quantities exclude the diagonal of K_YY.
    """
    m = tf.cast(K_YY.get_shape()[0], tf.float32)  # assumes X, Y, Z same shape
    if const_diagonal is False:
        diag_Y = tf.diag_part(K_YY)
        sum_diag2_Y = sq_sum(diag_Y)
    else:
        diag_val = tf.cast(const_diagonal, tf.float32)
        diag_Y = diag_val
        sum_diag2_Y = m * diag_val**2
    # Row/column sums; the K_YY row sums drop the diagonal contribution.
    Kt_YY_sums = tf.reduce_sum(K_YY, 1) - diag_Y
    K_XY_sums_0 = tf.reduce_sum(K_XY, 0)
    K_XY_sums_1 = tf.reduce_sum(K_XY, 1)
    # Sums of squared entries (diagonal excluded for K_YY).
    Kt_YY_2_sum = sq_sum(K_YY) - sum_diag2_Y
    K_XY_2_sum = sq_sum(K_XY)
    return Kt_YY_sums, Kt_YY_2_sum, K_XY_sums_0, K_XY_sums_1, K_XY_2_sum
def np_diff_polynomial_mmd2_and_ratio_with_saving(X, Y, saved_sums_for_Z):
    """NumPy twin of diff_polynomial_mmd2_and_ratio_with_saving.

    When saved_sums_for_Z is None, only the precomputed Y sums are returned
    (so a first call can seed the cache).
    """
    dim = float(X.shape[1])
    # Degree-3 polynomial kernel; TODO: could definitely do this faster.
    K_XY = (np.dot(X, Y.transpose()) / dim + 1) ** 3
    K_YY = (np.dot(Y, Y.transpose()) / dim + 1) ** 3
    m = float(K_YY.shape[0])
    sums_for_Y = _np_get_sums(K_XY, K_YY)
    if saved_sums_for_Z is None:
        return sums_for_Y
    mmd2_diff, ratio = _np_diff_mmd2_and_ratio_from_sums(sums_for_Y, saved_sums_for_Z, m)
    return mmd2_diff, ratio, sums_for_Y
def _np_diff_mmd2_and_ratio_from_sums(Y_related_sums, Z_related_sums, m, const_diagonal=False):
    """NumPy twin of _diff_mmd2_and_ratio_from_sums: MMD^2 difference and
    variance-normalized ratio from precomputed kernel sums."""
    Kt_YY_sums, Kt_YY_2_sum, K_XY_sums_0, K_XY_sums_1, K_XY_2_sum = Y_related_sums
    Kt_ZZ_sums, Kt_ZZ_2_sum, K_XZ_sums_0, K_XZ_sums_1, K_XZ_2_sum = Z_related_sums
    Kt_YY_sum = Kt_YY_sums.sum()
    Kt_ZZ_sum = Kt_ZZ_sums.sum()
    K_XY_sum = K_XY_sums_0.sum()
    K_XZ_sum = K_XZ_sums_0.sum()
    # TODO: turn these into dot products?
    # should figure out if that's faster or not on GPU / with theano...
    ### Estimators for the various terms involved
    muY_muY = Kt_YY_sum / (m * (m-1))
    muZ_muZ = Kt_ZZ_sum / (m * (m-1))
    muX_muY = K_XY_sum / (m * m)
    muX_muZ = K_XZ_sum / (m * m)
    E_y_muY_sq = (np.dot(Kt_YY_sums, Kt_YY_sums) - Kt_YY_2_sum) / (m*(m-1)*(m-2))
    E_z_muZ_sq = (np.dot(Kt_ZZ_sums, Kt_ZZ_sums) - Kt_ZZ_2_sum) / (m*(m-1)*(m-2))
    E_x_muY_sq = (np.dot(K_XY_sums_1, K_XY_sums_1) - K_XY_2_sum) / (m*m*(m-1))
    E_x_muZ_sq = (np.dot(K_XZ_sums_1, K_XZ_sums_1) - K_XZ_2_sum) / (m*m*(m-1))
    E_y_muX_sq = (np.dot(K_XY_sums_0, K_XY_sums_0) - K_XY_2_sum) / (m*m*(m-1))
    E_z_muX_sq = (np.dot(K_XZ_sums_0, K_XZ_sums_0) - K_XZ_2_sum) / (m*m*(m-1))
    E_y_muY_y_muX = np.dot(Kt_YY_sums, K_XY_sums_0) / (m*m*(m-1))
    E_z_muZ_z_muX = np.dot(Kt_ZZ_sums, K_XZ_sums_0) / (m*m*(m-1))
    E_x_muY_x_muZ = np.dot(K_XY_sums_1, K_XZ_sums_1) / (m*m*m)
    E_kyy2 = Kt_YY_2_sum / (m * (m-1))
    E_kzz2 = Kt_ZZ_2_sum / (m * (m-1))
    E_kxy2 = K_XY_2_sum / (m * m)
    E_kxz2 = K_XZ_2_sum / (m * m)
    ### Combine into overall estimators
    mmd2_diff = muY_muY - 2 * muX_muY - muZ_muZ + 2 * muX_muZ
    # First- and second-order contributions to the variance of the difference.
    first_order = 4 * (m-2) / (m * (m-1)) * (
          E_y_muY_sq - muY_muY**2
        + E_x_muY_sq - muX_muY**2
        + E_y_muX_sq - muX_muY**2
        + E_z_muZ_sq - muZ_muZ**2
        + E_x_muZ_sq - muX_muZ**2
        + E_z_muX_sq - muX_muZ**2
        - 2 * E_y_muY_y_muX + 2 * muY_muY * muX_muY
        - 2 * E_x_muY_x_muZ + 2 * muX_muY * muX_muZ
        - 2 * E_z_muZ_z_muX + 2 * muZ_muZ * muX_muZ
    )
    second_order = 2 / (m * (m-1)) * (
          E_kyy2 - muY_muY**2
        + 2 * E_kxy2 - 2 * muX_muY**2
        + E_kzz2 - muZ_muZ**2
        + 2 * E_kxz2 - 2 * muX_muZ**2
        - 4 * E_y_muY_y_muX + 4 * muY_muY * muX_muY
        - 4 * E_x_muY_x_muZ + 4 * muX_muY * muX_muZ
        - 4 * E_z_muZ_z_muX + 4 * muZ_muZ * muX_muZ
    )
    var_est = first_order + second_order
    # Clamp the variance before the square root to avoid NaNs.
    ratio = mmd2_diff / np.sqrt(max(var_est, _eps))
    return mmd2_diff, ratio
def _np_get_sums(K_XY, K_YY, const_diagonal=False):
m = float(K_YY.shape[0]) # Assumes X, Y, Z are same shape
### Get the various sums of kernels that we'll use
# Kts drop the diagonal, but we don't need to explicitly form them
if const_diagonal is not False:
const_diagonal = float(const_diagonal)
diag_Y = const_diagonal
sum_diag2_Y = m * const_diagonal**2
else:
diag_Y = np.diag(K_YY)
sum_diag2_Y = np.dot(diag_Y, diag_Y)
Kt_YY_sums = K_YY.sum(axis=1) - diag_Y
K_XY_sums_0 = K_XY.sum(axis=0)
K_XY_sums_1 = K_XY.sum(axis=1)
# TODO: turn these into dot products?
# should figure out if that's faster or not on GPU / with theano...
Kt_YY_2_sum = (K_YY ** 2).sum() - sum_diag2_Y
K_XY_2_sum = (K_XY ** 2).sum()
return Kt_YY_sums, Kt_YY_2_sum, K_XY_sums_0, K_XY_sums_1, K_XY_2_sum
| 17,404 | 33.465347 | 102 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/model.py | from __future__ import division, print_function
import os, sys, time, pprint, numpy as np
from . import mmd
from .ops import safer_norm, tf
from .architecture import get_networks
from .pipeline import get_pipeline
from utils import timer, scorer, misc
class MMD_GAN(object):
    def __init__(self, sess, config,
                 batch_size=64, output_size=64,
                 z_dim=100, c_dim=3, data_dir='./data'):
        # A negative discriminator learning rate means "reuse the generator's".
        if config.learning_rate_D < 0:
            config.learning_rate_D = config.learning_rate
        """
        Args:
            sess: TensorFlow session
            batch_size: The size of batch. Should be specified before training.
            output_size: (optional) The resolution in pixels of the images. [64]
            z_dim: (optional) Dimension of dim for Z. [100]
            gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
            df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
            gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024]
            dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
            c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
        """
        self.timer = timer.Timer()
        self.dataset = config.dataset
        # The architecture name pins the output resolution.
        if config.architecture == 'dc128':
            output_size = 128
        if config.architecture in ['dc64', 'dcgan64']:
            output_size = 64
        self.sess = sess
        if config.real_batch_size == -1:
            config.real_batch_size = config.batch_size
        self.config = config
        self.is_grayscale = (c_dim == 1)
        self.batch_size = batch_size
        self.real_batch_size = config.real_batch_size
        self.sample_size = 64 if self.config.is_train else batch_size
        self.output_size = output_size
        self.data_dir = data_dir
        self.z_dim = z_dim
        self.gf_dim = config.gf_dim
        self.df_dim = config.df_dim
        self.dof_dim = self.config.dof_dim
        self.c_dim = c_dim
        discriminator_desc = '_dc'
        if self.config.learning_rate_D == self.config.learning_rate:
            lr = 'lr%.8f' % self.config.learning_rate
        else:
            lr = 'lr%.8fG%fD' % (self.config.learning_rate, self.config.learning_rate_D)
        arch = '%dx%d' % (self.config.gf_dim, self.config.df_dim)
        # Human-readable run descriptor; used to name sample/log/checkpoint dirs.
        self.description = ("%s%s_%s%s_%sd%d-%d-%d_%s_%s_%s" % (
                    self.dataset, arch,
                    self.config.architecture, discriminator_desc,
                    self.config.kernel, self.config.dsteps,
                    self.config.start_dsteps, self.config.gsteps, self.batch_size,
                    self.output_size, lr))
        if self.config.batch_norm:
            self.description += '_bn'
        self._ensure_dirs()
        stdout = sys.stdout
        # Optionally tee stdout/stderr into a per-run log file.
        if self.config.log:
            self.old_stdout = sys.stdout
            self.old_stderr = sys.stderr
            self.log_file = open(os.path.join(self.sample_dir, 'log.txt'), 'w', buffering=1)
            print('Execution start time: %s' % time.ctime())
            print('Log file: %s' % self.log_file)
            stdout = self.log_file
            sys.stdout = self.log_file
            sys.stderr = self.log_file
        if config.compute_scores:
            self.scorer = scorer.Scorer(self.dataset, config.MMD_lr_scheduler, stdout=stdout)
        print('Execution start time: %s' % time.ctime())
        #pprint.PrettyPrinter().pprint(self.config.__dict__['__flags'])
        self.build_model()
        self.initialized_for_sampling = config.is_train
    def _ensure_dirs(self, folders=['sample', 'log', 'checkpoint']):
        """Create <config.X_dir>/<name+suffix>/<description> for each folder
        in `folders` and store the resulting path as self.X_dir."""
        # NOTE(review): mutable default argument; never mutated here, so safe.
        if type(folders) == str:
            folders = [folders]
        for folder in folders:
            ff = folder + '_dir'
            # NOTE(review): this creates a literal './sample_dir' etc. in the
            # CWD (ff is the attribute name, not a configured path) -- confirm
            # this is intended and not a leftover.
            if not os.path.exists(ff):
                os.makedirs(ff)
            self.__dict__[ff] = os.path.join(self.config.__getattr__(ff),
                                             self.config.name + self.config.suffix,
                                             self.description)
            if not os.path.exists(self.__dict__[ff]):
                os.makedirs(self.__dict__[ff])
    def build_model(self):
        """Build the graph: input pipeline, G, D, losses and summaries."""
        self.global_step = tf.Variable(0, name="global_step", trainable=False)
        self.lr = tf.Variable(self.config.learning_rate, name='lr',
                              trainable=False, dtype=tf.float32)
        # Learning rate decays multiplicatively, floored at 1e-6.
        self.lr_decay_op = self.lr.assign(tf.maximum(self.lr * self.config.decay_rate, 1.e-6))
        with tf.variable_scope('loss'):
            if self.config.is_train and (self.config.gradient_penalty > 0):
                self.gp = tf.Variable(self.config.gradient_penalty,
                                      name='gradient_penalty',
                                      trainable=False, dtype=tf.float32)
                self.gp_decay_op = self.gp.assign(self.gp * self.config.gp_decay_rate)
        self.set_pipeline()
        self.z = tf.random_uniform([self.batch_size, self.z_dim], minval=-1.,
                                   maxval=1., dtype=tf.float32, name='z')
        # Fixed latent batch so samples are comparable across checkpoints.
        self.sample_z = tf.constant(np.random.uniform(-1, 1, size=(self.sample_size,
                                                                   self.z_dim)).astype(np.float32),
                                    dtype=tf.float32, name='sample_z')
        Generator, Discriminator = get_networks(self.config.architecture)
        generator = Generator(self.gf_dim, self.c_dim, self.output_size, self.config.batch_norm)
        # Discriminator batch norm is disabled whenever a gradient penalty is used.
        dbn = self.config.batch_norm & (self.config.gradient_penalty <= 0)
        self.discriminator = Discriminator(self.df_dim, self.dof_dim, dbn)
        # tf.summary.histogram("z", self.z)
        self.G = generator(self.z, self.batch_size)
        self.sampler = generator(self.sample_z, self.sample_size)
        self.d_images_layers = self.discriminator(self.images,
            self.real_batch_size, return_layers=True)
        self.d_G_layers = self.discriminator(self.G, self.batch_size,
                                             return_layers=True)
        # 'hF' is the final discriminator feature layer.
        self.d_images = self.d_images_layers['hF']
        self.d_G = self.d_G_layers['hF']
        if self.config.is_train:
            self.set_loss(self.d_G, self.d_images)
        block = min(8, int(np.sqrt(self.real_batch_size)), int(np.sqrt(self.batch_size)))
        tf.summary.image("train/input image",
                         self.imageRearrange(tf.clip_by_value(self.images, 0, 1), block))
        tf.summary.image("train/gen image",
                         self.imageRearrange(tf.clip_by_value(self.G, 0, 1), block))
        # Split trainable variables between D and G by name prefix.
        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]
        self.saver = tf.train.Saver(max_to_keep=2)
        print('[*] Model built.')
    def set_loss(self, G, images):
        """Set kernel-MMD losses: G minimizes MMD^2, D maximizes it."""
        # Resolve the kernel function by name, e.g. 'rq' -> mmd._rq_kernel.
        kernel = getattr(mmd, '_%s_kernel' % self.config.kernel)
        kerGI = kernel(G, images)
        with tf.variable_scope('loss'):
            self.g_loss = mmd.mmd2(kerGI)
            self.d_loss = -self.g_loss
            self.optim_name = 'kernel_loss'
        self.add_gradient_penalty(kernel, G, images)
        self.add_l2_penalty()
        print('[*] Loss set')
    def add_gradient_penalty(self, kernel, fake, real):
        """Penalize the MMD witness function's input-gradient norm (towards 1)
        on random interpolates between real and fake images."""
        bs = min([self.batch_size, self.real_batch_size])
        real, fake = real[:bs], fake[:bs]
        alpha = tf.random_uniform(shape=[bs, 1, 1, 1])
        real_data = self.images[:bs] # discriminator input level
        fake_data = self.G[:bs] # discriminator input level
        x_hat_data = (1. - alpha) * real_data + alpha * fake_data
        x_hat = self.discriminator(x_hat_data, bs)
        # Mean kernel evaluation between x_hat features and a feature batch.
        Ekx = lambda yy: tf.reduce_mean(kernel(x_hat, yy, K_XY_only=True), axis=1)
        Ekxr, Ekxf = Ekx(real), Ekx(fake)
        # MMD witness evaluated at x_hat; its gradient wrt the *input* images.
        witness = Ekxr - Ekxf
        gradients = tf.gradients(witness, [x_hat_data])[0]
        penalty = tf.reduce_mean(tf.square(safer_norm(gradients, axis=1) - 1.0))
        with tf.variable_scope('loss'):
            if self.config.gradient_penalty > 0:
                self.d_loss += penalty * self.gp
                self.optim_name += ' (gp %.1f)' % self.config.gradient_penalty
                tf.summary.scalar('dx_penalty', penalty)
                print('[*] Gradient penalty added')
            tf.summary.scalar(self.optim_name + ' G', self.g_loss)
            tf.summary.scalar(self.optim_name + ' D', self.d_loss)
    def add_l2_penalty(self):
        """Optionally penalize the mean squared magnitude of every
        discriminator layer activation (real and generated batches)."""
        if self.config.L2_discriminator_penalty > 0:
            penalty = 0.0
            for _, layer in self.d_G_layers.items():
                penalty += tf.reduce_mean(tf.reshape(tf.square(layer), [self.batch_size, -1]), axis=1)
            for _, layer in self.d_images_layers.items():
                penalty += tf.reduce_mean(tf.reshape(tf.square(layer), [self.batch_size, -1]), axis=1)
            self.d_L2_penalty = self.config.L2_discriminator_penalty * tf.reduce_mean(penalty)
            self.d_loss += self.d_L2_penalty
            self.optim_name += ' (L2 dp %.6f)' % self.config.L2_discriminator_penalty
            # Collapse ') (' so the name reads like '(gp x, L2 dp y)'.
            self.optim_name = self.optim_name.replace(') (', ', ')
            tf.summary.scalar('L2_disc_penalty', self.d_L2_penalty)
            print('[*] L2 discriminator penalty added')
    def set_grads(self):
        """Create Adam optimizers and norm-clipped gradient ops for G and D."""
        with tf.variable_scope("G_grads"):
            self.g_optim = tf.train.AdamOptimizer(self.lr, beta1=self.config.beta1, beta2=0.9)
            self.g_gvs = self.g_optim.compute_gradients(
                loss=self.g_loss,
                var_list=self.g_vars
            )
            # Clip each gradient tensor to norm 1 for stability.
            self.g_gvs = [(tf.clip_by_norm(gg, 1.), vv) for gg, vv in self.g_gvs]
            self.g_grads = self.g_optim.apply_gradients(
                self.g_gvs,
                global_step=self.global_step
            ) # minimizes self.g_loss <==> minimizes MMD
        with tf.variable_scope("D_grads"):
            # D's rate scales off the (possibly decayed) shared self.lr.
            self.d_optim = tf.train.AdamOptimizer(
                self.lr * self.config.learning_rate_D / self.config.learning_rate,
                beta1=self.config.beta1, beta2=0.9
            )
            self.d_gvs = self.d_optim.compute_gradients(
                loss=self.d_loss,
                var_list=self.d_vars
            )
            # negative gradients not needed - by definition d_loss = -optim_loss
            self.d_gvs = [(tf.clip_by_norm(gg, 1.), vv) for gg, vv in self.d_gvs]
            self.d_grads = self.d_optim.apply_gradients(self.d_gvs) # minimizes self.d_loss <==> max MMD
        print('[*] Gradients set')
    def train_step(self, batch_images=None):
        """Run one optimizer step, alternating D and G updates via counters.

        Returns (g_loss, d_loss, step).
        """
        step = self.sess.run(self.global_step)
        # Summaries: every 50 steps early on, every 1000 later, and after errors.
        write_summary = ((np.mod(step, 50) == 0) and (step < 1000)) \
                        or (np.mod(step, 1000) == 0) or (self.err_counter > 0)
        # d_counter cycles 0..d_steps; 0 means "do a generator step now".
        if (self.g_counter == 0) and (self.d_grads is not None):
            d_steps = self.config.dsteps
            if ((step % 500 == 0) or (step < 20)):
                d_steps = self.config.start_dsteps
            self.d_counter = (self.d_counter + 1) % (d_steps + 1)
        if self.d_counter == 0:
            self.g_counter = (self.g_counter + 1) % self.config.gsteps
        eval_ops = [self.g_gvs, self.d_gvs, self.g_loss, self.d_loss]
        if self.config.is_demo:
            summary_str, g_grads, d_grads, g_loss, d_loss = self.sess.run(
                [self.TrainSummary] + eval_ops
            )
        else:
            if self.d_counter == 0:
                if write_summary:
                    _, summary_str, g_grads, d_grads, g_loss, d_loss = self.sess.run(
                        [self.g_grads, self.TrainSummary] + eval_ops
                    )
                else:
                    _, g_grads, d_grads, g_loss, d_loss = self.sess.run([self.g_grads] + eval_ops)
            else:
                _, g_grads, d_grads, g_loss, d_loss = self.sess.run([self.d_grads] + eval_ops)
        et = self.timer(step, "g step" if (self.d_counter == 0) else "d step", False)
        assert ~np.isnan(g_loss), et + "NaN g_loss, epoch: "
        assert ~np.isnan(d_loss), et + "NaN d_loss, epoch: "
        # if G STEP, after D steps
        if self.d_counter == 0:
            if step % 10000 == 0:
                try:
                    self.writer.add_summary(summary_str, step)
                    self.err_counter = 0
                except Exception as e:
                    # summary_str may be unbound if write_summary was False.
                    print('Step %d summary exception. ' % step, e)
                    self.err_counter += 1
            if write_summary:
                self.timer(step, "%s, G: %.8f, D: %.8f" % (self.optim_name, g_loss, d_loss))
                if self.config.L2_discriminator_penalty > 0:
                    print(' ' * 22 + ('Discriminator L2 penalty: %.8f' % self.sess.run(self.d_L2_penalty)))
            # Decay lr/gp five times over the whole run (unless MMD scheduler).
            if np.mod(step + 1, self.config.max_iteration//5) == 0:
                if not self.config.MMD_lr_scheduler:
                    # self.lr *= self.config.decay_rate
                    self.sess.run(self.lr_decay_op)
                    print('current learning rate: %f' % self.sess.run(self.lr))
                if (self.config.gp_decay_rate > 0) and (self.config.gradient_penalty > 0):
                    self.sess.run(self.gp_decay_op)
                    print('current gradient penalty: %f' % self.sess.run(self.gp))
        if self.config.compute_scores:
            self.scorer.compute(self, step)
        return g_loss, d_loss, step
    def train_init(self):
        """One-time setup before training: grads, variable init, summary
        writer, checkpoint restore and learning-rate fast-forwarding."""
        self.set_grads()
        self.sess.run(tf.local_variables_initializer())
        self.sess.run(tf.global_variables_initializer())
        print('[*] Variables initialized.')
        self.TrainSummary = tf.summary.merge_all()
        self._ensure_dirs('log')
        self.writer = tf.summary.FileWriter(self.log_dir, self.sess.graph)
        self.d_counter, self.g_counter, self.err_counter = 0, 0, 0
        if self.load_checkpoint():
            print(""" [*] Load SUCCESS, re-starting at epoch %d with learning
                rate %.7f""" % (self.sess.run(self.global_step),
                                self.sess.run(self.lr)))
        else:
            print(" [!] Load failed...")
        # self.sess.run(self.lr.assign(self.config.learning_rate))
        if (not self.config.MMD_lr_scheduler) and (self.sess.run(self.gp) == self.config.gradient_penalty):
            # Re-apply the decays that would have happened before this restart.
            step = self.sess.run(self.global_step)
            lr_decays_so_far = int((step * 5.)/self.config.max_iteration)
            # NOTE(review): '*=' rebinds self.lr/self.gp from a tf.Variable to
            # a Tensor expression instead of assigning in place -- confirm the
            # decay ops from build_model still target the intended variables.
            self.lr *= self.config.decay_rate ** lr_decays_so_far
            if self.config.gp_decay_rate > 0:
                self.gp *= self.config.gp_decay_rate ** lr_decays_so_far
                print('current gradient penalty: %f' % self.sess.run(self.gp))
            print('current learning rate: %f' % self.sess.run(self.lr))
        print('[*] Model initialized for training')
    def set_pipeline(self):
        """Connect the dataset input pipeline; fills self.images."""
        Pipeline = get_pipeline(self.dataset, self.config.suffix)
        pipe = Pipeline(self.output_size, self.c_dim, self.real_batch_size,
                        self.data_dir,
                        timer=self.timer, sample_dir=self.sample_dir)
        self.images = pipe.connect()
    def train(self):
        """Main loop: run steps until max_iteration, saving checkpoints and
        samples (and optionally layer outputs) along the way."""
        self.train_init()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=self.sess, coord=coord)
        step = 0
        print('[ ] Training ... ')
        while step <= self.config.max_iteration:
            g_loss, d_loss, step = self.train_step()
            self.save_checkpoint_and_samples(step)
            if self.config.save_layer_outputs:
                self.save_layers(step)
        # Cleanly stop the input-pipeline queue threads.
        coord.request_stop()
        coord.join(threads)
def save_checkpoint(self, step=None):
self._ensure_dirs('checkpoint')
if step is None:
self.saver.save(self.sess,
os.path.join(self.checkpoint_dir, "best.model"))
else:
self.saver.save(self.sess,
os.path.join(self.checkpoint_dir, "MMDGAN.model"),
global_step=step)
def load_checkpoint(self):
print(" [*] Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(self.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(self.checkpoint_dir, ckpt_name))
return True
else:
return False
def save_checkpoint_and_samples(self, step, freq=1000):
if (np.mod(step, freq) == 0) and (self.d_counter == 0):
self.save_checkpoint(step)
samples = self.sess.run(self.sampler)
self._ensure_dirs('sample')
p = os.path.join(self.sample_dir, 'train_{:02d}.png'.format(step))
misc.save_images(samples[:64, :, :, :], [8, 8], p)
    def save_layers(self, step, freq=1000, n=256, layers=[-1, -2]):
        """At selected steps, dump n samples of the chosen discriminator layer
        outputs (real and fake) to an .npz file in the sample directory."""
        # NOTE(review): mutable default `layers` is never mutated, so it is safe.
        c = self.config.save_layer_outputs
        # Steps at which to save: every `freq`, plus (if c > 1) a geometric
        # series of early steps c^0, c^1, ... below `freq`.
        valid = list(freq * np.arange(self.config.max_iteration/freq + 1))
        if c > 1:
            valid += [int(k) for k in c**np.arange(np.log(freq)/np.log(c))]
        if (step in valid) and (self.d_counter == 0):
            # NOTE(review): when layers == 'all', `keys` is never assigned and
            # the next line raises NameError -- confirm intended handling.
            if not (layers == 'all'):
                keys = [sorted(list(self.d_G_layers))[i] for i in layers]
            fake = [(key + '_fake', self.d_G_layers[key]) for key in keys]
            real = [(key + '_real', self.d_images_layers[key]) for key in keys]
            values = self._evaluate_tensors(dict(real + fake), n=n)
            path = os.path.join(self.sample_dir, 'layer_outputs_%d.npz' % step)
            np.savez(path, **values)
    def imageRearrange(self, image, block=4):
        """Tile the first block*block images of a batch into one
        (1, size*block, size*block, c) grid image for a TensorBoard summary."""
        image = tf.slice(image, [0, 0, 0, 0], [block * block, -1, -1, -1])
        x1 = tf.batch_to_space(image, [[0, 0], [0, 0]], block)
        # Interleave the batch dimension into rows/columns of the grid.
        image_r = tf.reshape(tf.transpose(tf.reshape(x1,
            [self.output_size, block, self.output_size, block, self.c_dim])
            , [1, 0, 3, 2, 4]),
            [1, self.output_size * block, self.output_size * block, self.c_dim])
        return image_r
def _evaluate_tensors(self, variable_dict, n=None):
if n is None:
n = self.batch_size
values = dict([(key, []) for key in variable_dict.keys()])
sampled = 0
while sampled < n:
vv = self.sess.run(variable_dict)
for key, val in vv.items():
values[key].append(val)
sampled += list(vv.items())[0][1].shape[0]
for key, val in values.items():
values[key] = np.concatenate(val, axis=0)[:n]
return values
    def get_samples(self, n=None, save=True, layers=[]):
        """Generate n samples (optionally with selected discriminator-layer
        features). Returns arrays when save is False, else writes PNGs to
        config.output_dir_of_test_samples."""
        # NOTE(review): mutable default `layers` is never mutated, so safe.
        # Lazily initialize + restore when sampling outside of training.
        if not (self.initialized_for_sampling or self.config.is_train):
            print('[*] Loading from ' + self.checkpoint_dir + '...')
            self.sess.run(tf.local_variables_initializer())
            self.sess.run(tf.global_variables_initializer())
            if self.load_checkpoint():
                print(" [*] Load SUCCESS, model trained up to epoch %d" % \
                    self.sess.run(self.global_step))
            else:
                print(" [!] Load failed...")
                return
        if len(layers) > 0:
            outputs = dict([(key + '_features', val) for key, val in self.d_G_layers.items()])
            if not (layers == 'all'):
                keys = [sorted(list(outputs.keys()))[i] for i in layers]
                outputs = dict([(key, outputs[key]) for key in keys])
        else:
            outputs = {}
        outputs['samples'] = self.G
        values = self._evaluate_tensors(outputs, n=n)
        if not save:
            if len(layers) > 0:
                return values
            return values['samples']
        if not os.path.isdir(self.config.output_dir_of_test_samples):
            os.mkdir(self.config.output_dir_of_test_samples)
        for key, val in values.items():
            if key == 'samples':
                for idx in range(val.shape[0]):
                    print('Generating png to %s: %d / %d...' % (self.config.output_dir_of_test_samples, idx, val.shape[0]), end='\r')
                    # Filename prefix encodes which model produced the samples.
                    if self.config.model == 'mmd':
                        p = os.path.join(self.config.output_dir_of_test_samples, 'MMD_{:08d}.png'.format(idx))
                    elif self.config.model == 'wgan_gp':
                        p = os.path.join(self.config.output_dir_of_test_samples, 'WGAN-GP_{:08d}.png'.format(idx))
                    elif self.config.model == 'cramer':
                        p = os.path.join(self.config.output_dir_of_test_samples, 'CRAMER_{:08d}.png'.format(idx))
                    misc.save_images(val[idx:idx+1, :, :, :], [1, 1], p)
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/wgan_gp.py | from .model import MMD_GAN, tf
class WGAN_GP(MMD_GAN):
    """WGAN with gradient penalty, reusing the MMD_GAN training harness."""
    def __init__(self, sess, config, **kwargs):
        # The WGAN critic outputs a single scalar.
        config.dof_dim = 1
        super(WGAN_GP, self).__init__(sess, config, **kwargs)
    def set_loss(self, G, images):
        """WGAN-GP objective: critic score difference plus a gradient penalty
        on random interpolates between real and generated images."""
        alpha = tf.random_uniform(shape=[self.batch_size, 1, 1, 1])
        real_data = self.images
        fake_data = self.G
        differences = fake_data - real_data
        interpolates0 = real_data + (alpha*differences)
        interpolates = self.discriminator(interpolates0, self.batch_size)
        gradients = tf.gradients(interpolates, [interpolates0])[0]
        slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
        gradient_penalty = tf.reduce_mean((slopes-1.)**2)
        self.gp = tf.get_variable('gradient_penalty', dtype=tf.float32,
                                  initializer=self.config.gradient_penalty)
        self.d_loss = tf.reduce_mean(G) - tf.reduce_mean(images) + self.gp * gradient_penalty
        self.g_loss = -tf.reduce_mean(G)
        self.optim_name = 'wgan_gp%d_loss' % int(self.config.gradient_penalty)
        tf.summary.scalar(self.optim_name + ' G', self.g_loss)
        tf.summary.scalar(self.optim_name + ' D', self.d_loss)
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/__init__.py | __all__= ['model', 'wgan_gp', 'cramer', 'ops', 'mmd', 'resnet', 'architecture']
| 80 | 39.5 | 79 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/ops.py | from tensorflow.python.framework import ops
from utils.misc import variable_summaries
from .mmd import _eps, tf
class batch_norm(object):
    """Callable wrapper around tf.contrib.layers.batch_norm with a fixed
    variable scope, so repeated calls reuse the same statistics."""
    def __init__(self, epsilon=1e-5, momentum = 0.9, name="batch_norm"):
        with tf.variable_scope(name):
            self.epsilon = epsilon
            self.momentum = momentum
            self.name = name
    def __call__(self, x, train=True):
        # updates_collections=None forces in-place moving-average updates.
        return tf.contrib.layers.batch_norm(x,
                                            decay=self.momentum,
                                            updates_collections=None,
                                            epsilon=self.epsilon,
                                            scale=True,
                                            is_training=train,
                                            scope=self.name)
def binary_cross_entropy(preds, targets, name=None):
    """Computes mean binary cross entropy given `preds`.
    For brevity, let `x = preds`, `z = targets`. The logistic loss is
        loss(x, z) = - mean_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i]))
    Args:
        preds: A `Tensor` of type `float32` or `float64`.
        targets: A `Tensor` of the same type and shape as `preds`.
    """
    # Small epsilon keeps log() away from 0.
    eps = 1e-12
    with ops.op_scope([preds, targets], name, "bce_loss") as name:
        preds = ops.convert_to_tensor(preds, name="preds")
        targets = ops.convert_to_tensor(targets, name="targets")
        return tf.reduce_mean(-(targets * tf.log(preds + eps) +
                              (1. - targets) * tf.log(1. - preds + eps)))
def conv_cond_concat(x, y):
    """Concatenate conditioning vector on feature map axis.

    Tiles the 1x1-spatial conditioning tensor `y` over the spatial grid of
    `x` (NHWC) and appends it along the channel axis.
    """
    x_shapes = x.get_shape()
    y_shapes = y.get_shape()
    # Fix: tf.concat takes (values, axis) since TF 1.0; the old
    # (axis, values) order used here fails on the TF versions the rest of
    # this repo targets (cf. tf.concat([...], axis=1) in resnet/block.py).
    return tf.concat(
        [x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)
def conv2d(input_, output_dim,
           k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
           name="conv2d"):
    """SAME-padded 2-D convolution with bias (NHWC layout).

    Args:
        input_: 4-D tensor, channels last.
        output_dim: number of output channels.
        k_h, k_w: kernel height / width.
        d_h, d_w: stride height / width.
        stddev: stddev of the truncated-normal filter initializer.
        name: variable scope holding the 'w' and 'biases' variables.
    """
    with tf.variable_scope(name):
        # Attach summaries only the first time this scope is built; on scope
        # reuse the trainable variables already exist and we skip them.
        scope_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                       tf.get_variable_scope().name)
        has_summary = any([('w' in v.op.name) for v in scope_vars])
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
        biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
        # Reshape restores the static shape that bias_add may have lost.
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        if not has_summary:
            variable_summaries({'W': w, 'b': biases})
        return conv
def deconv2d(input_, output_shape,
             k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
             name="deconv2d", with_w=False):
    """Transposed 2-D convolution ("deconvolution") with bias, NHWC layout.

    Args:
        input_: 4-D input tensor, channels last.
        output_shape: full static output shape [batch, height, width, channels].
        k_h, k_w, d_h, d_w: kernel and stride sizes.
        stddev: stddev of the random-normal filter initializer.
        name: variable scope for the 'w' and 'biases' variables.
        with_w: when True, also return the filter and bias variables.
    """
    with tf.variable_scope(name):
        # Attach summaries only on the first construction of this scope.
        scope_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                       tf.get_variable_scope().name)
        has_summary = any([('w' in v.op.name) for v in scope_vars])
        # filter : [height, width, output_channels, in_channels]
        w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
                            initializer=tf.random_normal_initializer(stddev=stddev))
        try:
            deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
                                            strides=[1, d_h, d_w, 1])
        # Support for verisons of TensorFlow before 0.7.0
        except AttributeError:
            deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
                                    strides=[1, d_h, d_w, 1])
        biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        # Reshape restores the static shape that bias_add may have lost.
        deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
        if not has_summary:
            variable_summaries({'W': w, 'b': biases})
        if with_w:
            return deconv, w, biases
        else:
            return deconv
def lrelu(x, leak=0.2, name="lrelu"):
    """Leaky ReLU activation: max(x, leak * x).

    Fix: the `name` argument was previously accepted but ignored; it is now
    forwarded to the op so the activation shows up under the intended name
    in the graph.
    """
    return tf.maximum(x, leak * x, name=name)
def linear(input_, output_size, name="Linear", stddev=0.01, bias_start=0.0, with_w=False):
    """Fully-connected layer: input_ @ Matrix + bias.

    Args:
        input_: 2-D tensor [batch, in_features].
        output_size: number of output features.
        name: variable scope for the 'Matrix' and 'bias' variables.
        stddev: stddev of the random-normal weight initializer.
        bias_start: constant initial value of the bias.
        with_w: when True, also return the weight and bias variables.
    """
    shape = input_.get_shape().as_list()
    with tf.variable_scope(name):
        # Attach summaries only on the first construction of this scope.
        scope_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                       tf.get_variable_scope().name)
        has_summary = any([('Matrix' in v.op.name) for v in scope_vars])
        matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
                                 tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", [output_size],
            initializer=tf.constant_initializer(bias_start))
        if not has_summary:
            variable_summaries({'W': matrix, 'b': bias})
        if with_w:
            return tf.matmul(input_, matrix) + bias, matrix, bias
        else:
            return tf.matmul(input_, matrix) + bias
class linear_n:
    """Weight-normalised fully-connected layer with data-dependent init.

    The effective weight is W = Matrix * scale / ||Matrix||_col (one norm per
    output column).  `init_op` implements the data-dependent initialisation:
    it rescales `scale` and `b` so the pre-activations of the init batch have
    zero mean and unit stddev.
    """
    def __init__(self, input_, output_size, scope=None, stddev=0.1,
                 bias_start=0., train_scale=False):
        shape = input_.get_shape().as_list()
        with tf.variable_scope(scope or "Linear"):
            self.matrix = tf.get_variable(
                "Matrix", [shape[1], output_size], tf.float32,
                tf.random_normal_initializer(stddev=stddev))
            # Per-column gain g; only trained when train_scale=True.
            self.scale = tf.get_variable(
                "scale", [output_size], tf.float32, tf.constant_initializer(1.0),
                trainable=train_scale)
            self.b = tf.get_variable(
                "bias", [output_size], tf.float32, tf.constant_initializer(bias_start))
            # Secondary (always-trainable) gain used by l2_normalize_op.
            self.scale_ = tf.get_variable(
                "scale_", [output_size], tf.float32, tf.constant_initializer(1.0))
            # Weight-norm reparameterisation of the raw Matrix.
            self.W = self.matrix * (self.scale/tf.sqrt(tf.reduce_sum(tf.square(self.matrix),0)))
            self.out = self.output(input_)
    def output(self, inp):
        # Forward pass with the current (possibly re-normalised) weights.
        return tf.matmul(inp, self.W) + self.b
    def init_op(self):
        """Return an op implementing data-dependent init of scale and bias."""
        mean = tf.reduce_mean(self.out, 0)
        stdv = tf.sqrt(tf.reduce_mean(tf.square(self.out), 0))
        self.out = (self.out - mean)/stdv
        scale_update_op = tf.assign(self.scale, self.scale/stdv)
        b_update_op = tf.assign(self.b, -mean/stdv)
        return tf.group(*[scale_update_op, b_update_op])
    def l2_normalize_op(self):
        # Re-normalise W in the graph using the secondary gain scale_.
        self.W = self.W * (self.scale_ / tf.sqrt(
            1e-6 + tf.reduce_sum(tf.square(self.W), 0)))
def safer_norm(tensor, axis=None, keep_dims=False, epsilon=_eps):
    """Euclidean norm with an epsilon added inside the sqrt, so the value and
    its gradient stay finite even when the reduced sum is exactly zero."""
    sum_of_squares = tf.reduce_sum(tf.square(tensor), axis=axis,
                                   keep_dims=keep_dims)
    return tf.sqrt(epsilon + sum_of_squares)
def sq_sum(t, name=None):
    "The squared Frobenius-type norm of a tensor, sum(t ** 2)."
    with tf.name_scope(name, "SqSum", [t]):
        t = tf.convert_to_tensor(t, name='t')
        # tf.nn.l2_loss(t) computes sum(t**2)/2, hence the factor 2.
        return 2 * tf.nn.l2_loss(t)
def dot(x, y, name=None):
    "The dot product of two vectors x and y."
    with tf.name_scope(name, "Dot", [x, y]):
        x = tf.convert_to_tensor(x, name='x')
        y = tf.convert_to_tensor(y, name='y')
        # Both inputs must be rank-1; the matmul of a (1,n) row with an
        # (n,1) column yields a (1,1) result that squeeze turns into a scalar.
        x.get_shape().assert_has_rank(1)
        y.get_shape().assert_has_rank(1)
        return tf.squeeze(tf.matmul(tf.expand_dims(x, 0), tf.expand_dims(y, 1)))
| 7,220 | 38.244565 | 104 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/pipeline.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 11 14:11:46 2018
@author: mikolajbinkowski
"""
import os, time, lmdb, io
import numpy as np
import tensorflow as tf
from PIL import Image
from glob import glob
import matplotlib.pyplot as plt
from utils import misc
class Pipeline:
    """Base class for input pipelines.

    Subclasses must define `self.single_sample` (a tensor producing one chunk
    of samples) before calling `connect`, which wires it into a shuffling
    batch queue and applies the subclass' `_transform`.
    """
    def __init__(self, output_size, c_dim, batch_size, data_dir, **kwargs):
        self.output_size = output_size
        self.c_dim = c_dim
        self.batch_size = batch_size
        # Read from disk in large chunks (at least 4000 samples) so the
        # shuffle queue stays well fed.
        self.read_batch = max(4000, batch_size * 10)
        self.read_count = 0
        self.data_dir = data_dir
        self.shape = [self.read_batch, self.output_size,
                      self.output_size, self.c_dim]

    def _transform(self, x):
        # Identity by default; subclasses override to resize / rescale.
        return x

    def connect(self):
        assert hasattr(self, 'single_sample'), 'Pipeline needs to have single_sample defined before connecting'
        self.single_sample.set_shape(self.shape)
        batched = tf.train.shuffle_batch(
            [self.single_sample], self.batch_size,
            capacity=self.read_batch,
            min_after_dequeue=self.read_batch // 8,
            num_threads=16,
            # Rank-4 single_sample means one dequeue yields many samples.
            enqueue_many=len(self.shape) == 4)
        return self._transform(batched)
class LMDB(Pipeline):
    """Pipeline that streams images out of an LMDB database via tf.py_func.

    At construction time every key is scanned once; at run time a shuffled
    key queue drives `_get_sample_from_lmdb`, which reads a whole chunk of
    `read_batch` images starting at that key.
    """
    def __init__(self, *args, **kwargs):
        super(LMDB, self).__init__(*args, **kwargs)
        self.timer = kwargs.get('timer', None)  # callable(step, msg) logger
        self.keys = []
        env = lmdb.open(self.data_dir, map_size=1099511627776, max_readers=100, readonly=True)
        with env.begin() as txn:
            cursor = txn.cursor()
            while cursor.next():
                self.keys.append(cursor.key())
        env.close()
        print('No. of records in lmdb database: %d' % len(self.keys))
        # tf queue for getting keys
        key_producer = tf.train.string_input_producer(self.keys, shuffle=True)
        single_key = key_producer.dequeue()
        self.single_sample = tf.py_func(self._get_sample_from_lmdb, [single_key], tf.float32)
    def _get_sample_from_lmdb(self, key, limit=None):
        """Read up to `limit` (default: read_batch) images starting at `key`,
        re-opening the database and wrapping around on errors/end-of-db."""
        if limit is None:
            limit = self.read_batch
        with tf.device('/cpu:0'):
            rc = self.read_count
            self.read_count += 1
            tt = time.time()
            self.timer(rc, 'lmdb: start reading chunk from database')
            ims = []
            db_count = 1
            # Outer loop: re-open the environment after any read error and
            # keep collecting until the chunk is full.
            while len(ims) < limit:
                env = lmdb.open(self.data_dir, map_size=1099511627776, max_readers=100, readonly=True)
                with env.begin(write=False) as txn:
                    cursor = txn.cursor()
                    cursor.set_key(key)
                    if not cursor.next():
                        cursor.first()  # wrap around at end of database
                    db_err = False
                    while (len(ims) < limit) and (not db_err):
                        try:
                            key, byte_arr = cursor.item()
                            byte_im = io.BytesIO(byte_arr)
                            # byte_im.seek(0)
                            im = Image.open(byte_im)
                            ims.append(misc.center_and_scale(im, size=self.output_size))
                        except Exception as e:
                            # Log and bail out of this transaction; the outer
                            # loop re-opens the db and resumes at `key`.
                            self.timer(rc, 'lmdb error: ' + str(e))
                            self.timer(rc, 'lmdb open no. %d failed at key %s, with %d collected images' % (db_count, repr(key), len(ims)))
                            db_count += 1
                            db_err = True
                        if not cursor.next():
                            cursor.first()
                env.close()
            self.timer(rc, 'lmdb read time = %f' % (time.time() - tt))
        return np.asarray(ims, dtype=np.float32)
    def constant_sample(self, size):
        # A fixed-size sample chunk starting at one random key.
        choice = np.random.choice(self.keys, 1)[0]
        return self._get_sample_from_lmdb(choice, limit=size)
class TfRecords(Pipeline):
    """Reads LSUN bedroom images from TFRecord shards.

    Fix: __init__ previously never invoked the base constructor, so
    `self.data_dir` and `self.output_size` (used immediately below) and the
    batching attributes required by `Pipeline.connect` were never set and the
    class raised AttributeError on construction.  The base initializer is now
    called first; `self.shape` is then overridden with the single-image shape
    (rank 3, so `connect` enqueues one image at a time).
    """
    def __init__(self, *args, **kwargs):
        super(TfRecords, self).__init__(*args, **kwargs)
        regex = os.path.join(self.data_dir, 'lsun-%d/bedroom_train_*' % self.output_size)
        filename_queue = tf.train.string_input_producer(tf.gfile.Glob(regex), num_epochs=None)
        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(filename_queue)
        features = tf.parse_single_example(serialized_example, features={
            'image/class/label': tf.FixedLenFeature([1], tf.int64),
            'image/encoded': tf.FixedLenFeature([], tf.string),
        })
        image = tf.image.decode_jpeg(features['image/encoded'])
        # Scale uint8 pixels to [0, 1] floats.
        self.single_sample = tf.cast(image, tf.float32)/255.
        self.shape = [self.output_size, self.output_size, self.c_dim]
class JPEG_128(Pipeline):
    """Reads 128x128 image files from data_dir and bilinearly resizes them to
    `output_size` in `_transform` (applied after batching)."""
    def __init__(self, *args, base_size=128, random_crop=9, **kwargs):
        super(JPEG_128, self).__init__(*args, **kwargs)
        #base_size = kwargs.get('base_size', 160)
        #random_crop = kwargs.get('random_crop', 9)
        # NOTE(review): globs '*.png' but decodes with decode_jpeg — confirm
        # the files on disk are actually JPEG-encoded (or use decode_image).
        files = glob(os.path.join(self.data_dir, '*.png'))
        filename_queue = tf.train.string_input_producer(files, shuffle=True)
        reader = tf.WholeFileReader()
        _, raw = reader.read(filename_queue)
        decoded = tf.image.decode_jpeg(raw, channels=self.c_dim) # HWC
        # Random flip/crop augmentation disabled; kept for reference.
        #bs = base_size + 2 * random_crop
        #cropped = tf.image.resize_image_with_crop_or_pad(decoded, bs, bs)
        #if random_crop > 0:
        #    cropped = tf.image.random_flip_left_right(cropped)
        #    cropped = tf.random_crop(cropped, [base_size, base_size, self.c_dim])
        self.single_sample = decoded
        # Rank-3 shape: connect() enqueues one image at a time.
        self.shape = [base_size, base_size, self.c_dim]
    def _transform(self, x):
        # Resize the batched uint8 images and rescale to [0, 1] floats.
        x = tf.image.resize_bilinear(x, (self.output_size, self.output_size))
        return tf.cast(x, tf.float32)/255.
class JPEG_64(Pipeline):
    """Identical to JPEG_128 except the on-disk base size defaults to 64
    (used for tiny_imagenet)."""
    def __init__(self, *args, base_size=64, random_crop=9, **kwargs):
        super(JPEG_64, self).__init__(*args, **kwargs)
        #base_size = kwargs.get('base_size', 160)
        #random_crop = kwargs.get('random_crop', 9)
        # NOTE(review): globs '*.png' but decodes with decode_jpeg — confirm
        # the files on disk are actually JPEG-encoded (or use decode_image).
        files = glob(os.path.join(self.data_dir, '*.png'))
        filename_queue = tf.train.string_input_producer(files, shuffle=True)
        reader = tf.WholeFileReader()
        _, raw = reader.read(filename_queue)
        decoded = tf.image.decode_jpeg(raw, channels=self.c_dim) # HWC
        # Random flip/crop augmentation disabled; kept for reference.
        #bs = base_size + 2 * random_crop
        #cropped = tf.image.resize_image_with_crop_or_pad(decoded, bs, bs)
        #if random_crop > 0:
        #    cropped = tf.image.random_flip_left_right(cropped)
        #    cropped = tf.random_crop(cropped, [base_size, base_size, self.c_dim])
        self.single_sample = decoded
        # Rank-3 shape: connect() enqueues one image at a time.
        self.shape = [base_size, base_size, self.c_dim]
    def _transform(self, x):
        # Resize the batched uint8 images and rescale to [0, 1] floats.
        x = tf.image.resize_bilinear(x, (self.output_size, self.output_size))
        return tf.cast(x, tf.float32)/255.
class Mnist(Pipeline):
    """Feeds the raw MNIST idx files (train + test, 70k images total) through
    an in-memory queue.

    Fixes: the idx files are binary but were opened in text mode and never
    closed — they are now read with `with open(..., 'rb')`.  The deprecated
    `np.float` alias (removed in modern NumPy) is replaced by the builtin
    `float`, which is the same dtype (float64).
    """
    def __init__(self, *args, **kwargs):
        super(Mnist, self).__init__(*args, **kwargs)

        def _read_idx(filename, offset, shape):
            # idx files have a fixed header (16 bytes for image files,
            # 8 bytes for label files) followed by a uint8 payload.
            with open(os.path.join(self.data_dir, filename), 'rb') as fd:
                loaded = np.fromfile(file=fd, dtype=np.uint8)
            return loaded[offset:].reshape(shape).astype(float)

        trX = _read_idx('train-images-idx3-ubyte', 16, (60000, 28, 28, 1))
        trY = _read_idx('train-labels-idx1-ubyte', 8, (60000,))
        teX = _read_idx('t10k-images-idx3-ubyte', 16, (10000, 28, 28, 1))
        teY = _read_idx('t10k-labels-idx1-ubyte', 8, (10000,))

        X = np.concatenate((trX, teX), axis=0).astype(np.float32) / 255.
        y = np.concatenate((trY, teY), axis=0)

        # Fixed seed so every run sees the images in the same order.
        seed = 547
        np.random.seed(seed)
        np.random.shuffle(X)

        queue = tf.train.input_producer(tf.constant(X), shuffle=False)
        self.single_sample = queue.dequeue_many(self.read_batch)
class Cifar10(Pipeline):
    """Loads all CIFAR-10 pickle batches (train + test, 60k images) into
    memory, shuffles once with a fixed seed, and serves them via a queue."""
    def __init__(self, *args, **kwargs):
        super(Cifar10, self).__init__(*args, **kwargs)
        # All ten classes are kept; restrict this array to filter classes.
        categories = np.arange(10)
        batchesX, batchesY = [], []
        for batch in range(1,6):
            loaded = misc.unpickle(os.path.join(self.data_dir, 'data_batch_%d' % batch))
            idx = np.in1d(np.array(loaded['labels']), categories)
            # Raw rows are flat 3072-vectors in CHW order.
            batchesX.append(loaded['data'][idx].reshape(idx.sum(), 3, 32, 32))
            batchesY.append(np.array(loaded['labels'])[idx])
        # Transpose CHW -> HWC for the TF pipeline.
        trX = np.concatenate(batchesX, axis=0).transpose(0, 2, 3, 1)
        trY = np.concatenate(batchesY, axis=0)
        test = misc.unpickle(os.path.join(self.data_dir, 'test_batch'))
        idx = np.in1d(np.array(test['labels']), categories)
        teX = test['data'][idx].reshape(idx.sum(), 3, 32, 32).transpose(0, 2, 3, 1)
        teY = np.array(test['labels'])[idx]
        X = np.concatenate((trX, teX), axis=0).astype(np.float32) / 255.
        y = np.concatenate((trY, teY), axis=0)
        # Fixed seed so every run sees the images in the same order.
        seed = 547
        np.random.seed(seed)
        np.random.shuffle(X)
        queue = tf.train.input_producer(tf.constant(X), shuffle=False)
        self.single_sample = queue.dequeue_many(self.read_batch)
class GaussianMix(Pipeline):
    """Synthetic 1-D two-component Gaussian-mixture pipeline.  Also prepares
    a matplotlib/ffmpeg animation (exposed as `self.G_config`) used to
    visualize training progress.

    Fixes: the `means`/`stds` parameters were accepted but silently ignored —
    the two component draws hard-coded N(0, 1) and N(3, .5).  They are now
    honoured, with (immutable tuple) defaults equal to the previously
    hard-coded values, so existing callers get identical behaviour.  The
    mutable list defaults are gone as well.
    """
    def __init__(self, *args, sample_dir='/', means=(.0, 3.0), stds=(1.0, .5), size=1000, **kwargs):
        super(GaussianMix, self).__init__(*args, **kwargs)
        from matplotlib import animation
        # `size` samples from each mixture component.
        X_real = np.r_[
            np.random.normal(means[0], stds[0], size=size),
            np.random.normal(means[1], stds[1], size=size),
        ]
        X_real = X_real.reshape(X_real.shape[0], 1, 1, 1)

        # Fixed plotting window for the animation frames.
        xlo = -5
        xhi = 7

        ax1 = plt.gca()
        fig = ax1.figure
        ax1.grid(False)
        ax1.set_yticks([], [])
        myhist(X_real.ravel(), color='r')
        ax1.set_xlim(xlo, xhi)
        ax1.set_ylim(0, 1.05)
        ax1._autoscaleXon = ax1._autoscaleYon = False

        wrtr = animation.writers['ffmpeg'](fps=20)
        if not os.path.exists(sample_dir):
            os.makedirs(sample_dir)
        wrtr.setup(fig=fig, outfile=os.path.join(sample_dir, 'train.mp4'), dpi=100)
        self.G_config = {'g_line': None,
                         'ax1': ax1,
                         'writer': wrtr,
                         'figure': ax1.figure}

        queue = tf.train.input_producer(tf.constant(X_real.astype(np.float32)), shuffle=False)
        self.single_sample = queue.dequeue_many(self.read_batch)
def myhist(X, ax=plt, bins='auto', **kwargs):
    """Plot a step-style histogram of `X`, normalized so the tallest bar has
    height 1, onto `ax` (the pyplot module by default)."""
    counts, edges = np.histogram(X, bins=bins)
    counts = counts / counts.max()
    # Duplicate every edge and count so a single polyline traces the
    # flat-topped bars, padded with zeros at both ends.
    xs = np.repeat(edges, 2)
    ys = np.r_[0, np.repeat(counts, 2), 0]
    return ax.plot(xs, ys, **kwargs)
def get_pipeline(dataset, info):
    """Map a dataset name to its Pipeline subclass.

    `info` is only consulted for LSUN datasets, where the presence of
    'tf_records' selects the TFRecord reader over the LMDB one.
    Raises Exception for unknown dataset names.
    """
    # NB: the exact-name checks must run before the substring 'lsun' check:
    # 'lsun_bedroom_200k' contains 'lsun' but uses the JPEG reader.
    if dataset in ('celebA', 'lsun_bedroom_200k'):
        return JPEG_128
    if 'lsun' in dataset:
        return TfRecords if ('tf_records' in info) else LMDB
    if dataset == 'mnist':
        return Mnist
    if dataset == 'cifar10':
        return Cifar10
    if dataset == 'GaussianMix':
        return GaussianMix
    if dataset == 'tiny_imagenet':
        return JPEG_64
    raise Exception('invalid dataset: %s' % dataset)
| 11,697 | 39.337931 | 139 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/resnet/block.py | """
Based on https://github.com/igul222/improved_wgan_training/blob/master/gan_64x64.py.
"""
import functools
import tensorflow as tf
from core.resnet.ops import conv2d, batchnorm, layernorm
def ResidualBlock(name, input_dim, output_dim, filter_size, inputs, resample=None, he_init=True):
    """
    resample: None, 'down', or 'up'

    Pre-activation residual block (Normalize -> relu -> conv, twice) with an
    optional 2x resolution change.  The shortcut is the identity when neither
    channels nor resolution change; otherwise it is a 1x1 conv with the
    matching resampling.
    """
    if resample=='down':
        conv_shortcut = MeanPoolConv
        conv_1 = functools.partial(conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim)
        conv_2 = functools.partial(ConvMeanPool, input_dim=input_dim, output_dim=output_dim)
    elif resample=='up':
        conv_shortcut = UpsampleConv
        conv_1 = functools.partial(UpsampleConv, input_dim=input_dim, output_dim=output_dim)
        conv_2 = functools.partial(conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)
    elif resample==None:
        conv_shortcut = conv2d.Conv2D
        conv_1 = functools.partial(conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim)
        conv_2 = functools.partial(conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim)
    else:
        raise Exception('invalid resample value')
    if output_dim==input_dim and resample==None:
        shortcut = inputs # Identity skip-connection
    else:
        # 1x1 conv shortcut; no He init and with biases, per the upstream code.
        shortcut = conv_shortcut(name+'.Shortcut', input_dim=input_dim, output_dim=output_dim, filter_size=1,
                                 he_init=False, biases=True, inputs=inputs)
    output = inputs
    output = Normalize(name+'.BN1', [0,2,3], output)
    output = tf.nn.relu(output)
    # First conv has no bias: the following Normalize has its own offset.
    output = conv_1(name+'.Conv1', filter_size=filter_size, inputs=output, he_init=he_init, biases=False)
    output = Normalize(name+'.BN2', [0,2,3], output)
    output = tf.nn.relu(output)
    output = conv_2(name+'.Conv2', filter_size=filter_size, inputs=output, he_init=he_init)
    return shortcut + output
def UpsampleConv(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True):
    """2x nearest-neighbour upsampling followed by Conv2D (NCHW in and out).

    Duplicating the channels 4x and applying depth_to_space(2) places the
    four copies of each pixel into a 2x2 spatial block, i.e. nearest-neighbour
    upsampling; the transposes shuttle through NHWC because depth_to_space
    expects channels last.
    """
    output = inputs
    output = tf.concat([output, output, output, output], axis=1)
    output = tf.transpose(output, [0,2,3,1])
    output = tf.depth_to_space(output, 2)
    output = tf.transpose(output, [0,3,1,2])
    output = conv2d.Conv2D(name, input_dim, output_dim, filter_size, output, he_init=he_init, biases=biases)
    return output
def ConvMeanPool(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True):
    """Conv2D followed by 2x2 mean pooling (NCHW), halving the spatial size."""
    conv_out = conv2d.Conv2D(name, input_dim, output_dim, filter_size, inputs,
                             he_init=he_init, biases=biases)
    # Average the four samples of every 2x2 spatial window.
    quarters = [conv_out[:, :, ::2, ::2],
                conv_out[:, :, 1::2, ::2],
                conv_out[:, :, ::2, 1::2],
                conv_out[:, :, 1::2, 1::2]]
    return tf.add_n(quarters) / 4.
def MeanPoolConv(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True):
    """2x2 mean pooling (NCHW) followed by Conv2D, halving the spatial size."""
    # Average the four samples of every 2x2 spatial window first.
    quarters = [inputs[:, :, ::2, ::2],
                inputs[:, :, 1::2, ::2],
                inputs[:, :, ::2, 1::2],
                inputs[:, :, 1::2, 1::2]]
    pooled = tf.add_n(quarters) / 4.
    return conv2d.Conv2D(name, input_dim, output_dim, filter_size, pooled,
                         he_init=he_init, biases=biases)
def Normalize(name, axes, inputs):
    """Dispatch normalization by layer name: layers whose name contains 'd_'
    (discriminator) get layer norm, everything else gets fused batch norm.

    NOTE(review): callers pass axes=[0,2,3] but Layernorm is invoked with
    [1,2,3] (per-sample normalization over C,H,W) — presumably intentional,
    matching the upstream WGAN-GP code; confirm before changing.
    """
    if ('d_' in name):# and (MODE == 'wgan-gp'):
        if axes != [0,2,3]:
            raise Exception('Layernorm over non-standard axes is unsupported')
        return layernorm.Layernorm(name,[1,2,3],inputs)
    else:
        return batchnorm.Batchnorm(name,axes,inputs,fused=True)
| 3,394 | 44.266667 | 116 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/resnet/__init__.py | import numpy as np
import tensorflow as tf
import locale
# Adopt the host's default locale (affects locale-aware string formatting).
locale.setlocale(locale.LC_ALL, '')
__all__ = ['block', 'ops']
# Global registries backing param(): variable name -> tf.Variable, and
# variable -> replacement alias (followed transitively by param()).
_params = {}
_param_aliases = {}
def param(name, *args, **kwargs):
    """
    A wrapper for `tf.Variable` which enables parameter sharing in models.
    Creates and returns theano shared variables similarly to `tf.Variable`,
    except if you try to create a param with the same name as a
    previously-created one, `param(...)` will just return the old one instead of
    making a new one.
    This constructor also adds a `param` attribute to the shared variables it
    creates, so that you can easily search a graph for all params.
    """
    if name not in _params:
        kwargs['name'] = name
        # The local name deliberately shadows this function; harmless since
        # the function is not called again inside this branch.
        param = tf.Variable(*args, **kwargs)
        param.param = True  # marker used to find params in a graph
        _params[name] = param
    result = _params[name]
    i = 0  # alias-chain depth counter (currently unused beyond bookkeeping)
    # Follow replacement aliases transitively to the final substitute.
    while result in _param_aliases:
        i += 1
        result = _param_aliases[result]
    return result
def params_with_name(name):
    # All registered variables whose name contains `name` as a substring.
    return [p for n,p in _params.items() if name in n]
def delete_all_params():
    # Empties the registry; the tf.Variables themselves are not destroyed.
    _params.clear()
def alias_params(replace_dict):
    """Register variable replacements: param() lookups follow old -> new
    chains before returning."""
    for old,new in replace_dict.items():
        _param_aliases[old] = new
def delete_param_aliases():
    # Drop all registered replacements.
    _param_aliases.clear()
def print_model_settings(locals_):
    """Pretty-print every ALL-CAPS entry of a locals() dict alphabetically,
    skipping the bookkeeping names T / SETTINGS / ALL_SETTINGS."""
    print('Uppercase local vars:')
    skipped = ('T', 'SETTINGS', 'ALL_SETTINGS')
    for key in sorted(locals_):
        if key.isupper() and key not in skipped:
            print('\t{}: {}'.format(key, locals_[key]))
def print_model_settings_dict(settings):
    """Pretty-print a settings dict sorted by key, one tab-indented
    'key: value' line per entry."""
    print('Settings dict:')
    for key in sorted(settings):
        print('\t{}: {}'.format(key, settings[key]))
| 1,889 | 29 | 119 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/resnet/ops/conv2d.py | """
Based on https://github.com/igul222/improved_wgan_training/blob/master/
"""
from ... import resnet as lib
import numpy as np
import tensorflow as tf
# Module-level knobs consulted by Conv2D below.
_default_weightnorm = False
_weights_stdev = None

def enable_default_weightnorm():
    """Make weight normalization the default for subsequently built Conv2D ops."""
    global _default_weightnorm
    _default_weightnorm = True

def set_weights_stdev(weights_stdev):
    """Override the initialization stdev for all subsequently built filters."""
    global _weights_stdev
    _weights_stdev = weights_stdev

def unset_weights_stdev():
    """Restore the per-layer (fan-based) initialization stdev."""
    global _weights_stdev
    _weights_stdev = None
def Conv2D(name, input_dim, output_dim, filter_size, inputs, he_init=True, mask_type=None, stride=1, weightnorm=None, biases=True, gain=1.):
    """
    inputs: tensor of shape (batch size, num channels, height, width)
    mask_type: one of None, 'a', 'b'
    returns: tensor of shape (batch size, num channels, height, width)

    SAME-padded NCHW convolution with uniform (He or Glorot) init, optional
    weight normalization, and optional autoregressive filter masking
    ('a'/'b' presumably in the PixelCNN sense — confirm against upstream).
    """
    with tf.name_scope(name) as scope:
        if mask_type is not None:
            mask_type, mask_n_channels = mask_type
            mask = np.ones(
                (filter_size, filter_size, input_dim, output_dim),
                dtype='float32'
            )
            center = filter_size // 2
            # Mask out future locations
            # filter shape is (height, width, input channels, output channels)
            mask[center+1:, :, :, :] = 0.
            mask[center, center+1:, :, :] = 0.
            # Mask out future channels
            for i in range(mask_n_channels):
                for j in range(mask_n_channels):
                    if (mask_type=='a' and i >= j) or (mask_type=='b' and i > j):
                        mask[
                            center,
                            center,
                            i::mask_n_channels,
                            j::mask_n_channels
                        ] = 0.
        def uniform(stdev, size):
            # Uniform distribution with the requested standard deviation.
            return np.random.uniform(
                low=-stdev * np.sqrt(3),
                high=stdev * np.sqrt(3),
                size=size
            ).astype('float32')
        fan_in = input_dim * filter_size**2
        fan_out = output_dim * filter_size**2 / (stride**2)
        if mask_type is not None: # only approximately correct
            fan_in /= 2.
            fan_out /= 2.
        if he_init:
            filters_stdev = np.sqrt(4./(fan_in+fan_out))
        else: # Normalized init (Glorot & Bengio)
            filters_stdev = np.sqrt(2./(fan_in+fan_out))
        # A module-wide stdev override (set_weights_stdev) wins over the
        # per-layer fan-based stdev.
        if _weights_stdev is not None:
            filter_values = uniform(
                _weights_stdev,
                (filter_size, filter_size, input_dim, output_dim)
            )
        else:
            filter_values = uniform(
                filters_stdev,
                (filter_size, filter_size, input_dim, output_dim)
            )
        filter_values *= gain
        filters = lib.param(name+'.Filters', filter_values)
        if weightnorm==None:
            weightnorm = _default_weightnorm
        if weightnorm:
            # Per-output-channel target norms initialized from the init values.
            norm_values = np.sqrt(np.sum(np.square(filter_values), axis=(0,1,2)))
            target_norms = lib.param(
                name + '.g',
                norm_values
            )
            with tf.name_scope('weightnorm') as scope:
                norms = tf.sqrt(tf.reduce_sum(tf.square(filters), reduction_indices=[0,1,2]))
                filters = filters * (target_norms / norms)
        if mask_type is not None:
            with tf.name_scope('filter_mask'):
                filters = filters * mask
        result = tf.nn.conv2d(
            input=inputs,
            filter=filters,
            strides=[1, 1, stride, stride],
            padding='SAME',
            data_format='NCHW'
        )
        if biases:
            _biases = lib.param(
                name+'.Biases',
                np.zeros(output_dim, dtype='float32')
            )
            result = tf.nn.bias_add(result, _biases, data_format='NCHW')
        return result
| 3,910 | 30.039683 | 140 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/resnet/ops/cond_batchnorm.py | import resnet as lib
import numpy as np
import tensorflow as tf
def Batchnorm(name, axes, inputs, is_training=None, stats_iter=None, update_moving_stats=True, fused=True, labels=None, n_labels=None):
    """conditional batchnorm (dumoulin et al 2016) for BCHW conv filtermaps

    Each class label owns its own row of offset/scale parameters, selected
    per sample via embedding_lookup.  The is_training / stats_iter /
    update_moving_stats / fused arguments are accepted only for signature
    compatibility with the unconditional Batchnorm and are unused here.
    """
    if axes != [0,2,3]:
        raise Exception('unsupported')
    mean, var = tf.nn.moments(inputs, axes, keep_dims=True)
    shape = mean.get_shape().as_list() # shape is [1,n,1,1]
    # [n_labels, channels] parameter tables, one row per label.
    offset_m = lib.param(name+'.offset', np.zeros([n_labels,shape[1]], dtype='float32'))
    scale_m = lib.param(name+'.scale', np.ones([n_labels,shape[1]], dtype='float32'))
    offset = tf.nn.embedding_lookup(offset_m, labels)
    scale = tf.nn.embedding_lookup(scale_m, labels)
    # Broadcast the [batch, channels] lookups over H and W.
    result = tf.nn.batch_normalization(inputs, mean, var, offset[:,:,None,None], scale[:,:,None,None], 1e-5)
    return result
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/resnet/ops/batchnorm.py | """
Based on https://github.com/igul222/improved_wgan_training/blob/master/
"""
from ... import resnet as lib
import numpy as np
import tensorflow as tf
def Batchnorm(name, axes, inputs, is_training=None, stats_iter=None, update_moving_stats=True, fused=True):
    """Batch normalization over `axes`.

    Fast path: for conv-style axes ([0,2,3] NCHW, or [0,2] NCW which is
    temporarily expanded to 4-D) with fused=True, uses fused_batch_norm plus
    hand-rolled moving statistics; `stats_iter` drives a running average with
    weight 1/(iter+1).  Fallback path: plain moments + batch_normalization
    for any other axes.
    """
    if ((axes == [0,2,3]) or (axes == [0,2])) and fused==True:
        if axes==[0,2]:
            inputs = tf.expand_dims(inputs, 3)
        # Old (working but pretty slow) implementation:
        ##########
        # inputs = tf.transpose(inputs, [0,2,3,1])
        # mean, var = tf.nn.moments(inputs, [0,1,2], keep_dims=False)
        # offset = lib.param(name+'.offset', np.zeros(mean.get_shape()[-1], dtype='float32'))
        # scale = lib.param(name+'.scale', np.ones(var.get_shape()[-1], dtype='float32'))
        # result = tf.nn.batch_normalization(inputs, mean, var, offset, scale, 1e-4)
        # return tf.transpose(result, [0,3,1,2])
        # New (super fast but untested) implementation:
        offset = lib.param(name+'.offset', np.zeros(inputs.get_shape()[1], dtype='float32'))
        scale = lib.param(name+'.scale', np.ones(inputs.get_shape()[1], dtype='float32'))
        moving_mean = lib.param(name+'.moving_mean', np.zeros(inputs.get_shape()[1], dtype='float32'), trainable=False)
        moving_variance = lib.param(name+'.moving_variance', np.ones(inputs.get_shape()[1], dtype='float32'), trainable=False)
        def _fused_batch_norm_training():
            return tf.nn.fused_batch_norm(inputs, scale, offset, epsilon=1e-5, data_format='NCHW')
        def _fused_batch_norm_inference():
            # Version which blends in the current item's statistics
            batch_size = tf.cast(tf.shape(inputs)[0], 'float32')
            mean, var = tf.nn.moments(inputs, [2,3], keep_dims=True)
            mean = ((1./batch_size)*mean) + (((batch_size-1.)/batch_size)*moving_mean)[None,:,None,None]
            var = ((1./batch_size)*var) + (((batch_size-1.)/batch_size)*moving_variance)[None,:,None,None]
            return tf.nn.batch_normalization(inputs, mean, var, offset[None,:,None,None], scale[None,:,None,None], 1e-5), mean, var
            # Standard version
            # return tf.nn.fused_batch_norm(
            #     inputs,
            #     scale,
            #     offset,
            #     epsilon=1e-2,
            #     mean=moving_mean,
            #     variance=moving_variance,
            #     is_training=False,
            #     data_format='NCHW'
            # )
        if is_training is None:
            # No training flag supplied: always use batch statistics.
            outputs, batch_mean, batch_var = _fused_batch_norm_training()
        else:
            outputs, batch_mean, batch_var = tf.cond(is_training,
                                                     _fused_batch_norm_training,
                                                     _fused_batch_norm_inference)
            if update_moving_stats:
                no_updates = lambda: outputs
                def _force_updates():
                    """Internal function forces updates moving_vars if is_training."""
                    float_stats_iter = tf.cast(stats_iter, tf.float32)
                    update_moving_mean = tf.assign(moving_mean, ((float_stats_iter/(float_stats_iter+1))*moving_mean) + ((1/(float_stats_iter+1))*batch_mean))
                    update_moving_variance = tf.assign(moving_variance, ((float_stats_iter/(float_stats_iter+1))*moving_variance) + ((1/(float_stats_iter+1))*batch_var))
                    with tf.control_dependencies([update_moving_mean, update_moving_variance]):
                        return tf.identity(outputs)
                outputs = tf.cond(is_training, _force_updates, no_updates)
        if axes == [0,2]:
            return outputs[:,:,:,0] # collapse last dim
        else:
            return outputs
    else:
        # raise Exception('old BN')
        # TODO we can probably use nn.fused_batch_norm here too for speedup
        mean, var = tf.nn.moments(inputs, axes, keep_dims=True)
        shape = mean.get_shape().as_list()
        if 0 not in axes:
            print("WARNING ({}): didn't find 0 in axes, but not using separate BN params for each item in batch".format(name))
            shape[0] = 1
        offset = lib.param(name+'.offset', np.zeros(shape, dtype='float32'))
        scale = lib.param(name+'.scale', np.ones(shape, dtype='float32'))
        result = tf.nn.batch_normalization(inputs, mean, var, offset, scale, 1e-5)
        return result
| 4,463 | 48.6 | 169 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/resnet/ops/deconv2d.py | import resnet as lib
import numpy as np
import tensorflow as tf
# Module-level knobs consulted by Deconv2D below.
_default_weightnorm = False  # default when Deconv2D's weightnorm=None
def enable_default_weightnorm():
    # Enable weight normalization for all subsequently created Deconv2D ops.
    global _default_weightnorm
    _default_weightnorm = True
_weights_stdev = None  # when set, overrides the computed init stdev
def set_weights_stdev(weights_stdev):
    global _weights_stdev
    _weights_stdev = weights_stdev
def unset_weights_stdev():
    global _weights_stdev
    _weights_stdev = None
def Deconv2D(
    name,
    input_dim,
    output_dim,
    filter_size,
    inputs,
    he_init=True,
    weightnorm=None,
    biases=True,
    gain=1.,
    mask_type=None,
    ):
    """
    inputs: tensor of shape (batch size, input_dim, height, width) — NCHW,
    despite the original docstring: the first op explicitly transposes
    'NCHW_to_NHWC'.
    returns: tensor of shape (batch size, output_dim, 2*height, 2*width),
    i.e. a stride-2 transposed convolution (2x upsampling), back in NCHW.
    """
    with tf.name_scope(name) as scope:
        if mask_type != None:
            raise Exception('Unsupported configuration')
        def uniform(stdev, size):
            # Uniform distribution with the requested standard deviation.
            return np.random.uniform(
                low=-stdev * np.sqrt(3),
                high=stdev * np.sqrt(3),
                size=size
            ).astype('float32')
        stride = 2
        fan_in = input_dim * filter_size**2 / (stride**2)
        fan_out = output_dim * filter_size**2
        if he_init:
            filters_stdev = np.sqrt(4./(fan_in+fan_out))
        else: # Normalized init (Glorot & Bengio)
            filters_stdev = np.sqrt(2./(fan_in+fan_out))
        # A module-wide stdev override (set_weights_stdev) wins over the
        # per-layer fan-based stdev.
        if _weights_stdev is not None:
            filter_values = uniform(
                _weights_stdev,
                (filter_size, filter_size, output_dim, input_dim)
            )
        else:
            filter_values = uniform(
                filters_stdev,
                (filter_size, filter_size, output_dim, input_dim)
            )
        filter_values *= gain
        filters = lib.param(
            name+'.Filters',
            filter_values
        )
        if weightnorm==None:
            weightnorm = _default_weightnorm
        if weightnorm:
            # Per-output-channel target norms (axis 2 of the filter holds
            # output channels for conv2d_transpose).
            norm_values = np.sqrt(np.sum(np.square(filter_values), axis=(0,1,3)))
            target_norms = lib.param(
                name + '.g',
                norm_values
            )
            with tf.name_scope('weightnorm') as scope:
                norms = tf.sqrt(tf.reduce_sum(tf.square(filters), reduction_indices=[0,1,3]))
                filters = filters * tf.expand_dims(target_norms / norms, 1)
        inputs = tf.transpose(inputs, [0,2,3,1], name='NCHW_to_NHWC')
        input_shape = tf.shape(inputs)
        try: # tf pre-1.0 (top) vs 1.0 (bottom)
            output_shape = tf.pack([input_shape[0], 2*input_shape[1], 2*input_shape[2], output_dim])
        except Exception as e:
            output_shape = tf.stack([input_shape[0], 2*input_shape[1], 2*input_shape[2], output_dim])
        result = tf.nn.conv2d_transpose(
            value=inputs,
            filter=filters,
            output_shape=output_shape,
            strides=[1, 2, 2, 1],
            padding='SAME'
        )
        if biases:
            _biases = lib.param(
                name+'.Biases',
                np.zeros(output_dim, dtype='float32')
            )
            result = tf.nn.bias_add(result, _biases)
        result = tf.transpose(result, [0,3,1,2], name='NHWC_to_NCHW')
        return result
| 3,277 | 27.258621 | 101 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/resnet/ops/layernorm.py | """
Based on https://github.com/igul222/improved_wgan_training/blob/master/
"""
from ... import resnet as lib
import numpy as np
import tensorflow as tf
def Layernorm(name, norm_axes, inputs):
    """Layer normalization over `norm_axes` with learned per-neuron offset
    and scale, where 'neurons' means the first axis in norm_axes."""
    mean, var = tf.nn.moments(inputs, norm_axes, keep_dims=True)
    # Assume the 'neurons' axis is the first of norm_axes. This is the case for fully-connected and BCHW conv layers.
    n_neurons = inputs.get_shape().as_list()[norm_axes[0]]
    offset = lib.param(name+'.offset', np.zeros(n_neurons, dtype='float32'))
    scale = lib.param(name+'.scale', np.ones(n_neurons, dtype='float32'))
    # Add broadcasting dims to offset and scale (e.g. BCHW conv data)
    offset = tf.reshape(offset, [-1] + [1 for i in range(len(norm_axes)-1)])
    scale = tf.reshape(scale, [-1] + [1 for i in range(len(norm_axes)-1)])
    result = tf.nn.batch_normalization(inputs, mean, var, offset, scale, 1e-5)
    return result
| 911 | 37 | 117 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/resnet/ops/conv1d.py | import tflib as lib
import numpy as np
import tensorflow as tf
# Module-level default consulted by Conv1D when weightnorm=None.
_default_weightnorm = False
def enable_default_weightnorm():
    # Enable weight normalization for all subsequently created Conv1D ops.
    global _default_weightnorm
    _default_weightnorm = True
def Conv1D(name, input_dim, output_dim, filter_size, inputs, he_init=True, mask_type=None, stride=1, weightnorm=None, biases=True, gain=1.):
    """
    inputs: tensor of shape (batch size, num channels, width)
    mask_type: one of None, 'a', 'b'
    returns: tensor of shape (batch size, num channels, width)

    SAME-padded channels-first 1-D convolution with uniform (He or Glorot)
    init, optional weight normalization, and optional autoregressive masking.

    Fix: the bias path previously ended with `tf.squeeze(result)` with no
    axis, which also dropped the batch, channel or width dimension whenever
    one of them happened to be 1; only the helper dimension added by
    expand_dims (axis 3) is squeezed now.
    """
    with tf.name_scope(name) as scope:
        if mask_type is not None:
            mask_type, mask_n_channels = mask_type
            mask = np.ones(
                (filter_size, input_dim, output_dim),
                dtype='float32'
            )
            center = filter_size // 2
            # Mask out future locations
            # filter shape is (width, input channels, output channels)
            mask[center+1:, :, :] = 0.
            # Mask out future channels
            for i in range(mask_n_channels):
                for j in range(mask_n_channels):
                    if (mask_type=='a' and i >= j) or (mask_type=='b' and i > j):
                        mask[
                            center,
                            i::mask_n_channels,
                            j::mask_n_channels
                        ] = 0.
        def uniform(stdev, size):
            # Uniform distribution with the requested standard deviation.
            return np.random.uniform(
                low=-stdev * np.sqrt(3),
                high=stdev * np.sqrt(3),
                size=size
            ).astype('float32')
        fan_in = input_dim * filter_size
        fan_out = output_dim * filter_size / stride
        if mask_type is not None: # only approximately correct
            fan_in /= 2.
            fan_out /= 2.
        if he_init:
            filters_stdev = np.sqrt(4./(fan_in+fan_out))
        else: # Normalized init (Glorot & Bengio)
            filters_stdev = np.sqrt(2./(fan_in+fan_out))
        filter_values = uniform(
            filters_stdev,
            (filter_size, input_dim, output_dim)
        )
        # print "WARNING IGNORING GAIN"
        filter_values *= gain
        filters = lib.param(name+'.Filters', filter_values)
        if weightnorm==None:
            weightnorm = _default_weightnorm
        if weightnorm:
            # Per-output-channel target norms initialized from the init values.
            norm_values = np.sqrt(np.sum(np.square(filter_values), axis=(0,1)))
            target_norms = lib.param(
                name + '.g',
                norm_values
            )
            with tf.name_scope('weightnorm') as scope:
                norms = tf.sqrt(tf.reduce_sum(tf.square(filters), reduction_indices=[0,1]))
                filters = filters * (target_norms / norms)
        if mask_type is not None:
            with tf.name_scope('filter_mask'):
                filters = filters * mask
        result = tf.nn.conv1d(
            value=inputs,
            filters=filters,
            stride=stride,
            padding='SAME',
            data_format='NCHW'
        )
        if biases:
            _biases = lib.param(
                name+'.Biases',
                np.zeros([output_dim], dtype='float32')
            )
            # result = result + _biases
            # bias_add with NCHW needs a 4-D tensor; add a trailing helper
            # dim, add the bias over the channel axis, then remove ONLY that
            # helper dim (axis 3).
            result = tf.expand_dims(result, 3)
            result = tf.nn.bias_add(result, _biases, data_format='NCHW')
            result = tf.squeeze(result, [3])
        return result
| 3,401 | 30.211009 | 140 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/resnet/ops/linear.py | import resnet as lib
import numpy as np
import tensorflow as tf
_default_weightnorm = False
def enable_default_weightnorm():
    """Make weight normalization the default for layers created afterwards
    (used when the layer call leaves `weightnorm=None`)."""
    global _default_weightnorm
    _default_weightnorm = True
def disable_default_weightnorm():
    """Turn off the module-wide weight-normalization default."""
    global _default_weightnorm
    _default_weightnorm = False
_weights_stdev = None
def set_weights_stdev(weights_stdev):
    """Override the stdev used by the uniform weight initializer in
    layers created afterwards (see `uniform()` inside `Linear`)."""
    global _weights_stdev
    _weights_stdev = weights_stdev
def unset_weights_stdev():
    """Remove the stdev override set by `set_weights_stdev`."""
    global _weights_stdev
    _weights_stdev = None
def Linear(
        name,
        input_dim,
        output_dim,
        inputs,
        biases=True,
        initialization=None,
        weightnorm=None,
        gain=1.
        ):
    """Fully connected layer: `inputs @ W (+ b)` with selectable W init.

    initialization: None, `lecun`, 'glorot', `he`, 'glorot_he', `orthogonal`, `("uniform", range)`
        With None, the glorot branch below is taken.
    weightnorm: if None, falls back to the module-level _default_weightnorm flag.
    gain: multiplicative factor applied to the initial weight values.
    Inputs with more than 2 dims are flattened to (-1, input_dim) and the
    leading dims are restored on the result.
    """
    with tf.name_scope(name) as scope:
        def uniform(stdev, size):
            # Module-level _weights_stdev override takes precedence when set.
            if _weights_stdev is not None:
                stdev = _weights_stdev
            return np.random.uniform(
                low=-stdev * np.sqrt(3),
                high=stdev * np.sqrt(3),
                size=size
            ).astype('float32')
        if initialization == 'lecun':# and input_dim != output_dim):
            # disabling orth. init for now because it's too slow
            weight_values = uniform(
                np.sqrt(1./input_dim),
                (input_dim, output_dim)
            )
        elif initialization == 'glorot' or (initialization == None):
            weight_values = uniform(
                np.sqrt(2./(input_dim+output_dim)),
                (input_dim, output_dim)
            )
        elif initialization == 'he':
            weight_values = uniform(
                np.sqrt(2./input_dim),
                (input_dim, output_dim)
            )
        elif initialization == 'glorot_he':
            weight_values = uniform(
                np.sqrt(4./(input_dim+output_dim)),
                (input_dim, output_dim)
            )
        # NOTE(review): the `(initialization == None and input_dim == output_dim)`
        # part below is unreachable — the glorot branch above already captures
        # every `initialization == None` call.
        elif initialization == 'orthogonal' or \
            (initialization == None and input_dim == output_dim):
            # From lasagne
            def sample(shape):
                if len(shape) < 2:
                    raise RuntimeError("Only shapes of length 2 or more are "
                                       "supported.")
                flat_shape = (shape[0], np.prod(shape[1:]))
                # TODO: why normal and not uniform?
                a = np.random.normal(0.0, 1.0, flat_shape)
                u, _, v = np.linalg.svd(a, full_matrices=False)
                # pick the one with the correct shape
                q = u if u.shape == flat_shape else v
                q = q.reshape(shape)
                return q.astype('float32')
            weight_values = sample((input_dim, output_dim))
        elif initialization[0] == 'uniform':
            weight_values = np.random.uniform(
                low=-initialization[1],
                high=initialization[1],
                size=(input_dim, output_dim)
            ).astype('float32')
        else:
            raise Exception('Invalid initialization!')
        weight_values *= gain
        weight = lib.param(
            name + '.W',
            weight_values
        )
        if weightnorm==None:
            weightnorm = _default_weightnorm
        if weightnorm:
            # Reparameterize each output column of W as g * W / ||W||.
            norm_values = np.sqrt(np.sum(np.square(weight_values), axis=0))
            target_norms = lib.param(
                name + '.g',
                norm_values
            )
            with tf.name_scope('weightnorm') as scope:
                norms = tf.sqrt(tf.reduce_sum(tf.square(weight), reduction_indices=[0]))
                weight = weight * (target_norms / norms)
        if inputs.get_shape().ndims == 2:
            result = tf.matmul(inputs, weight)
        else:
            # Flatten leading dims, multiply, then restore them.
            # NOTE(review): tf.pack/tf.unpack (and reduction_indices above)
            # are pre-TF-1.0 API names (tf.stack/tf.unstack/axis in TF >= 1.0).
            reshaped_inputs = tf.reshape(inputs, [-1, input_dim])
            result = tf.matmul(reshaped_inputs, weight)
            result = tf.reshape(result, tf.pack(tf.unpack(tf.shape(inputs))[:-1] + [output_dim]))
        if biases:
            result = tf.nn.bias_add(
                result,
                lib.param(
                    name + '.b',
                    np.zeros((output_dim,), dtype='float32')
                )
            )
        return result
| 4,325 | 29.041667 | 98 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/core/resnet/ops/__init__.py | 0 | 0 | 0 | py | |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/utils/timer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 19 13:42:24 2018
@author: mikolajbinkowski
"""
import time
class Timer(object):
    """Wall-clock progress reporter.

    Messages are printed for early steps (<= 10) and then only every
    `limit` steps; with `prints=False` the formatted message is returned
    instead of printed.
    """
    def __init__(self, start_time=None, limit=100):
        # BUG FIX: the original default was `start_time=time.time()`, which
        # is evaluated ONCE at import time, so every Timer constructed later
        # silently measured from module import rather than its own creation.
        self.start_time = time.time() if start_time is None else start_time
        self.limit = limit
    def __call__(self, step, mess='', prints=True):
        """Report `mess` for `step`: print (rate-limited) or return the string."""
        # Rate-limit printed output: after the first 10 steps, only print
        # every `limit`-th step.
        if prints and (step % self.limit != 0) and (step > 10):
            return
        message = '[%8d][%s] %s' % (step, hms(self.start_time), mess)
        if prints:
            print(message)
        else:
            return message
def hms(start_time):
    """Format the wall-clock time elapsed since `start_time` (an epoch
    timestamp) as '..h..m..s', '...m..s' or '......s'."""
    minutes, seconds = divmod(int(time.time() - start_time), 60)
    hours, minutes = divmod(minutes, 60)
    if hours > 0:
        return '%2dh%02dm%02ds' % (hours, minutes, seconds)
    if minutes > 0:
        return '%5dm%02ds' % (minutes, seconds)
    return '%8ds' % seconds
GANFingerprints | GANFingerprints-master/MMDGAN/gan/utils/get_test_images.py | import tensorflow as tf
import numpy as np
import os
os.chdir(os.path.join(os.getcwd(), '..', '..'))
from core import pipeline  # BUG FIX: was `import core.pipeline`, but the code below uses the bare name `pipeline`
import argparse

# Samples N images from a dataset pipeline and stores them as one .npy array
# (used as the "real" test set for evaluation scores).
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', default='lsun', help='dataset to sample from')
    parser.add_argument('--data-path', default='~/', help='path to read raw images from')
    parser.add_argument('--save-path', default=None, help='path to save numpy array of images')
    parser.add_argument('-N', type=int, default=25000, help='number of samples to save')
    parser.add_argument('--output-size', type=int, default=64, help='size of the sampled pictures')
    parser.add_argument('--channels', type=int, default=3, help='number of channels in sampled pictures')
    args = parser.parse_args()
    if args.save_path is None:
        args.save_path = args.data_path
    Pipeline_class = pipeline.get_pipeline(args.dataset)
    with tf.Session() as sess:
        # BUG FIX: the original called `Pipeline(...)`, an undefined name;
        # the class looked up above was bound as `Pipeline_class`.
        # NOTE(review): save_path (not data_path) is handed to the pipeline,
        # as in the original — verify against the Pipeline constructor.
        pipe = Pipeline_class(args.output_size, args.channels, 1000, args.save_path)
        ims = pipe.connect()
        sampled = []
        # Batches of 1000 images until at least N are collected.
        while len(sampled) < args.N/1000.:
            sampled.append(sess.run(ims))
            print(len(sampled))
        sampled = np.concatenate(sampled, axis=0)
        print(sampled.shape)
        sampled = sampled[:args.N]
        # BUG FIX: the template '%s-$d-test.npy' had a '$d' typo and was
        # never filled in; format with the dataset name and image size.
        path = os.path.join(args.save_path, '%s-%d-test.npy' % (args.dataset, args.output_size))
        # BUG FIX: np.save takes (file, arr) — the arguments were swapped.
        np.save(path, sampled)
        print('%d %dx%d %s images saved in %s.' % (args.N, args.output_size, args.output_size, args.dataset, path))
| 1,553 | 41 | 115 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/utils/scorer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 10 17:23:38 2018
@author: mikolajbinkowski
"""
import time, os, scipy, sys
import numpy as np
from core import mmd
import compute_scores as cs
class Scorer(object):
    """Periodic GAN evaluation: Inception score, FID and KID against a cached
    set of featurized training images, plus an optional relative (3-sample)
    MMD test that drives learning-rate decay."""
    def __init__(self, dataset, lr_scheduler=True, stdout=sys.stdout):
        """Pick the feature model and evaluation cadence for `dataset`.

        dataset: 'mnist' uses a LeNet featurizer; anything else Inception.
        lr_scheduler: enable the 3-sample-test learning-rate schedule.
        stdout: stream handed to the scoring helpers for their logging.
        """
        self.stdout = stdout
        self.dataset = dataset
        if dataset == 'mnist':
            self.model = cs.LeNet()
            self.size = 100000
            self.frequency = 500
        else:
            self.model = cs.Inception()
            self.size = 25000
            self.frequency = 2000
        self.output = []          # history of per-evaluation score dicts
        if lr_scheduler:
            self.three_sample = []          # memorized Y-related MMD sums
            self.three_sample_chances = 0   # consecutive no-improvement count
        self.lr_scheduler = lr_scheduler
    def set_train_codes(self, gan):
        """Load cached feature codes of real training images, or compute and
        cache them by running `self.size` images through the feature model."""
        suffix = '' if (gan.output_size <= 32) else ('-%d' % gan.output_size)
        path = os.path.join(gan.data_dir, '%s-codes%s.npy' % (self.dataset, suffix))
        if os.path.exists(path):
            self.train_codes = np.load(path)
            print('[*] Train codes loaded. ')
            return
        print('[!] Codes not found. Featurizing...')
        ims = []
        while len(ims) < self.size // gan.batch_size:
            ims.append(gan.sess.run(gan.images))
        ims = np.concatenate(ims, axis=0)[:self.size]
        # Featurizer expects [0, 255]-scaled pixels here.
        _, self.train_codes = cs.featurize(ims * 255., self.model, get_preds=True,
                                           get_codes=True, output=self.stdout)
        np.save(path, self.train_codes)
        print('[*] %d train images featurized and saved in <%s>' % (self.size, path))
    def compute(self, gan, step):
        """Score `gan` at training `step` (no-op unless step hits the cadence):
        computes IS/FID/KID, checkpoints on a new best KID, saves the scores,
        and optionally updates the learning rate via the 3-sample test."""
        if step % self.frequency != 0:
            return
        if not hasattr(self, 'train_codes'):
            print('[ ] Getting train codes...')
            self.set_train_codes(gan)
        tt = time.time()
        gan.timer(step, "Scoring start")
        output = {}
        images4score = gan.get_samples(n=self.size, save=False)
        # Rescale generated samples to the range the feature model expects,
        # clipping (with a warning) if the generator stepped outside it.
        if self.dataset == 'mnist': #LeNet model takes [-.5, .5] pics
            images4score -= .5
            if (images4score.max() > .5) or (images4score.min() < -.5):
                print('WARNING! LeNet min/max violated: min = %f, max = %f. Clipping values.' % (images4score.min(), images4score.max()))
                images4score = images4score.clip(-.5, .5)
        else: #Inception model takes [0 , 255] pics
            images4score *= 255.0
            if (images4score.max() > 255.) or (images4score.min() < .0):
                print('WARNING! Inception min/max violated: min = %f, max = %f. Clipping values.' % (images4score.min(), images4score.max()))
                images4score = images4score.clip(0., 255.)
        preds, codes = cs.featurize(images4score, self.model, get_preds=True,
                                    get_codes=True, output=self.stdout)
        gan.timer(step, "featurizing finished")
        output['inception'] = scores = cs.inception_score(preds)
        gan.timer(step, "Inception mean (std): %f (%f)" % (np.mean(scores), np.std(scores)))
        output['fid'] = scores = cs.fid_score(codes, self.train_codes, output=self.stdout,
                                              split_method='bootstrap',
                                              splits=3)
        gan.timer(step, "FID mean (std): %f (%f)" % (np.mean(scores), np.std(scores)))
        ret = cs.polynomial_mmd_averages(codes, self.train_codes, output=self.stdout,
                                         n_subsets=10, subset_size=1000,
                                         ret_var=False)
        output['mmd2'] = mmd2s = ret
        gan.timer(step, "KID mean (std): %f (%f)" % (mmd2s.mean(), mmd2s.std()))
        # Checkpoint when the mean KID beats every previous evaluation.
        if len(self.output) > 0:
            if np.min([sc['mmd2'].mean() for sc in self.output]) > output['mmd2'].mean():
                print('Saving best model ...')
                gan.save_checkpoint()
        self.output.append(output)
        filepath = os.path.join(gan.sample_dir, 'score%d.npz' % step)
        np.savez(filepath, **output)
        if self.lr_scheduler:
            # Relative 3-sample test: compare codes of fresh samples (new_Y)
            # against a sample memorized n evaluations ago (saved_Z), both
            # w.r.t. real codes X. After nc consecutive non-improvements the
            # learning rate is decayed.
            n = 10
            if self.dataset == 'mnist':
                n = 10
            nc = 3
            bs = 2048
            new_Y = codes[:bs]
            X = self.train_codes[:bs]
            print('No. of copmuted 3-sample statics: %d' % len(self.three_sample))
            if len(self.three_sample) >= n:
                saved_Z = self.three_sample[0]
                mmd2_diff, test_stat, Y_related_sums = \
                    mmd.np_diff_polynomial_mmd2_and_ratio_with_saving(X, new_Y, saved_Z)
                p_val = scipy.stats.norm.cdf(test_stat)
                gan.timer(step, "3-sample test stat = %.1f" % test_stat)
                gan.timer(step, "3-sample p-value = %.1f" % p_val)
                if p_val > .1:
                    self.three_sample_chances += 1
                    if self.three_sample_chances >= nc:
                        # no confidence that new Y sample is closer to X than old Z is
                        gan.sess.run(gan.lr_decay_op)
                        print('No improvement in last %d tests. Decreasing learning rate to %f' % \
                            (nc, gan.sess.run(gan.lr)))
                        self.three_sample = (self.three_sample + [Y_related_sums])[-nc:] # reset memorized sums
                        self.three_sample_chances = 0
                    else:
                        print('No improvement in last %d test(s). Keeping learning rate at %f' % \
                            (self.three_sample_chances, gan.sess.run(gan.lr)))
                else:
                    # we're confident that new_Y is better than old_Z is
                    print('Keeping learning rate at %f' % gan.sess.run(gan.lr))
                    self.three_sample = self.three_sample[1:] + [Y_related_sums]
                    self.three_sample_chances = 0
            else: # add new sums to memory
                self.three_sample.append(
                    mmd.np_diff_polynomial_mmd2_and_ratio_with_saving(X, new_Y, None)
                )
            gan.timer(step, "computing stats for 3-sample test finished")
            print('current learning rate: %f' % gan.sess.run(gan.lr))
        gan.timer(step, "Scoring end, total time = %.1f s" % (time.time() - tt))
GANFingerprints | GANFingerprints-master/MMDGAN/gan/utils/misc.py | """
Some codes from https://github.com/Newmu/dcgan_code
Released under the MIT license.
"""
from __future__ import division
import random
import pprint
import scipy.misc
import numpy as np
from time import gmtime, strftime
import tensorflow as tf
from six.moves import xrange
pp = pprint.PrettyPrinter()
def inverse_transform(images):
return (images+1.)/2.
def save_images(images, size, image_path):
merged = merge(inverse_transform(images), size)
return scipy.misc.imsave(image_path, merged)
def merge(images, size):
h, w = images.shape[1], images.shape[2]
img = np.zeros((h * size[0], w * size[1], 3))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j*h:j*h+h, i*w:i*w+w, :] = image
return img
def center_crop(x, crop_h, crop_w,
resize_h=64, resize_w=64):
if crop_w is None:
crop_w = crop_h
h, w = x.shape[:2]
j = int(round((h - crop_h)/2.))
i = int(round((w - crop_w)/2.))
return scipy.misc.imresize(x[j:j+crop_h, i:i+crop_w], [resize_h, resize_w])
def make_gif(images, fname, duration=2, true_image=False):
import moviepy.editor as mpy
def make_frame(t):
try:
x = images[int(len(images)/duration*t)]
except:
x = images[-1]
if true_image:
return x.astype(np.uint8)
else:
return ((x+1)/2*255).astype(np.uint8)
clip = mpy.VideoClip(make_frame, duration=duration)
clip.write_gif(fname, fps=len(images) / duration)
def visualize(sess, dcgan, config, option):
if option == 0:
z_sample = np.random.uniform(-0.5, 0.5, size=(config.batch_size, dcgan.z_dim))
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
time0 = strftime("%Y-%m-%d %H:%M:%S", gmtime())
save_images(samples, [8, 8], './samples/test_%s.png' % time0)
elif option == 1:
values = np.arange(0, 1, 1./config.batch_size)
for idx in xrange(100):
print(" [*] %d" % idx)
z_sample = np.zeros([config.batch_size, dcgan.z_dim])
for kdx, z in enumerate(z_sample):
z[idx] = values[kdx]
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
save_images(samples, [8, 8], './samples/test_arange_%s.png' % (idx))
elif option == 2:
values = np.arange(0, 1, 1./config.batch_size)
for idx in [random.randint(0, 99) for _ in xrange(100)]:
print(" [*] %d" % idx)
z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
z_sample = np.tile(z, (config.batch_size, 1))
#z_sample = np.zeros([config.batch_size, dcgan.z_dim])
for kdx, z in enumerate(z_sample):
z[idx] = values[kdx]
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
make_gif(samples, './samples/test_gif_%s.gif' % (idx))
elif option == 3:
values = np.arange(0, 1, 1./config.batch_size)
for idx in xrange(100):
print(" [*] %d" % idx)
z_sample = np.zeros([config.batch_size, dcgan.z_dim])
for kdx, z in enumerate(z_sample):
z[idx] = values[kdx]
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
make_gif(samples, './samples/test_gif_%s.gif' % (idx))
elif option == 4:
image_set = []
values = np.arange(0, 1, 1./config.batch_size)
for idx in xrange(100):
print(" [*] %d" % idx)
z_sample = np.zeros([config.batch_size, dcgan.z_dim])
for kdx, z in enumerate(z_sample):
z[idx] = values[kdx]
image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))
new_image_set = [
merge(np.array([images[idx] for images in image_set]), [10, 10])
for idx in range(64) + range(63, -1, -1)]
make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8)
def unpickle(file):
    """Load a latin1-encoded pickle (e.g. a CIFAR batch file) from the
    path `file` and return the deserialized object."""
    import _pickle as cPickle
    # Use a context manager so the handle is closed even if deserialization
    # raises (the original leaked the file object on error). Also avoid
    # shadowing the builtin `dict`.
    with open(file, 'rb') as fo:
        data = cPickle.load(fo, encoding='latin1')
    return data
def center_and_scale(im, size=64) :
    """Downscale PIL image `im` so its shorter side equals `size`, take a
    RANDOM size x size crop (despite the function name), and return it as
    float32 in [0, 1].

    NOTE(review): `im` is mutated in place by `thumbnail`.
    """
    size = int(size)
    arr = np.array(im)
    scale = min(im.size)/float(size)
    new_size = np.array(im.size)/scale
    im.thumbnail(new_size)
    arr = np.array(im)
    assert min(arr.shape[:2]) == size, "shape error: " + repr(arr.shape) + ", lower dim should be " + repr(size)
    # l0 = int((arr.shape[0] - size)//2)
    # l1 = int((arr.shape[1] - size)//2)
    # Random crop offsets along both spatial dims (center crop is commented out).
    l0 = np.random.choice(np.arange(arr.shape[0] - size + 1), 1)[0]
    l1 = np.random.choice(np.arange(arr.shape[1] - size + 1), 1)[0]
    arr = arr[l0:l0 + size, l1: l1 + size, :]
    sh = (size, size, 3)
    assert arr.shape == sh, "shape error: " + repr(arr.shape) + ", should be " + repr(sh)
    return np.asarray(arr/255., dtype=np.float32)
def center_and_scale_new(im, size=64, assumed_input_size=256, channels=3):
    """TF-graph version: decode JPEG bytes `im`, random-crop and scale to
    size x size, returning a float tensor in [0, 1].

    When `assumed_input_size` is given, the JPEG decoder's integer `ratio`
    downscaling is used (fast path); otherwise the image is square-cropped
    at its shorter side and bilinearly resized.
    """
    if assumed_input_size is not None:
        ratio = int(assumed_input_size/size)
        decoded = tf.image.decode_jpeg(im, channels=channels, ratio=ratio)
        cropped = tf.random_crop(decoded, size=[size, size, 3])
        return tf.to_float(cropped)/255.
    size = int(size)
    decoded = tf.image.decode_jpeg(im, channels=channels)
    # Square crop at the shorter side, then resize to the target size.
    s = tf.reduce_min(tf.shape(decoded)[:2])
    cropped = tf.random_crop(decoded, size=[s, s, 3])
    scaled = tf.image.resize_images(cropped, [size, size])
    return tf.to_float(scaled)/255.
def read_and_scale(file, size=64):
    """Open the image at path `file` and return a random size x size crop
    scaled to [0, 1] (see center_and_scale)."""
    from PIL import Image
    return center_and_scale(Image.open(file), size=size)
def variable_summary(var, name):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization).

    NOTE(review): in this copy every tf.summary call is commented out, so the
    clipped mean/absdev below are computed and discarded; the utils.py copy
    of this function still emits the summaries.
    """
    # with tf.get_variable_scope():
    if var is None:
        print("Variable Summary: None value for variable '%s'" % name)
        return
    var = tf.clip_by_value(var, -1000., 1000.)
    mean = tf.reduce_mean(var)
    with tf.name_scope('absdev'):
        stddev = tf.reduce_mean(tf.abs(var - mean))
    # tf.summary.scalar(name + '_absdev', stddev)
    # tf.summary.scalar(name + '_norm', tf.sqrt(tf.reduce_mean(tf.square(var))))
    # tf.summary.histogram(name + '_histogram', var)
def variable_summaries(variable_dict):
    """Attach a summary for every (name, tensor) pair in `variable_dict`."""
    for summary_name, tensor in variable_dict.items():
        variable_summary(tensor, summary_name)
def conv_sizes(size, layers, stride=2):
    """Return the chain of feature-map sizes produced by `layers`
    successive stride-`stride` downsamplings of `size` (ceil division),
    starting with `size` itself."""
    sizes = [int(size)]
    for _ in range(layers):
        sizes.append(int(np.ceil(float(sizes[-1]) / float(stride))))
    return tuple(sizes)
def get_image(image_path, input_height, input_width,
              resize_height=64, resize_width=64,
              crop=True, grayscale=False):
    """Read the image at `image_path` and return it cropped/resized and
    scaled to [0, 1] (see `imread` and `transform`)."""
    image = imread(image_path, grayscale)
    return transform(image, input_height, input_width,
                     resize_height, resize_width, crop)
def imread(path, grayscale = False):
    """Read the image at `path` as a float array, optionally flattened to
    grayscale.

    NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2.
    """
    if (grayscale):
        return scipy.misc.imread(path, flatten = True).astype(np.float)
    else:
        return scipy.misc.imread(path).astype(np.float)
def merge_images(images, size):
    """Rescale `images` from [-1, 1] to [0, 1].

    NOTE(review): despite the name, this does NOT tile the batch and the
    `size` argument is ignored — looks vestigial; see `merge` for tiling.
    """
    return inverse_transform(images)
def imsave(images, size, path):
    """Tile `images` into a size[0] x size[1] grid and write it to `path`.

    NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2.
    """
    image = np.squeeze(merge(images, size))
    return scipy.misc.imsave(path, image)
def transform(image, input_height, input_width,
              resize_height=64, resize_width=64, crop=True):
    """Center-crop (or just resize) `image` to resize_height x resize_width
    and scale pixel values to [0, 1].

    NOTE(review): relies on scipy.misc.imresize, removed in SciPy >= 1.3.
    """
    if crop:
        cropped_image = center_crop(image, input_height, input_width,
                                    resize_height, resize_width)
    else:
        cropped_image = scipy.misc.imresize(image, [resize_height, resize_width])
    return np.array(cropped_image)/255.
def tf_read_jpeg(files, base_size=160, target_size=64, batch_size=128,
                 capacity=4000, num_threads=4, random_crop=9):
    """TF input pipeline: read JPEGs from `files`, pad/crop to base_size
    (with optional random flip + random crop jitter of `random_crop`
    pixels per side), shuffle-batch them and resize to target_size,
    returning a float batch tensor in [0, 1]."""
    filename_queue = tf.train.string_input_producer(files)
    reader = tf.WholeFileReader()
    _, raw = reader.read(filename_queue)
    decoded = tf.image.decode_jpeg(raw, channels=3) # HWC
    # Pad/crop to base_size plus jitter margin, then randomly crop back.
    bs = base_size + 2 * random_crop
    cropped = tf.image.resize_image_with_crop_or_pad(decoded, bs, bs)
    if random_crop > 0:
        cropped = tf.image.random_flip_left_right(cropped)
        cropped = tf.random_crop(cropped, [base_size, base_size, 3])
    ims = tf.train.shuffle_batch(
        [cropped],
        batch_size=batch_size,
        capacity=capacity,
        min_after_dequeue=capacity//4,
        # BUG FIX: was hard-coded to 4, silently ignoring the
        # `num_threads` parameter (default unchanged, so behavior is
        # identical for callers using the default).
        num_threads=num_threads,
        enqueue_many=False
    )
    resized = tf.image.resize_bilinear(ims, (target_size, target_size))
    images = tf.cast(resized, tf.float32)/255.
    return images
def PIL_read_jpeg(files, base_size=160, target_size=64, batch_size=128,
                  capacity=4000, num_threads=4):
    """Input pipeline that decodes JPEGs with PIL inside tf.py_func:
    center-crops each image to base_size, resizes to target_size, scales
    to [0, 1] and shuffle-batches the results."""
    from PIL import Image
    def read_single(f):
        img = Image.open(f)
        w, h = img.size
        assert w >= base_size, 'wrong width'
        assert h >= base_size, 'wrong height'
        l, r = (w - base_size)//2, (h - base_size)//2
        # BUG FIX: PIL's crop/resize return NEW images; the originals
        # discarded the results, so no crop or resize ever happened.
        img = img.crop((l, r, l + base_size, r + base_size))
        img = img.resize((target_size, target_size), Image.ANTIALIAS)
        # BUG FIX: np.asarray takes a numpy dtype — `tf.float32` here
        # raised a TypeError when the py_func actually ran.
        return np.asarray(img, np.float32)/255.
    filename_queue = tf.train.string_input_producer(files, shuffle=True)
    single_file = filename_queue.dequeue()
    single_sample = tf.py_func(read_single, [single_file], tf.float32)
    single_sample.set_shape([target_size, target_size, 3])
    images = tf.train.shuffle_batch(
        [single_sample],
        batch_size=batch_size,
        capacity=capacity,
        min_after_dequeue=capacity//4,
        # BUG FIX: was hard-coded to 4, ignoring the `num_threads` parameter.
        num_threads=num_threads,
        enqueue_many=False
    )
    return images
| 9,739 | 33.661922 | 112 | py |
GANFingerprints | GANFingerprints-master/MMDGAN/gan/utils/utils.py | """
Some codes from https://github.com/Newmu/dcgan_code
Released under the MIT license.
"""
from __future__ import division
import random
import pprint
import scipy.misc
import numpy as np
from time import gmtime, strftime
import tensorflow as tf
from six.moves import xrange
pp = pprint.PrettyPrinter()
def inverse_transform(images):
    """Map generator output from [-1, 1] back to [0, 1]."""
    shifted = images + 1.
    return shifted / 2.
def save_images(images, size, image_path):
    """Rescale a batch of [-1, 1] images to [0, 1], tile them into a
    size[0] x size[1] grid and write the grid to `image_path`.

    NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2; this
    requires an older SciPy with Pillow installed.
    """
    merged = merge(inverse_transform(images), size)
    return scipy.misc.imsave(image_path, merged)
def merge(images, size):
    """Tile a batch of HxWx3 images into one (size[0]*H, size[1]*W, 3)
    canvas, filling the grid row by row."""
    height, width = images.shape[1], images.shape[2]
    canvas = np.zeros((height * size[0], width * size[1], 3))
    for index, image in enumerate(images):
        col = index % size[1]
        row = index // size[1]
        canvas[row * height:(row + 1) * height,
               col * width:(col + 1) * width, :] = image
    return canvas
def center_crop(x, crop_h, crop_w,
                resize_h=64, resize_w=64):
    """Crop a centered crop_h x crop_w window out of image array `x`
    (crop_w defaults to crop_h) and resize it to resize_h x resize_w.

    NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3.
    """
    if crop_w is None:
        crop_w = crop_h
    h, w = x.shape[:2]
    j = int(round((h - crop_h)/2.))
    i = int(round((w - crop_w)/2.))
    return scipy.misc.imresize(x[j:j+crop_h, i:i+crop_w], [resize_h, resize_w])
def make_gif(images, fname, duration=2, true_image=False):
    """Write the list of image frames to an animated GIF at `fname`
    spanning `duration` seconds; frames are assumed in [-1, 1] unless
    `true_image` (already uint8-ranged) is set."""
    import moviepy.editor as mpy
    def make_frame(t):
        # Map time t to a frame index; clamp to the last frame on overflow.
        # NOTE(review): the bare except also hides non-IndexError failures.
        try:
            x = images[int(len(images)/duration*t)]
        except:
            x = images[-1]
        if true_image:
            return x.astype(np.uint8)
        else:
            return ((x+1)/2*255).astype(np.uint8)
    clip = mpy.VideoClip(make_frame, duration=duration)
    clip.write_gif(fname, fps=len(images) / duration)
def visualize(sess, dcgan, config, option):
    """Render qualitative samples from `dcgan` in one of several modes:
    0 - one random 8x8 sample grid PNG;
    1 - PNG grids sweeping each of the first 100 z-dimensions;
    2 - GIFs sweeping random z-dimensions around a shared random z;
    3 - GIFs sweeping each of the first 100 z-dimensions from zero;
    4 - a merged back-and-forth GIF over all dimension sweeps.
    """
    if option == 0:
        z_sample = np.random.uniform(-0.5, 0.5, size=(config.batch_size, dcgan.z_dim))
        samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
        time0 = strftime("%Y-%m-%d %H:%M:%S", gmtime())
        save_images(samples, [8, 8], './samples/test_%s.png' % time0)
    elif option == 1:
        values = np.arange(0, 1, 1./config.batch_size)
        for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([config.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]
            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            save_images(samples, [8, 8], './samples/test_arange_%s.png' % (idx))
    elif option == 2:
        values = np.arange(0, 1, 1./config.batch_size)
        for idx in [random.randint(0, 99) for _ in xrange(100)]:
            print(" [*] %d" % idx)
            z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
            z_sample = np.tile(z, (config.batch_size, 1))
            #z_sample = np.zeros([config.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]
            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            make_gif(samples, './samples/test_gif_%s.gif' % (idx))
    elif option == 3:
        values = np.arange(0, 1, 1./config.batch_size)
        for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([config.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]
            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            make_gif(samples, './samples/test_gif_%s.gif' % (idx))
    elif option == 4:
        image_set = []
        values = np.arange(0, 1, 1./config.batch_size)
        for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([config.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]
            image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
            make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))
        # BUG FIX: Python 3 range objects do not support '+'; the original
        # `range(64) + range(63, -1, -1)` raised TypeError. Convert to lists
        # to build the forward-then-backward frame index sequence.
        new_image_set = [
            merge(np.array([images[idx] for images in image_set]), [10, 10])
            for idx in list(range(64)) + list(range(63, -1, -1))]
        make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8)
def unpickle(file):
    """Load a latin1-encoded pickle (e.g. a CIFAR batch file) from the
    path `file` and return the deserialized object."""
    import _pickle as cPickle
    # Use a context manager so the handle is closed even if deserialization
    # raises (the original leaked the file object on error). Also avoid
    # shadowing the builtin `dict`.
    with open(file, 'rb') as fo:
        data = cPickle.load(fo, encoding='latin1')
    return data
def center_and_scale(im, size=64) :
    """Downscale PIL image `im` so its shorter side equals `size`, take a
    RANDOM size x size crop (despite the function name), and return it as
    float32 in [0, 1].

    NOTE(review): `im` is mutated in place by `thumbnail`.
    """
    size = int(size)
    arr = np.array(im)
    scale = min(im.size)/float(size)
    new_size = np.array(im.size)/scale
    im.thumbnail(new_size)
    arr = np.array(im)
    assert min(arr.shape[:2]) == size, "shape error: " + repr(arr.shape) + ", lower dim should be " + repr(size)
    # l0 = int((arr.shape[0] - size)//2)
    # l1 = int((arr.shape[1] - size)//2)
    # Random crop offsets along both spatial dims (center crop is commented out).
    l0 = np.random.choice(np.arange(arr.shape[0] - size + 1), 1)[0]
    l1 = np.random.choice(np.arange(arr.shape[1] - size + 1), 1)[0]
    arr = arr[l0:l0 + size, l1: l1 + size, :]
    sh = (size, size, 3)
    assert arr.shape == sh, "shape error: " + repr(arr.shape) + ", should be " + repr(sh)
    return np.asarray(arr/255., dtype=np.float32)
def center_and_scale_new(im, size=64, assumed_input_size=256, channels=3):
    """TF-graph version: decode JPEG bytes `im`, random-crop and scale to
    size x size, returning a float tensor in [0, 1].

    When `assumed_input_size` is given, the JPEG decoder's integer `ratio`
    downscaling is used (fast path); otherwise the image is square-cropped
    at its shorter side and bilinearly resized.
    """
    if assumed_input_size is not None:
        ratio = int(assumed_input_size/size)
        decoded = tf.image.decode_jpeg(im, channels=channels, ratio=ratio)
        cropped = tf.random_crop(decoded, size=[size, size, 3])
        return tf.to_float(cropped)/255.
    size = int(size)
    decoded = tf.image.decode_jpeg(im, channels=channels)
    # Square crop at the shorter side, then resize to the target size.
    s = tf.reduce_min(tf.shape(decoded)[:2])
    cropped = tf.random_crop(decoded, size=[s, s, 3])
    scaled = tf.image.resize_images(cropped, [size, size])
    return tf.to_float(scaled)/255.
def read_and_scale(file, size=64):
    """Open the image at path `file` and return a random size x size crop
    scaled to [0, 1] (see center_and_scale)."""
    from PIL import Image
    return center_and_scale(Image.open(file), size=size)
def variable_summary(var, name):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization).

    Emits a scalar absolute-deviation summary and a histogram for `var`
    (clipped to [-1000, 1000] first); None variables are only reported
    to stdout.
    """
    # with tf.get_variable_scope():
    if var is None:
        print("Variable Summary: None value for variable '%s'" % name)
        return
    var = tf.clip_by_value(var, -1000., 1000.)
    mean = tf.reduce_mean(var)
    with tf.name_scope('absdev'):
        stddev = tf.reduce_mean(tf.abs(var - mean))
    tf.summary.scalar(name + '_absdev', stddev)
    # tf.summary.scalar(name + '_norm', tf.sqrt(tf.reduce_mean(tf.square(var))))
    tf.summary.histogram(name + '_histogram', var)
def variable_summaries(variable_dict):
    """Attach a summary for every (name, tensor) pair in `variable_dict`."""
    for summary_name, tensor in variable_dict.items():
        variable_summary(tensor, summary_name)
def conv_sizes(size, layers, stride=2):
    """Return the chain of feature-map sizes produced by `layers`
    successive stride-`stride` downsamplings of `size` (ceil division),
    starting with `size` itself."""
    sizes = [int(size)]
    for _ in range(layers):
        sizes.append(int(np.ceil(float(sizes[-1]) / float(stride))))
    return tuple(sizes)
def get_image(image_path, input_height, input_width,
              resize_height=64, resize_width=64,
              crop=True, grayscale=False):
    """Read the image at `image_path` and return it cropped/resized and
    scaled to [0, 1] (see `imread` and `transform`)."""
    image = imread(image_path, grayscale)
    return transform(image, input_height, input_width,
                     resize_height, resize_width, crop)
def imread(path, grayscale = False):
    """Read the image at `path` as a float array, optionally flattened to
    grayscale.

    NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2.
    """
    if (grayscale):
        return scipy.misc.imread(path, flatten = True).astype(np.float)
    else:
        return scipy.misc.imread(path).astype(np.float)
def merge_images(images, size):
    """Rescale `images` from [-1, 1] to [0, 1].

    NOTE(review): despite the name, this does NOT tile the batch and the
    `size` argument is ignored — looks vestigial; see `merge` for tiling.
    """
    return inverse_transform(images)
def imsave(images, size, path):
    """Tile `images` into a size[0] x size[1] grid and write it to `path`.

    NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2.
    """
    image = np.squeeze(merge(images, size))
    return scipy.misc.imsave(path, image)
def transform(image, input_height, input_width,
              resize_height=64, resize_width=64, crop=True):
    """Center-crop (or just resize) `image` to resize_height x resize_width
    and scale pixel values to [0, 1].

    NOTE(review): relies on scipy.misc.imresize, removed in SciPy >= 1.3.
    """
    if crop:
        cropped_image = center_crop(image, input_height, input_width,
                                    resize_height, resize_width)
    else:
        cropped_image = scipy.misc.imresize(image, [resize_height, resize_width])
    return np.array(cropped_image)/255.
return np.array(cropped_image)/255.
def tf_read_jpeg(files, base_size=160, target_size=64, batch_size=128,
capacity=4000, num_threads=4, random_crop=9):
filename_queue = tf.train.string_input_producer(files)
reader = tf.WholeFileReader()
_, raw = reader.read(filename_queue)
decoded = tf.image.decode_jpeg(raw, channels=3) # HWC
bs = base_size + 2 * random_crop
cropped = tf.image.resize_image_with_crop_or_pad(decoded, bs, bs)
if random_crop > 0:
cropped = tf.image.random_flip_left_right(cropped)
cropped = tf.random_crop(cropped, [base_size, base_size, 3])
ims = tf.train.shuffle_batch(
[cropped],
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=capacity//4,
num_threads=4,
enqueue_many=False
)
resized = tf.image.resize_bilinear(ims, (target_size, target_size))
images = tf.cast(resized, tf.float32)/255.
return images
def PIL_read_jpeg(files, base_size=160, target_size=64, batch_size=128,
                  capacity=4000, num_threads=4):
    """Input pipeline that decodes JPEGs with PIL inside tf.py_func:
    center-crops each image to base_size, resizes to target_size, scales
    to [0, 1] and shuffle-batches the results."""
    from PIL import Image
    def read_single(f):
        img = Image.open(f)
        w, h = img.size
        assert w >= base_size, 'wrong width'
        assert h >= base_size, 'wrong height'
        l, r = (w - base_size)//2, (h - base_size)//2
        # BUG FIX: PIL's crop/resize return NEW images; the originals
        # discarded the results, so no crop or resize ever happened.
        img = img.crop((l, r, l + base_size, r + base_size))
        img = img.resize((target_size, target_size), Image.ANTIALIAS)
        # BUG FIX: np.asarray takes a numpy dtype — `tf.float32` here
        # raised a TypeError when the py_func actually ran.
        return np.asarray(img, np.float32)/255.
    filename_queue = tf.train.string_input_producer(files, shuffle=True)
    single_file = filename_queue.dequeue()
    single_sample = tf.py_func(read_single, [single_file], tf.float32)
    single_sample.set_shape([target_size, target_size, 3])
    images = tf.train.shuffle_batch(
        [single_sample],
        batch_size=batch_size,
        capacity=capacity,
        min_after_dequeue=capacity//4,
        # BUG FIX: was hard-coded to 4, ignoring the `num_threads` parameter.
        num_threads=num_threads,
        enqueue_many=False
    )
    return images
GANFingerprints | GANFingerprints-master/MMDGAN/gan/utils/__init__.py | __all__ = ['scorer', 'timer', 'misc']
| 38 | 18.5 | 37 | py |
GANFingerprints | GANFingerprints-master/SNGAN/updater.py | import numpy as np
import chainer
import chainer.functions as F
from chainer import Variable
from source.miscs.random_samples import sample_continuous, sample_categorical
# Classic Adversarial Loss
def loss_dcgan_dis(dis_fake, dis_real):
    """Classic DCGAN discriminator loss in softplus form:
    mean softplus(-D(x)) + mean softplus(D(G(z)))."""
    real_term = F.mean(F.softplus(-dis_real))
    fake_term = F.mean(F.softplus(dis_fake))
    return real_term + fake_term
def loss_dcgan_gen(dis_fake):
    """Classic (non-saturating) DCGAN generator loss: mean softplus(-D(G(z)))."""
    return F.mean(F.softplus(-dis_fake))
# Hinge Loss
def loss_hinge_dis(dis_fake, dis_real):
    """Hinge discriminator loss: mean relu(1 - D(x)) + mean relu(1 + D(G(z)))."""
    real_term = F.mean(F.relu(1. - dis_real))
    fake_term = F.mean(F.relu(1. + dis_fake))
    return real_term + fake_term
def loss_hinge_gen(dis_fake):
    """Hinge generator loss: -mean D(G(z))."""
    return -F.mean(dis_fake)
class Updater(chainer.training.StandardUpdater):
    """GAN updater: per outer iteration it runs `n_dis` discriminator
    updates and one generator update (folded into inner iteration 0),
    using either the DCGAN (softplus) or hinge adversarial loss."""
    def __init__(self, *args, **kwargs):
        """Pop GAN-specific options, select the loss pair, and forward the
        rest to chainer's StandardUpdater."""
        self.models = kwargs.pop('models')  # {'gen': generator, 'dis': discriminator}
        self.n_dis = kwargs.pop('n_dis')  # discriminator steps per generator step
        self.loss_type = kwargs.pop('loss_type')  # 'dcgan' | 'hinge'
        self.conditional = kwargs.pop('conditional')  # class-conditional GAN?
        self.n_gen_samples = kwargs.pop('n_gen_samples')
        if self.loss_type == 'dcgan':
            self.loss_gen = loss_dcgan_gen
            self.loss_dis = loss_dcgan_dis
        elif self.loss_type == 'hinge':
            self.loss_gen = loss_hinge_gen
            self.loss_dis = loss_hinge_dis
        else:
            raise NotImplementedError
        super(Updater, self).__init__(*args, **kwargs)
    # NOTE(review): 'generete' is a typo for 'generate'; kept because the
    # method is called by this name below and may be used by subclasses.
    def _generete_samples(self, n_gen_samples=None):
        """Draw `n_gen_samples` fake images (with sampled labels when the
        model is conditional); defaults to self.n_gen_samples."""
        if n_gen_samples is None:
            n_gen_samples = self.n_gen_samples
        gen = self.models['gen']
        if self.conditional:
            y = sample_categorical(gen.n_classes, n_gen_samples, xp=gen.xp)
        else:
            y = None
        x_fake = gen(n_gen_samples, y=y)
        return x_fake, y
    def get_batch(self, xp):
        """Fetch the next minibatch of real images (and int labels when
        conditional) from the 'main' iterator as chainer Variables."""
        batch = self.get_iterator('main').next()
        batchsize = len(batch)
        x = []
        y = []
        for j in range(batchsize):
            x.append(np.asarray(batch[j][0]).astype("f"))
            y.append(np.asarray(batch[j][1]).astype(np.int32))
        x_real = Variable(xp.asarray(x))
        y_real = Variable(xp.asarray(y, dtype=xp.int32)) if self.conditional else None
        return x_real, y_real
    def update_core(self):
        """One training iteration: n_dis discriminator steps, with the
        single generator step taken when i == 0."""
        gen = self.models['gen']
        dis = self.models['dis']
        gen_optimizer = self.get_optimizer('opt_gen')
        dis_optimizer = self.get_optimizer('opt_dis')
        xp = gen.xp
        for i in range(self.n_dis):
            if i == 0:
                # Generator update (once per outer iteration).
                x_fake, y_fake = self._generete_samples()
                dis_fake = dis(x_fake, y=y_fake)
                loss_gen = self.loss_gen(dis_fake=dis_fake)
                gen.cleargrads()
                loss_gen.backward()
                gen_optimizer.update()
                chainer.reporter.report({'loss_gen': loss_gen})
            x_real, y_real = self.get_batch(xp)
            batchsize = len(x_real)
            dis_real = dis(x_real, y=y_real)
            x_fake, y_fake = self._generete_samples(n_gen_samples=batchsize)
            dis_fake = dis(x_fake, y=y_fake)
            # Stop gradients from the D loss flowing back into the generator.
            x_fake.unchain_backward()
            loss_dis = self.loss_dis(dis_fake=dis_fake, dis_real=dis_real)
            dis.cleargrads()
            loss_dis.backward()
            dis_optimizer.update()
            chainer.reporter.report({'loss_dis': loss_dis})
| 3,334 | 32.019802 | 86 | py |
GANFingerprints | GANFingerprints-master/SNGAN/train_mn.py | import os, sys, time
import shutil
import yaml
import argparse
import chainer
from chainer import training
from chainer.training import extension
from chainer.training import extensions
import chainermn
import multiprocessing
sys.path.append(os.path.dirname(__file__))
from evaluation import sample_generate_conditional, sample_generate_light, calc_inception
import source.yaml_utils as yaml_utils
def create_result_dir(result_dir, config_path, config):
    """Create `result_dir` (if missing) and snapshot the config file plus
    the generator/discriminator/dataset/updater source files into it, for
    reproducibility of the run."""
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    def copy_to_result_dir(fn, result_dir):
        base_name = os.path.basename(fn)
        shutil.copy(fn, '{}/{}'.format(result_dir, base_name))

    sources = (
        config_path,
        config.models['generator']['fn'],
        config.models['discriminator']['fn'],
        config.dataset['dataset_fn'],
        config.updater['fn'],
    )
    for src in sources:
        copy_to_result_dir(src, result_dir)
def load_models(config):
    """Instantiate the generator and discriminator described by `config`
    (file path, class name and constructor args for each)."""
    g_conf = config.models['generator']
    generator = yaml_utils.load_model(g_conf['fn'], g_conf['name'], g_conf['args'])
    d_conf = config.models['discriminator']
    discriminator = yaml_utils.load_model(d_conf['fn'], d_conf['name'], d_conf['args'])
    return generator, discriminator
def make_optimizer(model, comm, alpha=0.0002, beta1=0., beta2=0.9):
    """Build a multi-node Adam optimizer over communicator `comm` and set
    it up on `model`."""
    adam = chainer.optimizers.Adam(alpha=alpha, beta1=beta1, beta2=beta2)
    optimizer = chainermn.create_multi_node_optimizer(adam, comm)
    optimizer.setup(model)
    return optimizer
def main():
    """Multi-node SNGAN training entry point (ChainerMN).

    Builds the GAN from a YAML config, distributes the dataset and
    gradient updates across workers, and runs the Chainer trainer.
    Logging, snapshot and evaluation extensions run on rank 0 only.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_path', type=str, default='configs/base.yml', help='path to config file')
    parser.add_argument('--data_dir', type=str, default='./data/imagenet')
    parser.add_argument('--results_dir', type=str, default='./results/gans',
                        help='directory to save the results to')
    parser.add_argument('--inception_model_path', type=str, default='./datasets/inception_model',
                        help='path to the inception model')
    parser.add_argument('--snapshot', type=str, default='',
                        help='path to the snapshot')
    parser.add_argument('--loaderjob', type=int,
                        help='number of parallel data loading processes')
    parser.add_argument('--communicator', type=str,
                        default='hierarchical', help='Type of communicator')
    args = parser.parse_args()
    # NOTE(review): yaml.load without an explicit Loader is unsafe for
    # untrusted configs and the file handle is never closed; consider
    # yaml.safe_load inside a `with open(...)` block.
    config = yaml_utils.Config(yaml.load(open(args.config_path)))
    comm = chainermn.create_communicator(args.communicator)
    device = comm.intra_rank  # one device per process within each node
    chainer.cuda.get_device_from_id(device).use()
    print("init")
    if comm.rank == 0:
        print('==========================================')
        print('Using {} communicator'.format(args.communicator))
        print('==========================================')
    # Model
    gen, dis = load_models(config)
    gen.to_gpu()
    dis.to_gpu()
    models = {"gen": gen, "dis": dis}
    # Optimizer (multi-node Adam; hyper-parameters come from the config)
    opt_gen = make_optimizer(gen, comm,
                             alpha=config.adam['alpha'], beta1=config.adam['beta1'], beta2=config.adam['beta2'])
    opt_dis = make_optimizer(dis, comm,
                             alpha=config.adam['alpha'], beta1=config.adam['beta1'], beta2=config.adam['beta2'])
    opts = {"opt_gen": opt_gen, "opt_dis": opt_dis}
    # Dataset: loaded once on rank 0, then scattered across workers.
    config['dataset']['args']['root'] = args.data_dir
    if comm.rank == 0:
        dataset = yaml_utils.load_dataset(config)
    else:
        _ = yaml_utils.load_dataset(config)  # Dummy, for adding path to the dataset module
        dataset = None
    dataset = chainermn.scatter_dataset(dataset, comm)
    # Iterator
    multiprocessing.set_start_method('forkserver')  # fork-safety with CUDA/MPI workers
    iterator = chainer.iterators.MultiprocessIterator(dataset, config.batchsize,
                                                      n_processes=args.loaderjob)
    kwargs = config.updater['args'] if 'args' in config.updater else {}
    kwargs.update({
        'models': models,
        'iterator': iterator,
        'optimizer': opts,
        'device': device,
    })
    updater = yaml_utils.load_updater_class(config)
    updater = updater(**kwargs)
    out = args.results_dir
    if comm.rank == 0:
        create_result_dir(out, args.config_path, config)
    trainer = training.Trainer(updater, (config.iteration, 'iteration'), out=out)
    report_keys = ["loss_dis", "loss_gen", "inception_mean", "inception_std"]
    if comm.rank == 0:
        # Set up logging (rank 0 only, to avoid duplicated files/output)
        trainer.extend(extensions.snapshot(), trigger=(config.snapshot_interval, 'iteration'))
        for m in models.values():
            trainer.extend(extensions.snapshot_object(
                m, m.__class__.__name__ + '_{.updater.iteration}.npz'), trigger=(config.snapshot_interval, 'iteration'))
        trainer.extend(extensions.LogReport(keys=report_keys,
                                            trigger=(config.display_interval, 'iteration')))
        trainer.extend(extensions.PrintReport(report_keys), trigger=(config.display_interval, 'iteration'))
        trainer.extend(sample_generate_conditional(gen, out, n_classes=gen.n_classes),
                       trigger=(config.evaluation_interval, 'iteration'),
                       priority=extension.PRIORITY_WRITER)
        trainer.extend(sample_generate_light(gen, out, rows=10, cols=10),
                       trigger=(config.evaluation_interval // 10, 'iteration'),
                       priority=extension.PRIORITY_WRITER)
        trainer.extend(calc_inception(gen, n_ims=5000, splits=1, path=args.inception_model_path),
                       trigger=(config.evaluation_interval, 'iteration'),
                       priority=extension.PRIORITY_WRITER)
        trainer.extend(extensions.ProgressBar(update_interval=config.progressbar_interval))
    # Linearly decay the learning rate to zero from iteration_decay_start.
    ext_opt_gen = extensions.LinearShift('alpha', (config.adam['alpha'], 0.),
                                         (config.iteration_decay_start, config.iteration), opt_gen)
    ext_opt_dis = extensions.LinearShift('alpha', (config.adam['alpha'], 0.),
                                         (config.iteration_decay_start, config.iteration), opt_dis)
    trainer.extend(ext_opt_gen)
    trainer.extend(ext_opt_dis)
    if args.snapshot:
        print("Resume training with snapshot:{}".format(args.snapshot))
        chainer.serializers.load_npz(args.snapshot, trainer)
    # Run the training
    print("start training")
    trainer.run()
if __name__ == '__main__':
main()
| 6,592 | 42.662252 | 120 | py |
GANFingerprints | GANFingerprints-master/SNGAN/__init__.py | 0 | 0 | 0 | py | |
GANFingerprints | GANFingerprints-master/SNGAN/evaluation.py | import os
import sys
import math
import numpy as np
from PIL import Image
import scipy.linalg
import chainer
import chainer.cuda
from chainer import Variable
from chainer import serializers
from chainer import cuda
import chainer.functions as F
sys.path.append(os.path.dirname(__file__))
sys.path.append('../')
from source.inception.inception_score import inception_score, Inception
from source.links.sn_convolution_2d import SNConvolution2D
from source.functions.max_sv import max_singular_value
from numpy.linalg import svd
def gen_images(gen, n=50000, batchsize=100):
    """Draw exactly `n` samples from generator `gen`.

    Returns a uint8 array of shape (n, 3, h, w) with pixel values in
    [0, 255] (the generator's [-1, 1] output is rescaled and clipped).

    Fixes two issues in the original: an unused local (`xp`), and a crash
    when `batchsize` does not divide `n` — the old code generated a full
    final batch and then failed on `reshape((n, ...))`.
    """
    ims = []
    for i in range(0, n, batchsize):
        cur = min(batchsize, n - i)  # final batch may be smaller
        with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
            x = gen(cur)
        x = chainer.cuda.to_cpu(x.data)
        x = np.asarray(np.clip(x * 127.5 + 127.5, 0.0, 255.0), dtype=np.uint8)
        ims.append(x)
    return np.concatenate(ims, axis=0)
def gen_images_with_condition(gen, c=0, n=500, batchsize=100):
    """Draw exactly `n` class-conditional samples (all with label `c`).

    Returns a uint8 array of shape (n, 3, h, w) with values in [0, 255].

    Fix: the original always generated full batches and then crashed on
    `reshape((n, ...))` whenever `batchsize` did not divide `n`; the last
    batch is now truncated so exactly `n` images are produced.
    """
    ims = []
    xp = gen.xp
    for i in range(0, n, batchsize):
        cur = min(batchsize, n - i)  # final batch may be smaller
        with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
            y = xp.asarray([c] * cur, dtype=xp.int32)
            x = gen(cur, y=y)
        x = chainer.cuda.to_cpu(x.data)
        x = np.asarray(np.clip(x * 127.5 + 127.5, 0.0, 255.0), dtype=np.uint8)
        ims.append(x)
    return np.concatenate(ims, axis=0)
def sample_generate_light(gen, dst, rows=5, cols=5, seed=0):
    """Trainer extension: dump a rows x cols grid of generated samples to
    `dst`/preview/image_latest.png, overwritten on every call."""
    @chainer.training.make_extension()
    def make_image(trainer):
        np.random.seed(seed)
        total = rows * cols
        samples = gen_images(gen, total, batchsize=total)
        _, _, h, w = samples.shape
        # Tile (rows*cols, 3, h, w) into a single (rows*h, cols*w, 3) image.
        grid = samples.reshape((rows, cols, 3, h, w)).transpose(0, 3, 1, 4, 2)
        grid = grid.reshape((rows * h, cols * w, 3))
        preview_dir = '{}/preview'.format(dst)
        if not os.path.exists(preview_dir):
            os.makedirs(preview_dir)
        Image.fromarray(grid).save(preview_dir + '/image_latest.png')
    return make_image
def sample_generate(gen, dst, rows=10, cols=10, seed=0):
    """Trainer extension: save a rows x cols sample grid, one PNG per call,
    named after the current training iteration."""
    @chainer.training.make_extension()
    def make_image(trainer):
        np.random.seed(seed)
        total = rows * cols
        samples = gen_images(gen, total, batchsize=total)
        _, _, h, w = samples.shape
        # Tile (rows*cols, 3, h, w) into a single (rows*h, cols*w, 3) image.
        grid = samples.reshape((rows, cols, 3, h, w)).transpose(0, 3, 1, 4, 2)
        grid = grid.reshape((rows * h, cols * w, 3))
        preview_dir = '{}/preview'.format(dst)
        if not os.path.exists(preview_dir):
            os.makedirs(preview_dir)
        preview_path = preview_dir + '/image{:0>8}.png'.format(trainer.updater.iteration)
        Image.fromarray(grid).save(preview_path)
    return make_image
def sample_generate_conditional(gen, dst, rows=10, n_classes=1000, seed=0):
    """Trainer extension: visualize `rows` generated samples for each of 10
    evenly spaced class labels, saved to `dst`/preview per iteration."""
    classes = np.asarray(np.arange(10) * (n_classes / 10), dtype=np.int32)

    @chainer.training.make_extension()
    def make_image(trainer=None):
        np.random.seed(seed)
        with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
            per_class = [gen_images_with_condition(gen, c=c, n=rows, batchsize=rows)
                         for c in classes]
        x = np.concatenate(per_class, 0)
        _, _, h, w = x.shape
        grid = x.reshape((rows, len(classes), 3, h, w)).transpose(0, 3, 1, 4, 2)
        grid = grid.reshape((rows * h, len(classes) * w, 3))
        preview_dir = '{}/preview'.format(dst)
        preview_path = preview_dir + '/image{:0>8}.png'.format(
            trainer.updater.iteration if trainer is not None else None)
        if not os.path.exists(preview_dir):
            os.makedirs(preview_dir)
        Image.fromarray(grid).save(preview_path)
    return make_image
def load_inception_model(path=None):
    """Load the pretrained Inception network from an HDF5 snapshot and move
    it to GPU; defaults to the snapshot bundled next to this module."""
    if path is None:
        path = "%s/inception/inception_score.model" % os.path.dirname(__file__)
    model = Inception()
    serializers.load_hdf5(path, model)
    model.to_gpu()
    return model
def calc_inception(gen, batchsize=100, dst=None, path=None, n_ims=50000, splits=10):
    """Trainer extension: compute the Inception score of `n_ims` generated
    images, report mean/std, and optionally log them under `dst`/stats.

    `path` is forwarded to load_inception_model.
    """
    @chainer.training.make_extension()
    def evaluation(trainer=None):
        model = load_inception_model(path)
        ims = gen_images(gen, n_ims, batchsize=batchsize).astype("f")
        mean, std = inception_score(model, ims, splits=splits)
        chainer.reporter.report({
            'inception_mean': mean,
            'inception_std': std
        })
        if dst is not None:
            preview_dir = '{}/stats'.format(dst)
            # Bug fix: the stats directory was never created, so np.savetxt
            # raised FileNotFoundError on the first call.
            if not os.path.exists(preview_dir):
                os.makedirs(preview_dir)
            preview_path = preview_dir + '/inception_score_{:0>8}.txt'.format(
                trainer.updater.iteration if trainer is not None else None)
            np.savetxt(preview_path, np.array([mean, std]))
    return evaluation
def get_mean_cov(model, ims, batch_size=100):
    """Run `ims` (n, 3, w, h float images) through the Inception network in
    batches and return the mean vector and covariance matrix of the 2048-d
    feature activations (both as NumPy arrays on the host).
    """
    n, c, w, h = ims.shape
    n_batches = int(math.ceil(float(n) / float(batch_size)))
    xp = model.xp
    print('Batch size:', batch_size)
    print('Total number of images:', n)
    print('Total number of batches:', n_batches)
    # Feature buffer; lives on the same device as the model (CPU or GPU).
    ys = xp.empty((n, 2048), dtype=xp.float32)
    for i in range(n_batches):
        print('Running batch', i + 1, '/', n_batches, '...')
        batch_start = (i * batch_size)
        batch_end = min((i + 1) * batch_size, n)
        ims_batch = ims[batch_start:batch_end]
        ims_batch = xp.asarray(ims_batch)  # To GPU if using CuPy
        ims_batch = Variable(ims_batch)
        # Resize image to the shape expected by the inception module
        if (w, h) != (299, 299):
            ims_batch = F.resize_images(ims_batch, (299, 299))  # bilinear
        # Feed images to the inception module to get the features
        with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
            y = model(ims_batch, get_feature=True)
        ys[batch_start:batch_end] = y.data
    # .get() transfers from device to host (NumPy).
    mean = xp.mean(ys, axis=0).get()
    # cov = F.cross_covariance(ys, ys, reduce="no").datasets.get()
    cov = np.cov(ys.get().T)
    return mean, cov
def monitor_largest_singular_values(dis, dst):
    """Trainer extension: for every spectrally normalized convolution in
    `dis`, compute the exact largest singular value of the normalized
    weight via full SVD, print it, and dump per-layer values to
    `dst`/sigmas/sigmas_<iteration>.txt.
    """
    @chainer.training.make_extension()
    def evaluation(trainer=None):
        xp = dis.xp
        links = [[name, link] for name, link in sorted(dis.namedlinks())]
        sigmas = []
        for name, link in links:
            if isinstance(link, SNConvolution2D):
                W, u = link.W, link.u
                W_mat = W.reshape(W.shape[0], -1)
                # Power-iteration estimate used for normalization at run time.
                sigma, _, _ = max_singular_value(W_mat, u)
                W_bar = cuda.to_cpu((W_mat.data / xp.squeeze(sigma.data)))
                # Exact largest singular value of the normalized weight;
                # should be close to 1 if the estimate is accurate.
                _, s, _ = svd(W_bar)
                _sigma = s[0]
                print(name.strip('/'), _sigma)
                sigmas.append([name.strip('/'), _sigma])
        if dst is not None:
            preview_dir = '{}/sigmas'.format(dst)
            if not os.path.exists(preview_dir):
                os.makedirs(preview_dir)
            preview_path = preview_dir + '/sigmas_{:0>8}.txt'.format(
                trainer.updater.iteration if trainer is not None else None)
            with open(preview_path, 'wb') as f:
                # Fix: `np.str` was deprecated in NumPy 1.20 and removed in
                # 1.24; it was a plain alias of the builtin `str`.
                np.savetxt(f, np.array(sigmas, dtype=str), delimiter=" ", fmt="%s")
    return evaluation
def FID(m0, c0, m1, c1):
    """Frechet distance between Gaussians N(m0, c0) and N(m1, c1):
    ||m0 - m1||^2 + Tr(c0 + c1 - 2 (c0 c1)^(1/2)).
    Returns the real part (sqrtm may produce tiny imaginary noise)."""
    diff = m0 - m1
    mean_term = np.sum(diff ** 2)
    covmean = scipy.linalg.sqrtm(np.dot(c0, c1))
    cov_term = np.trace(c0 + c1 - 2.0 * covmean)
    return np.real(mean_term + cov_term)
def calc_FID(gen, batchsize=100, stat_file="%s/cifar-10-fid.npz" % os.path.dirname(__file__), dst=None, path=None,
             n_ims=5000):
    """Frechet Inception Distance proposed by https://arxiv.org/abs/1706.08500

    Trainer extension: compare generated-image Inception statistics against
    precomputed reference stats in `stat_file` (npz with 'mean'/'cov'),
    report the FID, and optionally log it under `dst`/stats.
    """
    @chainer.training.make_extension()
    def evaluation(trainer=None):
        model = load_inception_model(path)
        stat = np.load(stat_file)
        ims = gen_images(gen, n_ims, batchsize=batchsize).astype("f")
        with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
            mean, cov = get_mean_cov(model, ims)
        fid = FID(stat["mean"], stat["cov"], mean, cov)
        chainer.reporter.report({
            'FID': fid,
        })
        if dst is not None:
            preview_dir = '{}/stats'.format(dst)
            # Bug fix: the stats directory was never created, so np.savetxt
            # raised FileNotFoundError on the first call.
            if not os.path.exists(preview_dir):
                os.makedirs(preview_dir)
            preview_path = preview_dir + '/fid_{:0>8}.txt'.format(
                trainer.updater.iteration if trainer is not None else None)
            np.savetxt(preview_path, np.array([fid]))
    return evaluation
| 8,815 | 35.580913 | 114 | py |
GANFingerprints | GANFingerprints-master/SNGAN/train.py | import os, sys, time
import shutil
import yaml
import argparse
import chainer
from chainer import training
from chainer.training import extension
from chainer.training import extensions
sys.path.append(os.path.dirname(__file__))
from evaluation import sample_generate_conditional, sample_generate_light, calc_inception
import source.yaml_utils as yaml_utils
def create_result_dir(result_dir, config_path, config):
    """Ensure `result_dir` exists and snapshot the config file together with
    the generator/discriminator/dataset/updater source files it references,
    so the run can be reproduced from its output directory."""
    os.makedirs(result_dir, exist_ok=True)
    to_copy = [
        config_path,
        config.models['generator']['fn'],
        config.models['discriminator']['fn'],
        config.dataset['dataset_fn'],
        config.updater['fn'],
    ]
    for src_path in to_copy:
        shutil.copy(src_path, '{}/{}'.format(result_dir, os.path.basename(src_path)))
def load_models(config):
    """Build the generator/discriminator pair described by `config`."""
    nets = []
    for key in ('generator', 'discriminator'):
        conf = config.models[key]
        nets.append(yaml_utils.load_model(conf['fn'], conf['name'], conf['args']))
    return tuple(nets)
def make_optimizer(model, alpha=0.0002, beta1=0., beta2=0.9):
    """Create an Adam optimizer with the given hyper-parameters and attach
    it to `model`."""
    opt = chainer.optimizers.Adam(alpha=alpha, beta1=beta1, beta2=beta2)
    opt.setup(model)
    return opt
def main():
    """Single-GPU SNGAN training entry point.

    Parses CLI flags, instantiates the GAN and its Adam optimizers from a
    YAML config, and runs the Chainer trainer with logging, sampling and
    learning-rate-decay extensions.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_path', type=str, default='configs/base.yml', help='path to config file')
    parser.add_argument('--gpu', type=int, default=0, help='index of gpu to be used')
    parser.add_argument('--data_dir', type=str, default='./data/imagenet')
    parser.add_argument('--results_dir', type=str, default='./results/gans',
                        help='directory to save the results to')
    #parser.add_argument('--inception_model_path', type=str, default='./datasets/inception_model',
    #                    help='path to the inception model')
    parser.add_argument('--snapshot', type=str, default='',
                        help='path to the snapshot')
    parser.add_argument('--loaderjob', type=int,
                        help='number of parallel data loading processes')
    args = parser.parse_args()
    # NOTE(review): yaml.load without an explicit Loader is unsafe for
    # untrusted configs and the file handle is never closed; consider
    # yaml.safe_load inside a `with open(...)` block.
    config = yaml_utils.Config(yaml.load(open(args.config_path)))
    chainer.cuda.get_device_from_id(args.gpu).use()
    gen, dis = load_models(config)
    gen.to_gpu(device=args.gpu)
    dis.to_gpu(device=args.gpu)
    models = {"gen": gen, "dis": dis}
    # Optimizer (Adam hyper-parameters come from the config)
    opt_gen = make_optimizer(
        gen, alpha=config.adam['alpha'], beta1=config.adam['beta1'], beta2=config.adam['beta2'])
    opt_dis = make_optimizer(
        dis, alpha=config.adam['alpha'], beta1=config.adam['beta1'], beta2=config.adam['beta2'])
    opts = {"opt_gen": opt_gen, "opt_dis": opt_dis}
    # Dataset
    config['dataset']['args']['root'] = args.data_dir
    dataset = yaml_utils.load_dataset(config)
    # Iterator
    iterator = chainer.iterators.MultiprocessIterator(
        dataset, config.batchsize, n_processes=args.loaderjob)
    kwargs = config.updater['args'] if 'args' in config.updater else {}
    kwargs.update({
        'models': models,
        'iterator': iterator,
        'optimizer': opts,
    })
    updater = yaml_utils.load_updater_class(config)
    updater = updater(**kwargs)
    out = args.results_dir
    create_result_dir(out, args.config_path, config)
    trainer = training.Trainer(updater, (config.iteration, 'iteration'), out=out)
    report_keys = ["loss_dis", "loss_gen", "inception_mean", "inception_std"]
    # Set up logging
    trainer.extend(extensions.snapshot(), trigger=(config.snapshot_interval, 'iteration'))
    for m in models.values():
        trainer.extend(extensions.snapshot_object(
            m, m.__class__.__name__ + '_{.updater.iteration}.npz'), trigger=(config.snapshot_interval, 'iteration'))
    trainer.extend(extensions.LogReport(keys=report_keys,
                                        trigger=(config.display_interval, 'iteration')))
    trainer.extend(extensions.PrintReport(report_keys), trigger=(config.display_interval, 'iteration'))
    trainer.extend(sample_generate_conditional(gen, out, n_classes=gen.n_classes),
                   trigger=(config.evaluation_interval, 'iteration'),
                   priority=extension.PRIORITY_WRITER)
    trainer.extend(sample_generate_light(gen, out, rows=10, cols=10),
                   trigger=(config.evaluation_interval // 10, 'iteration'),
                   priority=extension.PRIORITY_WRITER)
    #trainer.extend(calc_inception(gen, n_ims=5000, splits=1, path=args.inception_model_path),
    #               trigger=(config.evaluation_interval, 'iteration'),
    #               priority=extension.PRIORITY_WRITER)
    trainer.extend(extensions.ProgressBar(update_interval=config.progressbar_interval))
    # Linearly decay the learning rate to zero from iteration_decay_start.
    ext_opt_gen = extensions.LinearShift('alpha', (config.adam['alpha'], 0.),
                                         (config.iteration_decay_start, config.iteration), opt_gen)
    ext_opt_dis = extensions.LinearShift('alpha', (config.adam['alpha'], 0.),
                                         (config.iteration_decay_start, config.iteration), opt_dis)
    trainer.extend(ext_opt_gen)
    trainer.extend(ext_opt_dis)
    if args.snapshot:
        print("Resume training with snapshot:{}".format(args.snapshot))
        chainer.serializers.load_npz(args.snapshot, trainer)
    # Run the training
    print("start training")
    trainer.run()
if __name__ == '__main__':
main()
| 5,656 | 42.515385 | 116 | py |
GANFingerprints | GANFingerprints-master/SNGAN/evaluations/calc_intra_FID.py | import os, sys
import numpy as np
import argparse
import chainer
base = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(base, '../'))
from evaluation import gen_images, gen_images_with_condition, load_inception_model
import yaml
import source.yaml_utils as yaml_utils
from evaluation import FID
def load_models(config):
    """Instantiate only the generator; FID evaluation needs no discriminator."""
    conf = config.models['generator']
    return yaml_utils.load_model(conf['fn'], conf['name'], conf['args'])
def main():
    """Compute per-class (intra) FID for a trained conditional generator.

    For each class, 5000 samples are generated and their Inception
    statistics are compared against precomputed reference stats stored
    as one .npz (with 'mean'/'cov') per class under --stat_dir_path.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_path', type=str, default='configs/base.yml')
    parser.add_argument('--gpu', '-g', type=int, default=0)
    parser.add_argument('--results_dir', type=str, default='./results/gans')
    parser.add_argument('--stat_dir_path', type=str,
                        default='')
    parser.add_argument('--inception_model_path', type=str,
                        default='')
    parser.add_argument('--snapshot', type=str, default='')
    parser.add_argument('--n_classes', type=int, default=1000)
    parser.add_argument('--class_start_from', type=int, default=0)
    parser.add_argument('--tf', action='store_true', default=False)
    args = parser.parse_args()
    chainer.cuda.get_device_from_id(args.gpu).use()
    if not os.path.exists(args.results_dir):
        os.makedirs(args.results_dir)
    config = yaml_utils.Config(yaml.load(open(args.config_path)))
    # Model
    gen = load_models(config)
    gen.to_gpu()
    chainer.serializers.load_npz(args.snapshot, gen)
    # Fixed seed so results are comparable across runs/snapshots.
    np.random.seed(1234)
    if args.tf:
        # TensorFlow reference implementation of the Inception statistics.
        import source.inception.inception_score_tf
        from source.inception.inception_score_tf import get_mean_and_cov as get_mean_cov
    else:
        from evaluation import get_mean_cov
        model = load_inception_model(args.inception_model_path)
    for c in range(args.class_start_from, args.n_classes):
        print("class:{}".format(c))
        stat = np.load(os.path.join(args.stat_dir_path, '{}.npz'.format(c)))
        ims = gen_images_with_condition(gen, c, 5000, batchsize=100).astype("f")
        if args.tf:
            mean, cov = get_mean_cov(ims)
        else:
            with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
                mean, cov = get_mean_cov(model, ims, batch_size=100)
        fid = FID(stat["mean"], stat["cov"], mean, cov)
        print(fid)
        np.savetxt('{}/fid_{}.txt'.format(args.results_dir, c), np.array([fid]))
| 2,540 | 38.092308 | 102 | py |
GANFingerprints | GANFingerprints-master/SNGAN/evaluations/calc_ref_stats.py | import os, sys
import numpy as np
import argparse
import chainer
base = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(base, '../'))
from evaluation import load_inception_model
import scipy.ndimage as ndimage
from scipy.misc import imresize
IMAGENET_ROOT_PATH = "/path/to/imagenet/train"
IMAGE_LABEL_LIST_PATH = "/path/to/image_label_list/"
train_filenames_and_labels = np.loadtxt(IMAGE_LABEL_LIST_PATH, dtype=np.str)
def get_imagenet_samples(c):
    """Load every ImageNet training image labeled `c`, resized to 128x128,
    as a float32 array of shape (n, 3, 128, 128).

    Scans the full (filename, label) list on each call, so this is O(dataset)
    per class.
    """
    RESOLUTION = 128
    images = []
    count = 0
    for filename, label in train_filenames_and_labels:
        # Entries were saved with repr()-style quoting; strip the quotes.
        filename = filename.split('\'')[1]
        label = label.split('\'')[1]
        if int(label) != c:
            continue
        # NOTE(review): scipy.ndimage.imread and scipy.misc.imresize were
        # removed in modern SciPy (>= 1.2/1.3); this requires an old SciPy
        # with Pillow installed.
        image = ndimage.imread(os.path.join(IMAGENET_ROOT_PATH, filename))
        image = np.asarray(image, dtype=np.uint8)
        image = imresize(image, (RESOLUTION, RESOLUTION))
        images.append(image)
        count += 1
    # Reference samples
    all_ref_samples = np.stack(images, axis=0).transpose((0, 3, 1, 2)).astype(np.float32)
    return all_ref_samples
def main():
    """Compute per-class Inception statistics (mean/covariance of features)
    for the reference dataset and save one .npz per class under
    --stat_dir_path, for later use as FID reference stats.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', type=int, default=0)
    parser.add_argument('--dataset', type=str, default='imagenet')
    parser.add_argument('--stat_dir_path', type=str, default='')
    # Bug fix: this flag was read below (args.inception_model_path) but was
    # never registered with the parser, so the default (non --tf) path
    # crashed with AttributeError.
    parser.add_argument('--inception_model_path', type=str, default='',
                        help='path to the chainer inception model')
    parser.add_argument('--n_classes', type=int, default=1000)
    parser.add_argument('--tf', action='store_true', default=False)
    args = parser.parse_args()
    chainer.cuda.get_device_from_id(args.gpu).use()
    if args.dataset == 'imagenet':
        get_samples = get_imagenet_samples
    else:
        raise NotImplementedError
    if not os.path.exists(args.stat_dir_path):
        os.makedirs(args.stat_dir_path)
    if args.tf:
        # TensorFlow reference implementation of the statistics.
        import source.inception.inception_score_tf
        from source.inception.inception_score_tf import get_mean_and_cov as get_mean_cov
    else:
        from evaluation import get_mean_cov
        model = load_inception_model(args.inception_model_path)
    for c in range(args.n_classes):
        print('label:{}'.format(c))
        all_ref_samples = get_samples(c)
        if args.tf:
            mean, cov = get_mean_cov(all_ref_samples)
        else:
            with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
                mean, cov = get_mean_cov(model, all_ref_samples)
        np.savez(os.path.join(args.stat_dir_path, '{}.npz'.format(int(c))), mean=mean, cov=cov)
| 2,532 | 34.180556 | 102 | py |
GANFingerprints | GANFingerprints-master/SNGAN/evaluations/gen_interpolated_images.py | """
Example:
python evaluations/gen_interpolated_images.py --n_zs=10 --n_intp=10 --snapshot=ResNetGenerator_850000.npz --config=configs/sn_projection.yml --classes 986 989
"""
import os, sys, time
import shutil
import numpy as np
import argparse
import chainer
from PIL import Image
base = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(base, '../'))
from evaluation import gen_images_with_condition
import yaml
import source.yaml_utils as yaml_utils
from source.miscs.random_samples import sample_continuous
def load_models(config):
    """Build just the generator declared in `config` (interpolation needs
    no discriminator)."""
    conf = config.models['generator']
    return yaml_utils.load_model(conf['fn'], conf['name'], conf['args'])
def main():
    """Generate a grid of class-interpolated samples from a trained
    conditional generator: each row uses a fixed latent z while the class
    conditioning is linearly blended between two labels across columns.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_path', type=str, default='configs/base.yml')
    parser.add_argument('--gpu', '-g', type=int, default=0)
    parser.add_argument('--results_dir', type=str, default='./results/gans')
    parser.add_argument('--snapshot', type=str, default='')
    parser.add_argument('--n_intp', type=int, default=5)
    parser.add_argument('--n_zs', type=int, default=5)
    parser.add_argument('--classes', type=int, nargs="*", default=None)
    parser.add_argument('--seed', type=int, default=1234)
    args = parser.parse_args()
    chainer.cuda.get_device(args.gpu).use()
    config = yaml_utils.Config(yaml.load(open(args.config_path)))
    gen = load_models(config)
    gen.to_gpu()
    out = args.results_dir
    chainer.serializers.load_npz(args.snapshot, gen)
    np.random.seed(args.seed)
    xp = gen.xp
    # NOTE(review): n_images is computed but never used.
    n_images = args.n_zs * args.n_intp
    imgs = []
    # Without --classes, pick two labels at random (1000 assumes ImageNet).
    classes = tuple(args.classes) if args.classes is not None else [np.random.randint(1000),
                                                                    np.random.randint(1000)]
    for _ in range(args.n_zs):
        # Same latent z for every column of this row; only the class mix varies.
        z = xp.array([np.random.normal(size=(128,))] * args.n_intp, xp.float32)
        ys = xp.array([[classes[0], classes[1]]] * args.n_intp, dtype=xp.int32)
        # Per-column weights: (1, 0) -> (0, 1) linearly across n_intp steps.
        ws_y = xp.array([np.linspace(0, 1, args.n_intp)[::-1], np.linspace(0, 1, args.n_intp)], dtype=xp.float32).T
        with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
            x = gen(z=z, y=ys, weights=ws_y)
        x = chainer.cuda.to_cpu(x.data)
        x = np.asarray(np.clip(x * 127.5 + 127.5, 0.0, 255.0), dtype=np.uint8)
        imgs.append(x)
    # Tile (n_zs, n_intp, 3, h, w) into one (n_zs*h, n_intp*w, 3) image.
    img = np.stack(imgs)
    _, _, _, h, w = img.shape
    img = img.transpose(0, 3, 1, 4, 2)
    img = img.reshape((args.n_zs * h, args.n_intp * w, 3))
    save_path = os.path.join(out, 'interpolated_images_{}-{}.png'.format(classes[0], classes[1]))
    if not os.path.exists(out):
        os.makedirs(out)
    Image.fromarray(img).save(save_path)
| 2,812 | 37.534247 | 158 | py |
GANFingerprints | GANFingerprints-master/SNGAN/evaluations/calc_inception_score.py | import os, sys
import numpy as np
import argparse
import chainer
base = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(base, '../'))
from evaluation import gen_images
import yaml
import source.yaml_utils as yaml_utils
def load_models(config):
    """Instantiate the generator described by `config`; scoring uses only it."""
    conf = config.models['generator']
    return yaml_utils.load_model(conf['fn'], conf['name'], conf['args'])
def main():
    """Compute the Inception score of a trained generator snapshot and write
    the mean/std to `results_dir`/inception_score.txt."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_path', type=str, default='configs/base.yml')
    parser.add_argument('--gpu', '-g', type=int, default=0)
    parser.add_argument('--results_dir', type=str, default='./results/gans')
    parser.add_argument('--snapshot', type=str, default='')
    parser.add_argument('--inception_model_path', type=str, default='')
    parser.add_argument('--splits', type=int, default=10)
    parser.add_argument('--tf', action='store_true', default=False)
    args = parser.parse_args()
    chainer.cuda.get_device_from_id(args.gpu).use()
    config = yaml_utils.Config(yaml.load(open(args.config_path)))
    # Model
    gen = load_models(config)
    gen.to_gpu(args.gpu)
    chainer.serializers.load_npz(args.snapshot, gen)
    # Fixed seed so scores are comparable across snapshots.
    np.random.seed(1234)
    # NOTE(review): xp is assigned but never used below.
    xp = gen.xp
    # Standard protocol: 5000 samples per split.
    n = int(5000 * args.splits)
    #for _ in range(50):
    #    gen(128)
    print("Gen")
    ims = gen_images(gen, n, batchsize=100).astype("f")
    print(np.max(ims), np.min(ims))
    if args.tf:
        # TensorFlow reference implementation of the Inception score.
        import source.inception.inception_score_tf as inception_score
        mean, std = inception_score.get_inception_score(ims, args.splits)
        print(mean, std)
    else:
        from evaluation import load_inception_model
        from source.inception.inception_score import inception_score, Inception
        model = load_inception_model(args.inception_model_path)
        mean, std = inception_score(model, ims, splits=args.splits)
        print(mean, std)
    if not os.path.exists(args.results_dir):
        os.makedirs(args.results_dir)
    np.savetxt('{}/inception_score.txt'.format(args.results_dir),
               np.array([mean, std]))
| 2,147 | 33.645161 | 83 | py |
GANFingerprints | GANFingerprints-master/SNGAN/evaluations/__init__.py | 0 | 0 | 0 | py | |
GANFingerprints | GANFingerprints-master/SNGAN/evaluations/gen_images.py | import os, sys, time
import shutil
import numpy as np
import argparse
import chainer
from PIL import Image
base = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(base, '../'))
from evaluation import gen_images_with_condition
import yaml
import source.yaml_utils as yaml_utils
def load_models(config):
    """Build the generator declared in `config` (sampling needs no discriminator)."""
    conf = config.models['generator']
    return yaml_utils.load_model(conf['fn'], conf['name'], conf['args'])
def main():
    """Generate class-conditional sample grids from a trained SNGAN
    generator snapshot and save them as numbered PNGs under --results_dir."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_path', type=str, default='configs/base.yml')
    parser.add_argument('--gpu', '-g', type=int, default=0)
    parser.add_argument('--results_dir', type=str, default='./results/gans')
    parser.add_argument('--snapshot', type=str, default='')
    parser.add_argument('--rows', type=int, default=1)
    parser.add_argument('--columns', type=int, default=1)
    parser.add_argument('--classes', type=int, nargs="*", default=None)
    parser.add_argument('--seed', type=int, default=1234)
    parser.add_argument('--num_pngs', type=int, default=100)
    args = parser.parse_args()
    chainer.cuda.get_device_from_id(args.gpu).use()
    config = yaml_utils.Config(yaml.load(open(args.config_path)))
    # Model
    gen = load_models(config)
    gen.to_gpu(args.gpu)
    out = args.results_dir
    chainer.serializers.load_npz(args.snapshot, gen)
    np.random.seed(args.seed)
    # Default: sweep every class the generator knows about.
    classes = tuple(args.classes) if args.classes is not None else np.arange(0, gen.n_classes, dtype=np.int32)
    for c in classes:
        for png_idx in range(args.num_pngs):
            print('Generating png %d / %d...' % (png_idx, args.num_pngs))
            with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
                x = gen_images_with_condition(gen, c=c, n=args.rows * args.columns, batchsize=args.rows * args.columns)
            _, _, h, w = x.shape
            # Tile (rows*cols, 3, h, w) samples into one (rows*h, cols*w, 3) grid.
            x = x.reshape((args.rows, args.columns, 3, h, w))
            x = x.transpose(0, 3, 1, 4, 2)
            x = x.reshape((args.rows * h, args.columns * w, 3))
            # NOTE(review): the filename does not include the class id, so
            # each class overwrites the previous class's PNGs — confirm this
            # is intended (single-class usage).
            save_path = os.path.join(out, 'SNGAN_%08d.png' % png_idx)
            if not os.path.exists(out):
                os.makedirs(out)
            Image.fromarray(x).save(save_path)
| 2,336 | 37.95 | 119 | py |
GANFingerprints | GANFingerprints-master/SNGAN/datasets/lsun_bedroom_200k.py | import numpy as np
from PIL import Image
import chainer
import random
import scipy.misc
class LSUNBedroom200kDataset(chainer.dataset.DatasetMixin):
    """LSUN-bedroom images served through a LabeledImageDataset, rescaled to
    [-1, 1] with uniform dequantization noise added on every access."""

    def __init__(self, path, root, size=128, resize_method='bilinear', augmentation=False, crop_ratio=1.0):
        self.base = chainer.datasets.LabeledImageDataset(path, root)
        self.size = size
        self.resize_method = resize_method
        self.augmentation = augmentation
        self.crop_ratio = crop_ratio

    def __len__(self):
        return len(self.base)

    def transform(self, image):
        # Map uint8 pixels into [-1, 1), then dequantize with noise uniform
        # over one quantization bin (1/128 in the rescaled space).
        scaled = image / 128. - 1.
        noise = np.random.uniform(size=image.shape, low=0., high=1. / 128)
        return scaled + noise

    def get_example(self, i):
        raw, label = self.base[i]
        return self.transform(raw), label
if __name__ == "__main__":
    # Build a "<filename> <label>" index file for every PNG under the root
    # directory given as argv[1] (label is always 0; unconditional dataset).
    import glob, os, sys
    root_path = sys.argv[1]
    count = 0
    n_image_list = []
    filenames = glob.glob(root_path + '/*.png')
    for filename in filenames:
        filename = filename.split('/')
        n_image_list.append([filename[-1], 0])
        count += 1
        if count % 10000 == 0:
            print(count)
    print("Num of examples:{}".format(count))
    # Fix: `np.str` was deprecated in NumPy 1.20 and removed in 1.24; it was
    # a plain alias of the builtin `str`.
    n_image_list = np.array(n_image_list, str)
    np.savetxt('lsun_bedroom_200k_png_image_list.txt', n_image_list, fmt="%s")
GANFingerprints | GANFingerprints-master/SNGAN/datasets/celeba.py | import numpy as np
from PIL import Image
import chainer
import random
import scipy.misc
class CelebADataset(chainer.dataset.DatasetMixin):
    """CelebA images served through a LabeledImageDataset, rescaled to
    [-1, 1] with uniform dequantization noise added on every access."""

    def __init__(self, path, root, size=128, resize_method='bilinear', augmentation=False, crop_ratio=1.0):
        self.base = chainer.datasets.LabeledImageDataset(path, root)
        self.size = size
        self.resize_method = resize_method
        self.augmentation = augmentation
        self.crop_ratio = crop_ratio

    def __len__(self):
        return len(self.base)

    def transform(self, image):
        # Map uint8 pixels into [-1, 1), then dequantize with noise uniform
        # over one quantization bin (1/128 in the rescaled space).
        scaled = image / 128. - 1.
        noise = np.random.uniform(size=image.shape, low=0., high=1. / 128)
        return scaled + noise

    def get_example(self, i):
        raw, label = self.base[i]
        return self.transform(raw), label
if __name__ == "__main__":
    # Build a "<filename> <label>" index file for every PNG under the root
    # directory given as argv[1] (label is always 0; unconditional dataset).
    import glob, os, sys
    root_path = sys.argv[1]
    count = 0
    n_image_list = []
    filenames = glob.glob(root_path + '/*.png')
    for filename in filenames:
        filename = filename.split('/')
        n_image_list.append([filename[-1], 0])
        count += 1
        if count % 10000 == 0:
            print(count)
    print("Num of examples:{}".format(count))
    # Fix: `np.str` was deprecated in NumPy 1.20 and removed in 1.24; it was
    # a plain alias of the builtin `str`.
    n_image_list = np.array(n_image_list, str)
    np.savetxt('celeba_align_png_cropped_image_list.txt', n_image_list, fmt="%s")
GANFingerprints | GANFingerprints-master/SNGAN/dis_models/snresnet_256.py | import chainer
from chainer import functions as F
from source.links.sn_embed_id import SNEmbedID
from source.links.sn_linear import SNLinear
from dis_models.resblocks import Block, OptimizedBlock
class SNResNetProjectionDiscriminator(chainer.Chain):
    """Spectral-norm ResNet discriminator with an optional projection-based
    conditioning head (presumably the 256x256 variant, per the file name —
    five downsampling blocks plus one extra vs. the base model).

    When `n_classes` > 0 the class embedding is combined with the pooled
    features via an inner product (projection discriminator); otherwise only
    the unconditional linear head contributes to the score.
    """

    def __init__(self, ch=64, n_classes=0, activation=F.relu):
        super(SNResNetProjectionDiscriminator, self).__init__()
        self.activation = activation
        initializer = chainer.initializers.GlorotUniform()
        with self.init_scope():
            self.block1 = OptimizedBlock(3, ch)
            self.block2 = Block(ch, ch * 2, activation=activation, downsample=True)
            self.block3 = Block(ch * 2, ch * 4, activation=activation, downsample=True)
            self.block4 = Block(ch * 4, ch * 8, activation=activation, downsample=True)
            self.block5 = Block(ch * 8, ch * 8, activation=activation, downsample=True)
            self.block6 = Block(ch * 8, ch * 16, activation=activation, downsample=True)
            self.block7 = Block(ch * 16, ch * 16, activation=activation, downsample=False)
            self.l8 = SNLinear(ch * 16, 1, initialW=initializer)
            if n_classes > 0:
                # Class embedding used by the projection term in __call__.
                self.l_y = SNEmbedID(n_classes, ch * 16, initialW=initializer)

    def __call__(self, x, y=None):
        """Return the unnormalized discriminator score for images `x`,
        optionally conditioned on integer class labels `y`."""
        h = x
        h = self.block1(h)
        h = self.block2(h)
        h = self.block3(h)
        h = self.block4(h)
        h = self.block5(h)
        h = self.block6(h)
        h = self.block7(h)
        h = self.activation(h)
        h = F.sum(h, axis=(2, 3))  # global sum pooling over spatial dims
        output = self.l8(h)
        if y is not None:
            # Projection: add <embed(y), h> to the unconditional score.
            w_y = self.l_y(y)
            output += F.sum(w_y * h, axis=1, keepdims=True)
        return output
| 1,727 | 41.146341 | 90 | py |
GANFingerprints | GANFingerprints-master/SNGAN/dis_models/snresnet.py | import chainer
from chainer import functions as F
from source.links.sn_embed_id import SNEmbedID
from source.links.sn_linear import SNLinear
from dis_models.resblocks import Block, OptimizedBlock
class SNResNetProjectionDiscriminator(chainer.Chain):
    """Spectrally normalized ResNet projection discriminator (6-block variant).

    Five downsampling residual blocks take 3-channel input to ch*16
    feature maps; the conditional logit adds the inner product of the
    embedded label with the globally pooled features.
    """
    def __init__(self, ch=64, n_classes=0, activation=F.relu):
        super(SNResNetProjectionDiscriminator, self).__init__()
        self.activation = activation
        initializer = chainer.initializers.GlorotUniform()
        with self.init_scope():
            self.block1 = OptimizedBlock(3, ch)
            self.block2 = Block(ch, ch * 2, activation=activation, downsample=True)
            self.block3 = Block(ch * 2, ch * 4, activation=activation, downsample=True)
            self.block4 = Block(ch * 4, ch * 8, activation=activation, downsample=True)
            self.block5 = Block(ch * 8, ch * 16, activation=activation, downsample=True)
            self.block6 = Block(ch * 16, ch * 16, activation=activation, downsample=False)
            self.l7 = SNLinear(ch * 16, 1, initialW=initializer)
            if n_classes > 0:
                # Label embedding used only for the conditional projection term.
                self.l_y = SNEmbedID(n_classes, ch * 16, initialW=initializer)
    def __call__(self, x, y=None):
        """Return the (possibly class-conditional) discriminator logit for x."""
        h = x
        h = self.block1(h)
        h = self.block2(h)
        h = self.block3(h)
        h = self.block4(h)
        h = self.block5(h)
        h = self.block6(h)
        h = self.activation(h)
        h = F.sum(h, axis=(2, 3))  # Global pooling
        output = self.l7(h)
        if y is not None:
            # Projection term: <embed(y), pooled features>
            w_y = self.l_y(y)
            output += F.sum(w_y * h, axis=1, keepdims=True)
        return output
class SNResNetConcatDiscriminator(chainer.Chain):
    """Concatenation-conditioned variant of the SN-ResNet discriminator.

    Instead of a projection head, the label embedding (dim_emb channels)
    is broadcast spatially and concatenated to the feature maps after
    block3, so all later blocks see the conditioning.
    """
    def __init__(self, ch=64, n_classes=0, activation=F.relu, dim_emb=128):
        super(SNResNetConcatDiscriminator, self).__init__()
        self.activation = activation
        initializer = chainer.initializers.GlorotUniform()
        with self.init_scope():
            self.block1 = OptimizedBlock(3, ch)
            self.block2 = Block(ch, ch * 2, activation=activation, downsample=True)
            self.block3 = Block(ch * 2, ch * 4, activation=activation, downsample=True)
            self.l_y = SNEmbedID(n_classes, dim_emb, initialW=initializer)
            # block4 consumes the concatenated (features + label embedding) channels.
            self.block4 = Block(ch * 4 + dim_emb, ch * 8, activation=activation, downsample=True)
            self.block5 = Block(ch * 8, ch * 16, activation=activation, downsample=True)
            self.block6 = Block(ch * 16, ch * 16, activation=activation, downsample=False)
            self.l7 = SNLinear(ch * 16, 1, initialW=initializer)
    def __call__(self, x, y=None):
        """Return the discriminator logit; y (if given) is concatenated mid-network."""
        h = x
        h = self.block1(h)
        h = self.block2(h)
        h = self.block3(h)
        if y is not None:
            emb = self.l_y(y)
            H, W = h.shape[2], h.shape[3]
            # Tile the (N, dim_emb) embedding over the spatial grid before concat.
            emb = F.broadcast_to(
                F.reshape(emb, (emb.shape[0], emb.shape[1], 1, 1)),
                (emb.shape[0], emb.shape[1], H, W))
            h = F.concat([h, emb], axis=1)
        h = self.block4(h)
        h = self.block5(h)
        h = self.block6(h)
        h = self.activation(h)
        h = F.sum(h, axis=(2, 3))  # Global pooling
        output = self.l7(h)
        return output
| 3,224 | 42 | 97 | py |
GANFingerprints | GANFingerprints-master/SNGAN/dis_models/snresnet_small.py | import chainer
from chainer import functions as F
from source.links.sn_embed_id import SNEmbedID
from source.links.sn_linear import SNLinear
from dis_models.resblocks import Block, OptimizedBlock
class SNResNetProjectionDiscriminator(chainer.Chain):
    """Small SN-ResNet projection discriminator (max width ch*8).

    Same structure as the larger variants but with a reduced channel
    schedule; five downsampling blocks followed by one stride-preserving
    block and a spectral-norm linear head.
    """
    def __init__(self, ch=64, n_classes=0, activation=F.relu):
        super(SNResNetProjectionDiscriminator, self).__init__()
        self.activation = activation
        initializer = chainer.initializers.GlorotUniform()
        with self.init_scope():
            self.block1 = OptimizedBlock(3, ch)
            self.block2 = Block(ch, ch * 2, activation=activation, downsample=True)
            self.block3 = Block(ch * 2, ch * 4, activation=activation, downsample=True)
            self.block4 = Block(ch * 4, ch * 4, activation=activation, downsample=True)
            self.block5 = Block(ch * 4, ch * 8, activation=activation, downsample=True)
            self.block6 = Block(ch * 8, ch * 8, activation=activation, downsample=False)
            self.l7 = SNLinear(ch * 8, 1, initialW=initializer)
            if n_classes > 0:
                # Label embedding used only for the conditional projection term.
                self.l_y = SNEmbedID(n_classes, ch * 8, initialW=initializer)
    def __call__(self, x, y=None):
        """Return the (possibly class-conditional) discriminator logit for x."""
        h = x
        h = self.block1(h)
        h = self.block2(h)
        h = self.block3(h)
        h = self.block4(h)
        h = self.block5(h)
        h = self.block6(h)
        h = self.activation(h)
        h = F.sum(h, axis=(2, 3))  # Global pooling
        output = self.l7(h)
        if y is not None:
            # Projection term: <embed(y), pooled features>
            w_y = self.l_y(y)
            output += F.sum(w_y * h, axis=1, keepdims=True)
        return output
| 1,625 | 40.692308 | 88 | py |
GANFingerprints | GANFingerprints-master/SNGAN/dis_models/snresnet_64.py | import chainer
from chainer import functions as F
from source.links.sn_embed_id import SNEmbedID
from source.links.sn_linear import SNLinear
from dis_models.resblocks import Block, OptimizedBlock
class SNResNetProjectionDiscriminator(chainer.Chain):
    """SN-ResNet projection discriminator, 5-block variant.

    All five residual blocks downsample; the conditional logit adds the
    inner product of the embedded label with the globally pooled features
    (projection discriminator head).
    """

    def __init__(self, ch=64, n_classes=0, activation=F.relu):
        super(SNResNetProjectionDiscriminator, self).__init__()
        self.activation = activation
        w_init = chainer.initializers.GlorotUniform()
        with self.init_scope():
            self.block1 = OptimizedBlock(3, ch)
            self.block2 = Block(ch, ch * 2, activation=activation, downsample=True)
            self.block3 = Block(ch * 2, ch * 4, activation=activation, downsample=True)
            self.block4 = Block(ch * 4, ch * 8, activation=activation, downsample=True)
            self.block5 = Block(ch * 8, ch * 16, activation=activation, downsample=True)
            self.l6 = SNLinear(ch * 16, 1, initialW=w_init)
            if n_classes > 0:
                # Label embedding used only for the projection term.
                self.l_y = SNEmbedID(n_classes, ch * 16, initialW=w_init)

    def __call__(self, x, y=None):
        """Return the (possibly class-conditional) discriminator logit for x."""
        h = x
        for block in (self.block1, self.block2, self.block3,
                      self.block4, self.block5):
            h = block(h)
        h = self.activation(h)
        h = F.sum(h, axis=(2, 3))  # global sum pooling
        output = self.l6(h)
        if y is not None:
            # Projection term: <embed(y), pooled features>
            output += F.sum(self.l_y(y) * h, axis=1, keepdims=True)
        return output
| 1,512 | 39.891892 | 88 | py |
GANFingerprints | GANFingerprints-master/SNGAN/dis_models/resblocks.py | import math
import chainer
from chainer import functions as F
from source.links.sn_convolution_2d import SNConvolution2D
def _downsample(x):
    # Halve spatial resolution with 2x2 average pooling.
    return F.average_pooling_2d(x, 2)
class Block(chainer.Chain):
    """Pre-activation residual block with spectrally normalized convolutions.

    Residual path: act -> SNConv -> act -> SNConv (then optional 2x2
    average-pool). Shortcut: identity, or a 1x1 SNConv (plus the same
    pooling) when the channel count changes or downsampling is requested.
    """
    def __init__(self, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
                 activation=F.relu, downsample=False):
        super(Block, self).__init__()
        initializer = chainer.initializers.GlorotUniform(math.sqrt(2))
        initializer_sc = chainer.initializers.GlorotUniform()
        self.activation = activation
        self.downsample = downsample
        # A learnable 1x1 shortcut is needed whenever identity cannot be used.
        self.learnable_sc = (in_channels != out_channels) or downsample
        hidden_channels = in_channels if hidden_channels is None else hidden_channels
        with self.init_scope():
            self.c1 = SNConvolution2D(in_channels, hidden_channels, ksize=ksize, pad=pad, initialW=initializer)
            self.c2 = SNConvolution2D(hidden_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer)
            if self.learnable_sc:
                self.c_sc = SNConvolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
    def residual(self, x):
        """Main (non-shortcut) path of the block."""
        h = x
        h = self.activation(h)
        h = self.c1(h)
        h = self.activation(h)
        h = self.c2(h)
        if self.downsample:
            h = _downsample(h)
        return h
    def shortcut(self, x):
        """Skip connection, downsampled/projected to match the residual path."""
        if self.learnable_sc:
            x = self.c_sc(x)
            if self.downsample:
                return _downsample(x)
            else:
                return x
        else:
            return x
    def __call__(self, x):
        return self.residual(x) + self.shortcut(x)
class OptimizedBlock(chainer.Chain):
    """First discriminator block: conv -> act -> conv -> avg-pool.

    Unlike Block, the first activation is omitted (raw image input) and
    the downsample is unconditional; the shortcut pools first, then
    projects with a 1x1 spectral-norm conv.
    """
    def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=F.relu):
        super(OptimizedBlock, self).__init__()
        initializer = chainer.initializers.GlorotUniform(math.sqrt(2))
        initializer_sc = chainer.initializers.GlorotUniform()
        self.activation = activation
        with self.init_scope():
            self.c1 = SNConvolution2D(in_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer)
            self.c2 = SNConvolution2D(out_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer)
            self.c_sc = SNConvolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
    def residual(self, x):
        """Main path: conv, activation, conv, then 2x2 average pooling."""
        h = x
        h = self.c1(h)
        h = self.activation(h)
        h = self.c2(h)
        h = _downsample(h)
        return h
    def shortcut(self, x):
        # Pool first, then 1x1-project to out_channels.
        return self.c_sc(_downsample(x))
    def __call__(self, x):
        return self.residual(x) + self.shortcut(x)
| 2,764 | 35.381579 | 112 | py |
GANFingerprints | GANFingerprints-master/SNGAN/dis_models/__init__.py | 0 | 0 | 0 | py | |
GANFingerprints | GANFingerprints-master/SNGAN/gen_models/resnet_small.py | import chainer
import chainer.links as L
from chainer import functions as F
from gen_models.resblocks import Block
from source.miscs.random_samples import sample_categorical, sample_continuous
class ResNetGenerator(chainer.Chain):
    """Small ResNet generator: 5 upsampling blocks, so output is
    bottom_width * 2**5 spatial (128x128 with the default bottom_width=4),
    3 channels, tanh-squashed to [-1, 1].

    When n_classes > 0 the blocks use categorical conditional batch norm
    keyed on y; z/y are sampled internally when not supplied.
    """
    def __init__(self, ch=64, dim_z=128, bottom_width=4, activation=F.relu, n_classes=0, distribution="normal"):
        super(ResNetGenerator, self).__init__()
        initializer = chainer.initializers.GlorotUniform()
        self.bottom_width = bottom_width
        self.activation = activation
        self.distribution = distribution
        self.dim_z = dim_z
        self.n_classes = n_classes
        with self.init_scope():
            # Project z to a (ch*8, bottom_width, bottom_width) seed tensor.
            self.l1 = L.Linear(dim_z, (bottom_width ** 2) * ch * 8, initialW=initializer)
            self.block2 = Block(ch * 8, ch * 8, activation=activation, upsample=True, n_classes=n_classes)
            self.block3 = Block(ch * 8, ch * 4, activation=activation, upsample=True, n_classes=n_classes)
            self.block4 = Block(ch * 4, ch * 4, activation=activation, upsample=True, n_classes=n_classes)
            self.block5 = Block(ch * 4, ch * 2, activation=activation, upsample=True, n_classes=n_classes)
            self.block6 = Block(ch * 2, ch, activation=activation, upsample=True, n_classes=n_classes)
            self.b7 = L.BatchNormalization(ch)
            self.l7 = L.Convolution2D(ch, 3, ksize=3, stride=1, pad=1, initialW=initializer)
    def __call__(self, batchsize=64, z=None, y=None, **kwargs):
        """Generate a batch of images; z and/or y are sampled when omitted."""
        if z is None:
            z = sample_continuous(self.dim_z, batchsize, distribution=self.distribution, xp=self.xp)
        if y is None:
            y = sample_categorical(self.n_classes, batchsize, distribution="uniform",
                                   xp=self.xp) if self.n_classes > 0 else None
        if (y is not None) and z.shape[0] != y.shape[0]:
            raise Exception('z.shape[0] != y.shape[0], z.shape[0]={}, y.shape[0]={}'.format(z.shape[0], y.shape[0]))
        h = z
        h = self.l1(h)
        h = F.reshape(h, (h.shape[0], -1, self.bottom_width, self.bottom_width))
        h = self.block2(h, y, **kwargs)
        h = self.block3(h, y, **kwargs)
        h = self.block4(h, y, **kwargs)
        h = self.block5(h, y, **kwargs)
        h = self.block6(h, y, **kwargs)
        h = self.b7(h)
        h = self.activation(h)
        h = F.tanh(self.l7(h))
        return h
| 2,396 | 50 | 116 | py |
GANFingerprints | GANFingerprints-master/SNGAN/gen_models/resnet_64.py | import chainer
import chainer.links as L
from chainer import functions as F
from gen_models.resblocks import Block
from source.miscs.random_samples import sample_categorical, sample_continuous
class ResNetGenerator(chainer.Chain):
    """ResNet generator with 4 upsampling blocks: output is
    bottom_width * 2**4 spatial (64x64 with the default bottom_width=4),
    3 channels, tanh-squashed to [-1, 1].

    When n_classes > 0 the blocks use categorical conditional batch norm
    keyed on y; z/y are sampled internally when not supplied.
    """
    def __init__(self, ch=64, dim_z=128, bottom_width=4, activation=F.relu, n_classes=0, distribution="normal"):
        super(ResNetGenerator, self).__init__()
        initializer = chainer.initializers.GlorotUniform()
        self.bottom_width = bottom_width
        self.activation = activation
        self.distribution = distribution
        self.dim_z = dim_z
        self.n_classes = n_classes
        with self.init_scope():
            # Project z to a (ch*16, bottom_width, bottom_width) seed tensor.
            self.l1 = L.Linear(dim_z, (bottom_width ** 2) * ch * 16, initialW=initializer)
            self.block2 = Block(ch * 16, ch * 8, activation=activation, upsample=True, n_classes=n_classes)
            self.block3 = Block(ch * 8, ch * 4, activation=activation, upsample=True, n_classes=n_classes)
            self.block4 = Block(ch * 4, ch * 2, activation=activation, upsample=True, n_classes=n_classes)
            self.block5 = Block(ch * 2, ch, activation=activation, upsample=True, n_classes=n_classes)
            self.b6 = L.BatchNormalization(ch)
            self.l6 = L.Convolution2D(ch, 3, ksize=3, stride=1, pad=1, initialW=initializer)
    def __call__(self, batchsize=64, z=None, y=None, **kwargs):
        """Generate a batch of images; z and/or y are sampled when omitted.

        Raises:
            ValueError: if z and y have mismatched batch dimensions.
        """
        if z is None:
            z = sample_continuous(self.dim_z, batchsize, distribution=self.distribution, xp=self.xp)
        if y is None:
            y = sample_categorical(self.n_classes, batchsize, distribution="uniform",
                                   xp=self.xp) if self.n_classes > 0 else None
        if (y is not None) and z.shape[0] != y.shape[0]:
            # ValueError (as in gen_models/resnet.py) instead of a bare
            # Exception; callers catching Exception still catch this.
            raise ValueError('z.shape[0] != y.shape[0], z.shape[0]={}, y.shape[0]={}'.format(z.shape[0], y.shape[0]))
        h = z
        h = self.l1(h)
        h = F.reshape(h, (h.shape[0], -1, self.bottom_width, self.bottom_width))
        h = self.block2(h, y, **kwargs)
        h = self.block3(h, y, **kwargs)
        h = self.block4(h, y, **kwargs)
        h = self.block5(h, y, **kwargs)
        h = self.b6(h)
        h = self.activation(h)
        h = F.tanh(self.l6(h))
        return h
| 2,251 | 49.044444 | 116 | py |
GANFingerprints | GANFingerprints-master/SNGAN/gen_models/resnet_256.py | import chainer
import chainer.links as L
from chainer import functions as F
from gen_models.resblocks import Block
from source.miscs.random_samples import sample_categorical, sample_continuous
class ResNetGenerator(chainer.Chain):
    """ResNet generator with 6 upsampling blocks: output is
    bottom_width * 2**6 spatial (256x256 with the default bottom_width=4),
    3 channels, tanh-squashed to [-1, 1].

    When n_classes > 0 the blocks use categorical conditional batch norm
    keyed on y; z/y are sampled internally when not supplied.
    """
    def __init__(self, ch=64, dim_z=128, bottom_width=4, activation=F.relu, n_classes=0, distribution="normal"):
        super(ResNetGenerator, self).__init__()
        initializer = chainer.initializers.GlorotUniform()
        self.bottom_width = bottom_width
        self.activation = activation
        self.distribution = distribution
        self.dim_z = dim_z
        self.n_classes = n_classes
        with self.init_scope():
            # Project z to a (ch*16, bottom_width, bottom_width) seed tensor.
            self.l1 = L.Linear(dim_z, (bottom_width ** 2) * ch * 16, initialW=initializer)
            self.block2 = Block(ch * 16, ch * 16, activation=activation, upsample=True, n_classes=n_classes)
            self.block3 = Block(ch * 16, ch * 8, activation=activation, upsample=True, n_classes=n_classes)
            self.block4 = Block(ch * 8, ch * 8, activation=activation, upsample=True, n_classes=n_classes)
            self.block5 = Block(ch * 8, ch * 4, activation=activation, upsample=True, n_classes=n_classes)
            self.block6 = Block(ch * 4, ch * 2, activation=activation, upsample=True, n_classes=n_classes)
            self.block7 = Block(ch * 2, ch, activation=activation, upsample=True, n_classes=n_classes)
            self.b8 = L.BatchNormalization(ch)
            self.l8 = L.Convolution2D(ch, 3, ksize=3, stride=1, pad=1, initialW=initializer)
    def __call__(self, batchsize=64, z=None, y=None, **kwargs):
        """Generate a batch of images; z and/or y are sampled when omitted."""
        if z is None:
            z = sample_continuous(self.dim_z, batchsize, distribution=self.distribution, xp=self.xp)
        if y is None:
            y = sample_categorical(self.n_classes, batchsize, distribution="uniform",
                                   xp=self.xp) if self.n_classes > 0 else None
        if (y is not None) and z.shape[0] != y.shape[0]:
            raise Exception('z.shape[0] != y.shape[0], z.shape[0]={}, y.shape[0]={}'.format(z.shape[0], y.shape[0]))
        h = z
        h = self.l1(h)
        h = F.reshape(h, (h.shape[0], -1, self.bottom_width, self.bottom_width))
        h = self.block2(h, y, **kwargs)
        h = self.block3(h, y, **kwargs)
        h = self.block4(h, y, **kwargs)
        h = self.block5(h, y, **kwargs)
        h = self.block6(h, y, **kwargs)
        h = self.block7(h, y, **kwargs)
        h = self.b8(h)
        h = self.activation(h)
        h = F.tanh(self.l8(h))
        return h
| 2,547 | 51 | 116 | py |
GANFingerprints | GANFingerprints-master/SNGAN/gen_models/resnet.py | import chainer
import chainer.links as L
from chainer import functions as F
from gen_models.resblocks import Block
from source.miscs.random_samples import sample_categorical, sample_continuous
class ResNetGenerator(chainer.Chain):
    """ResNet generator with 5 upsampling blocks: output is
    bottom_width * 2**5 spatial (128x128 with the default bottom_width=4),
    3 channels, tanh-squashed to [-1, 1].

    When n_classes > 0 the blocks use categorical conditional batch norm
    keyed on y; z/y are sampled internally when not supplied.
    """
    def __init__(self, ch=64, dim_z=128, bottom_width=4, activation=F.relu, n_classes=0, distribution="normal"):
        super(ResNetGenerator, self).__init__()
        initializer = chainer.initializers.GlorotUniform()
        self.bottom_width = bottom_width
        self.activation = activation
        self.distribution = distribution
        self.dim_z = dim_z
        self.n_classes = n_classes
        with self.init_scope():
            # Project z to a (ch*16, bottom_width, bottom_width) seed tensor.
            self.l1 = L.Linear(dim_z, (bottom_width ** 2) * ch * 16, initialW=initializer)
            self.block2 = Block(ch * 16, ch * 16, activation=activation, upsample=True, n_classes=n_classes)
            self.block3 = Block(ch * 16, ch * 8, activation=activation, upsample=True, n_classes=n_classes)
            self.block4 = Block(ch * 8, ch * 4, activation=activation, upsample=True, n_classes=n_classes)
            self.block5 = Block(ch * 4, ch * 2, activation=activation, upsample=True, n_classes=n_classes)
            self.block6 = Block(ch * 2, ch, activation=activation, upsample=True, n_classes=n_classes)
            self.b7 = L.BatchNormalization(ch)
            self.l7 = L.Convolution2D(ch, 3, ksize=3, stride=1, pad=1, initialW=initializer)
    def __call__(self, batchsize=64, z=None, y=None, **kwargs):
        """Generate a batch of images; z and/or y are sampled when omitted.

        Raises:
            ValueError: if z and y have mismatched batch dimensions.
        """
        if z is None:
            z = sample_continuous(self.dim_z, batchsize, distribution=self.distribution, xp=self.xp)
        if y is None:
            y = sample_categorical(self.n_classes, batchsize, distribution="uniform",
                                   xp=self.xp) if self.n_classes > 0 else None
        if (y is not None) and z.shape[0] != y.shape[0]:
            raise ValueError('z.shape[0] != y.shape[0], z.shape[0]={}, y.shape[0]={}'.format(z.shape[0], y.shape[0]))
        h = z
        h = self.l1(h)
        h = F.reshape(h, (h.shape[0], -1, self.bottom_width, self.bottom_width))
        h = self.block2(h, y, **kwargs)
        h = self.block3(h, y, **kwargs)
        h = self.block4(h, y, **kwargs)
        h = self.block5(h, y, **kwargs)
        h = self.block6(h, y, **kwargs)
        h = self.b7(h)
        h = self.activation(h)
        h = F.tanh(self.l7(h))
        return h
| 2,401 | 50.106383 | 117 | py |
GANFingerprints | GANFingerprints-master/SNGAN/gen_models/resblocks.py | import math
import chainer
import chainer.links as L
from chainer import functions as F
from source.links.categorical_conditional_batch_normalization import CategoricalConditionalBatchNormalization
def _upsample(x):
    # Double spatial resolution with nearest-neighbor-style unpooling.
    h, w = x.shape[2:]
    return F.unpooling_2d(x, 2, outsize=(h * 2, w * 2))
def upsample_conv(x, conv):
    """Upsample x by 2, then apply the given convolution link."""
    return conv(_upsample(x))
class Block(chainer.Chain):
    """Generator residual block: BN -> act -> (upsample) conv -> BN -> act -> conv.

    With n_classes > 0 the batch norms are class-conditional (keyed on y).
    The z parameter is accepted for interface compatibility but is unused
    in this implementation.
    """
    def __init__(self, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
                 activation=F.relu, upsample=False, n_classes=0):
        super(Block, self).__init__()
        initializer = chainer.initializers.GlorotUniform(math.sqrt(2))
        initializer_sc = chainer.initializers.GlorotUniform()
        self.activation = activation
        self.upsample = upsample
        # A learnable 1x1 shortcut is needed whenever identity cannot be used.
        self.learnable_sc = in_channels != out_channels or upsample
        hidden_channels = out_channels if hidden_channels is None else hidden_channels
        self.n_classes = n_classes
        with self.init_scope():
            self.c1 = L.Convolution2D(in_channels, hidden_channels, ksize=ksize, pad=pad, initialW=initializer)
            self.c2 = L.Convolution2D(hidden_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer)
            if n_classes > 0:
                # Per-class gamma/beta, conditioned on the label y.
                self.b1 = CategoricalConditionalBatchNormalization(in_channels, n_cat=n_classes)
                self.b2 = CategoricalConditionalBatchNormalization(hidden_channels, n_cat=n_classes)
            else:
                self.b1 = L.BatchNormalization(in_channels)
                self.b2 = L.BatchNormalization(hidden_channels)
            if self.learnable_sc:
                self.c_sc = L.Convolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
    def residual(self, x, y=None, z=None, **kwargs):
        """Main path; y selects conditional BN parameters when given."""
        h = x
        h = self.b1(h, y, **kwargs) if y is not None else self.b1(h, **kwargs)
        h = self.activation(h)
        h = upsample_conv(h, self.c1) if self.upsample else self.c1(h)
        h = self.b2(h, y, **kwargs) if y is not None else self.b2(h, **kwargs)
        h = self.activation(h)
        h = self.c2(h)
        return h
    def shortcut(self, x):
        """Skip connection, upsampled/projected to match the residual path."""
        if self.learnable_sc:
            x = upsample_conv(x, self.c_sc) if self.upsample else self.c_sc(x)
            return x
        else:
            return x
    def __call__(self, x, y=None, z=None, **kwargs):
        return self.residual(x, y, z, **kwargs) + self.shortcut(x)
| 2,458 | 40.677966 | 112 | py |
GANFingerprints | GANFingerprints-master/SNGAN/gen_models/__init__.py | 0 | 0 | 0 | py | |
GANFingerprints | GANFingerprints-master/SNGAN/source/yaml_utils.py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import shutil
import sys
import time
import yaml
# Copy from tgans repo.
class Config(object):
    """Read-only attribute/item view over a configuration dictionary.

    Keys are reachable both as ``cfg.key`` and ``cfg["key"]``; a missing
    attribute raises AttributeError, and repr renders the dict as YAML.
    """

    def __init__(self, config_dict):
        self.config = config_dict

    def __getattr__(self, key):
        try:
            return self.config[key]
        except KeyError:
            raise AttributeError(key)

    def __getitem__(self, key):
        return self.config[key]

    def __repr__(self):
        return yaml.dump(self.config, default_flow_style=False)
def load_dataset(config):
    """Instantiate the dataset class named in config.dataset with its args."""
    dataset = load_module(config.dataset['dataset_fn'],
                          config.dataset['dataset_name'])
    return dataset(**config.dataset['args'])
def load_module(fn, name):
    """Import the module at file path *fn* and return its attribute *name*.

    Side effect: the containing directory is pushed onto the front of
    sys.path so the module is importable by its bare file name.
    """
    base = os.path.basename(fn)
    mod_name, _ = os.path.splitext(base)
    sys.path.insert(0, os.path.dirname(fn))
    module = __import__(mod_name)
    return getattr(module, name)
def load_model(model_fn, model_name, args=None):
    """Instantiate class/function *model_name* from file *model_fn*.

    *args*, when truthy, is passed through as keyword arguments.
    """
    model_cls = load_module(model_fn, model_name)
    if args:
        return model_cls(**args)
    return model_cls()
def load_updater_class(config):
    """Resolve the updater class named in config.updater (not instantiated)."""
    updater_cfg = config.updater
    return load_module(updater_cfg['fn'], updater_cfg['name'])
| 1,201 | 20.464286 | 68 | py |
GANFingerprints | GANFingerprints-master/SNGAN/source/__init__.py | 0 | 0 | 0 | py | |
GANFingerprints | GANFingerprints-master/SNGAN/source/functions/max_sv.py | import chainer.functions as F
from chainer import cuda
def _l2normalize(v, eps=1e-12):
    """L2-normalize *v* on the GPU using fused cupy kernels.

    `norm` reduces to sqrt(sum(x*x)); `div` divides elementwise by
    (norm + eps), with eps guarding against division by zero.
    """
    norm = cuda.reduce('T x', 'T out',
                       'x * x', 'a + b', 'out = sqrt(a)', 0,
                       'norm_sn')
    div = cuda.elementwise('T x, T norm, T eps',
                           'T out',
                           'out = x / (norm + eps)',
                           'div_sn')
    return div(v, norm(v), eps)
def max_singular_value(W, u=None, Ip=1):
    """
    Apply power iteration for the weight parameter

    Estimates the largest singular value of W with Ip power-iteration
    steps. Returns (sigma, u, v); callers typically persist u across
    calls so each call refines the previous estimate.
    """
    if not Ip >= 1:
        raise ValueError("The number of power iterations should be positive integer")
    xp = cuda.get_array_module(W.data)
    if u is None:
        # Fresh random start vector when no cached u is supplied.
        u = xp.random.normal(size=(1, W.shape[0])).astype(xp.float32)
    _u = u
    for _ in range(Ip):
        _v = _l2normalize(xp.dot(_u, W.data), eps=1e-12)
        _u = _l2normalize(xp.dot(_v, W.data.transpose()), eps=1e-12)
    # sigma = u^T W v; computed through F so gradients flow into W.
    sigma = F.sum(F.linear(_u, F.transpose(W)) * _v)
    return sigma, _u, _v
def max_singular_value_fully_differentiable(W, u=None, Ip=1):
    """
    Apply power iteration for the weight parameter (fully differentiable version)

    Same estimate as max_singular_value, but the whole iteration is built
    from chainer functions so gradients flow through every step.
    """
    if not Ip >= 1:
        raise ValueError("The number of power iterations should be positive integer")
    xp = cuda.get_array_module(W.data)
    if u is None:
        u = xp.random.normal(size=(1, W.shape[0])).astype(xp.float32)
    _u = u
    for _ in range(Ip):
        _v = F.normalize(F.matmul(_u, W), eps=1e-12)
        _u = F.normalize(F.matmul(_v, F.transpose(W)), eps=1e-12)
    # Recompute un-normalized u; its L2 norm is the singular-value estimate.
    _u = F.matmul(_v, F.transpose(W))
    norm = F.sqrt(F.sum(_u ** 2))
    return norm, _l2normalize(_u.data), _v
| 1,678 | 31.921569 | 85 | py |
GANFingerprints | GANFingerprints-master/SNGAN/source/functions/__init__.py | 0 | 0 | 0 | py | |
GANFingerprints | GANFingerprints-master/SNGAN/source/inception/inception_score_tf.py | # Code derived from https://github.com/openai/improved-gan/tree/master/inception_score
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
import glob
import scipy.misc
import math
import sys
import chainer
from chainer import functions as F
# Where the pretrained Inception graph is downloaded/extracted.
MODEL_DIR = '/tmp/imagenet'
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# Populated lazily by _init_inception() (triggered at the bottom of this module).
softmax = None
last_layer = None
config = tf.ConfigProto()
# Cap per-process GPU memory so the scorer can share the device.
config.gpu_options.per_process_gpu_memory_fraction = 0.3
def inception_forward(images, layer):
    """Run *images* (N, 3, H, W, values in [0, 255]) through the Inception
    graph and return the activations of *layer*, batched 100 at a time."""
    assert (type(images[0]) == np.ndarray)
    assert (len(images[0].shape) == 3)
    # Sanity checks: pixel values must be in the raw [0, 255] range.
    assert (np.max(images[0]) > 10)
    assert (np.min(images[0]) >= 0.0)
    bs = 100
    # NCHW -> NHWC, as expected by the TF graph's 'ExpandDims:0' input.
    images = images.transpose(0, 2, 3, 1)
    with tf.Session(config=config) as sess:
        preds = []
        n_batches = int(math.ceil(float(len(images)) / float(bs)))
        for i in range(n_batches):
            sys.stdout.write(".")
            sys.stdout.flush()
            inp = images[(i * bs):min((i + 1) * bs, len(images))]
            pred = sess.run(layer, {'ExpandDims:0': inp})
            preds.append(pred)
        preds = np.concatenate(preds, 0)
    return preds
def get_mean_and_cov(images):
    """Return (mean, covariance) of the pool3 features of *images* (FID stats)."""
    before_preds = inception_forward(images, last_layer)
    m = np.mean(before_preds, 0)
    cov = np.cov(before_preds, rowvar=False)
    return m, cov
def get_fid(images, ref_stats=None, images_ref=None, splits=10):
    """Compute the Frechet Inception Distance of *images* against a reference.

    Args:
        images: generated images, shape (N, 3, H, W), values in [0, 255].
        ref_stats: optional precomputed (mean, covariance) of the reference
            pool3 features; if None, *images_ref* must be provided instead.
        images_ref: reference images used when *ref_stats* is None.
        splits: number of disjoint chunks to average the FID over.

    Returns:
        (mean, std) of the per-split FID values.
    """
    # scipy.linalg must be imported explicitly: the module-level
    # `import scipy.misc` does NOT make scipy.linalg available, so the
    # original sqrtm call raised AttributeError at runtime.
    from scipy import linalg

    before_preds = inception_forward(images, last_layer)
    if ref_stats is None:
        if images_ref is None:
            raise ValueError('images_ref should be provided if ref_stats is None')
        m_ref, cov_ref = get_mean_and_cov(images_ref)
    else:
        # The original ignored ref_stats entirely and then hit a NameError
        # on m_ref; honor the documented parameter.
        m_ref, cov_ref = ref_stats
    fids = []
    for i in range(splits):
        part = before_preds[(i * before_preds.shape[0] // splits):((i + 1) * before_preds.shape[0] // splits), :]
        m_gen = np.mean(part, 0)
        cov_gen = np.cov(part, rowvar=False)
        # sqrtm can return a complex matrix whose imaginary part is pure
        # numerical noise; keep only the real component.
        covmean = linalg.sqrtm(np.dot(cov_ref, cov_gen))
        if np.iscomplexobj(covmean):
            covmean = covmean.real
        fid = np.sum((m_ref - m_gen) ** 2) + np.trace(cov_ref + cov_gen - 2 * covmean)
        fids.append(fid)
    return np.mean(fids), np.std(fids)
# Call this function with list of images. Each of elements should be a
# numpy array with values ranging from 0 to 255.
def get_inception_score(images, splits=10):
    """Return (mean, std) Inception Score over *splits* disjoint chunks.

    IS = exp(E_x[KL(p(y|x) || p(y))]) computed from the softmax outputs.
    """
    preds = inception_forward(images, softmax)
    scores = []
    for i in range(splits):
        part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
        # KL(p(y|x) || p(y)) averaged over the split.
        kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
        kl = np.mean(np.sum(kl, 1))
        scores.append(np.exp(kl))
    return np.mean(scores), np.std(scores)
# Call this function with list of images. Each of elements should be a
# numpy array with values ranging from 0 to 255.
def get_inception_accuracy(images, labels):
    """Classification accuracy of the Inception net on *images* vs *labels*.

    If *images* is a list/tuple, the softmax outputs of each element are
    averaged (ensemble) before scoring. Columns 1:1001 of the softmax are
    used — presumably the 1000 ImageNet classes, skipping the background
    unit (TODO confirm against the graph's label layout).
    """
    batch_size = 100
    if isinstance(images, (list, tuple)):
        ims_list = images
        ys_list = []
        for ims in ims_list:
            n, _, _, _ = ims.shape
            # n_batches is computed for the log line only; batching itself
            # happens inside inception_forward.
            n_batches = int(math.ceil(float(n) / float(batch_size)))
            print('batch_size:{}, n_ims{}, n_batches{}'.format(batch_size, n, n_batches))
            print('Calculating inception accuracy...')
            ys = inception_forward(ims, softmax)[:, 1:1001]
            ys_list.append(ys)
        ys = sum(ys_list) / len(ys_list)
    else:
        n, _, _, _, = images.shape
        n_batches = int(math.ceil(float(n) / float(batch_size)))
        print('batch_size:{}, n_ims{}, n_batches{}'.format(batch_size, n, n_batches))
        print('Calculating inception accuracy...')
        ys = inception_forward(images, softmax)[:, 1:1001]
    return F.accuracy(ys, labels).data
# This function is called automatically.
# This function is called automatically.
def _init_inception():
    """Download (if needed) and load the pretrained Inception graph, then
    set the module globals `last_layer` (pool3) and `softmax` (class probs)."""
    global softmax
    global last_layer
    if not os.path.exists(MODEL_DIR):
        os.makedirs(MODEL_DIR)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(MODEL_DIR, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (
                filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
    with tf.gfile.FastGFile(os.path.join(
            MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
    # Works with an arbitrary minibatch size.
    with tf.Session(config=config) as sess:
        pool3 = sess.graph.get_tensor_by_name('pool_3:0')
        ops = pool3.graph.get_operations()
        # HACK: the frozen graph has batch size 1 baked into its shapes;
        # rewrite every output's leading dimension to None (private attr)
        # so any minibatch size can be fed.
        for op_idx, op in enumerate(ops):
            for o in op.outputs:
                shape = o.get_shape()
                shape = [s.value for s in shape]
                new_shape = []
                for j, s in enumerate(shape):
                    if s == 1 and j == 0:
                        new_shape.append(None)
                    else:
                        new_shape.append(s)
                o._shape = tf.TensorShape(new_shape)
        # Rebuild the logits from pool3 with the graph's own softmax weights.
        w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
        last_layer = tf.squeeze(pool3)
        logits = tf.matmul(last_layer, w)
        softmax = tf.nn.softmax(logits)
# Build the TF graph once at import time.
if softmax is None:
    _init_inception()
| 5,784 | 35.613924 | 113 | py |
GANFingerprints | GANFingerprints-master/SNGAN/source/inception/download.py | # code drived from https://github.com/hvy/chainer-inception-score
"""
Including code from the official implementation by OpenAI found at
https://github.com/openai/improved-gan
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import tarfile
import numpy as np
import tensorflow as tf
import glob
import scipy.misc
import math
from six.moves import urllib
from chainer import serializers
from inception_score import Inception
MODEL_DIR = '/tmp/imagenet'
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
def parse_args():
    """Return parsed CLI options (--outfile: where to save the Chainer model)."""
    p = argparse.ArgumentParser()
    p.add_argument('--outfile', type=str, default='inception_score.model')
    return p.parse_args()
def copy_conv(sess, tftensor, layer):
    """Copy TensorFlow convolution layer weights to Chainer model."""
    W = sess.graph.get_tensor_by_name('{}/conv2d_params:0'.format(tftensor)).eval()
    # TF stores conv kernels as (kh, kw, in, out); Chainer wants (out, in, kh, kw).
    W = W.transpose((3, 2, 0, 1))
    assert W.shape == layer.W.data.shape
    layer.W.data = W
def copy_bn(sess, tftensor, layer):
    """Copy TensorFlow batch normalization params weights to Chainer model."""
    gamma = sess.graph.get_tensor_by_name('{}/gamma:0'.format(tftensor)).eval()
    beta = sess.graph.get_tensor_by_name('{}/beta:0'.format(tftensor)).eval()
    avg_mean = sess.graph.get_tensor_by_name('{}/moving_mean:0'.format(tftensor)).eval()
    avg_var = sess.graph.get_tensor_by_name('{}/moving_variance:0'.format(tftensor)).eval()
    # eps is stored as an op attribute, not a tensor.
    eps = sess.graph.get_operation_by_name(tftensor).get_attr('variance_epsilon')
    # Shape checks guard against wiring the wrong TF node to a Chainer link.
    assert layer.beta.data.shape == beta.shape
    assert layer.gamma.data.shape == gamma.shape
    assert layer.avg_mean.shape == avg_mean.shape
    assert layer.avg_var.shape == avg_var.shape
    assert eps > 0.0
    layer.beta.data = beta
    layer.gamma.data = gamma
    layer.avg_mean = avg_mean
    layer.avg_var = avg_var
    layer.eps = eps
def copy_inception(sess, model):
    """Copy weights and params from the graph in the given TensorFlow session
    to the Chainer chain.

    Each section below mirrors one group of the Inception-v3 graph; the
    TF node names ('mixed', 'tower', 'conv_N', ...) are matched to the
    identically named attributes of the Chainer model.
    """
    # Stem: the five plain conv+BN layers before the first mixed block.
    print('Copying first layers ...')
    copy_conv(sess, 'conv', model.conv)
    copy_bn(sess, 'conv/batchnorm', model.bn_conv)
    copy_conv(sess, 'conv_1', model.conv_1)
    copy_bn(sess, 'conv_1/batchnorm', model.bn_conv_1)
    copy_conv(sess, 'conv_2', model.conv_2)
    copy_bn(sess, 'conv_2/batchnorm', model.bn_conv_2)
    copy_conv(sess, 'conv_3', model.conv_3)
    copy_bn(sess, 'conv_3/batchnorm', model.bn_conv_3)
    copy_conv(sess, 'conv_4', model.conv_4)
    copy_bn(sess, 'conv_4/batchnorm', model.bn_conv_4)
    # First mixed blocks: three towers, with extra convs in tower/tower_1.
    for m in ['mixed', 'mixed_1', 'mixed_2']:
        print('Copying ', m, '...')
        copy_conv(sess, '{}/conv'.format(m), getattr(model, m).conv.conv)
        copy_bn(sess, '{}/conv/batchnorm'.format(m), getattr(model, m).conv.bn_conv)
        for t in ['tower', 'tower_1', 'tower_2']:
            copy_conv(sess, '{}/{}/conv'.format(m, t), getattr(getattr(model, m), t).conv)
            copy_bn(sess, '{}/{}/conv/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv)
            if t == 'tower' or t == 'tower_1':
                copy_conv(sess, '{}/{}/conv_1'.format(m, t), getattr(getattr(model, m), t).conv_1)
                copy_bn(sess, '{}/{}/conv_1/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_1)
            if t == 'tower_1':
                copy_conv(sess, '{}/{}/conv_2'.format(m, t), getattr(getattr(model, m), t).conv_2)
                copy_bn(sess, '{}/{}/conv_2/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_2)
    # Grid-reduction block: single tower with three convs.
    for m in ['mixed_3']:
        print('Copying ', m, '...')
        copy_conv(sess, '{}/conv'.format(m), getattr(model, m).conv.conv)
        copy_bn(sess, '{}/conv/batchnorm'.format(m), getattr(model, m).conv.bn_conv)
        for t in ['tower']:
            copy_conv(sess, '{}/{}/conv'.format(m, t), getattr(getattr(model, m), t).conv)
            copy_bn(sess, '{}/{}/conv/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv)
            copy_conv(sess, '{}/{}/conv_1'.format(m, t), getattr(getattr(model, m), t).conv_1)
            copy_bn(sess, '{}/{}/conv_1/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_1)
            copy_conv(sess, '{}/{}/conv_2'.format(m, t), getattr(getattr(model, m), t).conv_2)
            copy_bn(sess, '{}/{}/conv_2/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_2)
    # Middle mixed blocks: tower_1 additionally has conv_3/conv_4.
    for m in ['mixed_4', 'mixed_5', 'mixed_6', 'mixed_7']:
        print('Copying ', m, '...')
        copy_conv(sess, '{}/conv'.format(m), getattr(model, m).conv.conv)
        copy_bn(sess, '{}/conv/batchnorm'.format(m), getattr(model, m).conv.bn_conv)
        for t in ['tower', 'tower_1', 'tower_2']:
            copy_conv(sess, '{}/{}/conv'.format(m, t), getattr(getattr(model, m), t).conv)
            copy_bn(sess, '{}/{}/conv/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv)
            if t == 'tower' or t == 'tower_1':
                copy_conv(sess, '{}/{}/conv_1'.format(m, t), getattr(getattr(model, m), t).conv_1)
                copy_bn(sess, '{}/{}/conv_1/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_1)
                copy_conv(sess, '{}/{}/conv_2'.format(m, t), getattr(getattr(model, m), t).conv_2)
                copy_bn(sess, '{}/{}/conv_2/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_2)
            if t == 'tower_1':
                copy_conv(sess, '{}/{}/conv_3'.format(m, t), getattr(getattr(model, m), t).conv_3)
                copy_bn(sess, '{}/{}/conv_3/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_3)
                copy_conv(sess, '{}/{}/conv_4'.format(m, t), getattr(getattr(model, m), t).conv_4)
                copy_bn(sess, '{}/{}/conv_4/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_4)
    # Second grid-reduction block: two towers, no head conv.
    for m in ['mixed_8']:
        print('Copying ', m, '...')
        for t in ['tower', 'tower_1']:
            copy_conv(sess, '{}/{}/conv'.format(m, t), getattr(getattr(model, m), t).conv)
            copy_bn(sess, '{}/{}/conv/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv)
            copy_conv(sess, '{}/{}/conv_1'.format(m, t), getattr(getattr(model, m), t).conv_1)
            copy_bn(sess, '{}/{}/conv_1/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_1)
            if t == 'tower_1':
                copy_conv(sess, '{}/{}/conv_2'.format(m, t), getattr(getattr(model, m), t).conv_2)
                copy_bn(sess, '{}/{}/conv_2/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_2)
                copy_conv(sess, '{}/{}/conv_3'.format(m, t), getattr(getattr(model, m), t).conv_3)
                copy_bn(sess, '{}/{}/conv_3/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_3)
    # Final mixed blocks: towers with a nested 'mixed' split.
    for m in ['mixed_9', 'mixed_10']:
        print('Copying ', m, '...')
        copy_conv(sess, '{}/conv'.format(m), getattr(model, m).conv.conv)
        copy_bn(sess, '{}/conv/batchnorm'.format(m), getattr(model, m).conv.bn_conv)
        for t in ['tower', 'tower_1', 'tower_2']:
            copy_conv(sess, '{}/{}/conv'.format(m, t), getattr(getattr(model, m), t).conv)
            copy_bn(sess, '{}/{}/conv/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv)
            if t == 'tower' or t == 'tower_1':
                copy_conv(sess, '{}/{}/mixed/conv'.format(m, t), getattr(getattr(model, m), t).mixed.conv.conv)
                copy_bn(sess, '{}/{}/mixed/conv/batchnorm'.format(m, t), getattr(getattr(model, m), t).mixed.conv.bn_conv)
                copy_conv(sess, '{}/{}/mixed/conv_1'.format(m, t), getattr(getattr(model, m), t).mixed.conv_1.conv_1)
                copy_bn(sess, '{}/{}/mixed/conv_1/batchnorm'.format(m, t), getattr(getattr(model, m), t).mixed.conv_1.bn_conv_1)
            if t == 'tower_1':
                copy_conv(sess, '{}/{}/conv_1'.format(m, t), getattr(getattr(model, m), t).conv_1)
                copy_bn(sess, '{}/{}/conv_1/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_1)
    # Classifier head: TF logits are (in, out); Chainer Linear wants (out, in).
    print('Copying logit...')
    w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1].eval()
    b = sess.graph.get_tensor_by_name("softmax/biases:0").eval()
    assert w.T.shape == model.logit.W.shape
    assert b.shape == model.logit.b.shape
    model.logit.W.data = w.T
    model.logit.b.data = b
def download_tf_params():
    """Download and extract pretrained TensorFlow inception model params.

    Fetches the archive at ``DATA_URL`` into ``MODEL_DIR`` (skipping the
    download when the archive is already present) and extracts it there.
    """
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(MODEL_DIR, exist_ok=True)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(MODEL_DIR, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # urlretrieve progress hook: rewrite one status line in place.
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (
                filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        statinfo = os.stat(filepath)
        print()
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    # Use a context manager: the original leaked the tarfile handle.
    with tarfile.open(filepath, 'r:gz') as tar:
        tar.extractall(MODEL_DIR)
def set_tf_params(model, write_graph=False):
    """Copy parameters of the downloaded TensorFlow inception graph into
    the given Chainer model.

    Args:
        model: Chainer ``Inception`` instance to receive the parameters.
        write_graph (bool): If ``True``, also dump the imported graph to the
            ``data`` directory so it can be inspected with TensorBoard.
    """
    graph_path = os.path.join(MODEL_DIR, 'classify_image_graph_def.pb')
    with tf.gfile.FastGFile(graph_path, 'rb') as graph_file:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(graph_file.read())
        _ = tf.import_graph_def(graph_def, name='')
    if write_graph:
        # Write graph to file so that it can be visualized using TensorBoard
        tf.summary.FileWriter('data', graph=graph_def)
    session_config = tf.ConfigProto()
    # Do not grab all GPU memory up front; grow the allocation on demand.
    session_config.gpu_options.allow_growth = True
    with tf.Session(config=session_config) as sess:
        copy_inception(sess, model)
def main(args):
    """Convert the pretrained TF inception model into a Chainer HDF5 file."""
    # Download pretrained TensorFlow model parameters.
    download_tf_params()
    # Build an empty Chainer inception network, then fill it from the
    # pretrained TensorFlow graph.
    model = Inception()
    set_tf_params(model)
    # TODO(hvy): Test score similarity with the original implementation
    print('Saving ', args.outfile, '...')
    serializers.save_hdf5(args.outfile, model)
# Script entry point: parse command-line arguments and run the conversion.
if __name__ == '__main__':
    args = parse_args()
    main(args)
| 10,506 | 41.538462 | 128 | py |
GANFingerprints | GANFingerprints-master/SNGAN/source/inception/inception_score.py | # code derived from https://github.com/hvy/chainer-inception-score
import math
import chainer
from chainer import Chain
from chainer import functions as F
from chainer import links as L
from chainer import Variable
def inception_forward(model, ims, batch_size):
    """Compute the softmax predictions of ``model`` for all images.

    Images are fed in minibatches of ``batch_size`` so the whole set does
    not have to fit in (GPU) memory at once.

    Returns the (n, 1000) slice of the raw 1008-way softmax output; column
    0 and columns 1001-1008 are dummy classes and are dropped.
    """
    num_ims, _, width, height = ims.shape
    xp = model.xp
    preds = xp.empty((num_ims, 1008), dtype=xp.float32)  # Softmax container
    for start in range(0, num_ims, batch_size):
        stop = min(start + batch_size, num_ims)
        batch = Variable(xp.asarray(ims[start:stop]))  # To GPU if using CuPy
        # Resize image to the shape expected by the inception module
        if (width, height) != (299, 299):
            batch = F.resize_images(batch, (299, 299))  # bilinear
        # Inference only: disable train-mode behaviour and graph building.
        with chainer.using_config('train', False), \
                chainer.using_config('enable_backprop', False):
            y = model(batch)
        preds[start:stop] = y.data
    return preds[:, 1:1001]  # 0 and 1001-1008 are the dummies
def inception_score(model, ims, batch_size=100, splits=10):
    """Compute the inception score for given images.

    Default batch_size is 100 and split size is 10. Please refer to the
    official implementation. It is recommended to use at least 50000
    images to obtain a reliable score.

    Returns a ``(mean, std)`` pair of the per-split scores.

    Reference:
    https://github.com/openai/improved-gan/blob/master/inception_score/model.py
    """
    num_ims = ims.shape[0]
    num_batches = int(math.ceil(float(num_ims) / float(batch_size)))
    xp = model.xp
    print('batch_size:{}, n_ims{}, n_batches{}'.format(batch_size, num_ims, num_batches))
    print('Calculating inception scores...')
    ys = inception_forward(model, ims, batch_size)
    # Per split: exp of the mean KL divergence between each prediction and
    # the split's marginal distribution, as in the reference implementation.
    scores = xp.empty((splits), dtype=xp.float32)  # Split inception scores
    for split_idx in range(splits):
        lo = split_idx * num_ims // splits
        hi = (split_idx + 1) * num_ims // splits
        part = ys[lo:hi, :]
        kl = part * (xp.log(part) -
                     xp.log(xp.expand_dims(xp.mean(part, 0), 0)))
        kl = xp.mean(xp.sum(kl, 1))
        scores[split_idx] = xp.exp(kl)
    return xp.mean(scores), xp.std(scores)
def inception_accuracy(model, ims, labels, batch_size=100, splits=10):
    """Compute the inception classification accuracy for given images.

    ``ims`` may be a single image array or a list/tuple of arrays; in the
    latter case the softmax predictions of all arrays are averaged before
    the accuracy is computed. ``splits`` is unused and kept only for
    signature compatibility with :func:`inception_score`.

    Reference:
    https://github.com/openai/improved-gan/blob/master/inception_score/model.py
    """
    # Normalize to a list so both input forms share one code path; the
    # original duplicated this whole body across the two branches.
    ims_list = ims if isinstance(ims, (list, tuple)) else [ims]
    ys_list = []
    for ims_i in ims_list:
        n, c, w, h = ims_i.shape
        n_batches = int(math.ceil(float(n) / float(batch_size)))
        print('batch_size:{}, n_ims{}, n_batches{}'.format(batch_size, n, n_batches))
        print('Calculating inception accuracy...')
        ys_list.append(inception_forward(model, ims_i, batch_size))
    # Averaging over a single-element list leaves the predictions unchanged.
    ys = sum(ys_list) / len(ys_list)
    return F.accuracy(ys, labels).data
class Mixed(Chain):
    """Inception 'mixed' block: every branch is applied to the same input
    and the branch outputs are concatenated along the channel axis."""

    def __init__(self, trunk):
        super().__init__()
        for branch_name, branch in trunk:
            self.add_link(branch_name, branch)
        self.trunk = trunk

    def __call__(self, x):
        outputs = [getattr(self, branch_name)(x)
                   for branch_name, _ in self.trunk]
        return F.concat(outputs)
class Tower(Chain):
    """Sequential stack of links and parameterless functions.

    Entries whose name starts with ``_`` are plain callables (pooling or
    ReLU); all other entries are links registered as children.
    """

    def __init__(self, trunk):
        super().__init__()
        for name, link in trunk:
            if not name.startswith('_'):
                self.add_link(name, link)
        self.trunk = trunk

    def __call__(self, x):
        h = x
        for name, f in self.trunk:
            if not name.startswith('_'):  # Link (conv / batchnorm)
                # The original branched on `'bn' in name` here, but both
                # branches were identical; the dead conditional is removed.
                h = getattr(self, name)(h)
            else:  # AveragePooling2D, MaxPooling2D or ReLU
                h = f(h)
        return h
def _average_pooling_2d(h):
    """3x3 average pooling, stride 1, pad 1 (spatial size preserved)."""
    return F.average_pooling_2d(h, ksize=3, stride=1, pad=1)
def _max_pooling_2d(h):
    """3x3 max pooling, stride 1, pad 1 (spatial size preserved)."""
    return F.max_pooling_2d(h, ksize=3, stride=1, pad=1)
def _max_pooling_2d_320(h):
    """3x3 max pooling, stride 2, no padding (spatial downsampling)."""
    return F.max_pooling_2d(h, ksize=3, stride=2, pad=0)
class Inception(Chain):
    """Chainer port of the TensorFlow Inception-v3 classifier (1008 logits).

    Link names (``conv``, ``tower``, ``mixed_*``, ...) mirror the op names
    of the pretrained TensorFlow graph so that parameters can be copied over
    by name. The commented-out asserts in ``__call__`` document the expected
    activation shapes for a (batch, 3, 299, 299) input.
    """

    def __init__(self):
        # Architecture table: a stem of plain convolutions followed by the
        # mixed/mixed_1 ... mixed_10 inception blocks and a linear classifier.
        super().__init__(
            # Stem convolutions (each followed by its batchnorm below).
            conv=L.Convolution2D(3, 32, 3, stride=2, pad=0),
            conv_1=L.Convolution2D(32, 32, 3, stride=1, pad=0),
            conv_2=L.Convolution2D(32, 64, 3, stride=1, pad=1),
            conv_3=L.Convolution2D(64, 80, 1, stride=1, pad=0),
            conv_4=L.Convolution2D(80, 192, 3, stride=1, pad=0),
            bn_conv=L.BatchNormalization(32),
            bn_conv_1=L.BatchNormalization(32),
            bn_conv_2=L.BatchNormalization(64),
            bn_conv_3=L.BatchNormalization(80),
            bn_conv_4=L.BatchNormalization(192),
            mixed=Mixed([
                ('conv', Tower([
                    ('conv', L.Convolution2D(192, 64, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(64)),
                    ('_relu', F.relu)
                ])),
                ('tower', Tower([
                    ('conv', L.Convolution2D(192, 48, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(48)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(48, 64, 5, stride=1, pad=2)),
                    ('bn_conv_1', L.BatchNormalization(64)),
                    ('_relu_1', F.relu)
                ])),
                ('tower_1', Tower([
                    ('conv', L.Convolution2D(192, 64, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(64)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(64, 96, 3, stride=1, pad=1)),
                    ('bn_conv_1', L.BatchNormalization(96)),
                    ('_relu_1', F.relu),
                    ('conv_2', L.Convolution2D(96, 96, 3, stride=1, pad=1)),
                    ('bn_conv_2', L.BatchNormalization(96)),
                    ('_relu_2', F.relu)
                ])),
                ('tower_2', Tower([
                    ('_pooling', _average_pooling_2d),
                    ('conv', L.Convolution2D(192, 32, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(32)),
                    ('_relu', F.relu)
                ]))
            ]),
            mixed_1=Mixed([
                ('conv', Tower([
                    ('conv', L.Convolution2D(256, 64, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(64)),
                    ('_relu', F.relu)
                ])),
                ('tower', Tower([
                    ('conv', L.Convolution2D(256, 48, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(48)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(48, 64, 5, stride=1, pad=2)),
                    ('bn_conv_1', L.BatchNormalization(64)),
                    ('_relu_1', F.relu)
                ])),
                ('tower_1', Tower([
                    ('conv', L.Convolution2D(256, 64, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(64)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(64, 96, 3, stride=1, pad=1)),
                    ('bn_conv_1', L.BatchNormalization(96)),
                    ('_relu_1', F.relu),
                    ('conv_2', L.Convolution2D(96, 96, 3, stride=1, pad=1)),
                    ('bn_conv_2', L.BatchNormalization(96)),
                    ('_relu_2', F.relu)
                ])),
                ('tower_2', Tower([
                    ('_pooling', _average_pooling_2d),
                    ('conv', L.Convolution2D(256, 64, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(64)),
                    ('_relu', F.relu)
                ]))
            ]),
            mixed_2=Mixed([
                ('conv', Tower([
                    ('conv', L.Convolution2D(288, 64, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(64)),
                    ('_relu', F.relu)
                ])),
                ('tower', Tower([
                    ('conv', L.Convolution2D(288, 48, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(48)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(48, 64, 5, stride=1, pad=2)),
                    ('bn_conv_1', L.BatchNormalization(64)),
                    ('_relu_1', F.relu)
                ])),
                ('tower_1', Tower([
                    ('conv', L.Convolution2D(288, 64, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(64)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(64, 96, 3, stride=1, pad=1)),
                    ('bn_conv_1', L.BatchNormalization(96)),
                    ('_relu_1', F.relu),
                    ('conv_2', L.Convolution2D(96, 96, 3, stride=1, pad=1)),
                    ('bn_conv_2', L.BatchNormalization(96)),
                    ('_relu_2', F.relu)
                ])),
                ('tower_2', Tower([
                    ('_pooling', _average_pooling_2d),
                    ('conv', L.Convolution2D(288, 64, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(64)),
                    ('_relu', F.relu)
                ]))
            ]),
            # mixed_3 downsamples: stride-2 convolutions plus a stride-2 pool.
            mixed_3=Mixed([
                ('conv', Tower([
                    ('conv', L.Convolution2D(288, 384, 3, stride=2, pad=0)),
                    ('bn_conv', L.BatchNormalization(384)),
                    ('_relu', F.relu)
                ])),
                ('tower', Tower([
                    ('conv', L.Convolution2D(288, 64, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(64)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(64, 96, 3, stride=1, pad=1)),
                    ('bn_conv_1', L.BatchNormalization(96)),
                    ('_relu_1', F.relu),
                    ('conv_2', L.Convolution2D(96, 96, 3, stride=2, pad=0)),
                    ('bn_conv_2', L.BatchNormalization(96)),
                    ('_relu_2', F.relu)
                ])),
                ('pool', Tower([
                    ('_pooling', _max_pooling_2d_320)
                ]))
            ]),
            # mixed_4 .. mixed_7 use factorized 1x7 / 7x1 convolutions.
            mixed_4=Mixed([
                ('conv', Tower([
                    ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(192)),
                    ('_relu', F.relu)
                ])),
                ('tower', Tower([
                    ('conv', L.Convolution2D(768, 128, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(128)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(128, 128, (1, 7), stride=1, pad=(0, 3))),
                    ('bn_conv_1', L.BatchNormalization(128)),
                    ('_relu_1', F.relu),
                    ('conv_2', L.Convolution2D(128, 192, (7, 1), stride=1, pad=(3, 0))),
                    ('bn_conv_2', L.BatchNormalization(192)),
                    ('_relu_2', F.relu)
                ])),
                ('tower_1', Tower([
                    ('conv', L.Convolution2D(768, 128, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(128)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(128, 128, (7, 1), stride=1, pad=(3, 0))),
                    ('bn_conv_1', L.BatchNormalization(128)),
                    ('_relu_1', F.relu),
                    ('conv_2', L.Convolution2D(128, 128, (1, 7), stride=1, pad=(0, 3))),
                    ('bn_conv_2', L.BatchNormalization(128)),
                    ('_relu_2', F.relu),
                    ('conv_3', L.Convolution2D(128, 128, (7, 1), stride=1, pad=(3, 0))),
                    ('bn_conv_3', L.BatchNormalization(128)),
                    ('_relu_3', F.relu),
                    ('conv_4', L.Convolution2D(128, 192, (1, 7), stride=1, pad=(0, 3))),
                    ('bn_conv_4', L.BatchNormalization(192)),
                    ('_relu_4', F.relu)
                ])),
                ('tower_2', Tower([
                    ('_pooling', _average_pooling_2d),
                    ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(192)),
                    ('_relu', F.relu)
                ]))
            ]),
            mixed_5=Mixed([
                ('conv', Tower([
                    ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(192)),
                    ('_relu', F.relu)
                ])),
                ('tower', Tower([
                    ('conv', L.Convolution2D(768, 160, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(160)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(160, 160, (1, 7), stride=1, pad=(0, 3))),
                    ('bn_conv_1', L.BatchNormalization(160)),
                    ('_relu_1', F.relu),
                    ('conv_2', L.Convolution2D(160, 192, (7, 1), stride=1, pad=(3, 0))),
                    ('bn_conv_2', L.BatchNormalization(192)),
                    ('_relu_2', F.relu)
                ])),
                ('tower_1', Tower([
                    ('conv', L.Convolution2D(768, 160, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(160)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(160, 160, (7, 1), stride=1, pad=(3, 0))),
                    ('bn_conv_1', L.BatchNormalization(160)),
                    ('_relu_1', F.relu),
                    ('conv_2', L.Convolution2D(160, 160, (1, 7), stride=1, pad=(0, 3))),
                    ('bn_conv_2', L.BatchNormalization(160)),
                    ('_relu_2', F.relu),
                    ('conv_3', L.Convolution2D(160, 160, (7, 1), stride=1, pad=(3, 0))),
                    ('bn_conv_3', L.BatchNormalization(160)),
                    ('_relu_3', F.relu),
                    ('conv_4', L.Convolution2D(160, 192, (1, 7), stride=1, pad=(0, 3))),
                    ('bn_conv_4', L.BatchNormalization(192)),
                    ('_relu_4', F.relu)
                ])),
                ('tower_2', Tower([
                    ('_pooling', _average_pooling_2d),
                    ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(192)),
                    ('_relu', F.relu)
                ]))
            ]),
            mixed_6=Mixed([
                ('conv', Tower([
                    ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(192)),
                    ('_relu', F.relu)
                ])),
                ('tower', Tower([
                    ('conv', L.Convolution2D(768, 160, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(160)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(160, 160, (1, 7), stride=1, pad=(0, 3))),
                    ('bn_conv_1', L.BatchNormalization(160)),
                    ('_relu_1', F.relu),
                    ('conv_2', L.Convolution2D(160, 192, (7, 1), stride=1, pad=(3, 0))),
                    ('bn_conv_2', L.BatchNormalization(192)),
                    ('_relu_2', F.relu)
                ])),
                ('tower_1', Tower([
                    ('conv', L.Convolution2D(768, 160, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(160)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(160, 160, (7, 1), stride=1, pad=(3, 0))),
                    ('bn_conv_1', L.BatchNormalization(160)),
                    ('_relu_1', F.relu),
                    ('conv_2', L.Convolution2D(160, 160, (1, 7), stride=1, pad=(0, 3))),
                    ('bn_conv_2', L.BatchNormalization(160)),
                    ('_relu_2', F.relu),
                    ('conv_3', L.Convolution2D(160, 160, (7, 1), stride=1, pad=(3, 0))),
                    ('bn_conv_3', L.BatchNormalization(160)),
                    ('_relu_3', F.relu),
                    ('conv_4', L.Convolution2D(160, 192, (1, 7), stride=1, pad=(0, 3))),
                    ('bn_conv_4', L.BatchNormalization(192)),
                    ('_relu_4', F.relu)
                ])),
                ('tower_2', Tower([
                    ('_pooling', _average_pooling_2d),
                    ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(192)),
                    ('_relu', F.relu)
                ]))
            ]),
            mixed_7=Mixed([
                ('conv', Tower([
                    ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(192)),
                    ('_relu', F.relu)
                ])),
                ('tower', Tower([
                    ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(192)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(192, 192, (1, 7), stride=1, pad=(0, 3))),
                    ('bn_conv_1', L.BatchNormalization(192)),
                    ('_relu_1', F.relu),
                    ('conv_2', L.Convolution2D(192, 192, (7, 1), stride=1, pad=(3, 0))),
                    ('bn_conv_2', L.BatchNormalization(192)),
                    ('_relu_2', F.relu)
                ])),
                ('tower_1', Tower([
                    ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(192)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(192, 192, (7, 1), stride=1, pad=(3, 0))),
                    ('bn_conv_1', L.BatchNormalization(192)),
                    ('_relu_1', F.relu),
                    ('conv_2', L.Convolution2D(192, 192, (1, 7), stride=1, pad=(0, 3))),
                    ('bn_conv_2', L.BatchNormalization(192)),
                    ('_relu_2', F.relu),
                    ('conv_3', L.Convolution2D(192, 192, (7, 1), stride=1, pad=(3, 0))),
                    ('bn_conv_3', L.BatchNormalization(192)),
                    ('_relu_3', F.relu),
                    ('conv_4', L.Convolution2D(192, 192, (1, 7), stride=1, pad=(0, 3))),
                    ('bn_conv_4', L.BatchNormalization(192)),
                    ('_relu_4', F.relu)
                ])),
                ('tower_2', Tower([
                    ('_pooling', _average_pooling_2d),
                    ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(192)),
                    ('_relu', F.relu)
                ]))
            ]),
            # mixed_8 downsamples again (stride-2 convs and stride-2 pool).
            mixed_8=Mixed([
                ('tower', Tower([
                    ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(192)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(192, 320, 3, stride=2, pad=0)),
                    ('bn_conv_1', L.BatchNormalization(320)),
                    ('_relu_1', F.relu)
                ])),
                ('tower_1', Tower([
                    ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(192)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(192, 192, (1, 7), stride=1, pad=(0, 3))),
                    ('bn_conv_1', L.BatchNormalization(192)),
                    ('_relu_1', F.relu),
                    ('conv_2', L.Convolution2D(192, 192, (7, 1), stride=1, pad=(3, 0))),
                    ('bn_conv_2', L.BatchNormalization(192)),
                    ('_relu_2', F.relu),
                    ('conv_3', L.Convolution2D(192, 192, 3, stride=2, pad=0)),
                    ('bn_conv_3', L.BatchNormalization(192)),
                    ('_relu_3', F.relu)
                ])),
                ('pool', Tower([
                    ('_pooling', _max_pooling_2d_320)
                ]))
            ]),
            # mixed_9 / mixed_10 contain nested Mixed blocks that split a
            # 3x3 convolution into parallel 1x3 and 3x1 convolutions.
            mixed_9=Mixed([
                ('conv', Tower([
                    ('conv', L.Convolution2D(1280, 320, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(320)),
                    ('_relu', F.relu),
                ])),
                ('tower', Tower([
                    ('conv', L.Convolution2D(1280, 384, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(384)),
                    ('_relu', F.relu),
                    ('mixed', Mixed([
                        ('conv', Tower([
                            ('conv', L.Convolution2D(384, 384, (1, 3), stride=1, pad=(0, 1))),
                            ('bn_conv', L.BatchNormalization(384)),
                            ('_relu', F.relu),
                        ])),
                        ('conv_1', Tower([
                            ('conv_1', L.Convolution2D(384, 384, (3, 1), stride=1, pad=(1, 0))),
                            ('bn_conv_1', L.BatchNormalization(384)),
                            ('_relu_1', F.relu),
                        ]))
                    ]))
                ])),
                ('tower_1', Tower([
                    ('conv', L.Convolution2D(1280, 448, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(448)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(448, 384, 3, stride=1, pad=1)),
                    ('bn_conv_1', L.BatchNormalization(384)),
                    ('_relu_1', F.relu),
                    ('mixed', Mixed([
                        ('conv', Tower([
                            ('conv', L.Convolution2D(384, 384, (1, 3), stride=1, pad=(0, 1))),
                            ('bn_conv', L.BatchNormalization(384)),
                            ('_relu', F.relu),
                        ])),
                        ('conv_1', Tower([
                            ('conv_1', L.Convolution2D(384, 384, (3, 1), stride=1, pad=(1, 0))),
                            ('bn_conv_1', L.BatchNormalization(384)),
                            ('_relu_1', F.relu),
                        ]))
                    ]))
                ])),
                ('tower_2', Tower([
                    ('_pooling', _average_pooling_2d),
                    ('conv', L.Convolution2D(1280, 192, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(192)),
                    ('_relu', F.relu)
                ]))
            ]),
            mixed_10=Mixed([
                ('conv', Tower([
                    ('conv', L.Convolution2D(2048, 320, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(320)),
                    ('_relu', F.relu),
                ])),
                ('tower', Tower([
                    ('conv', L.Convolution2D(2048, 384, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(384)),
                    ('_relu', F.relu),
                    ('mixed', Mixed([
                        ('conv', Tower([
                            ('conv', L.Convolution2D(384, 384, (1, 3), stride=1, pad=(0, 1))),
                            ('bn_conv', L.BatchNormalization(384)),
                            ('_relu', F.relu),
                        ])),
                        ('conv_1', Tower([
                            ('conv_1', L.Convolution2D(384, 384, (3, 1), stride=1, pad=(1, 0))),
                            ('bn_conv_1', L.BatchNormalization(384)),
                            ('_relu_1', F.relu),
                        ]))
                    ]))
                ])),
                ('tower_1', Tower([
                    ('conv', L.Convolution2D(2048, 448, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(448)),
                    ('_relu', F.relu),
                    ('conv_1', L.Convolution2D(448, 384, 3, stride=1, pad=1)),
                    ('bn_conv_1', L.BatchNormalization(384)),
                    ('_relu_1', F.relu),
                    ('mixed', Mixed([
                        ('conv', Tower([
                            ('conv', L.Convolution2D(384, 384, (1, 3), stride=1, pad=(0, 1))),
                            ('bn_conv', L.BatchNormalization(384)),
                            ('_relu', F.relu)
                        ])),
                        ('conv_1', Tower([
                            ('conv_1', L.Convolution2D(384, 384, (3, 1), stride=1, pad=(1, 0))),
                            ('bn_conv_1', L.BatchNormalization(384)),
                            ('_relu_1', F.relu)
                        ]))
                    ]))
                ])),
                # NOTE(review): mixed_10's pooling branch uses max pooling
                # while the earlier blocks use average pooling — this mirrors
                # the pretrained TF graph being copied; do not "fix".
                ('tower_2', Tower([
                    ('_pooling', _max_pooling_2d),
                    ('conv', L.Convolution2D(2048, 192, 1, stride=1, pad=0)),
                    ('bn_conv', L.BatchNormalization(192)),
                    ('_relu', F.relu)
                ]))
            ]),
            # Final classifier over the 2048-d pooled features.
            logit=L.Linear(2048, 1008)
        )

    def __call__(self, x, get_feature=False):
        """Input dims are (batch_size, 3, 299, 299).

        If ``get_feature`` is True, returns the 2048-d pooled features;
        otherwise returns the 1008-way softmax output.
        """
        # assert x.shape[1:] == (3, 299, 299)
        # Shift/scale by 128 and 1/128 — assumes 8-bit (0..255) pixel
        # values, mapping them to roughly [-1, 1); confirm at call sites.
        x -= 128.0
        x *= 0.0078125
        h = F.relu(self.bn_conv(self.conv(x)))
        # assert h.shape[1:] == (32, 149, 149)
        h = F.relu(self.bn_conv_1(self.conv_1(h)))
        # assert h.shape[1:] == (32, 147, 147)
        h = F.relu(self.bn_conv_2(self.conv_2(h)))
        # assert h.shape[1:] == (64, 147, 147)
        h = F.max_pooling_2d(h, 3, stride=2, pad=0)
        # assert h.shape[1:] == (64, 73, 73)
        h = F.relu(self.bn_conv_3(self.conv_3(h)))
        # assert h.shape[1:] == (80, 73, 73)
        h = F.relu(self.bn_conv_4(self.conv_4(h)))
        # assert h.shape[1:] == (192, 71, 71)
        h = F.max_pooling_2d(h, 3, stride=2, pad=0)
        # assert h.shape[1:] == (192, 35, 35)
        h = self.mixed(h)
        # assert h.shape[1:] == (256, 35, 35)
        h = self.mixed_1(h)
        # assert h.shape[1:] == (288, 35, 35)
        h = self.mixed_2(h)
        # assert h.shape[1:] == (288, 35, 35)
        h = self.mixed_3(h)
        # assert h.shape[1:] == (768, 17, 17)
        h = self.mixed_4(h)
        # assert h.shape[1:] == (768, 17, 17)
        h = self.mixed_5(h)
        # assert h.shape[1:] == (768, 17, 17)
        h = self.mixed_6(h)
        # assert h.shape[1:] == (768, 17, 17)
        h = self.mixed_7(h)
        # assert h.shape[1:] == (768, 17, 17)
        h = self.mixed_8(h)
        # assert h.shape[1:] == (1280, 8, 8)
        h = self.mixed_9(h)
        # assert h.shape[1:] == (2048, 8, 8)
        h = self.mixed_10(h)
        # assert h.shape[1:] == (2048, 8, 8)
        # Global 8x8 average pooling to a single spatial position.
        h = F.average_pooling_2d(h, 8, 1)
        # assert h.shape[1:] == (2048, 1, 1)
        h = F.reshape(h, (-1, 2048))
        if get_feature:
            return h
        else:
            h = self.logit(h)
            h = F.softmax(h)
            # assert h.shape[1:] == (1008,)
            return h
| 28,246 | 42.059451 | 98 | py |
GANFingerprints | GANFingerprints-master/SNGAN/source/inception/__init__.py | 0 | 0 | 0 | py | |
GANFingerprints | GANFingerprints-master/SNGAN/source/links/sn_embed_id.py | from chainer.functions.connection import embed_id
from chainer.initializers import normal
from chainer import link
from chainer import variable
from chainer.functions.array.broadcast import broadcast_to
from source.functions.max_sv import max_singular_value
import numpy as np
class SNEmbedID(link.Link):
    """Efficient linear layer for one-hot input.

    This is a link that wraps the :func:`~chainer.functions.embed_id` function.
    This link holds the ID (word) embedding matrix ``W`` as a parameter,
    spectrally normalized on every forward pass.

    Args:
        in_size (int): Number of different identifiers (a.k.a. vocabulary
            size).
        out_size (int): Size of embedding vector.
        initialW (2-D array): Initial weight value. If ``None``, then the
            matrix is initialized from the standard normal distribution.
            May also be a callable that takes ``numpy.ndarray`` or
            ``cupy.ndarray`` and edits its value.
        ignore_label (int or None): If ``ignore_label`` is an int value,
            ``i``-th column of return value is filled with ``0``.
        Ip (int): The number of power iteration for calculating the spcetral
            norm of the weights.
        factor (float) : constant factor to adjust spectral norm of W_bar.

    .. seealso:: :func:`chainer.functions.embed_id`

    Attributes:
        W (~chainer.Variable): Embedding parameter matrix.
        W_bar (~chainer.Variable): Spectrally normalized weight parameter.
        u (~numpy.array): Current estimation of the right largest singular vector of W.
        (optional) gamma (~chainer.Variable): the multiplier parameter.
        (optional) factor (float): constant factor to adjust spectral norm of W_bar.
    """

    ignore_label = None

    def __init__(self, in_size, out_size, initialW=None, ignore_label=None, Ip=1, factor=None):
        super(SNEmbedID, self).__init__()
        self.ignore_label = ignore_label
        # Number of power-iteration steps per forward pass.
        self.Ip = Ip
        self.factor = factor
        with self.init_scope():
            if initialW is None:
                initialW = normal.Normal(1.0)
            self.W = variable.Parameter(initialW, (in_size, out_size))
            # u: power-iteration state; persistent (saved/loaded) but not a
            # trainable parameter.
            self.u = np.random.normal(size=(1, in_size)).astype(dtype="f")
            self.register_persistent('u')

    @property
    def W_bar(self):
        """
        Spectral Normalized Weight
        """
        # Estimate the largest singular value of W by power iteration,
        # updating the persistent vector u in place as a side effect.
        sigma, _u, _ = max_singular_value(self.W, self.u, self.Ip)
        if self.factor:
            sigma = sigma / self.factor
        sigma = broadcast_to(sigma.reshape((1, 1)), self.W.shape)
        self.u[:] = _u
        return self.W / sigma

    def __call__(self, x):
        """Extracts the word embedding of given IDs.

        Args:
            x (~chainer.Variable): Batch vectors of IDs.

        Returns:
            ~chainer.Variable: Batch of corresponding embeddings.
        """
        return embed_id.embed_id(x, self.W_bar, ignore_label=self.ignore_label)
| 2,924 | 40.197183 | 95 | py |
GANFingerprints | GANFingerprints-master/SNGAN/source/links/conditional_batch_normalization.py | import numpy
import chainer
from chainer import configuration
from chainer import cuda
from chainer.functions.normalization import batch_normalization
from chainer import initializers
from chainer import link
from chainer.utils import argument
from chainer import variable
from chainer.links import EmbedID
import chainer.functions as F
class ConditionalBatchNormalization(chainer.Chain):
    """
    Conditional Batch Normalization

    Normalizes the input with (unconditioned) batch statistics and then
    applies externally supplied per-sample ``gamma`` / ``beta``.

    Args:
        size (int or tuple of ints): Size (or shape) of channel
            dimensions.
        n_cat (int): the number of categories of categorical variable.
        decay (float): Decay rate of moving average. It is used on training.
        eps (float): Epsilon value for numerical stability.
        dtype (numpy.dtype): Type to use in computing.

    See: `Batch Normalization: Accelerating Deep Network Training by Reducing\
          Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_

    .. seealso::
       :func:`~chainer.functions.batch_normalization`,
       :func:`~chainer.functions.fixed_batch_normalization`

    Attributes:
        avg_mean (numpy.ndarray or cupy.ndarray): Population mean.
        avg_var (numpy.ndarray or cupy.ndarray): Population variance.
        N (int): Count of batches given for fine-tuning.
        decay (float): Decay rate of moving average. It is used on training.
        eps (float): Epsilon value for numerical stability. This value is added
            to the batch variances.
    """

    def __init__(self, size, n_cat, decay=0.9, eps=2e-5, dtype=numpy.float32):
        super(ConditionalBatchNormalization, self).__init__()
        # Running statistics are persistent (serialized) but not trainable.
        self.avg_mean = numpy.zeros(size, dtype=dtype)
        self.register_persistent('avg_mean')
        self.avg_var = numpy.zeros(size, dtype=dtype)
        self.register_persistent('avg_var')
        # N counts batches accumulated in fine-tuning mode.
        self.N = 0
        self.register_persistent('N')
        self.decay = decay
        self.eps = eps
        self.n_cat = n_cat

    def __call__(self, x, gamma, beta, **kwargs):
        """__call__(self, x, gamma, beta, finetune=False)

        Invokes the forward propagation of BatchNormalization.

        In training mode, the BatchNormalization computes moving averages of
        mean and variance for evaluation during training, and normalizes the
        input using batch statistics.

        .. warning::

           ``test`` argument is not supported anymore since v2.
           Instead, use ``chainer.using_config('train', train)``.
           See :func:`chainer.using_config`.

        Args:
            x (Variable): Input variable.
            gamma (Variable): Per-sample scale applied after normalization.
            beta (Variable): Per-sample shift applied after normalization.
            finetune (bool): If it is in the training mode and ``finetune`` is
                ``True``, BatchNormalization runs in fine-tuning mode; it
                accumulates the input array to compute population statistics
                for normalization, and normalizes the input using batch
                statistics.
        """
        argument.check_unexpected_kwargs(
            kwargs, test='test argument is not supported anymore. '
            'Use chainer.using_config')
        finetune, = argument.parse_kwargs(kwargs, ('finetune', False))
        # Normalize with constant scale 1 and shift 0; the conditional
        # gamma/beta are applied afterwards by broadcasting.
        with cuda.get_device_from_id(self._device_id):
            _gamma = variable.Variable(self.xp.ones(
                self.avg_mean.shape, dtype=x.dtype))
        with cuda.get_device_from_id(self._device_id):
            _beta = variable.Variable(self.xp.zeros(
                self.avg_mean.shape, dtype=x.dtype))
        if configuration.config.train:
            if finetune:
                # Fine-tuning mode: use a running (cumulative) average.
                self.N += 1
                decay = 1. - 1. / self.N
            else:
                decay = self.decay
            ret = chainer.functions.batch_normalization(x, _gamma, _beta, eps=self.eps, running_mean=self.avg_mean,
                                                        running_var=self.avg_var, decay=decay)
        else:
            # Use running average statistics or fine-tuned statistics.
            mean = variable.Variable(self.avg_mean)
            var = variable.Variable(self.avg_var)
            ret = batch_normalization.fixed_batch_normalization(
                x, _gamma, _beta, mean, var, self.eps)
        shape = ret.shape
        ndim = len(shape)
        # Right-pad gamma/beta with singleton axes so they broadcast over
        # the spatial dimensions of the normalized output.
        gamma = F.broadcast_to(F.reshape(gamma, list(gamma.shape) + [1] * (ndim - len(gamma.shape))), shape)
        beta = F.broadcast_to(F.reshape(beta, list(beta.shape) + [1] * (ndim - len(beta.shape))), shape)
        return gamma * ret + beta

    def start_finetuning(self):
        """Resets the population count for collecting population statistics.

        This method can be skipped if it is the first time to use the
        fine-tuning mode. Otherwise, this method should be called before
        starting the fine-tuning mode again.
        """
        self.N = 0
| 5,186 | 44.5 | 115 | py |
GANFingerprints | GANFingerprints-master/SNGAN/source/links/sn_linear.py | import chainer
import numpy as np
from chainer.functions.array.broadcast import broadcast_to
from chainer.functions.connection import linear
from chainer.links.connection.linear import Linear
from source.functions.max_sv import max_singular_value
class SNLinear(Linear):
    """Linear layer with Spectral Normalization.

    Args:
        in_size (int): Dimension of input vectors. If ``None``, parameter
            initialization will be deferred until the first forward datasets pass
            at which time the size will be determined.
        out_size (int): Dimension of output vectors.
        nobias (bool): If ``True``, then this function does not use the bias.
        initialW (2-D array): Initial weight value. If ``None``, then this
            function uses the default initializer.
            May also be a callable that takes ``numpy.ndarray`` or
            ``cupy.ndarray`` and edits its value.
        initial_bias (1-D array): Initial bias value. If ``None``, then this
            function uses the default initializer.
            May also be a callable that takes ``numpy.ndarray`` or
            ``cupy.ndarray`` and edits its value.
        use_gamma (bool): If true, apply scalar multiplication to the
            normalized weight (i.e. reparameterize).
        Ip (int): The number of power iteration for calculating the spcetral
            norm of the weights.
        factor (float) : constant factor to adjust spectral norm of W_bar.

    .. seealso:: :func:`~chainer.functions.linear`

    Attributes:
        W (~chainer.Variable): Weight parameter.
        W_bar (~chainer.Variable): Spectrally normalized weight parameter.
        b (~chainer.Variable): Bias parameter.
        u (~numpy.array): Current estimation of the right largest singular vector of W.
        (optional) gamma (~chainer.Variable): the multiplier parameter.
        (optional) factor (float): constant factor to adjust spectral norm of W_bar.
    """

    def __init__(self, in_size, out_size, use_gamma=False, nobias=False,
                 initialW=None, initial_bias=None, Ip=1, factor=None):
        # Spectral-norm options must be set before the parent constructor,
        # which may trigger parameter initialization.
        self.Ip = Ip
        self.use_gamma = use_gamma
        self.factor = factor
        super(SNLinear, self).__init__(
            in_size, out_size, nobias, initialW, initial_bias
        )
        # u: power-iteration state; persistent (serialized) but not trainable.
        self.u = np.random.normal(size=(1, out_size)).astype(dtype="f")
        self.register_persistent('u')

    @property
    def W_bar(self):
        """
        Spectral Normalized Weight
        """
        # Estimate the largest singular value of W by power iteration,
        # updating the persistent vector u in place as a side effect.
        sigma, _u, _ = max_singular_value(self.W, self.u, self.Ip)
        if self.factor:
            sigma = sigma / self.factor
        sigma = broadcast_to(sigma.reshape((1, 1)), self.W.shape)
        self.u[:] = _u
        if hasattr(self, 'gamma'):
            # Reparameterized form: learnable scalar times normalized W.
            return broadcast_to(self.gamma, self.W.shape) * self.W / sigma
        else:
            return self.W / sigma

    def _initialize_params(self, in_size):
        super(SNLinear, self)._initialize_params(in_size)
        if self.use_gamma:
            # Initialize gamma to the largest singular value of the initial W
            # so that W_bar starts out equal to W.
            _, s, _ = np.linalg.svd(self.W.data)
            with self.init_scope():
                self.gamma = chainer.Parameter(s[0], (1, 1))

    def __call__(self, x):
        """Applies the linear layer.

        Args:
            x (~chainer.Variable): Batch of input vectors.

        Returns:
            ~chainer.Variable: Output of the linear layer.
        """
        # Deferred initialization: W is created on the first forward pass
        # when in_size was not given to the constructor.
        if self.W.data is None:
            self._initialize_params(x.size // x.shape[0])
        return linear.linear(x, self.W_bar, self.b)
| 3,641 | 38.586957 | 87 | py |
GANFingerprints | GANFingerprints-master/SNGAN/source/links/categorical_conditional_batch_normalization.py | import numpy
import chainer
from chainer import configuration
from chainer import cuda
from chainer.functions.normalization import batch_normalization
from chainer import initializers
from chainer import link
from chainer.utils import argument
from chainer import variable
from chainer.links import EmbedID
import chainer.functions as F
from source.links.conditional_batch_normalization import ConditionalBatchNormalization
class CategoricalConditionalBatchNormalization(ConditionalBatchNormalization):
    """
    Conditional Batch Normalization whose gamma/beta are looked up from
    per-category embedding tables indexed by a categorical variable.
    Args:
        size (int or tuple of ints): Size (or shape) of channel
            dimensions.
        n_cat (int): the number of categories of categorical variable.
        decay (float): Decay rate of moving average. It is used on training.
        eps (float): Epsilon value for numerical stability.
        dtype (numpy.dtype): Type to use in computing.
        use_gamma (bool): If ``True``, use scaling parameter. Otherwise, use
            unit(1) which makes no effect.
        use_beta (bool): If ``True``, use shifting parameter. Otherwise, use
            unit(0) which makes no effect.
    See: `Batch Normalization: Accelerating Deep Network Training by Reducing\
          Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_
    .. seealso::
       :func:`~chainer.functions.batch_normalization`,
       :func:`~chainer.functions.fixed_batch_normalization`
    Attributes:
        gamma (~chainer.Variable): Scaling parameter.
        beta (~chainer.Variable): Shifting parameter.
        avg_mean (numpy.ndarray or cupy.ndarray): Population mean.
        avg_var (numpy.ndarray or cupy.ndarray): Population variance.
        N (int): Count of batches given for fine-tuning.
        decay (float): Decay rate of moving average. It is used on training.
        eps (float): Epsilon value for numerical stability. This value is added
            to the batch variances.
    """
    def __init__(self, size, n_cat, decay=0.9, eps=2e-5, dtype=numpy.float32,
                 initial_gamma=None, initial_beta=None):
        super(CategoricalConditionalBatchNormalization, self).__init__(
            size=size, n_cat=n_cat, decay=decay, eps=eps, dtype=dtype)
        # One gamma and one beta embedding row per category; gammas default
        # to 1 and betas to 0, i.e. an identity affine transform.
        with self.init_scope():
            if initial_gamma is None:
                initial_gamma = 1
            initial_gamma = initializers._get_initializer(initial_gamma)
            initial_gamma.dtype = dtype
            self.gammas = EmbedID(n_cat, size, initialW=initial_gamma)
            if initial_beta is None:
                initial_beta = 0
            initial_beta = initializers._get_initializer(initial_beta)
            initial_beta.dtype = dtype
            self.betas = EmbedID(n_cat, size, initialW=initial_beta)
    def __call__(self, x, c, finetune=False, **kwargs):
        """__call__(self, x, c, finetune=False)
        Invokes the forward propagation of BatchNormalization.
        In training mode, the BatchNormalization computes moving averages of
        mean and variance for evaluation during training, and normalizes the
        input using batch statistics.
        .. warning::
           ``test`` argument is not supported anymore since v2.
           Instead, use ``chainer.using_config('train', train)``.
           See :func:`chainer.using_config`.
        Args:
            x (Variable): Input variable.
            c (Variable): Input variable for conditioning gamma and beta
            finetune (bool): If it is in the training mode and ``finetune`` is
                ``True``, BatchNormalization runs in fine-tuning mode; it
                accumulates the input array to compute population statistics
                for normalization, and normalizes the input using batch
                statistics.
        """
        # ``weights`` is an optional keyword; parse_kwargs pops it so the
        # remaining kwargs can be forwarded to the parent link unchanged.
        weights, = argument.parse_kwargs(kwargs, ('weights', None))
        if c.ndim == 2 and weights is not None:
            # Soft conditioning: c holds several category indices per sample
            # and ``weights`` mixes their gamma/beta embeddings.
            _gamma_c = self.gammas(c)
            _beta_c = self.betas(c)
            _gamma_c = F.broadcast_to(F.expand_dims(weights, 2), _gamma_c.shape) * _gamma_c
            _beta_c = F.broadcast_to(F.expand_dims(weights, 2), _beta_c.shape) * _beta_c
            gamma_c = F.sum(_gamma_c, 1)
            beta_c = F.sum(_beta_c, 1)
        else:
            # Hard conditioning: one category index per sample.
            gamma_c = self.gammas(c)
            beta_c = self.betas(c)
        return super(CategoricalConditionalBatchNormalization, self).__call__(x, gamma_c, beta_c, **kwargs)
    def start_finetuning(self):
        """Resets the population count for collecting population statistics.
        This method can be skipped if it is the first time to use the
        fine-tuning mode. Otherwise, this method should be called before
        starting the fine-tuning mode again.
        """
        self.N = 0
| 4,747 | 45.097087 | 107 | py |
GANFingerprints | GANFingerprints-master/SNGAN/source/links/sn_convolution_nd.py | import numpy as np
from chainer.functions.connection import convolution_nd
from chainer import initializers
from chainer import link
from chainer.utils import conv_nd
from chainer import variable
from chainer.functions.array.broadcast import broadcast_to
from source.functions.max_sv import max_singular_value
class SNConvolutionND(link.Link):
    """N-dimensional convolution layer with spectral normalization.
    This link wraps the :func:`~chainer.functions.convolution_nd` function and
    holds the filter weight and bias vector as parameters.
    Args:
        ndim (int): Number of spatial dimensions.
        in_channels (int): Number of channels of input arrays.
        out_channels (int): Number of channels of output arrays.
        ksize (int or tuple of ints): Size of filters (a.k.a. kernels).
            ``ksize=k`` and ``ksize=(k, k, ..., k)`` are equivalent.
        stride (int or tuple of ints): Stride of filter application.
            ``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent.
        pad (int or tuple of ints): Spatial padding width for input arrays.
            ``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
        nobias (bool): If ``True``, then this function does not use the bias.
        initialW (array): Initial weight array. If ``None``, the default
            initializer is used.
            May be a callable that takes ``numpy.ndarray`` or
            ``cupy.ndarray`` and edits its value.
        initial_bias (array): Initial bias vector. If ``None``, the bias is
            set to zero.
            May be a callable that takes ``numpy.ndarray`` or ``cupy.ndarray``
            and edits its value.
        cover_all (bool): If ``True``, all spatial locations are convoluted
            into some output pixels. It may make the output size larger.
            ``cover_all`` needs to be ``False`` if you want to use cuDNN.
        use_gamma (bool): If true, apply scalar multiplication to the
            normalized weight (i.e. reparameterize).
        Ip (int): The number of power iteration for calculating the spectral
            norm of the weights.
        factor (float) : constant factor to adjust spectral norm of W_bar.
    .. seealso::
       See :func:`~chainer.functions.convolution_nd` for the definition of
       N-dimensional convolution. See
       :func:`~chainer.functions.convolution_2d` for the definition of
       two-dimensional convolution.
    Attributes:
        W (~chainer.Variable): Weight parameter.
        W_bar (~chainer.Variable): Spectrally normalized weight parameter.
        b (~chainer.Variable): Bias parameter. If ``initial_bias`` is ``None``,
            set to ``None``.
        u (~numpy.array): Current estimation of the right largest singular vector of W.
        (optional) gamma (~chainer.Variable): the multiplier parameter.
        (optional) factor (float): constant factor to adjust spectral norm of W_bar.
    """
    def __init__(self, ndim, in_channels, out_channels, ksize, stride=1, pad=0,
                 nobias=False, initialW=None, initial_bias=None,
                 cover_all=False, use_gamma=False, Ip=1, factor=None):
        super(SNConvolutionND, self).__init__()
        ksize = conv_nd.as_tuple(ksize, ndim)
        self.stride = stride
        self.pad = pad
        self.cover_all = cover_all
        self.use_gamma = use_gamma
        self.Ip = Ip
        # Persistent power-iteration state (one row vector per link).
        self.u = np.random.normal(size=(1, out_channels)).astype(dtype="f")
        self.register_persistent('u')
        self.factor = factor
        with self.init_scope():
            # Filter layout: (out_channels, in_channels, k_1, ..., k_ndim).
            W_shape = (out_channels, in_channels) + ksize
            self.W = variable.Parameter(
                initializers._get_initializer(initialW), W_shape)
            if nobias:
                self.b = None
            else:
                if initial_bias is None:
                    initial_bias = 0
                initial_bias = initializers._get_initializer(initial_bias)
                self.b = variable.Parameter(initial_bias, out_channels)
            if self.use_gamma:
                # gamma starts at the largest singular value of the
                # flattened initial W so W_bar initially equals W in scale.
                W_mat = self.W.data.reshape(self.W.shape[0], -1)
                _, s, _ = np.linalg.svd(W_mat)
                self.gamma = variable.Parameter(s[0], (1,) * len(self.W.shape))
    @property
    def W_bar(self):
        """Spectrally normalized weight: W divided by an estimate of its
        largest singular value (seen as an out_channels x rest matrix).
        """
        W_mat = self.W.reshape(self.W.shape[0], -1)
        sigma, _u, _ = max_singular_value(W_mat, self.u, self.Ip)
        if self.factor:
            sigma = sigma / self.factor
        sigma = broadcast_to(sigma.reshape([1] * len(self.W.shape)), self.W.shape)
        # NOTE(review): updated unconditionally, unlike SNConvolution2D
        # which guards with chainer.config.train — confirm intent.
        self.u[:] = _u
        if hasattr(self, 'gamma'):
            return broadcast_to(self.gamma, self.W.shape) * self.W / sigma
        else:
            return self.W / sigma
    def __call__(self, x):
        """Applies N-dimensional convolution layer.
        Args:
            x (~chainer.Variable): Input image.
        Returns:
            ~chainer.Variable: Output of convolution.
        """
        return convolution_nd.convolution_nd(
            x, self.W_bar, self.b, self.stride, self.pad, cover_all=self.cover_all)
| 5,130 | 43.617391 | 87 | py |
GANFingerprints | GANFingerprints-master/SNGAN/source/links/__init__.py | 0 | 0 | 0 | py | |
GANFingerprints | GANFingerprints-master/SNGAN/source/links/sn_convolution_2d.py | import chainer
import numpy as np
from chainer import cuda
from chainer.functions.array.broadcast import broadcast_to
from chainer.functions.connection import convolution_2d
from chainer.links.connection.convolution_2d import Convolution2D
from source.functions.max_sv import max_singular_value
class SNConvolution2D(Convolution2D):
    """Two-dimensional convolutional layer with spectral normalization.
    This link wraps the :func:`~chainer.functions.convolution_2d` function and
    holds the filter weight and bias vector as parameters.
    Args:
        in_channels (int): Number of channels of input arrays. If ``None``,
            parameter initialization will be deferred until the first forward
            datasets pass at which time the size will be determined.
        out_channels (int): Number of channels of output arrays.
        ksize (int or pair of ints): Size of filters (a.k.a. kernels).
            ``ksize=k`` and ``ksize=(k, k)`` are equivalent.
        stride (int or pair of ints): Stride of filter applications.
            ``stride=s`` and ``stride=(s, s)`` are equivalent.
        pad (int or pair of ints): Spatial padding width for input arrays.
            ``pad=p`` and ``pad=(p, p)`` are equivalent.
        wscale (float): Scaling factor of the initial weight.
        bias (float): Initial bias value.
        nobias (bool): If ``True``, then this link does not use the bias term.
        initialW (4-D array): Initial weight value. If ``None``, then this
            function uses to initialize ``wscale``.
            May also be a callable that takes ``numpy.ndarray`` or
            ``cupy.ndarray`` and edits its value.
        initial_bias (1-D array): Initial bias value. If ``None``, then this
            function uses to initialize ``bias``.
            May also be a callable that takes ``numpy.ndarray`` or
            ``cupy.ndarray`` and edits its value.
        use_gamma (bool): If true, apply scalar multiplication to the
            normalized weight (i.e. reparameterize).
        Ip (int): The number of power iteration for calculating the spectral
            norm of the weights.
        factor (float) : constant factor to adjust spectral norm of W_bar.
    .. seealso::
       See :func:`chainer.functions.convolution_2d` for the definition of
       two-dimensional convolution.
    Attributes:
        W (~chainer.Variable): Weight parameter.
        W_bar (~chainer.Variable): Spectrally normalized weight parameter.
        b (~chainer.Variable): Bias parameter.
        u (~numpy.array): Current estimation of the right largest singular vector of W.
        (optional) gamma (~chainer.Variable): the multiplier parameter.
        (optional) factor (float): constant factor to adjust spectral norm of W_bar.
    """
    def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
                 nobias=False, initialW=None, initial_bias=None, use_gamma=False, Ip=1, factor=None):
        # Spectral-norm settings must be set before the parent constructor
        # (it may initialize parameters immediately when in_channels is given).
        self.Ip = Ip
        self.use_gamma = use_gamma
        self.factor = factor
        super(SNConvolution2D, self).__init__(
            in_channels, out_channels, ksize, stride, pad,
            nobias, initialW, initial_bias)
        # Persistent power-iteration state, serialized with the link.
        self.u = np.random.normal(size=(1, out_channels)).astype(dtype="f")
        self.register_persistent('u')
    @property
    def W_bar(self):
        """Spectrally normalized weight: W divided by an estimate of its
        largest singular value (of W flattened to out_channels x rest).
        """
        W_mat = self.W.reshape(self.W.shape[0], -1)
        sigma, _u, _ = max_singular_value(W_mat, self.u, self.Ip)
        if self.factor:
            sigma = sigma / self.factor
        sigma = broadcast_to(sigma.reshape((1, 1, 1, 1)), self.W.shape)
        if chainer.config.train:
            # Update estimated 1st singular vector
            self.u[:] = _u
        if hasattr(self, 'gamma'):
            # Reparameterized form: learnable scalar gamma restores scale.
            return broadcast_to(self.gamma, self.W.shape) * self.W / sigma
        else:
            return self.W / sigma
    def _initialize_params(self, in_size):
        # Parent allocates W/b; optionally create gamma initialized to the
        # largest singular value of the flattened initial W.
        super(SNConvolution2D, self)._initialize_params(in_size)
        if self.use_gamma:
            W_mat = self.W.data.reshape(self.W.shape[0], -1)
            _, s, _ = np.linalg.svd(W_mat)
            with self.init_scope():
                self.gamma = chainer.Parameter(s[0], (1, 1, 1, 1))
    def __call__(self, x):
        """Applies the convolution layer.
        Args:
            x (~chainer.Variable): Input image.
        Returns:
            ~chainer.Variable: Output of the convolution.
        """
        # Lazy initialization when in_channels was None at construction;
        # channel count is taken from the NCHW input.
        if self.W.data is None:
            self._initialize_params(x.shape[1])
        return convolution_2d.convolution_2d(
            x, self.W_bar, self.b, self.stride, self.pad)
| 4,687 | 42.009174 | 101 | py |
GANFingerprints | GANFingerprints-master/SNGAN/source/miscs/random_samples.py | import numpy as np
import chainer
def sample_continuous(dim, batchsize, distribution='normal', xp=np):
    """Draw a (batchsize, dim) float32 batch of latent samples.

    ``distribution`` selects a standard normal or uniform on [-1, 1];
    any other value raises NotImplementedError.  ``xp`` lets the caller
    switch between numpy and cupy.
    """
    if distribution == "normal":
        samples = xp.random.randn(batchsize, dim)
    elif distribution == "uniform":
        samples = xp.random.uniform(-1, 1, (batchsize, dim))
    else:
        raise NotImplementedError
    return samples.astype(xp.float32)
def sample_categorical(n_cat, batchsize, distribution='uniform', xp=np):
    """Draw ``batchsize`` int32 category labels uniformly from [0, n_cat).

    Only the 'uniform' distribution is supported; anything else raises
    NotImplementedError.  ``xp`` selects numpy or cupy.
    """
    if distribution != 'uniform':
        raise NotImplementedError
    labels = xp.random.randint(low=0, high=n_cat, size=(batchsize))
    return labels.astype(xp.int32)
def sample_from_categorical_distribution(batch_probs):
    """Sample a batch of actions from a batch of action probabilities.

    Uses the Gumbel-max trick: the argmax of (log p + Gumbel noise) is an
    exact sample from the categorical distribution p.

    Args:
        batch_probs (ndarray): batch of action probabilities BxA
    Returns:
        ndarray consisting of sampled action indices
    """
    # Works for both numpy and cupy arrays.
    xp = chainer.cuda.get_array_module(batch_probs)
    noise = xp.random.gumbel(size=batch_probs.shape)
    perturbed = xp.log(batch_probs) + noise
    return xp.argmax(perturbed, axis=1).astype(np.int32, copy=False)
| 1,099 | 31.352941 | 86 | py |
GANFingerprints | GANFingerprints-master/SNGAN/source/miscs/__init__.py | 0 | 0 | 0 | py | |
GANFingerprints | GANFingerprints-master/CramerGAN/gan/main.py | import sys
import numpy as np
import core
from utils.misc import pp, visualize
import tensorflow as tf
# Command-line configuration: tf.app.flags parses these when tf.app.run()
# executes and exposes the values through the FLAGS singleton below.
flags = tf.app.flags
flags.DEFINE_integer("max_iteration", 150000, "Epoch to train [150000]")
flags.DEFINE_float("learning_rate", .0001, "Learning rate [.0001]")
flags.DEFINE_float("learning_rate_D", -1, "Learning rate for discriminator, if negative same as generator [-1]")
flags.DEFINE_boolean("MMD_lr_scheduler", True, "Whether to use lr scheduler based on 3-sample test")
flags.DEFINE_float("decay_rate", .5, "Decay rate [.5]")
flags.DEFINE_float("gp_decay_rate", 1.0, "Decay rate [1.0]")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_float("init", 0.1, "Initialization value [0.1]")
flags.DEFINE_integer("batch_size", 32, "The size of batch images [1000]")
flags.DEFINE_integer("real_batch_size", -1, "The size of batch images for real samples. If -1 then same as batch_size [-1]")
flags.DEFINE_integer("output_size", 128, "The size of the output images to produce [64]")
flags.DEFINE_integer("c_dim", 3, "Dimension of image color. [3]")
# FIX: the help strings of "dataset" and "name" were swapped (and contained
# typos "fro saving puposes"); "dataset" picks the data set, "name" labels
# the run for saving.
flags.DEFINE_string("dataset", "celebA", "The name of dataset [celebA, mnist, lsun, cifar10]")
flags.DEFINE_string("name", "mmd_test", "The name of the model for saving purposes")
flags.DEFINE_string("checkpoint_dir", "checkpoint_mmd", "Directory name to save the checkpoints [checkpoint_mmd]")
flags.DEFINE_string("sample_dir", "samples_mmd", "Directory name to save the image samples [samples_mmd]")
flags.DEFINE_string("log_dir", "logs_mmd", "Directory name to save the image samples [logs_mmd]")
flags.DEFINE_string("data_dir", "./data", "Directory containing datasets [./data]")
flags.DEFINE_string("architecture", "dcgan", "The name of the architecture [dcgan, g-resnet5, dcgan5]")
flags.DEFINE_string("kernel", "", "The name of the architecture ['', 'mix_rbf', 'mix_rq', 'distance', 'dot', 'mix_rq_dot']")
flags.DEFINE_string("model", "mmd", "The model type [mmd, cramer, wgan_gp]")
flags.DEFINE_boolean("is_train", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("visualize", False, "True for visualizing, False for nothing [False]")
flags.DEFINE_boolean("is_demo", False, "For testing [False]")
flags.DEFINE_float("gradient_penalty", 0.0, "Use gradient penalty [0.0]")
flags.DEFINE_integer("threads", 64, "Upper limit for number of threads [np.inf]")
flags.DEFINE_integer("dsteps", 5, "Number of discriminator steps in a row [1] ")
flags.DEFINE_integer("gsteps", 1, "Number of generator steps in a row [1] ")
# BUG FIX: the original passed "..."[1] (string indexing) as the help text,
# so the registered help string was just the character 'u'.  The "[1]"
# belongs inside the string literal.
flags.DEFINE_integer("start_dsteps", 5, "Number of discriminator steps in a row during first 20 steps and every 100th step [1]")
flags.DEFINE_integer("df_dim", 64, "Discriminator no of channels at first conv layer [64]")
flags.DEFINE_integer("dof_dim", 16, "No of discriminator output features [16]")
flags.DEFINE_integer("gf_dim", 64, "no of generator channels [64]")
flags.DEFINE_boolean("batch_norm", True, "Use of batch norm [False] (always False for discriminator if gradient_penalty > 0)")
flags.DEFINE_boolean("log", False, "Whether to write log to a file in samples directory [True]")
flags.DEFINE_string("suffix", '', "For additional settings ['', '_tf_records']")
flags.DEFINE_boolean('compute_scores', False, "Compute scores [True]")
flags.DEFINE_float("gpu_mem", .9, "GPU memory fraction limit [0.9]")
flags.DEFINE_float("L2_discriminator_penalty", 0.0, "L2 penalty on discriminator features [0.0]")
flags.DEFINE_integer("no_of_samples", 100000, "number of samples to produce")
flags.DEFINE_boolean("print_pca", False, "")
flags.DEFINE_integer("save_layer_outputs", 0, "Whether to save_layer_outputs. If == 2, saves outputs at exponential steps: 1, 2, 4, ..., 512 and every 1000. [0, 1, 2]")
flags.DEFINE_string("output_dir_of_test_samples", 'samples_mmd', "Output directory for testing samples")
flags.DEFINE_integer("random_seed", 0, "Random seed")
# Parsed flag values, read module-wide and inside main().
FLAGS = flags.FLAGS
class EasyDict(dict):
    """Dictionary whose entries are also reachable as attributes.

    Reading a missing attribute raises KeyError (from the underlying dict
    lookup) rather than AttributeError, mirroring mapping semantics.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def __getattr__(self, name):
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]
def create_session(config_dict=None, force_as_default=False):
    """Create a tf.Session configured from a flat dict of dotted options.

    Args:
        config_dict: mapping like {'gpu_options.allow_growth': True}; each
            dotted key is resolved attribute-by-attribute on a ConfigProto.
            Defaults to no overrides.
        force_as_default: if True, permanently enter the session's context
            manager so it becomes the process-wide default session.
    Returns:
        The configured tf.Session.
    """
    # FIX: the original used the mutable default argument `config_dict=dict()`;
    # a None sentinel is the safe, equivalent idiom.
    if config_dict is None:
        config_dict = {}
    config = tf.ConfigProto()
    for key, value in config_dict.items():
        # Walk nested proto messages, e.g. 'gpu_options.allow_growth'.
        fields = key.split('.')
        obj = config
        for field in fields[:-1]:
            obj = getattr(obj, field)
        setattr(obj, fields[-1], value)
    session = tf.Session(config=config)
    if force_as_default:
        # NOTE: relies on private attributes of the context manager so the
        # session stays installed as default without a `with` block.
        session._default_session = session.as_default()
        session._default_session.enforce_nesting = False
        session._default_session.__enter__()
    return session
# Module-level one-time setup: on first import (no default session yet),
# seed numpy and TF from FLAGS.random_seed and install a default session
# built from a small EasyDict of ConfigProto overrides.
if tf.get_default_session() is None:
    np.random.seed(FLAGS.random_seed)
    tf.set_random_seed(np.random.randint(1 << 31))
    tf_config = EasyDict() # TensorFlow session config, set by tfutil.init_tf().
    tf_config['graph_options.place_pruned_graph'] = True # False (default) = Check that all ops are available on the designated device. True = Skip the check for ops that are not used.
    tf_config['gpu_options.allow_growth'] = False # False (default) = Allocate all GPU memory at the beginning. True = Allocate only as much GPU memory as needed.
    create_session(tf_config, force_as_default=True)
def main(_):
    """Entry point invoked by tf.app.run(): build the selected GAN model
    from FLAGS and run the requested mode (train / PCA / visualize / sample).
    """
    pp.pprint(FLAGS.__flags)
    # Cap CPU threads and GPU memory only when a finite thread limit is set.
    if FLAGS.threads < np.inf:
        sess_config = tf.ConfigProto(intra_op_parallelism_threads=FLAGS.threads)
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_mem
    else:
        sess_config = tf.ConfigProto()
    # Pick the model class from FLAGS.model.
    # NOTE(review): if FLAGS.model matches none of these branches, `Model`
    # is left unbound and the code below raises NameError — confirm whether
    # an explicit error is wanted.
    if 'mmd' in FLAGS.model:
        from core.model import MMD_GAN as Model
    elif FLAGS.model == 'wgan_gp':
        from core.wgan_gp import WGAN_GP as Model
    elif 'cramer' in FLAGS.model:
        from core.cramer import Cramer_GAN as Model
    with tf.Session(config=sess_config) as sess:
        # Known datasets come with fixed image geometry; anything else uses
        # the output_size / c_dim flags directly.
        if FLAGS.dataset == 'mnist':
            gan = Model(sess, config=FLAGS, batch_size=FLAGS.batch_size, output_size=28, c_dim=1,
                        data_dir=FLAGS.data_dir)
        elif FLAGS.dataset == 'cifar10':
            gan = Model(sess, config=FLAGS, batch_size=FLAGS.batch_size, output_size=32, c_dim=3,
                        data_dir=FLAGS.data_dir)
        elif FLAGS.dataset in ['celebA', 'lsun']:
            gan = Model(sess, config=FLAGS, batch_size=FLAGS.batch_size, output_size=FLAGS.output_size, c_dim=3,
                        data_dir=FLAGS.data_dir)
        else:
            gan = Model(sess, config=FLAGS, batch_size=FLAGS.batch_size,
                        output_size=FLAGS.output_size, c_dim=FLAGS.c_dim,
                        data_dir=FLAGS.data_dir)
        # Dispatch on the requested mode; default is sample generation.
        if FLAGS.is_train:
            gan.train()
        elif FLAGS.print_pca:
            gan.print_pca()
        elif FLAGS.visualize:
            gan.load_checkpoint()
            visualize(sess, gan, FLAGS, 2)
        else:
            gan.get_samples(FLAGS.no_of_samples, layers=[-1])
        # Restore stdout if the model redirected it to a log file.
        if FLAGS.log:
            sys.stdout = gan.old_stdout
            gan.log_file.close()
        gan.sess.close()
# Script entry point: tf.app.run parses FLAGS then calls main(_).
if __name__ == '__main__':
    tf.app.run()
| 7,225 | 53.330827 | 191 | py |
GANFingerprints | GANFingerprints-master/CramerGAN/gan/compute_scores.py | from __future__ import division, print_function
import os.path, sys, tarfile
import numpy as np
from scipy import linalg
from six.moves import range, urllib
from sklearn.metrics.pairwise import polynomial_kernel
import tensorflow as tf
from tqdm import tqdm
# from tqdm docs: https://pypi.python.org/pypi/tqdm#hooks-and-callbacks
class TqdmUpTo(tqdm):
    """tqdm subclass whose ``update_to`` matches the signature of
    ``urllib.request.urlretrieve``'s ``reporthook`` (blocks transferred,
    block size, total size), so it can display download progress."""
    def update_to(self, b=1, bsize=1, tsize=None):
        # tsize becomes known once the server reports Content-Length.
        if tsize is not None:
            self.total = tsize
        self.update(b * bsize - self.n)  # also sets self.n = b * bsize
class Inception(object):
    """Wrapper around the pretrained 2015 Inception graph.

    Downloads/extracts the frozen graph on first use, then exposes:
    ``softmax`` (1008-way class probabilities), ``coder`` (2048-d pool3
    features), ``sess`` (owning tf.Session) and ``input`` (feed tensor name).
    """
    def __init__(self):
        MODEL_DIR = '/tmp/imagenet'
        DATA_URL = ('http://download.tensorflow.org/models/image/imagenet/'
                    'inception-2015-12-05.tgz')
        self.softmax_dim = 1008
        self.coder_dim = 2048
        if not os.path.exists(MODEL_DIR):
            os.makedirs(MODEL_DIR)
        filename = DATA_URL.split('/')[-1]
        filepath = os.path.join(MODEL_DIR, filename)
        # Download the archive only once, with a progress bar.
        if not os.path.exists(filepath):
            with TqdmUpTo(unit='B', unit_scale=True, miniters=1,
                          desc=filename) as t:
                filepath, _ = urllib.request.urlretrieve(
                    DATA_URL, filepath, reporthook=t.update_to)
        # NOTE(review): extraction runs on every construction, even when the
        # files are already present — harmless but wasteful.
        tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
        with tf.gfile.FastGFile(os.path.join(
                MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
        # Works with an arbitrary minibatch size.
        self.sess = sess = tf.Session()
        #with sess:
        pool3 = sess.graph.get_tensor_by_name('pool_3:0')
        ops = pool3.graph.get_operations()
        # Relax the hard-coded batch dimension of 1 to None so the frozen
        # graph accepts arbitrary batch sizes (touches the private _shape).
        for op_idx, op in enumerate(ops):
            for o in op.outputs:
                shape = [s.value for s in o.get_shape()]
                if len(shape) and shape[0] == 1:
                    shape[0] = None
                o._shape = tf.TensorShape(shape)
        # Re-derive logits from pool3 features so both features and class
        # probabilities come from the same forward pass.
        w = sess.graph.get_operation_by_name(
            "softmax/logits/MatMul").inputs[1]
        self.coder = tf.squeeze(tf.squeeze(pool3, 2), 1)
        logits = tf.matmul(self.coder, w)
        self.softmax = tf.nn.softmax(logits)
        assert self.coder.get_shape()[1].value == self.coder_dim
        assert self.softmax.get_shape()[1].value == self.softmax_dim
        self.input = 'ExpandDims:0'
class LeNet(object):
    """Wrapper around a locally saved LeNet model (for MNIST-style data).

    Loads the SavedModel from ``lenet/saved_model`` and exposes ``softmax``
    (10-way probabilities), ``coder`` (512-d features), ``sess`` and the
    feed tensor name ``input``.
    """
    def __init__(self):
        MODEL_DIR = 'lenet/saved_model'
        self.softmax_dim = 10
        self.coder_dim = 512
        self.sess = sess = tf.Session()
        tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.TRAINING], MODEL_DIR)
        g = sess.graph
        # Tensor names below are tied to how the SavedModel was exported.
        self.softmax = g.get_tensor_by_name('Softmax_1:0')
        self.coder = g.get_tensor_by_name('Relu_5:0')
        assert self.coder.get_shape()[1].value == self.coder_dim
        assert self.softmax.get_shape()[1].value == self.softmax_dim
        self.input = 'Placeholder_2:0'
def featurize(images, model, batch_size=100, transformer=np.asarray,
              get_preds=True, get_codes=False, output=sys.stdout,
              out_preds=None, out_codes=None):
    '''
    Run `model` over `images` in minibatches and collect softmax
    predictions and/or feature codes.

    images: a list of numpy arrays with values in [0, 255]
    model: an Inception or LeNet instance
    transformer: maps a slice of `images` to the model's input array
    out_preds/out_codes: optional preallocated float32 output buffers
    Returns a tuple of (preds,) and/or (codes,) depending on the flags.
    '''
    # Sanity-check value range on a small probe batch only.
    sub = transformer(images[:10])
    assert(sub.ndim == 4)
    if isinstance(model, Inception):
        assert sub.shape[3] == 3
        if (sub.max() > 255) or (sub.min() < 0):
            print('WARNING! Inception min/max violated: min = %f, max = %f. Clipping values.' % (sub.min(), sub.max()))
            sub = sub.clip(0., 255.)
    elif isinstance(model, LeNet):
        # LeNet graph was exported with a fixed batch size of 64.
        batch_size = 64
        assert sub.shape[3] == 1
        if (sub.max() > .5) or (sub.min() < -.5):
            print('WARNING! LeNet min/max violated: min = %f, max = %f. Clipping values.' % (sub.min(), sub.max()))
            sub = sub.clip(-.5, .5)
    n = len(images)
    to_get = ()
    ret = ()
    if get_preds:
        to_get += (model.softmax,)
        if out_preds is not None:
            assert out_preds.shape == (n, model.softmax_dim)
            assert out_preds.dtype == np.float32
            preds = out_preds
        else:
            # NaN fill makes unwritten rows obvious if something goes wrong.
            preds = np.empty((n, model.softmax_dim), dtype=np.float32)
            preds.fill(np.nan)
        ret += (preds,)
    if get_codes:
        to_get += (model.coder,)
        if out_codes is not None:
            assert out_codes.shape == (n, model.coder_dim)
            assert out_codes.dtype == np.float32
            codes = out_codes
        else:
            codes = np.empty((n, model.coder_dim), dtype=np.float32)
            codes.fill(np.nan)
        ret += (codes,)
    # with model.sess:
    with TqdmUpTo(unit='img', unit_scale=True, total=n, file=output) as t:
        for start in range(0, n, batch_size):
            t.update_to(start)
            end = min(start + batch_size, n)
            inp = transformer(images[start:end])
            if end - start != batch_size:
                # Zero-pad the last partial batch to the fixed batch size,
                # then keep only the valid rows via the slice `w`.
                pad = batch_size - (end - start)
                extra = np.zeros((pad,) + inp.shape[1:], dtype=inp.dtype)
                inp = np.r_[inp, extra]
                w = slice(0, end - start)
            else:
                w = slice(None)
            out = model.sess.run(to_get, {model.input: inp})
            # preds (if requested) is always first in `to_get`; codes last.
            if get_preds:
                preds[start:end] = out[0][w]
            if get_codes:
                codes[start:end] = out[-1][w]
        t.update_to(n)
    return ret
def get_splits(n, splits=10, split_method='openai'):
    """Partition ``n`` samples into ``splits`` index groups.

    'openai' yields contiguous, near-equal slices covering range(n);
    'bootstrap' yields ``splits`` resamples of size n drawn with
    replacement.  Any other method raises ValueError.
    """
    if split_method == 'openai':
        bounds = [i * n // splits for i in range(splits + 1)]
        return [slice(lo, hi) for lo, hi in zip(bounds[:-1], bounds[1:])]
    if split_method == 'bootstrap':
        return [np.random.choice(n, n) for _ in range(splits)]
    raise ValueError("bad split_method {}".format(split_method))
def inception_score(preds, **split_args):
    """Inception score per split: exp of the mean KL divergence between
    each sample's class distribution p(y|x) and the split's marginal p(y).

    ``preds`` is an (N, classes) array of softmax outputs; ``split_args``
    are forwarded to get_splits.  Returns one score per split.
    """
    split_inds = get_splits(preds.shape[0], **split_args)
    scores = np.zeros(len(split_inds))
    for i, inds in enumerate(split_inds):
        p_yx = preds[inds]
        log_marginal = np.log(np.mean(p_yx, 0, keepdims=True))
        kl_per_sample = np.sum(p_yx * (np.log(p_yx) - log_marginal), 1)
        scores[i] = np.exp(np.mean(kl_per_sample))
    return scores
def fid_score(codes_g, codes_r, eps=1e-6, output=sys.stdout, **split_args):
    """Frechet Inception Distance per split between generated codes
    ``codes_g`` and reference codes ``codes_r`` (both (N, d) feature
    arrays).  ``split_args`` are forwarded to get_splits; returns one
    FID value per split.
    """
    splits_g = get_splits(codes_g.shape[0], **split_args)
    splits_r = get_splits(codes_r.shape[0], **split_args)
    assert len(splits_g) == len(splits_r)
    d = codes_g.shape[1]
    assert codes_r.shape[1] == d
    scores = np.zeros(len(splits_g))
    with tqdm(splits_g, desc='FID', file=output) as bar:
        for i, (w_g, w_r) in enumerate(zip(bar, splits_r)):
            part_g = codes_g[w_g]
            part_r = codes_r[w_r]
            mn_g = part_g.mean(axis=0)
            mn_r = part_r.mean(axis=0)
            cov_g = np.cov(part_g, rowvar=False)
            cov_r = np.cov(part_r, rowvar=False)
            covmean, _ = linalg.sqrtm(cov_g.dot(cov_r), disp=False)
            if not np.isfinite(covmean).all():
                # Regularize the diagonals and retry when sqrtm failed.
                # NOTE(review): this mutates cov_g/cov_r in place, and the
                # retried sqrtm result may be complex; common FID code takes
                # the real part — confirm whether that is needed here.
                cov_g[range(d), range(d)] += eps
                cov_r[range(d), range(d)] += eps
                covmean = linalg.sqrtm(cov_g.dot(cov_r))
            # FID = ||mu_g - mu_r||^2 + Tr(C_g + C_r - 2 sqrt(C_g C_r)).
            scores[i] = np.sum((mn_g - mn_r) ** 2) + (
                np.trace(cov_g) + np.trace(cov_r) - 2 * np.trace(covmean))
            bar.set_postfix({'mean': scores[:i+1].mean()})
    return scores
def polynomial_mmd_averages(codes_g, codes_r, n_subsets=50, subset_size=1000,
                            ret_var=True, output=sys.stdout, **kernel_args):
    """Kernel Inception Distance estimate: polynomial-kernel MMD^2 averaged
    over ``n_subsets`` random subsets of size ``subset_size`` drawn without
    replacement from each code set.  ``kernel_args`` are forwarded to
    polynomial_mmd.  Returns mmds (and variances when ret_var).
    """
    m = min(codes_g.shape[0], codes_r.shape[0])
    mmds = np.zeros(n_subsets)
    if ret_var:
        # NOTE: shadows the builtin `vars`; only defined when ret_var.
        vars = np.zeros(n_subsets)
    choice = np.random.choice
    with tqdm(range(n_subsets), desc='MMD', file=output) as bar:
        for i in bar:
            g = codes_g[choice(len(codes_g), subset_size, replace=False)]
            r = codes_r[choice(len(codes_r), subset_size, replace=False)]
            o = polynomial_mmd(g, r, **kernel_args, var_at_m=m, ret_var=ret_var)
            if ret_var:
                mmds[i], vars[i] = o
            else:
                mmds[i] = o
            bar.set_postfix({'mean': mmds[:i+1].mean()})
    return (mmds, vars) if ret_var else mmds
def polynomial_mmd(codes_g, codes_r, degree=3, gamma=None, coef0=1,
                   var_at_m=None, ret_var=True):
    """Polynomial-kernel MMD^2 (and optionally its variance estimate)
    between two code sets.

    Kernel: k(x, y) = (gamma <x, y> + coef0)^degree, with gamma defaulting
    to 1/dim inside sklearn's polynomial_kernel.
    """
    def kern(*mats):
        # Shared kernel settings for all three Gram matrices.
        return polynomial_kernel(*mats, degree=degree, gamma=gamma, coef0=coef0)

    K_XX = kern(codes_g)
    K_YY = kern(codes_r)
    K_XY = kern(codes_g, codes_r)
    return _mmd2_and_variance(K_XX, K_XY, K_YY,
                              var_at_m=var_at_m, ret_var=ret_var)
def _sqn(arr):
flat = np.ravel(arr)
return flat.dot(flat)
def _mmd2_and_variance(K_XX, K_XY, K_YY, unit_diagonal=False,
                       mmd_est='unbiased', block_size=1024,
                       var_at_m=None, ret_var=True):
    """Compute MMD^2 from precomputed Gram matrices, optionally with an
    estimate of its variance at sample size ``var_at_m``.

    mmd_est selects the 'biased', 'unbiased' or 'u-statistic' estimator.
    Returns mmd2, or (mmd2, var_est) when ret_var.
    NOTE(review): ``block_size`` is accepted but never used in this version.
    """
    # based on
    # https://github.com/dougalsutherland/opt-mmd/blob/master/two_sample/mmd.py
    # but changed to not compute the full kernel matrix at once
    m = K_XX.shape[0]
    assert K_XX.shape == (m, m)
    assert K_XY.shape == (m, m)
    assert K_YY.shape == (m, m)
    if var_at_m is None:
        var_at_m = m
    # Get the various sums of kernels that we'll use
    # Kts drop the diagonal, but we don't need to compute them explicitly
    if unit_diagonal:
        diag_X = diag_Y = 1
        sum_diag_X = sum_diag_Y = m
        sum_diag2_X = sum_diag2_Y = m
    else:
        diag_X = np.diagonal(K_XX)
        diag_Y = np.diagonal(K_YY)
        sum_diag_X = diag_X.sum()
        sum_diag_Y = diag_Y.sum()
        sum_diag2_X = _sqn(diag_X)
        sum_diag2_Y = _sqn(diag_Y)
    # Row sums of the off-diagonal ("tilde") kernel matrices.
    Kt_XX_sums = K_XX.sum(axis=1) - diag_X
    Kt_YY_sums = K_YY.sum(axis=1) - diag_Y
    K_XY_sums_0 = K_XY.sum(axis=0)
    K_XY_sums_1 = K_XY.sum(axis=1)
    Kt_XX_sum = Kt_XX_sums.sum()
    Kt_YY_sum = Kt_YY_sums.sum()
    K_XY_sum = K_XY_sums_0.sum()
    if mmd_est == 'biased':
        mmd2 = ((Kt_XX_sum + sum_diag_X) / (m * m)
                + (Kt_YY_sum + sum_diag_Y) / (m * m)
                - 2 * K_XY_sum / (m * m))
    else:
        assert mmd_est in {'unbiased', 'u-statistic'}
        mmd2 = (Kt_XX_sum + Kt_YY_sum) / (m * (m-1))
        if mmd_est == 'unbiased':
            mmd2 -= 2 * K_XY_sum / (m * m)
        else:
            # Full U-statistic also drops the K_XY diagonal.
            mmd2 -= 2 * (K_XY_sum - np.trace(K_XY)) / (m * (m-1))
    if not ret_var:
        return mmd2
    # Sums of squared kernel entries used by the variance estimator.
    Kt_XX_2_sum = _sqn(K_XX) - sum_diag2_X
    Kt_YY_2_sum = _sqn(K_YY) - sum_diag2_Y
    K_XY_2_sum = _sqn(K_XY)
    dot_XX_XY = Kt_XX_sums.dot(K_XY_sums_1)
    dot_YY_YX = Kt_YY_sums.dot(K_XY_sums_0)
    m1 = m - 1
    m2 = m - 2
    # Asymptotic-variance components (zeta1, zeta2) of the U-statistic.
    zeta1_est = (
        1 / (m * m1 * m2) * (
            _sqn(Kt_XX_sums) - Kt_XX_2_sum + _sqn(Kt_YY_sums) - Kt_YY_2_sum)
        - 1 / (m * m1)**2 * (Kt_XX_sum**2 + Kt_YY_sum**2)
        + 1 / (m * m * m1) * (
            _sqn(K_XY_sums_1) + _sqn(K_XY_sums_0) - 2 * K_XY_2_sum)
        - 2 / m**4 * K_XY_sum**2
        - 2 / (m * m * m1) * (dot_XX_XY + dot_YY_YX)
        + 2 / (m**3 * m1) * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum
    )
    zeta2_est = (
        1 / (m * m1) * (Kt_XX_2_sum + Kt_YY_2_sum)
        - 1 / (m * m1)**2 * (Kt_XX_sum**2 + Kt_YY_sum**2)
        + 2 / (m * m) * K_XY_2_sum
        - 2 / m**4 * K_XY_sum**2
        - 4 / (m * m * m1) * (dot_XX_XY + dot_YY_YX)
        + 4 / (m**3 * m1) * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum
    )
    var_est = (4 * (var_at_m - 2) / (var_at_m * (var_at_m - 1)) * zeta1_est
               + 2 / (var_at_m * (var_at_m - 1)) * zeta2_est)
    return mmd2, var_est
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('samples')
parser.add_argument('reference_feats', nargs='?')
parser.add_argument('--output', '-o')
parser.add_argument('--reference-subset', default=slice(None),
type=lambda x: slice(*(int(s) if s else None
for s in x.split(':'))))
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--model', choices=['inception', 'lenet'],
default='inception')
g = parser.add_mutually_exclusive_group()
g.add_argument('--save-codes')
g.add_argument('--load-codes')
g = parser.add_mutually_exclusive_group()
g.add_argument('--save-preds')
g.add_argument('--load-preds')
g = parser.add_mutually_exclusive_group()
g.add_argument('--do-inception', action='store_true', default=True)
g.add_argument('--no-inception', action='store_false', dest='do_inception')
g = parser.add_mutually_exclusive_group()
g.add_argument('--do-fid', action='store_true', default=False)
g.add_argument('--no-fid', action='store_false', dest='do_fid')
g = parser.add_mutually_exclusive_group()
g.add_argument('--do-mmd', action='store_true', default=False)
g.add_argument('--no-mmd', action='store_false', dest='do_mmd')
parser.add_argument('--mmd-degree', type=int, default=3)
parser.add_argument('--mmd-gamma', type=float, default=None)
parser.add_argument('--mmd-coef0', type=float, default=1)
parser.add_argument('--mmd-subsets', type=int, default=100)
parser.add_argument('--mmd-subset-size', type=int, default=1000)
g = parser.add_mutually_exclusive_group()
g.add_argument('--mmd-var', action='store_true', default=False)
g.add_argument('--no-mmd-var', action='store_false', dest='mmd_var')
parser.add_argument('--splits', type=int, default=10)
parser.add_argument('--split-method', choices=['openai', 'bootstrap'],
default='bootstrap')
args = parser.parse_args()
if args.do_fid and args.reference_feats is None:
parser.error("Need REFERENCE_FEATS if you're doing FID")
def check_path(pth):
if os.path.exists(pth):
parser.error("Path {} already exists".format(pth))
d = os.path.dirname(pth)
if d and not os.path.exists(d):
os.makedirs(d)
if args.output:
check_path(args.output)
samples = np.load(args.samples, mmap_mode='r')
if args.model == 'inception':
model = Inception()
if samples.dtype == np.uint8:
transformer = np.asarray
elif samples.dtype == np.float32:
m = samples[:10].max()
assert .5 <= m <= 1
transformer = lambda x: x * 255
else:
raise TypeError("don't know how to handle {}".format(samples.dtype))
elif args.model == 'lenet':
model = LeNet()
if samples.dtype == np.uint8:
def transformer(x):
return (np.asarray(x, dtype=np.float32) - (255 / 2.)) / 255
elif samples.dtype == np.float32:
assert .8 <= samples[:10].max() <= 1
assert 0 <= samples[:10].min() <= .3
transformer = lambda x: x - .5
else:
raise TypeError("don't know how to handle {}".format(samples.dtype))
else:
raise ValueError("bad model {}".format(args.model))
if args.reference_feats:
ref_feats = np.load(args.reference_feats, mmap_mode='r')[
args.reference_subset]
out_kw = {}
if args.save_codes:
check_path(args.save_codes)
out_kw['out_codes'] = np.lib.format.open_memmap(
args.save_codes, mode='w+', dtype=np.float32,
shape=(samples.shape[0], model.coder_dim))
if args.save_preds:
check_path(args.save_preds)
out_kw['out_preds'] = np.lib.format.open_memmap(
args.save_preds, mode='w+', dtype=np.float32,
shape=(samples.shape[0], model.softmax_dim))
need_preds = args.do_inception or args.save_preds
need_codes = args.do_fid or args.do_mmd or args.save_codes
print('Transformer test: transformer([-1, 0, 10.]) = ' + repr(transformer(np.array([-1, 0, 10.]))))
if args.load_codes or args.load_preds:
if args.load_codes:
codes = np.load(args.load_codes, mmap_mode='r')
assert codes.ndim == 2
assert codes.shape[0] == samples.shape[0]
assert codes.shape[1] == model.coder_dim
if args.load_preds:
preds = np.load(args.load_preds, mmap_mode='r')
assert preds.ndim == 2
assert preds.shape[0] == samples.shape[0]
assert preds.shape[1] == model.softmax_dim
elif need_preds:
raise NotImplementedError()
else:
out = featurize(
samples, model, batch_size=args.batch_size, transformer=transformer,
get_preds=need_preds, get_codes=need_codes, **out_kw)
if need_preds:
preds = out[0]
if need_codes:
codes = out[-1]
split_args = {'splits': args.splits, 'split_method': args.split_method}
output = {'args': args}
if args.do_inception:
output['inception'] = scores = inception_score(preds, **split_args)
print("Inception mean:", np.mean(scores))
print("Inception std:", np.std(scores))
print("Inception scores:", scores, sep='\n')
if args.do_fid:
output['fid'] = scores = fid_score(codes, ref_feats, **split_args)
print("FID mean:", np.mean(scores))
print("FID std:", np.std(scores))
print("FID scores:", scores, sep='\n')
print()
if args.do_mmd:
ret = polynomial_mmd_averages(
codes, ref_feats, degree=args.mmd_degree, gamma=args.mmd_gamma,
coef0=args.mmd_coef0, ret_var=args.mmd_var,
n_subsets=args.mmd_subsets, subset_size=args.mmd_subset_size)
if args.mmd_var:
output['mmd2'], output['mmd2_var'] = mmd2s, vars = ret
else:
output['mmd2'] = mmd2s = ret
print("mean MMD^2 estimate:", mmd2s.mean())
print("std MMD^2 estimate:", mmd2s.std())
print("MMD^2 estimates:", mmd2s, sep='\n')
print()
if args.mmd_var:
print("mean Var[MMD^2] estimate:", vars.mean())
print("std Var[MMD^2] estimate:", vars.std())
print("Var[MMD^2] estimates:", vars, sep='\n')
print()
if args.output:
np.savez(args.output, **output)
if __name__ == '__main__':
main()
| 18,512 | 35.087719 | 119 | py |
GANFingerprints | GANFingerprints-master/CramerGAN/gan/summarize.py | import argparse
import os
import numpy as np
# Summarize score files (.npz produced by the evaluation script) as a fixed-width
# table, or as LaTeX table rows when --tex is given.
parser = argparse.ArgumentParser()
parser.add_argument('files', nargs='+')
parser.add_argument('--tex', action='store_true')
args = parser.parse_args()
# Column separator and row terminator differ between plain and LaTeX output.
if args.tex:
    split = ' & '
    end = '\\\\\n'
else:
    split = ' '
    end = '\n'
# NOTE(review): this header line appears to duplicate the column header built
# just below — possibly leftover; confirm before removing.
print(' ' * 79 + 'Inception (std) FID (std) MMD^2 (std)')
# Build the column header piece by piece so widths line up with the data rows.
print(' ' * (18 if args.tex else 87), end=split)
print('{:>9}'.format('Inception'), end=' ' if args.tex else ' ')
print('{:>7}'.format('' if args.tex else '(std)'), end=split)
print('{:>8}'.format('FID'), end=' ')
print('{:>7}'.format('' if args.tex else '(std)'), end=split)
print('{:>8}'.format('KID'), end=' ')
print('{:>8}'.format('' if args.tex else '(std)'), end=end)
if args.tex:
    print("\\hline")
for fn in sorted(args.files):
    with np.load(fn) as d:
        # Row label: last two directory components of the file's path.
        n = '/'.join(fn.split('/')[-3:-1])#os.path.basename(fn)
        if n.endswith('.npz'):
            n = n[:-4]
        if n.endswith('-results'):
            n = n[:-len('-results')]
        if args.tex:
            # Underscores are special in LaTeX; replace with spaces.
            n = n.replace('_', ' ')
        # Each archive holds per-split score arrays; report mean and std.
        print('{:88}'.format(n), end=split)
        print('{:8.3f}'.format(d['inception'].mean()), end=' ')
        print('({:5.3f})'.format(d['inception'].std()), end=split)
        print('{:8.3f}'.format(d['fid'].mean()), end=' ')
        print('({:5.3f})'.format(d['fid'].std()), end=split)
        print('{:8.4f}'.format(d['mmd2'].mean()), end=' ')
        print('({:6.4f})'.format(d['mmd2'].std()), end=end)
| 1,506 | 30.395833 | 77 | py |
GANFingerprints | GANFingerprints-master/CramerGAN/gan/core/architecture.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 10 14:34:47 2018
@author: mikolajbinkowski
"""
import tensorflow as tf
from core.ops import batch_norm, conv2d, deconv2d, linear, lrelu
from utils.misc import conv_sizes
# Generators
class Generator:
    """Base class for generator networks.

    Holds the common configuration (filter width, channel count, output
    resolution, variable-name prefix) and the six batch-norm layers
    ``g_bn0`` .. ``g_bn5`` used by the concrete architectures. When batch
    norm is disabled, each layer is an identity passthrough.
    """
    def __init__(self, dim, c_dim, output_size, use_batch_norm, prefix='g_'):
        self.used = False
        self.dim = dim
        self.c_dim = c_dim
        self.output_size = output_size
        self.prefix = prefix
        # Create g_bn0..g_bn5: real batch-norm ops, or identity when disabled.
        for idx in range(6):
            if use_batch_norm:
                layer = batch_norm(name='%sbn%d' % (prefix, idx))
            else:
                layer = (lambda x: x)
            setattr(self, 'g_bn%d' % idx, layer)

    def __call__(self, seed, batch_size):
        # Build (or rebuild with reused variables) the generator graph.
        with tf.variable_scope('generator') as scope:
            if self.used:
                scope.reuse_variables()
            self.used = True
            return self.network(seed, batch_size)

    def network(self, seed, batch_size):
        """Subclasses construct and return the output tensor here."""
        pass
class DCGANGenerator(Generator):
    """Four-deconvolution DCGAN generator; maps a noise seed to an image in [0, 1]."""
    def network(self, seed, batch_size):
        # Spatial sizes at each layer, halving per stride-2 layer.
        s1, s2, s4, s8, s16 = conv_sizes(self.output_size, layers=4, stride=2)
        # 64, 32, 16, 8, 4 - for self.output_size = 64
        # default architecture
        # For Cramer: self.gf_dim = 64
        z_ = linear(seed, self.dim * 8 * s16 * s16, self.prefix + 'h0_lin') # project random noise seed and reshape
        h0 = tf.reshape(z_, [batch_size, s16, s16, self.dim * 8])
        h0 = tf.nn.relu(self.g_bn0(h0))
        # Each deconv doubles the spatial size while halving the channel count.
        h1 = deconv2d(h0, [batch_size, s8, s8, self.dim*4], name=self.prefix + 'h1')
        h1 = tf.nn.relu(self.g_bn1(h1))
        h2 = deconv2d(h1, [batch_size, s4, s4, self.dim*2], name=self.prefix + 'h2')
        h2 = tf.nn.relu(self.g_bn2(h2))
        h3 = deconv2d(h2, [batch_size, s2, s2, self.dim*1], name=self.prefix + 'h3')
        h3 = tf.nn.relu(self.g_bn3(h3))
        h4 = deconv2d(h3, [batch_size, s1, s1, self.c_dim], name=self.prefix + 'h4')
        # Sigmoid squashes output pixels into [0, 1].
        return tf.nn.sigmoid(h4)
class DCGAN5Generator(Generator):
    """Five-deconvolution DCGAN generator (one more upsampling stage than DCGANGenerator)."""
    def network(self, seed, batch_size):
        s1, s2, s4, s8, s16, s32 = conv_sizes(self.output_size, layers=5, stride=2)
        # project `z` and reshape
        z_= linear(seed, self.dim * 16 * s32 * s32, self.prefix + 'h0_lin')
        h0 = tf.reshape(z_, [-1, s32, s32, self.dim * 16])
        h0 = tf.nn.relu(self.g_bn0(h0))
        # Five stride-2 deconvolutions: spatial size doubles, channels halve.
        h1 = deconv2d(h0, [batch_size, s16, s16, self.dim*8], name=self.prefix + 'h1')
        h1 = tf.nn.relu(self.g_bn1(h1))
        h2 = deconv2d(h1, [batch_size, s8, s8, self.dim*4], name=self.prefix + 'h2')
        h2 = tf.nn.relu(self.g_bn2(h2))
        h3 = deconv2d(h2, [batch_size, s4, s4, self.dim*2], name=self.prefix + 'h3')
        h3 = tf.nn.relu(self.g_bn3(h3))
        h4 = deconv2d(h3, [batch_size, s2, s2, self.dim], name=self.prefix + 'h4')
        h4 = tf.nn.relu(self.g_bn4(h4))
        h5 = deconv2d(h4, [batch_size, s1, s1, self.c_dim], name=self.prefix + 'h5')
        # Sigmoid squashes output pixels into [0, 1].
        return tf.nn.sigmoid(h5)
class ResNetGenerator(Generator):
    """Generator built from residual upsampling blocks (NCHW internally)."""
    def network(self, seed, batch_size):
        from core.resnet import block, ops
        s1, s2, s4, s8, s16, s32 = conv_sizes(self.output_size, layers=5, stride=2)
        # project `z` and reshape
        z_= linear(seed, self.dim * 16 * s32 * s32, self.prefix + 'h0_lin')
        h0 = tf.reshape(z_, [-1, self.dim * 16, s32, s32]) # NCHW format
        # Four residual blocks, each upsampling 2x and halving channels.
        h1 = block.ResidualBlock(self.prefix + 'res1', 16 * self.dim,
                                 8 * self.dim, 3, h0, resample='up')
        h2 = block.ResidualBlock(self.prefix + 'res2', 8 * self.dim,
                                 4 * self.dim, 3, h1, resample='up')
        h3 = block.ResidualBlock(self.prefix + 'res3', 4 * self.dim,
                                 2 * self.dim, 3, h2, resample='up')
        h4 = block.ResidualBlock(self.prefix + 'res4', 2 * self.dim,
                                 self.dim, 3, h3, resample='up')
        h4 = ops.batchnorm.Batchnorm('g_h4', [0, 2, 3], h4, fused=True)
        h4 = tf.nn.relu(h4)
        # h5 = lib.ops.conv2d.Conv2D('g_h5', dim, 3, 3, h4)
        h5 = tf.transpose(h4, [0, 2, 3, 1]) # NCHW to NHWC
        h5 = deconv2d(h5, [batch_size, s1, s1, self.c_dim], name='g_h5')
        # Sigmoid squashes output pixels into [0, 1].
        return tf.nn.sigmoid(h5)
# Discriminator
class Discriminator:
    """Base class for discriminator/critic networks.

    Holds the common configuration (filter width, output feature dimension,
    variable-name prefix) and the six batch-norm layers ``d_bn0`` .. ``d_bn5``
    used by the concrete architectures. When batch norm is disabled, each
    layer is an identity passthrough.
    """
    def __init__(self, dim, o_dim, use_batch_norm, prefix='d_'):
        self.dim = dim
        self.o_dim = o_dim
        self.prefix = prefix
        self.used = False
        # Create d_bn0..d_bn5: real batch-norm ops, or identity when disabled.
        for idx in range(6):
            if use_batch_norm:
                layer = batch_norm(name='%sbn%d' % (prefix, idx))
            else:
                layer = (lambda x: x)
            setattr(self, 'd_bn%d' % idx, layer)

    def __call__(self, image, batch_size, return_layers=False):
        # Build (or rebuild with reused variables) the discriminator graph.
        with tf.variable_scope("discriminator") as scope:
            if self.used:
                scope.reuse_variables()
            self.used = True
            layers = self.network(image, batch_size)
            # Either the whole layer dict, or just the final features 'hF'.
            return layers if return_layers else layers['hF']

    def network(self, image, batch_size):
        """Subclasses construct the graph and return a dict of layer tensors."""
        pass
class DCGANDiscriminator(Discriminator):
    """Four-convolution DCGAN discriminator; returns every intermediate layer plus final features 'hF'."""
    def network(self, image, batch_size):
        # If no explicit output dim was given (o_dim <= 0), default to 8*dim.
        o_dim = self.o_dim if (self.o_dim > 0) else 8 * self.dim
        # Stride-2 convs: spatial size halves, channels double each layer.
        h0 = lrelu(conv2d(image, self.dim, name=self.prefix + 'h0_conv'))
        h1 = lrelu(self.d_bn1(conv2d(h0, self.dim * 2, name=self.prefix + 'h1_conv')))
        h2 = lrelu(self.d_bn2(conv2d(h1, self.dim * 4, name=self.prefix + 'h2_conv')))
        h3 = lrelu(self.d_bn3(conv2d(h2, self.dim * 8, name=self.prefix + 'h3_conv')))
        # Flatten and project to the o_dim-dimensional critic output.
        hF = linear(tf.reshape(h3, [batch_size, -1]), o_dim, self.prefix + 'h4_lin')
        return {'h0': h0, 'h1': h1, 'h2': h2, 'h3': h3, 'hF': hF}
class DCGAN5Discriminator(Discriminator):
    """Five-convolution DCGAN discriminator (one more downsampling stage than DCGANDiscriminator)."""
    def network(self, image, batch_size):
        # If no explicit output dim was given (o_dim <= 0), default to 16*dim.
        o_dim = self.o_dim if (self.o_dim > 0) else 16 * self.dim
        h0 = lrelu(conv2d(image, self.dim, name=self.prefix + 'h0_conv'))
        h1 = lrelu(self.d_bn1(conv2d(h0, self.dim * 2, name=self.prefix + 'h1_conv')))
        h2 = lrelu(self.d_bn2(conv2d(h1, self.dim * 4, name=self.prefix + 'h2_conv')))
        h3 = lrelu(self.d_bn3(conv2d(h2, self.dim * 8, name=self.prefix + 'h3_conv')))
        h4 = lrelu(self.d_bn4(conv2d(h3, self.dim * 16, name=self.prefix + 'h4_conv')))
        # Flatten and project to the o_dim-dimensional critic output.
        hF = linear(tf.reshape(h4, [batch_size, -1]), o_dim, self.prefix + 'h6_lin')
        return {'h0': h0, 'h1': h1, 'h2': h2, 'h3': h3, 'h4': h4, 'hF': hF}
class FullConvDiscriminator(Discriminator):
    """Fully-convolutional discriminator: final features come from a conv layer
    (flattened) rather than a linear projection."""
    def network(self, image, batch_size):
        h0 = lrelu(conv2d(image, self.dim, name=self.prefix + 'h0_conv'))
        h1 = lrelu(self.d_bn1(conv2d(h0, self.dim * 2, name=self.prefix + 'h1_conv')))
        h2 = lrelu(self.d_bn2(conv2d(h1, self.dim * 4, name=self.prefix + 'h2_conv')))
        h3 = lrelu(self.d_bn3(conv2d(h2, self.dim * 8, name=self.prefix + 'h3_conv')))
        # Final conv maps straight to o_dim channels, then flatten per example.
        hF = lrelu(self.d_bn4(conv2d(h3, self.o_dim, name=self.prefix + 'hF_conv')))
        hF = tf.reshape(hF, [batch_size, -1])
        return {'h0': h0, 'h1': h1, 'h2': h2, 'h3': h3, 'hF': hF}
class ResNetDiscriminator(Discriminator):
    """Discriminator built from residual downsampling blocks (NCHW internally)."""
    def network(self, image, batch_size):
        from core.resnet import block, ops
        image = tf.transpose(image, [0, 3, 1, 2]) # NHWC to NCHW
        h0 = lrelu(ops.conv2d.Conv2D(self.prefix + 'h0_conv', 3, self.dim,
                                     3, image, he_init=False))
        # Four residual blocks, each downsampling 2x.
        h1 = block.ResidualBlock(self.prefix + 'res1', self.dim,
                                 2 * self.dim, 3, h0, resample='down')
        h2 = block.ResidualBlock(self.prefix + 'res2', 2 * self.dim,
                                 4 * self.dim, 3, h1, resample='down')
        h3 = block.ResidualBlock(self.prefix + 'res3', 4 * self.dim,
                                 8 * self.dim, 3, h2, resample='down')
        h4 = block.ResidualBlock(self.prefix + 'res4', 8 * self.dim,
                                 8 * self.dim, 3, h3, resample='down')
        # NOTE(review): the fixed 4*4 here assumes the input resolution reduces
        # to 4x4 after four downsamplings (i.e. 64x64 input) — confirm.
        hF = tf.reshape(h4, [-1, 4 * 4 * 8 * self.dim])
        hF = linear(hF, self.o_dim, self.prefix + 'h5_lin')
        return {'h0': h0, 'h1': h1, 'h2': h2, 'h3': h3, 'h4': h4, 'hF': hF}
def get_networks(architecture):
    """Map an architecture name to its (Generator, Discriminator) class pair.

    Raises ValueError for unrecognized names. Note the 'g_resnet5' case is a
    substring match (checked before the exact 'resnet5' comparison), so any
    name containing 'g_resnet5' pairs a ResNet generator with a DCGAN5
    discriminator.
    """
    if architecture == 'dcgan':
        return DCGANGenerator, DCGANDiscriminator
    if architecture == 'dcgan5':
        return DCGAN5Generator, DCGAN5Discriminator
    if 'g_resnet5' in architecture:
        return ResNetGenerator, DCGAN5Discriminator
    if architecture == 'resnet5':
        return ResNetGenerator, ResNetDiscriminator
    if architecture == 'd_fullconv5':
        return DCGAN5Generator, FullConvDiscriminator
    raise ValueError('Wrong architecture: "%s"' % architecture)
| 9,781 | 42.475556 | 115 | py |
GANFingerprints | GANFingerprints-master/CramerGAN/gan/core/cramer.py | from .model import MMD_GAN, tf, np
from .architecture import get_networks
from .ops import safer_norm
class Cramer_GAN(MMD_GAN):
    """Cramer GAN variant of MMD_GAN: energy-distance critic with a gradient penalty."""
    def build_model(self):
        """Construct the full training graph: generator, discriminator, losses, summaries."""
        self.global_step = tf.Variable(0, name="global_step", trainable=False)
        self.lr = tf.Variable(self.config.learning_rate, name='lr',
                              trainable=False, dtype=tf.float32)
        # Decay op clamps the learning rate below at 1e-6.
        self.lr_decay_op = self.lr.assign(tf.maximum(self.lr * self.config.decay_rate, 1.e-6))
        with tf.variable_scope('loss'):
            if self.config.is_train and (self.config.gradient_penalty > 0):
                # NOTE(review): self.gp is also re-assigned via tf.get_variable in
                # set_loss below — confirm the two definitions don't conflict.
                self.gp = tf.Variable(self.config.gradient_penalty,
                                      name='gradient_penalty',
                                      trainable=False, dtype=tf.float32)
                self.gp_decay_op = self.gp.assign(self.gp * self.config.gp_decay_rate)
        self.set_pipeline()
        # Fixed noise used for periodic sampling, so samples are comparable across steps.
        self.sample_z = tf.constant(np.random.uniform(-1, 1, size=(self.sample_size,
                                                                   self.z_dim)).astype(np.float32),
                                    dtype=tf.float32, name='sample_z')
        Generator, Discriminator = get_networks(self.config.architecture)
        generator = Generator(self.gf_dim, self.c_dim, self.output_size, self.config.batch_norm)
        # Discriminator batch norm is disabled whenever a gradient penalty is used.
        dbn = self.config.batch_norm & (self.config.gradient_penalty <= 0)
        self.discriminator = Discriminator(self.df_dim, self.dof_dim, dbn)
        # Two independent generator samples (G, G2), as the Cramer critic needs both.
        self.G = generator(tf.random_uniform([self.batch_size, self.z_dim], minval=-1.,
                                             maxval=1., dtype=tf.float32, name='z'),
                           self.batch_size)
        self.G2 = generator(tf.random_uniform([self.batch_size, self.z_dim], minval=-1.,
                                              maxval=1., dtype=tf.float32, name='z2'),
                            self.batch_size)
        self.sampler = generator(self.sample_z, self.sample_size)
        self.d_images_layers = self.discriminator(self.images, self.real_batch_size, return_layers=True)
        self.d_G_layers = self.discriminator(self.G, self.batch_size, return_layers=True)
        self.d_images = self.d_images_layers['hF']
        self.d_G = self.d_G_layers['hF']
        G2 = self.discriminator(self.G2, self.batch_size)
        self.set_loss(self.d_G, G2, self.d_images)
        # Image grid side length for summaries, capped at 8.
        block = min(8, int(np.sqrt(self.real_batch_size)), int(np.sqrt(self.batch_size)))
        tf.summary.image("train/input image",
                         self.imageRearrange(tf.clip_by_value(self.images, 0, 1), block))
        tf.summary.image("train/gen image",
                         self.imageRearrange(tf.clip_by_value(self.G, 0, 1), block))
        # Split trainable variables between discriminator ('d_') and generator ('g_').
        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]
        self.saver = tf.train.Saver(max_to_keep=2)
    def set_loss(self, G, G2, images):
        """Define generator/discriminator losses from critic outputs G, G2 (fake) and images (real)."""
        bs = min([self.batch_size, self.real_batch_size])
        # Random interpolation coefficient per example, for the gradient penalty.
        alpha = tf.random_uniform(shape=[bs])
        alpha = tf.reshape(alpha, [bs, 1, 1, 1])
        real_data = self.images[:bs] # before discriminator
        fake_data = self.G[:bs] # before discriminator
        # x_hat: random interpolate between real and fake images (WGAN-GP style).
        x_hat_data = (1. - alpha) * real_data + alpha * fake_data
        x_hat = self.discriminator(x_hat_data, bs)
        # Cramer critic: f(x) = ||h(x) - h(x_)|| - ||h(x)|| on discriminator features.
        critic = lambda x, x_ : safer_norm(x - x_, axis=1) - safer_norm(x, axis=1)
        with tf.variable_scope('loss'):
            if self.config.model == 'cramer': # Cramer GAN paper
                self.g_loss = tf.reduce_mean(
                    - safer_norm(G - G2, axis=1) + safer_norm(G - images, axis=1) + safer_norm(G2 - images, axis=1))
                self.d_loss = -tf.reduce_mean(critic(images, G) - critic(G2, G))
                to_penalize = critic(x_hat, G)
            elif self.config.model == 'reddit_cramer':
                # Alternative formulation; generator and discriminator play a zero-sum game.
                self.g_loss = tf.reduce_mean(critic(images, G) - critic(G, G2))
                self.d_loss = -self.g_loss
                to_penalize = critic(x_hat, G)
            else:
                raise(AttributeError('wrong model: %s' % self.config.model))
            # One-sided unit-gradient penalty on the interpolated points.
            gradients = tf.gradients(to_penalize, [x_hat_data])[0]
            penalty = tf.reduce_mean(tf.square(safer_norm(gradients, axis=1) - 1.0))#
            self.gp = tf.get_variable('gradient_penalty', dtype=tf.float32,
                                      initializer=self.config.gradient_penalty)
            self.d_loss += penalty * self.gp
            self.optim_name = '%s gp %.1f' % (self.config.model, self.config.gradient_penalty)
            tf.summary.scalar(self.optim_name + ' G', self.g_loss)
            tf.summary.scalar(self.optim_name + ' D', self.d_loss)
            tf.summary.scalar('dx_penalty', penalty)
| 4,955 | 50.625 | 116 | py |
GANFingerprints | GANFingerprints-master/CramerGAN/gan/core/mmd.py | '''
MMD functions implemented in tensorflow.
'''
from __future__ import division
_eps=1.0e-5
import tensorflow as tf
import numpy as np
from .ops import dot, sq_sum
mysqrt = lambda x: tf.sqrt(tf.maximum(x + _eps, 0.))
def _distance_kernel(X, Y, K_XY_only=False):
    """Energy-distance kernel: k(x, y) = ||x|| + ||y|| - ||x - y||.

    Returns (K_XX, K_XY, K_YY, False), or just K_XY when K_XY_only is True.
    The trailing False signals "no constant diagonal" to the MMD estimators.
    """
    # Gram matrices; their diagonals are the squared norms.
    XX = tf.matmul(X, X, transpose_b=True)
    XY = tf.matmul(X, Y, transpose_b=True)
    YY = tf.matmul(Y, Y, transpose_b=True)
    X_sqnorms = tf.diag_part(XX)
    Y_sqnorms = tf.diag_part(YY)
    # r/c broadcast a vector across rows/columns respectively.
    r = lambda x: tf.expand_dims(x, 0)
    c = lambda x: tf.expand_dims(x, 1)
    K_XY = c(mysqrt(X_sqnorms)) + r(mysqrt(Y_sqnorms)) - mysqrt(-2 * XY + c(X_sqnorms) + r(Y_sqnorms))
    if K_XY_only:
        return K_XY
    K_XX = c(mysqrt(X_sqnorms)) + r(mysqrt(X_sqnorms)) - mysqrt(-2 * XX + c(X_sqnorms) + r(X_sqnorms))
    K_YY = c(mysqrt(Y_sqnorms)) + r(mysqrt(Y_sqnorms)) - mysqrt(-2 * YY + c(Y_sqnorms) + r(Y_sqnorms))
    return K_XX, K_XY, K_YY, False
def _tanh_distance_kernel(X, Y, K_XY_only=False):
    """Distance kernel applied after squashing inputs with tanh."""
    return _distance_kernel(tf.tanh(X), tf.tanh(Y), K_XY_only=K_XY_only)
def _dot_kernel(X, Y, K_XY_only=False):
    """Plain linear (dot-product) kernel."""
    K_XY = tf.matmul(X, Y, transpose_b=True)
    if K_XY_only:
        return K_XY
    K_XX = tf.matmul(X, X, transpose_b=True)
    K_YY = tf.matmul(Y, Y, transpose_b=True)
    return K_XX, K_XY, K_YY, False
def _mix_rbf_kernel(X, Y, sigmas=[2.0, 5.0, 10.0, 20.0, 40.0, 80.0], wts=None,
                    K_XY_only=False):
    """Weighted mixture of RBF (Gaussian) kernels over several bandwidths.

    Returns (K_XX, K_XY, K_YY, sum_of_weights); the last element is the
    constant diagonal value of K_XX/K_YY, used by the MMD estimators.
    """
    if wts is None:
        wts = [1] * len(sigmas)
    XX = tf.matmul(X, X, transpose_b=True)
    XY = tf.matmul(X, Y, transpose_b=True)
    YY = tf.matmul(Y, Y, transpose_b=True)
    X_sqnorms = tf.diag_part(XX)
    Y_sqnorms = tf.diag_part(YY)
    # r/c broadcast a vector across rows/columns respectively.
    r = lambda x: tf.expand_dims(x, 0)
    c = lambda x: tf.expand_dims(x, 1)
    K_XX, K_XY, K_YY = 0, 0, 0
    # Pairwise squared Euclidean distances via the Gram-matrix identity.
    XYsqnorm = -2 * XY + c(X_sqnorms) + r(Y_sqnorms)
    for sigma, wt in zip(sigmas, wts):
        gamma = 1 / (2 * sigma**2)
        K_XY += wt * tf.exp(-gamma * XYsqnorm)
    if K_XY_only:
        return K_XY
    XXsqnorm = -2 * XX + c(X_sqnorms) + r(X_sqnorms)
    YYsqnorm = -2 * YY + c(Y_sqnorms) + r(Y_sqnorms)
    for sigma, wt in zip(sigmas, wts):
        gamma = 1 / (2 * sigma**2)
        K_XX += wt * tf.exp(-gamma * XXsqnorm)
        K_YY += wt * tf.exp(-gamma * YYsqnorm)
    return K_XX, K_XY, K_YY, tf.reduce_sum(wts)
# Convenience wrappers: rational-quadratic mixture with a dot-product term of
# varying weight. NOTE(review): _mix_rq_dot_kernel and _mix_rq_01dot_kernel
# both use add_dot=.1 — possibly one was meant to differ; confirm.
def _mix_rq_dot_kernel(X, Y, alphas=[.1, 1., 10.], wts=None, K_XY_only=False):
    return _mix_rq_kernel(X, Y, alphas=alphas, wts=wts, K_XY_only=K_XY_only, add_dot=.1)
def _mix_rq_1dot_kernel(X, Y, alphas=[.1, 1., 10.], wts=None, K_XY_only=False):
    return _mix_rq_kernel(X, Y, alphas=alphas, wts=wts, K_XY_only=K_XY_only, add_dot=1.)
def _mix_rq_10dot_kernel(X, Y, alphas=[.1, 1., 10.], wts=None, K_XY_only=False):
    return _mix_rq_kernel(X, Y, alphas=alphas, wts=wts, K_XY_only=K_XY_only, add_dot=10.)
def _mix_rq_01dot_kernel(X, Y, alphas=[.1, 1., 10.], wts=None, K_XY_only=False):
    return _mix_rq_kernel(X, Y, alphas=alphas, wts=wts, K_XY_only=K_XY_only, add_dot=.1)
def _mix_rq_001dot_kernel(X, Y, alphas=[.1, 1., 10.], wts=None, K_XY_only=False):
    return _mix_rq_kernel(X, Y, alphas=alphas, wts=wts, K_XY_only=K_XY_only, add_dot=.01)
def _tanh_mix_rq_kernel(X, Y, K_XY_only=False):
    """RQ mixture kernel applied after squashing inputs with tanh."""
    return _mix_rq_kernel(tf.tanh(X), tf.tanh(Y), K_XY_only=K_XY_only)
def _mix_rq_kernel(X, Y, alphas=[.1, 1., 10.], wts=None, K_XY_only=False, add_dot=.0):
    """
    Rational quadratic kernel
    http://www.cs.toronto.edu/~duvenaud/cookbook/index.html

    Weighted mixture over `alphas`, optionally plus `add_dot` times the linear
    kernel. Returns (K_XX, K_XY, K_YY, sum_of_weights), or just K_XY when
    K_XY_only is True.
    """
    if wts is None:
        wts = [1.] * len(alphas)
    XX = tf.matmul(X, X, transpose_b=True)
    XY = tf.matmul(X, Y, transpose_b=True)
    YY = tf.matmul(Y, Y, transpose_b=True)
    X_sqnorms = tf.diag_part(XX)
    Y_sqnorms = tf.diag_part(YY)
    # r/c broadcast a vector across rows/columns respectively.
    r = lambda x: tf.expand_dims(x, 0)
    c = lambda x: tf.expand_dims(x, 1)
    K_XX, K_XY, K_YY = 0., 0., 0.
    # Squared distances, clamped at 0 against floating-point cancellation.
    XYsqnorm = tf.maximum(-2. * XY + c(X_sqnorms) + r(Y_sqnorms), 0.)
    for alpha, wt in zip(alphas, wts):
        # (1 + d^2 / (2 alpha))^(-alpha), computed in log space.
        logXY = tf.log(1. + XYsqnorm/(2.*alpha))
        K_XY += wt * tf.exp(-alpha * logXY)
    if add_dot > 0:
        K_XY += tf.cast(add_dot, tf.float32) * XY
    if K_XY_only:
        return K_XY
    XXsqnorm = tf.maximum(-2. * XX + c(X_sqnorms) + r(X_sqnorms), 0.)
    YYsqnorm = tf.maximum(-2. * YY + c(Y_sqnorms) + r(Y_sqnorms), 0.)
    for alpha, wt in zip(alphas, wts):
        logXX = tf.log(1. + XXsqnorm/(2.*alpha))
        logYY = tf.log(1. + YYsqnorm/(2.*alpha))
        K_XX += wt * tf.exp(-alpha * logXX)
        K_YY += wt * tf.exp(-alpha * logYY)
    if add_dot > 0:
        K_XX += tf.cast(add_dot, tf.float32) * XX
        K_YY += tf.cast(add_dot, tf.float32) * YY
    # wts = tf.reduce_sum(tf.cast(wts, tf.float32))
    wts = tf.reduce_sum(tf.cast(wts, tf.float32))
    return K_XX, K_XY, K_YY, wts
def mmd2(K, biased=False):
    """Squared MMD from a kernel tuple (K_XX, K_XY, K_YY, const_diagonal)."""
    K_XX, K_XY, K_YY, const_diagonal = K
    return _mmd2(K_XX, K_XY, K_YY, const_diagonal, biased) # numerics checked at _mmd2 return
def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
    """Biased or unbiased squared-MMD estimate from precomputed kernel matrices.

    The unbiased estimator removes the diagonal terms of K_XX / K_YY; when the
    kernel has a known constant diagonal, the trace is computed in closed form.
    """
    m = tf.cast(K_XX.get_shape()[0], tf.float32)
    n = tf.cast(K_YY.get_shape()[0], tf.float32)
    if biased:
        mmd2 = (tf.reduce_sum(K_XX) / (m * m)
                + tf.reduce_sum(K_YY) / (n * n)
                - 2 * tf.reduce_sum(K_XY) / (m * n))
    else:
        if const_diagonal is not False:
            const_diagonal = tf.cast(const_diagonal, tf.float32)
            trace_X = m * const_diagonal
            trace_Y = n * const_diagonal
        else:
            trace_X = tf.trace(K_XX)
            trace_Y = tf.trace(K_YY)
        mmd2 = ((tf.reduce_sum(K_XX) - trace_X) / (m * (m - 1))
                + (tf.reduce_sum(K_YY) - trace_Y) / (n * (n - 1))
                - 2 * tf.reduce_sum(K_XY) / (m * n))
    return mmd2
def mmd2_and_ratio(K, biased=False, min_var_est=_eps):
    """MMD^2, t-statistic-like ratio, and variance estimate from a kernel tuple."""
    K_XX, K_XY, K_YY, const_diagonal = K
    return _mmd2_and_ratio(K_XX, K_XY, K_YY, const_diagonal, biased, min_var_est)
def _mmd2_and_ratio(K_XX, K_XY, K_YY, const_diagonal=False, biased=False,
                    min_var_est=_eps):
    """Return (mmd2, mmd2 / sqrt(var), var); the variance is floored at min_var_est."""
    mmd2, var_est = _mmd2_and_variance(
        K_XX, K_XY, K_YY, const_diagonal=const_diagonal, biased=biased)
    ratio = mmd2 / tf.sqrt(tf.maximum(var_est, min_var_est))
    return mmd2, ratio, var_est
def _mmd2_and_variance(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
    """Squared MMD and an estimate of its variance, from precomputed kernel matrices.

    Requires equal sample sizes for X and Y. The variance formula follows the
    unbiased estimator's asymptotics; terms are accumulated from the row/column
    sums computed below.
    """
    m = tf.cast(K_XX.get_shape()[0], tf.float32) # Assumes X, Y are same shape
    ### Get the various sums of kernels that we'll use
    # Kts drop the diagonal, but we don't need to compute them explicitly
    if const_diagonal is not False:
        const_diagonal = tf.cast(const_diagonal, tf.float32)
        diag_X = diag_Y = const_diagonal
        sum_diag_X = sum_diag_Y = m * const_diagonal
        sum_diag2_X = sum_diag2_Y = m * const_diagonal**2
    else:
        diag_X = tf.diag_part(K_XX)
        diag_Y = tf.diag_part(K_YY)
        sum_diag_X = tf.reduce_sum(diag_X)
        sum_diag_Y = tf.reduce_sum(diag_Y)
        sum_diag2_X = sq_sum(diag_X)
        sum_diag2_Y = sq_sum(diag_Y)
    # Row sums with the diagonal removed ("Kt" = K-tilde).
    Kt_XX_sums = tf.reduce_sum(K_XX, 1) - diag_X
    Kt_YY_sums = tf.reduce_sum(K_YY, 1) - diag_Y
    K_XY_sums_0 = tf.reduce_sum(K_XY, 0)
    K_XY_sums_1 = tf.reduce_sum(K_XY, 1)
    Kt_XX_sum = tf.reduce_sum(Kt_XX_sums)
    Kt_YY_sum = tf.reduce_sum(Kt_YY_sums)
    K_XY_sum = tf.reduce_sum(K_XY_sums_0)
    # Sums of squared entries, diagonal excluded for K_XX / K_YY.
    Kt_XX_2_sum = sq_sum(K_XX) - sum_diag2_X
    Kt_YY_2_sum = sq_sum(K_YY) - sum_diag2_Y
    K_XY_2_sum = sq_sum(K_XY)
    if biased:
        mmd2 = ((Kt_XX_sum + sum_diag_X) / (m * m)
                + (Kt_YY_sum + sum_diag_Y) / (m * m)
                - 2 * K_XY_sum / (m * m))
    else:
        mmd2 = ((Kt_XX_sum + sum_diag_X) / (m * (m-1))
                + (Kt_YY_sum + sum_diag_Y) / (m * (m-1))
                - 2 * K_XY_sum / (m * m))
    var_est = (
        2 / (m**2 * (m-1)**2) * (
            2 * sq_sum(Kt_XX_sums) - Kt_XX_2_sum
            + 2 * sq_sum(Kt_YY_sums) - Kt_YY_2_sum)
        - (4*m-6) / (m**3 * (m-1)**3) * (Kt_XX_sum**2 + Kt_YY_sum**2)
        + 4*(m-2) / (m**3 * (m-1)**2) * (
            sq_sum(K_XY_sums_1) + sq_sum(K_XY_sums_0))
        - 4 * (m-3) / (m**3 * (m-1)**2) * K_XY_2_sum
        - (8*m - 12) / (m**5 * (m-1)) * K_XY_sum**2
        + 8 / (m**3 * (m-1)) * (
            1/m * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum
            - dot(Kt_XX_sums, K_XY_sums_1)
            - dot(Kt_YY_sums, K_XY_sums_0))
    )
    return mmd2, var_est
def diff_polynomial_mmd2_and_ratio(X, Y, Z):
    """Difference of MMD^2(X, Y) and MMD^2(X, Z) under a cubic polynomial kernel,
    plus a t-ratio; used to compare two models Y, Z against reference X."""
    dim = tf.cast(X.get_shape()[1], tf.float32)
    # TODO: could definitely do this faster
    # Cubic polynomial kernel: k(a, b) = (<a, b> / dim + 1)^3.
    K_XY = (tf.matmul(X, Y, transpose_b=True) / dim + 1) ** 3
    K_XZ = (tf.matmul(X, Z, transpose_b=True) / dim + 1) ** 3
    K_YY = (tf.matmul(Y, Y, transpose_b=True) / dim + 1) ** 3
    K_ZZ = (tf.matmul(Z, Z, transpose_b=True) / dim + 1) ** 3
    return _diff_mmd2_and_ratio(K_XY, K_XZ, K_YY, K_ZZ, const_diagonal=False)
def diff_polynomial_mmd2_and_ratio_with_saving(X, Y, saved_sums_for_Z):
    """Same as above, but Z's kernel sums were precomputed; also returns Y's
    sums so they can be reused as `saved_sums_for_Z` in a later call."""
    dim = tf.cast(X.get_shape()[1], tf.float32)
    # TODO: could definitely do this faster
    K_XY = (tf.matmul(X, Y, transpose_b=True) / dim + 1) ** 3
    K_YY = (tf.matmul(Y, Y, transpose_b=True) / dim + 1) ** 3
    m = tf.cast(K_YY.get_shape()[0], tf.float32)
    Y_related_sums = _get_sums(K_XY, K_YY)
    mmd2_diff, ratio = _diff_mmd2_and_ratio_from_sums(Y_related_sums, saved_sums_for_Z, m)
    return mmd2_diff, ratio, Y_related_sums
def _diff_mmd2_and_ratio(K_XY, K_XZ, K_YY, K_ZZ, const_diagonal=False):
    """Compute the MMD^2 difference and ratio directly from kernel matrices."""
    m = tf.cast(K_YY.get_shape()[0], tf.float32) # Assumes X, Y, Z are same shape
    ### Get the various sums of kernels that we'll use
    # Kts drop the diagonal, but we don't need to explicitly form them
    return _diff_mmd2_and_ratio_from_sums(
        _get_sums(K_XY, K_YY, const_diagonal),
        _get_sums(K_XZ, K_ZZ, const_diagonal),
        m,
        const_diagonal=const_diagonal
    )
def _diff_mmd2_and_ratio_from_sums(Y_related_sums, Z_related_sums, m, const_diagonal=False):
    """Compute MMD^2(X,Y) - MMD^2(X,Z) and its studentized ratio from
    precomputed kernel sums (as produced by _get_sums).

    Returns (mmd2_diff, ratio) where ratio = mmd2_diff / sqrt(max(var, _eps)).
    """
    Kt_YY_sums, Kt_YY_2_sum, K_XY_sums_0, K_XY_sums_1, K_XY_2_sum = Y_related_sums
    Kt_ZZ_sums, Kt_ZZ_2_sum, K_XZ_sums_0, K_XZ_sums_1, K_XZ_2_sum = Z_related_sums
    Kt_YY_sum = tf.reduce_sum(Kt_YY_sums)
    Kt_ZZ_sum = tf.reduce_sum(Kt_ZZ_sums)
    K_XY_sum = tf.reduce_sum(K_XY_sums_0)
    K_XZ_sum = tf.reduce_sum(K_XZ_sums_0)
    # TODO: turn these into dot products?
    # should figure out if that's faster or not on GPU / with theano...
    ### Estimators for the various terms involved
    muY_muY = Kt_YY_sum / (m * (m-1))
    muZ_muZ = Kt_ZZ_sum / (m * (m-1))
    muX_muY = K_XY_sum / (m * m)
    muX_muZ = K_XZ_sum / (m * m)
    E_y_muY_sq = (sq_sum(Kt_YY_sums) - Kt_YY_2_sum) / (m*(m-1)*(m-2))
    E_z_muZ_sq = (sq_sum(Kt_ZZ_sums) - Kt_ZZ_2_sum) / (m*(m-1)*(m-2))
    E_x_muY_sq = (sq_sum(K_XY_sums_1) - K_XY_2_sum) / (m*m*(m-1))
    E_x_muZ_sq = (sq_sum(K_XZ_sums_1) - K_XZ_2_sum) / (m*m*(m-1))
    E_y_muX_sq = (sq_sum(K_XY_sums_0) - K_XY_2_sum) / (m*m*(m-1))
    E_z_muX_sq = (sq_sum(K_XZ_sums_0) - K_XZ_2_sum) / (m*m*(m-1))
    E_y_muY_y_muX = dot(Kt_YY_sums, K_XY_sums_0) / (m*m*(m-1))
    E_z_muZ_z_muX = dot(Kt_ZZ_sums, K_XZ_sums_0) / (m*m*(m-1))
    E_x_muY_x_muZ = dot(K_XY_sums_1, K_XZ_sums_1) / (m*m*m)
    E_kyy2 = Kt_YY_2_sum / (m * (m-1))
    E_kzz2 = Kt_ZZ_2_sum / (m * (m-1))
    E_kxy2 = K_XY_2_sum / (m * m)
    E_kxz2 = K_XZ_2_sum / (m * m)
    ### Combine into overall estimators
    mmd2_diff = muY_muY - 2 * muX_muY - muZ_muZ + 2 * muX_muZ
    first_order = 4 * (m-2) / (m * (m-1)) * (
        E_y_muY_sq - muY_muY**2
        + E_x_muY_sq - muX_muY**2
        + E_y_muX_sq - muX_muY**2
        + E_z_muZ_sq - muZ_muZ**2
        + E_x_muZ_sq - muX_muZ**2
        + E_z_muX_sq - muX_muZ**2
        - 2 * E_y_muY_y_muX + 2 * muY_muY * muX_muY
        - 2 * E_x_muY_x_muZ + 2 * muX_muY * muX_muZ
        - 2 * E_z_muZ_z_muX + 2 * muZ_muZ * muX_muZ
    )
    second_order = 2 / (m * (m-1)) * (
        E_kyy2 - muY_muY**2
        + 2 * E_kxy2 - 2 * muX_muY**2
        + E_kzz2 - muZ_muZ**2
        + 2 * E_kxz2 - 2 * muX_muZ**2
        - 4 * E_y_muY_y_muX + 4 * muY_muY * muX_muY
        - 4 * E_x_muY_x_muZ + 4 * muX_muY * muX_muZ
        - 4 * E_z_muZ_z_muX + 4 * muZ_muZ * muX_muZ
    )
    var_est = first_order + second_order
    # Floor the variance at _eps to avoid division blow-ups.
    ratio = mmd2_diff / mysqrt(tf.maximum(var_est, _eps))
    return mmd2_diff, ratio
def _get_sums(K_XY, K_YY, const_diagonal=False):
    """Kernel-sum statistics reused by the diff-MMD estimators.

    Returns (Kt_YY_sums, Kt_YY_2_sum, K_XY_sums_0, K_XY_sums_1, K_XY_2_sum),
    where "Kt" quantities exclude the diagonal of K_YY.
    """
    m = tf.cast(K_YY.get_shape()[0], tf.float32) # Assumes X, Y, Z are same shape
    ### Get the various sums of kernels that we'll use
    # Kts drop the diagonal, but we don't need to explicitly form them
    if const_diagonal is not False:
        const_diagonal = tf.cast(const_diagonal, tf.float32)
        diag_Y = const_diagonal
        sum_diag2_Y = m * const_diagonal**2
    else:
        diag_Y = tf.diag_part(K_YY)
        sum_diag2_Y = sq_sum(diag_Y)
    Kt_YY_sums = tf.reduce_sum(K_YY, 1) - diag_Y
    K_XY_sums_0 = tf.reduce_sum(K_XY, 0)
    K_XY_sums_1 = tf.reduce_sum(K_XY, 1)
    # TODO: turn these into dot products?
    # should figure out if that's faster or not on GPU / with theano...
    Kt_YY_2_sum = sq_sum(K_YY) - sum_diag2_Y
    K_XY_2_sum = sq_sum(K_XY)
    return Kt_YY_sums, Kt_YY_2_sum, K_XY_sums_0, K_XY_sums_1, K_XY_2_sum
def np_diff_polynomial_mmd2_and_ratio_with_saving(X, Y, saved_sums_for_Z):
    """NumPy version of diff_polynomial_mmd2_and_ratio_with_saving.

    When saved_sums_for_Z is None, only computes and returns Y's kernel sums
    (for use as the saved sums in a subsequent call); otherwise returns
    (mmd2_diff, ratio, Y_related_sums).
    """
    dim = float(X.shape[1])
    # TODO: could definitely do this faster
    # Cubic polynomial kernel: k(a, b) = (<a, b> / dim + 1)^3.
    K_XY = (np.dot(X, Y.transpose()) / dim + 1) ** 3
    K_YY = (np.dot(Y, Y.transpose()) / dim + 1) ** 3
    m = float(K_YY.shape[0])
    Y_related_sums = _np_get_sums(K_XY, K_YY)
    if saved_sums_for_Z is None:
        return Y_related_sums
    mmd2_diff, ratio = _np_diff_mmd2_and_ratio_from_sums(Y_related_sums, saved_sums_for_Z, m)
    return mmd2_diff, ratio, Y_related_sums
def _np_diff_mmd2_and_ratio_from_sums(Y_related_sums, Z_related_sums, m, const_diagonal=False):
    """NumPy version of _diff_mmd2_and_ratio_from_sums.

    Computes MMD^2(X,Y) - MMD^2(X,Z) and its studentized ratio from
    precomputed kernel sums (as produced by _np_get_sums).
    """
    Kt_YY_sums, Kt_YY_2_sum, K_XY_sums_0, K_XY_sums_1, K_XY_2_sum = Y_related_sums
    Kt_ZZ_sums, Kt_ZZ_2_sum, K_XZ_sums_0, K_XZ_sums_1, K_XZ_2_sum = Z_related_sums
    Kt_YY_sum = Kt_YY_sums.sum()
    Kt_ZZ_sum = Kt_ZZ_sums.sum()
    K_XY_sum = K_XY_sums_0.sum()
    K_XZ_sum = K_XZ_sums_0.sum()
    # TODO: turn these into dot products?
    # should figure out if that's faster or not on GPU / with theano...
    ### Estimators for the various terms involved
    muY_muY = Kt_YY_sum / (m * (m-1))
    muZ_muZ = Kt_ZZ_sum / (m * (m-1))
    muX_muY = K_XY_sum / (m * m)
    muX_muZ = K_XZ_sum / (m * m)
    E_y_muY_sq = (np.dot(Kt_YY_sums, Kt_YY_sums) - Kt_YY_2_sum) / (m*(m-1)*(m-2))
    E_z_muZ_sq = (np.dot(Kt_ZZ_sums, Kt_ZZ_sums) - Kt_ZZ_2_sum) / (m*(m-1)*(m-2))
    E_x_muY_sq = (np.dot(K_XY_sums_1, K_XY_sums_1) - K_XY_2_sum) / (m*m*(m-1))
    E_x_muZ_sq = (np.dot(K_XZ_sums_1, K_XZ_sums_1) - K_XZ_2_sum) / (m*m*(m-1))
    E_y_muX_sq = (np.dot(K_XY_sums_0, K_XY_sums_0) - K_XY_2_sum) / (m*m*(m-1))
    E_z_muX_sq = (np.dot(K_XZ_sums_0, K_XZ_sums_0) - K_XZ_2_sum) / (m*m*(m-1))
    E_y_muY_y_muX = np.dot(Kt_YY_sums, K_XY_sums_0) / (m*m*(m-1))
    E_z_muZ_z_muX = np.dot(Kt_ZZ_sums, K_XZ_sums_0) / (m*m*(m-1))
    E_x_muY_x_muZ = np.dot(K_XY_sums_1, K_XZ_sums_1) / (m*m*m)
    E_kyy2 = Kt_YY_2_sum / (m * (m-1))
    E_kzz2 = Kt_ZZ_2_sum / (m * (m-1))
    E_kxy2 = K_XY_2_sum / (m * m)
    E_kxz2 = K_XZ_2_sum / (m * m)
    ### Combine into overall estimators
    mmd2_diff = muY_muY - 2 * muX_muY - muZ_muZ + 2 * muX_muZ
    first_order = 4 * (m-2) / (m * (m-1)) * (
        E_y_muY_sq - muY_muY**2
        + E_x_muY_sq - muX_muY**2
        + E_y_muX_sq - muX_muY**2
        + E_z_muZ_sq - muZ_muZ**2
        + E_x_muZ_sq - muX_muZ**2
        + E_z_muX_sq - muX_muZ**2
        - 2 * E_y_muY_y_muX + 2 * muY_muY * muX_muY
        - 2 * E_x_muY_x_muZ + 2 * muX_muY * muX_muZ
        - 2 * E_z_muZ_z_muX + 2 * muZ_muZ * muX_muZ
    )
    second_order = 2 / (m * (m-1)) * (
        E_kyy2 - muY_muY**2
        + 2 * E_kxy2 - 2 * muX_muY**2
        + E_kzz2 - muZ_muZ**2
        + 2 * E_kxz2 - 2 * muX_muZ**2
        - 4 * E_y_muY_y_muX + 4 * muY_muY * muX_muY
        - 4 * E_x_muY_x_muZ + 4 * muX_muY * muX_muZ
        - 4 * E_z_muZ_z_muX + 4 * muZ_muZ * muX_muZ
    )
    var_est = first_order + second_order
    # Floor the variance at _eps to avoid division blow-ups.
    ratio = mmd2_diff / np.sqrt(max(var_est, _eps))
    return mmd2_diff, ratio
def _np_get_sums(K_XY, K_YY, const_diagonal=False):
m = float(K_YY.shape[0]) # Assumes X, Y, Z are same shape
### Get the various sums of kernels that we'll use
# Kts drop the diagonal, but we don't need to explicitly form them
if const_diagonal is not False:
const_diagonal = float(const_diagonal)
diag_Y = const_diagonal
sum_diag2_Y = m * const_diagonal**2
else:
diag_Y = np.diag(K_YY)
sum_diag2_Y = np.dot(diag_Y, diag_Y)
Kt_YY_sums = K_YY.sum(axis=1) - diag_Y
K_XY_sums_0 = K_XY.sum(axis=0)
K_XY_sums_1 = K_XY.sum(axis=1)
# TODO: turn these into dot products?
# should figure out if that's faster or not on GPU / with theano...
Kt_YY_2_sum = (K_YY ** 2).sum() - sum_diag2_Y
K_XY_2_sum = (K_XY ** 2).sum()
return Kt_YY_sums, Kt_YY_2_sum, K_XY_sums_0, K_XY_sums_1, K_XY_2_sum
| 17,404 | 33.465347 | 102 | py |
GANFingerprints | GANFingerprints-master/CramerGAN/gan/core/model.py | from __future__ import division, print_function
import os, sys, time, pprint, numpy as np
from . import mmd
from .ops import safer_norm, tf
from .architecture import get_networks
from .pipeline import get_pipeline
from utils import timer, scorer, misc
class MMD_GAN(object):
    def __init__(self, sess, config,
                 batch_size=64, output_size=64,
                 z_dim=100, c_dim=3, data_dir='./data'):
        """
        Args:
            sess: TensorFlow session
            batch_size: The size of batch. Should be specified before training.
            output_size: (optional) The resolution in pixels of the images. [64]
            z_dim: (optional) Dimension of dim for Z. [100]
            gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
            df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
            gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024]
            dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
            c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
        """
        # A negative D learning rate means "same as the generator's".
        if config.learning_rate_D < 0:
            config.learning_rate_D = config.learning_rate
        self.timer = timer.Timer()
        self.dataset = config.dataset
        # The architecture name overrides the requested output resolution.
        if config.architecture == 'dc128':
            output_size = 128
        if config.architecture in ['dc64', 'dcgan64']:
            output_size = 64
        self.sess = sess
        # real_batch_size == -1 means "same as the generator batch size".
        if config.real_batch_size == -1:
            config.real_batch_size = config.batch_size
        self.config = config
        self.is_grayscale = (c_dim == 1)
        self.batch_size = batch_size
        self.real_batch_size = config.real_batch_size
        self.sample_size = 64 if self.config.is_train else batch_size
        self.output_size = output_size
        self.data_dir = data_dir
        self.z_dim = z_dim
        self.gf_dim = config.gf_dim
        self.df_dim = config.df_dim
        self.dof_dim = self.config.dof_dim
        self.c_dim = c_dim
        # Build a human-readable run description used for all output paths.
        discriminator_desc = '_dc'
        if self.config.learning_rate_D == self.config.learning_rate:
            lr = 'lr%.8f' % self.config.learning_rate
        else:
            lr = 'lr%.8fG%fD' % (self.config.learning_rate, self.config.learning_rate_D)
        arch = '%dx%d' % (self.config.gf_dim, self.config.df_dim)
        self.description = ("%s%s_%s%s_%sd%d-%d-%d_%s_%s_%s" % (
            self.dataset, arch,
            self.config.architecture, discriminator_desc,
            self.config.kernel, self.config.dsteps,
            self.config.start_dsteps, self.config.gsteps, self.batch_size,
            self.output_size, lr))
        if self.config.batch_norm:
            self.description += '_bn'
        self._ensure_dirs()
        stdout = sys.stdout
        # Optionally redirect all stdout/stderr to a per-run log file
        # (originals kept on self.old_stdout / self.old_stderr).
        if self.config.log:
            self.old_stdout = sys.stdout
            self.old_stderr = sys.stderr
            self.log_file = open(os.path.join(self.sample_dir, 'log.txt'), 'w', buffering=1)
            print('Execution start time: %s' % time.ctime())
            print('Log file: %s' % self.log_file)
            stdout = self.log_file
            sys.stdout = self.log_file
            sys.stderr = self.log_file
        if config.compute_scores:
            self.scorer = scorer.Scorer(self.dataset, config.MMD_lr_scheduler, stdout=stdout)
        print('Execution start time: %s' % time.ctime())
        #pprint.PrettyPrinter().pprint(self.config.__dict__['__flags'])
        self.build_model()
        # True when the graph variables are already initialized (training mode).
        self.initialized_for_sampling = config.is_train
def _ensure_dirs(self, folders=['sample', 'log', 'checkpoint']):
if type(folders) == str:
folders = [folders]
for folder in folders:
ff = folder + '_dir'
if not os.path.exists(ff):
os.makedirs(ff)
self.__dict__[ff] = os.path.join(self.config.__getattr__(ff),
self.config.name + self.config.suffix,
self.description)
if not os.path.exists(self.__dict__[ff]):
os.makedirs(self.__dict__[ff])
    def build_model(self):
        """Build the full TF graph: inputs, generator, discriminator, losses,
        summaries and the saver. Called once from __init__."""
        self.global_step = tf.Variable(0, name="global_step", trainable=False)
        self.lr = tf.Variable(self.config.learning_rate, name='lr',
                              trainable=False, dtype=tf.float32)
        # Multiplicative LR decay, floored at 1e-6.
        self.lr_decay_op = self.lr.assign(tf.maximum(self.lr * self.config.decay_rate, 1.e-6))
        with tf.variable_scope('loss'):
            # The gradient-penalty coefficient is a (non-trainable) variable so
            # it can itself be decayed during training.
            if self.config.is_train and (self.config.gradient_penalty > 0):
                self.gp = tf.Variable(self.config.gradient_penalty,
                                      name='gradient_penalty',
                                      trainable=False, dtype=tf.float32)
                self.gp_decay_op = self.gp.assign(self.gp * self.config.gp_decay_rate)
        self.set_pipeline()
        # Fresh noise each step for training; a fixed noise batch for sampling.
        self.z = tf.random_uniform([self.batch_size, self.z_dim], minval=-1.,
                                   maxval=1., dtype=tf.float32, name='z')
        self.sample_z = tf.constant(np.random.uniform(-1, 1, size=(self.sample_size,
                                                                   self.z_dim)).astype(np.float32),
                                    dtype=tf.float32, name='sample_z')
        Generator, Discriminator = get_networks(self.config.architecture)
        generator = Generator(self.gf_dim, self.c_dim, self.output_size, self.config.batch_norm)
        # Discriminator batch norm is disabled whenever a gradient penalty is
        # used (BN breaks the per-sample gradient penalty).
        dbn = self.config.batch_norm & (self.config.gradient_penalty <= 0)
        self.discriminator = Discriminator(self.df_dim, self.dof_dim, dbn)
        # tf.summary.histogram("z", self.z)
        self.G = generator(self.z, self.batch_size)
        self.sampler = generator(self.sample_z, self.sample_size)
        # Keep all intermediate discriminator layers (used by the L2 penalty
        # and by save_layers); 'hF' is the final feature output.
        self.d_images_layers = self.discriminator(self.images,
                                                  self.real_batch_size, return_layers=True)
        self.d_G_layers = self.discriminator(self.G, self.batch_size,
                                             return_layers=True)
        self.d_images = self.d_images_layers['hF']
        self.d_G = self.d_G_layers['hF']
        if self.config.is_train:
            self.set_loss(self.d_G, self.d_images)
        # Image-grid summaries; block is the grid side length.
        block = min(8, int(np.sqrt(self.real_batch_size)), int(np.sqrt(self.batch_size)))
        tf.summary.image("train/input image",
                         self.imageRearrange(tf.clip_by_value(self.images, 0, 1), block))
        tf.summary.image("train/gen image",
                         self.imageRearrange(tf.clip_by_value(self.G, 0, 1), block))
        # Variables are split by the 'd_'/'g_' naming convention of the nets.
        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]
        self.saver = tf.train.Saver(max_to_keep=2)
        print('[*] Model built.')
    def set_loss(self, G, images):
        """Define the MMD losses from discriminator features of fake (G) and
        real (images) batches; D maximizes the MMD, G minimizes it."""
        # Look up the kernel function by name, e.g. mmd._rbf_kernel.
        kernel = getattr(mmd, '_%s_kernel' % self.config.kernel)
        kerGI = kernel(G, images)
        with tf.variable_scope('loss'):
            self.g_loss = mmd.mmd2(kerGI)
            self.d_loss = -self.g_loss  # adversarial: D maximizes MMD^2
            self.optim_name = 'kernel_loss'
        self.add_gradient_penalty(kernel, G, images)
        self.add_l2_penalty()
        print('[*] Loss set')
    def add_gradient_penalty(self, kernel, fake, real):
        """Add a witness-function gradient penalty (Scaled MMD / WGAN-GP style)
        to d_loss, evaluated at random interpolates of real and fake images."""
        bs = min([self.batch_size, self.real_batch_size])
        real, fake = real[:bs], fake[:bs]
        alpha = tf.random_uniform(shape=[bs, 1, 1, 1])
        real_data = self.images[:bs]  # discriminator input level
        fake_data = self.G[:bs]  # discriminator input level
        x_hat_data = (1. - alpha) * real_data + alpha * fake_data
        x_hat = self.discriminator(x_hat_data, bs)
        # Mean kernel evaluation between D(x_hat) and the given features.
        Ekx = lambda yy: tf.reduce_mean(kernel(x_hat, yy, K_XY_only=True), axis=1)
        Ekxr, Ekxf = Ekx(real), Ekx(fake)
        # MMD witness function at the interpolates; penalize ||grad|| != 1.
        witness = Ekxr - Ekxf
        gradients = tf.gradients(witness, [x_hat_data])[0]
        penalty = tf.reduce_mean(tf.square(safer_norm(gradients, axis=1) - 1.0))
        with tf.variable_scope('loss'):
            if self.config.gradient_penalty > 0:
                self.d_loss += penalty * self.gp
                self.optim_name += ' (gp %.1f)' % self.config.gradient_penalty
                tf.summary.scalar('dx_penalty', penalty)
                print('[*] Gradient penalty added')
            tf.summary.scalar(self.optim_name + ' G', self.g_loss)
            tf.summary.scalar(self.optim_name + ' D', self.d_loss)
    def add_l2_penalty(self):
        """Optionally add an L2 penalty on all discriminator layer activations
        (both real and fake branches) to d_loss."""
        if self.config.L2_discriminator_penalty > 0:
            penalty = 0.0
            # Per-sample mean squared activation, summed over all layers.
            for _, layer in self.d_G_layers.items():
                penalty += tf.reduce_mean(tf.reshape(tf.square(layer), [self.batch_size, -1]), axis=1)
            for _, layer in self.d_images_layers.items():
                penalty += tf.reduce_mean(tf.reshape(tf.square(layer), [self.batch_size, -1]), axis=1)
            self.d_L2_penalty = self.config.L2_discriminator_penalty * tf.reduce_mean(penalty)
            self.d_loss += self.d_L2_penalty
            self.optim_name += ' (L2 dp %.6f)' % self.config.L2_discriminator_penalty
            # Merge consecutive parenthesized suffixes: ") (" -> ", ".
            self.optim_name = self.optim_name.replace(') (', ', ')
            tf.summary.scalar('L2_disc_penalty', self.d_L2_penalty)
            print('[*] L2 discriminator penalty added')
    def set_grads(self):
        """Create Adam optimizers and clipped gradient-apply ops for G and D."""
        with tf.variable_scope("G_grads"):
            self.g_optim = tf.train.AdamOptimizer(self.lr, beta1=self.config.beta1, beta2=0.9)
            self.g_gvs = self.g_optim.compute_gradients(
                loss=self.g_loss,
                var_list=self.g_vars
            )
            # Per-gradient norm clipping at 1.
            self.g_gvs = [(tf.clip_by_norm(gg, 1.), vv) for gg, vv in self.g_gvs]
            self.g_grads = self.g_optim.apply_gradients(
                self.g_gvs,
                global_step=self.global_step
            )  # minimizes self.g_loss <==> minimizes MMD
        with tf.variable_scope("D_grads"):
            # D's learning rate tracks self.lr, rescaled by the configured ratio.
            self.d_optim = tf.train.AdamOptimizer(
                self.lr * self.config.learning_rate_D / self.config.learning_rate,
                beta1=self.config.beta1, beta2=0.9
            )
            self.d_gvs = self.d_optim.compute_gradients(
                loss=self.d_loss,
                var_list=self.d_vars
            )
            # negative gradients not needed - by definition d_loss = -optim_loss
            self.d_gvs = [(tf.clip_by_norm(gg, 1.), vv) for gg, vv in self.d_gvs]
            # Note: no global_step here, so only G steps advance global_step.
            self.d_grads = self.d_optim.apply_gradients(self.d_gvs)  # minimizes self.d_loss <==> max MMD
        print('[*] Gradients set')
    def train_step(self, batch_images=None):
        """Run one optimization step (either a D step or a G step, alternated
        via d_counter/g_counter) and return (g_loss, d_loss, step).

        The schedule runs `dsteps` D updates per G update (`start_dsteps` near
        step 0 and every 500 steps). `batch_images` is unused: input comes
        from the tf input pipeline.
        """
        step = self.sess.run(self.global_step)
        # Summaries: every 50 steps early on, every 1000 later, and always
        # right after a summary-writing error.
        write_summary = ((np.mod(step, 50) == 0) and (step < 1000)) \
            or (np.mod(step, 1000) == 0) or (self.err_counter > 0)
        if (self.g_counter == 0) and (self.d_grads is not None):
            d_steps = self.config.dsteps
            if ((step % 500 == 0) or (step < 20)):
                d_steps = self.config.start_dsteps
            # d_counter cycles 0..d_steps; 0 means "do a G step now".
            self.d_counter = (self.d_counter + 1) % (d_steps + 1)
        if self.d_counter == 0:
            self.g_counter = (self.g_counter + 1) % self.config.gsteps
        eval_ops = [self.g_gvs, self.d_gvs, self.g_loss, self.d_loss]
        if self.config.is_demo:
            summary_str, g_grads, d_grads, g_loss, d_loss = self.sess.run(
                [self.TrainSummary] + eval_ops
            )
        else:
            if self.d_counter == 0:
                if write_summary:
                    _, summary_str, g_grads, d_grads, g_loss, d_loss = self.sess.run(
                        [self.g_grads, self.TrainSummary] + eval_ops
                    )
                else:
                    _, g_grads, d_grads, g_loss, d_loss = self.sess.run([self.g_grads] + eval_ops)
            else:
                _, g_grads, d_grads, g_loss, d_loss = self.sess.run([self.d_grads] + eval_ops)
        et = self.timer(step, "g step" if (self.d_counter == 0) else "d step", False)
        # `~` works here because np.isnan returns a numpy bool_ (elementwise not).
        assert ~np.isnan(g_loss), et + "NaN g_loss, epoch: "
        assert ~np.isnan(d_loss), et + "NaN d_loss, epoch: "
        # if G STEP, after D steps
        if self.d_counter == 0:
            if step % 10000 == 0:
                try:
                    self.writer.add_summary(summary_str, step)
                    self.err_counter = 0
                except Exception as e:
                    print('Step %d summary exception. ' % step, e)
                    self.err_counter += 1
            if write_summary:
                self.timer(step, "%s, G: %.8f, D: %.8f" % (self.optim_name, g_loss, d_loss))
                if self.config.L2_discriminator_penalty > 0:
                    print(' ' * 22 + ('Discriminator L2 penalty: %.8f' % self.sess.run(self.d_L2_penalty)))
            # Decay lr / gp five times over the whole run (unless the adaptive
            # MMD scheduler is in charge of the learning rate).
            if np.mod(step + 1, self.config.max_iteration//5) == 0:
                if not self.config.MMD_lr_scheduler:
                    # self.lr *= self.config.decay_rate
                    self.sess.run(self.lr_decay_op)
                    print('current learning rate: %f' % self.sess.run(self.lr))
                if (self.config.gp_decay_rate > 0) and (self.config.gradient_penalty > 0):
                    self.sess.run(self.gp_decay_op)
                    print('current gradient penalty: %f' % self.sess.run(self.gp))
            if self.config.compute_scores:
                self.scorer.compute(self, step)
        return g_loss, d_loss, step
    def train_init(self):
        """Initialize variables, summaries and counters, and resume from a
        checkpoint when one exists (re-applying any lr/gp decays already due)."""
        self.set_grads()
        self.sess.run(tf.local_variables_initializer())
        self.sess.run(tf.global_variables_initializer())
        print('[*] Variables initialized.')
        self.TrainSummary = tf.summary.merge_all()
        self._ensure_dirs('log')
        self.writer = tf.summary.FileWriter(self.log_dir, self.sess.graph)
        self.d_counter, self.g_counter, self.err_counter = 0, 0, 0
        if self.load_checkpoint():
            print(""" [*] Load SUCCESS, re-starting at epoch %d with learning
                rate %.7f""" % (self.sess.run(self.global_step),
                                self.sess.run(self.lr)))
        else:
            print(" [!] Load failed...")
        # self.sess.run(self.lr.assign(self.config.learning_rate))
        # If gp is still at its initial value, the restored checkpoint predates
        # the scheduled decays: re-apply the decays the current step implies.
        # NOTE(review): `self.lr *= ...` rebinds the attribute to a tensor
        # rather than assigning the variable — confirm intended.
        if (not self.config.MMD_lr_scheduler) and (self.sess.run(self.gp) == self.config.gradient_penalty):
            step = self.sess.run(self.global_step)
            lr_decays_so_far = int((step * 5.)/self.config.max_iteration)
            self.lr *= self.config.decay_rate ** lr_decays_so_far
            if self.config.gp_decay_rate > 0:
                self.gp *= self.config.gp_decay_rate ** lr_decays_so_far
                print('current gradient penalty: %f' % self.sess.run(self.gp))
            print('current learning rate: %f' % self.sess.run(self.lr))
        print('[*] Model initialized for training')
    def set_pipeline(self):
        """Instantiate the dataset-specific input pipeline and expose its
        batched real-image tensor as self.images."""
        Pipeline = get_pipeline(self.dataset, self.config.suffix)
        pipe = Pipeline(self.output_size, self.c_dim, self.real_batch_size,
                        self.data_dir,
                        timer=self.timer, sample_dir=self.sample_dir)
        self.images = pipe.connect()
    def train(self):
        """Main training loop: run train_step until max_iteration, saving
        checkpoints/samples (and optionally layer outputs) along the way."""
        self.train_init()
        # Start the input-pipeline queue runner threads.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=self.sess, coord=coord)
        step = 0
        print('[ ] Training ... ')
        while step <= self.config.max_iteration:
            g_loss, d_loss, step = self.train_step()
            self.save_checkpoint_and_samples(step)
            if self.config.save_layer_outputs:
                self.save_layers(step)
        coord.request_stop()
        coord.join(threads)
def save_checkpoint(self, step=None):
self._ensure_dirs('checkpoint')
if step is None:
self.saver.save(self.sess,
os.path.join(self.checkpoint_dir, "best.model"))
else:
self.saver.save(self.sess,
os.path.join(self.checkpoint_dir, "MMDGAN.model"),
global_step=step)
def load_checkpoint(self):
print(" [*] Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(self.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(self.checkpoint_dir, ckpt_name))
return True
else:
return False
    def save_checkpoint_and_samples(self, step, freq=1000):
        """Every `freq` steps (on a G step), save a checkpoint and an 8x8 grid
        of generated samples as a png."""
        if (np.mod(step, freq) == 0) and (self.d_counter == 0):
            self.save_checkpoint(step)
            samples = self.sess.run(self.sampler)
            self._ensure_dirs('sample')
            p = os.path.join(self.sample_dir, 'train_{:02d}.png'.format(step))
            misc.save_images(samples[:64, :, :, :], [8, 8], p)
    def save_layers(self, step, freq=1000, n=256, layers=[-1, -2]):
        """At scheduled steps, dump n samples of selected discriminator layer
        outputs (real and fake branches) to an .npz file in sample_dir.

        `layers` indexes the sorted layer names (default: the last two).
        NOTE(review): when layers == 'all', `keys` is never assigned below —
        looks like a latent bug; confirm intended usage.
        """
        c = self.config.save_layer_outputs
        # Steps at which to save: every `freq`, plus powers of c (if c > 1).
        valid = list(freq * np.arange(self.config.max_iteration/freq + 1))
        if c > 1:
            valid += [int(k) for k in c**np.arange(np.log(freq)/np.log(c))]
        if (step in valid) and (self.d_counter == 0):
            if not (layers == 'all'):
                keys = [sorted(list(self.d_G_layers))[i] for i in layers]
            fake = [(key + '_fake', self.d_G_layers[key]) for key in keys]
            real = [(key + '_real', self.d_images_layers[key]) for key in keys]
            values = self._evaluate_tensors(dict(real + fake), n=n)
            path = os.path.join(self.sample_dir, 'layer_outputs_%d.npz' % step)
            np.savez(path, **values)
    def imageRearrange(self, image, block=4):
        """Tile the first block*block images of a batch into a single
        (1, size*block, size*block, c) image tensor for tf.summary.image."""
        image = tf.slice(image, [0, 0, 0, 0], [block * block, -1, -1, -1])
        x1 = tf.batch_to_space(image, [[0, 0], [0, 0]], block)
        # Reshape + transpose interleaves the block grid into one big image.
        image_r = tf.reshape(tf.transpose(tf.reshape(x1,
            [self.output_size, block, self.output_size, block, self.c_dim])
            , [1, 0, 3, 2, 4]),
            [1, self.output_size * block, self.output_size * block, self.c_dim])
        return image_r
def _evaluate_tensors(self, variable_dict, n=None):
if n is None:
n = self.batch_size
values = dict([(key, []) for key in variable_dict.keys()])
sampled = 0
while sampled < n:
vv = self.sess.run(variable_dict)
for key, val in vv.items():
values[key].append(val)
sampled += list(vv.items())[0][1].shape[0]
for key, val in values.items():
values[key] = np.concatenate(val, axis=0)[:n]
return values
    def get_samples(self, n=None, save=True, layers=[]):
        """Generate n samples (and optionally discriminator layer features).

        When save is False, returns the samples array (or a dict when layers
        are requested). When save is True, writes one png per sample into
        config.output_dir_of_test_samples, named by the model type.
        NOTE(review): `layers=[]` is a mutable default but is never mutated.
        """
        # Lazily initialize + restore the model when sampling outside training.
        if not (self.initialized_for_sampling or self.config.is_train):
            print('[*] Loading from ' + self.checkpoint_dir + '...')
            self.sess.run(tf.local_variables_initializer())
            self.sess.run(tf.global_variables_initializer())
            if self.load_checkpoint():
                print(" [*] Load SUCCESS, model trained up to epoch %d" % \
                      self.sess.run(self.global_step))
            else:
                print(" [!] Load failed...")
                return
        if len(layers) > 0:
            outputs = dict([(key + '_features', val) for key, val in self.d_G_layers.items()])
            # layers == 'all' keeps everything; otherwise index sorted names.
            if not (layers == 'all'):
                keys = [sorted(list(outputs.keys()))[i] for i in layers]
                outputs = dict([(key, outputs[key]) for key in keys])
        else:
            outputs = {}
        outputs['samples'] = self.G
        values = self._evaluate_tensors(outputs, n=n)
        if not save:
            if len(layers) > 0:
                return values
            return values['samples']
        if not os.path.isdir(self.config.output_dir_of_test_samples):
            os.mkdir(self.config.output_dir_of_test_samples)
        for key, val in values.items():
            if key == 'samples':
                for idx in range(val.shape[0]):
                    print('Generating png to %s: %d / %d...' % (self.config.output_dir_of_test_samples, idx, val.shape[0]), end='\r')
                    # File prefix encodes which model produced the samples.
                    # NOTE(review): an unknown config.model leaves `p` unset
                    # from a previous iteration (or raises on the first one).
                    if self.config.model == 'mmd':
                        p = os.path.join(self.config.output_dir_of_test_samples, 'MMD_{:08d}.png'.format(idx))
                    elif self.config.model == 'wgan_gp':
                        p = os.path.join(self.config.output_dir_of_test_samples, 'WGAN-GP_{:08d}.png'.format(idx))
                    elif self.config.model == 'cramer':
                        p = os.path.join(self.config.output_dir_of_test_samples, 'CRAMER_{:08d}.png'.format(idx))
                    misc.save_images(val[idx:idx+1, :, :, :], [1, 1], p)
GANFingerprints | GANFingerprints-master/CramerGAN/gan/core/wgan_gp.py | from .model import MMD_GAN, tf
class WGAN_GP(MMD_GAN):
    """WGAN with gradient penalty, reusing the MMD_GAN training machinery but
    replacing the kernel loss with the Wasserstein critic loss."""

    def __init__(self, sess, config, **kwargs):
        # A Wasserstein critic outputs a single scalar per sample.
        config.dof_dim = 1
        super(WGAN_GP, self).__init__(sess, config, **kwargs)

    def set_loss(self, G, images):
        """Set g_loss/d_loss from critic outputs on fake (G) and real (images)
        batches, adding the gradient penalty at random interpolates."""
        alpha = tf.random_uniform(shape=[self.batch_size, 1, 1, 1])
        real_data = self.images
        fake_data = self.G
        differences = fake_data - real_data
        interpolates0 = real_data + (alpha*differences)
        interpolates = self.discriminator(interpolates0, self.batch_size)
        gradients = tf.gradients(interpolates, [interpolates0])[0]
        # NOTE(review): reduces only axis 1; the standard WGAN-GP norm is over
        # all non-batch axes — confirm intended.
        slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
        gradient_penalty = tf.reduce_mean((slopes-1.)**2)
        self.gp = tf.get_variable('gradient_penalty', dtype=tf.float32,
                                  initializer=self.config.gradient_penalty)
        # Critic maximizes E[D(real)] - E[D(fake)]; written as a minimization.
        self.d_loss = tf.reduce_mean(G) - tf.reduce_mean(images) + self.gp * gradient_penalty
        self.g_loss = -tf.reduce_mean(G)
        self.optim_name = 'wgan_gp%d_loss' % int(self.config.gradient_penalty)
        tf.summary.scalar(self.optim_name + ' G', self.g_loss)
        tf.summary.scalar(self.optim_name + ' D', self.d_loss)
GANFingerprints | GANFingerprints-master/CramerGAN/gan/core/__init__.py | __all__= ['model', 'wgan_gp', 'cramer', 'ops', 'mmd', 'resnet', 'architecture']
| 80 | 39.5 | 79 | py |
GANFingerprints | GANFingerprints-master/CramerGAN/gan/core/ops.py | from tensorflow.python.framework import ops
from utils.misc import variable_summaries
from .mmd import _eps, tf
class batch_norm(object):
    """Callable wrapper around tf.contrib.layers.batch_norm with a fixed
    variable scope, so repeated calls reuse the same BN parameters."""

    def __init__(self, epsilon=1e-5, momentum = 0.9, name="batch_norm"):
        with tf.variable_scope(name):
            self.epsilon = epsilon
            self.momentum = momentum
            self.name = name

    def __call__(self, x, train=True):
        """Apply batch normalization to x; `train` toggles the moving-average
        update behavior."""
        return tf.contrib.layers.batch_norm(x,
                                            decay=self.momentum,
                                            updates_collections=None,
                                            epsilon=self.epsilon,
                                            scale=True,
                                            is_training=train,
                                            scope=self.name)
def binary_cross_entropy(preds, targets, name=None):
    """Computes binary cross entropy given `preds`.

    For brevity, let `x = preds`, `z = targets`. The logistic loss is

        loss(x, z) = -mean_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i]))

    Args:
        preds: A `Tensor` of type `float32` or `float64` (probabilities in (0, 1)).
        targets: A `Tensor` of the same type and shape as `preds`.
    """
    # Small epsilon inside the logs guards against log(0).
    eps = 1e-12
    with ops.op_scope([preds, targets], name, "bce_loss") as name:
        preds = ops.convert_to_tensor(preds, name="preds")
        targets = ops.convert_to_tensor(targets, name="targets")
        return tf.reduce_mean(-(targets * tf.log(preds + eps) +
                                (1. - targets) * tf.log(1. - preds + eps)))
def conv_cond_concat(x, y):
    """Concatenate conditioning vector on feature map axis."""
    # y is broadcast to x's spatial size via multiplication with ones, then
    # appended along the channel axis.
    # NOTE(review): tf.concat(axis_first, ...) is the pre-TF-1.0 argument
    # order — confirm against the TF version in use.
    x_shapes = x.get_shape()
    y_shapes = y.get_shape()
    return tf.concat(3, [x, y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])])
def conv2d(input_, output_dim,
           k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
           name="conv2d"):
    """2-D convolution (SAME padding) with bias, under variable scope `name`.

    k_h/k_w are the kernel size, d_h/d_w the strides; weights are initialized
    from a truncated normal with the given stddev.
    """
    with tf.variable_scope(name):
        # Add TensorBoard summaries only the first time this scope's
        # variables are created (avoids duplicates on reuse).
        scope_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                       tf.get_variable_scope().name)
        has_summary = any([('w' in v.op.name) for v in scope_vars])
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
        biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        if not has_summary:
            variable_summaries({'W': w, 'b': biases})
        return conv
def deconv2d(input_, output_shape,
             k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
             name="deconv2d", with_w=False):
    """Transposed 2-D convolution producing `output_shape`, with bias.

    Returns the output tensor, or (output, w, biases) when with_w is True.
    """
    with tf.variable_scope(name):
        # Add summaries only on first variable creation in this scope.
        scope_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                       tf.get_variable_scope().name)
        has_summary = any([('w' in v.op.name) for v in scope_vars])
        # filter : [height, width, output_channels, in_channels]
        w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
                            initializer=tf.random_normal_initializer(stddev=stddev))
        try:
            deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
                                            strides=[1, d_h, d_w, 1])
        # Support for verisons of TensorFlow before 0.7.0
        except AttributeError:
            deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
                                    strides=[1, d_h, d_w, 1])
        biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
        if not has_summary:
            variable_summaries({'W': w, 'b': biases})
        if with_w:
            return deconv, w, biases
        else:
            return deconv
def lrelu(x, leak=0.2, name="lrelu"):
    """Leaky ReLU: max(x, leak*x). `name` is accepted for API symmetry with
    the other ops but is currently unused."""
    return tf.maximum(x, leak*x)
def linear(input_, output_size, name="Linear", stddev=0.01, bias_start=0.0, with_w=False):
    """Fully-connected layer: input_ @ W + b under variable scope `name`.

    Returns the output tensor, or (output, W, b) when with_w is True.
    """
    shape = input_.get_shape().as_list()
    with tf.variable_scope(name):
        # Add summaries only on first variable creation in this scope.
        scope_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                       tf.get_variable_scope().name)
        has_summary = any([('Matrix' in v.op.name) for v in scope_vars])
        matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
                                 tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", [output_size],
                               initializer=tf.constant_initializer(bias_start))
        if not has_summary:
            variable_summaries({'W': matrix, 'b': bias})
        if with_w:
            return tf.matmul(input_, matrix) + bias, matrix, bias
        else:
            return tf.matmul(input_, matrix) + bias
class linear_n:
    """Weight-normalized linear layer (W = V * scale / ||V||, per column),
    with data-dependent initialization a la Salimans & Kingma's weight norm.

    NOTE(review): exact scope placement of the W/out construction relative to
    the variable scope is inferred — confirm against the original layout.
    """

    def __init__(self, input_, output_size, scope=None, stddev=0.1,
                 bias_start=0., train_scale=False):
        shape = input_.get_shape().as_list()
        with tf.variable_scope(scope or "Linear"):
            self.matrix = tf.get_variable(
                "Matrix", [shape[1], output_size], tf.float32,
                tf.random_normal_initializer(stddev=stddev))
            self.scale = tf.get_variable(
                "scale", [output_size], tf.float32, tf.constant_initializer(1.0),
                trainable=train_scale)
            self.b = tf.get_variable(
                "bias", [output_size], tf.float32, tf.constant_initializer(bias_start))
            self.scale_ = tf.get_variable(
                "scale_", [output_size], tf.float32, tf.constant_initializer(1.0))
            # Column-normalized weights scaled by the (optionally trainable) gain.
            self.W = self.matrix * (self.scale/tf.sqrt(tf.reduce_sum(tf.square(self.matrix),0)))
            self.out = self.output(input_)

    def output(self, inp):
        """Apply the layer: inp @ W + b."""
        return tf.matmul(inp, self.W) + self.b

    def init_op(self):
        """Data-dependent init: rescale scale/bias so the pre-activation has
        zero mean and unit variance on the init batch; returns the update op."""
        mean = tf.reduce_mean(self.out, 0)
        stdv = tf.sqrt(tf.reduce_mean(tf.square(self.out), 0))
        self.out = (self.out - mean)/stdv
        scale_update_op = tf.assign(self.scale, self.scale/stdv)
        b_update_op = tf.assign(self.b, -mean/stdv)
        return tf.group(*[scale_update_op, b_update_op])

    def l2_normalize_op(self):
        """Re-normalize W's columns (with epsilon) using the auxiliary scale_."""
        self.W = self.W * (self.scale_ / tf.sqrt(
            1e-6 + tf.reduce_sum(tf.square(self.W), 0)))
def safer_norm(tensor, axis=None, keep_dims=False, epsilon=_eps):
    """Numerically safe L2 norm: sqrt(sum(t^2) + eps).

    The epsilon inside the sqrt avoids the undefined gradient of sqrt(0).
    """
    sum_sq = tf.reduce_sum(tf.square(tensor), axis=axis, keep_dims=keep_dims)
    return tf.sqrt(sum_sq + epsilon)
def sq_sum(t, name=None):
    "The squared Frobenius-type norm of a tensor, sum(t ** 2)."
    with tf.name_scope(name, "SqSum", [t]):
        t = tf.convert_to_tensor(t, name='t')
        # l2_loss computes sum(t**2)/2, hence the factor of 2.
        return 2 * tf.nn.l2_loss(t)
def dot(x, y, name=None):
    "The dot product of two vectors x and y."
    with tf.name_scope(name, "Dot", [x, y]):
        x = tf.convert_to_tensor(x, name='x')
        y = tf.convert_to_tensor(y, name='y')
        # Both inputs must be rank-1; computed as a (1,n) @ (n,1) matmul
        # squeezed back to a scalar.
        x.get_shape().assert_has_rank(1)
        y.get_shape().assert_has_rank(1)
        return tf.squeeze(tf.matmul(tf.expand_dims(x, 0), tf.expand_dims(y, 1)))
| 7,220 | 38.244565 | 104 | py |
GANFingerprints | GANFingerprints-master/CramerGAN/gan/core/pipeline.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 11 14:11:46 2018
@author: mikolajbinkowski
"""
import os, time, lmdb, io
import numpy as np
import tensorflow as tf
from PIL import Image
from glob import glob
import matplotlib.pyplot as plt
from utils import misc
class Pipeline:
    """Base class for dataset input pipelines.

    Subclasses must define ``self.single_sample`` (a tensor yielding either a
    single example or, when ``self.shape`` is rank 4, a chunk of examples)
    before ``connect()`` is called.
    """

    def __init__(self, output_size, c_dim, batch_size, data_dir, **kwargs):
        self.output_size = output_size
        self.c_dim = c_dim
        # data_dir = os.path.join(self.data_dir, self.dataset)
        self.batch_size = batch_size
        # Size of one read chunk from disk (at least 4000 examples).
        self.read_batch = max(4000, batch_size * 10)
        self.read_count = 0
        self.data_dir = data_dir
        self.shape = [self.read_batch, self.output_size, self.output_size, self.c_dim]

    def _transform(self, x):
        """Hook for subclasses to post-process a batch (identity by default)."""
        return x

    def connect(self):
        """Wire single_sample into a shuffle-batch queue and return the
        transformed batch tensor of real images."""
        assert hasattr(self, 'single_sample'), 'Pipeline needs to have single_sample defined before connecting'
        self.single_sample.set_shape(self.shape)
        # Rank-4 shape means single_sample already yields many examples.
        ims = tf.train.shuffle_batch([self.single_sample], self.batch_size,
                                     capacity=self.read_batch,
                                     min_after_dequeue=self.read_batch//8,
                                     num_threads=16,
                                     enqueue_many=len(self.shape) == 4)
        return self._transform(ims)
class LMDB(Pipeline):
    """Pipeline reading images from an LMDB database (e.g. LSUN).

    Keys are enumerated once up front; a tf string queue picks random keys and
    a py_func reads chunks of images starting at that key.
    """

    def __init__(self, *args, **kwargs):
        super(LMDB, self).__init__(*args, **kwargs)
        self.timer = kwargs.get('timer', None)
        self.keys = []
        # One pass over the DB to collect all record keys.
        env = lmdb.open(self.data_dir, map_size=1099511627776, max_readers=100, readonly=True)
        with env.begin() as txn:
            cursor = txn.cursor()
            while cursor.next():
                self.keys.append(cursor.key())
        env.close()
        print('No. of records in lmdb database: %d' % len(self.keys))
        # tf queue for getting keys
        key_producer = tf.train.string_input_producer(self.keys, shuffle=True)
        single_key = key_producer.dequeue()
        self.single_sample = tf.py_func(self._get_sample_from_lmdb, [single_key], tf.float32)

    def _get_sample_from_lmdb(self, key, limit=None):
        """Read up to `limit` (default read_batch) decoded, center-scaled
        images starting at `key`, wrapping around and re-opening the DB on
        errors. Returns a float32 array."""
        if limit is None:
            limit = self.read_batch
        with tf.device('/cpu:0'):
            rc = self.read_count
            self.read_count += 1
            tt = time.time()
            self.timer(rc, 'lmdb: start reading chunk from database')
            ims = []
            db_count = 1
            while len(ims) < limit:
                # Re-open the environment on each attempt; a decode error
                # aborts the inner loop and retries with a fresh handle.
                env = lmdb.open(self.data_dir, map_size=1099511627776, max_readers=100, readonly=True)
                with env.begin(write=False) as txn:
                    cursor = txn.cursor()
                    cursor.set_key(key)
                    if not cursor.next():
                        cursor.first()
                    db_err = False
                    while (len(ims) < limit) and (not db_err):
                        try:
                            key, byte_arr = cursor.item()
                            byte_im = io.BytesIO(byte_arr)
                            # byte_im.seek(0)
                            im = Image.open(byte_im)
                            ims.append(misc.center_and_scale(im, size=self.output_size))
                        except Exception as e:
                            self.timer(rc, 'lmdb error: ' + str(e))
                            self.timer(rc, 'lmdb open no. %d failed at key %s, with %d collected images' % (db_count, repr(key), len(ims)))
                            db_count += 1
                            db_err = True
                        # Wrap to the first record at the end of the database.
                        if not cursor.next():
                            cursor.first()
                env.close()
            self.timer(rc, 'lmdb read time = %f' % (time.time() - tt))
            return np.asarray(ims, dtype=np.float32)

    def constant_sample(self, size):
        """Return `size` images starting from one randomly chosen key."""
        choice = np.random.choice(self.keys, 1)[0]
        return self._get_sample_from_lmdb(choice, limit=size)
class TfRecords(Pipeline):
    """Pipeline reading LSUN bedroom images from sharded TFRecord files
    (``lsun-<size>/bedroom_train_*`` under data_dir)."""

    def __init__(self, *args, **kwargs):
        # BUG FIX: the parent __init__ was never called, so self.data_dir,
        # self.output_size and self.c_dim were unset and construction raised
        # AttributeError on the very first line below.
        super(TfRecords, self).__init__(*args, **kwargs)
        regex = os.path.join(self.data_dir, 'lsun-%d/bedroom_train_*' % self.output_size)
        filename_queue = tf.train.string_input_producer(tf.gfile.Glob(regex), num_epochs=None)
        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(filename_queue)
        features = tf.parse_single_example(serialized_example, features={
            'image/class/label': tf.FixedLenFeature([1], tf.int64),
            'image/encoded': tf.FixedLenFeature([], tf.string),
        })
        image = tf.image.decode_jpeg(features['image/encoded'])
        # Scale pixel values to [0, 1].
        self.single_sample = tf.cast(image, tf.float32)/255.
        # Rank-3 shape: single_sample yields one example at a time.
        self.shape = [self.output_size, self.output_size, self.c_dim]
class JPEG_128(Pipeline):
    """Pipeline reading 128px image files (``*.png`` in data_dir, decoded via
    decode_jpeg) and resizing batches to output_size in _transform."""

    def __init__(self, *args, base_size=128, random_crop=9, **kwargs):
        super(JPEG_128, self).__init__(*args, **kwargs)
        #base_size = kwargs.get('base_size', 160)
        #random_crop = kwargs.get('random_crop', 9)
        files = glob(os.path.join(self.data_dir, '*.png'))
        filename_queue = tf.train.string_input_producer(files, shuffle=True)
        reader = tf.WholeFileReader()
        _, raw = reader.read(filename_queue)
        decoded = tf.image.decode_jpeg(raw, channels=self.c_dim) # HWC
        # Cropping/flipping augmentation is currently disabled:
        #bs = base_size + 2 * random_crop
        #cropped = tf.image.resize_image_with_crop_or_pad(decoded, bs, bs)
        #if random_crop > 0:
        #    cropped = tf.image.random_flip_left_right(cropped)
        #    cropped = tf.random_crop(cropped, [base_size, base_size, self.c_dim])
        self.single_sample = decoded
        # Rank-3 shape: one image per dequeue. `random_crop` is unused while
        # augmentation stays commented out.
        self.shape = [base_size, base_size, self.c_dim]

    def _transform(self, x):
        """Resize the batch to the model resolution and scale to [0, 1]."""
        x = tf.image.resize_bilinear(x, (self.output_size, self.output_size))
        return tf.cast(x, tf.float32)/255.
class JPEG_64(Pipeline):
    """Pipeline reading 64px image files; identical to JPEG_128 except for the
    default base_size (a consolidation candidate)."""

    def __init__(self, *args, base_size=64, random_crop=9, **kwargs):
        super(JPEG_64, self).__init__(*args, **kwargs)
        #base_size = kwargs.get('base_size', 160)
        #random_crop = kwargs.get('random_crop', 9)
        files = glob(os.path.join(self.data_dir, '*.png'))
        filename_queue = tf.train.string_input_producer(files, shuffle=True)
        reader = tf.WholeFileReader()
        _, raw = reader.read(filename_queue)
        decoded = tf.image.decode_jpeg(raw, channels=self.c_dim) # HWC
        # Cropping/flipping augmentation is currently disabled:
        #bs = base_size + 2 * random_crop
        #cropped = tf.image.resize_image_with_crop_or_pad(decoded, bs, bs)
        #if random_crop > 0:
        #    cropped = tf.image.random_flip_left_right(cropped)
        #    cropped = tf.random_crop(cropped, [base_size, base_size, self.c_dim])
        self.single_sample = decoded
        # Rank-3 shape: one image per dequeue. `random_crop` is unused while
        # augmentation stays commented out.
        self.shape = [base_size, base_size, self.c_dim]

    def _transform(self, x):
        """Resize the batch to the model resolution and scale to [0, 1]."""
        x = tf.image.resize_bilinear(x, (self.output_size, self.output_size))
        return tf.cast(x, tf.float32)/255.
class Mnist(Pipeline):
    """Pipeline serving all 70k MNIST digits (train + test), shuffled once
    with a fixed seed, pixels scaled to [0, 1]."""

    def __init__(self, *args, **kwargs):
        super(Mnist, self).__init__(*args, **kwargs)

        def _read_idx(filename, offset, shape):
            # IDX files are binary, so open with 'rb' (the original opened
            # them in text mode) and close the handle promptly (the original
            # leaked all four file handles). `offset` skips the IDX header.
            with open(os.path.join(self.data_dir, filename), 'rb') as fd:
                raw = np.fromfile(file=fd, dtype=np.uint8)
            # astype(float) == the removed np.float alias (float64).
            return raw[offset:].reshape(shape).astype(float)

        trX = _read_idx('train-images-idx3-ubyte', 16, (60000, 28, 28, 1))
        trY = _read_idx('train-labels-idx1-ubyte', 8, (60000,))
        teX = _read_idx('t10k-images-idx3-ubyte', 16, (10000, 28, 28, 1))
        teY = _read_idx('t10k-labels-idx1-ubyte', 8, (10000,))

        X = np.concatenate((trX, teX), axis=0).astype(np.float32) / 255.
        y = np.concatenate((trY, teY), axis=0)  # labels unused downstream here

        # Fixed seed so every run sees the same shuffle order.
        seed = 547
        np.random.seed(seed)
        np.random.shuffle(X)

        queue = tf.train.input_producer(tf.constant(X), shuffle=False)
        self.single_sample = queue.dequeue_many(self.read_batch)
class Cifar10(Pipeline):
    """Pipeline serving shuffled CIFAR-10 train+test images from the python
    pickle batches in data_dir (NHWC, pixels scaled to [0, 1])."""

    def __init__(self, *args, **kwargs):
        super(Cifar10, self).__init__(*args, **kwargs)
        # All 10 classes are kept; narrow `categories` to filter classes.
        categories = np.arange(10)
        batchesX, batchesY = [], []
        for batch in range(1,6):
            loaded = misc.unpickle(os.path.join(self.data_dir, 'data_batch_%d' % batch))
            idx = np.in1d(np.array(loaded['labels']), categories)
            batchesX.append(loaded['data'][idx].reshape(idx.sum(), 3, 32, 32))
            batchesY.append(np.array(loaded['labels'])[idx])
        # NCHW -> NHWC.
        trX = np.concatenate(batchesX, axis=0).transpose(0, 2, 3, 1)
        trY = np.concatenate(batchesY, axis=0)
        test = misc.unpickle(os.path.join(self.data_dir, 'test_batch'))
        idx = np.in1d(np.array(test['labels']), categories)
        teX = test['data'][idx].reshape(idx.sum(), 3, 32, 32).transpose(0, 2, 3, 1)
        teY = np.array(test['labels'])[idx]
        X = np.concatenate((trX, teX), axis=0).astype(np.float32) / 255.
        y = np.concatenate((trY, teY), axis=0)  # labels unused downstream here
        # Fixed seed -> deterministic shuffle order.
        seed = 547
        np.random.seed(seed)
        np.random.shuffle(X)
        queue = tf.train.input_producer(tf.constant(X), shuffle=False)
        self.single_sample = queue.dequeue_many(self.read_batch)
class GaussianMix(Pipeline):
    """Toy 1-D pipeline: a two-component Gaussian mixture, plus a matplotlib
    animation writer (self.G_config) for visualizing training progress.

    NOTE(review): the `means`/`stds` parameters are unused — the components
    N(0, 1) and N(3, 0.5) are hard-coded below.
    """

    def __init__(self, *args, sample_dir='/', means=[.0, 3.0], stds=[1.0, .5], size=1000, **kwargs):
        super(GaussianMix, self).__init__(*args, **kwargs)
        from matplotlib import animation
        X_real = np.r_[
            np.random.normal(0, 1, size=size),
            np.random.normal(3, .5, size=size),
        ]
        # Shape as (N, 1, 1, 1) "images" so the generic pipeline applies.
        X_real = X_real.reshape(X_real.shape[0], 1, 1, 1)
        xlo = -5
        xhi = 7
        # Plot the real-data histogram once; the animation writer records
        # frames into <sample_dir>/train.mp4 during training.
        ax1 = plt.gca()
        fig = ax1.figure
        ax1.grid(False)
        ax1.set_yticks([], [])
        myhist(X_real.ravel(), color='r')
        ax1.set_xlim(xlo, xhi)
        ax1.set_ylim(0, 1.05)
        ax1._autoscaleXon = ax1._autoscaleYon = False
        wrtr = animation.writers['ffmpeg'](fps=20)
        if not os.path.exists(sample_dir):
            os.makedirs(sample_dir)
        wrtr.setup(fig=fig, outfile=os.path.join(sample_dir, 'train.mp4'), dpi=100)
        self.G_config = {'g_line': None,
                         'ax1': ax1,
                         'writer': wrtr,
                         'figure': ax1.figure}
        queue = tf.train.input_producer(tf.constant(X_real.astype(np.float32)), shuffle=False)
        self.single_sample = queue.dequeue_many(self.read_batch)
def myhist(X, ax=plt, bins='auto', **kwargs):
    """Plot a step-outline histogram of X, normalized so its peak is 1.

    Extra kwargs are forwarded to ax.plot; returns the list of Line2D objects.
    """
    hist, bin_edges = np.histogram(X, bins=bins)
    hist = hist / hist.max()
    # Duplicate edges/heights to draw flat-topped steps, closed to 0 at both ends.
    return ax.plot(
        np.c_[bin_edges, bin_edges].ravel(),
        np.r_[0, np.c_[hist, hist].ravel(), 0],
        **kwargs
    )
def get_pipeline(dataset, info):
    """Map a dataset name to its input-pipeline class.

    Args:
        dataset: dataset identifier, e.g. 'celebA', 'mnist', 'cifar10',
            'GaussianMix', 'tiny_imagenet', or any name containing 'lsun'.
        info: dataset info mapping; only consulted for lsun datasets, where
            the presence of a 'tf_records' key selects the TfRecords reader.

    Returns:
        The pipeline class (not an instance) to use for `dataset`.

    Raises:
        ValueError: if `dataset` is not recognized.  (Previously a bare
        `Exception`; ValueError subclasses Exception, so existing
        `except Exception` callers are unaffected.)
    """
    # Note: 'lsun_bedroom_200k' must be matched before the generic 'lsun'
    # substring check below.
    if dataset in ('celebA', 'lsun_bedroom_200k'):
        return JPEG_128
    if 'lsun' in dataset:
        return TfRecords if 'tf_records' in info else LMDB
    if dataset == 'mnist':
        return Mnist
    if dataset == 'cifar10':
        return Cifar10
    if dataset == 'GaussianMix':
        return GaussianMix
    if dataset == 'tiny_imagenet':
        return JPEG_64
    raise ValueError('invalid dataset: %s' % dataset)
| 11,697 | 39.337931 | 139 | py |
GANFingerprints | GANFingerprints-master/CramerGAN/gan/core/resnet/block.py | """
Based on https://github.com/igul222/improved_wgan_training/blob/master/gan_64x64.py.
"""
import functools
import tensorflow as tf
from core.resnet.ops import conv2d, batchnorm, layernorm
def ResidualBlock(name, input_dim, output_dim, filter_size, inputs, resample=None, he_init=True):
    """Pre-activation residual block: BN -> ReLU -> conv -> BN -> ReLU -> conv,
    added to a (possibly projected/resampled) shortcut.

    resample: None, 'down', or 'up'
    """
    make = functools.partial
    if resample == 'down':
        shortcut_layer = MeanPoolConv
        first_conv = make(conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim)
        second_conv = make(ConvMeanPool, input_dim=input_dim, output_dim=output_dim)
    elif resample == 'up':
        shortcut_layer = UpsampleConv
        first_conv = make(UpsampleConv, input_dim=input_dim, output_dim=output_dim)
        second_conv = make(conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)
    elif resample is None:
        shortcut_layer = conv2d.Conv2D
        first_conv = make(conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim)
        second_conv = make(conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim)
    else:
        raise Exception('invalid resample value')

    if resample is None and output_dim == input_dim:
        # Shapes already match: identity skip-connection, no projection needed.
        shortcut = inputs
    else:
        shortcut = shortcut_layer(name + '.Shortcut', input_dim=input_dim,
                                  output_dim=output_dim, filter_size=1,
                                  he_init=False, biases=True, inputs=inputs)

    h = Normalize(name + '.BN1', [0, 2, 3], inputs)
    h = tf.nn.relu(h)
    # First conv carries no bias; the following normalization would absorb it.
    h = first_conv(name + '.Conv1', filter_size=filter_size, inputs=h,
                   he_init=he_init, biases=False)
    h = Normalize(name + '.BN2', [0, 2, 3], h)
    h = tf.nn.relu(h)
    h = second_conv(name + '.Conv2', filter_size=filter_size, inputs=h, he_init=he_init)
    return shortcut + h
def UpsampleConv(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True):
    """2x nearest-neighbour upsample (NCHW, via depth_to_space) followed by a conv."""
    # Replicate channels 4x, then let depth_to_space redistribute them into a
    # 2x-larger spatial grid; transposes convert NCHW <-> NHWC around it.
    x = tf.concat([inputs] * 4, axis=1)
    x = tf.transpose(x, [0, 2, 3, 1])
    x = tf.depth_to_space(x, 2)
    x = tf.transpose(x, [0, 3, 1, 2])
    return conv2d.Conv2D(name, input_dim, output_dim, filter_size, x,
                         he_init=he_init, biases=biases)
def ConvMeanPool(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True):
    """Convolution followed by 2x2 mean pooling (NCHW layout)."""
    conv_out = conv2d.Conv2D(name, input_dim, output_dim, filter_size, inputs,
                             he_init=he_init, biases=biases)
    # Average the four samples of each 2x2 spatial cell.
    quadrants = [conv_out[:, :, ::2, ::2],
                 conv_out[:, :, 1::2, ::2],
                 conv_out[:, :, ::2, 1::2],
                 conv_out[:, :, 1::2, 1::2]]
    return tf.add_n(quadrants) / 4.
def MeanPoolConv(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True):
    """2x2 mean pooling (NCHW layout) followed by a convolution."""
    # Average the four samples of each 2x2 spatial cell, then convolve.
    quadrants = [inputs[:, :, ::2, ::2],
                 inputs[:, :, 1::2, ::2],
                 inputs[:, :, ::2, 1::2],
                 inputs[:, :, 1::2, 1::2]]
    pooled = tf.add_n(quadrants) / 4.
    return conv2d.Conv2D(name, input_dim, output_dim, filter_size, pooled,
                         he_init=he_init, biases=biases)
def Normalize(name, axes, inputs):
    """Layer-normalize discriminator tensors (names containing 'd_');
    batch-normalize everything else."""
    if 'd_' not in name:
        return batchnorm.Batchnorm(name, axes, inputs, fused=True)
    # Layernorm here is hard-wired to the standard NCHW reduction axes.
    if axes != [0, 2, 3]:
        raise Exception('Layernorm over non-standard axes is unsupported')
    return layernorm.Layernorm(name, [1, 2, 3], inputs)
| 3,394 | 44.266667 | 116 | py |
GANFingerprints | GANFingerprints-master/CramerGAN/gan/core/resnet/__init__.py | import numpy as np
import tensorflow as tf
import locale
# NOTE(review): locale is set here but nothing in this module visibly uses
# locale-aware formatting — presumably for downstream number formatting
# (e.g. '{:n}') in code that imports this package; confirm before removing.
locale.setlocale(locale.LC_ALL, '')
__all__ = ['block', 'ops']
# Registry of every variable created through `param`, keyed by name.
_params = {}
# Maps an existing param object to its replacement; `param` follows these
# chains (installed via `alias_params`) before returning a variable.
_param_aliases = {}
def param(name, *args, **kwargs):
    """A wrapper for `tf.Variable` which enables parameter sharing in models.

    Creates and returns variables similarly to `tf.Variable`, except that
    requesting a param with the same name as a previously-created one returns
    the existing variable instead of making a new one.

    Also adds a `param` attribute to the variables it creates, so that you
    can easily search a graph for all params.
    """
    if name not in _params:
        kwargs['name'] = name
        # Renamed from `param`: the old local shadowed this function's name.
        var = tf.Variable(*args, **kwargs)
        var.param = True
        _params[name] = var
    result = _params[name]
    # Follow alias chains (installed via alias_params) to the final variable.
    # (The previous version also kept an unused hop counter; dropped.)
    while result in _param_aliases:
        result = _param_aliases[result]
    return result
def params_with_name(name):
    """Return all registered params whose registry key contains `name`."""
    matches = []
    for key, var in _params.items():
        if name in key:
            matches.append(var)
    return matches
def delete_all_params():
    # Empty the registry in place (clear, not rebind) so any external
    # references to the _params dict observe the reset as well.
    _params.clear()
def alias_params(replace_dict):
    """Register old-param -> new-param replacements; `param` resolves through them."""
    _param_aliases.update(replace_dict)
def delete_param_aliases():
    # Clear in place (rather than rebinding) so external references to the
    # _param_aliases dict see the reset too.
    _param_aliases.clear()
def print_model_settings(locals_):
    """Print every UPPERCASE variable in `locals_` (sorted by name), skipping
    the bookkeeping names T, SETTINGS and ALL_SETTINGS."""
    print('Uppercase local vars:')
    skipped = ('T', 'SETTINGS', 'ALL_SETTINGS')
    chosen = sorted(
        ((key, value) for key, value in locals_.items()
         if key.isupper() and key not in skipped),
        key=lambda pair: pair[0],
    )
    for key, value in chosen:
        print('\t{}: {}'.format(key, value))
def print_model_settings_dict(settings):
    """Print every (key, value) pair in `settings`, sorted by key."""
    print('Settings dict:')
    for key in sorted(settings):
        print('\t{}: {}'.format(key, settings[key]))
| 1,889 | 29 | 119 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.