repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
pyUSID-legacy | pyUSID-master-legacy/tests/io/data_utils.py | from __future__ import division, print_function, unicode_literals, absolute_import
import os
import sys
import socket
from warnings import warn
import h5py
import numpy as np
from io import StringIO
from contextlib import contextmanager
from platform import platform
from sidpy.hdf.hdf_utils import get_attr
from sidpy.base.string_utils import get_time_stamp
sys.path.append("../../pyUSID/")
from pyUSID import __version__
from pyUSID.io.anc_build_utils import INDICES_DTYPE, VALUES_DTYPE, build_ind_val_matrices
# File names of the throw-away HDF5 fixture files generated by the
# make_*_file() helpers below; tests delete them in tearDown()
std_beps_path = 'test_hdf_utils.h5'
sparse_sampling_path = 'sparse_sampling.h5'
incomplete_measurement_path = 'incomplete_measurement.h5'
relaxation_path = 'relaxation.h5'
# Python 2/3 compatibility: make `unicode` an alias of `str` on Python 3
if sys.version_info.major == 3:
    unicode = str
def delete_existing_file(file_path):
    """Remove the file at *file_path* from disk; silently do nothing if absent."""
    if not os.path.exists(file_path):
        return
    os.remove(file_path)
def write_safe_attrs(h5_object, attrs):
    """Copy every key/value pair in *attrs* directly onto ``h5_object.attrs``."""
    for attr_name, attr_val in attrs.items():
        h5_object.attrs[attr_name] = attr_val
def write_string_list_as_attr(h5_object, attrs):
    """Store each value in *attrs* as a fixed-length byte-string numpy array attribute."""
    for attr_name in attrs:
        h5_object.attrs[attr_name] = np.array(attrs[attr_name], dtype='S')
def write_aux_reg_ref(h5_dset, labels, is_spec=True):
    """
    Attach one HDF5 region reference per label to *h5_dset*.

    Spectroscopic datasets are sliced row-wise (one dimension per row);
    position datasets column-wise (one dimension per column).
    """
    for dim_ind, dim_name in enumerate(labels):
        single_dim = slice(dim_ind, dim_ind + 1)
        region = (single_dim, slice(None)) if is_spec else (slice(None), single_dim)
        h5_dset.attrs[dim_name] = h5_dset.regionref[region]
def write_main_reg_refs(h5_dset, attrs):
    """Write each named region reference onto *h5_dset* and record the names
    in a 'labels' string-list attribute."""
    for ref_name in attrs:
        h5_dset.attrs[ref_name] = h5_dset.regionref[attrs[ref_name]]
    write_string_list_as_attr(h5_dset, {'labels': list(attrs.keys())})
@contextmanager
def capture_stdout():
    """
    Context manager encapsulating a pattern for capturing stdout writes
    and restoring sys.stdout even upon exceptions.

    Yields a zero-argument callable returning everything printed so far.
    https://stackoverflow.com/questions/17067560/intercept-pythons-print-statement-and-display-in-gui

    Examples:
    >>> with capture_stdout() as get_value:
    >>>    print("here is a print")
    >>>    captured = get_value()
    >>> print('Gotcha: ' + captured)

    >>> with capture_stdout() as get_value:
    >>>    print("here is a print")
    >>>    raise Exception('oh no!')
    >>> print('Does printing still work?')
    """
    out = StringIO()
    # BUGFIX: remember whatever stdout was active on entry (it may already be a
    # replacement, e.g. pytest's capture or a nested capture_stdout) instead of
    # blindly restoring sys.__stdout__ on exit, which clobbered prior redirects.
    prev_stdout = sys.stdout
    # Redirect sys.stdout
    sys.stdout = out
    try:
        # Yield a method clients can use to obtain the value
        yield out.getvalue
    finally:
        # Restore the stdout that was active before this context was entered
        sys.stdout = prev_stdout
def validate_aux_dset_pair(test_class, h5_group, h5_inds, h5_vals, dim_names, dim_units, inds_matrix,
                           vals_matrix=None, base_name=None, h5_main=None, is_spectral=True,
                           slow_to_fast=False, check_reg_refs=False):
    """
    Assert that an ancillary indices / values dataset pair was written correctly.

    Verifies (via *test_class* assertions) the datasets' parent group, names,
    contents, dtypes, and 'labels' / 'units' attributes. When *h5_main* is given,
    also checks that it links to the pair by attribute. Region references are
    only warned about on mismatch since they are not used anywhere.
    Removed a leftover debug ``print(inds_matrix)`` call.
    """
    if vals_matrix is None:
        vals_matrix = inds_matrix
    if base_name is None:
        if is_spectral:
            base_name = 'Spectroscopic'
        else:
            base_name = 'Position'
    else:
        test_class.assertIsInstance(base_name, (str, unicode))

    if not slow_to_fast:
        # Expectations were sent in fast-to-slow but the datasets are stored
        # slow-to-fast, so flip the expected matrices and dimension metadata
        func = np.flipud if is_spectral else np.fliplr

        vals_matrix = func(vals_matrix)
        inds_matrix = func(inds_matrix)

        dim_names = dim_names[::-1]
        dim_units = dim_units[::-1]

    for h5_dset, exp_dtype, exp_name, ref_data in zip([h5_inds, h5_vals],
                                                      [INDICES_DTYPE, VALUES_DTYPE],
                                                      [base_name + '_Indices', base_name + '_Values'],
                                                      [inds_matrix, vals_matrix]):
        if isinstance(h5_main, h5py.Dataset):
            # The main dataset must link to this ancillary dataset by name
            test_class.assertEqual(h5_main.file[h5_main.attrs[exp_name]], h5_dset)
        test_class.assertIsInstance(h5_dset, h5py.Dataset)
        test_class.assertEqual(h5_dset.parent, h5_group)
        test_class.assertEqual(h5_dset.name.split('/')[-1], exp_name)
        test_class.assertTrue(np.allclose(ref_data, h5_dset[()]))
        test_class.assertEqual(h5_dset.dtype, exp_dtype)
        test_class.assertTrue(np.all([_ in h5_dset.attrs.keys() for _ in ['labels', 'units']]))
        test_class.assertTrue(np.all([x == y for x, y in zip(dim_names, get_attr(h5_dset, 'labels'))]))
        test_class.assertTrue(np.all([x == y for x, y in zip(dim_units, get_attr(h5_dset, 'units'))]))
        # assert region references even though these are not used anywhere:
        if check_reg_refs:
            for dim_ind, curr_name in enumerate(dim_names):
                if is_spectral:
                    expected = np.squeeze(ref_data[dim_ind])
                else:
                    expected = np.squeeze(ref_data[:, dim_ind])
                actual = np.squeeze(h5_dset[h5_dset.attrs[curr_name]])
                try:
                    match = np.allclose(expected, actual)
                except ValueError:
                    # Shapes disagree entirely; treat as a mismatch
                    match = False
                if match:
                    test_class.assertTrue(match)
                else:
                    warn('Test for region reference: ' + curr_name + ' failed')
def verify_book_keeping_attrs(test_class, h5_obj):
    """Check the standard book-keeping attributes stamped onto *h5_obj*."""
    now_stamp = get_time_stamp()
    written_stamp = h5_obj.attrs['timestamp']
    # Compare timestamps only up to (excluding) the trailing '_' field so the
    # check does not flake on the time elapsed since the attribute was written
    test_class.assertEqual(now_stamp[:now_stamp.rindex('_')],
                           written_stamp[:written_stamp.rindex('_')])
    for expected, attr_name in [(__version__, 'pyUSID_version'),
                                (socket.getfqdn(), 'machine_id'),
                                (platform(), 'platform')]:
        test_class.assertEqual(expected, h5_obj.attrs[attr_name])
def make_sparse_sampling_file():
    """
    Build ``sparse_sampling.h5``: a USID file where only a random fraction of
    positions on a 5x7 grid were measured. Two channels are written; only
    'Channel_001' carries the 'incomplete_dimensions' attribute.
    """
    if os.path.exists(sparse_sampling_path):
        os.remove(sparse_sampling_path)
    h5_main = None
    with h5py.File(sparse_sampling_path, mode='w') as h5_f:
        h5_meas_grp = h5_f.create_group('Measurement_000')
        freq_pts = 3
        # Single spectroscopic dimension: Frequency (kHz)
        spec_inds = np.expand_dims(np.arange(freq_pts), 0)
        spec_vals = np.expand_dims(np.linspace(300, 330, freq_pts), 0)
        h5_spec_inds = h5_meas_grp.create_dataset('Spectroscopic_Indices', data=spec_inds, dtype=np.uint16)
        h5_spec_vals = h5_meas_grp.create_dataset('Spectroscopic_Values', data=spec_vals, dtype=np.float32)
        spec_attrs = {'labels': ['Frequency'], 'units': ['kHz']}
        for dset in [h5_spec_inds, h5_spec_vals]:
            write_aux_reg_ref(dset, spec_attrs['labels'], is_spec=True)
            write_string_list_as_attr(dset, spec_attrs)
        import random
        num_rows = 5
        num_cols = 7
        full_vals = np.vstack((np.tile(np.arange(num_cols), num_rows),
                               np.repeat(np.arange(num_rows), num_cols))).T
        # make the values more interesting than the raw grid indices
        full_vals = np.vstack((full_vals[:, 0] * 50, full_vals[:, 1] * 1.25)).T
        # Keep only a random 25% of the grid positions (the "sparse" sampling)
        fract = 0.25
        chosen_pos = random.sample(range(num_rows * num_cols), int(fract * num_rows * num_cols))
        pos_inds = np.tile(np.arange(len(chosen_pos)), (2, 1)).T
        pos_vals = full_vals[chosen_pos]
        pos_attrs = {'units': ['nm', 'um'], 'labels': ['X', 'Y']}
        h5_chan_grp_1 = h5_meas_grp.create_group('Channel_000')
        h5_chan_grp_2 = h5_meas_grp.create_group('Channel_001')
        for h5_chan_grp, add_attribute in zip([h5_chan_grp_1, h5_chan_grp_2], [False, True]):
            this_pos_attrs = pos_attrs.copy()
            if add_attribute:
                this_pos_attrs.update({'incomplete_dimensions': ['X', 'Y']})
            h5_pos_inds = h5_chan_grp.create_dataset('Position_Indices', data=pos_inds, dtype=np.uint16)
            h5_pos_vals = h5_chan_grp.create_dataset('Position_Values', data=pos_vals, dtype=np.float32)
            for dset in [h5_pos_inds, h5_pos_vals]:
                write_aux_reg_ref(dset, this_pos_attrs['labels'], is_spec=False)
                write_string_list_as_attr(dset, this_pos_attrs)
            h5_main = h5_chan_grp.create_dataset('Raw_Data',
                                                 data=np.random.rand(len(chosen_pos), freq_pts),
                                                 dtype=np.float32)
            # Write mandatory attributes:
            write_safe_attrs(h5_main, {'units': 'V', 'quantity': 'Cantilever Deflection'})
            # Link ancillary
            for dset in [h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals]:
                h5_main.attrs[dset.name.split('/')[-1]] = dset.ref
    # NOTE(review): this group handle belongs to a file that is closed once the
    # `with` block exits; callers presumably rely only on the file written to
    # disk -- confirm before using the returned handle.
    return h5_meas_grp
def make_incomplete_measurement_file():
    """
    Build ``incomplete_measurement.h5``: a USID file whose measurement stopped
    partway through the position grid (only ``tot_positions`` of the 5x7 grid).
    'Channel_001' additionally carries the 'incomplete_dimensions' attribute.
    """
    if os.path.exists(incomplete_measurement_path):
        os.remove(incomplete_measurement_path)
    with h5py.File(incomplete_measurement_path, mode='w') as h5_f:
        h5_meas_grp = h5_f.create_group('Measurement_000')
        freq_pts = 3
        dc_offsets = 2
        # Two spectroscopic dims: Frequency varies fastest, DC_Offset slowest
        spec_inds = np.vstack((np.tile(np.arange(freq_pts), dc_offsets),
                               np.repeat(np.arange(dc_offsets), freq_pts)))
        # make the values more interesting:
        spec_vals = np.vstack((np.tile(np.linspace(320, 340, freq_pts), dc_offsets),
                               np.repeat(np.linspace(-8, 8, dc_offsets), freq_pts)))
        h5_spec_inds = h5_meas_grp.create_dataset('Spectroscopic_Indices', data=spec_inds, dtype=np.uint16)
        h5_spec_vals = h5_meas_grp.create_dataset('Spectroscopic_Values', data=spec_vals, dtype=np.float32)
        spec_attrs = {'labels': ['Frequency', 'DC_Offset'], 'units': ['kHz', 'V']}
        for dset in [h5_spec_inds, h5_spec_vals]:
            write_aux_reg_ref(dset, spec_attrs['labels'], is_spec=True)
            write_string_list_as_attr(dset, spec_attrs)
        num_rows = 5
        num_cols = 7
        pos_inds = np.vstack((np.tile(np.arange(num_cols), num_rows),
                              np.repeat(np.arange(num_rows), num_cols))).T
        # make the values more interesting:
        pos_vals = np.vstack((pos_inds[:, 0] * 50, pos_inds[:, 1] * 1.25)).T
        pos_attrs = {'units': ['nm', 'um'], 'labels': ['X', 'Y']}
        # Aborted acquisition: fewer positions than the full num_rows*num_cols grid
        tot_positions = 4 * num_cols + 3
        incomp_dim_names = ['X', 'Y']
        def _create_pos_and_main(h5_group, add_attribute):
            # Writes truncated position ancillaries plus a linked main dataset
            h5_pos_inds = h5_group.create_dataset('Position_Indices',
                                                  data=pos_inds[:tot_positions],
                                                  dtype=np.uint16)
            h5_pos_vals = h5_group.create_dataset('Position_Values',
                                                  data=pos_vals[:tot_positions],
                                                  dtype=np.float32)
            this_pos_attrs = pos_attrs.copy()
            if add_attribute:
                this_pos_attrs.update({'incomplete_dimensions': incomp_dim_names})
            for dset in [h5_pos_inds, h5_pos_vals]:
                write_aux_reg_ref(dset, pos_attrs['labels'], is_spec=False)
                write_string_list_as_attr(dset, this_pos_attrs)
            h5_main = h5_group.create_dataset('Raw_Data',
                                              data=np.random.rand(tot_positions, freq_pts * dc_offsets),
                                              dtype=np.float32)
            # Write mandatory attributes:
            write_safe_attrs(h5_main, {'units': 'V', 'quantity': 'Cantilever Deflection'})
            # Link ancillary
            for dset in [h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals]:
                h5_main.attrs[dset.name.split('/')[-1]] = dset.ref
            return h5_main
        h5_main_1 = _create_pos_and_main(h5_meas_grp.create_group('Channel_000'), False)
        h5_main_2 = _create_pos_and_main(h5_meas_grp.create_group('Channel_001'), True)
def make_relaxation_file():
    """
    Build ``relaxation.h5``: a USID file with four spectroscopic dimensions
    (Frequency, Repeats, DC_Offset, Field). The 'Field' rows are constructed
    manually; 'Channel_001' additionally carries the 'dependent_dimensions'
    attribute marking 'Field' as dependent.
    """
    if os.path.exists(relaxation_path):
        os.remove(relaxation_path)
    with h5py.File(relaxation_path, mode='w') as h5_f:
        h5_meas_grp = h5_f.create_group('Measurement_000')
        num_rows = 2
        num_cols = 11
        pos_inds = np.vstack((np.tile(np.arange(num_cols), num_rows),
                              np.repeat(np.arange(num_rows), num_cols))).T
        # make the values more interesting:
        pos_vals = np.vstack((pos_inds[:, 0] * 50, pos_inds[:, 1] * 1.25)).T
        pos_attrs = {'units': ['nm', 'um'], 'labels': ['X', 'Y']}
        h5_pos_inds = h5_meas_grp.create_dataset('Position_Indices', data=pos_inds, dtype=np.uint16)
        h5_pos_vals = h5_meas_grp.create_dataset('Position_Values', data=pos_vals, dtype=np.float32)
        for dset in [h5_pos_inds, h5_pos_vals]:
            write_aux_reg_ref(dset, pos_attrs['labels'], is_spec=False)
            write_string_list_as_attr(dset, pos_attrs)
        spec_attrs = {'labels': ['Frequency', 'Repeats', 'DC_Offset', 'Field'],
                      'units': ['kHz', 'a. u.', 'V', 'a.u.']}
        freq_pts = 3
        repeats = 5
        dc_offsets = 7
        field_inds = 1
        spec_unit_vals = [np.linspace(320, 340, freq_pts),
                          np.arange(repeats),
                          3 * np.pi * np.linspace(0, 1, dc_offsets),
                          np.array([1, 0])]
        # Build the ind/val tables for all but the dependent 'Field' dimension
        spec_ind_mat, spec_val_mat = build_ind_val_matrices(spec_unit_vals[:-1])
        # Manually creating the field array that starts with 1
        field_ind_unit = np.hstack(([0], np.ones(repeats - field_inds, dtype=np.uint16)))
        field_val_unit = np.hstack(([1], np.zeros(repeats - field_inds, dtype=np.uint16)))
        # Manually appending to the indices and values table
        spec_ind_mat = np.vstack((spec_ind_mat,
                                  np.tile(np.repeat(field_ind_unit, freq_pts), dc_offsets)))
        spec_val_mat = np.vstack((spec_val_mat,
                                  np.tile(np.repeat(field_val_unit, freq_pts), dc_offsets)))
        spec_unit_vals_dict = dict()
        for dim_ind, dim_unit_vals in enumerate(spec_unit_vals):
            spec_unit_vals_dict['unit_vals_dim_' + str(dim_ind)] = dim_unit_vals
        h5_chan_grp_1 = h5_meas_grp.create_group('Channel_000')
        h5_chan_grp_2 = h5_meas_grp.create_group('Channel_001')
        for h5_chan_grp, add_attribute in zip([h5_chan_grp_1, h5_chan_grp_2], [False, True]):
            h5_spec_inds = h5_chan_grp.create_dataset('Spectroscopic_Indices', data=spec_ind_mat, dtype=np.uint16)
            h5_spec_vals = h5_chan_grp.create_dataset('Spectroscopic_Values', data=spec_val_mat, dtype=np.float32)
            this_spec_attrs = spec_attrs.copy()
            if add_attribute:
                this_spec_attrs.update({'dependent_dimensions': ['Field']})
            for dset in [h5_spec_inds, h5_spec_vals]:
                write_aux_reg_ref(dset, spec_attrs['labels'], is_spec=True)
                write_string_list_as_attr(dset, this_spec_attrs)
                # Write the unit values as attributes - testing purposes only:
                write_safe_attrs(dset, spec_unit_vals_dict)
            h5_main = h5_chan_grp.create_dataset('Raw_Data',
                                                 data=np.random.rand(num_rows * num_cols,
                                                                     freq_pts * repeats * dc_offsets),
                                                 dtype=np.float32)
            # Write mandatory attributes:
            write_safe_attrs(h5_main, {'units': 'V', 'quantity': 'Cantilever Deflection'})
            # Link ancillary
            for dset in [h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals]:
                h5_main.attrs[dset.name.split('/')[-1]] = dset.ref
def make_beps_file(rev_spec=False):
    """
    Build ``test_hdf_utils.h5``: the standard BEPS-like fixture containing a
    source main dataset plus two result groups (one compound-typed, one
    complex-typed main), all linked to shared ancillary datasets.

    Parameters
    ----------
    rev_spec : bool, optional
        When True, the spectroscopic table is written with Cycle varying
        faster than Bias (reversed ordering).
    """
    if os.path.exists(std_beps_path):
        os.remove(std_beps_path)
    with h5py.File(std_beps_path, mode='w') as h5_f:
        h5_raw_grp = h5_f.create_group('Raw_Measurement')
        write_safe_attrs(h5_raw_grp, {'att_1': 'string_val', 'att_2': 1.2345, 'att_3': [1, 2, 3, 4]})
        write_string_list_as_attr(h5_raw_grp, {'att_4': ['str_1', 'str_2', 'str_3']})
        _ = h5_raw_grp.create_group('Misc')
        num_rows = 3
        num_cols = 5
        num_cycles = 2
        num_cycle_pts = 7
        source_dset_name = 'source_main'
        tool_name = 'Fitter'
        # Per USID, dimensions are arranged from fastest to slowest
        source_pos_data = np.vstack((np.tile(np.arange(num_cols), num_rows),
                                     np.repeat(np.arange(num_rows), num_cols))).T
        pos_attrs = {'units': ['nm', 'um'], 'labels': ['X', 'Y']}
        h5_pos_inds = h5_raw_grp.create_dataset('Position_Indices', data=source_pos_data, dtype=np.uint16)
        write_aux_reg_ref(h5_pos_inds, pos_attrs['labels'], is_spec=False)
        write_string_list_as_attr(h5_pos_inds, pos_attrs)
        # make the values more interesting:
        cols_offset = -750
        cols_step = 50
        rows_offset = 2
        rows_step = 1.25
        source_pos_data = np.vstack((cols_offset + source_pos_data[:, 0] * cols_step,
                                     rows_offset + source_pos_data[:, 1] * rows_step)).T
        _ = h5_raw_grp.create_dataset('X', data=cols_offset + cols_step * np.arange(num_cols))
        _ = h5_raw_grp.create_dataset('Y', data=rows_offset + rows_step * np.arange(num_rows))
        h5_pos_vals = h5_raw_grp.create_dataset('Position_Values', data=source_pos_data, dtype=np.float32)
        write_aux_reg_ref(h5_pos_vals, pos_attrs['labels'], is_spec=False)
        write_string_list_as_attr(h5_pos_vals, pos_attrs)
        if rev_spec:
            source_spec_data = np.vstack((np.repeat(np.arange(num_cycles), num_cycle_pts),
                                          np.tile(np.arange(num_cycle_pts), num_cycles)))
            source_spec_attrs = {'units': ['', 'V'],
                                 'labels': ['Cycle', 'Bias']}
        else:
            source_spec_data = np.vstack((np.tile(np.arange(num_cycle_pts), num_cycles),
                                          np.repeat(np.arange(num_cycles), num_cycle_pts)))
            source_spec_attrs = {'units': ['V', ''], 'labels': ['Bias', 'Cycle']}
        h5_source_spec_inds = h5_raw_grp.create_dataset('Spectroscopic_Indices', data=source_spec_data,
                                                        dtype=np.uint16)
        write_aux_reg_ref(h5_source_spec_inds, source_spec_attrs['labels'], is_spec=True)
        write_string_list_as_attr(h5_source_spec_inds, source_spec_attrs)
        # make spectroscopic axis interesting as well
        bias_amp = 2.5
        bias_period = np.pi
        bias_vec = bias_amp * np.sin(np.linspace(0, bias_period, num_cycle_pts, endpoint=False))
        _ = h5_raw_grp.create_dataset('Bias', data=bias_vec)
        _ = h5_raw_grp.create_dataset('Cycle', data=np.arange(num_cycles))
        if rev_spec:
            source_spec_data = np.vstack((np.repeat(np.arange(num_cycles), num_cycle_pts),
                                          np.tile(bias_vec, num_cycles)))
        else:
            source_spec_data = np.vstack((np.tile(bias_vec, num_cycles),
                                          np.repeat(np.arange(num_cycles), num_cycle_pts)))
        h5_source_spec_vals = h5_raw_grp.create_dataset('Spectroscopic_Values', data=source_spec_data,
                                                        dtype=np.float32)
        write_aux_reg_ref(h5_source_spec_vals, source_spec_attrs['labels'], is_spec=True)
        write_string_list_as_attr(h5_source_spec_vals, source_spec_attrs)
        main_nd = np.random.rand(num_rows, num_cols, num_cycles, num_cycle_pts)
        h5_nd_main = h5_raw_grp.create_dataset('n_dim_form', data=main_nd)
        write_string_list_as_attr(h5_nd_main, {'dims': ['Y', 'X', 'Cycle', 'Bias']})
        if rev_spec:
            # This simulates things like BEPS where Field should actually be varied slower but is varied faster during acquisition
            main_nd = main_nd.transpose(0, 1, 3, 2)
        source_main_data = main_nd.reshape(num_rows * num_cols, num_cycle_pts * num_cycles)
        # source_main_data = np.random.rand(num_rows * num_cols, num_cycle_pts * num_cycles)
        h5_source_main = h5_raw_grp.create_dataset(source_dset_name, data=source_main_data)
        write_safe_attrs(h5_source_main, {'units': 'A', 'quantity': 'Current'})
        write_main_reg_refs(h5_source_main, {'even_rows': (slice(0, None, 2), slice(None)),
                                             'odd_rows': (slice(1, None, 2), slice(None))})
        # Now need to link as main!
        for dset in [h5_pos_inds, h5_pos_vals, h5_source_spec_inds, h5_source_spec_vals]:
            h5_source_main.attrs[dset.name.split('/')[-1]] = dset.ref
        _ = h5_raw_grp.create_dataset('Ancillary', data=np.arange(5))
        # Now add a few results:
        h5_results_grp_1 = h5_raw_grp.create_group(source_dset_name + '-' + tool_name + '_000')
        write_safe_attrs(h5_results_grp_1,
                         {'att_1': 'string_val', 'att_2': 1.2345, 'att_3': [1, 2, 3, 4]})
        write_string_list_as_attr(h5_results_grp_1, {'att_4': ['str_1', 'str_2', 'str_3']})
        # The results below use a single cycle (fewer than the source data)
        num_cycles = 1
        num_cycle_pts = 7
        results_spec_inds = np.expand_dims(np.arange(num_cycle_pts), 0)
        results_spec_attrs = {'units': ['V'], 'labels': ['Bias']}
        h5_results_1_spec_inds = h5_results_grp_1.create_dataset('Spectroscopic_Indices',
                                                                 data=results_spec_inds, dtype=np.uint16)
        write_aux_reg_ref(h5_results_1_spec_inds, results_spec_attrs['labels'], is_spec=True)
        write_string_list_as_attr(h5_results_1_spec_inds, results_spec_attrs)
        results_spec_vals = np.expand_dims(2.5 * np.sin(np.linspace(0, np.pi, num_cycle_pts, endpoint=False)), 0)
        h5_results_1_spec_vals = h5_results_grp_1.create_dataset('Spectroscopic_Values', data=results_spec_vals,
                                                                 dtype=np.float32)
        write_aux_reg_ref(h5_results_1_spec_vals, results_spec_attrs['labels'], is_spec=True)
        write_string_list_as_attr(h5_results_1_spec_vals, results_spec_attrs)
        # Let this be a compound dataset:
        struc_dtype = np.dtype({'names': ['r', 'g', 'b'],
                                'formats': [np.float32, np.float16,
                                            np.float64]})
        num_elems = (num_rows, num_cols, num_cycles, num_cycle_pts)
        results_1_nd = np.zeros(shape=num_elems, dtype=struc_dtype)
        for name_ind, name in enumerate(struc_dtype.names):
            results_1_nd[name] = np.random.random(size=num_elems)
        h5_results_1_nd = h5_results_grp_1.create_dataset('n_dim_form',
                                                          data=results_1_nd)
        write_string_list_as_attr(h5_results_1_nd,
                                  {'dims': ['Y', 'X', 'Cycle', 'Bias']})
        results_1_main_data = results_1_nd.reshape(num_rows * num_cols,
                                                   num_cycle_pts * num_cycles)
        h5_results_1_main = h5_results_grp_1.create_dataset('results_main', data=results_1_main_data)
        write_safe_attrs(h5_results_1_main, {'units': 'pF', 'quantity': 'Capacitance'})
        # Now need to link as main!
        for dset in [h5_pos_inds, h5_pos_vals, h5_results_1_spec_inds, h5_results_1_spec_vals]:
            h5_results_1_main.attrs[dset.name.split('/')[-1]] = dset.ref
        # add another result with different parameters
        h5_results_grp_2 = h5_raw_grp.create_group(source_dset_name + '-' + tool_name + '_001')
        write_safe_attrs(h5_results_grp_2,
                         {'att_1': 'other_string_val', 'att_2': 5.4321, 'att_3': [4, 1, 3]})
        write_string_list_as_attr(h5_results_grp_2, {'att_4': ['s', 'str_2', 'str_3']})
        # Let these results be a complex typed dataset:
        results_2_nd = np.random.random(size=num_elems) + \
            1j * np.random.random(size=num_elems)
        h5_results_2_nd = h5_results_grp_2.create_dataset('n_dim_form',
                                                          data=results_2_nd)
        write_string_list_as_attr(h5_results_2_nd,
                                  {'dims': ['Y', 'X', 'Cycle', 'Bias']})
        results_2_main_data = results_2_nd.reshape(num_rows * num_cols,
                                                   num_cycle_pts * num_cycles)
        h5_results_2_main = h5_results_grp_2.create_dataset('results_main', data=results_2_main_data)
        write_safe_attrs(h5_results_2_main, {'units': 'pF', 'quantity': 'Capacitance'})
        h5_results_2_spec_inds = h5_results_grp_2.create_dataset('Spectroscopic_Indices',
                                                                 data=results_spec_inds, dtype=np.uint16)
        write_aux_reg_ref(h5_results_2_spec_inds, results_spec_attrs['labels'], is_spec=True)
        write_string_list_as_attr(h5_results_2_spec_inds, results_spec_attrs)
        h5_results_2_spec_vals = h5_results_grp_2.create_dataset('Spectroscopic_Values', data=results_spec_vals,
                                                                 dtype=np.float32)
        write_aux_reg_ref(h5_results_2_spec_vals, results_spec_attrs['labels'], is_spec=True)
        write_string_list_as_attr(h5_results_2_spec_vals, results_spec_attrs)
        # Now need to link as main!
        for dset in [h5_pos_inds, h5_pos_vals, h5_results_2_spec_inds, h5_results_2_spec_vals]:
            h5_results_2_main.attrs[dset.name.split('/')[-1]] = dset.ref
| 25,569 | 44.336879 | 130 | py |
pyUSID-legacy | pyUSID-master-legacy/tests/io/test_dimension.py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: Suhas Somnath
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import sys
import unittest
import numpy as np
from pyUSID.io import dimension
if sys.version_info.major == 3:
unicode = str
class TestDimension(unittest.TestCase):
    """Tests for pyUSID.io.dimension.Dimension: construction, repr, equality.

    Fix: ``np.float`` (removed in NumPy 1.24) replaced with the builtin
    ``float``, which is exactly what the alias resolved to. A stray debug
    ``print`` was also removed.
    """

    def test_values_as_array(self):
        # Values supplied explicitly as an array are stored verbatim
        name = 'Bias'
        units = 'V'
        values = np.random.rand(5)
        descriptor = dimension.Dimension(name, units, values)
        for expected, actual in zip([name, units, values],
                                    [descriptor.name, descriptor.units, descriptor.values]):
            self.assertTrue(np.all([x == y for x, y in zip(expected, actual)]))

    def test_values_as_length(self):
        # An integer length is expanded into np.arange(length)
        name = 'Bias'
        units = 'V'
        values = np.arange(5)
        descriptor = dimension.Dimension(name, units, len(values))
        for expected, actual in zip([name, units],
                                    [descriptor.name, descriptor.units]):
            self.assertTrue(np.all([x == y for x, y in zip(expected, actual)]))
        self.assertTrue(np.allclose(values, descriptor.values))

    def test_repr(self):
        name = 'Bias'
        quantity = 'generic'
        units = 'V'
        # np.float was removed in NumPy 1.24; builtin float is the same dtype
        values = np.arange(5, dtype=float)
        descriptor = dimension.Dimension(name, units, len(values))
        actual = '{}'.format(descriptor)
        expected = '{}: {} ({}) mode:{} : {}'.format(name, quantity, units, descriptor.mode, values)
        self.assertEqual(actual, expected)

    def test_equality(self):
        # Values compare numerically, so a list and a float32 array are equal
        name = 'Bias'
        units = 'V'
        dim_1 = dimension.Dimension(name, units, [0, 1, 2, 3, 4])
        dim_2 = dimension.Dimension(name, units, np.arange(5, dtype=np.float32))
        self.assertEqual(dim_1, dim_2)

    def test_inequality(self):
        # Any difference in values, length, name, units, or mode breaks equality
        name = 'Bias'
        units = 'V'
        values = [0, 1, 2, 3]
        left = dimension.Dimension(name, units, values)
        right = dimension.Dimension(name, units, [0, 1, 2, 4])
        self.assertFalse(left == right)
        left = dimension.Dimension(name, units, [0, 1, 2])
        right = dimension.Dimension(name, units, values)
        self.assertFalse(left == right)
        left = dimension.Dimension('name', units, values)
        right = dimension.Dimension(name, units, values)
        self.assertFalse(left == right)
        left = dimension.Dimension(name, 'units', values)
        right = dimension.Dimension(name, units, values)
        self.assertFalse(left == right)
        left = dimension.Dimension(name, units, values,
                                   mode=dimension.DimType.DEPENDENT)
        right = dimension.Dimension(name, units, values)
        self.assertFalse(left == right)

    def test_invalid_mode(self):
        # mode must be a DimType member, not a string
        with self.assertRaises(TypeError):
            _ = dimension.Dimension('Name', 'units', 5, mode='Incomplete')

    def test_default_mode(self):
        dim = dimension.Dimension('Name', 'units', 1)
        self.assertEqual(dim.mode, dimension.DimType.DEFAULT)

    def test_illegal_instantiation(self):
        # name / units must be strings; values must be a positive int or array
        with self.assertRaises(TypeError):
            _ = dimension.Dimension('Name', 14, np.arange(4))
        with self.assertRaises(TypeError):
            _ = dimension.Dimension(14, 'nm', np.arange(4))
        with self.assertRaises(ValueError):
            _ = dimension.Dimension('Name', 'unit', 0)
        with self.assertRaises(TypeError):
            _ = dimension.Dimension('Name', 'unit', 'invalid')
class TestDimType(unittest.TestCase):
    """DimType ordering: only DimType-to-DimType comparisons are defined."""
    def test_dim_type_invalid_comparison(self):
        # Comparing against a plain string must raise, not silently return False
        with self.assertRaises(TypeError):
            dimension.DimType.INCOMPLETE == "Default"
    def test_dim_type_valid_comparison(self):
        self.assertTrue(dimension.DimType.DEFAULT < dimension.DimType.INCOMPLETE)
        self.assertTrue(dimension.DimType.INCOMPLETE < dimension.DimType.DEPENDENT) | 4,008 | 32.974576 | 100 | py |
pyUSID-legacy | pyUSID-master-legacy/tests/io/__init__.py | 0 | 0 | 0 | py | |
pyUSID-legacy | pyUSID-master-legacy/tests/io/test_reg_ref.py | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 29 15:07:16 2018
@author: Suhas Somnath
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import os
import sys
import h5py
import numpy as np
from . import data_utils
sys.path.append("../../pyUSID/")
from pyUSID.io.hdf_utils import get_attr
from pyUSID.io import reg_ref
if sys.version_info.major == 3:
unicode = str
class TestRegRef(unittest.TestCase):
    """Tests for pyUSID.io.reg_ref against the standard BEPS fixture file."""
    def setUp(self):
        # Generate the BEPS HDF5 fixture fresh for every test
        data_utils.make_beps_file()
    def tearDown(self):
        # Remove the fixture so tests stay independent
        data_utils.delete_existing_file(data_utils.std_beps_path)
    def test_copy_reg_ref_reduced_dim(self):
        # TODO: Fill this test in at earliest convenience. Overriden temporarily
        assert True
# Allow running this test module directly
if __name__ == '__main__':
    unittest.main()
| 810 | 19.275 | 82 | py |
pyUSID-legacy | pyUSID-master-legacy/tests/io/simple_process.py | """
Simple process class for purpose of testing.
Created on: Jul 19, 2019
Author: Emily Costa
"""
import h5py
from pyUSID.processing.process import Process
import numpy as np
from pyUSID import hdf_utils
import matplotlib.pyplot as plt
class SimpleProcess(Process):
    """Minimal Process subclass for testing: applies an FFT (shifted) to every
    row of the main dataset and writes the result into a new results group."""
    def __init__(self, h5_main, verbose=True, **kwargs):
        super(SimpleProcess, self).__init__(h5_main, verbose, **kwargs)
        # Current chunk of raw data (populated by the parent class machinery)
        self.data = None
        # FFT of a single random pixel, set by test()
        self.test_data = None
        # HDF5 dataset that receives the computed chunks
        self.results = None
        # Number of chunks written so far (logging only)
        self.chunk_amount = 0
        self.process_name = 'Simple_Process'
        if self.verbose: print('Done with initializing book-keepings')
    def test(self):
        # Spot-check on a single random pixel; only rank 0 runs it under MPI
        if self.mpi_rank > 0:
            return
        ran_ind = np.random.randint(0, high=self.h5_main.shape[0])
        self.test_data = np.fft.fftshift(np.fft.fft(self.h5_main[ran_ind]))
    def _create_results_datasets(self):
        # Create the results group plus an empty dataset shaped like the source
        self.h5_results_grp = hdf_utils.create_results_group(self.h5_main, self.process_name)
        assert isinstance(self.h5_results_grp, h5py.Group)
        if self.verbose: print('Results group created.')
        self.results = hdf_utils.create_empty_dataset(self.h5_main, self.h5_main.dtype, 'Filtered_Data',
                                                      h5_group=self.h5_results_grp)
        #self.results = hdf_utils.write_main_dataset(self.h5_results_grp, (self.h5_main.shape[0], 1), "Results", "Results", "Units", None,
        #usid.io.write_utils.Dimension('arb', '', [1]), h5_pos_inds=self.h5_main.h5_pos_inds, h5_pos_vals=self.h5_main.h5_pos_vals, dtype=np.float32)
        if self.verbose: print('Empty main dataset for results written')
    def _write_results_chunk(self):
        # Write the freshly computed chunk into the rows covered by this batch
        pos_in_batch = self._get_pixels_in_current_batch()
        print(type(self.data))
        print(type(self.results))
        self.results[pos_in_batch, :] = self.data
        #self.results = self.h5_results_grp['Simple_Data']
        self.chunk_amount = self.chunk_amount + 1
        if self.verbose: print('Chunk {} written.'.format(self.chunk_amount))
    def _unit_computation(self):
        # Row-wise FFT with the zero-frequency component shifted to the center
        self.data = np.fft.fftshift(np.fft.fft(self.data, axis=1), axes=1)
    def plot_test(self):
        # Persist the spot-check spectrum computed by test() as an image
        fig, axis = plt.subplots()
        axis.plot(self.test_data)
        plt.savefig('test_partial.png')
        if self.verbose: print('Test image created.')
| 2,354 | 38.915254 | 149 | py |
pyUSID-legacy | pyUSID-master-legacy/tests/io/hdf_utils/test_simple.py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: Suhas Somnath
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import os
import sys
import h5py
import numpy as np
import shutil
sys.path.append("../../pyUSID/")
from pyUSID.io import hdf_utils, Dimension, USIDataset
from .. import data_utils
if sys.version_info.major == 3:
unicode = str
class TestSimple(unittest.TestCase):
    """Base class: builds all four HDF5 fixture files before each test and
    removes them afterwards so no test sees a stale file."""
    def setUp(self):
        data_utils.make_beps_file()
        data_utils.make_sparse_sampling_file()
        data_utils.make_incomplete_measurement_file()
        data_utils.make_relaxation_file()
    def tearDown(self):
        for file_path in [data_utils.std_beps_path,
                          data_utils.sparse_sampling_path,
                          data_utils.incomplete_measurement_path,
                          data_utils.relaxation_path]:
            data_utils.delete_existing_file(file_path)
class TestCheckIfMain(TestSimple):
    """Exercises hdf_utils.check_if_main against valid and invalid datasets."""
    def test_legal(self):
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            expected_dsets = [h5_f['/Raw_Measurement/source_main'],
                              h5_f['/Raw_Measurement/source_main-Fitter_000/results_main'],
                              h5_f['/Raw_Measurement/source_main-Fitter_001/results_main']]
            for dset in expected_dsets:
                self.assertTrue(hdf_utils.check_if_main(dset, verbose=False))
    def test_illegal_01(self):
        # None of these qualify: files, scalars, arrays, ancillary dsets, groups
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            not_main_dsets = [h5_f,
                              4.123,
                              np.arange(6),
                              h5_f['/Raw_Measurement/Position_Indices'],
                              h5_f['/Raw_Measurement/source_main-Fitter_000'],
                              h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'],
                              h5_f['/Raw_Measurement/Spectroscopic_Values']]
            for dset in not_main_dsets:
                self.assertFalse(hdf_utils.check_if_main(dset))
    def test_anc_not_dsets(self):
        # Ancillary attributes point at a non-dataset object (the file itself)
        temp_path = 'test.h5'
        data_utils.delete_existing_file(temp_path)
        with h5py.File(temp_path, mode='w') as h5_f:
            h5_dset = h5_f.create_dataset('Main', data=np.random.rand(2, 3))
            for anc_dset_name in ['Position_Indices', 'Position_Values',
                                  'Spectroscopic_Indices', 'Spectroscopic_Values']:
                h5_dset.attrs[anc_dset_name] = h5_f.ref
            self.assertFalse(hdf_utils.check_if_main(h5_dset, verbose=False))
        os.remove(temp_path)
    def test_missing_str_attrs(self):
        # The mandatory 'quantity' / 'units' string attributes are absent
        temp_path = 'test.h5'
        data_utils.delete_existing_file(temp_path)
        with h5py.File(temp_path, mode='w') as h5_f:
            h5_dset = h5_f.create_dataset('Main', data=np.random.rand(2, 3))
            for anc_dset_name in ['Position_Indices', 'Position_Values',
                                  'Spectroscopic_Indices', 'Spectroscopic_Values']:
                h5_dset.attrs[anc_dset_name] = h5_dset.ref
            self.assertFalse(hdf_utils.check_if_main(h5_dset, verbose=False))
        os.remove(temp_path)
    def test_invalid_str_attrs(self):
        # 'quantity' / 'units' exist but are not strings
        temp_path = 'test.h5'
        data_utils.delete_existing_file(temp_path)
        with h5py.File(temp_path, mode='w') as h5_f:
            h5_dset = h5_f.create_dataset('Main', data=np.random.rand(2, 3))
            h5_dset.attrs['quantity'] = [1, 2, 3]
            h5_dset.attrs['units'] = 4.1234
            for anc_dset_name in ['Position_Indices', 'Position_Values',
                                  'Spectroscopic_Indices', 'Spectroscopic_Values']:
                h5_dset.attrs[anc_dset_name] = h5_dset.ref
            self.assertFalse(hdf_utils.check_if_main(h5_dset, verbose=False))
        os.remove(temp_path)
    def test_anc_shapes_not_matching(self):
        # Ancillary dataset shapes are inconsistent with the main dataset
        temp_path = 'test.h5'
        data_utils.delete_existing_file(temp_path)
        with h5py.File(temp_path, mode='w') as h5_f:
            h5_main = h5_f.create_dataset('Main', data=np.random.rand(2, 3))
            h5_pos_ind = h5_f.create_dataset('Pos_Inds', data=np.random.rand(2, 1))
            h5_spec_ind = h5_f.create_dataset('Spec_Inds', data=np.random.rand(1, 5))
            h5_main.attrs['quantity'] = 'quant'
            h5_main.attrs['units'] = 'unit'
            for anc_dset_name in ['Position_Indices', 'Position_Values']:
                h5_main.attrs[anc_dset_name] = h5_pos_ind.ref
            for anc_dset_name in ['Spectroscopic_Indices', 'Spectroscopic_Values']:
                h5_main.attrs[anc_dset_name] = h5_spec_ind.ref
            self.assertFalse(hdf_utils.check_if_main(h5_main, verbose=False))
        os.remove(temp_path)
class TestGetSourceDataset(TestSimple):
    """Tests for hdf_utils.get_source_dataset (results group -> source main)."""
    def test_legal(self):
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_groups = [h5_f['/Raw_Measurement/source_main-Fitter_000'],
                         h5_f['/Raw_Measurement/source_main-Fitter_001']]
            h5_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            for h5_grp in h5_groups:
                self.assertEqual(h5_main, hdf_utils.get_source_dataset(h5_grp))
    def test_invalid_type(self):
        # A plain string path is not a valid HDF5 group object
        with self.assertRaises(TypeError):
            _ = hdf_utils.get_source_dataset('/Raw_Measurement/Misc')
    def test_illegal(self):
        # 'Misc' does not follow the '<source>-<Tool>_00x' naming convention
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            with self.assertRaises(ValueError):
                _ = hdf_utils.get_source_dataset(h5_f['/Raw_Measurement/Misc'])
class TestGetAllMain(TestSimple):
    """Tests for hdf_utils.get_all_main (discover every main dataset in a file)."""
    def test_invalid_type(self):
        with self.assertRaises(TypeError):
            _ = hdf_utils.get_all_main("sdsdsds")
        with self.assertRaises(TypeError):
            _ = hdf_utils.get_all_main(np.arange(4))
    def test_legal(self):
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            expected_dsets = [h5_f['/Raw_Measurement/source_main'],
                              h5_f['/Raw_Measurement/source_main-Fitter_000/results_main'],
                              h5_f['/Raw_Measurement/source_main-Fitter_001/results_main']]
            main_dsets = hdf_utils.get_all_main(h5_f, verbose=False)
            # self.assertEqual(set(main_dsets), set(expected_dsets))
            self.assertEqual(len(main_dsets), len(expected_dsets))
            self.assertTrue(np.all([x.name == y.name for x, y in zip(main_dsets, expected_dsets)]))
class TestWriteIndValDsets(TestSimple):
    """Tests for hdf_utils.write_ind_val_dsets."""

    def base_bare_minimum_inputs(self, slow_to_fast, is_spectral):
        """Write an Indices / Values pair from a minimal 2D descriptor and
        validate the contents for the requested orientation and ordering.

        Parameters
        ----------
        slow_to_fast : bool
            Whether dimensions are supplied slowest-varying first
        is_spectral : bool
            True for spectroscopic (row-wise) datasets, False for position
        """
        num_cols = 3
        num_rows = 2
        sizes = [num_cols, num_rows]
        dim_names = ['X', 'Y']
        dim_units = ['nm', 'um']
        if slow_to_fast:
            dim_names = dim_names[::-1]
            dim_units = dim_units[::-1]
            sizes = sizes[::-1]
        descriptor = []
        for length, name, units in zip(sizes, dim_names, dim_units):
            descriptor.append(Dimension(name, units, np.arange(length)))
        # BUG FIX: this assignment previously ended with a stray line
        # continuation character ('\') that joined it with the following
        # 'if' statement, raising a SyntaxError on import.
        inds_data = np.vstack((np.tile(np.arange(num_cols), num_rows),
                               np.repeat(np.arange(num_rows), num_cols)))
        if not is_spectral:
            # Position matrices are the transpose of spectroscopic ones
            inds_data = inds_data.T
        if slow_to_fast:
            func = np.flipud if is_spectral else np.fliplr
            inds_data = func(inds_data)
        file_path = 'test_write_ind_val_dsets.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_inds, h5_vals = hdf_utils.write_ind_val_dsets(h5_f, descriptor, is_spectral=is_spectral,
                                                             slow_to_fast=slow_to_fast)
            data_utils.validate_aux_dset_pair(self, h5_f, h5_inds, h5_vals, dim_names, dim_units, inds_data,
                                              is_spectral=is_spectral, slow_to_fast=slow_to_fast)
        os.remove(file_path)

    def test_legal_bare_minimum_pos_f2s(self):
        self.base_bare_minimum_inputs(False, False)

    def test_legal_bare_minimum_pos_s2f(self):
        self.base_bare_minimum_inputs(True, False)

    def test_legal_bare_minimum_spec_f2s(self):
        self.base_bare_minimum_inputs(False, True)

    def test_legal_bare_minimum_spec_s2f(self):
        self.base_bare_minimum_inputs(True, True)

    def test_legal_override_steps_offsets_base_name(self):
        """Non-default step sizes, offsets and base name should all be
        honored when writing the ancillary dataset pair."""
        num_cols = 2
        num_rows = 3
        dim_names = ['X', 'Y']
        dim_units = ['nm', 'um']
        col_step = 0.25
        row_step = 0.05
        col_initial = 1
        row_initial = 0.2
        descriptor = []
        for length, name, units, step, initial in zip([num_cols, num_rows], dim_names, dim_units,
                                                      [col_step, row_step], [col_initial, row_initial]):
            descriptor.append(Dimension(name, units, initial + step * np.arange(length)))
        new_base_name = 'Overriden'
        # Sending in Fast to Slow but what comes out is slow to fast
        spec_inds = np.vstack((np.tile(np.arange(num_cols), num_rows),
                               np.repeat(np.arange(num_rows), num_cols)))
        spec_vals = np.vstack((np.tile(np.arange(num_cols), num_rows) * col_step + col_initial,
                               np.repeat(np.arange(num_rows), num_cols) * row_step + row_initial))
        file_path = 'test_write_ind_val_dsets.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_group = h5_f.create_group("Blah")
            h5_inds, h5_vals = hdf_utils.write_ind_val_dsets(h5_group, descriptor, is_spectral=True,
                                                             base_name=new_base_name, slow_to_fast=False)
            data_utils.validate_aux_dset_pair(self, h5_group, h5_inds, h5_vals, dim_names, dim_units, spec_inds,
                                              vals_matrix=spec_vals, base_name=new_base_name, is_spectral=True,
                                              slow_to_fast=False)
        os.remove(file_path)

    def test_illegal(self):
        """Writing to a closed (but otherwise valid) file handle must fail."""
        sizes = [3, 2]
        dim_names = ['X', 'Y']
        dim_units = ['nm', 'um']
        descriptor = []
        for length, name, units in zip(sizes, dim_names, dim_units):
            descriptor.append(Dimension(name, units, np.arange(length)))
        file_path = 'test_write_ind_val_dsets.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            pass
        with self.assertRaises(ValueError):
            # h5_f should be valid in terms of type but closed
            _ = hdf_utils.write_ind_val_dsets(h5_f, descriptor)
        os.remove(file_path)
class TestWriteReducedAncDsets(TestSimple):
    """Tests for hdf_utils.write_reduced_anc_dsets.

    Each test reduces one or more dimensions out of an ancillary
    Indices / Values dataset pair and checks the products with the
    shared ``_validate_reduced_pair`` helper (the validation loop was
    previously copy-pasted into every test method).
    """

    def _validate_reduced_pair(self, h5_inds_new, h5_vals_new, orig_dtypes,
                               exp_parent, base_name, dim_names, dim_units,
                               ref_inds, ref_vals):
        """Assert a reduced Indices / Values pair matches expectations.

        Parameters
        ----------
        h5_inds_new, h5_vals_new : h5py.Dataset
            Datasets returned by write_reduced_anc_dsets
        orig_dtypes : list
            dtypes of the original indices and values datasets, in order
        exp_parent : h5py.Group
            Expected parent of both new datasets
        base_name : str
            Prefix expected in both new dataset names
        dim_names, dim_units : list of str
            Expected 'labels' and 'units' attributes
        ref_inds, ref_vals : numpy.ndarray
            Expected contents of the indices / values datasets
        """
        for h5_dset, exp_dtype, exp_name, ref_data in zip(
                [h5_inds_new, h5_vals_new], orig_dtypes,
                [base_name + '_Indices', base_name + '_Values'],
                [ref_inds, ref_vals]):
            self.assertIsInstance(h5_dset, h5py.Dataset)
            self.assertEqual(h5_dset.parent, exp_parent)
            self.assertEqual(h5_dset.name.split('/')[-1], exp_name)
            self.assertTrue(np.allclose(ref_data, h5_dset[()]))
            self.assertEqual(h5_dset.dtype, exp_dtype)
            self.assertTrue(np.all([_ in h5_dset.attrs.keys()
                                    for _ in ['labels', 'units']]))
            self.assertTrue(np.all([x == y for x, y in
                                    zip(dim_names, hdf_utils.get_attr(h5_dset, 'labels'))]))
            self.assertTrue(np.all([x == y for x, y in
                                    zip(dim_units, hdf_utils.get_attr(h5_dset, 'units'))]))

    def test_spec_2d_to_1d(self):
        duplicate_path = 'copy_test_hdf_utils.h5'
        data_utils.delete_existing_file(duplicate_path)
        shutil.copy(data_utils.std_beps_path, duplicate_path)
        with h5py.File(duplicate_path, mode='r+') as h5_f:
            h5_spec_inds_orig = h5_f['/Raw_Measurement/Spectroscopic_Indices']
            h5_spec_vals_orig = h5_f['/Raw_Measurement/Spectroscopic_Values']
            new_base_name = 'Blah'
            h5_spec_inds_new, h5_spec_vals_new = hdf_utils.write_reduced_anc_dsets(
                h5_spec_inds_orig.parent, h5_spec_inds_orig, h5_spec_vals_orig,
                'Bias', basename=new_base_name)
            # Only the Cycle dimension (2 steps) survives the reduction
            ref_data = np.expand_dims(np.arange(2), axis=0)
            self._validate_reduced_pair(h5_spec_inds_new, h5_spec_vals_new,
                                        [h5_spec_inds_orig.dtype, h5_spec_vals_orig.dtype],
                                        h5_spec_inds_orig.parent, new_base_name,
                                        ['Cycle'], [''], ref_data, ref_data)
        os.remove(duplicate_path)

    def test_spec_2d_to_1d_new_file(self):
        new_file_path = 'reduced_ancs.h5'
        data_utils.delete_existing_file(new_file_path)
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_spec_inds_orig = h5_f['/Raw_Measurement/Spectroscopic_Indices']
            h5_spec_vals_orig = h5_f['/Raw_Measurement/Spectroscopic_Values']
            new_base_name = 'Blah'
            with h5py.File(new_file_path, mode='w') as h5_f_new:
                # Results may land in a different file than the sources
                h5_spec_inds_new, h5_spec_vals_new = hdf_utils.write_reduced_anc_dsets(
                    h5_f_new, h5_spec_inds_orig, h5_spec_vals_orig, 'Bias',
                    basename=new_base_name)
                ref_data = np.expand_dims(np.arange(2), axis=0)
                self._validate_reduced_pair(h5_spec_inds_new, h5_spec_vals_new,
                                            [h5_spec_inds_orig.dtype, h5_spec_vals_orig.dtype],
                                            h5_f_new, new_base_name,
                                            ['Cycle'], [''], ref_data, ref_data)
        os.remove(new_file_path)

    def test_spec_1d_to_0d(self):
        duplicate_path = 'copy_test_hdf_utils.h5'
        data_utils.delete_existing_file(duplicate_path)
        with h5py.File(duplicate_path, mode='w') as h5_f:
            h5_spec_inds_orig, h5_spec_vals_orig = hdf_utils.write_ind_val_dsets(
                h5_f, Dimension('Bias', 'V', 10), is_spectral=True)
            new_base_name = 'Blah'
            h5_spec_inds_new, h5_spec_vals_new = hdf_utils.write_reduced_anc_dsets(
                h5_f, h5_spec_inds_orig, h5_spec_vals_orig, 'Bias',
                basename=new_base_name)
            # Removing the only dimension leaves a single placeholder step
            ref_data = np.expand_dims(np.arange(1), axis=0)
            self._validate_reduced_pair(h5_spec_inds_new, h5_spec_vals_new,
                                        [h5_spec_inds_orig.dtype, h5_spec_vals_orig.dtype],
                                        h5_spec_inds_orig.parent, new_base_name,
                                        ['Single_Step'], ['a. u.'], ref_data, ref_data)
        os.remove(duplicate_path)

    def test_3d_to_1d_pos_fastest_n_slowest(self):
        duplicate_path = 'copy_test_hdf_utils.h5'
        data_utils.delete_existing_file(duplicate_path)
        with h5py.File(duplicate_path, mode='w') as h5_f:
            dims = [Dimension('X', 'nm', np.linspace(300, 350, 5)),
                    Dimension('Y', 'um', [-2, 4, 10]),
                    Dimension('Z', 'm', 2)]
            h5_pos_inds_orig, h5_pos_vals_orig = hdf_utils.write_ind_val_dsets(
                h5_f, dims, is_spectral=False)
            h5_grp = h5_f.create_group('My_Group')
            # No basename supplied -> default 'Position' prefix
            h5_pos_inds_new, h5_pos_vals_new = hdf_utils.write_reduced_anc_dsets(
                h5_grp, h5_pos_inds_orig, h5_pos_vals_orig, ['X', 'Z'])
            # Position datasets are column-oriented, hence axis=1
            ref_inds = np.expand_dims(np.arange(3), axis=1)
            ref_vals = np.expand_dims([-2, 4, 10], axis=1)
            self._validate_reduced_pair(h5_pos_inds_new, h5_pos_vals_new,
                                        [h5_pos_inds_orig.dtype, h5_pos_vals_orig.dtype],
                                        h5_grp, 'Position', ['Y'], ['um'],
                                        ref_inds, ref_vals)
        os.remove(duplicate_path)

    def test_3d_to_1d_spec_fastest_n_slowest(self):
        duplicate_path = 'copy_test_hdf_utils.h5'
        data_utils.delete_existing_file(duplicate_path)
        with h5py.File(duplicate_path, mode='w') as h5_f:
            dims = [Dimension('Freq', 'Hz', np.linspace(300, 350, 5)),
                    Dimension('Bias', 'V', [-2, 4, 10]),
                    Dimension('Cycle', 'a.u.', 2)]
            h5_spec_inds_orig, h5_spec_vals_orig = hdf_utils.write_ind_val_dsets(
                h5_f, dims, is_spectral=True)
            new_base_name = 'Blah'
            h5_spec_inds_new, h5_spec_vals_new = hdf_utils.write_reduced_anc_dsets(
                h5_f, h5_spec_inds_orig, h5_spec_vals_orig, ['Freq', 'Cycle'],
                basename=new_base_name)
            ref_inds = np.expand_dims(np.arange(3), axis=0)
            ref_vals = np.expand_dims([-2, 4, 10], axis=0)
            self._validate_reduced_pair(h5_spec_inds_new, h5_spec_vals_new,
                                        [h5_spec_inds_orig.dtype, h5_spec_vals_orig.dtype],
                                        h5_spec_inds_orig.parent, new_base_name,
                                        ['Bias'], ['V'], ref_inds, ref_vals)
        os.remove(duplicate_path)

    def test_3d_to_1d_spec_fastest(self):
        duplicate_path = 'copy_test_hdf_utils.h5'
        data_utils.delete_existing_file(duplicate_path)
        with h5py.File(duplicate_path, mode='w') as h5_f:
            dims = [Dimension('Freq', 'Hz', np.linspace(300, 350, 5)),
                    Dimension('Bias', 'V', [-2, 4, 10]),
                    Dimension('Cycle', 'a.u.', 2)]
            h5_spec_inds_orig, h5_spec_vals_orig = hdf_utils.write_ind_val_dsets(
                h5_f, dims, is_spectral=True)
            new_base_name = 'Blah'
            h5_spec_inds_new, h5_spec_vals_new = hdf_utils.write_reduced_anc_dsets(
                h5_f, h5_spec_inds_orig, h5_spec_vals_orig, ['Freq', 'Bias'],
                basename=new_base_name)
            ref_inds = np.expand_dims(np.arange(2), axis=0)
            ref_vals = np.expand_dims([0, 1], axis=0)
            self._validate_reduced_pair(h5_spec_inds_new, h5_spec_vals_new,
                                        [h5_spec_inds_orig.dtype, h5_spec_vals_orig.dtype],
                                        h5_spec_inds_orig.parent, new_base_name,
                                        ['Cycle'], ['a.u.'], ref_inds, ref_vals)
        os.remove(duplicate_path)

    def test_3d_to_1d_spec_slowest(self):
        duplicate_path = 'copy_test_hdf_utils.h5'
        data_utils.delete_existing_file(duplicate_path)
        with h5py.File(duplicate_path, mode='w') as h5_f:
            dims = [Dimension('Freq', 'Hz', np.linspace(300, 350, 5)),
                    Dimension('Bias', 'V', [-2, 4, 10]),
                    Dimension('Cycle', 'a.u.', 2)]
            h5_spec_inds_orig, h5_spec_vals_orig = hdf_utils.write_ind_val_dsets(
                h5_f, dims, is_spectral=True)
            new_base_name = 'Blah'
            h5_spec_inds_new, h5_spec_vals_new = hdf_utils.write_reduced_anc_dsets(
                h5_f, h5_spec_inds_orig, h5_spec_vals_orig, ['Cycle', 'Bias'],
                basename=new_base_name)
            ref_inds = np.expand_dims(np.arange(5), axis=0)
            ref_vals = np.expand_dims(np.linspace(300, 350, 5), axis=0)
            self._validate_reduced_pair(h5_spec_inds_new, h5_spec_vals_new,
                                        [h5_spec_inds_orig.dtype, h5_spec_vals_orig.dtype],
                                        h5_spec_inds_orig.parent, new_base_name,
                                        ['Freq'], ['Hz'], ref_inds, ref_vals)
        os.remove(duplicate_path)

    def test_3d_to_2d_spec_fastest_n_slowest(self):
        duplicate_path = 'copy_test_hdf_utils.h5'
        data_utils.delete_existing_file(duplicate_path)
        with h5py.File(duplicate_path, mode='w') as h5_f:
            dims = [Dimension('Freq', 'Hz', np.linspace(300, 350, 5)),
                    Dimension('Bias', 'V', [-2, 4, 10]),
                    Dimension('Cycle', 'a.u.', 2)]
            h5_spec_inds_orig, h5_spec_vals_orig = hdf_utils.write_ind_val_dsets(
                h5_f, dims, is_spectral=True, slow_to_fast=False)
            new_base_name = 'Blah'
            h5_spec_inds_new, h5_spec_vals_new = hdf_utils.write_reduced_anc_dsets(
                h5_f, h5_spec_inds_orig, h5_spec_vals_orig, ['Bias'],
                basename=new_base_name)
            ref_vals = np.vstack((np.tile(np.linspace(300, 350, 5), 2),
                                  np.repeat(np.arange(2), 5)))
            ref_inds = np.vstack((np.tile(np.arange(5, dtype=np.uint16), 2),
                                  np.repeat(np.arange(2, dtype=np.uint16), 5)))
            # Sending in Fast to Slow but what comes out is slow to fast
            ref_inds = np.flipud(ref_inds)
            ref_vals = np.flipud(ref_vals)
            self._validate_reduced_pair(h5_spec_inds_new, h5_spec_vals_new,
                                        [h5_spec_inds_orig.dtype, h5_spec_vals_orig.dtype],
                                        h5_spec_inds_orig.parent, new_base_name,
                                        ['Cycle', 'Freq'], ['a.u.', 'Hz'],
                                        ref_inds, ref_vals)
        os.remove(duplicate_path)
class TestFindResultsGroup(TestSimple):
    """Tests for hdf_utils.find_results_groups."""

    def test_legal(self):
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            source = h5_f['/Raw_Measurement/source_main']
            expected = {h5_f['/Raw_Measurement/source_main-Fitter_000'],
                        h5_f['/Raw_Measurement/source_main-Fitter_001']}
            found = hdf_utils.find_results_groups(source, 'Fitter')
            self.assertEqual(expected, set(found))

    def test_no_dset(self):
        # Passing the file handle instead of a dataset should fail
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            with self.assertRaises(TypeError):
                _ = hdf_utils.find_results_groups(h5_f, 'Fitter')

    def test_not_string(self):
        # The tool name must be a string
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            source = h5_f['/Raw_Measurement/source_main']
            with self.assertRaises(TypeError):
                _ = hdf_utils.find_results_groups(source, np.arange(5))

    def test_no_such_tool(self):
        # Unknown tool names should return an empty list, not raise
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            source = h5_f['/Raw_Measurement/source_main']
            self.assertEqual(0, len(hdf_utils.find_results_groups(source, 'Blah')))

    def test_results_in_diff_file(self):
        # Results groups may live in a different file from the source
        main_path = 'test.h5'
        other_path = 'new.h5'
        for path in (main_path, other_path):
            data_utils.delete_existing_file(path)
        with h5py.File(main_path, mode='w') as h5_f:
            h5_main = h5_f.create_dataset('Main', data=[1, 2, 3])
            with h5py.File(other_path, mode='w') as h5_f_2:
                expected = {h5_f_2.create_group('Main-Tool_000'),
                            h5_f_2.create_group('Main-Tool_001')}
                found = hdf_utils.find_results_groups(h5_main, 'Tool',
                                                      h5_parent_group=h5_f_2)
                self.assertEqual(expected, set(found))
        os.remove(main_path)
        os.remove(other_path)

    def test_results_in_diff_file_invalid_type(self):
        # h5_parent_group must be a Group / File, not a Dataset
        file_path = 'test.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_main = h5_f.create_dataset('Main', data=[1, 2, 3])
            with self.assertRaises(TypeError):
                _ = hdf_utils.find_results_groups(h5_main, 'Tool',
                                                  h5_parent_group=h5_main)
        os.remove(file_path)
class TestCheckForMatchingAttrs(TestSimple):
    """Tests for hdf_utils.check_for_matching_attrs against the standard
    BEPS fixture. All eight original methods repeated the same
    open-fixture / lookup / assert boilerplate, now factored into
    ``_assert_match``."""

    def _assert_match(self, h5_path, attrs, expected):
        """Open the fixture, look up *h5_path* and assert that
        check_for_matching_attrs evaluates truthy / falsy per *expected*.

        Parameters
        ----------
        h5_path : str
            Absolute HDF5 path of the object to inspect
        attrs : dict or None
            Parameters to compare against the stored attributes
        expected : bool
            Whether a match is expected
        """
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            ret = hdf_utils.check_for_matching_attrs(h5_f[h5_path],
                                                     new_parms=attrs)
            if expected:
                self.assertTrue(ret)
            else:
                self.assertFalse(ret)

    def test_dset_no_attrs(self):
        # No parameters at all trivially matches
        self._assert_match('/Raw_Measurement/source_main', None, True)

    def test_dset_matching_attrs(self):
        self._assert_match('/Raw_Measurement/source_main',
                           {'units': 'A', 'quantity': 'Current'}, True)

    def test_dset_one_mismatched_attrs(self):
        # A single unknown key is enough to break the match
        self._assert_match('/Raw_Measurement/source_main',
                           {'units': 'A', 'blah': 'meh'}, False)

    def test_grp(self):
        self._assert_match('/Raw_Measurement/source_main-Fitter_000',
                           {'att_1': 'string_val', 'att_2': 1.2345,
                            'att_3': [1, 2, 3, 4],
                            'att_4': ['str_1', 'str_2', 'str_3']}, True)

    def test_grp_mismatched_types_01(self):
        # Scalar string offered where a list of strings is stored
        self._assert_match('/Raw_Measurement/source_main-Fitter_000',
                           {'att_4': 'string_val'}, False)

    def test_grp_mismatched_types_02(self):
        # List offered where a scalar string is stored
        self._assert_match('/Raw_Measurement/source_main-Fitter_000',
                           {'att_1': ['str_1', 'str_2', 'str_3']}, False)

    def test_grp_mismatched_types_03(self):
        # Mixed-type list does not match a list of strings
        self._assert_match('/Raw_Measurement/source_main-Fitter_000',
                           {'att_4': [1, 4.234, 'str_3']}, False)

    def test_grp_mismatched_types_04(self):
        # Numeric list does not match a list of strings
        self._assert_match('/Raw_Measurement/source_main-Fitter_000',
                           {'att_4': [1, 4.234, 45]}, False)
class TestCheckForOld(TestSimple):
    """Tests for hdf_utils.check_for_old, which searches for existing
    results groups whose stored attributes match new parameters."""

    def test_invalid_types(self):
        # Every argument is type-checked before any HDF5 traversal happens
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_main = h5_f['/Raw_Measurement/source_main']
            with self.assertRaises(TypeError):
                _ = hdf_utils.check_for_old("h5_main", "blah")
            with self.assertRaises(TypeError):
                _ = hdf_utils.check_for_old(np.arange(4), "blah")
            with self.assertRaises(TypeError):
                _ = hdf_utils.check_for_old(h5_main, 1.234)
            with self.assertRaises(TypeError):
                _ = hdf_utils.check_for_old(h5_main, 'Fitter',
                                            new_parms="not_a_dictionary")
            with self.assertRaises(TypeError):
                _ = hdf_utils.check_for_old(h5_main, 'Fitter',
                                            target_dset=1.234)

    def test_valid_target_dset(self):
        # Matching on attributes of a named dataset inside each group;
        # both fixture Fitter groups carry a matching Spectroscopic_Indices
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_main = h5_f['/Raw_Measurement/source_main']
            attrs = {'units': ['V'], 'labels': ['Bias']}
            dset_name = 'Spectroscopic_Indices'
            groups = hdf_utils.check_for_old(h5_main, 'Fitter',
                                             new_parms=attrs,
                                             target_dset=dset_name,
                                             verbose=False)
            groups = set(groups)
            self.assertEqual(groups, set([h5_f['/Raw_Measurement/source_main-Fitter_000/'],
                                          h5_f['/Raw_Measurement/source_main-Fitter_001/']]))

    def test_invalid_target_dset(self):
        # A non-existent target dataset simply yields no matches
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_main = h5_f['/Raw_Measurement/source_main']
            attrs = {'att_1': 'string_val', 'att_2': 1.2345,
                     'att_3': [1, 2, 3, 4], 'att_4': ['str_1', 'str_2',
                                                      'str_3']}
            ret = hdf_utils.check_for_old(h5_main, 'Fitter', new_parms=attrs,
                                          target_dset='Does_not_exist')
            self.assertEqual(ret, [])

    def test_exact_match(self):
        # Parameters identical to those stored on Fitter_000
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_main = h5_f['/Raw_Measurement/source_main']
            attrs = {'att_1': 'string_val', 'att_2': 1.2345,
                     'att_3': [1, 2, 3, 4], 'att_4': ['str_1', 'str_2', 'str_3']}
            [h5_ret_grp] = hdf_utils.check_for_old(h5_main, 'Fitter',
                                                   new_parms=attrs,
                                                   target_dset=None)
            self.assertEqual(h5_ret_grp, h5_f['/Raw_Measurement/source_main-Fitter_000'])

    def test_subset_but_match(self):
        # A subset of the stored attributes still counts as a match
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_main = h5_f['/Raw_Measurement/source_main']
            attrs = {'att_2': 1.2345,
                     'att_3': [1, 2, 3, 4], 'att_4': ['str_1', 'str_2', 'str_3']}
            [h5_ret_grp] = hdf_utils.check_for_old(h5_main, 'Fitter',
                                                   new_parms=attrs,
                                                   target_dset=None)
            self.assertEqual(h5_ret_grp, h5_f['/Raw_Measurement/source_main-Fitter_000'])

    def test_exact_match_02(self):
        # Different parameter values match the second results group instead
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_main = h5_f['/Raw_Measurement/source_main']
            attrs = {'att_1': 'other_string_val', 'att_2': 5.4321,
                     'att_3': [4, 1, 3], 'att_4': ['s', 'str_2', 'str_3']}
            [h5_ret_grp] = hdf_utils.check_for_old(h5_main, 'Fitter',
                                                   new_parms=attrs,
                                                   target_dset=None)
            self.assertEqual(h5_ret_grp, h5_f['/Raw_Measurement/source_main-Fitter_001'])

    def test_fail_01(self):
        # Same keys but wrong value types -> no group should match
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_main = h5_f['/Raw_Measurement/source_main']
            attrs = {'att_1': [4, 1, 3], 'att_2': ['s', 'str_2', 'str_3'],
                     'att_3': 'other_string_val', 'att_4': 5.4321}
            ret_val = hdf_utils.check_for_old(h5_main, 'Fitter',
                                              new_parms=attrs, target_dset=None)
            self.assertIsInstance(ret_val, list)
            self.assertEqual(len(ret_val), 0)

    def test_fail_02(self):
        # Mostly unknown attribute names -> no group should match
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_main = h5_f['/Raw_Measurement/source_main']
            attrs = {'att_x': [4, 1, 3], 'att_z': ['s', 'str_2', 'str_3'],
                     'att_y': 'other_string_val', 'att_4': 5.4321}
            ret_val = hdf_utils.check_for_old(h5_main, 'Fitter',
                                              new_parms=attrs, target_dset=None)
            self.assertIsInstance(ret_val, list)
            self.assertEqual(len(ret_val), 0)
class TestCreateIndexedGroup(unittest.TestCase):
    """Tests for hdf_utils.create_indexed_group."""

    def test_first_group(self):
        file_path = 'test.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_grp = hdf_utils.create_indexed_group(h5_f, 'Hello')
            self.assertIsInstance(h5_grp, h5py.Group)
            self.assertEqual('/Hello_000', h5_grp.name)
            self.assertEqual(h5_f, h5_grp.parent)
            data_utils.verify_book_keeping_attrs(self, h5_grp)
            # Indexing restarts inside a freshly created parent group
            h5_nested = hdf_utils.create_indexed_group(h5_grp, 'Test')
            self.assertIsInstance(h5_nested, h5py.Group)
            self.assertEqual('/Hello_000/Test_000', h5_nested.name)
            self.assertEqual(h5_grp, h5_nested.parent)
            data_utils.verify_book_keeping_attrs(self, h5_nested)
        os.remove(file_path)

    def test_second(self):
        file_path = 'test.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            # The same base name requested twice gets consecutive indices
            for exp_name in ['/Hello_000', '/Hello_001']:
                h5_grp = hdf_utils.create_indexed_group(h5_f, 'Hello')
                self.assertIsInstance(h5_grp, h5py.Group)
                self.assertEqual(exp_name, h5_grp.name)
                self.assertEqual(h5_f, h5_grp.parent)
                data_utils.verify_book_keeping_attrs(self, h5_grp)
        os.remove(file_path)

    def test_w_suffix_(self):
        file_path = 'test.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            # A trailing underscore in the base name must not be duplicated
            h5_grp = hdf_utils.create_indexed_group(h5_f, 'Hello_')
            self.assertIsInstance(h5_grp, h5py.Group)
            self.assertEqual('/Hello_000', h5_grp.name)
            self.assertEqual(h5_f, h5_grp.parent)
            data_utils.verify_book_keeping_attrs(self, h5_grp)
        os.remove(file_path)

    def test_empty_base_name(self):
        file_path = 'test.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            # Whitespace-only base names are rejected
            with self.assertRaises(ValueError):
                _ = hdf_utils.create_indexed_group(h5_f, ' ')
        os.remove(file_path)

    def test_create_indexed_group_invalid_types(self):
        with self.assertRaises(TypeError):
            _ = hdf_utils.create_indexed_group(np.arange(4), "fddfd")
        file_path = 'test.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            with self.assertRaises(TypeError):
                _ = hdf_utils.create_indexed_group(h5_f, 1.2343)
        os.remove(file_path)
class TestCreateResultsGroup(unittest.TestCase):
    """Tests for hdf_utils.create_results_group."""

    def test_first(self):
        self.helper_first()

    def test_dash_in_name(self):
        # Dashes in the tool name should be converted to underscores
        self.helper_first(add_dash_to_name=True)

    def helper_first(self, add_dash_to_name=False):
        """Create results groups for a fresh dataset and verify naming,
        parentage and book-keeping attributes."""
        file_path = 'test.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_source = h5_f.create_dataset('Main', data=[1, 2, 3])
            if add_dash_to_name:
                h5_result_grp = hdf_utils.create_results_group(h5_source, 'Some-Tool')
                tool_name = 'Some_Tool'
            else:
                tool_name = 'Tool'
                h5_result_grp = hdf_utils.create_results_group(h5_source, tool_name)
            self.assertIsInstance(h5_result_grp, h5py.Group)
            self.assertEqual('/Main-' + tool_name + '_000', h5_result_grp.name)
            self.assertEqual(h5_f, h5_result_grp.parent)
            data_utils.verify_book_keeping_attrs(self, h5_result_grp)
            # A dataset inside a results group can itself spawn results
            h5_inner_dset = h5_result_grp.create_dataset('Main_Dataset', data=[1, 2, 3])
            h5_inner_grp = hdf_utils.create_results_group(h5_inner_dset, 'SHO_Fit')
            self.assertIsInstance(h5_inner_grp, h5py.Group)
            self.assertEqual('/Main-' + tool_name + '_000/Main_Dataset-SHO_Fit_000',
                             h5_inner_grp.name)
            self.assertEqual(h5_result_grp, h5_inner_grp.parent)
            data_utils.verify_book_keeping_attrs(self, h5_inner_grp)
        os.remove(file_path)

    def test_second(self):
        file_path = 'test.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_source = h5_f.create_dataset('Main', data=[1, 2, 3])
            # Same tool requested twice -> indices 000 then 001
            for exp_name in ['/Main-Tool_000', '/Main-Tool_001']:
                h5_grp = hdf_utils.create_results_group(h5_source, 'Tool')
                self.assertIsInstance(h5_grp, h5py.Group)
                self.assertEqual(exp_name, h5_grp.name)
                self.assertEqual(h5_f, h5_grp.parent)
                data_utils.verify_book_keeping_attrs(self, h5_grp)
        os.remove(file_path)

    def test_empty_tool_name(self):
        file_path = 'test.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_source = h5_f.create_dataset('Main', data=[1, 2, 3])
            # Whitespace-only tool names are rejected
            with self.assertRaises(ValueError):
                _ = hdf_utils.create_results_group(h5_source, ' ')
        os.remove(file_path)

    def test_invalid_types(self):
        with self.assertRaises(TypeError):
            _ = hdf_utils.create_results_group("not a dataset", 'Tool')
        file_path = 'test.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            # Files / groups are not acceptable source objects
            with self.assertRaises(TypeError):
                _ = hdf_utils.create_results_group(h5_f, 'Tool')
            h5_source = h5_f.create_dataset('Main', data=[1, 2, 3])
            with self.assertRaises(TypeError):
                _ = hdf_utils.create_results_group(h5_source, 'Tool',
                                                   h5_parent_group='not_group')
        os.remove(file_path)

    def test_different_file(self):
        src_path = 'test.h5'
        dest_path = 'new.h5'
        data_utils.delete_existing_file(src_path)
        data_utils.delete_existing_file(dest_path)
        with h5py.File(src_path, mode='w') as h5_f:
            h5_source = h5_f.create_dataset('Main', data=[1, 2, 3])
            # Ensuring that index is calculated at destination, not source:
            _ = h5_f.create_group('Main-Tool_000')
            with h5py.File(dest_path, mode='w') as h5_f_new:
                _ = h5_f_new.create_group('Main-Tool_000')
                h5_grp = hdf_utils.create_results_group(h5_source, 'Tool',
                                                        h5_parent_group=h5_f_new)
                self.assertIsInstance(h5_grp, h5py.Group)
                self.assertEqual('/Main-Tool_001', h5_grp.name)
                self.assertEqual(h5_f_new, h5_grp.parent)
                self.assertNotEqual(h5_source.file, h5_grp.file)
                data_utils.verify_book_keeping_attrs(self, h5_grp)
        os.remove(src_path)
        os.remove(dest_path)
class TestAssignGroupIndex(TestSimple):
    """Tests for hdf_utils.assign_group_index."""

    def test_existing(self):
        # Two Fitter groups already exist in the fixture -> next index is 002
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            parent = h5_f['/Raw_Measurement']
            self.assertEqual('source_main-Fitter_002',
                             hdf_utils.assign_group_index(parent, 'source_main-Fitter'))

    def test_new(self):
        # No group with this prefix exists yet -> index starts at 000
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            parent = h5_f['/Raw_Measurement']
            self.assertEqual('blah_000',
                             hdf_utils.assign_group_index(parent, 'blah_'))

    def test_invalid_dtypes(self):
        with self.assertRaises(TypeError):
            _ = hdf_utils.assign_group_index("not a dataset", 'blah_')
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            parent = h5_f['/Raw_Measurement']
            with self.assertRaises(TypeError):
                _ = hdf_utils.assign_group_index(parent, 1.24)
class TestLinkAsMain(unittest.TestCase):
def test_pos_args_not_h5_dset(self):
file_path = 'link_as_main.h5'
data_utils.delete_existing_file(file_path)
with h5py.File(file_path, mode='w') as h5_f:
h5_dset = h5_f.create_dataset("Blah", data=np.random.rand(2, 3))
with self.assertRaises(TypeError):
hdf_utils.link_as_main("h5_main", 1.234, -2, False, {"h5_spec_vals": 2.432})
with self.assertRaises(TypeError):
hdf_utils.link_as_main(h5_dset, 1.234, -2, False, {"h5_spec_vals": 2.432})
with self.assertRaises(TypeError):
hdf_utils.link_as_main(h5_dset, h5_dset, -2, False, {"h5_spec_vals": 2.432})
with self.assertRaises(TypeError):
hdf_utils.link_as_main(h5_dset, h5_dset, h5_dset, False, {"h5_spec_vals": 2.432})
with self.assertRaises(TypeError):
hdf_utils.link_as_main(h5_dset, h5_dset, h5_dset, h5_dset, {"h5_spec_vals": 2.432})
data_utils.delete_existing_file(file_path)
def test_anc_args_not_h5_dset(self):
file_path = 'link_as_main.h5'
data_utils.delete_existing_file(file_path)
with h5py.File(file_path, mode='w') as h5_f:
h5_dset = h5_f.create_dataset("Blah", data=np.random.rand(2, 3))
with self.assertRaises(TypeError):
hdf_utils.link_as_main("h5_main", 1.234, -2, False, {"h5_spec_vals": 2.432})
with self.assertRaises(TypeError):
hdf_utils.link_as_main(h5_dset, 1.234, -2, False, {"h5_spec_vals": 2.432})
with self.assertRaises(TypeError):
hdf_utils.link_as_main(h5_dset, h5_dset, -2, False, {"h5_spec_vals": 2.432})
with self.assertRaises(TypeError):
hdf_utils.link_as_main(h5_dset, h5_dset, h5_dset, False, {"h5_spec_vals": 2.432})
with self.assertRaises(TypeError):
hdf_utils.link_as_main(h5_dset, h5_dset, h5_dset, h5_dset, {"h5_spec_vals": 2.432})
data_utils.delete_existing_file(file_path)
def test_ind_vals_not_same_shape(self):
file_path = 'link_as_main.h5'
data_utils.delete_existing_file(file_path)
with h5py.File(file_path, mode='w') as h5_f:
h5_main = h5_f.create_dataset("Blah", data=np.zeros((3, 5), dtype=np.uint16))
h5_pos_inds = h5_f.create_dataset("P_I", data=np.zeros((7, 2), dtype=np.uint16))
h5_pos_vals = h5_f.create_dataset("P_V", data=np.zeros((3, 2), dtype=np.uint16))
h5_spec_inds = h5_f.create_dataset("S_I", data=np.zeros((2, 5), dtype=np.uint16))
h5_spec_vals = h5_f.create_dataset("S_V", data=np.zeros((2, 5), dtype=np.uint16))
with self.assertRaises(ValueError):
hdf_utils.link_as_main(h5_main, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals)
data_utils.delete_existing_file(file_path)
with h5py.File(file_path, mode='w') as h5_f:
h5_main = h5_f.create_dataset("Blah", data=np.zeros((3, 5), dtype=np.uint16))
h5_pos_inds = h5_f.create_dataset("P_I", data=np.zeros((3, 2), dtype=np.uint16))
h5_pos_vals = h5_f.create_dataset("P_V", data=np.zeros((3, 2), dtype=np.uint16))
h5_spec_inds = h5_f.create_dataset("S_I", data=np.zeros((2, 8), dtype=np.uint16))
h5_spec_vals = h5_f.create_dataset("S_V", data=np.zeros((2, 5), dtype=np.uint16))
with self.assertRaises(ValueError):
hdf_utils.link_as_main(h5_main, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals)
data_utils.delete_existing_file(file_path)
    def helper_test(self, quant_units_specified):
        """
        Build a full main + ancillary dataset set and verify link_as_main.

        Parameters
        ----------
        quant_units_specified : bool
            If True, the 'quantity' and 'units' attributes are written onto
            the main dataset before linking, so the linked result should pass
            check_if_main and come back as a USIDataset. Otherwise a plain
            h5py.Dataset is expected.
        """
        file_path = 'link_as_main.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_raw_grp = h5_f.create_group('Raw_Measurement')
            num_rows = 3
            num_cols = 5
            num_cycles = 2
            num_cycle_pts = 7
            source_dset_name = 'source_main'
            # Position indices: column 0 (X) varies fastest, column 1 (Y) slowest
            source_pos_data = np.vstack((np.tile(np.arange(num_cols), num_rows),
                                         np.repeat(np.arange(num_rows), num_cols))).T
            pos_attrs = {'units': ['nm', 'um'], 'labels': ['X', 'Y']}
            h5_pos_inds = h5_raw_grp.create_dataset('Position_Indices', data=source_pos_data, dtype=np.uint16)
            data_utils.write_aux_reg_ref(h5_pos_inds, pos_attrs['labels'], is_spec=False)
            data_utils.write_string_list_as_attr(h5_pos_inds, pos_attrs)
            # Values dataset mirrors the indices but as floats
            h5_pos_vals = h5_raw_grp.create_dataset('Position_Values', data=source_pos_data, dtype=np.float32)
            data_utils.write_aux_reg_ref(h5_pos_vals, pos_attrs['labels'], is_spec=False)
            data_utils.write_string_list_as_attr(h5_pos_vals, pos_attrs)
            # Spectroscopic indices: row 0 (Bias) varies fastest, row 1 (Cycle) slowest
            source_spec_data = np.vstack((np.tile(np.arange(num_cycle_pts), num_cycles),
                                          np.repeat(np.arange(num_cycles), num_cycle_pts)))
            source_spec_attrs = {'units': ['V', ''], 'labels': ['Bias', 'Cycle']}
            h5_source_spec_inds = h5_raw_grp.create_dataset('Spectroscopic_Indices', data=source_spec_data,
                                                            dtype=np.uint16)
            data_utils.write_aux_reg_ref(h5_source_spec_inds, source_spec_attrs['labels'], is_spec=True)
            data_utils.write_string_list_as_attr(h5_source_spec_inds, source_spec_attrs)
            h5_source_spec_vals = h5_raw_grp.create_dataset('Spectroscopic_Values', data=source_spec_data,
                                                            dtype=np.float32)
            data_utils.write_aux_reg_ref(h5_source_spec_vals, source_spec_attrs['labels'], is_spec=True)
            data_utils.write_string_list_as_attr(h5_source_spec_vals, source_spec_attrs)
            source_main_data = np.random.rand(num_rows * num_cols, num_cycle_pts * num_cycles)
            h5_source_main = h5_raw_grp.create_dataset(source_dset_name, data=source_main_data)
            expected_type = h5py.Dataset
            if quant_units_specified:
                expected_type = USIDataset
                data_utils.write_safe_attrs(h5_source_main, {'units': 'A', 'quantity': 'Current'})
            # Without the ancillary links, the dataset cannot be "main" yet
            self.assertFalse(hdf_utils.check_if_main(h5_source_main))
            # Now need to link as main!
            usid_source = hdf_utils.link_as_main(h5_source_main, h5_pos_inds, h5_pos_vals, h5_source_spec_inds,
                                                 h5_source_spec_vals)
            # Finally:
            if quant_units_specified:
                self.assertTrue(hdf_utils.check_if_main(h5_source_main))
            self.assertIsInstance(usid_source, expected_type)
        os.remove(file_path)
    def test_typical_attrs_specified(self):
        """With 'quantity' / 'units' present, the link should yield a USIDataset."""
        self.helper_test(True)
    def test_typical_attrs_not_specified(self):
        """Without 'quantity' / 'units', the result stays a plain h5py.Dataset."""
        self.helper_test(False)
class TestCopyMainAttributes(unittest.TestCase):
    """Tests for hdf_utils.copy_main_attributes"""

    def test_valid(self):
        """'quantity' and 'units' are copied from the source to the sink dataset."""
        path = 'test.h5'
        expected = {'quantity': 'Current', 'units': 'nA'}
        data_utils.delete_existing_file(path)
        with h5py.File(path, mode='w') as h5_file:
            h5_source = h5_file.create_dataset('Main_01', data=[1, 23])
            h5_source.attrs.update(expected)
            h5_sink = h5_file.create_group('Group').create_dataset('Main_02', data=[4, 5])
            hdf_utils.copy_main_attributes(h5_source, h5_sink)
            for attr_name, attr_val in expected.items():
                self.assertEqual(h5_sink.attrs[attr_name], attr_val)
        os.remove(path)

    def test_no_main_attrs(self):
        """A source without the mandatory attributes triggers a KeyError."""
        path = 'test.h5'
        data_utils.delete_existing_file(path)
        with h5py.File(path, mode='w') as h5_file:
            h5_source = h5_file.create_dataset('Main_01', data=[1, 23])
            h5_sink = h5_file.create_group('Group').create_dataset('Main_02', data=[4, 5])
            with self.assertRaises(KeyError):
                hdf_utils.copy_main_attributes(h5_source, h5_sink)
        os.remove(path)

    def test_wrong_objects(self):
        """Non-dataset arguments are rejected with TypeError in either position."""
        path = 'test.h5'
        data_utils.delete_existing_file(path)
        with h5py.File(path, mode='w') as h5_file:
            h5_dset = h5_file.create_dataset('Main_01', data=[1, 23])
            h5_grp = h5_file.create_group('Group')
            for source, sink in [(h5_dset, h5_grp), (h5_grp, h5_dset)]:
                with self.assertRaises(TypeError):
                    hdf_utils.copy_main_attributes(source, sink)
        os.remove(path)
class TestCreateEmptyDataset(unittest.TestCase):
    """Tests for hdf_utils.create_empty_dataset"""

    def test_invalid_types(self):
        """Bad argument types (source, dtype, name, new_attrs, h5_group) all raise TypeError."""
        with self.assertRaises(TypeError):
            _ = hdf_utils.create_empty_dataset("not a dataset", np.float16, 'Duplicate')
        file_path = 'test.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_dset_source = h5_f.create_dataset('Source', data=[1, 2, 3])
            with self.assertRaises(TypeError):
                _ = hdf_utils.create_empty_dataset(h5_dset_source, np.arange(15), 'Duplicate')
            with self.assertRaises(TypeError):
                _ = hdf_utils.create_empty_dataset(h5_dset_source, np.float32, {'not a': 'string'})
            with self.assertRaises(TypeError):
                _ = hdf_utils.create_empty_dataset(h5_dset_source, np.float16, 'Duplicate',
                                                   new_attrs="not_a_dictionary")
            with self.assertRaises(TypeError):
                _ = hdf_utils.create_empty_dataset(h5_dset_source, np.float16, 'Duplicate',
                                                   h5_group=h5_dset_source)
        os.remove(file_path)

    def test_same_group_new_attrs(self):
        """Duplicate lands next to the source and carries both old and new attributes."""
        file_path = 'test.h5'
        existing_attrs = {'a': 1, 'b': 'Hello'}
        easy_attrs = {'1_string': 'Current', '1_number': 35.23}
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_dset_source = h5_f.create_dataset('Source', data=[1, 2, 3])
            h5_dset_source.attrs.update(existing_attrs)
            h5_duplicate = hdf_utils.create_empty_dataset(h5_dset_source, np.float16, 'Duplicate', new_attrs=easy_attrs)
            self.assertIsInstance(h5_duplicate, h5py.Dataset)
            self.assertEqual(h5_duplicate.parent, h5_dset_source.parent)
            self.assertEqual(h5_duplicate.name, '/Duplicate')
            self.assertEqual(h5_duplicate.dtype, np.float16)
            for key, val in easy_attrs.items():
                self.assertEqual(val, h5_duplicate.attrs[key])
            for key, val in existing_attrs.items():
                self.assertEqual(val, h5_duplicate.attrs[key])
        os.remove(file_path)

    def validate_copied_dataset(self, h5_f_new, h5_dest, dset_new_name,
                                dset_data, dset_attrs):
        """Helper: assert that an ancillary dataset referenced by ``h5_dest`` was
        copied into ``h5_f_new`` with identical data and attributes."""
        self.assertTrue(dset_new_name in h5_f_new.keys())
        h5_anc_dest = h5_f_new[dset_new_name]
        self.assertIsInstance(h5_anc_dest, h5py.Dataset)
        self.assertTrue(np.allclose(dset_data, h5_anc_dest[()]))
        self.assertEqual(len(dset_attrs),
                         len(h5_anc_dest.attrs.keys()))
        for key, val in dset_attrs.items():
            self.assertEqual(val, h5_anc_dest.attrs[key])
        # The destination must hold a live reference attribute to the copy
        self.assertTrue(dset_new_name in h5_dest.attrs.keys())
        self.assertEqual(h5_f_new[h5_dest.attrs[dset_new_name]],
                         h5_anc_dest)

    def test_diff_file_new_attrs_linked_dsets(self):
        """Duplicating into a different file copies referenced datasets too
        (and warns on Python 3)."""
        file_path = 'test.h5'
        new_path = 'new.h5'
        existing_attrs = {'a': 1, 'b': 'Hello'}
        easy_attrs = {'1_string': 'Current', '1_number': 35.23}
        data_utils.delete_existing_file(file_path)
        # BUG FIX: previously new_path was never cleaned up before or after
        # the test, leaving a stale 'new.h5' behind
        data_utils.delete_existing_file(new_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_dset_source = h5_f.create_dataset('Source', data=[1, 2, 3])
            h5_dset_source.attrs.update(existing_attrs)
            h5_other = h5_f.create_dataset('Other', data=[1, 2, 3])
            anc_attrs = {'a': 1, 'b': -55}
            h5_other.attrs.update(anc_attrs)
            h5_dset_source.attrs['dset_ref'] = h5_other.ref
            with h5py.File(new_path, mode='w') as h5_f_new:
                func = hdf_utils.create_empty_dataset
                if sys.version_info.major == 3:
                    with self.assertWarns(UserWarning):
                        h5_duplicate = func(h5_dset_source, np.float16,
                                            'Duplicate', h5_group=h5_f_new,
                                            new_attrs=easy_attrs,
                                            skip_refs=False)
                else:
                    h5_duplicate = func(h5_dset_source, np.float16,
                                        'Duplicate', h5_group=h5_f_new,
                                        new_attrs=easy_attrs, skip_refs=False)
                self.assertIsInstance(h5_duplicate, h5py.Dataset)
                self.assertEqual(h5_duplicate.parent, h5_f_new)
                self.assertFalse(h5_dset_source.file == h5_duplicate.file)
                self.assertEqual(h5_duplicate.name, '/Duplicate')
                self.assertEqual(h5_duplicate.dtype, np.float16)
                for key, val in easy_attrs.items():
                    self.assertEqual(val, h5_duplicate.attrs[key])
                for key, val in existing_attrs.items():
                    self.assertEqual(val, h5_duplicate.attrs[key])
                self.assertTrue('dset_ref' in h5_duplicate.attrs.keys())
                self.validate_copied_dataset(h5_f_new, h5_duplicate,
                                             'dset_ref', h5_other[()],
                                             anc_attrs)
        os.remove(file_path)
        os.remove(new_path)

    def test_diff_groups(self):
        """Duplicate may be created inside another group of the same file."""
        file_path = 'test.h5'
        existing_attrs = {'a': 1, 'b': 'Hello'}
        easy_attrs = {'1_string': 'Current', '1_number': 35.23}
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_dset_source = h5_f.create_dataset('Source', data=[1, 2, 3])
            h5_dset_source.attrs.update(existing_attrs)
            h5_group = h5_f.create_group('Group')
            h5_duplicate = hdf_utils.create_empty_dataset(h5_dset_source, np.float16, 'Duplicate',
                                                          h5_group=h5_group, new_attrs=easy_attrs)
            self.assertIsInstance(h5_duplicate, h5py.Dataset)
            self.assertEqual(h5_duplicate.parent, h5_group)
            self.assertEqual(h5_duplicate.name, '/Group/Duplicate')
            self.assertEqual(h5_duplicate.dtype, np.float16)
            for key, val in easy_attrs.items():
                self.assertEqual(val, h5_duplicate.attrs[key])
            for key, val in existing_attrs.items():
                self.assertEqual(val, h5_duplicate.attrs[key])
        os.remove(file_path)

    def test_existing_dset_name(self):
        """Reusing an existing dataset name warns (Py3) and overwrites with zeros."""
        file_path = 'test.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_dset_source = h5_f.create_dataset('Source', data=[1, 2, 3])
            _ = h5_f.create_dataset('Existing', data=[4, 5, 6])
            if sys.version_info.major == 3:
                with self.assertWarns(UserWarning):
                    h5_duplicate = hdf_utils.create_empty_dataset(h5_dset_source, np.float16, 'Existing')
            else:
                h5_duplicate = hdf_utils.create_empty_dataset(h5_dset_source, np.float16, 'Existing')
            self.assertIsInstance(h5_duplicate, h5py.Dataset)
            self.assertEqual(h5_duplicate.name, '/Existing')
            self.assertTrue(np.allclose(h5_duplicate[()], np.zeros(3)))
            self.assertEqual(h5_duplicate.dtype, np.float16)
        os.remove(file_path)
class TestCheckAndLinkAncillary(TestSimple):
    """Tests for hdf_utils.check_and_link_ancillary"""

    def test_not_dset(self):
        # First argument must be an HDF5 dataset
        with self.assertRaises(TypeError):
            hdf_utils.check_and_link_ancillary(np.arange(5), ['Spec'])

    def test_h5_main_not_dset(self):
        # h5_main, when given, must also be an HDF5 dataset
        file_path = 'check_and_link_ancillary.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_dset_source = h5_f.create_dataset('Source', data=[1, 2, 3])
            with self.assertRaises(TypeError):
                hdf_utils.check_and_link_ancillary(h5_dset_source, ['Spec'],
                                                   h5_main="not_a_dataset")
        os.remove(file_path)

    def test_one_dset_to_name(self):
        # A single attribute name + single object: one reference attribute written
        file_path = 'check_and_link_ancillary.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_dset_source = h5_f.create_dataset('Source', data=[1, 2, 3])
            h5_dset_2 = h5_f.create_dataset('Other', data=[1, 2, 3])
            att_name = 'root'
            expected = h5_dset_2
            hdf_utils.check_and_link_ancillary(h5_dset_source, att_name,
                                               h5_main=None, anc_refs=expected)
            # Only one attribute expected
            self.assertEqual(len(h5_dset_source.attrs.keys()), 1)
            self.assertTrue(att_name in h5_dset_source.attrs.keys())
            actual = h5_dset_source.attrs[att_name]
            self.assertIsInstance(actual, h5py.Reference)
            self.assertEqual(h5_f[actual], expected)
        os.remove(file_path)

    def test_many_objs_to_many_names(self):
        # Multiple names zipped against multiple objects (datasets or groups)
        file_path = 'check_and_link_ancillary.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_dset_source = h5_f.create_dataset('Source', data=[1, 2, 3])
            h5_dset_2 = h5_f.create_dataset('Other', data=[1, 2, 3])
            h5_grp = h5_f.create_group('Blah')
            att_names = ['Meh', 'Wah']
            expected = [h5_dset_2, h5_grp]
            hdf_utils.check_and_link_ancillary(h5_dset_source, att_names,
                                               h5_main=None, anc_refs=expected)
            self.assertEqual(len(h5_dset_source.attrs.keys()), len(att_names))
            self.assertEqual(set(att_names), set(h5_dset_source.attrs.keys()))
            for name, exp_val in zip(att_names, expected):
                actual = h5_dset_source.attrs[name]
                self.assertIsInstance(actual, h5py.Reference)
                self.assertEqual(h5_f[actual], exp_val)
        os.remove(file_path)

    def test_objs_and_refs_to_names(self):
        # anc_refs may mix h5py objects and raw h5py references
        file_path = 'check_and_link_ancillary.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_dset_source = h5_f.create_dataset('Source', data=[1, 2, 3])
            h5_dset_2 = h5_f.create_dataset('Other', data=[1, 2, 3])
            h5_grp = h5_f.create_group('Blah')
            att_names = ['Meh', 'Wah']
            expected = [h5_dset_2, h5_grp.ref]
            hdf_utils.check_and_link_ancillary(h5_dset_source, att_names,
                                               h5_main=None, anc_refs=expected)
            # Compare against the dereferenced objects, not the raw refs
            expected = [h5_dset_2, h5_grp]
            self.assertEqual(len(h5_dset_source.attrs.keys()), len(att_names))
            self.assertEqual(set(att_names), set(h5_dset_source.attrs.keys()))
            for name, exp_val in zip(att_names, expected):
                actual = h5_dset_source.attrs[name]
                self.assertIsInstance(actual, h5py.Reference)
                self.assertEqual(h5_f[actual], exp_val)
        os.remove(file_path)

    def test_too_many_names(self):
        # Extra attribute names beyond available refs are silently ignored
        file_path = 'check_and_link_ancillary.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_dset_source = h5_f.create_dataset('Source', data=[1, 2, 3])
            h5_dset_2 = h5_f.create_dataset('Other', data=[1, 2, 3])
            _ = h5_f.create_group('Blah')
            att_names = ['Meh', 'Wah']
            expected = [h5_dset_2]
            hdf_utils.check_and_link_ancillary(h5_dset_source, att_names,
                                               h5_main=None, anc_refs=expected)
            att_names = [att_names[0]]
            print(list(h5_dset_source.attrs.keys()), att_names)
            self.assertEqual(len(h5_dset_source.attrs.keys()), len(att_names))
            self.assertEqual(set(att_names), set(h5_dset_source.attrs.keys()))
            for name, exp_val in zip(att_names, expected):
                actual = h5_dset_source.attrs[name]
                self.assertIsInstance(actual, h5py.Reference)
                self.assertEqual(h5_f[actual], exp_val)
        os.remove(file_path)

    def test_too_many_ancs(self):
        # Extra refs beyond the provided names are silently ignored
        file_path = 'check_and_link_ancillary.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_dset_source = h5_f.create_dataset('Source', data=[1, 2, 3])
            h5_dset_2 = h5_f.create_dataset('Other', data=[1, 2, 3])
            h5_grp = h5_f.create_group('Blah')
            att_names = ['Meh']
            expected = [h5_dset_2, h5_grp]
            hdf_utils.check_and_link_ancillary(h5_dset_source, att_names,
                                               h5_main=None, anc_refs=expected)
            expected = [expected[0]]
            self.assertEqual(len(h5_dset_source.attrs.keys()), len(att_names))
            self.assertEqual(set(att_names), set(h5_dset_source.attrs.keys()))
            for name, exp_val in zip(att_names, expected):
                actual = h5_dset_source.attrs[name]
                self.assertIsInstance(actual, h5py.Reference)
                self.assertEqual(h5_f[actual], exp_val)
        os.remove(file_path)

    def test_no_anc_refs_provided(self):
        # With neither h5_main nor anc_refs there is nothing to link: ValueError
        file_path = 'check_and_link_ancillary.h5'
        data_utils.delete_existing_file(file_path)
        with h5py.File(file_path, mode='w') as h5_f:
            h5_dset_source = h5_f.create_dataset('Source', data=[1, 2, 3])
            with self.assertRaises(ValueError):
                hdf_utils.check_and_link_ancillary(h5_dset_source, ['M', 'Wa'],
                                                   h5_main=None, anc_refs=None)
        os.remove(file_path)

    def test_linking_main_legit_anc_names(self):
        # With anc_refs=None, references are pulled from h5_main's attributes
        file_path = 'check_and_link_ancillary.h5'
        data_utils.delete_existing_file(file_path)
        shutil.copy(data_utils.std_beps_path, file_path)
        with h5py.File(file_path, mode='r+') as h5_f:
            h5_grp = h5_f['Raw_Measurement']
            h5_dset_source = h5_grp['Ancillary']
            h5_main = h5_grp['source_main']
            att_names = ['Spectroscopic_Values', 'Position_Indices']
            expected = [h5_grp[name] for name in att_names]
            hdf_utils.check_and_link_ancillary(h5_dset_source, att_names,
                                               h5_main=h5_main, anc_refs=None)
            self.assertEqual(len(h5_dset_source.attrs.keys()), len(att_names))
            self.assertEqual(set(att_names), set(h5_dset_source.attrs.keys()))
            for name, exp_val in zip(att_names, expected):
                actual = h5_dset_source.attrs[name]
                self.assertIsInstance(actual, h5py.Reference)
                self.assertEqual(h5_f[actual], exp_val)
        os.remove(file_path)

    def test_h5_main_non_dset_anc_names(self):
        file_path = 'check_and_link_ancillary.h5'
        data_utils.delete_existing_file(file_path)
        shutil.copy(data_utils.std_beps_path, file_path)
        with h5py.File(file_path, mode='r+') as h5_f:
            h5_grp = h5_f['Raw_Measurement']
            h5_dset_source = h5_grp['Ancillary']
            h5_main = h5_grp['source_main']
            att_names = ['Spectroscopic_Values', 'quantity']
            hdf_utils.check_and_link_ancillary(h5_dset_source, att_names,
                                               h5_main=h5_main, anc_refs=None)
            # Should NOT copy the quantity attribute since it is not a dset
            att_names = [att_names[0]]
            expected = [h5_grp['Spectroscopic_Values']]
            self.assertEqual(set(att_names),
                             set(h5_dset_source.attrs.keys()))
            for name, exp_val in zip(att_names, expected):
                actual = h5_dset_source.attrs[name]
                self.assertIsInstance(actual, h5py.Reference)
                self.assertEqual(h5_f[actual], exp_val)
        os.remove(file_path)
class TestValidateAncH5Dsets(TestSimple):
    """Tests for hdf_utils.validate_anc_h5_dsets"""

    def test_valid_shapes(self):
        # Well-matched ancillary pairs should validate silently, with the main
        # shape provided either as a tuple or a list
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_grp = h5_f['Raw_Measurement']
            h5_main = h5_grp['source_main']
            h5_pos_inds = h5_grp['Position_Indices']
            h5_pos_vals = h5_grp['Position_Values']
            hdf_utils.validate_anc_h5_dsets(h5_pos_inds, h5_pos_vals,
                                            h5_main.shape,
                                            is_spectroscopic=False)
            hdf_utils.validate_anc_h5_dsets(h5_pos_inds, h5_pos_vals,
                                            list(h5_main.shape),
                                            is_spectroscopic=False)
            h5_spec_inds = h5_grp['Spectroscopic_Indices']
            h5_spec_vals = h5_grp['Spectroscopic_Values']
            hdf_utils.validate_anc_h5_dsets(h5_spec_inds, h5_spec_vals,
                                            h5_main.shape,
                                            is_spectroscopic=True)
            hdf_utils.validate_anc_h5_dsets(h5_spec_inds, h5_spec_vals,
                                            list(h5_main.shape),
                                            is_spectroscopic=True)

    def test_invalid_spec_pos_bool(self):
        # Swapping the is_spectroscopic flag makes the shapes incompatible
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_grp = h5_f['Raw_Measurement']
            h5_main = h5_grp['source_main']
            h5_pos_inds = h5_grp['Position_Indices']
            h5_pos_vals = h5_grp['Position_Values']
            with self.assertRaises(ValueError):
                hdf_utils.validate_anc_h5_dsets(h5_pos_inds, h5_pos_vals,
                                                h5_main.shape,
                                                is_spectroscopic=True)
            h5_spec_inds = h5_grp['Spectroscopic_Indices']
            h5_spec_vals = h5_grp['Spectroscopic_Values']
            with self.assertRaises(ValueError):
                hdf_utils.validate_anc_h5_dsets(h5_spec_inds, h5_spec_vals,
                                                h5_main.shape,
                                                is_spectroscopic=False)

    def test_mismatched_anc_shapes(self):
        # Indices and values datasets of differing shapes must be rejected
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_grp = h5_f['Raw_Measurement']
            h5_main = h5_grp['source_main']
            h5_pos_inds = h5_grp['Position_Indices']
            h5_pos_vals = h5_grp['Ancillary']
            with self.assertRaises(ValueError):
                hdf_utils.validate_anc_h5_dsets(h5_pos_inds, h5_pos_vals,
                                                h5_main.shape,
                                                is_spectroscopic=False)

    def test_invalid_dtypes(self):
        # Non-dataset inputs and non-shape main_shape arguments raise TypeError
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_grp = h5_f['Raw_Measurement']
            h5_main = h5_grp['source_main']
            h5_pos_inds = h5_grp['Position_Indices']
            h5_pos_vals = h5_grp['Position_Values']
            with self.assertRaises(TypeError):
                hdf_utils.validate_anc_h5_dsets('h5_pos_inds', h5_pos_vals,
                                                h5_main.shape,
                                                is_spectroscopic=False)
            with self.assertRaises(TypeError):
                hdf_utils.validate_anc_h5_dsets(h5_pos_inds, h5_pos_vals,
                                                np.arange(3),
                                                is_spectroscopic=False)
class TestValidateDimsAgainstMAin(unittest.TestCase):
    """Tests for hdf_utils.validate_dims_against_main"""

    def test_single_dim(self):
        # A single Dimension, bare or wrapped in a list, validates silently
        hdf_utils.validate_dims_against_main(
            (1, 5), Dimension('blah', 'meh', np.arange(5)),
            is_spectroscopic=True)
        hdf_utils.validate_dims_against_main(
            (5, 1), [Dimension('blah', 'meh', np.arange(5))],
            is_spectroscopic=False)

    def test_multi_dims(self):
        # Product of dimension sizes (5 * 3) matches the relevant main axis
        hdf_utils.validate_dims_against_main(
            (1, 15), [Dimension('a', 'b', 5), Dimension('a', 'b', 3)],
            is_spectroscopic=True)
        hdf_utils.validate_dims_against_main(
            (15, 5), [Dimension('a', 'b', 5), Dimension('a', 'b', 3)],
            is_spectroscopic=False)

    def test_invalid_dims(self):
        # Dimension sizes whose product disagrees with the main shape
        with self.assertRaises(ValueError):
            hdf_utils.validate_dims_against_main(
                (1, 25), [Dimension('a', 'b', 5), Dimension('a', 'b', 3)],
                is_spectroscopic=True)
        with self.assertRaises(ValueError):
            hdf_utils.validate_dims_against_main(
                (25, 5), [Dimension('a', 'b', 5)],
                is_spectroscopic=False)

    def test_invalid_dtypes(self):
        # Shape must be a sequence of ints; dims must be Dimension objects
        with self.assertRaises(TypeError):
            hdf_utils.validate_dims_against_main('hello', Dimension('a', 'b', 5))
        with self.assertRaises(TypeError):
            hdf_utils.validate_dims_against_main((25, 5), "Dimension")

    def test_invalid_main_shape(self):
        # Negative sizes or a rank other than 2 are rejected
        for bad_shape in ([-5, 1], [5], [1, 2, 5]):
            with self.assertRaises(ValueError):
                hdf_utils.validate_dims_against_main(bad_shape,
                                                     Dimension('a', 'b', 5))
class TestValidateAncDsetAttrs(TestSimple):
    # TODO: real tests for validate_anc_dset_attrs are yet to be written;
    # this placeholder keeps the class discoverable by the test runner.
    def test_blah(self):
        """Placeholder - intentionally does nothing."""
        pass
# Allow running this test module directly (python test_file.py)
if __name__ == '__main__':
    unittest.main()
| 77,720 | 48.566964 | 127 | py |
pyUSID-legacy | pyUSID-master-legacy/tests/io/hdf_utils/__init__.py | 0 | 0 | 0 | py | |
pyUSID-legacy | pyUSID-master-legacy/tests/io/hdf_utils/test_model.py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: Suhas Somnath
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import os
import sys
import h5py
import numpy as np
import dask.array as da
import shutil
sys.path.append("../../pyUSID/")
from pyUSID.io import hdf_utils, Dimension, USIDataset
from tests.io import data_utils
# Python 3 removed the separate ``unicode`` type; alias it to ``str`` so the
# rest of this module can reference ``unicode`` on either major version
if sys.version_info.major == 3:
    unicode = str
class TestModel(unittest.TestCase):
    """Base class for the tests below: creates the standard HDF5 fixture files
    before each test and deletes them afterwards."""

    def setUp(self):
        # Generate every fixture file the subclasses read from
        data_utils.make_beps_file()
        data_utils.make_sparse_sampling_file()
        data_utils.make_incomplete_measurement_file()
        data_utils.make_relaxation_file()

    def tearDown(self):
        for file_path in [data_utils.std_beps_path,
                          data_utils.sparse_sampling_path,
                          data_utils.incomplete_measurement_path,
                          data_utils.relaxation_path]:
            data_utils.delete_existing_file(file_path)
class TestGetDimensionality(TestModel):
    """Tests for hdf_utils.get_dimensionality"""

    def test_legal_no_sort(self):
        """Dimensionality is correct for both h5py datasets and numpy arrays."""
        self.__helper_no_sort(hdf_dsets=True)
        self.__helper_no_sort(hdf_dsets=False)

    def __helper_no_sort(self, hdf_dsets=True):
        # Each ancillary indices dataset should report its dimension sizes
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_dsets = [h5_f['/Raw_Measurement/Spectroscopic_Indices'],
                        h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'],
                        h5_f['/Raw_Measurement/Position_Indices']]
            expected_shapes = [[7, 2],
                               [7],
                               [5, 3]]
            for h5_dset, exp_shape in zip(h5_dsets, expected_shapes):
                if not hdf_dsets:
                    h5_dset = h5_dset[()]
                self.assertTrue(np.all(exp_shape == hdf_utils.get_dimensionality(h5_dset)))

    def test_legal_w_sort(self):
        """index_sort reorders the reported dimension sizes accordingly."""
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_dsets = [h5_f['/Raw_Measurement/Spectroscopic_Indices'],
                        h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'],
                        h5_f['/Raw_Measurement/Position_Indices']]
            expected_shapes = [[2, 7],
                               [7],
                               [3, 5]]
            sort_orders = [[1, 0],
                           [0],
                           [1, 0]]
            for h5_dset, s_oder, exp_shape in zip(h5_dsets, sort_orders, expected_shapes):
                self.assertTrue(np.all(exp_shape == hdf_utils.get_dimensionality(h5_dset, index_sort=s_oder)))

    def test_not_hdf_dset(self):
        """Objects that are neither datasets nor arrays raise TypeError."""
        for obj in [15, 'srds']:
            with self.assertRaises(TypeError):
                _ = hdf_utils.get_dimensionality(obj)

    def test_invalid_sort(self):
        """Out-of-range or non-integer index_sort values raise ValueError."""
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_dset = h5_f['/Raw_Measurement/Spectroscopic_Indices']
            with self.assertRaises(ValueError):
                _ = hdf_utils.get_dimensionality(h5_dset, index_sort=[3, 4])
            # BUG FIX: this call used to sit inside the assertRaises block
            # above, making it unreachable (the first call raises and exits
            # the context). It now gets its own assertion.
            with self.assertRaises(ValueError):
                _ = hdf_utils.get_dimensionality(h5_dset, index_sort=['a', np.arange(5)])
class TestGetSortOrder(TestModel):
    """Tests for hdf_utils.get_sort_order"""

    def test_invalid_types(self):
        # Strings and HDF5 file handles are not acceptable inputs
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_fh:
            for bad_input in ['fdfdfd', h5_fh]:
                with self.assertRaises(TypeError):
                    _ = hdf_utils.get_sort_order(bad_input)

    def test_simple(self):
        # Each ancillary dataset paired with its expected sort order
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_fh:
            cases = [(h5_fh['/Raw_Measurement/Spectroscopic_Indices'], [0, 1]),
                     (h5_fh['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'], [0]),
                     (h5_fh['/Raw_Measurement/Position_Indices'], [0, 1])]
            for anc_dset, exp_order in cases:
                self.assertTrue(np.all(exp_order == hdf_utils.get_sort_order(anc_dset)))

    def test_reversed(self):
        # Flipping the datasets should reverse the reported order
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_fh:
            cases = [(np.flipud(h5_fh['/Raw_Measurement/Spectroscopic_Indices']), [1, 0]),
                     (h5_fh['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'], [0]),
                     (np.fliplr(h5_fh['/Raw_Measurement/Position_Indices']), [1, 0])]
            for anc_dset, exp_order in cases:
                self.assertTrue(np.all(exp_order == hdf_utils.get_sort_order(anc_dset)))
class TestGetUnitValues(TestModel):
    """Tests for hdf_utils.get_unit_values"""

    def test_source_spec_all(self):
        # Without dim_names, values for every spectroscopic dimension come back
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']
            h5_vals = h5_f['/Raw_Measurement/Spectroscopic_Values']
            expected = {}
            for dim_name in ['Bias', 'Cycle']:
                expected[dim_name] = h5_f['/Raw_Measurement/' + dim_name][()]
            ret_val = hdf_utils.get_unit_values(h5_inds, h5_vals)
            self.assertEqual(len(expected), len(ret_val))
            for key, exp in expected.items():
                self.assertTrue(np.allclose(exp, ret_val[key]))

    def test_source_spec_all_explicit(self):
        # Explicitly naming every dimension gives the same result
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']
            h5_vals = h5_f['/Raw_Measurement/Spectroscopic_Values']
            expected = {}
            for dim_name in ['Bias', 'Cycle']:
                expected[dim_name] = h5_f['/Raw_Measurement/' + dim_name][()]
            ret_val = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Cycle', 'Bias'])
            self.assertEqual(len(expected), len(ret_val))
            for key, exp in expected.items():
                self.assertTrue(np.allclose(exp, ret_val[key]))

    def test_illegal_key(self):
        # Unknown dimension names must raise KeyError
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']
            h5_vals = h5_f['/Raw_Measurement/Spectroscopic_Values']
            with self.assertRaises(KeyError):
                _ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Cycle', 'Does not exist'])

    def test_illegal_dset(self):
        # A values dataset incompatible with the indices must raise ValueError
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']
            h5_vals = h5_f['/Raw_Measurement/Ancillary']
            with self.assertRaises(ValueError):
                _ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Cycle', 'Bias'])

    def test_source_spec_single(self):
        # A single dimension name (string, not list) is also accepted
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']
            h5_vals = h5_f['/Raw_Measurement/Spectroscopic_Values']
            expected = {'Bias': h5_f['/Raw_Measurement/Bias'][()]}
            ret_val = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names='Bias')
            self.assertEqual(len(expected), len(ret_val))
            for key, exp in expected.items():
                self.assertTrue(np.allclose(exp, ret_val[key]))

    def test_source_pos_all(self):
        # Works identically for position ancillary datasets
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_inds = h5_f['/Raw_Measurement/Position_Indices']
            h5_vals = h5_f['/Raw_Measurement/Position_Values']
            expected = {}
            for dim_name in ['X', 'Y']:
                expected[dim_name] = h5_f['/Raw_Measurement/' + dim_name][()]
            ret_val = hdf_utils.get_unit_values(h5_inds, h5_vals)
            self.assertEqual(len(expected), len(ret_val))
            for key, exp in expected.items():
                self.assertTrue(np.allclose(exp, ret_val[key]))

    def test_source_pos_single(self):
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_inds = h5_f['/Raw_Measurement/Position_Indices']
            h5_vals = h5_f['/Raw_Measurement/Position_Values']
            expected = {'Y': h5_f['/Raw_Measurement/Y'][()]}
            ret_val = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names='Y')
            self.assertEqual(len(expected), len(ret_val))
            for key, exp in expected.items():
                self.assertTrue(np.allclose(exp, ret_val[key]))

    def test_all_dim_names_not_provided(self):
        # Plain numpy arrays carry no 'labels' attribute, so names are required
        # to be resolvable - expect TypeError
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_inds = h5_f['/Raw_Measurement/Position_Indices'][()]
            h5_vals = h5_f['/Raw_Measurement/Position_Values'][()]
            with self.assertRaises(TypeError):
                _ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Y'])

    def test_dependent_dim(self):
        # Dimensions that depend on one another: compare against the
        # precomputed per-dimension unit values stored as attributes
        with h5py.File(data_utils.relaxation_path, mode='r') as h5_f:
            h5_inds = h5_f['/Measurement_000/Channel_000/Spectroscopic_Indices']
            h5_vals = h5_f['/Measurement_000/Channel_000/Spectroscopic_Values']
            spec_dim_names = hdf_utils.get_attr(h5_inds, 'labels')
            ret_dict = hdf_utils.get_unit_values(h5_inds, h5_vals)
            for dim_ind, dim_name in enumerate(spec_dim_names):
                exp_val = hdf_utils.get_attr(h5_inds, 'unit_vals_dim_' + str(dim_ind))
                act_val = ret_dict[dim_name]
                self.assertTrue(np.allclose(exp_val, act_val))

    def test_sparse_samp_no_attr(self):
        # What should the user expect this function to do? throw an error.
        # Without the attribute, this function will have no idea that it is looking at a sparse sampling case
        # it will return the first and second columns of vals blindly
        with h5py.File(data_utils.sparse_sampling_path, mode='r') as h5_f:
            h5_inds = h5_f['/Measurement_000/Channel_000/Position_Indices']
            h5_vals = h5_f['/Measurement_000/Channel_000/Position_Values']
            dim_names = hdf_utils.get_attr(h5_inds, 'labels')
            ret_dict = hdf_utils.get_unit_values(h5_inds, h5_vals)
            for dim_ind, dim_name in enumerate(dim_names):
                exp_val = h5_vals[:, dim_ind]
                act_val = ret_dict[dim_name]
                self.assertTrue(np.allclose(exp_val, act_val))

    def test_sparse_samp_w_attr(self):
        # What should the user expect this function to do? throw an error.
        with h5py.File(data_utils.sparse_sampling_path, mode='r') as h5_f:
            h5_inds = h5_f['/Measurement_000/Channel_001/Position_Indices']
            h5_vals = h5_f['/Measurement_000/Channel_001/Position_Values']
            with self.assertRaises(ValueError):
                _ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Y'])

    def test_incomp_dim_no_attr(self):
        # What should the user expect this function to do? throw an error.
        # Given that the unit values for each tile are different, it should throw a ValueError for X.
        # Even though we know Y is incomplete, it won't know since it wasn't looking at X.
        # However, now this function will automatically find unit values for ALL dimensions just to catch such scenarios
        with h5py.File(data_utils.incomplete_measurement_path, mode='r') as h5_f:
            h5_inds = h5_f['/Measurement_000/Channel_000/Position_Indices']
            h5_vals = h5_f['/Measurement_000/Channel_000/Position_Values']
            with self.assertRaises(ValueError):
                _ = hdf_utils.get_unit_values(h5_inds, h5_vals)
            with self.assertRaises(ValueError):
                _ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['X'])
            with self.assertRaises(ValueError):
                _ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Y'])
class TestReshapeToNDims(TestModel):
    def test_h5_already_sorted(self):
        """reshape_to_n_dims honors sort_dims for an already-sorted main dataset."""
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            nd_slow_to_fast = h5_f['/Raw_Measurement/n_dim_form'][()]
            h5_main = h5_f['/Raw_Measurement/source_main']
            # Data is always slowest to fastest
            # Anc dims arranged from fastest to slowest
            # Expecting data dims to be arranged according to anc dims order
            n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_main, get_labels=True, sort_dims=False,
                                                                 lazy=False, verbose=True)
            self.assertTrue(np.all([x == y for x, y in zip(labels, ['X', 'Y', 'Bias', 'Cycle'])]))
            self.assertTrue(success)
            nd_fast_to_slow = nd_slow_to_fast.transpose(1, 0, 3, 2)
            self.assertTrue(np.allclose(nd_fast_to_slow, n_dim))
            # Anc dims arranged from fastest to slowest
            # Expecting data dims to be arranged according to slow to fast
            n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_main, get_labels=True, sort_dims=True,
                                                                 lazy=False, verbose=True)
            self.assertTrue(success)
            self.assertTrue(np.all([x == y for x, y in zip(labels, ['Y', 'X', 'Cycle', 'Bias'])]))
            self.assertTrue(np.allclose(nd_slow_to_fast, n_dim))
    def test_h5_manually_provided_anc_dsets_h5(self):
        """Explicitly supplying pos and/or spec ancillary datasets gives the
        same reshape result as relying on the linked attributes."""
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            nd_slow_to_fast = h5_f['/Raw_Measurement/n_dim_form'][()]
            nd_fast_to_slow = nd_slow_to_fast.transpose(1, 0, 3, 2)
            exp_labs = ['X', 'Y', 'Bias', 'Cycle']
            h5_main = h5_f['/Raw_Measurement/source_main']
            h5_pos_inds = h5_f['/Raw_Measurement/Position_Indices']
            h5_spec_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']
            # BOTH POS AND SPEC
            n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_main,
                                                                 h5_pos=h5_pos_inds,
                                                                 h5_spec=h5_spec_inds,
                                                                 get_labels=True,
                                                                 sort_dims=False,
                                                                 lazy=False, verbose=True)
            self.assertTrue(np.all([x == y for x, y in zip(labels, exp_labs)]))
            self.assertTrue(success)
            self.assertTrue(np.allclose(nd_fast_to_slow, n_dim))
            # ONLY POS:
            n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_main,
                                                                 h5_pos=h5_pos_inds,
                                                                 h5_spec=None,
                                                                 get_labels=True,
                                                                 sort_dims=False,
                                                                 lazy=False,
                                                                 verbose=True)
            self.assertTrue(np.all([x == y for x, y in zip(labels, exp_labs)]))
            self.assertTrue(success)
            self.assertTrue(np.allclose(nd_fast_to_slow, n_dim))
            # ONLY SPEC
            n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_main,
                                                                 h5_pos=None,
                                                                 h5_spec=h5_spec_inds,
                                                                 get_labels=True,
                                                                 sort_dims=False,
                                                                 lazy=False,
                                                                 verbose=True)
            self.assertTrue(np.all([x == y for x, y in zip(labels, exp_labs)]))
            self.assertTrue(success)
            self.assertTrue(np.allclose(nd_fast_to_slow, n_dim))
def test_h5_not_main_dset(self):
    """
    reshape_to_n_dims must raise ValueError for non-Main datasets and for
    Main datasets paired with incompatible ancillary datasets.
    """
    with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
        h5_not_main = h5_f['/Raw_Measurement/Ancillary']
        h5_pos = h5_f['/Raw_Measurement/Position_Indices']
        h5_spec = h5_f['/Raw_Measurement/Spectroscopic_Indices']
        # Not a Main dataset at all
        with self.assertRaises(ValueError):
            _ = hdf_utils.reshape_to_n_dims(h5_not_main)
        # Not Main AND the supplied ancillary datasets are incompatible
        with self.assertRaises(ValueError):
            _ = hdf_utils.reshape_to_n_dims(h5_not_main, h5_pos=h5_pos,
                                            h5_spec=h5_spec)
        # A genuine Main dataset but with incompatible ancillary datasets
        h5_results = h5_f['/Raw_Measurement/source_main-Fitter_000/results_main']
        with self.assertRaises(ValueError):
            _ = hdf_utils.reshape_to_n_dims(h5_results, h5_pos=h5_pos,
                                            h5_spec=h5_spec)
def build_main_anc_4d(self):
    """
    Build a synthetic 4D main dataset plus its ancillary index matrices.

    Returns
    -------
    main_nd : numpy.ndarray, uint8, shape (3, 5, 2, 7)
        Arranged slowest to fastest: (rows, cols, cycles, cycle_pts).
        Each element encodes its own indices as
        1E3*row + 1E2*col + 1E1*cycle + bias (cast into uint8).
    pos_inds : numpy.ndarray, shape (15, 2)
        Position indices arranged fastest (cols) then slowest (rows).
    spec_inds : numpy.ndarray, shape (2, 14)
        Spectroscopic indices arranged fastest (bias) then slowest (cycle).
    """
    num_rows = 3
    num_cols = 5
    num_cycles = 2
    num_cycle_pts = 7
    # Ancillary matrices are laid out fastest dimension first
    pos_inds = np.vstack((np.tile(np.arange(num_cols), num_rows),
                          np.repeat(np.arange(num_rows), num_cols))).T
    spec_inds = np.vstack((np.tile(np.arange(num_cycle_pts), num_cycles),
                           np.repeat(np.arange(num_cycles), num_cycle_pts)))
    # Encode every index into the value via broadcasting instead of loops
    encoded = (1E+3 * np.arange(num_rows)[:, None, None, None] +
               1E+2 * np.arange(num_cols)[None, :, None, None] +
               1E+1 * np.arange(num_cycles)[None, None, :, None] +
               np.arange(num_cycle_pts)[None, None, None, :])
    main_nd = np.zeros(shape=(num_rows, num_cols, num_cycles,
                              num_cycle_pts), dtype=np.uint8)
    # Same unsafe float -> uint8 cast as element-wise assignment would do
    main_nd[...] = encoded
    return main_nd, pos_inds, spec_inds
def base_comparison_4d(self, flip_pos_inds, flip_spec_inds, lazy_in=False,
                       lazy_out=False, verbose=False):
    """
    Core round-trip check for reshape_to_n_dims on a synthetic 4D dataset,
    exercised both with sorting (slow-to-fast) and without (as-supplied).

    Parameters
    ----------
    flip_pos_inds : bool
        If True, supply position indices slow-to-fast instead of the
        generated fast-to-slow layout
    flip_spec_inds : bool
        Same, for the spectroscopic indices
    lazy_in : bool
        If True, convert main / ancillary inputs to dask arrays
    lazy_out : bool
        If True, request a dask array as output
    verbose : bool
        Print debugging statements
    """
    # Generated Data dims from slowest to fastest
    exp_nd_s2f, pos_inds, spec_inds = self.build_main_anc_4d()
    # nd (Y, X, Cycle, Bias)
    main_2d = exp_nd_s2f.reshape(np.prod(exp_nd_s2f.shape[:2]),
                                 np.prod(exp_nd_s2f.shape[2:]))
    # Dimension names arranged from slowest to fastest
    labs_s2f = ['Position Dimension 1', 'Position Dimension 0',
                'Spectral Dimension 1', 'Spectral Dimension 0']
    # Generated ancillary dimensions are arranged from fastest to slowest
    # Unless any flipping is requested, as-is order should be fast to slow
    as_is_nd_order = [1, 0, 3, 2]
    # Unless any flipping is requested, s2f order is already in place
    s2f_lab_order = [0, 1, 2, 3]
    if flip_pos_inds:
        # arranged as slow to fast
        pos_inds = np.fliplr(pos_inds)
        as_is_nd_order = as_is_nd_order[:2][::-1] + as_is_nd_order[2:]
        s2f_lab_order = [1, 0] + s2f_lab_order[2:]
    if flip_spec_inds:
        # arranged as slow to fast
        as_is_nd_order = as_is_nd_order[:2] + as_is_nd_order[2:][::-1]
        s2f_lab_order = s2f_lab_order[:2] + [3, 2]
        spec_inds = np.flipud(spec_inds)
    if lazy_in:
        main_2d = da.from_array(main_2d, chunks=main_2d.shape)
        pos_inds = da.from_array(pos_inds, chunks=pos_inds.shape)
        spec_inds = da.from_array(spec_inds, chunks=spec_inds.shape)
    # First pass: ask reshape_to_n_dims to sort dimensions slow-to-fast
    n_dim, suc, labs = hdf_utils.reshape_to_n_dims(main_2d,
                                                   h5_pos=pos_inds,
                                                   h5_spec=spec_inds, sort_dims=True,
                                                   get_labels=True,
                                                   lazy=lazy_out,
                                                   verbose=verbose)
    if lazy_out:
        self.assertIsInstance(n_dim, da.core.Array)
    self.assertTrue(np.allclose(exp_nd_s2f, n_dim))
    self.assertTrue(suc)
    # labels were auto-generated and these will be flipped blindly
    exp_labs = np.array(labs_s2f)[s2f_lab_order]
    self.assertTrue(np.all([x == y for x, y in zip(labs, exp_labs)]))
    if verbose:
        print('~~~~~~~~~~~~~~~~~~~~~~ UNSORTED ~~~~~~~~~~~~~~~~~~~~~~~~~')
    # Second pass: keep dimensions in whatever order they were supplied
    n_dim, suc, labs = hdf_utils.reshape_to_n_dims(main_2d,
                                                   h5_pos=pos_inds,
                                                   h5_spec=spec_inds,
                                                   sort_dims=False,
                                                   get_labels=True,
                                                   lazy=lazy_out,
                                                   verbose=verbose)
    if lazy_out:
        self.assertIsInstance(n_dim, da.core.Array)
    # Rearrange the dim labels and N-dim form from slow-to-fast to:
    if verbose:
        print('N-dim order will be permuted as: {}'.format(as_is_nd_order))
        print('Labels will be permuted as: {}'.format([1, 0, 3, 2]))
    exp_nd = exp_nd_s2f.transpose(tuple(as_is_nd_order))
    """
    This is sort of confusing:
    No matter how the pos / spec dims are ordered, the names will always
    start as P0, P1, S0, S1
    """
    exp_labs = np.array(labs_s2f)[[1, 0, 3, 2]]
    if verbose:
        print('Expected N-dim shape: {} and labels: {}'
              ''.format(exp_nd.shape, exp_labs))
    self.assertTrue(np.allclose(exp_nd, n_dim))
    self.assertTrue(suc)
    self.assertTrue(np.all([x == y for x, y in zip(labs, exp_labs)]))
def test_numpy_ordinary(self):
    """Plain numpy in, numpy out, ancillaries in generated order."""
    self.base_comparison_4d(flip_pos_inds=False, flip_spec_inds=False)

def test_dask_input(self):
    """Dask arrays in, numpy out."""
    self.base_comparison_4d(flip_pos_inds=False, flip_spec_inds=False,
                            lazy_in=True, lazy_out=False)

def test_dask_output(self):
    """Numpy in, dask array out."""
    self.base_comparison_4d(flip_pos_inds=False, flip_spec_inds=False,
                            lazy_in=False, lazy_out=True)

def test_dask_all(self):
    """Dask arrays in and out."""
    self.base_comparison_4d(flip_pos_inds=False, flip_spec_inds=False,
                            lazy_in=True, lazy_out=True)

def test_numpy_pos_inds_order_flipped(self):
    """Position indices supplied slow-to-fast."""
    self.base_comparison_4d(flip_pos_inds=True, flip_spec_inds=False)

def test_numpy_spec_inds_order_flipped(self):
    """Spectroscopic indices supplied slow-to-fast.

    This is the same situation as in BEPS.
    """
    self.base_comparison_4d(flip_pos_inds=False, flip_spec_inds=True)

def test_numpy_both_inds_order_flipped(self):
    """Both ancillary matrices supplied slow-to-fast."""
    self.base_comparison_4d(flip_pos_inds=True, flip_spec_inds=True)

def test_dask_all_both_inds_order_flipped(self):
    """Both ancillaries flipped, dask arrays in and out."""
    self.base_comparison_4d(flip_pos_inds=True, flip_spec_inds=True,
                            lazy_in=True, lazy_out=True)
def build_main_anc_1_2d(self, is_2d=True, is_spec=False):
    """
    Build a random 2D (rows x cols) or 1D (rows only) main dataset with
    matching ancillary index matrices.

    Fix: the original always built the 2D main data and 2D position
    indices and then threw them away when ``is_2d=False``, wasting work
    and RNG draws. Only the requested variant is built now.

    Parameters
    ----------
    is_2d : bool, optional. Default = True
        If True, the main data spans two position dimensions, else one.
    is_spec : bool, optional. Default = False
        If True, the roles are swapped: what was built as positions is
        returned (transposed) as the spectroscopic matrix and vice versa.

    Returns
    -------
    tuple : (main_nd, pos_inds, spec_inds) — or, when is_spec is True,
        (main_nd, spec_inds, pos_inds) with the layouts swapped.
    """
    num_rows = 2
    num_cols = 3
    if is_2d:
        # arrange as fast, slow
        pos_inds = np.vstack((np.tile(np.arange(num_cols), num_rows),
                              np.repeat(np.arange(num_rows), num_cols))).T
        # Data is arranged from slowest to fastest
        main_nd = np.random.randint(0, high=255, size=(num_rows, num_cols),
                                    dtype=np.uint8)
    else:
        pos_inds = np.expand_dims(np.arange(num_rows), axis=1)
        main_nd = np.random.randint(0, high=255, size=num_rows,
                                    dtype=np.uint8)
    # A single, trivial spectroscopic dimension
    spec_inds = np.expand_dims([0], axis=0)
    if is_spec:
        # Swap roles: the position table becomes the spectroscopic table
        return main_nd, spec_inds, pos_inds.T
    return main_nd, pos_inds, spec_inds
def base_comparison_1_2d(self, is_2d, is_spec, flip_inds,
                         lazy_in=False, lazy_out=False):
    """
    Round-trip a 1D or 2D dataset (position- or spectroscopic-heavy)
    through reshape_to_n_dims, with and without dimension sorting.

    Fix: ``success`` returned by both reshape_to_n_dims calls was
    captured but never asserted, so a reported failure went unnoticed.

    Parameters
    ----------
    is_2d : bool
        Two position (or spectroscopic) dimensions instead of one
    is_spec : bool
        Treat the data as spectroscopic-heavy rather than position-heavy
    flip_inds : bool
        Supply the ancillary indices slow-to-fast instead of fast-to-slow
    lazy_in / lazy_out : bool
        Convert the input to / request the output as a dask array
    """
    # Data is always stored from fastest to slowest
    # By default the ancillary dimensions are arranged from fastest to slowest
    main_nd, pos_inds, spec_inds = self.build_main_anc_1_2d(is_2d=is_2d,
                                                            is_spec=is_spec)
    main_2d = main_nd.reshape(-1, 1)
    main_nd_w_sing = np.expand_dims(main_nd, axis=-1)
    if is_spec:
        main_2d = main_2d.T
        main_nd_w_sing = np.expand_dims(main_nd, axis=0)
    # nd (Y, X)
    order = [1, 0, 2]
    if is_spec:
        order = [0, 2, 1]
    if flip_inds:
        # arranged as slow to fast
        if is_spec:
            spec_inds = np.flipud(spec_inds)
            order = [0] + order[1:][::-1]
        else:
            pos_inds = np.fliplr(pos_inds)
            order = order[:2][::-1] + [2]
    print('2D: {}, Spec: {}, Flip: {}'.format(is_2d, is_spec, flip_inds))
    print('Main data shapes ND: {}, 2D: {}'.format(main_nd.shape, main_2d.shape))
    print(main_nd)
    print(main_2d)
    if lazy_in:
        main_2d = da.from_array(main_2d, chunks=main_2d.shape)
    # Sorted (slow-to-fast) reshape must reproduce the N-dim form
    n_dim, success = hdf_utils.reshape_to_n_dims(main_2d, h5_pos=pos_inds,
                                                 h5_spec=spec_inds,
                                                 sort_dims=True,
                                                 get_labels=False,
                                                 lazy=lazy_out,
                                                 verbose=True)
    self.assertTrue(success)
    if lazy_out:
        self.assertIsInstance(n_dim, da.core.Array)
    self.assertTrue(np.allclose(main_nd_w_sing, n_dim))
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    # Unsorted reshape keeps the as-supplied dimension order
    n_dim, success = hdf_utils.reshape_to_n_dims(main_2d, h5_pos=pos_inds,
                                                 h5_spec=spec_inds,
                                                 sort_dims=False,
                                                 get_labels=False,
                                                 lazy=lazy_out,
                                                 verbose=True)
    self.assertTrue(success)
    if lazy_out:
        self.assertIsInstance(n_dim, da.core.Array)
    if is_2d:
        main_nd_w_sing = main_nd_w_sing.transpose(order)
    self.assertTrue(np.allclose(main_nd_w_sing, n_dim))
def test_numpy_ordinary_1d_pos(self):
    """1D position-only dataset, numpy in and out."""
    self.base_comparison_1_2d(is_2d=False, is_spec=False, flip_inds=False)

def test_dask_in_ordinary_1d_pos(self):
    """1D position-only dataset, dask in, numpy out."""
    self.base_comparison_1_2d(is_2d=False, is_spec=False, flip_inds=False,
                              lazy_in=True, lazy_out=False)

def test_dask_out_ordinary_1d_pos(self):
    """1D position-only dataset, numpy in, dask out."""
    self.base_comparison_1_2d(is_2d=False, is_spec=False, flip_inds=False,
                              lazy_in=False, lazy_out=True)

def test_dask_all_ordinary_1d_pos(self):
    """1D position-only dataset, dask in and out."""
    self.base_comparison_1_2d(is_2d=False, is_spec=False, flip_inds=False,
                              lazy_in=True, lazy_out=True)

def test_numpy_ordinary_1d_spec(self):
    """1D spectroscopic-only dataset, numpy in and out."""
    self.base_comparison_1_2d(is_2d=False, is_spec=True, flip_inds=False)

def test_dask_in_ordinary_1d_spec(self):
    """1D spectroscopic-only dataset, dask in, numpy out."""
    self.base_comparison_1_2d(is_2d=False, is_spec=True, flip_inds=False,
                              lazy_in=True, lazy_out=False)

def test_dask_out_ordinary_1d_spec(self):
    """1D spectroscopic-only dataset, numpy in, dask out."""
    self.base_comparison_1_2d(is_2d=False, is_spec=True, flip_inds=False,
                              lazy_in=False, lazy_out=True)

def test_dask_all_ordinary_1d_spec(self):
    """1D spectroscopic-only dataset, dask in and out."""
    self.base_comparison_1_2d(is_2d=False, is_spec=True, flip_inds=False,
                              lazy_in=True, lazy_out=True)

def test_numpy_ordinary_2d_pos(self):
    """2D position-heavy dataset, numpy in and out."""
    self.base_comparison_1_2d(is_2d=True, is_spec=False, flip_inds=False)

def test_numpy_ordinary_2d_spec(self):
    """2D spectroscopic-heavy dataset, numpy in and out."""
    self.base_comparison_1_2d(is_2d=True, is_spec=True, flip_inds=False)
def test_h5_both_inds_flipped(self):
    """
    Builds an HDF5 file whose pos AND spec index matrices are both stored
    slow-to-fast, then verifies reshape_to_n_dims on it.
    """
    # Flipping both the spec and pos dimensions means that the order in which
    # the data is stored is the same order in which dimensions are arranged
    # In other words, sort should make no difference at all!
    file_path = 'reshape_to_n_dim_sort_required.h5'
    data_utils.delete_existing_file(file_path)
    with h5py.File(file_path, mode='w') as h5_f:
        h5_raw_grp = h5_f.create_group('Raw_Measurement')
        main_nd, source_pos_data, source_spec_data = self.build_main_anc_4d()
        # arrange as slow, fast instead of fast, slow
        source_pos_data = np.fliplr(source_pos_data)
        # make spectroscopic slow, fast instead of fast, slow
        source_spec_data = np.flipud(source_spec_data)
        source_dset_name = 'source_main'
        # Arrange from slow to fast
        pos_attrs = {'units': ['nm', 'um'], 'labels': ['Y', 'X']}
        h5_pos_inds = h5_raw_grp.create_dataset('Position_Indices', data=source_pos_data, dtype=np.uint16)
        data_utils.write_aux_reg_ref(h5_pos_inds, pos_attrs['labels'], is_spec=False)
        data_utils.write_string_list_as_attr(h5_pos_inds, pos_attrs)
        h5_pos_vals = h5_raw_grp.create_dataset('Position_Values', data=source_pos_data, dtype=np.float32)
        data_utils.write_aux_reg_ref(h5_pos_vals, pos_attrs['labels'], is_spec=False)
        data_utils.write_string_list_as_attr(h5_pos_vals, pos_attrs)
        # Flatten the 4D cube into the USID 2D (positions x spectroscopic) form
        source_main_data = main_nd.reshape(np.prod(main_nd.shape[:2]),
                                           np.prod(main_nd.shape[2:]))
        h5_source_main = h5_raw_grp.create_dataset(source_dset_name, data=source_main_data)
        data_utils.write_safe_attrs(h5_source_main, {'units': 'A', 'quantity': 'Current'})
        # Remember to set from slow to fast
        source_spec_attrs = {'units': ['', 'V'], 'labels': ['Cycle', 'Bias']}
        h5_source_spec_inds = h5_raw_grp.create_dataset('Spectroscopic_Indices', data=source_spec_data,
                                                        dtype=np.uint16)
        data_utils.write_aux_reg_ref(h5_source_spec_inds, source_spec_attrs['labels'], is_spec=True)
        data_utils.write_string_list_as_attr(h5_source_spec_inds, source_spec_attrs)
        h5_source_spec_vals = h5_raw_grp.create_dataset('Spectroscopic_Values', data=source_spec_data,
                                                        dtype=np.float32)
        data_utils.write_aux_reg_ref(h5_source_spec_vals, source_spec_attrs['labels'], is_spec=True)
        data_utils.write_string_list_as_attr(h5_source_spec_vals, source_spec_attrs)
        # Now need to link as main!
        for dset in [h5_pos_inds, h5_pos_vals, h5_source_spec_inds, h5_source_spec_vals]:
            h5_source_main.attrs[dset.name.split('/')[-1]] = dset.ref
        # Sorted reshape: must match the slow-to-fast cube and labels
        n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_source_main, get_labels=True, sort_dims=True,
                                                             lazy=False, verbose=False)
        self.assertTrue(np.all([x == y for x, y in zip(labels, ['Y', 'X', 'Cycle', 'Bias'])]))
        self.assertTrue(np.allclose(main_nd, n_dim))
        # Since the stored order IS slow-to-fast, the unsorted result is the same
        expected_n_dim = main_nd  # np.transpose(main_nd, [1, 0, 3, 2])
        n_dim, success, labels = hdf_utils.reshape_to_n_dims(
            h5_source_main, get_labels=True, sort_dims=False,
            lazy=False, verbose=False)
        self.assertTrue(np.all([x == y for x, y in zip(labels, ['Y', 'X', 'Cycle', 'Bias'])]))
        self.assertTrue(np.allclose(expected_n_dim, n_dim))
    os.remove(file_path)
def test_h5_beps_field(self):
    """
    BEPS-like layout: the spectroscopic matrix has the Field dimension
    varying fastest while the stored cube keeps Bias before Field.
    Verifies both sorted and unsorted reshape_to_n_dims results.
    """
    file_path = 'reshape_to_n_dim_sort_required.h5'
    data_utils.delete_existing_file(file_path)
    with h5py.File(file_path, mode='w') as h5_f:
        h5_raw_grp = h5_f.create_group('Raw_Measurement')
        num_rows = 3
        num_cols = 5
        num_fields = 2
        num_cycle_pts = 7
        # arrange as fast, slow
        source_pos_data = np.vstack(
            (np.tile(np.arange(num_cols), num_rows),
             np.repeat(np.arange(num_rows), num_cols))).T
        # arrange as fast, slow
        source_spec_data = np.vstack(
            (np.tile(np.arange(num_fields), num_cycle_pts),
             np.repeat(np.arange(num_cycle_pts), num_fields),))
        # Data is arranged from slowest to fastest
        # Field 0 holds negative bias values, field 1 positive ones
        test = np.vstack((np.arange(num_cycle_pts) * -1 - 1,
                          np.arange(num_cycle_pts) + 1))
        main_nd = np.zeros(
            shape=(num_rows, num_cols, num_fields, num_cycle_pts),
            dtype=np.float16)
        for row_ind in range(num_rows):
            for col_ind in range(num_cols):
                main_nd[
                    row_ind, col_ind] = 1E+3 * row_ind + 1E+2 * col_ind + test
        # Reorder the cube so Bias comes before Field (as stored in BEPS)
        main_nd = main_nd.transpose(0, 1, 3, 2)
        source_dset_name = 'source_main'
        # Arrange from fast to slow
        pos_attrs = {'units': ['nm', 'um'], 'labels': ['X', 'Y']}
        h5_pos_inds = h5_raw_grp.create_dataset('Position_Indices',
                                                data=source_pos_data,
                                                dtype=np.uint16)
        data_utils.write_aux_reg_ref(h5_pos_inds, pos_attrs['labels'],
                                     is_spec=False)
        data_utils.write_string_list_as_attr(h5_pos_inds, pos_attrs)
        h5_pos_vals = h5_raw_grp.create_dataset('Position_Values',
                                                data=source_pos_data,
                                                dtype=np.float32)
        data_utils.write_aux_reg_ref(h5_pos_vals, pos_attrs['labels'],
                                     is_spec=False)
        data_utils.write_string_list_as_attr(h5_pos_vals, pos_attrs)
        # Flatten the cube to the USID 2D form
        source_main_data = main_nd.reshape(np.prod(main_nd.shape[:2]),
                                           np.prod(main_nd.shape[2:]))
        h5_source_main = h5_raw_grp.create_dataset(source_dset_name,
                                                   data=source_main_data)
        data_utils.write_safe_attrs(h5_source_main,
                                    {'units': 'A', 'quantity': 'Current'})
        # Remember to set from fast to slow
        source_spec_attrs = {'units': ['', 'V'],
                             'labels': ['Field', 'Bias']}
        h5_source_spec_inds = h5_raw_grp.create_dataset(
            'Spectroscopic_Indices', data=source_spec_data,
            dtype=np.uint16)
        data_utils.write_aux_reg_ref(h5_source_spec_inds,
                                     source_spec_attrs['labels'],
                                     is_spec=True)
        data_utils.write_string_list_as_attr(h5_source_spec_inds,
                                             source_spec_attrs)
        h5_source_spec_vals = h5_raw_grp.create_dataset(
            'Spectroscopic_Values', data=source_spec_data,
            dtype=np.float32)
        data_utils.write_aux_reg_ref(h5_source_spec_vals,
                                     source_spec_attrs['labels'],
                                     is_spec=True)
        data_utils.write_string_list_as_attr(h5_source_spec_vals,
                                             source_spec_attrs)
        # Now need to link as main!
        for dset in [h5_pos_inds, h5_pos_vals, h5_source_spec_inds,
                     h5_source_spec_vals]:
            h5_source_main.attrs[dset.name.split('/')[-1]] = dset.ref
        # Sorted: dimensions come back slow-to-fast with matching labels
        n_dim, success, labels = hdf_utils.reshape_to_n_dims(
            h5_source_main, get_labels=True, sort_dims=True,
            lazy=False, verbose=False)
        self.assertTrue(np.all(
            [x == y for x, y in zip(labels, ['Y', 'X', 'Bias', 'Field'])]))
        self.assertTrue(np.allclose(main_nd, n_dim))
        # Unsorted: dimensions stay in the fast-to-slow as-stored order
        expected_n_dim = np.transpose(main_nd, [1, 0, 3, 2])
        n_dim, success, labels = hdf_utils.reshape_to_n_dims(
            h5_source_main, get_labels=True, sort_dims=False,
            lazy=False, verbose=False)
        self.assertTrue(np.all(
            [x == y for x, y in zip(labels, ['X', 'Y', 'Field', 'Bias'])]))
        self.assertTrue(np.allclose(expected_n_dim, n_dim))
    os.remove(file_path)
class TestReshapeFromNDims(TestModel):
    """
    Tests for hdf_utils.reshape_from_n_dims under every combination of
    slow-to-fast / fast-to-slow ordering in the ancillary index matrices.

    Refactor: both tests previously duplicated the identical 4-deep loop
    that builds the synthetic cube; it now lives in one private helper.
    """

    # (rows, cols, cycles, cycle_pts) of the synthetic cube
    _SHAPE_4D = (3, 5, 2, 7)

    @classmethod
    def _build_nd_and_2d(cls):
        """Build the 4D cube arranged [pos slow->fast, spec slow->fast]
        and the equivalent 2D (positions x spectroscopic) matrix.

        Each element encodes its own indices as
        1E3*row + 1E2*col + 1E1*cycle + bias so any mis-ordering of
        dimensions is immediately visible in the values.
        """
        num_rows, num_cols, num_cycles, num_cycle_pts = cls._SHAPE_4D
        source_nd = np.zeros(shape=cls._SHAPE_4D, dtype=np.float16)
        expected_2d = np.zeros(shape=(num_rows * num_cols,
                                      num_cycle_pts * num_cycles),
                               dtype=np.float16)
        for row_ind in range(num_rows):
            for col_ind in range(num_cols):
                for cycle_ind in range(num_cycles):
                    for bias_ind in range(num_cycle_pts):
                        val = 1E+3 * row_ind + 1E+2 * col_ind + \
                              1E+1 * cycle_ind + bias_ind
                        expected_2d[row_ind * num_cols + col_ind,
                                    cycle_ind * num_cycle_pts + bias_ind] = val
                        source_nd[row_ind, col_ind, cycle_ind, bias_ind] = val
        return source_nd, expected_2d

    def test_pos_and_spec_provided(self):
        """All four orderings of the supplied pos / spec index matrices
        must flatten back to the same 2D matrix."""
        num_rows, num_cols, num_cycles, num_cycle_pts = self._SHAPE_4D
        source_nd, expected_2d = self._build_nd_and_2d()
        # case 1: Pos and Spec both arranged as slow to fast:
        source_pos_data = np.vstack((np.repeat(np.arange(num_rows), num_cols),
                                     np.tile(np.arange(num_cols), num_rows))).T
        source_spec_data = np.vstack((np.repeat(np.arange(num_cycles), num_cycle_pts),
                                      np.tile(np.arange(num_cycle_pts), num_cycles)))
        ret_2d, success = hdf_utils.reshape_from_n_dims(source_nd, h5_pos=source_pos_data, h5_spec=source_spec_data)
        self.assertTrue(success)
        self.assertTrue(np.allclose(ret_2d, expected_2d))
        # case 2: Only Pos arranged as slow to fast:
        main_pos_sorted = np.transpose(source_nd, (0, 1, 3, 2))
        source_pos_data = np.vstack((np.repeat(np.arange(num_rows), num_cols),
                                     np.tile(np.arange(num_cols), num_rows))).T
        source_spec_data = np.vstack((np.tile(np.arange(num_cycle_pts), num_cycles),
                                      np.repeat(np.arange(num_cycles), num_cycle_pts),))
        ret_2d, success = hdf_utils.reshape_from_n_dims(main_pos_sorted, h5_pos=source_pos_data,
                                                        h5_spec=source_spec_data)
        self.assertTrue(success)
        self.assertTrue(np.allclose(ret_2d, expected_2d))
        # case 3: only Spec arranged as slow to fast:
        main_spec_sorted = np.transpose(source_nd, (1, 0, 2, 3))
        source_pos_data = np.vstack((np.tile(np.arange(num_cols), num_rows),
                                     np.repeat(np.arange(num_rows), num_cols))).T
        source_spec_data = np.vstack((np.repeat(np.arange(num_cycles), num_cycle_pts),
                                      np.tile(np.arange(num_cycle_pts), num_cycles)))
        ret_2d, success = hdf_utils.reshape_from_n_dims(main_spec_sorted, h5_pos=source_pos_data,
                                                        h5_spec=source_spec_data)
        self.assertTrue(success)
        self.assertTrue(np.allclose(ret_2d, expected_2d))
        # case 4: neither pos nor spec arranged as slow to fast:
        main_not_sorted = np.transpose(source_nd, (1, 0, 3, 2))
        source_pos_data = np.vstack((np.tile(np.arange(num_cols), num_rows),
                                     np.repeat(np.arange(num_rows), num_cols))).T
        source_spec_data = np.vstack((np.tile(np.arange(num_cycle_pts), num_cycles),
                                      np.repeat(np.arange(num_cycles), num_cycle_pts),))
        ret_2d, success = hdf_utils.reshape_from_n_dims(main_not_sorted, h5_pos=source_pos_data,
                                                        h5_spec=source_spec_data)
        self.assertTrue(success)
        self.assertTrue(np.allclose(ret_2d, expected_2d))

    def test_pos_and_spec_may_may_not_be_provided(self):
        """Supplying only pos or only spec must work; neither must raise."""
        num_rows, num_cols, num_cycles, num_cycle_pts = self._SHAPE_4D
        source_nd, expected_2d = self._build_nd_and_2d()
        source_pos_data = np.vstack((np.repeat(np.arange(num_rows), num_cols),
                                     np.tile(np.arange(num_cols), num_rows))).T
        source_spec_data = np.vstack((np.repeat(np.arange(num_cycles), num_cycle_pts),
                                      np.tile(np.arange(num_cycle_pts), num_cycles)))
        # case 1: only pos provided:
        ret_2d, success = hdf_utils.reshape_from_n_dims(source_nd, h5_pos=source_pos_data)
        self.assertTrue(success)
        self.assertTrue(np.allclose(ret_2d, expected_2d))
        # case 2: only spec provided:
        ret_2d, success = hdf_utils.reshape_from_n_dims(source_nd, h5_spec=source_spec_data)
        self.assertTrue(success)
        self.assertTrue(np.allclose(ret_2d, expected_2d))
        # case 3: neither pos nor spec provided:
        with self.assertRaises(ValueError):
            _ = hdf_utils.reshape_from_n_dims(source_nd)
class TestWriteMainDataset(TestModel):
    """Tests for hdf_utils.write_main_dataset covering numpy / dask /
    empty inputs, pre-existing ancillary datasets, and cross-file links."""

    def base_write(self, lazy_main=False, empty_main=False, pre_pos=False,
                   pre_spec=False, to_new_file=False):
        """
        Write a (5x3 position) x (7x2 spectroscopic) Main dataset and
        validate the result plus both ancillary dataset pairs.

        Parameters
        ----------
        lazy_main : bool
            Pass the main data as a dask array
        empty_main : bool
            Pass only the shape (and a dtype) so an empty dataset is made
        pre_pos / pre_spec : bool
            Create the position / spectroscopic ancillary datasets ahead
            of time and hand them to write_main_dataset
        to_new_file : bool
            Write the Main dataset into a second file while the ancillary
            datasets (if pre-made) live in the first
        """
        file_path = 'test.h5'
        new_file_path = 'new.h5'
        data_utils.delete_existing_file(file_path)
        main_data = np.random.rand(15, 14)
        main_data_name = 'Test_Main'
        quantity = 'Current'
        dset_units = 'nA'
        pos_sizes = [5, 3]
        pos_names = ['X', 'Y']
        pos_units = ['nm', 'um']
        pos_dims = []
        for length, name, units in zip(pos_sizes, pos_names, pos_units):
            pos_dims.append(Dimension(name, units, np.arange(length)))
        # Expected position indices: X fastest, Y slowest
        pos_data = np.vstack((np.tile(np.arange(5), 3),
                              np.repeat(np.arange(3), 5))).T
        spec_sizes = [7, 2]
        spec_names = ['Bias', 'Cycle']
        spec_units = ['V', '']
        spec_dims = []
        for length, name, units in zip(spec_sizes, spec_names, spec_units):
            spec_dims.append(Dimension(name, units, np.arange(length)))
        # Expected spectroscopic indices: Bias fastest, Cycle slowest
        spec_data = np.vstack((np.tile(np.arange(7), 2),
                               np.repeat(np.arange(2), 7)))
        input_data = main_data
        kwargs = {}
        if lazy_main:
            input_data = da.from_array(main_data, chunks=main_data.shape)
        if empty_main:
            # Passing just the shape creates an empty dataset; dtype required
            input_data = main_data.shape
            kwargs.update({'dtype': np.float16})
        with h5py.File(file_path, mode='w') as h5_f:
            if pre_spec:
                # Pre-create spectroscopic ancillaries and pass handles instead
                h5_spec_inds, h5_spec_vals = hdf_utils.write_ind_val_dsets(
                    h5_f, spec_dims, is_spectral=True)
                spec_dims = None
                kwargs.update({'h5_spec_inds': h5_spec_inds,
                               'h5_spec_vals': h5_spec_vals})
            if pre_pos:
                # Same for the position ancillaries
                h5_pos_inds, h5_pos_vals = hdf_utils.write_ind_val_dsets(h5_f,
                                                                         pos_dims,
                                                                         is_spectral=False)
                pos_dims = None
                kwargs.update({'h5_pos_inds': h5_pos_inds,
                               'h5_pos_vals': h5_pos_vals})
            targ_loc = h5_f
            if to_new_file:
                h5_f_2 = h5py.File(new_file_path, mode='w')
                targ_loc = h5_f_2
            usid_main = hdf_utils.write_main_dataset(targ_loc, input_data, main_data_name, quantity, dset_units, pos_dims,
                                                     spec_dims, main_dset_attrs=None, slow_to_fast=False, verbose=True, **kwargs)
            self.assertIsInstance(usid_main, USIDataset)
            self.assertEqual(usid_main.name.split('/')[-1], main_data_name)
            self.assertEqual(usid_main.parent, targ_loc)
            if not empty_main:
                self.assertTrue(np.allclose(main_data, usid_main[()]))
            data_utils.validate_aux_dset_pair(self, targ_loc, usid_main.h5_pos_inds, usid_main.h5_pos_vals, pos_names, pos_units,
                                              pos_data, h5_main=usid_main, is_spectral=False, slow_to_fast=False)
            data_utils.validate_aux_dset_pair(self, targ_loc, usid_main.h5_spec_inds, usid_main.h5_spec_vals, spec_names, spec_units,
                                              spec_data, h5_main=usid_main, is_spectral=True, slow_to_fast=False)
            if to_new_file:
                h5_f_2.close()
                os.remove(new_file_path)
        # NOTE(review): h5_f is already closed by the context manager above;
        # this explicit close is redundant but harmless in h5py.
        h5_f.close()
        os.remove(file_path)

    def test_numpy_small(self):
        """Plain numpy main data."""
        self.base_write()

    def test_dask_small(self):
        """Dask array main data."""
        self.base_write(lazy_main=True)

    def test_empty_main(self):
        """Shape-only (empty) main dataset."""
        self.base_write(empty_main=True)

    def test_write_main_existing_pos_aux(self):
        """Pre-made position ancillaries in the same file."""
        self.base_write(pre_pos=True, pre_spec=False)

    def test_write_main_existing_pos_aux_diff_file(self):
        """Pre-made position ancillaries, main written to another file."""
        self.base_write(pre_pos=True, pre_spec=False, to_new_file=True)

    def test_write_main_existing_spec_aux(self):
        """Pre-made spectroscopic ancillaries in the same file."""
        self.base_write(pre_pos=False, pre_spec=True)

    def test_write_main_existing_spec_aux_diff_file(self):
        """Pre-made spectroscopic ancillaries, main in another file."""
        self.base_write(pre_pos=False, pre_spec=True, to_new_file=True)

    def test_write_main_both_existing_aux(self):
        """Both ancillary pairs pre-made in the same file."""
        self.base_write(pre_pos=True, pre_spec=True)

    def test_write_main_both_existing_aux_diff_file(self):
        """Both ancillary pairs pre-made, main in another file."""
        self.base_write(pre_pos=True, pre_spec=True, to_new_file=True)

    def test_prod_sizes_mismatch(self):
        """Dimension sizes whose product disagrees with the data shape
        must raise ValueError."""
        file_path = 'test.h5'
        data_utils.delete_existing_file(file_path)
        main_data = np.random.rand(15, 14)
        main_data_name = 'Test_Main'
        quantity = 'Current'
        dset_units = 'nA'
        pos_sizes = [5, 15]  # too many steps in the Y direction
        pos_names = ['X', 'Y']
        pos_units = ['nm', 'um']
        pos_dims = []
        for length, name, units in zip(pos_sizes, pos_names, pos_units):
            pos_dims.append(Dimension(name, units, np.arange(length)))
        spec_sizes = [7, 2]
        spec_names = ['Bias', 'Cycle']
        spec_units = ['V', '']
        spec_dims = []
        for length, name, units in zip(spec_sizes, spec_names, spec_units):
            spec_dims.append(Dimension(name, units, np.arange(length)))
        with h5py.File(file_path, mode='w') as h5_f:
            with self.assertRaises(ValueError):
                _ = hdf_utils.write_main_dataset(h5_f, main_data, main_data_name, quantity, dset_units, pos_dims,
                                                 spec_dims)
        os.remove(file_path)
if __name__ == '__main__':
    # Allow running this test module directly from the command line
    unittest.main()
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: Suhas Somnath
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import os
import sys
import h5py
import numpy as np
sys.path.append("../../pyUSID/")
from pyUSID.io import hdf_utils
from .. import data_utils
if sys.version_info.major == 3:
unicode = str
class TestHDFUtilsBase(unittest.TestCase):
    """Base class: builds the standard BEPS HDF5 file before every test
    and deletes it afterwards so tests never see stale state."""

    def setUp(self):
        # Generate the canonical BEPS test file used by the hdf_utils tests
        data_utils.make_beps_file()

    def tearDown(self):
        data_utils.delete_existing_file(data_utils.std_beps_path)
class TestGetH5ObjRefs(TestHDFUtilsBase):
    """Tests for hdf_utils.get_h5_obj_refs: filtering a mixed list of
    objects down to the h5py objects whose names match."""

    def test_many(self):
        """Multiple names matched from a list containing non-h5py junk."""
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            # Deliberately mixes in non-HDF5 objects that must be ignored
            h5_obj_refs = [h5_f,
                           4.123,
                           np.arange(6),
                           h5_f['/Raw_Measurement/Position_Indices'],
                           h5_f['/Raw_Measurement/source_main-Fitter_000'],
                           h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'],
                           h5_f['/Raw_Measurement/Spectroscopic_Values']]
            chosen_objs = [h5_f['/Raw_Measurement/Position_Indices'],
                           h5_f['/Raw_Measurement/source_main-Fitter_000'],
                           h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices']]
            target_ref_names = ['Position_Indices', 'source_main-Fitter_000', 'Spectroscopic_Indices']
            returned_h5_objs = hdf_utils.get_h5_obj_refs(target_ref_names, h5_obj_refs)
            self.assertEqual(set(chosen_objs), set(returned_h5_objs))

    def test_single(self):
        """A single name (passed as a string, not a list) is accepted."""
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_obj_refs = [h5_f,
                           4.123,
                           np.arange(6),
                           h5_f['/Raw_Measurement/Position_Indices'],
                           h5_f['/Raw_Measurement/source_main-Fitter_000'],
                           h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'],
                           h5_f['/Raw_Measurement/Spectroscopic_Values']]
            chosen_objs = [h5_f['/Raw_Measurement/Position_Indices']]
            target_ref_names = ['Position_Indices']
            returned_h5_objs = hdf_utils.get_h5_obj_refs(target_ref_names[0], h5_obj_refs)
            self.assertEqual(set(chosen_objs), set(returned_h5_objs))

    def test_non_string_names(self):
        """Non-string entries among the requested names must raise."""
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_obj_refs = [h5_f, 4.123, np.arange(6),
                           h5_f['/Raw_Measurement/Position_Indices']]
            target_ref_names = ['Position_Indices', np.arange(6), 4.123]
            with self.assertRaises(TypeError):
                _ = hdf_utils.get_h5_obj_refs(target_ref_names, h5_obj_refs)

    def test_no_hdf5_datasets(self):
        """A non-iterable, non-h5py candidate list must raise."""
        h5_obj_refs = 4.124
        target_ref_names = ['Position_Indices']
        with self.assertRaises(TypeError):
            _ = hdf_utils.get_h5_obj_refs(target_ref_names, h5_obj_refs)

    def test_same_name(self):
        """Two datasets sharing a name in different groups are both returned."""
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_obj_refs = [h5_f['/Raw_Measurement/source_main-Fitter_001/Spectroscopic_Indices'],
                           h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'],
                           h5_f['/Raw_Measurement/Spectroscopic_Values']]
            expected_objs = [h5_f['/Raw_Measurement/source_main-Fitter_001/Spectroscopic_Indices'],
                             h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices']]
            target_ref_names = ['Spectroscopic_Indices']
            returned_h5_objs = hdf_utils.get_h5_obj_refs(target_ref_names, h5_obj_refs)
            self.assertEqual(set(expected_objs), set(returned_h5_objs))
class TestFindDataset(TestHDFUtilsBase):
    """Tests for hdf_utils.find_dataset: recursive search by name."""

    def test_legal(self):
        """All datasets named 'Spectroscopic_Indices' under the group
        (at any depth) should be found."""
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_group = h5_f['/Raw_Measurement/']
            expected_dsets = [h5_f['/Raw_Measurement/Spectroscopic_Indices'],
                              h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'],
                              h5_f['/Raw_Measurement/source_main-Fitter_001/Spectroscopic_Indices']]
            ret_val = hdf_utils.find_dataset(h5_group, 'Spectroscopic_Indices')
            self.assertEqual(set(ret_val), set(expected_dsets))
class TestWriteBookKeepingAttrs(unittest.TestCase):
    """Tests for hdf_utils.write_book_keeping_attrs on files, groups,
    datasets, and illegal targets."""

    def _write_and_verify(self, get_target):
        """Create a scratch file, write book-keeping attributes onto the
        object produced by get_target(h5_file), verify, then clean up."""
        scratch_path = 'test.h5'
        data_utils.delete_existing_file(scratch_path)
        with h5py.File(scratch_path, mode='w') as h5_file:
            target = get_target(h5_file)
            hdf_utils.write_book_keeping_attrs(target)
            data_utils.verify_book_keeping_attrs(self, target)
        os.remove(scratch_path)

    def test_file(self):
        """Attributes written directly onto the file object."""
        self._write_and_verify(lambda h5_file: h5_file)

    def test_group(self):
        """Attributes written onto a group."""
        self._write_and_verify(lambda h5_file: h5_file.create_group('group'))

    def test_dset(self):
        """Attributes written onto a dataset."""
        self._write_and_verify(
            lambda h5_file: h5_file.create_dataset('dset', data=[1, 2, 3]))

    def test_invalid(self):
        """Anything that is not an h5py object must raise TypeError."""
        with self.assertRaises(TypeError):
            hdf_utils.write_book_keeping_attrs(np.arange(4))
class TestPrintTreeNoMain(unittest.TestCase):
def test_not_a_group(self):
    """print_tree must reject anything that is not an h5py Group / File.

    Fix: unlike its sibling tests, this test never removed its scratch
    file, leaking 'test.h5' after the suite ran.
    """
    file_path = 'test.h5'
    data_utils.delete_existing_file(file_path)
    with h5py.File(file_path, mode='w') as h5_f:
        dset = h5_f.create_dataset('A_Dataset', data=[1, 2, 3])
        with self.assertRaises(TypeError):
            hdf_utils.print_tree(dset, rel_paths=False,
                                 main_dsets_only=False)
    # Clean up the scratch file like the sibling tests do
    os.remove(file_path)
def test_single_level_tree(self):
    """Tree print of a root holding one dataset and one empty group."""
    file_path = 'test.h5'
    data_utils.delete_existing_file(file_path)
    expected = ['/']
    with h5py.File(file_path, mode='w') as h5_f:
        obj_name = 'A_Dataset'
        # Datasets print as a single branch line
        expected.append(0 * ' ' + '├ ' + obj_name)
        _ = h5_f.create_dataset(obj_name, data=[1, 2, 3])
        obj_name = 'B_Group'
        # Groups print a branch line followed by a dashed underline
        expected.append(0 * ' ' + '├ ' + obj_name)
        expected.append((0 + 1) * ' ' + len(obj_name) * '-')
        _ = h5_f.create_group(obj_name)
        with data_utils.capture_stdout() as get_value:
            hdf_utils.print_tree(h5_f, rel_paths=False,
                                 main_dsets_only=False)
            actual = get_value()
        expected = '\n'.join(expected) + '\n'
        self.assertEqual(expected, actual)
    os.remove(file_path)

def test_single_level_rel_paths(self):
    """Same structure but printed as relative paths (no tree glyphs)."""
    file_path = 'test.h5'
    data_utils.delete_existing_file(file_path)
    expected = ['/']
    with h5py.File(file_path, mode='w') as h5_f:
        obj_name = 'A_Dataset'
        expected.append(obj_name)
        _ = h5_f.create_dataset(obj_name, data=[1, 2, 3])
        obj_name = 'B_Group'
        expected.append(obj_name)
        _ = h5_f.create_group(obj_name)
        with data_utils.capture_stdout() as get_value:
            hdf_utils.print_tree(h5_f, rel_paths=True,
                                 main_dsets_only=False)
            actual = get_value()
        expected = '\n'.join(expected) + '\n'
        self.assertEqual(expected, actual)
    os.remove(file_path)
def test_multi_level_tree(self):
    """Tree print of three nested groups with a leaf dataset plus one
    dataset at the root; every object should appear."""
    file_path = 'test.h5'
    data_utils.delete_existing_file(file_path)
    expected = ['/']
    with h5py.File(file_path, mode='w') as h5_f:
        level = 0
        obj_name = 'A_Group'
        expected.append(level * ' ' + '├ ' + obj_name)
        expected.append((level + 1) * ' ' + len(obj_name) * '-')
        grp_1 = h5_f.create_group(obj_name)
        level += 1
        obj_name = 'B_Group'
        expected.append(level * ' ' + '├ ' + obj_name)
        expected.append((level + 1) * ' ' + len(obj_name) * '-')
        grp_2 = grp_1.create_group(obj_name)
        level += 1
        obj_name = 'C_Group'
        expected.append(level * ' ' + '├ ' + obj_name)
        expected.append((level + 1) * ' ' + len(obj_name) * '-')
        grp_3 = grp_2.create_group(obj_name)
        level += 1
        obj_name = 'Y_Dataset'
        expected.append(level * ' ' + '├ ' + obj_name)
        _ = grp_3.create_dataset(obj_name, data=[1, 2, 3])
        obj_name = 'X_Dataset'
        expected.append(0 * ' ' + '├ ' + obj_name)
        _ = h5_f.create_dataset(obj_name, data=[1, 2, 3])
        with data_utils.capture_stdout() as get_value:
            hdf_utils.print_tree(h5_f, rel_paths=False,
                                 main_dsets_only=False)
            actual = get_value()
        expected = '\n'.join(expected) + '\n'
        self.assertEqual(expected, actual)
    os.remove(file_path)

def test_multi_level_tree_main_dsets_only(self):
    """Same structure, but with main_dsets_only=True the plain (non-Main)
    datasets must be omitted while all groups still print."""
    file_path = 'test.h5'
    data_utils.delete_existing_file(file_path)
    expected = ['/']
    with h5py.File(file_path, mode='w') as h5_f:
        level = 0
        obj_name = 'A_Group'
        expected.append(level * ' ' + '├ ' + obj_name)
        expected.append((level + 1) * ' ' + len(obj_name) * '-')
        grp_1 = h5_f.create_group(obj_name)
        level += 1
        obj_name = 'B_Group'
        expected.append(level * ' ' + '├ ' + obj_name)
        expected.append((level + 1) * ' ' + len(obj_name) * '-')
        grp_2 = grp_1.create_group(obj_name)
        level += 1
        obj_name = 'C_Group'
        expected.append(level * ' ' + '├ ' + obj_name)
        expected.append((level + 1) * ' ' + len(obj_name) * '-')
        grp_3 = grp_2.create_group(obj_name)
        level += 1
        obj_name = 'Y_Dataset'
        # Intentionally NOT added to expected: not a Main dataset
        # expected.append(level * ' ' + '├ ' + obj_name)
        _ = grp_3.create_dataset(obj_name, data=[1, 2, 3])
        obj_name = 'X_Dataset'
        # Intentionally NOT added to expected: not a Main dataset
        # expected.append(0 * ' ' + '├ ' + obj_name)
        _ = h5_f.create_dataset(obj_name, data=[1, 2, 3])
        with data_utils.capture_stdout() as get_value:
            hdf_utils.print_tree(h5_f, rel_paths=False,
                                 main_dsets_only=True)
            actual = get_value()
        expected = '\n'.join(expected) + '\n'
        self.assertEqual(expected, actual)
    os.remove(file_path)

def test_multi_level_tree_grp_a(self):
    """Printing from a sub-group: only that group's subtree appears and
    its absolute path heads the output."""
    file_path = 'test.h5'
    data_utils.delete_existing_file(file_path)
    expected = []
    with h5py.File(file_path, mode='w') as h5_f:
        obj_name = 'A_Group'
        grp_1 = h5_f.create_group(obj_name)
        # Full path printed for root always
        expected.append(grp_1.name)
        level = 0
        obj_name = 'B_Group'
        expected.append(level * ' ' + '├ ' + obj_name)
        expected.append((level + 1) * ' ' + len(obj_name) * '-')
        grp_2 = grp_1.create_group(obj_name)
        level += 1
        obj_name = 'C_Group'
        expected.append(level * ' ' + '├ ' + obj_name)
        expected.append((level + 1) * ' ' + len(obj_name) * '-')
        grp_3 = grp_2.create_group(obj_name)
        level += 1
        obj_name = 'Y_Dataset'
        expected.append(level * ' ' + '├ ' + obj_name)
        _ = grp_3.create_dataset(obj_name, data=[1, 2, 3])
        obj_name = 'X_Dataset'
        # Root-level dataset is outside the printed subtree
        # expected.append(0 * ' ' + '├ ' + obj_name)
        _ = h5_f.create_dataset(obj_name, data=[1, 2, 3])
        with data_utils.capture_stdout() as get_value:
            hdf_utils.print_tree(grp_1, rel_paths=False,
                                 main_dsets_only=False)
            actual = get_value()
        expected = '\n'.join(expected) + '\n'
        self.assertEqual(expected, actual)
    os.remove(file_path)
def test_multi_level_tree_grp_b(self):
file_path = 'test.h5'
data_utils.delete_existing_file(file_path)
expected = []
with h5py.File(file_path, mode='w') as h5_f:
obj_name = 'A_Group'
grp_1 = h5_f.create_group(obj_name)
obj_name = 'B_Group'
grp_2 = grp_1.create_group(obj_name)
# Full path printed for root always
expected.append(grp_2.name)
level = 0
obj_name = 'C_Group'
expected.append(level * ' ' + '├ ' + obj_name)
expected.append((level + 1) * ' ' + len(obj_name) * '-')
grp_3 = grp_2.create_group(obj_name)
level += 1
obj_name = 'Y_Dataset'
expected.append(level * ' ' + '├ ' + obj_name)
_ = grp_3.create_dataset(obj_name, data=[1, 2, 3])
obj_name = 'X_Dataset'
# expected.append(0 * ' ' + '├ ' + obj_name)
_ = h5_f.create_dataset(obj_name, data=[1, 2, 3])
with data_utils.capture_stdout() as get_value:
hdf_utils.print_tree(grp_2, rel_paths=False,
main_dsets_only=False)
actual = get_value()
expected = '\n'.join(expected) + '\n'
self.assertEqual(expected, actual)
os.remove(file_path)
def test_multi_level_rel_paths_grp_b(self):
file_path = 'test.h5'
data_utils.delete_existing_file(file_path)
expected = []
with h5py.File(file_path, mode='w') as h5_f:
obj_name = 'A_Group'
grp_1 = h5_f.create_group(obj_name)
obj_name = 'B_Group'
grp_2 = grp_1.create_group(obj_name)
# Full path printed for root always
expected.append(grp_2.name)
obj_name = 'C_Group'
grp_3 = grp_2.create_group(obj_name)
expected.append(grp_3.name.replace(grp_2.name + '/', ''))
obj_name = 'Y_Dataset'
dset = grp_3.create_dataset(obj_name, data=[1, 2, 3])
expected.append(dset.name.replace(grp_2.name + '/', ''))
obj_name = 'X_Dataset'
_ = h5_f.create_dataset(obj_name, data=[1, 2, 3])
with data_utils.capture_stdout() as get_value:
hdf_utils.print_tree(grp_2, rel_paths=True,
main_dsets_only=False)
actual = get_value()
expected = '\n'.join(expected) + '\n'
self.assertEqual(expected, actual)
os.remove(file_path)
def test_multi_level_rel_paths(self):
file_path = 'test.h5'
data_utils.delete_existing_file(file_path)
expected = ['/']
with h5py.File(file_path, mode='w') as h5_f:
obj_name = 'A_Group'
grp_1 = h5_f.create_group(obj_name)
expected.append(grp_1.name[1:])
obj_name = 'B_Group'
grp_2 = grp_1.create_group(obj_name)
expected.append(grp_2.name[1:])
obj_name = 'C_Group'
grp_3 = grp_2.create_group(obj_name)
expected.append(grp_3.name[1:])
obj_name = 'Y_Dataset'
dset = grp_3.create_dataset(obj_name, data=[1, 2, 3])
expected.append(dset.name[1:])
obj_name = 'X_Dataset'
dset = h5_f.create_dataset(obj_name, data=[1, 2, 3])
expected.append(dset.name[1:])
with data_utils.capture_stdout() as get_value:
hdf_utils.print_tree(h5_f, rel_paths=True,
main_dsets_only=False)
actual = get_value()
expected = '\n'.join(expected) + '\n'
self.assertEqual(expected, actual)
os.remove(file_path)
class TestPrintTreeBEPS(TestHDFUtilsBase):
def test_root_all_dsets(self):
level = 0
expected = ['/',
level * ' ' + '├ ' + 'Raw_Measurement',
(level + 1) * ' ' + len('Raw_Measurement') * '-']
level += 1
expected += [
level * ' ' + '├ ' + 'Ancillary',
level * ' ' + '├ ' + 'Bias',
level * ' ' + '├ ' + 'Cycle',
level * ' ' + '├ ' + 'Misc',
(level + 1) * ' ' + len('Misc') * '-',
level * ' ' + '├ ' + 'Position_Indices',
level * ' ' + '├ ' + 'Position_Values',
level * ' ' + '├ ' + 'Spectroscopic_Indices',
level * ' ' + '├ ' + 'Spectroscopic_Values',
level * ' ' + '├ ' + 'X',
level * ' ' + '├ ' + 'Y',
level * ' ' + '├ ' + 'n_dim_form',
level * ' ' + '├ ' + 'source_main']
level += 1
for ind in range(2):
expected += [
(level-1) * ' ' + '├ ' + 'source_main-Fitter_00'+str(ind),
level * ' ' + len('source_main-Fitter_000') * '-',
level * ' ' + '├ ' + 'Spectroscopic_Indices',
level * ' ' + '├ ' + 'Spectroscopic_Values',
level * ' ' + '├ ' + 'n_dim_form',
level * ' ' + '├ ' + 'results_main',
]
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
with data_utils.capture_stdout() as get_value:
hdf_utils.print_tree(h5_f, rel_paths=False,
main_dsets_only=False)
actual = get_value()
expected = '\n'.join(expected) + '\n'
self.assertEqual(expected, actual)
def test_root_main_dsets_only(self):
level = 0
expected = ['/',
level * ' ' + '├ ' + 'Raw_Measurement',
(level + 1) * ' ' + len('Raw_Measurement') * '-']
level += 1
expected += [
level * ' ' + '├ ' + 'Misc',
(level + 1) * ' ' + len('Misc') * '-',
level * ' ' + '├ ' + 'source_main']
level += 1
for ind in range(2):
expected += [
(level - 1) * ' ' + '├ ' + 'source_main-Fitter_00' + str(ind),
level * ' ' + len('source_main-Fitter_000') * '-',
level * ' ' + '├ ' + 'results_main',
]
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
with data_utils.capture_stdout() as get_value:
hdf_utils.print_tree(h5_f, rel_paths=False,
main_dsets_only=True)
actual = get_value()
expected = '\n'.join(expected) + '\n'
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
| 19,421 | 36.785992 | 102 | py |
pyUSID-legacy | pyUSID-master-legacy/tests/processing/__init__.py | 0 | 0 | 0 | py | |
pyUSID-legacy | pyUSID-master-legacy/tests/processing/test_process.py | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: Suhas Somnath
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
from ..io import data_utils
from ..io.data_utils import *
sys.path.append("../../../pyUSID/")
import pyUSID as usid
def _create_results_grp_dsets(h5_main, process_name, parms_dict,
h5_parent_group=None):
h5_results_grp = usid.hdf_utils.create_results_group(h5_main,
process_name,
h5_parent_group=h5_parent_group)
usid.hdf_utils.write_simple_attrs(h5_results_grp, parms_dict)
spec_dims = usid.Dimension('Empty', 'a. u.', 1)
# 3. Create an empty results dataset that will hold all the results
h5_results = usid.hdf_utils.write_main_dataset(
h5_results_grp, (h5_main.shape[0], 1), 'Results',
'quantity', 'units', None, spec_dims,
dtype=np.float32,
h5_pos_inds=h5_main.h5_pos_inds,
h5_pos_vals=h5_main.h5_pos_vals)
return h5_results_grp, h5_results
class NoMapFunc(usid.Process):
def __init__(self, h5_main, **kwargs):
parms_dict = {'parm_1': 1, 'parm_2': [1, 2, 3]}
super(NoMapFunc, self).__init__(h5_main, 'Mean_Val',
parms_dict=parms_dict, **kwargs)
def _create_results_datasets(self):
self.h5_results_grp, self.h5_results = _create_results_grp_dsets(self.h5_main,
self.process_name,
self.parms_dict,
h5_parent_group=self._h5_target_group)
class AvgSpecUltraBasic(usid.Process):
def __init__(self, h5_main, *args, **kwargs):
parms_dict = {'parm_1': 1, 'parm_2': [1, 2, 3]}
super(AvgSpecUltraBasic, self).__init__(h5_main, 'Mean_Val',
parms_dict=parms_dict,
*args, **kwargs)
def _create_results_datasets(self):
self.h5_results_grp, self.h5_results = _create_results_grp_dsets(self.h5_main, self.process_name, self.parms_dict, h5_parent_group=self._h5_target_group)
@staticmethod
def _map_function(spectrogram, *args, **kwargs):
return np.mean(spectrogram)
def _write_results_chunk(self):
"""
Write the computed results back to the H5
In this case, there isn't any more additional post-processing required
"""
# Find out the positions to write to:
pos_in_batch = self._get_pixels_in_current_batch()
# write the results to the file
self.h5_results[pos_in_batch, 0] = np.array(self._results)
class AvgSpecUltraBasicWTest(AvgSpecUltraBasic):
def test(self, pos_ind):
return np.mean(self.h5_main[pos_ind])
class AvgSpecUltraBasicWGetPrevResults(AvgSpecUltraBasic):
def _get_existing_datasets(self):
self.h5_results = self.h5_results_grp['Results']
class TestInvalidInitialization(unittest.TestCase):
def test_no_map_func(self):
delete_existing_file(data_utils.std_beps_path)
data_utils.make_beps_file()
self.h5_file = h5py.File(data_utils.std_beps_path, mode='r+')
self.h5_main = self.h5_file['Raw_Measurement/source_main']
self.h5_main = usid.USIDataset(self.h5_main)
proc = NoMapFunc(self.h5_main)
with self.assertRaises(NotImplementedError):
_ = proc.compute()
self.h5_file.close()
delete_existing_file(data_utils.std_beps_path)
def test_read_only_h5_main(self):
delete_existing_file(data_utils.std_beps_path)
data_utils.make_beps_file()
self.h5_file = h5py.File(data_utils.std_beps_path, mode='r')
self.h5_main = self.h5_file['Raw_Measurement/source_main']
self.h5_main = usid.USIDataset(self.h5_main)
with self.assertRaises(IOError):
_ = AvgSpecUltraBasic(self.h5_main)
self.h5_file.close()
delete_existing_file(data_utils.std_beps_path)
def test_read_only_h5_parent_group(self):
delete_existing_file(data_utils.std_beps_path)
data_utils.make_beps_file()
self.h5_file = h5py.File(data_utils.std_beps_path, mode='r+')
self.h5_main = self.h5_file['Raw_Measurement/source_main']
self.h5_main = usid.USIDataset(self.h5_main)
results_path = 'sep_results.h5'
with h5py.File(results_path, mode='w') as file_handle:
file_handle.create_group("Blah")
h5_f_new = h5py.File(results_path, mode='r')
with self.assertRaises(IOError):
_ = AvgSpecUltraBasic(self.h5_main, h5_target_group=h5_f_new)
self.h5_file.close()
h5_f_new.close()
delete_existing_file(data_utils.std_beps_path)
delete_existing_file(results_path)
def test_not_main_dataset(self):
delete_existing_file(data_utils.std_beps_path)
data_utils.make_beps_file()
self.h5_file = h5py.File(data_utils.std_beps_path, mode='r+')
self.h5_main = self.h5_file['Raw_Measurement/X']
with self.assertRaises(ValueError):
_ = AvgSpecUltraBasic(self.h5_main)
self.h5_file.close()
delete_existing_file(data_utils.std_beps_path)
def test_invalid_process_name(self):
delete_existing_file(data_utils.std_beps_path)
data_utils.make_beps_file()
self.h5_file = h5py.File(data_utils.std_beps_path, mode='r+')
self.h5_main = self.h5_file['Raw_Measurement/source_main']
self.h5_main = usid.USIDataset(self.h5_main)
class TempProc(usid.Process):
def __init__(self, h5_main, *args, **kwargs):
parms_dict = {'parm_1': 1, 'parm_2': [1, 2, 3]}
super(TempProc, self).__init__(h5_main, {'a': 1},
parms_dict=parms_dict,
*args, **kwargs)
with self.assertRaises(TypeError):
_ = TempProc(self.h5_main)
self.h5_file.close()
delete_existing_file(data_utils.std_beps_path)
def test_invalid_parms_dict(self):
delete_existing_file(data_utils.std_beps_path)
data_utils.make_beps_file()
self.h5_file = h5py.File(data_utils.std_beps_path, mode='r+')
self.h5_main = self.h5_file['Raw_Measurement/source_main']
self.h5_main = usid.USIDataset(self.h5_main)
class TempProc(usid.Process):
def __init__(self, h5_main, *args, **kwargs):
super(TempProc, self).__init__(h5_main, 'Proc',
parms_dict='Parms',
*args, **kwargs)
with self.assertRaises(TypeError):
_ = TempProc(self.h5_main)
self.h5_file.close()
delete_existing_file(data_utils.std_beps_path)
def test_none_parms_dict(self):
delete_existing_file(data_utils.std_beps_path)
data_utils.make_beps_file()
self.h5_file = h5py.File(data_utils.std_beps_path, mode='r+')
self.h5_main = self.h5_file['Raw_Measurement/source_main']
self.h5_main = usid.USIDataset(self.h5_main)
class TempProc(usid.Process):
def __init__(self, h5_main, *args, **kwargs):
super(TempProc, self).__init__(h5_main, 'Proc',
parms_dict=None,
*args, **kwargs)
proc = TempProc(self.h5_main)
self.assertEqual(proc.parms_dict, dict())
self.h5_file.close()
delete_existing_file(data_utils.std_beps_path)
class TestCoreProcessNoTest(unittest.TestCase):
def setUp(self, proc_class=AvgSpecUltraBasic, **proc_kwargs):
delete_existing_file(data_utils.std_beps_path)
data_utils.make_beps_file()
self.h5_file = h5py.File(data_utils.std_beps_path, mode='r+')
self.h5_main = self.h5_file['Raw_Measurement/source_main']
self.h5_main = usid.USIDataset(self.h5_main)
self.exp_result = np.expand_dims(np.mean(self.h5_main[()], axis=1),
axis=1)
self.proc = proc_class(self.h5_main, **proc_kwargs)
def test_tfunc(self):
with self.assertRaises(NotImplementedError):
self.proc.test()
def tearDown(self):
self.h5_file.close()
delete_existing_file(data_utils.std_beps_path)
def test_compute(self):
h5_grp = self.proc.compute()
self.assertIsInstance(h5_grp, h5py.Group)
self.assertEqual(h5_grp, self.proc.h5_results_grp)
results_dset = h5_grp['Results']
self.assertTrue(np.allclose(results_dset[()], self.exp_result))
# Verify status dataset has been written:
self.assertTrue('completed_positions' in list(h5_grp.keys()))
h5_status_dset = h5_grp['completed_positions']
self.assertIsInstance(h5_status_dset, h5py.Dataset)
self.assertEqual(h5_status_dset.shape, (self.h5_main.shape[0],))
self.assertEqual(h5_status_dset.dtype, np.uint8)
class TestCoreProcessWTest(TestCoreProcessNoTest):
def setUp(self, proc_class=AvgSpecUltraBasicWTest, **proc_kwargs):
super(TestCoreProcessWTest,
self).setUp(proc_class=proc_class, **proc_kwargs)
def test_tfunc(self):
pix_ind = 5
actual = self.proc.test(pix_ind)
expected = self.exp_result[pix_ind]
self.assertTrue(np.allclose(actual, expected))
class TestWriteResultsToNewH5File(TestCoreProcessWTest):
def setUp(self, proc_class=AvgSpecUltraBasicWTest, **proc_kwargs):
self.results_h5_file_path = 'sep_results.h5'
self.h5_f_new = h5py.File(self.results_h5_file_path, mode='w')
super(TestWriteResultsToNewH5File,
self).setUp(proc_class=proc_class, h5_target_group=self.h5_f_new,
**proc_kwargs)
def test_compute(self):
super(TestWriteResultsToNewH5File, self).test_compute()
self.assertNotEqual(self.proc.h5_results_grp.file, self.h5_main.file)
self.assertEqual(self.proc.h5_results_grp.file, self.h5_f_new)
def tearDown(self):
super(TestWriteResultsToNewH5File, self).tearDown()
self.h5_f_new.close()
delete_existing_file(self.results_h5_file_path)
class TestCoreProcessWExistingResults(unittest.TestCase):
def __create_fake_result(self, percent_complete=100, parms_dict=None,
status_dset=True, status_attr=False,
verbose=False, h5_parent_group=None):
if parms_dict is None:
parms_dict = {'parm_1': 1, 'parm_2': [1, 2, 3]}
if verbose:
print('Using parms_dict: {}'.format(parms_dict))
results_grp, h5_results_dset = _create_results_grp_dsets(
self.h5_main, 'Mean_Val', parms_dict,
h5_parent_group=h5_parent_group)
# Intentionally set different results
exp_result = np.expand_dims(np.random.rand(h5_results_dset.shape[0]),
axis=1)
h5_results_dset[:, 0] = exp_result[:, 0]
# Build status:
status = np.ones(shape=self.h5_main.shape[0], dtype=np.uint8)
# Reset last portion of results to mean (expected)
complete_index = int(self.h5_main.shape[0] * percent_complete / 100)
if verbose:
print('Positions up to {} of {} will be marked as complete'
'.'.format(complete_index, self.h5_main.shape[0]))
if percent_complete < 100:
# print('Reset results from position: {}'.format(complete_index))
status[complete_index:] = 0
# print(status)
exp_result[complete_index:, 0] = np.mean(self.h5_main[complete_index:], axis=1)
# 4. Create fake status dataset
if status_dset:
if verbose:
print('Creating status dataset')
_ = results_grp.create_dataset('completed_positions', data=status)
if status_attr:
if verbose:
print('Writing legacy status attribute')
results_grp.attrs['last_pixel'] = complete_index
return results_grp, h5_results_dset, exp_result
def setUp(self, proc_class=AvgSpecUltraBasicWGetPrevResults,
percent_complete=100, parms_dict=None, status_dset=True,
status_attr=False, verbose=False, h5_target_group=None):
delete_existing_file(data_utils.std_beps_path)
data_utils.make_beps_file()
self.h5_file = h5py.File(data_utils.std_beps_path, mode='r+')
self.h5_main = self.h5_file['Raw_Measurement/source_main']
self.h5_main = usid.USIDataset(self.h5_main)
# Make some fake results here:
if any([isinstance(item, (list, tuple)) for item in [percent_complete,
status_attr,
status_dset,
parms_dict]]):
self.fake_results_grp = []
self.h5_results = []
self.exp_result = []
for this_per, this_parms, has_status_dset, has_status_attr in zip(
percent_complete, parms_dict, status_dset, status_attr):
ret_vals = self.__create_fake_result(percent_complete=this_per,
parms_dict=this_parms,
status_dset=has_status_dset,
status_attr=has_status_attr,
h5_parent_group=h5_target_group,
verbose=verbose)
self.fake_results_grp.append(ret_vals[0])
self.h5_results.append(ret_vals[1])
self.exp_result.append(ret_vals[2])
else:
ret_vals = self.__create_fake_result(percent_complete=percent_complete,
parms_dict=parms_dict,
status_dset=status_dset,
status_attr=status_attr,
h5_parent_group=h5_target_group,
verbose=verbose)
self.fake_results_grp, self.h5_results, self.exp_result = ret_vals
self.proc = AvgSpecUltraBasicWGetPrevResults(self.h5_main,
h5_target_group=h5_target_group,
verbose=verbose)
def test_compute(self):
h5_results_grp = self.proc.compute(override=False)
self.assertEqual(self.fake_results_grp, h5_results_grp)
def tearDown(self):
self.h5_file.close()
delete_existing_file(data_utils.std_beps_path)
class TestProcWLegacyResultsComplete(TestCoreProcessWExistingResults):
def setUp(self, percent_complete=100):
super(TestProcWLegacyResultsComplete,
self).setUp(percent_complete=percent_complete,
status_dset=False, status_attr=True)
def test_compute(self):
super(TestProcWLegacyResultsComplete,
self).test_compute()
items_in_grp = list(self.proc.h5_results_grp.keys())
# Should also have status dataset
self.assertEqual(len(items_in_grp), 4)
self.assertTrue(self.proc._status_dset_name in items_in_grp)
status_dset = self.proc.h5_results_grp[self.proc._status_dset_name]
self.assertEqual(np.sum(status_dset[()]), self.h5_main.shape[0])
class TestCoreProcessWDuplicateResultsOverride(TestCoreProcessWExistingResults):
def test_compute(self, override=True):
h5_grp = self.proc.compute(override=override)
self.assertNotEqual(self.fake_results_grp, h5_grp)
self.assertIsInstance(h5_grp, h5py.Group)
self.assertTrue(h5_grp.name.endswith('001'))
self.assertEqual(h5_grp, self.proc.h5_results_grp)
results_dset = h5_grp['Results']
self.assertTrue(np.allclose(results_dset[()],
np.expand_dims(
np.mean(self.h5_main[()], axis=1),
axis=1)
))
# Verify status dataset has been written:
self.assertTrue('completed_positions' in list(h5_grp.keys()))
h5_status_dset = h5_grp['completed_positions']
self.assertIsInstance(h5_status_dset, h5py.Dataset)
self.assertEqual(h5_status_dset.shape, (self.h5_main.shape[0],))
self.assertEqual(h5_status_dset.dtype, np.uint8)
class TestCoreProcessWExistResultsDiffParms(TestCoreProcessWDuplicateResultsOverride):
def setUp(self, proc_class=AvgSpecUltraBasicWGetPrevResults,
percent_complete=100,
parms_dict={'parm_1': 'Decoy', 'parm_2': 14.56}):
super(TestCoreProcessWExistResultsDiffParms,
self).setUp(percent_complete=percent_complete,
parms_dict=parms_dict)
def test_duplicate_partial_results(self):
self.assertEqual(len(self.proc.duplicate_h5_groups), 0)
self.assertEqual(len(self.proc.partial_h5_groups), 0)
def test_compute(self, override=False):
super(TestCoreProcessWExistResultsDiffParms,
self).test_compute(override=override)
class TestCoreProcessWExistingPartResults(TestCoreProcessWExistingResults):
def setUp(self, proc_class=AvgSpecUltraBasicWGetPrevResults,
percent_complete=50):
super(TestCoreProcessWExistingPartResults,
self).setUp(percent_complete=percent_complete)
class TestProcWLegacyResultsPartial(TestCoreProcessWExistingResults):
def setUp(self, percent_complete=50):
super(TestProcWLegacyResultsPartial,
self).setUp(percent_complete=percent_complete,
status_dset=False, status_attr=True)
def test_compute(self):
super(TestProcWLegacyResultsPartial,
self).test_compute()
items_in_grp = list(self.proc.h5_results_grp.keys())
self.assertEqual(len(items_in_grp), 4)
self.assertTrue(self.proc._status_dset_name in items_in_grp)
class TestProcWoStatus(TestCoreProcessWDuplicateResultsOverride):
def setUp(self, percent_complete=50):
super(TestProcWoStatus,
self).setUp(percent_complete=percent_complete,
status_dset=False, status_attr=False)
def test_compute(self, override=False):
super(TestProcWoStatus, self).test_compute(override=override)
class TestProcReturnCompletedNotPartial(TestCoreProcessWExistingResults):
def setUp(self):
super(TestProcReturnCompletedNotPartial,
self).setUp(percent_complete=[100, 50],
parms_dict=[None, None],
status_dset=[True, False],
status_attr=[False, True],
verbose=False)
def test_compute(self):
self.assertEqual(len(self.proc.duplicate_h5_groups), 1)
self.assertEqual(self.proc.duplicate_h5_groups[0],
self.fake_results_grp[0])
self.assertEqual(len(self.proc.partial_h5_groups), 1)
self.assertEqual(self.proc.partial_h5_groups[0],
self.fake_results_grp[1])
h5_results_grp = self.proc.compute(override=False)
self.assertEqual(self.fake_results_grp[0], h5_results_grp)
class TestProcLastPartialResult(TestCoreProcessWExistingResults):
def setUp(self, **kwargs):
super(TestProcLastPartialResult,
self).setUp(percent_complete=[75, 50],
parms_dict=[None, None],
status_dset=[True, False],
status_attr=[False, True],
verbose=False, **kwargs)
def test_compute(self):
self.assertEqual(len(self.proc.duplicate_h5_groups), 0)
self.assertEqual(len(self.proc.partial_h5_groups), 2)
for exp in self.fake_results_grp:
self.assertTrue(exp in self.proc.partial_h5_groups)
h5_results_grp = self.proc.compute(override=False)
self.assertEqual(self.fake_results_grp[-1], h5_results_grp)
class TestProcessWExistingResultsDiffFile(TestProcLastPartialResult):
def setUp(self):
self.results_h5_file_path = 'sep_results.h5'
self.h5_f_new = h5py.File(self.results_h5_file_path, mode='w')
super(TestProcessWExistingResultsDiffFile,
self).setUp(h5_target_group=self.h5_f_new)
def test_compute(self):
super(TestProcessWExistingResultsDiffFile, self).test_compute()
self.assertNotEqual(self.proc.h5_results_grp.file, self.h5_main.file)
self.assertEqual(self.proc.h5_results_grp.file, self.h5_f_new)
def tearDown(self):
super(TestProcessWExistingResultsDiffFile, self).tearDown()
self.h5_f_new.close()
delete_existing_file(self.results_h5_file_path)
class TestUsePartialComputationIllegal(TestProcLastPartialResult):
def test_compute(self):
h5_grp = self.h5_main.parent.create_group('Blah')
with self.assertRaises(ValueError):
self.proc.use_partial_computation(h5_grp)
class TestUsePartialComputationLegit(TestProcLastPartialResult):
def test_compute(self):
self.proc.use_partial_computation(self.fake_results_grp[0])
self.assertEqual(len(self.proc.duplicate_h5_groups), 0)
self.assertEqual(len(self.proc.partial_h5_groups), 2)
for exp in self.fake_results_grp:
self.assertTrue(exp in self.proc.partial_h5_groups)
h5_results_grp = self.proc.compute(override=False)
self.assertEqual(self.fake_results_grp[0], h5_results_grp)
class TestMultiBatchCompute(TestCoreProcessNoTest):
def test_compute(self):
self.proc.verbose = True
self.proc._max_pos_per_read = 6
super(TestMultiBatchCompute, self).test_compute()
# TODO: read_data_chunk
# TODO: interrupt computation
# TODO: set_cores, invalid inputs, etc.
# TODO: set_memory
# TODO: Non-callable map_function or unit_computation
# TODO: lazy data chunk read
# TODO: parallel computation indeed being used (roughly N times faster)
# TODO: Timing per batch
if __name__ == '__main__':
unittest.main()
| 22,925 | 39.150613 | 161 | py |
pyUSID-legacy | pyUSID-master-legacy/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import shutil
import matplotlib
matplotlib.use('agg')
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('../..'))
from pyUSID import __version__ as pyusid_version
# - Copy over examples folder to docs/source
# This makes it so that nbsphinx properly loads the notebook images
examples_source = os.path.abspath(os.path.join(
os.path.dirname(__file__), "..", "..", "notebooks"))
examples_dest = os.path.abspath(
os.path.join(os.path.dirname(__file__), "notebooks"))
if os.path.exists(examples_dest):
shutil.rmtree(examples_dest)
os.mkdir(examples_dest)
for root, dirs, files in os.walk(examples_source):
for dr in dirs:
os.mkdir(os.path.join(root.replace(examples_source, examples_dest), dr))
for fil in files:
if os.path.splitext(fil)[1] in [".ipynb", ".md", ".rst"]:
source_filename = os.path.join(root, fil)
dest_filename = source_filename.replace(examples_source, examples_dest)
shutil.copyfile(source_filename, dest_filename)
# -- Project information -----------------------------------------------------
project = 'pyUSID'
copyright = '2018, Suhas Somnath and Chris R. Smith'
author = 'Suhas Somnath and Chris R. Smith'
# The short X.Y version
version = pyusid_version
# The full version, including alpha/beta/rc tags.
release = pyusid_version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'nbsphinx',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.autosectionlabel',
'sphinx.ext.napoleon', # Use either napoleon or numpydoc not both.
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
#source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# Ignore errors during notebook execution (for the time being...)
nbsphinx_allow_errors = True
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'supporting_docs']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Napoleon settings
# https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
napoleon_type_aliases = None
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Generate autosummary even if no references
autosummary_generate = True
autoclass_content = 'both'
autodoc_default_flags = ['members',
'inherited-members',
# 'private-members',
# 'show-inheritance'
]
autodoc_inherit_docstrings = True # If no class summary, inherit base class summary
# -- Options for HTML output -------------------------------------------------
# on_rtd is whether on readthedocs.org, this line of code grabbed from docs.readthedocs.org...
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'pyUSID v0.0a4'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = 'logo_v01.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyUSIDdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyUSID.tex', u'pyUSID Documentation',
u'Suhas Somnath and Chris R. Smith', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyUSID', u'pyUSID Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyUSID', u'pyUSID Documentation',
author, 'pyUSID', 'Framework for storing, visualizing, and processing Universal Spectroscopic and Imaging Data (USID)',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/{.major}'.format(sys.version_info), None),
'numpy': ('https://numpy.org/doc/stable/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org/', None),
'h5py': ('https://docs.h5py.org/en/latest/', None),
'sphinx': ('https://www.sphinx-doc.org/en/master/', None),
'dask': ('https://docs.dask.org/en/latest/', None),
}
# -------------------------------------------------
| 14,888 | 32.160356 | 124 | py |
pyUSID-legacy | pyUSID-master-legacy/notebooks/user_guide/supporting_docs/peak_finding.py | from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
from scipy.signal import find_peaks_cwt
def find_all_peaks(vector, width_bounds, num_steps=20, **kwargs):
"""
This is the function that will be mapped by multiprocess. This is a wrapper around the scipy function.
It uses a parameter - wavelet_widths that is configured outside this function.
Parameters
----------
vector : 1D numpy array
Feature vector containing peaks
width_bounds : tuple / list / iterable
Min and max for the size of the window
num_steps : uint, (optional). Default = 20
Number of different peak widths to search
Returns
-------
peak_indices : list
List of indices of peaks within the prescribed peak widths
"""
# The below numpy array is used to configure the returned function wpeaks
wavelet_widths = np.linspace(width_bounds[0], width_bounds[1], num_steps)
peak_indices = find_peaks_cwt(np.abs(vector), wavelet_widths, **kwargs)
return peak_indices
| 1,077 | 32.6875 | 106 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/test_regression.py | import torch
import torch.nn as nn
import torch.optim as optim
import configs
from data.qmul_loader import get_batch, train_people, test_people
from io_utils import parse_args_regression, get_resume_file
from methods.DKT_regression import DKT
from methods.feature_transfer_regression import FeatureTransfer
import backbone
import numpy as np
# Evaluate a trained regression model (DKT or feature transfer) on QMUL:
# loads the checkpoint for the configured method and reports mean/std MSE
# over params.n_test_epochs evaluation passes.
params = parse_args_regression('test_regression')
np.random.seed(params.seed)
torch.manual_seed(params.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
params.checkpoint_dir = '%scheckpoints/%s/%s_%s' % (configs.save_dir, params.dataset, params.model, params.method)
bb = backbone.Conv3().cuda()
if params.method=='DKT':
    model = DKT(bb).cuda()
    optimizer = None  # DKT's test loop does not need an optimizer
elif params.method=='transfer':
    model = FeatureTransfer(bb).cuda()
    optimizer = optim.Adam([{'params':model.parameters(),'lr':0.001}])
else:
    # BUG FIX: the exception was previously constructed but never raised,
    # so an unknown method fell through to a confusing NameError on `model`.
    raise ValueError('Unrecognised method')
model.load_checkpoint(params.checkpoint_dir)
mse_list = []
for epoch in range(params.n_test_epochs):
    mse = float(model.test_loop(params.n_support, optimizer).cpu().detach().numpy())
    mse_list.append(mse)
print("-------------------")
print("Average MSE: " + str(np.mean(mse_list)) + " +- " + str(np.std(mse_list)))
print("-------------------")
| 1,301 | 31.55 | 114 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/test.py | from pathlib import Path
import torch
import numpy as np
import random
import torch.optim
import torch.utils.data.sampler
import os
import time
from typing import Type
import configs
import backbone
import data.feature_loader as feat_loader
from data.datamgr import SetDataManager
from methods.baselinefinetune import BaselineFinetune
from methods.hypernets.hypernet_poc import HyperNetPOC
from methods.hypernets import hypernet_types
from methods.protonet import ProtoNet
from methods.DKT import DKT
from methods.matchingnet import MatchingNet
from methods.relationnet import RelationNet
from methods.maml import MAML
from methods.hypernets.bayeshmaml import BayesHMAML
from methods.hypernets.hypermaml import HyperMAML
from io_utils import model_dict, parse_args, get_best_file , get_assigned_file
def _set_seed(seed, verbose=True):
if(seed!=0):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if(verbose): print("[INFO] Setting SEED: " + str(seed))
else:
if(verbose): print("[INFO] Setting SEED: None")
def feature_evaluation(cl_data_file, model, n_way = 5, n_support = 5, n_query = 15, adaptation = False):
    """Run one few-shot episode on pre-extracted features and return accuracy (%).

    ``cl_data_file`` maps class label -> list of feature vectors. Samples
    ``n_way`` classes, shuffles each class's features and takes the first
    ``n_support + n_query`` as that class's episode data, then scores
    ``model`` on the query portion.
    """
    class_list = cl_data_file.keys()
    select_class = random.sample(class_list,n_way)
    z_all = []
    for cl in select_class:
        img_feat = cl_data_file[cl]
        perm_ids = np.random.permutation(len(img_feat)).tolist()
        # first n_support entries become the support set, the rest the queries
        z_all.append( [ np.squeeze( img_feat[perm_ids[i]]) for i in range(n_support+n_query) ] ) # stack each batch
    z_all = torch.from_numpy(np.array(z_all) )
    model.n_query = n_query
    if adaptation:
        scores = model.set_forward_adaptation(z_all, is_feature = True)
    else:
        scores, _ = model.set_forward(z_all, is_feature = True)
    pred = scores.data.cpu().numpy().argmax(axis = 1)
    # ground truth: query samples are ordered class-by-class
    y = np.repeat(range( n_way ), n_query )
    acc = np.mean(pred == y)*100
    return acc
def single_test(params):
    """Run one full evaluation: build the model named by ``params.method``,
    load its best/assigned checkpoint, evaluate 600 episodes on the chosen
    split, append the result line to ./record/results.txt and return
    ``(mean_accuracy, eval_time)``.
    """
    acc_all = []
    iter_num = 600
    n_query = max(1, int(16 * params.test_n_way / params.train_n_way)) # if test_n_way is smaller than train_n_way, reduce n_query to keep batch size small
    print("n_query", n_query)
    few_shot_params = dict(n_way = params.test_n_way , n_support = params.n_shot, n_query=n_query)
    if params.dataset in ['omniglot', 'cross_char']:
        assert params.model == 'Conv4' and not params.train_aug ,f'model = {params.model}, train_aug= {params.train_aug} ' \
                                                                 f'omniglot only support Conv4 without augmentation'
        # params.model = 'Conv4S'
    # --- model construction, dispatched on params.method ---
    if params.method == 'baseline':
        model = BaselineFinetune( model_dict[params.model], **few_shot_params )
    elif params.method == 'baseline++':
        model = BaselineFinetune( model_dict[params.model], loss_type = 'dist', **few_shot_params )
    elif params.method == 'protonet':
        model = ProtoNet( model_dict[params.model], **few_shot_params )
    elif params.method == 'DKT':
        model = DKT(model_dict[params.model], **few_shot_params)
    elif params.method == 'matchingnet':
        model = MatchingNet( model_dict[params.model], **few_shot_params )
    elif params.method in ['relationnet', 'relationnet_softmax']:
        # relation nets need an un-flattened feature map
        if params.model == 'Conv4':
            feature_model = backbone.Conv4NP
        elif params.model == 'Conv6':
            feature_model = backbone.Conv6NP
        elif params.model == 'Conv4S':
            feature_model = backbone.Conv4SNP
        else:
            feature_model = lambda: model_dict[params.model]( flatten = False )
        loss_type = 'mse' if params.method == 'relationnet' else 'softmax'
        model = RelationNet( feature_model, loss_type = loss_type , **few_shot_params )
    elif params.method in ['maml' , 'maml_approx']:
        # MAML needs the fast-weight-capable layer variants
        backbone.ConvBlock.maml = True
        backbone.SimpleBlock.maml = True
        backbone.BottleneckBlock.maml = True
        backbone.ResNet.maml = True
        model = MAML( model_dict[params.model], params=params, approx = (params.method == 'maml_approx') , **few_shot_params )
        if params.dataset in ['omniglot', 'cross_char']: #maml use different parameter in omniglot
            model.n_task = 32
            model.task_update_num = 1
            model.train_lr = 0.1
    elif params.method in list(hypernet_types.keys()):
        few_shot_params['n_query'] = 15
        hn_type: Type[HyperNetPOC] = hypernet_types[params.method]
        model = hn_type(model_dict[params.model], params=params, **few_shot_params)
        # model = HyperNetPOC(model_dict[params.model], **few_shot_params)
    elif params.method == 'hyper_maml' or params.method == 'bayes_hmaml':
        if params.method == 'bayes_hmaml':
            model = BayesHMAML(model_dict[params.model], params=params, approx=(params.method == 'maml_approx'), **few_shot_params)
        else:
            model = HyperMAML(model_dict[params.model], params=params, approx=(params.method == 'maml_approx'),
                              **few_shot_params)
        if params.dataset in ['omniglot', 'cross_char']: # maml use different parameter in omniglot
            model.n_task = 32
            model.train_lr = 0.1
    else:
        raise ValueError('Unknown method')
    few_shot_params["n_query"] = 15
    model = model.cuda()
    # --- locate the checkpoint directory matching the training configuration ---
    checkpoint_dir = '%s/checkpoints/%s/%s_%s' %(
        configs.save_dir,
        params.dataset,
        params.model,
        params.method
    )
    if params.train_aug:
        checkpoint_dir += '_aug'
    if not params.method in ['baseline', 'baseline++'] :
        checkpoint_dir += '_%dway_%dshot' %( params.train_n_way, params.n_shot)
    if params.checkpoint_suffix != "":
        checkpoint_dir = checkpoint_dir + "_" + params.checkpoint_suffix
    if params.dataset == "cross":
        # cross-domain models are trained on miniImagenet
        if not Path(checkpoint_dir).exists():
            checkpoint_dir = checkpoint_dir.replace("cross", "miniImagenet")
    assert Path(checkpoint_dir).exists(), checkpoint_dir
    #modelfile = get_resume_file(checkpoint_dir)
    if not params.method in ['baseline', 'baseline++'] :
        if params.save_iter != -1:
            modelfile = get_assigned_file(checkpoint_dir,params.save_iter)
        else:
            modelfile = get_best_file(checkpoint_dir)
        print("Using model file", modelfile)
        if modelfile is not None:
            tmp = torch.load(modelfile)
            model.load_state_dict(tmp['state'])
        else:
            print("[WARNING] Cannot find 'best_file.tar' in: " + str(checkpoint_dir))
    split = params.split
    if params.save_iter != -1:
        split_str = split + "_" +str(params.save_iter)
    else:
        split_str = split
    eval_time = 0
    # --- evaluation: episodic loader for adaptation-style methods,
    #     pre-extracted features for the rest ---
    if params.method in ['maml', 'maml_approx', 'hyper_maml','bayes_hmaml', 'DKT'] + list(hypernet_types.keys()): #maml do not support testing with feature
        if 'Conv' in params.model:
            if params.dataset in ['omniglot', 'cross_char']:
                image_size = 28
            else:
                image_size = 84
        else:
            image_size = 224
        datamgr = SetDataManager(image_size, n_eposide = iter_num, **few_shot_params)
        if params.dataset == 'cross':
            if split == 'base':
                loadfile = configs.data_dir['miniImagenet'] + 'all.json'
            else:
                loadfile = configs.data_dir['CUB'] + split +'.json'
        elif params.dataset == 'cross_char':
            if split == 'base':
                loadfile = configs.data_dir['omniglot'] + 'noLatin.json'
            else:
                loadfile = configs.data_dir['emnist'] + split +'.json'
        else:
            loadfile = configs.data_dir[params.dataset] + split + '.json'
        novel_loader = datamgr.get_data_loader( loadfile, aug = False)
        if params.adaptation:
            model.task_update_num = 100 if params.hn_val_epochs == -1 else params.hn_val_epochs
            #We perform adaptation on MAML simply by updating more times.
        model.eval()
        model.single_test = True
        if isinstance(model, (MAML, BayesHMAML, HyperMAML)):
            acc_mean, acc_std, eval_time, *_ = model.test_loop( novel_loader, return_std = True, return_time=True)
        else:
            acc_mean, acc_std, *_ = model.test_loop( novel_loader, return_std = True)
    else:
        novel_file = os.path.join( checkpoint_dir.replace("checkpoints","features"), split_str +".hdf5") #default split = novel, but you can also test base or val classes
        cl_data_file = feat_loader.init_loader(novel_file)
        for i in range(iter_num):
            acc = feature_evaluation(cl_data_file, model, adaptation = params.adaptation, **few_shot_params)
            acc_all.append(acc)
        acc_all = np.asarray(acc_all)
        acc_mean = np.mean(acc_all)
        acc_std = np.std(acc_all)
        print('%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num)))
    # --- append a one-line summary (95% confidence interval) to the record file ---
    with open('./record/results.txt' , 'a') as f:
        timestamp = time.strftime("%Y%m%d-%H%M%S", time.localtime())
        aug_str = '-aug' if params.train_aug else ''
        aug_str += '-adapted' if params.adaptation else ''
        if params.method in ['baseline', 'baseline++'] :
            exp_setting = '%s-%s-%s-%s%s %sshot %sway_test' %(params.dataset, split_str, params.model, params.method, aug_str, params.n_shot, params.test_n_way )
        else:
            exp_setting = '%s-%s-%s-%s%s %sshot %sway_train %sway_test' %(params.dataset, split_str, params.model, params.method, aug_str , params.n_shot , params.train_n_way, params.test_n_way )
        acc_str = '%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num))
        f.write( 'Time: %s, Setting: %s, Acc: %s \n' %(timestamp,exp_setting,acc_str) )
    print("Test loop time:", eval_time)
    return acc_mean, eval_time
def perform_test(params):
    """Repeat single_test once per seed in [seed, seed + repeat) and return
    aggregated accuracy/time statistics as a dict."""
    base_seed, n_runs = params.seed, params.repeat
    accuracies, timings = [], []
    for run_seed in range(base_seed, base_seed + n_runs):
        # seed 0 is the "unseeded" sentinel understood by _set_seed
        _set_seed(run_seed if base_seed != 0 else 0)
        run_acc, run_time = single_test(params)
        accuracies.append(run_acc)
        timings.append(run_time)
    mean_acc, std_acc = np.mean(accuracies), np.std(accuracies)
    mean_time, std_time = np.mean(timings), np.std(timings)
    print("-----------------------------")
    print(
        f'Seeds = {n_runs} | Overall Test Acc = {mean_acc:.2f} +- {std_acc:.2f}. Eval time: {mean_time:.2f} +- {std_time:.2f}' )
    print("-----------------------------")
    return {
        "accuracy_mean": mean_acc,
        "accuracy_std": std_acc,
        "time_mean": mean_time,
        "time_std": std_time,
        "n_seeds": n_runs
    }
def main():
    """Entry point: parse CLI test arguments and run the evaluation."""
    perform_test(parse_args('test'))
if __name__ == '__main__':
main()
| 11,323 | 40.028986 | 195 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/test_uncertainty.py | import torch
import numpy as np
import random
from torch.autograd import Variable
import torch.nn as nn
import torch.optim
import json
import torch.utils.data.sampler
import os
import glob
import time
import configs
import backbone
import data.feature_loader as feat_loader
from data.datamgr import SetDataManager
from methods.baselinetrain import BaselineTrain
from methods.baselinefinetune import BaselineFinetune
from methods.protonet import ProtoNet
from methods.DKT import DKT
from methods.matchingnet import MatchingNet
from methods.relationnet import RelationNet
from methods.maml import MAML
from io_utils import model_dict, get_resume_file, parse_args, get_best_file , get_assigned_file
def _set_seed(seed, verbose=True):
if(seed!=0):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if(verbose): print("[INFO] Setting SEED: " + str(seed))
else:
if(verbose): print("[INFO] Setting SEED: None")
class ECELoss(nn.Module):
    """ Calculates the Expected Calibration Error of a model.
    (This isn't necessary for temperature scaling, just a cool metric).

    The input to this loss is the logits of a model, NOT the softmax scores.
    This divides the confidence outputs into equally-sized interval bins.
    In each bin, we compute the confidence gap:

    bin_gap = | avg_confidence_in_bin - accuracy_in_bin |

    We then return a weighted average of the gaps, based on the number
    of samples in each bin.

    Adapted from: https://github.com/gpleiss/temperature_scaling
    See: Naeini, Mahdi Pakdaman, Gregory F. Cooper, and Milos Hauskrecht.
    "Obtaining Well Calibrated Probabilities Using Bayesian Binning." AAAI.
    2015.
    """
    def __init__(self, n_bins=15):
        """
        n_bins (int): number of confidence interval bins
        """
        super(ECELoss, self).__init__()
        bin_boundaries = torch.linspace(0, 1, n_bins + 1)
        # lower/upper edge of each confidence bin
        self.bin_lowers = bin_boundaries[:-1]
        self.bin_uppers = bin_boundaries[1:]
    def calibrate(self, logits, labels, iterations=50, lr=0.01):
        """Fit a scalar temperature minimizing NLL of logits/temperature.

        Runs LBFGS on GPU (logits/labels are assumed to be CUDA tensors).
        Returns the raw temperature tensor; it is unconstrained, so it may be
        <= 0 and callers should filter such values (a Softplus
        reparameterization would enforce positivity instead).
        """
        temperature_raw = torch.ones(1, requires_grad=True, device="cuda")
        nll_criterion = nn.CrossEntropyLoss().cuda()
        optimizer = torch.optim.LBFGS([temperature_raw], lr=lr, max_iter=iterations)
        def closure():
            if torch.is_grad_enabled(): optimizer.zero_grad()
            loss = nll_criterion(logits / temperature_raw.expand_as(logits), labels)
            if loss.requires_grad: loss.backward()
            return loss
        optimizer.step(closure)
        return temperature_raw
    def forward(self, logits, labels, temperature=1.0, onevsrest=False):
        """Return the ECE of ``logits`` (pre-softmax) w.r.t. ``labels``.

        temperature: divide logits by this scalar before normalization.
        onevsrest: if True, use per-class sigmoids renormalized to sum to 1
        instead of a softmax (e.g. for one-vs-rest classifiers).
        """
        logits_scaled = logits / temperature
        if onevsrest:
            # compute the sigmoid once (the original evaluated it twice)
            sig = torch.sigmoid(logits_scaled)
            softmaxes = sig / torch.sum(sig, dim=1, keepdim=True)
        else:
            softmaxes = torch.softmax(logits_scaled, dim=1)
        confidences, predictions = torch.max(softmaxes, 1)
        accuracies = predictions.eq(labels)
        ece = torch.zeros(1, device=logits.device)
        for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):
            # Calculated |confidence - accuracy| in each bin
            # (use boolean '&' rather than multiplying byte masks)
            in_bin = confidences.gt(bin_lower.item()) & confidences.le(bin_upper.item())
            prop_in_bin = in_bin.float().mean()
            if prop_in_bin.item() > 0:
                accuracy_in_bin = accuracies[in_bin].float().mean()
                avg_confidence_in_bin = confidences[in_bin].mean()
                ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
        return ece
def get_logits_targets(params):
    """Build the model for ``params.method``, load its checkpoint, and collect
    raw logits plus ground-truth targets over 600 evaluation episodes.

    Returns ``(logits, targets)`` as two concatenated CUDA tensors, used by
    the calibration/ECE analysis in ``main``.
    """
    acc_all = []
    iter_num = 600
    few_shot_params = dict(n_way = params.test_n_way , n_support = params.n_shot)
    if params.dataset in ['omniglot', 'cross_char']:
        assert params.model == 'Conv4' and not params.train_aug ,'omniglot only support Conv4 without augmentation'
        params.model = 'Conv4S'
    # --- model construction, dispatched on params.method ---
    if params.method == 'baseline':
        model = BaselineFinetune( model_dict[params.model], **few_shot_params )
    elif params.method == 'baseline++':
        model = BaselineFinetune( model_dict[params.model], loss_type = 'dist', **few_shot_params )
    elif params.method == 'protonet':
        model = ProtoNet( model_dict[params.model], **few_shot_params )
    elif params.method == 'DKT':
        model = DKT(model_dict[params.model], **few_shot_params)
    elif params.method == 'matchingnet':
        model = MatchingNet( model_dict[params.model], **few_shot_params )
    elif params.method in ['relationnet', 'relationnet_softmax']:
        # relation nets need an un-flattened feature map
        if params.model == 'Conv4':
            feature_model = backbone.Conv4NP
        elif params.model == 'Conv6':
            feature_model = backbone.Conv6NP
        elif params.model == 'Conv4S':
            feature_model = backbone.Conv4SNP
        else:
            feature_model = lambda: model_dict[params.model]( flatten = False )
        loss_type = 'mse' if params.method == 'relationnet' else 'softmax'
        model = RelationNet( feature_model, loss_type = loss_type , **few_shot_params )
    elif params.method in ['maml' , 'maml_approx']:
        # MAML needs the fast-weight-capable layer variants
        backbone.ConvBlock.maml = True
        backbone.SimpleBlock.maml = True
        backbone.BottleneckBlock.maml = True
        backbone.ResNet.maml = True
        model = MAML( model_dict[params.model], approx = (params.method == 'maml_approx') , **few_shot_params )
        if params.dataset in ['omniglot', 'cross_char']: #maml use different parameter in omniglot
            model.n_task = 32
            model.task_update_num = 1
            model.train_lr = 0.1
    else:
        raise ValueError('Unknown method')
    model = model.cuda()
    # --- locate and load the checkpoint matching the training configuration ---
    checkpoint_dir = '%s/checkpoints/%s/%s_%s' %(configs.save_dir, params.dataset, params.model, params.method)
    if params.train_aug:
        checkpoint_dir += '_aug'
    if not params.method in ['baseline', 'baseline++'] :
        checkpoint_dir += '_%dway_%dshot' %( params.train_n_way, params.n_shot)
    #modelfile = get_resume_file(checkpoint_dir)
    if not params.method in ['baseline', 'baseline++'] :
        if params.save_iter != -1:
            modelfile = get_assigned_file(checkpoint_dir,params.save_iter)
        else:
            modelfile = get_best_file(checkpoint_dir)
        if modelfile is not None:
            tmp = torch.load(modelfile)
            model.load_state_dict(tmp['state'])
        else:
            print("[WARNING] Cannot find 'best_file.tar' in: " + str(checkpoint_dir))
    split = params.split
    if params.save_iter != -1:
        split_str = split + "_" +str(params.save_iter)
    else:
        split_str = split
    # --- collect logits: episodic loader for maml/DKT, features otherwise ---
    if params.method in ['maml', 'maml_approx', 'DKT']: #maml do not support testing with feature
        if 'Conv' in params.model:
            if params.dataset in ['omniglot', 'cross_char']:
                image_size = 28
            else:
                image_size = 84
        else:
            image_size = 224
        datamgr = SetDataManager(image_size, n_eposide = iter_num, n_query = 15 , **few_shot_params)
        if params.dataset == 'cross':
            if split == 'base':
                loadfile = configs.data_dir['miniImagenet'] + 'all.json'
            else:
                loadfile = configs.data_dir['CUB'] + split +'.json'
        elif params.dataset == 'cross_char':
            if split == 'base':
                loadfile = configs.data_dir['omniglot'] + 'noLatin.json'
            else:
                loadfile = configs.data_dir['emnist'] + split +'.json'
        else:
            loadfile = configs.data_dir[params.dataset] + split + '.json'
        novel_loader = datamgr.get_data_loader( loadfile, aug = False)
        if params.adaptation:
            model.task_update_num = 100 #We perform adaptation on MAML simply by updating more times.
        model.eval()
        logits_list = list()
        targets_list = list()
        for i, (x,_) in enumerate(novel_loader):
            logits = model.get_logits(x).detach()
            # targets: query samples are ordered class-by-class
            targets = torch.tensor(np.repeat(range(params.test_n_way), model.n_query)).cuda()
            logits_list.append(logits) #.cpu().detach().numpy())
            targets_list.append(targets) #.cpu().detach().numpy())
    else:
        novel_file = os.path.join( checkpoint_dir.replace("checkpoints","features"), split_str +".hdf5")
        cl_data_file = feat_loader.init_loader(novel_file)
        logits_list = list()
        targets_list = list()
        n_query = 15
        n_way = few_shot_params['n_way']
        n_support = few_shot_params['n_support']
        class_list = cl_data_file.keys()
        for i in range(iter_num):
            #----------------------
            # sample one episode from the pre-extracted features
            select_class = random.sample(class_list,n_way)
            z_all = []
            for cl in select_class:
                img_feat = cl_data_file[cl]
                perm_ids = np.random.permutation(len(img_feat)).tolist()
                z_all.append( [ np.squeeze( img_feat[perm_ids[i]]) for i in range(n_support+n_query) ] ) # stack each batch
            z_all = torch.from_numpy(np.array(z_all))
            model.n_query = n_query
            logits = model.set_forward(z_all, is_feature = True).detach()
            targets = torch.tensor(np.repeat(range(n_way), n_query)).cuda()
            logits_list.append(logits)
            targets_list.append(targets)
            #----------------------
    return torch.cat(logits_list, 0), torch.cat(targets_list, 0)
def main():
    """Calibrate a temperature on unseeded runs, then measure ECE across
    seeds [seed, seed + repeat) and print the aggregate."""
    params = parse_args('test')
    seed = params.seed
    repeat = params.repeat
    # 1. Find the value of temperature (calibration)
    print("Calibration: finding temperature hyperparameter...")
    ece_module = ECELoss()
    temperature_list = list()
    for _ in range(repeat):
        _set_seed(0) # random seed for each calibration run
        logits, targets = get_logits_targets(parse_args('test'))
        temperature = ece_module.calibrate(logits, targets, iterations=300, lr=0.01).item()
        if(temperature>0): temperature_list.append(temperature)
        print("Calibration: temperature", temperature, "; mean temperature", np.mean(temperature_list))
    # Filtering invalid temperatures (e.g. temp<0); fall back to no scaling
    if(len(temperature_list)>0):temperature = np.mean(temperature_list)
    else: temperature = 1.0
    # 2. Use the temperature to record the ECE
    # repeat the test N times changing the seed in range [seed, seed+repeat]
    ece_list = list()
    for i in range(seed, seed+repeat):
        if(seed!=0): _set_seed(i)
        else: _set_seed(0)
        logits, targets = get_logits_targets(parse_args('test'))
        #ece = ece_module.forward(logits, targets, temperature, onevsrest=params.method=='DKT').item()
        ece = ece_module.forward(logits, targets, temperature, onevsrest=False).item()
        ece_list.append(ece)
        print("ECE:", np.mean(ece_list), "+-", np.std(ece_list))
    # 3. Print the final ECE (averaged over all seeds)
    print("-----------------------------")
    print('Seeds = %d | Overall ECE = %4.4f +- %4.4f' %(repeat, np.mean(ece_list), np.std(ece_list)))
    print("-----------------------------")
if __name__ == '__main__':
main()
| 11,741 | 43.309434 | 127 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/utils.py | import torch
import numpy as np
def one_hot(y, num_class):
    """Return a float (len(y), num_class) matrix with a 1 at each label index."""
    encoded = torch.zeros(len(y), num_class)
    encoded[torch.arange(len(y)), y] = 1
    return encoded
def DBindex(cl_data_file):
    """Davies-Bouldin index of the class-wise feature clusters.

    cl_data_file maps class label -> (n_samples, dim) feature array. For each
    class the worst-case ratio (spread_i + spread_j) / dist(mean_i, mean_j)
    over all other classes is taken; the mean of these is returned (lower is
    better separated).
    """
    feats = [cl_data_file[cl] for cl in cl_data_file.keys()]
    n_classes = len(feats)
    means = np.array([np.mean(f, axis=0) for f in feats])
    # RMS distance of each class's samples to its own mean
    spreads = [np.sqrt(np.mean(np.sum(np.square(f - m), axis=1)))
               for f, m in zip(feats, means)]
    # pairwise euclidean distances between class means
    mdists = np.sqrt(np.sum(np.square(means[:, None, :] - means[None, :, :]), axis=2))
    worst_ratios = []
    for i in range(n_classes):
        ratios = [(spreads[i] + spreads[j]) / mdists[i, j]
                  for j in range(n_classes) if j != i]
        worst_ratios.append(np.max(ratios))
    return np.mean(worst_ratios)
def sparsity(cl_data_file):
    """Mean number of non-zero activations per feature vector, averaged
    first within each class and then across classes."""
    per_class = [np.mean([np.count_nonzero(x) for x in cl_data_file[cl]])
                 for cl in cl_data_file.keys()]
    return np.mean(per_class)
| 1,052 | 31.90625 | 102 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/backbone.py | # This code is modified from https://github.com/facebookresearch/low-shot-shrink-hallucinate
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
from torch.nn.utils.weight_norm import WeightNorm
# Basic ResNet model
def init_layer(L):
    """Initialize a layer in place: fan-in normal init for Conv2d,
    identity affine (weight=1, bias=0) for BatchNorm2d; other layers
    are left untouched."""
    if isinstance(L, nn.Conv2d):
        fan_in = L.out_channels * L.kernel_size[0] * L.kernel_size[1]
        L.weight.data.normal_(0, math.sqrt(2.0 / float(fan_in)))
    elif isinstance(L, nn.BatchNorm2d):
        L.weight.data.fill_(1)
        L.bias.data.fill_(0)
class distLinear(nn.Module):
    """Cosine-similarity classifier head (used by baseline++).

    Scores are scale_factor * cos(x, w_c) for each class weight w_c. With
    class_wise_learnable_norm, WeightNorm reparameterizes each class weight
    into a direction and a learnable norm (see issues #4 and #8 in the
    upstream repository).
    """
    def __init__(self, indim, outdim):
        super(distLinear, self).__init__()
        self.L = nn.Linear( indim, outdim, bias = False)
        self.class_wise_learnable_norm = True #See the issue#4&8 in the github
        if self.class_wise_learnable_norm:
            WeightNorm.apply(self.L, 'weight', dim=0) #split the weight update component to direction and norm
        if outdim <=200:
            self.scale_factor = 2 #a fixed scale factor to scale the output of cos value into a reasonably large input for softmax
        else:
            self.scale_factor = 10 #in omniglot, a larger scale factor is required to handle >1000 output classes.
    def forward(self, x):
        # L2-normalize the inputs (epsilon guards against zero vectors)
        x_norm = torch.norm(x, p=2, dim =1).unsqueeze(1).expand_as(x)
        x_normalized = x.div(x_norm+ 0.00001)
        if not self.class_wise_learnable_norm:
            # manually normalize class weights when WeightNorm is not applied
            L_norm = torch.norm(self.L.weight.data, p=2, dim =1).unsqueeze(1).expand_as(self.L.weight.data)
            self.L.weight.data = self.L.weight.data.div(L_norm + 0.00001)
        cos_dist = self.L(x_normalized) #matrix product by forward function, but when using WeightNorm, this also multiply the cosine distance by a class-wise learnable norm, see the issue#4&8 in the github
        scores = self.scale_factor* (cos_dist)
        return scores
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension: (N, ...) -> (N, -1)."""
    def __init__(self):
        super(Flatten, self).__init__()
    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
class Linear_fw(nn.Linear): # used in MAML to forward input with fast weight
    """nn.Linear that can forward with temporarily adapted "fast" weights.

    MAML attaches per-task adapted tensors to ``weight.fast`` / ``bias.fast``;
    when both are set the layer uses them instead of its regular parameters.
    """
    def __init__(self, in_features, out_features):
        super(Linear_fw, self).__init__(in_features, out_features)
        # lazy hack: fast-weight slots live directly on the Parameters
        self.weight.fast = None
        self.bias.fast = None
    def forward(self, x):
        use_fast = self.weight.fast is not None and self.bias.fast is not None
        if not use_fast:
            return super(Linear_fw, self).forward(x)
        # weight.fast is the temporarily adapted weight
        return F.linear(x, self.weight.fast, self.bias.fast)
class BLinear_fw(Linear_fw): # used in BHMAML to forward input with fast weight
    """Bayesian variant of Linear_fw: ``weight.fast`` / ``bias.fast`` hold a
    *list* of sampled fast weights, and the forward pass averages the
    predictions of every sample."""
    def __init__(self, in_features, out_features):
        super(BLinear_fw, self).__init__(in_features, out_features)
        # slots for the variational posterior parameters of each weight
        self.weight.logvar = None
        self.weight.mu = None
        self.bias.logvar = None
        self.bias.mu = None
    def forward(self, x):
        if self.weight.fast is None or self.bias.fast is None:
            return super(BLinear_fw, self).forward(x)
        # average the prediction over all sampled (weight, bias) pairs
        sampled = [F.linear(x, w, b) for w, b in zip(self.weight.fast, self.bias.fast)]
        return sum(sampled) / len(sampled)
class Conv2d_fw(nn.Conv2d): #used in MAML to forward input with fast weight
    """nn.Conv2d that can be evaluated with temporarily adapted ("fast") weights."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
        super(Conv2d_fw, self).__init__(in_channels, out_channels, kernel_size,
                                        stride=stride, padding=padding, bias=bias)
        self.weight.fast = None
        if self.bias is not None:
            self.bias.fast = None

    def forward(self, x):
        if self.bias is None:
            # Bias-free convolution: only the weight may have a fast version.
            if self.weight.fast is None:
                return super(Conv2d_fw, self).forward(x)
            return F.conv2d(x, self.weight.fast, None, stride=self.stride, padding=self.padding)
        if self.weight.fast is not None and self.bias.fast is not None:
            return F.conv2d(x, self.weight.fast, self.bias.fast, stride=self.stride, padding=self.padding)
        return super(Conv2d_fw, self).forward(x)
class BatchNorm2d_fw(nn.BatchNorm2d): #used in MAML to forward input with fast weight
    """nn.BatchNorm2d that can be evaluated with temporarily adapted ("fast") weights.

    Always normalizes with *batch* statistics (training=True, momentum=1),
    following the batch-norm hack of Kate Rakelly in pytorch-maml/src/layers.py.
    """

    def __init__(self, num_features):
        super(BatchNorm2d_fw, self).__init__(num_features)
        self.weight.fast = None
        self.bias.fast = None

    def forward(self, x):
        # Fresh (discarded) running stats each call.  Bug fix: allocate them on
        # the input's device instead of hard-coding .cuda(), so CPU-only
        # execution also works; behavior on CUDA inputs is unchanged.
        running_mean = torch.zeros(x.data.size()[1], device=x.device)
        running_var = torch.ones(x.data.size()[1], device=x.device)
        if self.weight.fast is not None and self.bias.fast is not None:
            out = F.batch_norm(x, running_mean, running_var, self.weight.fast,
                               self.bias.fast, training=True, momentum=1)
            # batch_norm momentum hack: follow hack of Kate Rakelly in pytorch-maml/src/layers.py
        else:
            out = F.batch_norm(x, running_mean, running_var, self.weight,
                               self.bias, training=True, momentum=1)
        return out
# Simple Conv Block
class ConvBlock(nn.Module):
    """Conv3x3 -> BatchNorm -> ReLU (-> MaxPool2d(2) when ``pool`` is True).

    The class attribute ``maml`` switches to the fast-weight layer variants
    (Conv2d_fw / BatchNorm2d_fw) used for MAML-style inner-loop adaptation.
    """
    maml = False #Default
    def __init__(self, indim, outdim, pool = True, padding = 1):
        super(ConvBlock, self).__init__()
        self.indim = indim
        self.outdim = outdim
        if self.maml:
            self.C = Conv2d_fw(indim, outdim, 3, padding = padding)
            self.BN = BatchNorm2d_fw(outdim)
        else:
            self.C = nn.Conv2d(indim, outdim, 3, padding= padding)
            self.BN = nn.BatchNorm2d(outdim)
        self.relu = nn.ReLU(inplace=True)
        self.parametrized_layers = [self.C, self.BN, self.relu]
        if pool:
            self.pool = nn.MaxPool2d(2)
            self.parametrized_layers.append(self.pool)
        for layer in self.parametrized_layers:
            init_layer(layer)  # init_layer is defined elsewhere in this module
        self.trunk = nn.Sequential(*self.parametrized_layers)
    def forward(self,x):
        out = self.trunk(x)
        return out
# Simple ResNet Block
class SimpleBlock(nn.Module):
    """Basic two-conv residual block (as in ResNet-18/34).

    ``half_res`` halves the spatial resolution via stride 2 in the first conv.
    The class attribute ``maml`` switches to the fast-weight layer variants.
    """
    maml = False #Default
    def __init__(self, indim, outdim, half_res):
        super(SimpleBlock, self).__init__()
        self.indim = indim
        self.outdim = outdim
        if self.maml:
            self.C1 = Conv2d_fw(indim, outdim, kernel_size=3, stride=2 if half_res else 1, padding=1, bias=False)
            self.BN1 = BatchNorm2d_fw(outdim)
            self.C2 = Conv2d_fw(outdim, outdim,kernel_size=3, padding=1,bias=False)
            self.BN2 = BatchNorm2d_fw(outdim)
        else:
            self.C1 = nn.Conv2d(indim, outdim, kernel_size=3, stride=2 if half_res else 1, padding=1, bias=False)
            self.BN1 = nn.BatchNorm2d(outdim)
            self.C2 = nn.Conv2d(outdim, outdim,kernel_size=3, padding=1,bias=False)
            self.BN2 = nn.BatchNorm2d(outdim)
        self.relu1 = nn.ReLU(inplace=True)
        self.relu2 = nn.ReLU(inplace=True)
        self.parametrized_layers = [self.C1, self.C2, self.BN1, self.BN2]
        self.half_res = half_res
        # if the input number of channels is not equal to the output, then need a 1x1 convolution
        if indim!=outdim:
            if self.maml:
                self.shortcut = Conv2d_fw(indim, outdim, 1, 2 if half_res else 1, bias=False)
                self.BNshortcut = BatchNorm2d_fw(outdim)
            else:
                self.shortcut = nn.Conv2d(indim, outdim, 1, 2 if half_res else 1, bias=False)
                self.BNshortcut = nn.BatchNorm2d(outdim)
            self.parametrized_layers.append(self.shortcut)
            self.parametrized_layers.append(self.BNshortcut)
            self.shortcut_type = '1x1'
        else:
            self.shortcut_type = 'identity'
        for layer in self.parametrized_layers:
            init_layer(layer)  # init_layer is defined elsewhere in this module
    def forward(self, x):
        # conv-bn-relu -> conv-bn, then add the (possibly projected) shortcut.
        out = self.C1(x)
        out = self.BN1(out)
        out = self.relu1(out)
        out = self.C2(out)
        out = self.BN2(out)
        short_out = x if self.shortcut_type == 'identity' else self.BNshortcut(self.shortcut(x))
        out = out + short_out
        out = self.relu2(out)
        return out
# Bottleneck block
class BottleneckBlock(nn.Module):
    """Bottleneck residual block (1x1 reduce -> 3x3 -> 1x1 expand), as in ResNet-50+.

    ``half_res`` halves the spatial resolution via stride 2 in the 3x3 conv.
    The class attribute ``maml`` switches to the fast-weight layer variants.
    """
    maml = False #Default
    def __init__(self, indim, outdim, half_res):
        super(BottleneckBlock, self).__init__()
        # The inner 3x3 conv operates at a quarter of the output width.
        bottleneckdim = int(outdim/4)
        self.indim = indim
        self.outdim = outdim
        if self.maml:
            self.C1 = Conv2d_fw(indim, bottleneckdim, kernel_size=1, bias=False)
            self.BN1 = BatchNorm2d_fw(bottleneckdim)
            # NOTE(review): C2 keeps its bias (no bias=False) unlike C1/C3 —
            # possibly unintentional, but preserved to keep checkpoints loadable.
            self.C2 = Conv2d_fw(bottleneckdim, bottleneckdim, kernel_size=3, stride=2 if half_res else 1,padding=1)
            self.BN2 = BatchNorm2d_fw(bottleneckdim)
            self.C3 = Conv2d_fw(bottleneckdim, outdim, kernel_size=1, bias=False)
            self.BN3 = BatchNorm2d_fw(outdim)
        else:
            self.C1 = nn.Conv2d(indim, bottleneckdim, kernel_size=1, bias=False)
            self.BN1 = nn.BatchNorm2d(bottleneckdim)
            self.C2 = nn.Conv2d(bottleneckdim, bottleneckdim, kernel_size=3, stride=2 if half_res else 1,padding=1)
            self.BN2 = nn.BatchNorm2d(bottleneckdim)
            self.C3 = nn.Conv2d(bottleneckdim, outdim, kernel_size=1, bias=False)
            self.BN3 = nn.BatchNorm2d(outdim)
        self.relu = nn.ReLU()
        self.parametrized_layers = [self.C1, self.BN1, self.C2, self.BN2, self.C3, self.BN3]
        self.half_res = half_res
        # if the input number of channels is not equal to the output, then need a 1x1 convolution
        if indim!=outdim:
            if self.maml:
                self.shortcut = Conv2d_fw(indim, outdim, 1, stride=2 if half_res else 1, bias=False)
            else:
                self.shortcut = nn.Conv2d(indim, outdim, 1, stride=2 if half_res else 1, bias=False)
            self.parametrized_layers.append(self.shortcut)
            self.shortcut_type = '1x1'
        else:
            self.shortcut_type = 'identity'
        for layer in self.parametrized_layers:
            init_layer(layer)  # init_layer is defined elsewhere in this module
    def forward(self, x):
        # Note: unlike SimpleBlock, the shortcut here has no batch norm.
        short_out = x if self.shortcut_type == 'identity' else self.shortcut(x)
        out = self.C1(x)
        out = self.BN1(out)
        out = self.relu(out)
        out = self.C2(out)
        out = self.BN2(out)
        out = self.relu(out)
        out = self.C3(out)
        out = self.BN3(out)
        out = out + short_out
        out = self.relu(out)
        return out
class ConvNet(nn.Module):
    """Stack of ``depth`` 64-channel ConvBlocks (pooling only in the first 4).

    Optionally ends with global average pooling and/or a Flatten layer.
    """
    def __init__(self, depth, flatten = True, pool=False):
        super(ConvNet,self).__init__()
        trunk = []
        for i in range(depth):
            indim = 3 if i == 0 else 64
            outdim = 64
            B = ConvBlock(indim, outdim, pool = ( i <4 ) ) #only pooling for fist 4 layers
            trunk.append(B)
        if pool:
            trunk.append(nn.AdaptiveAvgPool2d((1,1)))
        if flatten:
            trunk.append(Flatten())
        self.trunk = nn.Sequential(*trunk)
        # NOTE(review): with pool=False and flatten=True the flattened feature
        # size is typically 1600 (for 84x84 inputs), not 64, as the inline
        # comment hints — confirm that callers rely on pool=True here.
        self.final_feat_dim: int = 64 # outdim if pool else 1600
    def forward(self,x):
        out = self.trunk(x)
        return out
class ConvNetNopool(nn.Module): #Relation net use a 4 layer conv with pooling in only first two layers, else no pooling
    """ConvNet variant for RelationNet: pooling/padding only in the first two blocks.

    Output is a spatial feature map of shape [64, 19, 19] (for 84x84 inputs).
    """
    def __init__(self, depth):
        super(ConvNetNopool,self).__init__()
        trunk = []
        for i in range(depth):
            indim = 3 if i == 0 else 64
            outdim = 64
            B = ConvBlock(indim, outdim, pool = ( i in [0,1] ), padding = 0 if i in[0,1] else 1 ) #only first two layer has pooling and no padding
            trunk.append(B)
        self.trunk = nn.Sequential(*trunk)
        self.final_feat_dim = [64,19,19]
    def forward(self,x):
        out = self.trunk(x)
        return out
class ConvNetS(nn.Module): #For omniglot, only 1 input channel, output dim is 64
    """Single-channel (omniglot) ConvNet; only the first input channel is used."""
    def __init__(self, depth, flatten = True):
        super(ConvNetS,self).__init__()
        trunk = []
        for i in range(depth):
            indim = 1 if i == 0 else 64
            outdim = 64
            B = ConvBlock(indim, outdim, pool = ( i <4 ) ) #only pooling for fist 4 layers
            trunk.append(B)
        if flatten:
            trunk.append(Flatten())
        #trunk.append(nn.BatchNorm1d(64))    #TODO remove
        #trunk.append(nn.ReLU(inplace=True)) #TODO remove
        #trunk.append(nn.Linear(64, 64))     #TODO remove
        self.trunk = nn.Sequential(*trunk)
        self.final_feat_dim = 64
    def forward(self,x):
        # Inputs may arrive as 3-channel tensors; keep only channel 0.
        out = x[:,0:1,:,:] #only use the first dimension
        out = self.trunk(out)
        #out = torch.tanh(out) #TODO remove
        return out
class ConvNetSNopool(nn.Module): #Relation net use a 4 layer conv with pooling in only first two layers, else no pooling. For omniglot, only 1 input channel, output dim is [64,5,5]
    """Single-channel (omniglot) RelationNet backbone; spatial output [64, 5, 5]."""
    def __init__(self, depth):
        super(ConvNetSNopool,self).__init__()
        trunk = []
        for i in range(depth):
            indim = 1 if i == 0 else 64
            outdim = 64
            B = ConvBlock(indim, outdim, pool = ( i in [0,1] ), padding = 0 if i in[0,1] else 1 ) #only first two layer has pooling and no padding
            trunk.append(B)
        self.trunk = nn.Sequential(*trunk)
        self.final_feat_dim = [64,5,5]
    def forward(self,x):
        # Inputs may arrive as 3-channel tensors; keep only channel 0.
        out = x[:,0:1,:,:] #only use the first dimension
        out = self.trunk(out)
        return out
class ResNet(nn.Module):
    """Generic 4-stage ResNet built from SimpleBlock or BottleneckBlock.

    The class attribute ``maml`` switches the stem to fast-weight layers;
    the block class is expected to honor its own ``maml`` flag.
    """
    maml = False #Default
    def __init__(self,block,list_of_num_layers, list_of_out_dims, flatten = True):
        # list_of_num_layers specifies number of layers in each stage
        # list_of_out_dims specifies number of output channel for each stage
        super(ResNet,self).__init__()
        assert len(list_of_num_layers)==4, 'Can have only four stages'
        if self.maml:
            conv1 = Conv2d_fw(3, 64, kernel_size=7, stride=2, padding=3,
                                               bias=False)
            bn1 = BatchNorm2d_fw(64)
        else:
            conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                                              bias=False)
            bn1 = nn.BatchNorm2d(64)
        relu = nn.ReLU()
        pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        init_layer(conv1)
        init_layer(bn1)
        trunk = [conv1, bn1, relu, pool1]
        indim = 64
        for i in range(4):
            for j in range(list_of_num_layers[i]):
                # Downsample at the first block of stages 2-4.
                half_res = (i>=1) and (j==0)
                B = block(indim, list_of_out_dims[i], half_res)
                trunk.append(B)
                indim = list_of_out_dims[i]
        if flatten:
            avgpool = nn.AvgPool2d(7)
            trunk.append(avgpool)
            trunk.append(Flatten())
            self.final_feat_dim = indim
        else:
            self.final_feat_dim = [ indim, 7, 7]
        self.trunk = nn.Sequential(*trunk)
    def forward(self,x):
        out = self.trunk(x)
        return out
# Backbone for QMUL regression
class Conv3(nn.Module):
    """Three-layer dilated-conv feature extractor for QMUL regression."""

    def __init__(self):
        super(Conv3, self).__init__()
        self.layer1 = nn.Conv2d(3, 36, 3, stride=2, dilation=2)
        self.layer2 = nn.Conv2d(36, 36, 3, stride=2, dilation=2)
        self.layer3 = nn.Conv2d(36, 36, 3, stride=2, dilation=2)

    def return_clones(self):
        # Detached copies of the conv weights, e.g. for later restoration.
        return [layer.weight.data.clone().detach()
                for layer in (self.layer1, self.layer2, self.layer3)]

    def assign_clones(self, weights_list):
        # Restore weights previously captured by return_clones().
        for layer, weights in zip((self.layer1, self.layer2, self.layer3), weights_list):
            layer.weight.data.copy_(weights)

    def forward(self, x):
        out = F.relu(self.layer1(x))
        out = F.relu(self.layer2(out))
        out = F.relu(self.layer3(out))
        # Flatten all feature maps per sample.
        return out.view(out.size(0), -1)
# just to test the kernel hypothesis
class BackboneKernel(nn.Module):
    """Plain MLP used as a learnable "kernel" feature map.

    ``num_layers`` counts hidden layers; an output projection to
    ``output_dim`` is always appended.  With ``flatten`` a Flatten layer
    is prepended.
    """
    def __init__(self, input_dim: int, output_dim: int, num_layers: int, hidden_dim: int, flatten: bool =False, **kwargs):
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        self.flatten = flatten
        self.model = self.create_model()
    def create_model(self) -> nn.Sequential:
        """Build the Linear/ReLU stack described by the constructor arguments."""
        assert self.num_layers >= 1, "Number of hidden layers must be at least 1"
        modules = [nn.Linear(self.input_dim, self.hidden_dim), nn.ReLU()]
        if self.flatten:
            modules = [nn.Flatten()] + modules
        for i in range(self.num_layers - 1):
            modules.append(nn.Linear(self.hidden_dim, self.hidden_dim))
            modules.append(nn.ReLU())
        modules.append(nn.Linear(self.hidden_dim, self.output_dim))
        model = nn.Sequential(*modules)
        return model
    def forward(self, x, **params):
        """Apply the MLP to ``x`` and return the projected features.

        ``**params`` is accepted for interface compatibility but ignored.
        (The previous docstring here was copy-pasted from gpytorch's Kernel
        class and did not describe this method.)
        """
        out = self.model(x)
        return out
class ConvNet4WithKernel(nn.Module):
    """Conv4 backbone followed by an MLP kernel head (1600 -> 1600 features)."""
    def __init__(self):
        super(ConvNet4WithKernel, self).__init__()
        # 1600 = flattened Conv4 output for 84x84 inputs.
        conv_out_size = 1600
        hn_kernel_layers_no = 4
        hn_kernel_hidden_dim = 64
        self.input_dim = conv_out_size
        self.output_dim = conv_out_size
        self.num_layers = hn_kernel_layers_no
        self.hidden_dim = hn_kernel_hidden_dim
        self.Conv4 = ConvNet(4)
        self.nn_kernel = BackboneKernel(self.input_dim, self.output_dim,
                                        self.num_layers, self.hidden_dim)
        self.final_feat_dim = self.output_dim
    def forward(self, x):
        x = self.Conv4(x)
        out = self.nn_kernel(x)
        return out
class ResNet10WithKernel(nn.Module):
    """Backbone + MLP kernel head.

    NOTE(review): this class appears unfinished — all dimensions are None, so
    constructing BackboneKernel(None, ...) will fail inside nn.Linear; also,
    despite its name it instantiates ConvNet(4), not a ResNet-10. Confirm
    before use.
    """
    def __init__(self):
        super(ResNet10WithKernel, self).__init__()
        conv_out_size = None
        hn_kernel_layers_no = None
        hn_kernel_hidden_dim = None
        self.input_dim = conv_out_size
        self.output_dim = conv_out_size
        self.num_layers = hn_kernel_layers_no
        self.hidden_dim = hn_kernel_hidden_dim
        self.Conv4 = ConvNet(4)
        self.nn_kernel = BackboneKernel(self.input_dim, self.output_dim,
                                        self.num_layers, self.hidden_dim)
    def forward(self, x):
        x = self.Conv4(x)
        # Collapse to a single flattened row before the kernel MLP.
        x = torch.unsqueeze(torch.flatten(x), 0)
        out = self.nn_kernel(x)
        return out
def Conv4():
    # 4-layer 64-channel ConvNet with flattened features.
    return ConvNet(4)
def Conv4Pool():
    # Conv4 variant ending in global average pooling.
    return ConvNet(4, pool=True)
def Conv6():
    # 6-layer ConvNet (pooling only in the first 4 blocks).
    return ConvNet(6)
def Conv4NP():
    # "No pool" Conv4 for RelationNet (spatial output).
    return ConvNetNopool(4)
def Conv6NP():
    # "No pool" Conv6 for RelationNet (spatial output).
    return ConvNetNopool(6)
def Conv4S():
    # Single-channel Conv4 for omniglot.
    return ConvNetS(4)
def Conv4SNP():
    # Single-channel "no pool" Conv4 for omniglot RelationNet.
    return ConvNetSNopool(4)
def ResNet10( flatten = True):
    # ResNet-10: one SimpleBlock per stage.
    return ResNet(SimpleBlock, [1,1,1,1],[64,128,256,512], flatten)
def ResNet12(flatten=True):
    """Wrap learn2learn's ResNet12 backbone; ``flatten`` is accepted but unused."""
    # Third-party dependency: learn2learn must be installed.
    from learn2learn.vision.models import resnet12
    class R12(nn.Module):
        def __init__(self):
            super().__init__()
            self.model = resnet12.ResNet12Backbone()
            # NOTE(review): avgpool/flat are constructed but never applied in
            # forward() — presumably the backbone already pools; confirm.
            self.avgpool = nn.AvgPool2d(14)
            self.flat = nn.Flatten()
            self.final_feat_dim = 640 # 640
        def forward(self, x):
            x = self.model(x)
            return x
    return R12()
def ResNet18( flatten = True):
    # ResNet-18: [2,2,2,2] SimpleBlocks.
    return ResNet(SimpleBlock, [2,2,2,2],[64,128,256,512], flatten)
def ResNet34( flatten = True):
    # ResNet-34: [3,4,6,3] SimpleBlocks.
    return ResNet(SimpleBlock, [3,4,6,3],[64,128,256,512], flatten)
def ResNet50( flatten = True):
    # ResNet-50: bottleneck blocks, 4x wider stage outputs.
    return ResNet(BottleneckBlock, [3,4,6,3], [256,512,1024,2048], flatten)
def ResNet101( flatten = True):
    # ResNet-101: [3,4,23,3] bottleneck blocks.
    return ResNet(BottleneckBlock, [3,4,23,3],[256,512,1024,2048], flatten)
def Conv4WithKernel():
    # Conv4 backbone with an MLP kernel head.
    return ConvNet4WithKernel()
def ResNetWithKernel():
    # See NOTE on ResNet10WithKernel: likely unfinished.
    return ResNet10WithKernel()
| 21,085 | 35.355172 | 206 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/train_regression.py | import torch
import torch.nn as nn
import torch.optim as optim
import configs
from data.qmul_loader import get_batch, train_people, test_people
from io_utils import parse_args_regression, get_resume_file
from methods.DKT_regression import DKT
from methods.feature_transfer_regression import FeatureTransfer
import backbone
import os
import numpy as np
# ---- experiment setup -------------------------------------------------------
params = parse_args_regression('train_regression')

# Fix all RNG seeds for reproducibility.
np.random.seed(params.seed)
torch.manual_seed(params.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

# Make sure the per-dataset checkpoint directory exists, then point
# checkpoint_dir at the model/method-specific prefix used when saving.
params.checkpoint_dir = '%scheckpoints/%s/' % (configs.save_dir, params.dataset)
os.makedirs(params.checkpoint_dir, exist_ok=True)
params.checkpoint_dir = '%scheckpoints/%s/%s_%s' % (configs.save_dir, params.dataset, params.model, params.method)

# Build the backbone and the selected few-shot regression method.
bb = backbone.Conv3().cuda()
if params.method=='DKT':
    model = DKT(bb).cuda()
elif params.method=='transfer':
    model = FeatureTransfer(bb).cuda()
else:
    # Bug fix: the exception was previously constructed but never raised,
    # which later surfaced as a confusing NameError on `model`.
    raise ValueError('Unrecognised method')

# One parameter group for the method head, one for the feature extractor.
optimizer = torch.optim.Adam([{'params': model.model.parameters(), 'lr': 0.001},
                              {'params': model.feature_extractor.parameters(), 'lr': 0.001}])

for epoch in range(params.stop_epoch):
    model.train_loop(epoch, optimizer)

model.save_checkpoint(params.checkpoint_dir)
| 1,334 | 32.375 | 114 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/hn_args.py | from argparse import ArgumentParser
from methods.hypernets.hypernet_poc import ALLOWED_AGGREGATIONS
def add_hn_args_to_parser(parser: ArgumentParser) -> ArgumentParser:
    """Register all hypernetwork-related CLI options on ``parser``.

    Three argument groups are added: HyperShot options, options shared by
    HyperMAML/BayesHMAML, and BayesHMAML-only options.  The same parser
    object is returned for chaining.
    """
    # ---- HyperShot ----------------------------------------------------------
    hypershot_args = parser.add_argument_group("HyperShot-related arguments")
    hypershot_args.add_argument('--hn_adaptation_strategy', type=str, default=None, choices=['increasing_alpha'], help='strategy used for manipulating alpha parameter')
    hypershot_args.add_argument('--hn_alpha_step', type=float, default=0, help='step used to increase alpha from 0 to 1 during adaptation to new task')
    hypershot_args.add_argument("--hn_hidden_size", type=int, default=256, help="HN hidden size")
    hypershot_args.add_argument("--hn_tn_hidden_size", type=int, default=120, help="Target network hidden size")
    hypershot_args.add_argument("--hn_taskset_size", type=int, default=1, help="Taskset size")
    hypershot_args.add_argument("--hn_neck_len", type=int, default=0, help="Number of layers in the neck of the hypernet")
    hypershot_args.add_argument("--hn_head_len", type=int, default=2, help="Number of layers in the heads of the hypernet, must be >= 1")
    hypershot_args.add_argument("--hn_taskset_repeats", type=str, default="10:10-20:5-30:2", help="A sequence of <epoch:taskset_repeats_until_the_epoch>")
    hypershot_args.add_argument("--hn_taskset_print_every", type=int, default=20, help="It's a utility")
    hypershot_args.add_argument("--hn_detach_ft_in_hn", type=int, default=10000, help="Detach FE output before hypernetwork in training *after* this epoch")
    hypershot_args.add_argument("--hn_detach_ft_in_tn", type=int, default=10000, help="Detach FE output before target network in training *after* this epoch")
    hypershot_args.add_argument("--hn_tn_depth", type=int, default=1, help="Depth of target network")
    hypershot_args.add_argument("--hn_dropout", type=float, default=0, help="Dropout probability in hypernet")
    hypershot_args.add_argument("--hn_sup_aggregation", type=str, default="concat", choices=ALLOWED_AGGREGATIONS, help="How to aggregate supports from the same class")
    hypershot_args.add_argument("--hn_transformer_layers_no", type=int, default=1, help="Number of layers in transformer")
    hypershot_args.add_argument("--hn_transformer_heads_no", type=int, default=1, help="Number of attention heads in transformer")
    hypershot_args.add_argument("--hn_transformer_feedforward_dim", type=int, default=512, help="Transformer's feedforward dimensionality")
    hypershot_args.add_argument("--hn_attention_embedding", action='store_true', help="Utilize attention-based embedding")
    hypershot_args.add_argument("--hn_kernel_layers_no", type=int, default=2, help="Depth of a kernel network")
    hypershot_args.add_argument("--hn_kernel_hidden_dim", type=int, default=128, help="Hidden dimension of a kernel network")
    hypershot_args.add_argument("--kernel_transformer_layers_no", type=int, default=1, help="Number of layers in kernel's transformer")
    hypershot_args.add_argument("--kernel_transformer_heads_no", type=int, default=1, help="Number of attention heads in kernel's transformer")
    hypershot_args.add_argument("--kernel_transformer_feedforward_dim", type=int, default=512, help="Kernel transformer's feedforward dimensionality")
    hypershot_args.add_argument("--hn_kernel_out_size", type=int, default=1600, help="Kernel output dim")
    hypershot_args.add_argument("--hn_kernel_invariance", action='store_true', help="Should the HyperNet's kernel be sequence invariant")
    hypershot_args.add_argument("--hn_kernel_invariance_type", default='attention', choices=['attention', 'convolution'], help="The type of invariance operation for the kernel's output")
    hypershot_args.add_argument("--hn_kernel_convolution_output_dim", type=int, default=256, help="Kernel convolution's output dim")
    hypershot_args.add_argument("--hn_kernel_invariance_pooling", default='mean', choices=['average', 'mean', 'min', 'max'], help="The type of invariance operation for the kernel's output")
    hypershot_args.add_argument("--hn_use_support_embeddings", action='store_true', help="Concatenate support embeddings with kernel features")
    hypershot_args.add_argument("--hn_no_self_relations", action='store_true', help="Multiply matrix K to remove self relations (i.e., kernel(x_i, x_i))")
    hypershot_args.add_argument("--hn_use_cosine_distance", action='store_true', help="Use cosine distance instead of a more specific kernel")
    hypershot_args.add_argument("--hn_use_scalar_product", action='store_true', help="Use scalar product instead of a more specific kernel")
    hypershot_args.add_argument("--hn_use_cosine_nn_kernel", action='store_true', help="Use cosine distance in NNKernel")
    hypershot_args.add_argument("--hn_val_epochs", type=int, default=0, help="Epochs for finetuning on support set during validation. We recommend to set this to >0 only during testing.")
    hypershot_args.add_argument("--hn_val_lr", type=float, default=1e-4, help="LR for finetuning on support set during validation")
    hypershot_args.add_argument("--hn_val_optim", type=str, default="adam", choices=["adam", "sgd"], help="Optimizer for finetuning on support set during validation")
    # ---- HyperMAML / BayesHMAML (shared) ------------------------------------
    hypermaml_args = parser.add_argument_group("HyperMAML and BayesHMAML- related arguments")
    hypermaml_args.add_argument('--hm_use_class_batch_input', action='store_true', help='Strategy for handling query set embeddings as an input of hyper network')
    hypermaml_args.add_argument("--hm_enhance_embeddings", type=bool, default=False, help="Flag that indicates if embeddings should be concatenated with logits and labels")
    hypermaml_args.add_argument("--hm_update_operator", type=str, default='minus', choices=['minus', 'plus', 'multiply'], help="Choice of operator to use with update value for weight update")
    hypermaml_args.add_argument('--hm_lambda', type=float, default=0.0, help='Regularization coefficient for the output of the hypernet')
    hypermaml_args.add_argument('--hm_save_delta_params', type=bool, default=False, help='saving delta parameters')
    hypermaml_args.add_argument("--hm_maml_warmup", action="store_true", help="Train the model in MAML way only at the beggining of the training")
    hypermaml_args.add_argument("--hm_maml_update_feature_net", action="store_true", help="Train feature net in the inner loop of MAML")
    hypermaml_args.add_argument("--hm_maml_warmup_epochs", type=int, default=100, help="The first n epochs where model is trained in MAML way only")
    hypermaml_args.add_argument("--hm_maml_warmup_switch_epochs", type=int, default=1000, help="The number of epochs for switching from MAML to HyperMAML")
    hypermaml_args.add_argument("--hm_load_feature_net", action="store_true", help="Load feature network from file")
    hypermaml_args.add_argument("--hm_feature_net_path", type=str, default='', help="File with feature network")
    hypermaml_args.add_argument("--hm_detach_feature_net", action="store_true", help="Freeze feature network")
    hypermaml_args.add_argument("--hm_detach_before_hyper_net", action="store_true", help="Do not calculate gradient which comes from hypernetwork")
    hypermaml_args.add_argument("--hm_support_set_loss", action='store_true', help="Use both query and support data when calculating loss")
    hypermaml_args.add_argument("--hm_set_forward_with_adaptation", action='store_true', help="Adapt network before test")
    # BHMAML only
    bhypermaml_args = parser.add_argument_group("BayesHMAML (only) related arguments")
    bhypermaml_args.add_argument('--hm_weight_set_num_train', default=1, type=int, help='number of randomly generated weights for training (default 1)')
    bhypermaml_args.add_argument('--hm_weight_set_num_test', default=20, type=int, help='number of randomly generated weights for test (default 20), if set to 0 expected value is generated')
    bhypermaml_args.add_argument('--kl_stop_val', default=1e-3, type=float, help='final value of kld_scale (default 1e-3)')
    bhypermaml_args.add_argument('--kl_scale', default=1e-24, type=float, help='initial value of kld_scale (default 1e-24)')
    return parser
| 8,176 | 102.506329 | 191 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/metrics_explorer.py | # run with:
# streamlit run metrics_explorer.py
from typing import Tuple, Dict, List, Union, Any
import numpy as np
import streamlit as st
from pathlib import Path
import json
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import altair as alt
import base64
from collections import defaultdict
# Streamlit page setup and module-level widgets/constants.
st.set_page_config(
    page_title="FSL metrics",
    page_icon=None,
    layout="wide",
)
# Bare string literals are rendered as markdown by streamlit's "magic".
"""# FSLH - metrics explorer"""
# Root directory holding one sub-directory per experiment.
root = Path("/home/mprzewie/coding/gmum_mnt/uj/few-shot-hypernets/save/checkpoints/cross_char/")
METRICS_FILE = "metrics.json"
ARGS_FILE = "args.json"
# Progress widgets updated while experiments are loaded below.
LOADING_PROGRESS = st.progress(0.0)
PROGRESS_TEXT = st.empty()
HN_PREFIX = "hn"

def metrics_dict_to_df(
    experiment_name: str,
    metrics_dict: Dict[str, List[Union[float, List[float]]]],
    args_dict: Dict[str, Any]
) -> pd.DataFrame:
    """Flatten a per-epoch metrics dict into a long-format DataFrame.

    A per-epoch *list* of values is collapsed to its mean; a scalar is kept
    as-is.  Every row carries the experiment name and all hypernet-related
    ("hn*") arguments from ``args_dict``.
    """
    hn_args = {name: val for name, val in args_dict.items() if name.startswith(HN_PREFIX)}
    records = []
    for metric_name, per_epoch in metrics_dict.items():
        for epoch, entry in enumerate(per_epoch):
            # Exactly one row per (metric, epoch) either way.
            collapsed = [np.mean(entry)] if isinstance(entry, list) else [entry]
            for value in collapsed:
                records.append({
                    "exp_name": experiment_name,
                    "met_name": metric_name,
                    "epoch": epoch,
                    "value": value,
                    **hn_args,
                })
    return pd.DataFrame(records)
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def load_experiment(experiment_path: Path) -> Tuple[Dict, pd.DataFrame]:
    """Load one experiment's args.json and metrics.json (cached by streamlit).

    Returns the raw args dict and the metrics flattened via metrics_dict_to_df.
    """
    with (experiment_path / METRICS_FILE).open("r") as f:
        metrics = json.load(f)
    with (experiment_path / ARGS_FILE).open("r") as f:
        args = json.load(f)
    return args, metrics_dict_to_df(experiment_name=experiment_path.name, metrics_dict=metrics, args_dict=args)
# Discover every experiment directory that has both metrics.json and args.json.
loggable_experiments = {p.parent.name: p for p in root.glob(f"*/{METRICS_FILE}") if (p.parent / ARGS_FILE).exists()}
experiment_args = dict()
experiment_metrics = dict()
loggable_experiments = list(loggable_experiments.items()) # [:3]
# Load each experiment, skipping (but printing) any that fail to parse.
for i, (e, p) in enumerate(loggable_experiments):
    PROGRESS_TEXT.text(f"loading {p.parent.name} {i}/{len(loggable_experiments)}")
    try:
        args, metrics_df = load_experiment(p.parent)
        experiment_args[e] = args
        experiment_metrics[e] = metrics_df
        LOADING_PROGRESS.progress((i + 1) / len(loggable_experiments))
    except Exception as exc:
        print(exc)
        pass
PROGRESS_TEXT.text(f"Loaded {len(experiment_metrics)} experiments")
# One long-format frame across all experiments.
df = pd.concat([mdf for mdf in experiment_metrics.values()])
available_metrics = sorted(df.met_name.unique())
# Union of all argument names seen in any experiment.
all_args = sorted({a for ad in experiment_args.values() for a in ad.keys()})
"""## Selected metric over the course of epochs"""
selected_metric = st.selectbox("Select metric", available_metrics, index=available_metrics.index("accuracy_val_max"))
aggregate_y = st.checkbox("Aggregate Y?", value=True)
# Line chart of the selected metric per experiment (optionally mean-aggregated).
st.altair_chart(alt.Chart(
    df[df.met_name == selected_metric],
).mark_line(point=True).encode(
    x="epoch",
    y=alt.Y("value", aggregate=("mean" if aggregate_y else alt.Undefined)),
    color="exp_name", tooltip=["exp_name", "value", "epoch"] + [a for a in all_args if a.startswith(HN_PREFIX)],
).configure_legend(labelLimit=0).interactive().properties(title=selected_metric), use_container_width=True)
"""## How do hyperparams influence the metric?"""
# One expander per hypernet argument, faceting the chart by that argument's value.
for a in all_args:
    if a.startswith(HN_PREFIX):
        unique_as = sorted(df[a].unique())
        with st.expander(f"{a} := {unique_as}", expanded=(len(unique_as) > 1)):
            st.altair_chart(alt.Chart(
                df[df.met_name == selected_metric],
            ).mark_line(point=True).encode(
                x="epoch",
                y=alt.Y("value", aggregate=("mean" if aggregate_y else alt.Undefined)),
                color="exp_name", tooltip=["exp_name", "value", "epoch"] + [a for a in all_args if a.startswith(HN_PREFIX)],
                column=a
).configure_legend(labelLimit=0).interactive().properties(title=selected_metric)) | 4,036 | 33.211864 | 124 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/save_features.py | import numpy as np
import torch
from torch.autograd import Variable
import os
import glob
import h5py
import configs
import backbone
from data.datamgr import SimpleDataManager
from methods.baselinetrain import BaselineTrain
from methods.baselinefinetune import BaselineFinetune
from methods.hypernets import hypernet_types
from methods.protonet import ProtoNet
from methods.matchingnet import MatchingNet
from methods.relationnet import RelationNet
from methods.maml import MAML
from io_utils import model_dict, parse_args, get_resume_file, get_best_file, get_assigned_file
def save_features(model, data_loader, outfile ):
    """Run ``model`` over every batch of ``data_loader`` and dump the features to HDF5.

    The output file contains 'all_feats' (N x feat_dims), 'all_labels' (N,) and
    'count' (the number of rows actually written; the datasets are pre-sized to
    len(data_loader) * batch_size, so trailing rows may be unused).
    """
    f = h5py.File(outfile, 'w')
    max_count = len(data_loader)*data_loader.batch_size
    all_labels = f.create_dataset('all_labels',(max_count,), dtype='i')
    all_feats=None
    count=0
    for i, (x,y) in enumerate(data_loader):
        if i%10 == 0:
            print('{:d}/{:d}'.format(i, len(data_loader)))
        x = x.cuda()
        x_var = Variable(x)
        feats = model(x_var)
        if all_feats is None:
            # Create the feature dataset lazily, once the feature shape is known.
            all_feats = f.create_dataset('all_feats', [max_count] + list( feats.size()[1:]) , dtype='f')
        all_feats[count:count+feats.size(0)] = feats.data.cpu().numpy()
        all_labels[count:count+feats.size(0)] = y.cpu().numpy()
        count = count + feats.size(0)
    count_var = f.create_dataset('count', (1,), dtype='i')
    count_var[0] = count
    f.close()
def do_save_fts(params):
    """Resolve data split, checkpoint and model from ``params``, then save features.

    Loads the best (or explicitly chosen) checkpoint for the configured
    dataset/model/method, strips the 'feature.' prefix from its state dict,
    and writes the extracted features to a .hdf5 file under the mirrored
    'features' directory.
    """
    # Methods that adapt per-episode cannot use pre-extracted features.
    illegal_models = [
        "maml", "maml_approx", "hyper_maml", "bayes_hmaml", "DKT",
    ] + list(hypernet_types.keys())
    assert params.method not in illegal_models, 'maml do not support save_feature and run'
    # Input resolution depends on backbone family and dataset.
    if 'Conv' in params.model:
        if params.dataset in ['omniglot', 'cross_char']:
            image_size = 28
        else:
            image_size = 84
    else:
        image_size = 224
    if params.dataset in ['omniglot', 'cross_char']:
        assert params.model == 'Conv4' and not params.train_aug, 'omniglot only support Conv4 without augmentation'
        params.model = 'Conv4S'
    # Pick the json split file matching the (possibly cross-domain) dataset.
    split = params.split
    if params.dataset == 'cross':
        if split == 'base':
            loadfile = configs.data_dir['miniImagenet'] + 'all.json'
        else:
            loadfile = configs.data_dir['CUB'] + split + '.json'
    elif params.dataset == 'cross_char':
        if split == 'base':
            loadfile = configs.data_dir['omniglot'] + 'noLatin.json'
        else:
            loadfile = configs.data_dir['emnist'] + split + '.json'
    else:
        loadfile = configs.data_dir[params.dataset] + split + '.json'
    # Reconstruct the checkpoint directory name used at training time.
    checkpoint_dir = '%s/checkpoints/%s/%s_%s' % (configs.save_dir, params.dataset, params.model, params.method)
    if params.train_aug:
        checkpoint_dir += '_aug'
    if not params.method in ['baseline', 'baseline++']:
        checkpoint_dir += '_%dway_%dshot' % (params.train_n_way, params.n_shot)
    if params.checkpoint_suffix != "":
        checkpoint_dir = checkpoint_dir + "_" + params.checkpoint_suffix
    # Choose which checkpoint file to load.
    if params.save_iter != -1:
        modelfile = get_assigned_file(checkpoint_dir, params.save_iter)
    elif params.method in ['baseline', 'baseline++']:
        modelfile = get_resume_file(checkpoint_dir)
    else:
        print("looking for best file in", checkpoint_dir)
        modelfile = get_best_file(checkpoint_dir)
        print("got", modelfile)
    # Features are written under a mirrored "features" tree.
    if params.save_iter != -1:
        outfile = os.path.join(checkpoint_dir.replace("checkpoints", "features"),
                               split + "_" + str(params.save_iter) + ".hdf5")
    else:
        outfile = os.path.join(checkpoint_dir.replace("checkpoints", "features"), split + ".hdf5")
    datamgr = SimpleDataManager(image_size, batch_size=64)
    data_loader = datamgr.get_data_loader(loadfile, aug=False)
    # Build the matching backbone (RelationNet needs the spatial "NP" variants).
    if params.method in ['relationnet', 'relationnet_softmax']:
        if params.model == 'Conv4':
            model = backbone.Conv4NP()
        elif params.model == 'Conv6':
            model = backbone.Conv6NP()
        elif params.model == 'Conv4S':
            model = backbone.Conv4SNP()
        else:
            model = model_dict[params.model](flatten=False)
    elif params.method in ['maml', 'maml_approx']:
        raise ValueError('MAML do not support save feature')
    else:
        model = model_dict[params.model]()
    model = model.cuda()
    tmp = torch.load(modelfile)
    state = tmp['state']
    state_keys = list(state.keys())
    for i, key in enumerate(state_keys):
        if "feature." in key:
            newkey = key.replace("feature.",
                                 "")  # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'
            state[newkey] = state.pop(key)
        else:
            # Drop classifier/head weights; only the backbone is needed.
            state.pop(key)
    model.load_state_dict(state)
    model.eval()
    dirname = os.path.dirname(outfile)
    if not os.path.isdir(dirname):
        os.makedirs(dirname)
    save_features(model, data_loader, outfile)
if __name__ == '__main__':
    # Entry point: parse CLI args and dump features for the requested split.
    params = parse_args('save_features')
    do_save_fts(params)
| 5,138 | 35.707143 | 178 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/train.py | import json
import sys
from collections import defaultdict
from typing import Type, List, Union, Dict, Optional
from copy import deepcopy
import numpy as np
import torch
import random
from neptune.new import Run
import torch.optim
import torch.optim.lr_scheduler as lr_scheduler
import os
import configs
import backbone
from data.datamgr import SimpleDataManager, SetDataManager
from methods.baselinetrain import BaselineTrain
from methods.DKT import DKT
from methods.hypernets.hypernet_poc import HyperNetPOC
from methods.hypernets import hypernet_types
from methods.protonet import ProtoNet
from methods.matchingnet import MatchingNet
from methods.relationnet import RelationNet
from methods.maml import MAML
from methods.hypernets.bayeshmaml import BayesHMAML
from methods.hypernets.hypermaml import HyperMAML
from io_utils import model_dict, parse_args, get_resume_file, setup_neptune
from neptune.new.types import File
import matplotlib.pyplot as plt
from pathlib import Path
from save_features import do_save_fts
from test import perform_test
def _set_seed(seed, verbose=True):
    """Seed every RNG (python, numpy, torch CPU and CUDA) for reproducibility.

    A seed of 0 means "do not seed": all RNGs are left untouched.
    """
    if seed == 0:
        if verbose:
            print("[INFO] Setting SEED: None")
        return
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Trade cudnn autotuning for run-to-run determinism.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    if verbose:
        print("[INFO] Setting SEED: " + str(seed))
def train(base_loader, val_loader, model, optimization, start_epoch, stop_epoch, params, *,
          neptune_run: Optional[Run] = None):
    """Run the main meta-training loop.

    Trains ``model`` on ``base_loader``, periodically evaluates it on
    ``val_loader``, checkpoints best/last models into ``params.checkpoint_dir``,
    appends per-epoch metrics to ``metrics.json`` and mirrors them to neptune
    when ``neptune_run`` is given. Returns the trained model.
    """
    print("Tot epochs: " + str(stop_epoch))
    if optimization == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=params.lr)
    elif optimization == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=params.lr)
    else:
        raise ValueError(f'Unknown optimization {optimization}, please define by yourself')
    max_acc = 0
    max_train_acc = 0
    max_acc_adaptation_dict = {}
    if params.hm_set_forward_with_adaptation:
        # Track best val/support accuracy separately per adaptation step count.
        max_acc_adaptation_dict = {}
        for i in range(params.hn_val_epochs + 1):
            if i != 0:
                max_acc_adaptation_dict[f"accuracy/val_support_max@-{i}"] = 0
                max_acc_adaptation_dict[f"accuracy/val_max@-{i}"] = 0
    if not os.path.isdir(params.checkpoint_dir):
        os.makedirs(params.checkpoint_dir)
    if (Path(params.checkpoint_dir) / "metrics.json").exists() and params.resume:
        # Restore metric history (and the running maxima) from a previous run.
        with (Path(params.checkpoint_dir) / "metrics.json").open("r") as f:
            try:
                metrics_per_epoch = defaultdict(list, json.load(f))
                try:
                    max_acc = metrics_per_epoch["accuracy/val_max"][-1]
                    max_train_acc = metrics_per_epoch["accuracy/train_max"][-1]
                    if params.hm_set_forward_with_adaptation:
                        for i in range(params.hn_val_epochs + 1):
                            if i != 0:
                                max_acc_adaptation_dict[f"accuracy/val_support_max@-{i}"] = \
                                    metrics_per_epoch[f"accuracy/val_support_max@-{i}"][-1]
                                max_acc_adaptation_dict[f"accuracy/val_max@-{i}"] = \
                                    metrics_per_epoch[f"accuracy/val_max@-{i}"][-1]
                # metrics_per_epoch is a defaultdict(list), so an absent key
                # yields [] and [-1] raises IndexError (was a bare except).
                except (KeyError, IndexError):
                    # Older runs logged under underscore-separated keys.
                    max_acc = metrics_per_epoch["accuracy_val_max"][-1]
                    max_train_acc = metrics_per_epoch["accuracy_train_max"][-1]
            except Exception:
                # Corrupted/partial metrics file - restart the history.
                metrics_per_epoch = defaultdict(list)
    else:
        metrics_per_epoch = defaultdict(list)
    scheduler = get_scheduler(params, optimizer, stop_epoch)
    print("Starting training")
    print("Params accessed until this point:")
    print("\n\t".join(sorted(params.history)))
    print("Params ignored until this point:")
    print("\n\t".join(params.get_ignored_args()))
    delta_params_list = []
    for epoch in range(start_epoch, stop_epoch):
        if epoch >= params.es_epoch:
            # Early stopping: abort once the threshold was never reached.
            if max_acc < params.es_threshold:
                print("Breaking training at epoch", epoch, "because max accuracy", max_acc, "is lower than threshold",
                      params.es_threshold)
                break
        model.epoch = epoch
        model.start_epoch = start_epoch
        model.stop_epoch = stop_epoch
        model.train()
        # NOTE(review): the original code branched on hyper_maml/bayes_hmaml
        # here, but both branches were identical - collapsed into one call.
        metrics = model.train_loop(epoch, base_loader, optimizer)  # model is called by reference, no need to return
        scheduler.step()
        model.eval()
        delta_params = metrics.pop('delta_params', None)
        if delta_params is not None:
            delta_params_list.append(delta_params)
        if (epoch % params.eval_freq == 0) or epoch in [
            params.es_epoch - 1,
            stop_epoch - 1
        ]:
            try:
                acc, test_loop_metrics = model.test_loop(val_loader)
            except Exception:
                # Some methods' test_loop returns a bare accuracy instead of
                # an (accuracy, metrics) tuple; retry and keep empty metrics.
                acc = model.test_loop(val_loader)
                test_loop_metrics = dict()
            print(
                f"Epoch {epoch}/{stop_epoch} | Max test acc {max_acc:.2f} | Test acc {acc:.2f} | Metrics: {test_loop_metrics}")
            metrics = metrics or dict()
            metrics["lr"] = scheduler.get_lr()
            metrics["accuracy/val"] = acc
            metrics["accuracy/val_max"] = max_acc
            metrics["accuracy/train_max"] = max_train_acc
            metrics = {
                **metrics,
                **test_loop_metrics,
                **max_acc_adaptation_dict
            }
            if params.hm_set_forward_with_adaptation:
                for i in range(params.hn_val_epochs + 1):
                    if i != 0:
                        metrics[f"accuracy/val_support_max@-{i}"] = max_acc_adaptation_dict[
                            f"accuracy/val_support_max@-{i}"]
                        metrics[f"accuracy/val_max@-{i}"] = max_acc_adaptation_dict[f"accuracy/val_max@-{i}"]
            if metrics["accuracy/train"] > max_train_acc:
                max_train_acc = metrics["accuracy/train"]
            if params.hm_set_forward_with_adaptation:
                for i in range(params.hn_val_epochs + 1):
                    # NOTE(review): val_max@-i is only updated when
                    # val_support_max@-i improved in the same epoch - this
                    # nesting is preserved from the original; confirm intended.
                    if i != 0 and metrics[f"accuracy/val_support_acc@-{i}"] > max_acc_adaptation_dict[
                        f"accuracy/val_support_max@-{i}"]:
                        max_acc_adaptation_dict[f"accuracy/val_support_max@-{i}"] = metrics[
                            f"accuracy/val_support_acc@-{i}"]
                        if metrics[f"accuracy/val@-{i}"] > max_acc_adaptation_dict[f"accuracy/val_max@-{i}"]:
                            max_acc_adaptation_dict[f"accuracy/val_max@-{i}"] = metrics[f"accuracy/val@-{i}"]
            if acc > max_acc:  # for baseline and baseline++, we don't use validation here so we let acc = -1
                print("--> Best model! save...")
                max_acc = acc
                outfile = os.path.join(params.checkpoint_dir, 'best_model.tar')
                torch.save({'epoch': epoch, 'state': model.state_dict()}, outfile)
                if params.maml_save_feature_network and params.method in ['maml', 'hyper_maml', 'bayes_hmaml']:
                    outfile = os.path.join(params.checkpoint_dir, 'best_feature_net.tar')
                    torch.save({'epoch': epoch, 'state': model.feature.state_dict()}, outfile)
            outfile = os.path.join(params.checkpoint_dir, 'last_model.tar')
            torch.save({'epoch': epoch, 'state': model.state_dict()}, outfile)
            if params.maml_save_feature_network and params.method in ['maml', 'hyper_maml', 'bayes_hmaml']:
                outfile = os.path.join(params.checkpoint_dir, 'last_feature_net.tar')
                torch.save({'epoch': epoch, 'state': model.feature.state_dict()}, outfile)
            if (epoch % params.save_freq == 0) or (epoch == stop_epoch - 1):
                outfile = os.path.join(params.checkpoint_dir, '{:d}.tar'.format(epoch))
                torch.save({'epoch': epoch, 'state': model.state_dict()}, outfile)
            if metrics is not None:
                for k, v in metrics.items():
                    metrics_per_epoch[k].append(v)
                with (Path(params.checkpoint_dir) / "metrics.json").open("w") as f:
                    json.dump(metrics_per_epoch, f, indent=2)
                if neptune_run is not None:
                    for m, v in metrics.items():
                        neptune_run[m].log(v, step=epoch)
    if neptune_run is not None:
        neptune_run["best_model"].track_files(os.path.join(params.checkpoint_dir, 'best_model.tar'))
        neptune_run["last_model"].track_files(os.path.join(params.checkpoint_dir, 'last_model.tar'))
        if params.maml_save_feature_network:
            neptune_run["best_feature_net"].track_files(os.path.join(params.checkpoint_dir, 'best_feature_net.tar'))
            neptune_run["last_feature_net"].track_files(os.path.join(params.checkpoint_dir, 'last_feature_net.tar'))
    if len(delta_params_list) > 0 and params.hm_save_delta_params:
        with (Path(params.checkpoint_dir) / f"delta_params_list_{len(delta_params_list)}.json").open("w") as f:
            json.dump(delta_params_list, f, indent=2)
    return model
def plot_metrics(metrics_per_epoch: Dict[str, Union[List[float], float]], epoch: int, fig_dir: Path):
    """Save one errorbar plot per metric into ``fig_dir``.

    Each metric's per-epoch history is plotted as a point series; list-valued
    entries are reduced to mean +/- std while scalar entries get a zero error
    bar. Plots of metrics whose name contains "accuracy" are clamped to
    [0, 100]. Files are written as ``<metric>.png``.
    """
    for metric_name, history in metrics_per_epoch.items():
        means = [np.mean(entry) if isinstance(entry, list) else entry for entry in history]
        stds = [np.std(entry) if isinstance(entry, list) else 0 for entry in history]
        plt.figure()
        if "accuracy" in metric_name:
            plt.ylim((0, 100))
        plt.errorbar(
            list(range(len(history))),
            means,
            stds,
            ecolor="black",
            fmt="o",
        )
        plt.grid()
        plt.title(f"{epoch}- {metric_name}")
        plt.savefig(fig_dir / f"{metric_name}.png")
        plt.close()
def get_scheduler(params, optimizer, stop_epoch=None) -> lr_scheduler._LRScheduler:
    """Build the LR scheduler selected by ``params.lr_scheduler``.

    "multisteplr" decays by 0.3 at ``params.milestones`` (or at the quarters
    of ``params.stop_epoch`` when no milestones are given); "none" is a
    constant schedule (MultiStepLR with gamma=1); "cosine" uses warm restarts
    with period ``stop_epoch``, falling back to a quarter of
    ``params.stop_epoch``. Raises TypeError for any other value.
    """
    def quarter_milestones():
        # Every quarter of the run, skipping epoch 0.
        step = params.stop_epoch // 4
        return list(range(0, params.stop_epoch, step))[1:]

    choice = params.lr_scheduler
    if choice == "multisteplr":
        milestones = params.milestones if params.milestones is not None else quarter_milestones()
        return lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=0.3)
    if choice == "none":
        # gamma=1 never changes the learning rate.
        return lr_scheduler.MultiStepLR(optimizer, milestones=quarter_milestones(), gamma=1)
    if choice == "cosine":
        period = stop_epoch if stop_epoch is not None else params.stop_epoch // 4
        return lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=period)
    raise TypeError(choice)
# Entry point of the training script: build loaders, construct the method,
# train, then extract features and run the final test sweep.
if __name__ == '__main__':
    params = parse_args('train')
    _set_seed(params.seed)
    # Resolve the train/val filelists; the "cross" settings train on one
    # dataset and validate on a different one.
    if params.dataset == 'cross':
        base_file = configs.data_dir['miniImagenet'] + 'all.json'
        val_file = configs.data_dir['CUB'] + 'val.json'
    elif params.dataset == 'cross_char':
        base_file = configs.data_dir['omniglot'] + 'noLatin.json'
        val_file = configs.data_dir['emnist'] + 'val.json'
    else:
        base_file = configs.data_dir[params.dataset] + 'base.json'
        val_file = configs.data_dir[params.dataset] + 'val.json'
    # Conv backbones use small inputs; ResNets expect 224x224.
    if 'Conv' in params.model:
        if params.dataset in ['omniglot', 'cross_char']:
            image_size = 28
        else:
            image_size = 84
    else:
        image_size = 224
    if params.dataset in ['omniglot', 'cross_char']:
        assert params.model == 'Conv4' and not params.train_aug, 'omniglot only support Conv4 without augmentation'
        # params.model = 'Conv4S'
        # no need for this, since omniglot is loaded as RGB
    # optimization = 'Adam'
    optimization = params.optim
    # stop_epoch == -1 means "use the per-method/per-dataset default below".
    if params.stop_epoch == -1:
        if params.method in ['baseline', 'baseline++']:
            if params.dataset in ['omniglot', 'cross_char']:
                params.stop_epoch = 5
            elif params.dataset in ['CUB']:
                params.stop_epoch = 200 # This is different as stated in the open-review paper. However, using 400 epoch in baseline actually lead to over-fitting
            elif params.dataset in ['miniImagenet', 'cross']:
                params.stop_epoch = 400
            else:
                params.stop_epoch = 400 # default
        else: # meta-learning methods
            if params.n_shot == 1:
                params.stop_epoch = 600
            elif params.n_shot == 5:
                params.stop_epoch = 400
            else:
                params.stop_epoch = 600 # default
    # Baselines train a plain classifier over all classes; meta methods use
    # episodic SetDataManager loaders instead.
    if params.method in ['baseline', 'baseline++']:
        base_datamgr = SimpleDataManager(image_size, batch_size=16)
        base_loader = base_datamgr.get_data_loader(base_file, aug=params.train_aug)
        val_datamgr = SimpleDataManager(image_size, batch_size=64)
        val_loader = val_datamgr.get_data_loader(val_file, aug=False)
        if params.dataset == 'omniglot':
            assert params.num_classes >= 4112, 'class number need to be larger than max label id in base class'
        if params.dataset == 'cross_char':
            assert params.num_classes >= 1597, 'class number need to be larger than max label id in base class'
        if params.method == 'baseline':
            model = BaselineTrain(model_dict[params.model], params.num_classes)
        elif params.method == 'baseline++':
            model = BaselineTrain(model_dict[params.model], params.num_classes, loss_type='dist')
    elif params.method in ['DKT', 'protonet', 'matchingnet', 'relationnet', 'relationnet_softmax', 'maml',
                           'maml_approx', 'hyper_maml','bayes_hmaml'] + list(hypernet_types.keys()):
        n_query = max(1, int(
            16 * params.test_n_way / params.train_n_way)) # if test_n_way is smaller than train_n_way, reduce n_query to keep batch size small
        print("n_query", n_query)
        train_few_shot_params = dict(n_way=params.train_n_way, n_support=params.n_shot, n_query=n_query)
        base_datamgr = SetDataManager(image_size, **train_few_shot_params) # n_eposide=100
        base_loader = base_datamgr.get_data_loader(base_file, aug=params.train_aug)
        test_few_shot_params = dict(n_way=params.test_n_way, n_support=params.n_shot, n_query=n_query)
        val_datamgr = SetDataManager(image_size, **test_few_shot_params)
        val_loader = val_datamgr.get_data_loader(val_file, aug=False)
        # a batch for SetDataManager: a [n_way, n_support + n_query, dim, w, h] tensor
        if (params.method == 'DKT'):
            dkt_train_few_shot_params = dict(n_way=params.train_n_way, n_support=params.n_shot)
            model = DKT(model_dict[params.model], **dkt_train_few_shot_params)
            model.init_summary()
        elif params.method == 'protonet':
            model = ProtoNet(model_dict[params.model], **train_few_shot_params)
        elif params.method == 'matchingnet':
            model = MatchingNet(model_dict[params.model], **train_few_shot_params)
        elif params.method in ['relationnet', 'relationnet_softmax']:
            # RelationNet needs unpooled feature maps, hence the NP variants.
            if params.model == 'Conv4':
                feature_model = backbone.Conv4NP
            elif params.model == 'Conv6':
                feature_model = backbone.Conv6NP
            elif params.model == 'Conv4S':
                feature_model = backbone.Conv4SNP
            else:
                feature_model = lambda: model_dict[params.model](flatten=False)
            loss_type = 'mse' if params.method == 'relationnet' else 'softmax'
            model = RelationNet(feature_model, loss_type=loss_type, **train_few_shot_params)
        elif params.method in ['maml', 'maml_approx']:
            # Switch backbone blocks into their MAML (fast-weight) mode.
            backbone.ConvBlock.maml = True
            backbone.SimpleBlock.maml = True
            backbone.BottleneckBlock.maml = True
            backbone.ResNet.maml = True
            model = MAML(model_dict[params.model], params=params, approx=(params.method == 'maml_approx'),
                         **train_few_shot_params)
            if params.dataset in ['omniglot', 'cross_char']: # maml use different parameter in omniglot
                model.n_task = 32
                model.task_update_num = 1
                model.train_lr = 0.1
        elif params.method in hypernet_types.keys():
            hn_type: Type[HyperNetPOC] = hypernet_types[params.method]
            model = hn_type(model_dict[params.model], params=params, **train_few_shot_params)
        elif params.method == "hyper_maml" or params.method == 'bayes_hmaml':
            backbone.ConvBlock.maml = True
            backbone.SimpleBlock.maml = True
            backbone.BottleneckBlock.maml = True
            backbone.ResNet.maml = True
            if params.method == 'bayes_hmaml':
                model = BayesHMAML(model_dict[params.model], params=params, approx=(params.method == 'maml_approx'),
                                   **train_few_shot_params)
            else:
                model = HyperMAML(model_dict[params.model], params=params, approx=(params.method == 'maml_approx'),
                                  **train_few_shot_params)
            if params.dataset in ['omniglot', 'cross_char']: # maml use different parameter in omniglot
                model.n_task = 32
                model.task_update_num = 1
                model.train_lr = 0.1
    else:
        raise ValueError('Unknown method')
    model = model.cuda()
    # Checkpoint directory encodes dataset/model/method (+aug, way/shot, suffix).
    params.checkpoint_dir = '%s/checkpoints/%s/%s_%s' % (configs.save_dir, params.dataset, params.model, params.method)
    if params.train_aug:
        params.checkpoint_dir += '_aug'
    if not params.method in ['baseline', 'baseline++']:
        params.checkpoint_dir += '_%dway_%dshot' % (params.train_n_way, params.n_shot)
    if params.checkpoint_suffix != "":
        params.checkpoint_dir = params.checkpoint_dir + "_" + params.checkpoint_suffix
    if not os.path.isdir(params.checkpoint_dir):
        os.makedirs(params.checkpoint_dir)
    print(params.checkpoint_dir)
    start_epoch = params.start_epoch
    stop_epoch = params.stop_epoch
    if params.method in ['maml', 'maml_approx', 'hyper_maml','bayes_hmaml']:
        stop_epoch = params.stop_epoch * model.n_task # maml use multiple tasks in one update
    if params.resume:
        resume_file = get_resume_file(params.checkpoint_dir)
        print(resume_file)
        if resume_file is not None:
            tmp = torch.load(resume_file)
            start_epoch = tmp['epoch'] + 1
            model.load_state_dict(tmp['state'])
            print("Resuming training from", resume_file, "epoch", start_epoch)
    elif params.warmup: # We also support warmup from pretrained baseline feature, but we never used in our paper
        baseline_checkpoint_dir = '%s/checkpoints/%s/%s_%s' % (
            configs.save_dir, params.dataset, params.model, 'baseline')
        if params.train_aug:
            baseline_checkpoint_dir += '_aug'
        warmup_resume_file = get_resume_file(baseline_checkpoint_dir)
        tmp = torch.load(warmup_resume_file)
        if tmp is not None:
            state = tmp['state']
            state_keys = list(state.keys())
            for i, key in enumerate(state_keys):
                if "feature." in key:
                    newkey = key.replace("feature.",
                                         "")  # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'
                    state[newkey] = state.pop(key)
                else:
                    state.pop(key)
            model.feature.load_state_dict(state)
        else:
            raise ValueError('No warm_up file')
    # Persist the exact arguments and a rerun command next to the checkpoints.
    args_dict = vars(params.params)
    with (Path(params.checkpoint_dir) / "args.json").open("w") as f:
        json.dump(
            {
                k: v if isinstance(v, (int, str, bool, float)) else str(v)
                for (k, v) in args_dict.items()
            },
            f,
            indent=2,
        )
    with (Path(params.checkpoint_dir) / "rerun.sh").open("w") as f:
        print("python", " ".join(sys.argv), file=f)
    neptune_run = setup_neptune(params)
    if neptune_run is not None:
        neptune_run["model"] = str(model)
    if not params.evaluate_model:
        model = train(base_loader, val_loader, model, optimization, start_epoch, stop_epoch, params,
                      neptune_run=neptune_run)
    # Post-training: dump novel-split features, then sweep the test-time
    # adaptation budget on the relevant dataset(s).
    params.split = "novel"
    params.save_iter = -1
    try:
        do_save_fts(params)
    except Exception as e:
        print("Cannot save features bc of", e)
    val_datasets = [params.dataset]
    if params.dataset in ["cross", "miniImagenet"]:
        val_datasets = ["cross", "miniImagenet"]
    for d in val_datasets:
        print("Evaluating on", d)
        params.dataset = d
        # num of epochs for finetuning on testing.
        for hn_val_epochs in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 25, 50, 100, 200]:
            params.hn_val_epochs = hn_val_epochs
            params.hm_set_forward_with_adaptation = True
            # add default test params
            params.adaptation = True
            params.repeat = 5
            print(f"Testing with {hn_val_epochs=}")
            test_results = perform_test(params)
            if neptune_run is not None:
                neptune_run[f"full_test/{d}/metrics @ {hn_val_epochs}"] = test_results
| 21,822 | 42.733467 | 186 | py |
# Root directory for checkpoints/features produced by the training scripts.
save_dir = './save/'

# Per-dataset locations of the filelist json files.
data_dir = {}
data_dir['CUB'] = './filelists/CUB/'
data_dir['miniImagenet'] = './filelists/miniImagenet/'
data_dir['omniglot'] = './filelists/omniglot/'
data_dir['emnist'] = './filelists/emnist/'

# GP kernel used by the DKT method. Other options: 'nn', 'linear', 'rbf',
# 'spectral' (regression only), 'matern', 'poli1', 'poli2', 'cossim'.
kernel_type = 'bncossim'
| 395 | 48.5 | 127 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/io_utils.py | import sys
from pathlib import Path
import neptune.new as neptune
import numpy as np
import os
import glob
import argparse
from neptune.new import Run
import backbone
import configs
import hn_args
from methods.hypernets import hypernet_types
# Maps the --model CLI argument to the matching backbone constructor.
# ResNet50/101 exist but are not used in the paper.
model_dict = dict(
            Conv4 = backbone.Conv4,
            Conv4Pool = backbone.Conv4Pool,
            Conv4S = backbone.Conv4S,
            Conv6 = backbone.Conv6,
            ResNet10 = backbone.ResNet10,
            ResNet18 = backbone.ResNet18,
            ResNet34 = backbone.ResNet34,
            ResNet50 = backbone.ResNet50,
            ResNet101 = backbone.ResNet101,
            Conv4WithKernel = backbone.Conv4WithKernel,
            ResNetWithKernel = backbone.ResNetWithKernel,
)
class ParamHolder:
    """Wraps a parsed-args namespace and records which attributes were read.

    Every first access of an attribute is printed and remembered in
    ``history``, which later allows reporting arguments that a script
    never used (see :meth:`get_ignored_args`).
    """
    def __init__(self, params):
        self.params = params
        self.history = []

    def __getattr__(self, item):
        # Only reached for names not set on the holder itself, i.e. the
        # wrapped argparse attributes.
        value = getattr(self.params, item)
        if item not in self.history:
            print("Getting", item, "=", value)
            self.history.append(item)
        return value

    def get_ignored_args(self):
        """Return (sorted) the wrapped argument names that were never read."""
        never_read = [name for name in vars(self.params) if name not in self.history]
        return sorted(never_read)
def parse_args(script):
    """Register and parse the CLI options for ``script``.

    ``script`` is one of 'train', 'save_features' or 'test' and selects the
    script-specific options; hypernet-specific flags are appended via
    ``hn_args``. Returns a ParamHolder wrapping the parsed namespace so that
    unused arguments can be reported later.
    """
    parser = argparse.ArgumentParser(
        description='few-shot script %s' % (script),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Options shared by all three entry points.
    parser.add_argument('--seed', default=0, type=int,
                        help='Seed for Numpy and pyTorch. Default: 0 (None)')
    parser.add_argument('--dataset', default='CUB',
                        help='CUB/miniImagenet/cross/omniglot/cross_char')
    # 50 and 101 are not used in the paper.
    parser.add_argument('--model', default='Conv4', choices=sorted(model_dict.keys()),
                        help='model: Conv{4|6}{Pool} / ResNet{10|18|34|50|101}')
    # relationnet_softmax replace L2 norm with softmax to expedite training,
    # maml_approx use first-order approximation in the gradient for efficiency
    parser.add_argument('--method', default='baseline',
                        choices=['baseline', 'baseline++', 'DKT', 'protonet', 'matchingnet', 'relationnet',
                                 'relationnet_softmax', 'maml', 'maml_approx', 'hyper_maml',
                                 'bayes_hmaml'] + list(hypernet_types.keys()),
                        help='baseline/baseline++/protonet/matchingnet/relationnet{_softmax}/maml{_approx}/hn_poc/hyper_maml/bayes_hmaml')
    # baseline and baseline++ ignore the way settings and only use n_shot in finetuning.
    parser.add_argument('--train_n_way', default=5, type=int,
                        help='class num to classify for training')
    parser.add_argument('--test_n_way', default=5, type=int,
                        help='class num to classify for testing (validation) ')
    parser.add_argument('--n_shot', default=5, type=int,
                        help='number of labeled data in each class, same as n_support')
    # still required for save_features.py and test.py to find the model path correctly
    parser.add_argument('--train_aug', action='store_true',
                        help='perform data augmentation or not during training ')
    # saved in save/checkpoints/[dataset]
    parser.add_argument("--checkpoint_suffix", type=str, default="",
                        help="Suffix for custom experiment differentiation")
    parser.add_argument("--lr", type=float, default=1e-3, help="Learning rate")
    parser.add_argument("--optim", type=str, choices=["adam", "sgd"], help="Optimizer", default="adam")
    parser.add_argument("--n_val_perms", type=int, default=1,
                        help="Number of task permutations in evaluation.")
    parser.add_argument("--lr_scheduler", type=str, help="LR scheduler", default="none",
                        choices=["multisteplr", "none", "cosine", "reducelronplateau"])
    parser.add_argument("--milestones", nargs='+', type=int, default=None,
                        help="Milestones for multisteplr")
    parser.add_argument("--maml_save_feature_network", action="store_true",
                        help="if to save feature net used in MAML")
    parser.add_argument("--maml_adapt_classifier", action="store_true",
                        help="Adapt only the classifier during second gradient calculation")
    # to eval a model pass its suffix to --checkpoint_suffix
    parser.add_argument("--evaluate_model", action="store_true",
                        help="Skip train phase and perform final test")
    if script == 'train':
        # make num_classes larger than the maximum label value in the base class
        parser.add_argument('--num_classes', default=200, type=int,
                            help='total number of classes in softmax, only used in baseline')
        parser.add_argument('--save_freq', default=500, type=int, help='Save frequency')
        parser.add_argument('--start_epoch', default=0, type=int, help='Starting epoch')
        # for meta-learning methods, each epoch contains 100 episodes.
        # The default epoch number is dataset dependent. See train.py
        parser.add_argument('--stop_epoch', default=-1, type=int, help='Stopping epoch')
        parser.add_argument('--resume', action='store_true',
                            help='continue from previous trained model with largest epoch')
        # warmup was never used in the paper
        parser.add_argument('--warmup', action='store_true',
                            help='continue from baseline, neglected if resume is true')
        parser.add_argument("--es_epoch", type=int, default=250,
                            help="Check if val accuracy threshold achieved at this epoch, stop if not.")
        parser.add_argument("--es_threshold", type=float, default=50.0,
                            help="Val accuracy threshold for early stopping")
        parser.add_argument("--eval_freq", type=int, default=1, help="Evaluation frequency")
    elif script == 'save_features':
        # default novel, but you can also test base/val class accuracy if you want
        parser.add_argument('--split', default='novel', help='base/val/novel')
        parser.add_argument('--save_iter', default=-1, type=int,
                            help='save feature from the model trained in x epoch, use the best model if x is -1')
    elif script == 'test':
        # default novel, but you can also test base/val class accuracy if you want
        parser.add_argument('--split', default='novel', help='base/val/novel')
        parser.add_argument('--save_iter', default=-1, type=int,
                            help='saved feature from the model trained in x epoch, use the best model if x is -1')
        parser.add_argument('--adaptation', action='store_true',
                            help='further adaptation in test time or not')
        parser.add_argument('--repeat', default=5, type=int,
                            help='Repeat the test N times with different seeds and take the mean. The seeds range is [seed, seed+repeat]')
    else:
        raise ValueError('Unknown script')
    parser = hn_args.add_hn_args_to_parser(parser)
    return ParamHolder(parser.parse_args())
def parse_args_regression(script):
    """Parse the CLI options for the regression entry points.

    ``script`` is 'train_regression' or 'test_regression' and selects the
    script-specific options. Returns the parsed argparse namespace.
    """
    parser = argparse.ArgumentParser(description='few-shot script %s' % (script))
    parser.add_argument('--seed', default=0, type=int,
                        help='Seed for Numpy and pyTorch. Default: 0 (None)')
    parser.add_argument('--model', default='Conv3', help='model: Conv{3} / MLP{2}')
    parser.add_argument('--method', default='DKT', help='DKT / transfer')
    parser.add_argument('--dataset', default='QMUL', help='QMUL / sines')
    parser.add_argument('--spectral', action='store_true',
                        help='Use a spectral covariance kernel function')
    if script == 'train_regression':
        parser.add_argument('--start_epoch', default=0, type=int, help='Starting epoch')
        # each epoch contains 100 episodes; default depends on the dataset
        parser.add_argument('--stop_epoch', default=100, type=int, help='Stopping epoch')
        parser.add_argument('--resume', action='store_true',
                            help='continue from previous trained model with largest epoch')
    elif script == 'test_regression':
        parser.add_argument('--n_support', default=5, type=int,
                            help='Number of points on trajectory to be given as support points')
        parser.add_argument('--n_test_epochs', default=10, type=int, help='How many test people?')
    return parser.parse_args()
def get_assigned_file(checkpoint_dir, num):
    """Return the path of the checkpoint saved at epoch ``num``."""
    return os.path.join(checkpoint_dir, f'{num:d}.tar')
def get_resume_file(checkpoint_dir):
    """Find the checkpoint to resume training from in ``checkpoint_dir``.

    Preference order: 'last_model.tar' if present, otherwise the numbered
    '<epoch>.tar' file with the highest epoch. Returns None when no usable
    checkpoint exists ('best_model.tar' alone does not count).
    """
    filelist = glob.glob(os.path.join(checkpoint_dir, '*.tar'))
    if len(filelist) == 0:
        return None
    last_model_files = [x for x in filelist if os.path.basename(x) == 'last_model.tar']
    if len(last_model_files) == 1:
        return last_model_files[0]
    # Only numbered per-epoch checkpoints are resumable; skip best_model.tar
    # and anything else not named '<int>.tar'.
    epochs = []
    for path in filelist:
        stem = os.path.splitext(os.path.basename(path))[0]
        if stem.isdigit():
            epochs.append(int(stem))
    if not epochs:
        # Previously this crashed on max() of an empty sequence when e.g.
        # only 'best_model.tar' was present.
        return None
    return os.path.join(checkpoint_dir, '{:d}.tar'.format(max(epochs)))
def get_best_file(checkpoint_dir):
    """Return 'best_model.tar' from ``checkpoint_dir`` if it exists,
    otherwise fall back to the latest resumable checkpoint."""
    candidate = os.path.join(checkpoint_dir, 'best_model.tar')
    return candidate if os.path.isfile(candidate) else get_resume_file(checkpoint_dir)
def setup_neptune(params) -> Run:
    """Initialize (or resume) a neptune run for this experiment.

    The run id is persisted in NEPTUNE_RUN.txt inside the checkpoint dir so a
    resumed training keeps logging to the same run. Returns the Run, or None
    when neptune could not be initialized - callers must handle the None and
    continue without remote logging.
    """
    try:
        run_name = Path(params.checkpoint_dir).relative_to(Path(configs.save_dir) / "checkpoints").name
        run_file = Path(params.checkpoint_dir) / "NEPTUNE_RUN.txt"
        run_id = None
        if params.resume and run_file.exists():
            with run_file.open("r") as f:
                run_id = f.read()
                print("Resuming neptune run", run_id)
        run = neptune.init(
            name=run_name,
            source_files="**/*.py",
            tags=[params.checkpoint_suffix] if params.checkpoint_suffix != "" else [],
            run=run_id
        )
        with run_file.open("w") as f:
            f.write(run._short_id)
        print("Starting neptune run", run._short_id)
        run["params"] = vars(params.params)
        run["cmd"] = f"python {' '.join(sys.argv)}"
        return run
    except Exception as e:
        # Neptune is optional: report the failure and return None explicitly
        # (was an implicit None via a dangling `pass`).
        print("Cannot initialize neptune because of", e)
        return None
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/relationnet.py | # This code is modified from https://github.com/floodsung/LearningToCompare_FSL
import backbone
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from methods.meta_template import MetaTemplate
import utils
class RelationNet(MetaTemplate):
    """Relation Network few-shot classifier.

    Class prototypes are mean support feature maps; a learned RelationModule
    scores each (prototype, query) pair. loss_type 'mse' trains relation
    scores against one-hot targets, anything else uses cross-entropy.
    """
    # n_query is accepted for signature compatibility with other methods but
    # is not forwarded here; episodes set self.n_query later - TODO confirm.
    def __init__(self, model_func, n_way, n_support, loss_type = 'mse', n_query=None):
        super(RelationNet, self).__init__(model_func, n_way, n_support)
        self.loss_type = loss_type #'softmax'# 'mse'
        self.relation_module = RelationModule( self.feat_dim , 8, self.loss_type ) #relation net features are not pooled, so self.feat_dim is [dim, w, h]
        if self.loss_type == 'mse':
            self.loss_fn = nn.MSELoss()
        else:
            self.loss_fn = nn.CrossEntropyLoss()
    def set_forward(self,x,is_feature = False):
        """Score each query against every class prototype.

        Returns a (n_way * n_query, n_way) tensor of relation scores.
        """
        z_support, z_query = self.parse_feature(x,is_feature)
        z_support = z_support.contiguous()
        # Prototype = mean support feature map per class.
        z_proto = z_support.view( self.n_way, self.n_support, *self.feat_dim ).mean(1)
        z_query = z_query.contiguous().view( self.n_way* self.n_query, *self.feat_dim )
        # Build all (prototype, query) pairs by broadcasting both sides.
        z_proto_ext = z_proto.unsqueeze(0).repeat(self.n_query* self.n_way,1,1,1,1)
        z_query_ext = z_query.unsqueeze(0).repeat( self.n_way,1,1,1,1)
        z_query_ext = torch.transpose(z_query_ext,0,1)
        # Pairs are concatenated along the channel axis, doubling dim 0.
        extend_final_feat_dim = self.feat_dim.copy()
        extend_final_feat_dim[0] *= 2
        relation_pairs = torch.cat((z_proto_ext,z_query_ext),2).view(-1, *extend_final_feat_dim)
        relations = self.relation_module(relation_pairs).view(-1, self.n_way)
        return relations
    def set_forward_adaptation(self,x,is_feature = True): #overwrite parent function
        """Finetune the relation head on the support set, then score queries.

        Only the relation module is trained (features stay fixed); its
        original weights are restored afterwards via a state-dict clone.
        """
        assert is_feature == True, 'Finetune only support fixed feature'
        full_n_support = self.n_support
        full_n_query = self.n_query
        # Snapshot the relation head so adaptation leaves the model unchanged.
        relation_module_clone = RelationModule( self.feat_dim , 8, self.loss_type )
        relation_module_clone.load_state_dict(self.relation_module.state_dict())
        z_support, z_query = self.parse_feature(x,is_feature)
        z_support = z_support.contiguous()
        set_optimizer = torch.optim.SGD(self.relation_module.parameters(), lr = 0.01, momentum=0.9, dampening=0.9, weight_decay=0.001)
        # Finetune on mini-episodes carved out of the support set; the 3/2
        # split is hard-coded - assumes full_n_support >= 5, TODO confirm.
        self.n_support = 3
        self.n_query = 2
        z_support_cpu = z_support.data.cpu().numpy()
        for epoch in range(100):
            # Shuffle support examples, then reuse the first 3 as "support"
            # and the next 2 as "query" inside set_forward.
            perm_id = np.random.permutation(full_n_support).tolist()
            sub_x = np.array([z_support_cpu[i,perm_id,:,:,:] for i in range(z_support.size(0))])
            sub_x = torch.Tensor(sub_x).cuda()
            if self.change_way:
                self.n_way = sub_x.size(0)
            set_optimizer.zero_grad()
            y = torch.from_numpy(np.repeat(range( self.n_way ), self.n_query ))
            scores = self.set_forward(sub_x, is_feature = True)
            if self.loss_type == 'mse':
                y_oh = utils.one_hot(y, self.n_way)
                y_oh = Variable(y_oh.cuda())
                loss = self.loss_fn(scores, y_oh )
            else:
                y = Variable(y.cuda())
                loss = self.loss_fn(scores, y )
            loss.backward()
            set_optimizer.step()
        # Restore the real episode sizes before the actual evaluation pass.
        self.n_support = full_n_support
        self.n_query = full_n_query
        z_proto = z_support.view( self.n_way, self.n_support, *self.feat_dim ).mean(1)
        z_query = z_query.contiguous().view( self.n_way* self.n_query, *self.feat_dim )
        z_proto_ext = z_proto.unsqueeze(0).repeat(self.n_query* self.n_way,1,1,1,1)
        z_query_ext = z_query.unsqueeze(0).repeat( self.n_way,1,1,1,1)
        z_query_ext = torch.transpose(z_query_ext,0,1)
        extend_final_feat_dim = self.feat_dim.copy()
        extend_final_feat_dim[0] *= 2
        relation_pairs = torch.cat((z_proto_ext,z_query_ext),2).view(-1, *extend_final_feat_dim)
        relations = self.relation_module(relation_pairs).view(-1, self.n_way)
        # Undo the adaptation so the next episode starts from clean weights.
        self.relation_module.load_state_dict(relation_module_clone.state_dict())
        return relations
    def set_forward_loss(self, x):
        """Episode loss: MSE against one-hot targets or cross-entropy."""
        y = torch.from_numpy(np.repeat(range( self.n_way ), self.n_query ))
        scores = self.set_forward(x)
        if self.loss_type == 'mse':
            y_oh = utils.one_hot(y, self.n_way)
            y_oh = Variable(y_oh.cuda())
            return self.loss_fn(scores, y_oh )
        else:
            y = Variable(y.cuda())
            return self.loss_fn(scores, y )
class RelationConvBlock(nn.Module):
    """Conv2d -> BatchNorm -> ReLU -> MaxPool(2) block of the relation head.

    Attribute names (C, BN, ...) are kept short because they become the
    state_dict keys of saved checkpoints.
    """
    def __init__(self, indim, outdim, padding = 0):
        super(RelationConvBlock, self).__init__()
        self.indim = indim
        self.outdim = outdim
        self.C = nn.Conv2d(indim, outdim, 3, padding = padding )
        self.BN = nn.BatchNorm2d(outdim, momentum=1, affine=True)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(2)
        self.parametrized_layers = [self.C, self.BN, self.relu, self.pool]
        # Weight init is delegated to the shared backbone helper.
        for layer in self.parametrized_layers:
            backbone.init_layer(layer)
        self.trunk = nn.Sequential(*self.parametrized_layers)
    def forward(self,x):
        out = self.trunk(x)
        return out
class RelationModule(nn.Module):
    """Relation head: scores a concatenated (prototype, query) feature pair.

    Two conv blocks followed by a two-layer MLP producing one relation score
    per pair. With loss_type 'mse' the score is squashed through a sigmoid;
    with 'softmax' the raw logit is returned for cross-entropy.
    """
    def __init__(self, input_size, hidden_size, loss_type = 'mse'):
        super(RelationModule, self).__init__()
        self.loss_type = loss_type
        # when using Resnet, conv map without avgpooling is 7x7, need padding in block to do pooling
        padding = 1 if ( input_size[1] <10 ) and ( input_size[2] <10 ) else 0
        self.layer1 = RelationConvBlock(input_size[0]*2, input_size[0], padding = padding )
        self.layer2 = RelationConvBlock(input_size[0], input_size[0], padding = padding )
        # Spatial size after each block's conv(3, padding) + maxpool(2), applied twice.
        shrink_s = lambda s: int((int((s- 2 + 2*padding)/2)-2 + 2*padding)/2)
        self.fc1 = nn.Linear( input_size[0]* shrink_s(input_size[1]) * shrink_s(input_size[2]), hidden_size )
        self.fc2 = nn.Linear( hidden_size, 1)
    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        if self.loss_type == 'mse':
            # torch.sigmoid replaces the deprecated F.sigmoid.
            out = torch.sigmoid(self.fc2(out))
        elif self.loss_type == 'softmax':
            out = self.fc2(out)
        # NOTE(review): any other loss_type silently returns the fc1 features;
        # callers only ever pass 'mse' or 'softmax'.
        return out
| 6,459 | 40.677419 | 170 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/kernels.py | import gpytorch
import torch
import torch.nn as nn
class NNKernel(nn.Module):
    """Neural-network kernel: k(x1, x2) = <f(x1), f(x2)> for a learned MLP f."""

    def __init__(self, input_dim: int, output_dim: int, num_layers: int, hidden_dim: int, flatten: bool = False, **kwargs):
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        self.flatten = flatten
        self.model = self.create_model()

    def create_model(self):
        """Build the embedding MLP: Linear/ReLU stack, optionally prefixed by Flatten."""
        if self.num_layers == 0:
            # Degenerate case: a single linear projection, no Flatten prefix.
            return nn.Sequential(nn.Linear(self.input_dim, self.output_dim))
        assert self.num_layers >= 1, "Number of hidden layers must be at least 1"
        layers = [nn.Linear(self.input_dim, self.hidden_dim), nn.ReLU()]
        if self.flatten:
            layers.insert(0, nn.Flatten())
        for _ in range(self.num_layers - 1):
            layers.append(nn.Linear(self.hidden_dim, self.hidden_dim))
            layers.append(nn.ReLU())
        layers.append(nn.Linear(self.hidden_dim, self.output_dim))
        return nn.Sequential(*layers)

    def forward(self, x1, x2, diag=False, last_dim_is_batch=False, full_covar=True, **params):
        r"""
        Compute the covariance between x1 (`n x d`) and x2 (`m x d`).

        Returns the full `n x m` Gram matrix of embedded inputs, or just its
        diagonal when ``diag=True``. ``last_dim_is_batch`` is unsupported.
        """
        if last_dim_is_batch:
            raise NotImplementedError()
        emb1 = self.model(x1)
        emb2 = self.model(x2)
        gram = torch.matmul(emb1, emb2.T)
        return torch.diag(gram) if diag else gram
class CosineNNKernel(nn.Module):
    """NN kernel using shifted cosine similarity of MLP embeddings; values in [0, 2]."""

    def __init__(self, input_dim: int, output_dim: int, num_layers: int, hidden_dim: int, flatten: bool = False, **kwargs):
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        self.flatten = flatten
        self.model = self.create_model()

    def create_model(self):
        """Build the embedding MLP: Linear/ReLU stack, optionally prefixed by Flatten."""
        if self.num_layers == 0:
            # Degenerate case: a single linear projection, no Flatten prefix.
            return nn.Sequential(nn.Linear(self.input_dim, self.output_dim))
        assert self.num_layers >= 1, "Number of hidden layers must be at least 1"
        layers = [nn.Linear(self.input_dim, self.hidden_dim), nn.ReLU()]
        if self.flatten:
            layers.insert(0, nn.Flatten())
        for _ in range(self.num_layers - 1):
            layers.append(nn.Linear(self.hidden_dim, self.hidden_dim))
            layers.append(nn.ReLU())
        layers.append(nn.Linear(self.hidden_dim, self.output_dim))
        return nn.Sequential(*layers)

    def forward(self, x1, x2, diag=False, last_dim_is_batch=False, full_covar=True, **params):
        """Shifted cosine similarity of embedded inputs: `n x m` matrix or its diagonal."""
        if last_dim_is_batch:
            raise NotImplementedError()
        emb1 = torch.nn.functional.normalize(self.model(x1))
        emb2 = torch.nn.functional.normalize(self.model(x2))
        # +1 shifts cosine similarity from [-1, 1] into [0, 2].
        gram = torch.mm(emb1, emb2.T) + 1
        return torch.diag(gram) if diag else gram
class ScalarProductKernel(nn.Module):
    """Parameter-free kernel: plain (matrix/dot) product of the raw inputs."""
    def forward(self, x1, x2):
        # Note: x2 is not transposed here, unlike the NN kernels above.
        return x1 @ x2
class CosineDistanceKernel(nn.Module):
    """Parameter-free kernel: 1 - cosine_similarity(x1, x2); values in [0, 2]."""
    def forward(self, x1, x2):
        unit_a = torch.nn.functional.normalize(x1)
        unit_b = torch.nn.functional.normalize(x2)
        return 1 - torch.mm(unit_a, unit_b.T)
class PositiveLinear(nn.Module):
    """Bias-free linear layer whose effective weights are kept positive via softplus."""

    def __init__(self, in_features, out_features):
        super(PositiveLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Raw, unconstrained parameter; positivity is enforced in forward().
        self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-initialize the raw (unconstrained) weight."""
        nn.init.xavier_uniform_(self.weight)

    def forward(self, input):
        positive_weight = nn.functional.softplus(self.weight)
        return nn.functional.linear(input, positive_weight)
class NNKernelNoInner(gpytorch.kernels.Kernel):
    # Kernel without an inner product: a positive-weight MLP scores each
    # concatenated pair (x1[i], x2[j]) directly, hence input_dim*2 below.
    def __init__(self, input_dim, num_layers, hidden_dim, flatten=False, **kwargs):
        self.input_dim = input_dim*2  # the model consumes a concatenated pair
        self.output_dim = 1
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        self.flatten = flatten
        self.model = self.create_model()
    def create_model(self):
        # PositiveLinear + Sigmoid stack; weights are constrained positive.
        # NOTE(review): positivity alone does not guarantee a PSD kernel — the
        # commented-out eigenvalue check below suggests this was investigated.
        assert self.num_layers >= 1, "Number of hidden layers must be at least 1"
        modules = [PositiveLinear(self.input_dim, self.hidden_dim), nn.Sigmoid()]
        if self.flatten:
            modules = [nn.Flatten()] + modules
        for i in range(self.num_layers - 1):
            modules.append(PositiveLinear(self.hidden_dim, self.hidden_dim))
            modules.append(nn.Sigmoid())
        modules.append(PositiveLinear(self.hidden_dim, self.output_dim))
        model = nn.Sequential(*modules)
        return model
    def forward(self, x1, x2, diag=False, last_dim_is_batch=False, full_covar=True, **params):
        r"""
        Computes the covariance between x1 and x2.
        This method should be implemented by all Kernel subclasses.
        Args:
            :attr:`x1` (Tensor `n x d` or `b x n x d`):
                First set of data
            :attr:`x2` (Tensor `m x d` or `b x m x d`):
                Second set of data
            :attr:`diag` (bool):
                Should the Kernel compute the whole kernel, or just the diag?
            :attr:`last_dim_is_batch` (tuple, optional):
                If this is true, it treats the last dimension of the data as another batch dimension.
                (Useful for additive structure over the dimensions). Default: False
        Returns:
            :class:`Tensor` or :class:`gpytorch.lazy.LazyTensor`.
                The exact size depends on the kernel's evaluation mode:
                * `full_covar`: `n x m` or `b x n x m`
                * `full_covar` with `last_dim_is_batch=True`: `k x n x m` or `b x k x n x m`
                * `diag`: `n` or `b x n`
                * `diag` with `last_dim_is_batch=True`: `k x n` or `b x k x n`
        """
        if last_dim_is_batch:
            raise NotImplementedError()
        else:
            n = x1.shape[0]
            m = x2.shape[0]
            # NOTE(review): x1.get_device() returns -1 for CPU tensors, so this
            # effectively requires CUDA inputs — confirm before reusing on CPU.
            out = torch.zeros((n,m), device=x1.get_device())
            # Only the lower triangle (j <= i) is evaluated; out[j, i] is filled
            # by symmetry. NOTE(review): that mirror step is only correct when
            # x1 and x2 are the same set — for x1 != x2 the upper triangle
            # should come from model(cat(x1[j], x2[i])) instead. Verify callers.
            for i in range(n):
                for j in range(i+1):
                    out[i, j] = self.model(torch.cat((x1[i], x2[j]))).view(-1)
                    if i != j:
                        out[j, i] = out[i, j]
            #npout = out.cpu().detach().numpy()
            #print(np.linalg.eigvals(npout))
            #assert np.all(np.linalg.eigvals(npout) +1e-2 >= 0), "not positive"
            if diag:
                return torch.diag(out)
            else:
                return out
class MultiNNKernel(gpytorch.kernels.Kernel):
    """Multitask kernel built from per-task NN kernels, with interleaved task blocks.

    For `n` points x1 and `m` points x2 the output is an
    `(n*num_tasks) x (m*num_tasks)` matrix whose (i, j) task block is
    `kernels[i].model(x1) @ kernels[j].model(x2).T`, written with stride
    `num_tasks` so samples for the same data point are grouped together.
    """
    def __init__(self, num_tasks, kernels, **kwargs):
        super(MultiNNKernel, self).__init__(**kwargs)
        assert isinstance(kernels, list), "kernels must be a list of kernels"
        self.num_tasks = num_tasks
        self.kernels = nn.ModuleList(kernels)
    def num_outputs_per_input(self, x1, x2):
        """
        Given `n` data points `x1` and `m` datapoints `x2`, this multitask
        kernel returns an `(n*num_tasks) x (m*num_tasks)` covariance matrix.
        """
        return self.num_tasks
    def forward(self, x1, x2, diag=False, last_dim_is_batch=False, full_covar=True, **params):
        r"""
        Computes the covariance between x1 and x2.
        Args:
            :attr:`x1` (Tensor `n x d` or `b x n x d`):
                First set of data
            :attr:`x2` (Tensor `m x d` or `b x m x d`):
                Second set of data
            :attr:`diag` (bool):
                Should the Kernel compute the whole kernel, or just the diag?
            :attr:`last_dim_is_batch` (tuple, optional):
                If this is true, it treats the last dimension of the data as another batch dimension.
                (Useful for additive structure over the dimensions). Default: False
        Returns:
            :class:`Tensor` or :class:`gpytorch.lazy.LazyTensor`.
                The exact size depends on the kernel's evaluation mode:
                * `full_covar`: `n x m` or `b x n x m`
                * `diag`: `n` or `b x n`
        """
        if last_dim_is_batch:
            raise NotImplementedError()
        else:
            n = x1.shape[0]
            m = x2.shape[0]
            # Hoist the embeddings out of the double loop: each kernel's model
            # runs once per input set instead of num_tasks times (was O(T^2)
            # forward passes, now O(T)); results are identical.
            z1_list = [kernel.model(x1) for kernel in self.kernels]
            z2_list = [kernel.model(x2) for kernel in self.kernels]
            out = torch.zeros((n * self.num_tasks, m * self.num_tasks), device=x1.get_device())
            for i in range(self.num_tasks):
                for j in range(self.num_tasks):
                    out[i:n*self.num_tasks:self.num_tasks, j:m*self.num_tasks:self.num_tasks] = torch.matmul(z1_list[i], z2_list[j].T)
            if diag:
                return torch.diag(out)
            else:
                return out
def init_kernel_function(kernel_input_dim, params):
    """Factory: build the kernel selected by the `params.hn_*` flags.

    Precedence: scalar product, then cosine distance, then an NN-based kernel
    (cosine variant if `hn_use_cosine_nn_kernel`, plain `NNKernel` otherwise).
    """
    if params.hn_use_scalar_product:
        return ScalarProductKernel()
    if params.hn_use_cosine_distance:
        return CosineDistanceKernel()
    # NN-based kernels share the same (input, output, layers, hidden) config.
    kernel_cls = CosineNNKernel if params.hn_use_cosine_nn_kernel else NNKernel
    return kernel_cls(
        kernel_input_dim,
        params.hn_kernel_out_size,
        params.hn_kernel_layers_no,
        params.hn_kernel_hidden_dim,
    )
| 11,422 | 37.591216 | 122 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/maml.py | # This code is modified from https://github.com/dragen1860/MAML-Pytorch and https://github.com/katerakelly/pytorch-maml
import torch
import backbone
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
from methods.meta_template import MetaTemplate
from time import time
class MAML(MetaTemplate):
    """Model-Agnostic Meta-Learning with fast weights stored on each parameter's
    `.fast` attribute (consumed by Linear_fw / Conv_fw in backbone.py).

    `maml_adapt_classifier=True` restricts inner-loop adaptation to the final
    classifier; otherwise the whole network is adapted.
    """
    def __init__(self, model_func, n_way, n_support, n_query, params=None, approx = False):
        super(MAML, self).__init__(model_func, n_way, n_support, change_way = False)
        self.loss_fn = nn.CrossEntropyLoss()
        self.classifier = backbone.Linear_fw(self.feat_dim, n_way)
        self.classifier.bias.data.fill_(0)
        self.maml_adapt_classifier = params.maml_adapt_classifier
        self.n_task = 4  # tasks accumulated before one outer-loop optimizer step
        self.task_update_num = 5  # inner-loop gradient steps per task
        self.train_lr = 0.01  # inner-loop learning rate
        self.approx = approx #first order approx.
    def forward(self,x):
        # Feature extractor followed by the fast-weight classifier.
        out = self.feature.forward(x)
        scores = self.classifier.forward(out)
        return scores
    def set_forward(self,x, is_feature = False):
        """Inner-loop adapt on the support set, then score the query set."""
        assert is_feature == False, 'MAML do not support fixed feature'
        x = x.cuda()
        x_var = Variable(x)
        x_a_i = x_var[:,:self.n_support,:,:,:].contiguous().view( self.n_way* self.n_support, *x.size()[2:]) #support data
        x_b_i = x_var[:,self.n_support:,:,:,:].contiguous().view( self.n_way* self.n_query, *x.size()[2:]) #query data
        y_a_i = Variable( torch.from_numpy( np.repeat(range( self.n_way ), self.n_support ) )).cuda() #label for support data
        # Reset fast weights before adaptation; scope depends on the flag.
        if self.maml_adapt_classifier:
            fast_parameters = list(self.classifier.parameters())
            for weight in self.classifier.parameters():
                weight.fast = None
        else:
            fast_parameters = list(self.parameters()) #the first gradient calcuated in line 45 is based on original weight
            for weight in self.parameters():
                weight.fast = None
        self.zero_grad()
        for task_step in (list(range(self.task_update_num))):
            scores = self.forward(x_a_i)
            set_loss = self.loss_fn( scores, y_a_i)
            grad = torch.autograd.grad(set_loss, fast_parameters, create_graph=True) #build full graph support gradient of gradient
            if self.approx:
                grad = [ g.detach() for g in grad ] #do not calculate gradient of gradient if using first order approximation
            fast_parameters = []
            parameters = self.classifier.parameters() if self.maml_adapt_classifier else self.parameters()
            for k, weight in enumerate(parameters):
                #for usage of weight.fast, please see Linear_fw, Conv_fw in backbone.py
                if weight.fast is None:
                    weight.fast = weight - self.train_lr * grad[k] #create weight.fast
                else:
                    weight.fast = weight.fast - self.train_lr * grad[k] #create an updated weight.fast, note the '-' is not merely minus value, but to create a new weight.fast
                fast_parameters.append(weight.fast) #gradients calculated in line 45 are based on newest fast weight, but the graph will retain the link to old weight.fasts
        scores = self.forward(x_b_i)
        return scores
    def set_forward_adaptation(self,x, is_feature = False): #overwrite parrent function
        raise ValueError('MAML performs further adapation simply by increasing task_upate_num')
    def set_forward_loss(self, x):
        """Return (query cross-entropy loss, query top-1 accuracy in %) for one task."""
        scores = self.set_forward(x, is_feature = False)
        query_data_labels = Variable( torch.from_numpy( np.repeat(range( self.n_way ), self.n_query ) )).cuda()
        loss = self.loss_fn(scores, query_data_labels)
        topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
        topk_ind = topk_labels.cpu().numpy().flatten()
        y_labels = query_data_labels.cpu().numpy()
        top1_correct = np.sum(topk_ind == y_labels)
        task_accuracy = (top1_correct / len(query_data_labels)) * 100
        return loss, task_accuracy
    def train_loop(self, epoch, train_loader, optimizer): #overwrite parrent function
        """One meta-training epoch; outer-loop step every `n_task` tasks."""
        print_freq = 10
        avg_loss=0
        task_count = 0
        loss_all = []
        acc_all = []
        optimizer.zero_grad()
        #train
        for i, (x,_) in enumerate(train_loader):
            self.n_query = x.size(1) - self.n_support
            assert self.n_way == x.size(0), "MAML do not support way change"
            loss, task_accuracy = self.set_forward_loss(x)
            avg_loss = avg_loss+loss.item()#.data[0]
            loss_all.append(loss)
            acc_all.append(task_accuracy)
            task_count += 1
            if task_count == self.n_task: #MAML update several tasks at one time
                loss_q = torch.stack(loss_all).sum(0)
                loss_q.backward()
                optimizer.step()
                task_count = 0
                loss_all = []
                optimizer.zero_grad()
            if i % print_freq==0:
                print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f}'.format(epoch, i, len(train_loader), avg_loss/float(i+1)))
        acc_all = np.asarray(acc_all)
        acc_mean = np.mean(acc_all)
        metrics = {"accuracy/train": acc_mean}
        return metrics
    def test_loop(self, test_loader, return_std = False, return_time: bool = False): #overwrite parrent function
        """Evaluate over the loader; returns [acc_mean, (acc_std), (eval_time), {}]."""
        correct = 0
        count = 0
        acc_all = []
        eval_time = 0
        iter_num = len(test_loader)
        for i, (x,_) in enumerate(test_loader):
            self.n_query = x.size(1) - self.n_support
            assert self.n_way == x.size(0), "MAML do not support way change"
            s = time()
            correct_this, count_this = self.correct(x)
            t = time()
            eval_time += (t -s)
            acc_all.append(correct_this/ count_this *100 )
        num_tasks = len(acc_all)
        acc_all = np.asarray(acc_all)
        acc_mean = np.mean(acc_all)
        acc_std = np.std(acc_all)
        print('%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num)))
        print("Num tasks", num_tasks)
        ret = [acc_mean]
        if return_std:
            ret.append(acc_std)
        if return_time:
            ret.append(eval_time)
        ret.append({})  # placeholder metrics dict, kept for caller compatibility
        return ret
    def get_logits(self, x):
        """Adapt on the support part of `x` and return query logits."""
        self.n_query = x.size(1) - self.n_support
        logits = self.set_forward(x)
        return logits
| 6,570 | 39.312883 | 176 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/meta_template.py | from collections import defaultdict
from typing import Tuple
import backbone
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
import utils
from abc import abstractmethod
class MetaTemplate(nn.Module):
    """Abstract base class for episodic few-shot methods.

    Subclasses implement `set_forward` (query scores for one episode) and
    `set_forward_loss` (training loss for one episode). Episodes arrive as
    tensors of shape (n_way, n_support + n_query, *img_dims).
    """
    def __init__(self, model_func, n_way, n_support, change_way = True):
        super(MetaTemplate, self).__init__()
        self.n_way = n_way
        self.n_support = n_support
        self.n_query = -1 #(change depends on input)
        self.feature = model_func()
        self.feat_dim = self.feature.final_feat_dim
        self.change_way = change_way #some methods allow different_way classification during training and test
    @abstractmethod
    def set_forward(self,x,is_feature):
        pass
    @abstractmethod
    def set_forward_loss(self, x):
        pass
    def forward(self,x):
        # Plain feature extraction; episodic logic lives in set_forward.
        out = self.feature.forward(x)
        return out
    def parse_feature(self,x,is_feature) -> Tuple[torch.Tensor, torch.Tensor]:
        """Split an episode into (support, query) embeddings.

        If `is_feature`, `x` is already embedded; otherwise it is run through
        the backbone first. Returns tensors of shape
        (n_way, n_support, -1) and (n_way, n_query, -1).
        """
        x = Variable(x.cuda())
        if is_feature:
            z_all = x
        else:
            x = x.contiguous().view( self.n_way * (self.n_support + self.n_query), *x.size()[2:])
            z_all = self.feature.forward(x)
            z_all = z_all.view( self.n_way, self.n_support + self.n_query, -1)
        z_support = z_all[:, :self.n_support]
        z_query = z_all[:, self.n_support:]
        return z_support, z_query
    def correct(self, x):
        """Return (number of correct top-1 query predictions, query count)."""
        scores = self.set_forward(x)
        y_query = np.repeat(range( self.n_way ), self.n_query )
        topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
        topk_ind = topk_labels.cpu().numpy()
        top1_correct = np.sum(topk_ind[:,0] == y_query)
        return float(top1_correct), len(y_query)
    def train_loop(self, epoch, train_loader, optimizer ):
        """One training epoch: one optimizer step per episode."""
        print_freq = 10
        avg_loss=0
        for i, (x,_) in enumerate(train_loader):
            self.n_query = x.size(1) - self.n_support
            if self.change_way:
                self.n_way = x.size(0)
            optimizer.zero_grad()
            loss = self.set_forward_loss( x )
            loss.backward()
            optimizer.step()
            avg_loss = avg_loss+loss.item()
            if i % print_freq==0:
                #print(optimizer.state_dict()['param_groups'][0]['lr'])
                print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f}'.format(epoch, i, len(train_loader), avg_loss/float(i+1)))
    def test_loop(self, test_loader, record = None, return_std: bool = False):
        """Evaluate over the loader; returns mean accuracy (+ std if requested) and metrics."""
        correct =0
        count = 0
        acc_all = []
        acc_at = defaultdict(list)
        iter_num = len(test_loader)
        for i, (x,_) in enumerate(test_loader):
            self.n_query = x.size(1) - self.n_support
            if self.change_way:
                self.n_way = x.size(0)
            y_query = np.repeat(range( self.n_way ), self.n_query )
            # Prefer the subclass's adaptation-aware path when it exists; any
            # failure (including AttributeError when it is not implemented)
            # falls back to plain set_forward. NOTE(review): the broad except
            # also hides genuine runtime errors in set_forward_with_adaptation.
            try:
                scores, acc_at_metrics = self.set_forward_with_adaptation(x)
                for (k,v) in acc_at_metrics.items():
                    acc_at[k].append(v)
            except Exception as e:
                scores = self.set_forward(x)
            scores = scores.reshape((self.n_way * self.n_query, self.n_way))
            topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
            topk_ind = topk_labels.cpu().numpy()
            top1_correct = np.sum(topk_ind[:,0] == y_query)
            correct_this = float(top1_correct)
            count_this = len(y_query)
            acc_all.append(correct_this/ count_this*100 )
        metrics = {
            k: np.mean(v) if len(v) > 0 else 0
            for (k,v) in acc_at.items()
        }
        acc_all = np.asarray(acc_all)
        acc_mean = np.mean(acc_all)
        acc_std = np.std(acc_all)
        print(metrics)
        print('%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num)))
        if return_std:
            return acc_mean, acc_std, metrics
        else:
            return acc_mean, metrics
    def set_forward_adaptation(self, x, is_feature = True): #further adaptation, default is fixing feature and train a new softmax clasifier
        """Freeze features and train a fresh linear classifier on the support set."""
        assert is_feature == True, 'Feature is fixed in further adaptation'
        z_support, z_query = self.parse_feature(x,is_feature)
        z_support = z_support.contiguous().view(self.n_way* self.n_support, -1 )
        z_query = z_query.contiguous().view(self.n_way* self.n_query, -1 )
        y_support = torch.from_numpy(np.repeat(range( self.n_way ), self.n_support ))
        y_support = Variable(y_support.cuda())
        linear_clf = nn.Linear(self.feat_dim, self.n_way)
        linear_clf = linear_clf.cuda()
        set_optimizer = torch.optim.SGD(linear_clf.parameters(), lr = 0.01, momentum=0.9, dampening=0.9, weight_decay=0.001)
        loss_function = nn.CrossEntropyLoss()
        loss_function = loss_function.cuda()
        batch_size = 4
        support_size = self.n_way* self.n_support
        # 100 epochs of mini-batch SGD over shuffled support samples.
        for epoch in range(100):
            rand_id = np.random.permutation(support_size)
            for i in range(0, support_size , batch_size):
                set_optimizer.zero_grad()
                selected_id = torch.from_numpy( rand_id[i: min(i+batch_size, support_size) ]).cuda()
                z_batch = z_support[selected_id]
                y_batch = y_support[selected_id]
                scores = linear_clf(z_batch)
                loss = loss_function(scores,y_batch)
                loss.backward()
                set_optimizer.step()
        scores = linear_clf(z_query)
        return scores
| 5,764 | 36.679739 | 140 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/DKT.py | ## Original packages
import backbone
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from methods.meta_template import MetaTemplate
## Our packages
import gpytorch
from time import gmtime, strftime
import random
from configs import kernel_type
from models import gp_kernels
#Check if tensorboardx is installed
try:
from tensorboardX import SummaryWriter
IS_TBX_INSTALLED = True
except ImportError:
IS_TBX_INSTALLED = False
print('[WARNING] install tensorboardX to record simulation logs.')
## Training CMD
#ATTENTION: to test each method use exaclty the same command but replace 'train.py' with 'test.py'
# Omniglot->EMNIST without data augmentation
#python3 train.py --dataset="cross_char" --method="DKT" --train_n_way=5 --test_n_way=5 --n_shot=1
#python3 train.py --dataset="cross_char" --method="DKT" --train_n_way=5 --test_n_way=5 --n_shot=5
# CUB + data augmentation
#python3 train.py --dataset="CUB" --method="DKT" --train_n_way=5 --test_n_way=5 --n_shot=1 --train_aug
#python3 train.py --dataset="CUB" --method="DKT" --train_n_way=5 --test_n_way=5 --n_shot=5 --train_aug
class DKT(MetaTemplate):
def __init__(self, model_func, n_way, n_support):
super(DKT, self).__init__(model_func, n_way, n_support)
## GP parameters
self.leghtscale_list = None
self.noise_list = None
self.outputscale_list = None
self.iteration = 0
self.writer=None
self.feature_extractor = self.feature
self.get_model_likelihood_mll() #Init model, likelihood, and mll
if(kernel_type=="cossim"):
self.normalize=True
elif(kernel_type=="bncossim"):
self.normalize=True
latent_size = np.prod(self.feature_extractor.final_feat_dim)
self.feature_extractor.trunk.add_module("bn_out", nn.BatchNorm1d(latent_size))
else:
self.normalize=False
def init_summary(self):
if(IS_TBX_INSTALLED):
time_string = strftime("%d%m%Y_%H%M%S", gmtime())
writer_path = "./log/" + time_string
self.writer = SummaryWriter(log_dir=writer_path)
def get_model_likelihood_mll(self, train_x_list=None, train_y_list=None):
if(train_x_list is None): train_x_list=[torch.ones(100, 64).cuda()]*self.n_way
if(train_y_list is None): train_y_list=[torch.ones(100).cuda()]*self.n_way
model_list = list()
likelihood_list = list()
for train_x, train_y in zip(train_x_list, train_y_list):
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPLayer(train_x=train_x, train_y=train_y, likelihood=likelihood, kernel=kernel_type)
model_list.append(model)
likelihood_list.append(model.likelihood)
self.model = gpytorch.models.IndependentModelList(*model_list).cuda()
self.likelihood = gpytorch.likelihoods.LikelihoodList(*likelihood_list).cuda()
self.mll = gpytorch.mlls.SumMarginalLogLikelihood(self.likelihood, self.model).cuda()
return self.model, self.likelihood, self.mll
def set_forward(self, x, is_feature=False):
pass
def set_forward_loss(self, x):
pass
def _reset_likelihood(self, debug=False):
for param in self.likelihood.parameters():
param.data.normal_(0.0, 0.01)
def _print_weights(self):
for k, v in self.feature_extractor.state_dict().items():
print("Layer {}".format(k))
print(v)
def _reset_variational(self):
mean_init = torch.zeros(128) #num_inducing_points
covar_init = torch.eye(128, 128) #num_inducing_points
mean_init = mean_init.repeat(64, 1) #batch_shape
covar_init = covar_init.repeat(64, 1, 1) #batch_shape
for idx, param in enumerate(self.gp_layer.variational_parameters()):
if(idx==0): param.data.copy_(mean_init) #"variational_mean"
elif(idx==1): param.data.copy_(covar_init) #"chol_variational_covar"
else: raise ValueError('[ERROR] DKT the variational_parameters at index>1 should not exist!')
def _reset_parameters(self):
if(self.leghtscale_list is None):
self.leghtscale_list = list()
self.noise_list = list()
self.outputscale_list = list()
for idx, single_model in enumerate(self.model.models):
self.leghtscale_list.append(single_model.covar_module.base_kernel.lengthscale.clone().detach())
self.noise_list.append(single_model.likelihood.noise.clone().detach())
self.outputscale_list.append(single_model.covar_module.outputscale.clone().detach())
else:
for idx, single_model in enumerate(self.model.models):
single_model.covar_module.base_kernel.lengthscale=self.leghtscale_list[idx].clone().detach()#.requires_grad_(True)
single_model.likelihood.noise=self.noise_list[idx].clone().detach()
single_model.covar_module.outputscale=self.outputscale_list[idx].clone().detach()
def train_loop(self, epoch, train_loader, optimizer, print_freq=10):
optimizer = torch.optim.Adam([{'params': self.model.parameters(), 'lr': 1e-4},
{'params': self.feature_extractor.parameters(), 'lr': 1e-3}])
for i, (x,_) in enumerate(train_loader):
self.n_query = x.size(1) - self.n_support
if self.change_way: self.n_way = x.size(0)
x_all = x.contiguous().view(self.n_way * (self.n_support + self.n_query), *x.size()[2:]).cuda()
y_all = Variable(torch.from_numpy(np.repeat(range(self.n_way), self.n_query+self.n_support)).cuda())
x_support = x[:,:self.n_support,:,:,:].contiguous().view(self.n_way * (self.n_support), *x.size()[2:]).cuda()
y_support = np.repeat(range(self.n_way), self.n_support)
x_query = x[:,self.n_support:,:,:,:].contiguous().view(self.n_way * (self.n_query), *x.size()[2:]).cuda()
y_query = np.repeat(range(self.n_way), self.n_query)
x_train = x_all
y_train = y_all
target_list = list()
samples_per_model = int(len(y_train) / self.n_way) #25 / 5 = 5
for way in range(self.n_way):
target = torch.ones(len(y_train), dtype=torch.float32) * -1.0
start_index = way * samples_per_model
stop_index = start_index+samples_per_model
target[start_index:stop_index] = 1.0
target_list.append(target.cuda())
self.model.train()
self.likelihood.train()
self.feature_extractor.train()
z_train = self.feature_extractor.forward(x_train)
if(self.normalize): z_train = F.normalize(z_train, p=2, dim=1)
train_list = [z_train]*self.n_way
lenghtscale = 0.0
noise = 0.0
outputscale = 0.0
for idx, single_model in enumerate(self.model.models):
single_model.set_train_data(inputs=z_train, targets=target_list[idx], strict=False)
if(single_model.covar_module.base_kernel.lengthscale is not None):
lenghtscale+=single_model.covar_module.base_kernel.lengthscale.mean().cpu().detach().numpy().squeeze()
noise+=single_model.likelihood.noise.cpu().detach().numpy().squeeze()
if(single_model.covar_module.outputscale is not None):
outputscale+=single_model.covar_module.outputscale.cpu().detach().numpy().squeeze()
if(single_model.covar_module.base_kernel.lengthscale is not None): lenghtscale /= float(len(self.model.models))
noise /= float(len(self.model.models))
if(single_model.covar_module.outputscale is not None): outputscale /= float(len(self.model.models))
## Optimize
optimizer.zero_grad()
output = self.model(*self.model.train_inputs)
loss = -self.mll(output, self.model.train_targets)
loss.backward()
optimizer.step()
self.iteration = i+(epoch*len(train_loader))
if(self.writer is not None): self.writer.add_scalar('loss', loss, self.iteration)
#Eval on the query (validation set)
with torch.no_grad():
self.model.eval()
self.likelihood.eval()
self.feature_extractor.eval()
z_support = self.feature_extractor.forward(x_support).detach()
if(self.normalize): z_support = F.normalize(z_support, p=2, dim=1)
z_support_list = [z_support]*len(y_support)
predictions = self.likelihood(*self.model(*z_support_list)) #return 20 MultiGaussian Distributions
predictions_list = list()
for gaussian in predictions:
predictions_list.append(torch.sigmoid(gaussian.mean).cpu().detach().numpy())
y_pred = np.vstack(predictions_list).argmax(axis=0) #[model, classes]
accuracy_support = (np.sum(y_pred==y_support) / float(len(y_support))) * 100.0
if(self.writer is not None): self.writer.add_scalar('GP_support_accuracy', accuracy_support, self.iteration)
z_query = self.feature_extractor.forward(x_query).detach()
if(self.normalize): z_query = F.normalize(z_query, p=2, dim=1)
z_query_list = [z_query]*len(y_query)
predictions = self.likelihood(*self.model(*z_query_list)) #return 20 MultiGaussian Distributions
predictions_list = list()
for gaussian in predictions:
predictions_list.append(torch.sigmoid(gaussian.mean).cpu().detach().numpy())
y_pred = np.vstack(predictions_list).argmax(axis=0) #[model, classes]
accuracy_query = (np.sum(y_pred==y_query) / float(len(y_query))) * 100.0
if(self.writer is not None): self.writer.add_scalar('GP_query_accuracy', accuracy_query, self.iteration)
if i % print_freq==0:
if(self.writer is not None): self.writer.add_histogram('z_support', z_support, self.iteration)
print('Epoch [{:d}] [{:d}/{:d}] | Outscale {:f} | Lenghtscale {:f} | Noise {:f} | Loss {:f} | Supp. {:f} | Query {:f}'.format(epoch, i, len(train_loader), outputscale, lenghtscale, noise, loss.item(), accuracy_support, accuracy_query))
def correct(self, x, N=0, laplace=False):
##Dividing input x in query and support set
x_support = x[:,:self.n_support,:,:,:].contiguous().view(self.n_way * (self.n_support), *x.size()[2:]).cuda()
y_support = torch.from_numpy(np.repeat(range(self.n_way), self.n_support)).cuda()
x_query = x[:,self.n_support:,:,:,:].contiguous().view(self.n_way * (self.n_query), *x.size()[2:]).cuda()
y_query = np.repeat(range(self.n_way), self.n_query)
## Laplace approximation of the posterior
if(laplace):
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, Matern
from sklearn.gaussian_process.kernels import ConstantKernel as C
kernel = 1.0 * RBF(length_scale=0.1 , length_scale_bounds=(0.1, 10.0))
gp = GaussianProcessClassifier(kernel=kernel, optimizer=None)
z_support = self.feature_extractor.forward(x_support).detach()
if(self.normalize): z_support = F.normalize(z_support, p=2, dim=1)
gp.fit(z_support.cpu().detach().numpy(), y_support.cpu().detach().numpy())
z_query = self.feature_extractor.forward(x_query).detach()
if(self.normalize): z_query = F.normalize(z_query, p=2, dim=1)
y_pred = gp.predict(z_query.cpu().detach().numpy())
accuracy = (np.sum(y_pred==y_query) / float(len(y_query))) * 100.0
top1_correct = np.sum(y_pred==y_query)
count_this = len(y_query)
return float(top1_correct), count_this, 0.0
x_train = x_support
y_train = y_support
target_list = list()
samples_per_model = int(len(y_train) / self.n_way)
for way in range(self.n_way):
target = torch.ones(len(y_train), dtype=torch.float32) * -1.0
start_index = way * samples_per_model
stop_index = start_index+samples_per_model
target[start_index:stop_index] = 1.0
target_list.append(target.cuda())
z_train = self.feature_extractor.forward(x_train).detach() #[340, 64]
if(self.normalize): z_train = F.normalize(z_train, p=2, dim=1)
train_list = [z_train]*self.n_way
for idx, single_model in enumerate(self.model.models):
single_model.set_train_data(inputs=z_train, targets=target_list[idx], strict=False)
optimizer = torch.optim.Adam([{'params': self.model.parameters()}], lr=1e-3)
self.model.train()
self.likelihood.train()
self.feature_extractor.eval()
avg_loss=0.0
for i in range(0, N):
## Optimize
optimizer.zero_grad()
output = self.model(*self.model.train_inputs)
loss = -self.mll(output, self.model.train_targets)
loss.backward()
optimizer.step()
avg_loss = avg_loss+loss.item()
with torch.no_grad(), gpytorch.settings.num_likelihood_samples(32):
self.model.eval()
self.likelihood.eval()
self.feature_extractor.eval()
z_query = self.feature_extractor.forward(x_query).detach()
if(self.normalize): z_query = F.normalize(z_query, p=2, dim=1)
z_query_list = [z_query]*len(y_query)
predictions = self.likelihood(*self.model(*z_query_list)) #return n_way MultiGaussians
predictions_list = list()
for gaussian in predictions:
predictions_list.append(torch.sigmoid(gaussian.mean).cpu().detach().numpy())
y_pred = np.vstack(predictions_list).argmax(axis=0) #[model, classes]
top1_correct = np.sum(y_pred == y_query)
count_this = len(y_query)
return float(top1_correct), count_this, avg_loss/float(N+1e-10)
def test_loop(self, test_loader, record=None, return_std=False):
print_freq = 10
correct =0
count = 0
acc_all = []
iter_num = len(test_loader)
for i, (x,_) in enumerate(test_loader):
self.n_query = x.size(1) - self.n_support
if self.change_way:
self.n_way = x.size(0)
correct_this, count_this, loss_value = self.correct(x)
acc_all.append(correct_this/ count_this*100)
if(i % 100==0):
acc_mean = np.mean(np.asarray(acc_all))
print('Test | Batch {:d}/{:d} | Loss {:f} | Acc {:f}'.format(i, len(test_loader), loss_value, acc_mean))
acc_all = np.asarray(acc_all)
acc_mean = np.mean(acc_all)
acc_std = np.std(acc_all)
print('%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num)))
if(self.writer is not None): self.writer.add_scalar('test_accuracy', acc_mean, self.iteration)
if(return_std): return acc_mean, acc_std
else: return acc_mean
    def get_logits(self, x):
        """Condition the per-class GPs on the support set and return the raw
        query predictions (stacked posterior means) as a tensor.

        x: [n_way, n_support + n_query, C, H, W]; returns [n_query_total, n_way].
        """
        self.n_query = x.size(1) - self.n_support
        ##Dividing input x in query and support set
        x_support = x[:,:self.n_support,:,:,:].contiguous().view(self.n_way * (self.n_support), *x.size()[2:]).cuda()
        y_support = torch.from_numpy(np.repeat(range(self.n_way), self.n_support)).cuda()
        x_query = x[:,self.n_support:,:,:,:].contiguous().view(self.n_way * (self.n_query), *x.size()[2:]).cuda()
        y_query = np.repeat(range(self.n_way), self.n_query)
        # Init to dummy values
        x_train = x_support
        y_train = y_support
        # One binary one-vs-rest target vector (+1 / -1) per class ("way").
        target_list = list()
        samples_per_model = int(len(y_train) / self.n_way)
        for way in range(self.n_way):
            target = torch.ones(len(y_train), dtype=torch.float32) * -1.0
            start_index = way * samples_per_model
            stop_index = start_index+samples_per_model
            target[start_index:stop_index] = 1.0
            target_list.append(target.cuda())
        z_train = self.feature_extractor.forward(x_train).detach() #[340, 64]
        if(self.normalize): z_train = F.normalize(z_train, p=2, dim=1)
        train_list = [z_train]*self.n_way  # NOTE(review): built but unused below
        # Condition each per-class GP on the same features with its own targets.
        for idx, single_model in enumerate(self.model.models):
            single_model.set_train_data(inputs=z_train, targets=target_list[idx], strict=False)
        with torch.no_grad(), gpytorch.settings.num_likelihood_samples(32):
            self.model.eval()
            self.likelihood.eval()
            self.feature_extractor.eval()
            z_query = self.feature_extractor.forward(x_query).detach()
            if(self.normalize): z_query = F.normalize(z_query, p=2, dim=1)
            z_query_list = [z_query]*len(y_query)
            predictions = self.likelihood(*self.model(*z_query_list)) #return n_way MultiGaussians
            predictions_list = list()
            for gaussian in predictions:
                predictions_list.append(gaussian.mean) #.cpu().detach().numpy())
            # [n_query_total, n_way]: one column of posterior means per class GP.
            y_pred = torch.stack(predictions_list, 1)
        return y_pred
class ExactGPLayer(gpytorch.models.ExactGP):
    '''
    Exact GP layer used as the per-class head in the DKT classifier.

    Parameters learned by the model:
        likelihood.noise_covar.raw_noise
        covar_module.raw_outputscale
        covar_module.base_kernel.raw_lengthscale

    The ``kernel`` string selects the covariance function; the noise of the
    supplied likelihood is fixed to 0.1 and excluded from learning.
    '''
    def __init__(self, train_x, train_y, likelihood, kernel='linear'):
        #Set the likelihood noise and enable/disable learning
        # NOTE: the likelihood is mutated *before* super().__init__ registers it.
        likelihood.noise_covar.raw_noise.requires_grad = False
        likelihood.noise_covar.noise = torch.tensor(0.1)
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        ## Linear kernel
        if(kernel=='linear'):
            self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.LinearKernel())
        ## RBF kernel
        elif(kernel=='rbf' or kernel=='RBF'):
            self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
        ## Matern kernel
        elif(kernel=='matern'):
            self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.MaternKernel())
        ## Polynomial (p=1)
        elif(kernel=='poli1'):
            self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PolynomialKernel(power=1))
        ## Polynomial (p=2)
        elif(kernel=='poli2'):
            self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PolynomialKernel(power=2))
        elif(kernel=='cossim' or kernel=='bncossim'):
            ## Cosine distance and BatchNorm Cosine distance
            # A linear kernel with variance pinned to 1.0 on (normalized)
            # features is equivalent to cosine similarity.
            self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.LinearKernel())
            self.covar_module.base_kernel.variance = 1.0
            self.covar_module.base_kernel.raw_variance.requires_grad = False
        elif(kernel=='nn'):
            # Deep (neural network) kernel; dimensions are hard-coded for the
            # Conv4 backbone's 1600-dim features.
            self.input_dim = 1600
            self.output_dim = 1600
            self.num_layers = 4
            self.hidden_dim = 64
            kernel = gp_kernels.NNKernel(input_dim=self.input_dim,
                                         output_dim=self.output_dim,
                                         num_layers=self.num_layers,
                                         hidden_dim=self.hidden_dim)
            self.covar_module = gpytorch.kernels.ScaleKernel(kernel)
        else:
            raise ValueError("[ERROR] the kernel '" + str(kernel) + "' is not supported!")
    def forward(self, x):
        """Return the GP prior/posterior as a MultivariateNormal over x."""
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
| 20,017 | 50.328205 | 251 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/protonet.py | # This code is modified from https://github.com/jakesnell/prototypical-networks
import backbone
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from methods.meta_template import MetaTemplate
class ProtoNet(MetaTemplate):
    """Prototypical Networks (Snell et al.): queries are classified by the
    negative squared Euclidean distance to each class's mean support
    embedding (the "prototype")."""

    def __init__(self, model_func, n_way, n_support, n_query=None):
        super(ProtoNet, self).__init__(model_func, n_way, n_support)
        self.loss_fn = nn.CrossEntropyLoss()

    def set_forward(self, x, is_feature=False):
        """Return classification scores of shape [n_way * n_query, n_way]."""
        z_support, z_query = self.parse_feature(x, is_feature)
        # One prototype per class: average over the n_support embeddings.
        prototypes = z_support.contiguous().view(self.n_way, self.n_support, -1).mean(1)
        queries = z_query.contiguous().view(self.n_way * self.n_query, -1)
        # Higher score <=> smaller distance to the prototype.
        return -euclidean_dist(queries, prototypes)

    def set_forward_loss(self, x):
        """Cross-entropy loss over the query set of a single episode."""
        labels = np.repeat(range(self.n_way), self.n_query)
        y_query = Variable(torch.from_numpy(labels).cuda())
        return self.loss_fn(self.set_forward(x), y_query)
def euclidean_dist( x, y):
    """Pairwise squared Euclidean distances.

    x: [N, D], y: [M, D]  ->  [N, M] with out[i, j] = ||x_i - y_j||^2.
    """
    n, d = x.size(0), x.size(1)
    m = y.size(0)
    assert d == y.size(1)
    # Broadcast both operands to [N, M, D] and reduce over the feature axis.
    diff = x.unsqueeze(1).expand(n, m, d) - y.unsqueeze(0).expand(n, m, d)
    return diff.pow(2).sum(2)
| 1,434 | 27.7 | 112 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/baselinetrain.py | import backbone
import utils
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
class BaselineTrain(nn.Module):
    """Standard supervised pre-training of the backbone plus a classifier
    head ('softmax' for Baseline, 'dist' cosine head for Baseline++)."""

    def __init__(self, model_func, num_class, loss_type = 'softmax'):
        super(BaselineTrain, self).__init__()
        self.feature = model_func()
        if loss_type == 'softmax':
            self.classifier = nn.Linear(self.feature.final_feat_dim, num_class)
            self.classifier.bias.data.fill_(0)
        elif loss_type == 'dist': #Baseline ++
            self.classifier = backbone.distLinear(self.feature.final_feat_dim, num_class)
        self.loss_type = loss_type #'softmax' #'dist'
        self.num_class = num_class
        self.loss_fn = nn.CrossEntropyLoss()
    def forward(self,x):
        """Return class scores [batch, num_class] for a batch of images."""
        x = Variable(x.cuda())
        out = self.feature.forward(x)
        scores = self.classifier.forward(out)
        return scores
    def forward_loss(self, x, y):
        """Cross-entropy loss for one (images, labels) batch."""
        scores = self.forward(x)
        y = Variable(y.cuda())
        return self.loss_fn(scores, y )
    def train_loop(self, epoch, train_loader, optimizer):
        """One epoch of SGD; prints a running-average loss every print_freq batches."""
        print_freq = 10
        avg_loss=0
        for i, (x,y) in enumerate(train_loader):
            optimizer.zero_grad()
            loss = self.forward_loss(x, y)
            loss.backward()
            optimizer.step()
            avg_loss = avg_loss+loss.data.item()
            if i % print_freq==0:
                #print(optimizer.state_dict()['param_groups'][0]['lr'])
                print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f}'.format(epoch, i, len(train_loader), avg_loss/float(i+1) ))
    def test_loop(self, val_loader):
        return -1 #no validation, just save model during iteration
| 1,780 | 32.603774 | 124 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/baselinefinetune.py | import backbone
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from methods.meta_template import MetaTemplate
class BaselineFinetune(MetaTemplate):
    """Evaluation-time Baseline/Baseline++: fit a fresh linear classifier on
    the (pre-extracted) support features of each episode, then score queries.
    The backbone itself is never fine-tuned here."""

    def __init__(self, model_func, n_way, n_support, loss_type = "softmax"):
        super(BaselineFinetune, self).__init__( model_func, n_way, n_support)
        self.loss_type = loss_type
    def set_forward(self,x,is_feature = True):
        return self.set_forward_adaptation(x,is_feature); #Baseline always do adaptation
    def set_forward_adaptation(self,x,is_feature = True):
        """Train a disposable classifier on the support set (100 epochs of
        mini-batch SGD) and return its scores on the query set."""
        assert is_feature == True, 'Baseline only support testing with feature'
        z_support, z_query = self.parse_feature(x,is_feature)
        z_support = z_support.contiguous().view(self.n_way* self.n_support, -1 )
        z_query = z_query.contiguous().view(self.n_way* self.n_query, -1 )
        y_support = torch.from_numpy(np.repeat(range( self.n_way ), self.n_support ))
        y_support = Variable(y_support.cuda())
        # Head choice mirrors training: plain linear vs cosine-distance layer.
        if self.loss_type == 'softmax':
            linear_clf = nn.Linear(self.feat_dim, self.n_way)
        elif self.loss_type == 'dist':
            linear_clf = backbone.distLinear(self.feat_dim, self.n_way)
        linear_clf = linear_clf.cuda()
        set_optimizer = torch.optim.SGD(linear_clf.parameters(), lr = 0.01, momentum=0.9, dampening=0.9, weight_decay=0.001)
        loss_function = nn.CrossEntropyLoss()
        loss_function = loss_function.cuda()
        batch_size = 4
        support_size = self.n_way* self.n_support
        for epoch in range(100):
            # Re-shuffle the support examples each epoch.
            rand_id = np.random.permutation(support_size)
            for i in range(0, support_size , batch_size):
                set_optimizer.zero_grad()
                selected_id = torch.from_numpy( rand_id[i: min(i+batch_size, support_size) ]).cuda()
                z_batch = z_support[selected_id]
                y_batch = y_support[selected_id]
                scores = linear_clf(z_batch)
                loss = loss_function(scores,y_batch)
                loss.backward()
                set_optimizer.step()
        scores = linear_clf(z_query)
        return scores
    def set_forward_loss(self,x):
        raise ValueError('Baseline predict on pretrained feature and do not support finetune backbone')
| 2,381 | 39.372881 | 124 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/kernel_convolutions.py | import torch
import torch.nn as nn
class KernelConv(nn.Module):
    """Small CNN that summarizes a support-support kernel (Gram) matrix,
    viewed as a one-channel image, into a fixed-size feature vector.

    The conv stack (and hence the flattened size feeding the linear head)
    depends on the shot count, because the kernel matrix side length is
    n_way * n_shot.
    """

    def __init__(self, n_shot, hn_kernel_convolution_output_dim):
        super(KernelConv, self).__init__()
        if n_shot == 5:
            self.conv = nn.Sequential(
                nn.Conv2d(1, 2, kernel_size=(5, 5)),
                nn.ReLU(inplace=True),
                nn.Conv2d(2, 3, kernel_size=(3, 3)),
                nn.ReLU(inplace=True),
                nn.Conv2d(3, 5, kernel_size=(2, 2)),
                nn.ReLU(inplace=True)
            )
            self.fc = nn.Linear(5 * 18 * 18, hn_kernel_convolution_output_dim)
        else:
            self.conv = nn.Sequential(
                nn.Conv2d(1, 2, kernel_size=(2, 2)),
                nn.ReLU(inplace=True),
                nn.Conv2d(2, 3, kernel_size=(2, 2)),
                nn.ReLU(inplace=True),
                nn.Conv2d(3, 5, kernel_size=(2, 2)),
                nn.ReLU(inplace=True)
            )
            self.fc = nn.Linear(5 * 2 * 2, hn_kernel_convolution_output_dim)

    def forward(self, x):
        """x: [B, 1, S, S] kernel image -> [B, hn_kernel_convolution_output_dim]."""
        features = self.conv(x)
        flat = torch.flatten(features, start_dim=1, end_dim=-1)
        return self.fc(flat)
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/transformer.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def scaled_dot_product(q, k, v, mask=None):
    """Standard scaled dot-product attention.

    Returns ``(values, attention)`` where
    ``attention = softmax(q @ k^T / sqrt(d_k))`` and
    ``values = attention @ v``. Positions where ``mask == 0`` are
    suppressed with a large negative logit before the softmax.
    """
    scale = math.sqrt(q.size()[-1])
    logits = torch.matmul(q, k.transpose(-2, -1)) / scale
    if mask is not None:
        logits = logits.masked_fill(mask == 0, -9e15)
    weights = F.softmax(logits, dim=-1)
    return torch.matmul(weights, v), weights
class MultiheadAttention(nn.Module):
    """Multi-head self-attention with a single fused QKV projection."""

    def __init__(self, input_dim, embed_dim, num_heads):
        super().__init__()
        assert embed_dim % num_heads == 0, "Embedding dimension must be 0 modulo number of heads."
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads
        # Stack all weight matrices 1...h together for efficiency
        # Note that in many implementations you see "bias=False" which is optional
        self.qkv_proj = nn.Linear(input_dim, 3*embed_dim)
        self.o_proj = nn.Linear(embed_dim, embed_dim)
        self._reset_parameters()

    def _reset_parameters(self):
        # Original Transformer initialization, see PyTorch documentation
        nn.init.xavier_uniform_(self.qkv_proj.weight)
        self.qkv_proj.bias.data.fill_(0)
        nn.init.xavier_uniform_(self.o_proj.weight)
        self.o_proj.bias.data.fill_(0)

    def forward(self, x, mask=None, return_attention=False):
        """x: [batch, seq_len, input_dim] -> [batch, seq_len, embed_dim]
        (plus the attention tensor when ``return_attention`` is True)."""
        batch, seq_len, feat = x.size()
        fused = self.qkv_proj(x)
        # [Batch, Head, SeqLen, 3 * head_dim], then split into q / k / v.
        fused = fused.reshape(batch, seq_len, self.num_heads, 3 * self.head_dim)
        fused = fused.permute(0, 2, 1, 3)
        q, k, v = fused.chunk(3, dim=-1)
        ctx, attn = scaled_dot_product(q, k, v, mask=mask)
        # Merge the heads back: [Batch, SeqLen, embed_dim].
        ctx = ctx.permute(0, 2, 1, 3).reshape(batch, seq_len, feat)
        out = self.o_proj(ctx)
        if return_attention:
            return out, attn
        return out
class EncoderBlock(nn.Module):
    """One post-norm Transformer encoder block: self-attention and a
    position-wise MLP, each wrapped in residual + dropout + LayerNorm."""

    def __init__(self, input_dim, num_heads, dim_feedforward, dropout=0.0):
        """
        Inputs:
            input_dim - Dimensionality of the input
            num_heads - Number of heads to use in the attention block
            dim_feedforward - Dimensionality of the hidden layer in the MLP
            dropout - Dropout probability to use in the dropout layers
        """
        super().__init__()
        # Self-attention sub-layer.
        self.self_attn = MultiheadAttention(input_dim, input_dim, num_heads)
        # Position-wise two-layer MLP.
        self.linear_net = nn.Sequential(
            nn.Linear(input_dim, dim_feedforward),
            nn.Dropout(dropout),
            nn.ReLU(inplace=True),
            nn.Linear(dim_feedforward, input_dim)
        )
        # Post-sublayer normalization and shared dropout.
        self.norm1 = nn.LayerNorm(input_dim)
        self.norm2 = nn.LayerNorm(input_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask=None):
        # Attention sub-layer: residual, dropout, then LayerNorm.
        x = self.norm1(x + self.dropout(self.self_attn(x, mask=mask)))
        # MLP sub-layer: same residual/norm pattern.
        x = self.norm2(x + self.dropout(self.linear_net(x)))
        return x
class TransformerEncoder(nn.Module):
    """A stack of ``num_layers`` EncoderBlocks applied sequentially."""

    def __init__(self, num_layers, **block_args):
        super().__init__()
        self.layers = nn.ModuleList([EncoderBlock(**block_args) for _ in range(num_layers)])

    def forward(self, x, mask=None):
        """Run ``x`` through every encoder block in order."""
        for l in self.layers:
            x = l(x, mask=mask)
        return x

    def get_attention_maps(self, x, mask=None):
        """Return the per-layer attention maps for input ``x``.

        Bug fix: the original propagated activations with ``l(x)``,
        silently dropping ``mask``, so every layer after the first saw
        activations computed *without* masking while its attention map
        was computed *with* it. The mask is now forwarded consistently.
        """
        attention_maps = []
        for l in self.layers:
            _, attn_map = l.self_attn(x, mask=mask, return_attention=True)
            attention_maps.append(attn_map)
            x = l(x, mask=mask)
        return attention_maps
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/feature_transfer_regression.py | import numpy as np
import gpytorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import backbone
from torch.autograd import Variable
from data.qmul_loader import get_batch, train_people, test_people
class Regressor(nn.Module):
    """Single linear head mapping a 2916-dim feature vector to a scalar."""

    def __init__(self):
        super(Regressor, self).__init__()
        self.layer4 = nn.Linear(2916, 1)

    def return_clones(self):
        """Return detached copies ``[weight, bias]`` of the head parameters.

        Bug fix: the original computed both clones but had no return
        statement, so callers always received ``None``.
        """
        layer4_w = self.layer4.weight.data.clone().detach()
        layer4_b = self.layer4.bias.data.clone().detach()
        return [layer4_w, layer4_b]

    def assign_clones(self, weights_list):
        """Restore ``[weight, bias]`` previously produced by return_clones().

        Bug fix: the original copied ``weights_list[1]`` into the *weight*
        a second time, clobbering the restored weights and never touching
        the bias.
        """
        self.layer4.weight.data.copy_(weights_list[0])
        self.layer4.bias.data.copy_(weights_list[1])

    def forward(self, x):
        """x: [batch, 2916] -> [batch, 1]."""
        out = self.layer4(x)
        return out
class FeatureTransfer(nn.Module):
    """Transfer-learning baseline for QMUL head-pose regression: a frozen-ish
    backbone feeding a linear Regressor head trained with MSE."""

    def __init__(self, backbone):
        super(FeatureTransfer, self).__init__()
        regressor = Regressor()  # NOTE(review): local is never used; self.model below is the real head
        self.feature_extractor = backbone
        self.model = Regressor()
        self.criterion = nn.MSELoss()
    def train_loop(self, epoch, optimizer):
        """Train on one sampled batch of people; every 10 epochs prints the
        loss of the last person processed."""
        batch, batch_labels = get_batch(train_people)
        batch, batch_labels = batch.cuda(), batch_labels.cuda()
        for inputs, labels in zip(batch, batch_labels):
            optimizer.zero_grad()
            output = self.model(self.feature_extractor(inputs))
            loss = self.criterion(output, labels)
            loss.backward()
            optimizer.step()
        if(epoch%10==0):
            print('[%d] - Loss: %.3f' % (
                epoch, loss.item()
            ))
    def test_loop(self, n_support, optimizer): # we need optimizer to take one gradient step
        """Adapt on a random test person's support set with a single gradient
        step, then return the MSE over all 19 of that person's images."""
        inputs, targets = get_batch(test_people)
        # Random support/query split over the 19 images per person.
        support_ind = list(np.random.choice(list(range(19)), replace=False, size=n_support))
        query_ind = [i for i in range(19) if i not in support_ind]
        x_all = inputs.cuda()
        y_all = targets.cuda()
        x_support = inputs[:,support_ind,:,:,:].cuda()
        y_support = targets[:,support_ind].cuda()
        x_query = inputs[:,query_ind,:,:,:].cuda()  # NOTE(review): query split unused; evaluation uses x_all/y_all
        y_query = targets[:,query_ind].cuda()
        # choose a random test person
        n = np.random.randint(0, len(test_people)-1)
        optimizer.zero_grad()
        z_support = self.feature_extractor(x_support[n]).detach()
        output_support = self.model(z_support).squeeze()
        loss = self.criterion(output_support, y_support[n])
        loss.backward()
        optimizer.step()
        self.feature_extractor.eval()
        self.model.eval()
        z_all = self.feature_extractor(x_all[n]).detach()
        output_all = self.model(z_all).squeeze()
        return self.criterion(output_all, y_all[n])
    def save_checkpoint(self, checkpoint):
        """Persist backbone and head state dicts to ``checkpoint``."""
        torch.save({'feature_extractor': self.feature_extractor.state_dict(), 'model':self.model.state_dict()}, checkpoint)
    def load_checkpoint(self, checkpoint):
        """Restore backbone and head from a checkpoint written by save_checkpoint."""
        ckpt = torch.load(checkpoint)
        self.feature_extractor.load_state_dict(ckpt['feature_extractor'])
        self.model.load_state_dict(ckpt['model'])
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/__init__.py | from . import meta_template
from . import baselinetrain
from . import baselinefinetune
from . import matchingnet
from . import protonet
from . import relationnet
from . import maml
from . import transformer
from . import kernels
from . import kernel_convolutions | 263 | 25.4 | 33 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/DKT_regression.py | ## Original packages
import backbone
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import math
import torch.nn.functional as F
## Our packages
import gpytorch
from time import gmtime, strftime
import random
from statistics import mean
from data.qmul_loader import get_batch, train_people, test_people
from configs import kernel_type
class DKT(nn.Module):
    """Deep Kernel Transfer for QMUL regression: a CNN backbone feeding an
    exact GP whose marginal log-likelihood trains both jointly."""

    def __init__(self, backbone):
        super(DKT, self).__init__()
        ## GP parameters
        self.feature_extractor = backbone
        self.get_model_likelihood_mll() #Init model, likelihood, and mll
    def get_model_likelihood_mll(self, train_x=None, train_y=None):
        """Create (or re-create) the GP, its Gaussian likelihood and the MLL
        objective; dummy train data (19 x 2916) is used if none is given."""
        if(train_x is None): train_x=torch.ones(19, 2916).cuda()
        if(train_y is None): train_y=torch.ones(19).cuda()
        likelihood = gpytorch.likelihoods.GaussianLikelihood()
        model = ExactGPLayer(train_x=train_x, train_y=train_y, likelihood=likelihood, kernel=kernel_type)
        self.model = model.cuda()
        self.likelihood = likelihood.cuda()
        self.mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self.model).cuda()
        self.mse = nn.MSELoss()
        return self.model, self.likelihood, self.mll
    def set_forward(self, x, is_feature=False):
        # Unused: kept for interface compatibility with the meta-template API.
        pass
    def set_forward_loss(self, x):
        # Unused: kept for interface compatibility with the meta-template API.
        pass
    def train_loop(self, epoch, optimizer):
        """One epoch: per person, condition the GP on that person's features
        and maximize the exact marginal log-likelihood."""
        batch, batch_labels = get_batch(train_people)
        batch, batch_labels = batch.cuda(), batch_labels.cuda()
        for inputs, labels in zip(batch, batch_labels):
            optimizer.zero_grad()
            z = self.feature_extractor(inputs)
            self.model.set_train_data(inputs=z, targets=labels)
            predictions = self.model(z)
            loss = -self.mll(predictions, self.model.train_targets)
            loss.backward()
            optimizer.step()
            mse = self.mse(predictions.mean, labels)
        if (epoch%10==0):
            # Reports the last person's loss/MSE of this epoch.
            print('[%d] - Loss: %.3f MSE: %.3f noise: %.3f' % (
                epoch, loss.item(), mse.item(),
                self.model.likelihood.noise.item()
            ))
    def test_loop(self, n_support, optimizer=None): # no optimizer needed for GP
        """Condition on a random test person's support set and return the MSE
        of the GP posterior mean over all of that person's images."""
        inputs, targets = get_batch(test_people)
        support_ind = list(np.random.choice(list(range(19)), replace=False, size=n_support))
        query_ind = [i for i in range(19) if i not in support_ind]
        x_all = inputs.cuda()
        y_all = targets.cuda()
        x_support = inputs[:,support_ind,:,:,:].cuda()
        y_support = targets[:,support_ind].cuda()
        x_query = inputs[:,query_ind,:,:,:]  # NOTE(review): query split unused; evaluation is over x_all
        y_query = targets[:,query_ind].cuda()
        # choose a random test person
        n = np.random.randint(0, len(test_people)-1)
        z_support = self.feature_extractor(x_support[n]).detach()
        self.model.set_train_data(inputs=z_support, targets=y_support[n], strict=False)
        self.model.eval()
        self.feature_extractor.eval()
        self.likelihood.eval()
        with torch.no_grad():
            z_query = self.feature_extractor(x_all[n]).detach()
            pred = self.likelihood(self.model(z_query))
            lower, upper = pred.confidence_region() #2 standard deviations above and below the mean
        mse = self.mse(pred.mean, y_all[n])
        return mse
    def save_checkpoint(self, checkpoint):
        # save state
        gp_state_dict = self.model.state_dict()
        likelihood_state_dict = self.likelihood.state_dict()
        nn_state_dict = self.feature_extractor.state_dict()
        torch.save({'gp': gp_state_dict, 'likelihood': likelihood_state_dict, 'net':nn_state_dict}, checkpoint)
    def load_checkpoint(self, checkpoint):
        """Restore GP, likelihood and backbone from a saved checkpoint."""
        ckpt = torch.load(checkpoint)
        self.model.load_state_dict(ckpt['gp'])
        self.likelihood.load_state_dict(ckpt['likelihood'])
        self.feature_extractor.load_state_dict(ckpt['net'])
class ExactGPLayer(gpytorch.models.ExactGP):
    """Exact GP head for QMUL regression over 2916-dim CNN features.

    Supported covariance functions: an RBF kernel or a 4-component
    spectral-mixture kernel with per-dimension lengthscales.
    """

    def __init__(self, train_x, train_y, likelihood, kernel='linear'):
        super(ExactGPLayer, self).__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        if kernel in ('rbf', 'RBF'):
            self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
        elif kernel == 'spectral':
            self.covar_module = gpytorch.kernels.SpectralMixtureKernel(num_mixtures=4, ard_num_dims=2916)
        else:
            raise ValueError("[ERROR] the kernel '" + str(kernel) + "' is not supported for regression, use 'rbf' or 'spectral'.")

    def forward(self, x):
        """Return the GP distribution (MultivariateNormal) over inputs x."""
        return gpytorch.distributions.MultivariateNormal(self.mean_module(x), self.covar_module(x))
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/matchingnet.py | # This code is modified from https://github.com/facebookresearch/low-shot-shrink-hallucinate
import backbone
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from methods.meta_template import MetaTemplate
import utils
import copy
class MatchingNet(MetaTemplate):
    """Matching Networks (Vinyals et al.): queries are classified by cosine
    similarity to bidirectionally-encoded support embeddings, with a fully
    contextual embedding (FCE) on the query side."""

    def __init__(self, model_func, n_way, n_support, n_query=None):
        super(MatchingNet, self).__init__( model_func, n_way, n_support)
        self.loss_fn = nn.NLLLoss()
        self.FCE = FullyContextualEmbedding(self.feat_dim)
        self.G_encoder = nn.LSTM(self.feat_dim, self.feat_dim, 1, batch_first=True, bidirectional=True)
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax()
    def encode_training_set(self, S, G_encoder = None):
        """Contextually encode the support set S [n_supp, feat_dim] with a
        bi-LSTM; returns the encoding G and its L2-normalized form."""
        if G_encoder is None:
            G_encoder = self.G_encoder
        out_G = G_encoder(S.unsqueeze(0))[0]
        out_G = out_G.squeeze(0)
        # Residual sum of the input with forward and backward LSTM halves.
        G = S + out_G[:,:S.size(1)] + out_G[:,S.size(1):]
        G_norm = torch.norm(G,p=2, dim =1).unsqueeze(1).expand_as(G)
        G_normalized = G.div(G_norm+ 0.00001)
        return G, G_normalized
    def get_logprobs(self, f, G, G_normalized, Y_S, FCE = None):
        """Log-probabilities of query features f against encoded support G,
        attending with the FCE and weighting one-hot labels Y_S."""
        if FCE is None:
            FCE = self.FCE
        F = FCE(f, G)
        F_norm = torch.norm(F,p=2, dim =1).unsqueeze(1).expand_as(F)
        F_normalized = F.div(F_norm+ 0.00001)
        #scores = F.mm(G_normalized.transpose(0,1)) #The implementation of Ross et al., but not consistent with origin paper and would cause large norm feature dominate
        scores = self.relu( F_normalized.mm(G_normalized.transpose(0,1)) ) *100 # The original paper use cosine simlarity, but here we scale it by 100 to strengthen highest probability after softmax
        softmax = self.softmax(scores)
        logprobs =(softmax.mm(Y_S)+1e-6).log()
        return logprobs
    def set_forward(self, x, is_feature = False):
        """Return query log-probabilities [n_way * n_query, n_way] for one episode."""
        z_support, z_query = self.parse_feature(x,is_feature)
        z_support = z_support.contiguous().view( self.n_way* self.n_support, -1 )
        z_query = z_query.contiguous().view( self.n_way* self.n_query, -1 )
        G, G_normalized = self.encode_training_set( z_support)
        y_s = torch.from_numpy(np.repeat(range( self.n_way ), self.n_support ))
        Y_S = Variable( utils.one_hot(y_s, self.n_way ) ).cuda()
        f = z_query
        logprobs = self.get_logprobs(f, G, G_normalized, Y_S)
        return logprobs
    def set_forward_loss(self, x):
        """Negative log-likelihood loss over the query set."""
        y_query = torch.from_numpy(np.repeat(range( self.n_way ), self.n_query ))
        y_query = Variable(y_query.cuda())
        logprobs = self.set_forward(x)
        return self.loss_fn(logprobs, y_query )
    def cuda(self):
        # Also move the FCE submodule (holds a non-parameter buffer c_0).
        super(MatchingNet, self).cuda()
        self.FCE = self.FCE.cuda()
        return self
class FullyContextualEmbedding(nn.Module):
    """Fully contextual embedding of Matching Networks: refine the query
    embedding with K steps of attention over the support encoding G,
    driven by an LSTM cell with a residual connection to the input."""

    def __init__(self, feat_dim):
        super(FullyContextualEmbedding, self).__init__()
        self.lstmcell = nn.LSTMCell(feat_dim*2, feat_dim)
        self.softmax = nn.Softmax()
        # Initial cell state; expanded to the batch size in forward().
        self.c_0 = Variable(torch.zeros(1,feat_dim))
        self.feat_dim = feat_dim
        #self.K = K
    def forward(self, f, G):
        """f: query features [n_query, feat_dim]; G: support encoding
        [K, feat_dim]. Runs K attention/LSTM refinement steps."""
        h = f
        c = self.c_0.expand_as(f)
        G_T = G.transpose(0,1)
        K = G.size(0) #Tuna to be comfirmed
        for k in range(K):
            # Attention over support rows, read vector r, then LSTM update.
            logit_a = h.mm(G_T)
            a = self.softmax(logit_a)
            r = a.mm(G)
            x = torch.cat((f, r),1)
            h, c = self.lstmcell(x, (h, c))
            h = h + f
        return h
    def cuda(self):
        # c_0 is a plain Variable (not a registered buffer), so it must be
        # moved to the GPU explicitly.
        super(FullyContextualEmbedding, self).cuda()
        self.c_0 = self.c_0.cuda()
        return self
| 3,749 | 35.764706 | 199 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/hypernets/hypernet_kernel.py | from copy import deepcopy
from typing import Optional, Tuple
import torch
from torch import nn
from methods.hypernets import HyperNetPOC
from methods.hypernets.utils import set_from_param_dict, accuracy_from_scores
from methods.kernel_convolutions import KernelConv
from methods.kernels import init_kernel_function
from methods.transformer import TransformerEncoder
class HyperShot(HyperNetPOC):
    def __init__(
            self, model_func: nn.Module, n_way: int, n_support: int, n_query: int,
            params: "ArgparseHNParams", target_net_architecture: Optional[nn.Module] = None
    ):
        """HyperShot: a hypernetwork that generates a per-episode target
        classifier from the support-support kernel matrix (optionally with
        raw support embeddings and a permutation-invariance mechanism)."""
        super().__init__(
            model_func, n_way, n_support, n_query, params=params, target_net_architecture=target_net_architecture
        )
        # TODO - check!!!
        # Use support embeddings - concatenate them with kernel features
        self.hn_use_support_embeddings: bool = params.hn_use_support_embeddings
        # Remove self relations by matrix K multiplication
        self.hn_no_self_relations: bool = params.hn_no_self_relations
        # Kernel input grows by n_way when one-hot labels are appended
        # (attention-embedding mode).
        self.kernel_function = init_kernel_function(
            kernel_input_dim=self.feat_dim + self.n_way if self.attention_embedding else self.feat_dim,
            params=params
        )
        # embedding size
        # TODO - add attention based input also
        self.embedding_size = self.init_embedding_size(params)
        # I will be adding the kernel vector to the stacked images embeddings
        # TODO: add/check changes for attention-like input
        self.hn_kernel_invariance: bool = params.hn_kernel_invariance
        if self.hn_kernel_invariance:
            self.hn_kernel_invariance_type: str = params.hn_kernel_invariance_type
            self.hn_kernel_invariance_pooling: str = params.hn_kernel_invariance_pooling
            # Either a transformer (attention) or a small CNN pools the kernel matrix.
            if self.hn_kernel_invariance_type == 'attention':
                self.init_kernel_transformer_architecture(params)
            else:
                self.init_kernel_convolution_architecture(params)
        self.query_relations_size = self.n_way * self.n_support_size_context
        self.target_net_architecture = target_net_architecture or self.build_target_net_architecture(params)
        self.init_hypernet_modules()
def init_embedding_size(self, params) -> int:
if params.hn_use_support_embeddings:
support_embeddings_size = self.feat_dim * self.n_way * self.n_support_size_context
else:
support_embeddings_size = 0
if params.hn_kernel_invariance:
if params.hn_kernel_invariance_type == 'attention':
return support_embeddings_size + (self.n_way * self.n_support_size_context)
else:
return support_embeddings_size + params.hn_kernel_convolution_output_dim
else:
if params.hn_no_self_relations:
return support_embeddings_size + (
((self.n_way * self.n_support_size_context) ** 2) - (
self.n_way * self.n_support_size_context))
else:
return support_embeddings_size + ((self.n_way * self.n_support_size_context) ** 2)
@property
def n_support_size_context(self) -> int:
return 1 if self.sup_aggregation in ["mean", "min_pooling", "max_pooling"] else self.n_support
def build_target_net_architecture(self, params) -> nn.Module:
tn_hidden_size = params.hn_tn_hidden_size
layers = []
if params.hn_use_support_embeddings:
common_insize = ((self.n_way * self.n_support_size_context) + self.feat_dim)
else:
common_insize = (self.n_way * self.n_support_size_context)
for i in range(params.hn_tn_depth):
is_final = i == (params.hn_tn_depth - 1)
insize = common_insize if i == 0 else tn_hidden_size
outsize = self.n_way if is_final else tn_hidden_size
layers.append(nn.Linear(insize, outsize))
if not is_final:
layers.append(nn.ReLU())
res = nn.Sequential(*layers)
print(res)
return res
def maybe_aggregate_support_feature(self, support_feature: torch.Tensor) -> torch.Tensor:
"""
Process embeddings for few shot learning
"""
if self.n_support > 1:
if self.sup_aggregation == 'mean':
return torch.mean(support_feature, axis=1).reshape(self.n_way, 1, -1)
elif self.sup_aggregation == 'max_pooling':
pooled, _ = torch.max(support_feature, axis=1)
pooled = pooled.reshape(self.n_way, 1, -1)
return pooled
elif self.sup_aggregation == 'min_pooling':
pooled, _ = torch.min(support_feature, axis=1)
pooled = pooled.reshape(self.n_way, 1, -1)
return pooled
return support_feature
def parse_feature(self, x, is_feature) -> Tuple[torch.Tensor, torch.Tensor]:
support_feature, query_feature = super().parse_feature(x, is_feature)
support_feature = self.maybe_aggregate_support_feature(support_feature)
return support_feature, query_feature
    def init_kernel_convolution_architecture(self, params):
        """Create the CNN used to pool the support-support kernel matrix
        into a fixed-size vector (the convolutional invariance variant)."""
        # TODO - add convolution-based approach
        self.kernel_2D_convolution: bool = True
        self.kernel_conv: nn.Module = KernelConv(self.n_support, params.hn_kernel_convolution_output_dim)
    def init_kernel_transformer_architecture(self, params):
        """Create the transformer encoder used to pool the kernel matrix
        (the attention-based invariance variant); each kernel-matrix row
        of length n_way * n_support is one token."""
        kernel_transformer_input_dim: int = self.n_way * self.n_support_size_context
        self.kernel_transformer_encoder: nn.Module = TransformerEncoder(
            num_layers=params.kernel_transformer_layers_no,
            input_dim=kernel_transformer_input_dim,
            num_heads=params.kernel_transformer_heads_no,
            dim_feedforward=params.kernel_transformer_feedforward_dim
        )
def build_relations_features(self, support_feature: torch.Tensor,
feature_to_classify: torch.Tensor) -> torch.Tensor:
supp_way, n_support, supp_feat = support_feature.shape
n_examples, feat_dim = feature_to_classify.shape
support_features = support_feature.reshape(supp_way * n_support, supp_feat)
kernel_values_tensor = self.kernel_function.forward(support_features, feature_to_classify)
relations = kernel_values_tensor.T
return relations
    def build_kernel_features_embedding(self, support_feature: torch.Tensor) -> torch.Tensor:
        """Compute the support-support kernel matrix and (optionally) make it
        permutation-invariant or strip its self-relations.

        x_support: [n_way, n_support, hidden_size]
        """
        supp_way, n_support, supp_feat = support_feature.shape
        support_features = support_feature.reshape(supp_way * n_support, supp_feat)
        support_features_copy = torch.clone(support_features)
        kernel_values_tensor = self.kernel_function.forward(support_features, support_features_copy)
        # Remove self relations by matrix multiplication
        if self.hn_no_self_relations:
            # Zero the diagonal, then keep only non-zero entries.
            # NOTE(review): this also drops genuine off-diagonal zeros of the
            # kernel, which would change the flattened length — verify.
            zero_diagonal_matrix = torch.ones_like(kernel_values_tensor).cuda() - torch.eye(
                kernel_values_tensor.shape[0]).cuda()
            kernel_values_tensor = kernel_values_tensor * zero_diagonal_matrix
            return torch.flatten(kernel_values_tensor[kernel_values_tensor != 0.0])
        if self.hn_kernel_invariance:
            # TODO - check!!!
            if self.hn_kernel_invariance_type == 'attention':
                # Pool the transformer-encoded kernel rows (min/max/mean).
                kernel_values_tensor = torch.unsqueeze(kernel_values_tensor.T, 0)
                encoded = self.kernel_transformer_encoder.forward(kernel_values_tensor)
                if self.hn_kernel_invariance_pooling == 'min':
                    invariant_kernel_values, _ = torch.min(encoded, 1)
                elif self.hn_kernel_invariance_pooling == 'max':
                    invariant_kernel_values, _ = torch.max(encoded, 1)
                else:
                    invariant_kernel_values = torch.mean(encoded, 1)
                return invariant_kernel_values
            else:
                # TODO - add convolutional approach
                kernel_values_tensor = torch.unsqueeze(torch.unsqueeze(kernel_values_tensor.T, 0), 0)
                invariant_kernel_values = torch.flatten(self.kernel_conv.forward(kernel_values_tensor))
                return invariant_kernel_values
        return kernel_values_tensor
    def generate_target_net(self, support_feature: torch.Tensor) -> nn.Module:
        """Predict the weights of a fresh target network from the support
        set's kernel embedding and return the instantiated network.

        x_support: [n_way, n_support, hidden_size]
        """
        embedding = self.build_kernel_features_embedding(support_feature)
        embedding = embedding.reshape(1, self.embedding_size)
        # TODO - check!!!
        if self.hn_use_support_embeddings:
            # NOTE(review): cat along dim 0 of a [1, E] tensor with a 1-D
            # flattened tensor looks shape-inconsistent — verify this path.
            embedding = torch.cat((embedding, torch.flatten(support_feature)), 0)
        root = self.hypernet_neck(embedding)
        # One head per target-net parameter tensor; names use '-' in place of '.'.
        network_params = {
            name.replace("-", "."): param_net(root).reshape(self.target_net_param_shapes[name])
            for name, param_net in self.hypernet_heads.items()
        }
        tn = deepcopy(self.target_net_architecture)
        set_from_param_dict(tn, network_params)
        # Keep the conditioning support features on the generated net.
        tn.support_feature = support_feature
        return tn.cuda()
    def set_forward(self, x: torch.Tensor, is_feature: bool = False, permutation_sanity_check: bool = False):
        """Generate a target net from the support set and classify the
        query examples via their kernel relations to the support set."""
        support_feature, query_feature = self.parse_feature(x, is_feature)
        classifier = self.generate_target_net(support_feature)
        query_feature = query_feature.reshape(
            -1, query_feature.shape[-1]
        )
        relational_query_feature = self.build_relations_features(support_feature, query_feature)
        # TODO - check!!!
        if self.hn_use_support_embeddings:
            relational_query_feature = torch.cat((relational_query_feature, query_feature), 1)
        y_pred = classifier(relational_query_feature)
        if permutation_sanity_check:
            ### random permutation test
            # Predictions must be equivariant to a permutation of the queries.
            perm = torch.randperm(len(query_feature))
            rev_perm = torch.argsort(perm)
            query_perm = query_feature[perm]
            relation_perm = self.build_relations_features(support_feature, query_perm)
            assert torch.equal(relation_perm[rev_perm], relational_query_feature)
            y_pred_perm = classifier(relation_perm)
            assert torch.equal(y_pred_perm[rev_perm], y_pred)
        return y_pred
def set_forward_with_adaptation(self, x: torch.Tensor):
y_pred, metrics = super().set_forward_with_adaptation(x)
support_feature, query_feature = self.parse_feature(x, is_feature=False)
query_feature = query_feature.reshape(
-1, query_feature.shape[-1]
)
relational_query_feature = self.build_relations_features(support_feature, query_feature)
metrics["accuracy/val_relational"] = accuracy_from_scores(relational_query_feature, self.n_way, self.n_query)
return y_pred, metrics
    def set_forward_loss(
            self, x: torch.Tensor, detach_ft_hn: bool = False, detach_ft_tn: bool = False,
            train_on_support: bool = True,
            train_on_query: bool = True
    ):
        """Cross-entropy loss over the support and/or query examples.

        detach_ft_hn: stop gradients from the hypernetwork into the backbone.
        detach_ft_tn: stop gradients from the target net into the backbone.
        """
        nw, ne, c, h, w = x.shape  # only `ne` (examples per class) is used below
        support_feature, query_feature = self.parse_feature(x, is_feature=False)
        # TODO: add/check changes for attention-like input
        if self.attention_embedding:
            y_support = self.get_labels(support_feature)
            y_query = self.get_labels(query_feature)
            # Support rows carry their one-hot label; query rows carry zeros.
            y_support_one_hot = torch.nn.functional.one_hot(y_support)
            support_feature_with_classes_one_hot = torch.cat((support_feature, y_support_one_hot), 2)
            y_query_zeros = torch.zeros((y_query.shape[0], y_query.shape[1], y_support_one_hot.shape[2]))
            query_feature_with_zeros = torch.cat((query_feature, y_query_zeros), 2)
            feature_to_hn = support_feature_with_classes_one_hot.detach() if detach_ft_hn else support_feature_with_classes_one_hot
            query_feature_to_hn = query_feature_with_zeros
        else:
            feature_to_hn = support_feature.detach() if detach_ft_hn else support_feature
            query_feature_to_hn = query_feature
        classifier = self.generate_target_net(feature_to_hn)
        feature_to_classify = []
        y_to_classify_gt = []
        if train_on_support:
            # NOTE(review): n_support_size_context is defined outside this view —
            # presumably the per-class support count; confirm against __init__.
            feature_to_classify.append(
                support_feature.reshape(
                    (self.n_way * self.n_support_size_context), support_feature.shape[-1]
                )
            )
            y_support = self.get_labels(support_feature)
            y_to_classify_gt.append(y_support.reshape(self.n_way * self.n_support_size_context))
        if train_on_query:
            feature_to_classify.append(
                query_feature.reshape(
                    (self.n_way * (ne - self.n_support)), query_feature.shape[-1]
                )
            )
            y_query = self.get_labels(query_feature)
            y_to_classify_gt.append(y_query.reshape(self.n_way * (ne - self.n_support)))
        feature_to_classify = torch.cat(feature_to_classify)
        y_to_classify_gt = torch.cat(y_to_classify_gt)
        relational_feature_to_classify = self.build_relations_features(support_feature, feature_to_classify)
        if detach_ft_tn:
            relational_feature_to_classify = relational_feature_to_classify.detach()
        if self.hn_use_support_embeddings:
            relational_feature_to_classify = torch.cat((relational_feature_to_classify, feature_to_classify), 1)
        y_pred = classifier(relational_feature_to_classify)
        return self.loss_fn(y_pred, y_to_classify_gt)
| 13,719 | 43.983607 | 131 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/hypernets/hypermaml.py | from collections import defaultdict
from copy import deepcopy
from time import time
import numpy as np
import torch
from torch import nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
import backbone
from methods.hypernets.utils import get_param_dict, accuracy_from_scores
from methods.maml import MAML
class HyperNet(nn.Module):
    """An MLP hypernetwork head: a "head" of configurable depth followed by a
    single-layer "tail" projecting to `out_neurons`.

    Note: `n_way` and `feat_dim` are accepted but not used in the computation.
    """

    def __init__(self, hn_hidden_size, n_way, embedding_size, feat_dim, out_neurons, params):
        super(HyperNet, self).__init__()
        self.hn_head_len = params.hn_head_len
        layers = [nn.Linear(embedding_size, hn_hidden_size), nn.ReLU()]
        # Each head-length unit beyond 2 adds one hidden Linear+ReLU pair.
        for _ in range(max(self.hn_head_len - 2, 0)):
            layers += [nn.Linear(hn_hidden_size, hn_hidden_size), nn.ReLU()]
        self.head = nn.Sequential(*layers)
        self.tail = nn.Sequential(nn.Linear(hn_hidden_size, out_neurons))

    def forward(self, x):
        return self.tail(self.head(x))
class HyperMAML(MAML):
    def __init__(self, model_func, n_way, n_support, n_query, params=None, approx=False):
        """MAML variant whose inner-loop update is (partially) produced by a
        hypernetwork; `params` carries all hn_*/hm_* hyperparameters."""
        super(HyperMAML, self).__init__(model_func, n_way, n_support, n_query, params=params)
        self.loss_fn = nn.CrossEntropyLoss()
        # Target-network (classifier) architecture.
        self.hn_tn_hidden_size = params.hn_tn_hidden_size
        self.hn_tn_depth = params.hn_tn_depth
        self._init_classifier()
        self.enhance_embeddings = params.hm_enhance_embeddings
        # MAML inner-loop settings (fixed, not taken from params).
        self.n_task = 4
        self.task_update_num = 5
        self.train_lr = 0.01
        self.approx = approx # first order approx.
        # Hypernetwork configuration.
        self.hn_sup_aggregation = params.hn_sup_aggregation
        self.hn_hidden_size = params.hn_hidden_size
        self.hm_lambda = params.hm_lambda
        self.hm_save_delta_params = params.hm_save_delta_params
        self.hm_use_class_batch_input = params.hm_use_class_batch_input
        self.hn_adaptation_strategy = params.hn_adaptation_strategy
        self.hm_support_set_loss = params.hm_support_set_loss
        # Warmup schedule: pure MAML first, then blend in the hypernetwork.
        self.hm_maml_warmup = params.hm_maml_warmup
        self.hm_maml_warmup_epochs = params.hm_maml_warmup_epochs
        self.hm_maml_warmup_switch_epochs = params.hm_maml_warmup_switch_epochs
        self.hm_maml_update_feature_net = params.hm_maml_update_feature_net
        self.hm_update_operator = params.hm_update_operator
        self.hm_load_feature_net = params.hm_load_feature_net
        self.hm_feature_net_path = params.hm_feature_net_path
        self.hm_detach_feature_net = params.hm_detach_feature_net
        self.hm_detach_before_hyper_net = params.hm_detach_before_hyper_net
        # Test-time tuning configuration.
        self.hm_set_forward_with_adaptation = params.hm_set_forward_with_adaptation
        self.hn_val_lr = params.hn_val_lr
        self.hn_val_epochs = params.hn_val_epochs
        self.hn_val_optim = params.hn_val_optim
        # Alpha ramps the hypernetwork's influence when strategy is 'increasing_alpha'.
        self.alpha = 0
        self.hn_alpha_step = params.hn_alpha_step
        if self.hn_adaptation_strategy == 'increasing_alpha' and self.hn_alpha_step < 0:
            raise ValueError('hn_alpha_step is not positive!')
        self.single_test = False
        # Epoch counters are set externally by the training loop.
        self.epoch = -1
        self.start_epoch = -1
        self.stop_epoch = -1
        self.calculate_embedding_size()
        self._init_hypernet_modules(params)
        self._init_feature_net()
        # print(self)
    def _init_feature_net(self):
        # Optionally warm-start the feature extractor from a checkpoint.
        if self.hm_load_feature_net:
            print(f'loading feature net model from location: {self.hm_feature_net_path}')
            # NOTE(review): torch.load unpickles the file — only load trusted
            # checkpoints. Assumes the checkpoint dict has a 'state' key.
            model_dict = torch.load(self.hm_feature_net_path)
            self.feature.load_state_dict(model_dict['state'])
def _init_classifier(self):
assert self.hn_tn_hidden_size % self.n_way == 0, f"hn_tn_hidden_size {self.hn_tn_hidden_size} should be the multiple of n_way {self.n_way}"
layers = []
for i in range(self.hn_tn_depth):
in_dim = self.feat_dim if i == 0 else self.hn_tn_hidden_size
out_dim = self.n_way if i == (self.hn_tn_depth - 1) else self.hn_tn_hidden_size
linear = backbone.Linear_fw(in_dim, out_dim)
linear.bias.data.fill_(0)
layers.append(linear)
self.classifier = nn.Sequential(*layers)
def _init_hypernet_modules(self, params):
target_net_param_dict = get_param_dict(self.classifier)
target_net_param_dict = {
name.replace(".", "-"): p
# replace dots with hyphens bc torch doesn't like dots in modules names
for name, p in target_net_param_dict.items()
}
self.target_net_param_shapes = {
name: p.shape
for (name, p)
in target_net_param_dict.items()
}
self.hypernet_heads = nn.ModuleDict()
for name, param in target_net_param_dict.items():
if self.hm_use_class_batch_input and name[-4:] == 'bias':
continue
bias_size = param.shape[0] // self.n_way
head_in = self.embedding_size
head_out = (param.numel() // self.n_way) + bias_size if self.hm_use_class_batch_input else param.numel()
head_modules = []
self.hypernet_heads[name] = HyperNet(self.hn_hidden_size, self.n_way, head_in, self.feat_dim, head_out,
params)
def calculate_embedding_size(self):
n_classes_in_embedding = 1 if self.hm_use_class_batch_input else self.n_way
n_support_per_class = 1 if self.hn_sup_aggregation == 'mean' else self.n_support
single_support_embedding_len = self.feat_dim + self.n_way + 1 if self.enhance_embeddings else self.feat_dim
self.embedding_size = n_classes_in_embedding * n_support_per_class * single_support_embedding_len
def apply_embeddings_strategy(self, embeddings):
if self.hn_sup_aggregation == 'mean':
new_embeddings = torch.zeros(self.n_way, *embeddings.shape[1:])
for i in range(self.n_way):
lower = i * self.n_support
upper = (i + 1) * self.n_support
new_embeddings[i] = embeddings[lower:upper, :].mean(dim=0)
return new_embeddings.cuda()
return embeddings
def get_support_data_labels(self):
return torch.from_numpy(np.repeat(range(self.n_way), self.n_support)).cuda() # labels for support data
    def get_hn_delta_params(self, support_embeddings):
        """Run the hypernetwork heads to obtain delta tensors for the classifier.

        Returns a list ordered like `self.hypernet_heads` (in class-batch mode,
        a weight delta followed by a bias delta per head).
        """
        if self.hm_detach_before_hyper_net:
            support_embeddings = support_embeddings.detach()
        if self.hm_use_class_batch_input:
            delta_params_list = []
            for name, param_net in self.hypernet_heads.items():
                # One row per class: [n_way, embedding_per_class].
                support_embeddings_resh = support_embeddings.reshape(
                    self.n_way, -1
                )
                delta_params = param_net(support_embeddings_resh)
                bias_neurons_num = self.target_net_param_shapes[name][0] // self.n_way
                if self.hn_adaptation_strategy == 'increasing_alpha' and self.alpha < 1:
                    # Gradually ramp up the hypernetwork's influence.
                    delta_params = delta_params * self.alpha
                # The last `bias_neurons_num` columns of each row hold the bias delta.
                weights_delta = delta_params[:, :-bias_neurons_num]
                bias_delta = delta_params[:, -bias_neurons_num:].flatten()
                delta_params_list.extend([weights_delta, bias_delta])
            return delta_params_list
        else:
            delta_params_list = []
            for name, param_net in self.hypernet_heads.items():
                flattened_embeddings = support_embeddings.flatten()
                delta = param_net(flattened_embeddings)
                if name in self.target_net_param_shapes.keys():
                    delta = delta.reshape(self.target_net_param_shapes[name])
                if self.hn_adaptation_strategy == 'increasing_alpha' and self.alpha < 1:
                    delta = self.alpha * delta
                delta_params_list.append(delta)
            return delta_params_list
def _update_weight(self, weight, update_value):
if self.hm_update_operator == 'minus':
if weight.fast is None:
weight.fast = weight - update_value
else:
weight.fast = weight.fast - update_value
elif self.hm_update_operator == 'plus':
if weight.fast is None:
weight.fast = weight + update_value
else:
weight.fast = weight.fast + update_value
elif self.hm_update_operator == 'multiply':
if weight.fast is None:
weight.fast = weight * update_value
else:
weight.fast = weight.fast * update_value
def _get_p_value(self):
if self.epoch < self.hm_maml_warmup_epochs:
return 1.0
elif self.hm_maml_warmup_epochs <= self.epoch < self.hm_maml_warmup_epochs + self.hm_maml_warmup_switch_epochs:
return (self.hm_maml_warmup_switch_epochs + self.hm_maml_warmup_epochs - self.epoch) / (
self.hm_maml_warmup_switch_epochs + 1)
return 0.0
    def _update_network_weights(self, delta_params_list, support_embeddings, support_data_labels, train_stage=False):
        """Set fast weights on the classifier (and optionally the backbone).

        With warmup active, blends gradient-based MAML updates (weight p from
        `_get_p_value`) with hypernetwork deltas (weight 1 - p); otherwise the
        deltas are applied directly.
        """
        if self.hm_maml_warmup and not self.single_test:
            p = self._get_p_value()
            if p > 0.0:
                fast_parameters = []
                if self.hm_maml_update_feature_net:
                    fet_fast_parameters = list(self.feature.parameters())
                    for weight in self.feature.parameters():
                        weight.fast = None
                    self.feature.zero_grad()
                    fast_parameters = fast_parameters + fet_fast_parameters
                clf_fast_parameters = list(self.classifier.parameters())
                for weight in self.classifier.parameters():
                    weight.fast = None
                self.classifier.zero_grad()
                fast_parameters = fast_parameters + clf_fast_parameters
                for task_step in range(self.task_update_num):
                    scores = self.classifier(support_embeddings)
                    set_loss = self.loss_fn(scores, support_data_labels)
                    grad = torch.autograd.grad(set_loss, fast_parameters, create_graph=True,
                                               allow_unused=True)  # build full graph support gradient of gradient
                    if self.approx:
                        grad = [g.detach() for g in
                                grad]  # do not calculate gradient of gradient if using first order approximation
                    if self.hm_maml_update_feature_net:
                        # update weights of feature network
                        for k, weight in enumerate(self.feature.parameters()):
                            update_value = self.train_lr * p * grad[k]
                            self._update_weight(weight, update_value)
                    # Classifier grads come after the feature-net grads in `grad`.
                    classifier_offset = len(fet_fast_parameters) if self.hm_maml_update_feature_net else 0
                    if p == 1:
                        # update weights of classifier network by adding gradient
                        for k, weight in enumerate(self.classifier.parameters()):
                            update_value = (self.train_lr * grad[classifier_offset + k])
                            self._update_weight(weight, update_value)
                    elif 0.0 < p < 1.0:
                        # update weights of classifier network by adding gradient and output of hypernetwork
                        for k, weight in enumerate(self.classifier.parameters()):
                            update_value = ((self.train_lr * p * grad[classifier_offset + k]) + (
                                    (1 - p) * delta_params_list[k]))
                            self._update_weight(weight, update_value)
                    else:
                        for k, weight in enumerate(self.classifier.parameters()):
                            update_value = delta_params_list[k]
                            self._update_weight(weight, update_value)
            else:
                # Warmup finished: apply hypernetwork deltas only.
                for k, weight in enumerate(self.classifier.parameters()):
                    update_value = delta_params_list[k]
                    self._update_weight(weight, update_value)
        else:
            # No warmup: apply hypernetwork deltas only.
            for k, weight in enumerate(self.classifier.parameters()):
                update_value = delta_params_list[k]
                self._update_weight(weight, update_value)
    def _get_list_of_delta_params(self, maml_warmup_used, support_embeddings, support_data_labels):
        """Compute hypernetwork deltas, or zeros while pure-MAML warmup is active."""
        if not maml_warmup_used:
            if self.enhance_embeddings:
                # Append the classifier's (detached) softmax output and the label
                # to each support embedding before feeding the hypernetwork.
                with torch.no_grad():
                    logits = self.classifier.forward(support_embeddings).detach()
                    logits = F.softmax(logits, dim=1)
                labels = support_data_labels.view(support_embeddings.shape[0], -1)
                support_embeddings = torch.cat((support_embeddings, logits, labels), dim=1)
            # Reset fast weights left over from a previous episode.
            for weight in self.parameters():
                weight.fast = None
            self.zero_grad()
            support_embeddings = self.apply_embeddings_strategy(support_embeddings)
            delta_params = self.get_hn_delta_params(support_embeddings)
            if self.hm_save_delta_params and len(self.delta_list) == 0:
                self.delta_list = [{'delta_params': delta_params}]
            return delta_params
        else:
            # Warmup: zero deltas, so the update is pure gradient-based MAML.
            return [torch.zeros(*i).cuda() for (_, i) in self.target_net_param_shapes.items()]
def forward(self, x):
out = self.feature.forward(x)
if self.hm_detach_feature_net:
out = out.detach()
scores = self.classifier.forward(out)
return scores
    def set_forward(self, x, is_feature=False, train_stage=False):
        """ 1. Get delta params from hypernetwork with support data.
        2. Update target- network weights.
        3. Forward with query data.
        4. Return scores

        Returns (scores, total_delta_sum-or-None); the second element is the
        squared-norm regularizer when hm_lambda != 0.
        """
        assert is_feature == False, 'MAML do not support fixed feature'
        x = x.cuda()
        x_var = Variable(x)
        support_data = x_var[:, :self.n_support, :, :, :].contiguous().view(self.n_way * self.n_support,
                                                                            *x.size()[2:])  # support data
        query_data = x_var[:, self.n_support:, :, :, :].contiguous().view(self.n_way * self.n_query,
                                                                          *x.size()[2:])  # query data
        support_data_labels = self.get_support_data_labels()
        support_embeddings = self.feature(support_data)
        if self.hm_detach_feature_net:
            support_embeddings = support_embeddings.detach()
        maml_warmup_used = (
                (not self.single_test) and self.hm_maml_warmup and (self.epoch < self.hm_maml_warmup_epochs))
        delta_params_list = self._get_list_of_delta_params(maml_warmup_used, support_embeddings, support_data_labels)
        self._update_network_weights(delta_params_list, support_embeddings, support_data_labels, train_stage)
        if self.hm_set_forward_with_adaptation and not train_stage:
            # Validation-time tuning scores the support set itself.
            scores = self.forward(support_data)
            return scores, None
        else:
            if self.hm_support_set_loss and train_stage and not maml_warmup_used:
                # Optionally include the support set in the training loss.
                query_data = torch.cat((support_data, query_data))
            scores = self.forward(query_data)
            # sum of delta params for regularization
            if self.hm_lambda != 0:
                total_delta_sum = sum([delta_params.pow(2.0).sum() for delta_params in delta_params_list])
                return scores, total_delta_sum
            else:
                return scores, None
def set_forward_adaptation(self, x, is_feature=False): # overwrite parrent function
raise ValueError('MAML performs further adapation simply by increasing task_upate_num')
    def set_forward_loss(self, x):
        """Episode loss and top-1 accuracy (in percent) for meta-training."""
        scores, total_delta_sum = self.set_forward(x, is_feature=False, train_stage=True)
        query_data_labels = Variable(torch.from_numpy(np.repeat(range(self.n_way), self.n_query))).cuda()
        if self.hm_support_set_loss:
            # set_forward prepended the support data, so prepend its labels too.
            support_data_labels = torch.from_numpy(np.repeat(range(self.n_way), self.n_support)).cuda()
            query_data_labels = torch.cat((support_data_labels, query_data_labels))
        loss = self.loss_fn(scores, query_data_labels)
        if self.hm_lambda != 0:
            # L2 regularization on the hypernetwork's deltas.
            loss = loss + self.hm_lambda * total_delta_sum
        topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
        topk_ind = topk_labels.cpu().numpy().flatten()
        y_labels = query_data_labels.cpu().numpy()
        top1_correct = np.sum(topk_ind == y_labels)
        task_accuracy = (top1_correct / len(query_data_labels)) * 100
        return loss, task_accuracy
def set_forward_loss_with_adaptation(self, x):
scores, _ = self.set_forward(x, is_feature=False, train_stage=False)
support_data_labels = Variable(torch.from_numpy(np.repeat(range(self.n_way), self.n_support))).cuda()
loss = self.loss_fn(scores, support_data_labels)
topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
topk_ind = topk_labels.cpu().numpy().flatten()
y_labels = support_data_labels.cpu().numpy()
top1_correct = np.sum(topk_ind == y_labels)
task_accuracy = (top1_correct / len(support_data_labels)) * 100
return loss, task_accuracy
    def train_loop(self, epoch, train_loader, optimizer):  # overwrite parent function
        """One meta-training epoch; gradients accumulate over n_task episodes
        per optimizer step. Returns a metrics dict."""
        print_freq = 10
        avg_loss = 0
        task_count = 0
        loss_all = []
        acc_all = []
        optimizer.zero_grad()
        self.delta_list = []
        # train
        for i, (x, _) in enumerate(train_loader):
            self.n_query = x.size(1) - self.n_support
            assert self.n_way == x.size(0), "MAML do not support way change"
            loss, task_accuracy = self.set_forward_loss(x)
            avg_loss = avg_loss + loss.item()  # .data[0]
            loss_all.append(loss)
            acc_all.append(task_accuracy)
            task_count += 1
            if task_count == self.n_task:  # MAML update several tasks at one time
                loss_q = torch.stack(loss_all).sum(0)
                loss_q.backward()
                optimizer.step()
                task_count = 0
                loss_all = []
                optimizer.zero_grad()
            if i % print_freq == 0:
                print('Epoch {:d}/{:d} | Batch {:d}/{:d} | Loss {:f}'.format(self.epoch, self.stop_epoch, i,
                                                                             len(train_loader),
                                                                             avg_loss / float(i + 1)))
        acc_all = np.asarray(acc_all)
        acc_mean = np.mean(acc_all)
        metrics = {"accuracy/train": acc_mean}
        if self.hn_adaptation_strategy == 'increasing_alpha':
            metrics['alpha'] = self.alpha
        if self.hm_save_delta_params and len(self.delta_list) > 0:
            delta_params = {"epoch": self.epoch, "delta_list": self.delta_list}
            metrics['delta_params'] = delta_params
        # Ramp alpha towards 1 across epochs (increasing_alpha strategy).
        if self.alpha < 1:
            self.alpha += self.hn_alpha_step
        return metrics
    def test_loop(self, test_loader, return_std=False, return_time: bool = False):  # overwrite parent function
        """Evaluate over all test episodes.

        Returns [acc_mean, (acc_std), (eval_time), metrics] depending on flags.
        """
        acc_all = []
        self.delta_list = []
        acc_at = defaultdict(list)
        iter_num = len(test_loader)
        eval_time = 0
        if self.hm_set_forward_with_adaptation:
            # Per-episode test-time tuning path.
            for i, (x, _) in enumerate(test_loader):
                self.n_query = x.size(1) - self.n_support
                assert self.n_way == x.size(0), "MAML do not support way change"
                s = time()
                acc_task, acc_at_metrics = self.set_forward_with_adaptation(x)
                t = time()
                for (k, v) in acc_at_metrics.items():
                    acc_at[k].append(v)
                acc_all.append(acc_task)
                eval_time += (t - s)
        else:
            for i, (x, _) in enumerate(test_loader):
                self.n_query = x.size(1) - self.n_support
                assert self.n_way == x.size(0), f"MAML do not support way change, {self.n_way=}, {x.size(0)=}"
                s = time()
                correct_this, count_this = self.correct(x)
                t = time()
                acc_all.append(correct_this / count_this * 100)
                eval_time += (t - s)
        # Average the per-tuning-epoch accuracy curves across episodes.
        metrics = {
            k: np.mean(v) if len(v) > 0 else 0
            for (k, v) in acc_at.items()
        }
        num_tasks = len(acc_all)
        acc_all = np.asarray(acc_all)
        acc_mean = np.mean(acc_all)
        acc_std = np.std(acc_all)
        print('%d Test Acc = %4.2f%% +- %4.2f%%' % (iter_num, acc_mean, 1.96 * acc_std / np.sqrt(iter_num)))
        print("Num tasks", num_tasks)
        ret = [acc_mean]
        if return_std:
            ret.append(acc_std)
        if return_time:
            ret.append(eval_time)
        ret.append(metrics)
        return ret
    def set_forward_with_adaptation(self, x: torch.Tensor):
        """Test-time tuning on the support set, performed on a deep copy.

        Returns (final query accuracy, per-tuning-epoch metrics dict).
        """
        self_copy = deepcopy(self)
        # deepcopy does not copy "fast" parameters so it should be done manually
        for param1, param2 in zip(self.parameters(), self_copy.parameters()):
            if hasattr(param1, 'fast'):
                if param1.fast is not None:
                    param2.fast = param1.fast.clone()
                else:
                    param2.fast = None
        metrics = {
            "accuracy/val@-0": self_copy.query_accuracy(x)
        }
        val_opt_type = torch.optim.Adam if self.hn_val_optim == "adam" else torch.optim.SGD
        val_opt = val_opt_type(self_copy.parameters(), lr=self.hn_val_lr)
        if self.hn_val_epochs > 0:
            for i in range(1, self.hn_val_epochs + 1):
                self_copy.train()
                val_opt.zero_grad()
                # Tune on the support set only; the query set stays unseen.
                loss, val_support_acc = self_copy.set_forward_loss_with_adaptation(x)
                loss.backward()
                val_opt.step()
                self_copy.eval()
                metrics[f"accuracy/val_support_acc@-{i}"] = val_support_acc
                metrics[f"accuracy/val_loss@-{i}"] = loss.item()
                metrics[f"accuracy/val@-{i}"] = self_copy.query_accuracy(x)
        # free CUDA memory by deleting "fast" parameters
        for param in self_copy.parameters():
            param.fast = None
        return metrics[f"accuracy/val@-{self.hn_val_epochs}"], metrics
def query_accuracy(self, x: torch.Tensor) -> float:
scores, _ = self.set_forward(x, train_stage=True)
return 100 * accuracy_from_scores(scores, n_way=self.n_way, n_query=self.n_query)
def get_logits(self, x):
self.n_query = x.size(1) - self.n_support
logits, _ = self.set_forward(x)
return logits
def correct(self, x):
scores, _ = self.set_forward(x)
y_query = np.repeat(range(self.n_way), self.n_query)
topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
topk_ind = topk_labels.cpu().numpy()
top1_correct = np.sum(topk_ind[:, 0] == y_query)
return float(top1_correct), len(y_query)
| 23,248 | 39.503484 | 147 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/hypernets/utils.py | from typing import Dict
import numpy as np
import torch
from torch import nn
def get_param_dict(net: nn.Module) -> Dict[str, nn.Parameter]:
    """A dict of named parameters of an nn.Module."""
    return dict(net.named_parameters())
def set_from_param_dict(module: nn.Module, param_dict: Dict[str, torch.Tensor]):
    """
    Sets the values of `module` parameters with the values from `param_dict`.
    Works just like:
        nn.Module.load_state_dict()
    with the exception that those parameters are not tunable by default, because
    we set their values to bare tensors instead of nn.Parameter.
    This means that a network with such params cannot be trained directly with an optimizer.
    However, gradients may still flow through those tensors, so it's useful for the use-case of hypernetworks.
    """
    for sdk, v in param_dict.items():
        keys = sdk.split(".")
        param_name = keys[-1]
        m = module
        for k in keys[:-1]:
            # Numeric path components index into containers (e.g. nn.Sequential);
            # everything else is a plain attribute. The original used a bare
            # `except:` here, which swallowed every error on the index path.
            try:
                idx = int(k)
            except ValueError:
                m = getattr(m, k)
            else:
                m = m[idx]
        param = getattr(m, param_name)
        assert param.shape == v.shape, (sdk, param.shape, v.shape)
        # Replace the nn.Parameter with a bare tensor: gradients can still flow
        # through it, but it becomes invisible to optimizers.
        delattr(m, param_name)
        setattr(m, param_name, v)
class SinActivation(nn.Module):
    """Element-wise sine as an activation layer."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x.sin()
def accuracy_from_scores(scores: torch.Tensor, n_way: int, n_query: int) -> float:
    """Top-1 accuracy of `scores` over a query set sorted by class.

    Assumes row i belongs to class i // n_query (examples sorted by class!).
    """
    n_rows, n_cols = scores.shape
    assert (n_rows, n_cols) == (n_way * n_query, n_way), ((n_rows, n_cols), (n_query, n_way))
    ground_truth = np.repeat(range(n_way), n_query)
    _, predicted = scores.data.topk(1, 1, True, True)
    predictions = predicted.cpu().numpy()[:, 0]
    n_correct = float(np.sum(predictions == ground_truth))
    return n_correct / len(ground_truth)
def kl_diag_gauss_with_standard_gauss(mean, logvar):
    """KL divergence between N(mean, diag(exp(logvar))) and the standard
    normal, summed over all flattened parameter tensors."""
    flat_mean = torch.cat([t.view(-1) for t in mean])
    flat_logvar = torch.cat([t.view(-1) for t in logvar])
    flat_var = flat_logvar.exp()
    return -0.5 * torch.sum(1 + flat_logvar - flat_mean.pow(2) - flat_var)
def reparameterize(mu, logvar):
    """Sample from N(mu, exp(logvar)) via the reparameterization trick."""
    sigma = torch.exp(0.5 * logvar)
    noise = torch.randn_like(sigma)
    return noise * sigma + mu
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/hypernets/hypernet_poc.py | from collections import defaultdict
from copy import deepcopy
from typing import Dict, Optional
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
from methods.hypernets.utils import get_param_dict, set_from_param_dict, SinActivation, accuracy_from_scores
from methods.meta_template import MetaTemplate
from methods.transformer import TransformerEncoder
# Support-set aggregation modes accepted by the hypernetwork embedding builder.
ALLOWED_AGGREGATIONS = ["concat", "mean", "max_pooling", "min_pooling"]
class HyperNetPOC(MetaTemplate):
    def __init__(
            self, model_func: nn.Module, n_way: int, n_support: int, n_query: int,
            params: "ArgparseHNParams", target_net_architecture: Optional[nn.Module] = None
    ):
        """Proof-of-concept hypernetwork: generates a target classifier's
        parameters from the embedded support set."""
        super().__init__(model_func, n_way, n_support)
        # Backbone output size is hard-coded per dataset family.
        self.feat_dim = self.feature.final_feat_dim = 64 if params.dataset == "cross_char" else 1600
        self.n_query = n_query
        # Taskset batching configuration.
        self.taskset_size: int = params.hn_taskset_size
        self.taskset_print_every: int = params.hn_taskset_print_every
        self.hn_hidden_size: int = params.hn_hidden_size
        self.attention_embedding: bool = params.hn_attention_embedding
        self.sup_aggregation: str = params.hn_sup_aggregation
        # Gradient-detach switches for the feature extractor.
        self.detach_ft_in_hn: int = params.hn_detach_ft_in_hn
        self.detach_ft_in_tn: int = params.hn_detach_ft_in_tn
        self.hn_neck_len: int = params.hn_neck_len
        self.hn_head_len: int = params.hn_head_len
        self.taskset_repeats_config: str = params.hn_taskset_repeats
        self.hn_dropout: float = params.hn_dropout
        # Test-time tuning configuration.
        self.hn_val_epochs: int = params.hn_val_epochs
        self.hn_val_lr: float = params.hn_val_lr
        self.hn_val_optim: float = params.hn_val_optim
        self.embedding_size = self.init_embedding_size(params)
        self.target_net_architecture = target_net_architecture or self.build_target_net_architecture(params)
        self.loss_fn = nn.CrossEntropyLoss()
        self.init_hypernet_modules()
        if self.attention_embedding:
            self.init_transformer_architecture(params)
        print(self.target_net_architecture)
def init_embedding_size(self, params) -> int:
if self.attention_embedding:
return (self.feat_dim + self.n_way) * self.n_way * self.n_support
else:
assert self.sup_aggregation in ALLOWED_AGGREGATIONS
if self.sup_aggregation == "concat":
return self.feat_dim * self.n_way * self.n_support
elif self.sup_aggregation in ["mean", "max_pooling", "min_pooling"]:
return self.feat_dim * self.n_way
def build_target_net_architecture(self, params) -> nn.Module:
tn_hidden_size = params.hn_tn_hidden_size
layers = []
for i in range(params.hn_tn_depth):
is_final = i == (params.hn_tn_depth - 1)
insize = self.feat_dim if i == 0 else tn_hidden_size
outsize = self.n_way if is_final else tn_hidden_size
layers.append(nn.Linear(insize, outsize))
if not is_final:
layers.append(nn.ReLU())
res = nn.Sequential(*layers)
print(res)
return res
def init_transformer_architecture(self, params):
transformer_input_dim: int = self.feat_dim + self.n_way
self.transformer_encoder: nn.Module = TransformerEncoder(
num_layers=params.hn_transformer_layers_no, input_dim=transformer_input_dim,
num_heads=params.hn_transformer_heads_no, dim_feedforward=params.hn_transformer_feedforward_dim)
    def init_hypernet_modules(self):
        """Create the shared neck plus one MLP head per target-net parameter."""
        target_net_param_dict = get_param_dict(self.target_net_architecture)
        target_net_param_dict = {
            name.replace(".", "-"): p
            # replace dots with hyphens bc torch doesn't like dots in modules names
            for name, p in target_net_param_dict.items()
        }
        self.target_net_param_shapes = {
            name: p.shape
            for (name, p)
            in target_net_param_dict.items()
        }
        self.init_hypernet_neck()
        self.hypernet_heads = nn.ModuleDict()
        assert self.hn_head_len >= 1, "Head len must be >= 1!"
        for name, param in target_net_param_dict.items():
            # Heads read the raw embedding when there is no neck.
            head_in = self.embedding_size if self.hn_neck_len == 0 else self.hn_hidden_size
            head_out = param.numel()
            head_modules = []
            for i in range(self.hn_head_len):
                in_size = head_in if i == 0 else self.hn_hidden_size
                is_final = (i == (self.hn_head_len - 1))
                out_size = head_out if is_final else self.hn_hidden_size
                head_modules.extend([nn.Dropout(self.hn_dropout), nn.Linear(in_size, out_size)])
                if not is_final:
                    head_modules.append(nn.ReLU())
            self.hypernet_heads[name] = nn.Sequential(*head_modules)
def init_hypernet_neck(self):
neck_modules = []
if self.hn_neck_len > 0:
neck_modules = [
nn.Linear(self.embedding_size, self.hn_hidden_size),
nn.ReLU()
]
for _ in range(self.hn_neck_len - 1):
neck_modules.extend(
[nn.Dropout(self.hn_dropout), nn.Linear(self.hn_hidden_size, self.hn_hidden_size), nn.ReLU()]
)
neck_modules = neck_modules[:-1] # remove the last ReLU
self.hypernet_neck = nn.Sequential(*neck_modules)
def taskset_repeats(self, epoch: int):
epoch_ceiling_to_n_repeats = {
int(kv.split(":")[0]): int(kv.split(":")[1])
for kv in self.taskset_repeats_config.split("-")
}
epoch_ceiling_to_n_repeats = {k: v for (k, v) in epoch_ceiling_to_n_repeats.items() if k > epoch}
if len(epoch_ceiling_to_n_repeats) == 0:
return 1
return epoch_ceiling_to_n_repeats[min(epoch_ceiling_to_n_repeats.keys())]
def get_labels(self, x: torch.Tensor) -> torch.Tensor:
"""
x: [n_way, n_shot, hidden_size]
"""
ys = torch.tensor(list(range(x.shape[0]))).reshape(len(x), 1)
ys = ys.repeat(1, x.shape[1]).to(x.device)
return ys.cuda()
def maybe_aggregate_support_feature(self, support_feature: torch.Tensor) -> torch.Tensor:
way, n_support, feat = support_feature.shape
if self.sup_aggregation == "concat":
features = support_feature.reshape(way * n_support, feat)
elif self.sup_aggregation == "sum":
features = support_feature.sum(dim=1)
way, feat = features.shape
assert (way, feat) == (self.n_way, self.feat_dim)
elif self.sup_aggregation == "mean":
features = support_feature.mean(dim=1)
way, feat = features.shape
assert (way, feat) == (self.n_way, self.feat_dim)
else:
raise TypeError(self.sup_aggregation)
return features
def build_embedding(self, support_feature: torch.Tensor) -> torch.Tensor:
way, n_support, feat = support_feature.shape
if self.attention_embedding:
features = support_feature.view(1, -1, *(support_feature.size()[2:]))
attention_features = torch.flatten(self.transformer_encoder.forward(features))
return attention_features
features = self.maybe_aggregate_support_feature(support_feature)
features = features.reshape(1, -1)
return features
def generate_network_params(self, support_feature: torch.Tensor) -> Dict[str, torch.Tensor]:
embedding = self.build_embedding(support_feature)
root = self.hypernet_neck(embedding)
network_params = {
name.replace("-", "."): param_net(root).reshape(self.target_net_param_shapes[name])
for name, param_net in self.hypernet_heads.items()
}
return network_params
def generate_target_net(self, support_feature: torch.Tensor) -> nn.Module:
"""
x_support: [n_way, n_support, hidden_size]
"""
network_params = self.generate_network_params(support_feature)
tn = deepcopy(self.target_net_architecture)
set_from_param_dict(tn, network_params)
return tn.cuda()
    def set_forward(self, x: torch.Tensor, is_feature: bool = False, permutation_sanity_check: bool = False):
        """Generate a target classifier from the support set and score the query set."""
        support_feature, query_feature = self.parse_feature(x, is_feature)
        if self.attention_embedding:
            # Augment each support vector with its one-hot class label for the
            # transformer-based embedding.
            y_support = self.get_labels(support_feature)
            y_support_one_hot = torch.nn.functional.one_hot(y_support)
            support_feature_with_classes_one_hot = torch.cat((support_feature, y_support_one_hot), 2)
            support_feature = support_feature_with_classes_one_hot
        classifier = self.generate_target_net(support_feature)
        # Flatten [n_way, n_query, feat] -> [n_way * n_query, feat].
        query_feature = query_feature.reshape(
            -1, query_feature.shape[-1]
        )
        y_pred = classifier(query_feature)
        if permutation_sanity_check:
            ### random permutation test
            perm = torch.randperm(len(query_feature))
            rev_perm = torch.argsort(perm)
            query_perm = query_feature[perm]
            assert torch.equal(query_perm[rev_perm], query_feature)
            y_pred_perm = classifier(query_perm)
            assert torch.equal(y_pred_perm[rev_perm], y_pred)
        return y_pred
    def set_forward_with_adaptation(self, x: torch.Tensor):
        """Evaluate with optional test-time tuning on the support set.

        Tuning runs on a deep copy so it never leaks into the trained model.
        Returns (query predictions, accuracy-per-tuning-epoch metrics).
        """
        self_copy = deepcopy(self)
        metrics = {
            "accuracy/val@-0": self_copy.query_accuracy(x)
        }
        val_opt_type = torch.optim.Adam if self.hn_val_optim == "adam" else torch.optim.SGD
        val_opt = val_opt_type(self_copy.parameters(), lr=self.hn_val_lr)
        if self.hn_val_epochs > 0:
            for i in range(1, self.hn_val_epochs + 1):
                self_copy.train()
                val_opt.zero_grad()
                # Tune on the support set only; the query set stays unseen.
                loss = self_copy.set_forward_loss(x, train_on_query=False)
                loss.backward()
                val_opt.step()
                self_copy.eval()
                metrics[f"accuracy/val@-{i}"] = self_copy.query_accuracy(x)
        return self_copy.set_forward(x, permutation_sanity_check=True), metrics
def query_accuracy(self, x: torch.Tensor) -> float:
scores = self.set_forward(x)
return accuracy_from_scores(scores, n_way=self.n_way, n_query=self.n_query)
def set_forward_loss(
        self, x: torch.Tensor, detach_ft_hn: bool = False, detach_ft_tn: bool = False,
        train_on_support: bool = True,
        train_on_query: bool = True
):
    """Generate a target network from the support set and return the
    classification loss on the selected portions of the task.

    Args:
        x: task batch, expected shape [n_way, n_examples, c, h, w].
        detach_ft_hn: stop gradients from the hypernetwork into the feature net.
        detach_ft_tn: stop gradients from the target net into the features it classifies.
        train_on_support / train_on_query: which examples contribute to the loss.
    """
    # Tuple-unpacking keeps the implicit rank check: x must be 5-dimensional.
    _, n_examples, _, _, _ = x.shape

    support_feature, query_feature = self.parse_feature(x, is_feature=False)

    if self.attention_embedding:
        # Feed the hypernetwork support embeddings augmented with one-hot labels.
        # (A dead `y_query = self.get_labels(query_feature)` was removed here —
        # query labels are only needed below, when train_on_query is set.)
        y_support = self.get_labels(support_feature)
        y_support_one_hot = torch.nn.functional.one_hot(y_support)
        support_feature_with_classes_one_hot = torch.cat((support_feature, y_support_one_hot), 2)
        feature_to_hn = support_feature_with_classes_one_hot.detach() if detach_ft_hn else support_feature_with_classes_one_hot
    else:
        feature_to_hn = support_feature.detach() if detach_ft_hn else support_feature

    classifier = self.generate_target_net(feature_to_hn)

    feature_to_classify = []
    y_to_classify_gt = []
    if train_on_support:
        feature_to_classify.append(
            support_feature.reshape(
                (self.n_way * self.n_support), support_feature.shape[-1]
            )
        )
        y_support = self.get_labels(support_feature)
        y_to_classify_gt.append(y_support.reshape(self.n_way * self.n_support))

    if train_on_query:
        feature_to_classify.append(
            query_feature.reshape(
                (self.n_way * (n_examples - self.n_support)), query_feature.shape[-1]
            )
        )
        y_query = self.get_labels(query_feature)
        y_to_classify_gt.append(y_query.reshape(self.n_way * (n_examples - self.n_support)))

    feature_to_classify = torch.cat(feature_to_classify)
    y_to_classify_gt = torch.cat(y_to_classify_gt)

    if detach_ft_tn:
        feature_to_classify = feature_to_classify.detach()

    y_pred = classifier(feature_to_classify)
    return self.loss_fn(y_pred, y_to_classify_gt)
def train_loop(self, epoch: int, train_loader: DataLoader, optimizer: torch.optim.Optimizer):
    """Run one training epoch: tasks are accumulated into "tasksets" of
    `taskset_size`, and each taskset is optimized `taskset_repeats(epoch)` times
    with one optimizer step per repeat. Returns a dict of averaged metrics."""
    taskset_id = 0
    taskset = []
    n_train = len(train_loader)
    accuracies = []
    losses = []
    metrics = defaultdict(list)
    ts_repeats = self.taskset_repeats(epoch)

    for i, (x, _) in enumerate(train_loader):
        taskset.append(x)

        # TODO 3: perhaps the idea of tasksets is redundant and it's better to update weights at every task
        # Flush the taskset when it is full, or at the last batch of the epoch.
        if i % self.taskset_size == (self.taskset_size - 1) or i == (n_train - 1):
            # This initial value is only observed when ts_repeats == 0 (the loop
            # below re-initializes loss_sum at each repeat).
            loss_sum = torch.tensor(0).cuda()
            for tr in range(ts_repeats):
                loss_sum = torch.tensor(0).cuda()

                for task in taskset:
                    if self.change_way:
                        self.n_way = task.size(0)
                    self.n_query = task.size(1) - self.n_support
                    loss = self.set_forward_loss(task)
                    loss_sum = loss_sum + loss

                optimizer.zero_grad()
                loss_sum.backward()

                # Log gradient magnitudes only on the first repeat of each taskset.
                if tr == 0:
                    for k, p in get_param_dict(self).items():
                        metrics[f"grad_norm/{k}"] = p.grad.abs().mean().item() if p.grad is not None else 0

                optimizer.step()

            losses.append(loss_sum.item())
            accuracies.extend([
                self.query_accuracy(task) for task in taskset
            ])
            acc_mean = np.mean(accuracies) * 100
            acc_std = np.std(accuracies) * 100

            if taskset_id % self.taskset_print_every == 0:
                print(
                    f"Epoch {epoch} | Taskset {taskset_id} | TS {len(taskset)} | TS epochs {ts_repeats} | Loss {loss_sum.item()} | Train acc {acc_mean:.2f} +- {acc_std:.2f} %")

            taskset_id += 1
            taskset = []

    metrics["loss/train"] = np.mean(losses)
    metrics["accuracy/train"] = np.mean(accuracies) * 100

    return metrics
class PPAMixin(HyperNetPOC):
    """Mixin for the PPA variant: the target network is constrained to a single
    linear layer, and each hypernet head predicts a per-class slice
    (param.numel() // n_way values) of one target-network parameter."""

    def build_target_net_architecture(self, params) -> nn.Module:
        # PPA only supports a depth-1 (single linear layer) target network.
        assert params.hn_tn_depth == 1, "In PPA the target network must be a single linear layer, please use `--hn_tn_depth=1`"
        return super().build_target_net_architecture(params)

    def init_hypernet_modules(self):
        """Create one hypernetwork head per target-network parameter."""
        target_net_param_dict = get_param_dict(self.target_net_architecture)
        target_net_param_dict = {
            name.replace(".", "-"): p
            # replace dots with hyphens bc torch doesn't like dots in modules names
            for name, p in target_net_param_dict.items()
        }
        self.target_net_param_shapes = {
            name: p.shape
            for (name, p)
            in target_net_param_dict.items()
        }

        self.init_hypernet_neck()

        self.hypernet_heads = nn.ModuleDict()
        assert self.hn_head_len >= 1, "Head len must be >= 1!"
        # assert False, self.target_net_param_shapes
        for name, param in target_net_param_dict.items():
            # Head input comes straight from the embedding if there is no neck.
            head_in = self.embedding_size if self.hn_neck_len == 0 else self.hn_hidden_size
            head_modules = []
            assert param.numel() % self.n_way == 0, f"Each param in PPA should be divisible by {self.n_way=}, but {name} is of {param.shape=} -> {param.numel()=}"
            # Each head emits only the per-class slice of the parameter.
            head_out = param.numel() // self.n_way
            for i in range(self.hn_head_len):
                in_size = head_in if i == 0 else self.hn_hidden_size
                is_final = (i == (self.hn_head_len - 1))
                out_size = head_out if is_final else self.hn_hidden_size
                head_modules.extend([nn.Dropout(self.hn_dropout), nn.Linear(in_size, out_size)])
                if not is_final:
                    head_modules.append(nn.ReLU())
            self.hypernet_heads[name] = nn.Sequential(*head_modules)
class HypernetPPA(PPAMixin, HyperNetPOC):
    """Based loosely on https://arxiv.org/abs/1706.03466"""

    def taskset_repeats(self, epoch: int):
        # PPA always performs a single pass over each taskset.
        return 1

    def init_embedding_size(self, params) -> int:
        """Size of the per-class embedding fed to the hypernet heads."""
        if self.attention_embedding:
            raise NotImplementedError()
        else:
            assert self.sup_aggregation in ALLOWED_AGGREGATIONS
            if self.sup_aggregation == "concat":
                return self.feat_dim * self.n_support
            elif self.sup_aggregation in ["mean", "max_pooling", "min_pooling"]:
                return self.feat_dim
            # NOTE(review): falls through (returns None) for any other value in
            # ALLOWED_AGGREGATIONS — verify the list only contains the four above.

    def build_embedding(self, support_feature: torch.Tensor) -> torch.Tensor:
        """Aggregate [n_way, n_support, feat] support features into per-class embeddings."""
        way, n_support, feat = support_feature.shape
        if self.attention_embedding:
            features = support_feature.view(1, -1, *(support_feature.size()[2:]))
            attention_features = torch.flatten(self.transformer_encoder.forward(features))
            return attention_features
        features = self.maybe_aggregate_support_feature(support_feature)
        return features

    def generate_network_params(self, support_feature: torch.Tensor) -> Dict[str, torch.Tensor]:
        """Predict all target-network parameters (one head per parameter tensor)."""
        embedding = self.build_embedding(support_feature)
        # Each head maps one row (one class) of the embedding to that class's slice.
        assert embedding.shape[0] == self.n_way
        root = self.hypernet_neck(embedding)
        network_params = {
            name.replace("-", "."): param_net(root).reshape(self.target_net_param_shapes[name])
            for name, param_net in self.hypernet_heads.items()
        }
        return network_params
| 18,022 | 41.607565 | 180 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/hypernets/__init__.py | from methods.hypernets.hypernet_poc import HyperNetPOC, HypernetPPA
from methods.hypernets.hypernet_kernel import HyperShot
# Registry mapping CLI method names to their hypernetwork implementations.
hypernet_types = dict(
    hyper_shot=HyperShot,
    hn_ppa=HypernetPPA,
    hn_poc=HyperNetPOC,
)
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/hypernets/bayeshmaml.py | from copy import deepcopy
import numpy as np
import torch
from torch import nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
import backbone
from methods.hypernets.utils import get_param_dict, kl_diag_gauss_with_standard_gauss, \
reparameterize
from methods.hypernets.hypermaml import HyperMAML
class BHyperNet(nn.Module):
    """Bayesian hypernetwork head: a shared MLP trunk followed by two linear
    tails that emit the mean and log-variance of each target-network weight."""

    def __init__(self, hn_hidden_size, n_way, embedding_size, feat_dim, out_neurons, params):
        super(BHyperNet, self).__init__()
        self.hn_head_len = params.hn_head_len

        # Trunk: embedding -> hidden, plus (hn_head_len - 2) extra hidden blocks.
        # range() is empty for hn_head_len <= 2, so no explicit guard is needed.
        trunk = [nn.Linear(embedding_size, hn_hidden_size), nn.ReLU()]
        for _ in range(self.hn_head_len - 2):
            trunk.append(nn.Linear(hn_hidden_size, hn_hidden_size))
            trunk.append(nn.ReLU())
        self.head = nn.Sequential(*trunk)

        # Tails turning the shared representation into distribution parameters.
        self.tail_mean = nn.Sequential(nn.Linear(hn_hidden_size, out_neurons))
        self.tail_logvar = nn.Sequential(nn.Linear(hn_hidden_size, out_neurons))

    def forward(self, x):
        """Return (mean, logvar) tensors, each of shape [..., out_neurons]."""
        shared = self.head(x)
        return self.tail_mean(shared), self.tail_logvar(shared)
class BayesHMAML(HyperMAML):
    """Bayesian HyperMAML: the hypernetwork emits a (mean, log-variance) pair for
    every target-network parameter; fast weights are sampled with the
    reparameterization trick and a KL term pulls the per-weight posteriors
    towards a standard Gaussian. The KL weight is annealed over training."""

    def __init__(self, model_func, n_way, n_support, n_query, params=None, approx=False):
        super(BayesHMAML, self).__init__(model_func, n_way, n_support, n_query, approx=approx, params=params)

        # loss function component
        self.loss_kld = kl_diag_gauss_with_standard_gauss  # Kullback–Leibler divergence
        self.kl_scale = params.kl_scale
        self.kl_step = None  # increase step for share of kld in loss
        self.kl_stop_val = params.kl_stop_val

        # num of weight set draws for softvoting
        self.weight_set_num_train = params.hm_weight_set_num_train  # train phase
        # 0 means "use the expected value (mu) at test time" rather than sampling.
        self.weight_set_num_test = params.hm_weight_set_num_test if params.hm_weight_set_num_test != 0 else None  # test phase

    def _init_classifier(self):
        """Build the target classifier as a stack of Bayesian fast-weight linears."""
        assert self.hn_tn_hidden_size % self.n_way == 0, f"hn_tn_hidden_size {self.hn_tn_hidden_size} should be the multiple of n_way {self.n_way}"
        layers = []

        for i in range(self.hn_tn_depth):
            in_dim = self.feat_dim if i == 0 else self.hn_tn_hidden_size
            out_dim = self.n_way if i == (self.hn_tn_depth - 1) else self.hn_tn_hidden_size

            linear = backbone.BLinear_fw(in_dim, out_dim)
            linear.bias.data.fill_(0)

            layers.append(linear)

        self.classifier = nn.Sequential(*layers)

    def _init_hypernet_modules(self, params):
        """Create one BHyperNet head per target-classifier parameter tensor."""
        target_net_param_dict = get_param_dict(self.classifier)
        target_net_param_dict = {
            name.replace(".", "-"): p
            # replace dots with hyphens bc torch doesn't like dots in modules names
            for name, p in target_net_param_dict.items()
        }

        self.target_net_param_shapes = {
            name: p.shape
            for (name, p)
            in target_net_param_dict.items()
        }

        self.hypernet_heads = nn.ModuleDict()

        for name, param in target_net_param_dict.items():
            if self.hm_use_class_batch_input and name[-4:] == 'bias':
                # notice head_out val when using this strategy
                # (the weight head also emits the bias slice, so bias gets no head)
                continue

            bias_size = param.shape[0] // self.n_way

            head_in = self.embedding_size
            head_out = (param.numel() // self.n_way) + bias_size if self.hm_use_class_batch_input else param.numel()

            # make hypernetwork for target network param
            self.hypernet_heads[name] = BHyperNet(self.hn_hidden_size, self.n_way, head_in, self.feat_dim, head_out,
                                                  params)

    def get_hn_delta_params(self, support_embeddings):
        """Run the hypernet heads and return [mean, logvar] pairs, one per
        classifier parameter (weights and biases interleaved in the
        class-batch-input mode)."""
        if self.hm_detach_before_hyper_net:
            support_embeddings = support_embeddings.detach()

        if self.hm_use_class_batch_input:
            delta_params_list = []

            for name, param_net in self.hypernet_heads.items():
                support_embeddings_resh = support_embeddings.reshape(
                    self.n_way, -1
                )

                delta_params_mean, params_logvar = param_net(support_embeddings_resh)
                bias_neurons_num = self.target_net_param_shapes[name][0] // self.n_way

                if self.hn_adaptation_strategy == 'increasing_alpha' and self.alpha < 1:
                    delta_params_mean = delta_params_mean * self.alpha
                    params_logvar = params_logvar * self.alpha

                # Each head's output row holds [weight slice | bias slice] per class.
                weights_delta_mean = delta_params_mean[:, :-bias_neurons_num].contiguous().view(
                    *self.target_net_param_shapes[name])
                bias_delta_mean = delta_params_mean[:, -bias_neurons_num:].flatten()

                weights_logvar = params_logvar[:, :-bias_neurons_num].contiguous().view(
                    *self.target_net_param_shapes[name])
                bias_logvar = params_logvar[:, -bias_neurons_num:].flatten()

                delta_params_list.append([weights_delta_mean, weights_logvar])
                delta_params_list.append([bias_delta_mean, bias_logvar])
            return delta_params_list
        else:
            delta_params_list = []

            for name, param_net in self.hypernet_heads.items():
                flattened_embeddings = support_embeddings.flatten()

                delta_mean, logvar = param_net(flattened_embeddings)

                if name in self.target_net_param_shapes.keys():
                    delta_mean = delta_mean.reshape(self.target_net_param_shapes[name])
                    logvar = logvar.reshape(self.target_net_param_shapes[name])

                if self.hn_adaptation_strategy == 'increasing_alpha' and self.alpha < 1:
                    delta_mean = self.alpha * delta_mean
                    logvar = self.alpha * logvar

                delta_params_list.append([delta_mean, logvar])
            return delta_params_list

    def _update_weight(self, weight, update_mean, logvar, train_stage=False):
        """ get distribution associated with weight. Sample weights for target network. """
        if update_mean is None and logvar is None:
            return

        # if weight.mu is None:
        if not hasattr(weight, 'mu') or weight.mu is None:
            weight.mu = None
            weight.mu = weight - update_mean
        else:
            weight.mu = weight.mu - update_mean

        if logvar is None:  # used in maml warmup
            weight.fast = []
            weight.fast.append(weight.mu)
        else:
            weight.logvar = logvar
            weight.fast = []
            if train_stage:
                for _ in range(self.weight_set_num_train):  # sample fast parameters for training
                    weight.fast.append(reparameterize(weight.mu, weight.logvar))
            else:
                if self.weight_set_num_test is not None:
                    for _ in range(self.weight_set_num_test):  # sample fast parameters for testing
                        weight.fast.append(reparameterize(weight.mu, weight.logvar))
                else:
                    weight.fast.append(weight.mu)  # return expected value

    def _scale_step(self):
        """calculate regularization step for kld"""
        if self.kl_step is None:
            # scale step is calculated so that share of kld in loss increases kl_scale -> kl_stop_val
            self.kl_step = np.power(1 / self.kl_scale * self.kl_stop_val, 1 / self.stop_epoch)
        self.kl_scale = self.kl_scale * self.kl_step

    def _get_p_value(self):
        """Warmup coefficient: 1.0 during warmup, linearly decaying to 0.0 over
        the switch epochs, then 0.0 (pure hypernetwork updates)."""
        if self.epoch < self.hm_maml_warmup_epochs:
            return 1.0
        elif self.hm_maml_warmup_epochs <= self.epoch < self.hm_maml_warmup_epochs + self.hm_maml_warmup_switch_epochs:
            return (self.hm_maml_warmup_switch_epochs + self.hm_maml_warmup_epochs - self.epoch) / (
                    self.hm_maml_warmup_switch_epochs + 1)
        return 0.0

    def _update_network_weights(self, delta_params_list, support_embeddings, support_data_labels, train_stage=False):
        """Set the classifier's fast weights, blending MAML gradient steps with
        hypernetwork-predicted deltas according to the warmup coefficient p."""
        if self.hm_maml_warmup and not self.single_test:
            p = self._get_p_value()
            # warmup coef p decreases 1 -> 0
            if p > 0.0:
                fast_parameters = []

                clf_fast_parameters = list(self.classifier.parameters())
                for weight in self.classifier.parameters():
                    weight.fast = None
                    weight.mu = None
                    # weight.logvar = None
                self.classifier.zero_grad()
                fast_parameters = fast_parameters + clf_fast_parameters

                for task_step in range(self.task_update_num):
                    scores = self.classifier(support_embeddings)

                    set_loss = self.loss_fn(scores, support_data_labels)

                    # KL regularization of the current weight posteriors.
                    # NOTE(review): assumes weight.logvar exists on every
                    # parameter at this point — confirm BLinear_fw initializes it.
                    reduction = self.kl_scale
                    for weight in self.classifier.parameters():
                        if weight.logvar is not None:
                            if weight.mu is not None:
                                # set_loss = set_loss + self.kl_w * reduction * self.loss_kld(weight.mu, weight.logvar)
                                set_loss = set_loss + reduction * self.loss_kld(weight.mu, weight.logvar)
                            else:
                                # set_loss = set_loss + self.kl_w * reduction * self.loss_kld(weight, weight.logvar)
                                set_loss = set_loss + reduction * self.loss_kld(weight, weight.logvar)

                    grad = torch.autograd.grad(set_loss, fast_parameters, create_graph=True,
                                               allow_unused=True)  # build full graph support gradient of gradient

                    if self.approx:
                        grad = [g.detach() for g in
                                grad]  # do not calculate gradient of gradient if using first order approximation

                    if p == 1:
                        # update weights of classifier network by adding gradient
                        for k, weight in enumerate(self.classifier.parameters()):
                            update_value = (self.train_lr * grad[k])
                            update_mean, logvar = delta_params_list[k]
                            self._update_weight(weight, update_value, logvar, train_stage)
                    elif 0.0 < p < 1.0:
                        # update weights of classifier network by adding gradient and output of hypernetwork
                        for k, weight in enumerate(self.classifier.parameters()):
                            update_value = self.train_lr * p * grad[k]
                            update_mean, logvar = delta_params_list[k]
                            update_mean = (1 - p) * update_mean + update_value
                            self._update_weight(weight, update_mean, logvar, train_stage)
            else:
                # Warmup finished (p == 0): pure hypernetwork update.
                for k, weight in enumerate(self.classifier.parameters()):
                    update_mean, logvar = delta_params_list[k]
                    self._update_weight(weight, update_mean, logvar, train_stage)
        else:
            for k, weight in enumerate(self.classifier.parameters()):
                update_mean, logvar = delta_params_list[k]
                self._update_weight(weight, update_mean, logvar, train_stage)

    def _get_list_of_delta_params(self, maml_warmup_used, support_embeddings, support_data_labels):
        """Prepare (optionally enhanced) support embeddings, reset fast weights,
        and query the hypernetwork for the delta-parameter distributions."""
        # if not maml_warmup_used:

        if self.enhance_embeddings:
            with torch.no_grad():
                logits = self.classifier.forward(support_embeddings).detach()
                logits = F.softmax(logits, dim=1)

            labels = support_data_labels.view(support_embeddings.shape[0], -1)
            support_embeddings = torch.cat((support_embeddings, logits, labels), dim=1)

        for weight in self.parameters():
            weight.fast = None
        for weight in self.classifier.parameters():
            weight.mu = None
            # weight.logvar = None
        self.zero_grad()

        support_embeddings = self.apply_embeddings_strategy(support_embeddings)

        delta_params = self.get_hn_delta_params(support_embeddings)

        if self.hm_save_delta_params and len(self.delta_list) == 0:
            self.delta_list = [{'delta_params': delta_params}]

        return delta_params

    def set_forward_loss(self, x):
        """Adapt and forward using x. Return scores and total losses"""
        scores, total_delta_sum = self.set_forward(x, is_feature=False, train_stage=True)

        # calc_sigma = calc_sigma and (self.epoch == self.stop_epoch - 1 or self.epoch % 100 == 0)
        # sigma, mu = self._mu_sigma(calc_sigma)

        query_data_labels = Variable(torch.from_numpy(np.repeat(range(self.n_way), self.n_query))).cuda()
        if self.hm_support_set_loss:
            support_data_labels = torch.from_numpy(np.repeat(range(self.n_way), self.n_support)).cuda()
            query_data_labels = torch.cat((support_data_labels, query_data_labels))

        reduction = self.kl_scale
        loss_ce = self.loss_fn(scores, query_data_labels)
        loss_kld = torch.zeros_like(loss_ce)

        for name, weight in self.classifier.named_parameters():
            if weight.mu is not None and weight.logvar is not None:
                val = self.loss_kld(weight.mu, weight.logvar)
                # loss_kld = loss_kld + self.kl_w * reduction * val
                loss_kld = loss_kld + reduction * val

        loss = loss_ce + loss_kld

        if self.hm_lambda != 0:
            loss = loss + self.hm_lambda * total_delta_sum

        # Top-1 accuracy on the query labels.
        topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
        topk_ind = topk_labels.cpu().numpy().flatten()
        y_labels = query_data_labels.cpu().numpy()
        top1_correct = np.sum(topk_ind == y_labels)
        task_accuracy = (top1_correct / len(query_data_labels)) * 100

        return loss, loss_ce, loss_kld, task_accuracy

    def set_forward_loss_with_adaptation(self, x):
        """returns loss and accuracy from adapted model (copy)"""
        scores, _ = self.set_forward(x, is_feature=False, train_stage=False)  # scores from adapted copy

        support_data_labels = Variable(torch.from_numpy(np.repeat(range(self.n_way), self.n_support))).cuda()

        reduction = self.kl_scale
        loss_ce = self.loss_fn(scores, support_data_labels)
        loss_kld = torch.zeros_like(loss_ce)

        for name, weight in self.classifier.named_parameters():
            if weight.mu is not None and weight.logvar is not None:
                # loss_kld = loss_kld + self.kl_w * reduction * self.loss_kld(weight.mu, weight.logvar)
                loss_kld = loss_kld + reduction * self.loss_kld(weight.mu, weight.logvar)

        loss = loss_ce + loss_kld

        topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
        topk_ind = topk_labels.cpu().numpy().flatten()
        y_labels = support_data_labels.cpu().numpy()
        top1_correct = np.sum(topk_ind == y_labels)
        task_accuracy = (top1_correct / len(support_data_labels)) * 100

        return loss, task_accuracy

    def train_loop(self, epoch, train_loader, optimizer):  # overrides parent implementation
        """One training epoch; optimizer steps every `n_task` tasks. Returns metrics."""
        print_freq = 10
        avg_loss = 0
        task_count = 0
        loss_all = []
        loss_ce_all = []
        loss_kld_all = []
        # loss_kld_no_scale_all = []
        acc_all = []
        optimizer.zero_grad()

        self.delta_list = []

        # train
        for i, (x, _) in enumerate(train_loader):
            self.n_query = x.size(1) - self.n_support
            assert self.n_way == x.size(0), "MAML do not support way change"

            loss, loss_ce, loss_kld, task_accuracy = self.set_forward_loss(x)
            avg_loss = avg_loss + loss.item()  # .data[0]
            loss_all.append(loss)
            loss_ce_all.append(loss_ce.item())
            loss_kld_all.append(loss_kld.item())
            # loss_kld_no_scale_all.append(loss_kld_no_scale.item())
            acc_all.append(task_accuracy)

            task_count += 1

            if task_count == self.n_task:  # MAML update several tasks at one time
                loss_q = torch.stack(loss_all).sum(0)
                loss_q.backward()

                optimizer.step()

                task_count = 0
                loss_all = []

                optimizer.zero_grad()
            if i % print_freq == 0:
                print('Epoch {:d}/{:d} | Batch {:d}/{:d} | Loss {:f}'.format(self.epoch, self.stop_epoch, i,
                                                                             len(train_loader),
                                                                             avg_loss / float(i + 1)))

        # Anneal the KL weight once per epoch.
        self._scale_step()

        acc_all = np.asarray(acc_all)
        acc_mean = np.mean(acc_all)

        metrics = {"accuracy/train": acc_mean}

        loss_ce_all = np.asarray(loss_ce_all)
        loss_ce_mean = np.mean(loss_ce_all)
        metrics["loss_ce"] = loss_ce_mean

        loss_kld_all = np.asarray(loss_kld_all)
        loss_kld_mean = np.mean(loss_kld_all)
        metrics["loss_kld"] = loss_kld_mean

        if self.hn_adaptation_strategy == 'increasing_alpha':
            metrics['alpha'] = self.alpha

        if self.hm_save_delta_params and len(self.delta_list) > 0:
            delta_params = {"epoch": self.epoch, "delta_list": self.delta_list}
            metrics['delta_params'] = delta_params

        if self.alpha < 1:
            self.alpha += self.hn_alpha_step

        return metrics

    def set_forward_with_adaptation(self, x: torch.Tensor):
        """Test-time tuning on a deep copy; returns (final query accuracy, metrics)."""
        self_copy = deepcopy(self)

        # deepcopy does not copy "fast" parameters so it should be done manually
        for param1, param2 in zip(self.feature.parameters(), self_copy.feature.parameters()):
            if hasattr(param1, 'fast'):
                if param1.fast is not None:
                    param2.fast = param1.fast.clone()
                else:
                    param2.fast = None

        for param1, param2 in zip(self.classifier.parameters(), self_copy.classifier.parameters()):
            if hasattr(param1, 'fast'):
                if param1.fast is not None:
                    # classifier fast weights are a list of sampled weight sets
                    param2.fast = list(param1.fast)
                else:
                    param2.fast = None
            if hasattr(param1, 'mu'):
                if param1.mu is not None:
                    param2.mu = param1.mu.clone()
                else:
                    param2.mu = None
            if hasattr(param1, 'logvar'):
                if param1.logvar is not None:
                    param2.logvar = param1.logvar.clone()
                else:
                    param2.logvar = None

        metrics = {
            "accuracy/val@-0": self_copy.query_accuracy(x)
        }

        val_opt_type = torch.optim.Adam if self.hn_val_optim == "adam" else torch.optim.SGD
        val_opt = val_opt_type(self_copy.parameters(), lr=self.hn_val_lr)

        if self.hn_val_epochs > 0:
            for i in range(1, self.hn_val_epochs + 1):
                self_copy.train()
                val_opt.zero_grad()
                loss, val_support_acc = self_copy.set_forward_loss_with_adaptation(x)
                loss.backward()
                val_opt.step()
                self_copy.eval()
                metrics[f"accuracy/val_support_acc@-{i}"] = val_support_acc
                metrics[f"accuracy/val_loss@-{i}"] = loss.item()
                metrics[f"accuracy/val@-{i}"] = self_copy.query_accuracy(x)

        # free CUDA memory by deleting "fast" parameters
        for param in self_copy.parameters():
            param.fast = None
            param.mu = None
            param.logvar = None

        return metrics[f"accuracy/val@-{self.hn_val_epochs}"], metrics
| 20,114 | 41.08159 | 147 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/models/gp_kernels.py | import gpytorch
import torch
import torch.nn as nn
import numpy as np
class NNKernel(gpytorch.kernels.Kernel):
    """GPyTorch kernel computing k(x1, x2) = <g(x1), g(x2)> where g is an MLP
    embedding network learned jointly with the GP."""

    def __init__(self, input_dim, output_dim, num_layers, hidden_dim, flatten=False, **kwargs):
        super(NNKernel, self).__init__(**kwargs)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        self.flatten = flatten
        self.model = self.create_model()

    def create_model(self):
        """Assemble the embedding MLP: [Flatten?] -> (Linear, ReLU) blocks -> Linear."""
        assert self.num_layers >= 1, "Number of hidden layers must be at least 1"
        layers = []
        if self.flatten:
            layers.append(nn.Flatten())
        layers.append(nn.Linear(self.input_dim, self.hidden_dim))
        layers.append(nn.ReLU())
        for _ in range(self.num_layers - 1):
            layers.append(nn.Linear(self.hidden_dim, self.hidden_dim))
            layers.append(nn.ReLU())
        layers.append(nn.Linear(self.hidden_dim, self.output_dim))
        return nn.Sequential(*layers)

    def forward(self, x1, x2, diag=False, last_dim_is_batch=False, full_covar=True, **params):
        """Covariance between x1 and x2 as the inner product of MLP embeddings.

        Args:
            x1: first set of data, `n x d` (or `b x n x d`).
            x2: second set of data, `m x d` (or `b x m x d`).
            diag: if True, return only the diagonal of the kernel matrix.
            last_dim_is_batch: treating the last data dimension as a batch
                dimension is not implemented for this kernel.

        Returns:
            The `n x m` Gram matrix (or its `n`-vector diagonal when ``diag``).
        """
        if last_dim_is_batch:
            raise NotImplementedError()
        embedded_1 = self.model(x1)
        embedded_2 = self.model(x2)
        gram = torch.matmul(embedded_1, embedded_2.T)
        return torch.diag(gram) if diag else gram
class PositiveLinear(nn.Module):
    """Bias-free linear layer whose effective weights are kept strictly
    positive by passing the raw weight through softplus on every forward."""

    def __init__(self, in_features, out_features):
        super(PositiveLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-uniform initialization of the raw (unconstrained) weight."""
        nn.init.xavier_uniform_(self.weight)

    def forward(self, input):
        positive_weight = nn.functional.softplus(self.weight)
        return nn.functional.linear(input, positive_weight)
class NNKernelNoInner(gpytorch.kernels.Kernel):
    """Kernel where each pairwise covariance is produced directly by an MLP with
    positive weights applied to the concatenated pair, instead of an inner
    product of embeddings."""

    def __init__(self, input_dim, num_layers, hidden_dim, flatten=False, **kwargs):
        super(NNKernelNoInner, self).__init__(**kwargs)
        # the network consumes a concatenated pair, hence twice the input dim
        self.input_dim = input_dim*2
        self.output_dim = 1
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        self.flatten = flatten
        self.model = self.create_model()

    def create_model(self):
        """Build the pair-scoring MLP from PositiveLinear + Sigmoid blocks."""
        assert self.num_layers >= 1, "Number of hidden layers must be at least 1"
        modules = [PositiveLinear(self.input_dim, self.hidden_dim), nn.Sigmoid()]
        if self.flatten:
            modules = [nn.Flatten()] + modules
        for i in range(self.num_layers - 1):
            modules.append(PositiveLinear(self.hidden_dim, self.hidden_dim))
            modules.append(nn.Sigmoid())
        modules.append(PositiveLinear(self.hidden_dim, self.output_dim))
        model = nn.Sequential(*modules)
        return model

    def forward(self, x1, x2, diag=False, last_dim_is_batch=False, full_covar=True, **params):
        r"""
        Computes the covariance between x1 and x2.
        This method should be implemented by all Kernel subclasses.

        Args:
            :attr:`x1` (Tensor `n x d` or `b x n x d`):
                First set of data
            :attr:`x2` (Tensor `m x d` or `b x m x d`):
                Second set of data
            :attr:`diag` (bool):
                Should the Kernel compute the whole kernel, or just the diag?
            :attr:`last_dim_is_batch` (tuple, optional):
                If this is true, it treats the last dimension of the data as another batch dimension.
                (Useful for additive structure over the dimensions). Default: False

        Returns:
            :class:`Tensor` or :class:`gpytorch.lazy.LazyTensor`.
                The exact size depends on the kernel's evaluation mode:

                * `full_covar`: `n x m` or `b x n x m`
                * `full_covar` with `last_dim_is_batch=True`: `k x n x m` or `b x k x n x m`
                * `diag`: `n` or `b x n`
                * `diag` with `last_dim_is_batch=True`: `k x n` or `b x k x n`
        """
        if last_dim_is_batch:
            raise NotImplementedError()
        else:
            n = x1.shape[0]
            m = x2.shape[0]
            out = torch.zeros((n,m), device=x1.get_device())
            # NOTE(review): only the lower-triangular pairs are evaluated and the
            # result is mirrored via out[j, i] = out[i, j]; this is only valid
            # when x1 and x2 are the same set, and for n > m the mirror write
            # can index out of bounds — confirm callers always pass x1 == x2.
            for i in range(n):
                for j in range(i+1):
                    out[i, j] = self.model(torch.cat((x1[i], x2[j]))).view(-1)
                    if i != j:
                        out[j, i] = out[i, j]
            #npout = out.cpu().detach().numpy()
            #print(np.linalg.eigvals(npout))
            #assert np.all(np.linalg.eigvals(npout) +1e-2 >= 0), "not positive"
            if diag:
                return torch.diag(out)
            else:
                return out
class MultiNNKernel(gpytorch.kernels.Kernel):
    """Multitask NN kernel: one embedding network per task. The covariance
    between task i at x1 and task j at x2 is <g_i(x1), g_j(x2)>, laid out in
    the interleaved (point-major) multitask ordering."""

    def __init__(self, num_tasks, kernels, **kwargs):
        super(MultiNNKernel, self).__init__(**kwargs)
        assert isinstance(kernels, list), "kernels must be a list of kernels"
        self.num_tasks = num_tasks
        self.kernels = nn.ModuleList(kernels)

    def num_outputs_per_input(self, x1, x2):
        """
        Given `n` data points `x1` and `m` datapoints `x2`, this multitask
        kernel returns an `(n*num_tasks) x (m*num_tasks)` covariance matrix.
        """
        return self.num_tasks

    def forward(self, x1, x2, diag=False, last_dim_is_batch=False, full_covar=True, **params):
        """Compute the interleaved multitask covariance between x1 and x2.

        Args:
            x1: first set of data, `n x d`.
            x2: second set of data, `m x d`.
            diag: if True, return only the diagonal of the kernel matrix.
            last_dim_is_batch: not implemented for this kernel.

        Returns:
            `(n*num_tasks) x (m*num_tasks)` Gram matrix where row `i*num_tasks + s`
            belongs to point i / task s (or its diagonal when ``diag``).
        """
        if last_dim_is_batch:
            raise NotImplementedError()
        n, m = x1.shape[0], x2.shape[0]
        t = self.num_tasks
        out = torch.zeros((n * t, m * t), device=x1.get_device())
        for i in range(t):
            z1 = self.kernels[i].model(x1)  # loop-invariant for the inner loop
            for j in range(t):
                z2 = self.kernels[j].model(x2)
                # scatter the task-(i, j) block into the interleaved layout
                out[i::t, j::t] = torch.matmul(z1, z2.T)
        return torch.diag(out) if diag else out
few-shot-hypernets-public | few-shot-hypernets-public-master/models/__init__.py | 0 | 0 | 0 | py | |
few-shot-hypernets-public | few-shot-hypernets-public-master/filelists/QMUL/write_QMUL_filelist.py | import numpy as np
from os import listdir
from os.path import isfile, isdir, join
import os
import json
import random
from tqdm import tqdm
from PIL import Image
# Build base/val/novel JSON file lists for the QMUL head-pose dataset.
cwd = os.getcwd()
data_path = join(cwd, 'QMUL_360degreeViewSphere_FaceDatabase/Set1_Greyscale')

folder_list = [f for f in listdir(data_path) if isdir(join(data_path, f))]
folder_list.sort()

# Collect the raw .ras images of every identity.
allfiles = []
for i, folder in enumerate(tqdm(folder_list)):
    print(folder)
    folder_path = join(data_path, folder)
    allfiles.append(
        [join(folder_path, cf) for cf in listdir(folder_path)
         if (isfile(join(folder_path, cf)) and cf[0] != '.') and 'ras' in cf])

# First let's rewrite all these RAS images as JPEGS because we like JPGS
new_data_path = join(cwd, 'images/')
for i, folder in enumerate(tqdm(folder_list)):
    os.makedirs(join(new_data_path, folder), exist_ok=True)
    for file in allfiles[i]:
        theim = Image.open(file)
        newim = theim.convert('RGB')
        newim.save(join(new_data_path, folder, file.split('/')[-1].replace('ras', 'jpg')))

# Here we go again: index the converted JPEGs.
data_path = join(cwd, 'images/')
folder_list = [f for f in listdir(data_path) if isdir(join(data_path, f))]
folder_list.sort()

dataset_list = ['base', 'val', 'novel']

classfile_list_all = []
for i, folder in enumerate(folder_list):
    folder_path = join(data_path, folder)
    classfile_list_all.append([join(folder_path, cf) for cf in listdir(folder_path)
                               if (isfile(join(folder_path, cf)) and cf[0] != '.')])
    random.shuffle(classfile_list_all[i])


def _pose_label(path):
    """Image label = head-pose angle parsed from the trailing '_<angle>.jpg'."""
    return int(path.split('_')[-1].replace('.jpg', ''))


for dataset in dataset_list:
    file_list = []
    label_list = []
    for i, classfile_list in enumerate(classfile_list_all):
        # Deterministic identity split: even -> base, i%4==1 -> val, i%4==3 -> novel.
        if 'base' in dataset and (i % 2 == 0):
            file_list = file_list + classfile_list
            label_list = label_list + [_pose_label(v) for v in classfile_list]
        if 'val' in dataset and (i % 4 == 1):
            file_list = file_list + classfile_list
            label_list = label_list + [_pose_label(v) for v in classfile_list]
        if 'novel' in dataset and (i % 4 == 3):
            file_list = file_list + classfile_list
            label_list = label_list + [_pose_label(v) for v in classfile_list]

    # json.dump replaces the previous hand-rolled writer that back-patched
    # trailing commas with seek(); that approach emitted invalid JSON whenever
    # a list was empty (the seek(-1) ate the opening bracket).
    with open(dataset + ".json", "w") as fo:
        json.dump({
            "label_names": folder_list,
            "image_names": file_list,
            "image_labels": label_list,
        }, fo)

    print("%s -OK" % dataset)
| 3,153 | 31.854167 | 138 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/filelists/omniglot/write_cross_char_base_filelist.py | import numpy as np
from os import listdir
from os.path import isfile, isdir, join
import os
import json
import random
import re
# Build the "noLatin" base file list for cross-character evaluation:
# every Omniglot alphabet except Latin (which is held out).
cwd = os.getcwd()
data_path = join(cwd, 'images')
savedir = './'

folderlist = []
language_folder_list = [f for f in listdir(data_path) if isdir(join(data_path, f))]
language_folder_list.sort()

# Gather the PNGs of every character class, keyed by "<alphabet>/<character>".
filelists = {}
for language_folder in language_folder_list:
    if language_folder == 'Latin':
        continue
    language_folder_path = join(data_path, language_folder)
    character_folder_list = [cf for cf in listdir(language_folder_path) if isdir(join(language_folder_path, cf))]
    character_folder_list.sort()
    for character_folder in character_folder_list:
        character_folder_path = join(language_folder_path, character_folder)
        label = join(language_folder, character_folder)
        folderlist.append(label)
        filelists[label] = [join(character_folder_path, img)
                            for img in listdir(character_folder_path)
                            if (isfile(join(character_folder_path, img)) and img[-3:] == 'png')]

# Flatten into parallel (image, numeric-label) lists, shuffling within each class.
filelists_flat = []
labellists_flat = []
for cl, (key, filelist) in enumerate(filelists.items()):
    random.shuffle(filelist)
    filelists_flat += filelist
    labellists_flat += np.repeat(cl, len(filelist)).tolist()

# json.dump replaces the previous hand-rolled writer that back-patched trailing
# commas with seek(); that approach emitted invalid JSON for empty lists.
with open(join(savedir, "noLatin.json"), "w") as fo:
    json.dump({
        "label_names": folderlist,
        "image_names": filelists_flat,
        "image_labels": labellists_flat,
    }, fo)

print("noLatin -OK")
| 1,885 | 28.015385 | 171 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/filelists/omniglot/write_omniglot_filelist.py | import numpy as np
from os import listdir
from os.path import isfile, isdir, join
import os
import json
import random
import re
cwd = os.getcwd()
data_path = join(cwd, 'images')
savedir = './'
dataset_list = ['base', 'val', 'novel']

cl = -1
folderlist = []

# Class membership for each split is given by train/val/test .txt files,
# one "<alphabet>/<character>/<rotation>" label per line.
datasetmap = {'base': 'train', 'val': 'val', 'novel': 'test'}
filelists = {'base': {}, 'val': {}, 'novel': {}}
filelists_flat = {'base': [], 'val': [], 'novel': []}
labellists_flat = {'base': [], 'val': [], 'novel': []}

for dataset in dataset_list:
    with open(datasetmap[dataset] + ".txt", "r") as lines:
        for i, line in enumerate(lines):
            label = line.replace('\n', '')
            folderlist.append(label)
            filelists[dataset][label] = [join(data_path, label, f) for f in listdir(join(data_path, label))]
    # Assign consecutive integer labels across splits; shuffle within class.
    for key, filelist in filelists[dataset].items():
        cl += 1
        random.shuffle(filelist)
        filelists_flat[dataset] += filelist
        labellists_flat[dataset] += np.repeat(cl, len(filelist)).tolist()

# json.dump replaces the old hand-built JSON with seek()-back comma stripping,
# which corrupted the output (deleting '[') whenever a list was empty.
for dataset in dataset_list:
    with open(savedir + dataset + ".json", "w") as fo:
        json.dump({"label_names": folderlist,
                   "image_names": filelists_flat[dataset],
                   "image_labels": labellists_flat[dataset]}, fo)
    print("%s -OK" % dataset)
| 1,755 | 28.266667 | 110 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/filelists/omniglot/rot_omniglot.py | import numpy as np
from os import listdir
from os.path import isfile, isdir, join
import os
import json
import random
from PIL import Image
cwd = os.getcwd()
data_path = join(cwd, 'images')
savedir = './'

# Walk every alphabet/character folder and save 0/90/180/270-degree rotated
# copies of each image into a "rotXXX" sub-folder next to the originals.
language_folder_list = [f for f in listdir(data_path) if isdir(join(data_path, f))]
language_folder_list.sort()

classfile_list_all = []

for language_folder in language_folder_list:
    language_dir = join(data_path, language_folder)
    character_folders = sorted(cf for cf in listdir(language_dir) if isdir(join(language_dir, cf)))
    for character_folder in character_folders:
        character_dir = join(language_dir, character_folder)
        originals = [name for name in listdir(character_dir)
                     if isfile(join(character_dir, name)) and not name.startswith('.')]
        for deg in (0, 90, 180, 270):
            rot_str = "rot%03d" % deg
            rot_dir = join(character_dir, rot_str)
            print(rot_dir)
            if not os.path.exists(rot_dir):
                os.makedirs(rot_dir)
            for name in originals:
                rotated = Image.open(join(character_dir, name)).rotate(deg)
                rotated.save(join(character_dir, rot_str, name))
| 1,413 | 36.210526 | 133 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/filelists/miniImagenet/write_miniImagenet_filelist.py | import numpy as np
from os import listdir
from os.path import isfile, isdir, join
import os
import json
import random
import re
data_path = '/shared/sets/datasets/vision/ImageNet/train'
savedir = './'
dataset_list = ['base', 'val', 'novel']

cl = -1
folderlist = []

# Split membership comes from the train/val/test CSVs (Ravi & Larochelle
# splits): each row is "<wnid><5-digit index>.jpg,<wnid>".
datasetmap = {'base': 'train', 'val': 'val', 'novel': 'test'}
filelists = {'base': {}, 'val': {}, 'novel': {}}
filelists_flat = {'base': [], 'val': [], 'novel': []}
labellists_flat = {'base': [], 'val': [], 'novel': []}

for dataset in dataset_list:
    with open(datasetmap[dataset] + ".csv", "r") as lines:
        for i, line in enumerate(lines):
            if i == 0:  # header row
                continue
            # Raw strings fix the invalid-escape warning of '\.' patterns.
            fid, _, label = re.split(r',|\.', line)
            label = label.replace('\n', '')
            if label not in filelists[dataset]:
                folderlist.append(label)
                filelists[dataset][label] = []
                # Sort this class's files by their numeric suffix so the
                # CSV's per-class index maps to a stable file order.
                fnames = listdir(join(data_path, label))
                fname_number = [int(re.split(r'_|\.', fname)[1]) for fname in fnames]
                sorted_fnames = list(zip(*sorted(zip(fnames, fname_number), key=lambda f_tuple: f_tuple[1])))[0]
            fid = int(fid[-5:]) - 1
            fname = join(data_path, label, sorted_fnames[fid])
            filelists[dataset][label].append(fname)
    # Consecutive integer labels across splits; shuffle within each class.
    for key, filelist in filelists[dataset].items():
        cl += 1
        random.shuffle(filelist)
        filelists_flat[dataset] += filelist
        labellists_flat[dataset] += np.repeat(cl, len(filelist)).tolist()

# json.dump replaces the old hand-built JSON with seek()-back comma stripping,
# which corrupted the output (deleting '[') whenever a list was empty.
for dataset in dataset_list:
    with open(savedir + dataset + ".json", "w") as fo:
        json.dump({"label_names": folderlist,
                   "image_names": filelists_flat[dataset],
                   "image_labels": labellists_flat[dataset]}, fo)
    print("%s -OK" % dataset)
| 2,359 | 31.777778 | 118 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/filelists/miniImagenet/write_cross_filelist.py | import numpy as np
from os import listdir
from os.path import isfile, isdir, join
import os
import json
import random
import re
data_path = '/shared/sets/datasets/vision/ImageNet/train'
savedir = './'
dataset_list = ['base', 'val', 'novel']

cl = -1
folderlist = []

# Same parsing as the per-split filelist writer: each CSV row is
# "<wnid><5-digit index>.jpg,<wnid>".
datasetmap = {'base': 'train', 'val': 'val', 'novel': 'test'}
filelists = {'base': {}, 'val': {}, 'novel': {}}
filelists_flat = {'base': [], 'val': [], 'novel': []}
labellists_flat = {'base': [], 'val': [], 'novel': []}

for dataset in dataset_list:
    with open(datasetmap[dataset] + ".csv", "r") as lines:
        for i, line in enumerate(lines):
            if i == 0:  # header row
                continue
            # Raw strings fix the invalid-escape warning of '\.' patterns.
            fid, _, label = re.split(r',|\.', line)
            label = label.replace('\n', '')
            if label not in filelists[dataset]:
                folderlist.append(label)
                filelists[dataset][label] = []
                # Sort this class's files by numeric suffix so the CSV's
                # per-class index maps to a stable file order.
                fnames = listdir(join(data_path, label))
                fname_number = [int(re.split(r'_|\.', fname)[1]) for fname in fnames]
                sorted_fnames = list(zip(*sorted(zip(fnames, fname_number), key=lambda f_tuple: f_tuple[1])))[0]
            fid = int(fid[-5:]) - 1
            fname = join(data_path, label, sorted_fnames[fid])
            filelists[dataset][label].append(fname)
    for key, filelist in filelists[dataset].items():
        cl += 1
        random.shuffle(filelist)
        filelists_flat[dataset] += filelist
        labellists_flat[dataset] += np.repeat(cl, len(filelist)).tolist()

# Cross-domain setting: merge base/val/novel into a single "all" filelist.
filelists_flat_all = filelists_flat['base'] + filelists_flat['val'] + filelists_flat['novel']
labellists_flat_all = labellists_flat['base'] + labellists_flat['val'] + labellists_flat['novel']

# json.dump replaces the old hand-built JSON with seek()-back comma stripping,
# which corrupted the output (deleting '[') whenever a list was empty.
with open(savedir + "all.json", "w") as fo:
    json.dump({"label_names": folderlist,
               "image_names": filelists_flat_all,
               "image_labels": labellists_flat_all}, fo)
print("all -OK")
| 2,472 | 32.418919 | 118 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/filelists/CUB/write_CUB_filelist.py | import numpy as np
from os import listdir
from os.path import isfile, isdir, join
import os
import json
import random
data_path = '/shared/sets/datasets/cub_birds/images'
savedir = './'
dataset_list = ['base', 'val', 'novel']

# One class folder per CUB species, sorted for a stable label order;
# shuffle the files inside each class.
folder_list = [f for f in listdir(data_path) if isdir(join(data_path, f))]
folder_list.sort()
label_dict = dict(zip(folder_list, range(0, len(folder_list))))  # name -> index (unused below)

classfile_list_all = []
for i, folder in enumerate(folder_list):
    folder_path = join(data_path, folder)
    classfile_list_all.append([join(folder_path, cf) for cf in listdir(folder_path)
                               if isfile(join(folder_path, cf)) and cf[0] != '.'])
    random.shuffle(classfile_list_all[i])

# Round-robin class split: base = even classes, val = i%4==1, novel = i%4==3.
for dataset in dataset_list:
    file_list = []
    label_list = []
    for i, classfile_list in enumerate(classfile_list_all):
        if 'base' in dataset and i % 2 == 0:
            file_list += classfile_list
            label_list += np.repeat(i, len(classfile_list)).tolist()
        if 'val' in dataset and i % 4 == 1:
            file_list += classfile_list
            label_list += np.repeat(i, len(classfile_list)).tolist()
        if 'novel' in dataset and i % 4 == 3:
            file_list += classfile_list
            label_list += np.repeat(i, len(classfile_list)).tolist()
    # json.dump replaces the old hand-built JSON with seek()-back comma
    # stripping, which corrupted the output (deleting '[') on empty lists.
    with open(savedir + dataset + ".json", "w") as fo:
        json.dump({"label_names": folder_list,
                   "image_names": file_list,
                   "image_labels": label_list}, fo)
    print("%s -OK" % dataset)
| 2,151 | 31.119403 | 138 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/filelists/emnist/invert_emnist.py | import numpy as np
from os import listdir
from os.path import isfile, isdir, join
import os
import json
import random
from PIL import Image
import PIL.ImageOps
cwd = os.getcwd()
data_path = join(cwd, 'emnist')
inv_data_path = join(cwd, 'inv_emnist')
savedir = './'

# Create color-inverted copies of every EMNIST class folder under inv_emnist/.
if not os.path.exists(inv_data_path):
    os.makedirs(inv_data_path)

character_folder_list = [str(i) for i in range(62)]  # class folders are named "0".."61" (lazy hack)
classfile_list_all = []

for character_folder in character_folder_list:
    src_dir = join(data_path, character_folder)
    dst_dir = join(inv_data_path, character_folder)
    image_names = [name for name in listdir(src_dir)
                   if isfile(join(src_dir, name)) and not name.startswith('.')]
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    for name in image_names:
        inverted = PIL.ImageOps.invert(Image.open(join(src_dir, name)))
        inverted.save(join(dst_dir, name))
| 1,105 | 32.515152 | 129 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/filelists/emnist/write_cross_char_valnovel_filelist.py | import numpy as np
from os import listdir
from os.path import isfile, isdir, join
import os
import json
import random
cwd = os.getcwd()
data_path = join(cwd, 'inv_emnist')
savedir = './'
dataset_list = ['val', 'novel']

# EMNIST class folders are named "0".."61"; shuffle files within each class.
folder_list = [str(i) for i in range(62)]  # lazy hack: fixed folder names
label_dict = dict(zip(folder_list, range(0, len(folder_list))))  # name -> index (unused below)

classfile_list_all = []
for i, folder in enumerate(folder_list):
    folder_path = join(data_path, folder)
    classfile_list_all.append([join(folder_path, cf) for cf in listdir(folder_path)
                               if isfile(join(folder_path, cf)) and cf[0] != '.'])
    random.shuffle(classfile_list_all[i])

# Alternate classes between the two splits: val = even, novel = odd.
for dataset in dataset_list:
    file_list = []
    label_list = []
    for i, classfile_list in enumerate(classfile_list_all):
        if 'val' in dataset and i % 2 == 0:
            file_list += classfile_list
            label_list += np.repeat(i, len(classfile_list)).tolist()
        if 'novel' in dataset and i % 2 == 1:
            file_list += classfile_list
            label_list += np.repeat(i, len(classfile_list)).tolist()
    # json.dump replaces the old hand-built JSON with seek()-back comma
    # stripping, which corrupted the output (deleting '[') on empty lists.
    with open(savedir + dataset + ".json", "w") as fo:
        json.dump({"label_names": folder_list,
                   "image_names": file_list,
                   "image_labels": label_list}, fo)
    print("%s -OK" % dataset)
| 1,841 | 29.7 | 138 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/data/additional_transforms.py | # Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from PIL import ImageEnhance
# Maps a jitter-parameter name to the PIL ImageEnhance class that applies it.
transformtypedict=dict(Brightness=ImageEnhance.Brightness, Contrast=ImageEnhance.Contrast, Sharpness=ImageEnhance.Sharpness, Color=ImageEnhance.Color)
class ImageJitter(object):
    """Randomly jitter an image's enhancement properties (brightness, contrast,
    sharpness, color) by factors drawn uniformly from [1 - alpha, 1 + alpha].

    `transformdict` maps a property name (a key of `transformtypedict`) to its
    jitter strength alpha.
    """
    def __init__(self, transformdict):
        self.transforms = []
        for name in transformdict:
            self.transforms.append((transformtypedict[name], transformdict[name]))

    def __call__(self, img):
        rand_vals = torch.rand(len(self.transforms))
        out = img
        for idx, (enhancer_cls, strength) in enumerate(self.transforms):
            # Enhancement factor in [1 - strength, 1 + strength].
            factor = strength * (rand_vals[idx] * 2.0 - 1.0) + 1
            out = enhancer_cls(out).enhance(factor).convert('RGB')
        return out
| 850 | 24.787879 | 150 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/data/feature_loader.py | import torch
import numpy as np
import h5py
class SimpleHDF5Dataset:
    """In-memory dataset view over an HDF5 feature file.

    The file is expected to contain 'all_feats' (N x D features), 'all_labels'
    (N labels) and 'count' (number of valid rows). All data is copied out of
    the file via `[...]`, so the handle may be closed after construction.
    Constructed with no handle, it behaves as an empty dataset.
    """
    def __init__(self, file_handle=None):
        if file_handle is None:  # was `== None`; identity check is the correct idiom
            self.f = ''
            self.all_feats_dset = []
            self.all_labels = []
            self.total = 0
        else:
            self.f = file_handle
            self.all_feats_dset = self.f['all_feats'][...]
            self.all_labels = self.f['all_labels'][...]
            self.total = self.f['count'][0]

    def __getitem__(self, i):
        """Return (feature tensor, int label) for row i."""
        return torch.Tensor(self.all_feats_dset[i, :]), int(self.all_labels[i])

    def __len__(self):
        return self.total
def init_loader(filename):
    """Load saved features from an HDF5 file and group them per class.

    Returns a dict mapping class label -> list of feature vectors.
    """
    with h5py.File(filename, 'r') as f:
        # SimpleHDF5Dataset copies the arrays out of the file ([...]), so the
        # handle can be closed here and the data used afterwards.
        fileset = SimpleHDF5Dataset(f)

    #labels = [ l for l in fileset.all_labels if l != 0]
    feats = fileset.all_feats_dset
    labels = fileset.all_labels
    # Trailing all-zero rows are unused pre-allocated padding; trim them from
    # the end until a non-zero feature row is found.
    while np.sum(feats[-1]) == 0:
        feats = np.delete(feats,-1,axis = 0)
        labels = np.delete(labels,-1,axis = 0)

    class_list = np.unique(np.array(labels)).tolist()
    inds = range(len(labels))

    # Bucket each feature vector under its class label.
    cl_data_file = {}
    for cl in class_list:
        cl_data_file[cl] = []
    for ind in inds:
        cl_data_file[labels[ind]].append( feats[ind])

    return cl_data_file
| 1,293 | 27.755556 | 78 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/data/dataset.py | # This code is modified from https://github.com/facebookresearch/low-shot-shrink-hallucinate
import torch
from PIL import Image
import json
import numpy as np
import torchvision.transforms as transforms
import os
# Default target transform: pass labels through unchanged.
identity = lambda x:x
class SimpleDataset:
    """Flat image dataset backed by a JSON filelist.

    The JSON file must contain parallel lists 'image_names' (paths) and
    'image_labels'. __getitem__ returns (transformed image, transformed label).
    """
    def __init__(self, data_file, transform, target_transform=identity):
        with open(data_file, 'r') as f:
            self.meta = json.load(f)
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, i):
        path = os.path.join(self.meta['image_names'][i])
        image = Image.open(path).convert('RGB')
        image = self.transform(image)
        label = self.target_transform(self.meta['image_labels'][i])
        return image, label

    def __len__(self):
        return len(self.meta['image_names'])
class SetDataset:
    """Episodic dataset: one item = one batch of images from a single class.

    Builds one sub-DataLoader per class; indexing by class index i draws a
    fresh shuffled batch of `batch_size` images from that class.
    """
    def __init__(self, data_file, batch_size, transform):
        with open(data_file, 'r') as f:
            self.meta = json.load(f)

        self.cl_list = np.unique(self.meta['image_labels']).tolist()

        # Group image paths by their class label.
        self.sub_meta = {}
        for cl in self.cl_list:
            self.sub_meta[cl] = []

        for x,y in zip(self.meta['image_names'],self.meta['image_labels']):
            self.sub_meta[y].append(x)

        # One shuffling DataLoader per class, each yielding batch_size images.
        self.sub_dataloader = []
        sub_data_loader_params = dict(batch_size = batch_size,
                                      shuffle = True,
                                      num_workers = 0, #use main thread only or may receive multiple batches
                                      pin_memory = False)
        for cl in self.cl_list:
            sub_dataset = SubDataset(self.sub_meta[cl], cl, transform = transform)
            self.sub_dataloader.append( torch.utils.data.DataLoader(sub_dataset, **sub_data_loader_params) )

    def __getitem__(self,i):
        # A fresh iterator each call => a newly shuffled batch from class i.
        return next(iter(self.sub_dataloader[i]))

    def __len__(self):
        return len(self.cl_list)
class SubDataset:
    """All images of a single class `cl`; __getitem__ returns (image, label)."""
    def __init__(self, sub_meta, cl, transform=transforms.ToTensor(), target_transform=identity):
        self.sub_meta = sub_meta
        self.cl = cl
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, i):
        path = os.path.join(self.sub_meta[i])
        image = Image.open(path).convert('RGB')
        return self.transform(image), self.target_transform(self.cl)

    def __len__(self):
        return len(self.sub_meta)
class EpisodicBatchSampler(object):
    """Yields n_episodes batches, each a random sample of n_way distinct
    class indices drawn from range(n_classes)."""
    def __init__(self, n_classes, n_way, n_episodes):
        self.n_classes = n_classes
        self.n_way = n_way
        self.n_episodes = n_episodes

    def __len__(self):
        return self.n_episodes

    def __iter__(self):
        episode = 0
        while episode < self.n_episodes:
            yield torch.randperm(self.n_classes)[:self.n_way]
            episode += 1
| 2,913 | 31.741573 | 108 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/data/datamgr.py | # This code is modified from https://github.com/facebookresearch/low-shot-shrink-hallucinate
import torch
from PIL import Image
import numpy as np
import torchvision.transforms as transforms
import data.additional_transforms as add_transforms
from data.dataset import SimpleDataset, SetDataset, EpisodicBatchSampler
from abc import abstractmethod
def _init_fn(worker_id):
np.random.seed(0)
class TransformLoader:
    """Builds torchvision transform pipelines by name.

    normalize_param holds ImageNet mean/std; jitter_param holds the
    ImageJitter strengths used during training-time augmentation.
    """
    def __init__(self, image_size,
                 normalize_param = dict(mean= [0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225]),
                 jitter_param = dict(Brightness=0.4, Contrast=0.4, Color=0.4)):
        self.image_size = image_size
        self.normalize_param = normalize_param
        self.jitter_param = jitter_param

    def parse_transform(self, transform_type):
        """Instantiate a single transform from its name.

        'ImageJitter' maps to the custom transform; anything else resolves to
        a torchvision.transforms class, parameterized per name below.
        """
        if transform_type=='ImageJitter':
            method = add_transforms.ImageJitter( self.jitter_param )
            return method
        method = getattr(transforms, transform_type)
        if transform_type=='RandomResizedCrop':
            return method(self.image_size)
        elif transform_type=='CenterCrop':
            return method(self.image_size)
        elif transform_type=='Resize':
            # Resize slightly larger (x1.15) so CenterCrop keeps margins.
            return method([int(self.image_size*1.15), int(self.image_size*1.15)])
        elif transform_type=='Normalize':
            return method(**self.normalize_param )
        else:
            return method()

    def get_composed_transform(self, aug = False):
        """Return the composed train (aug=True) or eval (aug=False) pipeline."""
        if aug:
            transform_list = ['RandomResizedCrop', 'ImageJitter', 'RandomHorizontalFlip', 'ToTensor', 'Normalize']
        else:
            transform_list = ['Resize','CenterCrop', 'ToTensor', 'Normalize']

        transform_funcs = [ self.parse_transform(x) for x in transform_list]
        transform = transforms.Compose(transform_funcs)
        return transform
class DataManager:
    """Abstract base: concrete managers build a DataLoader over a filelist."""
    @abstractmethod
    def get_data_loader(self, data_file, aug):
        pass
class SimpleDataManager(DataManager):
    """Builds a shuffled, flat-batch DataLoader over a JSON filelist."""
    def __init__(self, image_size, batch_size):
        super(SimpleDataManager, self).__init__()
        self.batch_size = batch_size
        self.trans_loader = TransformLoader(image_size)

    def get_data_loader(self, data_file, aug):
        """`aug` toggles training-time augmentation (train vs. val pipeline)."""
        transform = self.trans_loader.get_composed_transform(aug)
        dataset = SimpleDataset(data_file, transform)
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=8,
            pin_memory=True,
        )
class SetDataManager(DataManager):
    """Builds an episodic DataLoader: each batch holds n_way classes with
    (n_support + n_query) images per class."""
    def __init__(self, image_size, n_way, n_support, n_query, n_eposide=100):
        super(SetDataManager, self).__init__()
        self.image_size = image_size
        self.n_way = n_way
        self.batch_size = n_support + n_query
        self.n_eposide = n_eposide
        self.trans_loader = TransformLoader(image_size)

    def get_data_loader(self, data_file, aug):
        """`aug` toggles training-time augmentation (train vs. val pipeline)."""
        transform = self.trans_loader.get_composed_transform(aug)
        dataset = SetDataset(data_file, self.batch_size, transform)
        episodic_sampler = EpisodicBatchSampler(len(dataset), self.n_way, self.n_eposide)
        return torch.utils.data.DataLoader(
            dataset,
            batch_sampler=episodic_sampler,
            num_workers=8,
            pin_memory=True,
        )
| 3,560 | 38.566667 | 118 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/data/__init__.py | from . import datamgr
from . import dataset
from . import additional_transforms
from . import feature_loader
| 109 | 21 | 35 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/data/qmul_loader.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
import torchvision.transforms as transforms
from PIL import Image
# QMUL head-pose dataset identities used for (meta-)training.
train_people = ['DennisPNoGlassesGrey','JohnGrey','SimonBGrey','SeanGGrey','DanJGrey','AdamBGrey','JackGrey','RichardHGrey','YongminYGrey','TomKGrey','PaulVGrey','DennisPGrey','CarlaBGrey','JamieSGrey','KateSGrey','DerekCGrey','KatherineWGrey','ColinPGrey','SueWGrey','GrahamWGrey','KrystynaNGrey','SeanGNoGlassesGrey','KeithCGrey','HeatherLGrey']
# Held-out identities used for evaluation.
test_people = ['RichardBGrey','TasosHGrey','SarahLGrey','AndreeaVGrey','YogeshRGrey']
def num_to_str(num):
    """Format a numeric pose value as the zero-padded 3-digit string used in
    QMUL image file names (0 -> '000', 90 -> '090', 120 -> '120').

    The previous hand-rolled padding produced only two characters for
    single-digit inputs ('5' -> '05'); '%03d' fixes that while yielding
    identical output for every multiple of ten the dataset actually uses.
    """
    return '%03d' % int(num)
def get_person_at_curve(person, curve, prefix='filelists/QMUL/images/'):
    """Load one person's face images along a (pitch, angle) trajectory.

    Returns (faces, targets): a stacked image tensor and the pitch values
    normalized from [60, 120] degrees into [-1, 1].
    """
    train_transforms = transforms.Compose([transforms.ToTensor()])
    faces = []
    targets = []
    for pitch, angle in curve:
        # File names follow "<Person>_<pitch>_<angle>.jpg" with 3-digit fields.
        fname = prefix + person + '/' + person[:-4] + '_' + num_to_str(pitch) + '_' + num_to_str(angle) + '.jpg'
        image = Image.open(fname).convert('RGB')
        faces.append(train_transforms(image))
        pitch_norm = 2 * ((pitch - 60) / (120 - 60)) - 1
        angle_norm = 2 * ((angle - 0) / (180 - 0)) - 1  # computed but unused, kept for parity
        targets.append(torch.Tensor([pitch_norm]))
    return torch.stack(faces), torch.stack(targets).squeeze()
def get_batch(train_people=train_people, num_samples=19):
    """Sample one random sine trajectory over head pose and return the stacked
    (inputs, targets) for every person in `train_people` along it."""
    # Random sine wave defines the pitch trajectory.
    amp = np.random.uniform(-3, 3)
    phase = np.random.uniform(-5, 5)
    wave = [(amp * np.sin(phase + x)) for x in range(num_samples)]

    # Map the trajectory onto the discrete (pitch, angle) grid of the dataset:
    # angles sweep 0..180 in steps of 10; pitches are rounded to multiples of 10.
    angles = [step * 10 for step in range(num_samples)]
    pitches = [int(round((y + 3) * 10 + 60, -1)) for y in wave]
    curve = list(zip(pitches, angles))

    inputs = []
    targets = []
    for person in train_people:
        person_faces, person_targets = get_person_at_curve(person, curve)
        inputs.append(person_faces)
        targets.append(person_targets)
    return torch.stack(inputs), torch.stack(targets)
| 2,209 | 35.833333 | 347 | py |
class Sparsemax(nn.Module):
    """Sparsemax activation (Martins & Astudillo, 2016): like softmax but can
    assign exactly-zero probability to low-scoring entries."""

    def __init__(self, dim=None):
        super(Sparsemax, self).__init__()
        # Dimension along which sparsemax is applied; defaults to the last.
        self.dim = -1 if dim is None else dim

    def forward(self, input, device=None):
        """Project `input` onto the probability simplex along self.dim.

        `device` defaults to input.device (the old hard-coded 'cuda' default
        broke CPU runs); passing an explicit device still works.
        """
        if device is None:
            device = input.device
        original_size = input.size()
        input = input.view(-1, input.size(self.dim))
        dim = 1
        number_of_logits = input.size(dim)

        # Shift by the max for numerical stability (invariant to translation).
        input = input - torch.max(input, dim=dim, keepdim=True)[0].expand_as(input)

        # Sort descending and find the support size k per row.
        zs = torch.sort(input=input, dim=dim, descending=True)[0]
        ranks = torch.arange(start=1, end=number_of_logits + 1, device=device,
                             dtype=torch.float32).view(1, -1)  # renamed: `range` shadowed the builtin
        ranks = ranks.expand_as(zs)

        bound = 1 + ranks * zs
        cumulative_sum_zs = torch.cumsum(zs, dim)
        is_gt = torch.gt(bound, cumulative_sum_zs).type(input.type())
        k = torch.max(is_gt * ranks, dim, keepdim=True)[0]

        # Threshold tau so the surviving entries sum to one.
        zs_sparse = is_gt * zs
        taus = (torch.sum(zs_sparse, dim, keepdim=True) - 1) / k
        taus = taus.expand_as(input)

        self.output = torch.max(torch.zeros_like(input), input - taus)
        return self.output.view(original_size)

    def backward(self, grad_output):
        """Manual gradient using the support mask from the last forward call."""
        dim = 1
        nonzeros = torch.ne(self.output, 0)
        total = torch.sum(grad_output * nonzeros, dim=dim) / torch.sum(nonzeros, dim=dim)
        self.grad_input = nonzeros * (grad_output - total.expand_as(grad_output))
        return self.grad_input
class SingleAttention(nn.Module):
    """Temporal attention over a sequence of hidden states.

    The query is the last time step; `attention_type` selects additive ('add'),
    multiplicative ('mul') or concatenation ('concat') scoring. `time_aware`
    injects a per-step time-decay term; `use_demographic` projects a demographic
    vector (computed but not mixed into the scores in the original code).
    Returns (context vector B x I, attention weights B x T).
    """

    def __init__(self, attention_input_dim, attention_hidden_dim, attention_type='add',
                 demographic_dim=12, time_aware=False, use_demographic=False):
        super(SingleAttention, self).__init__()
        self.attention_type = attention_type
        self.attention_hidden_dim = attention_hidden_dim
        self.attention_input_dim = attention_input_dim
        self.use_demographic = use_demographic
        self.demographic_dim = demographic_dim
        self.time_aware = time_aware

        if attention_type == 'add':
            self.Wx = nn.Parameter(torch.randn(attention_input_dim, attention_hidden_dim))
            if self.time_aware:
                self.Wtime_aware = nn.Parameter(torch.randn(1, attention_hidden_dim))
                nn.init.kaiming_uniform_(self.Wtime_aware, a=math.sqrt(5))
            self.Wt = nn.Parameter(torch.randn(attention_input_dim, attention_hidden_dim))
            self.Wd = nn.Parameter(torch.randn(demographic_dim, attention_hidden_dim))
            self.bh = nn.Parameter(torch.zeros(attention_hidden_dim,))
            self.Wa = nn.Parameter(torch.randn(attention_hidden_dim, 1))
            self.ba = nn.Parameter(torch.zeros(1,))

            nn.init.kaiming_uniform_(self.Wd, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.Wx, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.Wt, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.Wa, a=math.sqrt(5))
        elif attention_type == 'mul':
            self.Wa = nn.Parameter(torch.randn(attention_input_dim, attention_input_dim))
            self.ba = nn.Parameter(torch.zeros(1,))

            nn.init.kaiming_uniform_(self.Wa, a=math.sqrt(5))
        elif attention_type == 'concat':
            if self.time_aware:
                self.Wh = nn.Parameter(torch.randn(2 * attention_input_dim + 1, attention_hidden_dim))
            else:
                self.Wh = nn.Parameter(torch.randn(2 * attention_input_dim, attention_hidden_dim))
            self.Wa = nn.Parameter(torch.randn(attention_hidden_dim, 1))
            self.ba = nn.Parameter(torch.zeros(1,))

            nn.init.kaiming_uniform_(self.Wh, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.Wa, a=math.sqrt(5))
        else:
            raise RuntimeError('Wrong attention type.')

        self.tanh = nn.Tanh()
        # Explicit dim: scores `e` are (batch, time); implicit-dim Softmax is deprecated.
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, input, demo=None):
        batch_size, time_step, input_dim = input.size()  # B x T x I

        # Per-step "age" relative to the last step: [T-1, ..., 1, 0].
        # Fixes two defects of the original: it was hard-coded to 48 steps
        # (range(47,-1,-1)) and placed on an undefined `self.device`.
        time_decays = torch.arange(time_step - 1, -1, -1, dtype=torch.float32,
                                   device=input.device).unsqueeze(-1).unsqueeze(0)  # 1 x T x 1
        b_time_decays = time_decays.repeat(batch_size, 1, 1)  # B x T x 1

        if self.attention_type == 'add':
            q = torch.matmul(input[:, -1, :], self.Wt)  # B x H, query = last step
            q = torch.reshape(q, (batch_size, 1, self.attention_hidden_dim))  # B x 1 x H
            k = torch.matmul(input, self.Wx)  # B x T x H
            if self.time_aware:
                time_hidden = torch.matmul(b_time_decays, self.Wtime_aware)  # B x T x H
            if self.use_demographic:
                # NOTE(review): `d` was computed but never mixed into `h` in the
                # original; behavior preserved.
                d = torch.matmul(demo, self.Wd)  # B x H
                d = torch.reshape(d, (batch_size, 1, self.attention_hidden_dim))
            h = q + k + self.bh  # B x T x H
            if self.time_aware:
                h += time_hidden
            h = self.tanh(h)
            e = torch.matmul(h, self.Wa) + self.ba  # B x T x 1
            e = torch.reshape(e, (batch_size, time_step))  # B x T
        elif self.attention_type == 'mul':
            e = torch.matmul(input[:, -1, :], self.Wa)  # B x I
            # squeeze(1), not squeeze(): a bare squeeze collapsed the batch dim when B == 1.
            e = torch.matmul(e.unsqueeze(1), input.permute(0, 2, 1)).squeeze(1) + self.ba  # B x T
        elif self.attention_type == 'concat':
            q = input[:, -1, :].unsqueeze(1).repeat(1, time_step, 1)  # B x T x I
            k = input
            c = torch.cat((q, k), dim=-1)  # B x T x 2I
            if self.time_aware:
                c = torch.cat((c, b_time_decays), dim=-1)  # B x T x (2I+1)
            h = self.tanh(torch.matmul(c, self.Wh))
            e = torch.matmul(h, self.Wa) + self.ba  # B x T x 1
            e = torch.reshape(e, (batch_size, time_step))  # B x T

        a = self.softmax(e)  # B x T
        # squeeze(1) keeps the batch dimension when B == 1.
        v = torch.matmul(a.unsqueeze(1), input).squeeze(1)  # B x I
        return v, a
class FinalAttentionQKV(nn.Module):
    """QKV attention pooling: query = projection of the mean over all T steps,
    keys/values = projections of the first T-1 steps.

    Returns (pooled value B x H, attention weights B x (T-1)).
    """

    def __init__(self, attention_input_dim, attention_hidden_dim, attention_type='add', dropout=None):
        super(FinalAttentionQKV, self).__init__()
        self.attention_type = attention_type
        self.attention_hidden_dim = attention_hidden_dim
        self.attention_input_dim = attention_input_dim

        self.W_q = nn.Linear(attention_input_dim, attention_hidden_dim)
        self.W_k = nn.Linear(attention_input_dim, attention_hidden_dim)
        self.W_v = nn.Linear(attention_input_dim, attention_hidden_dim)
        self.W_out = nn.Linear(attention_hidden_dim, 1)

        self.b_in = nn.Parameter(torch.zeros(1,))
        self.b_out = nn.Parameter(torch.zeros(1,))

        nn.init.kaiming_uniform_(self.W_q.weight, a=math.sqrt(5))
        nn.init.kaiming_uniform_(self.W_k.weight, a=math.sqrt(5))
        nn.init.kaiming_uniform_(self.W_v.weight, a=math.sqrt(5))
        nn.init.kaiming_uniform_(self.W_out.weight, a=math.sqrt(5))

        self.Wh = nn.Parameter(torch.randn(2 * attention_input_dim, attention_hidden_dim))
        self.Wa = nn.Parameter(torch.randn(attention_hidden_dim, 1))
        self.ba = nn.Parameter(torch.zeros(1,))

        nn.init.kaiming_uniform_(self.Wh, a=math.sqrt(5))
        nn.init.kaiming_uniform_(self.Wa, a=math.sqrt(5))

        # Only build Dropout when a rate is given: nn.Dropout(p=None) raises,
        # so the documented default dropout=None used to crash construction.
        self.dropout = nn.Dropout(p=dropout) if dropout is not None else None
        self.tanh = nn.Tanh()
        # Explicit dim: scores `e` are (batch, keys).
        self.softmax = nn.Softmax(dim=-1)
        self.sigmoid = nn.Sigmoid()
        self.sparsemax = Sparsemax()

    def forward(self, input):
        batch_size, time_step, input_dim = input.size()  # B x T x I
        input_q = self.W_q(torch.mean(input, 1))   # B x H, query from the mean over all T steps
        input_k = self.W_k(input[:, :-1, :])       # B x (T-1) x H
        input_v = self.W_v(input[:, :-1, :])       # B x (T-1) x H
        key_len = time_step - 1  # keys/values exclude the last step

        if self.attention_type == 'add':
            q = torch.reshape(input_q, (batch_size, 1, self.attention_hidden_dim))  # B x 1 x H
            h = self.tanh(q + input_k + self.b_in)  # B x (T-1) x H
            e = self.W_out(h)  # B x (T-1) x 1
            # Was reshaped to (batch, time_step): a shape mismatch, since there
            # are only T-1 key scores per batch element.
            e = torch.reshape(e, (batch_size, key_len))
        elif self.attention_type == 'mul':
            q = torch.reshape(input_q, (batch_size, self.attention_hidden_dim, 1))  # B x H x 1
            e = torch.matmul(input_k, q).squeeze(-1)  # B x (T-1)
        elif self.attention_type == 'concat':
            # Repeat the query once per key (T-1 times; was time_step, which
            # mismatched the T-1 keys in the concatenation).
            q = input_q.unsqueeze(1).repeat(1, key_len, 1)  # B x (T-1) x H
            c = torch.cat((q, input_k), dim=-1)  # B x (T-1) x 2H
            h = self.tanh(torch.matmul(c, self.Wh))
            e = torch.matmul(h, self.Wa) + self.ba  # B x (T-1) x 1
            e = torch.reshape(e, (batch_size, key_len))

        a = self.softmax(e)  # B x (T-1)
        if self.dropout is not None:
            a = self.dropout(a)
        # squeeze(1) keeps the batch dimension when B == 1.
        v = torch.matmul(a.unsqueeze(1), input_v).squeeze(1)  # B x H
        return v, a
def clones(module, N):
    """Return an ``nn.ModuleList`` with N independent deep copies of *module*."""
    copies = []
    for _ in range(N):
        copies.append(copy.deepcopy(module))
    return nn.ModuleList(copies)
def tile(a, dim, n_tile):
    """Repeat each entry of *a* ``n_tile`` times along dimension *dim*.

    Unlike ``Tensor.repeat`` (which tiles the whole sequence: ABAB), this
    interleaves the copies (AABB).

    Bug fix: the original body referenced ``self.device`` inside a free
    function, which raised ``NameError``; the index tensor is now placed on
    the input tensor's own device instead.

    :param a: input tensor.
    :param dim: dimension along which to repeat.
    :param n_tile: number of copies of each entry.
    :return: tensor on ``a.device`` with ``a.size(dim) * n_tile`` entries
        along *dim*.
    """
    init_dim = a.size(dim)
    repeat_idx = [1] * a.dim()
    repeat_idx[dim] = n_tile
    a = a.repeat(*repeat_idx)
    # Reorder the tiled copies so that the n_tile copies of each original
    # entry end up adjacent.
    order_index = torch.LongTensor(
        np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)])
    ).to(a.device)
    return torch.index_select(a, dim, order_index)
class PositionwiseFeedForward(nn.Module):
    """Two-layer position-wise feed-forward block: W2(dropout(relu(W1 x))).

    ``forward`` returns an ``(output, None)`` pair so the block is
    interchangeable with sublayers that also return an auxiliary loss.
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        hidden = F.relu(self.w_1(x))
        projected = self.w_2(self.dropout(hidden))
        return projected, None
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017).

    Adds a fixed sin/cos encoding to the input and applies dropout.

    Improvement: the deprecated ``torch.autograd.Variable`` wrapper was
    removed from ``forward``; registered buffers never require gradients,
    so slicing ``self.pe`` directly is equivalent.
    """

    def __init__(self, d_model, dropout, max_len=400):
        """
        :param d_model: embedding width (must match the input's last dim).
        :param dropout: dropout probability applied after adding the encoding.
        :param max_len: maximum supported sequence length.
        """
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Compute the positional encodings once, in log space for stability.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0., max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0., d_model, 2) * -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)  # (1, max_len, d_model) so it broadcasts over the batch
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Add encodings to *x* of shape (batch, seq_len, d_model)."""
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)
def subsequent_mask(size):
    """Boolean mask of shape (1, size, size), True on and below the diagonal.

    Used to prevent each position from attending to later positions.

    Improvement: built directly with ``torch.triu`` instead of
    round-tripping through numpy; same values, shape and bool dtype.

    :param size: sequence length.
    :return: bool tensor of shape (1, size, size).
    """
    ones = torch.ones(1, size, size)
    return torch.triu(ones, diagonal=1) == 0  # lower-triangular True
def attention(query, key, value, mask=None, dropout=None):
    """Scaled dot-product attention.

    query/key/value are (batch, heads, time, d_k) tensors. Returns the
    attended values together with the attention weights.
    """
    d_k = query.size(-1)
    # (b, h, t, t) similarity matrix, scaled to keep softmax gradients sane.
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        # Masked-out positions get a huge negative score -> ~0 weight.
        scores = scores.masked_fill(mask == 0, -1e9)
    weights = F.softmax(scores, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    return torch.matmul(weights, value), weights
class MultiHeadedAttention(nn.Module):
    """Multi-head attention that also returns a DeCov regularization term.

    The DeCov term penalizes correlated hidden activations: for each attended
    position it accumulates ``0.5 * (||Cov||_F^2 - ||diag(Cov)||^2)`` where
    the covariance is taken over the batch.

    Fix: the DeCov loop previously iterated a hard-coded ``range(17 + 1 - 1)``
    (exactly 18 positions, matching one specific model configuration and
    crashing or silently under-covering for any other input width); it now
    iterates over however many positions are actually present, which is
    behavior-identical for the shipped configuration.
    """

    def __init__(self, h, d_model, dropout=0):
        """
        :param h: number of attention heads (must divide d_model).
        :param d_model: model width.
        :param dropout: dropout probability applied to attention weights.
        """
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        # We assume d_v always equals d_k.
        self.d_k = d_model // h
        self.h = h
        self.linears = clones(nn.Linear(d_model, self.d_k * self.h), 3)
        self.final_linear = nn.Linear(d_model, d_model)
        self.attn = None  # last attention weights, kept for inspection
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None):
        """Return ``(final_linear(attended), decov_loss)``."""
        if mask is not None:
            mask = mask.unsqueeze(1)  # same mask applied to every head
        nbatches = query.size(0)
        # Project, then split d_model into h heads of width d_k:
        # (b, positions, d_model) -> (b, h, positions, d_k).
        query, key, value = [
            l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
            for l, x in zip(self.linears, (query, key, value))
        ]
        x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)
        # Merge heads back: (b, positions, d_model).
        x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
        # DeCov regularization over each position's (hidden, batch) matrix.
        decov_contexts = x.transpose(0, 1).transpose(1, 2)  # (positions, hidden, batch)
        decov_loss = None
        for pos in range(decov_contexts.size(0)):
            covs = cov(decov_contexts[pos, :, :])
            term = 0.5 * (torch.norm(covs, p='fro') ** 2 - torch.norm(torch.diag(covs)) ** 2)
            decov_loss = term if decov_loss is None else decov_loss + term
        return self.final_linear(x), decov_loss
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with learnable gain/bias."""

    def __init__(self, size, eps=1e-7):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(size))   # gain
        self.b_2 = nn.Parameter(torch.zeros(size))  # bias
        self.eps = eps  # guards against division by a zero std

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return self.a_2 * normalized + self.b_2
def cov(m, y=None):
    """Sample covariance of the rows of *m* (optionally stacked with *y*).

    Each row is treated as a variable and each column as an observation.
    """
    if y is not None:
        m = torch.cat((m, y), dim=0)
    centered = m - torch.mean(m, dim=1)[:, None]
    return centered.mm(centered.t()) / (centered.size(1) - 1)
class SublayerConnection(nn.Module):
    """Pre-norm residual wrapper: ``x + dropout(sublayer(norm(x))[0])``.

    The wrapped sublayer is expected to return an ``(output, extra)`` pair
    (e.g. an auxiliary loss); the extra value is passed through untouched.
    Note the norm is applied first, not last, for code simplicity.
    """

    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        """Apply the residual connection to a same-size sublayer."""
        result = sublayer(self.norm(x))
        residual = x + self.dropout(result[0])
        return residual, result[1]
class AICare(nn.Module):
    """Risk-prediction model: one recurrent encoder per input feature, a
    demographic projection, feature-level attention, and a sigmoid output.

    NOTE(review): despite the ``GRUs`` name, the per-feature encoders are
    bidirectional ``nn.RNN`` modules (the GRU line is commented out below).
    Several constructed layers (PositionalEncoding, LastStepAttentions,
    MultiHeadedAttention, SublayerConnection, PositionwiseFeedForward,
    demo_proj, softmax, relu) are not used in ``forward`` as written.
    """

    def __init__(self, input_dim=17, hidden_dim=32, d_model=32, MHD_num_head=4, d_ff=64, output_dim=1, device='cuda', keep_prob=0.5):
        """
        :param input_dim: number of scalar time-series features.
        :param hidden_dim: per-feature RNN hidden width (also used as d_model).
        :param d_model: attention width; must be divisible by MHD_num_head.
        :param MHD_num_head: number of attention heads.
        :param d_ff: feed-forward inner width.
        :param output_dim: number of outputs (sigmoid-activated).
        :param device: target device name (stored; not applied here).
        :param keep_prob: keep probability; dropout p is 1 - keep_prob.
        """
        super(AICare, self).__init__()
        # hyperparameters
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim  # d_model
        self.d_model = d_model
        self.MHD_num_head = MHD_num_head
        self.device = device
        self.d_ff = d_ff
        self.output_dim = output_dim
        self.keep_prob = keep_prob
        # layers
        self.PositionalEncoding = PositionalEncoding(self.d_model, dropout=0, max_len=400)
        # self.GRUs = clones(nn.GRU(1, self.hidden_dim, batch_first = True), self.input_dim)
        # One bidirectional RNN per feature, each consuming a 1-d series.
        self.GRUs = clones(nn.RNN(1, self.hidden_dim, bidirectional=True, batch_first=True), self.input_dim)
        self.LastStepAttentions = clones(SingleAttention(self.hidden_dim, 8, attention_type='concat', demographic_dim=12, time_aware=True, use_demographic=False), self.input_dim)
        self.FinalAttentionQKV = FinalAttentionQKV(self.hidden_dim, self.hidden_dim, attention_type='mul', dropout=1 - self.keep_prob)
        self.MultiHeadedAttention = MultiHeadedAttention(self.MHD_num_head, self.d_model, dropout=1 - self.keep_prob)
        self.SublayerConnection = SublayerConnection(self.d_model, dropout=1 - self.keep_prob)
        self.PositionwiseFeedForward = PositionwiseFeedForward(self.d_model, self.d_ff, dropout=0.1)
        self.demo_proj_main = nn.Linear(4, self.hidden_dim)  # assumes 4 demographic fields -- TODO confirm
        self.demo_proj = nn.Linear(4, self.hidden_dim)
        self.output = nn.Linear(self.hidden_dim * 2, self.output_dim)
        self.dropout = nn.Dropout(p=1 - self.keep_prob)
        self.tanh = nn.Tanh()
        # NOTE(review): nn.Softmax() without dim= relies on deprecated
        # implicit-dimension behavior (unused in forward as written).
        self.softmax = nn.Softmax()
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()

    def forward(self, input, demo_input, lens):
        """
        :param input: (batch_size, time_step, feature_dim) time series.
        :param demo_input: demographic tensor fed to a Linear(4, hidden_dim).
        :param lens: per-sample valid lengths (used for sequence packing).
        :return: sigmoid-activated predictions, (batch_size, output_dim).
        """
        # input shape [batch_size, timestep, feature_dim]
        demo_main = self.tanh(self.demo_proj_main(demo_input)).unsqueeze(1)  # (b, 1, hidden_dim)
        batch_size = input.size(0)
        time_step = input.size(1)
        feature_dim = input.size(2)
        assert (feature_dim == self.input_dim)
        assert (self.d_model % self.MHD_num_head == 0)
        # Encode each scalar feature's series with its own RNN; sum the two
        # directions' final hidden states to get one (b, 1, h) embedding.
        GRU_embeded_input = torch.sum(self.GRUs[0](pack_padded_sequence(input[:, :, 0].unsqueeze(-1), lens.cpu(), batch_first=True, enforce_sorted=False))[1], 0).squeeze().unsqueeze(1)  # (b, 1, h)
        for i in range(feature_dim - 1):
            embeded_input = torch.sum(self.GRUs[i + 1](pack_padded_sequence(input[:, :, i + 1].unsqueeze(-1), lens.cpu(), batch_first=True, enforce_sorted=False))[1], 0).squeeze().unsqueeze(1)  # (b, 1, h)
            GRU_embeded_input = torch.cat((GRU_embeded_input, embeded_input), 1)
        # Append the demographic embedding as one extra "feature" position.
        GRU_embeded_input = torch.cat((GRU_embeded_input, demo_main), 1)  # (b, input_dim+1, h)
        posi_input = self.dropout(GRU_embeded_input)  # (batch_size, d_input, hidden_dim)
        # Feature-level attention; [0] keeps the context, drops the weights.
        weighted_contexts = self.FinalAttentionQKV(posi_input)[0]
        combined_hidden = torch.cat((weighted_contexts, demo_main.squeeze(1)), -1)  # (b, 2h)
        output = self.output(self.dropout(combined_hidden))  # (b, output_dim)
        output = self.sigmoid(output)
        return output
| 19,067 | 42.042889 | 195 | py |
prob-alpha | prob-alpha-master/src/Shape.py | """
A `Shape` represents a topological tree. The data structure implemented here is of recursive type: a `Shape` can be either
a leaf or a list of `Shape` objects. Leaves are not distinguishable, but we know that they are leaves.
We choose a sorted shape to be the class representant of all shapes isomorphic to it.
In order to read Newick codes we import the newick module from https://github.com/glottobank/python-newick.
"""
import newick
class Shape(object):
    """
    A `Shape` instance is either a leaf or a list of `Shape` instances that
    hang from a root. Leaves are indistinguishable; a sorted shape is the
    canonical representative of its isomorphism class.
    """

    def __init__(self, children):
        """
        Create a new `Shape` object.
        The boolean is_leaf is True if the object is a leaf; it is False otherwise.
        :param children: `None` for a leaf; otherwise a non-empty list of the
            `Shape` objects which are descendants of this node.
        :return: `Shape` instance.
        """
        self.is_leaf = children is None
        self.children = children
        assert self.is_leaf or len(children) > 0

    def sort(self):
        """
        Sorts self (recursively) into graded lexicographical order.

        Bug fix: the original body read ``children.sort()``, a bare name that
        raised NameError; it now sorts ``self.children``, recursing first so
        that the comparison of the already-sorted children is meaningful.
        """
        if not self.is_leaf:
            for child in self.children:
                child.sort()
            self.children.sort()

    def compare(self, T2):
        """
        Compare self with another `Shape` object using graded lexicographical
        order; leaves are indistinguishable. It returns an int c, which is 0
        if self and T2 are equal, < 0 if self < T2, and > 0 if self > T2.
        :param T2: the `Shape` object against which we compare self.
        :return: int instance.
        """
        if self.is_leaf and T2.is_leaf:
            return 0
        elif self.is_leaf:
            return -1  # leaves sort before internal nodes
        elif T2.is_leaf:
            return 1
        else:
            # First compare by number of children, then childwise.
            c = len(self.children) - len(T2.children)
            if c != 0:
                return c
            for i in range(0, len(self.children)):
                c = self.children[i].compare(T2.children[i])
                if c != 0:
                    return c
            return 0

    def __lt__(self, T2):
        """True iff self < T2 under `compare`."""
        return self.compare(T2) < 0

    def __le__(self, T2):
        """True iff self <= T2 under `compare`."""
        return self.compare(T2) <= 0

    def __eq__(self, T2):
        """True iff self equals T2 under `compare`."""
        return self.compare(T2) == 0

    def __ne__(self, T2):
        """True iff self differs from T2 under `compare`."""
        return self.compare(T2) != 0

    def __ge__(self, T2):
        """True iff self >= T2 under `compare`."""
        return self.compare(T2) >= 0

    def __gt__(self, T2):
        """True iff self > T2 under `compare`."""
        return self.compare(T2) > 0

    def iso(self, T2):
        """
        Since our `Shape` objects are kept sorted, isomorphism reduces to
        equality.
        :param T2: the `Shape` object against which we compare self.
        :return: bool instance.
        """
        return self == T2

    def to_newick_tuple(self):
        """
        Returns a tuple representing the simplified Newick code of self.
        Leaves are marked as 1's, since we do not distinguish them.
        :return: tuple instance (the int 1 for a leaf).
        """
        if self.is_leaf:
            return 1
        else:
            return tuple(x.to_newick_tuple() for x in self.children)

    def to_newick(self):
        """
        Returns a string representing the simplified Newick code of self.
        :return: string instance.
        """
        return str(self.to_newick_tuple()) + ";"

    def is_symmetric(self):
        """
        Returns True if the root of self is a symmetric node (all children
        pairwise isomorphic), and False otherwise. If self is a leaf, it
        returns True: ex falso quodlibet.
        :return: bool instance.
        """
        if not self.is_leaf:
            return all(self.children[0].iso(x) for x in self.children)
        else:
            return True

    def count_symmetries(self):
        """
        Returns the number of symmetric interior nodes in self.
        :return: int instance.
        """
        if self.is_leaf:
            return 0
        elif all(self.children[0].iso(x) for x in self.children):
            return 1 + sum(x.count_symmetries() for x in self.children)
        else:
            return sum(x.count_symmetries() for x in self.children)

    def count_leaves(self):
        """
        Returns the number of leaves in self.
        :return: int instance.
        """
        if self.is_leaf:
            return 1
        else:
            return sum(x.count_leaves() for x in self.children)

    def shape(self):
        """
        Returns the `Shape` associated to self; a `Shape` already carries no
        labels, so this is self.
        :return: `Shape` instance.
        """
        return self

    def labels(self):
        """
        Returns a list with the labels that appear in self, sorted in
        lexicographical order. Since we only use 1's as flags for leaves,
        the output is [1].
        :return: list instance.
        """
        return [1]
def from_newick(X):
    """
    Create a `Shape` object from a Newick code entered as a string.
    :param X: a string representing a Newick code.
    :return: `Shape` instance.
    """
    first_tree = newick.loads(X)[0]
    return newick_node_to_shape(first_tree)
def from_newick_list(X):
    """
    Create a list of `Shape` objects from a list of Newick codes entered as
    a single string.
    :param X: a string representing a list of Newick codes.
    :return: [`Shape`] instance.
    """
    return [newick_node_to_shape(node) for node in newick.loads(X)]
def newick_node_to_shape(N):
    """
    Create a `Shape` object from a parsed `newick.Node`, discarding labels.
    :param N: a `Node`.
    :return: `Shape` instance.
    """
    if not N.descendants:
        return Shape(None)
    children = sorted(newick_node_to_shape(child) for child in N.descendants)
    return Shape(children)
def shapes_from_file(fname, encoding='utf8', strip_comments=False, **kw):
    """
    Load a list of shapes from a Newick formatted file.
    :param fname: file path.
    :param strip_comments: Flag signaling whether to strip comments enclosed
        in square brackets.
    :param kw: Keyword arguments are passed through to `Node.read`.
    :return: [`Shape`] instance.
    """
    nodes = newick.read(fname, encoding, strip_comments, **kw)
    return [newick_node_to_shape(node) for node in nodes]
prob-alpha | prob-alpha-master/src/PhyloTree.py | """
For us, a Phylogenetic Tree (a `PhyloTree` instance) is a special case of Shape in which labels of leaves can be distinguished.
We import modules Shape.py and newick.py; the latter will be used for reading Newick code in string format and turning it
into trees; it can be found in https://github.com/glottobank/python-newick.
"""
from Shape import *
import newick
class PhyloTree(Shape):
    """
    A `PhyloTree` is a `Shape` whose leaves carry distinguishable labels; it
    overrides the `Shape` methods that ignore label information.
    """

    def __init__(self, leaf, children):
        """
        Create a new `PhyloTree` object.
        The boolean is_leaf is True if the object is a leaf; False otherwise.
        :param leaf: the label if is_leaf; otherwise `None`.
        :param children: the descendant `PhyloTree` objects if not is_leaf;
            otherwise `None`.
        :return: `PhyloTree` instance.
        """
        super(PhyloTree, self).__init__(children)
        self.leaf = leaf
        assert not self.is_leaf or (leaf is not None)

    def shape(self):
        """
        Returns the `Shape` associated to self, i.e. "forgets" the labels.
        :return: `Shape` instance.
        """
        if self.is_leaf:
            return Shape(None)
        return Shape([child.shape() for child in self.children])

    def to_newick_tuple(self):
        """
        Returns a tuple with the simplified Newick code of self; leaves
        appear as their labels.
        :return: tuple instance.
        """
        if self.is_leaf:
            return self.leaf
        return tuple(child.to_newick_tuple() for child in self.children)

    def leaves(self):
        """
        Yields the (labels of the) leaves of self, left to right.
        :return: generator of labels.
        """
        if self.is_leaf:
            yield self.leaf
        else:
            for child in self.children:
                for label in child.leaves():
                    yield label

    def labels(self):
        """
        Returns the labels appearing in self, sorted lexicographically.
        Repetitions may arise if the input tree is not phylogenetic.
        :return: list instance.
        """
        return sorted(self.leaves())

    def is_phylo(self):
        """
        Returns True if self is phylogenetic (no repeated leaf labels),
        False otherwise.
        :return: bool instance.
        """
        L = self.labels()
        return all(prev != cur for prev, cur in zip(L, L[1:]))
def from_newick(X):
    """
    Create a `PhyloTree` object from a Newick code entered as a string.
    :param X: a string representing a Newick code.
    :return: `PhyloTree` instance.
    """
    first_tree = newick.loads(X)[0]
    return newick_node_to_tree(first_tree)
def from_newick_list(X):
    """
    Create a list of `PhyloTree` objects from a list of Newick codes entered
    as a single string.
    :param X: a string representing a list of Newick codes.
    :return: [`PhyloTree`] instance.
    """
    return [newick_node_to_tree(node) for node in newick.loads(X)]
def newick_node_to_tree(N):
    """
    Create a `PhyloTree` object from a parsed `newick.Node`.
    :param N: a Node.
    :return: `PhyloTree` instance.
    """
    if not N.descendants:
        return PhyloTree(N.name, None)
    children = sorted(newick_node_to_tree(child) for child in N.descendants)
    return PhyloTree(None, children)
def trees_from_file(fname, encoding='utf8', strip_comments=False, **kw):
    """
    Load a list of trees from a Newick formatted file.
    :param fname: file path.
    :param strip_comments: Flag signaling whether to strip comments enclosed
        in square brackets.
    :param kw: Keyword arguments are passed through to `Node.read`.
    :return: [`PhyloTree`] instance.
    """
    nodes = newick.read(fname, encoding, strip_comments, **kw)
    return [newick_node_to_tree(node) for node in nodes]
| 4,022 | 31.707317 | 127 | py |
prob-alpha | prob-alpha-master/src/newick.py | # coding: utf8
"""
Functionality to read and write the Newick serialization format for trees.
.. seealso:: https://en.wikipedia.org/wiki/Newick_format
"""
from __future__ import unicode_literals
import io
import re
# Characters that may not appear inside node names or branch lengths.
RESERVED_PUNCTUATION = ':;,()'
# Matches square-bracket Newick comments, e.g. "[...]".
# Fix: use a raw string -- the original non-raw literal contained the
# invalid escape sequences "\[" / "\]", which newer Pythons warn about.
COMMENT = re.compile(r'\[[^\]]*\]')
def length_parser(x):
    """Default branch-length parser: None/empty becomes 0.0."""
    if x:
        return float(x)
    return 0.0
def length_formatter(x):
    """Default branch-length formatter: plain string conversion."""
    return '{0}'.format(x)
class Node(object):
    """
    A Node may be a tree, a subtree or a leaf.
    A Node has optional name and length (from parent) and a (possibly empty) list of
    descendants. It further has an ancestor, which is *None* if the node is the
    root node of a tree.
    """
    def __init__(self, name=None, length=None, **kw):
        """
        :param name: Node label.
        :param length: Branch length from the new node to its parent, as a string
            (the ``char in length`` check below assumes a string).
        :param kw: Recognized keyword arguments:
            `length_parser`: Custom parser for the `length` attribute of a Node.
            `length_formatter`: Custom formatter for the branch length when formatting a
            Node as Newick string.
        :raises ValueError: if name or length contains Newick metacharacters.
        """
        for char in RESERVED_PUNCTUATION:
            if (name and char in name) or (length and char in length):
                raise ValueError(
                    'Node names or branch lengths must not contain "%s"' % char)
        self.name = name
        self._length = length  # raw string; parsed on demand via the `length` property
        self.descendants = []
        self.ancestor = None
        self._length_parser = kw.pop('length_parser', length_parser)
        self._length_formatter = kw.pop('length_formatter', length_formatter)

    def __repr__(self):
        return 'Node("%s")' % self.name

    @property
    def length(self):
        # Numeric view of the stored branch-length string.
        return self._length_parser(self._length)

    @length.setter
    def length(self, l):
        # Store lengths formatted back to strings so `newick` can emit them.
        if l is None:
            self._length = l
        else:
            self._length = self._length_formatter(l)

    @classmethod
    def create(cls, name=None, length=None, descendants=None, **kw):
        """
        Create a new `Node` object.
        :param name: Node label.
        :param length: Branch length from the new node to its parent.
        :param descendants: list of descendants or `None`.
        :param kw: Additional keyword arguments are passed through to `Node.__init__`.
        :return: `Node` instance.
        """
        node = cls(name=name, length=length, **kw)
        for descendant in descendants or []:
            node.add_descendant(descendant)
        return node

    def add_descendant(self, node):
        # Attach `node` as a child and record the back-reference.
        node.ancestor = self
        self.descendants.append(node)

    @property
    def newick(self):
        """The representation of the Node in Newick format."""
        label = self.name or ''
        if self._length:
            label += ':' + self._length
        descendants = ','.join([n.newick for n in self.descendants])
        if descendants:
            descendants = '(' + descendants + ')'
        return descendants + label

    @property
    def is_leaf(self):
        # A node with no descendants is a leaf.
        return not bool(self.descendants)

    @property
    def is_binary(self):
        # True when every node in the subtree has exactly 0 or 2 children.
        return all([len(n.descendants) in (0, 2) for n in self.walk()])

    def walk(self, mode=None):
        """
        Traverses the (sub)tree rooted at self, yielding each visited Node.
        .. seealso:: https://en.wikipedia.org/wiki/Tree_traversal
        :param mode: `'postorder'` for post-order depth-first search; any
            other value yields self first and then recurses into each child
            in turn (pre-order depth-first, despite the historical
            "breadth-first" description in this module).
        :return: Generator of the visited Nodes.
        """
        if mode == 'postorder':
            for n in self._postorder():
                yield n
        else:  # default: yield self, then recurse into each child
            yield self
            for node in self.descendants:
                for n in node.walk():
                    yield n

    def visit(self, visitor, predicate=None, **kw):
        """
        Apply a function to matching nodes in the (sub)tree rooted at self.
        :param visitor: A callable accepting a Node object as single argument.
        :param predicate: A callable accepting a Node object as single argument and \
        returning a boolean signaling whether Node matches; if `None` all nodes match.
        :param kw: Additional keyword arguments are passed through to self.walk.
        """
        predicate = predicate or bool
        for n in self.walk(**kw):
            if predicate(n):
                visitor(n)

    def _postorder(self):
        # Iterative post-order traversal. descendant_map holds, per node id,
        # the children not yet fully visited.
        stack = [self]
        descendant_map = {id(node): [n for n in node.descendants] for node in self.walk()}
        while stack:
            node = stack[-1]
            descendants = descendant_map[id(node)]
            # if we are at a leave-node, we remove the item from the stack
            if not descendants:
                stack.pop()
                yield node
                if stack:
                    # Mark this child as done in its parent's pending list.
                    descendant_map[id(stack[-1])].pop(0)
            else:
                stack.append(descendants[0])

    def get_leaves(self):
        """
        Get all the leaf nodes of the subtree descending from this node.
        :return: List of Nodes with no descendants.
        """
        return [n for n in self.walk() if n.is_leaf]

    def get_node(self, label):
        """
        Gets the specified node by name.
        :return: Node or None if name does not exist in tree.
        """
        for n in self.walk():
            if n.name == label:
                return n

    def get_leaf_names(self):
        """
        Get the names of all the leaf nodes of the subtree descending from
        this node.
        :return: List of names of Nodes with no descendants.
        """
        return [n.name for n in self.get_leaves()]

    def prune(self, leaves, inverse=False):
        """
        Remove all those nodes in the specified list, or if inverse=True,
        remove all those nodes not in the specified list. The specified nodes
        must be leaves and distinct from the root node.
        :param leaves: A list of leaf Node objects.
        :param inverse: Specifies whether to remove nodes in the list or not\
        in the list.
        """
        self.visit(
            lambda n: n.ancestor.descendants.remove(n),
            # We won't prune the root node, even if it is a leave and requested to
            # be pruned!
            lambda n: ((not inverse and n in leaves) or
                       (inverse and n.is_leaf and n not in leaves)) and n.ancestor,
            mode="postorder")

    def prune_by_names(self, leaf_names, inverse=False):
        """
        Perform an (inverse) prune, with leaves specified by name.
        :param leaf_names: A list of leaf Node names (strings).
        :param inverse: Specifies whether to remove nodes in the list or not\
        in the list.
        """
        self.prune([l for l in self.walk() if l.name in leaf_names], inverse)

    def remove_redundant_nodes(self, preserve_lengths=True):
        """
        Remove all nodes which have only a single child, and attach their
        grandchildren to their parent. The resulting tree has the minimum
        number of internal nodes required for the number of leaves.
        :param preserve_lengths: If true, branch lengths of removed nodes are \
        added to those of their children.
        """
        for n in self.walk(mode='postorder'):
            # Repeatedly splice out single-child ancestors of n.
            while n.ancestor and len(n.ancestor.descendants) == 1:
                grandfather = n.ancestor.ancestor
                father = n.ancestor
                if preserve_lengths:
                    n.length += father.length
                if grandfather:
                    # Replace `father` with `n` among the grandfather's children.
                    for i, child in enumerate(grandfather.descendants):
                        if child is father:
                            del grandfather.descendants[i]
                    grandfather.add_descendant(n)
                    father.ancestor = None
                else:
                    # `father` was the root: promote n's children into self.
                    self.descendants = n.descendants
                    if preserve_lengths:
                        self.length = n.length

    def resolve_polytomies(self):
        """
        Insert additional nodes with length=0 into the subtree in such a way
        that all non-leaf nodes have only 2 descendants, i.e. the tree becomes
        a fully resolved binary tree.
        """
        def _resolve_polytomies(n):
            # Move all but one child under a fresh zero-length internal node.
            new = Node(length=self._length_formatter(self._length_parser('0')))
            while len(n.descendants) > 1:
                new.add_descendant(n.descendants.pop())
            n.descendants.append(new)
        self.visit(_resolve_polytomies, lambda n: len(n.descendants) > 2)

    def remove_names(self):
        """
        Set the name of all nodes in the subtree to None.
        """
        self.visit(lambda n: setattr(n, 'name', None))

    def remove_internal_names(self):
        """
        Set the name of all non-leaf nodes in the subtree to None.
        """
        self.visit(lambda n: setattr(n, 'name', None), lambda n: not n.is_leaf)

    def remove_leaf_names(self):
        """
        Set the name of all leaf nodes in the subtree to None.
        """
        self.visit(lambda n: setattr(n, 'name', None), lambda n: n.is_leaf)

    def remove_lengths(self):
        """
        Set the length of all nodes in the subtree to None.
        """
        self.visit(lambda n: setattr(n, 'length', None))
def loads(s, strip_comments=False, **kw):
    """
    Load a list of trees from a Newick formatted string.
    :param s: Newick formatted string (';'-separated codes).
    :param strip_comments: Flag signaling whether to strip comments enclosed
        in square brackets.
    :param kw: Keyword arguments are passed through to `Node.create`.
    :return: List of Node objects.
    """
    kw['strip_comments'] = strip_comments
    return [parse_node(chunk.strip(), **kw) for chunk in s.split(';') if chunk.strip()]
def dumps(trees):
    """
    Serialize a list of trees in Newick format.
    :param trees: List of Node objects or a single Node object.
    :return: Newick formatted string.
    """
    if isinstance(trees, Node):
        trees = [trees]
    serialized = [tree.newick for tree in trees]
    return ';\n'.join(serialized) + ';'
def load(fp, strip_comments=False, **kw):
    """
    Load a list of trees from an open Newick formatted file.
    :param fp: open file handle.
    :param strip_comments: Flag signaling whether to strip comments enclosed
        in square brackets.
    :param kw: Keyword arguments are passed through to `Node.create`.
    :return: List of Node objects.
    """
    kw['strip_comments'] = strip_comments
    contents = fp.read()
    return loads(contents, **kw)
def dump(tree, fp):
    """Write the Newick serialization of *tree* to the open file handle *fp*."""
    fp.write(dumps(tree))
def read(fname, encoding='utf8', strip_comments=False, **kw):
    """
    Load a list of trees from a Newick formatted file.
    :param fname: file path.
    :param strip_comments: Flag signaling whether to strip comments enclosed
        in square brackets.
    :param kw: Keyword arguments are passed through to `Node.create`.
    :return: List of Node objects.
    """
    kw['strip_comments'] = strip_comments
    with io.open(fname, encoding=encoding) as fp:
        return load(fp, **kw)
def write(tree, fname, encoding='utf8'):
    """Serialize *tree* in Newick format to the file at *fname*."""
    with io.open(fname, encoding=encoding, mode='w') as fp:
        dump(tree, fp)
def _parse_name_and_length(s):
l = None
if ':' in s:
s, l = s.split(':', 1)
return s or None, l or None
def _parse_siblings(s, **kw):
    """
    Split *s* on top-level commas and parse each piece into a `Node`.

    Commas nested inside parentheses belong to a sub-clade and are ignored.
    (Technique: http://stackoverflow.com/a/26809037)
    """
    depth = 0
    buf = []
    # Appending a sentinel comma flushes the final sibling without a
    # special case after the loop.
    for ch in s + ",":
        if ch == "," and depth == 0:
            yield parse_node("".join(buf), **kw)
            buf = []
        else:
            if ch == "(":
                depth += 1
            elif ch == ")":
                depth -= 1
            buf.append(ch)
def parse_node(s, strip_comments=False, **kw):
    """
    Parse a Newick formatted string into a `Node` object.
    :param s: Newick formatted string to parse.
    :param strip_comments: Flag signaling whether to strip comments enclosed in square \
    brackets.
    :param kw: Keyword arguments are passed through to `Node.create`.
    :return: `Node` instance.
    :raises ValueError: if a closing ')' appears without a leading '('.
    """
    if strip_comments:
        s = COMMENT.sub('', s)
    s = s.strip()
    # Everything after the last ')' is the root's own label (name:length).
    parts = s.split(')')
    if len(parts) == 1:
        # No parentheses at all: a bare leaf label.
        descendants, label = [], s
    else:
        if not parts[0].startswith('('):
            raise ValueError('unmatched braces %s' % parts[0][:100])
        # Re-join everything before the last ')' and drop the opening '(':
        # what remains is the comma-separated list of child subtrees.
        descendants = list(_parse_siblings(')'.join(parts[:-1])[1:], **kw))
        label = parts[-1]
    name, length = _parse_name_and_length(label)
    return Node.create(name=name, length=length, descendants=descendants, **kw)
| 12,861 | 31.979487 | 90 | py |
prob-alpha | prob-alpha-master/src/Probs.py | """
This module computes the probabilities and functions defined in the article, for binary trees only.
In order to do that, we need to import these modules.
Variable `a` represents the alpha in Ford's Alpha model.
"""
from __future__ import division
import Shape, PhyloTree, newick
from sage.all import *
from sympy import *
a = var('a')
def Pi(T):
    """
    Compute the product in Proposition 2 (binary trees only).
    :param T: A given `Shape` object (it depends only on the topology of T).
    :return: A symbolic expression depending on `a`.
    """
    if T.is_leaf or T.children == []:
        return 1
    if len(T.children) > 1:
        left_leaves = T.children[0].count_leaves()
        right_leaves = T.children[1].count_leaves()
        if left_leaves * right_leaves != 0:
            return phi(left_leaves, right_leaves) * prod([Pi(child) for child in T.children])
    return 1
def phi(n, m):
    """
    Compute phi(n, m) as defined in the article.
    :param n, m: Given integers.
    :return: A symbolic expression depending on `a`.
    """
    first_term = a / Integer(2) * binomial(n + m, n)
    second_term = (1 - 2 * a) * binomial(n + m - 2, n - 1)
    return first_term + second_term
def Gamma_alpha(n):
    """
    Compute the Gamma-Alpha function of n: the product
    (1-a)(2-a)...(n-1-a) for n >= 2, and 0 otherwise.
    :param n: Given integer.
    :return: A symbolic expression depending on `a`.
    """
    if n < 2:
        return 0
    result = 1 - a
    for k in range(3, n + 1):
        result = (k - 1 - a) * result
    return result
def prob_shape(T):
    """
    Probability of generating the shape of T under Ford's alpha model,
    assuming T is a `Shape` (i.e. only a topology).
    :param T: The instance of `Shape` of which we want to compute the probability.
    :return: A symbolic expression depending on `a` (or an error string if
        the normalizing constant vanishes).
    """
    if not bool(T.children):
        return 1
    topology = T.shape()
    n_leaves = topology.count_leaves()
    normalizer = Gamma_alpha(n_leaves)
    if normalizer == 0:
        return "ERROR: division by zero"
    symmetries = topology.count_symmetries()
    expr = 2 ** (n_leaves - symmetries - 1) / normalizer * Pi(T)
    return expr.factor()
def prob_tree(T):
    """
    Probability of generating T under Ford's alpha model, assuming T is a
    `PhyloTree`. Given a `Shape`, this is the probability of one
    phylogenetic tree with that shape.
    :param T: The instance of `PhyloTree` of which we want to compute the probability.
    :return: A symbolic expression depending on `a`.
    """
    if not bool(T.children):
        return 1
    n_leaves = T.count_leaves()
    normalizer = Gamma_alpha(n_leaves)
    if normalizer == 0:
        return "ERROR: division by zero"
    expr = 2 ** (n_leaves - 1) / (factorial(n_leaves) * normalizer) * Pi(T)
    return expr.factor()
def prob(T):
    """
    Probability of generating T under Ford's alpha model: for a `Shape`,
    the probability of the shape; for a phylogenetic `PhyloTree` (no
    repeated labels), the probability of that tree.
    :param T: The instance of `Shape` of which we want to compute the probability.
    :return: A symbolic expression depending on `a`.
    """
    if not bool(T.children):
        return 1
    n_leaves = T.count_leaves()
    normalizer = Gamma_alpha(n_leaves)
    if normalizer == 0:
        return "ERROR: division by zero"
    symmetries = T.count_symmetries()
    n_labels = len(T.labels())
    expr = 2 ** (n_leaves - symmetries - 1) / (factorial(n_labels) * normalizer) * Pi(T)
    return expr.factor()
def shape_probs_from_list(X):
    """
    Probabilities of all the `Shape` instances in a string of Newick codes.
    :param X: a string containing several Newick codes.
    :return: A list of symbolic expressions depending on `a`.
    """
    return [prob_shape(tree) for tree in Shape.from_newick_list(X)]
def tree_probs_from_list(X):
    """
    Probabilities of all the `PhyloTree` instances in a string of Newick codes.
    :param X: a string containing several Newick codes.
    :return: A list of symbolic expressions depending on `a`.
    """
    return [prob_tree(tree) for tree in PhyloTree.from_newick_list(X)]
def shape_probs_from_file(fname, encoding='utf8', strip_comments=False, **kw):
    """
    Load Newick codes from a file and return the probability of each,
    interpreted as a `Shape`.
    :param fname: file path.
    :param strip_comments: Flag signaling whether to strip comments enclosed
        in square brackets.
    :param kw: Keyword arguments are passed through to `Node.read`.
    :return: A list of symbolic expressions depending on `a`.
    """
    nodes = newick.read(fname, encoding, strip_comments, **kw)
    return [prob_shape(Shape.newick_node_to_shape(node)) for node in nodes]
def tree_probs_from_file(fname, encoding='utf8', strip_comments=False, **kw):
    """
    Load a list of instances of `PhyloTree` from a Newick formatted file and return their probabilities in a list, assuming
    they are instances of `PhyloTree`.
    :param fname: file path.
    :param strip_comments: Flag signaling whether to strip comments enclosed in square \
    brackets.
    :param kw: Keyword arguments are passed through to `Node.read`.
    :return: A list of symbolic expressions depending on `a`.
    """
    # Parse every Newick node from the file, convert each to a PhyloTree, and
    # evaluate its probability under the Alpha model.
    l = newick.read(fname, encoding, strip_comments, **kw)
    return [prob_tree(PhyloTree.newick_node_to_tree(t)) for t in l] | 5,462 | 35.178808 | 132 | py
bioselector | bioselector-master/scripts/addAggregator.py | import sys
import json
from efsassembler import ScriptsManager
# CLI contract: argv[1] is a JSON-encoded list whose first element is the
# filesystem path of a user-provided aggregation script.
args = sys.argv[1]
input_data = json.loads(args)
personalized_aggregator_path = input_data[0]
# Register the custom aggregation algorithm with efsassembler.
sm = ScriptsManager()
sm.add_aggregation_algorithm(personalized_aggregator_path)
print("Aggregator added:", personalized_aggregator_path)
# Flush so the parent process (which reads our stdout) sees the message.
sys.stdout.flush() | 317 | 21.714286 | 58 | py
bioselector | bioselector-master/scripts/addSelector.py | import sys
import json
from efsassembler import ScriptsManager
# CLI contract: argv[1] is a JSON-encoded list whose first element is the
# filesystem path of a user-provided feature-selection script.
args = sys.argv[1]
input_data = json.loads(args)
personalized_selector_path = input_data[0]
# Register the custom feature-selection algorithm with efsassembler.
sm = ScriptsManager()
sm.add_fs_algorithm(personalized_selector_path)
print("Selector added:", personalized_selector_path)
# Flush so the parent process (which reads our stdout) sees the message.
sys.stdout.flush() | 300 | 20.5 | 52 | py
bioselector | bioselector-master/scripts/runExperiments.py | import sys
import json
import rpy2.robjects.packages as rpackages
from efsassembler import Experiments
# CLI contract: argv[1] is a JSON-encoded list [experiments_spec, results_path].
# NOTE(review): `rpackages` is imported but not referenced below — presumably
# kept for its R-bridge import side effects; confirm before removing.
args = sys.argv[1]
input_data = json.loads(args)
experiments = input_data[0]
results_path = input_data[1]
# Run the whole experiment batch, writing outputs under results_path.
exp = Experiments(experiments, results_path)
exp.run() | 266 | 19.538462 | 44 | py
GANFingerprints | GANFingerprints-master/classifier/tfutil.py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import os
import sys
import inspect
import importlib
import imp
import numpy as np
from collections import OrderedDict
import tensorflow as tf
#----------------------------------------------------------------------------
# Convenience.
def run(*args, **kwargs):
    """Evaluate the given ops/tensors in the current default TF session."""
    session = tf.get_default_session()
    return session.run(*args, **kwargs)
def is_tf_expression(x):
    """Return True if `x` is a TensorFlow graph object (Tensor, Variable, or Operation)."""
    # One isinstance() call with a tuple replaces three chained checks.
    return isinstance(x, (tf.Tensor, tf.Variable, tf.Operation))
def shape_to_list(shape):
    """Convert a TensorShape into a plain Python list (None marks unknown dims)."""
    dims = []
    for dim in shape:
        dims.append(dim.value)
    return dims
def flatten(x):
    """Reshape tensor `x` into a rank-1 tensor."""
    with tf.name_scope('Flatten'):
        flat = tf.reshape(x, [-1])
        return flat
def log2(x):
    """Elementwise base-2 logarithm, computed as ln(x) / ln(2)."""
    with tf.name_scope('Log2'):
        inv_ln2 = np.float32(1.0 / np.log(2.0))
        return tf.log(x) * inv_ln2
def exp2(x):
    """Elementwise base-2 exponential: 2**x == exp(x * ln 2)."""
    with tf.name_scope('Exp2'):
        ln2 = np.float32(np.log(2.0))
        return tf.exp(x * ln2)
def lerp(a, b, t):
    """Linear interpolation between `a` and `b` with weight `t`."""
    with tf.name_scope('Lerp'):
        delta = b - a
        return a + delta * t
def lerp_clip(a, b, t):
    """Linear interpolation between `a` and `b` with `t` clipped to [0, 1]."""
    with tf.name_scope('LerpClip'):
        t_clipped = tf.clip_by_value(t, 0.0, 1.0)
        return a + (b - a) * t_clipped
def absolute_name_scope(scope): # Forcefully enter the specified name scope, ignoring any surrounding scopes.
    # A trailing '/' makes tf.name_scope treat the name as absolute rather
    # than nesting it under the currently active scope.
    return tf.name_scope(scope + '/')
#----------------------------------------------------------------------------
# Initialize TensorFlow graph and session using good default settings.
def init_tf(config_dict=None):
    """
    Initialize the default TensorFlow graph and session if not already done.

    :param config_dict: Optional dict of tf.ConfigProto overrides, e.g.
        {'gpu_options.allow_growth': True}. None means no overrides.
    """
    # BUGFIX: the default used to be `dict()`, which is evaluated once at
    # definition time (shared mutable default); `None` + fallback avoids that.
    if config_dict is None:
        config_dict = {}
    if tf.get_default_session() is None:
        tf.set_random_seed(np.random.randint(1 << 31))
        create_session(config_dict, force_as_default=True)
#----------------------------------------------------------------------------
# Create tf.Session based on config dict of the form
# {'gpu_options.allow_growth': True}
def create_session(config_dict=None, force_as_default=False):
    """
    Create a tf.Session from a flat dict of ConfigProto field overrides.

    :param config_dict: keys are dotted paths into tf.ConfigProto, e.g.
        {'gpu_options.allow_growth': True}. None means no overrides.
        (BUGFIX: was a shared mutable `dict()` default.)
    :param force_as_default: Permanently install the session as the default
        without requiring a `with session.as_default():` block.
    :return: The newly created tf.Session.
    """
    config = tf.ConfigProto()
    # Walk each dotted path down to the final field and set it on the proto.
    for key, value in (config_dict or {}).items():
        fields = key.split('.')
        obj = config
        for field in fields[:-1]:
            obj = getattr(obj, field)
        setattr(obj, fields[-1], value)
    session = tf.Session(config=config)
    if force_as_default:
        # Enter the default-session context manually and disable nesting
        # checks so it stays active for the process lifetime.
        session._default_session = session.as_default()
        session._default_session.enforce_nesting = False
        session._default_session.__enter__()
    return session
#----------------------------------------------------------------------------
# Initialize all tf.Variables that have not already been initialized.
# Equivalent to the following, but more efficient and does not bloat the tf graph:
# tf.variables_initializer(tf.report_unitialized_variables()).run()
def init_uninited_vars(vars=None):
    """Run initializers only for those variables that are not yet initialized."""
    if vars is None:
        vars = tf.global_variables()
    candidates = []
    check_ops = []
    with tf.control_dependencies(None):  # ignore surrounding control_dependencies
        for var in vars:
            assert is_tf_expression(var)
            check_name = var.name.replace(':0', '/IsVariableInitialized:0')
            try:
                tf.get_default_graph().get_tensor_by_name(check_name)
            except KeyError:
                # No check op exists yet => the variable may be uninitialized.
                candidates.append(var)
                with absolute_name_scope(var.name.split(':')[0]):
                    check_ops.append(tf.is_variable_initialized(var))
    statuses = run(check_ops)
    uninited = [var for var, inited in zip(candidates, statuses) if not inited]
    run([var.initializer for var in uninited])
#----------------------------------------------------------------------------
# Set the values of given tf.Variables.
# Equivalent to the following, but more efficient and does not bloat the tf graph:
# tfutil.run([tf.assign(var, value) for var, value in var_to_value_dict.items()]
def set_vars(var_to_value_dict):
    """Assign new values to the given tf.Variables, reusing cached setter ops."""
    ops = []
    feed_dict = {}
    for var, value in var_to_value_dict.items():
        assert is_tf_expression(var)
        setter_name = var.name.replace(':0', '/setter:0')
        try:
            # Reuse the setter op if one was already created for this variable.
            setter = tf.get_default_graph().get_tensor_by_name(setter_name)
        except KeyError:
            with absolute_name_scope(var.name.split(':')[0]):
                with tf.control_dependencies(None):  # ignore surrounding control_dependencies
                    setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, 'new_value'), name='setter')  # create new setter
        ops.append(setter)
        feed_dict[setter.op.inputs[1]] = value
    run(ops, feed_dict)
#----------------------------------------------------------------------------
# Autosummary creates an identity op that internally keeps track of the input
# values and automatically shows up in TensorBoard. The reported value
# represents an average over input components. The average is accumulated
# constantly over time and flushed when save_summaries() is called.
#
# Notes:
# - The output tensor must be used as an input for something else in the
# graph. Otherwise, the autosummary op will not get executed, and the average
# value will not get accumulated.
# - It is perfectly fine to include autosummaries with the same name in
# several places throughout the graph, even if they are executed concurrently.
# - It is ok to also pass in a python scalar or numpy array. In this case, it
# is added to the average immediately.
# Internal bookkeeping for the autosummary machinery (reset on finalize).
_autosummary_vars = OrderedDict() # name => [var, ...]
_autosummary_immediate = OrderedDict() # name => update_op, update_value
_autosummary_finalized = False
def autosummary(name, value):
    """Accumulate `value` into the running average reported under `name`."""
    scope_id = name.replace('/', '_')
    if is_tf_expression(value):
        # Graph mode: attach an update op and return an identity of the value,
        # so the summary runs whenever the value is consumed downstream.
        with tf.name_scope('summary_' + scope_id), tf.device(value.device):
            update_op = _create_autosummary_var(name, value)
            with tf.control_dependencies([update_op]):
                return tf.identity(value)
    # Immediate mode: a python scalar or numpy array is folded in right away.
    if name not in _autosummary_immediate:
        with absolute_name_scope('Autosummary/' + scope_id), tf.device(None), tf.control_dependencies(None):
            update_value = tf.placeholder(tf.float32)
            update_op = _create_autosummary_var(name, update_value)
            _autosummary_immediate[name] = update_op, update_value
    update_op, update_value = _autosummary_immediate[name]
    run(update_op, {update_value: np.float32(value)})
    return value
# Create the necessary ops to include autosummaries in TensorBoard report.
# Note: This should be done only once per graph.
def finalize_autosummaries():
    """Build the TensorBoard report/reset ops for all autosummaries (runs once)."""
    global _autosummary_finalized
    if _autosummary_finalized:
        return
    _autosummary_finalized = True
    init_uninited_vars([var for vars in _autosummary_vars.values() for var in vars])
    with tf.device(None), tf.control_dependencies(None):
        for name, vars in _autosummary_vars.items():
            id = name.replace('/', '_')
            with absolute_name_scope('Autosummary/' + id):
                # Each var holds [numerator, denominator]; summing across all
                # replicas of this summary yields the global running average.
                sum = tf.add_n(vars)
                avg = sum[0] / sum[1]
                with tf.control_dependencies([avg]): # read before resetting
                    reset_ops = [tf.assign(var, tf.zeros(2)) for var in vars]
                    with tf.name_scope(None), tf.control_dependencies(reset_ops): # reset before reporting
                        tf.summary.scalar(name, avg)
# Internal helper for creating autosummary accumulators.
def _create_autosummary_var(name, value_expr):
    """
    Create/extend the [numerator, denominator] accumulator for autosummary
    `name` and return the op that folds `value_expr` into it.
    """
    assert not _autosummary_finalized
    v = tf.cast(value_expr, tf.float32)
    # BUGFIX: ndims was compared with `is`, which relies on CPython small-int
    # caching and emits a SyntaxWarning on modern Python; use `==` instead.
    if v.shape.ndims == 0:
        v = [v, np.float32(1.0)]
    elif v.shape.ndims == 1:
        v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)]
    else:
        v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))]
    # Contribute zeros instead of poisoning the average when the value is NaN/Inf.
    v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2))
    with tf.control_dependencies(None):
        var = tf.Variable(tf.zeros(2)) # [numerator, denominator]
        update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
    if name in _autosummary_vars:
        _autosummary_vars[name].append(var)
    else:
        _autosummary_vars[name] = [var]
    return update_op
#----------------------------------------------------------------------------
# Call filewriter.add_summary() with all summaries in the default graph,
# automatically finalizing and merging them on the first call.
# Cached merged-summary op; built lazily on the first save_summaries() call.
_summary_merge_op = None
def save_summaries(filewriter, global_step=None):
    """Evaluate all summaries and write them to `filewriter`, finalizing autosummaries on first use."""
    global _summary_merge_op
    if _summary_merge_op is None:
        finalize_autosummaries()
        with tf.device(None), tf.control_dependencies(None):
            _summary_merge_op = tf.summary.merge_all()
    filewriter.add_summary(_summary_merge_op.eval(), global_step)
#----------------------------------------------------------------------------
# Utilities for importing modules and objects by name.
def import_module(module_or_obj_name):
    """
    Resolve a dotted name to (module, remaining_attribute_path).

    The longest importable prefix wins; 'np' and 'tf' are accepted as
    shorthands for numpy and tensorflow. Raises ImportError if no prefix
    of the name is importable.
    """
    parts = module_or_obj_name.split('.')
    shorthands = {'np': 'numpy', 'tf': 'tensorflow'}
    parts[0] = shorthands.get(parts[0], parts[0])
    for split_at in reversed(range(1, len(parts) + 1)):
        try:
            module = importlib.import_module('.'.join(parts[:split_at]))
        except ImportError:
            continue
        return module, '.'.join(parts[split_at:])
    raise ImportError(module_or_obj_name)
def find_obj_in_module(module, relative_obj_name):
    """Resolve the dotted attribute path `relative_obj_name` inside `module`."""
    obj = module
    remaining = relative_obj_name.split('.')
    while remaining:
        obj = getattr(obj, remaining.pop(0))
    return obj
def import_obj(obj_name):
    """Import the object (module, function, or class) named by a dotted path."""
    module, relative_name = import_module(obj_name)
    obj = find_obj_in_module(module, relative_name)
    return obj
def call_func_by_name(*args, func=None, **kwargs):
    """Look up the callable named by `func` and invoke it with the given arguments."""
    assert func is not None
    target = import_obj(func)
    return target(*args, **kwargs)
#----------------------------------------------------------------------------
# Wrapper for tf.train.Optimizer that automatically takes care of:
# - Gradient averaging for multi-GPU training.
# - Dynamic loss scaling and typecasts for FP16 training.
# - Ignoring corrupted gradients that contain NaNs/Infs.
# - Reporting statistics.
# - Well-chosen default settings.
class Optimizer:
    """Wrapper around tf.train.Optimizer that handles multi-GPU gradient
    averaging, dynamic loss scaling for FP16 training, NaN/Inf-robust
    updates, and TensorBoard statistics reporting."""
    def __init__(
        self,
        name = 'Train',
        tf_optimizer = 'tf.train.AdamOptimizer',
        learning_rate = 0.001,
        use_loss_scaling = False,
        loss_scaling_init = 64.0,
        loss_scaling_inc = 0.0005,
        loss_scaling_dec = 1.0,
        **kwargs):
        # Init fields.
        self.name = name
        self.learning_rate = tf.convert_to_tensor(learning_rate)
        self.id = self.name.replace('/', '.')
        self.scope = tf.get_default_graph().unique_name(self.id)
        self.optimizer_class = import_obj(tf_optimizer)
        self.optimizer_kwargs = dict(kwargs)
        self.use_loss_scaling = use_loss_scaling
        self.loss_scaling_init = loss_scaling_init
        self.loss_scaling_inc = loss_scaling_inc
        self.loss_scaling_dec = loss_scaling_dec
        self._grad_shapes = None # [shape, ...]
        self._dev_opt = OrderedDict() # device => optimizer
        self._dev_grads = OrderedDict() # device => [[(grad, var), ...], ...]
        self._dev_ls_var = OrderedDict() # device => variable (log2 of loss scaling factor)
        self._updates_applied = False
    # Register the gradients of the given loss function with respect to the given variables.
    # Intended to be called once per GPU.
    def register_gradients(self, loss, vars):
        assert not self._updates_applied
        # Validate arguments.
        if isinstance(vars, dict):
            vars = list(vars.values()) # allow passing in Network.trainables as vars
        assert isinstance(vars, list) and len(vars) >= 1
        assert all(is_tf_expression(expr) for expr in vars + [loss])
        if self._grad_shapes is None:
            self._grad_shapes = [shape_to_list(var.shape) for var in vars]
        # All GPUs must register the same variables in the same order.
        assert len(vars) == len(self._grad_shapes)
        assert all(shape_to_list(var.shape) == var_shape for var, var_shape in zip(vars, self._grad_shapes))
        dev = loss.device
        assert all(var.device == dev for var in vars)
        # Register device and compute gradients.
        with tf.name_scope(self.id + '_grad'), tf.device(dev):
            if dev not in self._dev_opt:
                opt_name = self.scope.replace('/', '_') + '_opt%d' % len(self._dev_opt)
                self._dev_opt[dev] = self.optimizer_class(name=opt_name, learning_rate=self.learning_rate, **self.optimizer_kwargs)
                self._dev_grads[dev] = []
            loss = self.apply_loss_scaling(tf.cast(loss, tf.float32))
            grads = self._dev_opt[dev].compute_gradients(loss, vars, gate_gradients=tf.train.Optimizer.GATE_NONE) # disable gating to reduce memory usage
            grads = [(g, v) if g is not None else (tf.zeros_like(v), v) for g, v in grads] # replace disconnected gradients with zeros
            self._dev_grads[dev].append(grads)
    # Construct training op to update the registered variables based on their gradients.
    def apply_updates(self):
        assert not self._updates_applied
        self._updates_applied = True
        devices = list(self._dev_grads.keys())
        total_grads = sum(len(grads) for grads in self._dev_grads.values())
        assert len(devices) >= 1 and total_grads >= 1
        ops = []
        with absolute_name_scope(self.scope):
            # Cast gradients to FP32 and calculate partial sum within each device.
            dev_grads = OrderedDict() # device => [(grad, var), ...]
            for dev_idx, dev in enumerate(devices):
                with tf.name_scope('ProcessGrads%d' % dev_idx), tf.device(dev):
                    sums = []
                    for gv in zip(*self._dev_grads[dev]):
                        assert all(v is gv[0][1] for g, v in gv)
                        g = [tf.cast(g, tf.float32) for g, v in gv]
                        g = g[0] if len(g) == 1 else tf.add_n(g)
                        sums.append((g, gv[0][1]))
                    dev_grads[dev] = sums
            # Sum gradients across devices.
            if len(devices) > 1:
                with tf.name_scope('SumAcrossGPUs'), tf.device(None):
                    for var_idx, grad_shape in enumerate(self._grad_shapes):
                        g = [dev_grads[dev][var_idx][0] for dev in devices]
                        if np.prod(grad_shape): # nccl does not support zero-sized tensors
                            g = tf.contrib.nccl.all_sum(g)
                        for dev, gg in zip(devices, g):
                            dev_grads[dev][var_idx] = (gg, dev_grads[dev][var_idx][1])
            # Apply updates separately on each device.
            for dev_idx, (dev, grads) in enumerate(dev_grads.items()):
                with tf.name_scope('ApplyGrads%d' % dev_idx), tf.device(dev):
                    # Scale gradients as needed.
                    if self.use_loss_scaling or total_grads > 1:
                        with tf.name_scope('Scale'):
                            coef = tf.constant(np.float32(1.0 / total_grads), name='coef')
                            coef = self.undo_loss_scaling(coef)
                            grads = [(g * coef, v) for g, v in grads]
                    # Check for overflows.
                    with tf.name_scope('CheckOverflow'):
                        grad_ok = tf.reduce_all(tf.stack([tf.reduce_all(tf.is_finite(g)) for g, v in grads]))
                    # Update weights and adjust loss scaling.
                    # The update is skipped entirely when any gradient is NaN/Inf.
                    with tf.name_scope('UpdateWeights'):
                        opt = self._dev_opt[dev]
                        ls_var = self.get_loss_scaling_var(dev)
                        if not self.use_loss_scaling:
                            ops.append(tf.cond(grad_ok, lambda: opt.apply_gradients(grads), tf.no_op))
                        else:
                            ops.append(tf.cond(grad_ok,
                                lambda: tf.group(tf.assign_add(ls_var, self.loss_scaling_inc), opt.apply_gradients(grads)),
                                lambda: tf.group(tf.assign_sub(ls_var, self.loss_scaling_dec))))
                    # Report statistics on the last device.
                    if dev == devices[-1]:
                        with tf.name_scope('Statistics'):
                            ops.append(autosummary(self.id + '/learning_rate', self.learning_rate))
                            ops.append(autosummary(self.id + '/overflow_frequency', tf.where(grad_ok, 0, 1)))
                            if self.use_loss_scaling:
                                ops.append(autosummary(self.id + '/loss_scaling_log2', ls_var))
            # Initialize variables and group everything into a single op.
            self.reset_optimizer_state()
            init_uninited_vars(list(self._dev_ls_var.values()))
            return tf.group(*ops, name='TrainingOp')
    # Reset internal state of the underlying optimizer.
    def reset_optimizer_state(self):
        run([var.initializer for opt in self._dev_opt.values() for var in opt.variables()])
    # Get or create variable representing log2 of the current dynamic loss scaling factor.
    def get_loss_scaling_var(self, device):
        if not self.use_loss_scaling:
            return None
        if device not in self._dev_ls_var:
            with absolute_name_scope(self.scope + '/LossScalingVars'), tf.control_dependencies(None):
                self._dev_ls_var[device] = tf.Variable(np.float32(self.loss_scaling_init), name='loss_scaling_var')
        return self._dev_ls_var[device]
    # Apply dynamic loss scaling for the given expression.
    def apply_loss_scaling(self, value):
        assert is_tf_expression(value)
        if not self.use_loss_scaling:
            return value
        return value * exp2(self.get_loss_scaling_var(value.device))
    # Undo the effect of dynamic loss scaling for the given expression.
    def undo_loss_scaling(self, value):
        assert is_tf_expression(value)
        if not self.use_loss_scaling:
            return value
        return value * exp2(-self.get_loss_scaling_var(value.device))
#----------------------------------------------------------------------------
# Generic network abstraction.
#
# Acts as a convenience wrapper for a parameterized network construction
# function, providing several utility methods and convenient access to
# the inputs/outputs/weights.
#
# Network objects can be safely pickled and unpickled for long-term
# archival purposes. The pickling works reliably as long as the underlying
# network construction function is defined in a standalone Python module
# that has no side effects or application-specific imports.
# Hooks used when loading pickles produced by older code versions (see legacy.py).
network_import_handlers = [] # Custom import handlers for dealing with legacy data in pickle import.
_network_import_modules = [] # Temporary modules created during pickle import (kept alive to avoid gc).
class Network:
    """Convenience wrapper for a parameterized network construction function.

    Builds a template graph from the named build function, tracks its
    variables, and provides NumPy-based inference via run(), cloning,
    moving-average setup, and pickling of architecture source + weights.
    """
    def __init__(self,
        name=None, # Network name. Used to select TensorFlow name and variable scopes.
        func=None, # Fully qualified name of the underlying network construction function.
        reuse=False, # If reuse the variables from the initialized ones
        **static_kwargs): # Keyword arguments to be passed in to the network construction function.
        self._init_fields()
        self.name = name
        self.static_kwargs = dict(static_kwargs)
        # Init build func. The module's full source is captured so the
        # network can be reconstructed from a pickle without the codebase.
        module, self._build_func_name = import_module(func)
        self._build_module_src = inspect.getsource(module)
        self._build_func = find_obj_in_module(module, self._build_func_name)
        # Init graph.
        self._init_graph()
        if not reuse:
            self.reset_vars()
    def _init_fields(self):
        self.name = None # User-specified name, defaults to build func name if None.
        self.scope = None # Unique TF graph scope, derived from the user-specified name.
        self.static_kwargs = dict() # Arguments passed to the user-supplied build func.
        self.num_inputs = 0 # Number of input tensors.
        self.num_outputs = 0 # Number of output tensors.
        self.input_shapes = [[]] # Input tensor shapes (NC or NCHW), including minibatch dimension.
        self.output_shapes = [[]] # Output tensor shapes (NC or NCHW), including minibatch dimension.
        self.input_shape = [] # Short-hand for input_shapes[0].
        self.output_shape = [] # Short-hand for output_shapes[0].
        self.input_templates = [] # Input placeholders in the template graph.
        self.output_templates = [] # Output tensors in the template graph.
        self.input_names = [] # Name string for each input.
        self.output_names = [] # Name string for each output.
        self.vars = OrderedDict() # All variables (localname => var).
        self.trainables = OrderedDict() # Trainable variables (localname => var).
        self._build_func = None # User-supplied build function that constructs the network.
        self._build_func_name = None # Name of the build function.
        self._build_module_src = None # Full source code of the module containing the build function.
        self._run_cache = dict() # Cached graph data for Network.run().
    def _init_graph(self):
        """Build the template graph and collect input/output/variable metadata."""
        # Collect inputs: every positional parameter of the build func without
        # a default value is treated as an input tensor.
        self.input_names = []
        for param in inspect.signature(self._build_func).parameters.values():
            if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
                self.input_names.append(param.name)
        self.num_inputs = len(self.input_names)
        assert self.num_inputs >= 1
        # Choose name and scope.
        if self.name is None:
            self.name = self._build_func_name
        #self.scope = tf.get_default_graph().unique_name(self.name.replace('/', '_'), mark_as_used=False)
        self.scope = self.name.replace('/', '_') # enable variable reuse to share weights between networks
        # Build template graph.
        with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
            assert tf.get_variable_scope().name == self.scope
            with absolute_name_scope(self.scope): # ignore surrounding name_scope
                with tf.control_dependencies(None): # ignore surrounding control_dependencies
                    self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
                    out_expr = self._build_func(*self.input_templates, is_template_graph=True, **self.static_kwargs)
        # Collect outputs.
        assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)
        self.output_templates = [out_expr] if is_tf_expression(out_expr) else list(out_expr)
        self.output_names = [t.name.split('/')[-1].split(':')[0] for t in self.output_templates]
        self.num_outputs = len(self.output_templates)
        assert self.num_outputs >= 1
        # Populate remaining fields.
        self.input_shapes = [shape_to_list(t.shape) for t in self.input_templates]
        self.output_shapes = [shape_to_list(t.shape) for t in self.output_templates]
        self.input_shape = self.input_shapes[0]
        self.output_shape = self.output_shapes[0]
        self.vars = OrderedDict([(self.get_var_localname(var), var) for var in tf.global_variables(self.scope + '/')])
        self.trainables = OrderedDict([(self.get_var_localname(var), var) for var in tf.trainable_variables(self.scope + '/')])
    # Run initializers for all variables defined by this network.
    def reset_vars(self):
        run([var.initializer for var in self.vars.values()])
    # Run initializers for all trainable variables defined by this network.
    def reset_trainables(self):
        run([var.initializer for var in self.trainables.values()])
    # Get TensorFlow expression(s) for the output(s) of this network, given the inputs.
    def get_output_for(self, *in_expr, return_as_list=False, **dynamic_kwargs):
        assert len(in_expr) == self.num_inputs
        all_kwargs = dict(self.static_kwargs)
        all_kwargs.update(dynamic_kwargs)
        with tf.variable_scope(self.scope, reuse=True):
            assert tf.get_variable_scope().name == self.scope
            named_inputs = [tf.identity(expr, name=name) for expr, name in zip(in_expr, self.input_names)]
            out_expr = self._build_func(*named_inputs, **all_kwargs)
        assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)
        if return_as_list:
            out_expr = [out_expr] if is_tf_expression(out_expr) else list(out_expr)
        return out_expr
    # Get the local name of a given variable, excluding any surrounding name scopes.
    def get_var_localname(self, var_or_globalname):
        assert is_tf_expression(var_or_globalname) or isinstance(var_or_globalname, str)
        globalname = var_or_globalname if isinstance(var_or_globalname, str) else var_or_globalname.name
        assert globalname.startswith(self.scope + '/')
        localname = globalname[len(self.scope) + 1:]
        localname = localname.split(':')[0]
        return localname
    # Find variable by local or global name.
    def find_var(self, var_or_localname):
        assert is_tf_expression(var_or_localname) or isinstance(var_or_localname, str)
        return self.vars[var_or_localname] if isinstance(var_or_localname, str) else var_or_localname
    # Get the value of a given variable as NumPy array.
    # Note: This method is very inefficient -- prefer to use tfutil.run(list_of_vars) whenever possible.
    def get_var(self, var_or_localname):
        return self.find_var(var_or_localname).eval()
    # Set the value of a given variable based on the given NumPy array.
    # Note: This method is very inefficient -- prefer to use tfutil.set_vars() whenever possible.
    def set_var(self, var_or_localname, new_value):
        return set_vars({self.find_var(var_or_localname): new_value})
    # Pickle export.
    def __getstate__(self):
        # The pickle carries the build module's source code plus a snapshot of
        # all variable values, so no project code is needed to reload it.
        return {
            'version': 2,
            'name': self.name,
            'static_kwargs': self.static_kwargs,
            'build_module_src': self._build_module_src,
            'build_func_name': self._build_func_name,
            'variables': list(zip(self.vars.keys(), run(list(self.vars.values()))))}
    # Pickle import.
    def __setstate__(self, state):
        self._init_fields()
        # Execute custom import handlers.
        for handler in network_import_handlers:
            state = handler(state)
        # Set basic fields.
        assert state['version'] == 2
        self.name = state['name']
        self.static_kwargs = state['static_kwargs']
        self._build_module_src = state['build_module_src']
        self._build_func_name = state['build_func_name']
        # Parse imported module.
        # NOTE: exec of pickled source — only load pickles from trusted sources.
        module = imp.new_module('_tfutil_network_import_module_%d' % len(_network_import_modules))
        exec(self._build_module_src, module.__dict__)
        self._build_func = find_obj_in_module(module, self._build_func_name)
        _network_import_modules.append(module) # avoid gc
        # Init graph.
        self._init_graph()
        self.reset_vars()
        set_vars({self.find_var(name): value for name, value in state['variables']})
    # Create a clone of this network with its own copy of the variables.
    def clone(self, name=None):
        net = object.__new__(Network)
        net._init_fields()
        net.name = name if name is not None else self.name
        net.static_kwargs = dict(self.static_kwargs)
        net._build_module_src = self._build_module_src
        net._build_func_name = self._build_func_name
        net._build_func = self._build_func
        net._init_graph()
        net.copy_vars_from(self)
        return net
    # Copy the values of all variables from the given network.
    def copy_vars_from(self, src_net):
        assert isinstance(src_net, Network)
        name_to_value = run({name: src_net.find_var(name) for name in self.vars.keys()})
        set_vars({self.find_var(name): value for name, value in name_to_value.items()})
    # Copy the values of all trainable variables from the given network.
    def copy_trainables_from(self, src_net):
        assert isinstance(src_net, Network)
        name_to_value = run({name: src_net.find_var(name) for name in self.trainables.keys()})
        set_vars({self.find_var(name): value for name, value in name_to_value.items()})
    # Create new network with the given parameters, and copy all variables from this network.
    def convert(self, name=None, func=None, **static_kwargs):
        net = Network(name, func, **static_kwargs)
        net.copy_vars_from(self)
        return net
    # Construct a TensorFlow op that updates the variables of this network
    # to be slightly closer to those of the given network.
    def setup_as_moving_average_of(self, src_net, beta=0.99, beta_nontrainable=0.0):
        assert isinstance(src_net, Network)
        with absolute_name_scope(self.scope):
            with tf.name_scope('MovingAvg'):
                ops = []
                for name, var in self.vars.items():
                    if name in src_net.vars:
                        # Non-trainable vars (e.g. running stats) use their own beta.
                        cur_beta = beta if name in self.trainables else beta_nontrainable
                        new_value = lerp(src_net.vars[name], var, cur_beta)
                        ops.append(var.assign(new_value))
                return tf.group(*ops)
    # Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s).
    def run(self, *in_arrays,
        return_as_list = False, # True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
        print_progress = False, # Print progress to the console? Useful for very large input arrays.
        minibatch_size = None, # Maximum minibatch size to use, None = disable batching.
        num_gpus = 1, # Number of GPUs to use.
        out_mul = 1.0, # Multiplicative constant to apply to the output(s).
        out_add = 0.0, # Additive constant to apply to the output(s).
        out_shrink = 1, # Shrink the spatial dimensions of the output(s) by the given factor.
        out_dtype = None, # Convert the output to the specified data type.
        **dynamic_kwargs): # Additional keyword arguments to pass into the network construction function.
        assert len(in_arrays) == self.num_inputs
        num_items = in_arrays[0].shape[0]
        if minibatch_size is None:
            minibatch_size = num_items
        # Cache key covers every option that affects the constructed graph.
        key = str([list(sorted(dynamic_kwargs.items())), num_gpus, out_mul, out_add, out_shrink, out_dtype])
        # Build graph.
        if key not in self._run_cache:
            with absolute_name_scope(self.scope + '/Run'), tf.control_dependencies(None):
                in_split = list(zip(*[tf.split(x, num_gpus) for x in self.input_templates]))
                out_split = []
                for gpu in range(num_gpus):
                    with tf.device('/gpu:%d' % gpu):
                        out_expr = self.get_output_for(*in_split[gpu], return_as_list=True, **dynamic_kwargs)
                        if out_mul != 1.0:
                            out_expr = [x * out_mul for x in out_expr]
                        if out_add != 0.0:
                            out_expr = [x + out_add for x in out_expr]
                        if out_shrink > 1:
                            ksize = [1, 1, out_shrink, out_shrink]
                            out_expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') for x in out_expr]
                        if out_dtype is not None:
                            if tf.as_dtype(out_dtype).is_integer:
                                out_expr = [tf.round(x) for x in out_expr]
                            out_expr = [tf.saturate_cast(x, out_dtype) for x in out_expr]
                        out_split.append(out_expr)
                self._run_cache[key] = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]
        # Run minibatches.
        out_expr = self._run_cache[key]
        out_arrays = [np.empty([num_items] + shape_to_list(expr.shape)[1:], expr.dtype.name) for expr in out_expr]
        for mb_begin in range(0, num_items, minibatch_size):
            if print_progress:
                print('\r%d / %d' % (mb_begin, num_items), end='')
            mb_end = min(mb_begin + minibatch_size, num_items)
            mb_in = [src[mb_begin : mb_end] for src in in_arrays]
            mb_out = tf.get_default_session().run(out_expr, dict(zip(self.input_templates, mb_in)))
            for dst, src in zip(out_arrays, mb_out):
                dst[mb_begin : mb_end] = src
        # Done.
        if print_progress:
            print('\r%d / %d' % (num_items, num_items))
        if not return_as_list:
            out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays)
        return out_arrays
    # Returns a list of (name, output_expr, trainable_vars) tuples corresponding to
    # individual layers of the network. Mainly intended to be used for reporting.
    def list_layers(self):
        patterns_to_ignore = ['/Setter', '/new_value', '/Shape', '/strided_slice', '/Cast', '/concat']
        all_ops = tf.get_default_graph().get_operations()
        all_ops = [op for op in all_ops if not any(p in op.name for p in patterns_to_ignore)]
        layers = []
        # Depth-first walk over op name scopes; a scope whose ops are all in
        # sub-scopes is expanded, otherwise it is reported as one layer.
        def recurse(scope, parent_ops, level):
            prefix = scope + '/'
            ops = [op for op in parent_ops if op.name == scope or op.name.startswith(prefix)]
            # Does not contain leaf nodes => expand immediate children.
            if level == 0 or all('/' in op.name[len(prefix):] for op in ops):
                visited = set()
                for op in ops:
                    suffix = op.name[len(prefix):]
                    if '/' in suffix:
                        suffix = suffix[:suffix.index('/')]
                    if suffix not in visited:
                        recurse(prefix + suffix, ops, level + 1)
                        visited.add(suffix)
            # Otherwise => interpret as a layer.
            else:
                layer_name = scope[len(self.scope)+1:]
                layer_output = ops[-1].outputs[0]
                layer_trainables = [op.outputs[0] for op in ops if op.type.startswith('Variable') and self.get_var_localname(op.name) in self.trainables]
                layers.append((layer_name, layer_output, layer_trainables))
        recurse(self.scope, all_ops, 0)
        return layers
    # Print a summary table of the network structure.
    def print_layers(self, title=None, hide_layers_with_no_params=False):
        if title is None: title = self.name
        print()
        print('%-28s%-12s%-24s%-24s' % (title, 'Params', 'OutputShape', 'WeightShape'))
        print('%-28s%-12s%-24s%-24s' % (('---',) * 4))
        total_params = 0
        for layer_name, layer_output, layer_trainables in self.list_layers():
            weights = [var for var in layer_trainables if var.name.endswith('/weight:0')]
            num_params = sum(np.prod(shape_to_list(var.shape)) for var in layer_trainables)
            total_params += num_params
            if hide_layers_with_no_params and num_params == 0:
                continue
            print('%-28s%-12s%-24s%-24s' % (
                layer_name,
                num_params if num_params else '-',
                layer_output.shape,
                weights[0].shape if len(weights) == 1 else '-'))
        print('%-28s%-12s%-24s%-24s' % (('---',) * 4))
        print('%-28s%-12s%-24s%-24s' % ('Total', total_params, '', ''))
        print()
    # Construct summary ops to include histograms of all trainable parameters in TensorBoard.
    def setup_weight_histograms(self, title=None):
        if title is None: title = self.name
        with tf.name_scope(None), tf.device(None), tf.control_dependencies(None):
            for localname, var in self.trainables.items():
                if '/' in localname:
                    p = localname.split('/')
                    name = title + '_' + p[-1] + '/' + '_'.join(p[:-1])
                else:
                    name = title + '_toplevel/' + localname
                tf.summary.histogram(name, var)
#----------------------------------------------------------------------------
| 37,226 | 48.438247 | 154 | py |
GANFingerprints | GANFingerprints-master/classifier/legacy.py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import pickle
import inspect
import numpy as np
import tfutil
import networks
#----------------------------------------------------------------------------
# Custom unpickler that is able to load network pickles produced by
# the old Theano implementation.
class LegacyUnpickler(pickle.Unpickler):
    """Unpickler that maps the old Theano-era 'network.Network' class to tfutil.Network."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def find_class(self, module, name):
        # Redirect the single legacy class; defer everything else to the default resolver.
        if (module, name) == ('network', 'Network'):
            return tfutil.Network
        return super().find_class(module, name)
#----------------------------------------------------------------------------
# Import handler for tfutil.Network that silently converts networks produced
# by the old Theano implementation to a suitable format.
# Mapping from legacy Theano build-function names to the TF build functions
# in networks.py that patch_theano_gan() converts them to.
theano_gan_remap = {
    'G_paper': 'G_paper',
    'G_progressive_8': 'G_paper',
    'D_paper': 'D_paper',
    'D_progressive_8': 'D_paper'}
def patch_theano_gan(state):
    """Convert a network state pickled by the old Theano implementation into
    the version-2 format understood by tfutil.Network.

    Returns *state* unchanged when it already carries a 'version' tag or is
    not one of the known Theano GAN build functions; otherwise rebuilds the
    build kwargs and the flat parameter list into named TF variables.
    """
    if 'version' in state or state['build_func_spec']['func'] not in theano_gan_remap:
        return state
    spec = dict(state['build_func_spec'])
    func = spec.pop('func')
    resolution = spec.get('resolution', 32)
    resolution_log2 = int(np.log2(resolution))
    use_wscale = spec.get('use_wscale', True)
    # Only the default Theano configuration is supported by the TF networks;
    # anything else fails loudly here rather than silently mis-converting.
    assert spec.pop('label_size', 0) == 0
    assert spec.pop('use_batchnorm', False) == False
    assert spec.pop('tanh_at_end', None) is None
    assert spec.pop('mbstat_func', 'Tstdeps') == 'Tstdeps'
    assert spec.pop('mbstat_avg', 'all') == 'all'
    assert spec.pop('mbdisc_kernels', None) is None
    spec.pop( 'use_gdrop', True) # doesn't make a difference
    assert spec.pop('use_layernorm', False) == False
    spec[ 'fused_scale'] = False
    spec[ 'mbstd_group_size'] = 16
    # Rebuild the flat Theano parameter list into (name, value) pairs.
    # NOTE: the order of layer() calls below consumes param_iter and must
    # match the Theano parameter order exactly.
    vars = []
    param_iter = iter(state['param_values'])
    relu = np.sqrt(2); linear = 1.0
    def flatten2(w): return w.reshape(w.shape[0], -1)
    def he_std(gain, w): return gain / np.sqrt(np.prod(w.shape[:-1]))
    def wscale(gain, w): return w * next(param_iter) / he_std(gain, w) if use_wscale else w
    def layer(name, gain, w): return [(name + '/weight', wscale(gain, w)), (name + '/bias', next(param_iter))]
    # Generator: dense stem, per-resolution conv pairs, then one ToRGB per LOD.
    if func.startswith('G'):
        vars += layer('4x4/Dense', relu/4, flatten2(next(param_iter).transpose(1,0,2,3)))
        vars += layer('4x4/Conv', relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
        for res in range(3, resolution_log2 + 1):
            vars += layer('%dx%d/Conv0' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
            vars += layer('%dx%d/Conv1' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
        for lod in range(0, resolution_log2 - 1):
            vars += layer('ToRGB_lod%d' % lod, linear, next(param_iter)[np.newaxis, np.newaxis])
    # Discriminator: FromRGB heads and per-resolution conv pairs, then the 4x4 head.
    if func.startswith('D'):
        vars += layer('FromRGB_lod0', relu, next(param_iter)[np.newaxis, np.newaxis])
        for res in range(resolution_log2, 2, -1):
            vars += layer('%dx%d/Conv0' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
            vars += layer('%dx%d/Conv1' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
            vars += layer('FromRGB_lod%d' % (resolution_log2 - (res - 1)), relu, next(param_iter)[np.newaxis, np.newaxis])
        vars += layer('4x4/Conv', relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
        vars += layer('4x4/Dense0', relu, flatten2(next(param_iter)[:,:,::-1,::-1]).transpose())
        vars += layer('4x4/Dense1', linear, next(param_iter))
        # NOTE(review): the 'lod' variable is only added inside the D branch here;
        # confirm whether G networks are also expected to carry it.
        vars += [('lod', state['toplevel_params']['cur_lod'])]
    # Assemble the version-2 network state.
    return {
        'version': 2,
        'name': func,
        'build_module_src': inspect.getsource(networks),
        'build_func_name': theano_gan_remap[func],
        'static_kwargs': spec,
        'variables': vars}
# Register the converter so tfutil.Network applies it when importing pickles.
tfutil.network_import_handlers.append(patch_theano_gan)
#----------------------------------------------------------------------------
# Import handler for tfutil.Network that ignores unsupported/deprecated
# networks produced by older versions of the code.
def ignore_unknown_theano_network(state):
    """Replace unrecognized legacy Theano networks with a harmless dummy network."""
    # Anything that already carries a version tag is a modern pickle => keep as-is.
    if 'version' in state:
        return state
    print('Ignoring unknown Theano network:', state['build_func_spec']['func'])
    dummy_src = 'def dummy(input, **kwargs): input.set_shape([None, 1]); return input'
    return {
        'version': 2,
        'name': 'Dummy',
        'build_module_src': dummy_src,
        'build_func_name': 'dummy',
        'static_kwargs': {},
        'variables': []}
# Register the fallback handler; it runs after patch_theano_gan in registration order.
tfutil.network_import_handlers.append(ignore_unknown_theano_network)
#----------------------------------------------------------------------------
| 5,249 | 43.491525 | 122 | py |
GANFingerprints | GANFingerprints-master/classifier/loss.py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import numpy as np
import tensorflow as tf
import tfutil
#----------------------------------------------------------------------------
# Convenience func that casts all of its arguments to tf.float32.
def fp32(*values):
    """Cast every argument to tf.float32; a single result is returned unwrapped."""
    # A lone tuple argument is treated as the argument list itself.
    if len(values) == 1 and isinstance(values[0], tuple):
        values = values[0]
    casted = tuple(tf.cast(v, tf.float32) for v in values)
    return casted if len(casted) >= 2 else casted[0]
def C_classification(C_im, reals_orig, labels):
    """Softmax cross-entropy loss of the classifier C_im on real images."""
    with tf.name_scope('ClassificationPenalty'):
        logits = fp32(C_im.get_output_for(reals_orig))
        class_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits, dim=1)
        class_loss = tfutil.autosummary('Loss/real_class_loss', class_loss)
        loss = tf.identity(class_loss)
    return loss
GANFingerprints | GANFingerprints-master/classifier/misc.py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import os
import sys
import glob
import datetime
import pickle
import re
import numpy as np
from collections import OrderedDict
import scipy.ndimage
import PIL.Image
import config
import dataset
import legacy
#----------------------------------------------------------------------------
# Convenience wrappers for pickle that are able to load data produced by
# older versions of the code.
def load_pkl(filename):
    """Load a pickle, transparently handling legacy Theano-era files."""
    with open(filename, 'rb') as f:
        unpickler = legacy.LegacyUnpickler(f, encoding='latin1')
        return unpickler.load()
def save_pkl(obj, filename):
    """Serialize *obj* to *filename* using the highest pickle protocol."""
    with open(filename, 'wb') as f:
        pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)
#----------------------------------------------------------------------------
# Image utils.
def adjust_dynamic_range(data, drange_in, drange_out):
if drange_in != drange_out:
scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (np.float32(drange_in[1]) - np.float32(drange_in[0]))
bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale)
data = data * scale + bias
return data
def create_image_grid(images, grid_size=None):
    """Tile a batch of images (NHW or NCHW) into a single grid image."""
    assert images.ndim == 3 or images.ndim == 4
    num = images.shape[0]
    img_w, img_h = images.shape[-1], images.shape[-2]
    if grid_size is not None:
        grid_w, grid_h = tuple(grid_size)
    else:
        # Near-square layout: ceil(sqrt) columns, just enough rows.
        grid_w = max(int(np.ceil(np.sqrt(num))), 1)
        grid_h = max((num - 1) // grid_w + 1, 1)
    out_shape = list(images.shape[1:-2]) + [grid_h * img_h, grid_w * img_w]
    grid = np.zeros(out_shape, dtype=images.dtype)
    for idx in range(num):
        col = (idx % grid_w) * img_w
        row = (idx // grid_w) * img_h
        grid[..., row : row + img_h, col : col + img_w] = images[idx]
    return grid
def convert_to_pil_image(image, drange=[0,1]):
    """Convert a HW / CHW array in range *drange* to an 8-bit PIL image."""
    assert image.ndim == 2 or image.ndim == 3
    if image.ndim == 3:
        if image.shape[0] == 1:
            image = image[0] # grayscale CHW => HW
        else:
            image = image.transpose(1, 2, 0) # CHW -> HWC
    image = adjust_dynamic_range(image, drange, [0,255])
    image = np.rint(image).clip(0, 255).astype(np.uint8)
    # HWC arrays become RGB; plain HW arrays become 8-bit grayscale.
    mode = 'RGB' if image.ndim == 3 else 'L'
    return PIL.Image.fromarray(image, mode)
def save_image(image, filename, drange=[0,1], quality=95):
    """Write *image* to *filename*; JPEG output is saved optimized at *quality*."""
    pil_img = convert_to_pil_image(image, drange)
    if '.jpg' not in filename:
        pil_img.save(filename)
    else:
        pil_img.save(filename,"JPEG", quality=quality, optimize=True)
def save_image_grid(images, filename, drange=[0,1], grid_size=None):
    """Tile *images* into a grid and save the result to *filename*."""
    grid = create_image_grid(images, grid_size)
    convert_to_pil_image(grid, drange).save(filename)
#----------------------------------------------------------------------------
# Logging of stdout and stderr to a file.
class OutputLogger(object):
    """Accumulates writes in memory until a log file is attached, then mirrors into it."""
    def __init__(self):
        self.file = None    # open log file, once set_log_file() is called
        self.buffer = ''    # text captured before the file exists

    def set_log_file(self, filename, mode='wt'):
        assert self.file is None
        self.file = open(filename, mode)
        if self.buffer is not None:
            # Flush everything captured so far into the new file.
            self.file.write(self.buffer)
            self.buffer = None

    def write(self, data):
        if self.file is not None:
            self.file.write(data)
        if self.buffer is not None:
            self.buffer += data

    def flush(self):
        if self.file is not None:
            self.file.flush()
class TeeOutputStream(object):
    """File-like object that duplicates every write to several child streams."""
    def __init__(self, child_streams, autoflush=False):
        self.child_streams = child_streams
        self.autoflush = autoflush

    def write(self, data):
        for child in self.child_streams:
            child.write(data)
        if self.autoflush:
            self.flush()

    def flush(self):
        for child in self.child_streams:
            child.flush()
# Singleton logger instance; created lazily by init_output_logging().
output_logger = None
def init_output_logging():
    """Install tee streams so stdout/stderr are mirrored into output_logger (idempotent)."""
    global output_logger
    if output_logger is None:
        output_logger = OutputLogger()
        sys.stdout = TeeOutputStream([sys.stdout, output_logger], autoflush=True)
        sys.stderr = TeeOutputStream([sys.stderr, output_logger], autoflush=True)
def set_output_log_file(filename, mode='wt'):
    """Route captured stdout/stderr into *filename* (no-op before init_output_logging)."""
    if output_logger is None:
        return
    output_logger.set_log_file(filename, mode)
#----------------------------------------------------------------------------
# Reporting results.
def create_result_subdir(result_dir, desc):
    """Create a uniquely numbered '<run_id>-<desc>' subdirectory under *result_dir*.

    Scans existing entries for the highest numeric prefix, creates the next one
    (retrying when another process grabs the same id first), points the output
    log at it, and dumps the config module into config.txt.

    Returns the path of the created subdirectory.
    """
    # Select run ID and create subdir.
    while True:
        run_id = 0
        for fname in glob.glob(os.path.join(result_dir, '*')):
            try:
                fbase = os.path.basename(fname)
                ford = int(fbase[:fbase.find('-')])
                run_id = max(run_id, ford + 1)
            except ValueError:
                pass  # entries without a numeric prefix are ignored
        result_subdir = os.path.join(result_dir, '%03d-%s' % (run_id, desc))
        try:
            os.makedirs(result_subdir)
            break
        except OSError:
            # Lost a race for this run_id => rescan and pick a new one.
            if os.path.isdir(result_subdir):
                continue
            raise
    print("Saving results to", result_subdir)
    set_output_log_file(os.path.join(result_subdir, 'log.txt'))

    # Export config. Best-effort: a failure here must not abort the run, but
    # unlike the previous bare 'except' this no longer swallows SystemExit /
    # KeyboardInterrupt.
    try:
        with open(os.path.join(result_subdir, 'config.txt'), 'wt') as fout:
            for k, v in sorted(config.__dict__.items()):
                if not k.startswith('_'):
                    fout.write("%s = %s\n" % (k, str(v)))
    except Exception:
        pass
    return result_subdir
def format_time(seconds):
    """Render a duration in seconds as a compact string ('5s', '1m 05s', '1d 01h 01m')."""
    total = int(np.rint(seconds))
    if total < 60:
        return '%ds' % (total)
    if total < 60*60:
        return '%dm %02ds' % (total // 60, total % 60)
    if total < 24*60*60:
        return '%dh %02dm %02ds' % (total // (60*60), (total // 60) % 60, total % 60)
    return '%dd %02dh %02dm' % (total // (24*60*60), (total // (60*60)) % 24, (total // 60) % 60)
def time_to_seconds(string):
    """Parse a format_time()-style duration into seconds.

    Accepts any combination of 'Nd', 'Nh', 'Nm', 'Ns' components, e.g.
    '1d 02h 03m', '4m 05s', '42s'. Absent components count as zero.

    Returns the total duration as a float.

    Fixes a crash in the original implementation, whose fixed
    two-characters-before-the-unit slicing raised ValueError for a
    single-digit component at the start of the string (e.g. '5s', '7m 30s').
    """
    unit_seconds = {'d': 24*60*60, 'h': 60*60, 'm': 60, 's': 1}
    total = 0
    # Each match is (digits, unit); whitespace between them is tolerated.
    for value, unit in re.findall(r'(\d+)\s*([dhms])', string):
        total += int(value) * unit_seconds[unit]
    return float(total)
#----------------------------------------------------------------------------
# Locating results.
def locate_result_subdir(run_id_or_result_subdir):
    """Resolve a run id (int) or subdir name/path to an existing result directory.

    Search order: the argument itself as a path, then exact and
    '<run_id>-*' prefix matches under config.result_dir and its
    'results' / 'networks' subdirectories.

    Raises IOError when nothing matches unambiguously.
    """
    if isinstance(run_id_or_result_subdir, str) and os.path.isdir(run_id_or_result_subdir):
        return run_id_or_result_subdir
    searchdirs = []
    searchdirs += ['']
    searchdirs += ['results']
    searchdirs += ['networks']
    for searchdir in searchdirs:
        # Exact name match first...
        dir = config.result_dir if searchdir == '' else os.path.join(config.result_dir, searchdir)
        dir = os.path.join(dir, str(run_id_or_result_subdir))
        if os.path.isdir(dir):
            return dir
        # ...then a zero-padded '<run_id>-*' prefix match, accepted only if unique.
        prefix = '%03d' % run_id_or_result_subdir if isinstance(run_id_or_result_subdir, int) else str(run_id_or_result_subdir)
        dirs = sorted(glob.glob(os.path.join(config.result_dir, searchdir, prefix + '-*')))
        dirs = [dir for dir in dirs if os.path.isdir(dir)]
        if len(dirs) == 1:
            return dirs[0]
    raise IOError('Cannot locate result subdir for run', run_id_or_result_subdir)
def locate_result_subdir_without_run_id():
    """Return the most recent '<id>-<desc>' result subdir, searching the usual locations."""
    for searchdir in ['', 'results', 'networks']:
        base = config.result_dir if searchdir == '' else os.path.join(config.result_dir, searchdir)
        candidates = sorted(glob.glob(os.path.join(base, '*-*')))
        candidates = [c for c in candidates if os.path.isdir(c)]
        if candidates:
            # Sorted order means the last entry has the highest run id.
            return candidates[-1]
    raise IOError('Cannot locate result subdir for run')
def list_network_pkls(run_id_or_result_subdir, include_final=True):
    """List 'network-*.pkl' files of a run, with network-final.pkl moved to the end."""
    if run_id_or_result_subdir is None:
        subdir = locate_result_subdir_without_run_id()
    else:
        subdir = locate_result_subdir(run_id_or_result_subdir)
    pkls = sorted(glob.glob(os.path.join(subdir, 'network-*.pkl')))
    # 'network-final.pkl' sorts first alphabetically; move it last (or drop it).
    if pkls and os.path.basename(pkls[0]) == 'network-final.pkl':
        final = pkls.pop(0)
        if include_final:
            pkls.append(final)
    return pkls
def locate_network_pkl(run_id_or_result_subdir_or_network_pkl=None, snapshot=None):
    """Resolve a direct pkl path, run id, or result subdir to a network pickle file."""
    arg = run_id_or_result_subdir_or_network_pkl
    if isinstance(arg, str) and os.path.isfile(arg):
        return arg  # already a path to a pickle
    pkls = list_network_pkls(arg)
    if len(pkls) >= 1 and snapshot is None:
        return pkls[-1]  # newest snapshot (final one, if present)
    # Otherwise look for the snapshot whose trailing '-<number>' matches.
    for pkl in pkls:
        try:
            stem = os.path.splitext(os.path.basename(pkl))[0]
            if int(stem.split('-')[-1]) == snapshot:
                return pkl
        except (ValueError, IndexError):
            pass
    raise IOError('Cannot locate network pkl for snapshot', snapshot)
def get_id_string_for_network_pkl(network_pkl):
    """Build a short id from the last two path components, e.g. '000-desc-network-final'."""
    normalized = network_pkl.replace('.pkl', '').replace('\\', '/')
    parts = normalized.split('/')
    return '-'.join(parts[-2:])
def resume_kimg_time(network_pkl):
    """Recover the (kimg, seconds) training progress for a snapshot pickle.

    The last 6 characters of the snapshot's basename are parsed as the integer
    kimg counter, then the run's log.txt is scanned for the matching tick line
    to read the fractional kimg value and the elapsed wall-clock time.
    """
    path, file = os.path.split(network_pkl)
    file = os.path.splitext(file)[0]
    # Assumes names like 'network-snapshot-012345' -- TODO confirm naming scheme.
    kimg = str(int(file[-6:]))
    with open('%s/log.txt' % path, 'r') as f:
        lines = f.readlines()
    for line in lines:
        if 'tick' in line and 'kimg' in line and 'minibatch' in line and 'time' in line and 'sec/tick' in line and 'sec/kimg' in line and 'maintenance' in line and 'maintenance' in line and kimg in line:
            idx = line.find(kimg)
            # Two extra characters after the integer -- presumably '.N'; verify log format.
            kimg = float(line[idx:idx+len(kimg)+2])
            idx = line.find('time')
            # Fixed 12-char field after 'time ' -- assumes format_time padding; confirm against the train loop's logging.
            t = line[idx+5:idx+5+12]
            s = time_to_seconds(t)
            break
    # NOTE(review): if no log line matches, 's' is unbound and this raises NameError.
    return kimg, s
#----------------------------------------------------------------------------
# Loading and using trained networks.
def load_network_pkl(run_id_or_result_subdir_or_network_pkl=None, snapshot=None):
    """Locate and unpickle a network snapshot (see locate_network_pkl for lookup rules)."""
    pkl_path = locate_network_pkl(run_id_or_result_subdir_or_network_pkl, snapshot)
    return load_pkl(pkl_path)
def random_latents(num_latents, E_zg, E_zl):
    """Draw standard-normal global/local latent batches shaped after the encoders' outputs."""
    zg = np.random.normal(size=(num_latents, *E_zg.output_shape[1:])).astype(np.float32)
    zl = np.random.normal(size=(num_latents, *E_zl.output_shape[1:])).astype(np.float32)
    return zg, zl
def load_dataset_for_previous_run(run_id, **kwargs): # => dataset_obj, mirror_augment
    """Re-create the dataset object an earlier run used, from its config.txt.

    Extra keyword arguments override the recorded dataset options.
    Returns (dataset_obj, mirror_augment).
    """
    result_subdir = locate_result_subdir(run_id)
    # Parse config.txt.
    parsed_cfg = dict()
    with open(os.path.join(result_subdir, 'config.txt'), 'rt') as f:
        for line in f:
            if line.startswith('dataset =') or line.startswith('train ='):
                # NOTE(review): exec of lines from the run's own config.txt -- a
                # trusted local file, but never point this at untrusted input.
                exec(line, parsed_cfg, parsed_cfg)
    dataset_cfg = parsed_cfg.get('dataset', dict())
    train_cfg = parsed_cfg.get('train', dict())
    mirror_augment = train_cfg.get('mirror_augment', False)
    # Handle legacy options.
    if 'h5_path' in dataset_cfg:
        dataset_cfg['tfrecord_dir'] = dataset_cfg.pop('h5_path').replace('.h5', '')
    if 'mirror_augment' in dataset_cfg:
        mirror_augment = dataset_cfg.pop('mirror_augment')
    if 'max_labels' in dataset_cfg:
        v = dataset_cfg.pop('max_labels')
        if v is None: v = 0
        if v == 'all': v = 'full'
        dataset_cfg['max_label_size'] = v
    if 'max_images' in dataset_cfg:
        dataset_cfg.pop('max_images')
    # Handle legacy dataset names (renames disabled; kept for reference).
    v = dataset_cfg['tfrecord_dir']
    #v = v.replace('-32x32', '').replace('-32', '')
    #v = v.replace('-128x128', '').replace('-128', '')
    #v = v.replace('-256x256', '').replace('-256', '')
    #v = v.replace('-1024x1024', '').replace('-1024', '')
    #v = v.replace('celeba-hq', 'celebahq')
    #v = v.replace('cifar-10', 'cifar10')
    #v = v.replace('cifar-100', 'cifar100')
    #v = v.replace('mnist-rgb', 'mnistrgb')
    #v = re.sub('lsun-100k-([^-]*)', 'lsun-\\1-100k', v)
    #v = re.sub('lsun-full-([^-]*)', 'lsun-\\1-full', v)
    dataset_cfg['tfrecord_dir'] = v
    # Load dataset.
    dataset_cfg.update(kwargs)
    dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **dataset_cfg)
    return dataset_obj, mirror_augment
def apply_mirror_augment(minibatch):
    """Horizontally flip a random half of an NCHW minibatch (returns a copy)."""
    flip = np.random.rand(minibatch.shape[0]) < 0.5
    out = np.array(minibatch)            # copy so the caller's batch is untouched
    out[flip] = out[flip, :, :, ::-1]    # reverse the width (last) axis
    return out
#----------------------------------------------------------------------------
# Text labels.
# Cache of rendered (alpha, glow) label masks; evicted LRU by setup_text_label().
_text_label_cache = OrderedDict()
def draw_text_label(img, text, x, y, alignx=0.5, aligny=0.5, color=255, opacity=1.0, glow_opacity=1.0, **kwargs):
    """Composite a rendered text label (with glow) onto an HWC image array; returns a copy."""
    color = np.array(color).flatten().astype(np.float32)
    assert img.ndim == 3 and img.shape[2] == color.size or color.size == 1
    alpha, glow = setup_text_label(text, **kwargs)
    # Anchor position per alignx/aligny, then clip the label rect to the image bounds.
    xx, yy = int(np.rint(x - alpha.shape[1] * alignx)), int(np.rint(y - alpha.shape[0] * aligny))
    xb, yb = max(-xx, 0), max(-yy, 0)
    xe, ye = min(alpha.shape[1], img.shape[1] - xx), min(alpha.shape[0], img.shape[0] - yy)
    img = np.array(img) # copy so the caller's image is untouched
    # 'slice' shadows the builtin; it is a writable view into the copy.
    slice = img[yy+yb : yy+ye, xx+xb : xx+xe, :]
    # Darken by the combined text/glow coverage, then add the colored text alpha on top.
    slice[:] = slice * (1.0 - (1.0 - (1.0 - alpha[yb:ye, xb:xe]) * (1.0 - glow[yb:ye, xb:xe] * glow_opacity)) * opacity)[:, :, np.newaxis]
    slice[:] = slice + alpha[yb:ye, xb:xe, np.newaxis] * (color * opacity)[np.newaxis, np.newaxis, :]
    return img
def setup_text_label(text, font='Calibri', fontsize=32, padding=6, glow_size=2.0, glow_coef=3.0, glow_exp=2.0, cache_size=100): # => (alpha, glow)
    """Render *text* to an (alpha, glow) mask pair, memoized in an LRU cache keyed by all render parameters."""
    # Lookup from cache.
    key = (text, font, fontsize, padding, glow_size, glow_coef, glow_exp)
    if key in _text_label_cache:
        value = _text_label_cache[key]
        del _text_label_cache[key] # LRU policy: re-insert to mark as most recently used
        _text_label_cache[key] = value
        return value
    # Limit cache size by dropping least-recently-used entries.
    while len(_text_label_cache) >= cache_size:
        _text_label_cache.popitem(last=False)
    # Render text. Imported lazily so moviepy is only required when labels are drawn.
    import moviepy.editor # pip install moviepy
    alpha = moviepy.editor.TextClip(text, font=font, fontsize=fontsize).mask.make_frame(0)
    alpha = np.pad(alpha, padding, mode='constant', constant_values=0.0)
    # Glow = blurred alpha, boosted by glow_coef and shaped by glow_exp.
    glow = scipy.ndimage.gaussian_filter(alpha, glow_size)
    glow = 1.0 - np.maximum(1.0 - glow * glow_coef, 0.0) ** glow_exp
    # Add to cache.
    value = (alpha, glow)
    _text_label_cache[key] = value
    return value
#----------------------------------------------------------------------------
| 14,955 | 36.111663 | 177 | py |
GANFingerprints | GANFingerprints-master/classifier/dataset.py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import os
import glob
import numpy as np
import tensorflow as tf
import tfutil
#----------------------------------------------------------------------------
# Parse individual image from a tfrecords file.
def parse_tfrecord_tf(record):
    """Graph-mode parser: decode one serialized tf.Example into a uint8 image tensor
    reshaped to the 3-element 'shape' stored alongside it."""
    features = tf.parse_single_example(record, features={
        'shape': tf.FixedLenFeature([3], tf.int64),
        'data': tf.FixedLenFeature([], tf.string)})
    data = tf.decode_raw(features['data'], tf.uint8)
    return tf.reshape(data, features['shape'])
def parse_tfrecord_np(record):
    """Decode one serialized tf.Example (bytes) into a uint8 numpy array of its stored shape."""
    ex = tf.train.Example()
    ex.ParseFromString(record)
    shape = ex.features.feature['shape'].int64_list.value
    data = ex.features.feature['data'].bytes_list.value[0]
    # np.fromstring is deprecated; np.frombuffer is the supported equivalent.
    # .copy() keeps the result writable, matching fromstring's semantics
    # (frombuffer alone returns a read-only view of the bytes object).
    return np.frombuffer(data, np.uint8).copy().reshape(shape)
#----------------------------------------------------------------------------
# Dataset class that loads data from tfrecords files.
class TFRecordDataset:
    """Streams (image, label) minibatches from a directory of .tfrecords files.

    One tfrecords file per level-of-detail (LOD); labels come from an optional
    '*.labels' numpy file. Minibatch size and LOD are selected via configure().
    """
    def __init__(self,
        tfrecord_dir, # Directory containing a collection of tfrecords files.
        resolution = None, # Dataset resolution, None = autodetect.
        label_file = None, # Relative path of the labels file, None = autodetect.
        max_label_size = 0, # 0 = no labels, 'full' = full labels, <int> = N first label components.
        repeat = True, # Repeat dataset indefinitely.
        shuffle_mb = 4096, # Shuffle data within specified window (megabytes), 0 = disable shuffling.
        prefetch_mb = 2048, # Amount of data to prefetch (megabytes), 0 = disable prefetching.
        buffer_mb = 256, # Read buffer size (megabytes).
        num_threads = 2): # Number of concurrent threads.
        self.tfrecord_dir = tfrecord_dir
        self.resolution = None
        self.resolution_log2 = None
        self.shape = [] # [channel, height, width]
        self.dtype = 'uint8'
        self.dynamic_range = [0, 255]
        self.label_file = label_file
        self.label_size = None # [component]
        self.label_dtype = None
        self._np_labels = None
        self._tf_minibatch_in = None
        self._tf_labels_var = None
        self._tf_labels_dataset = None
        self._tf_datasets = dict()
        self._tf_iterator = None
        self._tf_init_ops = dict()
        self._tf_minibatch_np = None
        self._cur_minibatch = -1
        self._cur_lod = -1
        # List tfrecords files and inspect their shapes.
        assert os.path.isdir(self.tfrecord_dir)
        tfr_files = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.tfrecords')))
        assert len(tfr_files) >= 1
        tfr_shapes = []
        for tfr_file in tfr_files:
            tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
            # Only the first record of each file is inspected; the break assumes
            # all records in a file share its shape.
            for record in tf.python_io.tf_record_iterator(tfr_file, tfr_opt):
                tfr_shapes.append(parse_tfrecord_np(record).shape)
                break
        # Autodetect label filename.
        if self.label_file is None:
            guess = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.labels')))
            if len(guess):
                self.label_file = guess[0]
        elif not os.path.isfile(self.label_file):
            guess = os.path.join(self.tfrecord_dir, self.label_file)
            if os.path.isfile(guess):
                self.label_file = guess
        # Determine shape and resolution.
        max_shape = max(tfr_shapes, key=lambda shape: np.prod(shape))
        self.resolution = resolution if resolution is not None else max_shape[1]
        self.resolution_log2 = int(np.log2(self.resolution))
        self.shape = [max_shape[0], self.resolution, self.resolution]
        tfr_lods = [self.resolution_log2 - int(np.log2(shape[1])) for shape in tfr_shapes]
        assert all(shape[0] == max_shape[0] for shape in tfr_shapes)
        assert all(shape[1] == shape[2] for shape in tfr_shapes)
        assert all(shape[1] == self.resolution // (2**lod) for shape, lod in zip(tfr_shapes, tfr_lods))
        #assert all(lod in tfr_lods for lod in range(self.resolution_log2 - 1))
        # Load labels.
        assert max_label_size == 'full' or max_label_size >= 0
        self._np_labels = np.zeros([1<<20, 0], dtype=np.float32)
        if self.label_file is not None and max_label_size != 0:
            self._np_labels = np.load(self.label_file)
            assert self._np_labels.ndim == 2
        if max_label_size != 'full' and self._np_labels.shape[1] > max_label_size:
            self._np_labels = self._np_labels[:, :max_label_size]
        self.label_size = self._np_labels.shape[1]
        self.label_dtype = self._np_labels.dtype.name
        # Build TF expressions.
        with tf.name_scope('Dataset'), tf.device('/cpu:0'):
            self._tf_minibatch_in = tf.placeholder(tf.int64, name='minibatch_in', shape=[])
            tf_labels_init = tf.zeros(self._np_labels.shape, self._np_labels.dtype)
            self._tf_labels_var = tf.Variable(tf_labels_init, name='labels_var')
            tfutil.set_vars({self._tf_labels_var: self._np_labels})
            self._tf_labels_dataset = tf.data.Dataset.from_tensor_slices(self._tf_labels_var)
            # One tf.data pipeline per LOD; shuffle/prefetch windows are converted
            # from megabytes to item counts via bytes_per_item.
            for tfr_file, tfr_shape, tfr_lod in zip(tfr_files, tfr_shapes, tfr_lods):
                if tfr_lod < 0:
                    continue
                dset = tf.data.TFRecordDataset(tfr_file, compression_type='', buffer_size=buffer_mb<<20)
                dset = dset.map(parse_tfrecord_tf, num_parallel_calls=num_threads)
                dset = tf.data.Dataset.zip((dset, self._tf_labels_dataset))
                bytes_per_item = np.prod(tfr_shape) * np.dtype(self.dtype).itemsize
                if shuffle_mb > 0:
                    dset = dset.shuffle(((shuffle_mb << 20) - 1) // bytes_per_item + 1)
                if repeat:
                    dset = dset.repeat()
                if prefetch_mb > 0:
                    dset = dset.prefetch(((prefetch_mb << 20) - 1) // bytes_per_item + 1)
                dset = dset.batch(self._tf_minibatch_in)
                self._tf_datasets[tfr_lod] = dset
            self._tf_iterator = tf.data.Iterator.from_structure(self._tf_datasets[0].output_types, self._tf_datasets[0].output_shapes)
            self._tf_init_ops = {lod: self._tf_iterator.make_initializer(dset) for lod, dset in self._tf_datasets.items()}
    # Use the given minibatch size and level-of-detail for the data returned by get_minibatch_tf().
    def configure(self, minibatch_size, lod=0):
        lod = int(np.floor(lod))
        assert minibatch_size >= 1 and lod in self._tf_datasets
        # Re-initialize the iterator only when the configuration actually changes.
        if self._cur_minibatch != minibatch_size or self._cur_lod != lod:
            self._tf_init_ops[lod].run({self._tf_minibatch_in: minibatch_size})
            self._cur_minibatch = minibatch_size
            self._cur_lod = lod
    # Get next minibatch as TensorFlow expressions.
    def get_minibatch_tf(self): # => images, labels
        return self._tf_iterator.get_next()
    # Get next minibatch as NumPy arrays.
    def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels
        self.configure(minibatch_size, lod)
        if self._tf_minibatch_np is None:
            self._tf_minibatch_np = self.get_minibatch_tf()
        return tfutil.run(self._tf_minibatch_np)
    # Get random labels as TensorFlow expression.
    def get_random_labels_tf(self, minibatch_size): # => labels
        if self.label_size > 0:
            return tf.gather(self._tf_labels_var, tf.random_uniform([minibatch_size], 0, self._np_labels.shape[0], dtype=tf.int32))
        else:
            return tf.zeros([minibatch_size, 0], self.label_dtype)
    # Get random labels as NumPy array.
    def get_random_labels_np(self, minibatch_size): # => labels
        if self.label_size > 0:
            return self._np_labels[np.random.randint(self._np_labels.shape[0], size=[minibatch_size])]
        else:
            return np.zeros([minibatch_size, 0], self.label_dtype)
#----------------------------------------------------------------------------
# Base class for datasets that are generated on the fly.
class SyntheticDataset:
    """Base class for datasets generated on the fly.

    Subclasses override _generate_images() / _generate_labels(); the base
    implementation yields zeros of the configured shape and label size.
    """
    def __init__(self, resolution=1024, num_channels=3, dtype='uint8', dynamic_range=[0,255], label_size=0, label_dtype='float32'):
        self.resolution = resolution
        self.resolution_log2 = int(np.log2(resolution))
        self.shape = [num_channels, resolution, resolution]
        self.dtype = dtype
        self.dynamic_range = dynamic_range
        self.label_size = label_size
        self.label_dtype = label_dtype
        self._tf_minibatch_var = None
        self._tf_lod_var = None
        self._tf_minibatch_np = None
        self._tf_labels_np = None
        assert self.resolution == 2 ** self.resolution_log2
        with tf.name_scope('Dataset'):
            self._tf_minibatch_var = tf.Variable(np.int32(0), name='minibatch_var')
            self._tf_lod_var = tf.Variable(np.int32(0), name='lod_var')
    def configure(self, minibatch_size, lod=0):
        """Select the minibatch size and level-of-detail used by the TF expressions."""
        lod = int(np.floor(lod))
        assert minibatch_size >= 1 and lod >= 0 and lod <= self.resolution_log2
        tfutil.set_vars({self._tf_minibatch_var: minibatch_size, self._tf_lod_var: lod})
    def get_minibatch_tf(self): # => images, labels
        with tf.name_scope('SyntheticDataset'):
            # Each LOD step halves the spatial resolution.
            shrink = tf.cast(2.0 ** tf.cast(self._tf_lod_var, tf.float32), tf.int32)
            shape = [self.shape[0], self.shape[1] // shrink, self.shape[2] // shrink]
            images = self._generate_images(self._tf_minibatch_var, self._tf_lod_var, shape)
            labels = self._generate_labels(self._tf_minibatch_var)
            return images, labels
    def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels
        self.configure(minibatch_size, lod)
        if self._tf_minibatch_np is None:
            self._tf_minibatch_np = self.get_minibatch_tf()
        return tfutil.run(self._tf_minibatch_np)
    def get_random_labels_tf(self, minibatch_size): # => labels
        with tf.name_scope('SyntheticDataset'):
            return self._generate_labels(minibatch_size)
    def get_random_labels_np(self, minibatch_size): # => labels
        self.configure(minibatch_size)
        if self._tf_labels_np is None:
            # BUG FIX: get_random_labels_tf() requires minibatch_size; calling it
            # without arguments raised TypeError.
            self._tf_labels_np = self.get_random_labels_tf(minibatch_size)
        return tfutil.run(self._tf_labels_np)
    def _generate_images(self, minibatch, lod, shape): # to be overridden by subclasses
        return tf.zeros([minibatch] + shape, self.dtype)
    def _generate_labels(self, minibatch): # to be overridden by subclasses
        return tf.zeros([minibatch, self.label_size], self.label_dtype)
#----------------------------------------------------------------------------
# Helper func for constructing a dataset object using the given options.
def load_dataset(class_name='dataset.TFRecordDataset', data_dir=None, verbose=False, **kwargs):
    """Instantiate a dataset object by dotted class name, resolving tfrecord_dir under data_dir."""
    opts = dict(kwargs)
    if 'tfrecord_dir' in opts and data_dir is not None:
        opts['tfrecord_dir'] = os.path.join(data_dir, opts['tfrecord_dir'])
    if verbose:
        print('Streaming data using %s...' % class_name)
    dataset_obj = tfutil.import_obj(class_name)(**opts)
    if verbose:
        print('Dataset shape =', np.int32(dataset_obj.shape).tolist())
        print('Dynamic range =', dataset_obj.dynamic_range)
        print('Label size =', dataset_obj.label_size)
    return dataset_obj
#----------------------------------------------------------------------------
| 12,112 | 49.053719 | 134 | py |
GANFingerprints | GANFingerprints-master/classifier/networks.py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import numpy as np
import tensorflow as tf
# NOTE: Do not import any application-specific modules here!
#----------------------------------------------------------------------------
def lerp(a, b, t):
    """Linear interpolation between a and b by factor t (t unclamped)."""
    return a + (b - a) * t
def lerp_clip(a, b, t): return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0) # lerp with t clamped to [0, 1]
# Conditional lambda replacement: the returned thunk evaluates new_lambda when new_cond holds, else cur_lambda.
def cset(cur_lambda, new_cond, new_lambda): return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
#----------------------------------------------------------------------------
# Get/create weight tensor for a convolutional or fully-connected layer.
def get_weight(shape, gain=np.sqrt(2), use_wscale=False, fan_in=None):
    """Create the 'weight' variable for a conv or fully-connected layer.

    With use_wscale=True the variable is initialized to N(0, 1) and multiplied
    by the He-init std as a graph constant at runtime; otherwise the std is
    baked into the initializer directly.
    """
    if fan_in is None: fan_in = np.prod(shape[:-1])
    std = gain / np.sqrt(fan_in) # He init
    if use_wscale:
        wscale = tf.constant(np.float32(std), name='wscale')
        return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal()) * wscale
    else:
        return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal(0, std))
#----------------------------------------------------------------------------
# Fully-connected layer.
def dense(x, fmaps, gain=np.sqrt(2), use_wscale=False):
    """Fully-connected layer without bias; inputs of rank > 2 are flattened first."""
    if len(x.shape) > 2:
        x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
    w = get_weight([x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
    w = tf.cast(w, x.dtype)
    return tf.matmul(x, w)
#----------------------------------------------------------------------------
# Convolutional layer.
def conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
    """2D convolution (NCHW, no bias) with an odd kernel size.

    For kernel > 1 the input is explicitly padded with REFLECT mode instead of
    the zero padding that 'SAME' would apply.
    """
    assert kernel >= 1 and kernel % 2 == 1
    w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
    w = tf.cast(w, x.dtype)
    if kernel == 1:
        return tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='VALID', data_format='NCHW')
    else:
        x = tf.pad(x, paddings=[[0, 0],[0, 0],[kernel//2, kernel//2],[kernel//2, kernel//2]], mode='REFLECT')
        return tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='VALID', data_format='NCHW')
#----------------------------------------------------------------------------
# Apply bias to the given activation tensor.
def apply_bias(x):
    """Add a learned per-channel bias to a 2D [N, C] or 4D [N, C, H, W] tensor."""
    b = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros())
    b = tf.cast(b, x.dtype)
    if len(x.shape) == 2:
        return x + b
    else:
        # Reshape to [1, C, 1, 1] so the bias broadcasts over batch and space.
        return x + tf.reshape(b, [1, -1, 1, 1])
#----------------------------------------------------------------------------
# Leaky ReLU activation. Same as tf.nn.leaky_relu, but supports FP16.
def leaky_relu(x, alpha=0.2):
    """Leaky ReLU activation. Unlike tf.nn.leaky_relu, this also supports FP16."""
    with tf.name_scope('LeakyRelu'):
        slope = tf.constant(alpha, dtype=x.dtype, name='alpha')
        return tf.maximum(x * slope, x)
#----------------------------------------------------------------------------
# Nearest-neighbor upscaling layer.
def upscale2d(x, factor=2):
    """Nearest-neighbor upscale of an NCHW tensor by an integer factor."""
    assert isinstance(factor, int) and factor >= 1
    if factor == 1: return x
    with tf.variable_scope('Upscale2D'):
        s = x.shape
        # Insert singleton axes after H and W, tile them, then fold back in:
        # [N,C,H,W] -> [N,C,H,1,W,1] -> [N,C,H,f,W,f] -> [N,C,H*f,W*f].
        x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
        x = tf.tile(x, [1, 1, 1, factor, 1, factor])
        x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
    return x
#----------------------------------------------------------------------------
# Fused upscale2d + conv2d.
# Faster and uses less memory than performing the operations separately.
def upscale2d_conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
    """Fused 2x upscale + conv2d, implemented as a single transposed convolution.

    Faster and uses less memory than performing the operations separately.
    """
    assert kernel >= 1 and kernel % 2 == 1
    # Transposed-conv kernels are [kH, kW, out_fmaps, in_fmaps]; fan_in is
    # passed explicitly because the default would use the wrong axis.
    w = get_weight([kernel, kernel, fmaps, x.shape[1].value], gain=gain, use_wscale=use_wscale, fan_in=(kernel**2)*x.shape[1].value)
    # Pad and sum four shifted copies of the kernel so the strided transposed
    # conv is equivalent to nearest-neighbor upscale followed by conv2d.
    w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
    w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]])
    w = tf.cast(w, x.dtype)
    os = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2]  # output shape [N, fmaps, 2H, 2W]
    return tf.nn.conv2d_transpose(x, w, os, strides=[1,1,2,2], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# upscale2d for RGB image by upsampling + Gaussian smoothing
# 5x5 binomial kernel (outer product of [1,4,6,4,1]/16), sum normalized to 1 and
# then scaled by 4 to preserve brightness after zero-insertion upsampling.
gaussian_filter_up = tf.constant(list(np.float32([1,4,6,4,1,4,16,24,16,4,6,24,36,24,6,4,16,24,16,4,1,4,6,4,1])/256.0*4.0), dtype=tf.float32, shape=[5,5,1,1], name='GaussianFilterUp', verify_shape=False)
def upscale2d_rgb_Gaussian(x, factor=2):
    """Upscale an RGB tensor [N, 3, H, W] by zero-insertion + Gaussian smoothing.

    Each factor-of-2 octave inserts zeros between pixels and then smooths each
    of the 3 channels independently with a 5x5 Gaussian (gaussian_filter_up).
    `factor` must be a positive power of two for the octave loop to be exact.
    """
    assert isinstance(factor, int) and factor >= 1
    if factor == 1: return x
    with tf.variable_scope('Upscale2D_RGB_Gaussian'):
        for i in range(int(round(np.log2(factor)))):
            # Prefer the static shape so the reshape sizes are known at graph
            # construction time; fall back to the dynamic shape when a static
            # dimension is undefined. BUGFIX: was a bare `except:`, which also
            # swallowed KeyboardInterrupt/SystemExit.
            try:
                s = x.shape
                x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
            except Exception:
                s = tf.shape(x)
                x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
            # Zero-insertion upsampling: pad a zero after every row/column.
            x = tf.pad(x, paddings=[[0,0],[0,0],[0,0],[0,1],[0,0],[0,1]], mode='CONSTANT')
            x = tf.reshape(x, [-1, s[1], s[2]*2, s[3]*2])
            # Smooth each color channel independently (depthwise-style).
            channel_list = []
            for j in range(3):
                z = tf.pad(x[:,j:j+1,:,:], paddings=[[0,0],[0,0],[2,2],[2,2]], mode='REFLECT')
                channel_list.append(tf.nn.conv2d(z, filter=gaussian_filter_up, strides=[1,1,1,1], padding='VALID', data_format='NCHW', name='GaussianConvUp'))
            x = tf.concat(channel_list, axis=1)
    return x
#----------------------------------------------------------------------------
# Box filter downscaling layer.
def downscale2d(x, factor=2):
    """Box-filter (average-pool) downscaling for NCHW tensors."""
    assert isinstance(factor, int) and factor >= 1
    if factor == 1:
        return x
    with tf.variable_scope('Downscale2D'):
        pool_shape = [1, 1, factor, factor]
        # NOTE: avg_pool with NCHW requires tf_config['graph_options.place_pruned_graph'] = True
        return tf.nn.avg_pool(x, ksize=pool_shape, strides=pool_shape, padding='VALID', data_format='NCHW')
#----------------------------------------------------------------------------
# Fused conv2d + downscale2d.
# Faster and uses less memory than performing the operations separately.
def conv2d_downscale2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
    """Fused conv2d + 2x box-filter downscale via a single strided convolution.

    Faster and uses less memory than performing the operations separately.
    """
    assert kernel >= 1 and kernel % 2 == 1
    w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
    # Sum four shifted copies of the kernel and average (* 0.25) so the
    # stride-2 conv is equivalent to conv2d followed by 2x average pooling.
    w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
    w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25
    w = tf.cast(w, x.dtype)
    return tf.nn.conv2d(x, w, strides=[1,1,2,2], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# downscale2d for RGB image by Gaussian smoothing + downsampling
# 5x5 binomial kernel (outer product of [1,4,6,4,1]/16), sum normalized to 1;
# no extra scaling since downscaling keeps overall brightness.
gaussian_filter_down = tf.constant(list(np.float32([1,4,6,4,1,4,16,24,16,4,6,24,36,24,6,4,16,24,16,4,1,4,6,4,1])/256.0), dtype=tf.float32, shape=[5,5,1,1], name='GaussianFilterDown', verify_shape=False)
def downscale2d_rgb_Gaussian(x, factor=2):
    """Downscale an RGB tensor [N, 3, H, W] by Gaussian smoothing + subsampling.

    Each factor-of-2 octave smooths the 3 channels independently with a 5x5
    Gaussian (gaussian_filter_down) and subsamples with stride 2.
    """
    assert isinstance(factor, int) and factor >= 1
    if factor == 1:
        return x
    num_octaves = int(round(np.log2(factor)))
    with tf.variable_scope('Downscale2D_RGB_Gaussian'):
        for _ in range(num_octaves):
            smoothed = []
            for ch in range(3):
                padded = tf.pad(x[:,ch:ch+1,:,:], paddings=[[0,0],[0,0],[2,2],[2,2]], mode='REFLECT')
                smoothed.append(tf.nn.conv2d(padded, filter=gaussian_filter_down, strides=[1,1,2,2], padding='VALID', data_format='NCHW', name='GaussianConvDown'))
            x = tf.concat(smoothed, axis=1)
    return x
#----------------------------------------------------------------------------
# Pixelwise feature vector normalization.
def pixel_norm(x, epsilon=1e-8):
    """Normalize each pixel's feature vector to roughly unit length (pixelnorm)."""
    with tf.variable_scope('PixelNorm'):
        mean_sq = tf.reduce_mean(tf.square(x), axis=1, keepdims=True)
        return x * tf.rsqrt(mean_sq + epsilon)
#----------------------------------------------------------------------------
# Minibatch standard deviation.
def minibatch_stddev_layer(x, group_size=4):
    """Append a minibatch-stddev feature map to x [NCHW].

    Computes the per-group standard deviation of activations, averages it to a
    single scalar per group, and appends it as an extra constant feature map.
    The minibatch size must be divisible by (or smaller than) group_size.
    """
    with tf.variable_scope('MinibatchStddev'):
        group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size.
        s = x.shape # [NCHW] Input shape.
        y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) # [GMCHW] Split minibatch into M groups of size G.
        y = tf.cast(y, tf.float32) # [GMCHW] Cast to FP32.
        y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMCHW] Subtract mean over group.
        y = tf.reduce_mean(tf.square(y), axis=0) # [MCHW] Calc variance over group.
        y = tf.sqrt(y + 1e-8) # [MCHW] Calc stddev over group.
        y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) # [M111] Take average over fmaps and pixels.
        y = tf.cast(y, x.dtype) # [M111] Cast back to original data type.
        y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [N1HW] Replicate over group and pixels.
        return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap.
#----------------------------------------------------------------------------
# Patch-based Classifier network
def C_patch(
    images_in, # Input: Images [minibatch, channel, height, width].
    num_channels = 3, # Number of input color channels. Overridden based on dataset.
    resolution = 128, # Input resolution. Overridden based on dataset.
    label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
    fmap_base = 8192, # Overall multiplier for the number of feature maps.
    fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
    fmap_max = 512, # Maximum number of feature maps in any layer.
    latent_res = 4, # Spatial dimension of the latent vectors; -1 = fully-connected head.
    mode = 'postpool', # postpool means convolution first then pooling; predownscale means pooling first then convolution
    switching_res = 4, # The resolution to separate from subsequent convolution and subsequent pooling
    use_wscale = True, # Enable equalized learning rate?
    mbstd_group_size = 0, # Group size for the minibatch standard deviation layer, 0 = disable.
    dtype = 'float32', # Data type to use for activations and outputs.
    fused_scale = False, # True = use fused conv2d + downscale2d, False = separate downscale2d layers.
    is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. (Unused here.)
    **kwargs): # Ignore unrecognized keyword args.
    """Patch-based classifier network.

    Downscales images_in from `resolution` to `latent_res` one octave at a
    time and emits classification logits with `label_size` outputs. Per the
    branch condition in block(): with mode='postpool' resolutions above
    switching_res use conv blocks and resolutions below use plain pooling;
    with mode='predownscale' it is the other way around. With latent_res == -1
    the head is fully connected; otherwise it is fully convolutional (1x1 convs).
    """
    resolution_log2 = int(np.log2(resolution))
    latent_res_log2 = 2 if latent_res == -1 else int(np.log2(latent_res))
    switching_res_log2 = int(np.log2(switching_res))
    def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)  # fmaps at a given stage
    act = leaky_relu
    images_in.set_shape([None, num_channels, resolution, resolution])
    images_in = tf.cast(images_in, dtype)
    # Building blocks.
    def block(x, res): # res = latent_res_log2..resolution_log2
        with tf.variable_scope('%dx%d' % (2**res, 2**res)):
            if res > latent_res_log2:
                # Intermediate resolution: either a conv block or a pooling step.
                if mode == 'predownscale' and res <= switching_res_log2 or mode == 'postpool' and res > switching_res_log2:
                    with tf.variable_scope('Conv0'):
                        x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))
                    if fused_scale:
                        with tf.variable_scope('Conv1_down'):
                            x = act(apply_bias(conv2d_downscale2d(x, fmaps=nf(res-2), kernel=3, use_wscale=use_wscale)))
                    else:
                        with tf.variable_scope('Conv1'):
                            x = act(apply_bias(conv2d(x, fmaps=nf(res-2), kernel=3, use_wscale=use_wscale)))
                        x = downscale2d(x)
                else:
                    if mode == 'predownscale':
                        x = downscale2d_rgb_Gaussian(x)
                    else:
                        x = downscale2d(x)
            else:
                # Final resolution: optional minibatch-stddev, conv, then the head.
                if mbstd_group_size > 1:
                    x = minibatch_stddev_layer(x, mbstd_group_size)
                with tf.variable_scope('Conv0'):
                    x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))
                # fully connected
                if latent_res == -1:
                    with tf.variable_scope('Dense1'):
                        x = act(apply_bias(dense(x, fmaps=max(nf(res-1), label_size), use_wscale=use_wscale)))
                    with tf.variable_scope('Dense2'):
                        x = apply_bias(dense(x, fmaps=label_size, gain=1, use_wscale=use_wscale))
                # fully convolutional
                else:
                    with tf.variable_scope('Conv1'):
                        x = act(apply_bias(conv2d(x, fmaps=max(nf(res-1), label_size), kernel=1, use_wscale=use_wscale)))
                    with tf.variable_scope('Conv2'):
                        x = apply_bias(conv2d(x, fmaps=label_size, gain=1, kernel=1, use_wscale=use_wscale))
            return x
    x = tf.identity(images_in)
    for res in range(resolution_log2, latent_res_log2-1, -1):
        x = block(x, res)
    labels_out = tf.identity(x)
    assert labels_out.dtype == tf.as_dtype(dtype)
    return labels_out
# --- file boundary (corpus metadata removed: 13,945 chars, avg line 52.43, max line 202, py) ---
# GANFingerprints / GANFingerprints-master/classifier/data_preparation.py
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import os
import glob
import argparse
import numpy as np
import tensorflow as tf
import PIL.Image
import scipy.ndimage
#----------------------------------------------------------------------------
def error(msg):
    """Print an error message and abort the program with exit status 1."""
    text = 'Error: ' + msg
    print(text)
    exit(1)
#----------------------------------------------------------------------------
class TFRecordExporter:
    """Streams images (and optional one-hot labels) into .tfrecords files.

    Writers are created lazily on the first add_image() call; shapes of all
    subsequent images must match the first. Use as a context manager so
    close() flushes the writers even on error.
    """
    def __init__(self, tfrecord_dir, expected_images, print_progress=True, progress_interval=10, Gaussian_down=0):
        self.tfrecord_dir = tfrecord_dir
        self.tfr_prefix = os.path.join(self.tfrecord_dir, os.path.basename(self.tfrecord_dir))
        self.expected_images = expected_images
        self.cur_images = 0
        self.shape = None  # set from the first image added (CHW)
        self.resolution_log2 = None
        self.tfr_writers = []
        self.print_progress = print_progress
        self.progress_interval = progress_interval
        self.Gaussian_down = Gaussian_down
        if self.print_progress:
            print('Creating dataset "%s"' % tfrecord_dir)
        if not os.path.isdir(self.tfrecord_dir):
            os.makedirs(self.tfrecord_dir)
        assert(os.path.isdir(self.tfrecord_dir))
    def close(self):
        # Flush and release all writers; safe to call more than once.
        if self.print_progress:
            print('%-40s\r' % 'Flushing data...', end='', flush=True)
        for tfr_writer in self.tfr_writers:
            tfr_writer.close()
        self.tfr_writers = []
        if self.print_progress:
            print('%-40s\r' % '', end='', flush=True)
            print('Added %d images.' % self.cur_images)
    def choose_shuffled_order(self): # Note: Images and labels must be added in shuffled order.
        # Fixed seed so the same shuffle can be reproduced for the labels.
        order = np.arange(self.expected_images)
        np.random.RandomState(123).shuffle(order)
        return order
    def add_image(self, img):
        # img: uint8-compatible array of shape [C, H, W] with C in {1, 3} and
        # H == W a power of two.
        if self.print_progress and self.cur_images % self.progress_interval == 0:
            print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True)
        if self.shape is None:
            # First image: lock in the shape and open the writer(s).
            self.shape = img.shape
            self.resolution_log2 = int(np.log2(self.shape[1]))
            assert self.shape[0] in [1, 3]
            assert self.shape[1] == self.shape[2]
            assert self.shape[1] == 2**self.resolution_log2
            tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
            #for lod in range(self.resolution_log2 - 1):
            for lod in [0]:  # only the full-resolution record is written
                tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod)
                self.tfr_writers.append(tf.python_io.TFRecordWriter(tfr_file, tfr_opt))
        assert img.shape == self.shape
        for lod, tfr_writer in enumerate(self.tfr_writers):
            if lod:
                # Downscale one octave per additional writer. Dead code while
                # only lod 0 is written above.
                # NOTE(review): `gaussian_filter` is not defined in this file;
                # this branch would raise NameError if Gaussian_down were used
                # with multiple lods — confirm before re-enabling.
                img = img.astype(np.float32)
                if self.Gaussian_down:
                    img = scipy.ndimage.convolve(img, gaussian_filter[np.newaxis, :, :], mode='mirror')[:, ::2, ::2]
                else:
                    img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25
            quant = np.rint(img).clip(0, 255).astype(np.uint8)
            ex = tf.train.Example(features=tf.train.Features(feature={
                'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)),
                'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))}))
            tfr_writer.write(ex.SerializeToString())
        self.cur_images += 1
    def add_labels(self, labels):
        # labels: float array [cur_images, label_size], in the same order as the images.
        if self.print_progress:
            print('%-40s\r' % 'Saving labels...', end='', flush=True)
        assert labels.shape[0] == self.cur_images
        with open(self.tfr_prefix + '-rxx.labels', 'wb') as f:
            np.save(f, labels.astype(np.float32))
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.close()
#----------------------------------------------------------------------------
def data_preparation(image_dir, tfrecord_dir, resolution=128, shuffle=1, export_labels=1, percentage_samples=100):
    """Convert a directory tree of PNG images into a TFRecord dataset.

    image_dir must contain one subdirectory per data source (the real dataset
    or a GAN); the subdirectory index becomes the class label. Images are
    resized to `resolution` x `resolution` and stored in CHW order.
    """
    print('Loading images from "%s"' % image_dir)
    sources_dir = os.listdir(image_dir)
    image_filenames = []
    for source_dir in sources_dir:
        image_filenames_temp = sorted(glob.glob(os.path.join(image_dir, source_dir, '*.png')))
        # Keep only the first percentage_samples percent of each source.
        image_filenames += image_filenames_temp[:int(float(len(image_filenames_temp))*float(percentage_samples)/100.0)]
    if len(image_filenames) == 0:
        error('No input images found')
    # Probe the first image to determine the channel count.
    img = np.asarray(PIL.Image.open(image_filenames[0]))
    channels = img.shape[2] if img.ndim == 3 else 1
    if channels not in [1, 3]:
        error('Input images must be stored as RGB or grayscale')
    with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
        order = tfr.choose_shuffled_order() if shuffle else np.arange(len(image_filenames))
        for idx in range(order.size):
            img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))
            # BUGFIX: resize in the image's native mode. The previous code
            # added a channel axis first and always used mode 'RGB', which
            # raised for grayscale (HxWx1) input.
            img = PIL.Image.fromarray(img, 'L' if channels == 1 else 'RGB')
            img = img.resize((resolution, resolution), PIL.Image.ANTIALIAS)
            img = np.asarray(img)
            if channels == 1:
                img = img[:, :, np.newaxis] # HW => HWC
            img = img.transpose(2, 0, 1) # HWC => CHW
            tfr.add_image(img)
        if export_labels:
            # Derive each image's label from its parent directory name.
            labels = []
            for file in image_filenames:
                name_list = file.split('/')
                file_source = name_list[-2]
                for label, source in enumerate(sources_dir):
                    if source == file_source:
                        labels.append(np.uint32(label))
                        break
            labels = np.array(labels)
            # One-hot encode; labels are stored in the same (shuffled) order as the images.
            onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
            onehot[np.arange(labels.size), labels] = 1.0
            tfr.add_labels(onehot[order])
#----------------------------------------------------------------------------
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--in_dir', type=str, default=' ') # The input directory containing subdirectories of images. Each subdirectory represents a data source, either from the real dataset or generated by a GAN
parser.add_argument('--out_dir', type=str, default=' ') # The output directory containing the prepared data format that enables efficient streaming
parser.add_argument('--resolution', type=int, default=128) # The resolution to which images are resized
parser.add_argument('--shuffle', type=int, default=1) # Shuffle the order of images when streaming?
parser.add_argument('--export_labels', type=int, default=1) # Export image source labels?
parser.add_argument('--percentage_samples', type=int, default=100) # The percentage of images used for data preparation
args = parser.parse_args()
    data_preparation(args.in_dir, args.out_dir, args.resolution, args.shuffle, args.export_labels, args.percentage_samples)
# --- file boundary (corpus metadata removed: 7,445 chars, avg line 47.35, max line 212, py) ---
# GANFingerprints / GANFingerprints-master/classifier/run.py
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import os
import time
import numpy as np
import tensorflow as tf
import config
import tfutil
import dataset
import misc
import argparse
#----------------------------------------------------------------------------
# Choose the size and contents of the image snapshot grids that are exported
# periodically during training.
def setup_snapshot_image_grid(training_set, drange_net, grid_size=None,
    size = '1080p', # '1080p' = to be viewed on 1080p display, '4k' = to be viewed on 4k display.
    layout = 'random'): # 'random' = grid contents are selected randomly, 'row_per_class' = each row corresponds to one class label.
    """Choose grid dimensions and fill the grid with (image, label) samples.

    Returns ((gw, gh), reals, labels) where reals is [gw*gh] + image shape
    and labels is [gw*gh, label_size].
    """
    # Select size.
    if grid_size is None:
        if size == '1080p':
            gw = np.clip(1920 // training_set.shape[2], 3, 32)
            gh = np.clip(1080 // training_set.shape[1], 2, 32)
        # BUGFIX: was a second independent `if`; an unknown `size` fell through
        # with gw/gh unbound (UnboundLocalError). Fail fast instead.
        elif size == '4k':
            gw = np.clip(3840 // training_set.shape[2], 7, 32)
            gh = np.clip(2160 // training_set.shape[1], 4, 32)
        else:
            raise ValueError("size must be '1080p' or '4k', got %r" % (size,))
    else:
        gw = grid_size[0]
        gh = grid_size[1]
    # Fill in reals and labels.
    reals = np.zeros([gw * gh] + training_set.shape, dtype=np.float32)
    labels = np.zeros([gw * gh, training_set.label_size], dtype=training_set.label_dtype)
    for idx in range(gw * gh):
        x = idx % gw; y = idx // gw
        while True:
            real, label = training_set.get_minibatch_np(1)
            real = real.astype(np.float32)
            real = misc.adjust_dynamic_range(real, training_set.dynamic_range, drange_net)
            if layout == 'row_per_class' and training_set.label_size > 0:
                # Keep drawing until a sample of the class assigned to this row appears.
                if label[0, y % training_set.label_size] == 0.0:
                    continue
            reals[idx] = real[0]
            labels[idx] = label[0]
            break
    return (gw, gh), reals, labels
#----------------------------------------------------------------------------
# Just-in-time processing of training images before feeding them to the networks.
def process_reals(x, lod, lr_mirror_augment, ud_mirror_augment, drange_data, drange_net):
    """Just-in-time preprocessing of a batch of real images [NCHW].

    Rescales pixel values to drange_net, optionally applies random left-right
    and/or up-down mirroring, and returns two tensors at network resolution:
    x_fade (crossfaded toward the next-lower level of detail, per fractional
    lod) and x_orig (no fading). Both are nearest-neighbor upscaled by
    2**floor(lod).
    """
    with tf.name_scope('ProcessReals'):
        with tf.name_scope('DynamicRange'):
            x = tf.cast(x, tf.float32)
            x = misc.adjust_dynamic_range(x, drange_data, drange_net)
        if lr_mirror_augment:
            with tf.name_scope('MirrorAugment'):
                # Flip each sample left-right with probability 0.5.
                s = tf.shape(x)
                mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0)
                mask = tf.tile(mask, [1, s[1], s[2], s[3]])
                x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[3]))
        if ud_mirror_augment:
            with tf.name_scope('udMirrorAugment'):
                # Flip each sample up-down with probability 0.5.
                s = tf.shape(x)
                mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0)
                mask = tf.tile(mask, [1, s[1], s[2], s[3]])
                x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[2]))
        with tf.name_scope('FadeLOD'): # Smooth crossfade between consecutive levels-of-detail.
            s = tf.shape(x)
            y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2])
            y = tf.reduce_mean(y, axis=[3, 5], keepdims=True)
            y = tf.tile(y, [1, 1, 1, 2, 1, 2])
            y = tf.reshape(y, [-1, s[1], s[2], s[3]])
            x_fade = tfutil.lerp(x, y, lod - tf.floor(lod))
            x_orig = tf.identity(x)
        with tf.name_scope('UpscaleLOD'): # Upscale to match the expected input/output size of the networks.
            s = tf.shape(x)
            factor = tf.cast(2 ** tf.floor(lod), tf.int32)
            x_fade = tf.reshape(x_fade, [-1, s[1], s[2], 1, s[3], 1])
            x_fade = tf.tile(x_fade, [1, 1, 1, factor, 1, factor])
            x_fade = tf.reshape(x_fade, [-1, s[1], s[2] * factor, s[3] * factor])
            x_orig = tf.reshape(x_orig, [-1, s[1], s[2], 1, s[3], 1])
            x_orig = tf.tile(x_orig, [1, 1, 1, factor, 1, factor])
            x_orig = tf.reshape(x_orig, [-1, s[1], s[2] * factor, s[3] * factor])
        return x_fade, x_orig
#----------------------------------------------------------------------------
# Class for evaluating and storing the values of time-varying training parameters.
class TrainingSchedule:
    """Evaluates time-varying training parameters for a given point in training.

    Given the number of real images shown so far (cur_nimg), computes the
    current level-of-detail (lod), resolution, minibatch size, learning rate
    and tick interval, applying any resolution-specific overrides.
    """
    def __init__(
        self,
        cur_nimg,
        training_set,
        lod_initial_resolution = 128, # Image resolution used at the beginning.
        lod_training_kimg = 1500, # Thousands of real images to show before doubling the resolution.
        lod_transition_kimg = 1500, # Thousands of real images to show when fading in new layers.
        minibatch_base = 16, # Maximum minibatch size, divided evenly among GPUs.
        minibatch_dict = {}, # Resolution-specific overrides.
        max_minibatch_per_gpu = {}, # Resolution-specific maximum minibatch size per GPU.
        lrate_base = 0.001, # Learning rate for AutoEncoder.
        lrate_dict = {}, # Resolution-specific overrides.
        tick_kimg_base = 1, # Default interval of progress snapshots.
        tick_kimg_dict = {}): # Resolution-specific overrides.
        # Training phase.
        self.kimg = cur_nimg / 1000.0
        phase_dur = lod_training_kimg + lod_transition_kimg
        phase_idx = int(np.floor(self.kimg / phase_dur)) if phase_dur > 0 else 0
        phase_kimg = self.kimg - phase_idx * phase_dur
        # Level-of-detail and resolution.
        self.lod = training_set.resolution_log2
        self.lod -= np.floor(np.log2(lod_initial_resolution))
        self.lod -= phase_idx
        if lod_transition_kimg > 0:
            # Fractional lod during the fade-in portion of the phase.
            self.lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg
        self.lod = max(self.lod, 0.0)
        self.resolution = 2 ** (training_set.resolution_log2 - int(np.floor(self.lod)))
        # Minibatch size.
        self.minibatch = minibatch_dict.get(self.resolution, minibatch_base)
        self.minibatch -= self.minibatch % config.num_gpus  # divide evenly among GPUs
        if self.resolution in max_minibatch_per_gpu:
            self.minibatch = min(self.minibatch, max_minibatch_per_gpu[self.resolution] * config.num_gpus)
        # Other parameters.
        self.lrate = lrate_dict.get(self.resolution, lrate_base)
        self.tick_kimg = tick_kimg_dict.get(self.resolution, tick_kimg_base)
#----------------------------------------------------------------------------
# Main training script.
# To run, comment/uncomment appropriate lines in config.py and launch train.py.
def train_classifier(
    smoothing = 0.999, # Exponential running average of encoder weights. (Unused here.)
    minibatch_repeats = 4, # Number of minibatches to run before adjusting training parameters.
    reset_opt_for_new_lod = True, # Reset optimizer internal state (e.g. Adam moments) when new layers are introduced?
    total_kimg = 100000, # Total length of the training, measured in thousands of real images.
    lr_mirror_augment = True, # Enable mirror augment?
    ud_mirror_augment = False, # Enable up-down mirror augment?
    drange_net = [-1,1], # Dynamic range used when feeding image data to the networks. (Mutable default; never mutated here.)
    accuracy_snapshot_ticks = 10, # How often to export image snapshots?
    save_tf_graph = False, # Include full TensorFlow computation graph in the tfevents file?
    save_weight_histograms = False): # Include weight histograms in the tfevents file?
    """Main training loop for the GAN-source classifier C_im.

    Loads the training/validation sets and remaining options from the global
    `config` module, builds (or resumes) the classifier, trains it for
    total_kimg thousand images, and periodically reports accuracy and saves
    network snapshots to a new result subdirectory.
    """
    maintenance_start_time = time.time()
    training_set = dataset.load_dataset(data_dir=config.data_dir, verbose=True, **config.training_set)
    validation_set = dataset.load_dataset(data_dir=config.data_dir, verbose=True, **config.validation_set)
    network_snapshot_ticks = total_kimg // 100 # How often to export network snapshots?
    # Construct networks (resume from a snapshot if misc can locate one).
    with tf.device('/gpu:0'):
        try:
            network_pkl = misc.locate_network_pkl()
            resume_kimg, resume_time = misc.resume_kimg_time(network_pkl)
            print('Loading networks from "%s"...' % network_pkl)
            C_im = misc.load_pkl(network_pkl)
        except:
            print('Constructing networks...')
            resume_kimg = 0.0
            resume_time = 0.0
            C_im = tfutil.Network('C_im', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **config.C_im)
        C_im.print_layers()
    print('Building TensorFlow graph...')
    with tf.name_scope('Inputs'):
        lod_in = tf.placeholder(tf.float32, name='lod_in', shape=[])
        lrate_in = tf.placeholder(tf.float32, name='lrate_in', shape=[])
        minibatch_in = tf.placeholder(tf.int32, name='minibatch_in', shape=[])
        minibatch_split = minibatch_in // config.num_gpus
        reals, labels = training_set.get_minibatch_tf()
        reals_split = tf.split(reals, config.num_gpus)
        labels_split = tf.split(labels, config.num_gpus)
    C_opt = tfutil.Optimizer(name='TrainC', learning_rate=lrate_in, **config.C_opt)
    # One clone of the classifier per GPU; gradients are accumulated into C_opt.
    for gpu in range(config.num_gpus):
        with tf.name_scope('GPU%d' % gpu), tf.device('/gpu:%d' % gpu):
            C_im_gpu = C_im if gpu == 0 else C_im.clone(C_im.name + '_shadow_%d' % gpu)
            reals_fade_gpu, reals_orig_gpu = process_reals(reals_split[gpu], lod_in, lr_mirror_augment, ud_mirror_augment, training_set.dynamic_range, drange_net)
            labels_gpu = labels_split[gpu]
            with tf.name_scope('C_loss'):
                C_loss = tfutil.call_func_by_name(C_im=C_im_gpu, reals_orig=reals_orig_gpu, labels=labels_gpu, **config.C_loss)
            C_opt.register_gradients(tf.reduce_mean(C_loss), C_im_gpu.trainables)
    C_train_op = C_opt.apply_updates()
    print('Setting up snapshot image grid...')
    grid_size, train_reals, train_labels = setup_snapshot_image_grid(training_set, drange_net, [5000, 1], **config.grid)
    grid_size, val_reals, val_labels = setup_snapshot_image_grid(validation_set, drange_net, [5000, 1], **config.grid)
    sched = TrainingSchedule(total_kimg * 1000, training_set, **config.sched)
    # Report initial accuracy on the fixed snapshot grids before training.
    train_logits = C_im.run(train_reals, minibatch_size=sched.minibatch//config.num_gpus)
    train_preds = np.argmax(train_logits, axis=1)
    train_gt = np.argmax(train_labels, axis=1)
    train_acc = np.float32(np.sum(train_gt==train_preds)) / np.float32(len(train_gt))
    print('Training Accuracy = %f' % train_acc)
    val_logits = C_im.run(val_reals, minibatch_size=sched.minibatch//config.num_gpus)
    val_preds = np.argmax(val_logits, axis=1)
    val_gt = np.argmax(val_labels, axis=1)
    val_acc = np.float32(np.sum(val_gt==val_preds)) / np.float32(len(val_gt))
    print('Validation Accuracy = %f' % val_acc)
    print('Setting up result dir...')
    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    misc.save_image_grid(train_reals[:120,:,:,:], os.path.join(result_subdir, 'train_reals.png'), drange=drange_net, grid_size=[15,8])
    misc.save_image_grid(val_reals[:120,:,:,:], os.path.join(result_subdir, 'val_reals.png'), drange=drange_net, grid_size=[15,8])
    summary_log = tf.summary.FileWriter(result_subdir)
    if save_tf_graph:
        summary_log.add_graph(tf.get_default_graph())
    if save_weight_histograms:
        C_im.setup_weight_histograms()
    print('Training...')
    cur_nimg = int(resume_kimg * 1000)
    cur_tick = 0
    tick_start_nimg = cur_nimg
    tick_start_time = time.time()
    train_start_time = tick_start_time - resume_time
    prev_lod = -1.0
    while cur_nimg < total_kimg * 1000:
        # Choose training parameters and configure training ops.
        sched = TrainingSchedule(cur_nimg, training_set, **config.sched)
        training_set.configure(sched.minibatch, sched.lod)
        if reset_opt_for_new_lod:
            # Discard stale optimizer moments whenever the (integer) lod changes.
            if np.floor(sched.lod) != np.floor(prev_lod) or np.ceil(sched.lod) != np.ceil(prev_lod):
                C_opt.reset_optimizer_state()
        prev_lod = sched.lod
        # Run training ops.
        for repeat in range(minibatch_repeats):
            tfutil.run([C_train_op], {lod_in: sched.lod, lrate_in: sched.lrate, minibatch_in: sched.minibatch})
            cur_nimg += sched.minibatch
        # Perform maintenance tasks once per tick.
        done = (cur_nimg >= total_kimg * 1000)
        if cur_nimg >= tick_start_nimg + sched.tick_kimg * 1000 or done:
            cur_tick += 1
            cur_time = time.time()
            tick_kimg = (cur_nimg - tick_start_nimg) / 1000.0
            tick_start_nimg = cur_nimg
            tick_time = cur_time - tick_start_time
            total_time = cur_time - train_start_time
            maintenance_time = tick_start_time - maintenance_start_time
            maintenance_start_time = cur_time
            # Report progress.
            print('tick %-5d kimg %-8.1f lod %-5.2f resolution %-4d minibatch %-4d time %-12s sec/tick %-7.1f sec/kimg %-7.2f maintenance %.1f' % (
                tfutil.autosummary('Progress/tick', cur_tick),
                tfutil.autosummary('Progress/kimg', cur_nimg / 1000.0),
                tfutil.autosummary('Progress/lod', sched.lod),
                tfutil.autosummary('Progress/resolution', sched.resolution),
                tfutil.autosummary('Progress/minibatch', sched.minibatch),
                misc.format_time(tfutil.autosummary('Timing/total_sec', total_time)),
                tfutil.autosummary('Timing/sec_per_tick', tick_time),
                tfutil.autosummary('Timing/sec_per_kimg', tick_time / tick_kimg),
                tfutil.autosummary('Timing/maintenance_sec', maintenance_time)))
            tfutil.autosummary('Timing/total_hours', total_time / (60.0 * 60.0))
            tfutil.autosummary('Timing/total_days', total_time / (24.0 * 60.0 * 60.0))
            tfutil.save_summaries(summary_log, cur_nimg)
            # Print accuracy.
            if cur_tick % accuracy_snapshot_ticks == 0 or done:
                train_logits = C_im.run(train_reals, minibatch_size=sched.minibatch//config.num_gpus)
                train_preds = np.argmax(train_logits, axis=1)
                train_gt = np.argmax(train_labels, axis=1)
                train_acc = np.float32(np.sum(train_gt==train_preds)) / np.float32(len(train_gt))
                print('Training Accuracy = %f' % train_acc)
                val_logits = C_im.run(val_reals, minibatch_size=sched.minibatch//config.num_gpus)
                val_preds = np.argmax(val_logits, axis=1)
                val_gt = np.argmax(val_labels, axis=1)
                val_acc = np.float32(np.sum(val_gt==val_preds)) / np.float32(len(val_gt))
                print('Validation Accuracy = %f' % val_acc)
            if cur_tick % network_snapshot_ticks == 0 or done:
                misc.save_pkl(C_im, os.path.join(result_subdir, 'network-snapshot-%06d.pkl' % (cur_nimg // 1000)))
            # Record start time of the next tick.
            tick_start_time = time.time()
    # Write final results.
    misc.save_pkl(C_im, os.path.join(result_subdir, 'network-final.pkl'))
    summary_log.close()
    open(os.path.join(result_subdir, '_training-done.txt'), 'wt').close()
#----------------------------------------------------------------------------
# Main entry point.
# Calls the function indicated in config.py.
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--app', type=str, default=' ')
#------------------- training arguments -------------------
parser.add_argument('--training_data_dir', type=str, default=' ') # The prepared training dataset directory that can be efficiently called by the code
parser.add_argument('--validation_data_dir', type=str, default=' ') # The prepared validation dataset directory that can be efficiently called by the code
parser.add_argument('--out_model_dir', type=str, default=' ') # The output directory containing trained models, training configureation, training log, and training snapshots
parser.add_argument('--training_seed', type=int, default=1000) # The random seed that differentiates training instances
#------------------- image generation arguments -------------------
parser.add_argument('--model_path', type=str, default=' ') # The pre-trained GAN model
parser.add_argument('--testing_data_path', type=str, default=' ') # The path of testing image file or the directory containing a collection of testing images
args = parser.parse_args()
if args.app == 'train':
assert args.training_data_dir != ' ' and args.out_model_dir != ' '
if args.validation_data_dir == ' ':
args.validation_data_dir = args.training_data_dir
misc.init_output_logging()
np.random.seed(args.training_seed)
print('Initializing TensorFlow...')
os.environ.update(config.env)
tfutil.init_tf(config.tf_config)
if args.training_data_dir[-1] == '/':
args.training_data_dir = args.training_data_dir[:-1]
idx = args.training_data_dir.rfind('/')
config.data_dir = args.training_data_dir[:idx]
config.training_set = config.EasyDict(tfrecord_dir=args.training_data_dir[idx+1:], max_label_size='full')
if args.validation_data_dir[-1] == '/':
args.validation_data_dir = args.validation_data_dir[:-1]
idx = args.validation_data_dir.rfind('/')
config.validation_set = config.EasyDict(tfrecord_dir=args.validation_data_dir[idx+1:], max_label_size='full')
app = config.EasyDict(func='run.train_classifier', lr_mirror_augment=True, ud_mirror_augment=False, total_kimg=100000)
config.result_dir = args.out_model_dir
elif args.app == 'test':
assert args.model_path != ' ' and args.testing_data_path != ' '
misc.init_output_logging()
print('Initializing TensorFlow...')
os.environ.update(config.env)
tfutil.init_tf(config.tf_config)
app = config.EasyDict(func='util_scripts.classify', model_path=args.model_path, testing_data_path=args.testing_data_path)
    tfutil.call_func_by_name(**app)
# --- file boundary (corpus metadata removed: 18,514 chars, avg line 52.82, max line 177, py) ---
# GANFingerprints / GANFingerprints-master/classifier/config.py
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
#----------------------------------------------------------------------------
# Convenience class that behaves exactly like dict(), but allows accessing
# the keys and values using the attribute syntax, i.e., "mydict.key = value".
class EasyDict(dict):
    """A dict whose entries can also be read, written and deleted via
    attribute syntax, i.e. ``mydict.key = value``.

    Behaves exactly like ``dict`` in every other respect.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def __getattr__(self, name):
        # Missing keys surface as KeyError, matching plain dict lookups.
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]
#----------------------------------------------------------------------------
# Paths.
data_dir = 'datasets'
training_data = 'GAN_classifier_datasets_train'
validation_data = 'GAN_classifier_datasets_val'
result_dir = 'models/%s' % training_data
#----------------------------------------------------------------------------
# TensorFlow options.
tf_config = EasyDict() # TensorFlow session config, set by tfutil.init_tf().
env = EasyDict() # Environment variables, set by the main program in train.py.
tf_config['graph_options.place_pruned_graph'] = True # False (default) = Check that all ops are available on the designated device. True = Skip the check for ops that are not used.
tf_config['gpu_options.allow_growth'] = False # False (default) = Allocate all GPU memory at the beginning. True = Allocate only as much GPU memory as needed.
env.CUDA_VISIBLE_DEVICES = '0' # Unspecified (default) = Use all available GPUs. List of ints = CUDA device numbers to use.
env.TF_CPP_MIN_LOG_LEVEL = '1' # 0 (default) = Print all available debug info from TensorFlow. 1 = Print warnings and errors, but disable debug info.
#----------------------------------------------------------------------------
# Official training configs, targeted mainly for CelebA-HQ.
# To run, comment/uncomment the lines as appropriate and launch train.py.
mode = 'postpool'#'postpool'#'predownscale'
switching_res = 4#128#64#32#16#8#4
desc = 'pgan' # Description string included in result subdir name.
random_seed = 1000 # Global random seed.
# EasyDicts below collect keyword arguments that are forwarded to the named
# functions (see the 'func' entries) by the training loop.
train = EasyDict(func='run.train_classifier') # Options for main training func.
C_im = EasyDict(func='networks.C_patch', fmap_base=1024, fmap_max=512, latent_res=-1, mode=mode, switching_res=switching_res) # Options for classifier network in the image domain.
C_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for classifier optimizer.
C_loss = EasyDict(func='loss.C_classification') # Options for classifier loss.
sched = EasyDict() # Options for train.TrainingSchedule.
grid = EasyDict(size='1080p', layout='random') # Options for train.setup_snapshot_image_grid().
# Dataset (choose one). Each selected line also appends a tag to `desc`,
# which becomes part of the result subdirectory name.
desc += '-%s' % training_data; training_set = EasyDict(tfrecord_dir=training_data); validation_set = EasyDict(tfrecord_dir=validation_data); train.lr_mirror_augment = True; train.ud_mirror_augment = False; sched.lod_initial_resolution = 128; sched.lod_training_kimg = 100000; sched.lod_transition_kimg = 100000; train.total_kimg = 100000
# Conditioning & snapshot options.
desc += '-labels'; training_set.max_label_size = 'full'; validation_set.max_label_size = 'full' # conditioned on full label
# Config presets (choose one). Note: the official settings are optimal. It is not the larger batch size the better.
desc += '-preset-v2-1gpu'; num_gpus = 1; sched.minibatch_base = 32; sched.lrate_dict = {1024: 0.0015}
# Numerical precision (choose one).
desc += '-fp32'; sched.max_minibatch_per_gpu = {256: 16, 512: 8, 1024: 4}
GANFingerprints | GANFingerprints-master/classifier/custom_vgg19.py | import os, inspect
import tensorflow as tf
import numpy as np
import time
from tensorflow_vgg import vgg19
VGG_MEAN = [103.939, 116.779, 123.68]
def loadWeightsData(vgg19_npy_path=None):
    """Load pretrained VGG-19 weights from a NumPy snapshot.

    Args:
        vgg19_npy_path: Path to ``vgg19.npy``. When None, the file is looked
            up in the same directory as this module.

    Returns:
        dict mapping layer names (e.g. "conv1_1") to weight arrays, as
        produced by the tensorflow-vgg project.
    """
    if vgg19_npy_path is None:
        # BUG FIX: the original referenced the undefined name `Vgg19`
        # (immediate NameError) -- resolve the default location from a class
        # that actually exists in this module instead.
        path = inspect.getfile(custom_Vgg19)
        path = os.path.abspath(os.path.join(path, os.pardir))
        path = os.path.join(path, "vgg19.npy")
        vgg19_npy_path = path
        print (vgg19_npy_path)
    # allow_pickle=True is required to read this object-array snapshot on
    # NumPy >= 1.16.3, where the default flipped to False; it is harmless
    # (and was the default) on older versions.
    return np.load(vgg19_npy_path, encoding='latin1', allow_pickle=True).item()
class custom_Vgg19(vgg19.Vgg19):
    """VGG-19 convolutional feature extractor.

    Builds the conv1..pool5 portion of VGG-19 in the constructor using
    externally supplied pretrained weights (``data_dict``); the
    fully-connected layers are not constructed here. All intermediate
    activations are exposed as instance attributes (``conv1_1`` ...
    ``pool5``) so callers can tap arbitrary feature maps.
    """
    # Input should be an rgb image [batch, height, width, 3]
    # values scaled [-1, 1]
    def __init__(self, rgb, data_dict, train=False):
        """Build the convolution/pooling graph for ``rgb``.

        Args:
            rgb: image tensor in NCHW layout ([batch, 3, H, W]) with values
                in [-1, 1] (it is transposed to NHWC below).
            data_dict: layer-name -> weight-array mapping (see
                ``loadWeightsData``); presumably consumed by the inherited
                ``conv_layer`` -- TODO confirm against ``vgg19.Vgg19``.
            train: unused in this implementation.
        """
        # It's a shared weights data and used in various
        # member functions.
        self.data_dict = data_dict
        # start_time = time.time()
        # NCHW -> NHWC, then map [-1, 1] back to [0, 255] pixel values.
        rgb = tf.transpose(rgb, perm=[0,2,3,1])
        rgb_scaled = (rgb + 1.0) / 2.0 * 255.0
        # Convert RGB to BGR
        red, green, blue = tf.split(rgb_scaled, 3, 3)
        # Subtract the per-channel means (VGG_MEAN is ordered B, G, R).
        bgr = tf.concat([blue - VGG_MEAN[0],
                        green - VGG_MEAN[1],
                        red - VGG_MEAN[2]],
                        3)
        # Five VGG-19 conv blocks. Note: average pooling is used here in
        # place of VGG's usual max pooling.
        self.conv1_1 = self.conv_layer(bgr, "conv1_1")
        self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
        self.pool1 = self.avg_pool(self.conv1_2, 'pool1')
        self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
        self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
        self.pool2 = self.avg_pool(self.conv2_2, 'pool2')
        self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
        self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
        self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
        self.conv3_4 = self.conv_layer(self.conv3_3, "conv3_4")
        self.pool3 = self.avg_pool(self.conv3_4, 'pool3')
        self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
        self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
        self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
        self.conv4_4 = self.conv_layer(self.conv4_3, "conv4_4")
        self.pool4 = self.avg_pool(self.conv4_4, 'pool4')
        self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
        self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
        self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
        self.conv5_4 = self.conv_layer(self.conv5_3, "conv5_4")
        self.pool5 = self.avg_pool(self.conv5_4, 'pool5')
        # self.data_dict = None
        # print ("build model finished: %ds" % (time.time() - start_time))
    def debug(self):
        # Placeholder hook kept for API compatibility.
        pass
GANFingerprints | GANFingerprints-master/classifier/util_scripts.py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import os
import os.path
import numpy as np
import PIL.Image
import skimage
import skimage.transform
import misc
def _load_classifier_input(image_path):
    """Load an image file as a 1 x 3 x 128 x 128 float32 batch in [-1, 1].

    Grayscale images are replicated to three channels, an alpha channel is
    dropped, and any image whose height differs from 128 is resized to
    128 x 128 before the HWC -> CHW transpose.
    """
    im = np.array(PIL.Image.open(image_path)).astype(np.float32) / 255.0
    if len(im.shape) < 3:
        im = np.dstack([im, im, im])
    if im.shape[2] == 4:
        im = im[:,:,:3]
    if im.shape[0] != 128:
        im = skimage.transform.resize(im, (128, 128))
    im = np.transpose(misc.adjust_dynamic_range(im, [0,1], [-1,1]), axes=[2,0,1])
    return np.reshape(im, [1]+list(im.shape))
def _labels_for(num_classes):
    """Return the source-label list whose length matches the classifier width.

    Raises:
        ValueError: if ``num_classes`` matches neither known label set.
            (The original code left ``labels`` unbound in that case and
            crashed later with a NameError.)
    """
    # 5-way setup: real data vs. four GAN architectures.
    labels_1 = ['CelebA_real_data', 'ProGAN_generated_data', 'SNGAN_generated_data', 'CramerGAN_generated_data', 'MMDGAN_generated_data']
    # 11-way setup: real data vs. ten ProGAN training seeds.
    # (BUG FIX: the seed-0 label used to carry a trailing space.)
    labels_2 = ['CelebA_real_data', 'ProGAN_seed_0_generated_data', 'ProGAN_seed_1_generated_data', 'ProGAN_seed_2_generated_data', 'ProGAN_seed_3_generated_data', 'ProGAN_seed_4_generated_data', 'ProGAN_seed_5_generated_data', 'ProGAN_seed_6_generated_data', 'ProGAN_seed_7_generated_data', 'ProGAN_seed_8_generated_data', 'ProGAN_seed_9_generated_data']
    for candidate in (labels_1, labels_2):
        if num_classes == len(candidate):
            return list(candidate)
    raise ValueError('Unexpected number of classifier outputs: %d' % num_classes)
def classify(model_path, testing_data_path):
    """Classify one image file, or every file in a directory, with a trained
    GAN-fingerprint classifier.

    Args:
        model_path: pickle file containing the trained classifier network.
        testing_data_path: a .png/.jpg file, or a directory of images.

    Side effects:
        Prints the predicted source for each image; for a directory, also
        prints the per-source percentage summary.
    """
    print('Loading network...')
    C_im = misc.load_network_pkl(model_path)
    if testing_data_path.endswith('.png') or testing_data_path.endswith('.jpg'):
        im = _load_classifier_input(testing_data_path)
        logits = C_im.run(im, minibatch_size=1, num_gpus=1, out_dtype=np.float32)
        idx = np.argmax(np.squeeze(logits))
        labels = _labels_for(logits.shape[1])
        print('The input image is predicted as being sampled from %s' % labels[idx])
    elif os.path.isdir(testing_data_path):
        count_dict = None
        labels = None
        name_list = sorted(os.listdir(testing_data_path))
        length = len(name_list)
        # Start at 1 so the progress line reads 1/N .. N/N (was 0-based).
        for count0, name in enumerate(name_list, 1):
            im = _load_classifier_input('%s/%s' % (testing_data_path, name))
            logits = C_im.run(im, minibatch_size=1, num_gpus=1, out_dtype=np.float32)
            idx = np.argmax(np.squeeze(logits))
            labels = _labels_for(logits.shape[1])
            if count_dict is None:
                count_dict = {label: 0 for label in labels}
            count_dict[labels[idx]] += 1
            print('Classifying %d/%d images: %s: predicted as being sampled from %s' % (count0, length, name, labels[idx]))
        # Guard against an empty directory (previously a NameError here).
        if labels:
            for label in labels:
                print('The percentage of images sampled from %s is %d/%d = %.2f%%' % (label, count_dict[label], length, float(count_dict[label])/float(length)*100.0))
GANFingerprints | GANFingerprints-master/classifier/nets/inception_resnet_v2_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.inception_resnet_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import inception
class InceptionTest(tf.test.TestCase):
  """Shape/naming tests for the slim Inception-ResNet-v2 network builder.

  NOTE(review): `assertItemsEqual` and `assertEquals` are Python-2-era
  unittest APIs (Python 3 renamed them to `assertCountEqual` /
  `assertEqual`) -- confirm the targeted interpreter before running.
  """
  def testBuildLogits(self):
    """Full network: logits and aux logits exist with the expected shapes."""
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, endpoints = inception.inception_resnet_v2(inputs, num_classes)
      self.assertTrue('AuxLogits' in endpoints)
      auxlogits = endpoints['AuxLogits']
      self.assertTrue(
          auxlogits.op.name.startswith('InceptionResnetV2/AuxLogits'))
      self.assertListEqual(auxlogits.get_shape().as_list(),
                           [batch_size, num_classes])
      self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
  def testBuildWithoutAuxLogits(self):
    """create_aux_logits=False must suppress the AuxLogits endpoint."""
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, endpoints = inception.inception_resnet_v2(inputs, num_classes,
                                                        create_aux_logits=False)
      self.assertTrue('AuxLogits' not in endpoints)
      self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
  def testBuildNoClasses(self):
    """num_classes=None returns pooled features instead of logits."""
    batch_size = 5
    height, width = 299, 299
    num_classes = None
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      net, endpoints = inception.inception_resnet_v2(inputs, num_classes)
      self.assertTrue('AuxLogits' not in endpoints)
      self.assertTrue('Logits' not in endpoints)
      self.assertTrue(
          net.op.name.startswith('InceptionResnetV2/Logits/AvgPool'))
      self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1536])
  def testBuildEndPoints(self):
    """End-point dict exposes Logits, AuxLogits and the pre-pool feature map."""
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      _, end_points = inception.inception_resnet_v2(inputs, num_classes)
      self.assertTrue('Logits' in end_points)
      logits = end_points['Logits']
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
      self.assertTrue('AuxLogits' in end_points)
      aux_logits = end_points['AuxLogits']
      self.assertListEqual(aux_logits.get_shape().as_list(),
                           [batch_size, num_classes])
      pre_pool = end_points['Conv2d_7b_1x1']
      self.assertListEqual(pre_pool.get_shape().as_list(),
                           [batch_size, 8, 8, 1536])
  def testBuildBaseNetwork(self):
    """Base (headless) network produces the documented end points."""
    batch_size = 5
    height, width = 299, 299
    inputs = tf.random_uniform((batch_size, height, width, 3))
    net, end_points = inception.inception_resnet_v2_base(inputs)
    self.assertTrue(net.op.name.startswith('InceptionResnetV2/Conv2d_7b_1x1'))
    self.assertListEqual(net.get_shape().as_list(),
                         [batch_size, 8, 8, 1536])
    expected_endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
                          'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',
                          'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_6a',
                          'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1']
    self.assertItemsEqual(end_points.keys(), expected_endpoints)
  def testBuildOnlyUptoFinalEndpoint(self):
    """Truncating at each endpoint yields exactly the prefix of end points."""
    batch_size = 5
    height, width = 299, 299
    endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
                 'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',
                 'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_6a',
                 'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1']
    for index, endpoint in enumerate(endpoints):
      # Fresh graph per truncation point to avoid variable-name collisions.
      with tf.Graph().as_default():
        inputs = tf.random_uniform((batch_size, height, width, 3))
        out_tensor, end_points = inception.inception_resnet_v2_base(
            inputs, final_endpoint=endpoint)
        if endpoint != 'PreAuxLogits':
          self.assertTrue(out_tensor.op.name.startswith(
              'InceptionResnetV2/' + endpoint))
        self.assertItemsEqual(endpoints[:index+1], end_points.keys())
  def testBuildAndCheckAllEndPointsUptoPreAuxLogits(self):
    """Default stride: per-endpoint shapes up to PreAuxLogits."""
    batch_size = 5
    height, width = 299, 299
    inputs = tf.random_uniform((batch_size, height, width, 3))
    _, end_points = inception.inception_resnet_v2_base(
        inputs, final_endpoint='PreAuxLogits')
    endpoints_shapes = {'Conv2d_1a_3x3': [5, 149, 149, 32],
                        'Conv2d_2a_3x3': [5, 147, 147, 32],
                        'Conv2d_2b_3x3': [5, 147, 147, 64],
                        'MaxPool_3a_3x3': [5, 73, 73, 64],
                        'Conv2d_3b_1x1': [5, 73, 73, 80],
                        'Conv2d_4a_3x3': [5, 71, 71, 192],
                        'MaxPool_5a_3x3': [5, 35, 35, 192],
                        'Mixed_5b': [5, 35, 35, 320],
                        'Mixed_6a': [5, 17, 17, 1088],
                        'PreAuxLogits': [5, 17, 17, 1088]
                       }
    self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
    for endpoint_name in endpoints_shapes:
      expected_shape = endpoints_shapes[endpoint_name]
      self.assertTrue(endpoint_name in end_points)
      self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
                           expected_shape)
  def testBuildAndCheckAllEndPointsUptoPreAuxLogitsWithAlignedFeatureMaps(self):
    """align_feature_maps=True pads so spatial dims divide evenly."""
    batch_size = 5
    height, width = 299, 299
    inputs = tf.random_uniform((batch_size, height, width, 3))
    _, end_points = inception.inception_resnet_v2_base(
        inputs, final_endpoint='PreAuxLogits', align_feature_maps=True)
    endpoints_shapes = {'Conv2d_1a_3x3': [5, 150, 150, 32],
                        'Conv2d_2a_3x3': [5, 150, 150, 32],
                        'Conv2d_2b_3x3': [5, 150, 150, 64],
                        'MaxPool_3a_3x3': [5, 75, 75, 64],
                        'Conv2d_3b_1x1': [5, 75, 75, 80],
                        'Conv2d_4a_3x3': [5, 75, 75, 192],
                        'MaxPool_5a_3x3': [5, 38, 38, 192],
                        'Mixed_5b': [5, 38, 38, 320],
                        'Mixed_6a': [5, 19, 19, 1088],
                        'PreAuxLogits': [5, 19, 19, 1088]
                       }
    self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
    for endpoint_name in endpoints_shapes:
      expected_shape = endpoints_shapes[endpoint_name]
      self.assertTrue(endpoint_name in end_points)
      self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
                           expected_shape)
  def testBuildAndCheckAllEndPointsUptoPreAuxLogitsWithOutputStrideEight(self):
    """output_stride=8 keeps later feature maps at higher resolution."""
    batch_size = 5
    height, width = 299, 299
    inputs = tf.random_uniform((batch_size, height, width, 3))
    _, end_points = inception.inception_resnet_v2_base(
        inputs, final_endpoint='PreAuxLogits', output_stride=8)
    endpoints_shapes = {'Conv2d_1a_3x3': [5, 149, 149, 32],
                        'Conv2d_2a_3x3': [5, 147, 147, 32],
                        'Conv2d_2b_3x3': [5, 147, 147, 64],
                        'MaxPool_3a_3x3': [5, 73, 73, 64],
                        'Conv2d_3b_1x1': [5, 73, 73, 80],
                        'Conv2d_4a_3x3': [5, 71, 71, 192],
                        'MaxPool_5a_3x3': [5, 35, 35, 192],
                        'Mixed_5b': [5, 35, 35, 320],
                        'Mixed_6a': [5, 33, 33, 1088],
                        'PreAuxLogits': [5, 33, 33, 1088]
                       }
    self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
    for endpoint_name in endpoints_shapes:
      expected_shape = endpoints_shapes[endpoint_name]
      self.assertTrue(endpoint_name in end_points)
      self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
                           expected_shape)
  def testVariablesSetDevice(self):
    """Variables land on whichever device the enclosing scope pins."""
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      # Force all Variables to reside on the device.
      with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
        inception.inception_resnet_v2(inputs, num_classes)
      with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
        inception.inception_resnet_v2(inputs, num_classes)
      for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
        self.assertDeviceEqual(v.device, '/cpu:0')
      for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
        self.assertDeviceEqual(v.device, '/gpu:0')
  def testHalfSizeImages(self):
    """150x150 input still builds; pre-pool map shrinks to 3x3."""
    batch_size = 5
    height, width = 150, 150
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, end_points = inception.inception_resnet_v2(inputs, num_classes)
      self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
      pre_pool = end_points['Conv2d_7b_1x1']
      self.assertListEqual(pre_pool.get_shape().as_list(),
                           [batch_size, 3, 3, 1536])
  def testGlobalPool(self):
    """Non-square input works thanks to global pooling before the logits."""
    batch_size = 1
    height, width = 330, 400
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, end_points = inception.inception_resnet_v2(inputs, num_classes)
      self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
      pre_pool = end_points['Conv2d_7b_1x1']
      self.assertListEqual(pre_pool.get_shape().as_list(),
                           [batch_size, 8, 11, 1536])
  def testGlobalPoolUnknownImageShape(self):
    """Spatial dims unknown at graph-build time resolve correctly at run time."""
    batch_size = 1
    height, width = 330, 400
    num_classes = 1000
    with self.test_session() as sess:
      inputs = tf.placeholder(tf.float32, (batch_size, None, None, 3))
      logits, end_points = inception.inception_resnet_v2(
          inputs, num_classes, create_aux_logits=False)
      self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
      pre_pool = end_points['Conv2d_7b_1x1']
      images = tf.random_uniform((batch_size, height, width, 3))
      sess.run(tf.global_variables_initializer())
      logits_out, pre_pool_out = sess.run([logits, pre_pool],
                                          {inputs: images.eval()})
      self.assertTupleEqual(logits_out.shape, (batch_size, num_classes))
      self.assertTupleEqual(pre_pool_out.shape, (batch_size, 8, 11, 1536))
  def testUnknownBatchSize(self):
    """Batch dimension may be None in the placeholder."""
    batch_size = 1
    height, width = 299, 299
    num_classes = 1000
    with self.test_session() as sess:
      inputs = tf.placeholder(tf.float32, (None, height, width, 3))
      logits, _ = inception.inception_resnet_v2(inputs, num_classes)
      self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
      self.assertListEqual(logits.get_shape().as_list(),
                           [None, num_classes])
      images = tf.random_uniform((batch_size, height, width, 3))
      sess.run(tf.global_variables_initializer())
      output = sess.run(logits, {inputs: images.eval()})
      self.assertEquals(output.shape, (batch_size, num_classes))
  def testEvaluation(self):
    """is_training=False graph runs and yields one prediction per example."""
    batch_size = 2
    height, width = 299, 299
    num_classes = 1000
    with self.test_session() as sess:
      eval_inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, _ = inception.inception_resnet_v2(eval_inputs,
                                                num_classes,
                                                is_training=False)
      predictions = tf.argmax(logits, 1)
      sess.run(tf.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (batch_size,))
  def testTrainEvalWithReuse(self):
    """Eval graph can reuse train variables (reuse=True) at another size."""
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000
    with self.test_session() as sess:
      train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
      inception.inception_resnet_v2(train_inputs, num_classes)
      eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
      logits, _ = inception.inception_resnet_v2(eval_inputs,
                                                num_classes,
                                                is_training=False,
                                                reuse=True)
      predictions = tf.argmax(logits, 1)
      sess.run(tf.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (eval_batch_size,))
if __name__ == '__main__':
tf.test.main()
| 14,119 | 44.25641 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/mobilenet_v1_eval.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Validate mobilenet_v1 with options for quantization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
from datasets import dataset_factory
from nets import mobilenet_v1
from preprocessing import preprocessing_factory
slim = tf.contrib.slim
# Command-line flags configuring the evaluation run; values are read through
# the module-level FLAGS object defined below.
flags = tf.app.flags
flags.DEFINE_string('master', '', 'Session master')
flags.DEFINE_integer('batch_size', 250, 'Batch size')
flags.DEFINE_integer('num_classes', 1001, 'Number of classes to distinguish')
flags.DEFINE_integer('num_examples', 50000, 'Number of examples to evaluate')
flags.DEFINE_integer('image_size', 224, 'Input image resolution')
flags.DEFINE_float('depth_multiplier', 1.0, 'Depth multiplier for mobilenet')
flags.DEFINE_bool('quantize', False, 'Quantize training')
flags.DEFINE_string('checkpoint_dir', '', 'The directory for checkpoints')
flags.DEFINE_string('eval_dir', '', 'Directory for writing eval event logs')
flags.DEFINE_string('dataset_dir', '', 'Location of dataset')
FLAGS = flags.FLAGS
def imagenet_input(is_training):
  """Data reader for imagenet.

  Reads in imagenet data and performs pre-processing on the images.

  Args:
     is_training: bool specifying if train or validation dataset is needed.
  Returns:
     A batch of images and labels.
  """
  # Pick the dataset split; shuffling is only wanted for training.
  split_name = 'train' if is_training else 'validation'
  dataset = dataset_factory.get_dataset('imagenet', split_name,
                                        FLAGS.dataset_dir)
  data_provider = slim.dataset_data_provider.DatasetDataProvider(
      dataset,
      shuffle=is_training,
      common_queue_capacity=2 * FLAGS.batch_size,
      common_queue_min=FLAGS.batch_size)
  raw_image, raw_label = data_provider.get(['image', 'label'])
  preprocess_fn = preprocessing_factory.get_preprocessing(
      'mobilenet_v1', is_training=is_training)
  processed_image = preprocess_fn(raw_image, FLAGS.image_size, FLAGS.image_size)
  # Assemble mini-batches with a small pool of reader threads.
  images, labels = tf.train.batch(
      tensors=[processed_image, raw_label],
      batch_size=FLAGS.batch_size,
      num_threads=4,
      capacity=5 * FLAGS.batch_size)
  return images, labels
def metrics(logits, labels):
  """Specify the metrics for eval.

  Args:
    logits: Logits output from the graph.
    labels: Ground truth labels for inputs.

  Returns:
    Eval Op for the graph.
  """
  labels = tf.squeeze(labels)
  names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
      'Accuracy': tf.metrics.accuracy(tf.argmax(logits, 1), labels),
      'Recall_5': tf.metrics.recall_at_k(labels, logits, 5),
  })
  # BUG FIX: dict.iteritems() is Python-2-only and raises AttributeError on
  # Python 3; items() behaves correctly on both.
  for name, value in names_to_values.items():
    slim.summaries.add_scalar_summary(
        value, name, prefix='eval', print_summary=True)
  return names_to_updates.values()
def build_model():
  """Build the mobilenet_v1 model for evaluation.

  Returns:
    g: graph with rewrites after insertion of quantization ops and batch norm
    folding.
    eval_ops: eval ops for inference.
  """
  g = tf.Graph()
  with g.as_default():
    inputs, labels = imagenet_input(is_training=False)
    scope = mobilenet_v1.mobilenet_v1_arg_scope(
        is_training=False, weight_decay=0.0)
    with slim.arg_scope(scope):
      logits, _ = mobilenet_v1.mobilenet_v1(
          inputs,
          is_training=False,
          depth_multiplier=FLAGS.depth_multiplier,
          num_classes=FLAGS.num_classes)
    if FLAGS.quantize:
      # Rewrite happens after the forward graph is built so the quantization
      # pass can locate the ops to fold / fake-quantize.
      tf.contrib.quantize.create_eval_graph()
    eval_ops = metrics(logits, labels)
  return g, eval_ops
def eval_model():
  """Evaluates mobilenet_v1."""
  graph, eval_ops = build_model()
  with graph.as_default():
    # Round up so the final partial batch still gets evaluated.
    batches_per_run = math.ceil(FLAGS.num_examples / float(FLAGS.batch_size))
    slim.evaluation.evaluate_once(
        FLAGS.master,
        FLAGS.checkpoint_dir,
        logdir=FLAGS.eval_dir,
        num_evals=batches_per_run,
        eval_op=eval_ops)
def main(unused_arg):
  """Entry point invoked by tf.app.run(); performs one evaluation pass."""
  eval_model()
if __name__ == '__main__':
tf.app.run(main)
| 4,826 | 30.54902 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/pix2pix_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for pix2pix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import pix2pix
class GeneratorTest(tf.test.TestCase):
  """Output-shape and layer-count tests for the pix2pix generator."""
  def _reduced_default_blocks(self):
    """Returns the default blocks, scaled down to make test run faster."""
    return [pix2pix.Block(b.num_filters // 32, b.decoder_keep_prob)
            for b in pix2pix._default_generator_blocks()]
  def test_output_size_nn_upsample_conv(self):
    """Generator output matches input spatial size with NN-upsample decoder."""
    batch_size = 2
    height, width = 256, 256
    num_outputs = 4
    images = tf.ones((batch_size, height, width, 3))
    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      logits, _ = pix2pix.pix2pix_generator(
          images, num_outputs, blocks=self._reduced_default_blocks(),
          upsample_method='nn_upsample_conv')
    with self.test_session() as session:
      session.run(tf.global_variables_initializer())
      np_outputs = session.run(logits)
    self.assertListEqual([batch_size, height, width, num_outputs],
                         list(np_outputs.shape))
  def test_output_size_conv2d_transpose(self):
    """Same shape check with the transposed-convolution decoder."""
    batch_size = 2
    height, width = 256, 256
    num_outputs = 4
    images = tf.ones((batch_size, height, width, 3))
    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      logits, _ = pix2pix.pix2pix_generator(
          images, num_outputs, blocks=self._reduced_default_blocks(),
          upsample_method='conv2d_transpose')
    with self.test_session() as session:
      session.run(tf.global_variables_initializer())
      np_outputs = session.run(logits)
    self.assertListEqual([batch_size, height, width, num_outputs],
                         list(np_outputs.shape))
  def test_block_number_dictates_number_of_layers(self):
    """One encoder and one decoder end point is emitted per block."""
    batch_size = 2
    height, width = 256, 256
    num_outputs = 4
    images = tf.ones((batch_size, height, width, 3))
    blocks = [
        pix2pix.Block(64, 0.5),
        pix2pix.Block(128, 0),
    ]
    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      _, end_points = pix2pix.pix2pix_generator(
          images, num_outputs, blocks)
    num_encoder_layers = 0
    num_decoder_layers = 0
    for end_point in end_points:
      if end_point.startswith('encoder'):
        num_encoder_layers += 1
      elif end_point.startswith('decoder'):
        num_decoder_layers += 1
    self.assertEqual(num_encoder_layers, len(blocks))
    self.assertEqual(num_decoder_layers, len(blocks))
class DiscriminatorTest(tf.test.TestCase):
  """Output-shape and argument-validation tests for the pix2pix discriminator.

  NOTE(review): the method name `test_four_layers_wrog_paddig` is misspelled
  ("wrong padding"); it still runs under test discovery, and renaming it
  would change the externally visible test id.
  """
  def _layer_output_size(self, input_size, kernel_size=4, stride=2, pad=2):
    # Standard conv output-size formula for one discriminator layer.
    return (input_size + pad * 2 - kernel_size) // stride + 1
  def test_four_layers(self):
    """Patch-logit map has the size implied by chaining the layer formula."""
    batch_size = 2
    input_size = 256
    output_size = self._layer_output_size(input_size)
    output_size = self._layer_output_size(output_size)
    output_size = self._layer_output_size(output_size)
    output_size = self._layer_output_size(output_size, stride=1)
    output_size = self._layer_output_size(output_size, stride=1)
    images = tf.ones((batch_size, input_size, input_size, 3))
    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      logits, end_points = pix2pix.pix2pix_discriminator(
          images, num_filters=[64, 128, 256, 512])
    self.assertListEqual([batch_size, output_size, output_size, 1],
                         logits.shape.as_list())
    self.assertListEqual([batch_size, output_size, output_size, 1],
                         end_points['predictions'].shape.as_list())
  def test_four_layers_no_padding(self):
    """Same shape check with padding disabled."""
    batch_size = 2
    input_size = 256
    output_size = self._layer_output_size(input_size, pad=0)
    output_size = self._layer_output_size(output_size, pad=0)
    output_size = self._layer_output_size(output_size, pad=0)
    output_size = self._layer_output_size(output_size, stride=1, pad=0)
    output_size = self._layer_output_size(output_size, stride=1, pad=0)
    images = tf.ones((batch_size, input_size, input_size, 3))
    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      logits, end_points = pix2pix.pix2pix_discriminator(
          images, num_filters=[64, 128, 256, 512], padding=0)
    self.assertListEqual([batch_size, output_size, output_size, 1],
                         logits.shape.as_list())
    self.assertListEqual([batch_size, output_size, output_size, 1],
                         end_points['predictions'].shape.as_list())
  def test_four_layers_wrog_paddig(self):
    """Non-integer padding must raise TypeError."""
    batch_size = 2
    input_size = 256
    images = tf.ones((batch_size, input_size, input_size, 3))
    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      with self.assertRaises(TypeError):
        pix2pix.pix2pix_discriminator(
            images, num_filters=[64, 128, 256, 512], padding=1.5)
  def test_four_layers_negative_padding(self):
    """Negative padding must raise ValueError."""
    batch_size = 2
    input_size = 256
    images = tf.ones((batch_size, input_size, input_size, 3))
    with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
      with self.assertRaises(ValueError):
        pix2pix.pix2pix_discriminator(
            images, num_filters=[64, 128, 256, 512], padding=-1)
if __name__ == '__main__':
tf.test.main()
| 5,965 | 37 | 79 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/mobilenet_v1_train.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Build and train mobilenet_v1 with options for quantization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from datasets import dataset_factory
from nets import mobilenet_v1
from preprocessing import preprocessing_factory
slim = tf.contrib.slim
# Command-line flags configuring the training run; values are read through
# the module-level FLAGS object defined below.
flags = tf.app.flags
flags.DEFINE_string('master', '', 'Session master')
flags.DEFINE_integer('task', 0, 'Task')
flags.DEFINE_integer('ps_tasks', 0, 'Number of ps')
flags.DEFINE_integer('batch_size', 64, 'Batch size')
flags.DEFINE_integer('num_classes', 1001, 'Number of classes to distinguish')
flags.DEFINE_integer('number_of_steps', None,
                     'Number of training steps to perform before stopping')
flags.DEFINE_integer('image_size', 224, 'Input image resolution')
flags.DEFINE_float('depth_multiplier', 1.0, 'Depth multiplier for mobilenet')
flags.DEFINE_bool('quantize', False, 'Quantize training')
flags.DEFINE_string('fine_tune_checkpoint', '',
                    'Checkpoint from which to start finetuning.')
flags.DEFINE_string('checkpoint_dir', '',
                    'Directory for writing training checkpoints and logs')
flags.DEFINE_string('dataset_dir', '', 'Location of dataset')
flags.DEFINE_integer('log_every_n_steps', 100, 'Number of steps per log')
flags.DEFINE_integer('save_summaries_secs', 100,
                     'How often to save summaries, secs')
flags.DEFINE_integer('save_interval_secs', 100,
                     'How often to save checkpoints, secs')
FLAGS = flags.FLAGS
# NOTE(review): this constant is not referenced anywhere in the code visible
# here -- confirm it is used elsewhere before removing.
_LEARNING_RATE_DECAY_FACTOR = 0.94
def get_learning_rate():
  """Returns the initial learning rate for this run.

  Fine-tuning from an existing checkpoint starts at a much smaller rate
  than training from scratch, because the restored model is already far
  along in training.
  """
  fine_tuning = bool(FLAGS.fine_tune_checkpoint)
  return 1e-4 if fine_tuning else 0.045
def get_quant_delay():
  """Returns the global step at which quantized training should begin.

  A model being fine-tuned from a checkpoint can be quantized right away;
  a model trained from scratch first needs time to train in float before
  fake quantization kicks in.
  """
  fine_tuning = bool(FLAGS.fine_tune_checkpoint)
  return 0 if fine_tuning else 250000
def imagenet_input(is_training):
  """Data reader for imagenet.
  Reads in imagenet data and performs pre-processing on the images.
  Args:
    is_training: bool specifying if train or validation dataset is needed.
  Returns:
    A batch of images and labels.
  """
  # Select the dataset split matching the requested mode.
  if is_training:
    dataset = dataset_factory.get_dataset('imagenet', 'train',
                                          FLAGS.dataset_dir)
  else:
    dataset = dataset_factory.get_dataset('imagenet', 'validation',
                                          FLAGS.dataset_dir)
  # Only shuffle the example queue when training.
  provider = slim.dataset_data_provider.DatasetDataProvider(
      dataset,
      shuffle=is_training,
      common_queue_capacity=2 * FLAGS.batch_size,
      common_queue_min=FLAGS.batch_size)
  [image, label] = provider.get(['image', 'label'])
  # Apply the mobilenet_v1 preprocessing (resize/augmentation depends on
  # is_training) before batching.
  image_preprocessing_fn = preprocessing_factory.get_preprocessing(
      'mobilenet_v1', is_training=is_training)
  image = image_preprocessing_fn(image, FLAGS.image_size, FLAGS.image_size)
  images, labels = tf.train.batch(
      [image, label],
      batch_size=FLAGS.batch_size,
      num_threads=4,
      capacity=5 * FLAGS.batch_size)
  # One-hot encode the integer labels for softmax_cross_entropy.
  labels = slim.one_hot_encoding(labels, FLAGS.num_classes)
  return images, labels
def build_model():
  """Builds graph for model to train with rewrites for quantization.
  Returns:
    g: Graph with fake quantization ops and batch norm folding suitable for
    training quantized weights.
    train_tensor: Train op for execution during training.
  """
  g = tf.Graph()
  # Place variables via the replica device setter so the graph works both
  # locally (ps_tasks == 0) and in a parameter-server setup.
  with g.as_default(), tf.device(
      tf.train.replica_device_setter(FLAGS.ps_tasks)):
    inputs, labels = imagenet_input(is_training=True)
    with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope(is_training=True)):
      logits, _ = mobilenet_v1.mobilenet_v1(
          inputs,
          is_training=True,
          depth_multiplier=FLAGS.depth_multiplier,
          num_classes=FLAGS.num_classes)
    # Adds the cross-entropy loss to the tf.losses collection; picked up
    # below by get_total_loss().
    tf.losses.softmax_cross_entropy(labels, logits)
    # Call rewriter to produce graph with fake quant ops and folded batch norms
    # quant_delay delays start of quantization till quant_delay steps, allowing
    # for better model accuracy.
    if FLAGS.quantize:
      tf.contrib.quantize.create_training_graph(quant_delay=get_quant_delay())
    total_loss = tf.losses.get_total_loss(name='total_loss')
    # Configure the learning rate using an exponential decay.
    num_epochs_per_decay = 2.5
    imagenet_size = 1271167
    decay_steps = int(imagenet_size / FLAGS.batch_size * num_epochs_per_decay)
    learning_rate = tf.train.exponential_decay(
        get_learning_rate(),
        tf.train.get_or_create_global_step(),
        decay_steps,
        _LEARNING_RATE_DECAY_FACTOR,
        staircase=True)
    opt = tf.train.GradientDescentOptimizer(learning_rate)
    train_tensor = slim.learning.create_train_op(
        total_loss,
        optimizer=opt)
  # Summaries for monitoring in TensorBoard.
  slim.summaries.add_scalar_summary(total_loss, 'total_loss', 'losses')
  slim.summaries.add_scalar_summary(learning_rate, 'learning_rate', 'training')
  return g, train_tensor
def get_checkpoint_init_fn():
  """Returns the checkpoint init_fn if the checkpoint is provided."""
  # No fine-tune checkpoint: nothing to restore, let normal init run.
  if not FLAGS.fine_tune_checkpoint:
    return None
  variables_to_restore = slim.get_variables_to_restore()
  global_step_reset = tf.assign(tf.train.get_or_create_global_step(), 0)
  # A floating point checkpoint has no min/max variables for the quantized
  # weights and activations, so tell slim to skip variables that are
  # missing from the checkpoint (ignore_missing_vars=True).
  slim_init_fn = slim.assign_from_checkpoint_fn(
      FLAGS.fine_tune_checkpoint,
      variables_to_restore,
      ignore_missing_vars=True)
  def init_fn(sess):
    slim_init_fn(sess)
    # Reset the global step to zero after restoring a float model so the
    # exponential learning-rate decay starts from the beginning.
    sess.run(global_step_reset)
  return init_fn
def train_model():
  """Trains mobilenet_v1."""
  g, train_tensor = build_model()
  with g.as_default():
    # slim's training loop handles session management, checkpointing,
    # summaries, and (optionally) restoring from a fine-tune checkpoint.
    slim.learning.train(
        train_tensor,
        FLAGS.checkpoint_dir,
        is_chief=(FLAGS.task == 0),  # task 0 is the chief in a cluster
        master=FLAGS.master,
        log_every_n_steps=FLAGS.log_every_n_steps,
        graph=g,
        number_of_steps=FLAGS.number_of_steps,
        save_summaries_secs=FLAGS.save_summaries_secs,
        save_interval_secs=FLAGS.save_interval_secs,
        init_fn=get_checkpoint_init_fn(),
        global_step=tf.train.get_global_step())
def main(unused_arg):
  """App entry point: run the training loop (flags already parsed)."""
  train_model()
if __name__ == '__main__':
  tf.app.run(main)
| 7,494 | 34.187793 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/resnet_v1_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.resnet_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nets import resnet_utils
from nets import resnet_v1
slim = tf.contrib.slim
def create_test_input(batch_size, height, width, channels):
  """Create test input tensor.
  Args:
    batch_size: The number of images per batch or `None` if unknown.
    height: The height of each image or `None` if unknown.
    width: The width of each image or `None` if unknown.
    channels: The number of channels per image or `None` if unknown.
  Returns:
    Either a placeholder `Tensor` of dimension
    [batch_size, height, width, channels] if any of the inputs are `None` or a
    constant `Tensor` with the mesh grid values along the spatial dimensions.
  """
  dims = (batch_size, height, width, channels)
  if any(d is None for d in dims):
    # At least one dimension is unknown: fall back to a placeholder.
    return tf.placeholder(tf.float32, dims)
  # Deterministic mesh-grid values: element (h, w) holds h + w.
  col = np.reshape(np.arange(height), [height, 1])
  row = np.reshape(np.arange(width), [1, width])
  mesh = np.reshape(col + row, [1, height, width, 1])
  # Replicate the grid over the batch and channel dimensions.
  return tf.to_float(np.tile(mesh, [batch_size, 1, 1, channels]))
class ResnetUtilsTest(tf.test.TestCase):
  """Tests for resnet_utils helpers: subsample, conv2d_same, block stacking."""
  def testSubsampleThreeByThree(self):
    """Subsampling a 3x3 grid with factor 2 keeps the corner elements."""
    x = tf.reshape(tf.to_float(tf.range(9)), [1, 3, 3, 1])
    x = resnet_utils.subsample(x, 2)
    expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1])
    with self.test_session():
      self.assertAllClose(x.eval(), expected.eval())
  def testSubsampleFourByFour(self):
    """Subsampling a 4x4 grid with factor 2 keeps every other element."""
    x = tf.reshape(tf.to_float(tf.range(16)), [1, 4, 4, 1])
    x = resnet_utils.subsample(x, 2)
    expected = tf.reshape(tf.constant([0, 2, 8, 10]), [1, 2, 2, 1])
    with self.test_session():
      self.assertAllClose(x.eval(), expected.eval())
  def testConv2DSameEven(self):
    """conv2d_same with stride 2 on even input equals conv+subsample."""
    n, n2 = 4, 2
    # Input image.
    x = create_test_input(1, n, n, 1)
    # Convolution kernel.
    w = create_test_input(1, 3, 3, 1)
    w = tf.reshape(w, [3, 3, 1, 1])
    # Create the variables up front so the slim.conv2d calls below can
    # reuse them (same weights for every branch under comparison).
    tf.get_variable('Conv/weights', initializer=w)
    tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
    tf.get_variable_scope().reuse_variables()
    y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
    y1_expected = tf.to_float([[14, 28, 43, 26],
                               [28, 48, 66, 37],
                               [43, 66, 84, 46],
                               [26, 37, 46, 22]])
    y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
    y2 = resnet_utils.subsample(y1, 2)
    y2_expected = tf.to_float([[14, 43],
                               [43, 84]])
    y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
    # conv2d_same must match stride-1 conv followed by subsampling.
    y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
    y3_expected = y2_expected
    # Plain strided SAME conv differs on even input (different padding).
    y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
    y4_expected = tf.to_float([[48, 37],
                               [37, 22]])
    y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      self.assertAllClose(y1.eval(), y1_expected.eval())
      self.assertAllClose(y2.eval(), y2_expected.eval())
      self.assertAllClose(y3.eval(), y3_expected.eval())
      self.assertAllClose(y4.eval(), y4_expected.eval())
  def testConv2DSameOdd(self):
    """conv2d_same with stride 2 on odd input matches the plain SAME conv."""
    n, n2 = 5, 3
    # Input image.
    x = create_test_input(1, n, n, 1)
    # Convolution kernel.
    w = create_test_input(1, 3, 3, 1)
    w = tf.reshape(w, [3, 3, 1, 1])
    tf.get_variable('Conv/weights', initializer=w)
    tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
    tf.get_variable_scope().reuse_variables()
    y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
    y1_expected = tf.to_float([[14, 28, 43, 58, 34],
                               [28, 48, 66, 84, 46],
                               [43, 66, 84, 102, 55],
                               [58, 84, 102, 120, 64],
                               [34, 46, 55, 64, 30]])
    y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
    y2 = resnet_utils.subsample(y1, 2)
    y2_expected = tf.to_float([[14, 43, 34],
                               [43, 84, 55],
                               [34, 55, 30]])
    y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
    y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
    y3_expected = y2_expected
    # For odd input sizes all three variants agree.
    y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
    y4_expected = y2_expected
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      self.assertAllClose(y1.eval(), y1_expected.eval())
      self.assertAllClose(y2.eval(), y2_expected.eval())
      self.assertAllClose(y3.eval(), y3_expected.eval())
      self.assertAllClose(y4.eval(), y4_expected.eval())
  def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
    """A plain ResNet without extra layers before or after the ResNet blocks."""
    with tf.variable_scope(scope, values=[inputs]):
      with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
        net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
        end_points = slim.utils.convert_collection_to_dict('end_points')
    return net, end_points
  def testEndPointsV1(self):
    """Test the end points of a tiny v1 bottleneck network."""
    blocks = [
        resnet_v1.resnet_v1_block(
            'block1', base_depth=1, num_units=2, stride=2),
        resnet_v1.resnet_v1_block(
            'block2', base_depth=2, num_units=2, stride=1),
    ]
    inputs = create_test_input(2, 32, 16, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
    # Only the first unit of each block has a shortcut conv (depth change).
    expected = [
        'tiny/block1/unit_1/bottleneck_v1/shortcut',
        'tiny/block1/unit_1/bottleneck_v1/conv1',
        'tiny/block1/unit_1/bottleneck_v1/conv2',
        'tiny/block1/unit_1/bottleneck_v1/conv3',
        'tiny/block1/unit_2/bottleneck_v1/conv1',
        'tiny/block1/unit_2/bottleneck_v1/conv2',
        'tiny/block1/unit_2/bottleneck_v1/conv3',
        'tiny/block2/unit_1/bottleneck_v1/shortcut',
        'tiny/block2/unit_1/bottleneck_v1/conv1',
        'tiny/block2/unit_1/bottleneck_v1/conv2',
        'tiny/block2/unit_1/bottleneck_v1/conv3',
        'tiny/block2/unit_2/bottleneck_v1/conv1',
        'tiny/block2/unit_2/bottleneck_v1/conv2',
        'tiny/block2/unit_2/bottleneck_v1/conv3']
    self.assertItemsEqual(expected, end_points.keys())
  def _stack_blocks_nondense(self, net, blocks):
    """A simplified ResNet Block stacker without output stride control."""
    for block in blocks:
      with tf.variable_scope(block.scope, 'block', [net]):
        for i, unit in enumerate(block.args):
          with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
            net = block.unit_fn(net, rate=1, **unit)
    return net
  def testAtrousValuesBottleneck(self):
    """Verify the values of dense feature extraction by atrous convolution.
    Make sure that dense feature extraction by stack_blocks_dense() followed by
    subsampling gives identical results to feature extraction at the nominal
    network output stride using the simple self._stack_blocks_nondense() above.
    """
    block = resnet_v1.resnet_v1_block
    blocks = [
        block('block1', base_depth=1, num_units=2, stride=2),
        block('block2', base_depth=2, num_units=2, stride=2),
        block('block3', base_depth=4, num_units=2, stride=2),
        block('block4', base_depth=8, num_units=2, stride=1),
    ]
    nominal_stride = 8
    # Test both odd and even input dimensions.
    height = 30
    width = 31
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      with slim.arg_scope([slim.batch_norm], is_training=False):
        for output_stride in [1, 2, 4, 8, None]:
          with tf.Graph().as_default():
            with self.test_session() as sess:
              tf.set_random_seed(0)
              inputs = create_test_input(1, height, width, 3)
              # Dense feature extraction followed by subsampling.
              output = resnet_utils.stack_blocks_dense(inputs,
                                                       blocks,
                                                       output_stride)
              if output_stride is None:
                factor = 1
              else:
                factor = nominal_stride // output_stride
              output = resnet_utils.subsample(output, factor)
              # Make the two networks use the same weights.
              tf.get_variable_scope().reuse_variables()
              # Feature extraction at the nominal network rate.
              expected = self._stack_blocks_nondense(inputs, blocks)
              sess.run(tf.global_variables_initializer())
              output, expected = sess.run([output, expected])
              self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
  def testStridingLastUnitVsSubsampleBlockEnd(self):
    """Compares subsampling at the block's last unit or block's end.
    Makes sure that the final output is the same when we use a stride at the
    last unit of a block vs. we subsample activations at the end of a block.
    """
    block = resnet_v1.resnet_v1_block
    blocks = [
        block('block1', base_depth=1, num_units=2, stride=2),
        block('block2', base_depth=2, num_units=2, stride=2),
        block('block3', base_depth=4, num_units=2, stride=2),
        block('block4', base_depth=8, num_units=2, stride=1),
    ]
    # Test both odd and even input dimensions.
    height = 30
    width = 31
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      with slim.arg_scope([slim.batch_norm], is_training=False):
        for output_stride in [1, 2, 4, 8, None]:
          with tf.Graph().as_default():
            with self.test_session() as sess:
              tf.set_random_seed(0)
              inputs = create_test_input(1, height, width, 3)
              # Subsampling at the last unit of the block.
              output = resnet_utils.stack_blocks_dense(
                  inputs, blocks, output_stride,
                  store_non_strided_activations=False,
                  outputs_collections='output')
              output_end_points = slim.utils.convert_collection_to_dict(
                  'output')
              # Make the two networks use the same weights.
              tf.get_variable_scope().reuse_variables()
              # Subsample activations at the end of the blocks.
              expected = resnet_utils.stack_blocks_dense(
                  inputs, blocks, output_stride,
                  store_non_strided_activations=True,
                  outputs_collections='expected')
              expected_end_points = slim.utils.convert_collection_to_dict(
                  'expected')
              sess.run(tf.global_variables_initializer())
              # Make sure that the final output is the same.
              output, expected = sess.run([output, expected])
              self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
              # Make sure that intermediate block activations in
              # output_end_points are subsampled versions of the corresponding
              # ones in expected_end_points.
              for i, block in enumerate(blocks[:-1:]):
                output = output_end_points[block.scope]
                expected = expected_end_points[block.scope]
                atrous_activated = (output_stride is not None and
                                    2 ** i >= output_stride)
                if not atrous_activated:
                  expected = resnet_utils.subsample(expected, 2)
                output, expected = sess.run([output, expected])
                self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
class ResnetCompleteNetworkTest(tf.test.TestCase):
  """Tests with complete small ResNet v1 networks."""
  def _resnet_small(self,
                    inputs,
                    num_classes=None,
                    is_training=True,
                    global_pool=True,
                    output_stride=None,
                    include_root_block=True,
                    spatial_squeeze=True,
                    reuse=None,
                    scope='resnet_v1_small'):
    """A shallow and thin ResNet v1 for faster tests."""
    block = resnet_v1.resnet_v1_block
    blocks = [
        block('block1', base_depth=1, num_units=3, stride=2),
        block('block2', base_depth=2, num_units=3, stride=2),
        block('block3', base_depth=4, num_units=3, stride=2),
        block('block4', base_depth=8, num_units=2, stride=1),
    ]
    return resnet_v1.resnet_v1(inputs, blocks, num_classes,
                               is_training=is_training,
                               global_pool=global_pool,
                               output_stride=output_stride,
                               include_root_block=include_root_block,
                               spatial_squeeze=spatial_squeeze,
                               reuse=reuse,
                               scope=scope)
  def testClassificationEndPoints(self):
    """Logits / predictions / global_pool end points in classification mode."""
    global_pool = True
    num_classes = 10
    inputs = create_test_input(2, 224, 224, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      logits, end_points = self._resnet_small(inputs, num_classes,
                                              global_pool=global_pool,
                                              spatial_squeeze=False,
                                              scope='resnet')
    self.assertTrue(logits.op.name.startswith('resnet/logits'))
    self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
    self.assertTrue('predictions' in end_points)
    self.assertListEqual(end_points['predictions'].get_shape().as_list(),
                         [2, 1, 1, num_classes])
    self.assertTrue('global_pool' in end_points)
    self.assertListEqual(end_points['global_pool'].get_shape().as_list(),
                         [2, 1, 1, 32])
  def testClassificationEndPointsWithNoBatchNormArgscope(self):
    """Same end points when is_training=None (no batch-norm arg scope)."""
    global_pool = True
    num_classes = 10
    inputs = create_test_input(2, 224, 224, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      logits, end_points = self._resnet_small(inputs, num_classes,
                                              global_pool=global_pool,
                                              spatial_squeeze=False,
                                              is_training=None,
                                              scope='resnet')
    self.assertTrue(logits.op.name.startswith('resnet/logits'))
    self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
    self.assertTrue('predictions' in end_points)
    self.assertListEqual(end_points['predictions'].get_shape().as_list(),
                         [2, 1, 1, num_classes])
    self.assertTrue('global_pool' in end_points)
    self.assertListEqual(end_points['global_pool'].get_shape().as_list(),
                         [2, 1, 1, 32])
  def testEndpointNames(self):
    # Like ResnetUtilsTest.testEndPointsV1(), but for the public API.
    global_pool = True
    num_classes = 10
    inputs = create_test_input(2, 224, 224, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_small(inputs, num_classes,
                                         global_pool=global_pool,
                                         scope='resnet')
    expected = ['resnet/conv1']
    # Blocks 1-3 have 3 units each; block 4 has 2 (see _resnet_small).
    for block in range(1, 5):
      for unit in range(1, 4 if block < 4 else 3):
        for conv in range(1, 4):
          expected.append('resnet/block%d/unit_%d/bottleneck_v1/conv%d' %
                          (block, unit, conv))
        expected.append('resnet/block%d/unit_%d/bottleneck_v1' % (block, unit))
      expected.append('resnet/block%d/unit_1/bottleneck_v1/shortcut' % block)
      expected.append('resnet/block%d' % block)
    expected.extend(['global_pool', 'resnet/logits', 'resnet/spatial_squeeze',
                     'predictions'])
    self.assertItemsEqual(end_points.keys(), expected)
  def testClassificationShapes(self):
    """Per-block spatial shapes for a 224x224 classification network."""
    global_pool = True
    num_classes = 10
    inputs = create_test_input(2, 224, 224, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_small(inputs, num_classes,
                                         global_pool=global_pool,
                                         scope='resnet')
      endpoint_to_shape = {
          'resnet/block1': [2, 28, 28, 4],
          'resnet/block2': [2, 14, 14, 8],
          'resnet/block3': [2, 7, 7, 16],
          'resnet/block4': [2, 7, 7, 32]}
      for endpoint in endpoint_to_shape:
        shape = endpoint_to_shape[endpoint]
        self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
  def testFullyConvolutionalEndpointShapes(self):
    """Per-block shapes in fully-convolutional mode (321x321 input)."""
    global_pool = False
    num_classes = 10
    inputs = create_test_input(2, 321, 321, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_small(inputs, num_classes,
                                         global_pool=global_pool,
                                         spatial_squeeze=False,
                                         scope='resnet')
      endpoint_to_shape = {
          'resnet/block1': [2, 41, 41, 4],
          'resnet/block2': [2, 21, 21, 8],
          'resnet/block3': [2, 11, 11, 16],
          'resnet/block4': [2, 11, 11, 32]}
      for endpoint in endpoint_to_shape:
        shape = endpoint_to_shape[endpoint]
        self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
  def testRootlessFullyConvolutionalEndpointShapes(self):
    """Shapes without the root block (no initial conv/pool)."""
    global_pool = False
    num_classes = 10
    inputs = create_test_input(2, 128, 128, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_small(inputs, num_classes,
                                         global_pool=global_pool,
                                         include_root_block=False,
                                         spatial_squeeze=False,
                                         scope='resnet')
      endpoint_to_shape = {
          'resnet/block1': [2, 64, 64, 4],
          'resnet/block2': [2, 32, 32, 8],
          'resnet/block3': [2, 16, 16, 16],
          'resnet/block4': [2, 16, 16, 32]}
      for endpoint in endpoint_to_shape:
        shape = endpoint_to_shape[endpoint]
        self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
  def testAtrousFullyConvolutionalEndpointShapes(self):
    """Shapes with atrous convolution (output_stride=8 caps the downsampling)."""
    global_pool = False
    num_classes = 10
    output_stride = 8
    inputs = create_test_input(2, 321, 321, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_small(inputs,
                                         num_classes,
                                         global_pool=global_pool,
                                         output_stride=output_stride,
                                         spatial_squeeze=False,
                                         scope='resnet')
      endpoint_to_shape = {
          'resnet/block1': [2, 41, 41, 4],
          'resnet/block2': [2, 41, 41, 8],
          'resnet/block3': [2, 41, 41, 16],
          'resnet/block4': [2, 41, 41, 32]}
      for endpoint in endpoint_to_shape:
        shape = endpoint_to_shape[endpoint]
        self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
  def testAtrousFullyConvolutionalValues(self):
    """Verify dense feature extraction with atrous convolution."""
    nominal_stride = 32
    for output_stride in [4, 8, 16, 32, None]:
      with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        with tf.Graph().as_default():
          with self.test_session() as sess:
            tf.set_random_seed(0)
            inputs = create_test_input(2, 81, 81, 3)
            # Dense feature extraction followed by subsampling.
            output, _ = self._resnet_small(inputs, None, is_training=False,
                                           global_pool=False,
                                           output_stride=output_stride)
            if output_stride is None:
              factor = 1
            else:
              factor = nominal_stride // output_stride
            output = resnet_utils.subsample(output, factor)
            # Make the two networks use the same weights.
            tf.get_variable_scope().reuse_variables()
            # Feature extraction at the nominal network rate.
            expected, _ = self._resnet_small(inputs, None, is_training=False,
                                             global_pool=False)
            sess.run(tf.global_variables_initializer())
            self.assertAllClose(output.eval(), expected.eval(),
                                atol=1e-4, rtol=1e-4)
  def testUnknownBatchSize(self):
    """Graph builds with batch dimension None; feeding resolves the shape."""
    batch = 2
    height, width = 65, 65
    global_pool = True
    num_classes = 10
    inputs = create_test_input(None, height, width, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      logits, _ = self._resnet_small(inputs, num_classes,
                                     global_pool=global_pool,
                                     spatial_squeeze=False,
                                     scope='resnet')
    self.assertTrue(logits.op.name.startswith('resnet/logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [None, 1, 1, num_classes])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(logits, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch, 1, 1, num_classes))
  def testFullyConvolutionalUnknownHeightWidth(self):
    """Graph builds with unknown spatial dimensions in FCN mode."""
    batch = 2
    height, width = 65, 65
    global_pool = False
    inputs = create_test_input(batch, None, None, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      output, _ = self._resnet_small(inputs, None, global_pool=global_pool)
    self.assertListEqual(output.get_shape().as_list(),
                         [batch, None, None, 32])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(output, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch, 3, 3, 32))
  def testAtrousFullyConvolutionalUnknownHeightWidth(self):
    """Unknown spatial dims with atrous extraction (output_stride=8)."""
    batch = 2
    height, width = 65, 65
    global_pool = False
    output_stride = 8
    inputs = create_test_input(batch, None, None, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      output, _ = self._resnet_small(inputs,
                                     None,
                                     global_pool=global_pool,
                                     output_stride=output_stride)
    self.assertListEqual(output.get_shape().as_list(),
                         [batch, None, None, 32])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(output, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch, 9, 9, 32))
# Run all TestCase classes in this module when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 24,143 | 42.42446 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/vgg_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.vgg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import vgg
slim = tf.contrib.slim
class VGGATest(tf.test.TestCase):
  """Tests for the vgg_a (VGG-11) network definition."""
  def testBuild(self):
    """Logits op name and shape for the standard 224x224 classifier."""
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, _ = vgg.vgg_a(inputs, num_classes)
      self.assertEquals(logits.op.name, 'vgg_a/fc8/squeezed')
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
  def testFullyConvolutional(self):
    """Larger input without spatial squeeze yields a spatial logits map."""
    batch_size = 1
    height, width = 256, 256
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, _ = vgg.vgg_a(inputs, num_classes, spatial_squeeze=False)
      self.assertEquals(logits.op.name, 'vgg_a/fc8/BiasAdd')
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, 2, 2, num_classes])
  def testGlobalPool(self):
    """global_pool collapses the spatial map back to 1x1 logits."""
    batch_size = 1
    height, width = 256, 256
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, _ = vgg.vgg_a(inputs, num_classes, spatial_squeeze=False,
                            global_pool=True)
      self.assertEquals(logits.op.name, 'vgg_a/fc8/BiasAdd')
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, 1, 1, num_classes])
  def testEndPoints(self):
    """All expected end point names are present."""
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      _, end_points = vgg.vgg_a(inputs, num_classes)
      expected_names = ['vgg_a/conv1/conv1_1',
                        'vgg_a/pool1',
                        'vgg_a/conv2/conv2_1',
                        'vgg_a/pool2',
                        'vgg_a/conv3/conv3_1',
                        'vgg_a/conv3/conv3_2',
                        'vgg_a/pool3',
                        'vgg_a/conv4/conv4_1',
                        'vgg_a/conv4/conv4_2',
                        'vgg_a/pool4',
                        'vgg_a/conv5/conv5_1',
                        'vgg_a/conv5/conv5_2',
                        'vgg_a/pool5',
                        'vgg_a/fc6',
                        'vgg_a/fc7',
                        'vgg_a/fc8'
                       ]
      self.assertSetEqual(set(end_points.keys()), set(expected_names))
  def testNoClasses(self):
    """num_classes=None drops the fc8 head and returns fc7 features."""
    batch_size = 5
    height, width = 224, 224
    num_classes = None
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      net, end_points = vgg.vgg_a(inputs, num_classes)
      expected_names = ['vgg_a/conv1/conv1_1',
                        'vgg_a/pool1',
                        'vgg_a/conv2/conv2_1',
                        'vgg_a/pool2',
                        'vgg_a/conv3/conv3_1',
                        'vgg_a/conv3/conv3_2',
                        'vgg_a/pool3',
                        'vgg_a/conv4/conv4_1',
                        'vgg_a/conv4/conv4_2',
                        'vgg_a/pool4',
                        'vgg_a/conv5/conv5_1',
                        'vgg_a/conv5/conv5_2',
                        'vgg_a/pool5',
                        'vgg_a/fc6',
                        'vgg_a/fc7',
                       ]
      self.assertSetEqual(set(end_points.keys()), set(expected_names))
      self.assertTrue(net.op.name.startswith('vgg_a/fc7'))
  def testModelVariables(self):
    """Every conv/fc layer contributes a weights and a biases variable."""
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      vgg.vgg_a(inputs, num_classes)
      expected_names = ['vgg_a/conv1/conv1_1/weights',
                        'vgg_a/conv1/conv1_1/biases',
                        'vgg_a/conv2/conv2_1/weights',
                        'vgg_a/conv2/conv2_1/biases',
                        'vgg_a/conv3/conv3_1/weights',
                        'vgg_a/conv3/conv3_1/biases',
                        'vgg_a/conv3/conv3_2/weights',
                        'vgg_a/conv3/conv3_2/biases',
                        'vgg_a/conv4/conv4_1/weights',
                        'vgg_a/conv4/conv4_1/biases',
                        'vgg_a/conv4/conv4_2/weights',
                        'vgg_a/conv4/conv4_2/biases',
                        'vgg_a/conv5/conv5_1/weights',
                        'vgg_a/conv5/conv5_1/biases',
                        'vgg_a/conv5/conv5_2/weights',
                        'vgg_a/conv5/conv5_2/biases',
                        'vgg_a/fc6/weights',
                        'vgg_a/fc6/biases',
                        'vgg_a/fc7/weights',
                        'vgg_a/fc7/biases',
                        'vgg_a/fc8/weights',
                        'vgg_a/fc8/biases',
                       ]
      model_variables = [v.op.name for v in slim.get_model_variables()]
      self.assertSetEqual(set(model_variables), set(expected_names))
  def testEvaluation(self):
    """Eval-mode graph (is_training=False) has the expected output shapes."""
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    with self.test_session():
      eval_inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, _ = vgg.vgg_a(eval_inputs, is_training=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
      predictions = tf.argmax(logits, 1)
      self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
  def testTrainEvalWithReuse(self):
    """Variables built for training are reusable for a differently-sized eval."""
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_a(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_a(eval_inputs, is_training=False,
                            spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      # Average the 2x2 spatial logits map into per-image logits.
      logits = tf.reduce_mean(logits, [1, 2])
      predictions = tf.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
  def testForward(self):
    """A forward pass runs and produces non-zero output."""
    batch_size = 1
    height, width = 224, 224
    with self.test_session() as sess:
      inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, _ = vgg.vgg_a(inputs)
      sess.run(tf.global_variables_initializer())
      output = sess.run(logits)
      self.assertTrue(output.any())
class VGG16Test(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs, num_classes)
self.assertEquals(logits.op.name, 'vgg_16/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'vgg_16/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
def testGlobalPool(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs, num_classes, spatial_squeeze=False,
global_pool=True)
self.assertEquals(logits.op.name, 'vgg_16/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 1, 1, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = vgg.vgg_16(inputs, num_classes)
expected_names = ['vgg_16/conv1/conv1_1',
'vgg_16/conv1/conv1_2',
'vgg_16/pool1',
'vgg_16/conv2/conv2_1',
'vgg_16/conv2/conv2_2',
'vgg_16/pool2',
'vgg_16/conv3/conv3_1',
'vgg_16/conv3/conv3_2',
'vgg_16/conv3/conv3_3',
'vgg_16/pool3',
'vgg_16/conv4/conv4_1',
'vgg_16/conv4/conv4_2',
'vgg_16/conv4/conv4_3',
'vgg_16/pool4',
'vgg_16/conv5/conv5_1',
'vgg_16/conv5/conv5_2',
'vgg_16/conv5/conv5_3',
'vgg_16/pool5',
'vgg_16/fc6',
'vgg_16/fc7',
'vgg_16/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testNoClasses(self):
batch_size = 5
height, width = 224, 224
num_classes = None
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
net, end_points = vgg.vgg_16(inputs, num_classes)
expected_names = ['vgg_16/conv1/conv1_1',
'vgg_16/conv1/conv1_2',
'vgg_16/pool1',
'vgg_16/conv2/conv2_1',
'vgg_16/conv2/conv2_2',
'vgg_16/pool2',
'vgg_16/conv3/conv3_1',
'vgg_16/conv3/conv3_2',
'vgg_16/conv3/conv3_3',
'vgg_16/pool3',
'vgg_16/conv4/conv4_1',
'vgg_16/conv4/conv4_2',
'vgg_16/conv4/conv4_3',
'vgg_16/pool4',
'vgg_16/conv5/conv5_1',
'vgg_16/conv5/conv5_2',
'vgg_16/conv5/conv5_3',
'vgg_16/pool5',
'vgg_16/fc6',
'vgg_16/fc7',
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
self.assertTrue(net.op.name.startswith('vgg_16/fc7'))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
vgg.vgg_16(inputs, num_classes)
expected_names = ['vgg_16/conv1/conv1_1/weights',
'vgg_16/conv1/conv1_1/biases',
'vgg_16/conv1/conv1_2/weights',
'vgg_16/conv1/conv1_2/biases',
'vgg_16/conv2/conv2_1/weights',
'vgg_16/conv2/conv2_1/biases',
'vgg_16/conv2/conv2_2/weights',
'vgg_16/conv2/conv2_2/biases',
'vgg_16/conv3/conv3_1/weights',
'vgg_16/conv3/conv3_1/biases',
'vgg_16/conv3/conv3_2/weights',
'vgg_16/conv3/conv3_2/biases',
'vgg_16/conv3/conv3_3/weights',
'vgg_16/conv3/conv3_3/biases',
'vgg_16/conv4/conv4_1/weights',
'vgg_16/conv4/conv4_1/biases',
'vgg_16/conv4/conv4_2/weights',
'vgg_16/conv4/conv4_2/biases',
'vgg_16/conv4/conv4_3/weights',
'vgg_16/conv4/conv4_3/biases',
'vgg_16/conv5/conv5_1/weights',
'vgg_16/conv5/conv5_1/biases',
'vgg_16/conv5/conv5_2/weights',
'vgg_16/conv5/conv5_2/biases',
'vgg_16/conv5/conv5_3/weights',
'vgg_16/conv5/conv5_3/biases',
'vgg_16/fc6/weights',
'vgg_16/fc6/biases',
'vgg_16/fc7/weights',
'vgg_16/fc7/biases',
'vgg_16/fc8/weights',
'vgg_16/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 256, 256
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = vgg.vgg_16(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_16(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs)
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
class VGG19Test(tf.test.TestCase):
  """Tests for the vgg.vgg_19 model definition."""

  # Number of conv layers in each of the five VGG-19 conv blocks.
  _CONVS_PER_BLOCK = (2, 2, 4, 4, 4)

  def _conv_layer_names(self):
    """Returns the scoped names of all conv layers, in network order."""
    names = []
    for block, n_convs in enumerate(self._CONVS_PER_BLOCK, start=1):
      names.extend('vgg_19/conv%d/conv%d_%d' % (block, block, conv)
                   for conv in range(1, n_convs + 1))
    return names

  def _end_point_names(self, include_fc8):
    """Returns all expected end-point names.

    Args:
      include_fc8: whether the final logits layer (fc8) is present.

    Returns:
      A list of end-point name strings.
    """
    names = []
    for block, n_convs in enumerate(self._CONVS_PER_BLOCK, start=1):
      names.extend('vgg_19/conv%d/conv%d_%d' % (block, block, conv)
                   for conv in range(1, n_convs + 1))
      names.append('vgg_19/pool%d' % block)
    names.extend(['vgg_19/fc6', 'vgg_19/fc7'])
    if include_fc8:
      names.append('vgg_19/fc8')
    return names

  def testBuild(self):
    """Logits are squeezed to [batch, classes] by default."""
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, _ = vgg.vgg_19(inputs, num_classes)
      # assertEquals is a deprecated alias (removed in Python 3.12).
      self.assertEqual(logits.op.name, 'vgg_19/fc8/squeezed')
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])

  def testFullyConvolutional(self):
    """Without spatial squeeze, larger inputs yield spatial logits."""
    batch_size = 1
    height, width = 256, 256
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, _ = vgg.vgg_19(inputs, num_classes, spatial_squeeze=False)
      self.assertEqual(logits.op.name, 'vgg_19/fc8/BiasAdd')
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, 2, 2, num_classes])

  def testGlobalPool(self):
    """global_pool=True collapses spatial dimensions to 1x1."""
    batch_size = 1
    height, width = 256, 256
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, _ = vgg.vgg_19(inputs, num_classes, spatial_squeeze=False,
                             global_pool=True)
      self.assertEqual(logits.op.name, 'vgg_19/fc8/BiasAdd')
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, 1, 1, num_classes])

  def testEndPoints(self):
    """All conv/pool/fc end points are exposed."""
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      _, end_points = vgg.vgg_19(inputs, num_classes)
      self.assertSetEqual(set(end_points.keys()),
                          set(self._end_point_names(include_fc8=True)))

  def testNoClasses(self):
    """With num_classes=None the network ends at fc7 (no fc8)."""
    batch_size = 5
    height, width = 224, 224
    num_classes = None
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      net, end_points = vgg.vgg_19(inputs, num_classes)
      self.assertSetEqual(set(end_points.keys()),
                          set(self._end_point_names(include_fc8=False)))
      self.assertTrue(net.op.name.startswith('vgg_19/fc7'))

  def testModelVariables(self):
    """Every conv/fc layer contributes a weights and a biases variable."""
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      vgg.vgg_19(inputs, num_classes)
      layer_names = self._conv_layer_names() + [
          'vgg_19/fc6', 'vgg_19/fc7', 'vgg_19/fc8']
      expected_names = ['%s/%s' % (layer, suffix)
                        for layer in layer_names
                        for suffix in ('weights', 'biases')]
      model_variables = [v.op.name for v in slim.get_model_variables()]
      self.assertSetEqual(set(model_variables), set(expected_names))

  def testEvaluation(self):
    """In eval mode logits have a static [batch, classes] shape."""
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    with self.test_session():
      eval_inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, _ = vgg.vgg_19(eval_inputs, is_training=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
      predictions = tf.argmax(logits, 1)
      self.assertListEqual(predictions.get_shape().as_list(), [batch_size])

  def testTrainEvalWithReuse(self):
    """Train-time variables can be reused at a different eval resolution."""
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_19(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_19(eval_inputs, is_training=False,
                             spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = tf.reduce_mean(logits, [1, 2])
      predictions = tf.argmax(logits, 1)
      # assertEquals is deprecated; the values are lists, so compare as lists.
      self.assertListEqual(predictions.get_shape().as_list(),
                           [eval_batch_size])

  def testForward(self):
    """A forward pass produces a non-trivial output."""
    batch_size = 1
    height, width = 224, 224
    with self.test_session() as sess:
      inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, _ = vgg.vgg_19(inputs)
      sess.run(tf.global_variables_initializer())
      output = sess.run(logits)
      self.assertTrue(output.any())
# Run all test cases in this module when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 23,141 | 38.626712 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/resnet_v1.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the original form of Residual Networks.
The 'v1' residual networks (ResNets) implemented in this module were proposed
by:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Other variants were introduced in:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The networks defined in this module utilize the bottleneck building block of
[1] with projection shortcuts only for increasing depths. They employ batch
normalization *after* every weight layer. This is the architecture used by
MSRA in the Imagenet and MSCOCO 2016 competition models ResNet-101 and
ResNet-152. See [2; Fig. 1a] for a comparison between the current 'v1'
architecture and the alternative 'v2' architecture of [2] which uses batch
normalization *before* every weight layer in the so-called full pre-activation
units.
Typical use:
from tensorflow.contrib.slim.nets import resnet_v1
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import resnet_utils
resnet_arg_scope = resnet_utils.resnet_arg_scope
slim = tf.contrib.slim
class NoOpScope(object):
  """No-op context manager.

  Used in place of an `arg_scope` when `is_training` is None so the same
  `with` statement works whether or not a batch-norm scope is applied.
  """

  def __enter__(self):
    return None

  def __exit__(self, exc_type, exc_value, traceback):
    # Returning False propagates any exception raised inside the block.
    return False
@slim.add_arg_scope
def bottleneck(inputs,
               depth,
               depth_bottleneck,
               stride,
               rate=1,
               outputs_collections=None,
               scope=None,
               use_bounded_activations=False):
  """Bottleneck residual unit variant with BN after convolutions.

  This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
  its definition. Note that we use here the bottleneck variant which has an
  extra bottleneck layer.

  When putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the units output compared to its input.
    rate: An integer, rate for atrous convolution.
    outputs_collections: Collection to add the ResNet unit output.
    scope: Optional variable_scope.
    use_bounded_activations: Whether or not to use bounded activations. Bounded
      activations better lend themselves to quantized inference.

  Returns:
    The ResNet unit's output.
  """
  with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
    depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
    if depth == depth_in:
      # Identity shortcut: only spatial subsampling is needed to match stride.
      shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
    else:
      # Projection shortcut (1x1 conv) to match the requested output depth.
      shortcut = slim.conv2d(
          inputs,
          depth, [1, 1],
          stride=stride,
          activation_fn=tf.nn.relu6 if use_bounded_activations else None,
          scope='shortcut')

    # Bottleneck: 1x1 reduce -> 3x3 (strided/atrous) -> 1x1 expand.
    residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,
                           scope='conv1')
    residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
                                        rate=rate, scope='conv2')
    # No activation on the last conv: the nonlinearity comes after the add.
    residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                           activation_fn=None, scope='conv3')

    if use_bounded_activations:
      # Use clip_by_value to simulate bandpass activation.
      residual = tf.clip_by_value(residual, -6.0, 6.0)
      output = tf.nn.relu6(shortcut + residual)
    else:
      output = tf.nn.relu(shortcut + residual)

    return slim.utils.collect_named_outputs(outputs_collections,
                                            sc.name,
                                            output)
def resnet_v1(inputs,
              blocks,
              num_classes=None,
              is_training=True,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              spatial_squeeze=True,
              store_non_strided_activations=False,
              reuse=None,
              scope=None):
  """Generator for v1 ResNet models.

  This function generates a family of ResNet v1 models. See the resnet_v1_*()
  methods for specific model instantiations, obtained by selecting different
  block instantiations that produce ResNets of various depths.

  Training for image classification on Imagenet is usually done with [224, 224]
  inputs, resulting in [7, 7] feature maps at the output of the last ResNet
  block for the ResNets defined in [1] that have nominal stride equal to 32.
  However, for dense prediction tasks we advise that one uses inputs with
  spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
  this case the feature maps at the ResNet output will have spatial shape
  [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
  and corners exactly aligned with the input image corners, which greatly
  facilitates alignment of the features to the image. Using as input [225, 225]
  images results in [8, 8] feature maps at the output of the last ResNet block.

  For dense prediction tasks, the ResNet needs to run in fully-convolutional
  (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
  have nominal stride equal to 32 and a good choice in FCN mode is to use
  output_stride=16 in order to increase the density of the computed features at
  small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.

  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    blocks: A list of length equal to the number of ResNet blocks. Each element
      is a resnet_utils.Block object describing the units in the block.
    num_classes: Number of predicted classes for classification tasks.
      If 0 or None, we return the features before the logit layer.
    is_training: whether batch_norm layers are in training mode. If this is set
      to None, the callers can specify slim.batch_norm's is_training parameter
      from an outer slim.arg_scope.
    global_pool: If True, we perform global average pooling before computing the
      logits. Set to True for image classification, False for dense prediction.
    output_stride: If None, then the output will be computed at the nominal
      network stride. If output_stride is not None, it specifies the requested
      ratio of input to output spatial resolution.
    include_root_block: If True, include the initial convolution followed by
      max-pooling, if False excludes it.
    spatial_squeeze: if True, logits is of shape [B, C], if false logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
      To use this parameter, the input images must be smaller than 300x300
      pixels, in which case the output logit layer does not contain spatial
      information and can be removed.
    store_non_strided_activations: If True, we compute non-strided (undecimated)
      activations at the last unit of each block and store them in the
      `outputs_collections` before subsampling them. This gives us access to
      higher resolution intermediate activations which are useful in some
      dense prediction problems but increases 4x the computation and memory cost
      at the last unit of each block.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
      If global_pool is False, then height_out and width_out are reduced by a
      factor of output_stride compared to the respective height_in and width_in,
      else both height_out and width_out equal one. If num_classes is 0 or None,
      then net is the output of the last ResNet block, potentially after global
      average pooling. If num_classes a non-zero integer, net contains the
      pre-softmax activations.
    end_points: A dictionary from components of the network to the corresponding
      activation.

  Raises:
    ValueError: If the target output_stride is not valid.
  """
  with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    with slim.arg_scope([slim.conv2d, bottleneck,
                         resnet_utils.stack_blocks_dense],
                        outputs_collections=end_points_collection):
      # When is_training is None, batch-norm mode is controlled by an outer
      # arg_scope, so we enter a no-op context instead.
      with (slim.arg_scope([slim.batch_norm], is_training=is_training)
            if is_training is not None else NoOpScope()):
        net = inputs
        if include_root_block:
          if output_stride is not None:
            if output_stride % 4 != 0:
              raise ValueError('The output_stride needs to be a multiple of 4.')
            # The root block already downsamples by 4 (stride-2 conv followed
            # by stride-2 max-pool), so the remaining blocks only need to
            # provide output_stride / 4.
            output_stride /= 4
          net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
          net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride,
                                              store_non_strided_activations)
        # Convert end_points_collection into a dictionary of end_points.
        end_points = slim.utils.convert_collection_to_dict(
            end_points_collection)

        if global_pool:
          # Global average pooling.
          net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
          end_points['global_pool'] = net
        if num_classes:
          # 1x1 conv acts as the final fully-connected logits layer.
          net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                            normalizer_fn=None, scope='logits')
          end_points[sc.name + '/logits'] = net
          if spatial_squeeze:
            net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
            end_points[sc.name + '/spatial_squeeze'] = net
          end_points['predictions'] = slim.softmax(net, scope='predictions')
        return net, end_points
# Nominal input resolution used for ImageNet classification.
resnet_v1.default_image_size = 224
def resnet_v1_block(scope, base_depth, num_units, stride):
  """Helper function for creating a resnet_v1 bottleneck block.

  Args:
    scope: The scope of the block.
    base_depth: The depth of the bottleneck layer for each unit.
    num_units: The number of units in the block.
    stride: The stride of the block, implemented as a stride in the last unit.
      All other units have stride=1.

  Returns:
    A resnet_v1 bottleneck block.
  """
  def unit_args(unit_stride):
    # Each bottleneck unit expands to 4x the bottleneck depth on output.
    return {
        'depth': base_depth * 4,
        'depth_bottleneck': base_depth,
        'stride': unit_stride,
    }

  # All units use stride 1 except the last, which applies the block stride.
  args = [unit_args(1) for _ in range(num_units - 1)] + [unit_args(stride)]
  return resnet_utils.Block(scope, bottleneck, args)
def resnet_v1_50(inputs,
                 num_classes=None,
                 is_training=True,
                 global_pool=True,
                 output_stride=None,
                 spatial_squeeze=True,
                 store_non_strided_activations=False,
                 reuse=None,
                 scope='resnet_v1_50'):
  """ResNet-50 model of [1]. See resnet_v1() for arg and return description."""
  # (base_depth, num_units, stride) for blocks 1-4 of ResNet-50.
  block_specs = [(64, 3, 2), (128, 4, 2), (256, 6, 2), (512, 3, 1)]
  blocks = [
      resnet_v1_block('block%d' % (i + 1), base_depth=depth,
                      num_units=units, stride=block_stride)
      for i, (depth, units, block_stride) in enumerate(block_specs)
  ]
  return resnet_v1(inputs, blocks, num_classes, is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   store_non_strided_activations=store_non_strided_activations,
                   reuse=reuse, scope=scope)
resnet_v1_50.default_image_size = resnet_v1.default_image_size
def resnet_v1_101(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  store_non_strided_activations=False,
                  reuse=None,
                  scope='resnet_v1_101'):
  """ResNet-101 model of [1]. See resnet_v1() for arg and return description."""
  # (base_depth, num_units, stride) for blocks 1-4 of ResNet-101.
  block_specs = [(64, 3, 2), (128, 4, 2), (256, 23, 2), (512, 3, 1)]
  blocks = [
      resnet_v1_block('block%d' % (i + 1), base_depth=depth,
                      num_units=units, stride=block_stride)
      for i, (depth, units, block_stride) in enumerate(block_specs)
  ]
  return resnet_v1(inputs, blocks, num_classes, is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   store_non_strided_activations=store_non_strided_activations,
                   reuse=reuse, scope=scope)
resnet_v1_101.default_image_size = resnet_v1.default_image_size
def resnet_v1_152(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  store_non_strided_activations=False,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v1_152'):
  """ResNet-152 model of [1]. See resnet_v1() for arg and return description."""
  # (base_depth, num_units, stride) for blocks 1-4 of ResNet-152.
  block_specs = [(64, 3, 2), (128, 8, 2), (256, 36, 2), (512, 3, 1)]
  blocks = [
      resnet_v1_block('block%d' % (i + 1), base_depth=depth,
                      num_units=units, stride=block_stride)
      for i, (depth, units, block_stride) in enumerate(block_specs)
  ]
  return resnet_v1(inputs, blocks, num_classes, is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   store_non_strided_activations=store_non_strided_activations,
                   reuse=reuse, scope=scope)
resnet_v1_152.default_image_size = resnet_v1.default_image_size
def resnet_v1_200(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  store_non_strided_activations=False,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v1_200'):
  """ResNet-200 model of [2]. See resnet_v1() for arg and return description."""
  # (base_depth, num_units, stride) for blocks 1-4 of ResNet-200.
  block_specs = [(64, 3, 2), (128, 24, 2), (256, 36, 2), (512, 3, 1)]
  blocks = [
      resnet_v1_block('block%d' % (i + 1), base_depth=depth,
                      num_units=units, stride=block_stride)
      for i, (depth, units, block_stride) in enumerate(block_specs)
  ]
  return resnet_v1(inputs, blocks, num_classes, is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   store_non_strided_activations=store_non_strided_activations,
                   reuse=reuse, scope=scope)
resnet_v1_200.default_image_size = resnet_v1.default_image_size
| 16,861 | 43.845745 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/cifarnet.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a variant of the CIFAR-10 model definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(stddev=stddev)
def cifarnet(images, num_classes=10, is_training=False,
             dropout_keep_prob=0.5,
             prediction_fn=slim.softmax,
             scope='CifarNet'):
  """Creates a variant of the CifarNet model.

  Note that since the output is a set of 'logits', the values fall in the
  interval of (-infinity, infinity). Consequently, to convert the outputs to a
  probability distribution over the characters, one will need to convert them
  using the softmax function:

        logits = cifarnet.cifarnet(images, is_training=False)
        probabilities = tf.nn.softmax(logits)
        predictions = tf.argmax(logits, 1)

  Args:
    images: A batch of `Tensors` of size [batch_size, height, width, channels].
    num_classes: the number of classes in the dataset. If 0 or None, the logits
      layer is omitted and the input features to the logits layer are returned
      instead.
    is_training: specifies whether or not we're currently training the model.
      This variable will determine the behaviour of the dropout layer.
    dropout_keep_prob: the percentage of activation values that are retained.
    prediction_fn: a function to get predictions out of logits.
    scope: Optional variable_scope.

  Returns:
    net: a 2D Tensor with the logits (pre-softmax activations) if num_classes
      is a non-zero integer, or the input to the logits layer if num_classes
      is 0 or None.
    end_points: a dictionary from components of the network to the corresponding
      activation.
  """
  end_points = {}

  with tf.variable_scope(scope, 'CifarNet', [images]):
    # Two conv/pool stages, each followed by local response normalization.
    net = slim.conv2d(images, 64, [5, 5], scope='conv1')
    end_points['conv1'] = net
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
    end_points['pool1'] = net
    net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm1')
    net = slim.conv2d(net, 64, [5, 5], scope='conv2')
    end_points['conv2'] = net
    net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm2')
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
    end_points['pool2'] = net
    net = slim.flatten(net)
    end_points['Flatten'] = net
    net = slim.fully_connected(net, 384, scope='fc3')
    end_points['fc3'] = net
    net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                       scope='dropout3')
    net = slim.fully_connected(net, 192, scope='fc4')
    end_points['fc4'] = net
    # With num_classes falsy (0 or None), return pre-logit features.
    if not num_classes:
      return net, end_points
    logits = slim.fully_connected(net, num_classes,
                                  biases_initializer=tf.zeros_initializer(),
                                  weights_initializer=trunc_normal(1/192.0),
                                  weights_regularizer=None,
                                  activation_fn=None,
                                  scope='logits')

    end_points['Logits'] = logits
    end_points['Predictions'] = prediction_fn(logits, scope='Predictions')

  return logits, end_points
cifarnet.default_image_size = 32
def cifarnet_arg_scope(weight_decay=0.004):
  """Defines the default cifarnet argument scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.

  Returns:
    An `arg_scope` to use for the cifarnet model.
  """
  # Conv layers: truncated-normal init, ReLU, no weight decay.
  with slim.arg_scope(
      [slim.conv2d],
      weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
      activation_fn=tf.nn.relu):
    # Fully-connected layers additionally get L2 weight decay and a small
    # positive bias init.
    with slim.arg_scope(
        [slim.fully_connected],
        biases_initializer=tf.constant_initializer(0.1),
        weights_initializer=trunc_normal(0.04),
        weights_regularizer=slim.l2_regularizer(weight_decay),
        activation_fn=tf.nn.relu) as sc:
      return sc
| 4,683 | 38.694915 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/resnet_v2_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.resnet_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nets import resnet_utils
from nets import resnet_v2
slim = tf.contrib.slim
def create_test_input(batch_size, height, width, channels):
  """Create test input tensor.

  Args:
    batch_size: The number of images per batch or `None` if unknown.
    height: The height of each image or `None` if unknown.
    width: The width of each image or `None` if unknown.
    channels: The number of channels per image or `None` if unknown.

  Returns:
    Either a placeholder `Tensor` of dimension
      [batch_size, height, width, channels] if any of the inputs are `None` or a
      constant `Tensor` with the mesh grid values along the spatial dimensions.
  """
  dims = (batch_size, height, width, channels)
  if any(d is None for d in dims):
    # Unknown dimensions: fall back to a placeholder with the partial shape.
    return tf.placeholder(tf.float32, dims)
  # Known dimensions: a constant mesh grid, identical across batch/channels.
  grid = (np.reshape(np.arange(height), [height, 1]) +
          np.reshape(np.arange(width), [1, width]))
  grid = np.reshape(grid, [1, height, width, 1])
  return tf.to_float(np.tile(grid, [batch_size, 1, 1, channels]))
class ResnetUtilsTest(tf.test.TestCase):
def testSubsampleThreeByThree(self):
x = tf.reshape(tf.to_float(tf.range(9)), [1, 3, 3, 1])
x = resnet_utils.subsample(x, 2)
expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
def testSubsampleFourByFour(self):
x = tf.reshape(tf.to_float(tf.range(16)), [1, 4, 4, 1])
x = resnet_utils.subsample(x, 2)
expected = tf.reshape(tf.constant([0, 2, 8, 10]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
  def testConv2DSameEven(self):
    """conv2d_same on even-sized input matches the subsampled SAME conv.

    For even input sizes, a plain stride-2 SAME conv (y4) does NOT equal the
    subsampled stride-1 conv (y2), while conv2d_same (y3) does.
    """
    n, n2 = 4, 2

    # Input image.
    x = create_test_input(1, n, n, 1)

    # Convolution kernel.
    w = create_test_input(1, 3, 3, 1)
    w = tf.reshape(w, [3, 3, 1, 1])

    # Pre-create the conv variables so all slim.conv2d calls below reuse them.
    tf.get_variable('Conv/weights', initializer=w)
    tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
    tf.get_variable_scope().reuse_variables()

    y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
    y1_expected = tf.to_float([[14, 28, 43, 26],
                               [28, 48, 66, 37],
                               [43, 66, 84, 46],
                               [26, 37, 46, 22]])
    y1_expected = tf.reshape(y1_expected, [1, n, n, 1])

    y2 = resnet_utils.subsample(y1, 2)
    y2_expected = tf.to_float([[14, 43],
                               [43, 84]])
    y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])

    y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
    y3_expected = y2_expected

    y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
    y4_expected = tf.to_float([[48, 37],
                               [37, 22]])
    y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      self.assertAllClose(y1.eval(), y1_expected.eval())
      self.assertAllClose(y2.eval(), y2_expected.eval())
      self.assertAllClose(y3.eval(), y3_expected.eval())
      self.assertAllClose(y4.eval(), y4_expected.eval())
  def testConv2DSameOdd(self):
    """conv2d_same on odd-sized input matches both SAME-conv variants.

    For odd input sizes, the subsampled stride-1 conv (y2), conv2d_same (y3)
    and the plain stride-2 SAME conv (y4) all agree.
    """
    n, n2 = 5, 3

    # Input image.
    x = create_test_input(1, n, n, 1)

    # Convolution kernel.
    w = create_test_input(1, 3, 3, 1)
    w = tf.reshape(w, [3, 3, 1, 1])

    # Pre-create the conv variables so all slim.conv2d calls below reuse them.
    tf.get_variable('Conv/weights', initializer=w)
    tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
    tf.get_variable_scope().reuse_variables()

    y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
    y1_expected = tf.to_float([[14, 28, 43, 58, 34],
                               [28, 48, 66, 84, 46],
                               [43, 66, 84, 102, 55],
                               [58, 84, 102, 120, 64],
                               [34, 46, 55, 64, 30]])
    y1_expected = tf.reshape(y1_expected, [1, n, n, 1])

    y2 = resnet_utils.subsample(y1, 2)
    y2_expected = tf.to_float([[14, 43, 34],
                               [43, 84, 55],
                               [34, 55, 30]])
    y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])

    y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
    y3_expected = y2_expected

    y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
    y4_expected = y2_expected

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      self.assertAllClose(y1.eval(), y1_expected.eval())
      self.assertAllClose(y2.eval(), y2_expected.eval())
      self.assertAllClose(y3.eval(), y3_expected.eval())
      self.assertAllClose(y4.eval(), y4_expected.eval())
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
"""A plain ResNet without extra layers before or after the ResNet blocks."""
with tf.variable_scope(scope, values=[inputs]):
with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
end_points = slim.utils.convert_collection_to_dict('end_points')
return net, end_points
def testEndPointsV2(self):
"""Test the end points of a tiny v2 bottleneck network."""
blocks = [
resnet_v2.resnet_v2_block(
'block1', base_depth=1, num_units=2, stride=2),
resnet_v2.resnet_v2_block(
'block2', base_depth=2, num_units=2, stride=1),
]
inputs = create_test_input(2, 32, 16, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
expected = [
'tiny/block1/unit_1/bottleneck_v2/shortcut',
'tiny/block1/unit_1/bottleneck_v2/conv1',
'tiny/block1/unit_1/bottleneck_v2/conv2',
'tiny/block1/unit_1/bottleneck_v2/conv3',
'tiny/block1/unit_2/bottleneck_v2/conv1',
'tiny/block1/unit_2/bottleneck_v2/conv2',
'tiny/block1/unit_2/bottleneck_v2/conv3',
'tiny/block2/unit_1/bottleneck_v2/shortcut',
'tiny/block2/unit_1/bottleneck_v2/conv1',
'tiny/block2/unit_1/bottleneck_v2/conv2',
'tiny/block2/unit_1/bottleneck_v2/conv3',
'tiny/block2/unit_2/bottleneck_v2/conv1',
'tiny/block2/unit_2/bottleneck_v2/conv2',
'tiny/block2/unit_2/bottleneck_v2/conv3']
self.assertItemsEqual(expected, end_points.keys())
def _stack_blocks_nondense(self, net, blocks):
"""A simplified ResNet Block stacker without output stride control."""
for block in blocks:
with tf.variable_scope(block.scope, 'block', [net]):
for i, unit in enumerate(block.args):
with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
net = block.unit_fn(net, rate=1, **unit)
return net
  def testAtrousValuesBottleneck(self):
    """Verify the values of dense feature extraction by atrous convolution.

    Make sure that dense feature extraction by stack_blocks_dense() followed by
    subsampling gives identical results to feature extraction at the nominal
    network output stride using the simple self._stack_blocks_nondense() above.
    """
    block = resnet_v2.resnet_v2_block
    blocks = [
        block('block1', base_depth=1, num_units=2, stride=2),
        block('block2', base_depth=2, num_units=2, stride=2),
        block('block3', base_depth=4, num_units=2, stride=2),
        block('block4', base_depth=8, num_units=2, stride=1),
    ]
    # Three stride-2 blocks above give a nominal output stride of 8.
    nominal_stride = 8
    # Test both odd and even input dimensions.
    height = 30
    width = 31
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      with slim.arg_scope([slim.batch_norm], is_training=False):
        for output_stride in [1, 2, 4, 8, None]:
          # Fresh graph per stride so variables are re-created each time.
          with tf.Graph().as_default():
            with self.test_session() as sess:
              tf.set_random_seed(0)
              inputs = create_test_input(1, height, width, 3)
              # Dense feature extraction followed by subsampling.
              output = resnet_utils.stack_blocks_dense(inputs,
                                                       blocks,
                                                       output_stride)
              if output_stride is None:
                factor = 1
              else:
                factor = nominal_stride // output_stride
              output = resnet_utils.subsample(output, factor)
              # Make the two networks use the same weights. Reuse must be
              # enabled only AFTER the first network created the variables.
              tf.get_variable_scope().reuse_variables()
              # Feature extraction at the nominal network rate.
              expected = self._stack_blocks_nondense(inputs, blocks)
              sess.run(tf.global_variables_initializer())
              output, expected = sess.run([output, expected])
              self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
class ResnetCompleteNetworkTest(tf.test.TestCase):
  """Tests with complete small ResNet v2 networks."""

  def _resnet_small(self,
                    inputs,
                    num_classes=None,
                    is_training=True,
                    global_pool=True,
                    output_stride=None,
                    include_root_block=True,
                    spatial_squeeze=True,
                    reuse=None,
                    scope='resnet_v2_small'):
    """A shallow and thin ResNet v2 for faster tests."""
    block = resnet_v2.resnet_v2_block
    # 3+3+3+2 bottleneck units; the tests below assume this network has a
    # nominal output stride of 32 (see testAtrousFullyConvolutionalValues).
    blocks = [
        block('block1', base_depth=1, num_units=3, stride=2),
        block('block2', base_depth=2, num_units=3, stride=2),
        block('block3', base_depth=4, num_units=3, stride=2),
        block('block4', base_depth=8, num_units=2, stride=1),
    ]
    return resnet_v2.resnet_v2(inputs, blocks, num_classes,
                               is_training=is_training,
                               global_pool=global_pool,
                               output_stride=output_stride,
                               include_root_block=include_root_block,
                               spatial_squeeze=spatial_squeeze,
                               reuse=reuse,
                               scope=scope)

  def testClassificationEndPoints(self):
    """Logits/predictions keep [N, 1, 1, C] shape with spatial_squeeze=False."""
    global_pool = True
    num_classes = 10
    inputs = create_test_input(2, 224, 224, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      logits, end_points = self._resnet_small(inputs, num_classes,
                                              global_pool=global_pool,
                                              spatial_squeeze=False,
                                              scope='resnet')
    self.assertTrue(logits.op.name.startswith('resnet/logits'))
    self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
    self.assertTrue('predictions' in end_points)
    self.assertListEqual(end_points['predictions'].get_shape().as_list(),
                         [2, 1, 1, num_classes])
    self.assertTrue('global_pool' in end_points)
    # block4 depth is base_depth=8 * 4 = 32 channels after global pooling.
    self.assertListEqual(end_points['global_pool'].get_shape().as_list(),
                         [2, 1, 1, 32])

  def testEndpointNames(self):
    """Checks the complete set of end point names exposed by the public API."""
    # Like ResnetUtilsTest.testEndPointsV2(), but for the public API.
    global_pool = True
    num_classes = 10
    inputs = create_test_input(2, 224, 224, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_small(inputs, num_classes,
                                         global_pool=global_pool,
                                         scope='resnet')
    expected = ['resnet/conv1']
    for block in range(1, 5):
      # Blocks 1-3 have 3 units each, block 4 has 2 (see _resnet_small).
      for unit in range(1, 4 if block < 4 else 3):
        for conv in range(1, 4):
          expected.append('resnet/block%d/unit_%d/bottleneck_v2/conv%d' %
                          (block, unit, conv))
        expected.append('resnet/block%d/unit_%d/bottleneck_v2' % (block, unit))
      expected.append('resnet/block%d/unit_1/bottleneck_v2/shortcut' % block)
      expected.append('resnet/block%d' % block)
    expected.extend(['global_pool', 'resnet/logits', 'resnet/spatial_squeeze',
                     'predictions'])
    self.assertItemsEqual(end_points.keys(), expected)

  def testClassificationShapes(self):
    """Per-block feature map shapes for a 224x224 classification input."""
    global_pool = True
    num_classes = 10
    inputs = create_test_input(2, 224, 224, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_small(inputs, num_classes,
                                         global_pool=global_pool,
                                         scope='resnet')
      endpoint_to_shape = {
          'resnet/block1': [2, 28, 28, 4],
          'resnet/block2': [2, 14, 14, 8],
          'resnet/block3': [2, 7, 7, 16],
          'resnet/block4': [2, 7, 7, 32]}
      for endpoint in endpoint_to_shape:
        shape = endpoint_to_shape[endpoint]
        self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

  def testFullyConvolutionalEndpointShapes(self):
    """Per-block shapes in fully convolutional mode (no global pooling)."""
    global_pool = False
    num_classes = 10
    inputs = create_test_input(2, 321, 321, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_small(inputs, num_classes,
                                         global_pool=global_pool,
                                         spatial_squeeze=False,
                                         scope='resnet')
      endpoint_to_shape = {
          'resnet/block1': [2, 41, 41, 4],
          'resnet/block2': [2, 21, 21, 8],
          'resnet/block3': [2, 11, 11, 16],
          'resnet/block4': [2, 11, 11, 32]}
      for endpoint in endpoint_to_shape:
        shape = endpoint_to_shape[endpoint]
        self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

  def testRootlessFullyConvolutionalEndpointShapes(self):
    """Shapes without the root block; only the block strides downsample."""
    global_pool = False
    num_classes = 10
    inputs = create_test_input(2, 128, 128, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_small(inputs, num_classes,
                                         global_pool=global_pool,
                                         include_root_block=False,
                                         spatial_squeeze=False,
                                         scope='resnet')
      endpoint_to_shape = {
          'resnet/block1': [2, 64, 64, 4],
          'resnet/block2': [2, 32, 32, 8],
          'resnet/block3': [2, 16, 16, 16],
          'resnet/block4': [2, 16, 16, 32]}
      for endpoint in endpoint_to_shape:
        shape = endpoint_to_shape[endpoint]
        self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

  def testAtrousFullyConvolutionalEndpointShapes(self):
    """With output_stride=8, blocks 2-4 keep the block1 spatial resolution."""
    global_pool = False
    num_classes = 10
    output_stride = 8
    inputs = create_test_input(2, 321, 321, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_small(inputs,
                                         num_classes,
                                         global_pool=global_pool,
                                         output_stride=output_stride,
                                         spatial_squeeze=False,
                                         scope='resnet')
      endpoint_to_shape = {
          'resnet/block1': [2, 41, 41, 4],
          'resnet/block2': [2, 41, 41, 8],
          'resnet/block3': [2, 41, 41, 16],
          'resnet/block4': [2, 41, 41, 32]}
      for endpoint in endpoint_to_shape:
        shape = endpoint_to_shape[endpoint]
        self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

  def testAtrousFullyConvolutionalValues(self):
    """Verify dense feature extraction with atrous convolution."""
    nominal_stride = 32
    for output_stride in [4, 8, 16, 32, None]:
      with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        # Fresh graph per stride so variables are re-created each time.
        with tf.Graph().as_default():
          with self.test_session() as sess:
            tf.set_random_seed(0)
            inputs = create_test_input(2, 81, 81, 3)
            # Dense feature extraction followed by subsampling.
            output, _ = self._resnet_small(inputs, None,
                                           is_training=False,
                                           global_pool=False,
                                           output_stride=output_stride)
            if output_stride is None:
              factor = 1
            else:
              factor = nominal_stride // output_stride
            output = resnet_utils.subsample(output, factor)
            # Make the two networks use the same weights. Reuse must be
            # enabled only AFTER the first network created the variables.
            tf.get_variable_scope().reuse_variables()
            # Feature extraction at the nominal network rate.
            expected, _ = self._resnet_small(inputs, None,
                                             is_training=False,
                                             global_pool=False)
            sess.run(tf.global_variables_initializer())
            self.assertAllClose(output.eval(), expected.eval(),
                                atol=1e-4, rtol=1e-4)

  def testUnknownBatchSize(self):
    """The graph builds and runs when the batch dimension is None."""
    batch = 2
    height, width = 65, 65
    global_pool = True
    num_classes = 10
    # batch=None: static shape is unknown along the first dimension.
    inputs = create_test_input(None, height, width, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      logits, _ = self._resnet_small(inputs, num_classes,
                                     global_pool=global_pool,
                                     spatial_squeeze=False,
                                     scope='resnet')
    self.assertTrue(logits.op.name.startswith('resnet/logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [None, 1, 1, num_classes])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      # Feed a concrete batch; the dynamic shape must be fully resolved.
      output = sess.run(logits, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch, 1, 1, num_classes))

  def testFullyConvolutionalUnknownHeightWidth(self):
    """The graph builds and runs when height/width are None."""
    batch = 2
    height, width = 65, 65
    global_pool = False
    inputs = create_test_input(batch, None, None, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      output, _ = self._resnet_small(inputs, None,
                                     global_pool=global_pool)
    self.assertListEqual(output.get_shape().as_list(),
                         [batch, None, None, 32])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      # 65x65 input at nominal stride 32 yields a 3x3 feature map.
      output = sess.run(output, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch, 3, 3, 32))

  def testAtrousFullyConvolutionalUnknownHeightWidth(self):
    """Unknown height/width combined with atrous output_stride=8."""
    batch = 2
    height, width = 65, 65
    global_pool = False
    output_stride = 8
    inputs = create_test_input(batch, None, None, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      output, _ = self._resnet_small(inputs,
                                     None,
                                     global_pool=global_pool,
                                     output_stride=output_stride)
    self.assertListEqual(output.get_shape().as_list(),
                         [batch, None, None, 32])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      # 65x65 input at output stride 8 yields a 9x9 feature map.
      output = sess.run(output, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch, 9, 9, 32))
if __name__ == '__main__':
  # Run all test cases in this module under the TensorFlow test runner.
  tf.test.main()
| 20,356 | 41.766807 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/inception.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Brings all inception models under one namespace."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from nets.inception_resnet_v2 import inception_resnet_v2
from nets.inception_resnet_v2 import inception_resnet_v2_arg_scope
from nets.inception_resnet_v2 import inception_resnet_v2_base
from nets.inception_v1 import inception_v1
from nets.inception_v1 import inception_v1_arg_scope
from nets.inception_v1 import inception_v1_base
from nets.inception_v2 import inception_v2
from nets.inception_v2 import inception_v2_arg_scope
from nets.inception_v2 import inception_v2_base
from nets.inception_v3 import inception_v3
from nets.inception_v3 import inception_v3_arg_scope
from nets.inception_v3 import inception_v3_base
from nets.inception_v4 import inception_v4
from nets.inception_v4 import inception_v4_arg_scope
from nets.inception_v4 import inception_v4_base
# pylint: enable=unused-import
| 1,676 | 43.131579 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/pix2pix.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Implementation of the Image-to-Image Translation model.
This network represents a port of the following work:
Image-to-Image Translation with Conditional Adversarial Networks
Phillip Isola, Jun-Yan Zhu, Tinghui Zhou and Alexei A. Efros
Arxiv, 2017
https://phillipi.github.io/pix2pix/
A reference implementation written in Lua can be found at:
https://github.com/phillipi/pix2pix/blob/master/models.lua
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import tensorflow as tf
layers = tf.contrib.layers
def pix2pix_arg_scope():
  """Returns a default argument scope for isola_net.

  Returns:
    An arg scope.
  """
  # These parameters come from the online port, which don't necessarily match
  # those in the paper.
  # TODO(nsilberman): confirm these values with Philip.
  norm_params = dict(center=True, scale=True, epsilon=0.00001)
  conv_ops = [layers.conv2d, layers.conv2d_transpose]
  with tf.contrib.framework.arg_scope(
      conv_ops,
      normalizer_fn=layers.instance_norm,
      normalizer_params=norm_params,
      weights_initializer=tf.random_normal_initializer(0, 0.02)) as scope:
    return scope
def upsample(net, num_outputs, kernel_size, method='nn_upsample_conv'):
  """Upsamples the given inputs.

  Args:
    net: A `Tensor` of size [batch_size, height, width, filters].
    num_outputs: The number of output filters.
    kernel_size: A list of 2 scalars or a 1x2 `Tensor` indicating the scale,
      relative to the inputs, of the output dimensions. For example, if kernel
      size is [2, 3], then the output height and width will be twice and three
      times the input size.
    method: The upsampling method: one of 'nn_upsample_conv' or
      'conv2d_transpose'.

  Returns:
    An `Tensor` which was upsampled using the specified method.

  Raises:
    ValueError: if `method` is not recognized.
  """
  net_shape = tf.shape(net)
  height = net_shape[1]
  width = net_shape[2]
  if method == 'nn_upsample_conv':
    # Nearest-neighbor resize to the target size, then a 4x4 conv.
    net = tf.image.resize_nearest_neighbor(
        net, [kernel_size[0] * height, kernel_size[1] * width])
    net = layers.conv2d(net, num_outputs, [4, 4], activation_fn=None)
  elif method == 'conv2d_transpose':
    net = layers.conv2d_transpose(
        net, num_outputs, [4, 4], stride=kernel_size, activation_fn=None)
  else:
    # Bug fix: the message was previously passed logging-style, i.e.
    # ValueError('Unknown method: [%s]', method), so '%s' was never
    # interpolated. Use %-formatting so the offending method name appears.
    raise ValueError('Unknown method: [%s]' % method)
  return net
class Block(
    collections.namedtuple('Block', ['num_filters', 'decoder_keep_prob'])):
  """One encoder/decoder processing step of the pix2pix generator.

  Unlike the original U-Net, the Image-to-Image translation paper treats each
  block as a single encoder operation whose activation is later concatenated
  with the matching decoder representation; the concatenation is followed by a
  convolution and a dropout layer parameterized by `decoder_keep_prob`.

  Attributes:
    num_filters: Number of convolution filters for this block.
    decoder_keep_prob: Dropout keep probability on the decoder side; a value
      of 0 disables dropout for the block.
  """
def _default_generator_blocks():
  """Returns the default generator block definitions.

  Returns:
    A list of generator blocks.
  """
  # Encoder filter widths paired with decoder dropout keep probabilities:
  # dropout is active (0.5) only for the three innermost decoder layers.
  num_filters = (64, 128, 256, 512, 512, 512, 512)
  keep_probs = (0.5, 0.5, 0.5, 0, 0, 0, 0)
  return [Block(filters, keep_prob)
          for filters, keep_prob in zip(num_filters, keep_probs)]
def pix2pix_generator(net,
                      num_outputs,
                      blocks=None,
                      upsample_method='nn_upsample_conv',
                      is_training=False):  # pylint: disable=unused-argument
  """Defines the network architecture.

  Args:
    net: A `Tensor` of size [batch, height, width, channels]. Note that the
      generator currently requires square inputs (e.g. height=width).
    num_outputs: The number of (per-pixel) outputs.
    blocks: A list of generator blocks or `None` to use the default generator
      definition.
    upsample_method: The method of upsampling images, one of 'nn_upsample_conv'
      or 'conv2d_transpose'
    is_training: Whether or not we're in training or testing mode.

  Returns:
    A `Tensor` representing the model output and a dictionary of model end
      points.

  Raises:
    ValueError: if the input heights do not match their widths.
  """
  end_points = {}
  blocks = blocks or _default_generator_blocks()
  # The final logits are reshaped to the input's static shape with the channel
  # dimension replaced by num_outputs.
  input_size = net.get_shape().as_list()
  input_size[3] = num_outputs
  upsample_fn = functools.partial(upsample, method=upsample_method)
  encoder_activations = []

  ###########
  # Encoder #
  ###########
  with tf.variable_scope('encoder'):
    with tf.contrib.framework.arg_scope(
        [layers.conv2d],
        kernel_size=[4, 4],
        stride=2,
        activation_fn=tf.nn.leaky_relu):
      for block_id, block in enumerate(blocks):
        # No normalizer for the first encoder layers as per 'Image-to-Image',
        # Section 5.1.1
        if block_id == 0:
          # First layer doesn't use normalizer_fn
          net = layers.conv2d(net, block.num_filters, normalizer_fn=None)
        elif block_id < len(blocks) - 1:
          net = layers.conv2d(net, block.num_filters)
        else:
          # Last layer doesn't use activation_fn nor normalizer_fn
          net = layers.conv2d(
              net, block.num_filters, activation_fn=None, normalizer_fn=None)
        # Keep each activation so the decoder can form U-Net skip connections.
        encoder_activations.append(net)
        end_points['encoder%d' % block_id] = net

  ###########
  # Decoder #
  ###########
  reversed_blocks = list(blocks)
  reversed_blocks.reverse()
  with tf.variable_scope('decoder'):
    # Dropout is used at both train and test time as per 'Image-to-Image',
    # Section 2.1 (last paragraph).
    with tf.contrib.framework.arg_scope([layers.dropout], is_training=True):
      for block_id, block in enumerate(reversed_blocks):
        if block_id > 0:
          # Skip connection: concatenate the mirrored encoder activation
          # (channels axis). The innermost layer (block_id == 0) has none.
          net = tf.concat([net, encoder_activations[-block_id - 1]], axis=3)
        # The Relu comes BEFORE the upsample op:
        net = tf.nn.relu(net)
        net = upsample_fn(net, block.num_filters, [2, 2])
        if block.decoder_keep_prob > 0:
          net = layers.dropout(net, keep_prob=block.decoder_keep_prob)
        end_points['decoder%d' % block_id] = net

  with tf.variable_scope('output'):
    # Explicitly set the normalizer_fn to None to override any default value
    # that may come from an arg_scope, such as pix2pix_arg_scope.
    logits = layers.conv2d(
        net, num_outputs, [4, 4], activation_fn=None, normalizer_fn=None)
    logits = tf.reshape(logits, input_size)

    end_points['logits'] = logits
    end_points['predictions'] = tf.tanh(logits)
  return logits, end_points
def pix2pix_discriminator(net, num_filters, padding=2, is_training=False):
  """Creates the Image2Image Translation Discriminator.

  Args:
    net: A `Tensor` of size [batch_size, height, width, channels] representing
      the input.
    num_filters: A list of the filters in the discriminator. The length of the
      list determines the number of layers in the discriminator.
    padding: Amount of reflection padding applied before each convolution.
    is_training: Whether or not the model is training or testing.

  Returns:
    A logits `Tensor` of size [batch_size, N, N, 1] where N is the number of
    'patches' we're attempting to discriminate and a dictionary of model end
    points.
  """
  del is_training
  end_points = {}
  num_layers = len(num_filters)

  def padded(net, scope):
    # Reflection-pad the spatial dimensions only; batch and channel axes are
    # left untouched. With padding == 0 the input passes through unchanged.
    if padding:
      with tf.variable_scope(scope):
        spatial_pad = tf.constant(
            [[0, 0], [padding, padding], [padding, padding], [0, 0]],
            dtype=tf.int32)
        return tf.pad(net, spatial_pad, 'REFLECT')
    else:
      return net

  with tf.contrib.framework.arg_scope(
      [layers.conv2d],
      kernel_size=[4, 4],
      stride=2,
      padding='valid',
      activation_fn=tf.nn.leaky_relu):

    # No normalization on the input layer.
    net = layers.conv2d(
        padded(net, 'conv0'), num_filters[0], normalizer_fn=None, scope='conv0')

    end_points['conv0'] = net

    # Middle layers: stride 2 with the arg_scope defaults.
    for i in range(1, num_layers - 1):
      net = layers.conv2d(
          padded(net, 'conv%d' % i), num_filters[i], scope='conv%d' % i)
      end_points['conv%d' % i] = net

    # Stride 1 on the last layer.
    net = layers.conv2d(
        padded(net, 'conv%d' % (num_layers - 1)),
        num_filters[-1],
        stride=1,
        scope='conv%d' % (num_layers - 1))
    end_points['conv%d' % (num_layers - 1)] = net

    # 1-dim logits, stride 1, no activation, no normalization.
    logits = layers.conv2d(
        padded(net, 'conv%d' % num_layers),
        1,
        stride=1,
        activation_fn=None,
        normalizer_fn=None,
        scope='conv%d' % num_layers)
    end_points['logits'] = logits
    end_points['predictions'] = tf.sigmoid(logits)
  return logits, end_points
| 9,439 | 31.21843 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/nets_factory.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a factory for building various models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
from nets import alexnet
from nets import cifarnet
from nets import inception
from nets import lenet
from nets import mobilenet_v1
from nets import overfeat
from nets import resnet_v1
from nets import resnet_v2
from nets import vgg
from nets.mobilenet import mobilenet_v2
from nets.nasnet import nasnet
from nets.nasnet import pnasnet
slim = tf.contrib.slim
# Maps each canonical model name to the slim callable that builds it.
# get_network_fn() below invokes these as fn(images, num_classes,
# is_training=..., **kwargs); keys here must stay in sync with
# arg_scopes_map.
networks_map = {'alexnet_v2': alexnet.alexnet_v2,
                'cifarnet': cifarnet.cifarnet,
                'overfeat': overfeat.overfeat,
                'vgg_a': vgg.vgg_a,
                'vgg_16': vgg.vgg_16,
                'vgg_19': vgg.vgg_19,
                'inception_v1': inception.inception_v1,
                'inception_v2': inception.inception_v2,
                'inception_v3': inception.inception_v3,
                'inception_v4': inception.inception_v4,
                'inception_resnet_v2': inception.inception_resnet_v2,
                'lenet': lenet.lenet,
                'resnet_v1_50': resnet_v1.resnet_v1_50,
                'resnet_v1_101': resnet_v1.resnet_v1_101,
                'resnet_v1_152': resnet_v1.resnet_v1_152,
                'resnet_v1_200': resnet_v1.resnet_v1_200,
                'resnet_v2_50': resnet_v2.resnet_v2_50,
                'resnet_v2_101': resnet_v2.resnet_v2_101,
                'resnet_v2_152': resnet_v2.resnet_v2_152,
                'resnet_v2_200': resnet_v2.resnet_v2_200,
                'mobilenet_v1': mobilenet_v1.mobilenet_v1,
                'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_075,
                'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_050,
                'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_025,
                'mobilenet_v2': mobilenet_v2.mobilenet,
                'mobilenet_v2_140': mobilenet_v2.mobilenet_v2_140,
                'mobilenet_v2_035': mobilenet_v2.mobilenet_v2_035,
                'nasnet_cifar': nasnet.build_nasnet_cifar,
                'nasnet_mobile': nasnet.build_nasnet_mobile,
                'nasnet_large': nasnet.build_nasnet_large,
                'pnasnet_large': pnasnet.build_pnasnet_large,
                'pnasnet_mobile': pnasnet.build_pnasnet_mobile,
               }
# Maps each model name to the arg-scope factory applied around its builder in
# get_network_fn(); each factory is called as fn(weight_decay=...).
# NOTE(review): 'inception_v1' and 'inception_v2' deliberately map to
# inception_v3_arg_scope here — confirm against upstream slim before changing.
arg_scopes_map = {'alexnet_v2': alexnet.alexnet_v2_arg_scope,
                  'cifarnet': cifarnet.cifarnet_arg_scope,
                  'overfeat': overfeat.overfeat_arg_scope,
                  'vgg_a': vgg.vgg_arg_scope,
                  'vgg_16': vgg.vgg_arg_scope,
                  'vgg_19': vgg.vgg_arg_scope,
                  'inception_v1': inception.inception_v3_arg_scope,
                  'inception_v2': inception.inception_v3_arg_scope,
                  'inception_v3': inception.inception_v3_arg_scope,
                  'inception_v4': inception.inception_v4_arg_scope,
                  'inception_resnet_v2':
                  inception.inception_resnet_v2_arg_scope,
                  'lenet': lenet.lenet_arg_scope,
                  'resnet_v1_50': resnet_v1.resnet_arg_scope,
                  'resnet_v1_101': resnet_v1.resnet_arg_scope,
                  'resnet_v1_152': resnet_v1.resnet_arg_scope,
                  'resnet_v1_200': resnet_v1.resnet_arg_scope,
                  'resnet_v2_50': resnet_v2.resnet_arg_scope,
                  'resnet_v2_101': resnet_v2.resnet_arg_scope,
                  'resnet_v2_152': resnet_v2.resnet_arg_scope,
                  'resnet_v2_200': resnet_v2.resnet_arg_scope,
                  'mobilenet_v1': mobilenet_v1.mobilenet_v1_arg_scope,
                  'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_arg_scope,
                  'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_arg_scope,
                  'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_arg_scope,
                  'mobilenet_v2': mobilenet_v2.training_scope,
                  'mobilenet_v2_035': mobilenet_v2.training_scope,
                  'mobilenet_v2_140': mobilenet_v2.training_scope,
                  'nasnet_cifar': nasnet.nasnet_cifar_arg_scope,
                  'nasnet_mobile': nasnet.nasnet_mobile_arg_scope,
                  'nasnet_large': nasnet.nasnet_large_arg_scope,
                  'pnasnet_large': pnasnet.pnasnet_large_arg_scope,
                  'pnasnet_mobile': pnasnet.pnasnet_mobile_arg_scope,
                 }
def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False):
  """Returns a network_fn such as `logits, end_points = network_fn(images)`.

  Args:
    name: The name of the network.
    num_classes: The number of classes to use for classification. If 0 or None,
      the logits layer is omitted and its input features are returned instead.
    weight_decay: The l2 coefficient for the model weights.
    is_training: `True` if the model is being used for training and `False`
      otherwise.

  Returns:
    network_fn: A function that applies the model to a batch of images, with
      signature `net, end_points = network_fn(images)`. The `images` input is
      a tensor of shape [batch_size, height, width, 3] with
      height = width = network_fn.default_image_size. `end_points` is a dict
      of intermediate activations, and `net` is the topmost layer: logits of
      shape [batch_size, num_classes] when `num_classes` is a non-zero
      integer, otherwise the pre-logits features (dropout not applied).

  Raises:
    ValueError: If network `name` is not recognized.
  """
  if name not in networks_map:
    raise ValueError('Name of network unknown %s' % name)
  func = networks_map[name]
  scope_factory = arg_scopes_map[name]

  @functools.wraps(func)
  def network_fn(images, **kwargs):
    # Rebuild the arg scope on every call; scopes are graph-local state.
    with slim.arg_scope(scope_factory(weight_decay=weight_decay)):
      return func(images, num_classes, is_training=is_training, **kwargs)

  # Propagate the builder's expected input resolution, when it declares one.
  if hasattr(func, 'default_image_size'):
    network_fn.default_image_size = func.default_image_size

  return network_fn
| 7,201 | 46.381579 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/vgg.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains model definitions for versions of the Oxford VGG network.
These model definitions were introduced in the following technical report:
Very Deep Convolutional Networks For Large-Scale Image Recognition
Karen Simonyan and Andrew Zisserman
arXiv technical report, 2015
PDF: http://arxiv.org/pdf/1409.1556.pdf
ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf
CC-BY-4.0
More information can be obtained from the VGG website:
www.robots.ox.ac.uk/~vgg/research/very_deep/
Usage:
with slim.arg_scope(vgg.vgg_arg_scope()):
outputs, end_points = vgg.vgg_a(inputs)
with slim.arg_scope(vgg.vgg_arg_scope()):
outputs, end_points = vgg.vgg_16(inputs)
@@vgg_a
@@vgg_16
@@vgg_19
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  regularizer = slim.l2_regularizer(weight_decay)
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_regularizer=regularizer,
                      biases_initializer=tf.zeros_initializer()):
    # Convolutions additionally default to 'SAME' padding.
    with slim.arg_scope([slim.conv2d], padding='SAME') as scope:
      return scope
def vgg_a(inputs,
          num_classes=1000,
          is_training=True,
          dropout_keep_prob=0.5,
          spatial_squeeze=True,
          scope='vgg_a',
          fc_conv_padding='VALID',
          global_pool=False):
  """Oxford Net VGG 11-Layers version A Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer is
      omitted and the input features to the logits layer are returned instead.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.
    fc_conv_padding: the type of padding to use for the fully connected layer
      that is implemented as a convolutional layer. Use 'SAME' padding if you
      are applying the network in a fully convolutional manner and want to
      get a prediction map downsampled by a factor of 32 as an output.
      Otherwise, the output prediction map will be (input / 32) - 6 in case of
      'VALID' padding.
    global_pool: Optional boolean flag. If True, the input to the classification
      layer is avgpooled to size 1x1, for any input size. (This is not part
      of the original VGG architecture.)

  Returns:
    net: the output of the logits layer (if num_classes is a non-zero integer),
      or the input to the logits layer (if num_classes is 0 or None).
    end_points: a dict of tensors with intermediate activations.
  """
  with tf.variable_scope(scope, 'vgg_a', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      # Variant A: 1-1-2-2-2 conv repeats (8 conv layers + 3 "FC" layers = 11).
      net = slim.repeat(inputs, 1, slim.conv2d, 64, [3, 3], scope='conv1')
      net = slim.max_pool2d(net, [2, 2], scope='pool1')
      net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2')
      net = slim.max_pool2d(net, [2, 2], scope='pool2')
      net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3')
      net = slim.max_pool2d(net, [2, 2], scope='pool3')
      net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4')
      net = slim.max_pool2d(net, [2, 2], scope='pool4')
      net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [2, 2], scope='pool5')

      # Use conv2d instead of fully_connected layers.
      net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout6')
      net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      # Convert end_points_collection into a end_point dict.
      end_points = slim.utils.convert_collection_to_dict(end_points_collection)
      if global_pool:
        net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
        end_points['global_pool'] = net
      if num_classes:
        # Classification head: dropout + 1x1 conv acting as the final FC.
        net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                           scope='dropout7')
        net = slim.conv2d(net, num_classes, [1, 1],
                          activation_fn=None,
                          normalizer_fn=None,
                          scope='fc8')
        if spatial_squeeze:
          net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
# Expected square input resolution for classification mode.
vgg_a.default_image_size = 224
def vgg_16(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_16',
           fc_conv_padding='VALID',
           global_pool=False):
  """Builds the Oxford Net VGG 16-layer network (configuration D).

  Every fully connected layer of the original architecture is expressed as a
  conv2d layer, so the network can also be run fully convolutionally. For
  classification mode, resize the input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer
      is omitted and the features feeding it are returned instead.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: keep probability for the dropout layers during
      training.
    spatial_squeeze: whether to squeeze the spatial dimensions out of the
      logits, which is convenient for classification.
    scope: Optional scope for the variables.
    fc_conv_padding: padding for the convolution standing in for the first
      fully connected layer. Use 'SAME' when applying the network fully
      convolutionally to get a prediction map downsampled by 32; with
      'VALID' the prediction map is (input / 32) - 6.
    global_pool: if True, average-pool the features to 1x1 before the
      classifier so any input size is accepted. (Not part of the original
      VGG architecture.)

  Returns:
    net: the logits (if num_classes is a non-zero integer) or the input to
      the logits layer (if num_classes is 0 or None).
    end_points: a dict mapping endpoint names to intermediate activations.
  """
  with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Route conv2d, fully_connected and max_pool2d outputs into a collection
    # so they can be gathered into the end_points dict afterwards.
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      features = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3],
                             scope='conv1')
      features = slim.max_pool2d(features, [2, 2], scope='pool1')
      features = slim.repeat(features, 2, slim.conv2d, 128, [3, 3],
                             scope='conv2')
      features = slim.max_pool2d(features, [2, 2], scope='pool2')
      features = slim.repeat(features, 3, slim.conv2d, 256, [3, 3],
                             scope='conv3')
      features = slim.max_pool2d(features, [2, 2], scope='pool3')
      features = slim.repeat(features, 3, slim.conv2d, 512, [3, 3],
                             scope='conv4')
      features = slim.max_pool2d(features, [2, 2], scope='pool4')
      features = slim.repeat(features, 3, slim.conv2d, 512, [3, 3],
                             scope='conv5')
      features = slim.max_pool2d(features, [2, 2], scope='pool5')

      # The original fc6/fc7 layers, expressed as convolutions.
      features = slim.conv2d(features, 4096, [7, 7], padding=fc_conv_padding,
                             scope='fc6')
      features = slim.dropout(features, dropout_keep_prob,
                              is_training=is_training, scope='dropout6')
      features = slim.conv2d(features, 4096, [1, 1], scope='fc7')

      # Gather every collected activation into the end-points dict.
      end_points = slim.utils.convert_collection_to_dict(
          end_points_collection)
      if global_pool:
        features = tf.reduce_mean(features, [1, 2], keep_dims=True,
                                  name='global_pool')
        end_points['global_pool'] = features
      if num_classes:
        features = slim.dropout(features, dropout_keep_prob,
                                is_training=is_training, scope='dropout7')
        features = slim.conv2d(features, num_classes, [1, 1],
                               activation_fn=None,
                               normalizer_fn=None,
                               scope='fc8')
        if spatial_squeeze:
          features = tf.squeeze(features, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = features
      return features, end_points
vgg_16.default_image_size = 224
def vgg_19(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_19',
           fc_conv_padding='VALID',
           global_pool=False):
  """Builds the Oxford Net VGG 19-layer network (configuration E).

  Every fully connected layer of the original architecture is expressed as a
  conv2d layer, so the network can also be run fully convolutionally. For
  classification mode, resize the input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer
      is omitted and the features feeding it are returned instead.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: keep probability for the dropout layers during
      training.
    spatial_squeeze: whether to squeeze the spatial dimensions out of the
      logits, which is convenient for classification.
    scope: Optional scope for the variables.
    fc_conv_padding: padding for the convolution standing in for the first
      fully connected layer. Use 'SAME' when applying the network fully
      convolutionally to get a prediction map downsampled by 32; with
      'VALID' the prediction map is (input / 32) - 6.
    global_pool: if True, average-pool the features to 1x1 before the
      classifier so any input size is accepted. (Not part of the original
      VGG architecture.)

  Returns:
    net: the logits (if num_classes is a non-zero integer) or the
      non-dropped-out input to the logits layer (if num_classes is 0 or
      None).
    end_points: a dict mapping endpoint names to intermediate activations.
  """
  with tf.variable_scope(scope, 'vgg_19', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Route conv2d, fully_connected and max_pool2d outputs into a collection
    # so they can be gathered into the end_points dict afterwards.
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      features = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3],
                             scope='conv1')
      features = slim.max_pool2d(features, [2, 2], scope='pool1')
      features = slim.repeat(features, 2, slim.conv2d, 128, [3, 3],
                             scope='conv2')
      features = slim.max_pool2d(features, [2, 2], scope='pool2')
      features = slim.repeat(features, 4, slim.conv2d, 256, [3, 3],
                             scope='conv3')
      features = slim.max_pool2d(features, [2, 2], scope='pool3')
      features = slim.repeat(features, 4, slim.conv2d, 512, [3, 3],
                             scope='conv4')
      features = slim.max_pool2d(features, [2, 2], scope='pool4')
      features = slim.repeat(features, 4, slim.conv2d, 512, [3, 3],
                             scope='conv5')
      features = slim.max_pool2d(features, [2, 2], scope='pool5')

      # The original fc6/fc7 layers, expressed as convolutions.
      features = slim.conv2d(features, 4096, [7, 7], padding=fc_conv_padding,
                             scope='fc6')
      features = slim.dropout(features, dropout_keep_prob,
                              is_training=is_training, scope='dropout6')
      features = slim.conv2d(features, 4096, [1, 1], scope='fc7')

      # Gather every collected activation into the end-points dict.
      end_points = slim.utils.convert_collection_to_dict(
          end_points_collection)
      if global_pool:
        features = tf.reduce_mean(features, [1, 2], keep_dims=True,
                                  name='global_pool')
        end_points['global_pool'] = features
      if num_classes:
        features = slim.dropout(features, dropout_keep_prob,
                                is_training=is_training, scope='dropout7')
        features = slim.conv2d(features, num_classes, [1, 1],
                               activation_fn=None,
                               normalizer_fn=None,
                               scope='fc8')
        if spatial_squeeze:
          features = tf.squeeze(features, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = features
      return features, end_points
vgg_19.default_image_size = 224

# Aliases matching the configuration letters of the original VGG paper.
vgg_d = vgg_16
vgg_e = vgg_19
| 14,019 | 45.270627 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/cyclegan.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the CycleGAN generator and discriminator networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
layers = tf.contrib.layers
def cyclegan_arg_scope(instance_norm_center=True,
                       instance_norm_scale=True,
                       instance_norm_epsilon=0.001,
                       weights_init_stddev=0.02,
                       weight_decay=0.0):
  """Builds the default argument scope for CycleGAN networks.

  Args:
    instance_norm_center: Whether instance normalization applies centering.
    instance_norm_scale: Whether instance normalization applies scaling.
    instance_norm_epsilon: Small float added to the variance in instance
      normalization to avoid dividing by zero.
    weights_init_stddev: Standard deviation of the random values used to
      initialize the convolution kernels.
    weight_decay: Magnitude of the weight decay applied to all convolution
      kernel variables of the generator.

  Returns:
    An arg-scope configuring `layers.conv2d` for all generators and
    discriminators.
  """
  norm_params = dict(
      center=instance_norm_center,
      scale=instance_norm_scale,
      epsilon=instance_norm_epsilon)
  # Only attach an L2 regularizer when a strictly positive decay is given.
  regularizer = (layers.l2_regularizer(weight_decay)
                 if weight_decay and weight_decay > 0.0 else None)
  initializer = tf.random_normal_initializer(0, weights_init_stddev)
  with tf.contrib.framework.arg_scope(
      [layers.conv2d],
      normalizer_fn=layers.instance_norm,
      normalizer_params=norm_params,
      weights_initializer=initializer,
      weights_regularizer=regularizer) as sc:
    return sc
def cyclegan_upsample(net, num_outputs, stride, method='conv2d_transpose'):
  """Upsamples the given inputs.

  Args:
    net: A Tensor of size [batch_size, height, width, filters].
    num_outputs: The number of output filters.
    stride: A list of 2 scalars or a 1x2 Tensor indicating the scale,
      relative to the inputs, of the output dimensions. For example, if
      stride is [2, 3], then the output height and width will be twice and
      three times the input size.
    method: The upsampling method: 'nn_upsample_conv',
      'bilinear_upsample_conv', or 'conv2d_transpose'.

  Returns:
    A Tensor which was upsampled using the specified method.

  Raises:
    ValueError: if `method` is not recognized.
  """
  with tf.variable_scope('upconv'):
    net_shape = tf.shape(net)
    height = net_shape[1]
    width = net_shape[2]

    # Reflection pad by 1 in spatial dimensions (axes 1, 2 = h, w) to make a
    # 3x3 'valid' convolution produce an output with the same dimension as
    # the input.
    spatial_pad_1 = np.array([[0, 0], [1, 1], [1, 1], [0, 0]])

    if method == 'nn_upsample_conv':
      net = tf.image.resize_nearest_neighbor(
          net, [stride[0] * height, stride[1] * width])
      net = tf.pad(net, spatial_pad_1, 'REFLECT')
      net = layers.conv2d(net, num_outputs, kernel_size=[3, 3],
                          padding='valid')
    elif method == 'bilinear_upsample_conv':
      net = tf.image.resize_bilinear(
          net, [stride[0] * height, stride[1] * width])
      net = tf.pad(net, spatial_pad_1, 'REFLECT')
      net = layers.conv2d(net, num_outputs, kernel_size=[3, 3],
                          padding='valid')
    elif method == 'conv2d_transpose':
      # This corrects 1 pixel offset for images with even width and height.
      # conv2d is left aligned and conv2d_transpose is right aligned for even
      # sized images (while doing 'SAME' padding).
      # Note: This doesn't reflect actual model in paper.
      net = layers.conv2d_transpose(
          net, num_outputs, kernel_size=[3, 3], stride=stride,
          padding='valid')
      net = net[:, 1:, 1:, :]
    else:
      # Bug fix: the original passed `method` as a second exception argument,
      # which left the '[%s]' placeholder unformatted in the message.
      raise ValueError('Unknown method: [%s]' % method)

  return net
def _dynamic_or_static_shape(tensor):
  """Returns the static shape of `tensor` when fully known, else the
  dynamic shape op."""
  dynamic_shape = tf.shape(tensor)
  static_shape = tf.contrib.util.constant_value(dynamic_shape)
  return dynamic_shape if static_shape is None else static_shape
def cyclegan_generator_resnet(images,
                              arg_scope_fn=cyclegan_arg_scope,
                              num_resnet_blocks=6,
                              num_filters=64,
                              upsample_fn=cyclegan_upsample,
                              kernel_size=3,
                              num_outputs=3,
                              tanh_linear_slope=0.0,
                              is_training=False):
  """Defines the CycleGAN ResNet generator architecture.

  Follows, as closely as possible,
  https://github.com/junyanz/CycleGAN/blob/master/models/architectures.lua#L232

  FYI: This network requires input height and width to be divisible by 4 in
  order to generate an output with shape equal to input shape. Assertions
  will catch this if input dimensions are known at graph construction time,
  but there's no protection if unknown at graph construction time (you'll
  see an error).

  Args:
    images: Input image tensor of shape [batch_size, h, w, 3].
    arg_scope_fn: Function to create the global arg_scope for the network.
    num_resnet_blocks: Number of ResNet blocks in the middle of the
      generator.
    num_filters: Number of filters of the first hidden layer.
    upsample_fn: Upsampling function for the decoder part of the generator.
    kernel_size: Size w or list/tuple [h, w] of the filter kernels for all
      inner layers.
    num_outputs: Number of output layers. Defaults to 3 for RGB.
    tanh_linear_slope: Slope of the linear function to add to the tanh over
      the logits.
    is_training: Unused; accepted only for signature compliance with other
      generator functions (the network has neither dropout nor batch norm).

  Returns:
    A `Tensor` representing the model output and a dictionary of model end
    points.

  Raises:
    ValueError: If the input height or width is known at graph construction
      time and not a multiple of 4.
  """
  # Neither dropout nor batch norm is used, so the training flag is moot.
  del is_training
  end_points = {}

  input_size = images.shape.as_list()
  height, width = input_size[1], input_size[2]
  if height and height % 4 != 0:
    raise ValueError('The input height must be a multiple of 4.')
  if width and width % 4 != 0:
    raise ValueError('The input width must be a multiple of 4.')

  if not isinstance(kernel_size, (list, tuple)):
    kernel_size = [kernel_size, kernel_size]
  kernel_height, kernel_width = kernel_size[0], kernel_size[1]
  # Reflection-padding amounts that make a VALID convolution
  # size-preserving for this kernel.
  pad_top = (kernel_height - 1) // 2
  pad_bottom = kernel_height // 2
  pad_left = (kernel_width - 1) // 2
  pad_right = kernel_width // 2
  paddings = np.array(
      [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]],
      dtype=np.int32)
  spatial_pad_3 = np.array([[0, 0], [3, 3], [3, 3], [0, 0]])

  with tf.contrib.framework.arg_scope(arg_scope_fn()):

    ###########
    # Encoder #
    ###########
    with tf.variable_scope('input'):
      # 7x7 input stage.
      net = tf.pad(images, spatial_pad_3, 'REFLECT')
      net = layers.conv2d(net, num_filters, kernel_size=[7, 7],
                          padding='VALID')
      end_points['encoder_0'] = net

    with tf.variable_scope('encoder'):
      with tf.contrib.framework.arg_scope(
          [layers.conv2d],
          kernel_size=kernel_size,
          stride=2,
          activation_fn=tf.nn.relu,
          padding='VALID'):
        net = tf.pad(net, paddings, 'REFLECT')
        net = layers.conv2d(net, num_filters * 2)
        end_points['encoder_1'] = net
        net = tf.pad(net, paddings, 'REFLECT')
        net = layers.conv2d(net, num_filters * 4)
        end_points['encoder_2'] = net

    ###################
    # Residual Blocks #
    ###################
    with tf.variable_scope('residual_blocks'):
      with tf.contrib.framework.arg_scope(
          [layers.conv2d],
          kernel_size=kernel_size,
          stride=1,
          activation_fn=tf.nn.relu,
          padding='VALID'):
        for block_id in xrange(num_resnet_blocks):
          with tf.variable_scope('block_{}'.format(block_id)):
            residual = tf.pad(net, paddings, 'REFLECT')
            residual = layers.conv2d(residual, num_filters * 4)
            residual = tf.pad(residual, paddings, 'REFLECT')
            residual = layers.conv2d(residual, num_filters * 4,
                                     activation_fn=None)
            net += residual  # identity shortcut
            end_points['resnet_block_%d' % block_id] = net

    ###########
    # Decoder #
    ###########
    with tf.variable_scope('decoder'):
      with tf.contrib.framework.arg_scope(
          [layers.conv2d],
          kernel_size=kernel_size,
          stride=1,
          activation_fn=tf.nn.relu):
        with tf.variable_scope('decoder1'):
          net = upsample_fn(net, num_outputs=num_filters * 2, stride=[2, 2])
        end_points['decoder1'] = net
        with tf.variable_scope('decoder2'):
          net = upsample_fn(net, num_outputs=num_filters, stride=[2, 2])
        end_points['decoder2'] = net

    with tf.variable_scope('output'):
      net = tf.pad(net, spatial_pad_3, 'REFLECT')
      logits = layers.conv2d(
          net,
          num_outputs, [7, 7],
          activation_fn=None,
          normalizer_fn=None,
          padding='valid')
      logits = tf.reshape(logits, _dynamic_or_static_shape(images))
      end_points['logits'] = logits
      end_points['predictions'] = tf.tanh(logits) + logits * tanh_linear_slope

  return end_points['predictions'], end_points
| 10,300 | 36.594891 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/dcgan.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DCGAN generator and discriminator from https://arxiv.org/abs/1511.06434."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import log
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
slim = tf.contrib.slim
def _validate_image_inputs(inputs):
  """Checks that `inputs` is a rank-4 square batch with power-of-2 side.

  Raises:
    ValueError: if the spatial dimensions are unequal or the width is not a
      power of two. Rank/definedness problems raise via the shape asserts.
  """
  shape = inputs.get_shape()
  shape.assert_has_rank(4)
  shape[1:3].assert_is_fully_defined()
  if shape[1] != shape[2]:
    raise ValueError('Input tensor does not have equal width and height: ',
                     shape[1:3])
  width = shape.as_list()[1]
  # A width whose log2 is non-integral is not a power of two.
  if log(width, 2) != int(log(width, 2)):
    raise ValueError('Input tensor `width` is not a power of 2: ', width)
# TODO(joelshor): Use fused batch norm by default. Investigate why some GAN
# setups need the gradient of gradient FusedBatchNormGrad.
def discriminator(inputs,
                  depth=64,
                  is_training=True,
                  reuse=None,
                  scope='Discriminator',
                  fused_batch_norm=False):
  """Discriminator network for DCGAN.

  Constructs the discriminator from the inputs to the final endpoint.

  Args:
    inputs: A tensor of size [batch_size, height, width, channels]. Must be
      floating point.
    depth: Number of channels in the first convolution layer.
    is_training: Whether the network is for training or not.
    reuse: Whether or not the network variables should be reused. `scope`
      must be given to be reused.
    scope: Optional variable_scope.
    fused_batch_norm: If `True`, use a faster, fused implementation of batch
      norm.

  Returns:
    logits: The pre-softmax activations, a tensor of size [batch_size, 1].
    end_points: a dictionary from components of the network to their
      activation.

  Raises:
    ValueError: If the input image shape is not 4-dimensional, if the
      spatial dimensions aren't defined at graph construction time, if the
      spatial dimensions aren't square, or if the spatial dimensions aren't
      a power of two.
  """
  normalizer_fn = slim.batch_norm
  normalizer_fn_args = {
      'is_training': is_training,
      'zero_debias_moving_mean': True,
      'fused': fused_batch_norm,
  }

  _validate_image_inputs(inputs)
  side = inputs.get_shape().as_list()[1]

  end_points = {}
  with tf.variable_scope(scope, values=[inputs], reuse=reuse) as scope:
    with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
      with slim.arg_scope([slim.conv2d],
                          stride=2,
                          kernel_size=4,
                          activation_fn=tf.nn.leaky_relu):
        net = inputs
        # One stride-2 conv per halving of the (power-of-two) input side.
        for i in xrange(int(log(side, 2))):
          scope = 'conv%i' % (i + 1)
          current_depth = depth * 2**i
          # The very first conv layer is not normalized.
          normalizer_fn_ = None if i == 0 else normalizer_fn
          net = slim.conv2d(
              net, current_depth, normalizer_fn=normalizer_fn_, scope=scope)
          end_points[scope] = net

        logits = slim.conv2d(net, 1, kernel_size=1, stride=1,
                             padding='VALID', normalizer_fn=None,
                             activation_fn=None)
        logits = tf.reshape(logits, [-1, 1])
        end_points['logits'] = logits

        return logits, end_points
# TODO(joelshor): Use fused batch norm by default. Investigate why some GAN
# setups need the gradient of gradient FusedBatchNormGrad.
def generator(inputs,
              depth=64,
              final_size=32,
              num_outputs=3,
              is_training=True,
              reuse=None,
              scope='Generator',
              fused_batch_norm=False):
  """Generator network for DCGAN.

  Constructs the generator from the inputs to the final endpoint.

  Args:
    inputs: A tensor with any size N. [batch_size, N]
    depth: Number of channels in the last deconvolution layer.
    final_size: The shape of the final output.
    num_outputs: Number of output features. For images, this is the number
      of channels.
    is_training: whether is training or not.
    reuse: Whether or not the network variables should be reused. `scope`
      must be given to be reused.
    scope: Optional variable_scope.
    fused_batch_norm: If `True`, use a faster, fused implementation of batch
      norm.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, final_size, final_size, num_outputs].
    end_points: a dictionary from components of the network to their
      activation.

  Raises:
    ValueError: If `inputs` is not 2-dimensional.
    ValueError: If `final_size` isn't a power of 2 or is less than 8.
  """
  normalizer_fn = slim.batch_norm
  normalizer_fn_args = {
      'is_training': is_training,
      'zero_debias_moving_mean': True,
      'fused': fused_batch_norm,
  }

  inputs.get_shape().assert_has_rank(2)
  if log(final_size, 2) != int(log(final_size, 2)):
    raise ValueError('`final_size` (%i) must be a power of 2.' % final_size)
  if final_size < 8:
    raise ValueError('`final_size` (%i) must be greater than 8.' % final_size)

  end_points = {}
  num_layers = int(log(final_size, 2)) - 1
  with tf.variable_scope(scope, values=[inputs], reuse=reuse) as scope:
    with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
      with slim.arg_scope([slim.conv2d_transpose],
                          normalizer_fn=normalizer_fn,
                          stride=2,
                          kernel_size=4):
        # Reshape the [batch, N] input into a [batch, 1, 1, N] "image".
        net = tf.expand_dims(tf.expand_dims(inputs, 1), 1)

        # The first upscaling differs because it consumes the input vector.
        current_depth = depth * 2 ** (num_layers - 1)
        scope = 'deconv1'
        net = slim.conv2d_transpose(
            net, current_depth, stride=1, padding='VALID', scope=scope)
        end_points[scope] = net

        # Intermediate deconvolutions halve the depth per layer.
        for i in xrange(2, num_layers):
          scope = 'deconv%i' % (i)
          current_depth = depth * 2 ** (num_layers - i)
          net = slim.conv2d_transpose(net, current_depth, scope=scope)
          end_points[scope] = net

        # The last deconvolution has no normalizer and no activation.
        scope = 'deconv%i' % (num_layers)
        net = slim.conv2d_transpose(
            net, depth, normalizer_fn=None, activation_fn=None, scope=scope)
        end_points[scope] = net

        # Project to the requested number of output channels.
        scope = 'logits'
        logits = slim.conv2d(
            net,
            num_outputs,
            normalizer_fn=None,
            activation_fn=None,
            kernel_size=1,
            stride=1,
            padding='VALID',
            scope=scope)
        end_points[scope] = logits

        logits.get_shape().assert_has_rank(4)
        logits.get_shape().assert_is_compatible_with(
            [None, final_size, final_size, num_outputs])

        return logits, end_points
| 7,546 | 36.17734 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/inception_v1_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nets import inception
slim = tf.contrib.slim
class InceptionV1Test(tf.test.TestCase):
  """Unit tests covering graph construction of nets.inception_v1."""

  def testBuildClassificationNetwork(self):
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000

    inputs = tf.random_uniform((batch_size, height, width, 3))
    logits, end_points = inception.inception_v1(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith(
        'InceptionV1/Logits/SpatialSqueeze'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    self.assertTrue('Predictions' in end_points)
    self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
                         [batch_size, num_classes])

  def testBuildPreLogitsNetwork(self):
    batch_size = 5
    height, width = 224, 224
    num_classes = None

    inputs = tf.random_uniform((batch_size, height, width, 3))
    net, end_points = inception.inception_v1(inputs, num_classes)
    self.assertTrue(net.op.name.startswith('InceptionV1/Logits/AvgPool'))
    self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
    self.assertFalse('Logits' in end_points)
    self.assertFalse('Predictions' in end_points)

  def testBuildBaseNetwork(self):
    batch_size = 5
    height, width = 224, 224

    inputs = tf.random_uniform((batch_size, height, width, 3))
    mixed_6c, end_points = inception.inception_v1_base(inputs)
    self.assertTrue(mixed_6c.op.name.startswith('InceptionV1/Mixed_5c'))
    self.assertListEqual(mixed_6c.get_shape().as_list(),
                         [batch_size, 7, 7, 1024])
    expected_endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
                          'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b',
                          'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b',
                          'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f',
                          'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
    self.assertItemsEqual(end_points.keys(), expected_endpoints)

  def testBuildOnlyUptoFinalEndpoint(self):
    batch_size = 5
    height, width = 224, 224
    endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
                 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
                 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d',
                 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b',
                 'Mixed_5c']
    # Rebuild from scratch for every possible final endpoint and verify that
    # construction stops exactly there.
    for index, endpoint in enumerate(endpoints):
      with tf.Graph().as_default():
        inputs = tf.random_uniform((batch_size, height, width, 3))
        out_tensor, end_points = inception.inception_v1_base(
            inputs, final_endpoint=endpoint)
        self.assertTrue(out_tensor.op.name.startswith(
            'InceptionV1/' + endpoint))
        self.assertItemsEqual(endpoints[:index+1], end_points.keys())

  def testBuildAndCheckAllEndPointsUptoMixed5c(self):
    batch_size = 5
    height, width = 224, 224

    inputs = tf.random_uniform((batch_size, height, width, 3))
    _, end_points = inception.inception_v1_base(inputs,
                                                final_endpoint='Mixed_5c')
    endpoints_shapes = {'Conv2d_1a_7x7': [5, 112, 112, 64],
                        'MaxPool_2a_3x3': [5, 56, 56, 64],
                        'Conv2d_2b_1x1': [5, 56, 56, 64],
                        'Conv2d_2c_3x3': [5, 56, 56, 192],
                        'MaxPool_3a_3x3': [5, 28, 28, 192],
                        'Mixed_3b': [5, 28, 28, 256],
                        'Mixed_3c': [5, 28, 28, 480],
                        'MaxPool_4a_3x3': [5, 14, 14, 480],
                        'Mixed_4b': [5, 14, 14, 512],
                        'Mixed_4c': [5, 14, 14, 512],
                        'Mixed_4d': [5, 14, 14, 512],
                        'Mixed_4e': [5, 14, 14, 528],
                        'Mixed_4f': [5, 14, 14, 832],
                        'MaxPool_5a_2x2': [5, 7, 7, 832],
                        'Mixed_5b': [5, 7, 7, 832],
                        'Mixed_5c': [5, 7, 7, 1024]}
    self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
    for endpoint_name in endpoints_shapes:
      expected_shape = endpoints_shapes[endpoint_name]
      self.assertTrue(endpoint_name in end_points)
      self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
                           expected_shape)

  def testModelHasExpectedNumberOfParameters(self):
    batch_size = 5
    height, width = 224, 224
    inputs = tf.random_uniform((batch_size, height, width, 3))
    with slim.arg_scope(inception.inception_v1_arg_scope()):
      inception.inception_v1_base(inputs)
    total_params, _ = slim.model_analyzer.analyze_vars(
        slim.get_model_variables())
    self.assertAlmostEqual(5607184, total_params)

  def testHalfSizeImages(self):
    batch_size = 5
    height, width = 112, 112

    inputs = tf.random_uniform((batch_size, height, width, 3))
    mixed_5c, _ = inception.inception_v1_base(inputs)
    self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
    self.assertListEqual(mixed_5c.get_shape().as_list(),
                         [batch_size, 4, 4, 1024])

  def testUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
      # Spatial dimensions left as None; only the batch size is static.
      inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
      logits, end_points = inception.inception_v1(inputs, num_classes)
      self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
      pre_pool = end_points['Mixed_5c']
      feed_dict = {inputs: input_np}
      tf.global_variables_initializer().run()
      pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
      self.assertListEqual(list(pre_pool_out.shape),
                           [batch_size, 7, 7, 1024])

  def testGlobalPoolUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 1
    height, width = 250, 300
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
      inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
      logits, end_points = inception.inception_v1(inputs, num_classes,
                                                  global_pool=True)
      self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
      pre_pool = end_points['Mixed_5c']
      feed_dict = {inputs: input_np}
      tf.global_variables_initializer().run()
      pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
      self.assertListEqual(list(pre_pool_out.shape),
                           [batch_size, 8, 10, 1024])

  def testUnknowBatchSize(self):
    batch_size = 1
    height, width = 224, 224
    num_classes = 1000

    inputs = tf.placeholder(tf.float32, (None, height, width, 3))
    logits, _ = inception.inception_v1(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [None, num_classes])
    images = tf.random_uniform((batch_size, height, width, 3))

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(logits, {inputs: images.eval()})
      self.assertEquals(output.shape, (batch_size, num_classes))

  def testEvaluation(self):
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000

    eval_inputs = tf.random_uniform((batch_size, height, width, 3))
    logits, _ = inception.inception_v1(eval_inputs, num_classes,
                                       is_training=False)
    predictions = tf.argmax(logits, 1)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (batch_size,))

  def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 224, 224
    num_classes = 1000

    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    inception.inception_v1(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception.inception_v1(eval_inputs, num_classes, reuse=True)
    predictions = tf.argmax(logits, 1)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (eval_batch_size,))

  def testLogitsNotSqueezed(self):
    num_classes = 25
    images = tf.random_uniform([1, 224, 224, 3])
    logits, _ = inception.inception_v1(images,
                                       num_classes=num_classes,
                                       spatial_squeeze=False)

    with self.test_session() as sess:
      tf.global_variables_initializer().run()
      logits_out = sess.run(logits)
      self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])


if __name__ == '__main__':
  tf.test.main()
| 10,157 | 40.802469 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/resnet_v2.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the preactivation form of Residual Networks.
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer.
Typical use:
from tensorflow.contrib.slim.nets import resnet_v2
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import resnet_utils
slim = tf.contrib.slim
resnet_arg_scope = resnet_utils.resnet_arg_scope
@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
               outputs_collections=None, scope=None):
  """Preactivation bottleneck residual unit (ResNet v2, Fig. 1(b) of [2]).

  Batch normalization and ReLU are applied before every convolution. When
  putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the units output compared to its input.
    rate: An integer, rate for atrous convolution.
    outputs_collections: Collection to add the ResNet unit output.
    scope: Optional variable_scope.

  Returns:
    The ResNet unit's output.
  """
  with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
    input_depth = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
    pre_activation = slim.batch_norm(
        inputs, activation_fn=tf.nn.relu, scope='preact')
    # Skip path: a subsampled identity when the depths already match,
    # otherwise a linear 1x1 projection of the preactivated input.
    if depth == input_depth:
      skip = resnet_utils.subsample(inputs, stride, 'shortcut')
    else:
      skip = slim.conv2d(pre_activation, depth, [1, 1], stride=stride,
                         normalizer_fn=None, activation_fn=None,
                         scope='shortcut')
    # Residual path: 1x1 reduce -> 3x3 (carries stride/rate) -> 1x1 expand.
    net = slim.conv2d(pre_activation, depth_bottleneck, [1, 1], stride=1,
                      scope='conv1')
    net = resnet_utils.conv2d_same(net, depth_bottleneck, 3, stride,
                                   rate=rate, scope='conv2')
    net = slim.conv2d(net, depth, [1, 1], stride=1,
                      normalizer_fn=None, activation_fn=None,
                      scope='conv3')
    return slim.utils.collect_named_outputs(outputs_collections,
                                            sc.name,
                                            skip + net)
def resnet_v2(inputs,
              blocks,
              num_classes=None,
              is_training=True,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              spatial_squeeze=True,
              reuse=None,
              scope=None):
  """Generator for v2 (preactivation) ResNet models.

  This function generates a family of ResNet v2 models. See the resnet_v2_*()
  methods for specific model instantiations, obtained by selecting different
  block instantiations that produce ResNets of various depths.

  Training for image classification on Imagenet is usually done with [224, 224]
  inputs, resulting in [7, 7] feature maps at the output of the last ResNet
  block for the ResNets defined in [1] that have nominal stride equal to 32.
  However, for dense prediction tasks we advise that one uses inputs with
  spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
  this case the feature maps at the ResNet output will have spatial shape
  [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
  and corners exactly aligned with the input image corners, which greatly
  facilitates alignment of the features to the image. Using as input [225, 225]
  images results in [8, 8] feature maps at the output of the last ResNet block.

  For dense prediction tasks, the ResNet needs to run in fully-convolutional
  (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
  have nominal stride equal to 32 and a good choice in FCN mode is to use
  output_stride=16 in order to increase the density of the computed features at
  small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.

  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    blocks: A list of length equal to the number of ResNet blocks. Each element
      is a resnet_utils.Block object describing the units in the block.
    num_classes: Number of predicted classes for classification tasks.
      If 0 or None, we return the features before the logit layer.
    is_training: whether batch_norm layers are in training mode.
    global_pool: If True, we perform global average pooling before computing the
      logits. Set to True for image classification, False for dense prediction.
    output_stride: If None, then the output will be computed at the nominal
      network stride. If output_stride is not None, it specifies the requested
      ratio of input to output spatial resolution.
    include_root_block: If True, include the initial convolution followed by
      max-pooling, if False excludes it. If excluded, `inputs` should be the
      results of an activation-less convolution.
    spatial_squeeze: if True, logits is of shape [B, C], if false logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
      To use this parameter, the input images must be smaller than 300x300
      pixels, in which case the output logit layer does not contain spatial
      information and can be removed.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
      If global_pool is False, then height_out and width_out are reduced by a
      factor of output_stride compared to the respective height_in and width_in,
      else both height_out and width_out equal one. If num_classes is 0 or None,
      then net is the output of the last ResNet block, potentially after global
      average pooling. If num_classes is a non-zero integer, net contains the
      pre-softmax activations.
    end_points: A dictionary from components of the network to the corresponding
      activation.

  Raises:
    ValueError: If the target output_stride is not valid.
  """
  with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
    # Intermediate activations are gathered in a graph collection named after
    # this scope and converted into a plain dict at the end.
    end_points_collection = sc.original_name_scope + '_end_points'
    with slim.arg_scope([slim.conv2d, bottleneck,
                         resnet_utils.stack_blocks_dense],
                        outputs_collections=end_points_collection):
      with slim.arg_scope([slim.batch_norm], is_training=is_training):
        net = inputs
        if include_root_block:
          if output_stride is not None:
            if output_stride % 4 != 0:
              raise ValueError('The output_stride needs to be a multiple of 4.')
            # The root block (stride-2 conv + stride-2 pool) already
            # downsamples by 4, so the residual blocks only need to realize
            # output_stride / 4.
            # NOTE(review): true division yields a float under Python 3;
            # presumably harmless if stack_blocks_dense only compares it for
            # equality — confirm against resnet_utils.stack_blocks_dense.
            output_stride /= 4
          # We do not include batch normalization or activation functions in
          # conv1 because the first ResNet unit will perform these. Cf.
          # Appendix of [2].
          with slim.arg_scope([slim.conv2d],
                              activation_fn=None, normalizer_fn=None):
            net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
          net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
        # This is needed because the pre-activation variant does not have batch
        # normalization or activation functions in the residual unit output. See
        # Appendix of [2].
        net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
        # Convert end_points_collection into a dictionary of end_points.
        end_points = slim.utils.convert_collection_to_dict(
            end_points_collection)
        if global_pool:
          # Global average pooling.
          net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
          end_points['global_pool'] = net
        if num_classes:
          # Linear classifier implemented as a 1x1 convolution over the
          # (possibly pooled) features.
          net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                            normalizer_fn=None, scope='logits')
          end_points[sc.name + '/logits'] = net
          if spatial_squeeze:
            # Drop the unit spatial dimensions so logits have shape [B, C].
            net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
            end_points[sc.name + '/spatial_squeeze'] = net
          end_points['predictions'] = slim.softmax(net, scope='predictions')
        return net, end_points
# Nominal input resolution for ImageNet classification.
resnet_v2.default_image_size = 224
def resnet_v2_block(scope, base_depth, num_units, stride):
  """Helper function for creating a resnet_v2 bottleneck block.

  Args:
    scope: The scope of the block.
    base_depth: The depth of the bottleneck layer for each unit.
    num_units: The number of units in the block.
    stride: The stride of the block, implemented as a stride in the last unit.
      All other units have stride=1.

  Returns:
    A resnet_v2 bottleneck block.
  """
  # Each unit expands the bottleneck depth 4x; only the final unit in the
  # block carries the block's stride, all preceding units use stride 1.
  # Note: the duplicate `resnet_v2.default_image_size = 224` assignment that
  # used to follow this function was removed — it is already set right after
  # resnet_v2() itself.
  return resnet_utils.Block(scope, bottleneck, [{
      'depth': base_depth * 4,
      'depth_bottleneck': base_depth,
      'stride': 1
  }] * (num_units - 1) + [{
      'depth': base_depth * 4,
      'depth_bottleneck': base_depth,
      'stride': stride
  }])
def resnet_v2_50(inputs,
                 num_classes=None,
                 is_training=True,
                 global_pool=True,
                 output_stride=None,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='resnet_v2_50'):
  """ResNet-50 model of [1]. See resnet_v2() for arg and return description."""
  # (base_depth, num_units, stride) per block; stride acts on the last unit.
  block_specs = [(64, 3, 2), (128, 4, 2), (256, 6, 2), (512, 3, 1)]
  blocks = [
      resnet_v2_block('block%d' % (i + 1), base_depth=d, num_units=u, stride=s)
      for i, (d, u, s) in enumerate(block_specs)]
  return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   reuse=reuse, scope=scope)
resnet_v2_50.default_image_size = resnet_v2.default_image_size
def resnet_v2_101(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v2_101'):
  """ResNet-101 model of [1]. See resnet_v2() for arg and return description."""
  # (base_depth, num_units, stride) per block; stride acts on the last unit.
  block_specs = [(64, 3, 2), (128, 4, 2), (256, 23, 2), (512, 3, 1)]
  blocks = [
      resnet_v2_block('block%d' % (i + 1), base_depth=d, num_units=u, stride=s)
      for i, (d, u, s) in enumerate(block_specs)]
  return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   reuse=reuse, scope=scope)
resnet_v2_101.default_image_size = resnet_v2.default_image_size
def resnet_v2_152(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v2_152'):
  """ResNet-152 model of [1]. See resnet_v2() for arg and return description."""
  # (base_depth, num_units, stride) per block; stride acts on the last unit.
  block_specs = [(64, 3, 2), (128, 8, 2), (256, 36, 2), (512, 3, 1)]
  blocks = [
      resnet_v2_block('block%d' % (i + 1), base_depth=d, num_units=u, stride=s)
      for i, (d, u, s) in enumerate(block_specs)]
  return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   reuse=reuse, scope=scope)
resnet_v2_152.default_image_size = resnet_v2.default_image_size
def resnet_v2_200(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v2_200'):
  """ResNet-200 model of [2]. See resnet_v2() for arg and return description."""
  # (base_depth, num_units, stride) per block; stride acts on the last unit.
  block_specs = [(64, 3, 2), (128, 24, 2), (256, 36, 2), (512, 3, 1)]
  blocks = [
      resnet_v2_block('block%d' % (i + 1), base_depth=d, num_units=u, stride=s)
      for i, (d, u, s) in enumerate(block_specs)]
  return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   reuse=reuse, scope=scope)
resnet_v2_200.default_image_size = resnet_v2.default_image_size
| 15,466 | 44.760355 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/inception_v4.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of the Inception V4 architecture.
As described in http://arxiv.org/abs/1602.07261.
Inception-v4, Inception-ResNet and the Impact of Residual Connections
on Learning
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import inception_utils
slim = tf.contrib.slim
def block_inception_a(inputs, scope=None, reuse=None):
  """Builds Inception-A block for Inception v4 network."""
  # Every op inside the block defaults to stride 1 with SAME padding.
  with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                      stride=1, padding='SAME'):
    with tf.variable_scope(scope, 'BlockInceptionA', [inputs], reuse=reuse):
      with tf.variable_scope('Branch_0'):
        tower_0 = slim.conv2d(inputs, 96, [1, 1], scope='Conv2d_0a_1x1')
      with tf.variable_scope('Branch_1'):
        tower_1 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
        tower_1 = slim.conv2d(tower_1, 96, [3, 3], scope='Conv2d_0b_3x3')
      with tf.variable_scope('Branch_2'):
        tower_2 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
        tower_2 = slim.conv2d(tower_2, 96, [3, 3], scope='Conv2d_0b_3x3')
        tower_2 = slim.conv2d(tower_2, 96, [3, 3], scope='Conv2d_0c_3x3')
      with tf.variable_scope('Branch_3'):
        tower_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
        tower_3 = slim.conv2d(tower_3, 96, [1, 1], scope='Conv2d_0b_1x1')
      # Concatenate the four parallel towers along the channel axis.
      return tf.concat(axis=3, values=[tower_0, tower_1, tower_2, tower_3])
def block_reduction_a(inputs, scope=None, reuse=None):
  """Builds Reduction-A block for Inception v4 network."""
  # Ops default to stride 1 / SAME padding; the stride-2 VALID ops below
  # perform the spatial downsampling this block exists for.
  with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                      stride=1, padding='SAME'):
    with tf.variable_scope(scope, 'BlockReductionA', [inputs], reuse=reuse):
      with tf.variable_scope('Branch_0'):
        tower_0 = slim.conv2d(inputs, 384, [3, 3], stride=2, padding='VALID',
                              scope='Conv2d_1a_3x3')
      with tf.variable_scope('Branch_1'):
        tower_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
        tower_1 = slim.conv2d(tower_1, 224, [3, 3], scope='Conv2d_0b_3x3')
        tower_1 = slim.conv2d(tower_1, 256, [3, 3], stride=2,
                              padding='VALID', scope='Conv2d_1a_3x3')
      with tf.variable_scope('Branch_2'):
        tower_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
                                  scope='MaxPool_1a_3x3')
      return tf.concat(axis=3, values=[tower_0, tower_1, tower_2])
def block_inception_b(inputs, scope=None, reuse=None):
  """Builds Inception-B block for Inception v4 network."""
  # Every op inside the block defaults to stride 1 with SAME padding.
  with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                      stride=1, padding='SAME'):
    with tf.variable_scope(scope, 'BlockInceptionB', [inputs], reuse=reuse):
      with tf.variable_scope('Branch_0'):
        tower_0 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
      with tf.variable_scope('Branch_1'):
        # Factorized 7x7 convolution: 1x7 followed by 7x1.
        tower_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
        tower_1 = slim.conv2d(tower_1, 224, [1, 7], scope='Conv2d_0b_1x7')
        tower_1 = slim.conv2d(tower_1, 256, [7, 1], scope='Conv2d_0c_7x1')
      with tf.variable_scope('Branch_2'):
        # Double factorized 7x7 stack.
        tower_2 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
        tower_2 = slim.conv2d(tower_2, 192, [7, 1], scope='Conv2d_0b_7x1')
        tower_2 = slim.conv2d(tower_2, 224, [1, 7], scope='Conv2d_0c_1x7')
        tower_2 = slim.conv2d(tower_2, 224, [7, 1], scope='Conv2d_0d_7x1')
        tower_2 = slim.conv2d(tower_2, 256, [1, 7], scope='Conv2d_0e_1x7')
      with tf.variable_scope('Branch_3'):
        tower_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
        tower_3 = slim.conv2d(tower_3, 128, [1, 1], scope='Conv2d_0b_1x1')
      return tf.concat(axis=3, values=[tower_0, tower_1, tower_2, tower_3])
def block_reduction_b(inputs, scope=None, reuse=None):
  """Builds Reduction-B block for Inception v4 network."""
  # Ops default to stride 1 / SAME padding; the stride-2 VALID ops below
  # perform the spatial downsampling this block exists for.
  with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                      stride=1, padding='SAME'):
    with tf.variable_scope(scope, 'BlockReductionB', [inputs], reuse=reuse):
      with tf.variable_scope('Branch_0'):
        tower_0 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
        tower_0 = slim.conv2d(tower_0, 192, [3, 3], stride=2,
                              padding='VALID', scope='Conv2d_1a_3x3')
      with tf.variable_scope('Branch_1'):
        tower_1 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
        tower_1 = slim.conv2d(tower_1, 256, [1, 7], scope='Conv2d_0b_1x7')
        tower_1 = slim.conv2d(tower_1, 320, [7, 1], scope='Conv2d_0c_7x1')
        tower_1 = slim.conv2d(tower_1, 320, [3, 3], stride=2,
                              padding='VALID', scope='Conv2d_1a_3x3')
      with tf.variable_scope('Branch_2'):
        tower_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
                                  scope='MaxPool_1a_3x3')
      return tf.concat(axis=3, values=[tower_0, tower_1, tower_2])
def block_inception_c(inputs, scope=None, reuse=None):
  """Builds Inception-C block for Inception v4 network."""
  # Every op inside the block defaults to stride 1 with SAME padding.
  with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                      stride=1, padding='SAME'):
    with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse):
      with tf.variable_scope('Branch_0'):
        tower_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
      with tf.variable_scope('Branch_1'):
        # The 1x1 output is split into parallel 1x3 and 3x1 convolutions
        # whose outputs are concatenated channel-wise.
        tower_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
        tower_1 = tf.concat(axis=3, values=[
            slim.conv2d(tower_1, 256, [1, 3], scope='Conv2d_0b_1x3'),
            slim.conv2d(tower_1, 256, [3, 1], scope='Conv2d_0c_3x1')])
      with tf.variable_scope('Branch_2'):
        tower_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
        tower_2 = slim.conv2d(tower_2, 448, [3, 1], scope='Conv2d_0b_3x1')
        tower_2 = slim.conv2d(tower_2, 512, [1, 3], scope='Conv2d_0c_1x3')
        tower_2 = tf.concat(axis=3, values=[
            slim.conv2d(tower_2, 256, [1, 3], scope='Conv2d_0d_1x3'),
            slim.conv2d(tower_2, 256, [3, 1], scope='Conv2d_0e_3x1')])
      with tf.variable_scope('Branch_3'):
        tower_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
        tower_3 = slim.conv2d(tower_3, 256, [1, 1], scope='Conv2d_0b_1x1')
      return tf.concat(axis=3, values=[tower_0, tower_1, tower_2, tower_3])
def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
  """Creates the Inception V4 network up to the given final endpoint.

  Args:
    inputs: a 4-D tensor of size [batch_size, height, width, 3].
    final_endpoint: specifies the endpoint to construct the network up to.
      It can be one of [ 'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
      'Mixed_3a', 'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
      'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e',
      'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c',
      'Mixed_7d']
    scope: Optional variable_scope.

  Returns:
    logits: the logits outputs of the model.
    end_points: the set of end_points from the inception model.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
  """
  end_points = {}

  def add_and_check_final(name, net):
    # Record the activation under `name` and report whether construction
    # should stop at this endpoint.
    end_points[name] = net
    return name == final_endpoint

  with tf.variable_scope(scope, 'InceptionV4', [inputs]):
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1, padding='SAME'):
      # Stem: three plain convolutions.
      # 299 x 299 x 3
      net = slim.conv2d(inputs, 32, [3, 3], stride=2,
                        padding='VALID', scope='Conv2d_1a_3x3')
      if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points
      # 149 x 149 x 32
      net = slim.conv2d(net, 32, [3, 3], padding='VALID',
                        scope='Conv2d_2a_3x3')
      if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points
      # 147 x 147 x 32
      net = slim.conv2d(net, 64, [3, 3], scope='Conv2d_2b_3x3')
      if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points
      # 147 x 147 x 64
      # Mixed_3a: parallel max-pool and stride-2 conv, concatenated.
      with tf.variable_scope('Mixed_3a'):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                     scope='MaxPool_0a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, 96, [3, 3], stride=2, padding='VALID',
                                 scope='Conv2d_0a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1])
        if add_and_check_final('Mixed_3a', net): return net, end_points
      # 73 x 73 x 160
      # Mixed_4a: two conv towers (one with factorized 7x7), concatenated.
      with tf.variable_scope('Mixed_4a'):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
          branch_0 = slim.conv2d(branch_0, 96, [3, 3], padding='VALID',
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, 64, [1, 7], scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, 64, [7, 1], scope='Conv2d_0c_7x1')
          branch_1 = slim.conv2d(branch_1, 96, [3, 3], padding='VALID',
                                 scope='Conv2d_1a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1])
        if add_and_check_final('Mixed_4a', net): return net, end_points
      # 71 x 71 x 192
      # Mixed_5a: stride-2 conv alongside max-pool, concatenated.
      with tf.variable_scope('Mixed_5a'):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, 192, [3, 3], stride=2, padding='VALID',
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                     scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1])
        if add_and_check_final('Mixed_5a', net): return net, end_points
      # 35 x 35 x 384
      # 4 x Inception-A blocks (scopes Mixed_5b .. Mixed_5e)
      for idx in range(4):
        block_scope = 'Mixed_5' + chr(ord('b') + idx)
        net = block_inception_a(net, block_scope)
        if add_and_check_final(block_scope, net): return net, end_points
      # 35 x 35 x 384
      # Reduction-A block
      net = block_reduction_a(net, 'Mixed_6a')
      if add_and_check_final('Mixed_6a', net): return net, end_points
      # 17 x 17 x 1024
      # 7 x Inception-B blocks (scopes Mixed_6b .. Mixed_6h)
      for idx in range(7):
        block_scope = 'Mixed_6' + chr(ord('b') + idx)
        net = block_inception_b(net, block_scope)
        if add_and_check_final(block_scope, net): return net, end_points
      # 17 x 17 x 1024
      # Reduction-B block
      net = block_reduction_b(net, 'Mixed_7a')
      if add_and_check_final('Mixed_7a', net): return net, end_points
      # 8 x 8 x 1536
      # 3 x Inception-C blocks (scopes Mixed_7b .. Mixed_7d)
      for idx in range(3):
        block_scope = 'Mixed_7' + chr(ord('b') + idx)
        net = block_inception_c(net, block_scope)
        if add_and_check_final(block_scope, net): return net, end_points
  # Only reached when final_endpoint matched none of the endpoints above.
  raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v4(inputs, num_classes=1001, is_training=True,
                 dropout_keep_prob=0.8,
                 reuse=None,
                 scope='InceptionV4',
                 create_aux_logits=True):
  """Creates the Inception V4 model.

  Args:
    inputs: a 4-D tensor of size [batch_size, height, width, 3].
    num_classes: number of predicted classes. If 0 or None, the logits layer
      is omitted and the input features to the logits layer (before dropout)
      are returned instead.
    is_training: whether is training or not.
    dropout_keep_prob: float, the fraction to keep before final layer.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.
    create_aux_logits: Whether to include the auxiliary logits.

  Returns:
    net: a Tensor with the logits (pre-softmax activations) if num_classes
      is a non-zero integer, or the non-dropped input to the logits layer
      if num_classes is 0 or None.
    end_points: the set of end_points from the inception model.
  """
  end_points = {}
  # Note: the `as scope` rebinds the parameter to the created scope object,
  # which is then passed on to inception_v4_base.
  with tf.variable_scope(scope, 'InceptionV4', [inputs], reuse=reuse) as scope:
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
      net, end_points = inception_v4_base(inputs, scope=scope)
      with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                          stride=1, padding='SAME'):
        # Auxiliary Head logits: a small classifier branched off Mixed_6h,
        # used as an extra training-time loss.
        if create_aux_logits and num_classes:
          with tf.variable_scope('AuxLogits'):
            # 17 x 17 x 1024
            aux_logits = end_points['Mixed_6h']
            aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3,
                                         padding='VALID',
                                         scope='AvgPool_1a_5x5')
            aux_logits = slim.conv2d(aux_logits, 128, [1, 1],
                                     scope='Conv2d_1b_1x1')
            aux_logits = slim.conv2d(aux_logits, 768,
                                     aux_logits.get_shape()[1:3],
                                     padding='VALID', scope='Conv2d_2a')
            aux_logits = slim.flatten(aux_logits)
            aux_logits = slim.fully_connected(aux_logits, num_classes,
                                              activation_fn=None,
                                              scope='Aux_logits')
            end_points['AuxLogits'] = aux_logits

        # Final pooling and prediction
        # TODO(sguada,arnoegw): Consider adding a parameter global_pool which
        # can be set to False to disable pooling here (as in resnet_*()).
        with tf.variable_scope('Logits'):
          # 8 x 8 x 1536
          kernel_size = net.get_shape()[1:3]
          if kernel_size.is_fully_defined():
            # Static spatial size: use an exact average pool.
            net = slim.avg_pool2d(net, kernel_size, padding='VALID',
                                  scope='AvgPool_1a')
          else:
            # Dynamic spatial size: fall back to a global mean.
            net = tf.reduce_mean(net, [1, 2], keep_dims=True,
                                 name='global_pool')
          end_points['global_pool'] = net
          if not num_classes:
            # Feature-extraction mode: return pooled features, no classifier.
            return net, end_points
          # 1 x 1 x 1536
          net = slim.dropout(net, dropout_keep_prob, scope='Dropout_1b')
          net = slim.flatten(net, scope='PreLogitsFlatten')
          end_points['PreLogitsFlatten'] = net
          # 1536
          logits = slim.fully_connected(net, num_classes, activation_fn=None,
                                        scope='Logits')
          end_points['Logits'] = logits
          end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')
    return logits, end_points
# Nominal input resolution for this architecture.
inception_v4.default_image_size = 299

# Re-export the shared Inception arg scope under this module's name.
inception_v4_arg_scope = inception_utils.inception_arg_scope
| 16,409 | 47.550296 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/inception_v3.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v3 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import inception_utils
slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
def inception_v3_base(inputs,
                      final_endpoint='Mixed_7c',
                      min_depth=16,
                      depth_multiplier=1.0,
                      scope=None):
  """Inception model from http://arxiv.org/abs/1512.00567.
  Constructs an Inception v3 network from inputs to the given final endpoint.
  This method can construct the network up to the final inception block
  Mixed_7c.
  Note that the names of the layers in the paper do not correspond to the names
  of the endpoints registered by this function although they build the same
  network.
  Here is a mapping from the old_names to the new names:
  Old name          | New name
  =======================================
  conv0             | Conv2d_1a_3x3
  conv1             | Conv2d_2a_3x3
  conv2             | Conv2d_2b_3x3
  pool1             | MaxPool_3a_3x3
  conv3             | Conv2d_3b_1x1
  conv4             | Conv2d_4a_3x3
  pool2             | MaxPool_5a_3x3
  mixed_35x35x256a  | Mixed_5b
  mixed_35x35x288a  | Mixed_5c
  mixed_35x35x288b  | Mixed_5d
  mixed_17x17x768a  | Mixed_6a
  mixed_17x17x768b  | Mixed_6b
  mixed_17x17x768c  | Mixed_6c
  mixed_17x17x768d  | Mixed_6d
  mixed_17x17x768e  | Mixed_6e
  mixed_8x8x1280a   | Mixed_7a
  mixed_8x8x2048a   | Mixed_7b
  mixed_8x8x2048b   | Mixed_7c
  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
      'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
      'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c',
      'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'].
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    scope: Optional variable_scope.
  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a set of activations for external use, for example summaries or
      losses.
  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
      or depth_multiplier <= 0
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  # depth() scales every layer's channel count by depth_multiplier, but never
  # below min_depth.
  depth = lambda d: max(int(d * depth_multiplier), min_depth)
  with tf.variable_scope(scope, 'InceptionV3', [inputs]):
    # Stem: plain convs/pools with VALID padding by default.
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1, padding='VALID'):
      # 299 x 299 x 3
      end_point = 'Conv2d_1a_3x3'
      net = slim.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 149 x 149 x 32
      end_point = 'Conv2d_2a_3x3'
      net = slim.conv2d(net, depth(32), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 147 x 147 x 32
      end_point = 'Conv2d_2b_3x3'
      net = slim.conv2d(net, depth(64), [3, 3], padding='SAME', scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 147 x 147 x 64
      end_point = 'MaxPool_3a_3x3'
      net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 73 x 73 x 64
      end_point = 'Conv2d_3b_1x1'
      net = slim.conv2d(net, depth(80), [1, 1], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 73 x 73 x 80.
      end_point = 'Conv2d_4a_3x3'
      net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 71 x 71 x 192.
      end_point = 'MaxPool_5a_3x3'
      net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 35 x 35 x 192.
    # Inception blocks: SAME padding by default so branch outputs concat.
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1, padding='SAME'):
      # mixed: 35 x 35 x 256.
      end_point = 'Mixed_5b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                 scope='Conv2d_0b_5x5')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(32), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_1: 35 x 35 x 288.
      end_point = 'Mixed_5c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          # NOTE(review): the irregular scope names here ('Conv2d_0b_1x1',
          # 'Conv_1_0c_5x5') are historical; presumably kept so existing
          # checkpoints still load -- do not rename.
          branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
          branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                 scope='Conv_1_0c_5x5')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(64), [1, 1],
                                 scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_2: 35 x 35 x 288.
      end_point = 'Mixed_5d'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                 scope='Conv2d_0b_5x5')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_3: 17 x 17 x 768.
      # Grid-size reduction block: stride-2 branches halve spatial dims.
      end_point = 'Mixed_6a'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(384), [3, 3], stride=2,
                                 padding='VALID', scope='Conv2d_1a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], stride=2,
                                 padding='VALID', scope='Conv2d_1a_1x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                     scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed4: 17 x 17 x 768.
      # Mixed_6b..6e use factorized 7x7 convolutions (1x7 followed by 7x1).
      end_point = 'Mixed_6b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(128), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
                                 scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(branch_2, depth(128), [1, 7],
                                 scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
                                 scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_5: 17 x 17 x 768.
      end_point = 'Mixed_6c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                 scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],
                                 scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                 scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_6: 17 x 17 x 768.
      # Identical topology to Mixed_6c (same 160-channel bottleneck).
      end_point = 'Mixed_6d'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                 scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],
                                 scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                 scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_7: 17 x 17 x 768.
      # Mixed_6e also feeds the auxiliary classifier head in inception_v3().
      end_point = 'Mixed_6e'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
                                 scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
                                 scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_8: 8 x 8 x 1280.
      # Second grid-size reduction block (17x17 -> 8x8).
      end_point = 'Mixed_7a'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_0 = slim.conv2d(branch_0, depth(320), [3, 3], stride=2,
                                 padding='VALID', scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
          branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], stride=2,
                                 padding='VALID', scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                     scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_9: 8 x 8 x 2048.
      # Mixed_7b/7c use expanded-filter-bank branches: each 3x3 is split
      # into parallel 1x3 and 3x1 convs whose outputs are concatenated.
      end_point = 'Mixed_7b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
          # NOTE(review): both parallel convs share the '0b' suffix here
          # (cf. 'Conv2d_0c_3x1' in Mixed_7c); presumably a legacy naming
          # quirk kept for checkpoint compatibility -- do not rename.
          branch_1 = tf.concat(axis=3, values=[
              slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
              slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')])
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(
              branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = tf.concat(axis=3, values=[
              slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
              slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_10: 8 x 8 x 2048.
      end_point = 'Mixed_7c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = tf.concat(axis=3, values=[
              slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
              slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')])
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(
              branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = tf.concat(axis=3, values=[
              slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
              slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
    raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v3(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 min_depth=16,
                 depth_multiplier=1.0,
                 prediction_fn=slim.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 create_aux_logits=True,
                 scope='InceptionV3',
                 global_pool=False):
  """Inception model from http://arxiv.org/abs/1512.00567.
  "Rethinking the Inception Architecture for Computer Vision"
  Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
  Zbigniew Wojna.
  With the default arguments this method constructs the exact model defined in
  the paper. However, one can experiment with variations of the inception_v3
  network by changing arguments dropout_keep_prob, min_depth and
  depth_multiplier.
  The default image size used to train this network is 299x299.
  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer
      is omitted and the input features to the logits layer (before dropout)
      are returned instead.
    is_training: whether is training or not.
    dropout_keep_prob: the percentage of activation values that are retained.
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C], if false logits is of
        shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    create_aux_logits: Whether to create the auxiliary logits.
    scope: Optional variable_scope.
    global_pool: Optional boolean flag to control the avgpooling before the
      logits layer. If false or unset, pooling is done with a fixed window
      that reduces default-sized inputs to 1x1, while larger inputs lead to
      larger outputs. If true, any input size is pooled down to 1x1.
  Returns:
    net: a Tensor with the logits (pre-softmax activations) if num_classes
      is a non-zero integer, or the non-dropped-out input to the logits layer
      if num_classes is 0 or None.
    end_points: a dictionary from components of the network to the corresponding
      activation.
  Raises:
    ValueError: if 'depth_multiplier' is less than or equal to zero.
  """
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  # depth() scales layer widths by depth_multiplier, never below min_depth.
  depth = lambda d: max(int(d * depth_multiplier), min_depth)
  with tf.variable_scope(scope, 'InceptionV3', [inputs], reuse=reuse) as scope:
    # batch_norm and dropout switch between train/inference behavior here.
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
      net, end_points = inception_v3_base(
          inputs, scope=scope, min_depth=min_depth,
          depth_multiplier=depth_multiplier)
      # Auxiliary Head logits (training-time regularizer attached to the
      # 'Mixed_6e' activation, per the paper).
      if create_aux_logits and num_classes:
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                            stride=1, padding='SAME'):
          aux_logits = end_points['Mixed_6e']
          with tf.variable_scope('AuxLogits'):
            aux_logits = slim.avg_pool2d(
                aux_logits, [5, 5], stride=3, padding='VALID',
                scope='AvgPool_1a_5x5')
            aux_logits = slim.conv2d(aux_logits, depth(128), [1, 1],
                                     scope='Conv2d_1b_1x1')
            # Shape of feature map before the final layer.
            kernel_size = _reduced_kernel_size_for_small_input(
                aux_logits, [5, 5])
            aux_logits = slim.conv2d(
                aux_logits, depth(768), kernel_size,
                weights_initializer=trunc_normal(0.01),
                padding='VALID', scope='Conv2d_2a_{}x{}'.format(*kernel_size))
            aux_logits = slim.conv2d(
                aux_logits, num_classes, [1, 1], activation_fn=None,
                normalizer_fn=None, weights_initializer=trunc_normal(0.001),
                scope='Conv2d_2b_1x1')
            if spatial_squeeze:
              aux_logits = tf.squeeze(aux_logits, [1, 2], name='SpatialSqueeze')
            end_points['AuxLogits'] = aux_logits
      # Final pooling and prediction
      with tf.variable_scope('Logits'):
        if global_pool:
          # Global average pooling.
          # NOTE(review): keep_dims is the TF 1.x argument spelling
          # (renamed keepdims in later TF releases).
          net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='GlobalPool')
          end_points['global_pool'] = net
        else:
          # Pooling with a fixed kernel size.
          kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])
          net = slim.avg_pool2d(net, kernel_size, padding='VALID',
                                scope='AvgPool_1a_{}x{}'.format(*kernel_size))
          end_points['AvgPool_1a'] = net
        if not num_classes:
          # Feature-extraction mode: return pre-dropout pooled features.
          return net, end_points
        # 1 x 1 x 2048
        net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
        end_points['PreLogits'] = net
        # 2048
        # 1x1 conv acts as the fully-connected classifier layer.
        logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                             normalizer_fn=None, scope='Conv2d_1c_1x1')
        if spatial_squeeze:
          logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
        # 1000
      end_points['Logits'] = logits
      end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
# Canonical training/eval resolution for this architecture (299x299).
inception_v3.default_image_size = 299
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are is large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
handle tensors that define the kernel size.
shape = tf.shape(input_tensor)
return = tf.stack([tf.minimum(shape[1], kernel_size[0]),
tf.minimum(shape[2], kernel_size[1])])
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1])]
return kernel_size_out
# Re-export the shared Inception arg_scope factory so callers can build this
# net under `with slim.arg_scope(inception_v3_arg_scope()):`.
inception_v3_arg_scope = inception_utils.inception_arg_scope
| 28,320 | 47.82931 | 80 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.