repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
larq | larq-main/larq/activations_test.py | import numpy as np
import pytest
import tensorflow as tf
import larq as lq
from larq.testing_utils import generate_real_values_with_zeros
@pytest.mark.parametrize("name", ["hard_tanh", "leaky_tanh"])
def test_serialization(name):
fn = tf.keras.activations.get(name)
ref_fn = getattr(lq.activations, name)
assert fn == ref_fn
config = tf.keras.activations.serialize(fn)
fn = tf.keras.activations.deserialize(config)
assert fn == ref_fn
def test_hard_tanh():
real_values = generate_real_values_with_zeros()
x = tf.keras.backend.placeholder(ndim=2)
f = tf.keras.backend.function([x], [lq.activations.hard_tanh(x)])
result = f([real_values])[0]
np.testing.assert_allclose(result, np.clip(real_values, -1, 1))
def test_leaky_tanh():
@np.vectorize
def leaky_tanh(x, alpha):
if x <= -1:
return -1 + alpha * (x + 1)
elif x <= 1:
return x
else:
return 1 + alpha * (x - 1)
real_values = generate_real_values_with_zeros()
x = tf.keras.backend.placeholder(ndim=2)
f = tf.keras.backend.function([x], [lq.activations.leaky_tanh(x)])
result = f([real_values])[0]
np.testing.assert_allclose(result, leaky_tanh(real_values, alpha=0.2))
| 1,259 | 29 | 74 | py |
larq | larq-main/larq/conftest_test.py | import pytest
import tensorflow as tf
from larq import context
def test_eager_and_graph_mode_fixture(eager_and_graph_mode):
if eager_and_graph_mode == "eager":
assert tf.executing_eagerly()
else:
assert not tf.executing_eagerly()
assert tf.compat.v1.get_default_session() is not None
@pytest.mark.usefixtures("eager_mode")
def test_eager_mode_fixture():
assert tf.executing_eagerly()
@pytest.mark.usefixtures("graph_mode")
def test_graph_mode_fixture():
assert not tf.executing_eagerly()
assert tf.compat.v1.get_default_session() is not None
def test_distribute_scope(distribute_scope):
assert tf.distribute.has_strategy() is distribute_scope
def test_quantize_scope(quantized):
assert context.should_quantize() == quantized
| 788 | 23.65625 | 61 | py |
larq | larq-main/larq/quantized_variable_test.py | import numpy as np
import pytest
import tensorflow as tf
from numpy.testing import assert_almost_equal, assert_array_equal
from packaging import version
from tensorflow.python.distribute.values import DistributedVariable
from larq import context, testing_utils
from larq.quantized_variable import QuantizedVariable
from larq.testing_utils import evaluate
def get_var(val, dtype=None, name=None):
return tf.compat.v1.Variable(val, use_resource=True, dtype=dtype, name=name)
def test_inheritance(distribute_scope):
variable = get_var(3.0)
quantized_variable = QuantizedVariable.from_variable(variable)
assert isinstance(quantized_variable, QuantizedVariable)
assert isinstance(quantized_variable, tf.Variable)
assert isinstance(quantized_variable, DistributedVariable) is distribute_scope
@pytest.mark.usefixtures("eager_and_graph_mode", "distribute_scope")
def test_read():
x = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda x: 2 * x)
evaluate(x.initializer)
assert evaluate(x) == 3.5
assert evaluate(x.value()) == 3.5
assert evaluate(x.read_value()) == 3.5
assert evaluate(tf.identity(x)) == 3.5
with context.quantized_scope(True):
assert evaluate(x) == 7
assert evaluate(x.value()) == 7
assert evaluate(x.read_value()) == 7
assert evaluate(tf.identity(x)) == 7
@pytest.mark.usefixtures("eager_and_graph_mode")
def test_sparse_reads():
x = QuantizedVariable.from_variable(get_var([1.0, 2.0]), quantizer=lambda x: 2 * x)
evaluate(x.initializer)
assert evaluate(x.sparse_read([0])) == 1
assert evaluate(x.gather_nd([0])) == 1
with context.quantized_scope(True):
assert evaluate(x.sparse_read([0])) == 2
assert evaluate(x.gather_nd([0])) == 2
@pytest.mark.usefixtures("eager_and_graph_mode", "distribute_scope")
def test_read_nested_scopes():
x = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda x: 2 * x)
evaluate(x.initializer)
with context.quantized_scope(True):
assert evaluate(x.read_value()) == 7
with context.quantized_scope(False):
assert evaluate(x.read_value()) == 3.5
assert evaluate(x.read_value()) == 7
@pytest.mark.usefixtures("eager_and_graph_mode")
def test_method_delegations(distribute_scope):
x = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda x: 2 * x)
with context.quantized_scope(True):
evaluate(x.initializer)
assert evaluate(x.value()) == 7
assert evaluate(x.read_value()) == 7
assert x.trainable
if version.parse(tf.__version__) > version.parse("1.14"):
assert x.synchronization == x.latent_variable.synchronization
assert x.aggregation == x.latent_variable.aggregation
if version.parse(tf.__version__) < version.parse("2.13"):
assert evaluate(x.initialized_value()) == 7
if not tf.executing_eagerly():
if not distribute_scope:
# These functions are not supported for DistributedVariables
x.load(4.5)
assert x.eval() == 9
assert evaluate(x.initial_value) == 7
assert x.op == x.latent_variable.op
assert x.graph == x.latent_variable.graph
if not distribute_scope:
# These attributes are not supported for DistributedVariables
assert x.constraint is None
assert x.initializer == x.latent_variable.initializer
def apply_and_read(x, fn, args):
evaluate(fn(*args))
return evaluate(x)
assert apply_and_read(x, x.assign, [4]) == 8
assert apply_and_read(x, x.assign_add, [1]) == 10
assert apply_and_read(x, x.assign_sub, [1.5]) == 7
assert x.name == x.latent_variable.name
assert x.device == x.latent_variable.device
assert x.shape == ()
assert x.get_shape() == ()
try:
x.set_shape(())
assert x.shape == ()
except NotImplementedError:
pass
@pytest.mark.usefixtures("eager_and_graph_mode")
def test_scatter_method_delegations():
x = QuantizedVariable.from_variable(get_var([3.5, 4]), quantizer=lambda x: 2 * x)
evaluate(x.initializer)
with context.quantized_scope(True):
assert_array_equal(evaluate(x.value()), [7, 8])
def slices(val, index):
return tf.IndexedSlices(values=val, indices=index)
assert_array_equal(evaluate(x.scatter_sub(slices(0.5, 0))), [6, 8])
assert_array_equal(evaluate(x.scatter_add(slices(0.5, 0))), [7, 8])
if version.parse(tf.__version__) > version.parse("1.14"):
assert_array_equal(evaluate(x.scatter_max(slices(4.5, 1))), [7, 9])
assert_array_equal(evaluate(x.scatter_min(slices(4.0, 1))), [7, 8])
assert_array_equal(evaluate(x.scatter_mul(slices(2.0, 1))), [7, 16])
assert_array_equal(evaluate(x.scatter_div(slices(2.0, 1))), [7, 8])
assert_array_equal(evaluate(x.scatter_update(slices(2.0, 1))), [7, 4])
assert_array_equal(evaluate(x.scatter_nd_sub([[0], [1]], [0.5, 1.0])), [6, 2])
assert_array_equal(evaluate(x.scatter_nd_add([[0], [1]], [0.5, 1.0])), [7, 4])
assert_array_equal(
evaluate(x.scatter_nd_update([[0], [1]], [0.5, 1.0])), [1, 2]
)
assert_array_equal(evaluate(x.batch_scatter_update(slices([2.0], [1]))), [1, 4])
@pytest.mark.usefixtures("eager_and_graph_mode", "distribute_scope")
def test_overloads(quantized):
if quantized:
x = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda x: 2 * x)
else:
x = QuantizedVariable.from_variable(get_var(7.0))
evaluate(x.initializer)
assert_almost_equal(8, evaluate(x + 1))
assert_almost_equal(10, evaluate(3 + x))
assert_almost_equal(14, evaluate(x + x))
assert_almost_equal(5, evaluate(x - 2))
assert_almost_equal(6, evaluate(13 - x))
assert_almost_equal(0, evaluate(x - x))
assert_almost_equal(14, evaluate(x * 2))
assert_almost_equal(21, evaluate(3 * x))
assert_almost_equal(49, evaluate(x * x))
assert_almost_equal(3.5, evaluate(x / 2))
assert_almost_equal(1.5, evaluate(10.5 / x))
assert_almost_equal(3, evaluate(x // 2))
assert_almost_equal(2, evaluate(15 // x))
assert_almost_equal(1, evaluate(x % 2))
assert_almost_equal(2, evaluate(16 % x))
assert evaluate(x < 12)
assert evaluate(x <= 12)
assert not evaluate(x > 12)
assert not evaluate(x >= 12)
assert not evaluate(12 < x)
assert not evaluate(12 <= x)
assert evaluate(12 > x)
assert evaluate(12 >= x)
assert_almost_equal(343, evaluate(pow(x, 3)))
assert_almost_equal(128, evaluate(pow(2, x)))
assert_almost_equal(-7, evaluate(-x))
assert_almost_equal(7, evaluate(abs(x)))
@pytest.mark.usefixtures("eager_mode")
def test_tensor_equality(quantized):
if quantized:
x = QuantizedVariable.from_variable(
get_var([3.5, 4.0, 4.5]), quantizer=lambda x: 2 * x
)
else:
x = QuantizedVariable.from_variable(get_var([7.0, 8.0, 9.0]))
evaluate(x.initializer)
assert_array_equal(evaluate(x), [7.0, 8.0, 9.0])
if version.parse(tf.__version__) >= version.parse("2"):
assert_array_equal(x == [7.0, 8.0, 10.0], [True, True, False])
assert_array_equal(x != [7.0, 8.0, 10.0], [False, False, True])
@pytest.mark.usefixtures("eager_and_graph_mode")
def test_assign(quantized, distribute_scope):
x = QuantizedVariable.from_variable(
get_var(0.0, tf.float64), quantizer=lambda x: 2 * x
)
evaluate(x.initializer)
latent_value = 3.14
value = latent_value * 2 if quantized else latent_value
# Assign doesn't correctly return a quantized variable in graph mode if a strategy is used
if tf.executing_eagerly() or not distribute_scope or not quantized:
# Assign float32 values
lv = tf.constant(latent_value, dtype=tf.float64)
assert_almost_equal(evaluate(x.assign(lv)), value)
assert_almost_equal(evaluate(x.assign_add(lv)), value * 2)
assert_almost_equal(evaluate(x.assign_sub(lv)), value)
# Assign Python floats
assert_almost_equal(evaluate(x.assign(0.0)), 0.0)
assert_almost_equal(evaluate(x.assign(latent_value)), value)
assert_almost_equal(evaluate(x.assign_add(latent_value)), value * 2)
assert_almost_equal(evaluate(x.assign_sub(latent_value)), value)
# Use the tf.assign functions instead of the var.assign methods.
assert_almost_equal(evaluate(tf.compat.v1.assign(x, 0.0)), 0.0)
assert_almost_equal(evaluate(tf.compat.v1.assign(x, latent_value)), value)
assert_almost_equal(
evaluate(tf.compat.v1.assign_add(x, latent_value)), value * 2
)
assert_almost_equal(evaluate(tf.compat.v1.assign_sub(x, latent_value)), value)
# Assign multiple times
if version.parse(tf.__version__) >= version.parse("2.2") and (
tf.executing_eagerly() or not distribute_scope
):
assign = x.assign(0.0)
assert_almost_equal(evaluate(assign), 0.0)
assert_almost_equal(evaluate(assign.assign(latent_value)), value)
assert_almost_equal(
evaluate(x.assign_add(latent_value).assign_add(latent_value)), value * 3
)
assert_almost_equal(evaluate(x), value * 3)
assert_almost_equal(
evaluate(x.assign_sub(latent_value).assign_sub(latent_value)), value
)
assert_almost_equal(evaluate(x), value)
# Assign with read_value=False
assert_almost_equal(evaluate(x.assign(0.0)), 0.0)
assert evaluate(x.assign(latent_value, read_value=False)) is None
assert_almost_equal(evaluate(x), value)
assert evaluate(x.assign_add(latent_value, read_value=False)) is None
assert_almost_equal(evaluate(x), 2 * value)
assert evaluate(x.assign_sub(latent_value, read_value=False)) is None
assert_almost_equal(evaluate(x), value)
@pytest.mark.usefixtures("eager_mode", "distribute_scope")
def test_assign_tf_function(quantized):
x = QuantizedVariable.from_variable(get_var(0.0), quantizer=lambda x: 2 * x)
@tf.function
def run_assign():
return x.assign(1.0).assign_add(3.0).assign_add(3.0).assign_sub(2.0)
assert_almost_equal(evaluate(run_assign()), 10.0 if quantized else 5.0)
@pytest.mark.usefixtures("eager_and_graph_mode", "distribute_scope")
def test_assign_op():
x = QuantizedVariable.from_variable(get_var(0.0), quantizer=lambda x: 2 * x)
@tf.function
def func():
assert x.assign(1.0).op is not None
assert x.assign_add(1.0).op is not None
assert x.assign_sub(1.0).op is not None
func()
@pytest.mark.usefixtures("eager_mode", "distribute_scope")
def test_tf_function_control_dependencies(quantized):
x = QuantizedVariable.from_variable(get_var(0.0), quantizer=lambda x: 2 * x)
@tf.function
def func():
update = x.assign_add(1.0)
with tf.control_dependencies([update]):
x.assign_add(1.0)
func()
assert_almost_equal(evaluate(x), 4.0 if quantized else 2.0)
def test_tf_function_with_variable_and_quantized_variable():
variable = get_var(tf.ones(2, 2))
quantized_variable = QuantizedVariable.from_variable(variable)
@tf.function
def f(x):
return x + 1
f(variable)
f(quantized_variable)
@pytest.mark.usefixtures("eager_and_graph_mode")
def test_checkpoint(tmp_path):
x = QuantizedVariable.from_variable(get_var(0.0), quantizer=lambda x: 2 * x)
evaluate(x.initializer)
evaluate(x.assign(123.0))
checkpoint = tf.train.Checkpoint(x=x)
save_path = checkpoint.save(tmp_path)
evaluate(x.assign(234.0))
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
assert isinstance(x, QuantizedVariable)
assert evaluate(x) == 123.0
with context.quantized_scope(True):
assert evaluate(x) == 123.0 * 2
@pytest.mark.usefixtures("distribute_scope")
def test_invalid_wrapped_usage():
with pytest.raises(ValueError, match="`variable` must be of type"):
QuantizedVariable.from_variable(tf.constant([1.0]))
with pytest.raises(ValueError, match="`quantizer` must be `callable` or `None`"):
QuantizedVariable.from_variable(get_var([1.0]), 1) # type: ignore
with pytest.raises(ValueError, match="`precision` must be of type `int` or `None`"):
QuantizedVariable.from_variable(get_var([1.0]), precision=1.0) # type: ignore
@pytest.mark.usefixtures("eager_and_graph_mode")
def test_repr(snapshot):
x = get_var(0.0, name="x")
class Quantizer:
def __call__(self, x):
return x
snapshot.assert_match(
repr(QuantizedVariable.from_variable(x, quantizer=lambda x: 2 * x))
)
snapshot.assert_match(
repr(QuantizedVariable.from_variable(x, quantizer=Quantizer()))
)
snapshot.assert_match(repr(QuantizedVariable.from_variable(x, precision=1)))
@pytest.mark.usefixtures("eager_mode")
@pytest.mark.parametrize("should_quantize", [True, False])
def test_optimizer(should_quantize):
x = QuantizedVariable.from_variable(get_var(1.0), quantizer=lambda x: -x)
opt = tf.keras.optimizers.SGD(1.0)
def loss():
with context.quantized_scope(should_quantize):
return x + 1.0
@tf.function
def f():
opt.minimize(loss, var_list=[x])
f()
if should_quantize:
assert evaluate(x) == 2.0
with context.quantized_scope(should_quantize):
assert evaluate(x) == -2.0
else:
assert evaluate(x) == 0.0
@pytest.mark.skipif(
version.parse(tf.__version__) < version.parse("2"),
reason="Requires TensorFlow 2",
)
def test_saved_model(tmp_path):
model_path = str(tmp_path / "model")
x = np.random.normal(size=(4, 32))
model = testing_utils.get_small_bnn_model(x.shape[1], 16, 10)
weights = model.get_weights()
model.save(model_path, save_format="tf")
reloaded_model = tf.keras.models.load_model(model_path)
reloaded_weights = reloaded_model.get_weights()
assert_almost_equal(reloaded_model.predict(x), model.predict(x))
assert len(reloaded_weights) == len(weights)
for reloaded_weight, weight in zip(reloaded_weights, weights):
assert_almost_equal(reloaded_weight, weight)
| 14,405 | 37.31383 | 94 | py |
larq | larq-main/larq/snapshots/snap_models_test.py | # -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_functional_model_summary 2.4+'] = '''+toy_model stats------------------------------------------------------------------------------------------+
| layer input prec. outputs # 1-bit # 32-bit memory 32-bit macs |
| (bit) x 1 x 1 (kb) |
+---------------------------------------------------------------------------------------------------------+
| input_1 - (-1, 32, 32, 3) 0 0 0 ? |
| quant_conv2d - (-1, 32, 32, 32) 864 32 0.23 884736 |
| tf.split - [(-1, 32, 32, 16), (-1, 32, 32, 16)] 0 0 0 ? |
| tf.concat - (-1, 32, 32, 32) 0 0 0 ? |
+---------------------------------------------------------------------------------------------------------+
| total 864 32 0.23 884736 |
+---------------------------------------------------------------------------------------------------------+
+toy_model summary------------------------+
| total params 896 |
| trainable params 896 |
| non-trainable params 0 |
| model size 236.00 b |
| model size (8-bit fp weights) 140.00 b |
| float-32 equivalent 3.50 kib |
| compression ratio of memory 0.07 |
| number of macs 885 k |
+-----------------------------------------+
'''
snapshots['test_subclass_model_summary 1'] = '''+toy_model stats-------------------------------------------------------------+
| Layer Input prec. Outputs # 1-bit # 32-bit Memory |
| (bit) x 1 x 1 (kB) |
+----------------------------------------------------------------------------+
| quant_conv2d - multiple 864 32 0.23 |
| global_average_pooling2d - multiple 0 0 0 |
| dense - multiple 0 330 1.29 |
+----------------------------------------------------------------------------+
| Total 864 362 1.52 |
+----------------------------------------------------------------------------+
+toy_model summary------------------------+
| Total params 1.23 k |
| Trainable params 1.23 k |
| Non-trainable params 0 |
| Model size 1.52 KiB |
| Model size (8-bit FP weights) 470.00 B |
| Float-32 Equivalent 4.79 KiB |
| Compression Ratio of Memory 0.32 |
| Number of MACs 0 |
+-----------------------------------------+
'''
snapshots['test_submodel_summary 1'] = '''+sequential_6 stats-------------------------------------------------------------------------------------------------------+
| Layer Input prec. Outputs # 1-bit # 2-bit # 32-bit Memory 1-bit MACs 2-bit MACs 32-bit MACs |
| (bit) x 1 x 1 x 1 (kB) |
+-------------------------------------------------------------------------------------------------------------------------+
| quant_conv2d_2 - (-1, 64, 64, 32) 288 0 32 0.16 0 0 1179648 |
| max_pooling2d_2 - (-1, 32, 32, 32) 0 0 0 0 0 0 0 |
| sequential_5 2 (-1, 11, 11, 32) 1312 288 96 0.61 158752 34848 0 |
| flatten_2 - (-1, 3872) 0 0 0 0 0 0 0 |
| dense_2 - (-1, 10) 0 0 38730 151.29 0 0 38720 |
+-------------------------------------------------------------------------------------------------------------------------+
| Total 1600 288 38858 152.05 158752 34848 1218368 |
+-------------------------------------------------------------------------------------------------------------------------+
+sequential_6 summary---------------------------+
| Total params 40.7 k |
| Trainable params 1.95 k |
| Non-trainable params 38.8 k |
| Model size 152.05 KiB |
| Model size (8-bit FP weights) 38.21 KiB |
| Float-32 Equivalent 159.16 KiB |
| Compression Ratio of Memory 0.96 |
| Number of MACs 1.41 M |
| Ratio of MACs that are binarized 0.1124 |
| Ratio of MACs that are ternarized 0.0247 |
+-----------------------------------------------+
'''
snapshots['test_summary 1'] = '''+sequential stats----------------------------------------------------------------------------------------------------------------+
| Layer Input prec. Outputs # 1-bit # 2-bit # 32-bit Memory 1-bit MACs 2-bit MACs 32-bit MACs |
| (bit) x 1 x 1 x 1 (kB) |
+--------------------------------------------------------------------------------------------------------------------------------+
| quant_conv2d - (-1, 64, 64, 32) 288 0 32 0.16 0 0 1179648 |
| max_pooling2d - (-1, 32, 32, 32) 0 0 0 0 0 0 0 |
| quant_depthwise_conv2d 2 (-1, 11, 11, 32) 0 288 0 0.07 0 34848 0 |
| batch_normalization - (-1, 11, 11, 32) 0 0 64 0.25 0 0 0 |
| quant_separable_conv2d 1 (-1, 11, 11, 32) 1312 0 32 0.29 158752 0 0 |
| flatten - (-1, 3872) 0 0 0 0 0 0 0 |
| dense - (-1, 10) 0 0 38730 151.29 0 0 38720 |
+--------------------------------------------------------------------------------------------------------------------------------+
| Total 1600 288 38858 152.05 158752 34848 1218368 |
+--------------------------------------------------------------------------------------------------------------------------------+
+sequential summary-----------------------------+
| Total params 40.7 k |
| Trainable params 1.95 k |
| Non-trainable params 38.8 k |
| Model size 152.05 KiB |
| Model size (8-bit FP weights) 38.21 KiB |
| Float-32 Equivalent 159.16 KiB |
| Compression Ratio of Memory 0.96 |
| Number of MACs 1.41 M |
| Ratio of MACs that are binarized 0.1124 |
| Ratio of MACs that are ternarized 0.0247 |
+-----------------------------------------------+
'''
snapshots['test_summary 2'] = '''+sequential_1 stats--------------------+
| Layer Input prec. Outputs Memory |
| (bit) (kB) |
+--------------------------------------+
| lambda - (2,) 0 |
+--------------------------------------+
| Total 0 |
+--------------------------------------+
+sequential_1 summary-------------------+
| Total params 0 |
| Trainable params 0 |
| Non-trainable params 0 |
| Model size 0.00 B |
| Model size (8-bit FP weights) 0.00 B |
| Float-32 Equivalent 0.00 B |
| Compression Ratio of Memory 0.00 |
| Number of MACs 0 |
+---------------------------------------+
'''
| 8,529 | 65.640625 | 165 | py |
larq | larq-main/larq/snapshots/snap_quantized_variable_test.py | # -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_repr[eager] 1'] = "<QuantizedVariable 'x:0' shape=() dtype=float32 quantizer=<lambda> numpy=0.0>"
snapshots['test_repr[eager] 2'] = "<QuantizedVariable 'x:0' shape=() dtype=float32 quantizer=Quantizer numpy=0.0>"
snapshots['test_repr[eager] 3'] = "<QuantizedVariable 'x:0' shape=() dtype=float32 precision=1 numpy=0.0>"
snapshots['test_repr[graph] 1'] = "<QuantizedVariable 'x:0' shape=() dtype=float32 quantizer=<lambda>>"
snapshots['test_repr[graph] 2'] = "<QuantizedVariable 'x:0' shape=() dtype=float32 quantizer=Quantizer>"
snapshots['test_repr[graph] 3'] = "<QuantizedVariable 'x:0' shape=() dtype=float32 precision=1>"
| 814 | 39.75 | 114 | py |
larq | larq-main/larq/snapshots/__init__.py | 0 | 0 | 0 | py | |
DAC2018 | DAC2018-master/main.py | ## This program is for DAC HDC contest ######
## 2017/11/22
## xxu8@nd.edu
## University of Notre Dame
import procfunc
import math
import numpy as np
import time
import sys
sys.path.append("./build/lib.linux-aarch64-2.7")
import mypack
#### !!!! you can import any package needed for your program ######
if __name__ == "__main__":
############### configurations for dir #################################################################################
## Folder structure:
## $DAC$|
## |images (all the test images are stored in this folder)
## |results-$teamName$|
## |time
## |xml
## !!!! Please specify your team name here
teamName = 'ICT-CAS'
## !!!! please specify the dir here, and please put all the images for test in the folder "images".
## Important! You can specify the folder in your local test. But for the sumission, DAC folder is fixed as follows
DAC = './' ## uncomment this line when submitting your code
[imgDir, resultDir, timeDir, xmlDir, myXmlDir, allTimeFile] = procfunc.setupDir(DAC, teamName)
############### processing for object detection and tracking ###########################################################
### load all the images names
[allImageName, imageNum] = procfunc.getImageNames(imgDir)
### process all the images in batch
batchNumDiskToDram = 4 ## the # of images read from disk to DRAM in one time
batchNumDramToGPU = 2## the # of images read from DRAM to GPU in one time for batch processing on the GPU
imageReadTime = int(math.ceil(imageNum/float(batchNumDiskToDram)))
imageProcTimeEachRead = int(math.ceil(batchNumDiskToDram/float(batchNumDramToGPU)))
resultRectangle = np.zeros((imageNum, 4)) ## store all the results about tracking accuracy
mypack.netInit()
time_start=time.time()
for i in range(int(imageReadTime)):
ImageDramBatch = procfunc.readImagesBatch(imgDir,allImageName, imageNum, i, batchNumDiskToDram)
for j in range(imageProcTimeEachRead):
start = j*batchNumDramToGPU
end = start + batchNumDramToGPU
if end > len(ImageDramBatch):
end = len(ImageDramBatch)
if end < start:
break
inputImageData = ImageDramBatch[start:end, :,:,:]
############ !!!!!!!!!! your detection and tracking code, please revise the function: detectionAndTracking() !!!!!!!############
resultRectangle[i * batchNumDiskToDram + start:i * batchNumDiskToDram + end, :] = procfunc.detectionAndTracking(inputImageData, end-start)
time_end = time.time()
resultRunTime = time_end-time_start
############### write results (write time to allTimeFile and detection results to xml) #################################
procfunc.storeResultsToXML(resultRectangle, allImageName, myXmlDir)
procfunc.write(imageNum,resultRunTime,teamName, allTimeFile)
| 2,960 | 47.540984 | 150 | py |
DAC2018 | DAC2018-master/setup.py | from distutils.core import setup, Extension
module = Extension('mypack',extra_compile_args=['-std=c++11'], include_dirs=['/usr/local/cuda/include'],
sources = ['Detector.cpp'],extra_objects = ['./plugin.o', './kernel.o'], extra_link_args=['-lnvinfer', '-lnvcaffe_parser', '-lcudnn'])
setup(name = 'mypack', version='1.0', description="Detector package", ext_modules = [module])
| 388 | 54.571429 | 142 | py |
DAC2018 | DAC2018-master/sender_1_client.py | ## this is for GPU demo with only one FPGA and one computer for computer for display
## xxu8@nd.edu
import socket
import threading
import struct
import time
import cv2
import numpy
import xml.etree.ElementTree as ET
NofClients = 1
class Senders_Carame_Object:
def __init__(self,addr_ports=[("192.168.1.80",3000)]):
print 'Senders_Carame_Object init'
print addr_ports
self.resolution=(640,360)
self.img_fps=10
self.addr_ports=addr_ports
self.connections=[]
for i in range (0,NofClients):
print "setup connection "+str(i)
self.Set_Socket(self.addr_ports[i])
def Set_Socket(self,addr_port):
print 'Senders_Carame_Object Set_Socket'
self.connections.append(socket.socket(socket.AF_INET,socket.SOCK_STREAM))
self.connections[-1].setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
self.connections[-1].bind(addr_port)
self.connections[-1].listen(5)
print("the process work in the port:%d" % addr_port[1])
def check_option(object,clients):
for i in range (0, NofClients):
info=struct.unpack('hh',clients[i].recv(4)) ## 8 or 12
if info[0]!=object.resolution[0] or info[1]!=object.resolution[1]:
print "error: check option fails, received resolution is: "+str(info[0])+","+str(info[1])
return 1
else:
return 0
def RT_Image(object,clients):
print 'RT_Image '
if(check_option(object,clients)==1):
return
# camera=cv2.VideoCapture(0)
camera=cv2.VideoCapture("demo_video.avi")
img_param=[int(cv2.IMWRITE_JPEG_QUALITY),object.img_fps]
indexN = 0
#used for test
# xmlDir = "./Training_data_xml_dac_demo/xmls_demo"
#end
while(1):
##time.sleep(0.1) ## about 10 fps
_,object.img=camera.read()
indexN = indexN + 1
object.img=cv2.resize(object.img,object.resolution)
#used for test
# xmlfile = xmlDir + '/' + str(indexN) + ".xml"
# xmltree = ET.parse(xmlfile)
# obj = xmltree.find("object")
# bndbox = obj.find('bndbox')
# xmin = int(bndbox.find('xmin').text.strip())
# xmax = int(bndbox.find('xmax').text.strip())
# ymin = int(bndbox.find('ymin').text.strip())
# ymax = int(bndbox.find('ymax').text.strip())
# ymin = int(ymin*360/480)
# ymax = int(ymax*360/480)
# object.img = cv2.rectangle(object.img, (xmin, ymin), (xmax, ymax), (0,0,255),4)
# print("{} bndbox {}".format(indexN, (xmin,ymin, xmax, ymax)))
#end
_,img_encode=cv2.imencode('.jpg',object.img,img_param)
img_code=numpy.array(img_encode)
object.img_data=img_code.tostring()
try:
for i in range (0, NofClients):
clients[i].send(struct.pack("ll",len(object.img_data),indexN)+object.img_data)
print str(indexN)+', size of the send img:', len(object.img_data)
## wait until the images are processed on FPGAs
feedback=struct.unpack('h',clients[0].recv(2))
if feedback[0]!=168:
print "feedback from FPGA error, "+str(feedback)
return
except:
camera.release()
return
if __name__ == '__main__':
senders=Senders_Carame_Object([("127.0.0.1",3010)])
clients = []
for i in range (0, NofClients):
print "connection accept with "+str(i)
clients.append(senders.connections[i].accept()[0])
clientThread=threading.Thread(None,target=RT_Image,args=(senders,clients,))
clientThread.start()
| 3,833 | 35.865385 | 95 | py |
DAC2018 | DAC2018-master/val_sample.py | import numpy as np
import sys
import os
import xml.etree.ElementTree as ET
import cv2
class bbox():
def __init__(self, xmin, ymin, xmax, ymax):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.width = ymax - ymin
self.height = xmax - xmin
def overlap_width(self, box):
maxl = self.xmin if self.xmin > box.xmin else box.xmin
minr = self.xmax if self.xmax < box.xmax else box.xmax
return minr - maxl
def overlap_height(self, box):
maxt = self.ymin if self.ymin > box.ymin else box.ymin
minb = self.ymax if self.ymax < box.ymax else box.ymax
return minb - maxt
def box_intersection(self, box):
w = self.overlap_width(box)
h = self.overlap_height(box)
w = w if w > 0 else 0
h = h if h > 0 else 0
area = w * h
# print("intersection {}".format(area))
return area
def box_union(self, box):
i = self.box_intersection(box)
u = self.height * self.width + box.height * box.width - i
# print("union {}".format(u))
return u
def iou(self, box):
return self.box_intersection(box) / self.box_union(box)
def print_box(self):
print("({} {}) {} X {}".format(self.xmin, self.ymin, self.width, self.height))
label_dir = './images'
pred_dir='./result/xml/ICT-CAS'
files = os.listdir(label_dir)
label_xmls = []
sum_iou = 0.
count = 0
for i in files:
if 'xml' in i:
label_xmls.append(i)
for i in label_xmls:
count += 1
pred_xml = pred_dir + '/' + i
predxmltree = ET.parse(pred_xml)
obj = predxmltree.findall('object')
if obj == None:
print("error : no obj in {}", pred_xml)
pred_bbox = obj[0].findall('bndbox')[0]
xmin = float(pred_bbox.find('xmin').text)
xmax = float(pred_bbox.find('xmax').text)
ymin = float(pred_bbox.find('ymin').text)
ymax = float(pred_bbox.find('ymax').text)
pred_bbox = bbox(xmin, ymin, xmax, ymax)
gt_xml = label_dir + '/' + i
gt_xmltree = ET.parse(gt_xml)
gt = gt_xmltree.findall('object')
if gt == None:
print("error : no obj in {}".format(gt_xml))
# gt_bbox = gt[0].findall('bndbox')[0]
gt_bbox = gt[0].findall('bndbox')[0]
gt_xmin = float(gt_bbox.find('xmin').text)
gt_xmax = float(gt_bbox.find('xmax').text)
gt_ymin = float(gt_bbox.find('ymin').text)
gt_ymax = float(gt_bbox.find('ymax').text)
gt_bbox = bbox(gt_xmin, gt_ymin, gt_xmax, gt_ymax)
cur_iou = gt_bbox.iou(pred_bbox)
sum_iou += cur_iou
print("index : {} {} \n\t avg_iou: {} cur_iou: {}".format(count, i,
sum_iou/count, cur_iou))
| 2,744 | 30.193182 | 86 | py |
DAC2018 | DAC2018-master/demo.py | import socket
import cv2
import threading
import struct
import sys
sys.path.append("./build/lib.linux-aarch64-2.7")
import mypack
import procfunc
import math
import numpy as np
import time
mypack.netInit()
if __name__ == "__main__":
teamName = 'ICT-CAS'
DAC = './'
[imgDir, resultDir, timeDir, xmlDir, myXmlDir, allTimeFile] = procfunc.setupDir(DAC, teamName)
[allImageName, imageNum] = procfunc.getImageNames(imgDir)
batchNumDiskToDram = 1
batchNumDramToGPU = 1
imageReadTime = int(math.ceil(imageNum/float(batchNumDiskToDram)))
imageProcTimeEachRead = int(math.ceil(batchNumDiskToDram/float(batchNumDramToGPU)))
mypack.netInit()
for i in range(int(imageReadTime)):
imgName = imgDir + '/' + allImageName[i]
img = cv2.imread(imgName, 1)
print("loaded img {}".format(imgName))
detecRec = procfunc.detectionAndTracking(img.astype(np.float32, copy=False), 1)[0]
print " perform detection processing successfully, and the result is "+str(detecRec)
cv2.rectangle(img, (abs(int(detecRec[0])),abs(int(detecRec[2]))),(abs(int(detecRec[1])),abs(int(detecRec[3]))),(0,255,0),4)
cv2.imshow("tmp", img)
cv2.waitKey(1)
| 1,237 | 33.388889 | 134 | py |
DAC2018 | DAC2018-master/procfunc.py | import os
import cv2
import time
import numpy as np
import xml.dom.minidom
import random
import sys
sys.path.append("./build/lib.linux-aarch64-2.7")
import mypack
imageSize = (360, 640, 3)
## must be called to create the default directory layout
def setupDir(homeFolder, teamName):
    """Create the standard working tree under homeFolder.

    Layout: images/, result/, result/time/, result/xml/, result/xml/<team>/,
    plus an (empty) result/time/alltime.txt so later appends always succeed.
    Existing directories are left untouched.

    Returns [imgDir, resultDir, timeDir, xmlDir, myXmlDir, allTimeFile].
    """
    imgDir = homeFolder + '/images'
    resultDir = homeFolder + '/result'
    timeDir = resultDir + '/time'
    xmlDir = resultDir + '/xml'
    myXmlDir = xmlDir + '/' + teamName
    allTimeFile = timeDir + '/alltime.txt'
    # Parents come before children because os.mkdir only creates one level.
    for directory in (homeFolder, imgDir, resultDir, timeDir, xmlDir, myXmlDir):
        if not os.path.isdir(directory):
            os.mkdir(directory)
    # Touch the shared timing log file (append mode creates it if missing).
    with open(allTimeFile, 'a+'):
        pass
    return [imgDir, resultDir, timeDir, xmlDir, myXmlDir, allTimeFile]
## get image name list
def getImageNames(imgDir):
    """Return [names, count] where names are the '<n>.jpg' files in imgDir,
    sorted numerically by their integer stem (so '2.jpg' < '10.jpg')."""
    stems = [entry.split('.')[0] for entry in os.listdir(imgDir) if 'jpg' in entry]
    stems.sort(key=int)
    imageNames = [stem + '.jpg' for stem in stems]
    return [imageNames, len(imageNames)]
def readImagesBatch(imgDir, allImageName, imageNum, iter, batchNumDiskToDram):
    """Load one disk batch of images into a float32 array.

    Reads images [iter*batch, min((iter+1)*batch, imageNum)) from imgDir,
    stacked into shape (batch, H, W, C) per the module-level imageSize.
    """
    first = iter * batchNumDiskToDram
    last = min(first + batchNumDiskToDram, imageNum)
    batch = np.zeros((last - first, imageSize[0], imageSize[1], imageSize[2]))
    for index in range(first, last):
        image = cv2.imread(imgDir + '/' + allImageName[index], 1)
        batch[index - first, :, :] = image[:, :]
    return batch.astype(np.float32, copy=False)
## detection and tracking algorithm
def detectionAndTracking(inputImageData, batchNum):
    """Run the detector on a batch of float32 images.

    batchNum is kept for interface compatibility only; mypack.infer consumes
    the whole batch at once.  The result rows are consumed downstream
    (storeResultsToXML) as (xmin, xmax, ymin, ymax).
    """
    return mypack.infer(inputImageData)
## store the results about detection accuracy to XML files
def storeResultsToXML(resultRectangle, allImageName, myXmlDir):
    """Write one Pascal-VOC-style XML annotation per image into myXmlDir.

    resultRectangle: array-like indexed as [i, 0..3] = (xmin, xmax, ymin, ymax).
    allImageName: image file names ('<n>.jpg'); each produces '<n>.xml'.
    """
    for i in range(len(allImageName)):
        doc = xml.dom.minidom.Document()
        root = doc.createElement('annotation')
        doc.appendChild(root)
        # <filename>
        nameE = doc.createElement('filename')
        nameE.appendChild(doc.createTextNode(allImageName[i]))
        root.appendChild(nameE)
        # <size>: the frame resolution is fixed at 640x360.
        sizeE = doc.createElement('size')
        nodeWidth = doc.createElement('width')
        nodeWidth.appendChild(doc.createTextNode("640"))
        nodelength = doc.createElement('length')
        nodelength.appendChild(doc.createTextNode("360"))
        sizeE.appendChild(nodeWidth)
        sizeE.appendChild(nodelength)
        root.appendChild(sizeE)
        # <object> with the predicted bounding box.
        obj = doc.createElement('object')
        nodeName = doc.createElement('name')
        nodeName.appendChild(doc.createTextNode("NotCare"))
        nodebndbox = doc.createElement('bndbox')
        # Children must appear in xmin, xmax, ymin, ymax order (as before).
        for tag, column in (('xmin', 0), ('xmax', 1), ('ymin', 2), ('ymax', 3)):
            coord = doc.createElement(tag)
            coord.appendChild(doc.createTextNode(str(resultRectangle[i, column])))
            nodebndbox.appendChild(coord)
        obj.appendChild(nodeName)
        obj.appendChild(nodebndbox)
        root.appendChild(obj)
        fileName = allImageName[i].replace('jpg', 'xml')
        # Close the handle deterministically (the original leaked it open).
        with open(myXmlDir + "/" + fileName, 'w') as fp:
            doc.writexml(fp, indent='\t', addindent='\t', newl='\n', encoding="utf-8")
    return
##write time result to alltime.txt
def write(imageNum, runTime, teamName, allTimeFile):
    """Append one run's frames-per-second summary to the shared timing log."""
    FPS = imageNum / runTime
    with open(allTimeFile, 'a+') as ftime:
        ftime.write("\n" + teamName + " Frames per second:" + str((FPS)) + ", imgNum: " + str(imageNum) + ", runtime: " + str(runTime) + '\n')  ## xiaowei xu
    return
| 5,024 | 32.278146 | 153 | py |
DAC2018 | DAC2018-master/display.py | import socket
import cv2
import threading
import struct
import numpy
import sys
sys.path.append("./build/lib.linux-aarch64-2.7")
import mypack
mypack.netInit()
###### change your team name here
teamName = "teamName"
windowName = "DAC HDC contest team:"+teamName
class process_display_Object:
    """TCP client that receives JPEG frames, runs detection, and displays them.

    Protocol (as implemented below): on connect it sends its resolution as two
    packed shorts, then repeatedly reads a 16-byte ("ll") header whose first
    field is the JPEG payload size, reads that many bytes, decodes the frame,
    runs mypack.infer on it, acknowledges with a packed short 168, and draws
    the predicted box in an OpenCV window.
    """
    def __init__(self,addr_port_client_Img=("",1000)):
        print 'displayImg_Connect_Object init'
        # Display/stream resolution as [width, height].
        self.resolution=[640,360]
        self.client_port_Img=addr_port_client_Img
    def Socket_Connect_Client(self):
        # Open the TCP connection to the frame server.
        print 'displayImg_Connect_Object Socket_Connect'
        self.clientIMG=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        self.clientIMG.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
        print("As a client, displayImg receives imgs from %s:%d" % (self.client_port_Img[0],self.client_port_Img[1]))
        self.clientIMG.connect(self.client_port_Img)
    def ProcessImg(self):
        # Main receive/detect/display loop; runs until ESC is pressed.
        print 'displayImg_Object shows imgs'
        self.name="displayImg"
        # Announce our resolution to the server (two packed shorts).
        self.clientIMG.send(struct.pack("hh", self.resolution[0], self.resolution[1]))
        while(1):
            # Header: (payload_size, frame_id) as two packed longs.
            info=struct.unpack("ll",self.clientIMG.recv(16))
            buf_size=info[0]
            if buf_size:
                try:
                    # Accumulate the full JPEG payload; recv may return short reads.
                    self.buf=b""
                    temp_buf=self.buf
                    while(buf_size):
                        temp_buf=self.clientIMG.recv(buf_size)
                        buf_size-=len(temp_buf)
                        self.buf+=temp_buf
                    data = numpy.fromstring(self.buf, dtype='uint8')
                    self.image = cv2.imdecode(data, 1)
                    print("img shape {}".format(self.image.shape))
                    ###### replace this line with your detection code
                    # detecRec = numpy.random.random(4)
                    detecRec = mypack.infer(self.image.astype(numpy.float32, copy=False))[0]
                    print str(info[1])+", perform detection processing successfully, and the result is "+str(detecRec)
                    self.clientIMG.send(struct.pack("h",168)) ## indicate receive data succesfully
                    # detecRec columns are used here as (xmin, xmax, ymin, ymax).
                    cv2.rectangle(self.image,(abs(int(detecRec[0])),abs(int(detecRec[2]))),(abs(int(detecRec[1])),abs(int(detecRec[3]))),(0,255,0),4)
                    ###### uncomment the following 2 lines to enable fullscreen when you can successfully run the code
                    ##cv2.namedWindow(windowName, cv2.WND_PROP_FULLSCREEN)
                    ##cv2.setWindowProperty(windowName, cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
                    cv2.imshow(windowName, self.image)
                    cv2.waitKey(1)
                # NOTE(review): this bare except silently swallows every error
                # (including protocol/decode failures) — consider narrowing it.
                except:
                    pass;
                finally:
                    # ESC (27) exits; NOTE(review): self.client is never assigned
                    # anywhere in this class — this likely should be self.clientIMG.
                    if(cv2.waitKey(10)==27):
                        self.client.close()
                        cv2.destroyAllWindows()
                        break
    def ProcessInThread(self):
        # Run the receive/display loop on a background thread.
        print 'displayImg_Connect_Object Get_Data'
        showThread=threading.Thread(target=self.ProcessImg)
        showThread.start()
if __name__ == '__main__':
    ###### Change the ip and port according to your setting in the line below
    # Connect to the frame server, then consume frames on a worker thread.
    displayImg=process_display_Object(("127.0.0.1",3010))
    displayImg.Socket_Connect_Client()
    displayImg.ProcessInThread()
| 3,619 | 44.822785 | 156 | py |
MAMS-for-ABSA | MAMS-for-ABSA-master/test.py | import yaml
import os
from train.test import test
# Load the experiment configuration; a context manager closes the file
# deterministically (the original leaked the open('config.yml') handle).
with open('config.yml') as config_file:
    config = yaml.safe_load(config_file)
mode = config['mode']
# Pin the process to the GPU configured for the selected model type.
os.environ["CUDA_VISIBLE_DEVICES"] = str(config['aspect_' + mode + '_model'][config['aspect_' + mode + '_model']['type']]['gpu'])
test(config)
MAMS-for-ABSA | MAMS-for-ABSA-master/train.py | import yaml
import os
from train.train import train
# Load the experiment configuration; a context manager closes the file
# deterministically (the original leaked the open('config.yml') handle).
with open('config.yml') as config_file:
    config = yaml.safe_load(config_file)
mode = config['mode']
# Pin the process to the GPU configured for the selected model type.
os.environ["CUDA_VISIBLE_DEVICES"] = str(config['aspect_' + mode + '_model'][config['aspect_' + mode + '_model']['type']]['gpu'])
train(config)
MAMS-for-ABSA | MAMS-for-ABSA-master/preprocess.py | import yaml
from data_process.data_process import data_process
# Load the experiment configuration; a context manager closes the file
# deterministically (the original leaked the open('config.yml') handle).
with open('config.yml') as config_file:
    config = yaml.safe_load(config_file)
data_process(config)
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_category_model/capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import init
from src.module.utils.constants import PAD_INDEX, INF
from src.module.utils.sentence_clip import sentence_clip
from src.module.attention.dot_attention import DotAttention
from src.module.attention.scaled_dot_attention import ScaledDotAttention
from src.module.attention.bilinear_attention import BilinearAttention
from src.module.attention.tanh_bilinear_attention import TanhBilinearAttention
from src.module.attention.concat_attention import ConcatAttention
from src.module.attention.tanh_concat_attention import TanhConcatAttention
from src.module.attention.mlp_attention import MlpAttention
import numpy as np
from src.module.utils.squash import squash
class CapsuleNetwork(nn.Module):
    """Aspect-category sentiment classifier based on capsule routing.

    A subclass supplies ``_sentence_encode``.  forward() turns the encoded
    tokens into primary capsules, weights them with aspect-aware attention,
    routes them into one capsule per sentiment category, and returns the
    per-category capsule norms as class scores.
    """
    def __init__(self, embedding, aspect_embedding, hidden_size, capsule_size, dropout, num_categories):
        super(CapsuleNetwork, self).__init__()
        self.embedding = embedding
        # NOTE(review): aspect_embedding is stored but never used below;
        # forward() embeds the aspect through self.embedding instead.
        self.aspect_embedding = aspect_embedding
        embed_size = embedding.embedding_dim
        self.capsule_size = capsule_size
        # Projects the aspect vector into capsule space.
        self.aspect_transform = nn.Sequential(
            nn.Linear(embed_size, capsule_size),
            nn.Dropout(dropout)
        )
        # Projects encoded sentence states into capsule space.
        self.sentence_transform = nn.Sequential(
            nn.Linear(hidden_size, capsule_size),
            nn.Dropout(dropout)
        )
        self.norm_attention = BilinearAttention(capsule_size, capsule_size)
        # One learned "guide" capsule per sentiment category.
        self.guide_capsule = nn.Parameter(
            torch.Tensor(num_categories, capsule_size)
        )
        self.guide_weight = nn.Parameter(
            torch.Tensor(capsule_size, capsule_size)
        )
        # Learned temperature applied to the routing weights.
        self.scale = nn.Parameter(torch.tensor(5.0))
        # NOTE(review): capsule_projection is not referenced by any method here.
        self.capsule_projection = nn.Linear(capsule_size, capsule_size * num_categories)
        self.dropout = dropout
        self.num_categories = num_categories
        self._reset_parameters()
    def _reset_parameters(self):
        # Xavier initialization for the routing parameters.
        init.xavier_uniform_(self.guide_capsule)
        init.xavier_uniform_(self.guide_weight)
    def load_sentiment(self, path):
        """Initialize guide capsules from a pretrained matrix at ``path`` (.npy).

        The matrix is standardized, then rescaled to Xavier-like variance
        before being copied into self.guide_capsule.
        """
        sentiment = np.load(path)
        e1 = np.mean(sentiment)
        d1 = np.std(sentiment)
        e2 = 0
        d2 = np.sqrt(2.0 / (sentiment.shape[0] + sentiment.shape[1]))
        sentiment = (sentiment - e1) / d1 * d2 + e2
        self.guide_capsule.data.copy_(torch.tensor(sentiment))
    def forward(self, sentence, aspect):
        """Return per-category scores for (sentence ids, aspect ids)."""
        # get lengths and masks
        sentence = sentence_clip(sentence)
        sentence_mask = (sentence != PAD_INDEX)
        # embedding
        sentence = self.embedding(sentence)
        sentence = F.dropout(sentence, p=self.dropout, training=self.training)
        aspect = self.embedding(aspect)
        aspect = F.dropout(aspect, p=self.dropout, training=self.training)
        # sentence encode layer (subclass-specific)
        sentence = self._sentence_encode(sentence, aspect)
        # primary capsule layer
        sentence = self.sentence_transform(sentence)
        primary_capsule = squash(sentence, dim=-1)
        # aspect capsule layer
        aspect = self.aspect_transform(aspect)
        aspect_capsule = squash(aspect, dim=-1)
        # aspect aware normalization: attention of aspect over primary capsules
        norm_weight = self.norm_attention.get_attention_weights(aspect_capsule, primary_capsule, sentence_mask)
        # capsule guided routing
        category_capsule = self._capsule_guided_routing(primary_capsule, norm_weight)
        # Capsule norms act as the category scores.
        category_capsule_norm = torch.sqrt(torch.sum(category_capsule * category_capsule, dim=-1, keepdim=False))
        return category_capsule_norm
    def _sentence_encode(self, sentence, aspect, mask=None):
        # Subclasses must implement the aspect-conditioned sentence encoder.
        raise NotImplementedError('_sentence_encode method is not implemented.')
    def _capsule_guided_routing(self, primary_capsule, norm_weight):
        """Route primary capsules into category capsules via the guide capsules."""
        guide_capsule = squash(self.guide_capsule)
        # Similarity of every primary capsule to every guide capsule.
        guide_matrix = primary_capsule.matmul(self.guide_weight).matmul(guide_capsule.transpose(0, 1))
        guide_matrix = F.softmax(guide_matrix, dim=-1)
        # Scale routing weights by the aspect-aware norms and temperature.
        guide_matrix = guide_matrix * norm_weight.unsqueeze(-1) * self.scale # (batch_size, time_step, num_categories)
        category_capsule = guide_matrix.transpose(1, 2).matmul(primary_capsule)
        category_capsule = F.dropout(category_capsule, p=self.dropout, training=self.training)
        category_capsule = squash(category_capsule)
        return category_capsule
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_category_model/recurrent_capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from src.aspect_category_model.capsnet import CapsuleNetwork
class RecurrentCapsuleNetwork(CapsuleNetwork):
    """Capsule network whose sentence encoder is an aspect-conditioned GRU."""

    def __init__(self, embedding, aspect_embedding, num_layers, bidirectional, capsule_size, dropout, num_categories):
        directions = 2 if bidirectional else 1
        word_dim = embedding.embedding_dim
        super(RecurrentCapsuleNetwork, self).__init__(
            embedding=embedding,
            aspect_embedding=aspect_embedding,
            hidden_size=word_dim * directions,
            capsule_size=capsule_size,
            dropout=dropout,
            num_categories=num_categories
        )
        # The GRU consumes each word embedding concatenated with the aspect.
        self.rnn = nn.GRU(
            input_size=word_dim * 2,
            hidden_size=word_dim,
            num_layers=num_layers,
            bidirectional=bidirectional,
            batch_first=True
        )
        self.bidirectional = bidirectional

    def _sentence_encode(self, sentence, aspect, mask=None):
        """Encode via the GRU; add an embedding residual when bidirectional."""
        batch, steps, dim = sentence.size()
        tiled_aspect = aspect.unsqueeze(1).expand(batch, steps, dim)
        encoded, _ = self.rnn(torch.cat((sentence, tiled_aspect), dim=-1))
        if self.bidirectional:
            # Duplicate every feature in place ([x0, x0, x1, x1, ...]) to match
            # the doubled width of the bidirectional GRU output.
            residual = sentence.unsqueeze(-1).expand(batch, steps, dim, 2)
            encoded = encoded + residual.contiguous().view(batch, steps, dim * 2)
        return F.dropout(encoded, p=self.dropout, training=self.training)
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_category_model/bert_capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import init
from src.module.utils.constants import PAD_INDEX, INF
from src.module.utils.sentence_clip import sentence_clip
from src.module.attention.dot_attention import DotAttention
from src.module.attention.scaled_dot_attention import ScaledDotAttention
from src.module.attention.bilinear_attention import BilinearAttention
from src.module.attention.tanh_bilinear_attention import TanhBilinearAttention
from src.module.attention.concat_attention import ConcatAttention
from src.module.attention.tanh_concat_attention import TanhConcatAttention
from src.module.attention.mlp_attention import MlpAttention
from src.aspect_term_model.capsnet import squash
import numpy as np
class BertCapsuleNetwork(nn.Module):
    """Aspect-category capsule classifier on top of a BERT encoder.

    The sentence/aspect split is recovered from the BERT segment ids:
    segment 0 tokens form the sentence, segment 1 tokens the aspect.
    """
    def __init__(self, bert, bert_size, capsule_size, dropout, num_categories):
        super(BertCapsuleNetwork, self).__init__()
        self.bert = bert
        self.bert_size = bert_size
        self.capsule_size = capsule_size
        # Project pooled aspect states into capsule space.
        self.aspect_transform = nn.Sequential(
            nn.Linear(bert_size, capsule_size),
            nn.Dropout(dropout)
        )
        # Project sentence token states into capsule space.
        self.sentence_transform = nn.Sequential(
            nn.Linear(bert_size, capsule_size),
            nn.Dropout(dropout)
        )
        self.norm_attention = BilinearAttention(capsule_size, capsule_size)
        # One learned "guide" capsule per sentiment category.
        self.guide_capsule = nn.Parameter(
            torch.Tensor(num_categories, capsule_size)
        )
        self.guide_weight = nn.Parameter(
            torch.Tensor(capsule_size, capsule_size)
        )
        # Learned temperature applied to the routing weights.
        self.scale = nn.Parameter(torch.tensor(5.0))
        # NOTE(review): capsule_projection is not referenced by any method here.
        self.capsule_projection = nn.Linear(bert_size, bert_size * num_categories)
        self.dropout = dropout
        self.num_categories = num_categories
        self._reset_parameters()
    def _reset_parameters(self):
        # Xavier initialization for the routing parameters.
        init.xavier_uniform_(self.guide_capsule)
        init.xavier_uniform_(self.guide_weight)
    def load_sentiment(self, path):
        """Initialize guide capsules from a pretrained matrix at ``path`` (.npy),
        standardized and rescaled to Xavier-like variance first."""
        sentiment = np.load(path)
        e1 = np.mean(sentiment)
        d1 = np.std(sentiment)
        e2 = 0
        d2 = np.sqrt(2.0 / (sentiment.shape[0] + sentiment.shape[1]))
        sentiment = (sentiment - e1) / d1 * d2 + e2
        self.guide_capsule.data.copy_(torch.tensor(sentiment))
    def forward(self, bert_token, bert_segment):
        """Return per-category scores for a BERT (token, segment) pair."""
        # BERT encoding
        encoder_layer, _ = self.bert(bert_token, bert_segment, output_all_encoded_layers=False)
        batch_size, segment_len = bert_segment.size()
        # Positions up to the last segment-1 token are real (non-padding).
        max_segment_len = bert_segment.argmax(dim=-1, keepdim=True)
        batch_arrange = torch.arange(segment_len).unsqueeze(0).expand(batch_size, segment_len).to(bert_segment.device)
        segment_mask = batch_arrange <= max_segment_len
        # Sentence tokens: real positions with segment id 0.
        # NOTE(review): (1 - tensor).byte() relies on legacy uint8-mask
        # semantics; newer torch versions prefer bool masks — confirm the
        # pinned torch version before changing.
        sentence_mask = segment_mask & (1 - bert_segment).byte()
        aspect_mask = bert_segment
        sentence_lens = sentence_mask.long().sum(dim=1, keepdim=True)
        aspect_lens = aspect_mask.long().sum(dim=1, keepdim=True)
        # Mean-pool the aspect token states.
        aspect = encoder_layer.masked_fill(aspect_mask.unsqueeze(-1) == 0, 0)
        aspect = aspect.sum(dim=1, keepdim=False) / aspect_lens.float()
        # sentence encode layer: keep only the sentence prefix, zero padding.
        max_len = sentence_lens.max().item()
        sentence = encoder_layer[:, 0: max_len].contiguous()
        sentence_mask = sentence_mask[:, 0: max_len].contiguous()
        sentence = sentence.masked_fill(sentence_mask.unsqueeze(-1) == 0, 0)
        # primary capsule layer
        sentence = self.sentence_transform(sentence)
        primary_capsule = squash(sentence, dim=-1)
        # aspect capsule layer
        aspect = self.aspect_transform(aspect)
        aspect_capsule = squash(aspect, dim=-1)
        # aspect aware normalization
        norm_weight = self.norm_attention.get_attention_weights(aspect_capsule, primary_capsule, sentence_mask)
        # capsule guided routing
        category_capsule = self._capsule_guided_routing(primary_capsule, norm_weight)
        # Capsule norms act as the category scores.
        category_capsule_norm = torch.sqrt(torch.sum(category_capsule * category_capsule, dim=-1, keepdim=False))
        return category_capsule_norm
    def _capsule_guided_routing(self, primary_capsule, norm_weight):
        """Route primary capsules into category capsules via the guide capsules."""
        guide_capsule = squash(self.guide_capsule)
        guide_matrix = primary_capsule.matmul(self.guide_weight).matmul(guide_capsule.transpose(0, 1))
        guide_matrix = F.softmax(guide_matrix, dim=-1)
        guide_matrix = guide_matrix * norm_weight.unsqueeze(-1) * self.scale # (batch_size, time_step, num_categories)
        category_capsule = guide_matrix.transpose(1, 2).matmul(primary_capsule)
        category_capsule = F.dropout(category_capsule, p=self.dropout, training=self.training)
        category_capsule = squash(category_capsule)
        return category_capsule
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_term_model/capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import init
from src.module.utils.constants import PAD_INDEX, INF
from src.module.utils.sentence_clip import sentence_clip
from src.module.attention.dot_attention import DotAttention
from src.module.attention.scaled_dot_attention import ScaledDotAttention
from src.module.attention.bilinear_attention import BilinearAttention
from src.module.attention.tanh_bilinear_attention import TanhBilinearAttention
from src.module.attention.concat_attention import ConcatAttention
from src.module.attention.tanh_concat_attention import TanhConcatAttention
from src.module.attention.mlp_attention import MlpAttention
import numpy as np
from src.module.utils.squash import squash
class CapsuleNetwork(nn.Module):
    """Aspect-term sentiment classifier based on capsule routing.

    Unlike the aspect-category variant, the aspect here is a token span,
    so forward() mean-pools the aspect token embeddings before routing.
    A subclass supplies ``_sentence_encode``.
    """
    def __init__(self, embedding, hidden_size, capsule_size, dropout, num_categories):
        super(CapsuleNetwork, self).__init__()
        self.embedding = embedding
        embed_size = embedding.embedding_dim
        self.capsule_size = capsule_size
        # Projects the pooled aspect vector into capsule space.
        self.aspect_transform = nn.Sequential(
            nn.Linear(embed_size, capsule_size),
            nn.Dropout(dropout)
        )
        # Projects encoded sentence states into capsule space.
        self.sentence_transform = nn.Sequential(
            nn.Linear(hidden_size, capsule_size),
            nn.Dropout(dropout)
        )
        self.norm_attention = BilinearAttention(capsule_size, capsule_size)
        # One learned "guide" capsule per sentiment category.
        self.guide_capsule = nn.Parameter(
            torch.Tensor(num_categories, capsule_size)
        )
        self.guide_weight = nn.Parameter(
            torch.Tensor(capsule_size, capsule_size)
        )
        # Learned temperature (4.0 here vs 5.0 in the category model).
        self.scale = nn.Parameter(torch.tensor(4.0))
        # NOTE(review): capsule_projection is not referenced by any method here.
        self.capsule_projection = nn.Linear(capsule_size, capsule_size * num_categories)
        self.dropout = dropout
        self.num_categories = num_categories
        self._reset_parameters()
    def _reset_parameters(self):
        # Xavier initialization for the routing parameters.
        init.xavier_uniform_(self.guide_capsule)
        init.xavier_uniform_(self.guide_weight)
    def load_sentiment(self, path):
        """Initialize guide capsules from a pretrained matrix at ``path`` (.npy),
        standardized and rescaled to Xavier-like variance first."""
        sentiment = np.load(path)
        e1 = np.mean(sentiment)
        d1 = np.std(sentiment)
        e2 = 0
        d2 = np.sqrt(2.0 / (sentiment.shape[0] + sentiment.shape[1]))
        sentiment = (sentiment - e1) / d1 * d2 + e2
        self.guide_capsule.data.copy_(torch.tensor(sentiment))
    def forward(self, sentence, aspect):
        """Return per-category scores for (sentence ids, aspect-span ids)."""
        # get lengths and masks
        sentence = sentence_clip(sentence)
        aspect = sentence_clip(aspect)
        sentence_mask = (sentence != PAD_INDEX)
        aspect_mask = (aspect != PAD_INDEX)
        # sentence_lens = sentence_mask.long().sum(dim=1, keepdim=True)
        aspect_lens = aspect_mask.long().sum(dim=1, keepdim=True)
        # embedding
        sentence = self.embedding(sentence)
        sentence = F.dropout(sentence, p=self.dropout, training=self.training)
        aspect = self.embedding(aspect)
        aspect = F.dropout(aspect, p=self.dropout, training=self.training)
        # aspect average pooling over the (non-padding) aspect tokens
        aspect = aspect.masked_fill(aspect_mask.unsqueeze(-1) == 0, 0)
        aspect = aspect.sum(dim=1, keepdim=False) / aspect_lens.float()
        # sentence encode layer (subclass-specific)
        sentence = self._sentence_encode(sentence, aspect)
        # primary capsule layer
        sentence = self.sentence_transform(sentence)
        primary_capsule = squash(sentence, dim=-1)
        # aspect capsule layer
        aspect = self.aspect_transform(aspect)
        aspect_capsule = squash(aspect, dim=-1)
        # aspect aware normalization
        norm_weight = self.norm_attention.get_attention_weights(aspect_capsule, primary_capsule, sentence_mask)
        # capsule guided routing
        category_capsule = self._capsule_guided_routing(primary_capsule, norm_weight)
        # Capsule norms act as the category scores.
        category_capsule_norm = torch.sqrt(torch.sum(category_capsule * category_capsule, dim=-1, keepdim=False))
        return category_capsule_norm
    def _sentence_encode(self, sentence, aspect, mask=None):
        # Subclasses must implement the aspect-conditioned sentence encoder.
        raise NotImplementedError('_sentence_encode method is not implemented.')
    def _capsule_guided_routing(self, primary_capsule, norm_weight):
        """Route primary capsules into category capsules via the guide capsules."""
        guide_capsule = squash(self.guide_capsule)
        guide_matrix = primary_capsule.matmul(self.guide_weight).matmul(guide_capsule.transpose(0, 1))
        guide_matrix = F.softmax(guide_matrix, dim=-1)
        guide_matrix = guide_matrix * norm_weight.unsqueeze(-1) * self.scale # (batch_size, time_step, num_categories)
        category_capsule = guide_matrix.transpose(1, 2).matmul(primary_capsule)
        category_capsule = F.dropout(category_capsule, p=self.dropout, training=self.training)
        category_capsule = squash(category_capsule)
        return category_capsule
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_term_model/recurrent_capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from src.aspect_term_model.capsnet import CapsuleNetwork
class RecurrentCapsuleNetwork(CapsuleNetwork):
    """Capsule network whose sentence encoder is an aspect-conditioned GRU."""

    def __init__(self, embedding, num_layers, bidirectional, capsule_size, dropout, num_categories):
        directions = 2 if bidirectional else 1
        word_dim = embedding.embedding_dim
        super(RecurrentCapsuleNetwork, self).__init__(
            embedding=embedding,
            hidden_size=word_dim * directions,
            capsule_size=capsule_size,
            dropout=dropout,
            num_categories=num_categories
        )
        # The GRU consumes each word embedding concatenated with the aspect.
        self.rnn = nn.GRU(
            input_size=word_dim * 2,
            hidden_size=word_dim,
            num_layers=num_layers,
            bidirectional=bidirectional,
            batch_first=True
        )
        self.bidirectional = bidirectional

    def _sentence_encode(self, sentence, aspect, mask=None):
        """Encode via the GRU; add an embedding residual when bidirectional."""
        batch, steps, dim = sentence.size()
        tiled_aspect = aspect.unsqueeze(1).expand(batch, steps, dim)
        encoded, _ = self.rnn(torch.cat((sentence, tiled_aspect), dim=-1))
        if self.bidirectional:
            # Duplicate every feature in place ([x0, x0, x1, x1, ...]) to match
            # the doubled width of the bidirectional GRU output.
            residual = sentence.unsqueeze(-1).expand(batch, steps, dim, 2)
            encoded = encoded + residual.contiguous().view(batch, steps, dim * 2)
        return F.dropout(encoded, p=self.dropout, training=self.training)
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_term_model/bert_capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import init
from src.module.utils.constants import PAD_INDEX, INF
from src.module.utils.sentence_clip import sentence_clip
from src.module.attention.dot_attention import DotAttention
from src.module.attention.scaled_dot_attention import ScaledDotAttention
from src.module.attention.bilinear_attention import BilinearAttention
from src.module.attention.tanh_bilinear_attention import TanhBilinearAttention
from src.module.attention.concat_attention import ConcatAttention
from src.module.attention.tanh_concat_attention import TanhConcatAttention
from src.module.attention.mlp_attention import MlpAttention
from src.module.utils.squash import squash
import numpy as np
import time
class BertCapsuleNetwork(nn.Module):
    """Aspect-term capsule classifier on top of a BERT encoder.

    The sentence/aspect split is recovered from the BERT segment ids:
    segment 0 tokens form the sentence, segment 1 tokens the aspect span,
    which is mean-pooled into a single aspect vector.
    """
    def __init__(self, bert, bert_size, capsule_size, dropout, num_categories):
        super(BertCapsuleNetwork, self).__init__()
        self.bert = bert
        self.bert_size = bert_size
        self.capsule_size = capsule_size
        # Project the pooled aspect vector into capsule space.
        self.aspect_transform = nn.Sequential(
            nn.Linear(bert_size, capsule_size),
            nn.Dropout(dropout)
        )
        # Project sentence token states into capsule space.
        self.sentence_transform = nn.Sequential(
            nn.Linear(bert_size, capsule_size),
            nn.Dropout(dropout)
        )
        self.norm_attention = BilinearAttention(capsule_size, capsule_size)
        # One learned "guide" capsule per sentiment category.
        self.guide_capsule = nn.Parameter(
            torch.Tensor(num_categories, capsule_size)
        )
        self.guide_weight = nn.Parameter(
            torch.Tensor(capsule_size, capsule_size)
        )
        # Learned temperature applied to the routing weights.
        self.scale = nn.Parameter(torch.tensor(5.0))
        # NOTE(review): capsule_projection is not referenced by any method here.
        self.capsule_projection = nn.Linear(bert_size, bert_size * num_categories)
        self.dropout = dropout
        self.num_categories = num_categories
        self._reset_parameters()
    def _reset_parameters(self):
        # Xavier initialization for the routing parameters.
        init.xavier_uniform_(self.guide_capsule)
        init.xavier_uniform_(self.guide_weight)
    def load_sentiment(self, path):
        """Initialize guide capsules from a pretrained matrix at ``path`` (.npy),
        standardized and rescaled to Xavier-like variance first."""
        sentiment = np.load(path)
        e1 = np.mean(sentiment)
        d1 = np.std(sentiment)
        e2 = 0
        d2 = np.sqrt(2.0 / (sentiment.shape[0] + sentiment.shape[1]))
        sentiment = (sentiment - e1) / d1 * d2 + e2
        self.guide_capsule.data.copy_(torch.tensor(sentiment))
    def forward(self, bert_token, bert_segment):
        """Return per-category scores for a BERT (token, segment) pair."""
        # BERT encoding
        encoder_layer, _ = self.bert(bert_token, bert_segment, output_all_encoded_layers=False)
        batch_size, segment_len = bert_segment.size()
        # Positions up to the last segment-1 token are real (non-padding).
        max_segment_len = bert_segment.argmax(dim=-1, keepdim=True)
        batch_arrange = torch.arange(segment_len).unsqueeze(0).expand(batch_size, segment_len).to(bert_segment.device)
        segment_mask = batch_arrange <= max_segment_len
        # Sentence tokens: real positions with segment id 0.
        # NOTE(review): (1 - tensor).byte() relies on legacy uint8-mask
        # semantics; newer torch versions prefer bool masks — confirm the
        # pinned torch version before changing.
        sentence_mask = segment_mask & (1 - bert_segment).byte()
        aspect_mask = bert_segment
        sentence_lens = sentence_mask.long().sum(dim=1, keepdim=True)
        # aspect average pooling over the aspect-span token states
        aspect_lens = aspect_mask.long().sum(dim=1, keepdim=True)
        aspect = encoder_layer.masked_fill(aspect_mask.unsqueeze(-1) == 0, 0)
        aspect = aspect.sum(dim=1, keepdim=False) / aspect_lens.float()
        # sentence encode layer: keep only the sentence prefix, zero padding.
        max_len = sentence_lens.max().item()
        sentence = encoder_layer[:, 0: max_len].contiguous()
        sentence_mask = sentence_mask[:, 0: max_len].contiguous()
        sentence = sentence.masked_fill(sentence_mask.unsqueeze(-1) == 0, 0)
        # primary capsule layer
        sentence = self.sentence_transform(sentence)
        primary_capsule = squash(sentence, dim=-1)
        # aspect capsule layer
        aspect = self.aspect_transform(aspect)
        aspect_capsule = squash(aspect, dim=-1)
        # aspect aware normalization
        norm_weight = self.norm_attention.get_attention_weights(aspect_capsule, primary_capsule, sentence_mask)
        # capsule guided routing
        category_capsule = self._capsule_guided_routing(primary_capsule, norm_weight)
        # Capsule norms act as the category scores.
        category_capsule_norm = torch.sqrt(torch.sum(category_capsule * category_capsule, dim=-1, keepdim=False))
        return category_capsule_norm
    def _capsule_guided_routing(self, primary_capsule, norm_weight):
        """Route primary capsules into category capsules via the guide capsules."""
        guide_capsule = squash(self.guide_capsule)
        guide_matrix = primary_capsule.matmul(self.guide_weight).matmul(guide_capsule.transpose(0, 1))
        guide_matrix = F.softmax(guide_matrix, dim=-1)
        guide_matrix = guide_matrix * norm_weight.unsqueeze(-1) * self.scale # (batch_size, time_step, num_categories)
        category_capsule = guide_matrix.transpose(1, 2).matmul(primary_capsule)
        category_capsule = F.dropout(category_capsule, p=self.dropout, training=self.training)
        category_capsule = squash(category_capsule)
        return category_capsule
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/concat_attention.py | import torch
from torch import nn
from torch.nn import init
from src.module.attention.attention import Attention
class ConcatAttention(Attention):
    """Additive attention scored as W_q·q + W_k·k (no nonlinearity)."""

    def __init__(self, query_size, key_size, dropout=0):
        super(ConcatAttention, self).__init__(dropout)
        self.query_weights = nn.Parameter(torch.Tensor(query_size, 1))
        self.key_weights = nn.Parameter(torch.Tensor(key_size, 1))
        init.xavier_uniform_(self.query_weights)
        init.xavier_uniform_(self.key_weights)

    def _score(self, query, key):
        """
        query: FloatTensor (batch_size, num_queries, query_size)
        key: FloatTensor (batch_size, time_step, key_size)
        returns: FloatTensor (batch_size, num_queries, time_step)
        """
        projected_query = query.matmul(self.query_weights)            # (batch, num_queries, 1)
        projected_key = key.matmul(self.key_weights).transpose(1, 2)  # (batch, 1, time_step)
        # Broadcasting reproduces the explicit expand-and-add of the original.
        return projected_query + projected_key
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/bilinear_attention.py | import torch
from torch import nn
from torch.nn import init
from src.module.attention.attention import Attention
class BilinearAttention(Attention):
    """Bilinear attention scored as q W kᵀ."""

    def __init__(self, query_size, key_size, dropout=0):
        super(BilinearAttention, self).__init__(dropout)
        self.weights = nn.Parameter(torch.FloatTensor(query_size, key_size))
        init.xavier_uniform_(self.weights)

    def _score(self, query, key):
        """
        query: FloatTensor (batch_size, num_queries, query_size)
        key: FloatTensor (batch_size, time_step, key_size)
        returns: FloatTensor (batch_size, num_queries, time_step)
        """
        projected = query.matmul(self.weights)
        return projected.matmul(key.transpose(1, 2))
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/tanh_bilinear_attention.py | import torch
from torch import nn
from torch.nn import init
from src.module.attention.attention import Attention
class TanhBilinearAttention(Attention):
    """Bilinear attention with a learned bias and tanh: tanh(q W kᵀ + b)."""

    def __init__(self, query_size, key_size, dropout=0):
        super(TanhBilinearAttention, self).__init__(dropout)
        self.weights = nn.Parameter(torch.FloatTensor(query_size, key_size))
        init.xavier_uniform_(self.weights)
        self.bias = nn.Parameter(torch.zeros(1))

    def _score(self, query, key):
        """
        query: FloatTensor (batch_size, num_queries, query_size)
        key: FloatTensor (batch_size, time_step, key_size)
        returns: FloatTensor (batch_size, num_queries, time_step)
        """
        bilinear = query.matmul(self.weights).matmul(key.transpose(1, 2))
        return torch.tanh(bilinear + self.bias)
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/tanh_concat_attention.py | import torch
from torch import nn
from torch.nn import init
from src.module.attention.attention import Attention
class TanhConcatAttention(Attention):
    """Additive attention with tanh: tanh(W_q·q + W_k·k)."""

    def __init__(self, query_size, key_size, dropout=0):
        super(TanhConcatAttention, self).__init__(dropout)
        self.query_weights = nn.Parameter(torch.Tensor(query_size, 1))
        self.key_weights = nn.Parameter(torch.Tensor(key_size, 1))
        init.xavier_uniform_(self.query_weights)
        init.xavier_uniform_(self.key_weights)

    def _score(self, query, key):
        """
        query: FloatTensor (batch_size, num_queries, query_size)
        key: FloatTensor (batch_size, time_step, key_size)
        returns: FloatTensor (batch_size, num_queries, time_step)
        """
        projected_query = query.matmul(self.query_weights)            # (batch, num_queries, 1)
        projected_key = key.matmul(self.key_weights).transpose(1, 2)  # (batch, 1, time_step)
        # Broadcasting reproduces the explicit expand-and-add of the original.
        return torch.tanh(projected_query + projected_key)
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/multi_head_attention.py | from torch import nn
from torch.nn import init
import math
class MultiHeadAttention(nn.Module):
    """Multi-head wrapper around a single-head attention module.

    Projects query/key/value into ``num_heads`` sub-spaces, folds the head
    dimension into the batch dimension, applies ``attention`` once, then
    unfolds and projects the concatenated head outputs back to ``out_size``.
    """
    def __init__(self, attention, num_heads, hidden_size, key_size='default', value_size='default', out_size='default'):
        # 'default' sizes split hidden_size evenly across the heads.
        key_size = hidden_size // num_heads if key_size == 'default' else key_size
        value_size = hidden_size // num_heads if value_size == 'default' else value_size
        out_size = hidden_size if out_size == 'default' else out_size
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.key_size = key_size
        self.value_size = value_size
        self.query_projection = nn.Linear(hidden_size, num_heads * key_size)
        self.key_projection = nn.Linear(hidden_size, num_heads * key_size)
        self.value_projection = nn.Linear(hidden_size, num_heads * value_size)
        # Xavier-style std computed per head (hidden_size + per-head size).
        init.normal_(self.query_projection.weight, mean=0, std=math.sqrt(2.0 / (hidden_size + key_size)))
        init.normal_(self.key_projection.weight, mean=0, std=math.sqrt(2.0 / (hidden_size + key_size)))
        init.normal_(self.value_projection.weight, mean=0, std=math.sqrt(2.0 / (hidden_size + value_size)))
        self.output_projection = nn.Linear(num_heads * value_size, out_size)
        init.xavier_normal_(self.output_projection.weight)
        self.attention = attention
    def forward(self, query, key, value, mask=None):
        """
        query: FloatTensor (batch_size, hidden_size) or (batch_size, num_queries, hidden_size)
        key: FloatTensor (batch_size, time_step, hidden_size)
        value: FloatTensor (batch_size, time_step, hidden_size)
        mask: ByteTensor (batch_size, time_step) or ByteTensor (batch_size, num_queries, time_step)
        """
        # A 2-D query is treated as a single query and re-squeezed on exit.
        single_query = False
        if len(query.size()) == 2:
            query = query.unsqueeze(1)
            single_query = True
        if mask is not None:
            if len(mask.size()) == 2:
                mask = mask.unsqueeze(1)
            else:
                # A 3-D mask must have one row per query.
                assert mask.size(1) == query.size(1)
        num_heads, key_size, value_size = self.num_heads, self.key_size, self.value_size
        batch_size, num_queries, time_step = query.size(0), query.size(1), key.size(1)
        # Split the projected features into per-head slices.
        query = self.query_projection(query).view(batch_size, num_queries, num_heads, key_size)
        key = self.key_projection(key).view(batch_size, time_step, num_heads, key_size)
        value = self.value_projection(value).view(batch_size, time_step, num_heads, value_size)
        if mask is not None:
            # Replicate the mask once per head so it lines up with the
            # head-folded batch dimension below.
            if len(mask.size()) == 2:
                mask = mask.unsqueeze(0).repeat(num_heads, 1, 1).view(-1, time_step)
            else:
                mask = mask.unsqueeze(0).repeat(num_heads, 1, 1, 1).view(-1, num_queries, time_step)
        # Fold heads into the batch dimension: (heads * batch, ..., size).
        query = query.permute(2, 0, 1, 3).contiguous().view(-1, num_queries, key_size)
        key = key.permute(2, 0, 1, 3).contiguous().view(-1, time_step, key_size)
        value = value.permute(2, 0, 1, 3).contiguous().view(-1, time_step, value_size)
        output = self.attention(query, key, value, mask)
        # Unfold heads and concatenate them along the feature dimension.
        output = output.view(num_heads, batch_size, num_queries, value_size)
        output = output.permute(1, 2, 0, 3).contiguous().view(batch_size, num_queries, -1)
        output = self.output_projection(output)
        if single_query:
            output = output.squeeze(1)
        return output
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/dot_attention.py | from src.module.attention.attention import Attention
class DotAttention(Attention):
    """Plain dot-product attention: score(q, k) = q . k."""

    def __init__(self, dropout=0):
        super(DotAttention, self).__init__(dropout)

    def _score(self, query, key):
        """
        query: FloatTensor (batch_size, num_queries, query_size)
        key: FloatTensor (batch_size, time_step, key_size)
        returns: FloatTensor (batch_size, num_queries, time_step)
        """
        # Query and key must share the same feature size for a dot product.
        assert query.size(2) == key.size(2)
        key_t = key.transpose(1, 2)  # (batch_size, key_size, time_step)
        return query.matmul(key_t)
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/scaled_dot_attention.py | from src.module.attention.attention import Attention
import math
class ScaledDotAttention(Attention):
    """Dot-product attention scaled by sqrt(d_k), as in the Transformer."""

    def __init__(self, dropout=0):
        super(ScaledDotAttention, self).__init__(dropout)

    def _score(self, query, key):
        """
        query: FloatTensor (batch_size, num_queries, query_size)
        key: FloatTensor (batch_size, time_step, key_size)
        returns: FloatTensor (batch_size, num_queries, time_step)
        """
        assert query.size(2) == key.size(2)
        raw = query.matmul(key.transpose(1, 2))
        # Scale by sqrt(d_k) to keep softmax gradients well-behaved.
        return raw / math.sqrt(query.size(2))
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/attention.py | from torch import nn
import torch.nn.functional as F
from src.module.utils import constants
class Attention(nn.Module):
    """
    The base class of attention. Subclasses implement `_score`; this class
    handles mask normalization, softmax weighting, dropout, and pooling.
    """

    def __init__(self, dropout):
        super(Attention, self).__init__()
        self.dropout = dropout  # dropout probability applied to attention weights

    def forward(self, query, key, value, mask=None):
        """
        query: FloatTensor (batch_size, query_size) or FloatTensor (batch_size, num_queries, query_size)
        key: FloatTensor (batch_size, time_step, key_size)
        value: FloatTensor (batch_size, time_step, hidden_size)
        mask: ByteTensor (batch_size, time_step) or ByteTensor (batch_size, num_queries, time_step)
        """
        # Promote a single query to a length-1 query axis; restored on return.
        single_query = False
        if len(query.size()) == 2:
            query = query.unsqueeze(1)
            single_query = True
        if mask is not None:
            if len(mask.size()) == 2:
                mask = mask.unsqueeze(1)
            else:
                assert mask.size(1) == query.size(1)
        score = self._score(query, key)  # FloatTensor (batch_size, num_queries, time_step)
        weights = self._weights_normalize(score, mask)
        weights = F.dropout(weights, p=self.dropout, training=self.training)
        output = weights.matmul(value)  # weighted sum over time steps
        if single_query:
            output = output.squeeze(1)
        return output

    def _score(self, query, key):
        # Subclass hook: return raw scores (batch_size, num_queries, time_step).
        raise NotImplementedError('Attention score method is not implemented.')

    def _weights_normalize(self, score, mask):
        # Masked positions get -INF so softmax gives them ~zero weight.
        if not mask is None:
            score = score.masked_fill(mask == 0, -constants.INF)
        weights = F.softmax(score, dim=-1)
        return weights

    def get_attention_weights(self, query, key, mask=None):
        # Same pipeline as forward() but returns the weights instead of pooling.
        single_query = False
        if len(query.size()) == 2:
            query = query.unsqueeze(1)
            single_query = True
        if mask is not None:
            if len(mask.size()) == 2:
                mask = mask.unsqueeze(1)
            else:
                assert mask.size(1) == query.size(1)
        score = self._score(query, key)  # FloatTensor (batch_size, num_queries, time_step)
        weights = self._weights_normalize(score, mask)
        weights = F.dropout(weights, p=self.dropout, training=self.training)
        if single_query:
            weights = weights.squeeze(1)
        return weights
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/mlp_attention.py | import torch
from torch import nn
from torch.nn import init
from src.module.attention.attention import Attention
class MlpAttention(Attention):
    """MLP (additive) attention: score = v . tanh(Wq*q + Wk*k)."""

    def __init__(self, query_size, key_size, out_size=100, dropout=0):
        super(MlpAttention, self).__init__(dropout)
        self.query_projection = nn.Linear(query_size, out_size)
        self.key_projection = nn.Linear(key_size, out_size)
        self.v = nn.Parameter(torch.FloatTensor(out_size, 1))
        init.xavier_uniform_(self.v)

    def _score(self, query, key):
        """
        query: FloatTensor (batch_size, num_queries, query_size)
        key: FloatTensor (batch_size, time_step, key_size)
        returns: FloatTensor (batch_size, num_queries, time_step)
        """
        batch_size, num_queries, time_step, out_size = query.size(0), query.size(1), key.size(1), self.v.size(0)
        # BUG FIX: the projected query is (B, Q, out_size); the broadcast axis
        # for time steps must be inserted at dim 2 (unsqueeze(2)), not appended
        # at the end (unsqueeze(-1)). With unsqueeze(-1) the shape becomes
        # (B, Q, out_size, 1) and expand() would try to stretch out_size into
        # time_step, which raises unless out_size == time_step by coincidence.
        query = self.query_projection(query).unsqueeze(2).expand(batch_size, num_queries, time_step, out_size)
        key = self.key_projection(key).unsqueeze(1).expand(batch_size, num_queries, time_step, out_size)
        score = torch.tanh(query + key).matmul(self.v).squeeze(-1)
        return score
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/no_query_attention.py | import torch
from torch import nn
from torch.nn import init
class NoQueryAttention(nn.Module):
    """Attention with a single learned query vector shared across the batch."""

    def __init__(self, query_size, attention):
        super(NoQueryAttention, self).__init__()
        self.query_size = query_size
        # One trainable query, broadcast over the batch in forward().
        self.query = nn.Parameter(torch.Tensor(1, query_size))
        init.xavier_uniform_(self.query)
        self.attention = attention  # the wrapped scoring/pooling module

    def forward(self, key, value, mask=None):
        """Run the wrapped attention with the learned query tiled per example."""
        tiled_query = self.query.expand(key.size(0), self.query_size)
        return self.attention(tiled_query, key, value, mask)
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/utils/squash.py | import torch
def squash(x, dim=-1):
    """Capsule squashing non-linearity (Sabour et al., 2017).

    Shrinks vectors along `dim` to length in [0, 1) while preserving
    direction: v -> (||v|| / (1 + ||v||^2)) * v.
    """
    norm_sq = (x * x).sum(dim=dim, keepdim=True)
    return x * (torch.sqrt(norm_sq) / (1.0 + norm_sq))
# Special vocabulary tokens and their fixed indices.
PAD = '<pad>'        # padding token
UNK = '<unk>'        # out-of-vocabulary token
ASPECT = '<aspect>'  # placeholder marking the aspect position in a context
PAD_INDEX = 0
UNK_INDEX = 1
ASPECT_INDEX = 2
# Large finite value used as "infinity" when masking attention scores.
INF = 1e9
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/utils/loss.py | import torch
from torch import nn
import torch.nn.functional as F
class CapsuleLoss(nn.Module):
    """Margin loss for capsule networks (Sabour et al., 2017).

    Penalizes the target capsule for falling below `1 - smooth` and every
    other capsule for rising above `smooth`; off-target terms are weighted
    by `lamda`.
    """

    def __init__(self, smooth=0.1, lamda=0.6):
        super(CapsuleLoss, self).__init__()
        self.smooth = smooth
        self.lamda = lamda

    def forward(self, input, target):
        """
        input: FloatTensor (batch_size, num_classes) of capsule lengths
        target: LongTensor (batch_size) of class indices
        """
        zeros = torch.zeros_like(input).to(input.device)
        one_hot = zeros.scatter(1, target.unsqueeze(-1), 1)
        pos_margin = torch.max(zeros, 1 - self.smooth - input)  # target below 1 - smooth
        neg_margin = torch.max(zeros, input - self.smooth)      # non-target above smooth
        per_class = one_hot * pos_margin * pos_margin + self.lamda * (1 - one_hot) * neg_margin * neg_margin
        return per_class.sum(dim=1, keepdim=False).mean()
# CrossEntropyLoss for Label Smoothing Regularization
class CrossEntropyLoss_LSR(nn.Module):
    """Cross entropy with label smoothing: the true class keeps
    1 - para_LSR of the probability mass, the rest is spread uniformly."""

    def __init__(self, para_LSR=0.2):
        super(CrossEntropyLoss_LSR, self).__init__()
        self.para_LSR = para_LSR
        self.logSoftmax = nn.LogSoftmax(dim=-1)

    def _toOneHot_smooth(self, label, batchsize, classes):
        """Build the smoothed target distribution on CPU, shape (batchsize, classes)."""
        base = self.para_LSR * 1.0 / classes
        smoothed = torch.zeros(batchsize, classes) + base
        for row, cls in enumerate(label):
            smoothed[row, cls] += (1.0 - self.para_LSR)
        return smoothed

    def forward(self, pre, label, size_average=True):
        """
        pre: FloatTensor (batch_size, num_classes) of raw logits
        label: LongTensor (batch_size)
        """
        b, c = pre.size()
        target = self._toOneHot_smooth(label, b, c).to(pre.device)
        per_sample = torch.sum(-target * self.logSoftmax(pre), dim=1)
        return torch.mean(per_sample) if size_average else torch.sum(per_sample)
class SmoothCrossEntropy(nn.Module):
    """Label-smoothed cross entropy computed directly from logits."""

    def __init__(self, smooth=0.08):
        super(SmoothCrossEntropy, self).__init__()
        self.kldiv = nn.KLDivLoss()  # NOTE(review): currently unused in forward
        self.smooth = smooth

    def forward(self, input, target):
        """
        input: FloatTensor (batch_size, num_classes) of raw logits
        target: LongTensor (batch_size)
        """
        hard = torch.zeros_like(input).to(input.device).scatter(1, target.unsqueeze(-1), 1)
        # Smooth: true class gets 1 - smooth, remainder split over other classes.
        target = (1 - self.smooth) * hard + self.smooth / (input.size(1) - 1) * (1 - hard)
        # target = target + torch.rand_like(target).to(target.device) * 0.001
        input = input - input.max(dim=1, keepdim=True)[0]  # shift logits for stability
        return (-target * F.log_softmax(input, dim=-1)).mean()
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/utils/sentence_clip.py | from src.module.utils.constants import PAD_INDEX
def sentence_clip(sentence):
    """Trim trailing padding columns from a batch of id sequences.

    sentence: LongTensor (batch_size, time_step) padded with PAD_INDEX
    returns: the tensor sliced to the longest real length in the batch
    """
    non_pad = (sentence != PAD_INDEX)
    lengths = non_pad.long().sum(dim=1, keepdim=False)
    longest = lengths.max().item()
    return sentence[:, :longest]
MAMS-for-ABSA | MAMS-for-ABSA-master/train/test.py | import torch
import os
from train import make_aspect_term_model, make_aspect_category_model
from train.make_data import make_term_test_data, make_category_test_data
from train.eval import eval
def test(config):
    """Load the saved checkpoint for the configured model and report test accuracy."""
    mode = config['mode']
    is_term = (mode == 'term')
    if is_term:
        model = make_aspect_term_model.make_model(config)
    else:
        model = make_aspect_category_model.make_model(config)
    model = model.cuda()
    # Checkpoints live under <base_path>/checkpoints/<model type>.pth.
    checkpoint = os.path.join(config['base_path'], 'checkpoints/%s.pth' % config['aspect_' + mode + '_model']['type'])
    model.load_state_dict(torch.load(checkpoint))
    test_loader = make_term_test_data(config) if is_term else make_category_test_data(config)
    test_accuracy = eval(model, test_loader)
    print('test:\taccuracy: %.4f' % (test_accuracy))
MAMS-for-ABSA | MAMS-for-ABSA-master/train/make_aspect_term_model.py | import torch
from torch import nn
import numpy as np
import os
import yaml
from pytorch_pretrained_bert import BertModel
from src.aspect_term_model.recurrent_capsnet import RecurrentCapsuleNetwork
from src.aspect_term_model.bert_capsnet import BertCapsuleNetwork
def make_model(config):
    """Instantiate the aspect-term model named by config['aspect_term_model']['type']."""
    model_type = config['aspect_term_model']['type']
    builders = {
        'recurrent_capsnet': make_recurrent_capsule_network,
        'bert_capsnet': make_bert_capsule_network,
    }
    if model_type not in builders:
        raise ValueError('No Supporting.')
    return builders[model_type](config)
def make_bert_capsule_network(config):
    """Build a BERT-based capsule network for aspect-term sentiment."""
    base_path = os.path.join(config['base_path'])
    # Preprocessing records dataset statistics (e.g. num_categories) here.
    log = yaml.safe_load(open(os.path.join(base_path, 'log/log.yml')))
    model_config = config['aspect_term_model'][config['aspect_term_model']['type']]
    model = BertCapsuleNetwork(
        bert=BertModel.from_pretrained('bert-base-uncased'),
        bert_size=model_config['bert_size'],
        capsule_size=model_config['capsule_size'],
        dropout=model_config['dropout'],
        num_categories=log['num_categories']
    )
    model.load_sentiment(os.path.join(base_path, 'processed/sentiment_matrix.npy'))
    return model
def make_recurrent_capsule_network(config):
    """Build a GloVe + RNN capsule network for aspect-term sentiment."""
    embedding = make_embedding(config)
    base_path = os.path.join(config['base_path'])
    log = yaml.safe_load(open(os.path.join(base_path, 'log/log.yml')))
    model_config = config['aspect_term_model'][config['aspect_term_model']['type']]
    model = RecurrentCapsuleNetwork(
        embedding=embedding,
        num_layers=model_config['num_layers'],
        capsule_size=model_config['capsule_size'],
        bidirectional=model_config['bidirectional'],
        dropout=model_config['dropout'],
        num_categories=log['num_categories']
    )
    model.load_sentiment(os.path.join(base_path, 'processed/sentiment_matrix.npy'))
    return model
def make_embedding(config):
    """Create a word embedding layer initialized from the preprocessed GloVe matrix."""
    base_path = os.path.join(config['base_path'])
    log = yaml.safe_load(open(os.path.join(base_path, 'log/log.yml')))
    model_config = config['aspect_term_model'][config['aspect_term_model']['type']]
    embedding = nn.Embedding(num_embeddings=log['vocab_size'], embedding_dim=model_config['embed_size'])
    pretrained = np.load(os.path.join(base_path, 'processed/glove.npy'))
    embedding.weight.data.copy_(torch.tensor(pretrained))
    return embedding
MAMS-for-ABSA | MAMS-for-ABSA-master/train/make_data.py | import os
from torch.utils.data import DataLoader
from data_process.dataset import ABSADataset
# Which dataset fields each aspect-term model consumes, in argument order.
input_list = {
    'recurrent_capsnet': ['context', 'aspect'],
    'bert_capsnet': ['bert_token', 'bert_segment']
}
def make_term_data(config):
    """Build train/val DataLoaders for aspect-term classification."""
    base_path = config['base_path']
    fields = input_list[config['aspect_term_model']['type']]
    train_data = ABSADataset(os.path.join(base_path, 'processed/train.npz'), fields)
    val_data = ABSADataset(os.path.join(base_path, 'processed/val.npz'), fields)
    model_config = config['aspect_term_model'][config['aspect_term_model']['type']]
    train_loader = DataLoader(
        dataset=train_data,
        batch_size=model_config['batch_size'],
        shuffle=True,  # shuffle only the training split
        pin_memory=True
    )
    val_loader = DataLoader(
        dataset=val_data,
        batch_size=model_config['batch_size'],
        shuffle=False,
        pin_memory=True
    )
    return train_loader, val_loader
def make_term_test_data(config):
    """Build the test DataLoader for aspect-term classification."""
    base_path = config['base_path']
    fields = input_list[config['aspect_term_model']['type']]
    test_data = ABSADataset(os.path.join(base_path, 'processed/test.npz'), fields)
    model_config = config['aspect_term_model'][config['aspect_term_model']['type']]
    return DataLoader(
        dataset=test_data,
        batch_size=model_config['batch_size'],
        shuffle=False,
        pin_memory=True
    )
def make_category_data(config):
    """Build train/val DataLoaders for aspect-category classification."""
    model_type = config['aspect_category_model']['type']
    # BERT models take wordpiece + segment ids; others take word ids + category id.
    if 'bert' in model_type:
        fields = ['bert_token', 'bert_segment']
    else:
        fields = ['sentence', 'aspect']
    base_path = config['base_path']
    train_data = ABSADataset(os.path.join(base_path, 'processed/train.npz'), fields)
    val_data = ABSADataset(os.path.join(base_path, 'processed/val.npz'), fields)
    model_config = config['aspect_category_model'][model_type]
    train_loader = DataLoader(
        dataset=train_data,
        batch_size=model_config['batch_size'],
        shuffle=True,  # shuffle only the training split
        pin_memory=True
    )
    val_loader = DataLoader(
        dataset=val_data,
        batch_size=model_config['batch_size'],
        shuffle=False,
        pin_memory=True
    )
    return train_loader, val_loader
def make_category_test_data(config):
    """Build the test DataLoader for aspect-category classification."""
    model_type = config['aspect_category_model']['type']
    if 'bert' in model_type:
        fields = ['bert_token', 'bert_segment']
    else:
        fields = ['sentence', 'aspect']
    base_path = config['base_path']
    test_data = ABSADataset(os.path.join(base_path, 'processed/test.npz'), fields)
    model_config = config['aspect_category_model'][model_type]
    return DataLoader(
        dataset=test_data,
        batch_size=model_config['batch_size'],
        shuffle=False,
        pin_memory=True
    )
def make_distill_data(config):
    """Build loaders for distillation: train batches carry both GloVe-style and BERT inputs."""
    base_path = config['base_path']
    train_data = ABSADataset(os.path.join(base_path, 'processed/train.npz'),
                             ['context', 'aspect', 'bert_token', 'bert_segment'])
    # Validation only needs the student model's own inputs.
    val_data = ABSADataset(os.path.join(base_path, 'processed/val.npz'),
                           input_list[config['aspect_term_model']['type']])
    model_config = config['aspect_term_model'][config['aspect_term_model']['type']]
    train_loader = DataLoader(
        dataset=train_data,
        batch_size=model_config['batch_size'],
        shuffle=True,
        pin_memory=True
    )
    val_loader = DataLoader(
        dataset=val_data,
        batch_size=model_config['batch_size'],
        shuffle=False,
        pin_memory=True
    )
    return train_loader, val_loader
MAMS-for-ABSA | MAMS-for-ABSA-master/train/make_optimizer.py | from torch import optim
import adabound
def make_optimizer(config, model):
    """Create the optimizer named in the active model section of `config`."""
    mode = config['mode']
    model_config = config['aspect_' + mode + '_model'][config['aspect_' + mode + '_model']['type']]
    lr = model_config['learning_rate']
    weight_decay = model_config['weight_decay']
    opt = {
        'sgd': optim.SGD,
        'adadelta': optim.Adadelta,
        'adam': optim.Adam,
        'adamax': optim.Adamax,
        'adagrad': optim.Adagrad,
        'asgd': optim.ASGD,
        'rmsprop': optim.RMSprop,
        'adabound': adabound.AdaBound
    }
    factory = opt[model_config['optimizer']]
    kwargs = {'lr': lr, 'weight_decay': weight_decay}
    # Only some optimizers (e.g. SGD) accept momentum; pass it when configured.
    if 'momentum' in model_config:
        kwargs['momentum'] = model_config['momentum']
    return factory(model.parameters(), **kwargs)
MAMS-for-ABSA | MAMS-for-ABSA-master/train/eval.py | import torch
def eval(model, data_loader, criterion=None):
    """Run `model` over `data_loader` on GPU; return accuracy, plus mean loss
    when `criterion` is given. (Shadows the builtin `eval`; name kept for
    caller compatibility.)"""
    total_samples = 0
    correct_samples = 0
    total_loss = 0
    model.eval()
    with torch.no_grad():
        for input0, input1, label in data_loader:
            input0, input1, label = input0.cuda(), input1.cuda(), label.cuda()
            logit = model(input0, input1)
            batch = input0.size(0)
            if criterion is not None:
                total_loss += criterion(logit, label).item() * batch
            total_samples += batch
            correct_samples += (label == logit.argmax(dim=1)).long().sum().item()
    accuracy = correct_samples / total_samples
    if criterion is not None:
        return accuracy, total_loss / total_samples
    return accuracy
MAMS-for-ABSA | MAMS-for-ABSA-master/train/__init__.py | 0 | 0 | 0 | py | |
MAMS-for-ABSA | MAMS-for-ABSA-master/train/make_aspect_category_model.py | import torch
from torch import nn
import numpy as np
import os
import yaml
from pytorch_pretrained_bert import BertModel
from src.aspect_category_model.recurrent_capsnet import RecurrentCapsuleNetwork
from src.aspect_category_model.bert_capsnet import BertCapsuleNetwork
def make_model(config):
    """Instantiate the aspect-category model named by config['aspect_category_model']['type']."""
    model_type = config['aspect_category_model']['type']
    builders = {
        'recurrent_capsnet': make_recurrent_capsule_network,
        'bert_capsnet': make_bert_capsule_network,
    }
    if model_type not in builders:
        raise ValueError('No Supporting.')
    return builders[model_type](config)
def make_bert_capsule_network(config):
    """Build a BERT-based capsule network for aspect-category sentiment."""
    base_path = os.path.join(config['base_path'])
    # Preprocessing records dataset statistics (e.g. num_categories) here.
    log = yaml.safe_load(open(os.path.join(base_path, 'log/log.yml')))
    model_config = config['aspect_category_model'][config['aspect_category_model']['type']]
    model = BertCapsuleNetwork(
        bert=BertModel.from_pretrained('bert-base-uncased'),
        bert_size=model_config['bert_size'],
        capsule_size=model_config['capsule_size'],
        dropout=model_config['dropout'],
        num_categories=log['num_categories']
    )
    model.load_sentiment(os.path.join(base_path, 'processed/sentiment_matrix.npy'))
    return model
def make_recurrent_capsule_network(config):
    """Build a GloVe + RNN capsule network for aspect-category sentiment."""
    embedding = make_embedding(config)
    base_path = os.path.join(config['base_path'])
    log = yaml.safe_load(open(os.path.join(base_path, 'log/log.yml')))
    model_config = config['aspect_category_model'][config['aspect_category_model']['type']]
    # 8 predefined aspect categories (matches the category dict used in preprocessing).
    aspect_embedding = nn.Embedding(num_embeddings=8, embedding_dim=model_config['embed_size'])
    model = RecurrentCapsuleNetwork(
        embedding=embedding,
        aspect_embedding=aspect_embedding,
        num_layers=model_config['num_layers'],
        bidirectional=model_config['bidirectional'],
        capsule_size=model_config['capsule_size'],
        dropout=model_config['dropout'],
        num_categories=log['num_categories']
    )
    model.load_sentiment(os.path.join(base_path, 'processed/sentiment_matrix.npy'))
    return model
def make_embedding(config):
    """Create a word embedding layer initialized from the preprocessed GloVe matrix."""
    base_path = os.path.join(config['base_path'])
    log = yaml.safe_load(open(os.path.join(base_path, 'log/log.yml')))
    model_config = config['aspect_category_model'][config['aspect_category_model']['type']]
    embedding = nn.Embedding(num_embeddings=log['vocab_size'], embedding_dim=model_config['embed_size'])
    pretrained = np.load(os.path.join(base_path, 'processed/glove.npy'))
    embedding.weight.data.copy_(torch.tensor(pretrained))
    return embedding
MAMS-for-ABSA | MAMS-for-ABSA-master/train/train.py | import torch
from torch import nn
from torch import optim
from train import make_aspect_term_model, make_aspect_category_model
from train.make_data import make_term_data, make_category_data
from train.make_optimizer import make_optimizer
from train.eval import eval
import os
import time
import pickle
from src.module.utils.loss import CapsuleLoss
# torch.backends.cudnn.benchmark = True
# torch.backends.cudnn.deterministic = True
def train(config):
    """Train the configured model, evaluating every 10 steps and saving the
    checkpoint with the lowest validation loss (from epoch 1 onward)."""
    mode = config['mode']
    if mode == 'term':
        model = make_aspect_term_model.make_model(config)
        train_loader, val_loader = make_term_data(config)
    else:
        model = make_aspect_category_model.make_model(config)
        train_loader, val_loader = make_category_data(config)
    model = model.cuda()
    base_path = config['base_path']
    model_path = os.path.join(base_path, 'checkpoints/%s.pth' % config['aspect_' + mode + '_model']['type'])
    if not os.path.exists(os.path.dirname(model_path)):
        os.makedirs(os.path.dirname(model_path))
    # NOTE(review): index2word is loaded but not used below — possibly left
    # over from debugging; confirm before removing.
    with open(os.path.join(base_path, 'processed/index2word.pickle'), 'rb') as handle:
        index2word = pickle.load(handle)
    criterion = CapsuleLoss()
    optimizer = make_optimizer(config, model)
    max_val_accuracy = 0
    min_val_loss = 100
    global_step = 0
    # From here on, `config` is rebound to the active model's sub-config.
    config = config['aspect_' + mode + '_model'][config['aspect_' + mode + '_model']['type']]
    for epoch in range(config['num_epoches']):
        total_loss = 0
        total_samples = 0
        correct_samples = 0
        start = time.time()
        for i, data in enumerate(train_loader):
            global_step += 1
            model.train()
            input0, input1, label = data
            input0, input1, label = input0.cuda(), input1.cuda(), label.cuda()
            optimizer.zero_grad()
            logit = model(input0, input1)
            loss = criterion(logit, label)
            # Accumulate running loss/accuracy since the last report.
            batch_size = input0.size(0)
            total_loss += batch_size * loss.item()
            total_samples += batch_size
            pred = logit.argmax(dim=1)
            correct_samples += (label == pred).long().sum().item()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)  # gradient clipping
            optimizer.step()
            if i % 10 == 0 and i > 0:
                # Report train stats over the last window, then validate.
                train_loss = total_loss / total_samples
                train_accuracy = correct_samples / total_samples
                total_loss = 0
                total_samples = 0
                correct_samples = 0
                val_accuracy, val_loss = eval(model, val_loader, criterion)
                print('[epoch %2d] [step %3d] train_loss: %.4f train_acc: %.4f val_loss: %.4f val_acc: %.4f'
                      % (epoch, i, train_loss, train_accuracy, val_loss, val_accuracy))
                if val_accuracy > max_val_accuracy:
                    max_val_accuracy = val_accuracy
                    # torch.save(aspect_term_model.state_dict(), model_path)
                # Checkpoint on best validation loss, skipping the first epoch.
                if val_loss < min_val_loss:
                    min_val_loss = val_loss
                    if epoch > 0:
                        torch.save(model.state_dict(), model_path)
        end = time.time()
        print('time: %.4fs' % (end - start))
    print('max_val_accuracy:', max_val_accuracy)
MAMS-for-ABSA | MAMS-for-ABSA-master/data_process/data_process.py | import os
import numpy as np
import pickle
import yaml
from data_process.utils import *
def data_process(config):
    """End-to-end preprocessing: parse raw XML splits, drop 'conflict' labels,
    build the vocabulary, save padded index arrays, the GloVe embedding matrix,
    the sentiment matrix, and a YAML statistics log."""
    mode = config['mode']
    assert mode in ('term', 'category')
    base_path = config['base_path']
    raw_train_path = os.path.join(base_path, 'raw/train.xml')
    raw_val_path = os.path.join(base_path, 'raw/val.xml')
    raw_test_path = os.path.join(base_path, 'raw/test.xml')
    lowercase = config['lowercase']
    if mode == 'term':
        train_data = parse_sentence_term(raw_train_path, lowercase=lowercase)
        val_data = parse_sentence_term(raw_val_path, lowercase=lowercase)
        test_data = parse_sentence_term(raw_test_path, lowercase=lowercase)
    else:
        train_data = parse_sentence_category(raw_train_path, lowercase=lowercase)
        val_data = parse_sentence_category(raw_val_path, lowercase=lowercase)
        test_data = parse_sentence_category(raw_test_path, lowercase=lowercase)
    # Instances labeled 'conflict' are removed from every split.
    remove_list = ['conflict']
    train_data = category_filter(train_data, remove_list)
    val_data = category_filter(val_data, remove_list)
    test_data = category_filter(test_data, remove_list)
    # Vocabulary is built from the training split only.
    word2index, index2word = build_vocab(train_data, max_size=config['max_vocab_size'], min_freq=config['min_vocab_freq'])
    if not os.path.exists(os.path.join(base_path, 'processed')):
        os.makedirs(os.path.join(base_path, 'processed'))
    if mode == 'term':
        save_term_data(train_data, word2index, os.path.join(base_path, 'processed/train.npz'))
        save_term_data(val_data, word2index, os.path.join(base_path, 'processed/val.npz'))
        save_term_data(test_data, word2index, os.path.join(base_path, 'processed/test.npz'))
    else:
        save_category_data(train_data, word2index, os.path.join(base_path, 'processed/train.npz'))
        save_category_data(val_data, word2index, os.path.join(base_path, 'processed/val.npz'))
        save_category_data(test_data, word2index, os.path.join(base_path, 'processed/test.npz'))
    glove = load_glove(config['glove_path'], len(index2word), word2index)
    sentiment_matrix = load_sentiment_matrix(config['glove_path'], config['sentiment_path'])
    np.save(os.path.join(base_path, 'processed/glove.npy'), glove)
    np.save(os.path.join(base_path, 'processed/sentiment_matrix.npy'), sentiment_matrix)
    with open(os.path.join(base_path, 'processed/word2index.pickle'), 'wb') as handle:
        pickle.dump(word2index, handle)
    with open(os.path.join(base_path, 'processed/index2word.pickle'), 'wb') as handle:
        pickle.dump(index2word, handle)
    analyze = analyze_term if mode == 'term' else analyze_category
    log = {
        'vocab_size': len(index2word),
        # Words seen but excluded by max_size/min_freq limits.
        'oov_size': len(word2index) - len(index2word),
        'train_data': analyze(train_data),
        'val_data': analyze(val_data),
        'test_data': analyze(test_data),
        # Three polarity classes remain after dropping 'conflict'.
        'num_categories': 3
    }
    if not os.path.exists(os.path.join(base_path, 'log')):
        os.makedirs(os.path.join(base_path, 'log'))
    with open(os.path.join(base_path, 'log/log.yml'), 'w') as handle:
        yaml.safe_dump(log, handle, encoding='utf-8', allow_unicode=True, default_flow_style=False)
MAMS-for-ABSA | MAMS-for-ABSA-master/data_process/utils.py | import os
import numpy as np
import random
from xml.etree.ElementTree import parse
from pytorch_pretrained_bert import BertModel, BertTokenizer
from data_process.vocab import Vocab
from src.module.utils.constants import UNK, PAD_INDEX, ASPECT_INDEX
import spacy
import re
import json
# Matches an entire <url>...</url> span so it can be replaced with a placeholder.
url = re.compile('(<url>.*</url>)')
# spaCy English pipeline, used only for tokenization.
spacy_en = spacy.load('en')
def check(x):
    """Keep a token only if it is non-empty and not pure whitespace."""
    return bool(x) and not x.isspace()
def tokenizer(text):
    """Tokenize with spaCy after collapsing <url>...</url> spans to @URL@,
    dropping empty or whitespace-only tokens."""
    cleaned = url.sub('@URL@', text)
    return [tok.text for tok in spacy_en.tokenizer(cleaned) if check(tok.text)]
bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
def parse_sentence_term(path, lowercase=False):
    """Parse a SemEval-style XML file into aspect-term instances.

    Each returned string packs text/term/polarity/from/to joined by '__split__'.
    Sentences without a <text> or <aspectTerms> element are skipped.
    """
    split_char = '__split__'
    data = []
    for sentence in parse(path).getroot():
        text_node = sentence.find('text')
        if text_node is None:
            continue
        text = text_node.text
        if lowercase:
            text = text.lower()
        aspect_terms = sentence.find('aspectTerms')
        if aspect_terms is None:
            continue
        for aspect_term in aspect_terms:
            term = aspect_term.get('term')
            if lowercase:
                term = term.lower()
            record = split_char.join([
                text,
                term,
                aspect_term.get('polarity'),
                aspect_term.get('from'),
                aspect_term.get('to'),
            ])
            data.append(record)
    return data
def parse_sentence_category(path, lowercase=False):
    """Parse a SemEval-style XML file into aspect-category instances.

    Each returned string packs text/category/polarity joined by '__split__'.
    Only the sentence text is lowercased; category names are kept as-is.
    """
    split_char = '__split__'
    data = []
    for sentence in parse(path).getroot():
        text_node = sentence.find('text')
        if text_node is None:
            continue
        text = text_node.text
        if lowercase:
            text = text.lower()
        aspect_categories = sentence.find('aspectCategories')
        if aspect_categories is None:
            continue
        for aspect_category in aspect_categories:
            record = split_char.join([
                text,
                aspect_category.get('category'),
                aspect_category.get('polarity'),
            ])
            data.append(record)
    return data
def category_filter(data, remove_list):
    """Drop instances whose polarity (third '__split__' field) is in `remove_list`."""
    remove_set = set(remove_list)
    return [piece for piece in data if piece.split('__split__')[2] not in remove_set]
def build_vocab(data, max_size, min_freq):
    """Count tokens over the corpus texts and build (word2index, index2word).

    The YAML config encodes "no limit" as the literal string 'None'.
    """
    if max_size == 'None':
        max_size = None
    vocab = Vocab()
    for piece in data:
        text = piece.split('__split__')[0]
        vocab.add_list(tokenizer(text))
    return vocab.get_vocab(max_size=max_size, min_freq=min_freq)
def save_term_data(data, word2index, path):
    """Convert aspect-term instances to padded index arrays and save as an .npz.

    Arrays written: sentence, aspect, label, context (aspect span replaced by a
    single <aspect> placeholder), bert_token / bert_segment (wordpiece inputs
    for "[CLS] text [SEP] term [SEP]"), and td_left / td_right (target-dependent
    left/right spans, right span reversed).
    """
    dirname = os.path.dirname(path)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    sentence = []
    aspect = []
    label = []
    context = []
    bert_token = []
    bert_segment = []
    td_left = []
    td_right = []
    # f: word -> index (UNK for OOV); g: raw text -> list of indices.
    f = lambda x: word2index[x] if x in word2index else word2index[UNK]
    g = lambda x: list(map(f, tokenizer(x)))
    # Polarity label encoding.
    d = {
        'positive': 0,
        'negative': 1,
        'neutral': 2,
        'conflict': 3
    }
    for piece in data:
        text, term, polarity, start, end = piece.split('__split__')
        start, end = int(start), int(end)
        # Character offsets must exactly delimit the aspect term.
        assert text[start: end] == term
        sentence.append(g(text))
        aspect.append(g(term))
        label.append(d[polarity])
        left_part = g(text[:start])
        right_part = g(text[end:])
        context.append(left_part + [ASPECT_INDEX] + right_part)
        bert_sentence = bert_tokenizer.tokenize(text)
        bert_aspect = bert_tokenizer.tokenize(term)
        bert_token.append(bert_tokenizer.convert_tokens_to_ids(['[CLS]'] + bert_sentence + ['[SEP]'] + bert_aspect + ['[SEP]']))
        # Segment ids: 0 for "[CLS] text [SEP]", 1 for "term [SEP]".
        bert_segment.append([0] * (len(bert_sentence) + 2) + [1] * (len(bert_aspect) + 1))
        td_left.append(g(text[:end]))
        td_right.append(g(text[start:])[::-1])
        assert len(bert_token[-1]) == len(bert_segment[-1])
    max_length = lambda x: max([len(y) for y in x])
    sentence_max_len = max_length(sentence)
    aspect_max_len = max_length(aspect)
    context_max_len = max_length(context)
    bert_max_len = max_length(bert_token)
    td_left_max_len = max_length(td_left)
    td_right_max_len = max_length(td_right)
    num = len(data)
    for i in range(num):
        # Right-pad every sequence with 0 (PAD) to the longest of its kind.
        sentence[i].extend([0] * (sentence_max_len - len(sentence[i])))
        aspect[i].extend([0] * (aspect_max_len - len(aspect[i])))
        context[i].extend([0] * (context_max_len - len(context[i])))
        bert_token[i].extend([0] * (bert_max_len - len(bert_token[i])))
        bert_segment[i].extend([0] * (bert_max_len - len(bert_segment[i])))
        td_left[i].extend([0] * (td_left_max_len - len(td_left[i])))
        td_right[i].extend([0] * (td_right_max_len - len(td_right[i])))
    sentence = np.asarray(sentence, dtype=np.int32)
    aspect = np.asarray(aspect, dtype=np.int32)
    label = np.asarray(label, dtype=np.int32)
    context = np.asarray(context, dtype=np.int32)
    bert_token = np.asarray(bert_token, dtype=np.int32)
    bert_segment = np.asarray(bert_segment, dtype=np.int32)
    td_left = np.asarray(td_left, dtype=np.int32)
    td_right = np.asarray(td_right, dtype=np.int32)
    np.savez(path, sentence=sentence, aspect=aspect, label=label, context=context, bert_token=bert_token, bert_segment=bert_segment,
             td_left=td_left, td_right=td_right)
def save_category_data(data, word2index, path):
    """Convert aspect-category instances to padded index arrays and save as .npz.

    Arrays written: sentence (word indices), aspect (category id), label
    (polarity id), bert_token / bert_segment (wordpiece inputs for
    "[CLS] text [SEP] category [SEP]").
    """
    dirname = os.path.dirname(path)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    sentence = []
    aspect = []
    label = []
    bert_token = []
    bert_segment = []
    # f: word -> index (UNK for OOV); g: raw text -> list of indices.
    f = lambda x: word2index[x] if x in word2index else word2index[UNK]
    g = lambda x: list(map(f, tokenizer(x)))
    # Polarity label encoding.
    d = {
        'positive': 0,
        'negative': 1,
        'neutral': 2,
        'conflict': 3
    }
    # Fixed id for each of the 8 aspect categories.
    cd = {
        'food': 0,
        'service': 1,
        'staff': 2,
        'price': 3,
        'ambience': 4,
        'menu': 5,
        'place': 6,
        'miscellaneous': 7
    }
    for piece in data:
        text, category, polarity = piece.split('__split__')
        sentence.append(g(text))
        aspect.append(cd[category])
        label.append(d[polarity])
        bert_sentence = bert_tokenizer.tokenize(text)
        bert_aspect = bert_tokenizer.tokenize(category)
        bert_token.append(bert_tokenizer.convert_tokens_to_ids(['[CLS]'] + bert_sentence + ['[SEP]'] + bert_aspect + ['[SEP]']))
        # Segment ids: 0 for "[CLS] text [SEP]", 1 for "category [SEP]".
        bert_segment.append([0] * (len(bert_sentence) + 2) + [1] * (len(bert_aspect) + 1))
        assert len(bert_token[-1]) == len(bert_segment[-1])
    max_length = lambda x: max([len(y) for y in x])
    sentence_max_len = max_length(sentence)
    bert_max_len = max_length(bert_token)
    num = len(data)
    for i in range(num):
        # Right-pad with 0 (PAD) to the longest sequence of each kind.
        sentence[i].extend([0] * (sentence_max_len - len(sentence[i])))
        bert_token[i].extend([0] * (bert_max_len - len(bert_token[i])))
        bert_segment[i].extend([0] * (bert_max_len - len(bert_segment[i])))
    sentence = np.asarray(sentence, dtype=np.int32)
    aspect = np.asarray(aspect, dtype=np.int32)
    label = np.asarray(label, dtype=np.int32)
    bert_token = np.asarray(bert_token, dtype=np.int32)
    bert_segment = np.asarray(bert_segment, dtype=np.int32)
    np.savez(path, sentence=sentence, aspect=aspect, label=label, bert_token=bert_token, bert_segment=bert_segment)
def analyze_term(data):
    """Collect corpus statistics for aspect-term samples.

    Parameters
    ----------
    data : list of str
        Each element has the form
        ``"<text>__split__<term>__split__<polarity>__split__<from>__split__<to>"``.

    Returns
    -------
    dict
        Total sample count, one count per polarity label, and the max /
        average token length of sentences and aspect terms.
    """
    sentence_lens = []
    aspect_lens = []
    log = {'total': len(data)}
    for piece in data:
        text, term, polarity, _, _ = piece.split('__split__')
        # `tokenizer` is the module-level tokenizer shared by the pipeline.
        sentence_lens.append(len(tokenizer(text)))
        aspect_lens.append(len(tokenizer(term)))
        # Count each polarity label (idiomatic replacement for the old
        # `if not polarity in log` membership dance).
        log[polarity] = log.get(polarity, 0) + 1
    log['sentence_max_len'] = max(sentence_lens)
    log['sentence_avg_len'] = sum(sentence_lens) / len(sentence_lens)
    log['aspect_max_len'] = max(aspect_lens)
    log['aspect_avg_len'] = sum(aspect_lens) / len(aspect_lens)
    return log
def analyze_category(data):
    """Collect corpus statistics for aspect-category samples.

    Parameters
    ----------
    data : list of str
        Each element has the form
        ``"<text>__split__<category>__split__<polarity>"``.

    Returns
    -------
    dict
        Total sample count, one count per polarity label, and the max /
        average sentence token length.
    """
    sentence_lens = []
    log = {'total': len(data)}
    for piece in data:
        text, category, polarity = piece.split('__split__')
        # `tokenizer` is the module-level tokenizer shared by the pipeline.
        sentence_lens.append(len(tokenizer(text)))
        # Count each polarity label (same idiom as analyze_term).
        log[polarity] = log.get(polarity, 0) + 1
    log['sentence_max_len'] = max(sentence_lens)
    log['sentence_avg_len'] = sum(sentence_lens) / len(sentence_lens)
    return log
def load_glove(path, vocab_size, word2index):
    """Read pretrained GloVe vectors for the known vocabulary.

    Words without a pretrained vector keep a small uniform random
    initialisation; the PAD row is zeroed out.  Assumes 300-dimensional
    vectors in the GloVe text format.
    """
    if not os.path.isfile(path):
        raise IOError('Not a file', path)
    embedding = np.random.uniform(-0.01, 0.01, [vocab_size, 300])
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            fields = line.split(' ')
            word = fields[0]
            if word in word2index:
                embedding[word2index[word]] = np.array([float(v) for v in fields[1:]])
    # Padding token must contribute nothing to downstream sums.
    embedding[PAD_INDEX, :] = 0
    return embedding
def load_sentiment_matrix(glove_path, sentiment_path):
sentiment_matrix = np.zeros((3, 300), dtype=np.float32)
sd = json.load(open(sentiment_path, 'r', encoding='utf-8'))
sd['positive'] = set(sd['positive'])
sd['negative'] = set(sd['negative'])
sd['neutral'] = set(sd['neutral'])
with open(glove_path, 'r', encoding='utf-8') as f:
for line in f:
content = line.split(' ')
word = content[0]
vec = np.array(list(map(float, content[1:])))
if word in sd['positive']:
sentiment_matrix[0] += vec
elif word in sd['negative']:
sentiment_matrix[1] += vec
elif word in sd['neutral']:
sentiment_matrix[2] += vec
sentiment_matrix -= sentiment_matrix.mean()
sentiment_matrix = sentiment_matrix / sentiment_matrix.std() * np.sqrt(2.0 / (300.0 + 3.0))
return sentiment_matrix | 10,092 | 36.520446 | 132 | py |
# MAMS-for-ABSA-master/data_process/dataset.py
import torch
from torch.utils.data import Dataset
import numpy as np
class ABSADataset(Dataset):
    """Dataset backed by a preprocessed ``.npz`` archive.

    Parameters
    ----------
    path : str
        Path to an ``.npz`` file produced by the preprocessing scripts; it
        must contain at least a ``label`` array.
    input_list : list of str
        Names of the stored arrays (e.g. ``'sentence'``, ``'aspect'``) that
        are returned, in order, before the label for every sample.
    """

    def __init__(self, path, input_list):
        super(ABSADataset, self).__init__()
        data = np.load(path)
        # Convert every stored array to a long tensor once, up front.
        self.data = {key: torch.tensor(value).long() for key, value in data.items()}
        self.len = self.data['label'].size(0)
        self.input_list = input_list

    def __getitem__(self, index):
        # `name` instead of the original `input`, which shadowed the builtin.
        sample = [self.data[name][index] for name in self.input_list]
        sample.append(self.data['label'][index])
        return sample

    def __len__(self):
        return self.len
# MAMS-for-ABSA-master/data_process/vocab.py
import operator
from src.module.utils.constants import PAD, UNK, ASPECT
class Vocab(object):
    """Frequency-counting vocabulary builder.

    Words are accumulated with :meth:`add` / :meth:`add_list`; calling
    :meth:`get_vocab` produces the final word<->index mappings with the
    special tokens PAD, UNK and ASPECT placed first.
    """

    def __init__(self):
        self._count_dict = dict()
        self._predefined_list = [PAD, UNK, ASPECT]

    def add(self, word):
        # Bump the frequency counter for a single word.
        self._count_dict[word] = self._count_dict.get(word, 0) + 1

    def add_list(self, words):
        for w in words:
            self.add(w)

    def get_vocab(self, max_size=None, min_freq=0):
        """Return ``(word2index, index2word)``.

        Words beyond ``max_size`` or rarer than ``min_freq`` are all mapped
        onto the UNK index; ``index2word`` therefore only contains the words
        that received their own slot.
        """
        by_freq = sorted(self._count_dict.items(), key=lambda item: item[1], reverse=True)
        word2index = {}
        for token in self._predefined_list:
            word2index[token] = len(word2index)
        for word, freq in by_freq:
            if word in word2index:
                continue
            capped = max_size is not None and len(word2index) >= max_size
            if capped or freq < min_freq:
                word2index[word] = word2index[UNK]
            else:
                word2index[word] = len(word2index)
        index2word = {word2index[UNK]: UNK}
        for word, index in word2index.items():
            if index != word2index[UNK]:
                index2word[index] = word
        return word2index, index2word
# OpenFWI-main/pytorch_ssim.py
# From https://github.com/Po-Hsun-Su/pytorch-ssim/blob/master/pytorch_ssim/__init__.py
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
def gaussian(window_size, sigma):
    """Return a 1-D Gaussian kernel of length ``window_size`` normalised to sum to 1."""
    center = window_size // 2
    weights = torch.Tensor(
        [exp(-(i - center) ** 2 / float(2 * sigma ** 2)) for i in range(window_size)])
    return weights / weights.sum()
def create_window(window_size, channel):
    """Build a (channel, 1, ws, ws) 2-D Gaussian window for grouped conv2d."""
    kernel_1d = gaussian(window_size, 1.5).unsqueeze(1)
    # Outer product of the 1-D kernel with itself gives the 2-D kernel.
    kernel_2d = kernel_1d.mm(kernel_1d.t()).float().unsqueeze(0).unsqueeze(0)
    return Variable(kernel_2d.expand(channel, 1, window_size, window_size).contiguous())
def _ssim(img1, img2, window, window_size, channel, size_average = True):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
    """SSIM module that caches the Gaussian window between forward calls.

    The cached window is rebuilt only when the channel count or the dtype
    of the input changes.
    """

    def __init__(self, window_size=11, size_average=True):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = 1
        self.window = create_window(window_size, self.channel)

    def forward(self, img1, img2):
        (_, channel, _, _) = img1.size()
        if channel == self.channel and self.window.data.type() == img1.data.type():
            window = self.window
        else:
            # Input layout changed: rebuild the window and cache it.
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            self.window = window
            self.channel = channel
        return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
def ssim(img1, img2, window_size=11, size_average=True):
    """Functional SSIM: builds a window matching ``img1`` and evaluates it."""
    (_, channel, _, _) = img1.size()
    window = create_window(window_size, channel)
    # Move the window to the same device/dtype as the input.
    if img1.is_cuda:
        window = window.cuda(img1.get_device())
    window = window.type_as(img1)
    return _ssim(img1, img2, window, window_size, channel, size_average)
# OpenFWI-main/test.py
# © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import os
import sys
import time
import datetime
import json
import torch
import torch.nn as nn
from torch.utils.data import SequentialSampler
from torch.utils.data.dataloader import default_collate
import torchvision
from torchvision.transforms import Compose
import numpy as np
import utils
import network
from vis import *
from dataset import FWIDataset
import transforms as T
import pytorch_ssim
def evaluate(model, criterions, dataloader, device, k, ctx,
             vis_path, vis_batch, vis_sample, missing, std):
    """Run the inverse model over the validation set and print metrics.

    Prints MAE / MSE / SSIM on the normalized tensors plus every entry of
    ``criterions`` on the denormalized velocity maps, and optionally writes
    comparison figures.

    Args:
        model: trained inversion network mapping seismic data to velocity maps.
        criterions: dict name -> callable(label, pred) on denormalized numpy arrays.
        dataloader: validation loader yielding (data, label) batches.
        device: torch device to run inference on.
        k: log-transform coefficient used when denormalizing seismic data.
        ctx: dataset context with 'data_min'/'data_max'/'label_min'/'label_max'.
        vis_path: folder for figures, or None to disable visualization.
        vis_batch: number of batches to visualize.
        vis_sample: number of samples per batch to visualize.
        missing: number of traces to mute per sample (0 disables muting).
        std: noise level -- NOTE(review): used as a *variance* below
            (noise is scaled by std ** 0.5), although the CLI help calls it
            a standard deviation; confirm intended semantics.
    """
    model.eval()
    label_list, label_pred_list= [], [] # store denormalized prediction & gt in numpy
    label_tensor, label_pred_tensor = [], [] # store normalized prediction & gt in tensor
    if missing or std:
        data_list, data_noise_list = [], [] # store original data and noisy/muted data
    with torch.no_grad():
        batch_idx = 0
        for data, label in dataloader:
            data = data.type(torch.FloatTensor).to(device, non_blocking=True)
            label = label.type(torch.FloatTensor).to(device, non_blocking=True)
            label_np = T.tonumpy_denormalize(label, ctx['label_min'], ctx['label_max'], exp=False)
            label_list.append(label_np)
            label_tensor.append(label)
            if missing or std:
                # Add gaussian noise, clipped back to the normalized [-1, 1] range.
                data_noise = torch.clip(data + (std ** 0.5) * torch.randn(data.shape).to(device, non_blocking=True), min=-1, max=1)
                # Mute some traces: `missing` randomly chosen receiver columns are
                # overwritten with a single scalar taken from the first element of
                # the batch -- presumably a background amplitude; TODO confirm.
                mute_idx = np.random.choice(data.shape[3], size=missing, replace=False)
                data_noise[:, :, :, mute_idx] = data[0, 0, 0, 0]
                data_np = T.tonumpy_denormalize(data, ctx['data_min'], ctx['data_max'], k=k)
                data_noise_np = T.tonumpy_denormalize(data_noise, ctx['data_min'], ctx['data_max'], k=k)
                data_list.append(data_np)
                data_noise_list.append(data_noise_np)
                pred = model(data_noise)
            else:
                pred = model(data)
            label_pred_np = T.tonumpy_denormalize(pred, ctx['label_min'], ctx['label_max'], exp=False)
            label_pred_list.append(label_pred_np)
            label_pred_tensor.append(pred)
            # Visualization of the first `vis_batch` batches, `vis_sample` samples each.
            if vis_path and batch_idx < vis_batch:
                for i in range(vis_sample):
                    plot_velocity(label_pred_np[i, 0], label_np[i, 0], f'{vis_path}/V_{batch_idx}_{i}.png') #, vmin=ctx['label_min'], vmax=ctx['label_max'])
                    if missing or std:
                        for ch in [2]: # range(data.shape[1]):
                            plot_seismic(data_np[i, ch], data_noise_np[i, ch], f'{vis_path}/S_{batch_idx}_{i}_{ch}.png',
                                        vmin=ctx['data_min'] * 0.01, vmax=ctx['data_max'] * 0.01)
            batch_idx += 1
    # Metrics on denormalized numpy arrays (criterions) ...
    label, label_pred = np.concatenate(label_list), np.concatenate(label_pred_list)
    # ... and on normalized tensors (MAE / MSE / SSIM below).
    label_t, pred_t = torch.cat(label_tensor), torch.cat(label_pred_tensor)
    l1 = nn.L1Loss()
    l2 = nn.MSELoss()
    print(f'MAE: {l1(label_t, pred_t)}')
    print(f'MSE: {l2(label_t, pred_t)}')
    ssim_loss = pytorch_ssim.SSIM(window_size=11)
    print(f'SSIM: {ssim_loss(label_t / 2 + 0.5, pred_t / 2 + 0.5)}') # (-1, 1) to (0, 1)
    for name, criterion in criterions.items():
        print(f' * Velocity {name}: {criterion(label, label_pred)}')
    # print(f' | Velocity 2 layers {name}: {criterion(label[:1000], label_pred[:1000])}')
    # print(f' | Velocity 3 layers {name}: {criterion(label[1000:2000], label_pred[1000:2000])}')
    # print(f' | Velocity 4 layers {name}: {criterion(label[2000:], label_pred[2000:])}')
def main(args):
    """Entry point for testing: load data and checkpoint, then evaluate.

    Reads the per-dataset normalization context from ``dataset_config.json``,
    builds the validation dataset/dataloader, instantiates the requested
    model (optionally restored from a checkpoint), and runs :func:`evaluate`.
    Exits the process for unsupported dataset or model names.
    """
    print(args)
    print("torch version: ", torch.__version__)
    print("torchvision version: ", torchvision.__version__)
    utils.mkdir(args.output_path)
    device = torch.device(args.device)
    torch.backends.cudnn.benchmark = True
    # Dataset context: min/max ranges and file size for this dataset name.
    with open('dataset_config.json') as f:
        try:
            ctx = json.load(f)[args.dataset]
        except KeyError:
            print('Unsupported dataset.')
            sys.exit()
    if args.file_size is not None:
        ctx['file_size'] = args.file_size
    print("Loading data")
    print("Loading validation data")
    # Seismic data is log-transformed before min-max normalization, so the
    # normalization bounds must be log-transformed with the same k.
    log_data_min = T.log_transform(ctx['data_min'], k=args.k)
    log_data_max = T.log_transform(ctx['data_max'], k=args.k)
    transform_valid_data = Compose([
        T.LogTransform(k=args.k),
        T.MinMaxNormalize(log_data_min, log_data_max),
    ])
    transform_valid_label = Compose([
        T.MinMaxNormalize(ctx['label_min'], ctx['label_max'])
    ])
    # A .txt annotation file lists npy files; anything else is assumed to be
    # a pickled dataset saved with torch.save.
    if args.val_anno[-3:] == 'txt':
        dataset_valid = FWIDataset(
            args.val_anno,
            sample_ratio=args.sample_temporal,
            file_size=ctx['file_size'],
            transform_data=transform_valid_data,
            transform_label=transform_valid_label
        )
    else:
        dataset_valid = torch.load(args.val_anno)
    print("Creating data loaders")
    valid_sampler = SequentialSampler(dataset_valid)
    dataloader_valid = torch.utils.data.DataLoader(
        dataset_valid, batch_size=args.batch_size,
        sampler=valid_sampler, num_workers=args.workers,
        pin_memory=True, collate_fn=default_collate)
    print("Creating model")
    if args.model not in network.model_dict:
        print('Unsupported model.')
        sys.exit()
    model = network.model_dict[args.model](upsample_mode=args.up_mode,
        sample_spatial=args.sample_spatial, sample_temporal=args.sample_temporal, norm=args.norm).to(device)
    # Metrics evaluated on denormalized numpy arrays inside evaluate().
    criterions = {
        'MAE': lambda x, y: np.mean(np.abs(x - y)),
        'MSE': lambda x, y: np.mean((x - y) ** 2)
    }
    if args.resume:
        print(args.resume)
        checkpoint = torch.load(args.resume, map_location='cpu')
        # replace_legacy renames old building-block keys to the current ones.
        model.load_state_dict(network.replace_legacy(checkpoint['model']))
        print('Loaded model checkpoint at Epoch {} / Step {}.'.format(checkpoint['epoch'], checkpoint['step']))
    if args.vis:
        # Create folder to store visualization results
        vis_folder = f'visualization_{args.vis_suffix}' if args.vis_suffix else 'visualization'
        vis_path = os.path.join(args.output_path, vis_folder)
        utils.mkdir(vis_path)
    else:
        vis_path = None
    print("Start testing")
    start_time = time.time()
    evaluate(model, criterions, dataloader_valid, device, args.k, ctx,
                vis_path, args.vis_batch, args.vis_sample, args.missing, args.std)
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Testing time {}'.format(total_time_str))
def parse_args():
    """Build and post-process the command-line arguments for testing.

    Relative paths given on the command line are joined with their parent
    folders here, so downstream code can use them directly.

    Returns:
        argparse.Namespace with resolved ``output_path``, ``val_anno`` and
        (when provided) ``resume`` paths.
    """
    import argparse
    parser = argparse.ArgumentParser(description='FCN Testing')
    parser.add_argument('-d', '--device', default='cuda', help='device')
    parser.add_argument('-ds', '--dataset', default='flatfault-b', type=str, help='dataset name')
    parser.add_argument('-fs', '--file-size', default=None, type=int, help='number of samples in each npy file')

    # Path related
    parser.add_argument('-ap', '--anno-path', default='split_files', help='annotation files location')
    parser.add_argument('-v', '--val-anno', default='flatfault_b_val_invnet.txt', help='name of val anno')
    parser.add_argument('-o', '--output-path', default='Invnet_models', help='path to parent folder to save checkpoints')
    parser.add_argument('-n', '--save-name', default='fcn_l1loss_ffb', help='folder name for this experiment')
    parser.add_argument('-s', '--suffix', type=str, default=None, help='subfolder name for this run')

    # Model related
    parser.add_argument('-m', '--model', type=str, help='inverse model name')
    parser.add_argument('-no', '--norm', default='bn', help='normalization layer type, support bn, in, ln (default: bn)')
    parser.add_argument('-um', '--up-mode', default=None, help='upsampling layer mode such as "nearest", "bicubic", etc.')
    parser.add_argument('-ss', '--sample-spatial', type=float, default=1.0, help='spatial sampling ratio')
    parser.add_argument('-st', '--sample-temporal', type=int, default=1, help='temporal sampling ratio')

    # Test related
    parser.add_argument('-b', '--batch-size', default=50, type=int)
    parser.add_argument('-j', '--workers', default=16, type=int, help='number of data loading workers (default: 16)')
    parser.add_argument('--k', default=1, type=float, help='k in log transformation')
    parser.add_argument('-r', '--resume', default=None, help='resume from checkpoint')
    parser.add_argument('--vis', help='visualization option', action="store_true")
    parser.add_argument('-vsu','--vis-suffix', default=None, type=str, help='visualization suffix')
    parser.add_argument('-vb','--vis-batch', help='number of batch to be visualized', default=0, type=int)
    parser.add_argument('-vsa', '--vis-sample', help='number of samples in a batch to be visualized', default=0, type=int)
    parser.add_argument('--missing', default=0, type=int, help='number of missing traces')
    parser.add_argument('--std', default=0, type=float, help='standard deviation of gaussian noise')

    args = parser.parse_args()
    args.output_path = os.path.join(args.output_path, args.save_name, args.suffix or '')
    args.val_anno = os.path.join(args.anno_path, args.val_anno)
    # Only resolve the checkpoint path when one was actually supplied;
    # os.path.join(..., None) would raise a TypeError (gan_train.py already
    # guards this the same way).
    if args.resume:
        args.resume = os.path.join(args.output_path, args.resume)
    return args
if __name__ == '__main__':
    # Script entry point: parse CLI flags and launch evaluation.
    main(parse_args())
# (file stats row: 10,383 chars, avg line 42.814346, max line 156, py)
# OpenFWI-main/gan_train.py
# © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import os
import sys
import time
import datetime
import json
import torch
from torch import nn
from torch.utils.data import RandomSampler, DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
import torchvision
from torchvision.transforms import Compose
import utils
import network
from dataset import FWIDataset
from scheduler import WarmupMultiStepLR
import transforms as T
# Need to use parallel in apex, torch ddp can cause bugs when computing gradient penalty
import apex.parallel as parallel
# Global step counter shared by train_one_epoch/evaluate (and restored from
# checkpoints in main) so tensorboard curves line up across epochs and resumes.
step = 0
def train_one_epoch(model, model_d, criterion_g, criterion_d, optimizer_g, optimizer_d,
                    lr_schedulers, dataloader, device, epoch, print_freq, writer, n_critic=5):
    """Train generator and discriminator for one epoch (WGAN-GP style).

    The discriminator is updated on every batch; the generator only every
    ``n_critic`` batches (and on the last batch of the epoch).  The module
    global ``step`` is advanced per batch for tensorboard logging.

    Args:
        model: generator (inversion network).
        model_d: discriminator network.
        criterion_g: callable(pred, label, model_d) -> (loss, l1, l2).
        criterion_d: callable(label, pred, model_d) -> (loss, diff, gp).
        lr_schedulers: per-iteration schedulers, stepped once per batch.
        n_critic: discriminator updates per generator update.
    """
    global step
    model.train()
    model_d.train()
    # Logger setup
    metric_logger = utils.MetricLogger(delimiter='  ')
    metric_logger.add_meter('lr_g', utils.SmoothedValue(window_size=1, fmt='{value}'))
    metric_logger.add_meter('lr_d', utils.SmoothedValue(window_size=1, fmt='{value}'))
    metric_logger.add_meter('samples/s', utils.SmoothedValue(window_size=10, fmt='{value:.3f}'))
    header = 'Epoch: [{}]'.format(epoch)
    itr = 0 # step in this epoch
    max_itr = len(dataloader)
    for data, label in metric_logger.log_every(dataloader, print_freq, header):
        start_time = time.time()
        data, label = data.to(device), label.to(device)

        # Update discriminator first; the generator forward is detached
        # (no_grad) so only discriminator gradients are computed here.
        optimizer_d.zero_grad()
        with torch.no_grad():
            pred = model(data)
        loss_d, loss_diff, loss_gp = criterion_d(label, pred, model_d)
        loss_d.backward()
        optimizer_d.step()
        metric_logger.update(loss_diff=loss_diff, loss_gp=loss_gp)

        # Update generator every n_critic batches (and on the final batch).
        if ((itr + 1) % n_critic == 0) or (itr == max_itr - 1):
            optimizer_g.zero_grad()
            pred = model(data)
            loss_g, loss_g1v, loss_g2v = criterion_g(pred, label, model_d)
            loss_g.backward()
            optimizer_g.step()
            metric_logger.update(loss_g1v=loss_g1v, loss_g2v=loss_g2v)

        batch_size = data.shape[0]
        metric_logger.update(lr_g=optimizer_g.param_groups[0]['lr'],
                             lr_d=optimizer_d.param_groups[0]['lr'])
        metric_logger.meters['samples/s'].update(batch_size / (time.time() - start_time))
        if writer:
            writer.add_scalar('loss_diff', loss_diff, step)
            writer.add_scalar('loss_gp', loss_gp, step)
            if ((itr + 1) % n_critic == 0) or (itr == max_itr - 1):
                writer.add_scalar('loss_g1v', loss_g1v, step)
                writer.add_scalar('loss_g2v', loss_g2v, step)
        step += 1
        itr += 1
    # Schedulers are per-iteration elsewhere, but stepped once per epoch here.
    for lr_scheduler in lr_schedulers:
        lr_scheduler.step()
def evaluate(model, criterion, dataloader, device, writer):
    """Validate the generator and log/print the averaged losses.

    Args:
        model: generator network (set to eval mode here).
        criterion: callable(pred, label) -> (loss, l1, l2); called without a
            discriminator, so only the reconstruction terms contribute.
        writer: tensorboard SummaryWriter or None.

    Returns:
        The global average of the combined validation loss.
    """
    model.eval()
    metric_logger = utils.MetricLogger(delimiter='  ')
    header = 'Test:'
    with torch.no_grad():
        for data, label in metric_logger.log_every(dataloader, 20, header):
            data = data.to(device, non_blocking=True)
            label = label.to(device, non_blocking=True)
            pred = model(data)
            loss, loss_g1v, loss_g2v = criterion(pred, label)
            metric_logger.update(loss=loss.item(),
                                 loss_g1v=loss_g1v.item(), loss_g2v=loss_g2v.item())
    # Gather the stats from all processes (distributed validation).
    metric_logger.synchronize_between_processes()
    print(' * Loss {loss.global_avg:.8f}\n'.format(loss=metric_logger.loss))
    if writer:
        # Logged against the global training `step` so curves align.
        writer.add_scalar('loss', metric_logger.loss.global_avg, step)
        writer.add_scalar('loss_g1v', metric_logger.loss_g1v.global_avg, step)
        writer.add_scalar('loss_g2v', metric_logger.loss_g2v.global_avg, step)
    return metric_logger.loss.global_avg
def main(args):
    """Entry point for GAN training: set up data, models, optimizers, loop.

    Handles (optional) distributed initialization, tensorboard writers,
    dataset/dataloader construction, generator + discriminator creation,
    WGAN-GP losses, per-iteration LR schedulers, checkpoint resume, and the
    epoch loop with per-epoch validation and checkpointing.
    """
    global step
    print(args)
    print('torch version: ', torch.__version__)
    print('torchvision version: ', torchvision.__version__)
    utils.mkdir(args.output_path) # create folder to store checkpoints
    utils.init_distributed_mode(args) # distributed mode initialization

    # Set up tensorboard summary writer (main process only when distributed).
    train_writer, val_writer = None, None
    if args.tensorboard:
        utils.mkdir(args.log_path) # create folder to store tensorboard logs
        # NOTE(review): `and` binds tighter than `or`, so this reads
        # `(not distributed) or (rank == 0 and local_rank == 0)` -- confirm
        # that is the intended main-process check.
        if not args.distributed or (args.rank == 0) and (args.local_rank == 0):
            train_writer = SummaryWriter(os.path.join(args.output_path, 'logs', 'train'))
            val_writer = SummaryWriter(os.path.join(args.output_path, 'logs', 'val'))

    device = torch.device(args.device)
    torch.backends.cudnn.benchmark = True

    # Per-dataset normalization context (min/max ranges, file size).
    with open('dataset_config.json') as f:
        try:
            ctx = json.load(f)[args.dataset]
        except KeyError:
            print('Unsupported dataset.')
            sys.exit()
    if args.file_size is not None:
        ctx['file_size'] = args.file_size

    # Create dataset and dataloader
    print('Loading data')
    print('Loading training data')
    # Seismic data is log-transformed, so normalization bounds are too.
    log_data_min = T.log_transform(ctx['data_min'], k=args.k)
    log_data_max = T.log_transform(ctx['data_max'], k=args.k)
    transform_data = Compose([
        T.LogTransform(k=args.k),
        T.MinMaxNormalize(log_data_min, log_data_max)
    ])
    transform_label = Compose([
        T.MinMaxNormalize(ctx['label_min'], ctx['label_max'])
    ])
    # A .txt annotation lists npy files; otherwise a pickled dataset is loaded.
    if args.train_anno[-3:] == 'txt':
        dataset_train = FWIDataset(
            args.train_anno,
            preload=True,
            sample_ratio=args.sample_temporal,
            file_size=ctx['file_size'],
            transform_data=transform_data,
            transform_label=transform_label
        )
    else:
        dataset_train = torch.load(args.train_anno)

    print('Loading validation data')
    if args.val_anno[-3:] == 'txt':
        dataset_valid = FWIDataset(
            args.val_anno,
            preload=True,
            sample_ratio=args.sample_temporal,
            file_size=ctx['file_size'],
            transform_data=transform_data,
            transform_label=transform_label
        )
    else:
        dataset_valid = torch.load(args.val_anno)

    print('Creating data loaders')
    if args.distributed:
        train_sampler = DistributedSampler(dataset_train, shuffle=True)
        valid_sampler = DistributedSampler(dataset_valid, shuffle=True)
    else:
        train_sampler = RandomSampler(dataset_train)
        valid_sampler = RandomSampler(dataset_valid)
    dataloader_train = DataLoader(
        dataset_train, batch_size=args.batch_size,
        sampler=train_sampler, num_workers=args.workers,
        pin_memory=True, drop_last=True, collate_fn=default_collate)
    dataloader_valid = DataLoader(
        dataset_valid, batch_size=args.batch_size,
        sampler=valid_sampler, num_workers=args.workers,
        pin_memory=True, collate_fn=default_collate)

    print('Creating model')
    if args.model not in network.model_dict or args.model_d not in network.model_dict:
        print('Unsupported model.')
        sys.exit()
    model = network.model_dict[args.model](upsample_mode=args.up_mode,
        sample_spatial=args.sample_spatial, sample_temporal=args.sample_temporal).to(device)
    model_d = network.model_dict[args.model_d]().to(device)
    if args.distributed and args.sync_bn:
        model = parallel.convert_syncbn_model(model)
        model_d = parallel.convert_syncbn_model(model_d)

    # Define loss function
    l1loss = nn.L1Loss()
    l2loss = nn.MSELoss()

    def criterion_g(pred, gt, model_d=None):
        # Reconstruction (L1 + L2) plus, when a discriminator is supplied,
        # the Wasserstein adversarial term -E[D(pred)].
        loss_g1v = l1loss(pred, gt)
        loss_g2v = l2loss(pred, gt)
        loss = args.lambda_g1v * loss_g1v + args.lambda_g2v * loss_g2v
        if model_d is not None:
            loss_adv = -torch.mean(model_d(pred))
            loss += args.lambda_adv * loss_adv
        return loss, loss_g1v, loss_g2v

    criterion_d = utils.Wasserstein_GP(device, args.lambda_gp)

    # Scale lr according to effective batch size
    lr_g = args.lr_g * args.world_size
    lr_d = args.lr_d * args.world_size
    optimizer_g = torch.optim.AdamW(model.parameters(), lr=lr_g, betas=(0, 0.9), weight_decay=args.weight_decay)
    optimizer_d = torch.optim.AdamW(model_d.parameters(), lr=lr_d, betas=(0, 0.9), weight_decay=args.weight_decay)

    # Convert scheduler to be per iteration instead of per epoch
    warmup_iters = args.lr_warmup_epochs * len(dataloader_train)
    lr_milestones = [len(dataloader_train) * m for m in args.lr_milestones]
    lr_schedulers = [WarmupMultiStepLR(
        optimizer, milestones=lr_milestones, gamma=args.lr_gamma,
        warmup_iters=warmup_iters, warmup_factor=1e-5) for optimizer in [optimizer_g, optimizer_d]]

    model_without_ddp = model
    model_d_without_ddp = model_d
    if args.distributed:
        model = parallel.DistributedDataParallel(model)
        model_d = parallel.DistributedDataParallel(model_d)
        model_without_ddp = model.module
        model_d_without_ddp = model_d.module

    if args.resume:
        # Restore models, optimizers, schedulers and the global step counter.
        checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(network.replace_legacy(checkpoint['model']))
        model_d_without_ddp.load_state_dict(network.replace_legacy(checkpoint['model_d']))
        optimizer_g.load_state_dict(checkpoint['optimizer_g'])
        optimizer_d.load_state_dict(checkpoint['optimizer_d'])
        args.start_epoch = checkpoint['epoch'] + 1
        step = checkpoint['step']
        for i in range(len(lr_schedulers)):
            lr_schedulers[i].load_state_dict(checkpoint['lr_schedulers'][i])
        # Override restored milestones with the ones from the current run.
        for lr_scheduler in lr_schedulers:
            lr_scheduler.milestones = lr_milestones

    print('Start training')
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, model_d, criterion_g, criterion_d, optimizer_g, optimizer_d,
                        lr_schedulers, dataloader_train, device, epoch,
                        args.print_freq, train_writer, args.n_critic)
        evaluate(model, criterion_g, dataloader_valid, device, val_writer)

        checkpoint = {
            'model': model_without_ddp.state_dict(),
            'model_d': model_d_without_ddp.state_dict(),
            'optimizer_g': optimizer_g.state_dict(),
            'optimizer_d': optimizer_d.state_dict(),
            'lr_schedulers': [scheduler.state_dict() for scheduler in lr_schedulers],
            'epoch': epoch,
            'step': step,
            'args': args}
        # Save checkpoint per epoch
        utils.save_on_master(
            checkpoint,
            os.path.join(args.output_path, 'checkpoint.pth'))
        # Save checkpoint every epoch block
        if args.output_path and (epoch + 1) % args.epoch_block == 0:
            utils.save_on_master(
                checkpoint,
                os.path.join(args.output_path, 'model_{}.pth'.format(epoch + 1)))

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
def parse_args():
    """Build and post-process the command-line arguments for GAN training.

    Resolves output/log/annotation paths, derives ``epochs`` from
    ``epoch_block * num_block`` and resolves the resume checkpoint path
    when one was given.
    """
    import argparse
    parser = argparse.ArgumentParser(description='GAN Training')
    parser.add_argument('-d', '--device', default='cuda', help='device')
    parser.add_argument('-ds', '--dataset', default='flat', type=str, help='dataset name')
    # Fixed: was type=str, but this is a numeric sample count (test.py uses
    # type=int and main() feeds it into ctx['file_size']).
    parser.add_argument('-fs', '--file-size', default=None, type=int, help='number of samples in each npy file')

    # Path related
    parser.add_argument('-ap', '--anno-path', default='/vast/home/aicyd/Desktop/OpenFWI/src/', help='annotation files location')
    parser.add_argument('-t', '--train-anno', default='train_flatvel.json', help='name of train anno')
    parser.add_argument('-v', '--val-anno', default='val_flatvel.json', help='name of val anno')
    parser.add_argument('-o', '--output-path', default='models', help='path to parent folder to save checkpoints')
    parser.add_argument('-l', '--log-path', default='models', help='path to parent folder to save logs')
    parser.add_argument('-n', '--save-name', default='gan', help='folder name for this experiment')
    parser.add_argument('-s', '--suffix', type=str, default=None, help='subfolder name for this run')

    # Model related
    parser.add_argument('-m', '--model', type=str, help='generator name')
    parser.add_argument('-md', '--model-d', default='Discriminator', help='discriminator name')
    parser.add_argument('-um', '--up-mode', default=None, help='upsampling layer mode such as "nearest", "bicubic", etc.')
    parser.add_argument('-ss', '--sample-spatial', type=float, default=1.0, help='spatial sampling ratio')
    parser.add_argument('-st', '--sample-temporal', type=int, default=1, help='temporal sampling ratio')

    # Training related
    parser.add_argument('-nc', '--n_critic', default=5, type=int, help='generator & discriminator update ratio')
    parser.add_argument('-b', '--batch-size', default=64, type=int)
    parser.add_argument('--lr_g', default=0.0001, type=float, help='initial learning rate of generator')
    parser.add_argument('--lr_d', default=0.0001, type=float, help='initial learning rate of discriminator')
    parser.add_argument('-lm', '--lr-milestones', nargs='+', default=[], type=int, help='decrease lr on milestones')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
    parser.add_argument('--weight-decay', default=1e-4 , type=float, help='weight decay (default: 1e-4)')
    parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
    parser.add_argument('--lr-warmup-epochs', default=0, type=int, help='number of warmup epochs')
    parser.add_argument('-eb', '--epoch_block', type=int, default=20, help='epochs in a saved block')
    parser.add_argument('-nb', '--num_block', type=int, default=25, help='number of saved block')
    parser.add_argument('-j', '--workers', default=16, type=int, help='number of data loading workers (default: 16)')
    parser.add_argument('--k', default=1, type=float, help='k in log transformation')
    parser.add_argument('--print-freq', default=20, type=int, help='print frequency')
    parser.add_argument('-r', '--resume', default=None, help='resume from checkpoint')
    parser.add_argument('--start-epoch', default=0, type=int, help='start epoch')

    # Loss related
    parser.add_argument('-g1v', '--lambda_g1v', type=float, default=100.0)
    parser.add_argument('-g2v', '--lambda_g2v', type=float, default=100.0)
    parser.add_argument('-adv', '--lambda_adv', type=float, default=1.0)
    parser.add_argument('-gp', '--lambda_gp', type=float, default=10.0)

    # Distributed training related
    parser.add_argument('--sync-bn', action='store_true', help='Use sync batch norm')
    parser.add_argument('--world-size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')

    # Tensorboard related
    parser.add_argument('--tensorboard', action='store_true', help='Use tensorboard for logging.')

    args = parser.parse_args()
    args.output_path = os.path.join(args.output_path, args.save_name, args.suffix or '')
    args.log_path = os.path.join(args.log_path, args.save_name, args.suffix or '')
    args.train_anno = os.path.join(args.anno_path, args.train_anno)
    args.val_anno = os.path.join(args.anno_path, args.val_anno)
    # Total epochs are derived from block size x block count.
    args.epochs = args.epoch_block * args.num_block
    if args.resume:
        args.resume = os.path.join(args.output_path, args.resume)
    return args
if __name__ == '__main__':
    # Script entry point: parse CLI flags and launch GAN training.
    main(parse_args())
# (file stats row: 16,662 chars, avg line 43.553476, max line 128, py)
# OpenFWI-main/network.py
# © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import ceil
from collections import OrderedDict
# Mapping from the CLI `norm` string to the normalization layer constructor.
# NOTE(review): nn.LayerNorm takes a normalized_shape, unlike the 2-D norms
# which take a channel count -- confirm 'ln' call sites pass the right value.
NORM_LAYERS = { 'bn': nn.BatchNorm2d, 'in': nn.InstanceNorm2d, 'ln': nn.LayerNorm }
# Replace the key names in the checkpoint in which legacy network building blocks are used
def replace_legacy(old_dict):
    """Rename legacy building-block prefixes in a checkpoint state dict.

    Older checkpoints were saved from modules whose inner sequential was
    named after the block class (e.g. ``Conv2DwithBN``); current modules
    name it ``layers``.  Substitutions are applied longest-name-first:
    the original code replaced ``'Conv2DwithBN'`` before
    ``'ResizeConv2DwithBN'``, which turned the latter's keys into
    ``'Resizelayers'`` so they never matched the intended ``'layers'``.

    Parameters
    ----------
    old_dict : mapping of str to tensor
        A (possibly legacy) state dict.

    Returns
    -------
    OrderedDict
        The same values with rewritten keys, preserving iteration order.
    """
    # Longest names first so shorter substrings cannot corrupt longer ones.
    replacements = (
        ('ResizeConv2DwithBN', 'layers'),
        ('Conv2DwithBN_Tanh', 'layers'),
        ('Deconv2DwithBN', 'layers'),
        ('Conv2DwithBN', 'layers'),
    )
    li = []
    for k, v in old_dict.items():
        for legacy, current in replacements:
            k = k.replace(legacy, current)
        li.append((k, v))
    return OrderedDict(li)
class Conv2DwithBN(nn.Module):
    """Legacy conv -> (BN) -> LeakyReLU (-> Dropout) block.

    Kept for loading old checkpoints; new code should use :class:`ConvBlock`.
    The inner sequential is deliberately named ``Conv2DwithBN`` to match the
    parameter keys of legacy state dicts.
    """

    def __init__(self, in_fea, out_fea,
                 kernel_size=3, stride=1, padding=1,
                 bn=True, relu_slop=0.2, dropout=None):
        super(Conv2DwithBN, self).__init__()
        stages = [nn.Conv2d(in_channels=in_fea, out_channels=out_fea,
                            kernel_size=kernel_size, stride=stride, padding=padding)]
        if bn:
            stages.append(nn.BatchNorm2d(num_features=out_fea))
        stages.append(nn.LeakyReLU(relu_slop, inplace=True))
        if dropout:
            # NOTE: the dropout probability is fixed at 0.8 whenever the flag is truthy.
            stages.append(nn.Dropout2d(0.8))
        self.Conv2DwithBN = nn.Sequential(*stages)

    def forward(self, x):
        return self.Conv2DwithBN(x)
class ResizeConv2DwithBN(nn.Module):
    """Legacy upsample -> conv -> BN -> LeakyReLU block (checkpoint-compatible)."""

    def __init__(self, in_fea, out_fea, scale_factor=2, mode='nearest'):
        super(ResizeConv2DwithBN, self).__init__()
        stages = [
            nn.Upsample(scale_factor=scale_factor, mode=mode),
            nn.Conv2d(in_channels=in_fea, out_channels=out_fea, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(num_features=out_fea),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Attribute name matches legacy state-dict keys.
        self.ResizeConv2DwithBN = nn.Sequential(*stages)

    def forward(self, x):
        return self.ResizeConv2DwithBN(x)
class Conv2DwithBN_Tanh(nn.Module):
    """Legacy output block: Conv -> BatchNorm -> Tanh (values in [-1, 1]).

    Kept so old checkpoints keep loading; new code should use ConvBlock_Tanh.
    """

    def __init__(self, in_fea, out_fea, kernel_size=3, stride=1, padding=1):
        super(Conv2DwithBN_Tanh, self).__init__()
        modules = [
            nn.Conv2d(in_channels=in_fea, out_channels=out_fea,
                      kernel_size=kernel_size, stride=stride, padding=padding),
            nn.BatchNorm2d(num_features=out_fea),
            nn.Tanh(),
        ]
        # Legacy attribute name kept for checkpoint compatibility.
        self.Conv2DwithBN = nn.Sequential(*modules)

    def forward(self, x):
        return self.Conv2DwithBN(x)
class ConvBlock(nn.Module):
    """Conv2d -> optional normalization -> LeakyReLU -> optional Dropout2d.

    ``norm`` selects a layer class from NORM_LAYERS ('bn'/'in'/'ln'); any
    other value disables normalization. When ``dropout`` is truthy the drop
    probability is hard-coded to 0.8 (the argument's numeric value is
    ignored).
    """

    def __init__(self, in_fea, out_fea, kernel_size=3, stride=1, padding=1,
                 norm='bn', relu_slop=0.2, dropout=None):
        super(ConvBlock, self).__init__()
        modules = [nn.Conv2d(in_channels=in_fea, out_channels=out_fea,
                             kernel_size=kernel_size, stride=stride,
                             padding=padding)]
        norm_cls = NORM_LAYERS.get(norm)
        if norm_cls is not None:
            modules.append(norm_cls(out_fea))
        modules.append(nn.LeakyReLU(relu_slop, inplace=True))
        if dropout:
            modules.append(nn.Dropout2d(0.8))
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        return self.layers(x)
class ConvBlock_Tanh(nn.Module):
    """Conv2d -> optional normalization -> Tanh; used as the final output layer."""

    def __init__(self, in_fea, out_fea, kernel_size=3, stride=1, padding=1, norm='bn'):
        super(ConvBlock_Tanh, self).__init__()
        modules = [nn.Conv2d(in_channels=in_fea, out_channels=out_fea,
                             kernel_size=kernel_size, stride=stride,
                             padding=padding)]
        norm_cls = NORM_LAYERS.get(norm)
        if norm_cls is not None:
            modules.append(norm_cls(out_fea))
        modules.append(nn.Tanh())
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        return self.layers(x)
class DeconvBlock(nn.Module):
    """ConvTranspose2d -> optional normalization -> LeakyReLU(0.2) upsampling block."""

    def __init__(self, in_fea, out_fea, kernel_size=2, stride=2, padding=0,
                 output_padding=0, norm='bn'):
        super(DeconvBlock, self).__init__()
        modules = [nn.ConvTranspose2d(in_channels=in_fea, out_channels=out_fea,
                                      kernel_size=kernel_size, stride=stride,
                                      padding=padding,
                                      output_padding=output_padding)]
        norm_cls = NORM_LAYERS.get(norm)
        if norm_cls is not None:
            modules.append(norm_cls(out_fea))
        modules.append(nn.LeakyReLU(0.2, inplace=True))
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        return self.layers(x)
class ResizeBlock(nn.Module):
    """Upsample -> 3x3 Conv -> optional normalization -> LeakyReLU(0.2).

    Upsample+conv alternative to DeconvBlock for decoder stages.
    """

    def __init__(self, in_fea, out_fea, scale_factor=2, mode='nearest', norm='bn'):
        super(ResizeBlock, self).__init__()
        modules = [
            nn.Upsample(scale_factor=scale_factor, mode=mode),
            nn.Conv2d(in_channels=in_fea, out_channels=out_fea,
                      kernel_size=3, stride=1, padding=1),
        ]
        norm_cls = NORM_LAYERS.get(norm)
        if norm_cls is not None:
            modules.append(norm_cls(out_fea))
        modules.append(nn.LeakyReLU(0.2, inplace=True))
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        return self.layers(x)
# FlatFault/CurveFault
# 1000, 70 -> 70, 70
class InversionNet(nn.Module):
    """Encoder-decoder network mapping seismic gathers to a velocity map.

    The encoder first shrinks the long time axis with (k, 1) convolutions,
    then mixes both axes down to a 1x1 bottleneck of ``dim5`` channels; the
    decoder upsamples with transposed convolutions and crops back to a
    70x70 map in [-1, 1] (final Tanh). The inline shape comments trace a
    5-channel 1000x70 input.

    Args:
        dim1..dim5: channel widths of the successive encoder/decoder stages.
        sample_spatial: spatial (receiver-axis) downsampling ratio of the
            input; scales the width of the bottleneck kernel accordingly.

    Note: attribute names below define the checkpoint state-dict keys and
    must not be renamed.
    """
    def __init__(self, dim1=32, dim2=64, dim3=128, dim4=256, dim5=512, sample_spatial=1.0, **kwargs):
        super(InversionNet, self).__init__()
        self.convblock1 = ConvBlock(5, dim1, kernel_size=(7, 1), stride=(2, 1), padding=(3, 0))
        self.convblock2_1 = ConvBlock(dim1, dim2, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
        self.convblock2_2 = ConvBlock(dim2, dim2, kernel_size=(3, 1), padding=(1, 0))
        self.convblock3_1 = ConvBlock(dim2, dim2, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
        self.convblock3_2 = ConvBlock(dim2, dim2, kernel_size=(3, 1), padding=(1, 0))
        self.convblock4_1 = ConvBlock(dim2, dim3, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
        self.convblock4_2 = ConvBlock(dim3, dim3, kernel_size=(3, 1), padding=(1, 0))
        self.convblock5_1 = ConvBlock(dim3, dim3, stride=2)
        self.convblock5_2 = ConvBlock(dim3, dim3)
        self.convblock6_1 = ConvBlock(dim3, dim4, stride=2)
        self.convblock6_2 = ConvBlock(dim4, dim4)
        self.convblock7_1 = ConvBlock(dim4, dim4, stride=2)
        self.convblock7_2 = ConvBlock(dim4, dim4)
        # Bottleneck: kernel spans the whole remaining feature map -> 1x1 output.
        self.convblock8 = ConvBlock(dim4, dim5, kernel_size=(8, ceil(70 * sample_spatial / 8)), padding=0)
        self.deconv1_1 = DeconvBlock(dim5, dim5, kernel_size=5)
        self.deconv1_2 = ConvBlock(dim5, dim5)
        self.deconv2_1 = DeconvBlock(dim5, dim4, kernel_size=4, stride=2, padding=1)
        self.deconv2_2 = ConvBlock(dim4, dim4)
        self.deconv3_1 = DeconvBlock(dim4, dim3, kernel_size=4, stride=2, padding=1)
        self.deconv3_2 = ConvBlock(dim3, dim3)
        self.deconv4_1 = DeconvBlock(dim3, dim2, kernel_size=4, stride=2, padding=1)
        self.deconv4_2 = ConvBlock(dim2, dim2)
        self.deconv5_1 = DeconvBlock(dim2, dim1, kernel_size=4, stride=2, padding=1)
        self.deconv5_2 = ConvBlock(dim1, dim1)
        self.deconv6 = ConvBlock_Tanh(dim1, 1)
    def forward(self,x):
        # Encoder Part
        x = self.convblock1(x) # (None, 32, 500, 70)
        x = self.convblock2_1(x) # (None, 64, 250, 70)
        x = self.convblock2_2(x) # (None, 64, 250, 70)
        x = self.convblock3_1(x) # (None, 64, 125, 70)
        x = self.convblock3_2(x) # (None, 64, 125, 70)
        x = self.convblock4_1(x) # (None, 128, 63, 70)
        x = self.convblock4_2(x) # (None, 128, 63, 70)
        x = self.convblock5_1(x) # (None, 128, 32, 35)
        x = self.convblock5_2(x) # (None, 128, 32, 35)
        x = self.convblock6_1(x) # (None, 256, 16, 18)
        x = self.convblock6_2(x) # (None, 256, 16, 18)
        x = self.convblock7_1(x) # (None, 256, 8, 9)
        x = self.convblock7_2(x) # (None, 256, 8, 9)
        x = self.convblock8(x) # (None, 512, 1, 1)
        # Decoder Part
        x = self.deconv1_1(x) # (None, 512, 5, 5)
        x = self.deconv1_2(x) # (None, 512, 5, 5)
        x = self.deconv2_1(x) # (None, 256, 10, 10)
        x = self.deconv2_2(x) # (None, 256, 10, 10)
        x = self.deconv3_1(x) # (None, 128, 20, 20)
        x = self.deconv3_2(x) # (None, 128, 20, 20)
        x = self.deconv4_1(x) # (None, 64, 40, 40)
        x = self.deconv4_2(x) # (None, 64, 40, 40)
        x = self.deconv5_1(x) # (None, 32, 80, 80)
        x = self.deconv5_2(x) # (None, 32, 80, 80)
        # Negative padding crops 5 pixels from each border: 80x80 -> 70x70.
        x = F.pad(x, [-5, -5, -5, -5], mode="constant", value=0) # (None, 32, 70, 70) 125, 100
        x = self.deconv6(x) # (None, 1, 70, 70)
        return x
class FCN4_Deep_Resize_2(nn.Module):
    """Variant of InversionNet whose decoder upsamples with Upsample+Conv.

    Uses the legacy Conv2DwithBN / ResizeConv2DwithBN blocks (so old
    checkpoints load) and replaces the transposed-conv decoder stages with
    resize-convolutions. Registered as 'UPFWI' in ``model_dict``.
    Shape comments trace a 5-channel 1000x70 input down to a 1x1 bottleneck
    and back up to a (None, 1, 70, 70) output in [-1, 1].

    Args:
        dim1..dim5: channel widths of the successive stages.
        ratio: spatial (receiver-axis) downsampling ratio; scales the width
            of the bottleneck kernel.
        upsample_mode: interpolation mode passed to nn.Upsample.
    """
    def __init__(self, dim1=32, dim2=64, dim3=128, dim4=256, dim5=512, ratio=1.0, upsample_mode='nearest'):
        super(FCN4_Deep_Resize_2, self).__init__()
        self.convblock1 = Conv2DwithBN(5, dim1, kernel_size=(7, 1), stride=(2, 1), padding=(3, 0))
        self.convblock2_1 = Conv2DwithBN(dim1, dim2, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
        self.convblock2_2 = Conv2DwithBN(dim2, dim2, kernel_size=(3, 1), padding=(1, 0))
        self.convblock3_1 = Conv2DwithBN(dim2, dim2, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
        self.convblock3_2 = Conv2DwithBN(dim2, dim2, kernel_size=(3, 1), padding=(1, 0))
        self.convblock4_1 = Conv2DwithBN(dim2, dim3, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
        self.convblock4_2 = Conv2DwithBN(dim3, dim3, kernel_size=(3, 1), padding=(1, 0))
        self.convblock5_1 = Conv2DwithBN(dim3, dim3, stride=2)
        self.convblock5_2 = Conv2DwithBN(dim3, dim3)
        self.convblock6_1 = Conv2DwithBN(dim3, dim4, stride=2)
        self.convblock6_2 = Conv2DwithBN(dim4, dim4)
        self.convblock7_1 = Conv2DwithBN(dim4, dim4, stride=2)
        self.convblock7_2 = Conv2DwithBN(dim4, dim4)
        # Bottleneck: kernel spans the whole remaining feature map -> 1x1 output.
        self.convblock8 = Conv2DwithBN(dim4, dim5, kernel_size=(8, ceil(70 * ratio / 8)), padding=0)
        self.deconv1_1 = ResizeConv2DwithBN(dim5, dim5, scale_factor=5, mode=upsample_mode)
        self.deconv1_2 = Conv2DwithBN(dim5, dim5)
        self.deconv2_1 = ResizeConv2DwithBN(dim5, dim4, scale_factor=2, mode=upsample_mode)
        self.deconv2_2 = Conv2DwithBN(dim4, dim4)
        self.deconv3_1 = ResizeConv2DwithBN(dim4, dim3, scale_factor=2, mode=upsample_mode)
        self.deconv3_2 = Conv2DwithBN(dim3, dim3)
        self.deconv4_1 = ResizeConv2DwithBN(dim3, dim2, scale_factor=2, mode=upsample_mode)
        self.deconv4_2 = Conv2DwithBN(dim2, dim2)
        self.deconv5_1 = ResizeConv2DwithBN(dim2, dim1, scale_factor=2, mode=upsample_mode)
        self.deconv5_2 = Conv2DwithBN(dim1, dim1)
        self.deconv6 = Conv2DwithBN_Tanh(dim1, 1)
    def forward(self,x):
        # Encoder Part
        x = self.convblock1(x) # (None, 32, 500, 70)
        x = self.convblock2_1(x) # (None, 64, 250, 70)
        x = self.convblock2_2(x) # (None, 64, 250, 70)
        x = self.convblock3_1(x) # (None, 64, 125, 70)
        x = self.convblock3_2(x) # (None, 64, 125, 70)
        x = self.convblock4_1(x) # (None, 128, 63, 70)
        x = self.convblock4_2(x) # (None, 128, 63, 70)
        x = self.convblock5_1(x) # (None, 128, 32, 35)
        x = self.convblock5_2(x) # (None, 128, 32, 35)
        x = self.convblock6_1(x) # (None, 256, 16, 18)
        x = self.convblock6_2(x) # (None, 256, 16, 18)
        x = self.convblock7_1(x) # (None, 256, 8, 9)
        x = self.convblock7_2(x) # (None, 256, 8, 9)
        x = self.convblock8(x) # (None, 512, 1, 1)
        # Decoder Part
        x = self.deconv1_1(x) # (None, 512, 5, 5)
        x = self.deconv1_2(x) # (None, 512, 5, 5)
        x = self.deconv2_1(x) # (None, 256, 10, 10)
        x = self.deconv2_2(x) # (None, 256, 10, 10)
        x = self.deconv3_1(x) # (None, 128, 20, 20)
        x = self.deconv3_2(x) # (None, 128, 20, 20)
        x = self.deconv4_1(x) # (None, 64, 40, 40)
        x = self.deconv4_2(x) # (None, 64, 40, 40)
        x = self.deconv5_1(x) # (None, 32, 80, 80)
        x = self.deconv5_2(x) # (None, 32, 80, 80)
        # Negative padding crops 5 pixels from each border: 80x80 -> 70x70.
        x = F.pad(x, [-5, -5, -5, -5], mode="constant", value=0) # (None, 32, 70, 70)
        x = self.deconv6(x) # (None, 1, 70, 70)
        return x
class Discriminator(nn.Module):
    """Fully-convolutional discriminator over single-channel maps.

    Four stride-2 down-sampling stages, each followed by a refinement
    ConvBlock, end in a 5x5 valid convolution whose output is flattened to
    (batch, -1); by the layer arithmetic a 70x70 input yields one scalar
    per sample. Attribute names are part of the checkpoint state-dict keys
    and must not change.
    """

    def __init__(self, dim1=32, dim2=64, dim3=128, dim4=256, **kwargs):
        super(Discriminator, self).__init__()
        self.convblock1_1 = ConvBlock(1, dim1, stride=2)
        self.convblock1_2 = ConvBlock(dim1, dim1)
        self.convblock2_1 = ConvBlock(dim1, dim2, stride=2)
        self.convblock2_2 = ConvBlock(dim2, dim2)
        self.convblock3_1 = ConvBlock(dim2, dim3, stride=2)
        self.convblock3_2 = ConvBlock(dim3, dim3)
        self.convblock4_1 = ConvBlock(dim3, dim4, stride=2)
        self.convblock4_2 = ConvBlock(dim4, dim4)
        self.convblock5 = ConvBlock(dim4, 1, kernel_size=5, padding=0)

    def forward(self, x):
        stages = (self.convblock1_1, self.convblock1_2,
                  self.convblock2_1, self.convblock2_2,
                  self.convblock3_1, self.convblock3_2,
                  self.convblock4_1, self.convblock4_2,
                  self.convblock5)
        for stage in stages:
            x = stage(x)
        return x.view(x.shape[0], -1)
class Conv_HPGNN(nn.Module):
    """Two ConvBlocks (slope 0.1, dropout enabled) plus an optional MaxPool2d.

    Pooling is appended only when ``kernel_size`` is given; ``stride`` and
    ``padding`` are forwarded to the pool in that case.
    """

    def __init__(self, in_fea, out_fea, kernel_size=None, stride=None, padding=None, **kwargs):
        super(Conv_HPGNN, self).__init__()
        pool = None
        if kernel_size is not None:
            pool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding)
        modules = [
            ConvBlock(in_fea, out_fea, relu_slop=0.1, dropout=0.8),
            ConvBlock(out_fea, out_fea, relu_slop=0.1, dropout=0.8),
        ]
        if pool is not None:
            modules.append(pool)
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        return self.layers(x)
class Deconv_HPGNN(nn.Module):
    """Transposed conv (stride 2) followed by two ConvBlocks (slope 0.1, dropout)."""

    def __init__(self, in_fea, out_fea, kernel_size, **kwargs):
        super(Deconv_HPGNN, self).__init__()
        self.layers = nn.Sequential(
            nn.ConvTranspose2d(in_fea, in_fea, kernel_size=kernel_size,
                               stride=2, padding=0),
            ConvBlock(in_fea, out_fea, relu_slop=0.1, dropout=0.8),
            ConvBlock(out_fea, out_fea, relu_slop=0.1, dropout=0.8),
        )

    def forward(self, x):
        return self.layers(x)
# Registry of model constructors by name. 'UPFWI' maps to the legacy
# resize-upsampling generator (FCN4_Deep_Resize_2).
model_dict = {
    'InversionNet': InversionNet,
    'Discriminator': Discriminator,
    'UPFWI': FCN4_Deep_Resize_2
}
| 14,861 | 45.15528 | 167 | py |
OpenFWI | OpenFWI-main/vis.py | import os
import torch
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
# Load colormap for velocity map visualization.
# 'rainbow256.npy' must be present in the working directory; presumably a
# 256-entry RGB(A) array (ListedColormap accepts Nx3/Nx4) — loaded once at
# import time.
rainbow_cmap = ListedColormap(np.load('rainbow256.npy'))
def plot_velocity(output, target, path, vmin=None, vmax=None):
    """Save a side-by-side comparison of predicted and ground-truth velocity maps.

    Both panels share one color scale. If either bound is omitted, BOTH are
    taken from the ground truth — NOTE(review): a caller-supplied vmin alone
    is ignored in that case; confirm this is intended.

    Args:
        output: 2D array, predicted velocity map.
        target: 2D array, ground-truth velocity map.
        path: file path for the saved figure.
        vmin, vmax: optional color-scale bounds (m/s).
    """
    fig, ax = plt.subplots(1, 2, figsize=(11, 5))
    if vmin is None or vmax is None:
        vmax, vmin = np.max(target), np.min(target)
    im = ax[0].matshow(output, cmap=rainbow_cmap, vmin=vmin, vmax=vmax)
    ax[0].set_title('Prediction', y=1.08)
    ax[1].matshow(target, cmap=rainbow_cmap, vmin=vmin, vmax=vmax)
    ax[1].set_title('Ground Truth', y=1.08)
    for axis in ax:
        # axis.set_xticks(range(0, 70, 10))
        # axis.set_xticklabels(range(0, 1050, 150))
        # axis.set_yticks(range(0, 70, 10))
        # axis.set_yticklabels(range(0, 1050, 150))
        # Tick labels assume a 70x70 grid with 10 m cells (0-700 m).
        axis.set_xticks(range(0, 70, 10))
        axis.set_xticklabels(range(0, 700, 100))
        axis.set_yticks(range(0, 70, 10))
        axis.set_yticklabels(range(0, 700, 100))
        axis.set_ylabel('Depth (m)', fontsize=12)
        axis.set_xlabel('Offset (m)', fontsize=12)
    fig.colorbar(im, ax=ax, shrink=0.75, label='Velocity(m/s)')
    plt.savefig(path)
    plt.close('all')
def plot_single_velocity(label, path):
    """Save a single velocity map; the colorbar spans the data's own min/max.

    Args:
        label: 2D array, velocity map to plot.
        path: file path for the saved figure.
    """
    plt.rcParams.update({'font.size': 16})
    fig, ax = plt.subplots(1, 1, figsize=(8, 6))
    vmax, vmin = np.max(label), np.min(label)
    im = ax.matshow(label, cmap=rainbow_cmap, vmin=vmin, vmax=vmax)
    # im = ax.matshow(label, cmap="gist_rainbow", vmin=vmin, vmax=vmax)
    # nx = label.shape[0]
    # ax.set_aspect(aspect=1)
    # ax.set_xticks(range(0, nx, int(150//(1050/nx)))[:7])
    # ax.set_xticklabels(range(0, 1050, 150))
    # ax.set_yticks(range(0, nx, int(150//(1050/nx)))[:7])
    # ax.set_yticklabels(range(0, 1050, 150))
    # ax.set_title('Offset (m)', y=1.08)
    # ax.set_ylabel('Depth (m)', fontsize=18)
    fig.colorbar(im, ax=ax, shrink=1.0, label='Velocity(m/s)')
    plt.savefig(path)
    plt.close('all')
# def plot_seismic(output, target, path, vmin=-1e-5, vmax=1e-5):
# fig, ax = plt.subplots(1, 3, figsize=(15, 6))
# im = ax[0].matshow(output, aspect='auto', cmap='gray', vmin=vmin, vmax=vmax)
# ax[0].set_title('Prediction')
# ax[1].matshow(target, aspect='auto', cmap='gray', vmin=vmin, vmax=vmax)
# ax[1].set_title('Ground Truth')
# ax[2].matshow(output - target, aspect='auto', cmap='gray', vmin=vmin, vmax=vmax)
# ax[2].set_title('Difference')
# fig.colorbar(im, ax=ax, format='%.1e')
# plt.savefig(path)
# plt.close('all')
def plot_seismic(output, target, path, vmin=-1e-5, vmax=1e-5):
    """Save ground truth, prediction and their difference as gray-scale panels.

    All three panels share the amplitude range [vmin, vmax]; the colorbar is
    attached to the ground-truth image (same scale for all). NOTE(review):
    the first two panels use the data-derived aspect ratio while the
    difference panel uses 'auto' — confirm the inconsistency is intended.

    Args:
        output, target: 2D arrays of seismic amplitudes.
        path: file path for the saved figure.
        vmin, vmax: shared amplitude color bounds.
    """
    fig, ax = plt.subplots(1, 3, figsize=(20, 5))
    # fig, ax = plt.subplots(1, 2, figsize=(11, 5))
    aspect = output.shape[1]/output.shape[0]
    im = ax[0].matshow(target, aspect=aspect, cmap='gray', vmin=vmin, vmax=vmax)
    ax[0].set_title('Ground Truth')
    ax[1].matshow(output, aspect=aspect, cmap='gray', vmin=vmin, vmax=vmax)
    ax[1].set_title('Prediction')
    ax[2].matshow(output - target, aspect='auto', cmap='gray', vmin=vmin, vmax=vmax)
    ax[2].set_title('Difference')
    # for axis in ax:
    #     axis.set_xticks(range(0, 70, 10))
    #     axis.set_xticklabels(range(0, 1050, 150))
    #     axis.set_title('Offset (m)', y=1.1)
    #     axis.set_ylabel('Time (ms)', fontsize=12)
    # fig.colorbar(im, ax=ax, shrink=1.0, pad=0.01, label='Amplitude')
    fig.colorbar(im, ax=ax, shrink=0.75, label='Amplitude')
    plt.savefig(path)
    plt.close('all')
def plot_single_seismic(data, path):
    """Save one seismic gather as a gray-scale image.

    The color range is clipped to 1% of the data extremes (strong amplitudes
    saturate). Tick labels assume a 1050 m offset axis and a 1000 ms time
    axis regardless of the array size.

    Args:
        data: 2D array of shape (time, receivers).
        path: file path for the saved figure.
    """
    nz, nx = data.shape
    plt.rcParams.update({'font.size': 18})
    vmin, vmax = np.min(data), np.max(data)
    fig, ax = plt.subplots(1, 1, figsize=(8, 6))
    # vmin/vmax scaled by 0.01: clip the color scale to 1% of the extremes.
    im = ax.matshow(data, aspect='auto', cmap='gray', vmin=vmin * 0.01, vmax=vmax * 0.01)
    ax.set_aspect(aspect=nx/nz)
    ax.set_xticks(range(0, nx, int(300//(1050/nx)))[:5])
    ax.set_xticklabels(range(0, 1050, 300))
    ax.set_title('Offset (m)', y=1.08)
    ax.set_yticks(range(0, nz, int(200//(1000/nz)))[:5])
    ax.set_yticklabels(range(0, 1000, 200))
    ax.set_ylabel('Time (ms)', fontsize=18)
    fig.colorbar(im, ax=ax, shrink=1.0, pad=0.01, label='Amplitude')
    plt.savefig(path)
    plt.close('all')
| 4,324 | 38.318182 | 89 | py |
OpenFWI | OpenFWI-main/utils.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
from collections import defaultdict, deque
import datetime
import time
import torch
import torch.distributed as dist
import torch.autograd as autograd
from torch.autograd import Variable
import torch.nn as nn
import errno
import os
import itertools
from torchvision.models import vgg16
import numpy as np
class SmoothedValue(object):
    """Track a stream of scalar values with a sliding-window view.

    Keeps the last ``window_size`` raw values (for median/avg/max/value)
    plus a running total and count (for the global average).
    """

    def __init__(self, window_size=20, fmt=None):
        # Default display: windowed median plus global average.
        self.fmt = fmt if fmt is not None else "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0

    def update(self, value, n=1):
        """Record ``value`` (e.g. a batch average) observed over ``n`` samples."""
        self.deque.append(value)
        self.count = self.count + n
        self.total = self.total + value * n

    def synchronize_between_processes(self):
        """All-reduce count/total across workers.

        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        stats = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        count, total = stats.tolist()
        self.count = int(count)
        self.total = total

    @property
    def median(self):
        # torch.median returns the lower of the two middles for even counts.
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(median=self.median,
                               avg=self.avg,
                               global_avg=self.global_avg,
                               max=self.max,
                               value=self.value)
class MetricLogger(object):
    """Collect named SmoothedValue meters and pretty-print them during training."""
    def __init__(self, delimiter="\t"):
        # Unknown meter names are created lazily with default SmoothedValue settings.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, **kwargs):
        """Update one meter per keyword; tensors are converted to Python scalars."""
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
    def __getattr__(self, attr):
        # Only invoked for attributes not found normally: expose meters as
        # attributes (e.g. logger.loss), falling back to instance __dict__.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))
    def __str__(self):
        # "name1: meter1<delim>name2: meter2..." using each meter's fmt.
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)
    def synchronize_between_processes(self):
        """All-reduce every meter's count/total across distributed workers."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()
    def add_meter(self, name, meter):
        """Register a meter with custom settings under ``name``."""
        self.meters[name] = meter
    def log_every(self, iterable, print_freq, header=None):
        """Yield items from ``iterable``, printing progress every ``print_freq`` steps.

        Each printed line shows progress, ETA, the meters, per-iteration and
        data-loading times, and (on CUDA) peak memory. When ``iterable`` is a
        list of iterables they are zipped together, cycling the shorter ones
        up to the length of the longest.
        """
        if isinstance(iterable, list):
            length = max(len(x) for x in iterable)
            iterable = [x if len(x) == length else itertools.cycle(x) for x in iterable]
            iterable = zip(*iterable)
        else:
            length = len(iterable)
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Pad the iteration counter to the width of the total length.
        space_fmt = ':' + str(len(str(length))) + 'd'
        if torch.cuda.is_available():
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}',
                'max mem: {memory:.0f}'
            ])
        else:
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}'
            ])
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # Time between iterations before the yield is data-loading time.
            data_time.update(time.time() - end)
            yield obj  # <-- yield the batch in for loop
            iter_time.update(time.time() - end)
            if i % print_freq == 0:
                eta_seconds = iter_time.global_avg * (length - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, length, eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, length, eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {}'.format(header, total_time_str))
# Legacy code
class ContentLoss(nn.Module):
    """Legacy weighted L1 + MSE content loss between model(input) and target.

    Args:
        args: namespace providing ``lambda_l1`` and ``lambda_l2`` weights.

    ``forward`` returns ``(total_loss, parts)`` where ``parts`` maps
    'loss_l1'/'loss_l2' to the individual *unweighted* terms.
    """
    def __init__(self, args):
        super(ContentLoss, self).__init__()
        names = ['l1', 'l2']
        self.loss_names = ['loss_' + n for n in names]
        for key in ['lambda_' + n for n in names]:
            setattr(self, key, getattr(args, key))
        self.l1loss = nn.L1Loss()
        self.l2loss = nn.MSELoss()

    def forward(self, model, input, target):
        pred = model(input)
        loss_l1 = self.l1loss(target, pred)
        loss_l2 = self.l2loss(target, pred)
        loss = loss_l1 * self.lambda_l1 + loss_l2 * self.lambda_l2
        # Explicit dict instead of the previous eval()-over-locals() trick.
        return loss, {'loss_l1': loss_l1, 'loss_l2': loss_l2}
# Legacy code
class IdenticalLoss(nn.Module):
    """Legacy identity (round-trip) loss: input -> s2v -> v2s should return input.

    Args:
        args: namespace providing ``lambda_id1s`` (L1 weight) and
            ``lambda_id2s`` (MSE weight).

    ``forward`` returns ``(total_loss, parts)`` where ``parts`` maps
    'loss_id1s'/'loss_id2s' to the individual *unweighted* terms.
    """
    def __init__(self, args):
        super(IdenticalLoss, self).__init__()
        names = ['id1s', 'id2s']
        self.loss_names = ['loss_' + n for n in names]
        for key in ['lambda_' + n for n in names]:
            setattr(self, key, getattr(args, key))
        self.l1loss = nn.L1Loss()
        self.l2loss = nn.MSELoss()

    def forward(self, model_s2v, model_v2s, input):
        mid = model_s2v(input)
        pred = model_v2s(mid)
        cal_loss = lambda x, y: (self.l1loss(x, y), self.l2loss(x, y))
        loss_id1s, loss_id2s = cal_loss(input, pred)
        loss = loss_id1s * self.lambda_id1s + loss_id2s * self.lambda_id2s
        # Explicit dict instead of the previous eval()-over-locals() trick.
        return loss, {'loss_id1s': loss_id1s, 'loss_id2s': loss_id2s}
# Implemented according to H-PGNN, not useful
class NMSELoss(nn.Module):
    """MSE normalized per sample by the target's spatial maximum (plus 1e-5)."""

    def __init__(self):
        super(NMSELoss, self).__init__()

    def forward(self, pred, gt):
        peak = torch.amax(gt, (-2, -1), keepdim=True) + 1e-5
        normalized_err = (pred - gt) / peak
        return torch.mean(normalized_err ** 2)
class CycleLoss(nn.Module):
    """Weighted L1/MSE terms for the cycle (seismic <-> velocity) training setup.

    For each prediction/reconstruction that is not None, an L1 ('1') and an
    MSE ('2') term is computed against the matching target:
      g1v/g2v: pred_v vs. label     g1s/g2s: pred_s vs. data
      c1v/c2v: recon_v vs. label    c1s/c2s: recon_s vs. data
    Each term is weighted by the corresponding ``lambda_*`` taken from
    ``args``. Returns ``(total_loss, parts)`` where ``parts`` maps
    'loss_<name>' to the individual *unweighted* terms (0 when skipped).
    """
    def __init__(self, args):
        super(CycleLoss, self).__init__()
        names = ['g1v', 'g2v', 'g1s', 'g2s', 'c1v', 'c2v', 'c1s', 'c2s']
        self.loss_names = ['loss_' + n for n in names]
        for key in ['lambda_' + n for n in names]:
            setattr(self, key, getattr(args, key))
        self.l1loss = nn.L1Loss()
        self.l2loss = nn.MSELoss()

    def forward(self, data, label, pred_s=None, pred_v=None, recon_s=None, recon_v=None):
        cal_loss = lambda x, y: (self.l1loss(x, y), self.l2loss(x, y))
        loss_g1v = loss_g2v = loss_g1s = loss_g2s = 0
        if pred_v is not None:
            loss_g1v, loss_g2v = cal_loss(pred_v, label)
        if pred_s is not None:
            loss_g1s, loss_g2s = cal_loss(pred_s, data)
        loss_c1v = loss_c2v = loss_c1s = loss_c2s = 0
        if recon_v is not None:
            loss_c1v, loss_c2v = cal_loss(recon_v, label)
        if recon_s is not None:
            loss_c1s, loss_c2s = cal_loss(recon_s, data)
        # Explicit mapping instead of the previous eval()-over-locals() trick.
        losses = {
            'loss_g1v': loss_g1v, 'loss_g2v': loss_g2v,
            'loss_g1s': loss_g1s, 'loss_g2s': loss_g2s,
            'loss_c1v': loss_c1v, 'loss_c2v': loss_c2v,
            'loss_c1s': loss_c1s, 'loss_c2s': loss_c2s,
        }
        loss = sum(losses[name] * getattr(self, 'lambda_' + name[len('loss_'):])
                   for name in self.loss_names)
        return loss, losses
# Legacy code
class _CycleLoss(nn.Module):
def __init__(self, args):
super(_CycleLoss, self).__init__()
names = ['g1v', 'g2v', 'g1s', 'g2s', 'c1v', 'c2v', 'c1s', 'c2s']
self.loss_names = ['loss_' + n for n in names]
for key in ['lambda_' + n for n in names]:
setattr(self, key, getattr(args, key))
self.l1loss = nn.L1Loss()
self.l2loss = nn.MSELoss()
def forward(self, data, label, pred_s=None, pred_v=None, recon_s=None, recon_v=None):
cal_loss = lambda x, y: (self.l1loss(x, y), self.l2loss(x, y))
loss_g1v, loss_g2v, loss_g1s, loss_g2s = [0] * 4
if pred_v is not None and (self.lambda_g1v != 0 or self.lambda_g2v != 0):
loss_g1v, loss_g2v = cal_loss(pred_v, label)
if pred_s is not None and (self.lambda_g1s != 0 or self.lambda_g2s != 0):
loss_g1s, loss_g2s = cal_loss(pred_s, data)
loss_c1v, loss_c2v, loss_c1s , loss_c2s = [0] * 4
if recon_v is not None and (self.lambda_c1v != 0 or self.lambda_c2v != 0):
loss_c1v, loss_c2v = cal_loss(recon_v, label)
if recon_s is not None and (self.lambda_c1s != 0 or self.lambda_c2s != 0):
loss_c1s, loss_c2s = cal_loss(recon_s, data)
loss = loss_g1v * self.lambda_g1v + loss_g2v * self.lambda_g2v + \
loss_g1s * self.lambda_g1s + loss_g2s * self.lambda_g2s + \
loss_c1v * self.lambda_c1v + loss_c2v * self.lambda_c2v + \
loss_c1s * self.lambda_c1s + loss_c2s * self.lambda_c2s
scope = locals()
return loss, { k: eval(k, scope) for k in self.loss_names }
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: (batch, classes) score tensor.
        target: (batch,) integer class labels.
        topk: tuple of k values to report.

    Returns:
        A list of scalar tensors, one accuracy percentage per k in ``topk``.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, top_pred = output.topk(maxk, 1, True, True)
        # (maxk, batch) boolean grid: row j is "j-th best guess was correct".
        hits = top_pred.t().eq(target[None])
        return [hits[:k].flatten().sum(dtype=torch.float32) * (100.0 / batch_size)
                for k in topk]
def mkdir(path):
    """Create ``path`` (including parents), ignoring an already-existing target."""
    try:
        os.makedirs(path)
    except FileExistsError:
        # Equivalent to the old errno.EEXIST check: FileExistsError is the
        # OSError subclass raised for EEXIST; any other OSError propagates.
        pass
def setup_for_distributed(is_master):
    """Replace builtins.print so only the master process produces output.

    Non-master processes stay silent unless the caller passes ``force=True``
    to print(). This function mutates the global builtin in place.
    """
    import builtins
    original_print = builtins.print

    def master_only_print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if force or is_master:
            original_print(*args, **kwargs)

    builtins.print = master_only_print
def is_dist_avail_and_initialized():
    """Return True only when torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """World size of the default process group, or 1 when not distributed."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    """Rank of this process in the default group, or 0 when not distributed."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    """True on the rank-0 (master) process."""
    rank = get_rank()
    return rank == 0
def save_on_master(*args, **kwargs):
    """torch.save that is a no-op on non-master processes."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
def init_distributed_mode(args):
    """Initialize torch.distributed from the environment and mutate ``args``.

    Supports torchrun/launch-style env vars (RANK/WORLD_SIZE/LOCAL_RANK) and
    SLURM (SLURM_PROCID); otherwise falls back to single-process mode with
    ``args.distributed = False``. On success sets args.rank / local_rank /
    distributed / dist_backend, selects the CUDA device, initializes the
    NCCL process group via ``args.dist_url``, and silences print() on
    non-master ranks.
    """
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.local_rank = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ and args.world_size > 1:
        args.rank = int(os.environ['SLURM_PROCID'])
        # One process per GPU: derive the local device index from the rank.
        args.local_rank = args.rank % torch.cuda.device_count()
    elif hasattr(args, "rank"):
        # Rank already supplied by the caller; keep it as-is.
        pass
    else:
        print('Not using distributed mode')
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.local_rank)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    setup_for_distributed(args.rank == 0)
class Wasserstein_GP(nn.Module):
    """Wasserstein GAN critic loss with gradient penalty (WGAN-GP).

    ``forward(real, fake, model)`` returns a 3-tuple:
      (total critic loss, D(real) - D(fake) distance estimate,
       gradient penalty term).
    """
    def __init__(self, device, lambda_gp):
        super(Wasserstein_GP, self).__init__()
        self.device = device
        # Weight of the gradient-penalty term in the total loss.
        self.lambda_gp = lambda_gp

    def forward(self, real, fake, model):
        gradient_penalty = self.compute_gradient_penalty(model, real, fake)
        loss_real = torch.mean(model(real))
        loss_fake = torch.mean(model(fake))
        # Critic is trained to maximize D(real) - D(fake); minimize the negation.
        loss = -loss_real + loss_fake + gradient_penalty * self.lambda_gp
        return loss, loss_real-loss_fake, gradient_penalty

    def compute_gradient_penalty(self, model, real_samples, fake_samples):
        """Penalize the critic's gradient norm at random real/fake interpolates."""
        # One random mixing coefficient per sample, broadcast over C/H/W.
        alpha = torch.rand(real_samples.size(0), 1, 1, 1, device=self.device)
        interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
        d_interpolates = model(interpolates)
        # Gradients of the critic output w.r.t. the interpolated inputs.
        gradients = autograd.grad(
            outputs=d_interpolates,
            inputs=interpolates,
            grad_outputs=torch.ones(real_samples.size(0), d_interpolates.size(1)).to(self.device),
            create_graph=True,
            retain_graph=True,
            only_inputs=True,
        )[0]
        gradients = gradients.view(gradients.size(0), -1)
        # Penalize deviation of each sample's gradient norm from 1.
        gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
        return gradient_penalty
# Modified from https://gist.github.com/alper111/8233cdb0414b4cb5853f2f730ab95a49
class VGGPerceptualLoss(nn.Module):
    """Perceptual (feature-space) L1/MSE loss using frozen pretrained VGG16 features.

    Single-channel inputs are replicated to 3 channels, optionally rescaled
    from [-1, 1] to [0, 1], normalized with ImageNet mean/std and (when
    ``resize``) bilinearly resized to 224x224. ``forward`` returns the pair
    (L1, L2) summed over the feature blocks whose indices appear in
    ``feature_layers`` (default [1], i.e. only the relu2_2 block).
    Note: constructing this module downloads the torchvision VGG16 weights.
    """
    def __init__(self, resize=True):
        super(VGGPerceptualLoss, self).__init__()
        blocks = []
        blocks.append(vgg16(pretrained=True).features[:4].eval())   # relu1_2
        blocks.append(vgg16(pretrained=True).features[4:9].eval())  # relu2_2
        blocks.append(vgg16(pretrained=True).features[9:16].eval()) # relu3_3
        blocks.append(vgg16(pretrained=True).features[16:23].eval()) # relu4_3
        # Freeze all VGG parameters: this module is a fixed feature extractor.
        for bl in blocks:
            for p in bl:
                p.requires_grad = False
        self.blocks = nn.ModuleList(blocks)
        self.transform = nn.functional.interpolate
        self.resize = resize
        # ImageNet channel statistics used to normalize VGG inputs.
        self.register_buffer("mean", torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer("std", torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
        self.l1loss = nn.L1Loss()
        self.l2loss = nn.MSELoss()

    def forward(self, input, target, rescale=True, feature_layers=[1]):
        # Replicate single-channel maps to the 3 channels VGG expects.
        input = input.view(-1, 1, input.shape[-2], input.shape[-1]).repeat(1, 3, 1, 1)
        target = target.view(-1, 1, target.shape[-2], target.shape[-1]).repeat(1, 3, 1, 1)
        if rescale: # from [-1, 1] to [0, 1]
            input = input / 2 + 0.5
            target = target / 2 + 0.5
        input = (input-self.mean) / self.std
        target = (target-self.mean) / self.std
        if self.resize:
            input = self.transform(input, mode='bilinear', size=(224, 224), align_corners=False)
            target = self.transform(target, mode='bilinear', size=(224, 224), align_corners=False)
        loss_l1, loss_l2 = 0.0, 0.0
        x = input
        y = target
        for i, block in enumerate(self.blocks):
            x = block(x)
            y = block(y)
            # Only the blocks listed in feature_layers contribute to the loss.
            if i in feature_layers:
                loss_l1 += self.l1loss(x, y)
                loss_l2 += self.l2loss(x, y)
        return loss_l1, loss_l2
def cal_psnr(gt, data, max_value):
    """PSNR in dB between ``gt`` and ``data``; returns 100 for identical inputs.

    Args:
        gt, data: arrays to compare.
        max_value: peak signal value used in the ratio.
    """
    err = np.mean((gt - data) ** 2)
    return 100 if err == 0 else 20 * np.log10(max_value / np.sqrt(err))
| 17,006 | 34.804211 | 105 | py |
OpenFWI | OpenFWI-main/dataset.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import os
import numpy as np
from torch.utils.data import Dataset
from torchvision.transforms import Compose
import transforms as T
class FWIDataset(Dataset):
    ''' FWI dataset
    For convenience, in this class, a batch refers to a npy file
    instead of the batch used during training.

    Each annotation line holds a seismic-data npy path, optionally followed
    by a tab and a velocity-label npy path.

    Args:
        anno: path to annotation file
        preload: whether to load the whole dataset into memory
        sample_ratio: downsample ratio for seismic data (time axis)
        file_size: # of samples in each npy file
        transform_data|label: transformation applied to data or label
    '''
    def __init__(self, anno, preload=True, sample_ratio=1, file_size=500,
                    transform_data=None, transform_label=None):
        if not os.path.exists(anno):
            # Fail fast with a clear message instead of printing a warning
            # and crashing one line later when the file is opened (the
            # exception type callers see is unchanged).
            raise FileNotFoundError(f'Annotation file {anno} does not exist')
        self.preload = preload
        self.sample_ratio = sample_ratio
        self.file_size = file_size
        self.transform_data = transform_data
        self.transform_label = transform_label
        with open(anno, 'r') as f:
            self.batches = f.readlines()
        if preload:
            self.data_list, self.label_list = [], []
            for batch in self.batches:
                data, label = self.load_every(batch)
                self.data_list.append(data)
                if label is not None:
                    self.label_list.append(label)

    # Load from one line
    def load_every(self, batch):
        """Load one annotation line; returns (data, label), label may be None."""
        batch = batch.split('\t')
        # Single-column lines keep their trailing newline; strip it there.
        data_path = batch[0] if len(batch) > 1 else batch[0][:-1]
        data = np.load(data_path)[:, :, ::self.sample_ratio, :]
        data = data.astype('float32')
        if len(batch) > 1:
            label_path = batch[1][:-1]
            label = np.load(label_path)
            label = label.astype('float32')
        else:
            label = None
        return data, label

    def __getitem__(self, idx):
        batch_idx, sample_idx = idx // self.file_size, idx % self.file_size
        if self.preload:
            data = self.data_list[batch_idx][sample_idx]
            label = self.label_list[batch_idx][sample_idx] if len(self.label_list) != 0 else None
        else:
            # Without preloading, the whole npy file is re-read per access.
            data, label = self.load_every(self.batches[batch_idx])
            data = data[sample_idx]
            label = label[sample_idx] if label is not None else None
        if self.transform_data:
            data = self.transform_data(data)
        if self.transform_label and label is not None:
            label = self.transform_label(label)
        # Unlabeled datasets yield an empty array instead of None so default
        # collate functions keep working.
        return data, label if label is not None else np.array([])

    def __len__(self):
        # Assumes every listed npy file contains exactly `file_size` samples.
        return len(self.batches) * self.file_size
if __name__ == '__main__':
    # Smoke test: build a one-file dataset with the standard log/min-max
    # normalization pipeline and fetch the first sample.
    transform_data = Compose([
        T.LogTransform(k=1),
        T.MinMaxNormalize(T.log_transform(-61, k=1), T.log_transform(120, k=1))
    ])
    transform_label = Compose([
        T.MinMaxNormalize(2000, 6000)
    ])
    dataset = FWIDataset(f'relevant_files/temp.txt', transform_data=transform_data, transform_label=transform_label, file_size=1)
    data, label = dataset[0]
    print(data.shape)
    # NOTE(review): always False — __getitem__ substitutes np.array([]) for a
    # missing label, never None.
    print(label is None)
| 3,920 | 37.441176 | 129 | py |
OpenFWI | OpenFWI-main/scheduler.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import torch
from bisect import bisect_right
# Scheduler adopted from the original repo
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Multi-step LR decay preceded by a constant or linear warmup phase.

    For the first ``warmup_iters`` steps the base learning rate is scaled by a
    warmup factor (constant, or linearly ramped from ``warmup_factor`` to 1);
    afterwards it decays by ``gamma`` once per milestone already passed, like
    the standard ``MultiStepLR``.

    Raises:
        ValueError: if ``milestones`` is not sorted ascending, or
            ``warmup_method`` is neither ``"constant"`` nor ``"linear"``.
    """
    def __init__(
        self,
        optimizer,
        milestones,
        gamma=0.1,
        warmup_factor=1.0 / 3,
        warmup_iters=5,
        warmup_method="linear",
        last_epoch=-1,
    ):
        if not milestones == sorted(milestones):
            # Bug fix: the milestones were previously passed as a *second*
            # positional argument to ValueError and never formatted into
            # the message; format them in explicitly.
            raise ValueError(
                "Milestones should be a list of increasing integers."
                " Got {}".format(milestones)
            )
        if warmup_method not in ("constant", "linear"):
            # Bug fix: the two adjacent string fragments were concatenated
            # without a separator ("acceptedgot ..."); add one.
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted, "
                "got {}".format(warmup_method)
            )
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
    def get_lr(self):
        """Return the learning rate for each param group at the current step."""
        warmup_factor = 1
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == "constant":
                warmup_factor = self.warmup_factor
            elif self.warmup_method == "linear":
                # Linearly ramp the factor from warmup_factor up to 1.
                alpha = float(self.last_epoch) / self.warmup_iters
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
        return [
            base_lr *
            warmup_factor *
            # gamma is applied once per milestone already passed.
            self.gamma ** bisect_right(self.milestones, self.last_epoch)
            for base_lr in self.base_lrs
        ]
| 2,380 | 35.075758 | 105 | py |
OpenFWI | OpenFWI-main/train.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import os
import sys
import time
import datetime
import json
import torch
from torch import nn
from torch.utils.data import RandomSampler, DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel
from torch.utils.tensorboard import SummaryWriter
import torchvision
from torchvision.transforms import Compose
import utils
import network
from dataset import FWIDataset
from scheduler import WarmupMultiStepLR
import transforms as T
step = 0  # Global iteration counter shared by train_one_epoch/evaluate/main for TensorBoard x-axis and checkpoint resume.
def train_one_epoch(model, criterion, optimizer, lr_scheduler,
                    dataloader, device, epoch, print_freq, writer):
    """Run one training epoch; logs per-iteration losses and steps the
    (per-iteration) LR scheduler after each batch.

    Side effects: increments the module-global ``step`` counter and, if
    ``writer`` is set, writes loss/loss_g1v/loss_g2v scalars to TensorBoard.
    """
    global step
    model.train()
    # Logger setup
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value}'))
    metric_logger.add_meter('samples/s', utils.SmoothedValue(window_size=10, fmt='{value:.3f}'))
    header = 'Epoch: [{}]'.format(epoch)
    for data, label in metric_logger.log_every(dataloader, print_freq, header):
        start_time = time.time()
        optimizer.zero_grad()
        data, label = data.to(device), label.to(device)
        output = model(data)
        # criterion returns (combined, l1, l2) — see main()'s closure.
        loss, loss_g1v, loss_g2v = criterion(output, label)
        loss.backward()
        optimizer.step()
        loss_val = loss.item()
        loss_g1v_val = loss_g1v.item()
        loss_g2v_val = loss_g2v.item()
        batch_size = data.shape[0]
        metric_logger.update(loss=loss_val, loss_g1v=loss_g1v_val,
            loss_g2v=loss_g2v_val, lr=optimizer.param_groups[0]['lr'])
        metric_logger.meters['samples/s'].update(batch_size / (time.time() - start_time))
        if writer:
            writer.add_scalar('loss', loss_val, step)
            writer.add_scalar('loss_g1v', loss_g1v_val, step)
            writer.add_scalar('loss_g2v', loss_g2v_val, step)
        step += 1
        # Scheduler milestones are expressed in iterations (see main()).
        lr_scheduler.step()
def evaluate(model, criterion, dataloader, device, writer):
    """Evaluate the model on ``dataloader`` and return the global average
    combined loss (synchronized across distributed processes).

    If ``writer`` is set, the averaged losses are logged at the current
    module-global ``step``.
    """
    model.eval()
    metric_logger = utils.MetricLogger(delimiter=' ')
    header = 'Test:'
    with torch.no_grad():
        for data, label in metric_logger.log_every(dataloader, 20, header):
            data = data.to(device, non_blocking=True)
            label = label.to(device, non_blocking=True)
            output = model(data)
            loss, loss_g1v, loss_g2v = criterion(output, label)
            metric_logger.update(loss=loss.item(),
                loss_g1v=loss_g1v.item(),
                loss_g2v=loss_g2v.item())
    # Gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print(' * Loss {loss.global_avg:.8f}\n'.format(loss=metric_logger.loss))
    if writer:
        writer.add_scalar('loss', metric_logger.loss.global_avg, step)
        writer.add_scalar('loss_g1v', metric_logger.loss_g1v.global_avg, step)
        writer.add_scalar('loss_g2v', metric_logger.loss_g2v.global_avg, step)
    return metric_logger.loss.global_avg
def main(args):
    """End-to-end training driver: build datasets/loaders, model, loss,
    optimizer and scheduler; optionally resume; train for
    ``args.epoch_block * args.num_block`` epochs, checkpointing the best
    validation loss and every epoch block.
    """
    global step
    print(args)
    print('torch version: ', torch.__version__)
    print('torchvision version: ', torchvision.__version__)
    utils.mkdir(args.output_path) # create folder to store checkpoints
    utils.init_distributed_mode(args) # distributed mode initialization
    # Set up tensorboard summary writer
    train_writer, val_writer = None, None
    if args.tensorboard:
        utils.mkdir(args.log_path) # create folder to store tensorboard logs
        # Precedence: (not distributed) or (rank == 0 and local_rank == 0),
        # i.e. only the primary process creates writers.
        # NOTE(review): writers are placed under output_path even though
        # log_path was just created above — looks like an inconsistency.
        if not args.distributed or (args.rank == 0) and (args.local_rank == 0):
            train_writer = SummaryWriter(os.path.join(args.output_path, 'logs', 'train'))
            val_writer = SummaryWriter(os.path.join(args.output_path, 'logs', 'val'))
    device = torch.device(args.device)
    torch.backends.cudnn.benchmark = True
    # Per-dataset normalization bounds and file sizes.
    with open('dataset_config.json') as f:
        try:
            ctx = json.load(f)[args.dataset]
        except KeyError:
            print('Unsupported dataset.')
            sys.exit()
    if args.file_size is not None:
        ctx['file_size'] = args.file_size
    # Create dataset and dataloader
    print('Loading data')
    print('Loading training data')
    # Normalize data and label to [-1, 1]
    transform_data = Compose([
        T.LogTransform(k=args.k),
        T.MinMaxNormalize(T.log_transform(ctx['data_min'], k=args.k), T.log_transform(ctx['data_max'], k=args.k))
    ])
    transform_label = Compose([
        T.MinMaxNormalize(ctx['label_min'], ctx['label_max'])
    ])
    # .txt annotations build an FWIDataset; anything else is assumed to be
    # a pre-serialized torch dataset.
    if args.train_anno[-3:] == 'txt':
        dataset_train = FWIDataset(
            args.train_anno,
            preload=True,
            sample_ratio=args.sample_temporal,
            file_size=ctx['file_size'],
            transform_data=transform_data,
            transform_label=transform_label
        )
    else:
        dataset_train = torch.load(args.train_anno)
    print('Loading validation data')
    if args.val_anno[-3:] == 'txt':
        dataset_valid = FWIDataset(
            args.val_anno,
            preload=True,
            sample_ratio=args.sample_temporal,
            file_size=ctx['file_size'],
            transform_data=transform_data,
            transform_label=transform_label
        )
    else:
        dataset_valid = torch.load(args.val_anno)
    print('Creating data loaders')
    if args.distributed:
        train_sampler = DistributedSampler(dataset_train, shuffle=True)
        valid_sampler = DistributedSampler(dataset_valid, shuffle=True)
    else:
        train_sampler = RandomSampler(dataset_train)
        valid_sampler = RandomSampler(dataset_valid)
    dataloader_train = DataLoader(
        dataset_train, batch_size=args.batch_size,
        sampler=train_sampler, num_workers=args.workers,
        pin_memory=True, drop_last=True, collate_fn=default_collate)
    dataloader_valid = DataLoader(
        dataset_valid, batch_size=args.batch_size,
        sampler=valid_sampler, num_workers=args.workers,
        pin_memory=True, collate_fn=default_collate)
    print('Creating model')
    if args.model not in network.model_dict:
        print('Unsupported model.')
        sys.exit()
    model = network.model_dict[args.model](upsample_mode=args.up_mode,
        sample_spatial=args.sample_spatial, sample_temporal=args.sample_temporal).to(device)
    if args.distributed and args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    # Define loss function
    l1loss = nn.L1Loss()
    l2loss = nn.MSELoss()
    # Combined loss closure used by both train_one_epoch and evaluate;
    # returns (weighted sum, l1 term, l2 term).
    def criterion(pred, gt):
        loss_g1v = l1loss(pred, gt)
        loss_g2v = l2loss(pred, gt)
        loss = args.lambda_g1v * loss_g1v + args.lambda_g2v * loss_g2v
        return loss, loss_g1v, loss_g2v
    # Scale lr according to effective batch size
    lr = args.lr * args.world_size
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr, betas=(0.9, 0.999), weight_decay=args.weight_decay)
    # Convert scheduler to be per iteration instead of per epoch
    warmup_iters = args.lr_warmup_epochs * len(dataloader_train)
    lr_milestones = [len(dataloader_train) * m for m in args.lr_milestones]
    lr_scheduler = WarmupMultiStepLR(
        optimizer, milestones=lr_milestones, gamma=args.lr_gamma,
        warmup_iters=warmup_iters, warmup_factor=1e-5)
    model_without_ddp = model
    if args.distributed:
        model = DistributedDataParallel(model, device_ids=[args.local_rank])
        model_without_ddp = model.module
    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(network.replace_legacy(checkpoint['model']))
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1
        step = checkpoint['step']
        # Recompute milestones for the current dataloader length in case the
        # checkpoint was produced with a different batch size / dataset.
        lr_scheduler.milestones=lr_milestones
    print('Start training')
    start_time = time.time()
    # NOTE(review): initial best_loss of 10 assumes the normalized loss is
    # always below 10 at some point — true for [-1, 1] targets in practice.
    best_loss = 10
    chp=1
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, criterion, optimizer, lr_scheduler, dataloader_train,
                        device, epoch, args.print_freq, train_writer)
        loss = evaluate(model, criterion, dataloader_valid, device, val_writer)
        checkpoint = {
            'model': model_without_ddp.state_dict(),
            'optimizer': optimizer.state_dict(),
            'lr_scheduler': lr_scheduler.state_dict(),
            'epoch': epoch,
            'step': step,
            'args': args}
        # Save checkpoint per epoch
        if loss < best_loss:
            utils.save_on_master(
                checkpoint,
                os.path.join(args.output_path, 'checkpoint.pth'))
            print('saving checkpoint at epoch: ', epoch)
            chp = epoch
            best_loss = loss
        # Save checkpoint every epoch block
        print('current best loss: ', best_loss)
        print('current best epoch: ', chp)
        if args.output_path and (epoch + 1) % args.epoch_block == 0:
            utils.save_on_master(
                checkpoint,
                os.path.join(args.output_path, 'model_{}.pth'.format(epoch + 1)))
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
def parse_args():
    """Parse CLI options for FCN training and derive composite paths.

    Post-processing: output/log paths gain save-name/suffix subfolders,
    annotation names are joined with the annotation folder, total epochs =
    epoch_block * num_block, and a resume name is resolved inside the
    output path.
    """
    import argparse
    parser = argparse.ArgumentParser(description='FCN Training')
    parser.add_argument('-d', '--device', default='cuda', help='device')
    parser.add_argument('-ds', '--dataset', default='flatfault-b', type=str, help='dataset name')
    parser.add_argument('-fs', '--file-size', default=None, type=int, help='number of samples in each npy file')
    # Path related
    parser.add_argument('-ap', '--anno-path', default='split_files', help='annotation files location')
    parser.add_argument('-t', '--train-anno', default='flatfault_b_train_invnet.txt', help='name of train anno')
    parser.add_argument('-v', '--val-anno', default='flatfault_b_val_invnet.txt', help='name of val anno')
    parser.add_argument('-o', '--output-path', default='Invnet_models', help='path to parent folder to save checkpoints')
    parser.add_argument('-l', '--log-path', default='Invnet_models', help='path to parent folder to save logs')
    parser.add_argument('-n', '--save-name', default='fcn_l1loss_ffb', help='folder name for this experiment')
    parser.add_argument('-s', '--suffix', type=str, default=None, help='subfolder name for this run')
    # Model related
    parser.add_argument('-m', '--model', type=str, help='inverse model name')
    parser.add_argument('-um', '--up-mode', default=None, help='upsampling layer mode such as "nearest", "bicubic", etc.')
    parser.add_argument('-ss', '--sample-spatial', type=float, default=1.0, help='spatial sampling ratio')
    parser.add_argument('-st', '--sample-temporal', type=int, default=1, help='temporal sampling ratio')
    # Training related
    parser.add_argument('-b', '--batch-size', default=256, type=int)
    parser.add_argument('--lr', default=0.0001, type=float, help='initial learning rate')
    parser.add_argument('-lm', '--lr-milestones', nargs='+', default=[], type=int, help='decrease lr on milestones')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
    parser.add_argument('--weight-decay', default=1e-4 , type=float, help='weight decay (default: 1e-4)')
    parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
    parser.add_argument('--lr-warmup-epochs', default=0, type=int, help='number of warmup epochs')
    parser.add_argument('-eb', '--epoch_block', type=int, default=40, help='epochs in a saved block')
    parser.add_argument('-nb', '--num_block', type=int, default=3, help='number of saved block')
    parser.add_argument('-j', '--workers', default=16, type=int, help='number of data loading workers (default: 16)')
    parser.add_argument('--k', default=1, type=float, help='k in log transformation')
    parser.add_argument('--print-freq', default=50, type=int, help='print frequency')
    parser.add_argument('-r', '--resume', default=None, help='resume from checkpoint')
    parser.add_argument('--start-epoch', default=0, type=int, help='start epoch')
    # Loss related
    parser.add_argument('-g1v', '--lambda_g1v', type=float, default=1.0)
    parser.add_argument('-g2v', '--lambda_g2v', type=float, default=1.0)
    # Distributed training related
    parser.add_argument('--sync-bn', action='store_true', help='Use sync batch norm')
    parser.add_argument('--world-size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
    # Tensorboard related
    parser.add_argument('--tensorboard', action='store_true', help='Use tensorboard for logging.')
    args = parser.parse_args()
    # Derived values — see docstring.
    args.output_path = os.path.join(args.output_path, args.save_name, args.suffix or '')
    args.log_path = os.path.join(args.log_path, args.save_name, args.suffix or '')
    args.train_anno = os.path.join(args.anno_path, args.train_anno)
    args.val_anno = os.path.join(args.anno_path, args.val_anno)
    args.epochs = args.epoch_block * args.num_block
    if args.resume:
        args.resume = os.path.join(args.output_path, args.resume)
    return args
if __name__ == '__main__':
    # Entry point: parse CLI args and launch training.
    args = parse_args()
    main(args)
| 14,469 | 41.558824 | 122 | py |
OpenFWI | OpenFWI-main/transforms.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import torch
import numpy as np
import random
from sklearn.decomposition import PCA
def crop(vid, i, j, h, w):
    """Cut an ``h``-by-``w`` window with top-left corner ``(i, j)`` out of
    the last two (spatial) dimensions of ``vid``."""
    rows = slice(i, i + h)
    cols = slice(j, j + w)
    return vid[..., rows, cols]
def center_crop(vid, output_size):
    """Crop the spatial center of ``vid`` to ``output_size`` = (th, tw)."""
    th, tw = output_size
    h, w = vid.shape[-2:]
    top = int(round((h - th) / 2.))
    left = int(round((w - tw) / 2.))
    # Slice the last two (spatial) axes directly.
    return vid[..., top:top + th, left:left + tw]
def hflip(vid):
    """Mirror ``vid`` along its last (width) axis."""
    return torch.flip(vid, dims=(-1,))
# NOTE: for those functions, which generally expect mini-batches, we keep them
# as non-minibatch so that they are applied as if they were 4d (thus image).
# this way, we only apply the transformation in the spatial domain
def resize(vid, size, interpolation='bilinear'):
    """Resize the spatial dimensions of ``vid``.

    ``size`` may be an explicit (h, w) pair, or a single int giving the
    target length of the shorter spatial side (aspect ratio preserved).
    Works on unbatched 4-d input (bilinear, align_corners=False).
    """
    if isinstance(size, int):
        # An int targets the smaller spatial edge via a scale factor.
        factor = float(size) / min(vid.shape[-2:])
        target = None
    else:
        factor = None
        target = size
    return torch.nn.functional.interpolate(
        vid, size=target, scale_factor=factor, mode=interpolation,
        align_corners=False)
def random_resize(vid, size, random_factor, interpolation='bilinear'):
    """Resize like :func:`resize`, jittered by a random multiplier.

    A factor ``r`` is drawn uniformly from [1, random_factor) and applied
    to the requested size (int or (h, w) pair).
    """
    r = 1 + random.random() * (random_factor - 1)
    if isinstance(size, int):
        # Scale the shorter spatial edge toward ``size``, then jitter by r.
        return torch.nn.functional.interpolate(
            vid, size=None,
            scale_factor=float(size) / min(vid.shape[-2:]) * r,
            mode=interpolation, align_corners=False)
    jittered = tuple(int(edge * r) for edge in size)
    return torch.nn.functional.interpolate(
        vid, size=jittered, scale_factor=None, mode=interpolation,
        align_corners=False)
def pad(vid, padding, fill=0, padding_mode="constant"):
    """Pad ``vid`` using ``torch.nn.functional.pad`` semantics.

    ``padding`` follows F.pad's (left, right, top, bottom, ...) ordering;
    the temporal dimension is deliberately not padded by callers.
    """
    return torch.nn.functional.pad(
        vid, padding, mode=padding_mode, value=fill)
def to_normalized_float_tensor(vid):
    """Move channels first (last axis -> axis 0) and scale by 1/255.

    The division by 255 assumes 8-bit pixel values — intended for
    (T, H, W, C) uint8 video frames.
    """
    channels_first = vid.permute(3, 0, 1, 2)
    return channels_first.to(torch.float32) / 255
def normalize(vid, mean, std):
    """Standardize ``vid``: (vid - mean) / std, with ``mean``/``std``
    broadcast along the first axis of ``vid``."""
    bcast = (-1,) + (1,) * (vid.dim() - 1)
    mu = torch.as_tensor(mean).reshape(bcast)
    sigma = torch.as_tensor(std).reshape(bcast)
    return (vid - mu) / sigma
def minmax_normalize(vid, vmin, vmax, scale=2):
    # Map [vmin, vmax] to [0, 1], or to [-1, 1] when scale == 2.
    # NOTE: mutates ``vid`` in place via -= and /= — tensor/array callers
    # will see their input modified. Kept as-is because existing pipelines
    # may rely on it.
    vid -= vmin
    vid /= (vmax - vmin)
    # Final stretch to [-1, 1] is out-of-place.
    return (vid - 0.5) * 2 if scale == 2 else vid
def minmax_denormalize(vid, vmin, vmax, scale=2):
    """Invert :func:`minmax_normalize`: map values back to [vmin, vmax]."""
    span = vmax - vmin
    # Undo the [-1, 1] stretch first when scale == 2.
    unit = vid / 2 + 0.5 if scale == 2 else vid
    return unit * span + vmin
def add_noise(data, snr):
    """Add white Gaussian noise to ``data`` at the given SNR (in dB)."""
    signal_db = 10 * np.log10(np.mean(data ** 2))
    # Noise power that yields the requested signal-to-noise ratio.
    noise_power = 10 ** ((signal_db - snr) / 10)
    perturbation = np.random.normal(0, np.sqrt(noise_power), data.shape)
    return data + perturbation
def log_transform(data, k=1, c=0):
    """Signed log compression: sign(x) * log(1 + |k*x| + c)."""
    magnitude = np.log1p(np.abs(k * data) + c)
    return magnitude * np.sign(data)
def log_transform_tensor(data, k=1, c=0):
    """Torch counterpart of :func:`log_transform`."""
    magnitude = torch.log1p(torch.abs(k * data) + c)
    return magnitude * torch.sign(data)
def exp_transform(data, k=1, c=0):
    """Inverse of :func:`log_transform`: sign(x) * (exp(|x|) - 1 - c) / k."""
    return np.sign(data) * (np.expm1(np.abs(data)) - c) / k
def tonumpy_denormalize(vid, vmin, vmax, exp=True, k=1, c=0, scale=2):
    # Move a normalized torch tensor to numpy and undo normalization.
    # When exp=True, vmin/vmax are first mapped into log space so that
    # minmax_denormalize operates on the same scale the data was normalized
    # in, and exp_transform then inverts the log compression.
    if exp:
        vmin = log_transform(vmin, k=k, c=c)
        vmax = log_transform(vmax, k=k, c=c)
    vid = minmax_denormalize(vid.cpu().numpy(), vmin, vmax, scale)
    return exp_transform(vid, k=k, c=c) if exp else vid
# Class interface
class RandomCrop(object):
    """Crop a random spatial window of the given size from a video."""
    def __init__(self, size):
        self.size = size
    @staticmethod
    def get_params(vid, output_size):
        """Pick a random top-left corner for a crop of ``output_size``."""
        th, tw = output_size
        h, w = vid.shape[-2:]
        if (h, w) == (th, tw):
            # Nothing to randomize: the crop covers the whole frame.
            return 0, 0, h, w
        top = random.randint(0, h - th)
        left = random.randint(0, w - tw)
        return top, left, th, tw
    def __call__(self, vid):
        top, left, th, tw = self.get_params(vid, self.size)
        return crop(vid, top, left, th, tw)
class CenterCrop(object):
    """Transform wrapper around :func:`center_crop`; ``size`` = (th, tw)."""
    def __init__(self, size):
        self.size = size
    def __call__(self, vid):
        return center_crop(vid, self.size)
class Resize(object):
    """Transform wrapper around :func:`resize`; ``size`` is an int (shorter
    side) or an explicit (h, w) pair."""
    def __init__(self, size):
        self.size = size
    def __call__(self, vid):
        return resize(vid, self.size)
class RandomResize(object):
    """Transform wrapper around :func:`random_resize`: resize with a random
    jitter factor drawn from [1, random_factor)."""
    def __init__(self, size, random_factor=1.25):
        self.size = size
        self.factor = random_factor
    def __call__(self, vid):
        return random_resize(vid, self.size, self.factor)
class ToFloatTensorInZeroOne(object):
    """Transform wrapper around :func:`to_normalized_float_tensor`:
    channels-first float video scaled by 1/255."""
    def __call__(self, vid):
        return to_normalized_float_tensor(vid)
class Normalize(object):
    """Transform wrapper around :func:`normalize`: (vid - mean) / std."""
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std
    def __call__(self, vid):
        return normalize(vid, self.mean, self.std)
class MinMaxNormalize(object):
    """Transform wrapper around :func:`minmax_normalize`.

    scale=2 (default) maps [datamin, datamax] to [-1, 1]. Note the
    underlying function mutates its input in place.
    """
    def __init__(self, datamin, datamax, scale=2):
        self.datamin = datamin
        self.datamax = datamax
        self.scale = scale
    def __call__(self, vid):
        return minmax_normalize(vid, self.datamin, self.datamax, self.scale)
class RandomHorizontalFlip(object):
    """With probability ``p``, mirror the video along its width axis."""
    def __init__(self, p=0.5):
        self.p = p
    def __call__(self, vid):
        # One uniform draw per call decides whether to flip.
        should_flip = random.random() < self.p
        return hflip(vid) if should_flip else vid
class Pad(object):
    """Transform wrapper around :func:`pad` with a fixed padding spec and
    fill value."""
    def __init__(self, padding, fill=0):
        self.padding = padding
        self.fill = fill
    def __call__(self, vid):
        return pad(vid, self.padding, self.fill)
class TemporalDownsample(object):
    """Subsample frames: keep every ``rate``-th element along axis 0."""
    def __init__(self, rate=1):
        self.rate = rate
    def __call__(self, vid):
        # Strided slice keeps indices 0, rate, 2*rate, ...
        return vid[::self.rate]
class AddNoise(object):
    """Transform wrapper around :func:`add_noise`: add white Gaussian noise
    at ``snr`` dB."""
    def __init__(self, snr=10):
        self.snr = snr
    def __call__(self, vid):
        return add_noise(vid, self.snr)
class PCD(object):
    """Flatten each sample, center the features, and reduce with PCA.

    Output: the PCA components flattened and expanded to shape (N, 1, 1).
    NOTE: centers ``data`` in place (-=), mutating the caller's array, and
    re-fits the PCA on every call.
    """
    def __init__(self, n_comp=8):
        self.pca = PCA(n_components=n_comp)
    def __call__(self, data):
        # Flatten everything after the first axis into one feature vector.
        data= data.reshape((data.shape[0], -1))
        feat_mean = data.mean(axis=0)
        data -= np.tile(feat_mean, (data.shape[0], 1))
        pc = self.pca.fit_transform(data)
        pc = pc.reshape((-1,))
        # Add two singleton axes so downstream code sees (N, 1, 1).
        pc = pc[:, np.newaxis, np.newaxis]
        return pc
class StackPCD(object):
    """Two-stage (stacked) PCA feature extraction.

    Stage 1 runs a PCA per sample (after transposing the last two axes and
    centering), stage 2 runs a PCA across samples on the flattened stage-1
    components. Output shape is (N, 1, 1), mirroring :class:`PCD`.

    NOTE: centers arrays in place (-=) and re-fits both PCAs on every call.
    """
    def __init__(self, n_comp=(32, 8)):
        self.primary_pca = PCA(n_components=n_comp[0])
        self.secondary_pca = PCA(n_components=n_comp[1])
    def __call__(self, data):
        # Stage 1: per-sample PCA on the transposed (0, 2, 1) layout.
        data = np.transpose(data, (0, 2, 1))
        primary_pc = []
        for sample in data:
            feat_mean = sample.mean(axis=0)
            sample -= np.tile(feat_mean, (sample.shape[0], 1))
            primary_pc.append(self.primary_pca.fit_transform(sample))
        primary_pc = np.array(primary_pc)
        # Stage 2: PCA across samples on the flattened stage-1 components.
        data = primary_pc.reshape((data.shape[0], -1))
        feat_mean = data.mean(axis=0)
        data -= np.tile(feat_mean, (data.shape[0], 1))
        secondary_pc = self.secondary_pca.fit_transform(data)
        secondary_pc = secondary_pc.reshape((-1,))
        # Bug fix: this line previously read ``pc[:, np.newaxis, np.newaxis]``
        # but ``pc`` is undefined in this scope, raising NameError; expand
        # ``secondary_pc`` instead (same pattern as PCD.__call__).
        secondary_pc = secondary_pc[:, np.newaxis, np.newaxis]
        return secondary_pc
class LogTransform(object):
    """Transform wrapper around :func:`log_transform`: signed log
    compression with parameters ``k`` and ``c``."""
    def __init__(self, k=1, c=0):
        self.k = k
        self.c = c
    def __call__(self, data):
        return log_transform(data, k=self.k, c=self.c)
class ToTensor(object):
    """Convert a numpy ndarray sample to a torch Tensor (shares memory)."""
    def __call__(self, sample):
        return torch.from_numpy(sample)
| 8,236 | 29.394834 | 105 | py |
Desbordante-web-app | Desbordante-web-app/python-consumer/consumer.py | import time
import sys
import json
import logging
import signal
from enum import Enum
import confluent_kafka
import docker
import config
from error_handlers import update_internal_server_error
from error_handlers import update_resource_limit_error
docker_client = docker.from_env()  # high-level Docker SDK client (containers.run/list)
docker_api_client = docker.APIClient(base_url='unix://var/run/docker.sock')  # low-level client for inspect_container
def create_consumer():
    """Build a Kafka consumer for group 'tasks_1' subscribed to 'tasks'."""
    consumer = confluent_kafka.Consumer({
        'bootstrap.servers': config.KAFKA_ADDR,
        'group.id': 'tasks_1',
        'session.timeout.ms': 6000,
        # 'on_commit': my_commit_callback,
        'auto.offset.reset': 'earliest',
    })
    consumer.subscribe(['tasks'])
    return consumer
def container_exit_handler(container, container_state, active_tasks, taskID):
class exit_codes(Enum):
TASK_SUCCESSFULLY_PROCESSED = 0
TASK_CRASHED_STATUS_UPDATED = 1
TASK_CRASHED_WITHOUT_STATUS_UPDATING = 2
TASK_NOT_FOUND = 3
exitCode = container_state["ExitCode"]
match exitCode:
case exit_codes.TASK_SUCCESSFULLY_PROCESSED:
logging.info(f"[{taskID}] task done successfully")
logging.info(container.logs())
case exit_codes.TASK_CRASHED_STATUS_UPDATED:
logging.warning(f"[{taskID}] cpp-consumer has crashed, \
status was updated by cpp-consumer")
logging.warning(container.logs())
case exit_codes.TASK_CRASHED_WITHOUT_STATUS_UPDATING:
logging.warning(f"[{taskID}] cpp-consumer has crashed \
without status updating")
update_internal_server_error(taskID,
f"Crash {container.logs()}")
logging.warning(container.logs())
case exit_codes.TASK_NOT_FOUND:
logging.warning(f"[{taskID}] task not found")
container.remove()
active_tasks.pop(taskID)
def container_OOMKilled_handler(container, active_tasks, taskID):
    # Container was killed by the OOM killer: log ("ML" = memory limit),
    # remove it, drop the task, and record a MEMORY_LIMIT resource error.
    logging.warning(f"{taskID} ML")
    container.remove()
    active_tasks.pop(taskID)
    update_resource_limit_error(taskID, "MEMORY_LIMIT")
def check_active_containers(active_tasks):
    """Poll every tracked container once; handle at most one terminal event
    (time limit, OOM kill, or normal exit) per call.

    Each handler mutates ``active_tasks`` while we are iterating it, so the
    loop must ``break`` immediately after delegating — at most one container
    is reaped per invocation; main()'s loop calls this repeatedly.
    """
    for taskID, (container, t) in active_tasks.items():
        # Refresh cached container attributes (status, etc.).
        container.reload()
        logging.info(f'{taskID}, {container}, {container.status}, \
            {int(time.time() - t)}s')
        if time.time() - t >= config.TIMELIMIT:
            # TL
            logging.info(f'time exceeded for {taskID}, \
                container {container} removed')
            container.stop(timeout=1)
            container.remove()
            active_tasks.pop(taskID)
            update_resource_limit_error(taskID, "TIME_LIMIT")
            break
        container_state = docker_api_client.inspect_container(container.id)[
            "State"]
        OOMKilled = container_state["OOMKilled"]
        if OOMKilled:
            container_OOMKilled_handler(container, active_tasks, taskID)
            break
        if container.status == "exited":
            container_exit_handler(
                container, container_state, active_tasks, taskID)
            break
def create_container(taskID):
    """Start a detached cpp-consumer container that processes ``taskID``.

    Postgres credentials are passed via environment variables; memory is
    capped at config.MAX_RAM megabytes and the container is labeled
    type=cpp-consumer so dangling instances can be found on restart.
    """
    logging.info(f"creating container for {taskID}")
    env_variables = {
        "POSTGRES_HOST": config.POSTGRES_HOST,
        "POSTGRES_PORT": config.POSTGRES_PORT,
        "POSTGRES_USER": config.POSTGRES_USER,
        "POSTGRES_PASSWORD": config.POSTGRES_PASSWORD,
        "POSTGRES_DBNAME": config.POSTGRES_DBNAME
    }
    container_properties = {
        'image': "cpp-consumer:latest",
        'network': config.DOCKER_NETWORK,
        # The task id is the container's command-line argument.
        'command': taskID,
        'volumes': [
            'desbordante_uploads:/server/uploads/',
            'desbordante_datasets:/build/target/input_data/'],
        'detach': True,
        'mem_limit': f'{config.MAX_RAM}m',
        'environment': env_variables,
        'labels': {"type": "cpp-consumer"}
    }
    return docker_client.containers.run(**container_properties)
def main(containers):
    """Consumer loop: reap finished containers, then (if below the active
    task cap) poll Kafka for a new task and launch a container for it.

    ``containers`` maps taskID -> (container, start_time) and is shared
    with the signal handlers in this module.
    """
    logging.getLogger().setLevel(logging.INFO)
    logging.getLogger().addHandler(logging.StreamHandler(sys.stderr))
    consumer = create_consumer()
    while True:
        check_active_containers(containers)
        containers_amount = len(containers)
        if containers_amount >= config.MAX_ACTIVE_TASKS:
            # At capacity: just keep reaping until a slot frees up.
            time.sleep(1)
            continue
        msg = consumer.poll(3.0)
        if msg is None:
            continue
        if msg.error():
            logging.error(f"Consumer error: {msg.error()}")
            continue
        logging.info(f'Received task: {msg.value().decode("utf-8")}')
        taskID = json.loads(msg.value().decode('utf-8'))['taskID']
        container = create_container(taskID)
        containers[taskID] = (container, time.time())
    # NOTE(review): unreachable — the loop above never terminates normally.
    consumer.close()
def exit_gracefully(*args):
    """Signal/shutdown handler: stop and force-remove every tracked
    cpp-consumer container (accepts signal-handler arguments)."""
    for container, _ in containers.values():
        container.stop(timeout=1)
        container.remove(force=True)
def remove_dangling_containers():
    """Stop and remove cpp-consumer containers left over from a prior run
    (identified by the type=cpp-consumer label)."""
    active_cpp_containers = docker_client.containers.list(
        filters={"label": "type=cpp-consumer"})
    for container in active_cpp_containers:
        # Bug fix: the id was previously passed as a bare positional
        # logging argument with no %s placeholder in the message, which
        # triggers a formatting error inside the logging module instead of
        # logging the id.
        logging.info("removing dangling %s", container.id)
        container.stop(timeout=1)
        container.remove(force=True)
if __name__ == '__main__':
    # NOTE(review): `global` at module level is a no-op; kept as-is.
    global containers
    # taskID -> (container, start_time), shared with the signal handlers.
    containers = dict()
    signal.signal(signal.SIGINT, exit_gracefully)
    signal.signal(signal.SIGTERM, exit_gracefully)
    try:
        remove_dangling_containers()
        main(containers)
    except Exception:
        # NOTE(review): the exception is swallowed silently after cleanup —
        # consider logging/re-raising so crashes are visible.
        exit_gracefully()
Desbordante-web-app | Desbordante-web-app/python-consumer/error_handlers.py | import config
import psycopg
def update_error_status(taskID, errorType, error):
    # errorType : INTERNAL SERVER ERROR | RESOURCE LIMIT IS REACHED
    """Mark a task as failed: store ``error`` in "errorMsg" and
    ``errorType`` in "status" for the row matching ``taskID``."""
    with psycopg.connect(f"dbname={config.POSTGRES_DBNAME} \
        user={config.POSTGRES_USER} password={config.POSTGRES_PASSWORD} \
        host={config.POSTGRES_HOST} port={config.POSTGRES_PORT}") as conn:
        with conn.cursor() as cur:
            sql = f""" UPDATE "{config.DB_TASKS_TABLE_NAME}"
                        SET "errorMsg" = %s, "status" = %s
                        WHERE "taskID" = %s;"""
            # Bug fix: the message was previously run through
            # error.replace("'", "''") before binding. psycopg's %s
            # parameters already quote values safely, so the manual
            # doubling corrupted stored text (e.g. "can't" -> "can''t");
            # pass the message verbatim.
            cur.execute(sql, (error, errorType, taskID))
            conn.commit()
def update_internal_server_error(taskID, error):
    """Record a crash: set status INTERNAL_SERVER_ERROR with message ``error``."""
    update_error_status(taskID, "INTERNAL_SERVER_ERROR", error)
def update_resource_limit_error(taskID, error):
    """Record a resource-limit failure: status RESOURCE_LIMIT_IS_REACHED."""
    # error: MEMORY LIMIT | TIME LIMIT
    update_error_status(taskID, "RESOURCE_LIMIT_IS_REACHED", error)
| 958 | 35.884615 | 70 | py |
Desbordante-web-app | Desbordante-web-app/python-consumer/config.py | import os
# All values are read eagerly at import time. NOTE(review): int(os.getenv(...))
# raises TypeError if the variable is unset — presumably all are required.
TIMELIMIT = int(os.getenv('TIMELIMIT'))  # per-task wall-clock limit, seconds
MAX_RAM = int(os.getenv('MAX_RAM'))  # per-container memory cap (used as mem_limit=f'{MAX_RAM}m')
KAFKA_ADDR = os.getenv('KAFKA_HOST') + ':' + os.getenv('KAFKA_PORT')  # "host:port" bootstrap address
MAX_ACTIVE_TASKS = int(os.getenv('MAX_ACTIVE_TASKS'))  # cap on concurrent cpp-consumer containers
DOCKER_NETWORK = os.getenv('DOCKER_NETWORK')
# Postgres connection settings for error_handlers.
POSTGRES_HOST = os.getenv('POSTGRES_HOST')
POSTGRES_PORT = os.getenv('POSTGRES_PORT')
POSTGRES_USER = os.getenv('POSTGRES_USER')
POSTGRES_PASSWORD = os.getenv('POSTGRES_PASSWORD')
POSTGRES_DBNAME = os.getenv('POSTGRES_DBNAME')
DB_TASKS_TABLE_NAME = os.getenv('DB_TASKS_TABLE_NAME')
clFFT | clFFT-master/src/scripts/perf/plotPerformance.py | # ########################################################################
# Copyright 2013 Advanced Micro Devices, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ########################################################################
# to use this script, you will need to download and install the 32-BIT VERSION of:
# - Python 2.7 x86 (32-bit) - http://www.python.org/download/releases/2.7.1
#
# you will also need the 32-BIT VERSIONS of the following packages as not all the packages are available in 64bit at the time of this writing
# The ActiveState python distribution is recommended for windows
# (make sure to get the python 2.7-compatible packages):
# - NumPy 1.5.1 (32-bit, 64-bit unofficial, supports Python 2.4 - 2.7 and 3.1 - 3.2.) - http://sourceforge.net/projects/numpy/files/NumPy/
# - matplotlib 1.0.1 (32-bit & 64-bit, supports Python 2.4 - 2.7) - http://sourceforge.net/projects/matplotlib/files/matplotlib/
#
# For ActiveState Python, all that one should need to type is 'pypm install matplotlib'
import datetime
import sys
import argparse
import subprocess
import itertools
import os
import matplotlib
import pylab
from matplotlib.backends.backend_pdf import PdfPages
from fftPerformanceTesting import *
def plotGraph(dataForAllPlots, title, plottype, plotkwargs, xaxislabel, yaxislabel):
    """
    display a pretty graph

    Plots each series with the named pylab function (``plottype``), adds a
    legend when there is more than one series, then shows the figure or
    saves it to args.outputFilename. Relies on module globals ``dh``
    (a writable log handle) and ``args`` (parsed CLI options).
    """
    dh.write('Making graph\n')
    # NOTE(review): colors.pop() raises IndexError past 7 series.
    colors = ['k','y','m','c','b','r','g']
    #plottype = 'plot'
    for thisPlot in dataForAllPlots:
        # Dispatch to pylab.<plottype> (e.g. 'plot', 'semilogx', ...).
        getattr(pylab, plottype)(thisPlot.xdata, thisPlot.ydata,
                '{}.-'.format(colors.pop()),
                label=thisPlot.label, **plotkwargs)
    if len(dataForAllPlots) > 1:
        pylab.legend(loc='best')
    pylab.title(title)
    pylab.xlabel(xaxislabel)
    pylab.ylabel(yaxislabel)
    pylab.grid(True)
    if args.outputFilename == None:
        # if no pdf output is requested, spit the graph to the screen . . .
        pylab.show()
    else:
        pylab.savefig(args.outputFilename,dpi=(1024/8))
    # . . . otherwise, gimme gimme pdf
    #pdf = PdfPages(args.outputFilename)
    #pdf.savefig()
    #pdf.close()
######## plotFromDataFile() Function to plot from data file begins ########
def plotFromDataFile():
    """Read one or more tables produced by measurePerformance.py (args.datafile),
    sanity-check them, group rows into one or more plot series, and render the
    graph (to screen, or to args.outputFilename if given).

    Relies on module globals: args, plotvalues, xaxisvalues, tableHeader, and
    the TableRow/TestCombination/GraphPoint classes from fftPerformanceTesting.
    """
    data = []
    """
    read in table(s) from file(s)
    """
    for thisFile in args.datafile:
        if not os.path.isfile(thisFile):
            print 'No file with the name \'{}\' exists. Please indicate another filename.'.format(thisFile)
            quit()
        results = open(thisFile, 'r')
        resultsContents = results.read()
        resultsContents = resultsContents.rstrip().split('\n')
        # The first row must be the canonical header written by the measurer.
        firstRow = resultsContents.pop(0)
        if firstRow != tableHeader:
            print 'ERROR: input file \'{}\' does not match expected format.'.format(thisFile)
            quit()
        for row in resultsContents:
            # Columns 0-9 are the test parameters, column 10 is the gflops value.
            row = row.split(',')
            row = TableRow(TestCombination(row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9]), row[10])
            data.append(GraphPoint(row.parameters.x, row.parameters.y, row.parameters.z, row.parameters.batchsize, row.parameters.precision, row.parameters.device, row.parameters.label, row.gflops))
    """
    data sanity check
    """
    # if multiple plotvalues have > 1 value among the data rows, the user must specify which to plot
    multiplePlotValues = []
    for option in plotvalues:
        values = []
        for point in data:
            values.append(getattr(point, option))
        multiplePlotValues.append(len(set(values)) > 1)
    if multiplePlotValues.count(True) > 1 and args.plot == None:
        print 'ERROR: more than one parameter of {} has multiple values. Please specify which parameter to plot with --plot'.format(plotvalues)
        quit()
    # if args.graphxaxis is not 'problemsize', the user should know that the results might be strange
    if args.graphxaxis != 'problemsize':
        xaxisvalueSet = []
        for option in xaxisvalues:
            if option != 'problemsize':
                values = []
                for point in data:
                    values.append(getattr(point, option))
                xaxisvalueSet.append(len(set(values)) > 1)
        if xaxisvalueSet.count(True) > 1:
            print 'WARNING: more than one parameter of {} is varied. unexpected results may occur. please double check your graphs for accuracy.'.format(xaxisvalues)
    # multiple rows should not have the same input values
    pointInputs = []
    for point in data:
        pointInputs.append(point.__str__().split(';')[0])
    if len(set(pointInputs)) != len(data):
        print 'ERROR: imported table has duplicate rows with identical input parameters'
        quit()
    """
    figure out if we have multiple plots on this graph (and what they should be)
    """
    if args.plot != None:
        multiplePlots = args.plot
    elif multiplePlotValues.count(True) == 1:
        multiplePlots = plotvalues[multiplePlotValues.index(True)]
    else:
        # default to device if none of the options to plot have multiple values
        multiplePlots = 'device'
    """
    assemble data for the graphs
    """
    data.sort(key=lambda row: int(getattr(row, args.graphxaxis)))
    # choose scale for x axis
    if args.xaxisscale == None:
        # user didn't specify. autodetect
        # NOTE(review): the '> 10000' branch below is unreachable -- any value
        # above 10000 is also above 2000 and is caught by the first test, so
        # 'log10' can never be autodetected. Likely the two tests are in the
        # wrong order; confirm against upstream before fixing.
        if int(getattr(data[len(data)-1], args.graphxaxis)) > 2000: # big numbers on x-axis
            args.xaxisscale = 'log2'
        elif int(getattr(data[len(data)-1], args.graphxaxis)) > 10000: # bigger numbers on x-axis
            args.xaxisscale = 'log10'
        else: # small numbers on x-axis
            args.xaxisscale = 'linear'
    if args.yaxisscale == None:
        args.yaxisscale = 'linear'
    # Map the (x-scale, y-scale) pair onto a pylab function name and the
    # base keyword arguments it needs.
    plotkwargs = {}
    if args.xaxisscale == 'linear':
        plottype = 'plot'
    elif args.xaxisscale == 'log2':
        plottype = 'semilogx'
        if (args.yaxisscale=='log2'):
            plottype = 'loglog'
            plotkwargs = {'basex':2,'basey':2}
        elif (args.yaxisscale=='log10'):
            plottype = 'loglog'
            plotkwargs = {'basex':2,'basey':10}
        elif (args.yaxisscale=='linear'):
            plottype = 'semilogx'
            plotkwargs = {'basex':2}
    elif args.xaxisscale == 'log10':
        plottype = 'semilogx'
        if (args.yaxisscale=='log2'):
            plottype = 'loglog'
            plotkwargs = {'basex':10,'basey':2}
        elif (args.yaxisscale=='log10'):
            plottype = 'loglog'
            plotkwargs = {'basex':10,'basey':10}
    else:
        print 'ERROR: invalid value for x-axis scale'
        quit()
    # One series per distinct value of the multiplePlots attribute.
    plots = set(getattr(row, multiplePlots) for row in data)
    class DataForOnePlot:
        # Simple record: label plus parallel x/y value lists for one series.
        def __init__(self, inlabel, inxdata, inydata):
            self.label = inlabel
            self.xdata = inxdata
            self.ydata = inydata
    dataForAllPlots=[]
    for plot in plots:
        # itertools.ifilter is Python-2 only (filter() in Python 3).
        dataForThisPlot = itertools.ifilter( lambda x: getattr(x, multiplePlots) == plot, data)
        dataForThisPlot = list(itertools.islice(dataForThisPlot, None))
        if args.graphxaxis == 'problemsize':
            xdata = [int(row.x) * int(row.y) * int(row.z) * int(row.batchsize) for row in dataForThisPlot]
        else:
            xdata = [getattr(row, args.graphxaxis) for row in dataForThisPlot]
        ydata = [getattr(row, args.graphyaxis) for row in dataForThisPlot]
        dataForAllPlots.append(DataForOnePlot(plot,xdata,ydata))
    """
    assemble labels for the graph or use the user-specified ones
    """
    if args.graphtitle:
        # use the user selection
        title = args.graphtitle
    else:
        # autogen a lovely title
        title = 'Performance vs. ' + args.graphxaxis.capitalize()
    if args.xaxislabel:
        # use the user selection
        xaxislabel = args.xaxislabel
    else:
        # autogen a lovely x-axis label
        if args.graphxaxis == 'cachesize':
            units = '(bytes)'
        else:
            units = '(datapoints)'
        xaxislabel = args.graphxaxis + ' ' + units
    if args.yaxislabel:
        # use the user selection
        yaxislabel = args.yaxislabel
    else:
        # autogen a lovely y-axis label
        if args.graphyaxis == 'gflops':
            units = 'GFLOPS'
        yaxislabel = 'Performance (' + units + ')'
    """
    display a pretty graph
    """
    colors = ['k','y','m','c','b','g','r']
    def getkey(item):
        return str(item.label)
    # NOTE(review): the in-place sort below is redundant -- the loop re-sorts
    # via sorted() with the same key. Harmless, but one of the two could go.
    dataForAllPlots.sort(key=getkey)
    #for thisPlot in sorted(dataForAllPlots,key=getkey):
    for thisPlot in sorted(dataForAllPlots,key=getkey):
        getattr(pylab, plottype)(thisPlot.xdata, thisPlot.ydata, '{}.-'.format(colors.pop()), label=thisPlot.label, **plotkwargs)
    if len(dataForAllPlots) > 1:
        pylab.legend(loc='best')
    pylab.title(title)
    pylab.xlabel(xaxislabel)
    pylab.ylabel(yaxislabel)
    pylab.grid(True)
    if args.outputFilename == None:
        # if no pdf output is requested, spit the graph to the screen . . .
        pylab.show()
    else:
        # . . . otherwise, gimme gimme pdf
        #pdf = PdfPages(args.outputFilename)
        #pdf.savefig()
        #pdf.close()
        pylab.savefig(args.outputFilename,dpi=(1024/8))
######## "main" program begins #####
"""
define and parse parameters
"""
# Attribute names a GraphPoint row exposes; used for CLI choices and grouping.
xaxisvalues = ['x','y','z','batchsize','problemsize']
yaxisvalues = ['gflops']
plotvalues = ['device', 'precision', 'label']
parser = argparse.ArgumentParser(description='Plot performance of the clfft\
    library. clfft.plotPerformance.py reads in data tables from clfft.\
    measurePerformance.py and plots their values')
# Mutually exclusive group with a single member: kept for a possible future
# database-input alternative (see the error message at the bottom).
fileOrDb = parser.add_mutually_exclusive_group(required=True)
fileOrDb.add_argument('-d', '--datafile',
    dest='datafile', action='append', default=None, required=False,
    help='indicate a file to use as input. must be in the format output by\
    clfft.measurePerformance.py. may be used multiple times to indicate\
    multiple input files. e.g., -d cypressOutput.txt -d caymanOutput.txt')
parser.add_argument('-x', '--x_axis',
    dest='graphxaxis', default=None, choices=xaxisvalues, required=True,
    help='indicate which value will be represented on the x axis. problemsize\
    is defined as x*y*z*batchsize')
parser.add_argument('-y', '--y_axis',
    dest='graphyaxis', default='gflops', choices=yaxisvalues,
    help='indicate which value will be represented on the y axis')
parser.add_argument('--plot',
    dest='plot', default=None, choices=plotvalues,
    help='indicate which of {} should be used to differentiate multiple plots.\
    this will be chosen automatically if not specified'.format(plotvalues))
parser.add_argument('--title',
    dest='graphtitle', default=None,
    help='the desired title for the graph generated by this execution. if\
    GRAPHTITLE contains any spaces, it must be entered in \"double quotes\".\
    if this option is not specified, the title will be autogenerated')
parser.add_argument('--x_axis_label',
    dest='xaxislabel', default=None,
    help='the desired label for the graph\'s x-axis. if XAXISLABEL contains\
    any spaces, it must be entered in \"double quotes\". if this option\
    is not specified, the x-axis label will be autogenerated')
parser.add_argument('--x_axis_scale',
    dest='xaxisscale', default=None, choices=['linear','log2','log10'],
    help='the desired scale for the graph\'s x-axis. if nothing is specified,\
    it will be selected automatically')
parser.add_argument('--y_axis_scale',
    dest='yaxisscale', default=None, choices=['linear','log2','log10'],
    help='the desired scale for the graph\'s y-axis. if nothing is specified,\
    linear will be selected')
parser.add_argument('--y_axis_label',
    dest='yaxislabel', default=None,
    help='the desired label for the graph\'s y-axis. if YAXISLABEL contains any\
    spaces, it must be entered in \"double quotes\". if this option is not\
    specified, the y-axis label will be autogenerated')
parser.add_argument('--outputfile',
    dest='outputFilename', default=None,
    help='name of the file to output graphs. Supported formats: emf, eps, pdf, png, ps, raw, rgba, svg, svgz.')
args = parser.parse_args()
if args.datafile != None:
    plotFromDataFile()
else:
    print "Atleast specify if you want to use text files or database for plotting graphs. Use -h or --help option for more details"
    quit()
| 12,413 | 36.504532 | 192 | py |
clFFT | clFFT-master/src/scripts/perf/errorHandler.py | # ########################################################################
# Copyright 2013 Advanced Micro Devices, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ########################################################################
#---------------------------------File Note------------------------------------
#Date: 27 January 2012
#This file defines all the error code and error handler mechanism
#--------------------------------Global Variables------------------------------
# Numeric error codes; every code must have a matching entry in errorTable
# below (ApplicationException indexes the table with these values).
UINS_CAT = 100
WIN_REG_SEARCH_FAIL = 101
UNIMPL_APP = 200
SYS_ERR = 300
TIME_OUT = 400
DIM_INCO_FILE_FMT = 500 #incorrect file format for dimension
DIM_FILE_VAL_INCO = 501 #Value coming from dimension file is incorrect
#__errorTable : Defines all the errors in the system. Add a new error code and
#               error message here
"""Error table is defined as private to this module"""
# Maps each error code to its human-readable message.
errorTable = {
    UINS_CAT: 'Application is not able to find the installed catalyst',
    WIN_REG_SEARCH_FAIL: 'Windows Registry search for catalysts version is unsuccessful',
    UNIMPL_APP: 'Unimplemented Application requirement',
    SYS_ERR: 'System error occurred - Please check the source code',
    TIME_OUT: 'Operation is timed out',
    DIM_INCO_FILE_FMT: 'incorrect file format for dimension - Not able to find dimension',
    DIM_FILE_VAL_INCO: 'Value coming from dimension file is incorrect'
}
#--------------------------------Class Definitions-----------------------------
class TimeoutException(Exception):
    """Marker exception for timed-out operations (not raised within this module)."""
    pass
"""Base class for handling all the application generated exception"""
class ApplicationException(Exception):
    """Application-level error tying a source file name to a code from errorTable.

    The textual detail has the form '<fileName>-<errno>-<message>'; it is both
    stored (repr-wrapped) in self.message and returned by __str__().
    """

    def __init__(self, fileName, errno, msg = ""):
        self.fileName = fileName
        self.errno = errno
        # Look up the canonical message for the code and append any extra text.
        self.mess = errorTable[errno] + msg
        self.message = 'Application ERROR:' + repr(self._detail())

    def _detail(self):
        # Shared formatting for self.message and __str__.
        return self.fileName + '-' + str(self.errno) + '-' + self.mess

    def __str__(self):
        return repr(self._detail())
#--------------------------------Global Function-------------------------------
if __name__ == '__main__':
    # Smoke test: raise and swallow an ApplicationException to exercise the
    # error-table plumbing when this module is run directly.
    #print errorTable
    try:
        raise ApplicationException('errorHandler', SYS_ERR)
    except:
        # Bare except is deliberate in this demo path: just report and fall through.
        print 'Generic exception'
| 2,824 | 39.942029 | 100 | py |
clFFT | clFFT-master/src/scripts/perf/measurePerformance.py | # ########################################################################
# Copyright 2013 Advanced Micro Devices, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ########################################################################
import sys
import argparse
import subprocess
import itertools
import re#gex
import os
import math
from threading import Timer, Thread
import thread, time
from platform import system
import numpy as np
from datetime import datetime
import errorHandler
from fftPerformanceTesting import *
from performanceUtility import timeout, log, generate235Radices
IAM = 'FFT'
TIMOUT_VAL = 900 #In seconds
# Legal values for the corresponding CLI options, referenced in help strings.
devicevalues = ['g', 'c']
layoutvalues = ['cp', 'ci']
placevalues = ['in', 'out']
precisionvalues = ['single', 'double']
libraryvalues = ['clFFT','cuFFT']
# Shorthand range spec expanded when the user passes '--batchsize pow10'.
pow10 = '1-9,10-90:10,100-900:100,1000-9000:1000,10000-90000:10000,100000-900000:100000,1000000-9000000:1000000'
parser = argparse.ArgumentParser(description='Measure performance of the clFFT library')
parser.add_argument('--device',
    dest='device', default='g',
    help='device(s) to run on; may be a comma-delimited list. choices are ' + str(devicevalues) + '. (default gpu)')
parser.add_argument('-b', '--batchsize',
    dest='batchSize', default='1',
    help='number of FFTs to perform with one invocation of the client. the special value \'adapt\' may be used to adjust the batch size on a per-transform basis to the maximum problem size possible on the device. (default 1)'.format(pow10))
parser.add_argument('-a', '--adaptivemax',
    dest='constProbSize', default='-1',
    help='Max problem size that you want to maintain across the invocations of client with different lengths. This is adaptive and adjusts itself automtically.'.format(pow10))
parser.add_argument('-x', '--lengthx',
    dest='lengthx', default='1',
    help='length(s) of x to test; must be factors of 1, 2, 3, or 5 with clFFT; may be a range or a comma-delimited list. e.g., 16-128 or 1200 or 16,2048-32768 (default 1)')
parser.add_argument('-y', '--lengthy',
    dest='lengthy', default='1',
    help='length(s) of y to test; must be factors of 1, 2, 3, or 5 with clFFT; may be a range or a comma-delimited list. e.g., 16-128 or 1200 or 16,32768 (default 1)')
parser.add_argument('-z', '--lengthz',
    dest='lengthz', default='1',
    help='length(s) of z to test; must be factors of 1, 2, 3, or 5 with clFFT; may be a range or a comma-delimited list. e.g., 16-128 or 1200 or 16,32768 (default 1)')
parser.add_argument('-reps',
    dest='reps', default='10',
    help='Number of repetitions (default 10)')
parser.add_argument('-prime_factor', '--prime_factor',
    dest='prime_factor', default='2',
    help='only test the prime factors within the specified range of lengthx/y/z. Select from 2,3,5, and 7. Example: -prime_factor 2,3')
parser.add_argument('-test_count', '--test_count',
    dest='test_count', default='100',
    help='Number of tests to perform')
parser.add_argument('--problemsize',
    dest='problemsize', default=None)
#   help='additional problems of a set size. may be used in addition to lengthx/y/z. each indicated problem size will be added to the list of FFTs to perform. should be entered in AxBxC:D format. A, B, and C indicate the sizes of the X, Y, and Z dimensions (respectively). D is the batch size. All values except the length of X are optional. may enter multiple in a comma-delimited list. e.g., 2x2x2:32768 or 256x256:100,512x512:256')
parser.add_argument('-i', '--inputlayout',
    dest='inputlayout', default='1',
    help=' 1. interleaved (default) 2. planar 3. hermitian interleaved 4. hermitian planar 5. real' )
parser.add_argument('-o', '--outputlayout',
    dest='outputlayout', default='1',
    help=' 1. interleaved (default) 2. planar 3. hermitian interleaved 4. hermitian planar 5. real' )
parser.add_argument('--placeness',
    dest='placeness', default='in',
    help='Choices are ' + str(placevalues) + '. in = in place, out = out of place (default in)')
parser.add_argument('-r', '--precision',
    dest='precision', default='single',
    help='Choices are ' + str(precisionvalues) + '. (default single)')
parser.add_argument('--library',
    dest='library', default='clFFT', choices=libraryvalues,
    help='indicates the library to use for testing on this run')
parser.add_argument('--label',
    dest='label', default=None,
    help='a label to be associated with all transforms performed in this run. if LABEL includes any spaces, it must be in \"double quotes\". note that the label is not saved to an .ini file. e.g., --label cayman may indicate that a test was performed on a cayman card or --label \"Windows 32\" may indicate that the test was performed on Windows 32')
#parser.add_argument('--createini',
#   dest='createIniFilename', default=None,
#   help='create an .ini file with the given name that saves the other parameters given at the command line, then quit. e.g., \'measureperformance.py -x 2048 --createini my_favorite_setup.ini\' will create an .ini file that will save the configuration for a 2048-datapoint 1D FFT.')
parser.add_argument('--ini',
    dest='iniFilename', default=None,
    help='use the parameters in the named .ini file instead of the command line parameters.')
parser.add_argument('--tablefile',
    dest='tableOutputFilename', default=None,
    help='save the results to a plaintext table with the file name indicated. this can be used with plotPerformance.py to generate graphs of the data (default: table prints to screen)')
parser.add_argument('--prefix',
    dest='prefix', default='./',
    help='Path where the library client is located (default current directory)')
args = parser.parse_args()
label = str(args.label)
# NOTE(review): 'mkdir perfLog' via the shell is POSIX/cmd-dependent and fails
# silently if the directory exists; os.makedirs would be the portable choice.
subprocess.call('mkdir perfLog', shell = True)
logfile = os.path.join('perfLog', (label+'-'+'fftMeasurePerfLog.txt'))
def printLog(txt):
    # Echo to stdout and append to the per-label logfile (log comes from
    # performanceUtility).
    print txt
    log(logfile, txt)
printLog("=========================MEASURE PERFORMANCE START===========================")
printLog("Process id of Measure Performance:"+str(os.getpid()))
# Handle to the currently running client subprocess; shared with the
# watchdog logic in checkTimeOutPut() below.
currCommandProcess = None
printLog('Executing measure performance for label: '+str(label))
#This function is defunct now
# NOTE(review): the trailing comment on the decorator line disagrees with the
# argument actually passed (1); kept as-is since the function is unused.
@timeout(1, "fileName") # timeout is 5 minutes, 5*60 = 300 secs
def checkTimeOutPut2(args):
    """DEFUNCT: run *args* as a subprocess and return its stdout.

    Superseded by checkTimeOutPut() below. Raises
    subprocess.CalledProcessError when the child produced no stdout.
    """
    global currCommandProcess
    #ret = subprocess.check_output(args, stderr=subprocess.STDOUT)
    #return ret
    currCommandProcess = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    printLog("Curr Command Process id = "+str(currCommandProcess.pid))
    ret = currCommandProcess.communicate()
    if(ret[0] == None or ret[0] == ''):
        # Empty stdout is treated as failure; stderr is attached to the error.
        errCode = currCommandProcess.poll()
        raise subprocess.CalledProcessError(errCode, args, output=ret[1])
    return ret[0]
#Spawns a separate thread to execute the library command and wait for that thread to complete
#This wait is of 900 seconds (15 minutes). If still the thread is alive then we kill the thread
def checkTimeOutPut(args):
    """Run *args* through the shell with a TIMOUT_VAL-second watchdog.

    Returns the child's stdout. Raises errorHandler.ApplicationException on
    timeout and subprocess.CalledProcessError when stdout is empty. The
    worker thread and this function communicate through the module globals
    currCommandProcess, stdo and stde.
    """
    t = None
    global currCommandProcess
    global stde
    global stdo
    stde = None
    stdo = None
    def executeCommand():
        # Runs in the worker thread: blocks on communicate() and publishes
        # the child's output via the module globals.
        global currCommandProcess
        global stdo
        global stde
        try:
            stdo, stde = currCommandProcess.communicate()
            printLog('stdout:\n'+str(stdo))
            printLog('stderr:\n'+str(stde))
        except:
            printLog("ERROR: UNKNOWN Exception - +checkWinTimeOutPut()::executeCommand()")
    currCommandProcess = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True)
    thread = Thread(target=executeCommand)
    thread.start()
    thread.join(TIMOUT_VAL) #wait for the thread to complete
    if thread.is_alive():
        # Still running after the grace period: kill the child and bail out.
        printLog('ERROR: Killing the process - terminating thread because it is taking too much of time to execute')
        currCommandProcess.kill()
        printLog('ERROR: Timed out exception')
        raise errorHandler.ApplicationException(__file__, errorHandler.TIME_OUT)
    if stdo == "" or stdo==None:
        errCode = currCommandProcess.poll()
        printLog('ERROR: @@@@@Raising Called processor exception')
        raise subprocess.CalledProcessError(errCode, args, output=stde)
    return stdo
# don't try to create and use an .ini file at the same time (it will open a portal through which demons will emerge)
#if args.iniFilename and args.createIniFilename:
# printLog('ERROR: --ini and --createini are mutually exclusive. Please choose only one.')
# quit()
#read in .ini parameters if --ini is used
#if args.iniFilename != None:
# if not os.path.isfile(args.iniFilename):
# printLog("No file with the name \'{}\' exists. Please indicate another filename.".format(args.iniFilename))
# quit()
#
# ini = open(args.iniFilename, 'r')
# iniContents = ini.read()
# iniContents = iniContents.split(';')
# for i in range(0,len(iniContents)):
# line = iniContents.pop()
# line = line.partition(' ')
# parameter = line[0]
# value = line[2]
# value = value.replace('\'','').replace('[','').replace(']','').replace(' ','')
# print"value= ",value
#
# if parameter == 'batchSize':
# args.batchSize = value
# elif parameter == 'constProbSize':
# args.constProbSize = value
# elif parameter == 'lengthx':
# args.lengthx = value
# elif parameter == 'reps':
# args.reps = value
# elif parameter == 'prime_factor':
# args.prime_factor = value
# elif parameter == 'test_count':
# args.test_count = value
# elif parameter == 'lengthy':
# args.lengthy = value
# elif parameter == 'lengthz':
# args.lengthz = value
# elif parameter == 'problemsize':
# args.problemsize = value
# elif parameter == 'device':
# args.device = value
# elif parameter == 'inputlayout':
# args.inputlayout = value
# elif parameter == 'outputlayout':
# args.outputlayout = value
# elif parameter == 'placeness':
# args.placeness = value
# elif parameter == 'precision':
# args.precision = value
# else:
# printLog('{} corrupted. Please re-create a .ini file with the --createini flag.'.format(args.iniFilename))
# #quit()
#create ini file if requested
#if args.createIniFilename != None:
# printLog('Creating Ini files')
# if os.path.isfile(args.createIniFilename):
# printLog('A file with the name \'{}\' already exists. Please delete the file or choose another name.'.format(args.createIniFilename))
# quit()
# printLog('Creating Ini file:'+args.createIniFilename+'\n')
# ini = open(args.createIniFilename, 'w')
# ini.write('batchSize {} ;'.format(args.batchSize))
# ini.write('constProbSize {} ;'.format(args.constProbSize))
# ini.write('lengthx {} ;'.format(args.lengthx))
# ini.write('lengthy {} ;'.format(args.lengthy))
# ini.write('lengthz {} ;'.format(args.lengthz))
# ini.write('prime_factor {} ;'.format(args.prime_factor))
# ini.write('test_count {} ;'.format(args.test_count))
# ini.write('reps {} ;'.format(args.reps))
# ini.write('problemsize {} ;'.format(args.problemsize))
# ini.write('device {} ;'.format(args.device))
# ini.write('inputlayout {} ;'.format(args.inputlayout))
# ini.write('outputlayout {} ;'.format(args.outputlayout))
# ini.write('placeness {} ;'.format(args.placeness))
# ini.write('precision {} ;'.format(args.precision))
# printLog('Created Ini file:'+args.createIniFilename+'\n')
# printLog("=========================MEASURE PERFORMANCE START===========================\n")
# quit()
#
#turn pow10 into its range list
if args.batchSize.count('pow10'):
    args.batchSize = pow10
#split up comma-delimited lists
# After this section every multi-valued option is a list of strings; the
# Range class below expands the range syntax into integers.
args.batchSize = args.batchSize.split(',')
args.constProbSize = int(args.constProbSize.split(',')[0])
args.device = args.device.split(',')
args.lengthx = args.lengthx.split(',')
args.lengthy = args.lengthy.split(',')
args.lengthz = args.lengthz.split(',')
args.prime_factor = args.prime_factor.split(',')
if args.problemsize:
    args.problemsize = args.problemsize.split(',')
args.inputlayout = args.inputlayout.split(',')
args.outputlayout = args.outputlayout.split(',')
args.placeness = args.placeness.split(',')
args.precision = args.precision.split(',')
printLog('Executing for label: '+str(args.label))
#check parameters for sanity
# batchSize of 'max' must not be in a list (does not get on well with others)
#if args.batchSize.count('max') and len(args.batchSize) > 1:
if ( args.batchSize.count('max') or args.batchSize.count('adapt') )and len(args.batchSize) > 1:
    printLog('ERROR: --batchsize max must not be in a comma delimited list')
    quit()
# in case of an in-place transform, input and output layouts must be the same (otherwise: *boom*)
#for n in args.placeness:
#   if n == 'in' or n == 'inplace':
#       if len(args.inputlayout) > 1 or len(args.outputlayout) > 1 or args.inputlayout[0] != args.outputlayout[0]:
#           printLog('ERROR: if transformation is in-place, input and output layouts must match')
#           quit()
# check for valid values in precision
for n in args.precision:
    if n != 'single' and n != 'double':
        printLog('ERROR: invalid value for precision')
        quit()
def isPrime(n):
    """Return True when abs(n) is prime, by trial division up to sqrt(n).

    Matches the historical behavior of this script: 0 and 1 are reported
    as prime (no divisor in [2, sqrt(n)] exists for them).
    """
    value = abs(n)
    for divisor in range(2, int(math.sqrt(value)) + 1):
        if value % divisor == 0:
            return False
    return True
def findFactors(number):
    """Return every prime divisor of *number*, ascending.

    Note: isPrime(1) evaluates True in this script, so 1 is always included;
    e.g. findFactors(20) -> [1, 2, 5].
    """
    return [candidate for candidate in range(1, number + 1)
            if isPrime(candidate) and number % candidate == 0]
#Type : Function
#Input: num, a number which we need to factorize
#Return Type: list
#Details: This function returns only the prime factors on an input number
# e.g: input: 20, returns: [2,2,5]
# input: 32, returns: [2,2,2,2,2]
def factor(num):
    """Return the prime factorization of *num* as a list, largest factor first.

    Examples: factor(20) -> [5, 2, 2]; factor(60) -> [5, 3, 2, 2];
    factor(1) -> [1] (kept for backward compatibility).

    Fixed: the previous recursive version divided with '/', which is true
    division under Python 3 and leaks floats into the result; floor division
    keeps everything integral. The iterative form also avoids deep recursion
    for numbers with many factors, and i*i <= n sidesteps float sqrt rounding.
    """
    if num == 1:
        return [1]
    remaining = num
    ascending = []
    candidate = 2
    while candidate * candidate <= remaining:
        if remaining % candidate == 0:
            ascending.append(candidate)
            remaining //= candidate
        else:
            candidate += 1
    # Whatever is left has no divisor <= its sqrt, so it is prime.
    ascending.append(remaining)
    # The original recursion appended small factors last: largest-first order.
    ascending.reverse()
    return ascending
def validateFactors(flist):
    """Return True when every element of *flist* is one of 1, 2, 3 or 5.

    Mirrors the historical checks exactly: a list longer than four entries is
    rejected outright, and duplicates within the length limit are accepted.
    """
    allowed = [1, 2, 3, 5]
    if flist == allowed:
        return True
    if len(flist) > len(allowed):
        return False
    return all(element in allowed for element in flist)
#Type : Function
#Input: num, a number which we need to validate for 1,2,3 or 5 factors
#Return Type: boolean
#Details: This function validates an input number for its prime factors
# If factors has number other than 1,2,3 or 5 then return false else return true
# e.g: input: 20, returns: True
# input: 28, returns: False
def validate_number_for_1235(num):
    """Return True when *num*'s prime factorization uses only 1, 2, 3 and 5.

    Examples: validate_number_for_1235(20) -> True (factors 2,2,5);
    validate_number_for_1235(28) -> False (factor 7). Zero is accepted
    unconditionally.
    """
    if num == 0:
        return True
    # Subset test is equivalent to the original union-and-compare trick.
    return set(factor(num)) <= {1, 2, 3, 5}
def getValidNumbersInRange(rlist):
    """Filter *rlist* down to numbers whose prime divisors are all in {1,2,3,5}."""
    return [value for value in rlist if validateFactors(findFactors(value))]
def get_next_num_with_1235_factors(start):
    """Return the smallest number above *start* factoring into only 1/2/3/5."""
    candidate = start + 1
    while not validateFactors(findFactors(candidate)):
        candidate += 1
    return candidate
def check_number_for_1235_factors(number):
    """Return True when *number* factors into only 1, 2, 3 and 5.

    On failure, logs an error naming the offending number and returns False.

    Fixed: the error message previously contained an unfilled '{0}'
    placeholder that was printed literally; it is now formatted with the
    rejected number.
    """
    factors = findFactors(number)
    if not validateFactors(factors):
        printLog("ERROR: {0} must have only 1,2,3,5 as factors".format(number))
        return False
    return True
def check_for_1235_factors(values, option):
    # Validate every number appearing in a list of range/comma specs (e.g.
    # '16-128' or '16,32'); *option* is only used in the error message.
    # Exits the process on the first invalid value.
    #print 'values: ', values
    for n in values:
        # Treat '-' like ',' so both endpoints of a range get checked.
        for m in n.replace('-',',').split(','):
            if not validate_number_for_1235(int(m)):
                print 'ERROR: --{0} must specify number with only 1,2,3,5 as factors'.format(option)
                quit()
            #print 'Valid number for :',option,':', m
# clFFT only supports radix-2/3/5 lengths; reject anything else up front.
if args.library == 'clFFT':
    check_for_1235_factors(args.lengthx, 'lengthx')
    check_for_1235_factors(args.lengthy, 'lengthy')
    check_for_1235_factors(args.lengthz, 'lengthz')
# Bail out early if the benchmark client binary is not where --prefix says.
if not os.path.isfile(args.prefix+executable(args.library)):
    printLog("ERROR: Could not find client named {0}".format(executable(args.library)))
    quit()
def get235RadicesNumberInRange(minimum, maximum):
    """Return all 2/3/5-radix lengths between *minimum* and *maximum*, inclusive.

    Both bounds are looked up with list.index(), so they must themselves be
    values produced by generate235Radices (presumably guaranteed by the Range
    expansion callers -- a non-radix bound would raise ValueError; verify).
    """
    if minimum == 0 and maximum == 0:
        return [0]
    radices = generate235Radices(maximum)
    lo = radices.index(minimum)
    hi = radices.index(maximum)
    return radices[lo:hi + 1]
#expand ranges
class Range:
    """Expand CLI range specs into a sorted, de-duplicated list in self.expanded.

    Accepted forms per list element: 'N', 'A-B', 'A-B:+S' (additive step),
    'A-B:xS' (multiplicative step), 'A-B:l' (step through numbers whose only
    prime factors are 1/2/3/5), plus the literals 'max' and 'adapt', which
    pass through unchanged.

    NOTE(review): mixing 'max'/'adapt' strings with ints relies on Python 2's
    ability to sort heterogeneous lists; under Python 3 the final sort() would
    raise TypeError.
    """
    def __init__(self, ranges, defaultStep='+1'):
        self.expanded = []
        for thisRange in ranges:
            if thisRange != 'max' and thisRange != 'adapt' :
                # Split off the step suffix (after ':'), defaulting as given.
                if thisRange.count(':'):
                    self._stepAmount = thisRange.split(':')[1]
                else:
                    self._stepAmount = defaultStep
                thisRange = thisRange.split(':')[0]
                # Decode the step: 'xS' multiplies, 'l' walks 2/3/5-radix
                # numbers, anything else is an additive '+S'.
                if self._stepAmount.count('x'):
                    self._stepper = '_mult'
                    self._stepAmount = self._stepAmount.lstrip('+x')
                    self._stepAmount = int(self._stepAmount)
                elif self._stepAmount.count('l'):
                    self._stepper = '_next_num_with_1235_factor'
                    self._stepAmount = 0
                else:
                    self._stepper = '_add'
                    self._stepAmount = self._stepAmount.lstrip('+x')
                    self._stepAmount = int(self._stepAmount)
                # 'A-B' gives the endpoints; a lone 'A' means begin == end.
                if thisRange.count('-'):
                    self.begin = int(thisRange.split('-')[0])
                    self.end = int(thisRange.split('-')[1])
                else:
                    self.begin = int(thisRange.split('-')[0])
                    self.end = int(thisRange.split('-')[0])
                self.current = self.begin
            # _thisRangeExpanded = []
            if thisRange == 'max':
                self.expanded = self.expanded + ['max']
            elif thisRange == 'adapt':
                self.expanded = self.expanded + ['adapt']
            elif self.begin == 0 and self._stepper == '_mult':
                # A multiplicative step from 0 would loop forever; emit [0].
                self.expanded = self.expanded + [0]
            else:
                if self._stepper == '_next_num_with_1235_factor':
                    self.expanded = self.expanded + get235RadicesNumberInRange(self.current, self.end)
                else:
                    while self.current <= self.end:
                        self.expanded = self.expanded + [self.current]
                        self._step()
        # now we want to uniquify and sort the expanded range
        self.expanded = list(set(self.expanded))
        self.expanded.sort()
    # advance current value to next
    def _step(self):
        # Dispatch to the stepper chosen while parsing the step suffix.
        getattr(self, self._stepper)()
    def _mult(self):
        self.current = self.current * self._stepAmount
    def _add(self):
        self.current = self.current + self._stepAmount
    def _next_num_with_1235_factor(self):
        self.current = get_next_num_with_1235_factors(self.current)
# Expand the user-supplied specs into concrete integer lists. Lengths use the
# 'l' default step, which walks through 2/3/5-radix numbers only.
args.batchSize = Range(args.batchSize).expanded
args.lengthx = Range(args.lengthx, 'l').expanded
args.lengthy = Range(args.lengthy, 'l').expanded
args.lengthz = Range(args.lengthz, 'l').expanded
def create_prime_factors(args,input_list):
    """Build candidate transform lengths inside [min(input_list), max(input_list)].

    For each base named in args.prime_factor (strings '2','3','5','7'), the
    powers of that base up to the range maximum are generated; every product
    of one power per base that falls inside the range is kept. The sorted
    result is truncated to args.test_count entries.
    """
    hi = max(input_list)
    lo = min(input_list)

    def _powers_of(base):
        # [1] when the base is not selected; otherwise 1, base, base^2, ...
        # using the same float-log bound as the historical implementation.
        if str(base) not in args.prime_factor:
            return [1]
        count = int(math.floor(math.log(hi, base) + 1))
        return [1] + [base ** exponent for exponent in range(1, count)]

    candidates = []
    for p2, p3, p5, p7 in itertools.product(
            _powers_of(2), _powers_of(3), _powers_of(5), _powers_of(7)):
        value = p2 * p3 * p5 * p7
        if lo <= value <= hi:
            candidates.append(value)
    candidates.sort()
    return candidates[:int(args.test_count)]
# Narrow each length list to the primes selected with --prime_factor.
args.lengthx=create_prime_factors(args,args.lengthx)
args.lengthy=create_prime_factors(args,args.lengthy)
args.lengthz=create_prime_factors(args,args.lengthz)
#expand problemsizes ('XxYxZ:batch')
#print "args.problemsize--1-->", args.problemsize
# Each spec becomes [[X, Y, Z...], batch] with both parts still strings.
if args.problemsize and args.problemsize[0] != 'None':
    i = 0
    while i < len(args.problemsize):
        args.problemsize[i] = args.problemsize[i].split(':')
        args.problemsize[i][0] = args.problemsize[i][0].split('x')
        i = i+1
#create the problem size combinations for each run of the client
# A: This part creats a product of all possible combinations. Too many cases in 2/3D
#problem_size_combinations = itertools.product(args.lengthx, args.lengthy, args.lengthz, args.batchSize)
#problem_size_combinations = list(itertools.islice(problem_size_combinations, None))
# Pair the length lists element-wise instead of taking a full cross product;
# 1-valued y/z dimensions are broadcast to the length of the x list.
if args.lengthy[0]==1:
    args.lengthy=[1]*len(args.lengthx)
if args.lengthz[0]==1:
    args.lengthz=[1]*len(args.lengthx)
dummy=[args.batchSize[0]]*len(args.lengthx)
problem_size_combinations=zip(args.lengthx,args.lengthy,args.lengthz,dummy)
#print "args.problemsize--2-->", args.problemsize
#add manually entered problem sizes to the list of FFTs to crank out
# Convert each parsed --problemsize spec ([[X, Y, Z], batch]) into (x, y, z,
# batch) tuples; missing Y/Z/batch components default to 1.
manual_test_combinations = []
if args.problemsize and args.problemsize[0] != 'None':
    for n in args.problemsize:
        x = []
        y = []
        z = []
        batch = []
        x.append(int(n[0][0]))
        if len(n[0]) >= 2:
            y.append(int(n[0][1]))
        else:
            y.append(1)
        if len(n[0]) >= 3:
            z.append(int(n[0][2]))
        else:
            z.append(1)
        if len(n) > 1:
            batch.append(int(n[1]))
        else:
            batch.append(1)
        # Single-element lists, so the product yields exactly one tuple.
        combos = itertools.product(x, y, z, batch)
        combos = list(itertools.islice(combos, None))
        # NOTE: the loop variable 'n' below shadows the outer spec variable.
        for n in combos:
            manual_test_combinations.append(n)
# manually entered problem sizes should not be plotted (for now). they may still be output in a table if requested
problem_size_combinations = problem_size_combinations + manual_test_combinations
#create final list of all transformations (with problem sizes and transform properties)
test_combinations = itertools.product(problem_size_combinations, args.device, args.inputlayout, args.outputlayout, args.placeness, args.precision)
test_combinations = list(itertools.islice(test_combinations, None))
test_combinations = [TestCombination(params[0][0], params[0][1], params[0][2], params[0][3], params[1], params[2], params[3], params[4], params[5], args.label) for params in test_combinations]
if args.iniFilename != None:
array=np.genfromtxt(args.iniFilename, names=True, delimiter=',', dtype=None)
test_combinations = [TestCombination(params[0],params[1], params[2], params[3], params[4],params[5],params[6],params[7],params[8],args.label) for params in array]
#print("lenghtx= ",test_combinations[0].x)
#print("lenghty= ",test_combinations[0].y)
#print("lenghtz= ",test_combinations[0].z)
#print("placeness= ",test_combinations[0].placeness)
#turn each test combination into a command, run the command, and then stash the gflops
result = [] # this is where we'll store the results for the table
#open output file and write the header
if args.tableOutputFilename == None:
if args.library == 'cuFFT':
args.tableOutputFilename = 'cuFFT_' + 'x_'+ str(args.lengthx[0]) + '_y_'+str(args.lengthy[0])+'_z_'+str(args.lengthz[0])+'_'+str(args.precision[0]) +'_'+datetime.now().isoformat().replace(':','.') + '.txt'
elif args.library=='clFFT':
args.tableOutputFilename = 'clFFT_' + 'x_'+ str(args.lengthx[0]) + '_y_'+str(args.lengthy[0])+'_z_'+str(args.lengthz[0])+'_'+str(args.precision[0])+ '_'+datetime.now().isoformat().replace(':','.') + '.txt'
else:
if os.path.isfile(args.tableOutputFilename):
oldname = args.tableOutputFilename
args.tableOutputFilename = args.tableOutputFilename + datetime.now().isoformat().replace(':','.')
message = 'A file with the name ' + oldname + ' already exists. Changing filename to ' + args.tableOutputFilename
printLog(message)
printLog('table header---->'+ str(tableHeader))
table = open(args.tableOutputFilename, 'w')
table.write(tableHeader + '\n')
table.flush()
if args.constProbSize == -1:
args.constProbSize = maxBatchSize(1, 1, 1, args.inputlayout[0], args.precision[0], executable(args.library), '-' + args.device[0])
args.constProbSize = int(args.constProbSize)
printLog('Total combinations = '+str(len(test_combinations)))
vi = 0
for params in test_combinations:
if vi>=int(args.test_count):
break
vi = vi+1
printLog("")
printLog('preparing command: '+ str(vi))
device = params.device
lengthx = str(params.x)
lengthy = str(params.y)
lengthz = str(params.z)
inlayout=str(params.inlayout)
outlayout=str(params.outlayout)
prefix=str(args.prefix)
if params.batchsize == 'max':
batchSize = maxBatchSize(lengthx, lengthy, lengthz, params.inlayout, params.precision, executable(args.library), '-' + device)
elif params.batchsize == 'adapt':
batchSize = str(args.constProbSize/(int(lengthx)*int(lengthy)*int(lengthz)))
else:
batchSize = str(params.batchsize)
if params.placeness == 'inplace' or params.placeness == 'in':
placeness = ''
elif params.placeness == 'outofplace' or params.placeness == 'out':
placeness = '--outPlace'
else:
printLog('ERROR: invalid value for placeness when assembling client command')
if params.precision == 'single':
precision = ''
elif params.precision == 'double':
precision = '--double'
else:
printLog('ERROR: invalid value for precision when assembling client command')
#set up arguments here
if args.library == 'clFFT':
arguments = [prefix+ executable(args.library),
'-' + device,
'-x', lengthx,
'-y', lengthy,
'-z', lengthz,
'--batchSize', batchSize,
'--inLayout', inlayout,
'--outLayout',outlayout,
placeness,
precision,
'-p', args.reps]
elif args.library == 'cuFFT':
if inlayout[0]=='1' and outlayout[0]=='1':
cuFFT_type='1'
elif inlayout[0]=='3' and outlayout[0]=='5':
cuFFT_type='3'
elif inlayout[0]=='5' and outlayout[0]=='3':
cuFFT_type='2'
else:
print"Wrong input/outputlayout. Only C2C/R2C/C2R are supported for Cuda"
exit()
arguments=[prefix+executable(args.library),
'-x', lengthx,
'-y', lengthy,
'-z', lengthz,
'-b', batchSize,
'-p',args.reps,
'-d',str(int(args.precision[0]=='double')),
'-type',cuFFT_type]
writeline = True
try:
arguments=' '.join(arguments)
printLog('Executing Command: '+str(arguments))
output = checkTimeOutPut(arguments)
output = output.split(os.linesep);
printLog('Execution Successfull---------------\n')
except errorHandler.ApplicationException as ae:
writeline = False
printLog('ERROR: Command is taking too much of time '+ae.message+'\n'+'Command: \n'+str(arguments))
continue
except subprocess.CalledProcessError as clientCrash:
print 'Command execution failure--->'
if clientCrash.output.count('CLFFT_INVALID_BUFFER_SIZE'):
writeline = False
printLog('Omitting line from table - problem is too large')
else:
writeline = False
printLog('ERROR: client crash. Please report the following error message (with \'CLFFT_*\' error code, if given, and the parameters used to invoke measurePerformance.py) \n'+clientCrash.output+'\n')
printLog('IN ORIGINAL WE CALL QUIT HERE - 1\n')
continue
for x in output:
if x.count('out of memory'):
writeline = False
printLog('ERROR: Omitting line from table - problem is too large')
if writeline:
try:
if args.library == 'cuFFT':
output = itertools.ifilter( lambda x: x.count('gflops'), output)
else:
output = itertools.ifilter( lambda x: x.count('gflops'), output)
output = list(itertools.islice(output, None))
thisResult = re.search('\d+\.*\d*e*-*\d*$', output[-1])
if args.library == 'cuFFT':
thisResult = re.search('[-+]?\d*\.\d+|\d+$', output[-1])
thisResult = float(thisResult.group(0))
thisResult = (params.x, params.y, params.z, batchSize, params.device, params.inlayout, params.outlayout, params.placeness, params.precision, params.label, thisResult)
outputRow = ''
for x in thisResult:
outputRow = outputRow + str(x) + ','
outputRow = outputRow.rstrip(',')
table.write(outputRow + '\n')
table.flush()
except:
printLog('ERROR: Exception occurs in GFLOP parsing')
else:
if(len(output) > 0):
if output[0].find('nan') or output[0].find('inf'):
printLog( 'WARNING: output from client was funky for this run. skipping table row')
else:
prinLog('ERROR: output from client makes no sense')
printLog(str(output[0]))
printLog('IN ORIGINAL WE CALL QUIT HERE - 2\n')
else:
prinLog('ERROR: output from client makes no sense')
#quit()
printLog("=========================MEASURE PERFORMANCE ENDS===========================\n")
#
#"""
#print a pretty table
#"""
#if args.tableOutputFilename == None:
# args.tableOutputFilename = 'results' + datetime.now().isoformat().replace(':','.') + '.txt'
#else:
# if os.path.isfile(args.tableOutputFilename):
# oldname = args.tableOutputFilename
# args.tableOutputFilename = args.tableOutputFilename + datetime.now().isoformat().replace(':','.')
# message = 'A file with the name ' + oldname + ' already exists. Changing filename to ' + args.tableOutputFilename
# print message
#
#table = open(args.tableOutputFilename, 'w')
#table.write(tableHeader + '\n')
#for x in result:
# row = ''
# for y in x:
# row = row + str(y) + ','
# row = row[:-1] #chomp off the trailing comma
# table.write(row + '\n')
| 31,972 | 39.116688 | 435 | py |
clFFT | clFFT-master/src/scripts/perf/fftPerformanceTesting.py | # ########################################################################
# Copyright 2013 Advanced Micro Devices, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ########################################################################
import itertools
import re#gex
import subprocess
import os
import sys
from datetime import datetime
# Common data and functions for the performance suite
tableHeader = 'lengthx,lengthy,lengthz,batch,device,inlay,outlay,place,precision,label,GFLOPS'
class TestCombination:
    """Plain record describing one FFT client invocation: transform lengths,
    batch size, device, memory layouts, placeness, precision and a label."""
    def __init__(self,
                 lengthx, lengthy, lengthz, batchsize,
                 device, inlayout, outlayout, placeness, precision,
                 label):
        # Values are stored verbatim, exactly as supplied by the caller.
        self.x, self.y, self.z = lengthx, lengthy, lengthz
        self.batchsize = batchsize
        self.device = device
        self.inlayout, self.outlayout = inlayout, outlayout
        self.placeness = placeness
        self.precision = precision
        self.label = label
    def __str__(self):
        # 'XxYxZ:batch, device, in/out, placeness, precision -- label'
        size = self.x + 'x' + self.y + 'x' + self.z + ':' + self.batchsize
        layouts = self.inlayout + '/' + self.outlayout
        body = size + ', ' + self.device + ', ' + layouts + ', ' + \
            self.placeness + ', ' + self.precision
        return body + ' -- ' + self.label
class GraphPoint:
    """One measured data point for plotting: problem geometry plus the
    achieved GFLOPS figure.  ``problemsize`` caches x*y*z*batch as a string."""
    def __init__(self,
                 lengthx, lengthy, lengthz, batchsize,
                 precision, device, label,
                 gflops):
        self.x = lengthx
        self.y = lengthy
        self.z = lengthz
        self.batchsize = batchsize
        self.device = device
        self.label = label
        self.precision = precision
        self.gflops = gflops
        # total number of points in one run, kept as a string for table output
        total = int(self.x) * int(self.y) * int(self.z) * int(self.batchsize)
        self.problemsize = str(total)
    def __str__(self):
        # ALL members must be represented here (x, y, z, batch, device, label, etc)
        size = self.x + 'x' + self.y + 'x' + self.z + ':' + self.batchsize
        tail = self.precision + ' precision, ' + self.device + ', -- ' + self.label
        return size + ', ' + tail + '; ' + self.gflops
class TableRow:
    """Pairs one test configuration with its measured GFLOPS string."""
    # parameters = class TestCombination instantiation
    def __init__(self, parameters, gflops):
        self.parameters = parameters
        self.gflops = gflops
    def __str__(self):
        return str(self.parameters) + '; ' + self.gflops
def transformDimension(x, y, z):
    """Return the dimensionality (1, 2 or 3) of a transform of size x*y*z.

    A length of 1 means the transform does not extend in that dimension.
    The original implementation fell through and returned None for the
    degenerate 1x1x1 case; that is now reported as a 1-D transform so the
    function always returns an int.
    """
    if int(z) != 1:
        return 3
    elif int(y) != 1:
        return 2
    else:
        # x > 1, or the degenerate 1x1x1 transform -- both are 1-D
        return 1
def executable(library):
    """Return the benchmark client binary name for `library` on this OS.

    `library` must be 'clFFT', 'null' (both map to the clFFT client) or
    'cuFFT'.  Any other value, a non-string argument, or an unsupported
    platform aborts the script via quit().
    NOTE(review): 'linux2' is the Python 2 sys.platform value for Linux;
    under Python 3 this check would reject Linux -- confirm intent.
    """
    if type(library) != str:
        print 'ERROR: expected library name to be a string'
        quit()
    if sys.platform != 'win32' and sys.platform != 'linux2':
        print 'ERROR: unknown operating system'
        quit()
    if library == 'clFFT' or library == 'null':
        if sys.platform == 'win32':
            exe = 'clFFT-client.exe'
        elif sys.platform == 'linux2':
            exe = 'clFFT-client'
    elif library == 'cuFFT':
        if sys.platform == 'win32':
            exe = 'cuFFT-client.exe'
        elif sys.platform == 'linux2':
            exe = 'cuFFT-client'
    else:
        print 'ERROR: unknown library -- cannot determine executable name'
        quit()
    return exe
def max_mem_available_in_bytes(exe, device):
    """Query the client binary (`exe -i device`) and return the device's
    advertised MAX_MEM_ALLOC_SIZE value, in bytes.

    Raises IndexError if the client output contains no MAX_MEM_ALLOC_SIZE
    line.  NOTE(review): itertools.ifilter is Python 2 only.
    """
    arguments = [exe, '-i', device]
    deviceInfo = subprocess.check_output(arguments, stderr=subprocess.STDOUT).split(os.linesep)
    # keep only the line(s) advertising the maximum allocation size
    deviceInfo = itertools.ifilter( lambda x: x.count('MAX_MEM_ALLOC_SIZE'), deviceInfo)
    deviceInfo = list(itertools.islice(deviceInfo, None))
    # the first run of digits on that line is the byte count
    maxMemoryAvailable = re.search('\d+', deviceInfo[0])
    return int(maxMemoryAvailable.group(0))
def max_problem_size(exe, layout, precision, device):
    """Return the hard cap on the number of elements in one transform:
    2^25 in general, 2^24 for real (layout '5') transforms.

    NOTE(review): `bytes_in_one_number` is computed but never used, and
    the `exe`/`device` parameters are ignored -- the cap is a fixed
    constant, not derived from actual device memory.
    """
    if precision == 'single':
        bytes_in_one_number = 4
    elif precision == 'double':
        bytes_in_one_number = 8
    else:
        print 'max_problem_size(): unknown precision'
        quit()
    max_problem_size = pow(2,25)
    if layout == '5':
        max_problem_size = pow(2,24) # TODO: Upper size limit for real transform
    return max_problem_size
def maxBatchSize(lengthx, lengthy, lengthz, layout, precision, exe, device):
    """Largest batch count whose total element count still fits under the
    per-transform cap reported by max_problem_size(); returned as a string."""
    transform_elements = int(lengthx) * int(lengthy) * int(lengthz)
    cap = max_problem_size(exe, layout, precision, device)
    return str(cap / transform_elements)
def create_ini_file_if_requested(args):
    """If --createIniFilename was given, dump every non-file argument of
    `args` to that file as a '--name' line followed by its value line,
    then exit the script.

    NOTE(review): the bare `file` builtin used in the type check exists
    only in Python 2.
    """
    if args.createIniFilename:
        for x in vars(args):
            # skip open file handles and the *File* bookkeeping arguments
            if (type(getattr(args,x)) != file) and x.count('File') == 0:
                args.createIniFilename.write('--' + x + os.linesep)
                args.createIniFilename.write(str(getattr(args,x)) + os.linesep)
        quit()
def load_ini_file_if_requested(args, parser):
    """If --useIniFilename was given, re-parse the arguments stored in that
    file (one token per line) and return the resulting namespace; otherwise
    return ``args`` unchanged."""
    if not args.useIniFilename:
        return args
    stored = [line.strip() for line in args.useIniFilename.readlines()]
    return parser.parse_args(stored)
def is_numeric_type(x):
    """True when x is exactly a plain Python number (int, long or float)."""
    # deliberate exact-type test (not isinstance) to match the original
    return type(x) in (int, long, float)
def split_up_comma_delimited_lists(args):
    """Normalise every attribute of ``args`` to a list, in place:
    None -> [None], a number -> [number], a comma-separated string ->
    list of its pieces.  Returns the mutated ``args``."""
    for name in vars(args):
        value = getattr(args, name)
        if value is None:
            setattr(args, name, [None])
        elif is_numeric_type(value):
            setattr(args, name, [value])
        elif type(value) == str:
            setattr(args, name, value.split(','))
    return args
class Range:
    """Expands range specifications such as '2-16:x2' or '1-10:+3' into an
    explicit, sorted, duplicate-free list stored in ``self.expanded``.

    Accepted entries: a plain number, 'begin-end[:step]' where the step is
    '+N' (additive) or 'xN' (multiplicative), the literal 'max', or a bare
    '+N'.  A non-list argument is passed through untouched.
    """
    def __init__(self, ranges, defaultStep='+1'):
        # we might be passed in a single value or a list of strings
        # if we receive a single value, we want to feed it right back
        if type(ranges) != list:
            self.expanded = ranges
        elif ranges[0] == None:
            self.expanded = [None]
        else:
            self.expanded = []
            for thisRange in ranges:
                thisRange = str(thisRange)
                if re.search('^\+\d+$', thisRange):
                    # bare step specifier ('+N'): forwarded verbatim
                    self.expanded = self.expanded + [thisRange]
                elif thisRange == 'max':
                    self.expanded = self.expanded + ['max']
                else:
                #elif thisRange != 'max':
                    if thisRange.count(':'):
                        self._stepAmount = thisRange.split(':')[1]
                    else:
                        self._stepAmount = defaultStep
                    thisRange = thisRange.split(':')[0]
                    # an 'x' in the step selects geometric stepping
                    if self._stepAmount.count('x'):
                        self._stepper = '_mult'
                    else:
                        self._stepper = '_add'
                    self._stepAmount = self._stepAmount.lstrip('+x')
                    self._stepAmount = int(self._stepAmount)
                    if thisRange.count('-'):
                        self.begin = int(thisRange.split('-')[0])
                        self.end = int(thisRange.split('-')[1])
                    else:
                        self.begin = int(thisRange.split('-')[0])
                        self.end = int(thisRange.split('-')[0])
                    self.current = self.begin
                    # a 0 start can never grow by multiplication; emit it alone
                    if self.begin == 0 and self._stepper == '_mult':
                        self.expanded = self.expanded + [0]
                    else:
                        while self.current <= self.end:
                            self.expanded = self.expanded + [self.current]
                            self._step()
            # now we want to uniquify and sort the expanded range
            self.expanded = list(set(self.expanded))
            self.expanded.sort()
    # advance current value to next
    def _step(self):
        getattr(self, self._stepper)()
    def _mult(self):
        self.current = self.current * self._stepAmount
    def _add(self):
        self.current = self.current + self._stepAmount
def expand_range(a_range):
    """Convenience wrapper: run ``a_range`` through the Range parser and
    return the expanded list."""
    expanded = Range(a_range).expanded
    return expanded
def decode_parameter_problemsize(problemsize):
    """Split 'XxYxZ:batch' strings in place: each entry becomes a list of
    colon-separated parts, each part a list of its 'x'-separated lengths.
    A list containing None anywhere is returned untouched."""
    if not problemsize.count(None):
        for index, entry in enumerate(problemsize):
            problemsize[index] = [part.split('x') for part in entry.split(':')]
    return problemsize
def gemm_table_header():
    """Column header line for GEMM benchmark result tables."""
    header = 'm,n,k,lda,ldb,ldc,alpha,beta,order,transa,transb,function,device,library,label,GFLOPS'
    return header
class GemmTestCombination:
    """Record of one GEMM benchmark configuration; all numeric parameters
    are normalised to strings on construction."""
    def __init__(self,
                 sizem, sizen, sizek, lda, ldb, ldc,
                 alpha, beta, order, transa, transb,
                 function, device, library, label):
        # numeric fields become strings; descriptive fields stay as given
        for name, value in (('sizem', sizem), ('sizen', sizen),
                            ('sizek', sizek), ('lda', lda),
                            ('ldb', ldb), ('ldc', ldc),
                            ('alpha', alpha), ('beta', beta)):
            setattr(self, name, str(value))
        self.order = order
        self.transa = transa
        self.transb = transb
        self.function = function
        self.device = device
        self.library = library
        self.label = label
    def __str__(self):
        dims = self.sizem + 'x' + self.sizen + 'x' + self.sizek
        lds = self.lda + 'x' + self.ldb + 'x' + self.ldc
        scalars = ('alpha(' + self.alpha + '), beta(' + self.beta +
                   '), order(' + self.order + '), transa(' + self.transa +
                   '), transb(' + self.transb + ')')
        return (dims + ':' + lds + ', ' + self.device + ', ' + self.function +
                ', ' + self.library + ', ' + scalars + ' -- ' + self.label)
class GemmGraphPoint:
    """One plotted GEMM measurement: the configuration plus its gflops
    string.  Values are stored exactly as supplied (no str() conversion).
    NOTE(review): despite the comment below, lda/ldb/ldc are not part of
    the string form -- confirm whether that is intentional."""
    def __init__(self,
                 sizem, sizen, sizek,
                 lda, ldb, ldc,
                 device, order, transa, transb,
                 function, library, label,
                 gflops):
        self.sizem, self.sizen, self.sizek = sizem, sizen, sizek
        self.lda, self.ldb, self.ldc = lda, ldb, ldc
        self.device = device
        self.order, self.transa, self.transb = order, transa, transb
        self.function = function
        self.library = library
        self.label = label
        self.gflops = gflops
    def __str__(self):
        # ALL members must be represented here (x, y, z, batch, device, label, etc)
        props = ('order(' + self.order + '), transa(' + self.transa +
                 '), transb(' + self.transb + ')')
        return (self.sizem + 'x' + self.sizen + 'x' + self.sizek + ':' +
                self.device + ', ' + self.function + ', ' + self.library +
                ', ' + props + ' -- ' + self.label + '; ' + self.gflops +
                ' gflops')
def open_file( filename ):
    """Open `filename` for writing, inventing a timestamped name when no
    name is given and uniquifying it (by appending a timestamp) when the
    file already exists.  Accepts either a string or a single-element
    list (as produced by argparse).
    """
    if type(filename) == list:
        filename = filename[0]
    if filename == None:
        filename = 'results' + datetime.now().isoformat().replace(':','.') + '.txt'
    else:
        if os.path.isfile(filename):
            oldname = filename
            filename = filename + datetime.now().isoformat().replace(':','.')
            message = 'A file with the name ' + oldname + ' already exists. Changing filename to ' + filename
            print message
    return open(filename, 'w')
| 11,307 | 35.714286 | 339 | py |
clFFT | clFFT-master/src/scripts/perf/performanceUtility.py | # ########################################################################
# Copyright 2013 Advanced Micro Devices, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ########################################################################
#This file contains a number of utilities function which could be independent of
#any specific domain concept
import signal
from subprocess import check_output
import errorHandler
from datetime import datetime
def currentUser():
    """Return the login name of the first user reported by `who`.

    NOTE(review): raises IndexError when `who` prints nothing (e.g. a
    headless/CI session); the except clause only logs and re-raises.
    """
    try:
        return check_output("who", shell = True).split()[0];
    except:
        print 'Unhandled Exception at performanceUtility::currentUser()'
        raise
#Details: Generate sorted numbers in radices of 2,3 and 5 upto a given upper limit number
def generate235Radices(maxSize):
    """Return the sorted list of every number of the form 2^a * 3^b * 5^c
    (a, b, c >= 0) that does not exceed ``maxSize``.

    Returns an empty list when ``maxSize`` < 1.
    """
    radices = set()
    power_of_five = 1
    while power_of_five <= maxSize:
        five_three = power_of_five
        while five_three <= maxSize:
            value = five_three
            while value <= maxSize:
                radices.add(value)
                value *= 2
            five_three *= 3
        power_of_five *= 5
    return sorted(radices)
def timeout(timeout_time, default):
    """Decorator factory: abort the wrapped single-argument function with
    an ApplicationException(TIME_OUT) if it runs longer than
    `timeout_time` seconds.  SIGALRM based, hence Unix only.

    NOTE(review): the `default` parameter is accepted but never used.
    """
    def timeout_function(f):
        def f2(args):
            def timeout_handler(signum, frame):
                raise errorHandler.TimeoutException()
            # install our handler, remembering the previous one
            old_handler = signal.signal(signal.SIGALRM, timeout_handler)
            signal.alarm(timeout_time) # triger alarm in timeout_time seconds
            retval = ""
            try:
                retval = f(args)
            except errorHandler.TimeoutException:
                raise errorHandler.ApplicationException(__file__, errorHandler.TIME_OUT)
            except:
                signal.alarm(0)
                raise
            finally:
                #print 'executing finally'
                # always restore the previous handler and cancel the alarm
                signal.signal(signal.SIGALRM, old_handler)
                signal.alarm(0)
            return retval
        return f2
    return timeout_function
def logTxtOutput(fileName, mode, txt):
    """Write ``txt`` (prefixed with a '------' separator line) to a file
    named ``<fileName>-<YYYY-Mon-DD>.txt`` opened with ``mode``."""
    stamp = datetime.now().strftime('%Y-%b-%d')
    target = fileName + '-' + stamp + '.txt'
    with open(target, mode) as handle:
        handle.write('------\n' + txt + '\n')
def log(filename, txt):
    """Append a single timestamped line ('<ctime># <txt>') to ``filename``."""
    with open(filename, 'a') as handle:
        handle.write(datetime.now().ctime() + '# ' + txt + '\n')
| 3,044 | 30.391753 | 89 | py |
snowboy | snowboy-master/setup.py | import os
import sys
from setuptools import setup, find_packages
from distutils.command.build import build
from distutils.dir_util import copy_tree
from subprocess import call
py_dir = 'Python' if sys.version_info[0] < 3 else 'Python3'
class SnowboyBuild(build):
    """Custom distutils `build` command: compiles the SWIG wrapper via
    `make`, then copies the generated _snowboydetect.so and the
    `resources` directory into the build tree."""
    def run(self):
        cmd = ['make']
        swig_dir = os.path.join('swig', py_dir)
        def compile():
            # run `make` inside the SWIG directory for this Python version
            call(cmd, cwd=swig_dir)
        self.execute(compile, [], 'Compiling snowboy...')
        # copy generated .so to build folder
        self.mkpath(self.build_lib)
        snowboy_build_lib = os.path.join(self.build_lib, 'snowboy')
        self.mkpath(snowboy_build_lib)
        target_file = os.path.join(swig_dir, '_snowboydetect.so')
        if not self.dry_run:
            self.copy_file(target_file,
                           snowboy_build_lib)
        # copy resources too since it is a symlink
        resources_dir = 'resources'
        resources_dir_on_build = os.path.join(snowboy_build_lib,
                                              'resources')
        copy_tree(resources_dir, resources_dir_on_build)
        build.run(self)
# Package metadata; the custom `build` command registered in `cmdclass`
# compiles the native SWIG extension before packaging.
setup(
    name='snowboy',
    version='1.3.0',
    description='Snowboy is a customizable hotword detection engine',
    maintainer='KITT.AI',
    maintainer_email='snowboy@kitt.ai',
    license='Apache-2.0',
    url='https://snowboy.kitt.ai',
    packages=find_packages(os.path.join('examples', py_dir)),
    package_dir={'snowboy': os.path.join('examples', py_dir)},
    py_modules=['snowboy.snowboydecoder', 'snowboy.snowboydetect'],
    package_data={'snowboy': ['resources/*']},
    zip_safe=False,
    long_description="",
    classifiers=[],
    install_requires=[
        'PyAudio',
    ],
    cmdclass={
        'build': SnowboyBuild
    }
)
| 1,814 | 28.274194 | 69 | py |
snowboy | snowboy-master/examples/REST_API/training_service.py | #! /usr/bin/evn python
import sys
import base64
import requests
def get_wave(fname):
    """Read ``fname`` and return its contents base64-encoded.

    NOTE(review): opens the file in text mode and encodes the raw string,
    which relies on Python 2 str semantics for binary wave data.
    """
    with open(fname) as infile:
        contents = infile.read()
    return base64.b64encode(contents)
# Training endpoint of the snowboy.kitt.ai REST API.
endpoint = "https://snowboy.kitt.ai/api/v1/train/"
############# MODIFY THE FOLLOWING #############
token = ""
hotword_name = "???"
language = "en"
age_group = "20_29"
gender = "M"
microphone = "??" # e.g., macbook pro microphone
############### END OF MODIFY ##################
if __name__ == "__main__":
    # expects exactly three sample wave files plus an output model name
    try:
        [_, wav1, wav2, wav3, out] = sys.argv
    except ValueError:
        print "Usage: %s wave_file1 wave_file2 wave_file3 out_model_name" % sys.argv[0]
        sys.exit()
    # JSON payload: metadata plus the three base64-encoded voice samples
    data = {
        "name": hotword_name,
        "language": language,
        "age_group": age_group,
        "gender": gender,
        "microphone": microphone,
        "token": token,
        "voice_samples": [
            {"wave": get_wave(wav1)},
            {"wave": get_wave(wav2)},
            {"wave": get_wave(wav3)}
        ]
    }
    response = requests.post(endpoint, json=data)
    if response.ok:
        # on success the response body is the binary model file
        with open(out, "w") as outfile:
            outfile.write(response.content)
        print "Saved model to '%s'." % out
    else:
        print "Request failed."
        print response.text
| 1,276 | 23.09434 | 87 | py |
snowboy | snowboy-master/examples/Python/snowboythreaded.py | import snowboydecoder
import threading
import Queue
class ThreadedDetector(threading.Thread):
    """
    Wrapper class around detectors to run them in a separate thread
    and provide methods to pause, resume, and modify detection
    """
    def __init__(self, models, **kwargs):
        """
        Initialize Detectors object. **kwargs is for any __init__ keyword
        arguments to be passed into HotWordDetector __init__() method.
        """
        threading.Thread.__init__(self)
        self.models = models
        self.init_kwargs = kwargs
        self.interrupted = True
        self.commands = Queue.Queue()
        self.vars_are_changed = True
        self.detectors = None  # Initialize when thread is run in self.run()
        self.run_kwargs = None  # Initialize when detectors start in self.start_recog()
    def initialize_detectors(self):
        """
        Returns initialized Snowboy HotwordDetector objects
        """
        self.detectors = snowboydecoder.HotwordDetector(self.models, **self.init_kwargs)
    def run(self):
        """
        Runs in separate thread - waits on command to either run detectors
        or terminate thread from commands queue
        """
        try:
            while True:
                # blocking get: idles here until start_recog()/terminate()
                command = self.commands.get(True)
                if command == "Start":
                    self.interrupted = False
                    if self.vars_are_changed:
                        # If there is an existing detector object, terminate it
                        if self.detectors is not None:
                            self.detectors.terminate()
                        self.initialize_detectors()
                        self.vars_are_changed = False
                    # Start detectors - blocks until interrupted by self.interrupted variable
                    self.detectors.start(interrupt_check=lambda: self.interrupted, **self.run_kwargs)
                elif command == "Terminate":
                    # Program ending - terminate thread
                    break
        finally:
            if self.detectors is not None:
                self.detectors.terminate()
    def start_recog(self, **kwargs):
        """
        Starts recognition in thread. Accepts kwargs to pass into the
        HotWordDetector.start() method, but does not accept interrupt_callback,
        as that is already set up.
        """
        assert "interrupt_check" not in kwargs, \
            "Cannot set interrupt_check argument. To interrupt detectors, use Detectors.pause_recog() instead"
        self.run_kwargs = kwargs
        self.commands.put("Start")
    def pause_recog(self):
        """
        Halts recognition in thread.
        """
        self.interrupted = True
    def terminate(self):
        """
        Terminates recognition thread - called when program terminates
        """
        self.pause_recog()
        self.commands.put("Terminate")
    def is_running(self):
        """True while the detection loop is active (not paused)."""
        return not self.interrupted
    def change_models(self, models):
        """Swap the hotword model list; takes effect on the next start."""
        if self.is_running():
            print("Models will be changed after restarting detectors.")
        if self.models != models:
            self.models = models
            self.vars_are_changed = True
    def change_sensitivity(self, sensitivity):
        """Change detector sensitivity; takes effect on the next start."""
        if self.is_running():
            print("Sensitivity will be changed after restarting detectors.")
        if self.init_kwargs['sensitivity'] != sensitivity:
            self.init_kwargs['sensitivity'] = sensitivity
            self.vars_are_changed = True
| 3,554 | 35.649485 | 110 | py |
snowboy | snowboy-master/examples/Python/demo4.py | import snowboydecoder
import sys
import signal
import speech_recognition as sr
import os
"""
This demo file shows you how to use the new_message_callback to interact with
the recorded audio after a keyword is spoken. It uses the speech recognition
library in order to convert the recorded audio into text.
Information on installing the speech recognition library can be found at:
https://pypi.python.org/pypi/SpeechRecognition/
"""
# Flag flipped by the SIGINT handler below; polled by the detector loop.
interrupted = False
def audioRecorderCallback(fname):
    """Transcribe the recorded wave file `fname` with Google Speech
    Recognition, print the result, then delete the file."""
    print "converting audio to text"
    r = sr.Recognizer()
    with sr.AudioFile(fname) as source:
        audio = r.record(source) # read the entire audio file
    # recognize speech using Google Speech Recognition
    try:
        # for testing purposes, we're just using the default API key
        # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
        # instead of `r.recognize_google(audio)`
        print(r.recognize_google(audio))
    except sr.UnknownValueError:
        print "Google Speech Recognition could not understand audio"
    except sr.RequestError as e:
        print "Could not request results from Google Speech Recognition service; {0}".format(e)
    os.remove(fname)
def detectedCallback():
    """Print a progress note (no trailing newline) when the hotword fires."""
    stream = sys.stdout
    stream.write("recording audio...")
    stream.flush()
def signal_handler(signal, frame):
    """SIGINT handler: flag the main loop to stop."""
    global interrupted
    interrupted = True
def interrupt_callback():
    """Polled by the detector loop; True once Ctrl+C was pressed."""
    global interrupted
    return interrupted
if len(sys.argv) == 1:
    print "Error: need to specify model name"
    print "Usage: python demo.py your.model"
    sys.exit(-1)
model = sys.argv[1]
# capture SIGINT signal, e.g., Ctrl+C
signal.signal(signal.SIGINT, signal_handler)
detector = snowboydecoder.HotwordDetector(model, sensitivity=0.38)
print "Listening... Press Ctrl+C to exit"
# main loop
detector.start(detected_callback=detectedCallback,
               audio_recorder_callback=audioRecorderCallback,
               interrupt_check=interrupt_callback,
               sleep_time=0.01)
detector.terminate()
| 2,066 | 25.844156 | 106 | py |
snowboy | snowboy-master/examples/Python/snowboydecoder_arecord.py | #!/usr/bin/env python
import collections
import snowboydetect
import time
import wave
import os
import logging
import subprocess
import threading
logging.basicConfig()
logger = logging.getLogger("snowboy")
logger.setLevel(logging.INFO)
TOP_DIR = os.path.dirname(os.path.abspath(__file__))
RESOURCE_FILE = os.path.join(TOP_DIR, "resources/common.res")
DETECT_DING = os.path.join(TOP_DIR, "resources/ding.wav")
DETECT_DONG = os.path.join(TOP_DIR, "resources/dong.wav")
class RingBuffer(object):
    """Fixed-capacity byte ring buffer fed by the audio capture thread;
    the oldest bytes are silently dropped once `size` is exceeded."""
    def __init__(self, size = 4096):
        # deque with maxlen discards from the head automatically
        self._buf = collections.deque(maxlen=size)
    def extend(self, data):
        """Adds data to the end of buffer"""
        self._buf.extend(data)
    def get(self):
        """Retrieves data from the beginning of buffer and clears it"""
        snapshot = bytes(bytearray(self._buf))
        self._buf.clear()
        return snapshot
def play_audio_file(fname=DETECT_DING):
    """Simple callback function to play a wave file. By default it plays
    a Ding sound.

    :param str fname: wave file name
    :return: None
    """
    # fire-and-forget; aplay's output is discarded
    os.system("aplay " + fname + " > /dev/null 2>&1")
class HotwordDetector(object):
    """
    Snowboy decoder to detect whether a keyword specified by `decoder_model`
    exists in a microphone input stream (captured via an `arecord`
    subprocess rather than PortAudio).

    :param decoder_model: decoder model file path, a string or a list of strings
    :param resource: resource file path.
    :param sensitivity: decoder sensitivity, a float of a list of floats.
                        The bigger the value, the more senstive the
                        decoder. If an empty list is provided, then the
                        default sensitivity in the model will be used.
    :param audio_gain: multiply input volume by this factor.
    """
    def __init__(self, decoder_model,
                 resource=RESOURCE_FILE,
                 sensitivity=[],
                 audio_gain=1):
        tm = type(decoder_model)
        ts = type(sensitivity)
        # normalise both arguments to lists
        if tm is not list:
            decoder_model = [decoder_model]
        if ts is not list:
            sensitivity = [sensitivity]
        model_str = ",".join(decoder_model)
        self.detector = snowboydetect.SnowboyDetect(
            resource_filename=resource.encode(), model_str=model_str.encode())
        self.detector.SetAudioGain(audio_gain)
        self.num_hotwords = self.detector.NumHotwords()
        # one sensitivity for several models: replicate it per hotword
        if len(decoder_model) > 1 and len(sensitivity) == 1:
            sensitivity = sensitivity*self.num_hotwords
        if len(sensitivity) != 0:
            assert self.num_hotwords == len(sensitivity), \
                "number of hotwords in decoder_model (%d) and sensitivity " \
                "(%d) does not match" % (self.num_hotwords, len(sensitivity))
        sensitivity_str = ",".join([str(t) for t in sensitivity])
        if len(sensitivity) != 0:
            self.detector.SetSensitivity(sensitivity_str.encode())
        # buffer sized to hold ~5 seconds of captured audio
        self.ring_buffer = RingBuffer(
            self.detector.NumChannels() * self.detector.SampleRate() * 5)
    def record_proc(self):
        """Capture loop run in a worker thread: spawn `arecord` and feed
        its frames into the ring buffer until self.recording is cleared."""
        CHUNK = 2048
        RECORD_RATE = 16000
        cmd = 'arecord -q -r %d -f S16_LE' % RECORD_RATE
        process = subprocess.Popen(cmd.split(' '),
                                   stdout = subprocess.PIPE,
                                   stderr = subprocess.PIPE)
        wav = wave.open(process.stdout, 'rb')
        while self.recording:
            data = wav.readframes(CHUNK)
            self.ring_buffer.extend(data)
        process.terminate()
    def init_recording(self):
        """
        Start a thread for spawning arecord process and reading its stdout
        """
        self.recording = True
        self.record_thread = threading.Thread(target = self.record_proc)
        self.record_thread.start()
    def start(self, detected_callback=play_audio_file,
              interrupt_check=lambda: False,
              sleep_time=0.03):
        """
        Start the voice detector. For every `sleep_time` second it checks the
        audio buffer for triggering keywords. If detected, then call
        corresponding function in `detected_callback`, which can be a single
        function (single model) or a list of callback functions (multiple
        models). Every loop it also calls `interrupt_check` -- if it returns
        True, then breaks from the loop and return.

        :param detected_callback: a function or list of functions. The number of
                                  items must match the number of models in
                                  `decoder_model`.
        :param interrupt_check: a function that returns True if the main loop
                                needs to stop.
        :param float sleep_time: how much time in second every loop waits.
        :return: None
        """
        self.init_recording()
        if interrupt_check():
            logger.debug("detect voice return")
            return
        tc = type(detected_callback)
        if tc is not list:
            detected_callback = [detected_callback]
        if len(detected_callback) == 1 and self.num_hotwords > 1:
            detected_callback *= self.num_hotwords
        assert self.num_hotwords == len(detected_callback), \
            "Error: hotwords in your models (%d) do not match the number of " \
            "callbacks (%d)" % (self.num_hotwords, len(detected_callback))
        logger.debug("detecting...")
        while True:
            if interrupt_check():
                logger.debug("detect voice break")
                break
            data = self.ring_buffer.get()
            if len(data) == 0:
                # nothing captured yet; back off briefly
                time.sleep(sleep_time)
                continue
            ans = self.detector.RunDetection(data)
            if ans == -1:
                logger.warning("Error initializing streams or reading audio data")
            elif ans > 0:
                # ans is the 1-based index of the triggered hotword
                message = "Keyword " + str(ans) + " detected at time: "
                message += time.strftime("%Y-%m-%d %H:%M:%S",
                                         time.localtime(time.time()))
                logger.info(message)
                callback = detected_callback[ans-1]
                if callback is not None:
                    callback()
        logger.debug("finished.")
    def terminate(self):
        """
        Terminate audio stream. Users cannot call start() again to detect.
        :return: None
        """
        self.recording = False
        self.record_thread.join()
| 6,573 | 35.120879 | 82 | py |
snowboy | snowboy-master/examples/Python/demo.py | import snowboydecoder
import sys
import signal
# Flag flipped by the SIGINT handler; polled by the detector loop.
interrupted = False
def signal_handler(signal, frame):
    """SIGINT handler: flag the main loop to stop."""
    global interrupted
    interrupted = True
def interrupt_callback():
    """Polled by the detector loop; True once Ctrl+C was pressed."""
    global interrupted
    return interrupted
if len(sys.argv) == 1:
    print("Error: need to specify model name")
    print("Usage: python demo.py your.model")
    sys.exit(-1)
model = sys.argv[1]
# capture SIGINT signal, e.g., Ctrl+C
signal.signal(signal.SIGINT, signal_handler)
detector = snowboydecoder.HotwordDetector(model, sensitivity=0.5)
print('Listening... Press Ctrl+C to exit')
# main loop
detector.start(detected_callback=snowboydecoder.play_audio_file,
               interrupt_check=interrupt_callback,
               sleep_time=0.03)
detector.terminate()
| 757 | 20.055556 | 65 | py |
snowboy | snowboy-master/examples/Python/demo_arecord.py | import snowboydecoder_arecord
import sys
import signal
interrupted = False
def signal_handler(signal, frame):
global interrupted
interrupted = True
def interrupt_callback():
global interrupted
return interrupted
if len(sys.argv) == 1:
print("Error: need to specify model name")
print("Usage: python demo.py your.model")
sys.exit(-1)
model = sys.argv[1]
# capture SIGINT signal, e.g., Ctrl+C
signal.signal(signal.SIGINT, signal_handler)
detector = snowboydecoder_arecord.HotwordDetector(model, sensitivity=0.5)
print('Listening... Press Ctrl+C to exit')
# main loop
detector.start(detected_callback=snowboydecoder_arecord.play_audio_file,
interrupt_check=interrupt_callback,
sleep_time=0.03)
detector.terminate()
| 781 | 20.722222 | 73 | py |
snowboy | snowboy-master/examples/Python/snowboydetect.py | ../../swig/Python/snowboydetect.py | 34 | 34 | 34 | py |
snowboy | snowboy-master/examples/Python/__init__.py | 0 | 0 | 0 | py | |
snowboy | snowboy-master/examples/Python/snowboydecoder.py | #!/usr/bin/env python
import collections
import pyaudio
import snowboydetect
import time
import wave
import os
import logging
from ctypes import *
from contextlib import contextmanager
logging.basicConfig()
logger = logging.getLogger("snowboy")
logger.setLevel(logging.INFO)
TOP_DIR = os.path.dirname(os.path.abspath(__file__))
RESOURCE_FILE = os.path.join(TOP_DIR, "resources/common.res")
DETECT_DING = os.path.join(TOP_DIR, "resources/ding.wav")
DETECT_DONG = os.path.join(TOP_DIR, "resources/dong.wav")
def py_error_handler(filename, line, function, err, fmt):
pass
ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)
c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)
@contextmanager
def no_alsa_error():
try:
asound = cdll.LoadLibrary('libasound.so')
asound.snd_lib_error_set_handler(c_error_handler)
yield
asound.snd_lib_error_set_handler(None)
except:
yield
pass
class RingBuffer(object):
    """Bounded FIFO of raw audio bytes fed from the PortAudio callback.

    Once more than ``size`` bytes are held, the oldest bytes are dropped
    automatically (``collections.deque`` ``maxlen`` semantics).
    """

    def __init__(self, size = 4096):
        # maxlen makes the deque self-truncating from the left.
        self._buf = collections.deque(maxlen=size)

    def extend(self, data):
        """Append ``data`` to the end of the buffer."""
        self._buf.extend(data)

    def get(self):
        """Return all buffered bytes (oldest first) and empty the buffer."""
        snapshot = bytes(bytearray(self._buf))
        self._buf.clear()
        return snapshot
def play_audio_file(fname=DETECT_DING):
    """Simple callback function to play a wave file. By default it plays
    a Ding sound.

    :param str fname: wave file name
    :return: None
    """
    # Load the whole clip into memory; the bundled detection sounds are short.
    ding_wav = wave.open(fname, 'rb')
    ding_data = ding_wav.readframes(ding_wav.getnframes())
    # Suppress ALSA's noisy stderr diagnostics while PyAudio initialises.
    with no_alsa_error():
        audio = pyaudio.PyAudio()
    # Output-only stream matching the wave file's own format.
    stream_out = audio.open(
        format=audio.get_format_from_width(ding_wav.getsampwidth()),
        channels=ding_wav.getnchannels(),
        rate=ding_wav.getframerate(), input=False, output=True)
    stream_out.start_stream()
    stream_out.write(ding_data)
    # Give the buffered playback a moment to drain before tearing down.
    time.sleep(0.2)
    stream_out.stop_stream()
    stream_out.close()
    audio.terminate()
class HotwordDetector(object):
"""
Snowboy decoder to detect whether a keyword specified by `decoder_model`
exists in a microphone input stream.
:param decoder_model: decoder model file path, a string or a list of strings
:param resource: resource file path.
:param sensitivity: decoder sensitivity, a float of a list of floats.
The bigger the value, the more senstive the
decoder. If an empty list is provided, then the
default sensitivity in the model will be used.
:param audio_gain: multiply input volume by this factor.
:param apply_frontend: applies the frontend processing algorithm if True.
"""
def __init__(self, decoder_model,
resource=RESOURCE_FILE,
sensitivity=[],
audio_gain=1,
apply_frontend=False):
def audio_callback(in_data, frame_count, time_info, status):
self.ring_buffer.extend(in_data)
play_data = chr(0) * len(in_data)
return play_data, pyaudio.paContinue
tm = type(decoder_model)
ts = type(sensitivity)
if tm is not list:
decoder_model = [decoder_model]
if ts is not list:
sensitivity = [sensitivity]
model_str = ",".join(decoder_model)
self.detector = snowboydetect.SnowboyDetect(
resource_filename=resource.encode(), model_str=model_str.encode())
self.detector.SetAudioGain(audio_gain)
self.detector.ApplyFrontend(apply_frontend)
self.num_hotwords = self.detector.NumHotwords()
if len(decoder_model) > 1 and len(sensitivity) == 1:
sensitivity = sensitivity*self.num_hotwords
if len(sensitivity) != 0:
assert self.num_hotwords == len(sensitivity), \
"number of hotwords in decoder_model (%d) and sensitivity " \
"(%d) does not match" % (self.num_hotwords, len(sensitivity))
sensitivity_str = ",".join([str(t) for t in sensitivity])
if len(sensitivity) != 0:
self.detector.SetSensitivity(sensitivity_str.encode())
self.ring_buffer = RingBuffer(
self.detector.NumChannels() * self.detector.SampleRate() * 5)
with no_alsa_error():
self.audio = pyaudio.PyAudio()
self.stream_in = self.audio.open(
input=True, output=False,
format=self.audio.get_format_from_width(
self.detector.BitsPerSample() / 8),
channels=self.detector.NumChannels(),
rate=self.detector.SampleRate(),
frames_per_buffer=2048,
stream_callback=audio_callback)
def start(self, detected_callback=play_audio_file,
interrupt_check=lambda: False,
sleep_time=0.03,
audio_recorder_callback=None,
silent_count_threshold=15,
recording_timeout=100):
"""
Start the voice detector. For every `sleep_time` second it checks the
audio buffer for triggering keywords. If detected, then call
corresponding function in `detected_callback`, which can be a single
function (single model) or a list of callback functions (multiple
models). Every loop it also calls `interrupt_check` -- if it returns
True, then breaks from the loop and return.
:param detected_callback: a function or list of functions. The number of
items must match the number of models in
`decoder_model`.
:param interrupt_check: a function that returns True if the main loop
needs to stop.
:param float sleep_time: how much time in second every loop waits.
:param audio_recorder_callback: if specified, this will be called after
a keyword has been spoken and after the
phrase immediately after the keyword has
been recorded. The function will be
passed the name of the file where the
phrase was recorded.
:param silent_count_threshold: indicates how long silence must be heard
to mark the end of a phrase that is
being recorded.
:param recording_timeout: limits the maximum length of a recording.
:return: None
"""
if interrupt_check():
logger.debug("detect voice return")
return
tc = type(detected_callback)
if tc is not list:
detected_callback = [detected_callback]
if len(detected_callback) == 1 and self.num_hotwords > 1:
detected_callback *= self.num_hotwords
assert self.num_hotwords == len(detected_callback), \
"Error: hotwords in your models (%d) do not match the number of " \
"callbacks (%d)" % (self.num_hotwords, len(detected_callback))
logger.debug("detecting...")
state = "PASSIVE"
while True:
if interrupt_check():
logger.debug("detect voice break")
break
data = self.ring_buffer.get()
if len(data) == 0:
time.sleep(sleep_time)
continue
status = self.detector.RunDetection(data)
if status == -1:
logger.warning("Error initializing streams or reading audio data")
#small state machine to handle recording of phrase after keyword
if state == "PASSIVE":
if status > 0: #key word found
self.recordedData = []
self.recordedData.append(data)
silentCount = 0
recordingCount = 0
message = "Keyword " + str(status) + " detected at time: "
message += time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(time.time()))
logger.info(message)
callback = detected_callback[status-1]
if callback is not None:
callback()
if audio_recorder_callback is not None:
state = "ACTIVE"
continue
elif state == "ACTIVE":
stopRecording = False
if recordingCount > recording_timeout:
stopRecording = True
elif status == -2: #silence found
if silentCount > silent_count_threshold:
stopRecording = True
else:
silentCount = silentCount + 1
elif status == 0: #voice found
silentCount = 0
if stopRecording == True:
fname = self.saveMessage()
audio_recorder_callback(fname)
state = "PASSIVE"
continue
recordingCount = recordingCount + 1
self.recordedData.append(data)
logger.debug("finished.")
def saveMessage(self):
"""
Save the message stored in self.recordedData to a timestamped file.
"""
filename = 'output' + str(int(time.time())) + '.wav'
data = b''.join(self.recordedData)
#use wave to save data
wf = wave.open(filename, 'wb')
wf.setnchannels(1)
wf.setsampwidth(self.audio.get_sample_size(
self.audio.get_format_from_width(
self.detector.BitsPerSample() / 8)))
wf.setframerate(self.detector.SampleRate())
wf.writeframes(data)
wf.close()
logger.debug("finished saving: " + filename)
return filename
def terminate(self):
"""
Terminate audio stream. Users cannot call start() again to detect.
:return: None
"""
self.stream_in.stop_stream()
self.stream_in.close()
self.audio.terminate()
| 10,392 | 37.069597 | 82 | py |
snowboy | snowboy-master/examples/Python/demo2.py | import snowboydecoder
import sys
import signal
# Demo code for listening to two hotwords at the same time
interrupted = False
def signal_handler(signal, frame):
global interrupted
interrupted = True
def interrupt_callback():
global interrupted
return interrupted
if len(sys.argv) != 3:
print("Error: need to specify 2 model names")
print("Usage: python demo.py 1st.model 2nd.model")
sys.exit(-1)
models = sys.argv[1:]
# capture SIGINT signal, e.g., Ctrl+C
signal.signal(signal.SIGINT, signal_handler)
sensitivity = [0.5]*len(models)
detector = snowboydecoder.HotwordDetector(models, sensitivity=sensitivity)
callbacks = [lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING),
lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)]
print('Listening... Press Ctrl+C to exit')
# main loop
# make sure you have the same numbers of callbacks and models
detector.start(detected_callback=callbacks,
interrupt_check=interrupt_callback,
sleep_time=0.03)
detector.terminate()
| 1,075 | 24.619048 | 80 | py |
snowboy | snowboy-master/examples/Python/demo_threaded.py | import snowboythreaded
import sys
import signal
import time
stop_program = False
# This a demo that shows running Snowboy in another thread
def signal_handler(signal, frame):
global stop_program
stop_program = True
if len(sys.argv) == 1:
print("Error: need to specify model name")
print("Usage: python demo4.py your.model")
sys.exit(-1)
model = sys.argv[1]
# capture SIGINT signal, e.g., Ctrl+C
signal.signal(signal.SIGINT, signal_handler)
# Initialize ThreadedDetector object and start the detection thread
threaded_detector = snowboythreaded.ThreadedDetector(model, sensitivity=0.5)
threaded_detector.start()
print('Listening... Press Ctrl+C to exit')
# main loop
threaded_detector.start_recog(sleep_time=0.03)
# Let audio initialization happen before requesting input
time.sleep(1)
# Do a simple task separate from the detection - addition of numbers
while not stop_program:
try:
num1 = int(raw_input("Enter the first number to add: "))
num2 = int(raw_input("Enter the second number to add: "))
print "Sum of number: {}".format(num1 + num2)
except ValueError:
print "You did not enter a number."
threaded_detector.terminate()
| 1,203 | 24.083333 | 76 | py |
snowboy | snowboy-master/examples/Python/demo3.py | import snowboydecoder
import sys
import wave
# Demo code for detecting hotword in a .wav file
# Example Usage:
# $ python demo3.py resources/snowboy.wav resources/models/snowboy.umdl
# Should print:
# Hotword Detected!
#
# $ python demo3.py resources/ding.wav resources/models/snowboy.umdl
# Should print:
# Hotword Not Detected!
if len(sys.argv) != 3:
print("Error: need to specify wave file name and model name")
print("Usage: python demo3.py wave_file model_file")
sys.exit(-1)
wave_file = sys.argv[1]
model_file = sys.argv[2]
f = wave.open(wave_file)
assert f.getnchannels() == 1, "Error: Snowboy only supports 1 channel of audio (mono, not stereo)"
assert f.getframerate() == 16000, "Error: Snowboy only supports 16K sampling rate"
assert f.getsampwidth() == 2, "Error: Snowboy only supports 16bit per sample"
data = f.readframes(f.getnframes())
f.close()
sensitivity = 0.5
detection = snowboydecoder.HotwordDetector(model_file, sensitivity=sensitivity)
ans = detection.detector.RunDetection(data)
if ans == 1:
print('Hotword Detected!')
else:
print('Hotword Not Detected!')
| 1,113 | 26.170732 | 98 | py |
snowboy | snowboy-master/examples/Python3/demo4.py | import snowboydecoder
import sys
import signal
import speech_recognition as sr
import os
"""
This demo file shows you how to use the new_message_callback to interact with
the recorded audio after a keyword is spoken. It uses the speech recognition
library in order to convert the recorded audio into text.
Information on installing the speech recognition library can be found at:
https://pypi.python.org/pypi/SpeechRecognition/
"""
interrupted = False
def audioRecorderCallback(fname):
print("converting audio to text")
r = sr.Recognizer()
with sr.AudioFile(fname) as source:
audio = r.record(source) # read the entire audio file
# recognize speech using Google Speech Recognition
try:
# for testing purposes, we're just using the default API key
# to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
# instead of `r.recognize_google(audio)`
print(r.recognize_google(audio))
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
os.remove(fname)
def detectedCallback():
print('recording audio...', end='', flush=True)
def signal_handler(signal, frame):
global interrupted
interrupted = True
def interrupt_callback():
global interrupted
return interrupted
if len(sys.argv) == 1:
print("Error: need to specify model name")
print("Usage: python demo.py your.model")
sys.exit(-1)
model = sys.argv[1]
# capture SIGINT signal, e.g., Ctrl+C
signal.signal(signal.SIGINT, signal_handler)
detector = snowboydecoder.HotwordDetector(model, sensitivity=0.38)
print('Listening... Press Ctrl+C to exit')
# main loop
detector.start(detected_callback=detectedCallback,
audio_recorder_callback=audioRecorderCallback,
interrupt_check=interrupt_callback,
sleep_time=0.01)
detector.terminate()
| 2,060 | 26.118421 | 106 | py |
snowboy | snowboy-master/examples/Python3/demo.py | import snowboydecoder
import sys
import signal
interrupted = False
def signal_handler(signal, frame):
global interrupted
interrupted = True
def interrupt_callback():
global interrupted
return interrupted
if len(sys.argv) == 1:
print("Error: need to specify model name")
print("Usage: python demo.py your.model")
sys.exit(-1)
model = sys.argv[1]
# capture SIGINT signal, e.g., Ctrl+C
signal.signal(signal.SIGINT, signal_handler)
detector = snowboydecoder.HotwordDetector(model, sensitivity=0.5)
print('Listening... Press Ctrl+C to exit')
# main loop
detector.start(detected_callback=snowboydecoder.play_audio_file,
interrupt_check=interrupt_callback,
sleep_time=0.03)
detector.terminate()
| 757 | 20.055556 | 65 | py |
snowboy | snowboy-master/examples/Python3/snowboydetect.py | ../../swig/Python3/snowboydetect.py | 35 | 35 | 35 | py |
snowboy | snowboy-master/examples/Python3/snowboydecoder.py | #!/usr/bin/env python
import collections
import pyaudio
from . import snowboydetect
import time
import wave
import os
import logging
from ctypes import *
from contextlib import contextmanager
logging.basicConfig()
logger = logging.getLogger("snowboy")
logger.setLevel(logging.INFO)
TOP_DIR = os.path.dirname(os.path.abspath(__file__))
RESOURCE_FILE = os.path.join(TOP_DIR, "resources/common.res")
DETECT_DING = os.path.join(TOP_DIR, "resources/ding.wav")
DETECT_DONG = os.path.join(TOP_DIR, "resources/dong.wav")
def py_error_handler(filename, line, function, err, fmt):
pass
ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)
c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)
@contextmanager
def no_alsa_error():
try:
asound = cdll.LoadLibrary('libasound.so')
asound.snd_lib_error_set_handler(c_error_handler)
yield
asound.snd_lib_error_set_handler(None)
except:
yield
pass
class RingBuffer(object):
    """Ring buffer to hold audio from PortAudio.

    Backed by a ``collections.deque`` with ``maxlen``: once ``size`` bytes
    are held the oldest bytes are dropped automatically, so a slow consumer
    can never make the buffer grow without bound.
    """

    def __init__(self, size=4096):
        self._buf = collections.deque(maxlen=size)

    def extend(self, data):
        """Adds data to the end of buffer"""
        self._buf.extend(data)

    def get(self):
        """Retrieves data from the beginning of buffer and clears it"""
        # The deque stores individual byte values; rebuild a contiguous
        # bytes object before clearing.
        tmp = bytes(bytearray(self._buf))
        self._buf.clear()
        return tmp
def play_audio_file(fname=DETECT_DING):
    """Simple callback function to play a wave file. By default it plays
    a Ding sound.

    :param str fname: wave file name
    :return: None
    """
    # Read the entire clip up front; detection sounds are short.
    wav = wave.open(fname, 'rb')
    frames = wav.readframes(wav.getnframes())
    # Keep ALSA's stderr chatter quiet while PyAudio initialises.
    with no_alsa_error():
        audio = pyaudio.PyAudio()
    stream_out = audio.open(
        format=audio.get_format_from_width(wav.getsampwidth()),
        channels=wav.getnchannels(),
        rate=wav.getframerate(),
        input=False,
        output=True)
    stream_out.start_stream()
    stream_out.write(frames)
    # Let the buffered output drain before closing the stream.
    time.sleep(0.2)
    stream_out.stop_stream()
    stream_out.close()
    audio.terminate()
class HotwordDetector(object):
"""
Snowboy decoder to detect whether a keyword specified by `decoder_model`
exists in a microphone input stream.
:param decoder_model: decoder model file path, a string or a list of strings
:param resource: resource file path.
:param sensitivity: decoder sensitivity, a float of a list of floats.
The bigger the value, the more senstive the
decoder. If an empty list is provided, then the
default sensitivity in the model will be used.
:param audio_gain: multiply input volume by this factor.
:param apply_frontend: applies the frontend processing algorithm if True.
"""
def __init__(self, decoder_model,
resource=RESOURCE_FILE,
sensitivity=[],
audio_gain=1,
apply_frontend=False):
tm = type(decoder_model)
ts = type(sensitivity)
if tm is not list:
decoder_model = [decoder_model]
if ts is not list:
sensitivity = [sensitivity]
model_str = ",".join(decoder_model)
self.detector = snowboydetect.SnowboyDetect(
resource_filename=resource.encode(), model_str=model_str.encode())
self.detector.SetAudioGain(audio_gain)
self.detector.ApplyFrontend(apply_frontend)
self.num_hotwords = self.detector.NumHotwords()
if len(decoder_model) > 1 and len(sensitivity) == 1:
sensitivity = sensitivity * self.num_hotwords
if len(sensitivity) != 0:
assert self.num_hotwords == len(sensitivity), \
"number of hotwords in decoder_model (%d) and sensitivity " \
"(%d) does not match" % (self.num_hotwords, len(sensitivity))
sensitivity_str = ",".join([str(t) for t in sensitivity])
if len(sensitivity) != 0:
self.detector.SetSensitivity(sensitivity_str.encode())
self.ring_buffer = RingBuffer(
self.detector.NumChannels() * self.detector.SampleRate() * 5)
def start(self, detected_callback=play_audio_file,
interrupt_check=lambda: False,
sleep_time=0.03,
audio_recorder_callback=None,
silent_count_threshold=15,
recording_timeout=100):
"""
Start the voice detector. For every `sleep_time` second it checks the
audio buffer for triggering keywords. If detected, then call
corresponding function in `detected_callback`, which can be a single
function (single model) or a list of callback functions (multiple
models). Every loop it also calls `interrupt_check` -- if it returns
True, then breaks from the loop and return.
:param detected_callback: a function or list of functions. The number of
items must match the number of models in
`decoder_model`.
:param interrupt_check: a function that returns True if the main loop
needs to stop.
:param float sleep_time: how much time in second every loop waits.
:param audio_recorder_callback: if specified, this will be called after
a keyword has been spoken and after the
phrase immediately after the keyword has
been recorded. The function will be
passed the name of the file where the
phrase was recorded.
:param silent_count_threshold: indicates how long silence must be heard
to mark the end of a phrase that is
being recorded.
:param recording_timeout: limits the maximum length of a recording.
:return: None
"""
self._running = True
def audio_callback(in_data, frame_count, time_info, status):
self.ring_buffer.extend(in_data)
play_data = chr(0) * len(in_data)
return play_data, pyaudio.paContinue
with no_alsa_error():
self.audio = pyaudio.PyAudio()
self.stream_in = self.audio.open(
input=True, output=False,
format=self.audio.get_format_from_width(
self.detector.BitsPerSample() / 8),
channels=self.detector.NumChannels(),
rate=self.detector.SampleRate(),
frames_per_buffer=2048,
stream_callback=audio_callback)
if interrupt_check():
logger.debug("detect voice return")
return
tc = type(detected_callback)
if tc is not list:
detected_callback = [detected_callback]
if len(detected_callback) == 1 and self.num_hotwords > 1:
detected_callback *= self.num_hotwords
assert self.num_hotwords == len(detected_callback), \
"Error: hotwords in your models (%d) do not match the number of " \
"callbacks (%d)" % (self.num_hotwords, len(detected_callback))
logger.debug("detecting...")
state = "PASSIVE"
while self._running is True:
if interrupt_check():
logger.debug("detect voice break")
break
data = self.ring_buffer.get()
if len(data) == 0:
time.sleep(sleep_time)
continue
status = self.detector.RunDetection(data)
if status == -1:
logger.warning("Error initializing streams or reading audio data")
#small state machine to handle recording of phrase after keyword
if state == "PASSIVE":
if status > 0: #key word found
self.recordedData = []
self.recordedData.append(data)
silentCount = 0
recordingCount = 0
message = "Keyword " + str(status) + " detected at time: "
message += time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(time.time()))
logger.info(message)
callback = detected_callback[status-1]
if callback is not None:
callback()
if audio_recorder_callback is not None:
state = "ACTIVE"
continue
elif state == "ACTIVE":
stopRecording = False
if recordingCount > recording_timeout:
stopRecording = True
elif status == -2: #silence found
if silentCount > silent_count_threshold:
stopRecording = True
else:
silentCount = silentCount + 1
elif status == 0: #voice found
silentCount = 0
if stopRecording == True:
fname = self.saveMessage()
audio_recorder_callback(fname)
state = "PASSIVE"
continue
recordingCount = recordingCount + 1
self.recordedData.append(data)
logger.debug("finished.")
    def saveMessage(self):
        """
        Save the message stored in self.recordedData to a timestamped file.
        :return: the name of the .wav file that was written.
        """
        # Second-resolution timestamp keeps successive recordings distinct.
        filename = 'output' + str(int(time.time())) + '.wav'
        data = b''.join(self.recordedData)
        #use wave to save data
        wf = wave.open(filename, 'wb')
        # Recordings are written as mono, matching the detector input.
        wf.setnchannels(1)
        # NOTE(review): BitsPerSample() / 8 yields a float under Python 3;
        # pyaudio's get_format_from_width tolerates it (2.0 == 2), but
        # integer division (//) would be the cleaner form -- confirm.
        wf.setsampwidth(self.audio.get_sample_size(
            self.audio.get_format_from_width(
                self.detector.BitsPerSample() / 8)))
        wf.setframerate(self.detector.SampleRate())
        wf.writeframes(data)
        wf.close()
        logger.debug("finished saving: " + filename)
        return filename
    def terminate(self):
        """
        Terminate audio stream. Users can call start() again to detect.
        :return: None
        """
        # Release the input stream and PyAudio instance opened by start().
        self.stream_in.stop_stream()
        self.stream_in.close()
        self.audio.terminate()
        # Tell the start() loop (possibly running in another thread) to exit;
        # start() re-creates the audio objects, so detection can be restarted.
        self._running = False
| 10,475 | 36.683453 | 82 | py |
snowboy | snowboy-master/examples/Python3/demo2.py | import snowboydecoder
import sys
import signal
# Demo code for listening to two hotwords at the same time
interrupted = False
def signal_handler(signal, frame):
global interrupted
interrupted = True
def interrupt_callback():
global interrupted
return interrupted
if len(sys.argv) != 3:
print("Error: need to specify 2 model names")
print("Usage: python demo.py 1st.model 2nd.model")
sys.exit(-1)
models = sys.argv[1:]
# capture SIGINT signal, e.g., Ctrl+C
signal.signal(signal.SIGINT, signal_handler)
sensitivity = [0.5]*len(models)
detector = snowboydecoder.HotwordDetector(models, sensitivity=sensitivity)
callbacks = [lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING),
lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)]
print('Listening... Press Ctrl+C to exit')
# main loop
# make sure you have the same numbers of callbacks and models
detector.start(detected_callback=callbacks,
interrupt_check=interrupt_callback,
sleep_time=0.03)
detector.terminate()
| 1,075 | 24.619048 | 80 | py |
snowboy | snowboy-master/examples/Python3/demo3.py | import snowboydecoder
import sys
import wave
# Demo code for detecting hotword in a .wav file
# Example Usage:
# $ python demo3.py resources/snowboy.wav resources/models/snowboy.umdl
# Should print:
# Hotword Detected!
#
# $ python demo3.py resources/ding.wav resources/models/snowboy.umdl
# Should print:
# Hotword Not Detected!
if len(sys.argv) != 3:
print("Error: need to specify wave file name and model name")
print("Usage: python demo3.py wave_file model_file")
sys.exit(-1)
wave_file = sys.argv[1]
model_file = sys.argv[2]
f = wave.open(wave_file)
assert f.getnchannels() == 1, "Error: Snowboy only supports 1 channel of audio (mono, not stereo)"
assert f.getframerate() == 16000, "Error: Snowboy only supports 16K sampling rate"
assert f.getsampwidth() == 2, "Error: Snowboy only supports 16bit per sample"
data = f.readframes(f.getnframes())
f.close()
sensitivity = 0.5
detection = snowboydecoder.HotwordDetector(model_file, sensitivity=sensitivity)
ans = detection.detector.RunDetection(data)
if ans == 1:
print('Hotword Detected!')
else:
print('Hotword Not Detected!')
| 1,113 | 26.170732 | 98 | py |
mlj19-iggp | mlj19-iggp-master/specialised_ilasp.py | import asp
import config as cfg
import subprocess
import re
import os
import glob
import common
import prolog
import json
class SPECIALISED_ILASP:
    """Learner wrapper that runs the GGP_ILASP binary once per specialised
    sub-predicate and evaluates the learned hypotheses with clingo.

    File-name conventions (game name, main target, sub-target) are derived
    from the path arguments, mirroring the other learner wrappers.
    """

    ilasp='./GGP_ILASP'
    name='specialised_ilasp'

    def __init__(self):
        pass

    def parse_train(self,datafile,outpath,game,target):
        """Write one <outpath>/<subtarget>.ilasp task file per sub-predicate."""
        for (bk,modes,examples,subtarget) in self.parse(datafile,game,target):
            with open('{}/{}.ilasp'.format(outpath,subtarget),'w') as f:
                f.write(bk + '\n' + modes + '\n' + examples)

    # - inpath is the path for the game files (e.g. exp/aleph/train/minimal_decay/)
    # - outfile is the file to which to write the hypothesis (e.g. programs/aleph/minimal_decay/next_value.pl)
    # - target is the sub-target (e.g. next_value)
    def train(self,inpath,outfile,target):
        """Run ILASP on one sub-target and extract the last satisfiable
        hypothesis (the text between '{' and '}') from its raw output."""
        game = inpath.split("/")[-2]
        main_target = outfile.split("/")[-1].split(".")[0].split("_")[0]
        with open(outfile + "_raw", 'w') as f:
            cmd="{} {} ./data/train/{}_{}_train.dat ./types/{}.typ".format(self.ilasp, target, game, main_target, game)
            print(cmd)
            try:
                subprocess.run(cmd.split(' '),timeout=cfg.learning_timeout,stdout=f)
            except subprocess.TimeoutExpired:
                # A timeout still leaves whatever partial output was produced.
                pass
        # BUG FIX: map() is lazy in Python 3, so the original
        # map(os.remove, glob.glob(...)) never deleted any scratch files.
        for scratch in glob.glob("out_{}{}*".format(target, game)):
            os.remove(scratch)
        for scratch in glob.glob("task_{}{}*".format(target, game)):
            os.remove(scratch)
        with open(outfile + "_raw") as f:
            raw_output = f.read()
        # Keep the hypothesis from the *last* satisfiable answer set.
        final_hypothesis = ""
        for hyp in raw_output.split("{"):
            if "UNSATISFIABLE" not in hyp:
                final_hypothesis = hyp.split("}")[0]
        with open(outfile, 'w') as f:
            f.write(final_hypothesis)

    def do_test(self,dataf,programf,outf):
        """Evaluate the learned hypothesis in *programf* on the test data.

        For every test example, builds a clingo program from the example's
        background atoms, the game's type declarations and the hypothesis,
        runs clingo, and appends one "<predicted>,<label>" line per relevant
        Herbrand-base atom to *outf*.
        """
        print(outf)
        game = outf.split("/")[-2]
        main_target = outf.split("/")[-1].split(".")[0].split("_")[0]
        sub_target = outf.split("/")[-1].split(".")[0]
        with open("./data/test/{}_{}_test.dat".format(game, main_target)) as f:
            sets = f.read().split("---")
        herbrand_base = []
        background = []
        examples = []
        # Sections are introduced by a keyword atom; subsequent atoms belong
        # to the most recently seen section.
        for s in sets:
            splt = re.findall('[a-z][a-zA-Z0-9_\\(\\) ,]*', s)
            if len(splt) > 0:
                section, atoms = splt[0], splt[1:]
                if section == "atoms":
                    herbrand_base.extend(atoms)
                elif section == "statics":
                    background.extend(atoms)
                elif section == "background":
                    # New example: [background atoms, positive atoms].
                    examples.append([list(atoms), []])
                elif section == "positives":
                    examples[-1][1].extend(atoms)
        # The type declarations and hypothesis are identical for every
        # example, so build those program fragments once (the original
        # re-read and re-parsed both files inside the example loop).
        with open('types/' + game + '.typ') as f:
            types = f.read()
        type_prg = ""
        for type_dec in re.finditer('([^\\.]*)\\.', types):
            match = re.search('(.*[^ ]) *:: *([^ ].*)', type_dec.group(1), re.MULTILINE | re.DOTALL)
            if match:
                lhs = match.group(1).split(",")
                rhs = match.group(2).split("->")
                # Plain (non-functional, non-bool) types become facts.
                if rhs[-1] != "bool" and len(rhs) == 1:
                    for pred in lhs:
                        type_prg += rhs[0] + '(' + pred + ').\n'
            else:
                # Subtype declarations "a :> b" become inclusion rules.
                match = re.search('(.*[^ ]) *:> *([^ ].*)', type_dec.group(1), re.MULTILINE | re.DOTALL)
                if match:
                    for pred in match.group(1).split(","):
                        type_prg += match.group(2) + '(X) :- ' + pred + '(X).\n'
        with open(programf) as f:
            hypothesis = f.read()
        results = "\n\n"
        for eg in examples:
            prg = ""
            for a in background:
                prg += a + ".\n"
            prg += "% example\n"
            for a in eg[0]:
                prg += a + ".\n"
            prg += "% types\n"
            prg += type_prg
            prg += "% hypothesis\n"
            prg += "\n\n" + hypothesis + "\n\n"
            with open(outf + "_test", 'w') as f:
                f.write(prg)
            with open(outf + "_out", 'w') as f, open(outf + '_err', 'w') as f_err:
                subprocess.run(["clingo", outf + "_test","--outf=2"], stdout=f, stderr=f_err)
            with open(outf + "_out") as f:
                jsn = json.loads(f.read())
            try:
                answer_set = jsn['Call'][0]['Witnesses'][0]['Value']
                for a in herbrand_base:
                    if sub_target in a:
                        # Predicted 1 iff the normalised atom is in the model;
                        # label 1 iff it was listed among the positives.
                        res = 1 if a.replace(" ", "").replace("()", "") in answer_set else 0
                        label = 1 if a in eg[1] else 0
                        results += "{},{}\n".format(res,label)
            except (KeyError, IndexError, TypeError):
                # Clingo produced no model (or unexpected JSON); dump the
                # output and stderr for debugging instead of failing silently.
                print(jsn)
                with open(outf + "_err") as f:
                    print(f.read())
        with open(outf, 'w') as f:
            f.write(results)

    def parse(self,filename,game,target):
        """Yield one dummy (modes, background, examples, sub_predicate) task
        per sub-predicate; the ILASP_GGP executable does the real parsing."""
        for sub_predicate in asp.get_subpredicate(game,target):
            modes = ""
            background = ""
            examples = ""
            yield (modes,background,examples,sub_predicate)

    def parse_test(self,datafile,outpath,game,target):
        """Write one (dummy) test task file per sub-predicate."""
        for (bk,modes,examples,subtarget) in self.parse(datafile,game,target):
            with open('{}/{}.ilasp'.format(outpath,subtarget),'w') as f:
                f.write(bk + '\n' + modes + '\n' + examples)
| 5,853 | 33.233918 | 119 | py |
mlj19-iggp | mlj19-iggp-master/asp.py | import re
def fill_in_fns(arg_list, func_decs, type_decs):
    """Recursively enumerate every flattened instantiation of *arg_list*.

    Each argument type is expanded either into a plain typed placeholder
    (", +type", when it appears in *type_decs*), or into every function
    declaration from *func_decs* whose result type matches, with the
    function's own argument types expanded recursively.  Returns the
    cartesian product over all arguments as dicts with a "name" suffix
    (function symbols joined by "_") and a "body" mode-argument string.
    """
    combos = [{"name": "", "body": ""}]
    for arg in arg_list:
        grown = []
        # Plain type: extend every partial combination with one "+arg" slot.
        if any(td["type"] == arg for td in type_decs):
            grown.extend(
                {"name": c["name"], "body": c["body"] + ", +" + arg}
                for c in combos
            )
        # Function symbols producing `arg`: splice in their expanded bodies.
        for fd in func_decs:
            if fd["type"] != arg:
                continue
            inner = fill_in_fns(fd["body"], func_decs, type_decs)
            grown.extend(
                {"name": c["name"] + "_" + fd["name"] + sub["name"],
                 "body": c["body"] + sub["body"]}
                for c in combos
                for sub in inner
            )
        combos = grown
    return combos
def get_subpredicate(game,target):
    """Yield the name of every specialised sub-predicate of ``target``.

    Reads the type declarations in ``types/<game>.typ``.  Declarations of
    the form ``name :: t1 -> ... -> tn`` with ``tn != bool`` are function
    declarations; for each boolean declaration matching ``target``, one
    specialised name ``target_f1_f2...`` is yielded per combination of
    function symbols whose result types match the target's argument types.

    :param game: game name; selects the file ``types/<game>.typ``.
    :param target: name of the boolean predicate to specialise.

    Improvements over the original: the type file is closed via a context
    manager (it was previously leaked) and the unused ``type_decs`` local
    was removed.
    """
    func_decs = []
    # Spaces are stripped up front, so the ' *:: *' patterns below only
    # ever see the zero-space form.
    with open('types/' + game + '.typ') as f:
        types = f.read().replace(" ", "")
    # First pass: collect function declarations (non-bool result type with
    # at least one argument type).
    for type_dec in re.finditer('([^\\.\\n]*)\\.', types):
        match = re.search('(.*[^ ]) *:: *([^ ].*)', type_dec.group(1))
        if match:
            lhs = match.group(1).split(",")
            rhs = match.group(2).split("->")
            if rhs[-1] != "bool" and len(rhs) != 1:
                for pred in lhs:
                    func_decs.append({"type": rhs[-1], "name": pred, "body": rhs[:-1]})
    # Second pass: find the boolean declaration for `target` and expand each
    # of its argument types by the matching function symbols.
    for type_dec in re.finditer('([^\\.\\n]*)\\.', types):
        match = re.search('(.*[^ ]) *:: *([^ ].*)', type_dec.group(1))
        if match:
            lhs = match.group(1).split(",")
            rhs = match.group(2).split("->")
            if rhs[-1] == 'bool':
                for pred in lhs:
                    if pred == target:
                        preds = [pred]
                        for arg in rhs:
                            new_preds = [p + "_" + f["name"]
                                         for f in func_decs if f["type"] == arg
                                         for p in preds]
                            # Only replace when some function matched this
                            # argument type (mirrors the original `found` flag).
                            if new_preds:
                                preds = new_preds
                        for p in preds:
                            yield p
def parse(filename,game,target):
    """Yield an ASP learning task (modes, background, examples, sub_predicate)
    for each sub-predicate of `target`, built from the type file
    'types/<game>.typ' and the '---'-separated example sections in `filename`.
    """
    for sub_predicate in get_subpredicate(game,target):
        modes = ""
        background = ""
        examples = ""
        func_decs = []
        type_decs = []
        types = open('types/' + game + '.typ').read().replace(" ", "")
        # pass 1 over the type file: constants, function declarations, subtyping
        for type_dec in re.finditer('([^\\.\\n]*)\\.', types):
            match = re.search('(.*[^ ]) *:: *([^ ].*)', type_dec.group(1))
            if match:
                lhs = match.group(1).split(",")
                rhs = match.group(2).split("->")
                if rhs[-1] != "bool":
                    if len(rhs) == 1:
                        # typed constant: ground facts plus eq/typing mode declarations
                        for pred in lhs:
                            type_decs.append({ "type": rhs[-1], "name": pred})
                            background += rhs[0] + '(' + pred + ', EG_ID) :- eg_id(EG_ID).\n'
                            background += rhs[0] + '(' + pred + ').\n'
                            background += 'eq(' + pred + ', ' + pred + ').\n'
                            modes += 'modeb(eq(+' + rhs[0] + ', #' + rhs[0] + ')).\n'
                            modes += 'modeb(' + rhs[0] + '(-' + rhs[0] + ', +eg_id)).\n'
                    else:
                        for pred in lhs:
                            func_decs.append({ "type": rhs[-1], "name": pred, "body": rhs[:-1]})
            else:
                # subtype declaration 'sub :> super.' becomes an inclusion rule
                match = re.search('(.*[^ ]) *:> *([^ ].*)', type_dec.group(1), re.MULTILINE | re.DOTALL)
                if match:
                    lhs = match.group(1).split(",")
                    for pred in lhs:
                        background += match.group(2) + '(X) :- ' + pred + '(X).\n'
        arity = 0
        # pass 2: mode declarations for bool predicates; the sub-target becomes modeh
        for type_dec in re.finditer('([^\\.\\n]*)\\.', types):
            match = re.search('(.*[^ ]) *:: *([^ ].*)', type_dec.group(1))
            if match:
                lhs = match.group(1).split(",")
                rhs = match.group(2).split("->")
                if rhs[-1] == 'bool':
                    for pred in lhs:
                        if len(rhs) == 1:
                            if pred == sub_predicate:
                                modes += 'modeh(' + pred + '(+eg_id)).\n'
                                arity = 0
                            elif not pred.startswith("legal") and not pred.startswith("terminal") and not pred.startswith("next") and not pred.startswith("goal"):
                                modes += 'modeb(' + pred + '(+eg_id)).\n'
                        else:
                            mds = fill_in_fns(rhs[:-1], func_decs, type_decs)
                            for md in mds:
                                m = pred + md["name"] + '(+eg_id' + md["body"] + ')'
                                if pred + md["name"] == sub_predicate:
                                    modes += 'modeh(' + m + ').\n'
                                    arity = len(md["body"].split(",")) - 1
                                elif not pred.startswith("legal") and not pred.startswith("terminal") and not pred.startswith("next") and not pred.startswith("goal"):
                                    modes += 'modeb(' + m + ').\n'
        # pass over the data file: sections are separated by '---'
        herbrand_domain = []
        sets = open(filename).read().split("---")
        eg_id = 0
        i = 0
        for s in sets:
            i = i + 1
            splt = re.findall('[a-z][a-zA-Z0-9_\\(\\) ,]*', s)
            if splt != []:
                if splt[0] == 'atoms':
                    herbrand_domain = splt[1:]
                elif splt[0] == 'background':
                    # one example id per background section; atoms get the id
                    # injected as their first argument
                    eg_id += 1
                    for a in splt[1:]:
                        split_a = a.split("(")
                        if len(split_a) == 1:
                            background += a + '(' + str(eg_id) + ').\n'
                        else:
                            background += split_a[0] + '(' + str(eg_id) + ', ' + split_a[1] + '.\n'
                    # constraint: the hypothesis may not cover anything outside eg/1
                    background += ":- " + sub_predicate + "(" + str(eg_id)
                    for var_i in range(arity):
                        background += ", V" + str(var_i)
                    background += "), not eg(" + sub_predicate + "(" + str(eg_id)
                    for var_i in range(arity):
                        background += ", V" + str(var_i)
                    background += ")).\n"
                elif splt[0] == 'positives':
                    # atoms of the sub-predicate listed in this section are positives
                    for a in herbrand_domain:
                        split_a = a.split("(")
                        if a in splt[1:] and split_a[0] == sub_predicate:
                            examples += "example("
                            if '()' in a or len(split_a) == 1:
                                examples += split_a[0] + '(' + str(eg_id) + ')'
                            else:
                                examples += split_a[0] + '(' + str(eg_id) + ', ' + split_a[1]
                            examples += ", 1"
                            examples += ").\n"
                            examples += "eg("
                            if '()' in a or len(split_a) == 1:
                                examples += split_a[0] + '(' + str(eg_id) + ')'
                            else:
                                examples += split_a[0] + '(' + str(eg_id) + ', ' + split_a[1]
                            examples += ").\n"
                elif splt[0] == 'statics':
                    # statics hold in every example
                    for a in splt[1:]:
                        split_a = a.split("(")
                        if len(split_a) == 1:
                            background += a + '(EG_ID) :- eg_id(EG_ID).\n'
                        else:
                            background += split_a[0] + '(EG_ID, ' + split_a[1] + ' :- eg_id(EG_ID).\n'
        background += 'eg_id(1..' + str(eg_id) + ').\n'
        #print(background)
    # print examples
        yield (modes,background,examples,sub_predicate)
| 8,030 | 42.646739 | 166 | py |
mlj19-iggp | mlj19-iggp-master/aleph.py | import common
import prolog
import config as cfg
from os.path import isfile
class Aleph:
    """Adapter running the Aleph ILP system over the IGGP data format."""
    name='aleph'
    aleph_path='aleph/aleph'      # Aleph prolog sources, loaded into YAP
    aleph_runner='aleph/runner'   # test-harness prolog file
    def __init__(self):
        pass
    def parse_train(self,datafile,outpath,game,target):
        """Write Aleph .b/.f/.n task files for every sub-target in datafile."""
        for (subtarget,bk,pos,neg) in common.parse_target(datafile):
            prims=set(map(common.pred,bk))
            (p,a)=subtarget
            subtarget='{}/{}'.format(p,a)
            prims=list(map(lambda x: '{}/{}'.format(x[0],x[1]),prims))
            fname=outpath+p
            # background file: determinations for each primitive, then the facts
            with open(fname+'.b','w') as f:
                for prim in prims:
                    f.write(':- determination({},{}).\n'.format(subtarget,prim))
                for atom in bk:
                    f.write('{}.\n'.format(atom))
            # positive and negative example files
            with open(fname+'.f','w') as f:
                for atom in pos:
                    f.write('{}.\n'.format(atom))
            with open(fname+'.n','w') as f:
                for atom in neg:
                    f.write('{}.\n'.format(atom))
    def parse_test(self,datafile,outpath,game,target):
        """Write plain test task files (shared format) for every sub-target."""
        for (subtarget,bk,pos,neg) in common.parse_target(datafile):
            common.parse_test(outpath,subtarget,bk,pos,neg)
    def train(self,inpath,outfile,target):
        """Run Aleph under YAP on one sub-target, writing rules to outfile."""
        infile=inpath+target
        cmd="set(verbose,0),read_all('{}'),induce_modes,induce,write_rules('{}'),halt.".format(infile,outfile)
        prolog.yap(cmd,[self.aleph_path],outfile=None,timeout=cfg.learning_timeout)
    def do_test(self,dataf,programf,outf):
        """Evaluate a learned program; if learning produced no program file,
        run the harness with data only (everything predicted negative)."""
        if isfile(programf):
            prolog.swipl('do_test,halt.',[dataf,programf,self.aleph_runner],outf,timeout=None)
        else:
            prolog.swipl('do_test,halt.',[dataf,self.aleph_runner],outf,timeout=None)
mlj19-iggp | mlj19-iggp-master/ilasp.py | import asp
import config as cfg
import subprocess
import re
class ILASP:
    """Adapter building ILASP learning tasks and invoking the ILASP jar."""
    ilasp=''  # NOTE(review): path to the ILASP jar is empty here — presumably filled in elsewhere; confirm
    xhail='xhail/xhail_mod.jar'
    name='ilasp'
    clasp='xhail/clasp-3.1.0-x86_64-linux'
    gringo='xhail/gringo3-linux'
    def __init__(self):
        pass
    def parse_train(self,datafile,outpath,game,target):
        """Write one .ilasp task file per sub-target for training."""
        for (bk,modes,examples,subtarget) in self.parse(datafile,game,target):
            with open('{}/{}.ilasp'.format(outpath,subtarget),'w') as f:
                f.write(bk + '\n' + modes + '\n' + examples)
    def parse_test(self,datafile,outpath,game,target):
        """Write one .ilasp task file per sub-target for testing (same layout)."""
        for (bk,modes,examples,subtarget) in self.parse(datafile,game,target):
            with open('{}/{}.ilasp'.format(outpath,subtarget),'w') as f:
                f.write(bk + '\n' + modes + '\n' + examples)
    # - inpath is the path for the game files (e.g. exp/aleph/train/minimal_decay/)
    # - outfile is the file to which to write the hypothesis (e.g. programs/aleph/minimal_decay/next_value.pl)
    # - target is the sub-target (e.g. next_value)
    def train(self,inpath,outfile,target):
        """Run the ILASP jar on one task file, capturing stdout as the hypothesis."""
        infile = '{}{}.ilasp'.format(inpath,target)
        with open(outfile,'w') as f:
            cmd="java -jar {} -c {} -g {} {} -i 100".format(self.ilasp,self.clasp,self.gringo,infile)
            print(cmd)
            try:
                subprocess.run(cmd.split(' '),timeout=cfg.learning_timeout,stdout=f)
            except subprocess.TimeoutExpired:
                # timeout leaves whatever partial output was written
                pass
    # - datapath is the name of the file with the test data (e.g. exp/aleph/test/minimal_decay)
    # - programf is the name of the file with the hypothesis (e.g. programs/aleph/minimal_decay/next_value.pl)
    # - outf is the name of the file to write the results (e.g. results/aleph/minimal_decay/next_value.pl)
    # the results file should be a csv where each line represents an example and is of the form prediction,label
    # for instance, the following 5 lines denote 3 positive examples and 2 negative examples (the second column), the learner misclassified the third example (predicted 0 when it should have been 1)
    # 1,1
    # 1,1
    # 0,1
    # 0,0
    # 0,0
    def do_test(self,dataf,programf,outf):
        # intentionally unimplemented for this adapter
        pass
    def parse(self,filename,game,target):
        """Yield an ILASP task (modes, background, examples, sub_predicate) per
        sub-target, built from 'types/<game>.typ' and the '---'-separated
        sections in `filename` (examples become #pos contexts)."""
        for sub_predicate in asp.get_subpredicate(game,target):
            type_modes = ""
            constant_modes = ""
            modes = ""
            background = ""
            examples = ""
            func_decs = []
            type_decs = []
            types = open('types/' + game + '.typ').read()
            # pass 1: typed constants -> facts, #constant and #modeb(typing) declarations
            for type_dec in re.finditer('([^\\.\\n]*)\\.', types):
                match = re.search('(.*[^ ]) *:: *([^ ].*)', type_dec.group(1))
                if match:
                    lhs = match.group(1).split(", ")
                    rhs = match.group(2).split(" -> ")
                    if rhs[-1] != "bool":
                        if len(rhs) == 1:
                            for pred in lhs:
                                type_decs.append({ "type": rhs[-1], "name": pred})
                                background += rhs[0] + '(' + pred + ').\n'
                                constant_modes += '#constant(' + rhs[0] + ", " + pred + ').\n'
                                type_modes += '#modeb(' + rhs[0] + '(ph(' + rhs[0] + '))).\n'
                        else:
                            for pred in lhs:
                                func_decs.append({ "type": rhs[-1], "name": pred, "body": rhs[:-1]})
            # pass 2: #modeh for the sub-target, #modeb for other learnable predicates
            for type_dec in re.finditer('([^\\.\\n]*)\\.', types):
                match = re.search('(.*[^ ]) *:: *([^ ].*)', type_dec.group(1))
                if match:
                    lhs = match.group(1).split(", ")
                    rhs = match.group(2).split(" -> ")
                    if rhs[-1] == 'bool':
                        for pred in lhs:
                            if len(rhs) == 1:
                                if pred == sub_predicate:
                                    # NOTE(review): '+ modes +' re-embeds the accumulated mode
                                    # string inside this declaration; compare asp.parse which
                                    # emits just the predicate — looks unintended, confirm
                                    modes += '#modeh(' + modes + pred + ').\n'
                                elif pred not in ["next", "terminal", "goal", "legal"]:
                                    # NOTE(review): same '+ modes +' pattern here — confirm
                                    modes += '#modeb(' + modes + pred + ').\n'
                            else:
                                mds = self.fill_in_fns(rhs[:-1], func_decs, type_decs)
                                for md in mds:
                                    # md["body"] starts with ', ', hence the [2:] slice
                                    m = pred + md["name"] + '(' + md["body"][2:] + ')'
                                    if pred + md["name"] == sub_predicate:
                                        modes += '#modeh(' + m + ').\n'
                                    elif pred not in ["next", "terminal", "goal", "legal"]:
                                        modes += '#modeb(' + m + ').\n'
            modes += type_modes
            modes += constant_modes
            # pass over the data file sections
            herbrand_domain = []
            sets = open(filename).read().split("---")
            ctx = ""
            for s in sets:
                splt = re.findall('[a-z][a-zA-Z0-9_\\(\\) ,]*', s)
                if splt != []:
                    if splt[0] == 'atoms':
                        herbrand_domain = splt[1:]
                    elif splt[0] == 'background':
                        # the background becomes the context of the next #pos example
                        ctx = ""
                        for a in splt[1:]:
                            split_a = a.split("(")
                            if len(split_a) == 1:
                                ctx += a + '.\n'
                            else:
                                ctx += split_a[0] + '(' + split_a[1] + '.\n'
                    elif splt[0] == 'positives':
                        # herbrand atoms of the sub-target are split into
                        # inclusions (listed positives) and exclusions (the rest)
                        examples += "#pos({"
                        incs = []
                        excs = []
                        for a in herbrand_domain:
                            split_a = a.split("(")
                            if split_a[0] == sub_predicate:
                                if '()' in a or len(split_a) == 1:
                                    if a not in splt[1:]:
                                        excs.append(split_a[0])
                                    else:
                                        incs.append(split_a[0])
                                else:
                                    if a not in splt[1:]:
                                        excs.append(split_a[0] + '(' + split_a[1])
                                    else:
                                        incs.append(split_a[0] + '(' + split_a[1])
                        examples += ", ".join(incs) + "}, {" + ", ".join(excs) + "}, {\n"
                        examples += ctx + "\n}).\n"
                    elif splt[0] == 'statics':
                        for a in splt[1:]:
                            split_a = a.split("(")
                            if len(split_a) == 1:
                                background += a + '.\n'
                            else:
                                background += split_a[0] + '(' + split_a[1] + '.\n'
            yield (modes,background,examples,sub_predicate)
    def fill_in_fns(self, arg_list, func_decs, type_decs):
        """ILASP variant of asp.fill_in_fns: placeholders are 'ph(<type>)'
        rather than '+<type>'; otherwise the same recursive expansion."""
        mds = [{"name": "", "body": ""}]
        for arg in arg_list:
            new_mds = []
            if any(td["type"] == arg for td in type_decs):
                for md in mds:
                    new_mds.append({"name": md["name"], "body": (md["body"] + ", ph(" + arg + ")")})
            for fd in func_decs:
                if fd["type"] == arg:
                    fd_proc = self.fill_in_fns(fd["body"], func_decs, type_decs)
                    for md in mds:
                        for body in fd_proc:
                            new_mds.append({"name": md["name"] + "_" + fd["name"] + body["name"], "body": (md["body"] + body["body"])})
            mds = new_mds
        return mds
| 7,805 | 43.605714 | 198 | py |
mlj19-iggp | mlj19-iggp-master/config.py | map_size=8
# learning_timeout=600 # 10 minutes
# learning_timeout=60
# Wall-clock budget (seconds) given to each learning job: 30 minutes.
learning_timeout=1800
| 91 | 17.4 | 35 | py |
mlj19-iggp | mlj19-iggp-master/runner.py | import aleph
import metagol
import specialised_ilasp
import os
import multiprocessing
import signal
import numpy as np
from os import listdir
from os.path import isfile, join
from multiprocessing import Pool
import config as cfg
import sys
def game_names(path):
    """Return the sorted, de-duplicated game names found in a data directory.

    Data files are named '<game>_<target>_<stage>.dat'; the game name is
    everything before the last two underscore-separated components.
    """
    names = set()
    for entry in listdir(path):
        if entry.endswith('.dat') and isfile(join(path, entry)):
            names.add('_'.join(entry.split('_')[:-2]))
    return sorted(names)
def mkdir(path):
    """Create path (and any missing parents) unless something already exists there."""
    if os.path.exists(path):
        return
    os.makedirs(path)
def pred(atom):
    """Map a ground atom string such as 'cell(1,2,x)' to its (name, arity) pair.

    Arity is inferred from the comma count of the first argument segment.
    """
    segments = atom.split('(')
    arity = segments[1].count(',') + 1
    return (segments[0], arity)
def targets(inpath):
    """Collect the distinct file basenames (before the first '.') inside inpath."""
    found = set()
    for entry in listdir(inpath):
        if isfile(join(inpath, entry)):
            found.add(entry.split('.')[0])
    return found
def parmap(func,jobs):
    """Map func over jobs with a pool of cfg.map_size worker processes.

    Returns the results in job order.  The pool is used as a context manager
    so its workers are terminated once the map completes — the previous
    version never closed the pool, leaking worker processes on every call.
    """
    with Pool(cfg.map_size) as p:
        return p.map(func,jobs)
def parse_(args):
    """Worker: generate train and test task files for one (system, game) pair.

    Takes a single tuple so it can be dispatched through parmap.
    """
    (system,game) = args
    for stage in ['train','test']:
        outpath='exp/{}/{}/{}/'.format(system.name,stage,game)
        mkdir(outpath)
        for target in ['next','goal','legal','terminal']:
            # data files are named data/<stage>/<game>_<target>_<stage>.dat
            datafile='data/{0}/{1}_{2}_{0}.dat'.format(stage,game,target)
            if stage == 'train':
                system.parse_train(datafile,outpath,game,target)
            else:
                system.parse_test(datafile,outpath,game,target)
def parse(system):
    """Generate task files for every game of `system`, in parallel."""
    jobs = [(system, game) for game in game_names('data/train')]
    parmap(parse_, jobs)
def train_(args):
    """Worker: learn one sub-target of one game, writing the hypothesis
    to programs/<system>/<game>/<target>.pl."""
    (system,game,target) = args
    inpath='exp/{}/train/{}/'.format(system.name,game)
    outpath='programs/{}/{}/'.format(system.name,game)
    mkdir(outpath)
    system.train(inpath,outpath+'{}.pl'.format(target),target)
def train(system):
    """Launch learning for every (game, sub-target) pair of `system`, in parallel."""
    jobs = []
    for game in game_names('data/train'):
        task_dir = 'exp/{}/train/{}/'.format(system.name, game)
        program_dir = 'programs/{}/{}/'.format(system.name, game)
        mkdir(program_dir)
        jobs.extend((system, game, target) for target in targets(task_dir))
    parmap(train_, jobs)
def do_test_(args):
    """Worker: evaluate every learned sub-target of one game, writing one
    prediction,label csv per sub-target to results/<system>/<game>/."""
    (system,game)=args
    inpath='exp/{}/test/{}/'.format(system.name,game)
    outpath='results/{}/{}/'.format(system.name,game)
    mkdir(outpath)
    for target in targets(inpath):
        dataf='exp/{}/test/{}/{}.pl'.format(system.name,game,target)
        programf='programs/{}/{}/{}.pl'.format(system.name,game,target)
        resultsf=outpath+'{}.pl'.format(target)
        system.do_test(dataf,programf,resultsf)
def do_test(system):
    """Evaluate the learned programs for every game of `system`, in parallel."""
    jobs = [(system, game) for game in sorted(game_names('data/train'))]
    parmap(do_test_, jobs)
def balanced_acc(results):
    """Balanced accuracy over (prediction, label) pairs.

    Averages recall on the positive (label 1) and negative (label 0) classes;
    falls back to the recall of the single class present when the other is
    empty, and returns -1 when there are no examples at all.
    """
    pairs = list(results)
    pos_total = sum(1.0 for _, label in pairs if label == 1)
    neg_total = sum(1.0 for _, label in pairs if label == 0)
    true_pos = sum(1.0 for guess, label in pairs if guess == 1 and label == 1)
    true_neg = sum(1.0 for guess, label in pairs if guess == 0 and label == 0)
    if pos_total == 0 and neg_total == 0:
        return -1
    if pos_total == 0:
        return true_neg / neg_total
    if neg_total == 0:
        return true_pos / pos_total
    return ((true_pos / pos_total) + (true_neg / neg_total)) / 2
def res_parser(resultsf):
    """Yield (prediction, label) int pairs from a results csv, skipping any
    line without at least two comma-separated fields."""
    with open(resultsf) as handle:
        for raw in handle:
            fields = raw.strip().split(',')
            if len(fields) < 2:
                continue
            yield (int(fields[0]), int(fields[1]))
def perfectly_correct(xs):
    """Count how many scores in xs truncate to exactly 1 (perfect accuracy)."""
    total = 0
    for score in xs:
        if int(score) == 1:
            total += 1
    return total
def print_results_(args):
    """Worker: print per-target balanced accuracies for one game and return
    the list of the four target scores (next, goal, legal, terminal)."""
    (system, game) = args
    inpath='exp/{}/test/{}/'.format(system.name, game)
    sub_targets=targets(inpath)
    scores = []
    for target in ['next','goal','legal','terminal']:
        # pool the results of every sub-target belonging to this target
        target_scores=[]
        for sub_target in sub_targets:
            if sub_target.startswith(target):
                resultsf='results/{}/{}/{}.pl'.format(system.name,game,sub_target)
                target_scores += res_parser(resultsf)
        print(game,target,int(balanced_acc(target_scores)*100))
        scores.append(balanced_acc(target_scores))
    return scores
def print_results(system):
    """Print per-game scores, then the system's mean score and perfect-game count."""
    jobs = [(system, game) for game in game_names('data/test')]
    all_scores = []
    for game_scores in parmap(print_results_, jobs):
        all_scores.extend(game_scores)
    print(system.name, int(np.mean(all_scores) * 100), perfectly_correct(all_scores))
# Command-line dispatch: the first argument selects the pipeline stage,
# which is run for every configured learning system.
systems = [metagol.Metagol(),aleph.Aleph(),specialised_ilasp.SPECIALISED_ILASP()]
arg = sys.argv[1]
if arg == 'parse':
    list(map(parse,systems))
if arg == 'train':
    list(map(train,systems))
if arg == 'test':
    list(map(do_test,systems))
if arg == 'results':
    list(map(print_results,systems))
| 4,476 | 29.664384 | 121 | py |
mlj19-iggp | mlj19-iggp-master/common.py | import subprocess
def gen_atom(index, x):
    """Rewrite atom `x` as a fact tagged with an example index.

    'cell(1, b)' with index 3 becomes 'cell(3,1,b)'.  Predicates containing a
    Prolog/GGP reserved word (succ, true, ...) are renamed with a 'my_' prefix
    so they do not clash with built-ins.
    """
    reserved = ['succ', 'input', 'between', 'true', 'number', 'index']
    stripped = x.replace(' ', '')[:-1]  # drop spaces and the closing ')'
    name, arg_text = stripped.split('(')
    arg_list = [a for a in arg_text.split(',') if a != '']
    arg_list.insert(0, str(index))
    for word in reserved:
        if word in name:
            name = name.replace(word, 'my_{}'.format(word))
            break
    return '{}({})'.format(name, ','.join(arg_list))
def pred(atom):
    """Return the (functor, arity) signature of a ground atom string."""
    parts = atom.split('(')
    return (parts[0], parts[1].count(',') + 1)
def filter_by_targ(targ, xs):
    """Lazily select the atoms in xs whose text starts with targ's functor name."""
    name = targ[0]
    def matches(atom):
        return atom.startswith(name)
    return filter(matches, xs)
def parse_test(outpath, target, bk, pos, neg):
    """Dump a test task (background facts plus pos/neg labelled examples) to
    '<outpath><pred>.pl' and sort the file in place."""
    pred_name, _arity = target
    fname = outpath + '{}.pl'.format(pred_name)
    with open(fname, 'w') as out:
        out.writelines('{}.\n'.format(atom) for atom in bk)
        out.writelines('pos({}).\n'.format(atom) for atom in pos)
        out.writelines('neg({}).\n'.format(atom) for atom in neg)
    # external sort keeps the task file deterministic
    subprocess.call(['sort', fname, '-o', fname])
def parse_target(datafile):
    """Parse a '---'-separated IGGP data file and yield one learning task
    (target, background, positives, negatives) per predicate signature.

    Episodes are numbered from 1; every atom gets the episode id injected as
    its first argument via gen_atom.  Negatives are the herbrand atoms of an
    episode that were not listed as positive.
    """
    with open(datafile,'r') as f:
        episode=0
        atoms = set()
        bk,statics,pos,neg=[],[],[],[]
        for x in f.read().split('---'):
            xs=list(map(lambda x: x.strip(), x.strip().split('\n')))
            h,t=xs[0],xs[1:]
            if h == 'atoms:':
                atoms.update(set(t))
            elif h == 'statics:':
                statics.extend(t)
            elif h == 'background:':
                # a background section opens a new episode; statics seen so far
                # are replicated into it
                episode+=1
                bk.extend([gen_atom(episode,atom) for atom in t + statics])
            elif h == 'positives:':
                e_pos=set(t)
                pos.extend([gen_atom(episode,atom) for atom in e_pos])
                # closed-world: unlisted herbrand atoms are negatives
                neg.extend([gen_atom(episode,atom) for atom in atoms if atom not in e_pos])
        for targ in set(map(pred,pos+neg)):
            yield (targ,bk,filter_by_targ(targ,pos),filter_by_targ(targ,neg))
mlj19-iggp | mlj19-iggp-master/prolog.py | import subprocess
def swipl(action, load_files, outfile=None, timeout=None):
    """Run `action` under SWI-Prolog after consulting load_files."""
    call('swipl', action, load_files, outfile=outfile, timeout=timeout)
def yap(action, load_files, outfile=None, timeout=None):
    """Run `action` under YAP Prolog after consulting load_files."""
    call('yap', action, load_files, outfile=outfile, timeout=timeout)
def call(prolog_version,action,load_files,outfile=None,timeout=None):
    """Launch a Prolog interpreter, silently consult load_files, then feed it
    `action` on stdin.  Output goes to `outfile` if given, else inherits stdout.
    """
    load_files = map(lambda x: "'{}'".format(x),load_files)
    cmd = "load_files([{}],[silent(true)]). ".format(','.join(load_files))
    cmd+=action
    if outfile == None:
        p = subprocess.Popen([prolog_version,'-q'], stdin=subprocess.PIPE)
        call_p(p,cmd,timeout)
    else:
        with open(outfile, 'w') as outf:
            # -G8g: enlarge the global stack for big test runs
            p = subprocess.Popen([prolog_version,'-q','-G8g'], stdin=subprocess.PIPE, stdout=outf)
            call_p(p,cmd,timeout)
def call_p(p,cmd,timeout):
    """Write `cmd` to the Prolog process's stdin and wait for it (optionally
    with a timeout); the process is always killed afterwards, even on error.
    """
    try:
        print(cmd)
        p.stdin.write(cmd.encode())
        p.communicate(timeout=timeout)
    except Exception as e:
        # timeouts and broken pipes are logged but not fatal
        print(e)
    finally:
        p.kill()
mlj19-iggp | mlj19-iggp-master/metagol.py | import common
import subprocess
import prolog
import string
import config as cfg
class Metagol:
    """Adapter running the Metagol MIL system over the IGGP data format."""
    name='metagol'
    metagol_runner='metagol/runner'  # learning/testing harness prolog file
    def __init__(self):
        pass
    def parse_train(self,datafile,outpath,game,target):
        """Write a Metagol task file per sub-target: facts, labelled examples,
        and prim/2 declarations (plus negated variants) for every primitive."""
        for (subtarget,bk,pos,neg) in common.parse_target(datafile):
            prims=set(map(common.pred,bk))
            (p,a)=subtarget
            fname=outpath + '{}.pl'.format(p)
            with open(fname,'w') as f:
                for atom in bk:
                    f.write('{}.\n'.format(atom))
                for atom in pos:
                    f.write('pos({}).\n'.format(atom))
                for atom in neg:
                    f.write('neg({}).\n'.format(atom))
                for (p,a) in prims:
                    # add negation
                    f.write('not_{0}({1}) :- \+ {0}({1}).\n'.format(p,','.join(string.ascii_uppercase[:a])))
                    f.write('prim({}/{}).\n'.format(p,a))
                    f.write('prim(not_{}/{}).\n'.format(p,a))
            # sort keeps the task file deterministic
            subprocess.call(['sort',fname,'-o',fname])
    def parse_test(self,datafile,outpath,game,target):
        """Write plain test task files (shared format) for every sub-target."""
        for (subtarget,bk,pos,neg) in common.parse_target(datafile):
            common.parse_test(outpath,subtarget,bk,pos,neg)
    def train(self,inpath,outfile,target):
        """Run the Metagol learner under SWI-Prolog, writing rules to outfile."""
        trainf=inpath+target
        prolog.swipl(action='learn,halt.',load_files=[self.metagol_runner,trainf],outfile=outfile,timeout=cfg.learning_timeout)
    def do_test(self,dataf,programf,outf):
        """Evaluate a learned program on test data, writing prediction,label csv."""
        prolog.swipl('do_test,halt.',[self.metagol_runner,dataf,programf],outf,timeout=None)
mmvae-public | mmvae-public/src/main.py | import argparse
import datetime
import sys
import json
from collections import defaultdict
from pathlib import Path
from tempfile import mkdtemp
import numpy as np
import torch
from torch import optim
import models
import objectives
from utils import Logger, Timer, save_model, save_vars, unpack_data
# Command-line interface for training multi-modal VAEs.
parser = argparse.ArgumentParser(description='Multi-Modal VAEs')
parser.add_argument('--experiment', type=str, default='', metavar='E',
                    help='experiment name')
parser.add_argument('--model', type=str, default='mnist_svhn', metavar='M',
                    choices=[s[4:] for s in dir(models) if 'VAE_' in s],
                    help='model name (default: mnist_svhn)')
parser.add_argument('--obj', type=str, default='elbo', metavar='O',
                    choices=['elbo', 'iwae', 'dreg'],
                    help='objective to use (default: elbo)')
# NOTE(review): the default is 20 but the help text says 10 — flag for fix
parser.add_argument('--K', type=int, default=20, metavar='K',
                    help='number of particles to use for iwae/dreg (default: 10)')
parser.add_argument('--looser', action='store_true', default=False,
                    help='use the looser version of IWAE/DREG')
parser.add_argument('--llik_scaling', type=float, default=0.,
                    help='likelihood scaling for cub images/svhn modality when running in'
                         'multimodal setting, set as 0 to use default value')
parser.add_argument('--batch-size', type=int, default=256, metavar='N',
                    help='batch size for data (default: 256)')
parser.add_argument('--epochs', type=int, default=10, metavar='E',
                    help='number of epochs to train (default: 10)')
parser.add_argument('--latent-dim', type=int, default=20, metavar='L',
                    help='latent dimensionality (default: 20)')
parser.add_argument('--num-hidden-layers', type=int, default=1, metavar='H',
                    help='number of hidden layers in enc and dec (default: 1)')
parser.add_argument('--pre-trained', type=str, default="",
                    help='path to pre-trained model (train from scratch if empty)')
parser.add_argument('--learn-prior', action='store_true', default=False,
                    help='learn model prior parameters')
parser.add_argument('--logp', action='store_true', default=False,
                    help='estimate tight marginal likelihood on completion')
parser.add_argument('--print-freq', type=int, default=0, metavar='f',
                    help='frequency with which to print stats (default: 0)')
parser.add_argument('--no-analytics', action='store_true', default=False,
                    help='disable plotting analytics')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disable CUDA use')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
# args
args = parser.parse_args()
# random seed
# https://pytorch.org/docs/stable/notes/randomness.html
torch.backends.cudnn.benchmark = True
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# load args from disk if pretrained model path is given
# (command-line args are then entirely replaced by the saved ones)
pretrained_path = ""
if args.pre_trained:
    pretrained_path = args.pre_trained
    args = torch.load(args.pre_trained + '/args.rar')
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
# load model
modelC = getattr(models, 'VAE_{}'.format(args.model))
model = modelC(args).to(device)
if pretrained_path:
    print('Loading model {} from {}'.format(model.modelName, pretrained_path))
    model.load_state_dict(torch.load(pretrained_path + '/model.rar'))
    model._pz_params = model._pz_params
if not args.experiment:
    args.experiment = model.modelName
# set up run path
runId = datetime.datetime.now().isoformat()
experiment_dir = Path('../experiments/' + args.experiment)
experiment_dir.mkdir(parents=True, exist_ok=True)
runPath = mkdtemp(prefix=runId, dir=str(experiment_dir))
# tee stdout into the run directory's log file
sys.stdout = Logger('{}/run.log'.format(runPath))
print('Expt:', runPath)
print('RunID:', runId)
# save args to run
with open('{}/args.json'.format(runPath), 'w') as fp:
    json.dump(args.__dict__, fp)
# -- also save object because we want to recover these for other things
torch.save(args, '{}/args.rar'.format(runPath))
# preparation for training
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                       lr=1e-3, amsgrad=True)
train_loader, test_loader = model.getDataLoaders(args.batch_size, device=device)
# multi-modal models (with a .vaes attribute) use the 'm_'-prefixed objectives
objective = getattr(objectives,
                    ('m_' if hasattr(model, 'vaes') else '')
                    + args.obj
                    + ('_looser' if (args.looser and args.obj != 'elbo') else ''))
t_objective = getattr(objectives, ('m_' if hasattr(model, 'vaes') else '') + 'iwae')
def train(epoch, agg):
    """Run one training epoch, appending the mean loss to agg['train_loss'].

    Uses the module-level model, optimizer, objective and train_loader.
    """
    model.train()
    b_loss = 0
    for i, dataT in enumerate(train_loader):
        data = unpack_data(dataT, device=device)
        optimizer.zero_grad()
        # objectives are log-likelihood bounds, so negate for minimisation
        loss = -objective(model, data, K=args.K)
        loss.backward()
        optimizer.step()
        b_loss += loss.item()
        if args.print_freq > 0 and i % args.print_freq == 0:
            print("iteration {:04d}: loss: {:6.3f}".format(i, loss.item() / args.batch_size))
    agg['train_loss'].append(b_loss / len(train_loader.dataset))
    print('====> Epoch: {:03d} Train loss: {:.4f}'.format(epoch, agg['train_loss'][-1]))
def test(epoch, agg):
    """Evaluate on the test set with the IWAE objective, appending the mean
    loss to agg['test_loss']; the first batch also produces reconstruction
    (and optionally analysis) plots in the run directory.
    """
    model.eval()
    b_loss = 0
    with torch.no_grad():
        for i, dataT in enumerate(test_loader):
            data = unpack_data(dataT, device=device)
            loss = -t_objective(model, data, K=args.K)
            b_loss += loss.item()
            if i == 0:
                model.reconstruct(data, runPath, epoch)
                if not args.no_analytics:
                    model.analyse(data, runPath, epoch)
    agg['test_loss'].append(b_loss / len(test_loader.dataset))
    print('====> Test loss: {:.4f}'.format(agg['test_loss'][-1]))
def estimate_log_marginal(K):
    """Compute an IWAE estimate of the log-marginal likelihood of test data.

    K is the number of importance samples; larger K gives a tighter bound.
    """
    model.eval()
    marginal_loglik = 0
    with torch.no_grad():
        for dataT in test_loader:
            data = unpack_data(dataT, device=device)
            marginal_loglik += -t_objective(model, data, K).item()
    marginal_loglik /= len(test_loader.dataset)
    print('Marginal Log Likelihood (IWAE, K = {}): {:.4f}'.format(K, marginal_loglik))
if __name__ == '__main__':
    # full training loop: train + evaluate each epoch, checkpointing the model,
    # losses and sample generations into the run directory
    with Timer('MM-VAE') as t:
        agg = defaultdict(list)
        for epoch in range(1, args.epochs + 1):
            train(epoch, agg)
            test(epoch, agg)
            save_model(model, runPath + '/model.rar')
            save_vars(agg, runPath + '/losses.rar')
            model.generate(runPath, epoch)
        if args.logp:  # compute as tight a marginal likelihood as possible
            estimate_log_marginal(5000)
| 6,968 | 40.482143 | 93 | py |
mmvae-public | mmvae-public/src/vis.py | # visualisation related functions
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
from matplotlib.lines import Line2D
from umap import UMAP
def custom_cmap(n):
    """Create customised colormap for scattered latent plot of n categories.
    Returns colormap object and colormap array that contains the RGB value of the colors.
    See official matplotlib document for colormap reference:
    https://matplotlib.org/examples/color/colormaps_reference.html
    """
    # first color is grey from Set1, rest other sensible categorical colourmap
    cmap_array = sns.color_palette("Set1", 9)[-1:] + sns.husl_palette(n - 1, h=.6, s=0.7)
    cmap = colors.LinearSegmentedColormap.from_list('mmdgm_cmap', cmap_array)
    return cmap, cmap_array
def embed_umap(data):
    """Project data to 2D with UMAP for plotting.

    `data` should be on cpu, numpy.  The transform seed is tied to the torch
    seed so embeddings are reproducible across a run.
    """
    embedding = UMAP(metric='euclidean',
                     n_neighbors=40,
                     # angular_rp_forest=True,
                     # random_state=torch.initial_seed(),
                     transform_seed=torch.initial_seed())
    return embedding.fit_transform(data)
def plot_embeddings(emb, emb_l, labels, filepath):
    """Scatter-plot 2D embeddings `emb` coloured by integer labels `emb_l`,
    with a legend built from the `labels` strings, saved to filepath."""
    cmap_obj, cmap_arr = custom_cmap(n=len(labels))
    plt.figure()
    plt.scatter(emb[:, 0], emb[:, 1], c=emb_l, cmap=cmap_obj, s=25, alpha=0.2, edgecolors='none')
    # manual legend handles so alpha/size do not affect the legend markers
    l_elems = [Line2D([0], [0], marker='o', color=cm, label=l, alpha=0.5, linestyle='None')
               for (cm, l) in zip(cmap_arr, labels)]
    plt.legend(frameon=False, loc=2, handles=l_elems)
    plt.savefig(filepath, bbox_inches='tight')
    plt.close()
def tensor_to_df(tensor, ax_names=None):
    """Convert a 2D array into a long-format DataFrame: one row per cell,
    with columns named by ax_names (default ('variable', 'value'))."""
    assert tensor.ndim == 2, "Can only currently convert 2D tensors to dataframes"
    if ax_names is None:
        var_name, value_name = 'variable', 'value'
    else:
        var_name, value_name = ax_names[0], ax_names[1]
    wide = pd.DataFrame(data=tensor, columns=np.arange(tensor.shape[1]))
    return wide.melt(value_vars=wide.columns, var_name=var_name, value_name=value_name)
def tensors_to_df(tensors, head=None, keys=None, ax_names=None):
    """Stack several 2D tensors into one long DataFrame, labelling each source.

    The per-tensor label comes from `keys` (default: the tensor's position)
    and is exposed as a column named `head` (default: 'level_0').
    """
    frames = [tensor_to_df(t, ax_names=ax_names) for t in tensors]
    frame_keys = np.arange(len(tensors)) if keys is None else keys
    stacked = pd.concat(frames, keys=frame_keys)
    stacked.reset_index(level=0, inplace=True)
    if head is not None:
        stacked.rename(columns={'level_0': head}, inplace=True)
    return stacked
def plot_kls_df(df, filepath):
    """Box-plot per-dimension KL values from a long-format DataFrame (as built
    by tensors_to_df) on a log scale, saved to filepath."""
    _, cmap_arr = custom_cmap(df[df.columns[0]].nunique() + 1)
    with sns.plotting_context("notebook", font_scale=2.0):
        g = sns.FacetGrid(df, height=12, aspect=2)
        # columns: [group, variable, value] -> x, y, hue
        g = g.map(sns.boxplot, df.columns[1], df.columns[2], df.columns[0], palette=cmap_arr[1:],
                  order=None, hue_order=None)
        g = g.set(yscale='log').despine(offset=10)
        plt.legend(loc='best', fontsize='22')
        plt.savefig(filepath, bbox_inches='tight')
        plt.close()
| 2,938 | 39.260274 | 97 | py |
mmvae-public | mmvae-public/src/utils.py | import math
import os
import shutil
import sys
import time
import torch
import torch.distributions as dist
import torch.nn.functional as F
from datasets import CUBImageFt
# Classes
class Constants(object):
    """Shared numeric constants."""
    eta = 1e-6  # small additive term for numerical stability
    log2 = math.log(2)
    log2pi = math.log(2 * math.pi)
    logceilc = 88  # largest cuda v s.t. exp(v) < inf
    logfloorc = -104  # smallest cuda v s.t. exp(v) > 0
# https://stackoverflow.com/questions/14906764/how-to-redirect-stdout-to-both-file-and-console-with-scripting
class Logger(object):
    """File-backed tee for sys.stdout: every write goes to both the original
    stream and a log file.  Assign an instance to sys.stdout to activate."""
    def __init__(self, filename, mode="a"):
        self.terminal = sys.stdout  # keep a handle on the real stdout
        self.log = open(filename, mode)
    def write(self, message):
        # echo to the console first, then persist to the log file
        self.terminal.write(message)
        self.log.write(message)
    def flush(self):
        """No-op; present only so the object satisfies the stream interface
        expected by print() under Python 3."""
class Timer:
    """Context manager reporting wall-clock time for a named code section."""
    def __init__(self, name):
        self.name = name
    def __enter__(self):
        self.begin = time.time()
        return self
    def __exit__(self, *args):
        self.end = time.time()
        self.elapsed = self.end - self.begin
        self.elapsedH = time.gmtime(self.elapsed)
        stamp = time.strftime("%H:%M:%S", self.elapsedH)
        print('====> [{}] Time: {:7.3f}s or {}'
              .format(self.name, self.elapsed, stamp))
# Functions
def save_vars(vs, filepath):
    """
    Saves variables to the given filepath in a safe manner.

    Any existing file at filepath is first copied to '<filepath>.old', so a
    crash mid-save cannot destroy the previous checkpoint.
    """
    if os.path.exists(filepath):
        backup = '{}.old'.format(filepath)
        shutil.copyfile(filepath, backup)
    torch.save(vs, filepath)
def save_model(model, filepath):
    """
    To load a saved model, simply use
    `model.load_state_dict(torch.load('path-to-saved-model'))`.

    Multimodal models (those exposing `.vaes`) additionally get one
    checkpoint per sub-VAE, suffixed with that VAE's modelName.
    """
    save_vars(model.state_dict(), filepath)
    if not hasattr(model, 'vaes'):
        return
    stem, ext = os.path.splitext(filepath)
    for vae in model.vaes:
        save_vars(vae.state_dict(), '{}_{}{}'.format(stem, vae.modelName, ext))
def is_multidata(dataB):
    """True when the batch object is a list/tuple rather than a bare tensor."""
    return isinstance(dataB, (list, tuple))
def unpack_data(dataB, device='cuda'):
    # dataB :: (Tensor, Idx) | [(Tensor, Idx)]
    """ Unpacks the data batch object in an appropriate manner to extract data

    Dispatches on the loader's batch shape: a bare tensor is moved to the
    device; a (tensor, idx) pair drops the index; a (tensor, nested) pair
    keeps the first nested element; a list of (tensor, idx) pairs yields the
    list of first elements.  Anything else raises RuntimeError.
    """
    if is_multidata(dataB):
        if torch.is_tensor(dataB[0]):
            if torch.is_tensor(dataB[1]):
                return dataB[0].to(device)  # mnist, svhn, cubI
            elif is_multidata(dataB[1]):
                return dataB[0].to(device), dataB[1][0].to(device)  # cubISft
            else:
                raise RuntimeError('Invalid data format {} -- check your dataloader!'.format(type(dataB[1])))
        elif is_multidata(dataB[0]):
            # transpose the list of pairs and keep the data column
            return [d.to(device) for d in list(zip(*dataB))[0]]  # mnist-svhn, cubIS
        else:
            raise RuntimeError('Invalid data format {} -- check your dataloader!'.format(type(dataB[0])))
    elif torch.is_tensor(dataB):
        return dataB.to(device)
    else:
        raise RuntimeError('Invalid data format {} -- check your dataloader!'.format(type(dataB)))
def get_mean(d, K=100):
    """
    Extract the `mean` parameter for given distribution.
    If attribute not available, estimate from samples.
    """
    try:
        return d.mean
    except NotImplementedError:
        # distributions without an analytic mean: Monte-Carlo estimate from K draws
        return d.rsample(torch.Size([K])).mean(0)
def log_mean_exp(value, dim=0, keepdim=False):
    """Numerically stable log(mean(exp(value))) along `dim`."""
    n = value.size(dim)
    return torch.logsumexp(value, dim, keepdim=keepdim) - math.log(n)
def kl_divergence(d1, d2, K=100):
    """Computes closed-form KL if available, else computes a MC estimate."""
    registered = (type(d1), type(d2)) in torch.distributions.kl._KL_REGISTRY
    if not registered:
        # Monte-Carlo fallback: mean log-density ratio over K samples from d1
        zs = d1.rsample(torch.Size([K]))
        return (d1.log_prob(zs) - d2.log_prob(zs)).mean(0)
    return torch.distributions.kl_divergence(d1, d2)
def pdist(sample_1, sample_2, eps=1e-5):
    """Compute the matrix of all pairwise Euclidean distances. Code
    adapted from the torch-two-sample library (added batching).
    You can find the original implementation of this function here:
    https://github.com/josipd/torch-two-sample/blob/master/torch_two_sample/util.py

    Arguments
    ---------
    sample_1 : torch.Tensor or Variable
        The first sample, of shape ``(batch_size, n_1, d)`` or ``(n_1, d)``
        (unbatched input is promoted to a singleton batch).
    sample_2 : torch.Tensor or Variable
        The second sample, of shape ``(batch_size, n_2, d)`` or ``(n_2, d)``.
    eps : float
        Small constant added before the square root for numerical stability.

    Returns
    -------
    torch.Tensor or Variable
        Matrix of shape (batch_size, n_1, n_2), squeezed. The [i, j]-th entry
        is approximately ``|| sample_1[i, :] - sample_2[j, :] ||_2``."""
    if len(sample_1.shape) == 2:
        sample_1, sample_2 = sample_1.unsqueeze(0), sample_2.unsqueeze(0)
    B, n_1, n_2 = sample_1.size(0), sample_1.size(1), sample_2.size(1)
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, computed batch-wise
    norms_1 = torch.sum(sample_1 ** 2, dim=-1, keepdim=True)
    norms_2 = torch.sum(sample_2 ** 2, dim=-1, keepdim=True)
    norms = (norms_1.expand(B, n_1, n_2)
             + norms_2.transpose(1, 2).expand(B, n_1, n_2))
    distances_squared = norms - 2 * sample_1.matmul(sample_2.transpose(1, 2))
    # abs() guards against tiny negative values from floating-point error
    return torch.sqrt(eps + torch.abs(distances_squared)).squeeze()  # batch x K x latent
def NN_lookup(emb_h, emb, data):
    """For each query embedding in emb_h, return the datum whose reference
    embedding (in emb) is nearest by Euclidean distance."""
    distances = pdist(emb.to(emb_h.device), emb_h)
    nearest = distances.argmin(dim=0)
    return data[nearest]
class FakeCategorical(dist.Distribution):
    """Categorical-like distribution over word logits whose 'samples' are the
    logits themselves; log_prob is the negative cross-entropy against integer
    targets, with index 0 treated as padding and ignored."""
    support = dist.constraints.real
    has_rsample = True
    def __init__(self, locs):
        self.logits = locs
        self._batch_shape = self.logits.shape
    @property
    def mean(self):
        return self.logits
    def sample(self, sample_shape=torch.Size()):
        with torch.no_grad():
            return self.rsample(sample_shape)
    def rsample(self, sample_shape=torch.Size()):
        expanded_shape = [*sample_shape, *self.logits.shape]
        return self.logits.expand(expanded_shape).contiguous()
    def log_prob(self, value):
        # value of shape (K, B, D)
        flat_logits = self.logits.view(-1, self.logits.size(-1))
        flat_targets = value.expand(self.logits.size()[:-1]).long().view(-1)
        nll = F.cross_entropy(input=flat_logits,
                              target=flat_targets,
                              reduction='none',
                              ignore_index=0)
        return (-nll).view(*self.logits.shape[:-1])
# The cross-entropy loss necessarily sums over the word-embedding (vocabulary)
# dimension ($\sum_i -gt_i \log(p_i)$, with most $gt_i = 0$). We adopt the
# operational equivalent here, which is to sum over the sentence dimension
# in the objective.
| 6,857 | 32.950495 | 110 | py |
mmvae-public | mmvae-public/src/objectives.py | # objectives of choice
import torch
from numpy import prod
from utils import log_mean_exp, is_multidata, kl_divergence
# helper to vectorise computation
def compute_microbatch_split(x, K):
    """Return the micro-batch size needed for the batch to fit in memory."""
    multi = is_multidata(x)
    B = x[0].size(0) if multi else x.size(0)
    if multi:
        S = sum([1.0 / (K * prod(_x.size()[1:])) for _x in x])
    else:
        S = 1.0 / (K * prod(x.size()[1:]))
    S = int(1e8 * S)  # float heuristic for 12Gb cuda memory
    assert (S > 0), "Cannot fit individual data in memory, consider smaller K"
    return min(B, S)
def elbo(model, x, K=1):
    """Computes E_{p(x)}[ELBO] for a single-modality VAE."""
    qz_x, px_z, _ = model(x)
    # Scaled reconstruction log-likelihood, flattened over data dimensions.
    lpx_z = px_z.log_prob(x).view(*px_z.batch_shape[:2], -1) * model.llik_scaling
    recon = lpx_z.sum(-1)
    kld = kl_divergence(qz_x, model.pz(*model.pz_params))
    return (recon - kld.sum(-1)).mean(0).sum()
def _iwae(model, x, K):
    """IWAE log-weights for log p_theta(x) -- fully vectorised."""
    qz_x, px_z, zs = model(x, K)
    prior_lp = model.pz(*model.pz_params).log_prob(zs).sum(-1)
    lik_lp = (px_z.log_prob(x).view(*px_z.batch_shape[:2], -1)
              * model.llik_scaling).sum(-1)
    posterior_lp = qz_x.log_prob(zs).sum(-1)
    return prior_lp + lik_lp - posterior_lp
def iwae(model, x, K):
    """Importance-weighted ELBO estimate for log p_theta(x).

    Splits the batch into micro-batches as necessary to fit in memory.
    """
    S = compute_microbatch_split(x, K)
    chunks = [_iwae(model, _x, K) for _x in x.split(S)]
    lw = torch.cat(chunks, 1)  # concatenate along the batch dimension
    return log_mean_exp(lw).sum()
def _dreg(model, x, K):
    """DREG log-weights for log p_theta(x) -- fully vectorised.

    Doubly-reparameterised gradient estimator: the posterior parameters
    are detached so the score-function term is removed from the encoder's
    gradient; the weight-dependent scaling is applied later via a hook.
    """
    _, px_z, zs = model(x, K)
    lpz = model.pz(*model.pz_params).log_prob(zs).sum(-1)
    lpx_z = px_z.log_prob(x).view(*px_z.batch_shape[:2], -1) * model.llik_scaling
    qz_x = model.qz_x(*[p.detach() for p in model.qz_x_params])  # stop-grad for \phi
    lqz_x = qz_x.log_prob(zs).sum(-1)
    lw = lpz + lpx_z.sum(-1) - lqz_x
    # Return the samples too so the caller can register a gradient hook on them.
    return lw, zs
def dreg(model, x, K, regs=None):
    """Doubly-reparameterised importance-weighted ELBO estimate for log p_theta(x).

    Iterates over the batch as necessary.
    NOTE(review): `regs` is accepted but never used in this implementation.
    """
    S = compute_microbatch_split(x, K)
    lw, zs = zip(*[_dreg(model, _x, K) for _x in x.split(S)])
    lw = torch.cat(lw, 1)  # concat on batch
    zs = torch.cat(zs, 1)  # concat on batch
    with torch.no_grad():
        # Normalised importance weights; detached so they act as constants.
        grad_wt = (lw - torch.logsumexp(lw, 0, keepdim=True)).exp()
        if zs.requires_grad:
            # Rescale the gradient flowing into the samples by the (squared)
            # normalised weights, per the DREG estimator.
            zs.register_hook(lambda grad: grad_wt.unsqueeze(-1) * grad)
    return (grad_wt * lw).sum()
# multi-modal variants
def m_elbo_naive(model, x, K=1):
    """Computes E_{p(x)}[ELBO] for multi-modal vae --- NOT EXPOSED"""
    qz_xs, px_zs, zss = model(x)
    recon_terms, kl_terms = [], []
    for r, qz_x in enumerate(qz_xs):
        kl_terms.append(kl_divergence(qz_x, model.pz(*model.pz_params)).sum(-1))
        for d, px_z in enumerate(px_zs[r]):
            scaled = px_z.log_prob(x[d]) * model.vaes[d].llik_scaling
            recon_terms.append(scaled.view(*px_z.batch_shape[:2], -1).sum(-1))
    obj = (1 / len(model.vaes)) * (torch.stack(recon_terms).sum(0)
                                   - torch.stack(kl_terms).sum(0))
    return obj.mean(0).sum()
def m_elbo(model, x, K=1):
    """Computes importance-sampled m_elbo (in notes3) for multi-modal vae """
    qz_xs, px_zs, zss = model(x)
    lpx_zs, klds = [], []
    for r, qz_x in enumerate(qz_xs):
        kld = kl_divergence(qz_x, model.pz(*model.pz_params))
        klds.append(kld.sum(-1))
        for d in range(len(px_zs)):
            # Self-reconstruction likelihood of modality d from its own latents.
            lpx_z = px_zs[d][d].log_prob(x[d]).view(*px_zs[d][d].batch_shape[:2], -1)
            lpx_z = (lpx_z * model.vaes[d].llik_scaling).sum(-1)
            if d == r:
                # Same modality: importance weight is exp(0) = 1.
                lwt = torch.tensor(0.0)
            else:
                # Cross-modality: importance weight q(z|x_r)/q(z|x_d) on the
                # detached samples (weights act as constants w.r.t. gradients).
                zs = zss[d].detach()
                lwt = (qz_x.log_prob(zs) - qz_xs[d].log_prob(zs).detach()).sum(-1)
            lpx_zs.append(lwt.exp() * lpx_z)
    # Average over modalities of (reconstruction - KL).
    obj = (1 / len(model.vaes)) * (torch.stack(lpx_zs).sum(0) - torch.stack(klds).sum(0))
    return obj.mean(0).sum()
def _m_iwae(model, x, K=1):
    """IWAE log-weights for log p_theta(x) for multi-modal vae -- fully vectorised."""
    qz_xs, px_zs, zss = model(x, K)
    lws = []
    for r, qz_x in enumerate(qz_xs):
        lpz = model.pz(*model.pz_params).log_prob(zss[r]).sum(-1)
        # Mixture-of-experts posterior density: mean over all modalities'
        # posteriors evaluated at modality r's samples (inner `qz_x`
        # deliberately shadows the loop variable inside the comprehension).
        lqz_x = log_mean_exp(torch.stack([qz_x.log_prob(zss[r]).sum(-1) for qz_x in qz_xs]))
        # Likelihood of every modality decoded from modality r's latents.
        lpx_z = [px_z.log_prob(x[d]).view(*px_z.batch_shape[:2], -1)
                     .mul(model.vaes[d].llik_scaling).sum(-1)
                 for d, px_z in enumerate(px_zs[r])]
        lpx_z = torch.stack(lpx_z).sum(0)
        lw = lpz + lpx_z - lqz_x
        lws.append(lw)
    return torch.cat(lws)  # (n_modality * n_samples) x batch_size, batch_size
def m_iwae(model, x, K=1):
    """IWAE estimate for log p_theta(x) for the multi-modal VAE.

    Splits the batch into micro-batches as necessary to fit in memory.
    """
    S = compute_microbatch_split(x, K)
    split_batches = zip(*[_x.split(S) for _x in x])
    parts = [_m_iwae(model, chunk, K) for chunk in split_batches]
    lw = torch.cat(parts, 1)  # concatenate along batch
    return log_mean_exp(lw).sum()
def _m_iwae_looser(model, x, K=1):
    """IWAE log-weights for log p_theta(x) for multi-modal vae -- fully vectorised.

    This version is the looser bound---with the average over modalities
    outside the log (hence `stack` instead of `cat` on return).
    """
    qz_xs, px_zs, zss = model(x, K)
    lws = []
    for r, qz_x in enumerate(qz_xs):
        lpz = model.pz(*model.pz_params).log_prob(zss[r]).sum(-1)
        # Mixture-of-experts posterior density at modality r's samples.
        lqz_x = log_mean_exp(torch.stack([qz_x.log_prob(zss[r]).sum(-1) for qz_x in qz_xs]))
        lpx_z = [px_z.log_prob(x[d]).view(*px_z.batch_shape[:2], -1)
                     .mul(model.vaes[d].llik_scaling).sum(-1)
                 for d, px_z in enumerate(px_zs[r])]
        lpx_z = torch.stack(lpx_z).sum(0)
        lw = lpz + lpx_z - lqz_x
        lws.append(lw)
    # Keep the modality dimension separate so the caller can average outside the log.
    return torch.stack(lws)  # (n_modality * n_samples) x batch_size, batch_size
def m_iwae_looser(model, x, K=1):
    """Looser IWAE bound for the multi-modal VAE, with the average over
    modalities taken outside the log.

    Splits the batch into micro-batches as necessary to fit in memory.
    """
    S = compute_microbatch_split(x, K)
    split_batches = zip(*[_x.split(S) for _x in x])
    parts = [_m_iwae_looser(model, chunk, K) for chunk in split_batches]
    lw = torch.cat(parts, 2)  # concatenate along batch
    return log_mean_exp(lw, dim=1).mean(0).sum()
def _m_dreg(model, x, K=1):
    """DREG log-weights for log p_theta(x) for multi-modal vae -- fully vectorised."""
    qz_xs, px_zs, zss = model(x, K)
    # Re-instantiate each posterior from detached parameters: stop-grad for phi
    # (the doubly-reparameterised gradient estimator).
    qz_xs_ = [vae.qz_x(*[p.detach() for p in vae.qz_x_params]) for vae in model.vaes]
    lws = []
    for r, vae in enumerate(model.vaes):
        lpz = model.pz(*model.pz_params).log_prob(zss[r]).sum(-1)
        # Mixture-of-experts posterior density (detached params) at modality r's samples.
        lqz_x = log_mean_exp(torch.stack([qz_x_.log_prob(zss[r]).sum(-1) for qz_x_ in qz_xs_]))
        lpx_z = [px_z.log_prob(x[d]).view(*px_z.batch_shape[:2], -1)
                     .mul(model.vaes[d].llik_scaling).sum(-1)
                 for d, px_z in enumerate(px_zs[r])]
        lpx_z = torch.stack(lpx_z).sum(0)
        lw = lpz + lpx_z - lqz_x
        lws.append(lw)
    # Also return the samples so the caller can register a gradient hook.
    return torch.cat(lws), torch.cat(zss)
def m_dreg(model, x, K=1):
    """DREG estimate for log p_theta(x) for the multi-modal VAE.

    Iterates over the batch as necessary.
    """
    S = compute_microbatch_split(x, K)
    x_split = zip(*[_x.split(S) for _x in x])
    lw, zss = zip(*[_m_dreg(model, _x, K) for _x in x_split])
    lw = torch.cat(lw, 1)  # concat on batch
    zss = torch.cat(zss, 1)  # concat on batch
    with torch.no_grad():
        # Normalised importance weights, treated as constants.
        grad_wt = (lw - torch.logsumexp(lw, 0, keepdim=True)).exp()
        if zss.requires_grad:
            # DREG: rescale the gradient through the samples by the weights.
            zss.register_hook(lambda grad: grad_wt.unsqueeze(-1) * grad)
    return (grad_wt * lw).sum()
def _m_dreg_looser(model, x, K=1):
    """DREG log-weights for log p_theta(x) for multi-modal vae -- fully vectorised.

    This version is the looser bound---with the average over modalities
    outside the log (hence `stack` instead of `cat` on return).
    """
    qz_xs, px_zs, zss = model(x, K)
    # Posteriors rebuilt from detached parameters: stop-grad for phi (DREG).
    qz_xs_ = [vae.qz_x(*[p.detach() for p in vae.qz_x_params]) for vae in model.vaes]
    lws = []
    for r, vae in enumerate(model.vaes):
        lpz = model.pz(*model.pz_params).log_prob(zss[r]).sum(-1)
        lqz_x = log_mean_exp(torch.stack([qz_x_.log_prob(zss[r]).sum(-1) for qz_x_ in qz_xs_]))
        lpx_z = [px_z.log_prob(x[d]).view(*px_z.batch_shape[:2], -1)
                     .mul(model.vaes[d].llik_scaling).sum(-1)
                 for d, px_z in enumerate(px_zs[r])]
        lpx_z = torch.stack(lpx_z).sum(0)
        lw = lpz + lpx_z - lqz_x
        lws.append(lw)
    # Keep the modality dimension separate for the looser (outside-log) average.
    return torch.stack(lws), torch.stack(zss)
def m_dreg_looser(model, x, K=1):
    """DREG estimate for log p_theta(x) for the multi-modal VAE.

    This version is the looser bound---with the average over modalities
    outside the log. Iterates over the batch as necessary.
    """
    S = compute_microbatch_split(x, K)
    x_split = zip(*[_x.split(S) for _x in x])
    lw, zss = zip(*[_m_dreg_looser(model, _x, K) for _x in x_split])
    lw = torch.cat(lw, 2)  # concat on batch
    zss = torch.cat(zss, 2)  # concat on batch
    with torch.no_grad():
        # Weights normalised over the sample dimension (dim 1 here, since the
        # modality dimension is kept separate).
        grad_wt = (lw - torch.logsumexp(lw, 1, keepdim=True)).exp()
        if zss.requires_grad:
            # DREG: rescale the gradient through the samples by the weights.
            zss.register_hook(lambda grad: grad_wt.unsqueeze(-1) * grad)
    return (grad_wt * lw).mean(0).sum()
| 9,267 | 40.375 | 95 | py |
mmvae-public | mmvae-public/src/datasets.py | import io
import json
import os
import pickle
from collections import Counter, OrderedDict
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
from nltk.tokenize import sent_tokenize, word_tokenize
from torch.utils.data import Dataset
from torchvision import transforms, models, datasets
class OrderedCounter(Counter, OrderedDict):
    """Counter that remembers the order in which elements were first seen."""

    def __repr__(self):
        return '{}({!r})'.format(type(self).__name__, OrderedDict(self))

    def __reduce__(self):
        # Pickle as (class, (contents,)) so order survives a round trip.
        return type(self), (OrderedDict(self),)
class CUBSentences(Dataset):
    """CUB bird-description sentences as padded integer token sequences.

    Tokenised/indexed data and the vocabulary are generated once from the raw
    text files and cached on disk under `gen_dir`; subsequent runs load the
    cached JSON files.
    """

    def __init__(self, root_data_dir, split, transform=None, **kwargs):
        """split: 'trainval' or 'test' """
        super().__init__()
        self.data_dir = os.path.join(root_data_dir, 'cub')
        self.split = split
        # Sentences are truncated/padded to this many tokens (incl. <eos>).
        self.max_sequence_length = kwargs.get('max_sequence_length', 32)
        # Words occurring <= min_occ times are mapped to <exc>.
        self.min_occ = kwargs.get('min_occ', 3)
        self.transform = transform
        os.makedirs(os.path.join(root_data_dir, "lang_emb"), exist_ok=True)
        # Cache directory name encodes the preprocessing hyper-parameters.
        self.gen_dir = os.path.join(self.data_dir, "oc:{}_msl:{}".
                                    format(self.min_occ, self.max_sequence_length))
        if split == 'train':
            self.raw_data_path = os.path.join(self.data_dir, 'text_trainvalclasses.txt')
        elif split == 'test':
            self.raw_data_path = os.path.join(self.data_dir, 'text_testclasses.txt')
        else:
            raise Exception("Only train or test split is available")
        os.makedirs(self.gen_dir, exist_ok=True)
        self.data_file = 'cub.{}.s{}'.format(split, self.max_sequence_length)
        self.vocab_file = 'cub.vocab'
        if not os.path.exists(os.path.join(self.gen_dir, self.data_file)):
            print("Data file not found for {} split at {}. Creating new... (this may take a while)".
                  format(split.upper(), os.path.join(self.gen_dir, self.data_file)))
            self._create_data()
        else:
            self._load_data()

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # JSON serialisation stringifies integer keys, hence str(idx).
        sent = self.data[str(idx)]['idx']
        if self.transform is not None:
            sent = self.transform(sent)
        # Returns (token-index sequence, unpadded length).
        return sent, self.data[str(idx)]['length']

    @property
    def vocab_size(self):
        return len(self.w2i)

    @property
    def pad_idx(self):
        return self.w2i['<pad>']

    @property
    def eos_idx(self):
        return self.w2i['<eos>']

    @property
    def unk_idx(self):
        # NOTE(review): '<unk>' is not among the special tokens created in
        # _create_vocab (which uses '<exc>'); this lookup would raise KeyError
        # with the vocabulary built here — confirm intended token name.
        return self.w2i['<unk>']

    def get_w2i(self):
        return self.w2i

    def get_i2w(self):
        return self.i2w

    def _load_data(self, vocab=True):
        """Load the cached tokenised data (and optionally the vocabulary)."""
        with open(os.path.join(self.gen_dir, self.data_file), 'rb') as file:
            self.data = json.load(file)
        if vocab:
            self._load_vocab()

    def _load_vocab(self):
        """Load (creating first if missing) the word<->index vocabulary maps."""
        if not os.path.exists(os.path.join(self.gen_dir, self.vocab_file)):
            self._create_vocab()
        with open(os.path.join(self.gen_dir, self.vocab_file), 'r') as vocab_file:
            vocab = json.load(vocab_file)
        self.w2i, self.i2w = vocab['w2i'], vocab['i2w']

    def _create_data(self):
        """Tokenise the raw text, index/pad each sentence, and cache as JSON."""
        if self.split == 'train' and not os.path.exists(os.path.join(self.gen_dir, self.vocab_file)):
            self._create_vocab()
        else:
            self._load_vocab()
        with open(self.raw_data_path, 'r') as file:
            text = file.read()
            sentences = sent_tokenize(text)
        data = defaultdict(dict)
        pad_count = 0  # number of sentences that required padding
        for i, line in enumerate(sentences):
            words = word_tokenize(line)
            # Truncate to leave room for the trailing <eos> token.
            tok = words[:self.max_sequence_length - 1]
            tok = tok + ['<eos>']
            length = len(tok)
            if self.max_sequence_length > length:
                tok.extend(['<pad>'] * (self.max_sequence_length - length))
                pad_count += 1
            # Unknown/rare words fall back to the <exc> token index.
            idx = [self.w2i.get(w, self.w2i['<exc>']) for w in tok]
            id = len(data)
            data[id]['tok'] = tok
            data[id]['idx'] = idx
            data[id]['length'] = length
        print("{} out of {} sentences are truncated with max sentence length {}.".
              format(len(sentences) - pad_count, len(sentences), self.max_sequence_length))
        with io.open(os.path.join(self.gen_dir, self.data_file), 'wb') as data_file:
            data = json.dumps(data, ensure_ascii=False)
            data_file.write(data.encode('utf8', 'replace'))
        # Re-read the cached file so self.data uses the JSON (string-keyed) form.
        self._load_data(vocab=False)

    def _create_vocab(self):
        """Build the vocabulary from the training text and cache it to disk."""
        assert self.split == 'train', "Vocablurary can only be created for training file."
        with open(self.raw_data_path, 'r') as file:
            text = file.read()
            sentences = sent_tokenize(text)
        occ_register = OrderedCounter()
        w2i = dict()
        i2w = dict()
        # Special tokens occupy the first indices: <exc>=0, <pad>=1, <eos>=2.
        special_tokens = ['<exc>', '<pad>', '<eos>']
        for st in special_tokens:
            i2w[len(w2i)] = st
            w2i[st] = len(w2i)
        texts = []
        unq_words = []
        for i, line in enumerate(sentences):
            words = word_tokenize(line)
            occ_register.update(words)
            texts.append(words)
        for w, occ in occ_register.items():
            if occ > self.min_occ and w not in special_tokens:
                i2w[len(w2i)] = w
                w2i[w] = len(w2i)
            else:
                unq_words.append(w)
        assert len(w2i) == len(i2w)
        print("Vocablurary of {} keys created, {} words are excluded (occurrence <= {})."
              .format(len(w2i), len(unq_words), self.min_occ))
        vocab = dict(w2i=w2i, i2w=i2w)
        with io.open(os.path.join(self.gen_dir, self.vocab_file), 'wb') as vocab_file:
            data = json.dumps(vocab, ensure_ascii=False)
            vocab_file.write(data.encode('utf8', 'replace'))
        # Also cache the excluded words and the full occurrence counter.
        with open(os.path.join(self.gen_dir, 'cub.unique'), 'wb') as unq_file:
            pickle.dump(np.array(unq_words), unq_file)
        with open(os.path.join(self.gen_dir, 'cub.all'), 'wb') as a_file:
            pickle.dump(occ_register, a_file)
        self._load_vocab()
class CUBImageFt(Dataset):
    """CUB images as 2048-d ResNet-101 features.

    On first use, extracts features for the whole split with a pretrained
    ResNet-101 (final FC layer removed) and caches them to disk; later runs
    load the cached feature matrix.
    """

    def __init__(self, root_data_dir, split, device):
        """split: 'trainval' or 'test' """
        super().__init__()
        self.data_dir = os.path.join(root_data_dir, 'cub')
        self.data_file = os.path.join(self.data_dir, split)
        self.gen_dir = os.path.join(self.data_dir, 'resnet101_2048')
        self.gen_ft_file = os.path.join(self.gen_dir, '{}.ft'.format(split))
        self.gen_data_file = os.path.join(self.gen_dir, '{}.data'.format(split))
        self.split = split
        tx = transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor()
        ])
        self.dataset = datasets.ImageFolder(self.data_file, transform=tx)
        os.makedirs(self.gen_dir, exist_ok=True)
        if not os.path.exists(self.gen_ft_file):
            print("Data file not found for CUB image features at `{}`. "
                  "Extracting resnet101 features from CUB image dataset... "
                  "(this may take a while)".format(self.gen_ft_file))
            self._create_ft_mat(device)
        else:
            self._load_ft_mat()

    def __len__(self):
        return len(self.ft_mat)

    def __getitem__(self, idx):
        return self.ft_mat[idx]

    def _load_ft_mat(self):
        """Load the cached feature matrix."""
        self.ft_mat = torch.load(self.gen_ft_file)

    def _load_data(self):
        """Load the cached raw-image tensor (resized originals)."""
        self.data_mat = torch.load(self.gen_data_file)

    def _create_ft_mat(self, device):
        """Extract and cache ResNet-101 features (and resized images) for the split."""
        resnet = models.resnet101(pretrained=True)
        # Drop the final classification layer to obtain 2048-d pooled features.
        modules = list(resnet.children())[:-1]
        self.model = nn.Sequential(*modules)
        self.model.eval()
        kwargs = {'num_workers': 1, 'pin_memory': True} if device == "cuda" else {}
        loader = torch.utils.data.DataLoader(self.dataset, batch_size=256,
                                             shuffle=False, **kwargs)
        with torch.no_grad():
            ft_mat = torch.cat([self.model(data[0]).squeeze() for data in loader])
        torch.save(ft_mat, self.gen_ft_file)
        del ft_mat  # free memory before building the (large) raw-image tensor
        data_mat = torch.cat([data[0].squeeze() for data in loader])
        torch.save(data_mat, self.gen_data_file)
        self._load_ft_mat()
| 8,431 | 32.19685 | 101 | py |
mmvae-public | mmvae-public/src/report/analyse_cub.py | """Calculate cross and joint coherence of language and image generation on CUB dataset using CCA."""
import argparse
import os
import sys
import torch
import torch.nn.functional as F
# relative import hack (sorry)
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir) # for system user
os.chdir(parentdir) # for pycharm user
import models
from utils import Logger, Timer, unpack_data
from helper import cca, fetch_emb, fetch_weights, fetch_pc, apply_weights, apply_pc
# variables
RESET = True
USE_PCA = True
maxSentLen = 32
minOccur = 3
lenEmbedding = 300
lenWindow = 3
fBase = 96
vocab_dir = '../data/cub/oc:{}_sl:{}_s:{}_w:{}'.format(minOccur, maxSentLen, lenEmbedding, lenWindow)
batch_size = 256
# args
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='Analysing MM-DGM results')
parser.add_argument('--save-dir', type=str, default=".",
metavar='N', help='save directory of results')
parser.add_argument('--no-cuda', action='store_true', default=True,
help='disables CUDA use')
cmds = parser.parse_args()
runPath = cmds.save_dir
sys.stdout = Logger('{}/analyse.log'.format(runPath))
args = torch.load(runPath + '/args.rar')
# cuda stuff
needs_conversion = cmds.no_cuda and args.cuda
conversion_kwargs = {'map_location': lambda st, loc: st} if needs_conversion else {}
args.cuda = not cmds.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
torch.manual_seed(args.seed)
forward_args = {'drop_modality': True} if args.model == 'mcubISft' else {}
# load trained model
modelC = getattr(models, 'VAE_{}'.format(args.model))
model = modelC(args)
if args.cuda:
model.cuda()
model.load_state_dict(torch.load(runPath + '/model.rar', **conversion_kwargs), strict=False)
train_loader, test_loader = model.getDataLoaders(batch_size, device=device)
N = len(test_loader.dataset)
# generate word embeddings and sentence weighting
emb_path = os.path.join(vocab_dir, 'cub.emb')
weights_path = os.path.join(vocab_dir, 'cub.weights')
vocab_path = os.path.join(vocab_dir, 'cub.vocab')
pc_path = os.path.join(vocab_dir, 'cub.pc')
emb = fetch_emb(lenWindow, minOccur, emb_path, vocab_path, RESET)
weights = fetch_weights(weights_path, vocab_path, RESET, a=1e-3)
emb = torch.from_numpy(emb).to(device)
weights = torch.from_numpy(weights).to(device).type(emb.dtype)
u = fetch_pc(emb, weights, train_loader, pc_path, RESET)
# set up word to sentence functions
fn_to_emb = lambda data, emb=emb, weights=weights, u=u: \
apply_pc(apply_weights(emb, weights, data), u)
def calculate_corr(images, embeddings):
    """Mean cosine similarity between CCA projections of image features and
    sentence embeddings; (re)generates the projections on first call or RESET."""
    global RESET
    if not os.path.exists(runPath + '/images_mean.pt') or RESET:
        generate_cca_projection()
        RESET = False  # only regenerate once per run
    im_mean = torch.load(runPath + '/images_mean.pt')
    emb_mean = torch.load(runPath + '/emb_mean.pt')
    im_proj = torch.load(runPath + '/im_proj.pt')
    emb_proj = torch.load(runPath + '/emb_proj.pt')
    with torch.no_grad():
        # Centre each modality, project into the shared CCA space, then compare.
        corr = F.cosine_similarity((images - im_mean) @ im_proj,
                                   (embeddings - emb_mean) @ emb_proj).mean()
    return corr
def generate_cca_projection():
    """Fit a rank-40 CCA between image features and sentence embeddings on the
    training set and cache the means and projection matrices to the run dir."""
    # Collect the full training set: image features and sentence token tensors.
    images, sentences = [torch.cat(l) for l in zip(*[(d[0], d[1][0]) for d in train_loader])]
    emb = fn_to_emb(sentences.int())
    corr, (im_proj, emb_proj) = cca([images, emb], k=40)
    print("Largest eigen value from CCA: {:.3f}".format(corr[0]))
    torch.save(images.mean(dim=0), runPath + '/images_mean.pt')
    torch.save(emb.mean(dim=0), runPath + '/emb_mean.pt')
    torch.save(im_proj, runPath + '/im_proj.pt')
    torch.save(emb_proj, runPath + '/emb_proj.pt')
def cross_coherence():
    """Report CCA-based coherence on the test set: ground truth,
    image-to-sentence, and sentence-to-image cross generation."""
    model.eval()
    with torch.no_grad():
        i2t = []
        s2i = []
        gt = []
        for i, dataT in enumerate(test_loader):
            # get the inputs
            images, sentences = unpack_data(dataT, device=device)
            if images.shape[0] != batch_size:
                break  # NOTE(review): the final partial batch is skipped entirely
            _, px_zs, _ = model([images, sentences], K=1, **forward_args)
            # px_zs[r][d] — presumably modality d decoded from modality r's
            # latents (image=0, sentence=1); confirm against the model class.
            cross_sentences = px_zs[0][1].mean.argmax(dim=-1).squeeze(0)
            cross_images = px_zs[1][0].mean.squeeze(0)
            # calculate correlation with CCA:
            i2t.append(calculate_corr(images, fn_to_emb(cross_sentences)))
            s2i.append(calculate_corr(cross_images, fn_to_emb(sentences.int())))
            gt.append(calculate_corr(images, fn_to_emb(sentences.int())))
        print("Coherence score: \nground truth {:10.9f}, \nimage to sentence {:10.9f}, "
              "\nsentence to image {:10.9f}".format(sum(gt) / len(gt),
                                                    sum(i2t) / len(gt),
                                                    sum(s2i) / len(gt)))
def joint_coherence():
    """Coherence of joint generation: sample 1000 latents from the prior,
    decode both modalities, and measure their CCA similarity."""
    model.eval()
    with torch.no_grad():
        pzs = model.pz(*model.pz_params).sample([1000])
        gen_images = model.vaes[0].dec(pzs)[0].squeeze(1)
        # Decoded sentence logits -> hard token indices via argmax.
        gen_sentences = model.vaes[1].dec(pzs)[0].argmax(dim=-1).squeeze(1)
        score = calculate_corr(gen_images, fn_to_emb(gen_sentences))
        print("joint generation {:10.9f}".format(score))
if __name__ == '__main__':
    # `t` is unused; Timer is presumably a context manager that reports
    # elapsed time on exit — confirm in utils.
    with Timer('MM-VAE analysis') as t:
        print('-' * 89)
        cross_coherence()
        print('-' * 89)
        joint_coherence()
| 5,427 | 36.694444 | 101 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.