repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
Alqueraf/lms-app | foosball/adb_foos.py | import CHIP_IO.GPIO as GPIO
import time
import os
import urllib2
import socket
import logging
from subprocess import call
TIMESTR = time.strftime("%Y%m%d-%H%M%S")
logging.basicConfig(level=logging.DEBUG, filename='foos_' + TIMESTR + '.log')
TABLE_ID = "5FLOOR"
BTNRESET = "GPIO2"
BTN1 = "GPIO3"
BTN2 = "GPIO4"
LED1 = "GPIO5"
LED2 = "GPIO6"
OFF = GPIO.HIGH
ON = GPIO.LOW
FLASH_TIME = 0.2
FLASH_COUNT = 10
GPIO.setup(LED1, GPIO.OUT)
GPIO.setup(LED2, GPIO.OUT)
GPIO.output(LED1, OFF)
GPIO.output(LED2, OFF)
GPIO.setup(BTN1, GPIO.IN)
GPIO.setup(BTN2, GPIO.IN)
GPIO.setup(BTNRESET, GPIO.IN)
def printlog(message):
    """Record *message* in the run's log file and echo it to stdout."""
    logging.info(message)
    print(message)
def blink(speed, count, *leds):
    """Flash the given LED pins together.

    Performs `count` on/off cycles, holding each half-cycle for
    `speed` seconds.  A non-positive count flashes nothing.
    """
    for _ in range(count):
        for led in leds:
            GPIO.output(led, ON)
        time.sleep(speed)
        for led in leds:
            GPIO.output(led, OFF)
        time.sleep(speed)
def command(command):
    # Run a shell command string via subprocess.call.
    # NOTE(review): the parameter shadows the function's own name, and
    # shell=True goes through the shell -- acceptable for these fixed adb
    # strings, but never pass untrusted input here.
    call(command, shell=True)
# Initialize
GPIO.output(LED1, ON)
GPIO.output(LED2, ON)
#Init adb
printlog("Initilizing ADB")
command('adb devices')
blink(FLASH_TIME, FLASH_COUNT, LED1, LED2)
flashing = False
def updateScore(side):
    """Broadcast a goal event to the Android scoreboard app over adb.

    Args:
        side: 0 credits side 1; any other value credits side 2.

    Returns:
        True when the adb broadcast command was issued, False on error.
    """
    try:
        if side == 0:
            command('adb shell am broadcast -a action_goal --es scoringSide "SIDE_1"')
        else:
            command('adb shell am broadcast -a action_goal --es scoringSide "SIDE_2"')
        return True
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; catch only real errors and log the traceback.
        logging.exception("Update Score failure:")
        return False
def buttoncallback(button):
    """GPIO edge callback: flash the scoring team's LED and report the goal.

    `button` is the GPIO pin name that fired (BTN1 or BTN2).
    """
    global flashing
    # Re-entrancy guard: blink() sleeps, and edge callbacks can overlap,
    # so ignore presses while a flash sequence is still running.
    if flashing:
        return
    flashing = True
    if button == BTN1:
        printlog("TEAM 1 GOAL!")
        GPIO.output(LED1, ON)
        if updateScore(0):
            blink(FLASH_TIME, FLASH_COUNT, LED1)
        else:
            # adb broadcast failed -- turn the LED back off instead of flashing
            GPIO.output(LED1, OFF)
    else:
        printlog("TEAM 2 GOAL!")
        GPIO.output(LED2, ON)
        if updateScore(1):
            blink(FLASH_TIME, FLASH_COUNT, LED2)
        else:
            GPIO.output(LED2, OFF)
    flashing = False
GPIO.add_event_detect(BTN1, GPIO.FALLING, buttoncallback)
GPIO.add_event_detect(BTN2, GPIO.FALLING, buttoncallback)
printlog ("Awaiting Button press")
try:
GPIO.wait_for_edge(BTNRESET, GPIO.FALLING)
printlog("Reset button pressed")
blink(0.1, 20, LED1, LED2)
os.system("reboot")
except:
logging.exception("Exiting reset button loop:")
GPIO.cleanup()
printlog("Cleaning up")
GPIO.cleanup()
|
PhoeniXuzoo/NU-Projects | EE475/Ch6P16.py | <filename>EE475/Ch6P16.py<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
def readData(csvname):
    """Load a comma-separated numeric file and split features from labels.

    Every row except the last is feature data (one column per sample);
    the final row is kept as a 1-row 2-D label array.

    Returns:
        Tuple ``(x, y)`` of numpy arrays.
    """
    matrix = np.loadtxt(csvname, delimiter=',')
    features = matrix[:-1, :]
    labels = matrix[-1:, :]
    return features, labels
# fot: elementwise first-order weight e^-t / (1 + e^-t) (a sigmoid).
# NOTE(review): np.exp(-x) overflows for large negative x -- tolerable for
# this exercise's data, but not numerically robust in general.
fot = lambda x : np.exp(-x) / (1 + np.exp(-x))
# sot: sigma * (1 - sigma) with sigma = 1 / (1 + e^x); the curvature
# weight consumed by second_order() below.
sot = lambda x : ( 1 / (1 + np.exp(x))) * (1 - ( 1 / (1 + np.exp(x))))
# power is y_p * x_p.T * w
# firstOrderTerm is e^-power / (1 + e^-power)
def first_order(x, y, w, class_weight, power):
    # Class-weighted gradient of the softmax cost.
    #   x: (d, n) features, one column per sample; y: (1, n) labels in {-1,+1}
    #   w: (d, 1) weights; class_weight: length-n per-sample weights
    #   power: (1, n) margins y * (x.T w), precomputed by the caller
    total = np.zeros(w.shape)
    firstOrderTerm = fot(power)
    for i in range(np.size(y)):
        # accumulate the weighted per-sample gradient contribution
        total += class_weight[i] * firstOrderTerm[:,i] * y[:,i] * x[:,[i]]
    return (-1) * (total / np.size(y))
def second_order(x, y, w, class_weight, power):
    # Class-weighted Hessian of the softmax cost: a (d, d) matrix built
    # from per-sample outer products x_i x_i^T scaled by the curvature
    # weight sot(power_i) and the sample's class weight.
    total = np.zeros([x.shape[0], x.shape[0]])
    secondOrderTerm = sot(power)
    for i in range(np.size(y)):
        total += class_weight[i] * secondOrderTerm[:, i] * x[:, [i]] * x[:, [i]].T
    return total / np.size(y)
def newton_method(x, y, w, class_weight):
    # One class-weighted Newton step: w <- w - H^-1 g.
    # NOTE(review): inverts the Hessian explicitly; np.linalg.solve would
    # be cheaper and more stable -- left as-is to preserve behavior.
    power = y * np.transpose(np.dot(x.T, w))
    firstOrder = first_order(x, y, w, class_weight, power)
    secondOrder = second_order(x, y, w, class_weight, power)
    return w - np.dot(np.linalg.inv(secondOrder), firstOrder)
def costFunc(x, y, w, class_weight):
    """Class-weighted mean softmax (log-loss) cost.

    x: (d, n) features (column per sample); y: (1, n) labels in {-1, +1};
    w: (d, 1) weights; class_weight: length-n per-sample weights.
    """
    margins = -y * np.dot(x.T, w).T
    losses = np.log(1 + np.exp(margins))[0]
    cost = sum(weight * loss for weight, loss in zip(class_weight, losses))
    return cost / float(np.size(y))
if __name__ == "__main__":
csvname = '3d_classification_data_v2_mbalanced.csv'
x, y = readData(csvname)
w = np.ones([x.shape[0] + 1, 1])
x = np.insert(x, 0, values=np.ones([1, x.shape[1]]), axis=0)
positiveOneWeight = 7/11
negativeOneWeight = 4/11
class_weight = []
for i in range(np.size(y)):
if (y[:, i] > 0):
class_weight.append(positiveOneWeight)
else:
class_weight.append(negativeOneWeight)
position = x[[1, 2]]
positiveOneXList = []
positiveOneYList = []
negativeOneXList = []
negativeOneYList = []
for i in range(position.shape[1]):
if (y[0][i] > 0):
positiveOneXList.append(position[0][i])
positiveOneYList.append(position[1][i])
else:
negativeOneXList.append(position[0][i])
negativeOneYList.append(position[1][i])
plt.scatter(positiveOneXList, positiveOneYList, color='red')
plt.scatter(negativeOneXList, negativeOneYList, color='blue')
for i in range(5):
w = newton_method(x, y, w, class_weight)
a = -(w[1][0]/w[2][0])
b = -(w[0][0]/w[2][0])
foo = lambda x : a * x + b
i = -0.1
xList = []
yList = []
while (i < 1.1):
xList.append(i)
yList.append(foo(i))
i += 0.1
plt.plot(xList, yList)
plt.show()
|
PhoeniXuzoo/NU-Projects | EE475/Ch6P13.py | import numpy as np
import matplotlib.pyplot as plt
## softmax: 0.1 600
## perceptron: 0.05 550
def readData(csvname):
    # Load the CSV as a numeric matrix: all rows but the last are features
    # (one column per sample); the last row holds the +/-1 labels.
    data = np.loadtxt(csvname, delimiter=',')
    x = data[:-1, :]
    y = data[-1:, :]
    return x, y
def softmaxCostFunc(x, y, w):
    """Mean softmax / log-loss cost over all samples.

    x: (d, n) features (column per sample); y: (1, n) labels in {-1, +1};
    w: (d, 1) weight vector.
    """
    margins = -y * np.dot(x.T, w).T
    per_sample = np.log(1 + np.exp(margins))
    return np.sum(per_sample) / float(np.size(y))
def gradientDescentOneStepForSoftmax(x, y, w, alpha=0.1):
    """One full-batch gradient-descent step on the softmax cost.

    Fix: the gradient accumulator was hard-coded to shape (9, 1) (the
    breast-cancer data's 8 features + bias); it now follows w's shape,
    so the function works for any feature dimension.

    Args:
        x: (d, n) feature matrix, one column per sample (bias row included).
        y: (1, n) labels in {-1, +1}.
        w: (d, 1) current weight vector.
        alpha: step length.

    Returns:
        Updated (d, 1) weight vector.
    """
    total = np.zeros(w.shape)
    for i in range(np.size(y)):
        exp_term = np.exp(-y[:, i] * np.dot(x[:, i], w))
        # sigmoid-style weight of this sample's gradient contribution
        weight = exp_term / (1 + exp_term)
        total += weight * y[:, i] * x[:, [i]]
    return w + alpha * (1 / np.size(y)) * total
def perceptronCostFunc(x, y, w):
    """Mean perceptron cost: average of max(0, -y * (x.T w)) over samples."""
    margins = (-y * np.dot(x.T, w).T)[0]
    cost = sum(margin for margin in margins if margin > 0)
    return cost / float(np.size(y))
def gradientDescentOneStepForPerceptron(x, y, w, alpha=0.05):
    """One full-batch gradient-descent step on the perceptron cost.

    Fix: the gradient accumulator was hard-coded to shape (9, 1); it now
    follows w's shape so any feature dimension works.

    Args:
        x: (d, n) feature matrix, one column per sample (bias row included).
        y: (1, n) labels in {-1, +1}.
        w: (d, 1) current weight vector.
        alpha: step length.

    Returns:
        Updated (d, 1) weight vector.
    """
    total = np.zeros(w.shape)
    for i in range(np.size(y)):
        # only misclassified samples (positive margin) contribute
        margin = -y[:, i] * np.dot(x[:, [i]].T, w)
        total += 0 if margin <= 0 else -y[:, i] * x[:, [i]]
    return w - alpha * (1 / np.size(y)) * total
if __name__ == "__main__":
csvname = 'breast_cancer_data.csv'
x, y = readData(csvname)
w = np.ones([x.shape[0] + 1, 1])
x = np.insert(x, 0, values=np.ones([1, x.shape[1]]), axis=0)
xSoftList = [0]
ySoftList = [softmaxCostFunc(x, y, w)]
for i in range(600):
w = gradientDescentOneStepForSoftmax(x, y, w)
xSoftList.append(i+1)
ySoftList.append(softmaxCostFunc(x, y, w))
yPredic = np.transpose(np.dot(np.transpose(x), w))
wrong = 0
for i in range(np.size(yPredic)):
if ((yPredic[0][i] > 0) != (y[0][i] > 0)):
wrong += 1
print("Softmax Wrong Prediction: ", wrong)
w = np.ones([x.shape[0], 1])
xPerceptronList = [0]
yPerceptronList = [perceptronCostFunc(x, y, w)]
for i in range(550):
w = gradientDescentOneStepForPerceptron(x, y, w)
xPerceptronList.append(i+1)
yPerceptronList.append(perceptronCostFunc(x, y, w))
plt.plot(xSoftList, ySoftList, label="Softmax Cost Function",color="#F08080")
plt.plot(xPerceptronList, yPerceptronList, label="Perceptro Cost Function")
plt.legend(loc="upper right")
plt.show()
plt.close()
yPredic = np.transpose(np.dot(np.transpose(x), w))
wrong = 0
for i in range(np.size(yPredic)):
if ((yPredic[0][i] > 0) != (y[0][i] > 0)):
wrong += 1
print("Perceptron Wrong Prediction: ", wrong)
|
PhoeniXuzoo/NU-Projects | EE475/9.2.py | <filename>EE475/9.2.py
import ssl
from sklearn.datasets import fetch_openml
import autograd.numpy as np
from autograd import grad
from autograd.misc.flatten import flatten_func
import matplotlib.pyplot as plt
import random
# import MNIST
x, y = fetch_openml('mnist_784', version=1, return_X_y=True)
# re-shape input/output data
x = x.T
y = np.array([int(v) for v in y])[np.newaxis,:]
for i in range(len(x)):
if np.average(x[i]) == 0.0:
continue
x[i] = (x[i] - np.mean(x[i])) / np.std(x[i])
num_sample = 50000
inds = np.random.permutation(y.shape[1])[:num_sample]
x = x[:,inds]
y = y[:,inds]
lam=0.00001
print(np.shape(x))
print(np.shape(y))
x_1=x
y_1=y
import data_transformer
x_sample_edgebased_features = data_transformer.edge_transformer(x)
x_2=x_sample_edgebased_features
print('shape of transformed input ', x_sample_edgebased_features.shape)
def model(x, w):
    """Linear multiclass model.

    w[0] is the bias row, w[1:] the per-feature weights; returns a
    (num_classes, num_samples) score matrix for column-sample input x.
    """
    bias = w[0]
    scores = np.dot(x.T, w[1:]) + bias
    return scores.T
def multiclass_softmax(w):
all_evals = model(x, w)
a = np.log(np.sum(np.exp(all_evals), axis=0))
b = all_evals[y.astype(int).flatten(), np.arange(np.size(y))]
cost = np.sum(a - b)
cost = cost + lam * np.linalg.norm(w[1:, :], 'fro') ** 2
return cost / float(np.size(y))
def gradientDescent(w, mp_flat):
gradient = grad(mp_flat)
grad_eval = gradient(w)
return w - 0.01 * grad_eval
#w = 0.1*np.ones([x.shape[0] + 1, 10])
#ms_flat, unflatten_func, w = flatten_func(multiclass_softmax, w)
def training(x_1):
    """Run 20 epochs of mini-batch gradient descent on the softmax cost
    and plot the per-epoch cost curve.

    Bug fix: the mini-batch slices were assigned to *local* variables
    x and y, while multiclass_softmax reads the module-level globals --
    so every step silently trained on whatever the globals held and the
    slicing had no effect.  Declaring `global x, y` makes the batches
    take effect (y_1 is already read from module scope).
    """
    global x, y
    w = 0.01 * np.ones([x_1.shape[0] + 1, 10])
    ms_flat, unflatten_func, w = flatten_func(multiclass_softmax, w)
    xList = [0]
    yList = [ms_flat(w)]
    for epoch in range(20):
        for i in range(250):
            # batches of 200 samples: 250 * 200 = 50000 = num_sample
            x = x_1[:, i*200:i*200 + 200]
            y = y_1[:, i*200:i*200 + 200]
            w = gradientDescent(w, ms_flat)
        xList.append(epoch + 1)
        yList.append(ms_flat(w))
    plt.plot(xList, yList)
training(x_1)
print("Succuss!")
x=x_2
training(x_2)
print("Finish!")
plt.show() |
PhoeniXuzoo/NU-Projects | EE475/edgebased_feature_extractor.py | <gh_stars>0
from autograd import numpy as np
class tensor_conv_layer:
    """Fixed-kernel convolution + ReLU + max-pool layer used as an
    edge-based feature extractor (kernels are supplied by the caller,
    not learned)."""

    # convolution function
    def conv_function(self, tensor_window):
        # Flatten each image's window to a row so all kernels apply in a
        # single matrix product against self.kernels.
        tensor_window = np.reshape(tensor_window, (
            np.shape(tensor_window)[0], np.shape(tensor_window)[1] * np.shape(tensor_window)[2]))
        t = np.dot(self.kernels, tensor_window.T)
        return t

    # pooling / downsampling parameters
    def pool_function(self, tensor_window):
        # Max over each window's two spatial axes.
        t = np.max(tensor_window, axis=(1, 2))
        return t

    # activation
    def activation(self, tensor_window):
        # ReLU nonlinearity.
        return np.maximum(0, tensor_window)

    # pad image with appropriate number of zeros for convolution
    def pad_tensor(self, tensor, kernel_size):
        # kernel_size is assumed odd; pad width is (kernel_size - 1) / 2,
        # recovered here by looking the size up in a table of odd numbers.
        odd_nums = np.array([int(2 * n + 1) for n in range(100)])
        pad_val = np.argwhere(odd_nums == kernel_size)[0][0]
        tensor_padded = np.zeros(
            (np.shape(tensor)[0], np.shape(tensor)[1] + 2 * pad_val, np.shape(tensor)[2] + 2 * pad_val))
        tensor_padded[:, pad_val:-pad_val, pad_val:-pad_val] = tensor
        return tensor_padded

    # sliding window for image augmentation
    def sliding_window_tensor(self, tensor, window_size, stride, func):
        # Apply `func` (conv_function or pool_function) to every
        # window_size x window_size patch of every image in the tensor.
        # grab image size, set container for results
        image_size = np.shape(tensor)[1]
        results = []
        # slide window over input image with given window size / stride and function
        for i in np.arange(0, image_size - window_size + 1, stride):
            for j in np.arange(0, image_size - window_size + 1, stride):
                # take a window of input tensor
                tensor_window = tensor[:, i:i + window_size, j:j + window_size]
                # now process entire windowed tensor at once
                tensor_window = np.array(tensor_window)
                yo = func(tensor_window)
                # store weight
                results.append(yo)
        # re-shape properly: axis 0 is currently the window position
        results = np.array(results)
        results = results.swapaxes(0, 1)
        if func == self.conv_function:
            results = results.swapaxes(1, 2)
        return results

    # make feature map
    def make_feature_tensor(self, tensor):
        # create feature map via convolution --> returns flattened convolution calculations
        conv_stride = 1
        feature_tensor = self.sliding_window_tensor(tensor, self.kernel_size, conv_stride, self.conv_function)
        # re-shape convolution output ---> to square of same size as original input
        num_filters = np.shape(feature_tensor)[0]
        num_images = np.shape(feature_tensor)[1]
        square_dim = int((np.shape(feature_tensor)[2]) ** (0.5))
        feature_tensor = np.reshape(feature_tensor, (num_filters, num_images, square_dim, square_dim))
        # shove feature map through nonlinearity
        feature_tensor = self.activation(feature_tensor)
        # pool feature map --- i.e., downsample it
        pool_stride = 2
        pool_window_size = 3
        downsampled_feature_map = []
        for t in range(np.shape(feature_tensor)[0]):
            temp_tens = feature_tensor[t, :, :, :]
            d = self.sliding_window_tensor(temp_tens, pool_window_size, pool_stride, self.pool_function)
            downsampled_feature_map.append(d)
        downsampled_feature_map = np.array(downsampled_feature_map)
        # return downsampled feature map --> flattened
        return downsampled_feature_map

    # our normalization function
    def normalize(self, data, data_mean, data_std):
        # z-score with a small floor on the std to avoid division by zero
        normalized_data = (data - data_mean) / (data_std + 10 ** (-5))
        return normalized_data

    # convolution layer
    def conv_layer(self, tensor, kernels):
        """Full layer pass: reshape flat images to squares, pad, convolve
        with every kernel, ReLU, max-pool, and return the features
        flattened to (num_images, num_features)."""
        #### prep input tensor #####
        # pluck out dimensions for image-tensor reshape
        num_images = np.shape(tensor)[0]
        num_kernels = np.shape(kernels)[0]
        # create tensor out of input images (assumed to be stacked vertically as columns)
        # NOTE(review): images are assumed square (side = sqrt of flat length)
        tensor = np.reshape(tensor, (
            np.shape(tensor)[0], int((np.shape(tensor)[1]) ** (0.5)), int((np.shape(tensor)[1]) ** (0.5))), order='F')
        # pad tensor
        kernel = kernels[0]
        self.kernel_size = np.shape(kernel)[0]
        padded_tensor = self.pad_tensor(tensor, self.kernel_size)
        #### prep kernels - reshape into array for more effecient computation ####
        self.kernels = np.reshape(kernels, (np.shape(kernels)[0], np.shape(kernels)[1] * np.shape(kernels)[2]))
        #### compute convolution feature maps / downsample via pooling one map at a time over entire tensor #####
        # compute feature map for current image using current convolution kernel
        feature_tensor = self.make_feature_tensor(padded_tensor)
        feature_tensor = feature_tensor.swapaxes(0, 1)
        feature_tensor = np.reshape(feature_tensor, (
            np.shape(feature_tensor)[0], np.shape(feature_tensor)[1] * np.shape(feature_tensor)[2]), order='F')
        return feature_tensor
PhoeniXuzoo/NU-Projects | EE475/Ch7P3.py | <gh_stars>0
import autograd.numpy as np
import matplotlib.pyplot as plt
from autograd import grad
from autograd.misc.flatten import flatten_func
def readData(csvname):
data = np.loadtxt(csvname, delimiter=',')
x = data[:-1, :]
y = data[-1:, :]
return x, y
def model(x, w):
    # Linear multiclass scores: w[0] is the bias row, w[1:] the weights.
    # Returns a (num_classes, num_samples) score matrix.
    a = w[0] + np.dot(x.T, w[1:])
    return a.T
def multiclass_perceptron (w):
    # Multiclass perceptron cost with L2 (Frobenius-norm) regularization.
    # Reads the module-level globals x (features), y (integer class
    # labels) and lam (regularization strength).
    all_evals = model(x, w)
    # a: highest score per sample; b: score of each sample's true class
    a = np.max(all_evals, axis = 0)
    b = all_evals[y.astype(int).flatten(), np.arange(np.size(y))]
    cost = np.sum(a - b)
    cost = cost + lam*np.linalg.norm(w[1: ,:], 'fro')**2
    return cost / float(np.size(y))
def gradientDescent(w, mp_flat):
    # One fixed-step (alpha = 0.1) gradient-descent update; the gradient
    # of the flattened cost function comes from autograd.
    gradient = grad(mp_flat)
    grad_eval = gradient(w)
    return w - 0.1 * grad_eval
def statistic(x, y, w):
    # Count misclassified samples: predicted class = argmax of the model
    # scores per column, compared against the integer labels in y.
    prediction = np.argmax(model(x,w),axis=0)
    wrong = 0
    for i in range(np.size(y)):
        if prediction[i] != y[0][i]:
            wrong += 1
    return wrong
# global variable
lam = 10 ** -5
csvname = '3class_data.csv'
x, y = readData(csvname)
lam = 10 ** -5
if __name__ == "__main__":
w = np.ones([x.shape[0] + 1, 3])
w += 1
mp_flat, unflatten_func, w = flatten_func(multiclass_perceptron, w)
xList = [0]
yList = [mp_flat(w)]
for i in range(100):
w = gradientDescent(w, mp_flat)
xList.append(i+1)
yList.append(mp_flat(w))
w = w.reshape(3, 3)
plt.plot(xList, yList)
plt.show()
print("misclassification: ", statistic(x, y, w))
|
PhoeniXuzoo/NU-Projects | EE475/Ch3P5.py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def gradientDescentEveryStep(derivative, startPoint, stepLength):
    """Return the next gradient-descent iterate: p - alpha * g'(p)."""
    step = stepLength * derivative(startPoint)
    return startPoint - step
if __name__ == "__main__":
# g(w) = (1/50) * ( w^4 + w^2 + w)
with PdfPages('Chapter3Practice5.pdf') as pdf:
polynomial = np.poly1d([1/50, 0, 1/50, 1/5, 0])
# derivative of g(w)
derivative = polynomial.deriv()
stepLength = [1, 0.1, 0.01]
for alpha in stepLength:
startPoint = 2
fig = plt.figure()
plt.title("Exercise 3.5: steplength is " + str(alpha))
xList = [0]
yList = [polynomial(startPoint)]
for i in range(1000):
startPoint = gradientDescentEveryStep(derivative, startPoint, alpha)
xList.append(i+1)
yList.append(polynomial(startPoint))
plt.plot(xList, yList)
plt.show()
pdf.savefig(fig)
plt.close()
|
PhoeniXuzoo/NU-Projects | EE475/Ch3P6.py | <reponame>PhoeniXuzoo/NU-Projects
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def gradientDescentEveryStep(derivative, startPoint, stepLength):
    # One gradient-descent update: move opposite the (sub)gradient.
    return startPoint - stepLength * derivative(startPoint)
if __name__ == "__main__":
# g(w) = |x|
with PdfPages('Chapter3Practice6.pdf') as pdf:
costFunc = lambda x : abs(x)
derivative = lambda x: 1 if (x >= 0) else -1
# fixed steplength
startPoint = 1.75
fig = plt.figure()
plt.title("Exercise 3.6: fixed steplength is 0.5")
xList = [0]
yList = [costFunc(startPoint)]
for i in range(20):
startPoint = gradientDescentEveryStep(derivative, startPoint, 0.5)
xList.append(i+1)
yList.append(costFunc(startPoint))
plt.plot(xList, yList)
plt.show()
pdf.savefig(fig)
plt.close()
# diminishing step length
startPoint = 1.75
fig = plt.figure()
plt.title("Exercise 3.6: diminishing steplength")
xList = [0]
yList = [costFunc(startPoint)]
for i in range(20):
startPoint = gradientDescentEveryStep(derivative, startPoint, 1/(i+1))
xList.append(i + 1)
yList.append(costFunc(startPoint))
plt.plot(xList, yList)
plt.show()
pdf.savefig(fig)
plt.close()
|
PhoeniXuzoo/NU-Projects | EE475/Ch4P5c.py | from autograd import grad
from autograd import hessian
import autograd.numpy as np
import matplotlib.pyplot as plt
def newtons_method(g, max_its, w):
    """Regularized Newton's method on cost g starting from weights w.

    Returns (weight_history, cost_history) over max_its iterations.
    """
    gradient = grad(g)
    hess = hessian(g)
    # small ridge added to the Hessian so the linear solve stays well-posed
    epsilon = 10 ** (-7)
    weight_history = [w]
    cost_history = [g(w)]
    for k in range(max_its):
        grad_eval = gradient(w)
        hess_eval = hess(w)
        # autograd's hessian comes back multi-dimensional; collapse it to
        # a square (w.size x w.size) matrix
        hess_eval.shape = (int((np.size(hess_eval)) ** (0.5)), int((np.size(hess_eval)) ** (0.5)))
        A = hess_eval + epsilon * np.eye(w.size)
        b = grad_eval
        # solve A w_next = A w - b, i.e. w_next = w - A^-1 b
        w = np.linalg.solve(A, np.dot(A, w) - b)
        weight_history.append(w)
        cost_history.append(g(w))
    return weight_history, cost_history
def g(w):
    """Test cost g(w) = log(1 + exp(w^T w)) for a column vector w."""
    squared_norm = np.dot(np.transpose(w), w)
    return np.log(1 + np.exp(squared_norm))
if __name__ == "__main__":
w = 1 * np.ones([2, 1])
max_its = 10
weight_history, cost_history = newtons_method(g, max_its, w)
print(weight_history)
print(cost_history)
xList = []
yList = []
for i in range(max_its):
xList.append(i+1)
yList.append(cost_history[i][0][0])
fig = plt.figure()
plt.title("Exercise 4.5 (c)")
plt.plot(xList, yList)
plt.show()
plt.close()
|
PhoeniXuzoo/NU-Projects | EE475/Ch3P8.py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def gradientDescentEveryStep(derivative, startPoint, stepLength):
    # One gradient-descent update; works elementwise for vector inputs.
    return startPoint - stepLength * derivative(startPoint)
def constFunc(w):
    """Quadratic cost g(w) = w^T w, i.e. the sum of squared components."""
    return sum(component * component for component in w)
if __name__ == "__main__" :
# g(w) = w^T * w w is a 10*1 vector
with PdfPages('Chapter3Practice8.pdf') as pdf:
derivate = lambda w: 2 * w
stepLength = [0.001, 0.1, 1]
for alpha in stepLength:
startPoint = np.ones(10) * 10
fig = plt.figure()
plt.title("Exercise 3.8: steplength is " + str(alpha))
xList = [0]
yList = [constFunc(startPoint)]
for i in range(100):
startPoint = gradientDescentEveryStep(derivate, startPoint, alpha)
xList.append(i+1)
yList.append(constFunc(startPoint))
plt.plot(xList, yList)
plt.show()
pdf.savefig(fig)
plt.close() |
PhoeniXuzoo/NU-Projects | EE475/Ch9P2.py | <reponame>PhoeniXuzoo/NU-Projects
import ssl
from sklearn.datasets import fetch_openml
import autograd.numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import MultipleLocator
from autograd import grad
from autograd.misc.flatten import flatten_func
import copy
ssl._create_default_https_context = ssl._create_unverified_context
class tensor_conv_layer:
def conv_function(self, tensor_window):
tensor_window = np.reshape(tensor_window, (
np.shape(tensor_window)[0], np.shape(tensor_window)[1] * np.shape(tensor_window)[2]))
t = np.dot(self.kernels, tensor_window.T)
return t
def pool_function(self, tensor_window):
t = np.max(tensor_window, axis=(1, 2))
return t
def activation(self, tensor_window):
return np.maximum(0, tensor_window)
def pad_tensor(self, tensor, kernel_size):
odd_nums = np.array([int(2 * n + 1) for n in range(100)])
pad_val = np.argwhere(odd_nums == kernel_size)[0][0]
tensor_padded = np.zeros(
(np.shape(tensor)[0], np.shape(tensor)[1] + 2 * pad_val, np.shape(tensor)[2] + 2 * pad_val))
tensor_padded[:, pad_val:-pad_val, pad_val:-pad_val] = tensor
return tensor_padded
def sliding_window_tensor(self, tensor, window_size, stride, func):
image_size = np.shape(tensor)[1]
results = []
for i in np.arange(0, image_size - window_size + 1, stride):
for j in np.arange(0, image_size - window_size + 1, stride):
tensor_window = tensor[:, i:i + window_size, j:j + window_size]
tensor_window = np.array(tensor_window)
yo = func(tensor_window)
results.append(yo)
results = np.array(results)
results = results.swapaxes(0, 1)
if func == self.conv_function:
results = results.swapaxes(1, 2)
return results
def make_feature_tensor(self, tensor):
conv_stride = 1
feature_tensor = self.sliding_window_tensor(tensor, self.kernel_size, conv_stride, self.conv_function)
num_filters = np.shape(feature_tensor)[0]
num_images = np.shape(feature_tensor)[1]
square_dim = int((np.shape(feature_tensor)[2]) ** (0.5))
feature_tensor = np.reshape(feature_tensor, (num_filters, num_images, square_dim, square_dim))
feature_tensor = self.activation(feature_tensor)
pool_stride = 2
pool_window_size = 3
downsampled_feature_map = []
for t in range(np.shape(feature_tensor)[0]):
temp_tens = feature_tensor[t, :, :, :]
d = self.sliding_window_tensor(temp_tens, pool_window_size, pool_stride, self.pool_function)
downsampled_feature_map.append(d)
downsampled_feature_map = np.array(downsampled_feature_map)
return downsampled_feature_map
def normalize(self, data, data_mean, data_std):
normalized_data = (data - data_mean) / (data_std + 10 ** (-5))
return normalized_data
def conv_layer(self, tensor, kernels):
num_images = np.shape(tensor)[0]
num_kernels = np.shape(kernels)[0]
tensor = np.reshape(tensor, (
np.shape(tensor)[0], int((np.shape(tensor)[1]) ** (0.5)), int((np.shape(tensor)[1]) ** (0.5))), order='F')
kernel = kernels[0]
self.kernel_size = np.shape(kernel)[0]
padded_tensor = self.pad_tensor(tensor, self.kernel_size)
self.kernels = np.reshape(kernels, (np.shape(kernels)[0], np.shape(kernels)[1] * np.shape(kernels)[2]))
feature_tensor = self.make_feature_tensor(padded_tensor)
feature_tensor = feature_tensor.swapaxes(0, 1)
feature_tensor = np.reshape(feature_tensor, (
np.shape(feature_tensor)[0], np.shape(feature_tensor)[1] * np.shape(feature_tensor)[2]), order='F')
return feature_tensor
def readData():
x, y = fetch_openml('mnist_784', version=1, return_X_y=True)
x = x.T
y = np.array([int(v) for v in y])[np.newaxis,:]
for i in range(len(x)):
if np.average(x[i]) == 0.0:
continue
x[i] = (x[i] - np.mean(x[i])) / np.std(x[i])
return x[:, 0:50000], y[:, 0:50000]
def edge_transformer(x):
kernels = np.array([
[[-1, -1, -1],
[ 0, 0, 0],
[ 1, 1, 1]],
[[-1, -1, 0],
[-1, 0, 1],
[ 0, 1, 1]],
[[-1, 0, 1],
[-1, 0, 1],
[-1, 0, 1]],
[[ 0, 1, 1],
[-1, 0, 1],
[-1, -1, 0]],
[[ 1, 0, -1],
[ 1, 0, -1],
[ 1, 0, -1]],
[[ 0, -1, -1],
[ 1, 0, -1],
[ 1, 1, 0]],
[[ 1, 1, 1],
[ 0, 0, 0],
[-1, -1, -1]],
[[ 1, 1, 0],
[ 1, 0, -1],
[ 0, -1, -1]]])
demo = tensor_conv_layer()
x_transformed = demo.conv_layer(x.T,kernels).T
return x_transformed
#global variable
x_batch, y_batch = readData()
x = x_batch
y = y_batch
x_edgebased_features = edge_transformer(x)
lam = 10**(-5)
def model(vector,w):
a = w[0] + np.dot(vector.T, w[1:])
return a.T
def multiclass_softmax(w):
all_evals = model(x, w)
a = np.log(np.sum(np.exp(all_evals), axis=0))
b = all_evals[y.astype(int).flatten(), np.arange(np.size(y))]
cost = np.sum(a - b)
cost = cost + lam * np.linalg.norm(w[1:, :], 'fro') ** 2
return cost / float(np.size(y))
def gradientDescent(w, mp_flat):
gradient = grad(mp_flat)
grad_eval = gradient(w)
return w - 0.01 * grad_eval
if __name__ == "__main__":
plt.figure()
w = np.ones([x_batch.shape[0] + 1, 10])
w *= 0.01
y1wrong = []
prediction = np.argmax(model(x_batch, w), axis=0)
wrong = 0
for i in range(np.size(y_batch)):
if prediction[i] != y_batch[0][i]:
wrong += 1
y1wrong.append(wrong)
ms_flat, unflatten_func, w = flatten_func(multiclass_softmax, w)
xList = [0]
x = x_batch[:, 0:200]
y = y_batch[:, 0:200]
yList = [ms_flat(w)]
for epoch in range(20):
for i in range(250):
x = x_batch[:, i * 200: i * 200 + 200]
y = y_batch[:, i * 200: i * 200 + 200]
w = gradientDescent(w, ms_flat)
xList.append(epoch + 1)
yList.append(ms_flat(w))
w_temp = copy.deepcopy(w)
w_temp = w_temp.reshape(x_batch.shape[0] + 1, 10)
prediction = np.argmax(model(x_batch, w_temp), axis=0)
wrong = 0
for i in range(np.size(y_batch)):
if prediction[i] != y_batch[0][i]:
wrong += 1
y1wrong.append(wrong)
plt.plot(xList, yList, label="raw data point", color='blue')
w = np.ones([x_edgebased_features.shape[0] + 1, 10])
w *= 0.01
y2wrong = []
prediction = np.argmax(model(x_edgebased_features, w), axis=0)
wrong = 0
for i in range(np.size(y_batch)):
if prediction[i] != y_batch[0][i]:
wrong += 1
y2wrong.append(wrong)
ms_flat, unflatten_func, w = flatten_func(multiclass_softmax, w)
xList = [0]
x = x_edgebased_features[:, 0:200]
y = y_batch[:, 0:200]
yList = [ms_flat(w)]
for epoch in range(20):
for i in range(250):
x = x_edgebased_features[:, i * 200: i * 200 + 200]
y = y_batch[:, i * 200: i * 200 + 200]
w = gradientDescent(w, ms_flat)
xList.append(epoch + 1)
yList.append(ms_flat(w))
w_temp = copy.deepcopy(w)
w_temp = w_temp.reshape(x_edgebased_features.shape[0] + 1, 10)
prediction = np.argmax(model(x_edgebased_features, w_temp), axis=0)
wrong = 0
for i in range(np.size(y_batch)):
if prediction[i] != y_batch[0][i]:
wrong += 1
y2wrong.append(wrong)
plt.plot(xList, yList, label="edge histogram based", color="red")
plt.legend(loc="upper right")
xmajorLocator = MultipleLocator(5)
ax = plt.gca()
ax.xaxis.set_major_locator(xmajorLocator)
plt.show()
plt.close()
plt.figure()
plt.plot(xList, y1wrong, label="raw data point", color="blue")
plt.plot(xList, y2wrong, label="edge histogram based", color="red")
plt.legend(loc="upper right")
ymajorLocator = MultipleLocator(1000)
ax = plt.gca()
ax.xaxis.set_major_locator(xmajorLocator)
ax.yaxis.set_major_locator(ymajorLocator)
plt.ylim(0, 10000)
plt.show()
plt.close()
|
PhoeniXuzoo/NU-Projects | EE475/Ch5P9b.py | <reponame>PhoeniXuzoo/NU-Projects<gh_stars>0
import numpy as np
def readData(csvname):
    # Load the CSV, split features (all rows but last) from the label
    # row, and z-score each feature row in place.
    data = np.loadtxt(csvname, delimiter=',')
    x = data[:-1, :]
    y = data[-1:, :]
    for i in range(len(x)):
        # NOTE(review): divides by zero if a feature is constant --
        # assumed not to occur in auto_data.csv.
        x[i] = (x[i] - np.mean(x[i])) / np.std(x[i])
    return x, y
def gradientDescentEachStep(w, x, y):
    # One full-batch gradient-descent step (alpha = 0.1) on the
    # least-squares cost.  Mutates w in place and also returns it.
    # w_T is taken once, before any component is updated.
    w_T = np.transpose(w)
    new_W = []
    for i in range(len(w)):
        total = 0
        for j in range(len(y[0])):
            # residual of sample j, scaled by feature i of sample j
            total += (np.dot(w_T, np.transpose(np.array([x[:,j]]))) - y[0][j]) * x[i][j]
        new_W.append(w[i][0] - 0.1 * (1/len(y[0])) * total)
    # write the new weights back only after every component is computed,
    # so all components use the same (old) w
    for i in range(len(w)):
        w[i][0] = new_W[i]
    return w
if __name__ == "__main__":
csvname = 'auto_data.csv'
x, y = readData(csvname)
w = np.ones([x.shape[0] + 1,1])
x = np.insert(x, 0, values=np.ones([1, x.shape[1]]), axis=0)
for i in range(1000):
w = gradientDescentEachStep(w, x, y)
totalForRMSE = 0
totalForMAD = 0
w_T = np.transpose(w)
for i in range(len(y[0])):
temp = (np.dot(w_T, np.transpose(np.array([x[:,i]]))) - y[0][i])
totalForRMSE += temp ** 2
totalForMAD += abs(temp)
MSE = totalForRMSE / len(y[0])
RMSE = MSE ** 0.5
MAD = totalForMAD / len(y[0])
print("RMSE is ", RMSE)
print("MAD is ", MAD)
|
PhoeniXuzoo/NU-Projects | EE475/Ch5P2.py | import csv
import numpy as np
import math
def readData(csvname="kleibers_law_data.csv"):
    """Read a two-row CSV (x values, then y values) and return both
    series log-transformed, as lists of floats.

    Improvements: the previously hard-coded file name is now a default
    parameter (existing ``readData()`` callers are unaffected), and the
    file is opened with ``with`` so the handle is always closed.
    """
    xList = []
    yList = []
    with open(csvname, 'r') as fh:
        for i, raw in enumerate(csv.reader(fh)):
            if i == 0:
                xList = raw
            else:
                # later rows overwrite: the last row is taken as y
                yList = raw
    for i in range(len(xList)):
        xList[i] = math.log(float(xList[i]))
        yList[i] = math.log(float(yList[i]))
    return xList, yList
if __name__ == "__main__":
xList, yList = readData()
xBar = np.mean(xList)
yBar = np.mean(yList)
a = 0
b = 0
for i in range(len(xList)):
a += (xList[i] - xBar) * (yList[i] - yBar)
b += (xList[i] - xBar) ** 2
w1 = a / b
w2 = yBar - w1 * xBar
print(w1, w2)
|
PhoeniXuzoo/NU-Projects | EE475/data_transformer.py | from autograd import numpy as np
import edgebased_feature_extractor
# edge-based feature extraction
def edge_transformer(x):
# edge-based directions for kernel-based feature extraction
kernels = np.array([
[[-1, -1, -1],
[ 0, 0, 0],
[ 1, 1, 1]],
[[-1, -1, 0],
[-1, 0, 1],
[ 0, 1, 1]],
[[-1, 0, 1],
[-1, 0, 1],
[-1, 0, 1]],
[[ 0, 1, 1],
[-1, 0, 1],
[-1, -1, 0]],
[[ 1, 0, -1],
[ 1, 0, -1],
[ 1, 0, -1]],
[[ 0, -1, -1],
[ 1, 0, -1],
[ 1, 1, 0]],
[[ 1, 1, 1],
[ 0, 0, 0],
[-1, -1, -1]],
[[ 1, 1, 0],
[ 1, 0, -1],
[ 0, -1, -1]]])
# compute edge-based features
demo = edgebased_feature_extractor.tensor_conv_layer()
x_transformed = demo.conv_layer(x.T,kernels).T
return x_transformed |
PhoeniXuzoo/NU-Projects | EE475/Ch7P2.py | <reponame>PhoeniXuzoo/NU-Projects
import numpy as np
import matplotlib.pyplot as plt
import copy
def readData(csvname):
data = np.loadtxt(csvname, delimiter=',')
x = data[:-1, :]
y = data[-1:, :]
return x, y
def drawpoint(x, y):
position = x[[1,2]]
zeroXList = []
zeroYList = []
oneXList = []
oneYList = []
twoXList = []
twoYList = []
threeXList = []
threeYList = []
for i in range(position.shape[1]):
if (int(y[0][i]) == 0):
zeroXList.append(position[0][i])
zeroYList.append(position[1][i])
elif (int(y[0][i]) == 1):
oneXList.append(position[0][i])
oneYList.append(position[1][i])
elif (int(y[0][i]) == 2):
twoXList.append(position[0][i])
twoYList.append(position[1][i])
elif (int(y[0][i]) == 3):
threeXList.append(position[0][i])
threeYList.append(position[1][i])
plt.scatter(zeroXList, zeroYList, color = 'blue')
plt.scatter(oneXList, oneYList, color = 'red')
plt.scatter(twoXList, twoYList, color='green')
plt.scatter(threeXList, threeYList, color='yellow')
return plt
def drawline(w, c):
a = -(w[1][0] / w[2][0])
b = -(w[0][0] / w[2][0])
foo = lambda x: a * x + b
i = 0.0
xList = []
yList = []
while (i < 1.0):
xList.append(i)
yList.append(foo(i))
i += 0.1
plt.plot(xList, yList, color = c)
class Logistic:
def __init__(self, label, x, y, w, epoch, alpha):
self.label = label
self.epoch = epoch
self.alpha = alpha
self.x = copy.deepcopy(x)
self.w = copy.deepcopy(w)
self.y = copy.deepcopy(y)
for i in range(np.size(self.y)):
if (int(self.y[0][i]) == int(label)):
self.y[0][i] = 1.0
else:
self.y[0][i] = -1.0
# def softmaxCostFunc(self):
# cost = np.sum(np.log(1 + np.exp(-(self.y)*np.transpose(np.dot(np.transpose(self.x), self.w)))))
# return cost / float(np.size(self.y))
#
# def gradientDescentOneStepForSoftmax(self, alpha):
# total = np.zeros([3, 1])
# for i in range(np.size(self.y)):
# power = np.exp(-self.y[:, i] * np.dot(self.x[:, i], self.w))
# term = power / (1 + power)
# total += term * self.y[:, i] * self.x[:, [i]]
#
# self.w = self.w + alpha * (1 / np.size(self.y)) * total
def perceptronCostFunc(self):
cost = 0
a = (-(self.y) * np.transpose(np.dot(np.transpose(self.x), self.w)))[0]
for i in range(len(a)):
cost += a[i] if (a[i] > 0) else 0
return cost / float(np.size(self.y))
def gradientDescentOneStepForPerceptron(self, alpha):
total = np.zeros([3, 1])
for i in range(np.size(self.y)):
term = -self.y[:, i] * np.dot(self.x[:, [i]].T, self.w)
total += 0 if term <= 0 else -self.y[:, i] * self.x[:, [i]]
self.w = self.w - alpha * (1 / np.size(self.y)) * total
def normalizeW(self):
i = 1
sum = 0.0
while (i < np.size(self.w)):
sum += self.w[i][0] ** 2
i += 1
return self.w / (sum ** 0.5)
# def softmaxTrain(self):
# for i in range(self.epoch):
# self.gradientDescentOneStepForSoftmax(self.alpha)
# return self.normalizeW()
def perceptronTrain(self):
for i in range(self.epoch):
self.gradientDescentOneStepForPerceptron(self.alpha)
return self.normalizeW()
def statistic(x, y, zeroW, oneW, twoW, threeW):
wrong = 0
for i in range(np.shape(x)[1]):
zero = np.dot(x[:,[i]].T, zeroW)
one = np.dot(x[:,[i]].T, oneW)
two = np.dot(x[:,[i]].T, twoW)
three = np.dot(x[:,[i]].T, threeW)
if (int(y[0][i]) == 0):
if not (zero >= one and zero >= two and zero >= three):
wrong += 1
elif (int(y[0][i]) == 1):
if not (one >= zero and one >= two and one >= three):
wrong += 1
elif (int(y[0][i]) == 2):
if not (two >= zero and two >= one and two >= three):
wrong += 1
elif (int(y[0][i]) == 3):
if not (three >= zero and three >= one and three >= two):
wrong += 1
return wrong
if __name__ == "__main__":
csvname = '4class_data.csv'
x, y = readData(csvname)
w = np.ones([x.shape[0] + 1, 1])
x = np.insert(x, 0, values=np.ones([1, x.shape[1]]), axis=0)
plt = drawpoint(x, y)
zeroClassifier = Logistic(0.0, x, y, w, 1400, 0.05)
zeroW = zeroClassifier.perceptronTrain()
drawline(zeroW, 'blue')
oneClassifier = Logistic(1.0, x, y, w, 250, 0.05)
oneW = oneClassifier.perceptronTrain()
drawline(oneW, 'red')
w = np.zeros([x.shape[0], 1])
w += 0.05
twoClassifier = Logistic(2.0, x, y, w, 120, 0.01)
twoW = twoClassifier.perceptronTrain()
drawline(twoW, 'green')
threeClassifier = Logistic(3.0, x, y, w, 505, 0.01)
threeW = threeClassifier.perceptronTrain()
drawline(threeW, 'yellow')
plt.show()
print("MisClassifications: ", statistic(x, y, zeroW, oneW, twoW, threeW))
|
PhoeniXuzoo/NU-Projects | EE475/Ch6P15.py | <filename>EE475/Ch6P15.py
import numpy as np
import matplotlib.pyplot as plt
def readData(csvname):
    """Load a CSV whose last row holds labels; z-score each feature row.

    Returns (x, y): x has shape (features, samples) with every row
    standardised to zero mean / unit (population) std, y has shape
    (1, samples).
    """
    raw = np.loadtxt(csvname, delimiter=',')
    x, y = raw[:-1, :], raw[-1:, :]
    x = (x - x.mean(axis=1, keepdims=True)) / x.std(axis=1, keepdims=True)
    return x, y
def perceptronCostFunc(x, y, w, class_index):
    """Weighted perceptron cost: mean of class_index[i] * margin_i over
    samples whose margin -y_i * (x_i . w) is strictly positive."""
    margins = (-y * np.dot(x.T, w).T)[0]
    total = 0
    for weight, margin in zip(class_index, margins):
        if margin > 0:
            total += weight * margin
    return total / float(np.size(y))
def gradientDescentOneStepForPerceptron(x, y, w, class_index, alpha=0.00020):
    """Perform one (sub)gradient step of the weighted perceptron cost.

    Args:
        x: feature matrix, shape (d, m) — one sample per column.
        y: labels in {-1, +1}, shape (1, m).
        w: current weight vector, shape (d, 1). Not modified in place.
        class_index: per-sample cost weights, length m.
        alpha: learning-rate / step size.

    Returns:
        New weight vector of the same shape as w.
    """
    # Bug fix: was np.zeros([21, 1]), which hard-coded the credit dataset's
    # feature count and broke the function for any other weight dimension.
    total = np.zeros(np.shape(w))
    for i in range(np.size(y)):
        # Margin test: the sample contributes to the subgradient only when
        # it is misclassified (-y_i * x_i.w > 0).
        term = -y[:, i] * np.dot(x[:, [i]].T, w)
        if term > 0:
            total += -y[:, i] * x[:, [i]] * class_index[i]
    return w - alpha * (1 / np.size(y)) * total
if __name__ == "__main__":
    csvname = 'credit_dataset.csv'
    x, y = readData(csvname)
    # Bias-augmented weight vector, initialised uniformly at 0.5.
    w = np.zeros([x.shape[0] + 1, 1])
    w += 0.5
    alpha = 0.0025
    goodWeight = 1
    badWeight = 1
    iteration = 5000
    # Prepend a row of ones so w[0] acts as the bias term.
    x = np.insert(x, 0, values=np.ones([1, x.shape[1]]), axis=0)
    print("alpha: ", alpha)
    print("iteration: ", iteration)
    # Per-sample cost weights by class (both 1 here, i.e. unweighted).
    class_index = []
    for i in range(np.size(y)):
        if (y[:, i] > 0):
            class_index.append(goodWeight)
        else:
            class_index.append(badWeight)
    # Record the cost after every gradient step for the convergence plot.
    xPerceptronList = [0]
    yPerceptronList = [perceptronCostFunc(x, y, w, class_index)]
    for i in range(iteration):
        w = gradientDescentOneStepForPerceptron(x, y, w, class_index, alpha)
        xPerceptronList.append(i + 1)
        yPerceptronList.append(perceptronCostFunc(x, y, w, class_index))
    plt.plot(xPerceptronList, yPerceptronList)
    plt.show()
    plt.close()
    # Classify by the sign of x.T @ w and report accuracy.
    yPredic = np.transpose(np.dot(np.transpose(x), w))
    correct = 0
    for i in range(np.size(yPredic)):
        if ((yPredic[0][i] > 0) == (y[0][i] > 0)):
            correct += 1
    # Bug fix: accuracy was divided by a hard-coded 1000; use the actual
    # number of predictions so any dataset size reports correctly.
    print("Accuracy: ", correct / np.size(yPredic))
Mitrofanov/enhanced-rds-monitoring | tests/sample_input.py | # Sample RDSOS output from a MySQL instance
my_sql_dict = {
u"engine": u"MYSQL",
u"instanceID": u"ppr9b9oge80i99",
u"instanceResourceID": u"db-H4UK4SA7E62QPMAYTMPK5XWETQ",
u"timestamp": u"2017-09-19T00:21:07Z",
u"version": 1,
u"uptime": u"272 days, 1:28:18",
u"numVCPUs": 2,
u"cpuUtilization": {
u"guest": 0,
u"irq": 0.72,
u"system": 7.25,
u"wait": 4.35,
u"idle": 78.99,
u"user": 3.62,
u"total": 21.01,
u"steal": 1.45,
u"nice": 3.62
},
u"loadAverageMinute": {
u"fifteen": 0.4,
u"five": 0.3,
u"one": 0.29
},
u"memory": {
u"writeback": 0,
u"hugePagesFree": 0,
u"hugePagesRsvd": 0,
u"hugePagesSurp": 0,
u"cached": 3288068,
u"hugePagesSize": 2048,
u"free": 2746028,
u"hugePagesTotal": 0,
u"inactive": 838368,
u"pageTables": 5448,
u"dirty": 144,
u"mapped": 25548,
u"active": 3766972,
u"total": 7697596,
u"slab": 239772,
u"buffers": 301616
},
u"tasks": {
u"sleeping": 256,
u"zombie": 0,
u"running": 4,
u"stopped": 0,
u"total": 262,
u"blocked": 2
},
u"swap": {
u"cached": 0,
u"total": 7703160,
u"free": 7703160
},
u"network": [
{
u"interface": u"eth0",
u"rx": 821640,
u"tx": 2856222
}
],
u"diskIO": [
{
u"writeKbPS": 20,
u"readIOsPS": 0,
u"await": 1.6,
u"readKbPS": 0,
u"rrqmPS": 0,
u"util": 0.8,
u"avgQueueLen": 0.01,
u"tps": 5,
u"readKb": 0,
u"device": u"rdsdev",
u"writeKb": 20,
u"avgReqSz": 4,
u"wrqmPS": 0,
u"writeIOsPS": 5
}
],
u"fileSys": [
{
u"used": 626316,
u"name": u"rdsfilesys",
u"usedFiles": 523,
u"usedFilePercent": 0.01,
u"maxFiles": 6553600,
u"mountPoint": u"/rdsdbdata",
u"total": 103053476,
u"usedPercent": 0.61
}
],
u"processList": [
{
u"vss": 6365136,
u"name": u"mysqld",
u"tgid": 3305,
u"parentID": 1,
u"memoryUsedPc": 9.14,
u"cpuUsedPc": 0,
u"id": 765,
u"rss": 703620
},
{
u"vss": 6365136,
u"name": u"mysqld",
u"tgid": 3305,
u"parentID": 1,
u"memoryUsedPc": 9.14,
u"cpuUsedPc": 5.5,
u"id": 766,
u"rss": 703620
},
{
u"vss": 6365136,
u"name": u"mysqld",
u"tgid": 3305,
u"parentID": 1,
u"memoryUsedPc": 9.14,
u"cpuUsedPc": 0,
u"id": 780,
u"rss": 703620
},
{
u"vss": 6365136,
u"name": u"mysqld",
u"tgid": 3305,
u"parentID": 1,
u"memoryUsedPc": 9.14,
u"cpuUsedPc": 0,
u"id": 781,
u"rss": 703620
},
{
u"vss": 6365136,
u"name": u"mysqld",
u"tgid": 3305,
u"parentID": 1,
u"memoryUsedPc": 9.14,
u"cpuUsedPc": 0,
u"id": 3305,
u"rss": 703620
},
{
u"vss": 6365136,
u"name": u"mysqld",
u"tgid": 3305,
u"parentID": 1,
u"memoryUsedPc": 9.14,
u"cpuUsedPc": 6,
u"id": 4111,
u"rss": 703620
},
{
u"vss": 6365136,
u"name": u"mysqld",
u"tgid": 3305,
u"parentID": 1,
u"memoryUsedPc": 9.14,
u"cpuUsedPc": 5.5,
u"id": 10888,
u"rss": 703620
},
{
u"vss": 6365136,
u"name": u"mysqld",
u"tgid": 3305,
u"parentID": 1,
u"memoryUsedPc": 9.14,
u"cpuUsedPc": 6,
u"id": 19825,
u"rss": 703620
},
{
u"vss": 6365136,
u"name": u"mysqld",
u"tgid": 3305,
u"parentID": 1,
u"memoryUsedPc": 9.14,
u"cpuUsedPc": 5.5,
u"id": 31951,
u"rss": 703620
},
{
u"vss": 6365136,
u"name": u"mysqld",
u"tgid": 3305,
u"parentID": 1,
u"memoryUsedPc": 9.14,
u"cpuUsedPc": 5.5,
u"id": 32015,
u"rss": 703620
},
{
u"vss": 935380,
u"name": u"OS processes",
u"tgid": 0,
u"parentID": 0,
u"memoryUsedPc": 0.35,
u"cpuUsedPc": 0.5,
u"id": 0,
u"rss": 27824
},
{
u"vss": 1293204,
u"name": u"RDS processes",
u"tgid": 0,
u"parentID": 0,
u"memoryUsedPc": 4.27,
u"cpuUsedPc": 4,
u"id": 0,
u"rss": 329184
}
]
}
# Sample RDSOS output from a SQL Server instance
sql_server_dict = {
u"engine": u"SqlServer",
u"instanceID": u"trevor",
u"instanceResourceID": u"db-YWCA2G6UQEA3NYZ54IS6XEBGUE",
u"timestamp": u"2017-09-12T23:58:59Z",
u"version": 1,
u"uptime": u"0 days, 00:10:43",
u"numVCPUs": 1,
u"cpuUtilization": {
u"idle": 64.13,
u"kern": 11.85,
u"user": 24.01
},
u"memory": {
u"commitTotKb": 1073308,
u"commitLimitKb": 1572464,
u"commitPeakKb": 1109112,
u"physTotKb": 1048176,
u"physAvailKb": 125072,
u"sysCacheKb": 182448,
u"kernTotKb": 192596,
u"kernPagedKb": 136120,
u"kernNonpagedKb": 56476,
u"sqlServerTotKb": 123432,
u"pageSize": 4096
},
u"system": {
u"handles": 14874,
u"threads": 634,
u"processes": 44
},
u"disks": [
{
u"name": u"rdsdbdata",
u"totalKb": 20838336,
u"usedKb": 162752,
u"usedPc": 0.78,
u"availKb": 20675584,
u"availPc": 99.22,
u"rdCountPS": 0,
u"rdBytesPS": 0,
u"wrCountPS": 0,
u"wrBytesPS": 0
}
],
u"network": [
{
u"interface": "Ethernet 2",
u"rdBytesPS": 0,
u"wrBytesPS": 0
}
],
u"processList": [
{
u"name": u"OS processes",
u"cpuUsedPc": 0.3,
u"memUsedPc": 8.46,
u"workingSetKb": 249908,
u"workingSetPrivKb": 88728,
u"workingSetShareableKb": 161180,
u"virtKb": 57985212532
},
{
u"name": u"RDS processes",
u"cpuUsedPc": 1.52,
u"memUsedPc": 20.61,
u"workingSetKb": 367720,
u"workingSetPrivKb": 215996,
u"workingSetShareableKb": 151724,
u"virtKb": 4331792332
},
{
u"name": u"sqlwriter.exe",
u"pid": 1736,
u"cpuUsedPc": 0,
u"memUsedPc": 0.1,
u"workingSetKb": 5876,
u"workingSetPrivKb": 1020,
u"workingSetShareableKb": 4856,
u"virtKb": 38280
},
{
u"name": u"fdlauncher.exe",
u"pid": 2436,
u"cpuUsedPc": 0,
u"memUsedPc": 0.05,
u"workingSetKb": 3576,
u"workingSetPrivKb": 568,
u"workingSetShareableKb": 3008,
u"virtKb": 26776
},
{
u"name": u"sqlservr.exe",
u"pid": 3624,
u"cpuUsedPc": 32.22,
u"memUsedPc": 11.12,
u"workingSetKb": 158676,
u"workingSetPrivKb": 116608,
u"workingSetShareableKb": 42068,
u"virtKb": 2694704
},
{
u"name": u"sqlservr.exe",
u"pid": 3624,
u"tid": 3628,
u"cpuUsedPc": 0
},
{
u"name": u"sqlservr.exe",
u"pid": 3624,
u"tid": 3636,
u"cpuUsedPc": 0
},
{
u"name": u"sqlservr.exe",
u"pid": 3624,
u"tid": 3644,
u"cpuUsedPc": 0
},
{
u"name": u"sqlservr.exe",
u"pid": 3624,
u"tid": 3648,
u"cpuUsedPc": 0
},
{
u"name": u"sqlservr.exe",
u"pid": 3624,
u"tid": 3660,
u"cpuUsedPc": 0
},
{
u"name": u"sqlservr.exe",
u"pid": 3624,
u"tid": 3664,
u"cpuUsedPc": 0
},
{
u"name": u"sqlservr.exe",
u"pid": 3624,
u"tid": 3668,
u"cpuUsedPc": 0
},
{
u'name': u"sqlservr.exe",
u"pid": 3624,
u"tid": 3672,
u"cpuUsedPc": 32.22
},
{
u"name": u"sqlservr.exe",
u"pid": 3624,
u"tid": 3676,
u"cpuUsedPc": 0
},
{
u"name": u"sqlservr.exe",
u"pid": 3624,
u"tid": 3680,
u"cpuUsedPc": 0
},
]
}
# Sample RDSOS output from an Aurora instance
aurora_dict = {
u"engine": u"Aurora",
u"instanceID": u"trevor",
u"instanceResourceID": u"db-F3AZGY5JTAS65TT4E7U3CDS6AM",
u"timestamp": u"2017-09-12T23:02:53Z",
u"version": 1,
u"uptime": u"0:44:23",
u"numVCPUs": 1,
u"cpuUtilization": {
u"guest": 0,
u"irq": 0.04,
u"system": 1.86,
u"wait": 2.61,
u"idle": 90.08,
u"user": 5.01,
u"total": 9.92,
u"steal": 0.36,
u"nice": 0.04
},
u"loadAverageMinute": {
u"fifteen": 0.13,
u"five": 0.1,
u"one": 0.3
},
u"memory": {
u"writeback": 0,
u"hugePagesFree": 1024,
u"hugePagesRsvd": 0,
u"hugePagesSurp": 0,
u"cached": 489508,
u"hugePagesSize": 2048,
u"free": 99192,
u"hugePagesTotal": 367616,
u"inactive": 260216,
u"pageTables": 6244,
u"dirty": 1400,
u"mapped": 84816,
u"active": 890436,
u"total": 2052380,
u"slab": 38916,
u"buffers": 67024
},
u"tasks": {
u"sleeping": 233,
u"zombie": 0,
u"running": 2,
u"stopped": 0,
u"total": 235,
u"blocked": 0
},
u"swap": {
u"cached": 0,
u"total": 0,
u"free": 0
},
u"network": [
{
u"interface": u"eth0",
u"rx": 58724202.3,
u"tx": 2613197.9
}
],
u"diskIO": [
{
u"readLatency": 2.74,
u"writeLatency": 1.5,
u"writeThroughput": 476409.8,
u"readThroughput": 665190.4,
u"readIOsPS": 40.6,
u"diskQueueDepth": 0,
u"writeIOsPS": 1426.8
}
],
u"fileSys": [
{
u"used": 68956,
u"name": u"rdsfilesys",
u"usedFiles": 211,
u"usedFilePercent": 0.01,
u"maxFiles": 2097152,
u"mountPoint": u"/rdsdbdata",
u"total": 32890736,
u"usedPercent": 0.21
}
],
u"processList": [
{
u"vss": 1232180,
u"name": u"aurora",
u"tgid": 5439,
u"vmlimit": 2052380,
u"parentID": 1,
u"memoryUsedPc": 12.47,
u"cpuUsedPc": 0,
u"id": 5439,
u"rss": 255908
},
{
u"vss": 691648,
u"name": u"OS processes",
u"tgid": 0,
u"vmlimit": u"",
u"parentID": 0,
u"memoryUsedPc": 1.15,
u"cpuUsedPc": 0,
u"id": 0,
u"rss": 23888
},
{
u"vss": 3247344,
u"name": u"RDS processes",
u"tgid": 0,
u"vmlimit": u"",
u"parentID": 0,
u"memoryUsedPc": 20.91,
u"cpuUsedPc": 0,
u"id": 0,
u"rss": 429908
}
]
}
|
Mitrofanov/enhanced-rds-monitoring | enhanced_rds/metric_maps.py | <filename>enhanced_rds/metric_maps.py<gh_stars>0
"""
The default structure(s) of the metric payloads delivered to the Lambda.
Note that the Aurora section only contains the structures in which there is
meaningful difference from the standard version.
"""
# Standard set of metric info: top-level keys expected in an RDSOS payload.
METRICS = [
    u'cpuUtilization',
    u'diskIO',
    u'fileSys',
    u'loadAverageMinute',
    u'memory',
    u'network',
    u'swap',
    u'tasks',
    u'OSprocesses',
    u'RDSprocesses'
]
# Per-process fields reported for each entry in the payload's processList.
PROCESS_METRICS = [
    u'vss',
    u'rss',
    u'memoryUsedPc',
    u'cpuUsedPc'
]
# Dimension keys that disambiguate repeated entries (one per device,
# filesystem, or network interface).
METRICS_DIMS = {
    u'diskIO': [u'device'],
    u'fileSys': [u'name', u'mountPoint'],
    u'network': [u'interface']
}
# Metric info for Aurora instances.
METRICS_AURORA_DIMS = {
    u'diskIO': [],  # Workaround to account for Aurora diskIO metrics
    u'fileSys': [u'name', u'mountPoint'],
    u'network': [u'interface']
}
# Metric info for Microsoft SQL instances.
METRICS_MICROSOFT = [
    u'cpuUtilization',
    u'disks',
    u'memory',
    u'network',
    u'OSprocesses',
    u'RDSprocesses',
    u'system'
]
# Per-process fields reported by SQL Server payloads.
PROCESS_METRICS_MICROSOFT = [
    u'cpuUsedPc',
    u'memUsedPc',
    u'workingSetKb',
    u'workingSetPrivKb',
    u'workingSetShareableKb',
    u'virtKb'
]
# Dimension keys for SQL Server payload sections.
METRICS_MICROSOFT_DIMS = {
    u'disks': [u'name'],
    u'network': [u'interface']
}
|
Mitrofanov/enhanced-rds-monitoring | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
# Read runtime dependencies from requirements.txt. Blank lines and comment
# lines are skipped so stray whitespace cannot become an empty/invalid
# install_requires entry (setuptools rejects those at install time).
with open('requirements.txt') as f:
    requirements = [line.strip() for line in f
                    if line.strip() and not line.lstrip().startswith('#')]

setup(
    name='enhanced_rds',
    version='0.1.0',
    description='AWS Lambda function wrapper to capture metrics from enhanced RDS monitoring logs',
    zip_safe=True,
    packages=find_packages(),
    install_requires=requirements,
    author='<NAME>',
    author_email='<EMAIL>',
    license='Apache Software License v2',
    url='https://github.com/signalfx/enhanced-rds-monitoring'
)
|
krava2020/pong | Ping_Pong.py | import pygame
pygame.init()
mw_w = 500
mw_h = 500
mw = pygame.display.set_mode((mw_w, mw_h))
mw.fill((0,255,0))
clock = pygame.time.Clock()
FPS = 60
class Sprite:
    """Image-backed screen object with a position rectangle and a speed."""
    def __init__(self, image_name, x, y, width, hight, speed):
        # Load the image from disk and scale it to the requested size.
        self.image = pygame.transform.scale(
            pygame.image.load(image_name), (width, hight))
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        self.speed = speed
    def reset(self):
        # Redraw the sprite at its current position on the main window.
        mw.blit(self.image, (self.rect.x, self.rect.y))
class Rockets(Sprite):
    """Game actors built on Sprite: the two paddles and the ball.

    The dead commented-out __init__ has been removed; Sprite.__init__ is
    used unchanged.
    """
    def instant_(self):
        """Initialise the ball's per-instance velocity components."""
        # Bug fix: was 'self.spped_x' (typo), so no consistent velocity pair
        # ever existed on the instance.
        self.speed_x = 3
        self.speed_y = 3
    def rocket_l(self):
        """Move the left paddle with W/S, clamped to the window height."""
        key_ = pygame.key.get_pressed()
        if key_[pygame.K_w] and self.rect.y > 0:
            self.rect.y -= self.speed
        if key_[pygame.K_s] and self.rect.y < mw_h - 106:
            self.rect.y += self.speed
    def rocket_r(self):
        """Move the right paddle with Up/Down, clamped to the window height."""
        key_ = pygame.key.get_pressed()
        if key_[pygame.K_UP] and self.rect.y > 0:
            self.rect.y -= self.speed
        if key_[pygame.K_DOWN] and self.rect.y < mw_h - 106:
            self.rect.y += self.speed
    def move(self):
        """Advance the ball by its velocity, bouncing off window edges.

        Bug fix: the original reset local speed variables to 3 on every
        call, so the sign flips never persisted and the ball could not
        bounce. The velocity now lives on the instance (set by instant_()).
        """
        self.rect.x += self.speed_x
        self.rect.y += self.speed_y
        if self.rect.x > mw_w - 26 or self.rect.x < 0:
            self.speed_x *= -1
        if self.rect.y > mw_h - 26 or self.rect.y < 0:
            self.speed_y *= -1
# Left paddle, right paddle, and the ball (all 3 px/frame paddle speed).
rct_l = Rockets('rc.png', 5, 0, 16, 106, 3)
rct_r = Rockets('rc.png', mw_w - 20, 0, 16, 106, 3)
ball = Rockets('ball.png', 220, 4, 26, 26, 5)
# Bug fix: was ball.instand_() — AttributeError, the method is instant_().
ball.instant_()
game = True
# Ball velocity components used by the loop below.
speed1 = 3
speed2 = 3
while game:
    for e in pygame.event.get():
        if e.type == pygame.QUIT:
            game = False
    if game:
        mw.fill((0,255,0))
        rct_l.rocket_l()
        rct_r.rocket_r()
        ball.rect.x += speed1
        ball.rect.y += speed2
        # Bounce off the window edges.
        if ball.rect.x > mw_w - 26 or ball.rect.x < 0:
            speed1 *= -1
        if ball.rect.y > mw_h - 26 or ball.rect.y < 0:
            speed2 *= -1
        # Bug fix: the original line was the incomplete statement
        # "if pygame.sprite.collide_rect" (a SyntaxError). Completed so the
        # ball rebounds horizontally when it touches either paddle.
        if pygame.sprite.collide_rect(ball, rct_l) or \
                pygame.sprite.collide_rect(ball, rct_r):
            speed1 *= -1
        rct_l.reset()
        rct_r.reset()
        ball.reset()
        pygame.display.update()
    clock.tick(FPS)
mbeyers/NCI-DOE-Collab-Pilot1-Tumor_Classifier | Pilot1/TC1/tc1_infer.py | from __future__ import print_function
import pandas as pd
import numpy as np
import os
import sys
import gzip
import argparse
try:
import configparser
except ImportError:
import ConfigParser as configparser
from keras import backend as K
from keras.layers import Input, Dense, Dropout, Activation, Conv1D, MaxPooling1D, Flatten
from keras.optimizers import SGD, Adam, RMSprop
from keras.models import Sequential, Model, model_from_json, model_from_yaml
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint, CSVLogger, ReduceLROnPlateau
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path2 = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path2)
import tc1 as bmk
import candle
def initialize_parameters(default_model = 'tc1_default_model.txt'):
    """Build the TC1 CANDLE benchmark object and resolve its parameter set.

    Args:
        default_model: name of the default model/parameter file to load.

    Returns:
        dict: finalized hyperparameters for the run.
    """
    # Build benchmark object
    # NOTE(review): desc says "Pilot 3 Benchmark 1" although this is the
    # Pilot 1 TC1 script — confirm the intended description.
    tc1Bmk = bmk.BenchmarkTC1(file_path, default_model, 'keras',
    prog='tc1_baseline', desc='Multi-task (DNN) for data extraction from clinical reports - Pilot 3 Benchmark 1')
    # Initialize parameters
    gParameters = candle.finalize_parameters(tc1Bmk)
    #benchmark.logger.info('Params: {}'.format(gParameters))
    return gParameters
def run(gParameters):
    """Restore the trained TC1 model from its JSON and YAML serialisations
    and report test loss/accuracy for both.

    Args:
        gParameters: finalized CANDLE parameter dict (needs 'output_dir',
            'model_name', 'loss', 'optimizer', 'metrics').
    """
    X_train, Y_train, X_test, Y_test = bmk.load_data(gParameters)
    print('X_test shape:', X_test.shape)
    print('Y_test shape:', Y_test.shape)
    # this reshaping is critical for the Conv1D to work
    X_test = np.expand_dims(X_test, axis=2)
    print('X_test shape:', X_test.shape)
    output_dir = gParameters['output_dir']
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    model_name = gParameters['model_name']
    # load json and create model
    json_file = open('{}/{}.model.json'.format(output_dir, model_name), 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model_json = model_from_json(loaded_model_json)
    # load yaml and create model (same architecture, second serialisation
    # format; both variants are evaluated below as a consistency check)
    yaml_file = open('{}/{}.model.yaml'.format(output_dir, model_name), 'r')
    loaded_model_yaml = yaml_file.read()
    yaml_file.close()
    loaded_model_yaml = model_from_yaml(loaded_model_yaml)
    # load weights into new model
    loaded_model_json.load_weights('{}/{}.model.h5'.format(output_dir, model_name))
    print("Loaded json model from disk")
    # evaluate json loaded model on test data
    loaded_model_json.compile(loss=gParameters['loss'],
                              optimizer=gParameters['optimizer'],
                              metrics=[gParameters['metrics']])
    score_json = loaded_model_json.evaluate(X_test, Y_test, verbose=0)
    print('json Test score:', score_json[0])
    print('json Test accuracy:', score_json[1])
    print("json %s: %.2f%%" % (loaded_model_json.metrics_names[1], score_json[1]*100))
    # load weights into new model
    loaded_model_yaml.load_weights('{}/{}.model.h5'.format(output_dir, model_name))
    print("Loaded yaml model from disk")
    # evaluate loaded model on test data
    loaded_model_yaml.compile(loss=gParameters['loss'],
                              optimizer=gParameters['optimizer'],
                              metrics=[gParameters['metrics']])
    score_yaml = loaded_model_yaml.evaluate(X_test, Y_test, verbose=0)
    print('yaml Test score:', score_yaml[0])
    print('yaml Test accuracy:', score_yaml[1])
    print("yaml %s: %.2f%%" % (loaded_model_yaml.metrics_names[1], score_yaml[1]*100))
def main():
    """Entry point: resolve CANDLE parameters, then run inference."""
    run(initialize_parameters())
if __name__ == '__main__':
    main()
    # Free Keras/TF resources once inference is done; the Theano backend
    # lacks clear_session, hence the AttributeError guard.
    try:
        K.clear_session()
    except AttributeError:      # theano does not have this function
        pass
|
quantori/nsaph-platform-deployment | project/examples/test-workflow.py | #!/usr/bin/env python3
from cwl_airflow.extensions.cwldag import CWLDAG
# CWL executor settings: verbose debug output, steps run in parallel.
args = {
    "cwl": {
        "debug": True,
        "parallel": True
    }
}
# Expose the CWL workflow as an Airflow DAG picked up by the scheduler.
dag = CWLDAG(
    workflow="/opt/airflow/project/examples/test-workflow.cwl",
    dag_id="Test-Workflow",
    default_args=args
)
quantori/nsaph-platform-deployment | project/python_sample_project/setup.py | from setuptools import setup, find_packages
# The README is used verbatim as the long description so PyPI renders it.
with open("README.md", "r") as readme:
    long_description = readme.read()

setup(
    name='pisample',
    version="0.1",
    url='',
    license='Apache 2.0',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Sample Python project for CWL-Airflow',
    long_description = long_description,
    long_description_content_type = "text/markdown",
    py_modules = ['pi'],
    packages=find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        # Bug fix: "License :: Harvard University :: Development" is not a
        # valid trove classifier (PyPI rejects unknown classifiers); use the
        # official Apache identifier, matching license= above.
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent"]
)
|
quantori/nsaph-platform-deployment | project/examples/medicaid.py | <gh_stars>0
#!/usr/bin/env python3
from cwl_airflow.extensions.cwldag import CWLDAG
# CWL executor settings: verbose debug output, steps run in parallel.
args = {
    "cwl": {
        "debug": True,
        "parallel": True
    }
}
# Expose the CMS medicaid CWL workflow as an Airflow DAG.
dag = CWLDAG(
    workflow="/opt/airflow/project/cms/src/cwl/medicaid.cwl",
    dag_id="medicaid",
    default_args=args
)
quantori/nsaph-platform-deployment | project/examples/1st-tool.py | #!/usr/bin/env python3
from cwl_airflow.extensions.cwldag import CWLDAG
args = {
"cwl": {
"debug": True,
"parallel": True
}
}
dag = CWLDAG(
workflow="/opt/airflow/project/examples/1st-tool.cwl",
dag_id="1st-tool",
default_args=args
) |
quantori/nsaph-platform-deployment | project/python_sample_project/pi.py | <reponame>quantori/nsaph-platform-deployment
import sys
def calculate(n: int) -> float:
    """Approximate pi using the first n terms of the Leibniz series.

    The series is 4/1 - 4/3 + 4/5 - 4/7 + ...; the partial sum converges
    to pi with error on the order of 1/n.
    """
    total = 0
    for i in range(n):
        denom = 2 * i + 1
        total += 4 / denom if i % 2 == 0 else -(4 / denom)
    return total
if __name__ == '__main__':
    # CLI usage: python pi.py <number_of_series_terms>
    print(calculate(int(sys.argv[1])))
|
quantori/nsaph-platform-deployment | project/examples/airnow.py | <gh_stars>0
#!/usr/bin/env python3
from cwl_airflow.extensions.cwldag import CWLDAG
# CWL executor settings: verbose debug output, steps run in parallel.
args = {
    "cwl": {
        "debug": True,
        "parallel": True
    }
}
# Expose the EPA AirNow CWL workflow as an Airflow DAG.
dag = CWLDAG(
    workflow="/opt/airflow/project/epa/src/cwl/airnow.cwl",
    dag_id="AirNow",
    default_args=args
)
quantori/nsaph-platform-deployment | project/examples/rpi.py | #!/usr/bin/env python3
from cwl_airflow.extensions.cwldag import CWLDAG
args = {
"cwl": {
"debug": True,
"parallel": True
}
}
dag = CWLDAG(
workflow="/opt/airflow/project/examples/rpi.cwl",
dag_id="rpi",
default_args=args
) |
andrewschoen/cephci | tests/ceph_installer/test_ceph_deploy.py | <reponame>andrewschoen/cephci
import logging
import time
from ceph.utils import keep_alive, setup_deb_repos
from ceph.utils import setup_repos, create_ceph_conf
logger = logging.getLogger(__name__)
log = logger
def run(**kw):
    """Provision a Ceph cluster with ceph-deploy across the supplied nodes.

    Args:
        kw['ceph_nodes']: node objects carrying roles ('mon', 'osd',
            'client'), SSH helpers and volume counts.
        kw['config']: repo URLs and flags ('ubuntu_repo', 'base_url',
            'installer_url', 'use_cdn', 'skip_setup').

    Returns:
        int: 0 on success, otherwise the exit status of the failing step.
    """
    log.info("Running test")
    ceph_nodes = kw.get('ceph_nodes')
    config = kw.get('config')
    # NOTE(review): ubuntu_repo/base_url stay unbound when missing from
    # config; the setup_* calls below would then raise NameError — confirm
    # callers always provide them for non-CDN runs.
    if config.get('ubuntu_repo'):
        ubuntu_repo = config.get('ubuntu_repo')
    if config.get('base_url'):
        base_url = config.get('base_url')
    installer_url = None
    if config.get('installer_url'):
        installer_url = config.get('installer_url')
    if config.get('skip_setup') is True:
        log.info("Skipping setup of ceph cluster")
        return 0
    # The first node drives the whole deployment via ceph-deploy.
    ceph1 = ceph_nodes[0]
    out, _ = ceph1.exec_command(cmd='uuidgen')
    uuid = out.read().strip().decode()
    # Collect mon nodes plus space-separated shortname lists for ceph-deploy.
    ceph_mon_nodes = []
    mon_names = ''
    all_nodes = ''
    for ceph in ceph_nodes:
        if ceph.role == 'mon':
            ceph_mon_nodes.append(ceph)
            mon_names = mon_names + ceph.shortname + ' '
        all_nodes = all_nodes + ceph.shortname + ' '
    ceph_conf = create_ceph_conf(fsid=uuid, mon_hosts=ceph_mon_nodes)
    # Build shared SSH material: every node trusts every node's key, and
    # /etc/hosts maps all shortnames so ceph-deploy can reach them by name.
    keys = ''
    hosts = ''
    hostkeycheck = 'Host *\n\tStrictHostKeyChecking no\n\tServerAliveInterval 2400\n'
    for ceph in ceph_nodes:
        ceph.generate_id_rsa()
        keys = keys + ceph.id_rsa_pub
        hosts = hosts + ceph.ip_address + "\t" + ceph.hostname \
            + "\t" + ceph.shortname + "\n"
    for ceph in ceph_nodes:
        keys_file = ceph.write_file(
            file_name='.ssh/authorized_keys', file_mode='a')
        hosts_file = ceph.write_file(
            sudo=True, file_name='/etc/hosts', file_mode='a')
        ceph.exec_command(
            cmd='[ -f ~/.ssh/config ] && chmod 700 ~/.ssh/config',
            check_ec=False)
        ssh_config = ceph.write_file(file_name='.ssh/config', file_mode='w')
        keys_file.write(keys)
        hosts_file.write(hosts)
        ssh_config.write(hostkeycheck)
        keys_file.flush()
        hosts_file.flush()
        ssh_config.flush()
        ceph.exec_command(cmd='chmod 600 ~/.ssh/authorized_keys')
        ceph.exec_command(cmd='chmod 400 ~/.ssh/config')
    # Configure package repositories per node (custom repos unless use_cdn).
    for ceph in ceph_nodes:
        if config.get('use_cdn') is False:
            if ceph.pkg_type == 'deb':
                setup_deb_repos(ceph, ubuntu_repo)
                # install python2 on xenial
                ceph.exec_command(cmd='sudo apt-get install -y python')
            else:
                setup_repos(ceph, base_url, installer_url)
        else:
            log.info("Using the cdn repo for the test")
        log.info("Updating metadata")
        if ceph.pkg_type == 'rpm':
            ceph.exec_command(cmd='sudo yum update metadata')
    # Bootstrap ceph-deploy in a working dir 'cd' and push the generated conf.
    ceph1.exec_command(cmd='mkdir cd')
    ceph1.exec_command(sudo=True, cmd='cd cd; yum install -y ceph-deploy')
    ceph1.exec_command(
        cmd='cd cd; ceph-deploy new {mons}'.format(mons=mon_names))
    cc = ceph1.write_file(file_name='cd/ceph.conf', file_mode='w')
    cc.write(ceph_conf)
    cc.flush()
    # Long-running install across all nodes: poll every 120s while keeping
    # the SSH sessions alive until the remote command reports its status.
    out, err = ceph1.exec_command(
        cmd='cd cd; ceph-deploy install {all_n}'.format(
            all_n=all_nodes), timeout=600, check_ec=False)
    running = True
    while running:
        keep_alive(ceph_nodes)
        log.info("Wait for 120 seconds before next check")
        time.sleep(120)
        if out.channel.exit_status_ready():
            log.info(
                "Command completed on remote node %d",
                out.channel.recv_exit_status())
            running = False
            log.info(out.read().decode())
            log.info(err.read().decode())
        else:
            log.info("Command still running")
    out, err = ceph1.exec_command(
        cmd='cd cd; ceph-deploy mon create-initial', timeout=300)
    if ceph1.exit_status != 0:
        log.error("Failed during mon create-initial")
        return ceph1.exit_status
    # Prepare/activate one OSD per attached volume (/dev/vdb, /dev/vdc, ...)
    # and push admin credentials to client nodes.
    for cnode in ceph_nodes:
        if cnode.role == 'osd':
            hostname = cnode.shortname
            devices = cnode.no_of_volumes
            dev = 98  # start with b
            for vol in range(0, devices):
                device = hostname + ':' + '/dev/vd' + chr(dev)
                # --dmcrypt {device}
                ceph1.exec_command(
                    cmd='cd cd; ceph-deploy osd prepare {device}'.format(device=device), timeout=300)
                device = hostname + ':' + '/dev/vd' + chr(dev) + '1'
                ceph1.exec_command(
                    cmd='cd cd; ceph-deploy osd activate {device}'.format(device=device), timeout=60)
                time.sleep(2)
                dev = dev + 1
            if ceph1.exit_status != 0:
                log.error("Failed during osd activate")
                return ceph1.exit_status
        elif cnode.role == 'client':
            ceph1.exec_command(
                cmd='cd cd; ceph-deploy admin ' + cnode.shortname
            )
    return 0
|
andrewschoen/cephci | tests/ceph_ansible/shrink_mon.py | <reponame>andrewschoen/cephci<filename>tests/ceph_ansible/shrink_mon.py
import logging
import re
log = logging.getLogger(__name__)
def run(ceph_cluster, **kw):
    """
    Remove monitor from cluster using shrink-mon.yml
    Args:
        ceph_cluster (ceph.ceph.Ceph): ceph cluster
    Returns:
        int: non-zero on failure, zero on pass
    """
    log.info("Shrinking monitor")
    config = kw.get('config')
    build = config.get('build', config.get('rhbuild'))
    mon_to_kill_list = config.get('mon-to-kill')
    mon_to_kill = None
    mon_short_name_list = [ceph_node.shortname for ceph_node in ceph_cluster.get_nodes('mon')]
    # Each mon-to-kill entry is a regex matched against mon short names;
    # an entry matching nothing is treated as a configuration error.
    for _mon_to_kill in mon_to_kill_list:
        matcher = re.compile(_mon_to_kill)
        matched_short_names = list(filter(matcher.match, mon_short_name_list))
        if len(matched_short_names) > 0:
            # Drop the mon role from matched nodes in the in-memory model so
            # later health checks expect the shrunken topology.
            shrinked_nodes = [ceph_node for ceph_node in ceph_cluster if ceph_node.shortname in matched_short_names]
            for ceph_node in shrinked_nodes:
                ceph_node.remove_ceph_object(ceph_node.get_ceph_objects('mon')[0])
        else:
            raise RuntimeError('No match for {node_name}'.format(node_name=_mon_to_kill))
        # Accumulate a comma-separated node list for the ansible extra-var.
        mon_to_kill = ','.join([mon_to_kill, ','.join(matched_short_names)]) if mon_to_kill else ','.join(
            matched_short_names)
    ceph_installer = ceph_cluster.get_ceph_object('installer')
    ceph_installer.node.obtain_root_permissions('/var/log')
    ansible_dir = ceph_installer.ansible_dir
    # shrink-mon.yml must be run from the playbook root, so copy it out of
    # infrastructure-playbooks/ first.
    ceph_installer.exec_command(sudo=True, cmd='cp -R {ansible_dir}/infrastructure-playbooks/shrink-mon.yml '
                                               '{ansible_dir}/shrink-mon.yml'.format(ansible_dir=ansible_dir))
    out, err = ceph_installer.exec_command(cmd='export ANSIBLE_DEPRECATION_WARNINGS=False ; cd {ansible_dir} ; '
                                               'ansible-playbook -vvvv -e ireallymeanit=yes shrink-mon.yml '
                                               '-e mon_to_kill={mon_to_kill} -i hosts'.format(ansible_dir=ansible_dir,
                                                                                              mon_to_kill=mon_to_kill),
                                           long_running=True)
    # With long_running=True the second return value is the exit status.
    if err != 0:
        log.error("Failed during ansible playbook run")
        return err
    return ceph_cluster.check_health(build)
|
slimpotatoes/STEM_Moire_GPA | src/gpa.py | <reponame>slimpotatoes/STEM_Moire_GPA
# GPA Module
import mask as mask
import data as data
import numpy as np
from skimage.restoration import unwrap_phase
def gpa(mask_id, datastruct):
    """Perform GPA on the GUI id, mask_id using the data in datastructure:
    1. Load elements of GUI (center, radius) to create the mask
    2. Load the Fourier transform of the ISMHexp (element to mask)
    3. Create the mask in the image space
    4. Store the approximation of the unstrain reference as the center of the circle g_uns
    5. Mask the Fourier transform of ISMHexp
    6. Calculate the phase of the masked section of ISMHexp
    7. Calculate the g_M vector by taking the gradient of the phase
    8. Calculate the variation of the g_M vector as the difference of g_M and g_uns
    9. Calculate the phase corrected by removing the contribution of g_uns to be only related to delta_g
    10. Store delta_g and the phase related to delta_g

    Args:
        mask_id: GUI identifier of the mask whose (center, radius) to use.
        datastruct: data.SMGData container holding the experiment arrays.
    """
    # Load the elements
    center = data.SMGData.load_g(datastruct, mask_id, 'Mask')[0]
    r = data.SMGData.load_g(datastruct, mask_id, 'Mask')[1]
    ft_ismh_exp = data.SMGData.load(datastruct, 'FTISMHexp')
    # Generate the mask in the image space
    m, g_uns = mask.mask_gaussian(center, r, ft_ismh_exp.shape)
    # Store the unstrain reference in the datastructure
    data.SMGData.store_g(datastruct, mask_id, 'gMuns', g_uns)
    # Mask and calculate the phase component
    masked_ft_ismh_exp = np.multiply(m, np.fft.fftshift(ft_ismh_exp))
    phase_g_m = np.angle(np.fft.ifft2(np.fft.ifftshift(masked_ft_ismh_exp)))
    data.SMGData.store_g(datastruct, mask_id, 'phaseraw', phase_g_m)
    # Calculate g_m and the variation of g_M (gradient of the unwrapped
    # phase, scaled by 1/2pi; axis 0 is y, axis 1 is x)
    g_m = np.array([1 / (2 * np.pi) * np.gradient(unwrap_phase(phase_g_m))[0],
                    1 / (2 * np.pi) * np.gradient(unwrap_phase(phase_g_m))[1]])
    delta_g_m = np.subtract(g_m, g_uns)
    # Calculate phase corrected by removing the contribution of g_uns to be only related to delta_g
    mesh_x, mesh_y = np.meshgrid(np.arange(phase_g_m.shape[0]), np.arange(phase_g_m.shape[1]))
    unwrapped_phase_delta_g_m = np.array(unwrap_phase(phase_g_m)) - 2 * np.pi * (
        np.multiply(g_uns[1], mesh_x) + np.multiply(g_uns[0], mesh_y))
    # Re-wrap the corrected phase back into (-pi, pi].
    phase_delta_g_m = unwrapped_phase_delta_g_m - np.round(unwrapped_phase_delta_g_m / (2 * np.pi)) * 2 * np.pi
    # Store the final data
    data.SMGData.store_g(datastruct, mask_id, 'deltagM', delta_g_m)
    data.SMGData.store_g(datastruct, mask_id, 'phasegM', phase_delta_g_m)
|
slimpotatoes/STEM_Moire_GPA | src/userinput.py | # Input Module
import dm3_lib as dm3_lib
import numpy as np
import data as data
def load_files(file_path_smh, file_path_ic, datastruct):
    """Load the files by asking the controller the filepaths smh and ic provided by the user and store the appropriate
    data in the data structure object (dictionary). Before storing, the data are verified"""
    dm3_meta_smh = dm3_lib.DM3(file_path_smh)
    dm3_meta_ic = dm3_lib.DM3(file_path_ic)
    # Validate pixel data and metadata before anything is stored.
    verify_i(dm3_meta_smh.imagedata, dm3_meta_ic.imagedata)
    # pxsize is a (value, unit-bytes) pair; units are decoded for comparison.
    pixel_smh = dm3_meta_smh.pxsize
    pixel_ic = dm3_meta_ic.pxsize
    verify_p(pixel_smh[0], pixel_ic[0])
    verify_p_unit(pixel_smh[1].decode("ascii"), pixel_ic[1].decode("ascii"))
    data.SMGData.store(datastruct, 'ISMHexp', dm3_meta_smh.imagedata)
    data.SMGData.store(datastruct, 'p', pixel_smh[0])
    data.SMGData.store(datastruct, 'ICref', dm3_meta_ic.imagedata)
    data.SMGData.store(datastruct, 'pref', pixel_ic[0])
    print('Files loaded')
    print('Pixel size SMH: ', pixel_smh[0], 'nm')
    print('Pixel size Reference: ', pixel_ic[0], 'nm')
def verify_i(ismh, ic):
    """Verify that both input images contain only real numbers.

    Args:
        ismh: STEM Moire hologram image array.
        ic: Reference image array.

    Raises:
        Exception: if either array has a non-real (complex) element.
    """
    # Bug fix: the original iterated np.nditer(np.isreal(...)) and tested
    # "value is False". numpy yields bool_ scalars, which are never the
    # Python False singleton, so the check could never fire. np.all on the
    # boolean mask performs the intended validation.
    if not np.all(np.isreal(ismh)):
        raise Exception('The STEM Moire hologram is not composed of real numbers.')
    if not np.all(np.isreal(ic)):
        raise Exception('The Reference Image is not composed of real numbers.')
def verify_p(p, pref):
    """Verify that both pixel sizes are real numbers strictly positive.

    Prints a notice (instead of raising) when both pixel sizes are equal,
    i.e. the software falls back to classic HRSTEM GPA mode.

    Raises:
        Exception: if either pixel size is not a strictly positive number.
    """
    # Bug fix: the original tested "p < 0", letting 0 through even though
    # the contract is "strictly positive"; it also rejected ints via
    # isinstance(p, float). Accept int or float (bool excluded: it is an
    # int subclass) and require > 0.
    if isinstance(p, bool) or not isinstance(p, (int, float)) or p <= 0:
        raise Exception('The pixel size of the STEM Moire hologram is not a real number strictly positive.')
    if isinstance(pref, bool) or not isinstance(pref, (int, float)) or pref <= 0:
        raise Exception('The pixel size of the Reference image is not a real number strictly positive.')
    if p == pref:
        print('The pixel size is the same for the STEM Moire hologram and the Reference image. The software '
              'is now running in classic HRSTEM GPA mode.')
    # Improve it with a raise warning but it doesn't continue
def verify_p_unit(unit_p, unit_pref):
    """Check that both pixel-size units are nanometres and consistent.

    Raises:
        Exception: if either unit is not 'nm', or the units differ.
    """
    checks = (
        (unit_p, 'The pixel size of the STEM Moire hologram is not in nanometer.'),
        (unit_pref, 'The pixel size of the Reference image is not in nanometer.'),
    )
    for unit, message in checks:
        if unit != 'nm':
            raise Exception(message)
    if unit_p != unit_pref:
        raise Exception('The units used for the pixel size between the two images are different.')
|
slimpotatoes/STEM_Moire_GPA | src/test_mask.py | import pytest
import mask as mask
import numpy as np
# Test cases
test_case_0 = dict()
test_case_0['center'] = (2, 3)
test_case_0['radius'] = 1
test_case_0['image_test'] = np.ones(shape=(6, 6))
test_case_0['image_to_assert'] = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]])
test_case_0['g0'] = np.array([0, -1/6])
test_case_1 = dict()
test_case_1['center'] = (2, 3)
test_case_1['radius'] = 1
test_case_1['image_test'] = np.ones(shape=(5, 5))
test_case_1['image_to_assert'] = np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
[0, 0, 0, 0, 0], [0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]])
test_case_1['g0'] = np.array([1/10, -1/10])
test_case_2 = dict()
test_case_2['center'] = (3, 2)
test_case_2['radius'] = 1
test_case_2['image_test'] = np.ones(shape=(6, 6))
test_case_2['image_to_assert'] = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]])
test_case_2['g0'] = np.array([-1/6, 0])
test_case_3 = dict()
test_case_3['center'] = (4, 3)
test_case_3['radius'] = 1
test_case_3['image_test'] = np.ones(shape=(6, 6))
test_case_3['image_to_assert'] = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]])
test_case_3['g0'] = np.array([0, 1/6])
test_case_4 = dict()
test_case_4['center'] = (3, 4)
test_case_4['radius'] = 1
test_case_4['image_test'] = np.ones(shape=(6, 6))
test_case_4['image_to_assert'] = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0]])
test_case_4['g0'] = np.array([1/6, 0])
test_cases = [test_case_0, test_case_1, test_case_2, test_case_3, test_case_4]
# -------------------------------------------------
# Test of classic mask on test cases
# -------------------------------------------------
# -------------------------------------------------
# Test of gaussian mask checking position of center and g_0 on test cases
# -------------------------------------------------
'''@pytest.mark.parametrize('image, image_g', image_test_after_classic_mask)
def test_gaussian_mask(image, image_g):
image_masked = mask.mask_gaussian(center, radius, image.shape)
assert(image_masked[0][3, 2] == 1)
assert(np.all(image_masked[1] == image_g))'''
@pytest.mark.parametrize("test_case", test_cases)
def test_classic_masking(test_case):
    """The classic mask built from the case geometry must equal the expected image."""
    image_shape = np.shape(test_case['image_test'])
    masked = np.array(
        mask.mask_classic(test_case['center'], test_case['radius'], image_shape)[0])
    assert np.all(masked == test_case['image_to_assert'])
@pytest.mark.parametrize("test_case", test_cases)
def test_classic_g0(test_case):
    """The classic mask's g vector at pixel (0, 0) must match the expected g0."""
    image_shape = np.shape(test_case['image_test'])
    g_at_origin = np.array(
        mask.mask_classic(test_case['center'], test_case['radius'], image_shape)[1][:, 0, 0])
    assert np.all(g_at_origin == test_case['g0'])
@pytest.mark.parametrize("test_case", test_cases)
def test_gaussian_g0(test_case):
    """The gaussian mask's g vector at pixel (0, 0) must match the expected g0."""
    image_shape = np.shape(test_case['image_test'])
    g_at_origin = np.array(
        mask.mask_gaussian(test_case['center'], test_case['radius'], image_shape)[1][:, 0, 0])
    assert np.all(g_at_origin == test_case['g0'])
slimpotatoes/STEM_Moire_GPA | src/rotatecalc.py | import numpy as np
import math
import data
def angle_rotation(gui):
    """Return the signed angle (radians, in (-pi, pi]) between the user-drawn
    line and the horizontal axis.

    The two endpoints come from ``gui.line_rot.LineCoords``; image coordinates
    (row, col order after the swap below) are flipped into a y-up mathematical
    frame before the angle is measured.
    """
    coords = gui.line_rot.LineCoords
    pos_a = [coords[0][1], coords[0][0]]
    pos_b = [coords[1][1], coords[1][0]]
    print('first position = ', pos_a)
    print('second position = ', pos_b)
    # Flip into a y-up frame: image rows grow downward.
    point_a = [pos_a[1], -pos_a[0]]
    point_b = [pos_b[1], -pos_b[0]]
    print('first point = ', point_a)
    print('second point = ', point_b)
    dx = point_b[0] - point_a[0]
    dy = point_b[1] - point_a[1]
    length = math.sqrt(dx ** 2 + dy ** 2)
    cos_component = dx / length
    sin_component = dy / length
    # arccos gives [0, pi]; the sign of the vertical projection restores the
    # full (-pi, pi] range.
    theta = np.arccos(cos_component) if sin_component >= 0 else -np.arccos(cos_component)
    print('theta = ', theta)
    return theta
def rotate_tensor(datastruct, gui):
    """Rotate the per-pixel strain and rotation tensor maps into the frame set
    by the user-drawn line, and store the rotated components back.

    The angle theta comes from angle_rotation(gui).  Each per-pixel 2x2 tensor
    T is replaced in place by R^T . T . R, with R the 2D rotation matrix.
    """
    theta = angle_rotation(gui)
    exx = data.SMGData.load(datastruct, 'Exx')
    eyy = data.SMGData.load(datastruct, 'Eyy')
    exy = data.SMGData.load(datastruct, 'Exy')
    rxy = data.SMGData.load(datastruct, 'Rxy')
    # Assemble per-pixel 2x2 tensors: symmetric strain, antisymmetric rotation.
    epsilon = np.array([[exx, exy], [exy, eyy]])
    omega = np.array([[np.zeros(rxy.shape), rxy], [-rxy, np.zeros(rxy.shape)]])
    r = np.array([[np.cos(theta), -np.sin(theta)],
                  [np.sin(theta), np.cos(theta)]])
    # Move the tensor axes last so epsilon[i, j] is the 2x2 tensor at pixel (i, j).
    epsilon = np.transpose(epsilon, axes=[2, 3, 0, 1])
    omega = np.transpose(omega, axes=[2, 3, 0, 1])
    for i in range(0, epsilon[:, :, 0, 0].shape[0]):
        for j in range(0, epsilon[:, :, 0, 0].shape[1]):
            # Similarity transform R^T . T . R, pixel by pixel.
            epsilon[i, j] = np.dot(np.transpose(r), np.dot(epsilon[i, j], r))
            omega[i, j] = np.dot(np.transpose(r), np.dot(omega[i, j], r))
    # Restore the (2, 2, rows, cols) layout before storing the components.
    epsilon = np.transpose(epsilon, axes=[2, 3, 0, 1])
    omega = np.transpose(omega, axes=[2, 3, 0, 1])
    data.SMGData.store(datastruct, 'Exx', epsilon[0, 0])
    data.SMGData.store(datastruct, 'Eyy', epsilon[1, 1])
    data.SMGData.store(datastruct, 'Exy', epsilon[1, 0])
    data.SMGData.store(datastruct, 'Rxy', omega[1, 0])
|
slimpotatoes/STEM_Moire_GPA | src/test_conversion.py | <reponame>slimpotatoes/STEM_Moire_GPA
import pytest
import data
import numpy as np
import conversion
mask = 'Mask1'
# Create elements used by conversion in the data structure
# -------------------------------
# Test cases
# -------------------------------
# Each case bundles: a pre-filled SMGData instance (pixel size 'p' and the
# unstrained Moire vector 'gMuns'), the integer corrections n/m, and the
# expected crystalline vector 'gCuns' after conversion.
# Test 24 in TestPlan.pdf
test_case_0 = dict()
datastruct_0 = data.SMGData()
datastruct_0.create_branch(mask)
datastruct_0.store('p', 1/3)
datastruct_0.store_g(mask, 'gMuns', np.transpose(np.array([[[1, -1]]]), axes=(2, 0, 1)))
test_case_0['datastruct'] = datastruct_0
test_case_0['n_horizontal'] = 1
test_case_0['m_vertical'] = 2
test_case_0['to_assert'] = np.transpose(np.array([[[-1, 0]]]), axes=(2, 0, 1))
test_case_1 = dict()
datastruct_1 = data.SMGData()
datastruct_1.create_branch(mask)
datastruct_1.store('p', 1/3)
datastruct_1.store_g(mask, 'gMuns', np.transpose(np.array([[[1, -1]]]), axes=(2, 0, 1)))
test_case_1['datastruct'] = datastruct_1
test_case_1['n_horizontal'] = -1
test_case_1['m_vertical'] = -2
test_case_1['to_assert'] = np.transpose(np.array([[[3, -2]]]), axes=(2, 0, 1))
test_case_2 = dict()
datastruct_2 = data.SMGData()
datastruct_2.create_branch(mask)
datastruct_2.store('p', 1/3)
datastruct_2.store_g(mask, 'gMuns', np.transpose(np.array([[[1, 0]]]), axes=(2, 0, 1)))
test_case_2['datastruct'] = datastruct_2
test_case_2['n_horizontal'] = 0
test_case_2['m_vertical'] = -2
test_case_2['to_assert'] = np.transpose(np.array([[[3, 0]]]), axes=(2, 0, 1))
test_case_3 = dict()
datastruct_3 = data.SMGData()
datastruct_3.create_branch(mask)
datastruct_3.store('p', 1/3)
datastruct_3.store_g(mask, 'gMuns', np.transpose(np.array([[[-1, 0]]]), axes=(2, 0, 1)))
test_case_3['datastruct'] = datastruct_3
test_case_3['n_horizontal'] = 0
test_case_3['m_vertical'] = 2
test_case_3['to_assert'] = np.transpose(np.array([[[-3, 0]]]), axes=(2, 0, 1))
test_cases = [test_case_0, test_case_1, test_case_2, test_case_3]
@pytest.mark.parametrize("test_case", test_cases)
def test_conversion(test_case):
    """conversion() must write the expected crystalline wave vector under 'gCuns'."""
    store = test_case['datastruct']
    conversion.conversion(mask, test_case['n_horizontal'], test_case['m_vertical'], store)
    result = np.array(store.load_g(mask, 'gCuns'))
    assert np.all(result == test_case['to_assert'])
|
slimpotatoes/STEM_Moire_GPA | src/test_data.py | """ Tests of the SMGData class from data.py """
import pytest
import data
# Test setup
# SMGData instance
smgData = data.SMGData()
smgData.create_branch('Mask1')
smgData.create_branch('Mask2')
# This definition should have been provided by the SMGData interface
possibleKeys = [
"ICref",
"pref",
"FTISMHexp",
"FTISMHsim",
"FTISMHsimDisplay",
"Uref",
"Exx",
"Eyy",
"Exy",
"Rxy",
]
# This definition should have been provided by the SMGData interface
possibleKeysOfGuiIds = [
"Mask",
"gMuns",
"deltagM",
"phasegM",
"shiftg",
"gCuns",
]
possibleGuiIds = [
"Mask1",
"Mask2",
]
# -------------------------------------------------
# Test of positive (OK) cases of data stored
# -------------------------------------------------
@pytest.mark.parametrize("key", possibleKeys)
def test_data_store_ok(key):
    """store() must place the value under *key* in the backing dict.

    Renamed from ``test_data_store_load_ok``: a later test function in this
    file reused that exact name, so this definition was shadowed and never
    collected by pytest.
    """
    smgData.store(key, key)
    assert smgData.SMGData[key] == key
# -------------------------------------------------
# Test of positive (OK) cases of data stored in an existing branch
# -------------------------------------------------
@pytest.mark.parametrize("key", possibleKeysOfGuiIds)
@pytest.mark.parametrize("gui_id", possibleGuiIds)
def test_data_store_g_ok(gui_id, key):
    """store_g() must place the value in the *gui_id* branch, readable both
    directly and through load_g().

    Renamed from ``test_data_store_load_g_ok``: a later test function in this
    file reused that exact name, so this definition was shadowed and never
    collected by pytest.
    """
    smgData.store_g(gui_id, key, key)
    assert smgData.SMGData[gui_id][key] == key
    assert smgData.load_g(gui_id, key) == key
# -------------------------------------------------
# Test of positive cases of data to load
# -------------------------------------------------
@pytest.mark.parametrize("key", possibleKeys)
def test_data_store_load_ok(key):
    """A value stored under *key* must round-trip through load()."""
    smgData.store(key, key)
    assert smgData.load(key) == key
# -------------------------------------------------
# Test of positive cases of data to load from an existing branch
# -------------------------------------------------
@pytest.mark.parametrize("key", possibleKeysOfGuiIds)
@pytest.mark.parametrize("gui_id", possibleGuiIds)
def test_data_store_load_g_ok(gui_id, key):
    """A value stored in a branch via store_g() must round-trip through load_g()."""
    smgData.store_g(gui_id, key, key)
    assert smgData.load_g(gui_id, key) == key
# -------------------------------------------------
# Test of positive cases of non-string data to store
# -------------------------------------------------
def test_data_save_load_non_string_ok():
    """Non-string payloads (int, float, large list) must round-trip unchanged."""
    for key, payload in ((possibleKeys[0], 1), (possibleKeys[1], 1.25)):
        smgData.store(key, payload)
        assert smgData.load(key) == payload
    # A big list exercises storage of a large mutable object.
    big_list = list(range(100000))
    smgData.store(possibleKeys[2], big_list)
    assert smgData.load(possibleKeys[2]) == big_list
# -------------------------------------------------
# Test of negative (NOK) cases of data save and load
# -------------------------------------------------
def test_data_store_nok_illegal_key():
    """store() with an unknown key must raise with the documented message."""
    with pytest.raises(Exception) as excinfo:
        smgData.store('an illegal key', 0)
    assert str(excinfo.value) == 'Key does not exist in the data structure'
def test_data_load_nok_illegal_key():
    """load() with an unknown key must raise with the documented message."""
    with pytest.raises(Exception) as excinfo:
        smgData.load('an illegal key')
    assert str(excinfo.value) == 'Key does not exist in the data structure'
|
slimpotatoes/STEM_Moire_GPA | src/conversion.py | # Conversion Module
import numpy as np
import data as data
import math as math
def conversion(mask_id, n_horizontal, m_vertical, datastruct):
    """Convert the unstrained Moire wave-vector map into its crystalline counterpart.

    The Moire 3D array stored under *mask_id* (key 'gMuns': a 2D vector per
    pixel, components separated along axis 0) is corrected component-wise with
    the integers *n_horizontal* and *m_vertical*; the result is stored back
    under 'gCuns'.  The pixel size 'p' (strictly positive, loaded from
    *datastruct*) sets the physical scale of the reported norm.
    """
    # Pixel size must be strictly positive for the physical norm to make sense.
    pixel_size = data.SMGData.load(datastruct, 'p')
    if pixel_size <= 0:
        raise Exception('Pixel size negative or zero, conversion cannot be performed')
    g_moire = data.SMGData.load_g(datastruct, mask_id, 'gMuns')
    # Component-wise correction array: g[0] runs along x (vertical axis
    # pointing down), g[1] along y (horizontal axis pointing right).
    correction = np.ones(g_moire.shape)
    correction[0, :, :] *= -m_vertical
    correction[1, :, :] *= n_horizontal
    # Apply the correction and store the unstrained crystalline 3D array.
    g_crystal = g_moire + correction
    data.SMGData.store_g(datastruct, mask_id, 'gCuns', g_crystal)
    # Report completion and the norm of the crystalline wave vector.
    g_norm = 1 / pixel_size * math.sqrt(g_crystal[0, 0, 0] ** 2 + g_crystal[1, 0, 0] ** 2)
    print('Conversion done !!')
    print('g norm = ', g_norm, ' nm-1')
|
slimpotatoes/STEM_Moire_GPA | src/main.py | <gh_stars>1-10
# ############################# #
# #
# STEM Moire GPA Software #
# #
# ############################# #
#
# #####################################################################################
#
# Python script calculating the 2D relative strain maps from a STEM Moire hologram.
# <NAME> <<EMAIL>>
# https://github.com/slimpotatoes/STEM_Moire_GPA
# 18/06/2019
#
# ###########################################
# STEM Moire GPA control Module
import matplotlib.pyplot as plt
import gui as gui
import data as data
import userinput as userinput
import smhsimulation as smhsimu
import gpa as gpa
import unstrainref as uref
import conversion as conversion
import straincalc as strain
import rotatecalc as rotate
def main():
    """Connection of the different events (button clicked by user) with the process steps of
    STEM Moire GPA processing"""
    # Each flow_* closure below captures smgdata/smggui (defined further down)
    # and first rejects clicks that did not land on its own button axes.
    def flow_input(event):
        """Input Process
        1. Call the GUI to create an open file dialog for the user to input files.
        2. Verify and import files in smgdata.
        3. Display the SMH and ICref images to the user."""
        if not event.inaxes == smggui.event_input.ax:
            raise Exception('Improper input axis')
        file_path_smh, file_path_ic = smggui.open_files()
        userinput.load_files(file_path_smh, file_path_ic, smgdata)
        smggui.guismhexp(smgdata)
    def flow_smhsim(event):
        """Simulation of the STEM Moire hologram Process
        1. Call smh_sim function in smhsimulation module to simulate the STEM Moire hologram from ICref and store
        the results in smgdata.
        2. Display the results of the simulation to the user using guismhsim window."""
        if not event.inaxes == smggui.event_smhsim.ax:
            raise Exception('Improper shmsim axis')
        smhsimu.smh_sim(smgdata)
        smggui.guismhsim(smgdata)
    def flow_gpa(event):
        """Geometrical Phase Analysis Process
        1. Collect the mask selected by the user on the guismhsim window.
        2. Perform the GPA calculation on the selected mask and store the results in smgdata.
        3. Display the GPA result (phase image) to the user using guiphase window."""
        if not event.inaxes == smggui.event_gpa.ax:
            raise Exception('Improper gpa axis')
        mask_selected = smggui.mask_selection()
        gpa.gpa(mask_selected, smgdata)
        smggui.guiphase(mask_selected, smgdata)
    def flow_ref(event):
        """Unstrained reference definition Process. On the two phase images calculated by GPA,
        1. Call the update_zerostrain function in unstrainref module to update the unstrain reference on the
        phase image and store the results in smgdata.
        2. Display the updated phase image with the new unstrained reference on the guiphase window."""
        if not event.inaxes == smggui.event_ref.ax:
            raise Exception('Improper ref axis')
        # Both mask branches are refreshed with the new reference.
        for mask_id in ['Mask1', 'Mask2']:
            uref.update_zerostrain(mask_id, smgdata)
            smggui.update_phase(mask_id, smgdata)
    def flow_convert(event):
        """Moire to crystal data conversion Process. Call the conversion function in the conversion module for both
        masks."""
        if not event.inaxes == smggui.event_convert.ax:
            raise Exception('Improper convert axis')
        # The n/m integer corrections come from the GUI widgets (h_*, v_*).
        print(smggui.h_1, smggui.h_2, smggui.v_1, smggui.v_2)
        conversion.conversion('Mask1', smggui.h_1, smggui.v_1, smgdata)
        conversion.conversion('Mask2', smggui.h_2, smggui.v_2, smgdata)
    def flow_strain(event):
        """Strain tensor calculation from two non collinear crystalline wave vector Process"""
        if not event.inaxes == smggui.event_strain.ax:
            raise Exception('Improper strain axis')
        strain.strain_calculation('Mask1', 'Mask2', smgdata)
        rotate.rotate_tensor(smgdata, smggui)
        smggui.guistrain(smgdata)
    """Creation of the GUI and the Data object"""
    smgdata = data.SMGData()
    smggui = gui.SMGGUI(smgdata)
    """Call of the GUI module functions to pop up the initial windows for the user"""
    smggui.guiconv()
    smggui.guiflow()
    """Connection of the event "button clicked by the user" to a function"""
    smggui.event_input.on_clicked(flow_input)
    smggui.event_smhsim.on_clicked(flow_smhsim)
    smggui.event_gpa.on_clicked(flow_gpa)
    smggui.event_ref.on_clicked(flow_ref)
    smggui.event_convert.on_clicked(flow_convert)
    smggui.event_strain.on_clicked(flow_strain)
    # Blocks until all GUI windows are closed.
    plt.show()
if __name__ == "__main__":
    main()
|
slimpotatoes/STEM_Moire_GPA | src/data.py | <filename>src/data.py<gh_stars>1-10
# Data Structure Module
class SMGData(object):
    """Key/value store for all intermediate data used during STEM Moire GPA
    processing.

    Data lives in the ``self.SMGData`` dict.  The top-level keys are fixed at
    construction time; per-mask sub-dictionaries ("branches") are created on
    demand with :meth:`create_branch` and addressed through the ``*_g``
    accessors.  Accessing an unknown key or branch raises ``Exception``.
    """

    def __init__(self):
        # Fixed top-level slots; the processing steps fill them in later.
        self.SMGData = dict()
        self.SMGData['ISMHexp'] = None
        self.SMGData['p'] = None
        self.SMGData['ICref'] = None
        self.SMGData['pref'] = None
        self.SMGData['FTISMHexp'] = None
        self.SMGData['FTISMHsim'] = None
        self.SMGData['FTISMHsimDisplay'] = None
        self.SMGData['Uref'] = None
        self.SMGData['Exx'] = None
        self.SMGData['Eyy'] = None
        self.SMGData['Exy'] = None
        self.SMGData['Rxy'] = None

    def create_branch(self, gui_id):
        """Create a (sub)dictionary in dictionary SMGData associated with the
        string key gui_id representing the id of a mask GUI object.

        Raises if *gui_id* already exists (including when it collides with a
        fixed top-level key).
        """
        if gui_id not in self.SMGData.keys():
            self.SMGData[gui_id] = dict()
            self.SMGData[gui_id]['Mask'] = None
            self.SMGData[gui_id]['gMuns'] = None
            self.SMGData[gui_id]['phaseraw'] = None
            self.SMGData[gui_id]['deltagM'] = None
            self.SMGData[gui_id]['phasegM'] = None
            self.SMGData[gui_id]['shiftg'] = None
            self.SMGData[gui_id]['gCuns'] = None
        else:
            raise Exception('Key gui_id already exists, branch creation aborted')

    def remove_branch(self, gui_id):
        """Remove the (sub)dictionary associated with *gui_id*.

        Removing a branch that does not exist is deliberately a no-op.
        """
        if gui_id in self.SMGData.keys():
            del self.SMGData[gui_id]

    def store(self, key, a):
        """Store object *a* under the existing top-level string *key*."""
        if key in self.SMGData.keys():
            self.SMGData[key] = a
        else:
            raise Exception('Key does not exist in the data structure')

    def load(self, key):
        """Return the object stored under the existing top-level string *key*."""
        if key in self.SMGData.keys():
            return self.SMGData[key]
        else:
            raise Exception('Key does not exist in the data structure')

    def store_g(self, gui_id, key, a):
        """Store object *a* under *key* in the (sub)dictionary SMGData[gui_id]."""
        if gui_id in self.SMGData.keys():
            if key in self.SMGData[gui_id].keys():
                self.SMGData[gui_id][key] = a
            else:
                # Fixed message (was the garbled 'Key is gui_id does not exist ...').
                raise Exception('Key does not exist in the gui_id branch of the data structure')
        else:
            raise Exception('Gui id does not exist in the data structure')

    def load_g(self, gui_id, key):
        """Return the object stored under *key* in the (sub)dictionary SMGData[gui_id]."""
        if gui_id in self.SMGData.keys():
            if key in self.SMGData[gui_id].keys():
                return self.SMGData[gui_id][key]
            else:
                # Fixed message (was the garbled 'Key is gui_id does not exist ...').
                raise Exception('Key does not exist in the gui_id branch of the data structure')
        else:
            raise Exception('Gui id does not exist in the data structure')
|
slimpotatoes/STEM_Moire_GPA | src/manual_test_gpa.py | <reponame>slimpotatoes/STEM_Moire_GPA
# Test of GPA Module
import numpy as np
import math as math
import data as data
import gpa as gpa
import matplotlib.pyplot as plt
import statistics
# #######################################
# Test #2 in TestPlan document
# #######################################
data_test_2 = data.SMGData() # Data structure for test 2
# Generated STEM Moire hologram (256x256 pixels) using numpy arrays with a unique periodicity of 16 pixels along
# horizontal axis. (Very easy test case)
x2 = np.linspace(0, 255, 256)
y2 = np.linspace(0, 255, 256)
mx2, my2 = np.meshgrid(x2, y2)
ismh2 = np.sin(mx2 * 2 * np.pi / 16)
ft_ismh2 = np.fft.fft2(ismh2)
# Store data in datastructure
data_test_2.store('ISMHexp', ismh2)
data_test_2.store('FTISMHexp', ft_ismh2)
# Create branch for mask
data_test_2.create_branch('Mask1')
# Circle of radius 1 centered around coordinate (192, 128)
r2 = 1
center2 = (144, 128)
circle2 = center2, r2
# Store mask properties into datastructure
data_test_2.store_g('Mask1', 'Mask', circle2)
# Entering gpa
gpa.gpa('Mask1', data_test_2)
# Display data
# Input/Output data - Phase is supposed to be constant and equal to 0, anything different from 0 represents the error.
fig_test2 = plt.figure(figsize=(13, 9))
fig_test2_ax1 = fig_test2.add_subplot(2, 3, 1)
fig_test2_ax2 = fig_test2.add_subplot(2, 3, 4)
fig_test2_ax3 = fig_test2.add_subplot(2, 3, 2)
fig_test2_ax4 = fig_test2.add_subplot(2, 3, 5)
fig_test2_ax5 = fig_test2.add_subplot(2, 3, 3)
fig_test2_ax6 = fig_test2.add_subplot(2, 3, 6)
fig_test2_ax1.imshow(ismh2, cmap='gray')
fig_test2_ax1.set_title('I_SMH')
fig_test2_ax2.imshow(np.log1p(np.fft.fftshift(np.abs(ft_ismh2 ** 2))), cmap='gray')
fig_test2_ax2.set_title('Fourier Transform of I_SMH')
fig_test2_ax3.imshow(data_test_2.load_g('Mask1', 'phaseraw'), cmap='gray', vmin=-np.pi, vmax=np.pi)
fig_test2_ax3.set_title('Raw Phase')
fig_test2_ax4.imshow(data_test_2.load_g('Mask1', 'phasegM'), cmap='gray', vmin=-np.pi, vmax=np.pi)
fig_test2_ax4.set_title('Phase corrected')
fig_test2_ax5.imshow(data_test_2.load_g('Mask1', 'deltagM')[0], cmap='gray', vmin=-1, vmax=1)
fig_test2_ax5.set_title('Vertical component of Δg')
fig_test2_ax6.imshow(data_test_2.load_g('Mask1', 'deltagM')[1], cmap='gray', vmin=-1, vmax=1)
fig_test2_ax6.set_title('Horizontal component of Δg')
fig_test2.savefig('/media/alex/Work/PhD/Course/CAS 741/project/STEM_Moire_GPA/Doc/TestReport/Figures/'
'Test_2_explanation.png', dpi=300, bbox_inches='tight')
# Input/Output data in 1D
fig_test2_1d = plt.figure(figsize=(13, 9))
fig_test2_1d_ax1 = fig_test2_1d.add_subplot(2, 3, 1)
fig_test2_1d_ax2 = fig_test2_1d.add_subplot(2, 3, 4)
fig_test2_1d_ax3 = fig_test2_1d.add_subplot(2, 3, 2)
fig_test2_1d_ax4 = fig_test2_1d.add_subplot(2, 3, 5)
fig_test2_1d_ax5 = fig_test2_1d.add_subplot(2, 3, 3)
fig_test2_1d_ax6 = fig_test2_1d.add_subplot(2, 3, 6)
fig_test2_1d_ax1.plot(ismh2[128, :])
fig_test2_1d_ax1.set_title('I_SMH')
fig_test2_1d_ax2.plot(x2, np.log1p(np.fft.fftshift(np.abs(ft_ismh2 ** 2)))[128, :])
fig_test2_1d_ax2.set_title('Fourier Transform of I_SMH')
fig_test2_1d_ax3.plot(x2, data_test_2.load_g('Mask1', 'phaseraw')[128, :])
fig_test2_1d_ax3.set_ylim(-np.pi, np.pi)
fig_test2_1d_ax3.set_title('Raw Phase')
fig_test2_1d_ax4.plot(x2, data_test_2.load_g('Mask1', 'phasegM')[128, :])
fig_test2_1d_ax4.set_ylim(-np.pi, np.pi)
fig_test2_1d_ax4.set_title('Phase corrected')
fig_test2_1d_ax5.plot(x2, data_test_2.load_g('Mask1', 'deltagM')[0, 128, :])
fig_test2_1d_ax5.set_ylim(-1, 1)
fig_test2_1d_ax5.set_title('Vertical component of Δg')
fig_test2_1d_ax6.plot(x2, data_test_2.load_g('Mask1', 'deltagM')[1, 128, :])
fig_test2_1d_ax6.set_ylim(-1, 1)
fig_test2_1d_ax6.set_title('Horizontal component of Δg')
fig_test2_1d.savefig('/media/alex/Work/PhD/Course/CAS 741/project/STEM_Moire_GPA/Doc/TestReport/Figures/'
'Test_2_explanation_1D.png', dpi=300, bbox_inches='tight')
plt.show()
# -------------------- Test #2 Improvement ---------------
# periodicity in pixels: q
q = [3, 4, 4.1, 4.2, 4.5, 16, 100]
data2 = []
for periodicity in q:
ismh = np.sin(mx2 * 2 * np.pi / periodicity)
ft_ismh = np.fft.fft2(ismh)
circle = (128 + round(256 / periodicity), 128), r2
data_i = data.SMGData()
data_i.store('ISMHexp', ismh)
data_i.store('FTISMHexp', ft_ismh)
data_i.create_branch('Mask1')
data_i.store_g('Mask1', 'Mask', circle)
gpa.gpa('Mask1', data_i)
data2.append(data_i)
fig_test2_multiple_q = plt.figure(figsize=(13, 6))
fig_test2_multiple_q_ax1 = fig_test2_multiple_q.add_subplot(1, 2, 1)
fig_test2_multiple_q_ax2 = fig_test2_multiple_q.add_subplot(1, 2, 2)
fig_test2_multiple_q_ismh = plt.figure(figsize=(13, 6))
fig_test2_multiple_q_ismh_ax1 = fig_test2_multiple_q_ismh.add_subplot(1, 2, 1)
fig_test2_multiple_q_ismh_ax2 = fig_test2_multiple_q_ismh.add_subplot(1, 2, 2)
fig_test2_error_delta_g = plt.figure(figsize=(13, 9))
fig_test2_error_delta_g_ax = fig_test2_error_delta_g.add_subplot(1, 1, 1)
count = 0
mean_test_2 = []
stand_dev_test_2 = []
for elements in data2:
fig_test2_multiple_q_ax1.plot(x2, elements.load_g('Mask1', 'phasegM')[128, :], linewidth=3, label=str(q[count]))
fig_test2_multiple_q_ax2.plot(x2, elements.load_g('Mask1', 'deltagM')[1, 128, :], linewidth=3, label=str(q[count]))
fig_test2_multiple_q_ismh_ax1.plot(x2[160:180], elements.load('ISMHexp')[128, 160:180], linewidth=3,
label=str(q[count]))
fig_test2_multiple_q_ismh_ax2.plot(x2, np.log1p(np.fft.fftshift(np.abs(elements.load('FTISMHexp') ** 2)))[128, :],
linewidth=3, label=str(q[count]))
error_delta_g = np.abs(elements.load_g('Mask1', 'deltagM')[1, 128, :])
fig_test2_error_delta_g_ax.plot(x2, error_delta_g, linewidth=3, label=str(q[count]))
mean_test_2.append(statistics.mean(elements.load_g('Mask1', 'deltagM')[1, 128, :]))
stand_dev_test_2.append(statistics.stdev(elements.load_g('Mask1', 'deltagM')[1, 128, :]))
count += 1
fig_test2_multiple_q_ax1.set_ylim(-np.pi, np.pi)
fig_test2_multiple_q_ax1.legend()
fig_test2_multiple_q_ax1.set_title('Phase corrected')
fig_test2_multiple_q_ax2.set_title('Horizontal component of Δg')
fig_test2_multiple_q_ax2.set_ylim(-0.01, 0.01)
fig_test2_multiple_q_ismh_ax1.set_title('I_SMH')
fig_test2_multiple_q_ismh_ax2.set_title('Fourier Transform of I_SMH')
fig_test2_multiple_q_ismh_ax2.legend()
fig_test2_error_delta_g_ax.set_ylim(0, 0.0025)
fig_test2_error_delta_g_ax.legend()
fig_test2_error_delta_g_ax.set_title('Error of the horizontal component of Δg ')
fig_test2_multiple_q.savefig(
'/media/alex/Work/PhD/Course/CAS 741/project/STEM_Moire_GPA/Doc/TestReport/Figures/Test_2_test_results.png',
dpi=300, bbox_inches='tight')
fig_test2_multiple_q_ismh.savefig(
'/media/alex/Work/PhD/Course/CAS 741/project/STEM_Moire_GPA/Doc/TestReport/Figures/Test_2_test_cases.png',
dpi=300, bbox_inches='tight')
fig_test2_error_delta_g.savefig(
'/media/alex/Work/PhD/Course/CAS 741/project/STEM_Moire_GPA/Doc/TestReport/Figures/Test_2_test_error.png',
dpi=300, bbox_inches='tight')
plt.show()
print('Mean test 2 = ', mean_test_2)
print('StDev test 2 = ', stand_dev_test_2)
# -------------------- Testing the vertical direction -------------------
data_test_2_v = data.SMGData() # Data structure for test 2
# Generated STEM Moire hologram (256x256 pixels) using numpy arrays with a unique periodicity of 16 pixels along
# horizontal axis. (Very easy test case)
x2_v = np.linspace(0, 255, 256)
y2_v = np.linspace(0, 255, 256)
mx2_v, my2_v = np.meshgrid(x2_v, y2_v)
ismh2_v = np.sin(my2_v * 2 * np.pi / 16)
ft_ismh2_v = np.fft.fft2(ismh2_v)
# Store data in datastructure
data_test_2_v.store('ISMHexp', ismh2_v)
data_test_2_v.store('FTISMHexp', ft_ismh2_v)
# Create branch for mask
data_test_2_v.create_branch('Mask1')
# Circle of radius 1 centered around coordinate (192, 128)
r2_v = 1
center2_v = (128, 144)
circle2_v = center2_v, r2_v
# Store mask properties into datastructure
data_test_2_v.store_g('Mask1', 'Mask', circle2_v)
# Entering gpa
gpa.gpa('Mask1', data_test_2_v)
# Display data
# Input/Output data - Phase is supposed to be constant and equal to 0, anything different from 0 represents the error.
fig_test2_v = plt.figure(figsize=(13, 9))
fig_test2_ax1_v = fig_test2_v.add_subplot(2, 3, 1)
fig_test2_ax2_v = fig_test2_v.add_subplot(2, 3, 4)
fig_test2_ax3_v = fig_test2_v.add_subplot(2, 3, 2)
fig_test2_ax4_v = fig_test2_v.add_subplot(2, 3, 5)
fig_test2_ax5_v = fig_test2_v.add_subplot(2, 3, 3)
fig_test2_ax6_v = fig_test2_v.add_subplot(2, 3, 6)
fig_test2_ax1_v.imshow(ismh2_v, cmap='gray')
fig_test2_ax1_v.set_title('I_SMH')
fig_test2_ax2_v.imshow(np.log1p(np.fft.fftshift(np.abs(ft_ismh2_v ** 2))), cmap='gray')
fig_test2_ax2_v.set_title('Fourier Transform of I_SMH')
fig_test2_ax3_v.imshow(data_test_2_v.load_g('Mask1', 'phaseraw'), cmap='gray', vmin=-np.pi, vmax=np.pi)
fig_test2_ax3_v.set_title('Raw Phase')
fig_test2_ax4_v.imshow(data_test_2_v.load_g('Mask1', 'phasegM'), cmap='gray', vmin=-np.pi, vmax=np.pi)
fig_test2_ax4_v.set_title('Phase corrected')
fig_test2_ax5_v.imshow(data_test_2_v.load_g('Mask1', 'deltagM')[0], cmap='gray', vmin=-1, vmax=1)
fig_test2_ax5_v.set_title('Vertical component of Δg')
fig_test2_ax6_v.imshow(data_test_2_v.load_g('Mask1', 'deltagM')[1], cmap='gray', vmin=-1, vmax=1)
fig_test2_ax6_v.set_title('Horizontal component of Δg')
fig_test2_v.savefig(
'/media/alex/Work/PhD/Course/CAS 741/project/STEM_Moire_GPA/Doc/TestReport/Figures/Test_2_v_explanation.png',
dpi=300, bbox_inches='tight')
# Input/Output data in 1D
fig_test2_1d_v = plt.figure(figsize=(13, 9))
fig_test2_1d_ax1_v = fig_test2_1d_v.add_subplot(2, 3, 1)
fig_test2_1d_ax2_v = fig_test2_1d_v.add_subplot(2, 3, 4)
fig_test2_1d_ax3_v = fig_test2_1d_v.add_subplot(2, 3, 2)
fig_test2_1d_ax4_v = fig_test2_1d_v.add_subplot(2, 3, 5)
fig_test2_1d_ax5_v = fig_test2_1d_v.add_subplot(2, 3, 3)
fig_test2_1d_ax6_v = fig_test2_1d_v.add_subplot(2, 3, 6)
fig_test2_1d_ax1_v.plot(ismh2_v[:, 128])
fig_test2_1d_ax1_v.set_title('I_SMH')
fig_test2_1d_ax2_v.plot(y2_v, np.log1p(np.fft.fftshift(np.abs(ft_ismh2_v ** 2)))[:, 128])
fig_test2_1d_ax2_v.set_title('Fourier Transform of I_SMH')
fig_test2_1d_ax3_v.plot(y2_v, data_test_2_v.load_g('Mask1', 'phaseraw')[:, 128])
fig_test2_1d_ax3_v.set_ylim(-np.pi, np.pi)
fig_test2_1d_ax3_v.set_title('Raw Phase')
fig_test2_1d_ax4_v.plot(y2_v, data_test_2_v.load_g('Mask1', 'phasegM')[:, 128])
fig_test2_1d_ax4_v.set_ylim(-np.pi, np.pi)
fig_test2_1d_ax4_v.set_title('Phase corrected')
fig_test2_1d_ax5_v.plot(y2_v, data_test_2_v.load_g('Mask1', 'deltagM')[0, :, 128])
fig_test2_1d_ax5_v.set_ylim(-1, 1)
fig_test2_1d_ax5_v.set_title('Vertical component of Δg')
fig_test2_1d_ax6_v.plot(y2_v, data_test_2_v.load_g('Mask1', 'deltagM')[1, :, 128])
fig_test2_1d_ax6_v.set_ylim(-1, 1)
fig_test2_1d_ax6_v.set_title('Horizontal component of Δg')
fig_test2_1d_v.savefig(
'/media/alex/Work/PhD/Course/CAS 741/project/STEM_Moire_GPA/Doc/TestReport/Figures/Test_2_v_explanation_1D.png',
dpi=300, bbox_inches='tight')
plt.show()
# -------------------- Test #2 Improvement ---------------
# periodicity in pixels: q
q_v = [3, 4, 4.1, 4.2, 4.5, 16, 100]
data2_v = []
for periodicity in q_v:
ismh_v = np.sin(my2_v * 2 * np.pi / periodicity)
ft_ismh_v = np.fft.fft2(ismh_v)
circle_v = (128, 128 + round(256 / periodicity)), r2_v
data_i_v = data.SMGData()
data_i_v.store('ISMHexp', ismh_v)
data_i_v.store('FTISMHexp', ft_ismh_v)
data_i_v.create_branch('Mask1')
data_i_v.store_g('Mask1', 'Mask', circle_v)
gpa.gpa('Mask1', data_i_v)
data2_v.append(data_i_v)
fig_test2_multiple_q_v = plt.figure(figsize=(13, 6))
fig_test2_multiple_q_ax1_v = fig_test2_multiple_q_v.add_subplot(1, 2, 1)
fig_test2_multiple_q_ax2_v = fig_test2_multiple_q_v.add_subplot(1, 2, 2)
fig_test2_multiple_q_ismh_v = plt.figure(figsize=(13, 6))
fig_test2_multiple_q_ismh_ax1_v = fig_test2_multiple_q_ismh_v.add_subplot(1, 2, 1)
fig_test2_multiple_q_ismh_ax2_v = fig_test2_multiple_q_ismh_v.add_subplot(1, 2, 2)
fig_test2_error_delta_g_v = plt.figure(figsize=(13, 9))
fig_test2_error_delta_g_ax_v = fig_test2_error_delta_g_v.add_subplot(1, 1, 1)
count = 0
mean_test_2_v = []
stand_dev_test_2_v = []
for elements in data2_v:
fig_test2_multiple_q_ax1_v.plot(y2_v, elements.load_g('Mask1', 'phasegM')[:, 128],
linewidth=3, label=str(q[count]))
fig_test2_multiple_q_ax2_v.plot(y2_v, elements.load_g('Mask1', 'deltagM')[0, :, 128],
linewidth=3, label=str(q[count]))
fig_test2_multiple_q_ismh_ax1_v.plot(y2_v[160:180], elements.load('ISMHexp')[160:180, 128],
linewidth=3, label=str(q_v[count]))
fig_test2_multiple_q_ismh_ax2_v.plot(y2_v,
np.log1p(np.fft.fftshift(np.abs(elements.load('FTISMHexp') ** 2)))[:, 128],
linewidth=3, label=str(q_v[count]))
error_delta_g_v = np.abs(elements.load_g('Mask1', 'deltagM')[0, :, 128])
fig_test2_error_delta_g_ax_v.plot(y2_v, error_delta_g_v, linewidth=3, label=str(q_v[count]))
mean_test_2_v.append(statistics.mean(elements.load_g('Mask1', 'deltagM')[0, :, 128]))
stand_dev_test_2_v.append(statistics.stdev(elements.load_g('Mask1', 'deltagM')[0, :, 128]))
count += 1
fig_test2_multiple_q_ax1_v.set_ylim(-np.pi, np.pi)
fig_test2_multiple_q_ax1_v.legend()
fig_test2_multiple_q_ax1_v.set_title('Phase corrected')
fig_test2_multiple_q_ax2_v.set_title('Vertical component of Δg')
fig_test2_multiple_q_ax2_v.set_ylim(-0.01, 0.01)
fig_test2_multiple_q_ismh_ax1_v.set_title('I_SMH')
fig_test2_multiple_q_ismh_ax2_v.set_title('Fourier Transform of I_SMH')
fig_test2_multiple_q_ismh_ax2_v.legend()
fig_test2_error_delta_g_ax_v.set_ylim(0, 0.0025)
fig_test2_error_delta_g_ax_v.legend()
fig_test2_error_delta_g_ax_v.set_title('Error of the vertical component of Δg ')
fig_test2_multiple_q_v.savefig(
'/media/alex/Work/PhD/Course/CAS 741/project/STEM_Moire_GPA/Doc/TestReport/Figures/Test_2_v_test_results.png',
dpi=300, bbox_inches='tight')
fig_test2_multiple_q_ismh_v.savefig(
'/media/alex/Work/PhD/Course/CAS 741/project/STEM_Moire_GPA/Doc/TestReport/Figures/Test_2_v_test_cases.png',
dpi=300, bbox_inches='tight')
fig_test2_error_delta_g_v.savefig(
'/media/alex/Work/PhD/Course/CAS 741/project/STEM_Moire_GPA/Doc/TestReport/Figures/Test_2_v_test_error.png',
dpi=300, bbox_inches='tight')
plt.show()
print('Mean test 2 = ', mean_test_2_v)
print('StDev test 2 = ', stand_dev_test_2_v)
# #######################################
# Test #3 in TestPlan document
# #######################################
data_test_3 = data.SMGData()
# STEM Moire hologram with a unique periodicity of 4 pixels along the horizontal axis on half of the image and a
# periodicity of 4.5 pixels along the same axis on the second half of the image (easy case). This leads to a
# 'delta g' of 1/4 - 1/4.5 = 0.0277778.
x3a = np.linspace(0, 127, 128)   # columns of the unstrained (period 4) left half
x3b = np.linspace(128, 255, 128)  # columns of the strained (period 4.5) right half
y3 = np.linspace(0, 255, 256)
mx3a, my3a = np.meshgrid(x3a, y3)
mx3b, my3b = np.meshgrid(x3b, y3)
ismh3a = np.sin(mx3a * 2 * np.pi / 4)
ismh3b = np.sin(mx3b * 2 * np.pi / 4.5)
# Full 256x256 hologram: unstrained left half next to strained right half.
ismh3 = np.concatenate((ismh3a, ismh3b), axis=1)
ft_ismh3 = np.fft.fft2(ismh3)
# Store data in datastructure
data_test_3.store('ISMHexp', ismh3)
data_test_3.store('FTISMHexp', ft_ismh3)
# Create branch for mask
data_test_3.create_branch('Mask1')
# Circle of radius R centered around coordinate (192, 128), i.e. the g spot of the period-4 reference
r3 = 20
center3 = (192, 128)
circle3 = center3, r3
# Store mask properties into datastructure
data_test_3.store_g('Mask1', 'Mask', circle3)
# Entering gpa
gpa.gpa('Mask1', data_test_3)
# Display data
# Input/Output data - Phase is supposed to be constant and equal to 0, anything different from 0 represents the error.
fig_test3 = plt.figure(figsize=(13, 9))
fig_test3_ax1 = fig_test3.add_subplot(2, 3, 1)
fig_test3_ax2 = fig_test3.add_subplot(2, 3, 4)
fig_test3_ax3 = fig_test3.add_subplot(2, 3, 2)
fig_test3_ax4 = fig_test3.add_subplot(2, 3, 5)
fig_test3_ax5 = fig_test3.add_subplot(2, 3, 3)
fig_test3_ax6 = fig_test3.add_subplot(2, 3, 6)
fig_test3_ax1.imshow(ismh3, cmap='gray')
fig_test3_ax2.imshow(np.log1p(np.fft.fftshift(np.abs(ft_ismh3 ** 2))), cmap='gray')
fig_test3_ax3.imshow(data_test_3.load_g('Mask1', 'phaseraw'), cmap='gray', vmin=-np.pi, vmax=np.pi)
fig_test3_ax4.imshow(data_test_3.load_g('Mask1', 'phasegM'), cmap='gray', vmin=-np.pi, vmax=np.pi)
fig_test3_ax5.imshow(data_test_3.load_g('Mask1', 'deltagM')[0], cmap='gray', vmin=-1, vmax=1)
fig_test3_ax6.imshow(data_test_3.load_g('Mask1', 'deltagM')[1], cmap='gray', vmin=-1, vmax=1)
fig_test3_ax1.set_title('I_SMH')
fig_test3_ax2.set_title('Fourier Transform of I_SMH')
fig_test3_ax3.set_title('Raw Phase')
fig_test3_ax4.set_title('Phase corrected')
fig_test3_ax5.set_title('Vertical component of Δg')
fig_test3_ax6.set_title('Horizontal component of Δg')
fig_test3.savefig(
    '/media/alex/Work/PhD/Course/CAS 741/project/STEM_Moire_GPA/Doc/TestReport/Figures/Test_3_explanation.png',
    dpi=300, bbox_inches='tight')
# Input/Output data in 1D: profiles along the central row (row 128).
# NOTE: x2 is the 256-point abscissa defined earlier in this script.
fig_test3_1d = plt.figure(figsize=(13, 9))
fig_test3_1d_ax1 = fig_test3_1d.add_subplot(2, 3, 1)
fig_test3_1d_ax2 = fig_test3_1d.add_subplot(2, 3, 4)
fig_test3_1d_ax3 = fig_test3_1d.add_subplot(2, 3, 2)
fig_test3_1d_ax4 = fig_test3_1d.add_subplot(2, 3, 5)
fig_test3_1d_ax5 = fig_test3_1d.add_subplot(2, 3, 3)
fig_test3_1d_ax6 = fig_test3_1d.add_subplot(2, 3, 6)
fig_test3_1d_ax1.plot(x2[50:206], ismh3[128, 50:206])
fig_test3_1d_ax2.plot(x2, np.log1p(np.fft.fftshift(np.abs(ft_ismh3 ** 2)))[128, :])
fig_test3_1d_ax3.plot(x2, data_test_3.load_g('Mask1', 'phaseraw')[128, :])
fig_test3_1d_ax3.set_ylim(-np.pi, np.pi)
fig_test3_1d_ax4.plot(x2, data_test_3.load_g('Mask1', 'phasegM')[128, :])
fig_test3_1d_ax4.set_ylim(-np.pi, np.pi)
fig_test3_1d_ax5.plot(x2, data_test_3.load_g('Mask1', 'deltagM')[0, 128, :])
fig_test3_1d_ax5.set_ylim(-0.1, 0.1)
fig_test3_1d_ax6.plot(x2, data_test_3.load_g('Mask1', 'deltagM')[1, 128, :])
fig_test3_1d_ax6.set_ylim(-0.1, 0.1)
fig_test3_1d_ax1.set_title('I_SMH')
fig_test3_1d_ax2.set_title('Fourier Transform of I_SMH')
fig_test3_1d_ax3.set_title('Raw Phase')
fig_test3_1d_ax4.set_title('Phase corrected')
fig_test3_1d_ax5.set_title('Vertical component of Δg')
fig_test3_1d_ax6.set_title('Horizontal component of Δg')
fig_test3_1d.savefig(
    '/media/alex/Work/PhD/Course/CAS 741/project/STEM_Moire_GPA/Doc/TestReport/Figures/Test_3_explanation_1D.png',
    dpi=300, bbox_inches='tight')
plt.show()
# -------------------- Test #3 Improvement ---------------
# Different strain levels: dg = 1/4 - 1/q, the expected horizontal Δg for each tested periodicity q
dg = [1/4 - 1/3, 1/4 - 1/3.8, 1/4 - 1/3.9, 1/4 - 1/4, 1/4 - 1/4.1, 1/4 - 1/4.2, 1/4 - 1/5]
# Different strain periodicities (in pixels) for the right half of the hologram
dq = [3, 3.8, 3.9, 4, 4.1, 4.2, 5]
data3 = []
for strain in dq:
    # Rebuild the hologram with a strained right half of periodicity `strain`
    ismhb = np.sin(mx3b * 2 * np.pi / strain)
    ismh = np.concatenate((ismh3a, ismhb), axis=1)
    ft_ismh = np.fft.fft2(ismh)
    # Mask centered on the reference g spot (column 128 + 256/4 = 192, row 128)
    circle = (128 + round(256 / 4), 128), r3
    print(circle)
    data_i = data.SMGData()
    data_i.store('ISMHexp', ismh)
    data_i.store('FTISMHexp', ft_ismh)
    data_i.create_branch('Mask1')
    data_i.store_g('Mask1', 'Mask', circle)
    gpa.gpa('Mask1', data_i)
    data3.append(data_i)
fig_test3_multiple_dq = plt.figure(figsize=(13, 6))
fig_test3_multiple_dq_ax1 = fig_test3_multiple_dq.add_subplot(1, 2, 1)
fig_test3_multiple_dq_ax2 = fig_test3_multiple_dq.add_subplot(1, 2, 2)
count = 0
strain_values = []
for elements in data3:
    # Phase and horizontal Δg profiles along the central row (row 128) for each periodicity
    fig_test3_multiple_dq_ax1.plot(x2[100:200], elements.load_g('Mask1', 'phasegM')[128, 100:200],
                                   linewidth=3, label=str(dq[count]))
    fig_test3_multiple_dq_ax2.plot(x2, elements.load_g('Mask1', 'deltagM')[1, 128, :],
                                   linewidth=3, label=str(dq[count]))
    # Sample Δg inside the strained half (column 200) as the measured strain value
    strain_value = elements.load_g('Mask1', 'deltagM')[1, 128, 200]
    strain_values.append(strain_value)
    count += 1
fig_test3_multiple_dq_ax1.set_ylim(-np.pi, np.pi)
fig_test3_multiple_dq_ax1.legend()
fig_test3_multiple_dq_ax1.set_title('Raw phase')
fig_test3_multiple_dq_ax2.set_title('Horizontal component of Δg')
fig_test3_multiple_dq_ax2.set_ylim(-0.1, 0.1)
fig_test3_multiple_dq.savefig(
    '/media/alex/Work/PhD/Course/CAS 741/project/STEM_Moire_GPA/Doc/TestReport/Figures/Test_3_test_results.png',
    dpi=300, bbox_inches='tight')
# Compare the measured strain against the analytic dg values
print('dg = ', dg)
print('strain = ', strain_values)
print('error = ', np.abs(np.array(dg)-np.array(strain_values)))
plt.show()
# ----------------------- Test vertical component ----------------------
data_test_3_v = data.SMGData()
# Same hologram as the horizontal Test #3, but the periodicity runs along the VERTICAL axis:
# period 4 pixels in the top half, 4.5 pixels in the bottom half. Expected 'delta g' of
# 1/4 - 1/4.5 = 0.0277778.
y3a_v = np.linspace(0, 127, 128)   # rows of the unstrained (period 4) top half
y3b_v = np.linspace(128, 255, 128)  # rows of the strained (period 4.5) bottom half
x3_v = np.linspace(0, 255, 256)
mx3a_v, my3a_v = np.meshgrid(x3_v, y3a_v)
mx3b_v, my3b_v = np.meshgrid(x3_v, y3b_v)
ismh3a_v = np.sin(my3a_v * 2 * np.pi / 4)
ismh3b_v = np.sin(my3b_v * 2 * np.pi / 4.5)
# Full hologram: halves stacked along the vertical axis (axis=0).
ismh3_v = np.concatenate((ismh3a_v, ismh3b_v), axis=0)
ft_ismh3_v = np.fft.fft2(ismh3_v)
# Store data in datastructure
data_test_3_v.store('ISMHexp', ismh3_v)
data_test_3_v.store('FTISMHexp', ft_ismh3_v)
# Create branch for mask
data_test_3_v.create_branch('Mask1')
# Circle of radius R centered around coordinate (128, 192), the vertical g spot
r3_v = 20
center3_v = (128, 192)
circle3_v = center3_v, r3_v
# Store mask properties into datastructure
data_test_3_v.store_g('Mask1', 'Mask', circle3_v)
# Entering gpa
gpa.gpa('Mask1', data_test_3_v)
# Display data
# Input/Output data - Phase is supposed to be constant and equal to 0, anything different from 0 represents the error.
fig_test3_v = plt.figure(figsize=(13, 9))
fig_test3_ax1_v = fig_test3_v.add_subplot(2, 3, 1)
fig_test3_ax2_v = fig_test3_v.add_subplot(2, 3, 4)
fig_test3_ax3_v = fig_test3_v.add_subplot(2, 3, 2)
fig_test3_ax4_v = fig_test3_v.add_subplot(2, 3, 5)
fig_test3_ax5_v = fig_test3_v.add_subplot(2, 3, 3)
fig_test3_ax6_v = fig_test3_v.add_subplot(2, 3, 6)
fig_test3_ax1_v.imshow(ismh3_v, cmap='gray')
fig_test3_ax2_v.imshow(np.log1p(np.fft.fftshift(np.abs(ft_ismh3_v ** 2))), cmap='gray')
fig_test3_ax3_v.imshow(data_test_3_v.load_g('Mask1', 'phaseraw'), cmap='gray', vmin=-np.pi, vmax=np.pi)
fig_test3_ax4_v.imshow(data_test_3_v.load_g('Mask1', 'phasegM'), cmap='gray', vmin=-np.pi, vmax=np.pi)
fig_test3_ax5_v.imshow(data_test_3_v.load_g('Mask1', 'deltagM')[0], cmap='gray', vmin=-1, vmax=1)
fig_test3_ax6_v.imshow(data_test_3_v.load_g('Mask1', 'deltagM')[1], cmap='gray', vmin=-1, vmax=1)
fig_test3_ax1_v.set_title('I_SMH')
fig_test3_ax2_v.set_title('Fourier Transform of I_SMH')
fig_test3_ax3_v.set_title('Raw Phase')
fig_test3_ax4_v.set_title('Phase corrected')
fig_test3_ax5_v.set_title('Vertical component of Δg')
fig_test3_ax6_v.set_title('Horizontal component of Δg')
fig_test3_v.savefig(
    '/media/alex/Work/PhD/Course/CAS 741/project/STEM_Moire_GPA/Doc/TestReport/Figures/Test_3_v_explanation.png',
    dpi=300, bbox_inches='tight')
# Input/Output data in 1D: profiles along the central column (column 128).
fig_test3_1d_v = plt.figure(figsize=(13, 9))
fig_test3_1d_ax1_v = fig_test3_1d_v.add_subplot(2, 3, 1)
fig_test3_1d_ax2_v = fig_test3_1d_v.add_subplot(2, 3, 4)
fig_test3_1d_ax3_v = fig_test3_1d_v.add_subplot(2, 3, 2)
fig_test3_1d_ax4_v = fig_test3_1d_v.add_subplot(2, 3, 5)
fig_test3_1d_ax5_v = fig_test3_1d_v.add_subplot(2, 3, 3)
fig_test3_1d_ax6_v = fig_test3_1d_v.add_subplot(2, 3, 6)
fig_test3_1d_ax1_v.plot(x2[50:206], ismh3_v[50:206, 128])
fig_test3_1d_ax2_v.plot(x2, np.log1p(np.fft.fftshift(np.abs(ft_ismh3_v ** 2)))[:, 128])
fig_test3_1d_ax3_v.plot(x2, data_test_3_v.load_g('Mask1', 'phaseraw')[:, 128])
fig_test3_1d_ax3_v.set_ylim(-np.pi, np.pi)
fig_test3_1d_ax4_v.plot(x2, data_test_3_v.load_g('Mask1', 'phasegM')[:, 128])
fig_test3_1d_ax4_v.set_ylim(-np.pi, np.pi)
fig_test3_1d_ax5_v.plot(x2, data_test_3_v.load_g('Mask1', 'deltagM')[0, :, 128])
fig_test3_1d_ax5_v.set_ylim(-0.1, 0.1)
fig_test3_1d_ax6_v.plot(x2, data_test_3_v.load_g('Mask1', 'deltagM')[1, :, 128])
fig_test3_1d_ax6_v.set_ylim(-0.1, 0.1)
fig_test3_1d_ax1_v.set_title('I_SMH')
fig_test3_1d_ax2_v.set_title('Fourier Transform of I_SMH')
fig_test3_1d_ax3_v.set_title('Raw Phase')
fig_test3_1d_ax4_v.set_title('Phase corrected')
fig_test3_1d_ax5_v.set_title('Vertical component of Δg')
fig_test3_1d_ax6_v.set_title('Horizontal component of Δg')
fig_test3_1d_v.savefig(
    '/media/alex/Work/PhD/Course/CAS 741/project/STEM_Moire_GPA/Doc/TestReport/Figures/Test_3_v_explanation_1D.png',
    dpi=300, bbox_inches='tight')
# -------------------- Test #3 Improvement (vertical component) ---------------
# FIX: the original block was a verbatim copy of the horizontal improvement above
# (it re-used mx3b/ismh3a, the horizontal mask (192, 128), row-128 slices of
# deltagM[1], and overwrote Test_3_test_results.png). This section sits under
# "Test vertical component", so the vertical hologram, mask, slices and output
# file are used instead.
# Different strain levels: dg = 1/4 - 1/q, the expected vertical Δg for each tested periodicity q
dg = [1/4 - 1/3, 1/4 - 1/3.8, 1/4 - 1/3.9, 1/4 - 1/4, 1/4 - 1/4.1, 1/4 - 1/4.2, 1/4 - 1/5]
# Different strain periodicities (in pixels) for the bottom half of the hologram
dq = [3, 3.8, 3.9, 4, 4.1, 4.2, 5]
data3_v = []
for strain in dq:
    # Rebuild the hologram with a strained bottom half of periodicity `strain`
    ismhb_v = np.sin(my3b_v * 2 * np.pi / strain)
    ismh_v = np.concatenate((ismh3a_v, ismhb_v), axis=0)
    ft_ismh_v = np.fft.fft2(ismh_v)
    # Mask centered on the vertical reference g spot (column 128, row 128 + 256/4 = 192)
    circle_v = (128, 128 + round(256 / 4)), r3_v
    print(circle_v)
    data_i = data.SMGData()
    data_i.store('ISMHexp', ismh_v)
    data_i.store('FTISMHexp', ft_ismh_v)
    data_i.create_branch('Mask1')
    data_i.store_g('Mask1', 'Mask', circle_v)
    gpa.gpa('Mask1', data_i)
    data3_v.append(data_i)
fig_test3_multiple_dq_v = plt.figure(figsize=(13, 6))
fig_test3_multiple_dq_ax1_v = fig_test3_multiple_dq_v.add_subplot(1, 2, 1)
fig_test3_multiple_dq_ax2_v = fig_test3_multiple_dq_v.add_subplot(1, 2, 2)
count = 0
strain_values_v = []
for elements in data3_v:
    # Phase and vertical Δg profiles along the central column (column 128)
    fig_test3_multiple_dq_ax1_v.plot(x2[100:200], elements.load_g('Mask1', 'phasegM')[100:200, 128],
                                     linewidth=3, label=str(dq[count]))
    fig_test3_multiple_dq_ax2_v.plot(x2, elements.load_g('Mask1', 'deltagM')[0, :, 128],
                                     linewidth=3, label=str(dq[count]))
    # Sample Δg inside the strained half (row 200) as the measured strain value
    strain_value = elements.load_g('Mask1', 'deltagM')[0, 200, 128]
    strain_values_v.append(strain_value)
    count += 1
fig_test3_multiple_dq_ax1_v.set_ylim(-np.pi, np.pi)
fig_test3_multiple_dq_ax1_v.legend()
fig_test3_multiple_dq_ax1_v.set_title('Raw phase')
fig_test3_multiple_dq_ax2_v.set_title('Vertical component of Δg')
fig_test3_multiple_dq_ax2_v.set_ylim(-0.1, 0.1)
fig_test3_multiple_dq_v.savefig(
    '/media/alex/Work/PhD/Course/CAS 741/project/STEM_Moire_GPA/Doc/TestReport/Figures/Test_3_v_test_results.png',
    dpi=300, bbox_inches='tight')
# Compare the measured strain against the analytic dg values
print('dg = ', dg)
print('strain = ', strain_values_v)
print('error = ', np.abs(np.array(dg) - np.array(strain_values_v)))
plt.show()
# #######################################
# Test #4 in TestPlan document
# #######################################
# Same hologram as Test #3, but the GPA is repeated for a range of mask radii
# to study the influence of the mask size on the phase/Δg maps.
data_test_4 = data.SMGData()
# STEM Moire hologram with a unique periodicity of 4 pixels along the horizontal axis on half of the image and a
# periodicity of 4.5 pixels along the same axis on the second half of the image (easy case). This leads to a
# 'delta g' of 1/4 - 1/4.5 = 0.0277778.
x4a = np.linspace(0, 127, 128)
x4b = np.linspace(128, 255, 128)
y4 = np.linspace(0, 255, 256)
mx4a, my4a = np.meshgrid(x4a, y4)
mx4b, my4b = np.meshgrid(x4b, y4)
ismh4a = np.sin(mx4a * 2 * np.pi / 4)
ismh4b = np.sin(mx4b * 2 * np.pi / 4.5)
ismh4 = np.concatenate((ismh4a, ismh4b), axis=1)
ft_ismh4 = np.fft.fft2(ismh4)
# Store data in datastructure
data_test_4.store('ISMHexp', ismh4)
data_test_4.store('FTISMHexp', ft_ismh4)
# Mask parameters: radii under test, all centered on the reference g spot
r4 = [1, 2, 4, 8, 16, 32, 64, 128, 256]
center4 = (192, 128)
# Create the amount of branches corresponding to the radii tested and store the mask properties
for radius in r4:
    data_test_4.create_branch(str(radius))
    circle4 = center4, radius
    data_test_4.store_g(str(radius), 'Mask', circle4)
# Looping on the various radii into gpa
for radius in r4:
    gpa.gpa(str(radius), data_test_4)
count = 1
fig_test4 = plt.figure()
fig_test4_1d = plt.figure()
fig_test4_deltag_1d = plt.figure()
# Square subplot grid just large enough to hold one panel per radius
# (NOTE: 'N_sublpot' typo kept — renaming is out of scope for a doc pass)
N_sublpot = math.ceil(math.sqrt(len(r4)))
print(math.ceil(math.sqrt(len(r4))))
for element in r4:
    # 2D corrected phase, 1D phase profile and 1D horizontal Δg profile (row 128) per radius
    fig_test4.add_subplot(N_sublpot, N_sublpot, count).imshow(data_test_4.load_g(
        str(element), 'phasegM'), cmap='gray', vmin=-np.pi, vmax=np.pi)
    fig_ax = fig_test4_1d.add_subplot(N_sublpot, N_sublpot, count)
    fig_ax.plot(x2, data_test_4.load_g(str(element), 'phasegM')[128, :])
    fig_ax.set_ylim(-np.pi, np.pi)
    fig_ax_g = fig_test4_deltag_1d.add_subplot(N_sublpot, N_sublpot, count)
    fig_ax_g.plot(x2, data_test_4.load_g(
        str(element), 'deltagM')[1, 128, :])
    fig_ax_g.set_ylim(-0.1, 0.1)
    count += 1
plt.show()
|
slimpotatoes/STEM_Moire_GPA | src/smhsimulation.py | # STEM Moire hologram simulation module
import numpy as np
import data as data
import math as math
from scipy.ndimage.interpolation import shift
def smh_sim(datastruct):
    """Simulate the Fourier transform of the STEM Moire hologram from the reference image.

    Reads 'ISMHexp', 'ICref', 'p' and 'pref' from *datastruct*; stores
    'FTISMHexp', 'FTISMHsim' (central tile-sized crop of the simulated FT) and
    'FTISMHsimDisplay' (the individual tiles) back into it. Returns None.
    Assumes a square field of view (only shape[0] is used for sizing) — confirm
    against callers before feeding non-square references.
    """
    # Compute and store the FT of the experimental hologram for later use.
    data.SMGData.store(datastruct, 'FTISMHexp', np.fft.fft2(data.SMGData.load(datastruct, 'ISMHexp')))
    # Power spectrum of the reference image, centered with fftshift.
    ft_ic = np.fft.fftshift(np.abs(np.fft.fft2(data.SMGData.load(datastruct, 'ICref')) ** 2))
    p = data.SMGData.load(datastruct, 'p')
    pref = data.SMGData.load(datastruct, 'pref')
    n_lim = int(math.floor(p / pref))   # number of tiles per axis
    fov = ft_ic.shape[0]
    tile = int(pref / p * fov)          # tile edge length in pixels
    # Keep an odd tile count so the tiling stays centered on the FT origin.
    if n_lim % 2 == 0:
        n_lim -= 1
    initial_coordinate = (int(0.5 * (fov - n_lim * tile)), int(0.5 * (fov - n_lim * tile)))
    ft_ic_square = np.ndarray((n_lim, n_lim, tile, tile))  # fully overwritten below
    # BUG FIX: np.ndarray(shape) returns an UNINITIALIZED buffer; the +=
    # accumulation below would add onto garbage. Start from zeros instead.
    ft_ismh_sim = np.zeros(ft_ic.shape)
    # Print statements to inform the user — the loop can be slow.
    print('Shape in pixel of reference: ', ft_ismh_sim.shape)
    print('Shape in pixel of the tile: ', ft_ic_square.shape)
    print('Tile size (in pixel): ', tile)
    print('Number of tiles: ', n_lim ** 2)
    print('Please wait the calculation can take some time !!!')
    # Simulation: superpose tile-shifted copies of the reference power spectrum
    # (the STEM Moire hologram equation) and record each tile for display.
    for i in range(0, n_lim):
        for j in range(0, n_lim):
            ft_ismh_sim += shift(ft_ic, [int(i - 0.5 * n_lim) * tile, int(j - 0.5 * n_lim) * tile],
                                 cval=0, order=0, prefilter=False)
            a = int(initial_coordinate[0] + i * tile)
            b = a + tile
            c = int(initial_coordinate[0] + j * tile)
            d = c + tile
            ft_ic_square[i][j] = ft_ic[a:b, c:d]
    print('Simulation done')
    # Store the central tile-sized crop of the simulated FT plus the tiles.
    data.SMGData.store(datastruct, 'FTISMHsim', ft_ismh_sim[
        int(0.5*(ft_ismh_sim.shape[0] - tile)):int(0.5*(ft_ismh_sim.shape[0] + tile)),
        int(0.5*(ft_ismh_sim.shape[0] - tile)):int(0.5*(ft_ismh_sim.shape[0] + tile))])
    data.SMGData.store(datastruct, 'FTISMHsimDisplay', ft_ic_square)
|
slimpotatoes/STEM_Moire_GPA | src/mask.py | <reponame>slimpotatoes/STEM_Moire_GPA
# Mask Module
import numpy as np
def mask_classic(center, r, shape):
"""Return the mask function in the image space I defined by shape (see MIS). The classic mask takes the center of a
circle center = (xc,yc) and its radius r and put 1 if the image space is inside the circle and 0 outside. In
addition, the center of the mask is used to return g_0 that correspond to a first estimation of the unstrained
reference."""
"""Do not forget event coordinate (x,y) from matplotlib should be switched compared to numpy array indexing"""
g_0 = np.array([(center[1] - 0.5 * shape[0]) / shape[0] * np.ones(shape),
(center[0] - 0.5 * shape[1]) / shape[1] * np.ones(shape)])
mask = np.ndarray(shape=shape)
"""Do not forget event coordinate (x,y) from matplotlib should be switched compared to numpy array indexing"""
for i in range(0, shape[1]):
for j in range(0, shape[0]):
if ((i - center[1]) ** 2 + (j - center[0]) ** 2) < (r ** 2):
mask[i, j] = 1
else:
mask[i, j] = 0
return mask, g_0
def mask_gaussian(center, r, shape):
    """Return the mask function in the image space I defined by shape (see MIS).

    The Gaussian mask takes the center of a circle center = (xc, yc) and its
    radius r to generate a 2D gaussian centered on the circle. The center is
    also used to return g_0, the unstrained reference estimate. Returns
    (mask, g_0) where mask has shape ``shape`` and g_0 shape ``(2,) + shape``.
    """
    # Event coordinates (x, y) from matplotlib are swapped compared to numpy
    # array (row, col) indexing: center[1] pairs with rows, center[0] with cols.
    g_0 = np.array([(center[1] - 0.5 * shape[0]) / shape[0] * np.ones(shape),
                    (center[0] - 0.5 * shape[1]) / shape[1] * np.ones(shape)])
    # r corresponds to 3 * sigma => 99% of the gaussian lies inside the circle
    const = 1 / (2 * (r / 3) ** 2)
    # BUG FIX: the original meshgrid(np.arange(shape[0]), np.arange(shape[1]))
    # produced a (shape[1], shape[0]) mask — transposed for non-square images.
    # Build the grid so the mask has shape `shape`: columns (x) compared to
    # center[0], rows (y) to center[1]; identical to the original for squares.
    mesh_x, mesh_y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    delta_x = (mesh_x - center[0]) ** 2
    delta_y = (mesh_y - center[1]) ** 2
    mask = np.exp(-(delta_x + delta_y) * const)
    return mask, g_0
|
slimpotatoes/STEM_Moire_GPA | src/gui.py | # STEM Moire GPA GUI Module
import matplotlib.pyplot as plt
import matplotlib.gridspec as grid
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.artist as artist
import matplotlib.patches as patch
from tkinter import filedialog
from matplotlib.widgets import Button
from matplotlib.widgets import TextBox
from matplotlib_scalebar.scalebar import ScaleBar
import data as data
import numpy as np
import guimaskmanager as maskmanag
import guirectanglemanager as rectmanag
import guilinemanager as linemanag
import guidisplay as display
class SMGGUI(object):
    """Main GUI controller for STEM Moire GPA.

    Owns every matplotlib window of the workflow (flow buttons, SMH/reference
    display, hologram simulation, per-mask phase windows, strain maps) and
    relays user events to the shared data structure ``datastruct``.
    """

    def __init__(self, datastruct):
        # Figure/widget handles, created lazily by the gui* methods below.
        self.fig_GUIFlow = None
        self.event_input = None
        self.event_smhsim = None
        self.event_gpa = None
        self.event_ref = None
        self.event_convert = None
        self.event_strain = None
        self.fig_SMHexp = None
        self.ax_fig_SMHexp = None
        # NOTE(review): guismhsim() assigns self.fig_SMHsim (lower-case 's'),
        # so this attribute appears to be dead — confirm before removing.
        self.fig_SMHSim = None
        self.fig_GPA_M1 = None
        self.fig_GPA_M2 = None
        self.fig_GPA_M1_ax = None
        self.fig_GPA_M2_ax = None
        self.rectangle_M1 = None
        self.rectangle_M2 = None
        self.fig_NM = None
        self.fig_strain = None
        self.mask = dict()          # mask_id -> (center, radius)
        self.mask_selected = None
        # (n, m) integer shifts collected from the guiconv() text boxes.
        self.h_1 = 0
        self.h_2 = 0
        self.v_1 = 0
        self.v_2 = 0
        self.datastruct = datastruct
        self.line_rot = None

    def guiflow(self):
        """Build the vertical stack of workflow buttons and hook key presses."""
        self.fig_GUIFlow = plt.figure(num='SMG Flow', figsize=(2, 5))
        gs_button = grid.GridSpec(6, 1)
        self.event_input = Button(self.fig_GUIFlow.add_axes(self.fig_GUIFlow.add_subplot(gs_button[0, 0])), 'Input')
        self.event_smhsim = Button(self.fig_GUIFlow.add_axes(self.fig_GUIFlow.add_subplot(gs_button[1, 0])), 'SMHSim')
        self.event_gpa = Button(self.fig_GUIFlow.add_axes(self.fig_GUIFlow.add_subplot(gs_button[2, 0])), 'GPA')
        self.event_ref = Button(self.fig_GUIFlow.add_axes(self.fig_GUIFlow.add_subplot(gs_button[3, 0])), 'Ref')
        self.event_convert = Button(self.fig_GUIFlow.add_axes(self.fig_GUIFlow.add_subplot(gs_button[4, 0])), 'Convert')
        self.event_strain = Button(self.fig_GUIFlow.add_axes(self.fig_GUIFlow.add_subplot(gs_button[5, 0])), 'Strain')
        # Number keys on this window open quick-look displays (custom_display).
        self.fig_GUIFlow.canvas.mpl_connect('key_press_event', self.custom_display)

    def guiconv(self):
        """Open the '(n,m) shift' window with four integer text boxes."""
        # FIX: the original callbacks guarded with `isinstance(int(text), int)`,
        # which can never be False, and int(text) raised an uncaught ValueError
        # for non-numeric input. Parse defensively instead; empty or invalid
        # text leaves the stored shift unchanged (same as before).
        def collect_shift_h1(text):
            try:
                self.h_1 = int(text)
            except ValueError:
                return

        def collect_shift_v1(text):
            try:
                self.v_1 = int(text)
            except ValueError:
                return

        def collect_shift_h2(text):
            try:
                self.h_2 = int(text)
            except ValueError:
                return

        def collect_shift_v2(text):
            try:
                self.v_2 = int(text)
            except ValueError:
                return

        self.fig_NM = plt.figure(figsize=(1.2, 2), num='(n,m) shift')
        # Detach the default key handler so typing in the boxes does not
        # trigger matplotlib shortcuts.
        self.fig_NM.canvas.mpl_disconnect(self.fig_NM.canvas.manager.key_press_handler_id)
        fig_ax_text_1 = self.fig_NM.add_axes(plt.axes([0.1, 0.8, 0.8, 0.1]))
        fig_ax_text_2 = self.fig_NM.add_axes(plt.axes([0.1, 0.4, 0.8, 0.1]))
        fig_ax_text_1.set_axis_off()
        fig_ax_text_1.format_coord = lambda x, y: ""
        fig_ax_text_1.text(0, 0.2, 'Shift related to the Red mask')
        fig_ax_text_2.set_axis_off()
        fig_ax_text_2.text(0, 0.2, 'Shift related to the Blue mask')
        fig_ax_text_2.format_coord = lambda x, y: ""
        self.textbox_h_1 = TextBox(self.fig_NM.add_axes(plt.axes([0.6, 0.65, 0.2, 0.1])),
                                   'Horizontal shift :', initial='0', label_pad=0.2)
        self.textbox_v_1 = TextBox(self.fig_NM.add_axes(plt.axes([0.6, 0.55, 0.2, 0.1])),
                                   'Vertical shift :', initial='0', label_pad=0.2)
        self.textbox_h_2 = TextBox(self.fig_NM.add_axes(plt.axes([0.6, 0.25, 0.2, 0.1])),
                                   'Horizontal shift :', initial='0', label_pad=0.2)
        self.textbox_v_2 = TextBox(self.fig_NM.add_axes(plt.axes([0.6, 0.15, 0.2, 0.1])),
                                   'Vertical shift :', initial='0', label_pad=0.2)
        self.textbox_h_1.on_text_change(collect_shift_h1)
        self.textbox_h_2.on_text_change(collect_shift_h2)
        self.textbox_v_1.on_text_change(collect_shift_v1)
        self.textbox_v_2.on_text_change(collect_shift_v2)

    def guismhexp(self, datastruct):
        """Show the experimental SMH next to the reference image, with scalebars."""
        self.fig_SMHexp = plt.figure(num='SMH and reference image')
        self.ax_fig_SMHexp = self.fig_SMHexp.add_subplot(1, 2, 1)
        self.ax_fig_SMHexp.imshow(data.SMGData.load(datastruct, 'ISMHexp'), cmap='gray')
        # p / pref scaled by 1e-9 — presumably pixel sizes stored in nm; confirm.
        scalebar1 = ScaleBar(data.SMGData.load(datastruct, 'p') * 10 ** -9)
        plt.gca().add_artist(scalebar1)
        # Line tool used to measure/define a rotation on the hologram.
        self.line_rot = linemanag.LineDraw(self.ax_fig_SMHexp)
        self.line_rot.ConnectDraw()
        self.fig_SMHexp.add_subplot(1, 2, 2).imshow(data.SMGData.load(datastruct, 'ICref'), cmap='gray')
        scalebar2 = ScaleBar(data.SMGData.load(datastruct, 'pref') * 10 ** -9)
        plt.gca().add_artist(scalebar2)
        plt.show()

    def guismhsim(self, datastruct):
        """Display experimental vs simulated FFTs and let the user place/edit masks."""
        def edit_mode(event):
            # 'e' -> unlock the circles for dragging and drop their stored
            # branches; 'd' -> freeze the circles and store them as masks.
            if event.key == 'e':
                print('Edit mode open, please edit your masks')
                for circle in self.circles:
                    circle.connect()
                for element in self.mask.keys():
                    data.SMGData.remove_branch(datastruct, element)
            if event.key == 'd':
                print('Edit mode closed, please select the mask for the GPA process')
                for circle in self.circles:
                    self.mask[artist.Artist.get_gid(circle.artist)] = (circle.artist.center, circle.artist.radius)
                    circle.disconnect_edit()
                for element in self.mask.keys():
                    data.SMGData.create_branch(datastruct, element)
                    data.SMGData.store_g(datastruct, element, 'Mask', self.mask[element])
                    print(element, ' Center : ', self.mask[element][0], ' Radius : ', self.mask[element][1])

        self.fig_SMHsim = plt.figure(num='SMH Simulation')
        self.fig_SMHsim.canvas.mpl_connect('key_press_event', edit_mode)
        self.fig_SMHsim_axis = self.fig_SMHsim.add_subplot(1, 2, 1)
        ftsmhexp = data.SMGData.load(datastruct, 'FTISMHexp')
        self.fig_SMHsim_axis.imshow(np.log1p(self.fft_display(ftsmhexp)), cmap='gray')
        self.fig_SMHsim.add_subplot(1, 2, 2).imshow(np.log1p(data.SMGData.load(datastruct, 'FTISMHsim')), cmap='gray')
        fticsquare = data.SMGData.load(datastruct, 'FTISMHsimDisplay')
        # One color per tile, shared between the recombined FFT and the split view.
        colormaps_Moire, colormaps_IC = self.generate_colormap_smhsim(fticsquare.shape[0] * fticsquare.shape[1])
        ftsmhsim = plt.figure(num='Simulated SMH with colors')
        icsplit = plt.figure(num='Ic split into tiles')
        # Blank background image so the colored overlays compose on gray.
        ZERO = np.zeros(fticsquare[0][0].shape)
        ftsmhsimaxis = ftsmhsim.add_subplot(1, 1, 1)
        ftsmhsimaxis.imshow(ZERO, cmap="gray", alpha=1)
        max_general = np.max(np.max(np.max(np.max(np.log1p(fticsquare)))))
        count = 0
        for i in range(0, fticsquare.shape[0]):
            for j in range(0, fticsquare.shape[1]):
                # Threshold, then overlay the tile on the reconstructed Moire FFT.
                Test = np.log1p(fticsquare[i, j])
                mask = Test[:, :] < (0.67 * max_general)
                Test[mask] = 0
                cmap_moire = colors.LinearSegmentedColormap.from_list('my_cmap', colormaps_Moire[count])
                cmap_ic = colors.LinearSegmentedColormap.from_list('my_cmap', colormaps_IC[count])
                ftsmhsimaxis.imshow(Test, cmap=cmap_moire, alpha=.7, clim=(20, 40))
                # Same tile shown in the split high-resolution FFT view.
                icsplitaxis = icsplit.add_subplot(fticsquare.shape[0], fticsquare.shape[0], count + 1)
                icsplitaxis.imshow(np.log1p(fticsquare[i, j]), cmap=cmap_ic, clim=(27.5, 34.5))
                icsplitaxis.xaxis.set_visible(False)
                icsplitaxis.yaxis.set_visible(False)
                count += 1
        # Two default masks (red 'Mask1', blue 'Mask2') the user can then edit.
        smgmaskcreate = maskmanag.MaskCreator(self.fig_SMHsim_axis, ftsmhexp)
        circle1 = smgmaskcreate.make_circle('Mask1')
        circle2 = smgmaskcreate.make_circle('Mask2', colored='b', off_center=(20, 20))
        self.mask[circle1[0]] = circle1[1]
        self.mask[circle2[0]] = circle2[1]
        self.circles = []
        for el in self.fig_SMHsim_axis.artists:
            smgmaskedit = maskmanag.MaskEditor(el)
            self.circles.append(smgmaskedit)
        plt.show()

    def guiphase(self, mask_id, datastruct):
        """Open/refresh the phase window for *mask_id* ('Mask1' or 'Mask2').

        Each window carries a draggable rectangle selecting the unstrained
        reference region; moving it on one window mirrors it on the other and
        stores the selection as 'Uref'.
        """
        def close_window(event):
            # Drop handlers and the cached figure when the user closes it.
            if event.canvas.figure == self.fig_GPA_M1:
                self.fig_GPA_M1.canvas.mpl_disconnect(cid1close)
                self.fig_GPA_M1.canvas.mpl_disconnect(cid1draw)
                self.fig_GPA_M1 = None
            elif event.canvas.figure == self.fig_GPA_M2:
                self.fig_GPA_M2.canvas.mpl_disconnect(cid2close)
                self.fig_GPA_M2.canvas.mpl_disconnect(cid2draw)
                self.fig_GPA_M2 = None

        def change_rectangle(event):
            # Mirror the reference rectangle onto the other mask's window and
            # store the selected region as the unstrained reference 'Uref'.
            if event.canvas.figure == self.fig_GPA_M1 and self.fig_GPA_M2 != None and self.phaseref.done == 1:
                self.phaseref.done = 0
                self.phaseref2.remove_rectangle()
                rectangle = self.fig_GPA_M1_ax.findobj(patch.Rectangle)[0]
                self.phaseref2.create_rectangle(rectangle)
                U = self.reference_extract(rectangle)
                data.SMGData.store(datastruct, 'Uref', U)
            elif event.canvas.figure == self.fig_GPA_M2 and self.fig_GPA_M1 != None and self.phaseref2.done == 1:
                self.phaseref2.done = 0
                self.phaseref.remove_rectangle()
                rectangle = self.fig_GPA_M2_ax.findobj(patch.Rectangle)[0]
                self.phaseref.create_rectangle(rectangle)
                U = self.reference_extract(rectangle)
                data.SMGData.store(datastruct, 'Uref', U)
            else:
                return

        if mask_id == 'Mask1':
            if self.fig_GPA_M1 == None:
                self.fig_GPA_M1 = plt.figure(num='GPA - Mask Red')
                self.fig_GPA_M1_ax = self.fig_GPA_M1.add_subplot(1, 1, 1)
                phase = data.SMGData.load_g(datastruct, mask_id, 'phasegM')
                self.image_mask_1 = self.fig_GPA_M1_ax.imshow(phase, cmap='gray')
                self.rectangle_M1 = rectmanag.make_rectangle(self.fig_GPA_M1_ax, phase)
                self.phaseref = rectmanag.RectEditor(self.fig_GPA_M1, self.fig_GPA_M1_ax, self.rectangle_M1)
                self.phaseref.connect()
                cid1close = self.fig_GPA_M1.canvas.mpl_connect('close_event', close_window)
                cid1draw = self.fig_GPA_M1.canvas.mpl_connect('draw_event', change_rectangle)
                plt.show()
            else:
                # Window already open: just refresh the displayed phase.
                self.fig_GPA_M1_ax.imshow(data.SMGData.load_g(datastruct, mask_id, 'phasegM'), cmap='gray')
                plt.draw()
        if mask_id == 'Mask2':
            if self.fig_GPA_M2 == None:
                self.fig_GPA_M2 = plt.figure(num='GPA - Mask Blue')
                self.fig_GPA_M2_ax = self.fig_GPA_M2.add_subplot(1, 1, 1)
                phase = data.SMGData.load_g(datastruct, mask_id, 'phasegM')
                self.image_mask_2 = self.fig_GPA_M2_ax.imshow(phase, cmap='gray')
                self.rectangle_M2 = rectmanag.make_rectangle(self.fig_GPA_M2_ax, phase)
                self.phaseref2 = rectmanag.RectEditor(self.fig_GPA_M2, self.fig_GPA_M2_ax, self.rectangle_M2)
                self.phaseref2.connect()
                cid2close = self.fig_GPA_M2.canvas.mpl_connect('close_event', close_window)
                cid2draw = self.fig_GPA_M2.canvas.mpl_connect('draw_event', change_rectangle)
                plt.show()
            else:
                self.fig_GPA_M2_ax.imshow(data.SMGData.load_g(datastruct, mask_id, 'phasegM'), cmap='gray')
                plt.draw()
        else:
            return

    def guistrain(self, datastruct):
        """Display the four strain/rotation maps (εxx, εyy, εxy, ωxy)."""
        self.fig_strain = plt.figure(num='Strain maps')
        fig_strain_ax_exx = self.fig_strain.add_subplot(2, 2, 1)
        fig_strain_ax_eyy = self.fig_strain.add_subplot(2, 2, 2)
        fig_strain_ax_exy = self.fig_strain.add_subplot(2, 2, 3)
        fig_strain_ax_rxy = self.fig_strain.add_subplot(2, 2, 4)
        # Shared ±2% color range on a diverging map.
        exx = fig_strain_ax_exx.imshow(data.SMGData.load(datastruct, 'Exx'), cmap='bwr', vmin=-0.02, vmax=0.02)
        eyy = fig_strain_ax_eyy.imshow(data.SMGData.load(datastruct, 'Eyy'), cmap='bwr', vmin=-0.02, vmax=0.02)
        exy = fig_strain_ax_exy.imshow(data.SMGData.load(datastruct, 'Exy'), cmap='bwr', vmin=-0.02, vmax=0.02)
        rxy = fig_strain_ax_rxy.imshow(data.SMGData.load(datastruct, 'Rxy'), cmap='bwr', vmin=-0.02, vmax=0.02)
        fig_strain_ax_exx.set_title('εxx')
        fig_strain_ax_exx.xaxis.set_visible(False)
        fig_strain_ax_eyy.set_title('εyy')
        fig_strain_ax_eyy.xaxis.set_visible(False)
        fig_strain_ax_exy.set_title('εxy')
        fig_strain_ax_exy.xaxis.set_visible(False)
        fig_strain_ax_rxy.set_title('ωxy')
        fig_strain_ax_rxy.xaxis.set_visible(False)
        fig_strain_ax_rxy.add_artist(ScaleBar(data.SMGData.load(datastruct, 'p') * 10 ** -9))
        plt.show()

    @staticmethod
    def fft_display(fft):
        """Centered power spectrum of *fft* for display."""
        return np.fft.fftshift(np.abs(fft ** 2))

    @staticmethod
    def generate_colormap_smhsim(total_tiles):
        """Return (moire_cmaps, ic_cmaps): one 2-color ramp per tile, sampled from 'jet'."""
        initial_color_map = cm.get_cmap(name="jet")
        coef_weighted_color = np.arange(0, total_tiles).astype(float) / (total_tiles - 1)
        customized_color_map_max = []
        for elements in coef_weighted_color:
            customized_color_map_max.append(initial_color_map(elements))
        customized_color_maps_Moire = []
        customized_color_maps_Ic = []
        for elements in customized_color_map_max:
            # Moire overlays fade from fully transparent; Ic tiles from dark gray.
            customized_color_maps_Moire.append([(0, 0, 0, 0.0), elements])
            customized_color_maps_Ic.append([(0, 0, 0, 0.8), elements])
        return customized_color_maps_Moire, customized_color_maps_Ic

    def mask_selection(self):
        """Return the gid of the currently selected mask circle (or the last one kept)."""
        for circle in self.circles:
            if circle.mask_selected is not None:
                self.mask_selected = circle.mask_selected
                print('Mask selected')
        return self.mask_selected

    def reference_extract(self, rectangle):
        """Return the (x0, y0, x1, y1) integer corners of *rectangle*."""
        x0, y0 = rectangle.get_xy()
        x1, y1 = x0 + rectangle.get_width(), y0 + rectangle.get_height()
        print(int(x0), int(y0), int(x1), int(y1))
        return int(x0), int(y0), int(x1), int(y1)

    def update_phase(self, mask_id, datastruct):
        """Redraw the phase image in the window associated with *mask_id*."""
        if mask_id == 'Mask1':
            phase = data.SMGData.load_g(datastruct, mask_id, 'phasegM')
            print('Mask 1 update')
            self.fig_GPA_M1_ax.imshow(phase, cmap='gray')
            self.fig_GPA_M1.canvas.draw()
        elif mask_id == 'Mask2':
            print('Mask 2 update')
            phase = data.SMGData.load_g(datastruct, mask_id, 'phasegM')
            self.fig_GPA_M2_ax.imshow(phase, cmap='gray')
            self.fig_GPA_M2.canvas.draw()
        else:
            pass

    @staticmethod
    def open_files():
        """Prompt for the SMH and reference image paths; return them as a tuple."""
        file_path_smh = filedialog.askopenfilename(title="Load the STEM Moire hologram")
        file_path_ic = filedialog.askopenfilename(title="Load the reference image")
        return file_path_smh, file_path_ic

    # NOT DOCUMENTED AND NEED TO BE DESIGNED PROPERLY
    def custom_display(self, event):
        """Quick-look displays bound to number keys on the flow window."""
        if event.key == '1':
            data_to_display = data.SMGData.load(self.datastruct, 'ISMHexp')
            p = data.SMGData.load(self.datastruct, 'p')
            display.GUIDisplay(data_to_display, cal=p)
        if event.key == '2':
            data_to_display = data.SMGData.load(self.datastruct, 'ICref')
            p = data.SMGData.load(self.datastruct, 'pref')
            display.GUIDisplay(data_to_display, cal=p)
        if event.key == '3':
            data_to_display = data.SMGData.load(self.datastruct, 'FTISMHexp')
            display.GUIDisplay(data_to_display)
        if event.key == '4':
            data_to_display = data.SMGData.load(self.datastruct, 'FTISMHsim')
            display.GUIDisplay(data_to_display)
        if event.key == '5':
            # FIX: key was 'PhasegM' (capital P) while the rest of the GUI
            # stores/loads 'phasegM' (see guiphase/update_phase) — KeyError.
            data_to_display = data.SMGData.load_g(self.datastruct, 'Mask1', 'phasegM')
            p = data.SMGData.load(self.datastruct, 'p')
            display.GUIDisplay(data_to_display, cal=p)
        if event.key == '6':
            data_to_display = data.SMGData.load_g(self.datastruct, 'Mask2', 'phasegM')
            p = data.SMGData.load(self.datastruct, 'p')
            display.GUIDisplay(data_to_display, cal=p)
        if event.key == '7':
            data_to_display = data.SMGData.load(self.datastruct, 'Exx')
            p = data.SMGData.load(self.datastruct, 'p')
            display.GUIDisplay(data_to_display, cal=p)
        if event.key == '8':
            data_to_display = data.SMGData.load(self.datastruct, 'Eyy')
            p = data.SMGData.load(self.datastruct, 'p')
            display.GUIDisplay(data_to_display, cal=p)
        if event.key == '9':
            data_to_display = data.SMGData.load(self.datastruct, 'Exy')
            p = data.SMGData.load(self.datastruct, 'p')
            display.GUIDisplay(data_to_display, cal=p)
        if event.key == '0':
            data_to_display = data.SMGData.load(self.datastruct, 'Rxy')
            p = data.SMGData.load(self.datastruct, 'p')
            display.GUIDisplay(data_to_display, cal=p)
|
slimpotatoes/STEM_Moire_GPA | src/guimaskmanager.py | # Module Mask Manager that is used by GUI
from matplotlib.patches import Circle
import matplotlib.artist as artist
import math
class MaskCreator(object):
    """Create a circular mask artist on a matplotlib axis over an image."""

    def __init__(self, axis, image):
        """Keep references to the target axis and the displayed image array."""
        self.axis = axis
        self.image = image
        self.circle = None       # matplotlib.patches.Circle once created
        self.colored = None      # circle face color
        self.off_center = None   # (dx, dy) offset from the image centre

    def make_circle(self, mask_id, colored='r', off_center=(0, 0)):
        """Create the circle GUI object and attach it to the axis.

        The original placed this docstring after two statements, where it was
        a no-op string literal; it is now a real docstring.

        Args:
            mask_id: gid assigned to the created artist.
            colored: matplotlib color for the circle.
            off_center: (dx, dy) offset of the centre from the image centre.

        Returns:
            tuple: (gid, (center, radius)) of the created circle.
        """
        self.colored = colored
        self.off_center = off_center
        # Default geometry: centred on the image (plus offset), radius =
        # one sixth of shape[0].
        # NOTE(review): shape[0] is the row count; using it for the
        # x-coordinate assumes a square image — confirm.
        self.circle = Circle((self.image.shape[0] / 2 + self.off_center[0],
                              self.image.shape[1] / 2 + self.off_center[1]),
                             self.image.shape[0] / 6, color=self.colored,
                             fill=True, alpha=0.3, linewidth=3)
        circle_artist = self.axis.add_artist(self.circle)
        circle_artist.set_gid(mask_id)
        self.axis.figure.canvas.draw()
        return self.circle.get_gid(), (self.circle.center, self.circle.radius)
class MaskEditor(object):
    """Drag (left button) and resize (right button) a circular mask artist."""

    def __init__(self, artist):
        self.artist = artist
        self.press = None            # (x0, y0, xpress, ypress) while dragging
        self.mask_selected = None    # gid of the currently selected mask
        self.circle = None
        self.cidpress = None
        self.cidrelease = None
        self.cidmotion = None

    def connect(self):
        """Hook press/release/motion callbacks into the artist's canvas."""
        canvas = self.artist.figure.canvas
        self.cidpress = canvas.mpl_connect('button_press_event', self.on_press)
        self.cidrelease = canvas.mpl_connect('button_release_event', self.on_release)
        self.cidmotion = canvas.mpl_connect('motion_notify_event', self.on_motion)

    def on_press(self, event):
        """Select the mask under the cursor; deselect when clicking elsewhere."""
        if event.inaxes != self.artist.axes:
            return
        hit, _ = self.artist.contains(event)
        if not hit:
            # Click outside the circle: restore the fill and clear selection.
            self.artist.fill = True
            self.artist.figure.canvas.draw()
            self.mask_selected = None
            return
        x0, y0 = self.artist.center
        self.press = (x0, y0, event.xdata, event.ydata)
        self.artist.fill = False
        self.artist.figure.canvas.draw()
        self.mask_selected = artist.Artist.get_gid(self.artist)

    def on_motion(self, event):
        """Move (button 1) or resize (button 3) the selected circle."""
        if self.press is None:
            return
        if event.inaxes != self.artist.axes:
            return
        x0, y0, xpress, ypress = self.press
        if event.button == 1:
            # Translate by the drag vector measured from the press point.
            self.artist.center = (x0 + event.xdata - xpress,
                                  y0 + event.ydata - ypress)
        if event.button == 3:
            # New radius = distance from the circle centre to the cursor.
            self.artist.set_radius(
                math.sqrt((event.xdata - x0) ** 2 + (event.ydata - y0) ** 2))
        self.artist.figure.canvas.draw()

    def on_release(self, event):
        """End the current drag."""
        self.press = None
        self.artist.figure.canvas.draw()

    def disconnect(self):
        """Remove every callback installed by connect()."""
        canvas = self.artist.figure.canvas
        canvas.mpl_disconnect(self.cidpress)
        canvas.mpl_disconnect(self.cidrelease)
        canvas.mpl_disconnect(self.cidmotion)

    def disconnect_edit(self):
        """Remove only the motion callback, freezing the mask geometry."""
        self.artist.figure.canvas.mpl_disconnect(self.cidmotion)
|
slimpotatoes/STEM_Moire_GPA | src/test_straincalc.py | import pytest
import straincalc
import numpy as np
import data
# ----------------
# Test cases
# ---------------
def _vec_field(v):
    """Return a 1x1 vector field of shape (2, 1, 1) with components of v on axis 0."""
    return np.transpose(np.array([[list(v)]]), axes=(2, 0, 1))


def _make_test_case(g1, g2, dg1, dg2, expected):
    """Build one strain test case: a datastruct with two mask branches plus
    the expected (Exx, Eyy, Exy, Rxy) maps.

    Args:
        g1, g2: unstrained g vectors for Mask1 / Mask2.
        dg1, dg2: corresponding deltagM vectors.
        expected: nested list for the expected 4x1x1 strain array.
    """
    ds = data.SMGData()
    ds.create_branch('Mask1')
    ds.create_branch('Mask2')
    ds.store('p', 1)
    ds.store_g('Mask1', 'gCuns', _vec_field(g1))
    ds.store_g('Mask2', 'gCuns', _vec_field(g2))
    ds.store_g('Mask1', 'deltagM', _vec_field(dg1))
    ds.store_g('Mask2', 'deltagM', _vec_field(dg2))
    return {'datastruct': ds, 'to_assert_strain': np.array(expected)}


# Test 27 from TestPlan.pdf: zero deltagM -> zero strain everywhere.
test_case_0 = _make_test_case((1, 2), (3, 4), (0, 0), (0, 0),
                              [[[0]], [[0]], [[0]], [[0]]])
# Test 28 from TestPlan.pdf.
test_case_1 = _make_test_case((1, 0), (0, 1), (-0.1, 0), (0, 0),
                              [[[0]], [[1/9]], [[0]], [[0]]])
test_case_2 = _make_test_case((1, 0), (0, 1), (0, 0), (0, 0.1),
                              [[[-1/11]], [[0]], [[0]], [[0]]])
test_case_3 = _make_test_case((1, 0), (0, 1), (0.1, 0), (0, -0.1),
                              [[[1/9]], [[-1/11]], [[0]], [[0]]])
test_case_4 = _make_test_case((1, 0), (0, 1), (0, 0.01), (0.01, 0),
                              [[[0]], [[0]], [[1/0.9999*1/100]], [[0]]])
test_case_5 = _make_test_case((1, 0), (0, 1), (0, -0.01), (0.01, 0),
                              [[[0]], [[0]], [[0]], [[-1/1.0001*1/100]]])
test_cases = [test_case_0, test_case_1, test_case_2, test_case_3, test_case_4, test_case_5]
@pytest.mark.parametrize("test_case", test_cases)
def test_strain_calc_easy(test_case):
    """Run the strain calculation and compare the four output maps."""
    ds = test_case['datastruct']
    straincalc.strain_calculation('Mask1', 'Mask2', ds)
    results = np.array([ds.load(key) for key in ('Exx', 'Eyy', 'Exy', 'Rxy')])
    print(results)
    print(test_case['to_assert_strain'])
    assert np.all(np.isclose(results, test_case['to_assert_strain'], atol=0.001))
slimpotatoes/STEM_Moire_GPA | src/guilinemanager.py | <filename>src/guilinemanager.py
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
class LineDraw(object):
    """Interactive line tool for a matplotlib axis.

    Workflow: ConnectDraw() lets the user click-drag a new line; on release
    the tool switches itself into "move" mode (ConnectMove), where either
    endpoint can be dragged and the line width changed with the scroll wheel.
    """

    # Squared-distance threshold (data units) for grabbing an endpoint.
    epsilon = 20

    def __init__(self, axis):
        self.axis = axis
        self.canvas = self.axis.figure.canvas
        self.LineStartx = None
        self.LineStarty = None
        self.LineEndx = None
        self.LineEndy = None
        self.LineCoords = np.empty((2, 2))  # rows = endpoints, cols = (x, y)
        self.line = None                    # Line2D artist once created
        self.Dragging = False
        self.vertex = 1                     # endpoint index being edited (0/1)
        self.width = 1                      # requested width in screen pixels
        self.background = None              # cached bbox for blit animation
        # Event-connection ids (set by ConnectDraw / ConnectMove); previously
        # created on the fly, now declared up front.
        self.cidclick = None
        self.cidmotion = None
        self.cidrelease = None
        self.ciddraw = None
        self.cidendpick = None
        self.cidenddrag = None
        self.cidendrelease = None
        self.cidwidth = None
        self.cidenddraw = None
        self.WidthData = self.WidthDataCoords()

    def ConnectDraw(self):
        """Enable draw mode: click to start a line, drag, release to finish."""
        print('draw a line!')
        self.cidclick = self.canvas.mpl_connect('button_press_event',
                                                self.LineStart)
        self.cidmotion = self.canvas.mpl_connect('motion_notify_event',
                                                 self.DrawLine)
        self.cidrelease = self.canvas.mpl_connect('button_release_event',
                                                  self.LineEnd)
        self.ciddraw = self.canvas.mpl_connect('draw_event', self.DrawCanvas)

    def DisconnectDraw(self):
        """Remove all draw-mode event handlers."""
        self.canvas.mpl_disconnect(self.cidclick)
        self.canvas.mpl_disconnect(self.cidrelease)
        self.canvas.mpl_disconnect(self.ciddraw)
        self.canvas.mpl_disconnect(self.cidmotion)

    def ConnectMove(self):
        """Enable move mode: drag endpoints, scroll to change width."""
        print('move your line!')
        self.cidendpick = self.canvas.mpl_connect('button_press_event',
                                                  self.MoveLinePress)
        self.cidenddrag = self.canvas.mpl_connect('motion_notify_event',
                                                  self.DrawLine)
        self.cidendrelease = self.canvas.mpl_connect('button_release_event',
                                                     self.MoveLineUpdate)
        self.cidwidth = self.canvas.mpl_connect('scroll_event',
                                                self.ChangeWidth)
        self.cidenddraw = self.canvas.mpl_connect('draw_event', self.DrawCanvas)

    def DisconnectMove(self):
        """Remove all move-mode event handlers."""
        self.canvas.mpl_disconnect(self.cidendpick)
        self.canvas.mpl_disconnect(self.cidenddrag)
        self.canvas.mpl_disconnect(self.cidendrelease)
        self.canvas.mpl_disconnect(self.cidenddraw)
        self.canvas.mpl_disconnect(self.cidwidth)

    def LineStart(self, event):
        """Record the first endpoint and create the animated preview line."""
        if event.inaxes != self.axis:
            return
        self.LineCoords[0] = [event.xdata, event.ydata]
        # BUGFIX: the y data previously reused column 0 (the x coordinates)
        # instead of column 1.
        self.line = mlines.Line2D(self.LineCoords[:, 0], self.LineCoords[:, 1],
                                  lw=self.WidthData, c='g', animated=True)
        self.axis.add_line(self.line)
        self.background = self.canvas.copy_from_bbox(self.axis.bbox)

    def LineEnd(self, event):
        """Record the second endpoint, draw the final line, switch to move mode."""
        if event.inaxes != self.axis:
            return
        self.LineCoords[1] = [event.xdata, event.ydata]
        self.CoordTransform = self.axis.transData.inverted()
        self.WidthData = self.WidthDataCoords()
        self.line = mlines.Line2D(self.LineCoords[:, 0], self.LineCoords[:, 1],
                                  lw=self.WidthData, c='g', marker='o',
                                  alpha=0.5, solid_capstyle='butt')
        self.axis.add_line(self.line)
        self.Dragging = True
        self.DisconnectDraw()
        self.ConnectMove()
        plt.draw()

    def DrawLine(self, event):
        """Live-update the line while dragging (left button only), via blitting."""
        if event.button != 1:
            return
        if event.inaxes is None:
            return
        if self.vertex == 0:
            x = (event.xdata, self.LineCoords[1, 0])
            y = (event.ydata, self.LineCoords[1, 1])
        elif self.vertex == 1:
            x = (self.LineCoords[0, 0], event.xdata)
            y = (self.LineCoords[0, 1], event.ydata)
        else:
            # No endpoint selected (GetPoint returned None).
            return
        self.line.set_data(x, y)
        self.canvas.restore_region(self.background)
        self.axis.draw_artist(self.line)
        self.canvas.blit(self.axis.bbox)
        self.canvas.draw()

    def DrawCanvas(self, event):
        """Re-cache the background and redraw the line after a full canvas draw."""
        self.background = self.canvas.copy_from_bbox(self.axis.bbox)
        if self.line is not None:
            self.axis.draw_artist(self.line)
            self.WidthData = self.WidthDataCoords()
            self.line.set_linewidth(self.WidthData)
            self.canvas.blit(self.axis.bbox)

    def MoveLinePress(self, event):
        """Select the endpoint nearest the click (None if too far away)."""
        self.vertex = self.GetPoint(event)

    def MoveLineUpdate(self, event):
        """Commit the dragged endpoint position on button release."""
        # Guard against releases outside the axes, where xdata/ydata are None
        # (the original crashed assigning None into the float array).
        if self.vertex is None or event.inaxes is None:
            return
        self.LineCoords[self.vertex] = [event.xdata, event.ydata]
        self.line.set_data(self.LineCoords[:, 0], self.LineCoords[:, 1])
        self.canvas.draw()

    def WidthDataCoords(self):
        """Convert self.width from screen pixels to data units (x direction)."""
        diff0 = self.axis.transData.inverted().transform((1, 0))
        diff1 = self.axis.transData.inverted().transform((2, 0))
        diff = (diff1 - diff0) * self.width
        return diff[0]

    def ChangeWidth(self, event):
        """Scroll up/down to thicken/thin the line (minimum width 1)."""
        if event.button == 'up':
            self.width += 1
        elif event.button == 'down' and self.width > 1:
            self.width -= 1
        else:
            return
        self.WidthData = self.WidthDataCoords()
        self.line.set_linewidth(self.WidthData)
        self.canvas.draw()

    def GetPoint(self, event):
        """Return 0 or 1 for the nearest endpoint, or None if beyond epsilon."""
        delta2 = np.sum((self.LineCoords - [event.xdata, event.ydata]) ** 2, axis=1)
        # BUGFIX: np.where(...)[0] returned an *array* (length 2 on ties),
        # which broke the `vertex == 0/1` checks; argmin gives one scalar index.
        index = int(np.argmin(delta2))
        if delta2[index] >= self.epsilon ** 2:
            index = None
        return index
|
slimpotatoes/STEM_Moire_GPA | src/straincalc.py | # Strain calculation Module
import numpy as np
import data as data
import rotatecalc
def strain_calculation(mask_id_1, mask_id_2, datastruct):
    """Compute the 2D strain and rotation maps from two mask g-vector fields.

    Stores the 'Exx', 'Eyy', 'Exy' and 'Rxy' image arrays into ``datastruct``.

    Args:
        mask_id_1: branch name holding the first mask's 'gCuns'/'deltagM'.
        mask_id_2: branch name holding the second mask's 'gCuns'/'deltagM'.
        datastruct: data.SMGData instance with the pixel size 'p' stored.

    Raises:
        ValueError: if the pixel size is not strictly positive.
    """
    p = data.SMGData.load(datastruct, 'p')
    if p <= 0:
        raise ValueError('Pixel size negative or zero, strain calculation cannot be performed')
    # Load the crystalline 3D arrays and their corresponding delta G 3D arrays
    # and calibrate them with the pixel size p.
    g_c_uns_1 = 1 / p * data.SMGData.load_g(datastruct, mask_id_1, 'gCuns')
    g_c_uns_2 = 1 / p * data.SMGData.load_g(datastruct, mask_id_2, 'gCuns')
    delta_g_1 = 1 / p * data.SMGData.load_g(datastruct, mask_id_1, 'deltagM')
    delta_g_2 = 1 / p * data.SMGData.load_g(datastruct, mask_id_2, 'deltagM')
    identity = np.ones(delta_g_1[0, :, :].shape)
    identity_image = np.array([[identity, np.zeros(identity.shape)],
                               [np.zeros(identity.shape), identity]])
    # Reference and measured g matrices adapted to a 90-degree CCW base
    # rotation (the raw and vector-rotated basis variants that were kept here
    # as commented-out code have been removed).
    g_ref = np.array([[(-1) * g_c_uns_1[1, :, :], (-1) * g_c_uns_2[1, :, :]],
                      [g_c_uns_1[0, :, :], g_c_uns_2[0, :, :]]])
    delta_g = np.array([[(-1) * delta_g_1[1, :, :], (-1) * delta_g_2[1, :, :]],
                        [delta_g_1[0, :, :], delta_g_2[0, :, :]]])
    g = np.add(g_ref, delta_g)
    # Move the 2x2 matrix axes last, then transpose each per-pixel matrix.
    t_g_ref_pixel = np.transpose(np.transpose(g_ref, axes=[2, 3, 0, 1]), axes=[0, 1, 3, 2])
    t_g_pixel = np.transpose(np.transpose(g, axes=[2, 3, 0, 1]), axes=[0, 1, 3, 2])
    identity_pixel = np.transpose(identity_image, axes=[2, 3, 0, 1])
    # Per-pixel 2x2 solve, vectorized: matmul broadcasts over the leading
    # (row, col) axes, replacing the former Python double loop of np.dot calls
    # with one C-level operation (identical results).
    displacement = np.matmul(np.linalg.inv(t_g_pixel), t_g_ref_pixel)
    # Deformation gradient tensor nabla(u), then its symmetric (strain) and
    # antisymmetric (rotation) parts.
    d = np.subtract(displacement, identity_pixel)
    epsilon = 0.5 * np.add(d, np.transpose(d, axes=[0, 1, 3, 2]))
    omega = 0.5 * np.subtract(d, np.transpose(d, axes=[0, 1, 3, 2]))
    # Put the maps back in image format and store them.
    epsilon_image = np.transpose(epsilon, axes=[2, 3, 0, 1])
    omega_image = np.transpose(omega, axes=[2, 3, 0, 1])
    data.SMGData.store(datastruct, 'Exx', epsilon_image[0, 0])
    data.SMGData.store(datastruct, 'Eyy', epsilon_image[1, 1])
    data.SMGData.store(datastruct, 'Exy', epsilon_image[1, 0])
    data.SMGData.store(datastruct, 'Rxy', omega_image[1, 0])
    print('2D strain calculation done !')
|
victorwyee/great_expectations | great_expectations/expectations/metrics/map_metric.py | import uuid
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
import numpy as np
from great_expectations.core import ExpectationConfiguration
from great_expectations.core.util import convert_to_json_serializable
from great_expectations.exceptions.metric_exceptions import (
MetricError,
MetricProviderError,
)
from great_expectations.execution_engine import ExecutionEngine, PandasExecutionEngine
from great_expectations.execution_engine.execution_engine import (
MetricDomainTypes,
MetricFunctionTypes,
MetricPartialFunctionTypes,
)
from great_expectations.execution_engine.sparkdf_execution_engine import (
F,
SparkDFExecutionEngine,
)
from great_expectations.execution_engine.sqlalchemy_execution_engine import (
SqlAlchemyExecutionEngine,
sa,
)
from great_expectations.expectations.metrics.metric_provider import (
MetricProvider,
metric_partial,
)
from great_expectations.expectations.registry import (
get_metric_provider,
register_metric,
)
from great_expectations.validator.validation_graph import MetricConfiguration
def column_function_partial(
    engine: Type[ExecutionEngine], partial_fn_type: str = None, **kwargs
):
    """Provides engine-specific support for authing a metric_fn with a simplified signature.
    A metric function that is decorated as a column_function_partial will be called with the engine-specific column type
    and any value_kwargs associated with the Metric for which the provider function is being declared.
    Args:
        engine: the ExecutionEngine subclass the decorated metric_fn targets.
        partial_fn_type: name or member of MetricPartialFunctionTypes; when
            None, an engine-appropriate default is chosen below.
        **kwargs: forwarded to metric_partial; "filter_column_isnull" is also
            read inside the generated wrapper.
    Returns:
        An annotated metric_function which will be called with a simplified signature.
    """
    domain_type = MetricDomainTypes.COLUMN
    # Pandas branch: the metric_fn receives the column as a pandas Series.
    if issubclass(engine, PandasExecutionEngine):
        if partial_fn_type is None:
            partial_fn_type = MetricPartialFunctionTypes.MAP_SERIES
        partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)
        if partial_fn_type != MetricPartialFunctionTypes.MAP_SERIES:
            raise ValueError(
                "PandasExecutionEngine only supports map_series for column_function_partial partial_fn_type"
            )
        def wrapper(metric_fn: Callable):
            @metric_partial(
                engine=engine,
                partial_fn_type=partial_fn_type,
                domain_type=domain_type,
                **kwargs,
            )
            @wraps(metric_fn)
            def inner_func(
                cls,
                execution_engine: "PandasExecutionEngine",
                metric_domain_kwargs: Dict,
                metric_value_kwargs: Dict,
                metrics: Dict[str, Any],
                runtime_configuration: Dict,
            ):
                # Decorator kwargs win; otherwise fall back to the class
                # attribute (default False for function partials).
                filter_column_isnull = kwargs.get(
                    "filter_column_isnull", getattr(cls, "filter_column_isnull", False)
                )
                (
                    df,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                ) = execution_engine.get_compute_domain(
                    domain_kwargs=metric_domain_kwargs, domain_type=domain_type
                )
                # Pandas applies the null filter eagerly on the DataFrame.
                if filter_column_isnull:
                    df = df[df[accessor_domain_kwargs["column"]].notnull()]
                values = metric_fn(
                    cls,
                    df[accessor_domain_kwargs["column"]],
                    **metric_value_kwargs,
                    _metrics=metrics,
                )
                return values, compute_domain_kwargs, accessor_domain_kwargs
            return inner_func
        return wrapper
    # SqlAlchemy branch: the metric_fn receives a sa.column expression.
    elif issubclass(engine, SqlAlchemyExecutionEngine):
        if partial_fn_type is None:
            partial_fn_type = MetricPartialFunctionTypes.MAP_FN
        partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)
        if partial_fn_type not in [MetricPartialFunctionTypes.MAP_FN]:
            raise ValueError(
                "SqlAlchemyExecutionEngine only supports map_fn for column_function_partial partial_fn_type"
            )
        def wrapper(metric_fn: Callable):
            @metric_partial(
                engine=engine,
                partial_fn_type=partial_fn_type,
                domain_type=domain_type,
                **kwargs,
            )
            @wraps(metric_fn)
            def inner_func(
                cls,
                execution_engine: "SqlAlchemyExecutionEngine",
                metric_domain_kwargs: Dict,
                metric_value_kwargs: Dict,
                metrics: Dict[str, Any],
                runtime_configuration: Dict,
            ):
                filter_column_isnull = kwargs.get(
                    "filter_column_isnull", getattr(cls, "filter_column_isnull", False)
                )
                # SQL filters nulls by adding a row condition to the domain
                # kwargs rather than filtering materialized data.
                if filter_column_isnull:
                    compute_domain_kwargs = execution_engine.add_column_row_condition(
                        metric_domain_kwargs
                    )
                else:
                    # We do not copy here because if compute domain is different, it will be copied by get_compute_domain
                    compute_domain_kwargs = metric_domain_kwargs
                (
                    selectable,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                ) = execution_engine.get_compute_domain(
                    domain_kwargs=compute_domain_kwargs, domain_type=domain_type
                )
                column_name = accessor_domain_kwargs["column"]
                dialect = execution_engine.dialect_module
                column_function = metric_fn(
                    cls,
                    sa.column(column_name),
                    **metric_value_kwargs,
                    _dialect=dialect,
                    _table=selectable,
                    _metrics=metrics,
                )
                return column_function, compute_domain_kwargs, accessor_domain_kwargs
            return inner_func
        return wrapper
    # Spark branch: the metric_fn receives the Spark Column object; window
    # partials are also allowed here.
    elif issubclass(engine, SparkDFExecutionEngine):
        if partial_fn_type is None:
            partial_fn_type = MetricPartialFunctionTypes.MAP_FN
        partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)
        if partial_fn_type not in [
            MetricPartialFunctionTypes.MAP_FN,
            MetricPartialFunctionTypes.WINDOW_FN,
        ]:
            raise ValueError(
                "SparkDFExecutionEngine only supports map_fn and window_fn for column_function_partial partial_fn_type"
            )
        def wrapper(metric_fn: Callable):
            @metric_partial(
                engine=engine,
                partial_fn_type=partial_fn_type,
                domain_type=domain_type,
                **kwargs,
            )
            @wraps(metric_fn)
            def inner_func(
                cls,
                execution_engine: "SparkDFExecutionEngine",
                metric_domain_kwargs: Dict,
                metric_value_kwargs: Dict,
                metrics: Dict[str, Any],
                runtime_configuration: Dict,
            ):
                filter_column_isnull = kwargs.get(
                    "filter_column_isnull", getattr(cls, "filter_column_isnull", False)
                )
                if filter_column_isnull:
                    compute_domain_kwargs = execution_engine.add_column_row_condition(
                        metric_domain_kwargs
                    )
                else:
                    # We do not copy here because if compute domain is different, it will be copied by get_compute_domain
                    compute_domain_kwargs = metric_domain_kwargs
                (
                    data,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                ) = execution_engine.get_compute_domain(
                    domain_kwargs=compute_domain_kwargs, domain_type=domain_type
                )
                column_name = accessor_domain_kwargs["column"]
                column_function = metric_fn(
                    cls,
                    column=data[column_name],
                    **metric_value_kwargs,
                    _metrics=metrics,
                    _compute_domain_kwargs=compute_domain_kwargs,
                )
                return column_function, compute_domain_kwargs, accessor_domain_kwargs
            return inner_func
        return wrapper
    else:
        raise ValueError("Unsupported engine for column_function_partial")
def column_condition_partial(
    engine: Type[ExecutionEngine],
    partial_fn_type: Optional[Union[str, MetricPartialFunctionTypes]] = None,
    **kwargs,
):
    """Provides engine-specific support for authing a metric_fn with a simplified signature. A column_condition_partial
    must provide a map function that evalues to a boolean value; it will be used to provide supplemental metrics, such
    as the unexpected_value count, unexpected_values, and unexpected_rows.
    A metric function that is decorated as a column_condition_partial will be called with the engine-specific column type
    and any value_kwargs associated with the Metric for which the provider function is being declared.
    Args:
        engine: the ExecutionEngine subclass the decorated metric_fn targets.
        partial_fn_type: name or member of MetricPartialFunctionTypes; when
            None, an engine-appropriate default is chosen below.
        **kwargs: forwarded to metric_partial; "filter_column_isnull" is also
            read inside the generated wrapper (default True for conditions).
    Returns:
        An annotated metric_function which will be called with a simplified signature.
    """
    domain_type = MetricDomainTypes.COLUMN
    # Pandas branch: the condition returns a boolean Series; we invert it so
    # downstream metrics see the *unexpected* rows.
    if issubclass(engine, PandasExecutionEngine):
        if partial_fn_type is None:
            partial_fn_type = MetricPartialFunctionTypes.MAP_CONDITION_SERIES
        partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)
        if partial_fn_type not in [MetricPartialFunctionTypes.MAP_CONDITION_SERIES]:
            raise ValueError(
                "PandasExecutionEngine only supports map_condition_series for column_condition_partial partial_fn_type"
            )
        def wrapper(metric_fn: Callable):
            @metric_partial(
                engine=engine,
                partial_fn_type=partial_fn_type,
                domain_type=domain_type,
                **kwargs,
            )
            @wraps(metric_fn)
            def inner_func(
                cls,
                execution_engine: "PandasExecutionEngine",
                metric_domain_kwargs: Dict,
                metric_value_kwargs: Dict,
                metrics: Dict[str, Any],
                runtime_configuration: Dict,
            ):
                filter_column_isnull = kwargs.get(
                    "filter_column_isnull", getattr(cls, "filter_column_isnull", True)
                )
                (
                    df,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                ) = execution_engine.get_compute_domain(
                    domain_kwargs=metric_domain_kwargs, domain_type=domain_type
                )
                if filter_column_isnull:
                    df = df[df[accessor_domain_kwargs["column"]].notnull()]
                meets_expectation_series = metric_fn(
                    cls,
                    df[accessor_domain_kwargs["column"]],
                    **metric_value_kwargs,
                    _metrics=metrics,
                )
                return (
                    ~meets_expectation_series,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                )
            return inner_func
        return wrapper
    # SqlAlchemy branch: the condition is a SQL boolean expression.
    elif issubclass(engine, SqlAlchemyExecutionEngine):
        if partial_fn_type is None:
            partial_fn_type = MetricPartialFunctionTypes.MAP_CONDITION_FN
        partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)
        if partial_fn_type not in [
            MetricPartialFunctionTypes.MAP_CONDITION_FN,
            MetricPartialFunctionTypes.WINDOW_CONDITION_FN,
        ]:
            # BUGFIX: the message previously claimed only map_condition_fn is
            # supported although window_condition_fn is accepted above.
            raise ValueError(
                "SqlAlchemyExecutionEngine only supports map_condition_fn and window_condition_fn for column_condition_partial partial_fn_type"
            )
        def wrapper(metric_fn: Callable):
            @metric_partial(
                engine=engine,
                partial_fn_type=partial_fn_type,
                domain_type=domain_type,
                **kwargs,
            )
            @wraps(metric_fn)
            def inner_func(
                cls,
                execution_engine: "SqlAlchemyExecutionEngine",
                metric_domain_kwargs: Dict,
                metric_value_kwargs: Dict,
                metrics: Dict[str, Any],
                runtime_configuration: Dict,
            ):
                filter_column_isnull = kwargs.get(
                    "filter_column_isnull", getattr(cls, "filter_column_isnull", True)
                )
                (
                    selectable,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                ) = execution_engine.get_compute_domain(
                    metric_domain_kwargs, domain_type=domain_type
                )
                column_name = accessor_domain_kwargs["column"]
                dialect = execution_engine.dialect_module
                sqlalchemy_engine = execution_engine.engine
                expected_condition = metric_fn(
                    cls,
                    sa.column(column_name),
                    **metric_value_kwargs,
                    _dialect=dialect,
                    _table=selectable,
                    _sqlalchemy_engine=sqlalchemy_engine,
                    _metrics=metrics,
                )
                if filter_column_isnull:
                    # If we "filter" (ignore) nulls then we allow null as part of our new expected condition
                    unexpected_condition = sa.and_(
                        sa.not_(sa.column(column_name).is_(None)),
                        sa.not_(expected_condition),
                    )
                else:
                    unexpected_condition = sa.not_(expected_condition)
                return (
                    unexpected_condition,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                )
            return inner_func
        return wrapper
    # Spark branch: the condition is a Spark Column expression; window
    # conditions move the null filter into the domain kwargs instead.
    elif issubclass(engine, SparkDFExecutionEngine):
        if partial_fn_type is None:
            partial_fn_type = MetricPartialFunctionTypes.MAP_CONDITION_FN
        partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)
        if partial_fn_type not in [
            MetricPartialFunctionTypes.MAP_CONDITION_FN,
            MetricPartialFunctionTypes.WINDOW_CONDITION_FN,
        ]:
            raise ValueError(
                "SparkDFExecutionEngine only supports map_condition_fn and window_condition_fn for column_condition_partial partial_fn_type"
            )
        def wrapper(metric_fn: Callable):
            @metric_partial(
                engine=engine,
                partial_fn_type=partial_fn_type,
                domain_type=domain_type,
                **kwargs,
            )
            @wraps(metric_fn)
            def inner_func(
                cls,
                execution_engine: "SparkDFExecutionEngine",
                metric_domain_kwargs: Dict,
                metric_value_kwargs: Dict,
                metrics: Dict[str, Any],
                runtime_configuration: Dict,
            ):
                filter_column_isnull = kwargs.get(
                    "filter_column_isnull", getattr(cls, "filter_column_isnull", True)
                )
                (
                    data,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                ) = execution_engine.get_compute_domain(
                    domain_kwargs=metric_domain_kwargs, domain_type=domain_type
                )
                column_name = accessor_domain_kwargs["column"]
                column = data[column_name]
                expected_condition = metric_fn(
                    cls,
                    column,
                    **metric_value_kwargs,
                    _table=data,
                    _metrics=metrics,
                    _compute_domain_kwargs=compute_domain_kwargs,
                    _accessor_domain_kwargs=accessor_domain_kwargs,
                )
                if partial_fn_type == MetricPartialFunctionTypes.WINDOW_CONDITION_FN:
                    if filter_column_isnull:
                        compute_domain_kwargs = (
                            execution_engine.add_column_row_condition(
                                compute_domain_kwargs, column_name=column_name
                            )
                        )
                    unexpected_condition = ~expected_condition
                else:
                    if filter_column_isnull:
                        unexpected_condition = column.isNotNull() & ~expected_condition
                    else:
                        unexpected_condition = ~expected_condition
                return (
                    unexpected_condition,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                )
            return inner_func
        return wrapper
    else:
        raise ValueError("Unsupported engine for column_condition_partial")
def _pandas_map_condition_unexpected_count(
cls,
execution_engine: "PandasExecutionEngine",
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[str, Any],
**kwargs,
):
"""Returns unexpected count for MapExpectations"""
return np.count_nonzero(metrics["unexpected_condition"][0])
def _pandas_column_map_condition_values(
cls,
execution_engine: "PandasExecutionEngine",
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[str, Any],
**kwargs,
):
"""Return values from the specified domain that match the map-style metric in the metrics dictionary."""
(
boolean_map_unexpected_values,
compute_domain_kwargs,
accessor_domain_kwargs,
) = metrics["unexpected_condition"]
df, _, _ = execution_engine.get_compute_domain(
domain_kwargs=compute_domain_kwargs, domain_type="identity"
)
###
# NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we
# currently handle filter_column_isnull differently than other map_fn / map_condition
# cases.
###
filter_column_isnull = kwargs.get(
"filter_column_isnull", getattr(cls, "filter_column_isnull", False)
)
if filter_column_isnull:
df = df[df[accessor_domain_kwargs["column"]].notnull()]
if "column" in accessor_domain_kwargs:
domain_values = df[accessor_domain_kwargs["column"]]
else:
raise ValueError(
"_pandas_column_map_condition_values requires a column in accessor_domain_kwargs"
)
result_format = metric_value_kwargs["result_format"]
if result_format["result_format"] == "COMPLETE":
return list(domain_values[boolean_map_unexpected_values == True])
else:
return list(
domain_values[boolean_map_unexpected_values == True][
: result_format["partial_unexpected_count"]
]
)
def _pandas_column_map_series_and_domain_values(
cls,
execution_engine: "PandasExecutionEngine",
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[str, Any],
**kwargs,
):
"""Return values from the specified domain that match the map-style metric in the metrics dictionary."""
(
boolean_map_unexpected_values,
compute_domain_kwargs,
accessor_domain_kwargs,
) = metrics["unexpected_condition"]
(
map_series,
compute_domain_kwargs_2,
accessor_domain_kwargs_2,
) = metrics["metric_partial_fn"]
assert (
compute_domain_kwargs == compute_domain_kwargs_2
), "map_series and condition must have the same compute domain"
assert (
accessor_domain_kwargs == accessor_domain_kwargs_2
), "map_series and condition must have the same accessor kwargs"
df, _, _ = execution_engine.get_compute_domain(
domain_kwargs=compute_domain_kwargs, domain_type="identity"
)
###
# NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we
# currently handle filter_column_isnull differently than other map_fn / map_condition
# cases.
###
filter_column_isnull = kwargs.get(
"filter_column_isnull", getattr(cls, "filter_column_isnull", False)
)
if filter_column_isnull:
df = df[df[accessor_domain_kwargs["column"]].notnull()]
if "column" in accessor_domain_kwargs:
domain_values = df[accessor_domain_kwargs["column"]]
else:
raise ValueError(
"_pandas_column_map_series_and_domain_values requires a column in accessor_domain_kwargs"
)
result_format = metric_value_kwargs["result_format"]
if result_format["result_format"] == "COMPLETE":
return (
list(domain_values[boolean_map_unexpected_values == True]),
list(map_series[boolean_map_unexpected_values == True]),
)
else:
return (
list(
domain_values[boolean_map_unexpected_values == True][
: result_format["partial_unexpected_count"]
]
),
list(
map_series[boolean_map_unexpected_values == True][
: result_format["partial_unexpected_count"]
]
),
)
def _pandas_map_condition_index(
cls,
execution_engine: "PandasExecutionEngine",
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[str, Any],
**kwargs,
):
(
boolean_mapped_unexpected_values,
compute_domain_kwargs,
accessor_domain_kwargs,
) = metrics.get("unexpected_condition")
df, _, _ = execution_engine.get_compute_domain(
domain_kwargs=compute_domain_kwargs, domain_type="identity"
)
###
# NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we
# currently handle filter_column_isnull differently than other map_fn / map_condition
# cases.
###
filter_column_isnull = kwargs.get(
"filter_column_isnull", getattr(cls, "filter_column_isnull", False)
)
if filter_column_isnull:
df = df[df[accessor_domain_kwargs["column"]].notnull()]
data = df[accessor_domain_kwargs["column"]]
result_format = metric_value_kwargs["result_format"]
if result_format["result_format"] == "COMPLETE":
return list(df[boolean_mapped_unexpected_values == True].index)
else:
return list(
df[boolean_mapped_unexpected_values == True].index[
: result_format["partial_unexpected_count"]
]
)
def _pandas_column_map_condition_value_counts(
cls,
execution_engine: "PandasExecutionEngine",
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[str, Any],
**kwargs,
):
"""Returns respective value counts for distinct column values"""
(
boolean_mapped_unexpected_values,
compute_domain_kwargs,
accessor_domain_kwargs,
) = metrics.get("unexpected_condition")
df, _, _ = execution_engine.get_compute_domain(
domain_kwargs=compute_domain_kwargs, domain_type="identity"
)
###
# NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we
# currently handle filter_column_isnull differently than other map_fn / map_condition
# cases.
###
filter_column_isnull = kwargs.get(
"filter_column_isnull", getattr(cls, "filter_column_isnull", False)
)
if filter_column_isnull:
df = df[df[accessor_domain_kwargs["column"]].notnull()]
data = df[accessor_domain_kwargs["column"]]
if "column" in accessor_domain_kwargs:
domain_values = df[accessor_domain_kwargs["column"]]
else:
raise ValueError(
"_pandas_column_map_condition_value_counts requires a column in accessor_domain_kwargs"
)
result_format = metric_value_kwargs["result_format"]
value_counts = None
try:
value_counts = domain_values[
boolean_mapped_unexpected_values == True
].value_counts()
except ValueError:
try:
value_counts = (
domain_values[boolean_mapped_unexpected_values == True]
.apply(tuple)
.value_counts()
)
except ValueError:
pass
if not value_counts:
raise MetricError("Unable to compute value counts")
if result_format["result_format"] == "COMPLETE":
return value_counts
else:
return value_counts[result_format["partial_unexpected_count"]]
def _pandas_map_condition_rows(
cls,
execution_engine: "PandasExecutionEngine",
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[str, Any],
**kwargs,
):
"""Return values from the specified domain (ignoring the column constraint) that match the map-style metric in the metrics dictionary."""
(
boolean_mapped_unexpected_values,
compute_domain_kwargs,
accessor_domain_kwargs,
) = metrics.get("unexpected_condition")
df, _, _ = execution_engine.get_compute_domain(
domain_kwargs=compute_domain_kwargs, domain_type="identity"
)
###
# NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we
# currently handle filter_column_isnull differently than other map_fn / map_condition
# cases.
###
filter_column_isnull = kwargs.get(
"filter_column_isnull", getattr(cls, "filter_column_isnull", False)
)
if filter_column_isnull:
df = df[df[accessor_domain_kwargs["column"]].notnull()]
data = df[accessor_domain_kwargs["column"]]
result_format = metric_value_kwargs["result_format"]
if result_format["result_format"] == "COMPLETE":
return df[boolean_mapped_unexpected_values == True]
else:
return df[boolean_mapped_unexpected_values == True][
result_format["partial_unexpected_count"]
]
def _sqlalchemy_map_condition_unexpected_count_aggregate_fn(
    cls,
    execution_engine: "SqlAlchemyExecutionEngine",
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """Build the SQL aggregate partial that counts unexpected rows for MapExpectations.

    Returns a (sqlalchemy aggregate expression, compute_domain_kwargs,
    accessor_domain_kwargs) triple; the expression sums a per-row 0/1 flag.
    """
    (
        unexpected_condition,
        compute_domain_kwargs,
        accessor_domain_kwargs,
    ) = metrics.get("unexpected_condition")
    # Each row matching the condition contributes 1, every other row 0,
    # so SUM over the flag yields the unexpected count.
    per_row_flag = sa.case(
        [(unexpected_condition, 1)],
        else_=0,
    )
    return sa.func.sum(per_row_flag), compute_domain_kwargs, accessor_domain_kwargs
def _sqlalchemy_map_condition_unexpected_count_value(
    cls,
    execution_engine: "SqlAlchemyExecutionEngine",
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """Returns unexpected count for MapExpectations. This is a *value* metric, which is useful for
    when the unexpected_condition is a window function.

    The per-row 0/1 condition flags are first materialized into a temporary
    table and then summed, because a window-function condition cannot be placed
    directly inside an aggregate of a single SELECT.
    """
    # "unexpected_condition" resolves to (sqlalchemy boolean expression,
    # compute_domain_kwargs, accessor_domain_kwargs).
    unexpected_condition, compute_domain_kwargs, accessor_domain_kwargs = metrics.get(
        "unexpected_condition"
    )
    (selectable, _, _,) = execution_engine.get_compute_domain(
        compute_domain_kwargs, domain_type="identity"
    )
    # Randomized suffix avoids collisions between concurrent validations.
    temp_table_name: str = f"ge_tmp_{str(uuid.uuid4())[:8]}"
    if execution_engine.dialect == "mssql":
        # mssql expects all temporary table names to have a prefix '#'
        temp_table_name = f"#{temp_table_name}"
    # Run the create / insert / select sequence inside one transaction.
    with execution_engine.engine.begin():
        metadata: sa.MetaData = sa.MetaData(execution_engine.engine)
        temp_table_obj: sa.Table = sa.Table(
            temp_table_name,
            metadata,
            sa.Column("condition", sa.Integer, primary_key=False, nullable=False),
        )
        temp_table_obj.create(execution_engine.engine, checkfirst=True)
        # One 0/1 label per source row: 1 when the condition flags the row.
        count_case_statement: List[sa.sql.elements.Label] = [
            sa.case(
                [
                    (
                        unexpected_condition,
                        1,
                    )
                ],
                else_=0,
            ).label("condition")
        ]
        # INSERT ... SELECT the per-row flags into the temp table.
        inner_case_query: sa.sql.dml.Insert = temp_table_obj.insert().from_select(
            count_case_statement,
            sa.select(count_case_statement).select_from(selectable),
        )
        execution_engine.engine.execute(inner_case_query)
        # Sum the flags back out of the temp table.
        unexpected_count_query: sa.Select = (
            sa.select(
                [
                    sa.func.sum(sa.column("condition")).label("unexpected_count"),
                ]
            )
            .select_from(temp_table_obj)
            .alias("UnexpectedCountSubquery")
        )
        unexpected_count = execution_engine.engine.execute(
            sa.select(
                [
                    unexpected_count_query.c.unexpected_count,
                ]
            )
        ).scalar()
    # Normalize DB-specific numeric types for JSON-ready metric output.
    return convert_to_json_serializable(unexpected_count)
def _sqlalchemy_column_map_condition_values(
    cls,
    execution_engine: "SqlAlchemyExecutionEngine",
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """Fetch the column values of all rows matching the unexpected condition.

    Used by ColumnMapExpectations to report unexpected values; honors the
    "partial_unexpected_count" limit unless result_format is COMPLETE.
    """
    (
        unexpected_condition,
        compute_domain_kwargs,
        accessor_domain_kwargs,
    ) = metrics.get("unexpected_condition")
    (selectable, _, _,) = execution_engine.get_compute_domain(
        compute_domain_kwargs, domain_type="identity"
    )
    result_format = metric_value_kwargs["result_format"]
    if "column" not in accessor_domain_kwargs:
        raise ValueError(
            "_sqlalchemy_column_map_condition_values requires a column in accessor_domain_kwargs"
        )
    # Select only the domain column, filtered to rows the condition flags.
    unexpected_values_query = (
        sa.select(
            [sa.column(accessor_domain_kwargs.get("column")).label("unexpected_values")]
        )
        .select_from(selectable)
        .where(unexpected_condition)
    )
    if result_format["result_format"] != "COMPLETE":
        unexpected_values_query = unexpected_values_query.limit(
            result_format["partial_unexpected_count"]
        )
    fetched_rows = execution_engine.engine.execute(unexpected_values_query).fetchall()
    return [row.unexpected_values for row in fetched_rows]
def _sqlalchemy_column_map_condition_value_counts(
    cls,
    execution_engine: "SqlAlchemyExecutionEngine",
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """Fetch (value, count) pairs for the distinct unexpected values of the column.

    Used by ColumnMapExpectations to summarize which values failed and how often.
    """
    (
        unexpected_condition,
        compute_domain_kwargs,
        accessor_domain_kwargs,
    ) = metrics.get("unexpected_condition")
    (selectable, _, _,) = execution_engine.get_compute_domain(
        compute_domain_kwargs, domain_type="identity"
    )
    if "column" not in accessor_domain_kwargs:
        raise ValueError(
            "_sqlalchemy_column_map_condition_value_counts requires a column in accessor_domain_kwargs"
        )
    target_column = sa.column(accessor_domain_kwargs["column"])
    # GROUP BY the domain column over condition-flagged rows only.
    value_counts_query = (
        sa.select([target_column, sa.func.count(target_column)])
        .select_from(selectable)
        .where(unexpected_condition)
        .group_by(target_column)
    )
    return execution_engine.engine.execute(value_counts_query).fetchall()
def _sqlalchemy_map_condition_rows(
    cls,
    execution_engine: "SqlAlchemyExecutionEngine",
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """Fetch entire rows matching the unexpected condition.

    Honors the "partial_unexpected_count" limit unless result_format is
    COMPLETE.
    """
    (
        unexpected_condition,
        compute_domain_kwargs,
        accessor_domain_kwargs,
    ) = metrics.get("unexpected_condition")
    (selectable, _, _,) = execution_engine.get_compute_domain(
        compute_domain_kwargs, domain_type="identity"
    )
    result_format = metric_value_kwargs["result_format"]
    # SELECT * over condition-flagged rows of the resolved domain.
    unexpected_rows_query = (
        sa.select([sa.text("*")])
        .select_from(selectable)
        .where(unexpected_condition)
    )
    if result_format["result_format"] != "COMPLETE":
        unexpected_rows_query = unexpected_rows_query.limit(
            result_format["partial_unexpected_count"]
        )
    return execution_engine.engine.execute(unexpected_rows_query).fetchall()
def _spark_map_condition_unexpected_count_aggregate_fn(
    cls,
    execution_engine: "SparkDFExecutionEngine",
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """Build the Spark aggregate partial that counts unexpected rows.

    Returns a (Spark Column aggregate, compute_domain_kwargs,
    accessor_domain_kwargs) triple; the aggregate sums a per-row 0/1 flag.
    """
    (
        unexpected_condition,
        compute_domain_kwargs,
        accessor_domain_kwargs,
    ) = metrics.get("unexpected_condition")
    # 1 for condition-flagged rows, 0 otherwise; SUM yields the count.
    per_row_flag = F.when(unexpected_condition, 1).otherwise(0)
    return F.sum(per_row_flag), compute_domain_kwargs, accessor_domain_kwargs
def _spark_map_condition_unexpected_count_value(
    cls,
    execution_engine: "SparkDFExecutionEngine",
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """Count unexpected rows as a *value* metric (needed when the condition is
    a window function and cannot be folded into an aggregate partial).
    """
    (
        unexpected_condition,
        compute_domain_kwargs,
        accessor_domain_kwargs,
    ) = metrics.get("unexpected_condition")
    (df, _, _) = execution_engine.get_compute_domain(
        domain_kwargs=compute_domain_kwargs, domain_type="identity"
    )
    # Materialize the condition as a helper column, keep flagged rows, count.
    flagged = df.withColumn("__unexpected", unexpected_condition)
    unexpected_rows = flagged.filter(F.col("__unexpected") == True).drop(
        F.col("__unexpected")
    )
    return unexpected_rows.count()
def spark_column_map_condition_values(
    cls,
    execution_engine: "SparkDFExecutionEngine",
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """Collect the domain-column values of all rows flagged by the condition.

    Honors the "partial_unexpected_count" limit unless result_format is
    COMPLETE.
    """
    (
        unexpected_condition,
        compute_domain_kwargs,
        accessor_domain_kwargs,
    ) = metrics.get("unexpected_condition")
    (df, _, _) = execution_engine.get_compute_domain(
        domain_kwargs=compute_domain_kwargs, domain_type="identity"
    )
    flagged = df.withColumn("__unexpected", unexpected_condition)
    if "column" not in accessor_domain_kwargs:
        raise ValueError(
            "spark_column_map_condition_values requires a column in accessor_domain_kwargs"
        )
    column_name = accessor_domain_kwargs["column"]
    result_format = metric_value_kwargs["result_format"]
    unexpected_rows = flagged.filter(F.col("__unexpected") == True).drop(
        F.col("__unexpected")
    )
    selected = unexpected_rows.select(F.col(column_name))
    if result_format["result_format"] != "COMPLETE":
        selected = selected.limit(result_format["partial_unexpected_count"])
    collected = selected.collect()
    return [row[column_name] for row in collected]
def _spark_column_map_condition_value_counts(
    cls,
    execution_engine: "SparkDFExecutionEngine",
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """Return (value, count) rows for the distinct unexpected values of the column.

    Groups the condition-flagged rows by the domain column; the collected result
    is truncated to "partial_unexpected_count" rows unless result_format is
    COMPLETE.

    Raises:
        ValueError: if accessor_domain_kwargs has no "column".
    """
    (
        unexpected_condition,
        compute_domain_kwargs,
        accessor_domain_kwargs,
    ) = metrics.get("unexpected_condition")
    (df, _, _) = execution_engine.get_compute_domain(
        domain_kwargs=compute_domain_kwargs, domain_type="identity"
    )
    data = df.withColumn("__unexpected", unexpected_condition)
    if "column" not in accessor_domain_kwargs:
        # BUG FIX: the message previously named spark_column_map_condition_values
        # (copy-paste from the sibling function).
        raise ValueError(
            "_spark_column_map_condition_value_counts requires a column in accessor_domain_kwargs"
        )
    column_name = accessor_domain_kwargs["column"]
    result_format = metric_value_kwargs["result_format"]
    filtered = data.filter(F.col("__unexpected") == True).drop(F.col("__unexpected"))
    value_counts = filtered.groupBy(F.col(column_name)).count()
    if result_format["result_format"] == "COMPLETE":
        rows = value_counts.collect()
    else:
        # NOTE: collects all groups and truncates driver-side, as before.
        rows = value_counts.collect()[: result_format["partial_unexpected_count"]]
    return rows
def _spark_map_condition_rows(
    cls,
    # BUG FIX: annotation previously said "PandasExecutionEngine" (copy-paste);
    # this is the Spark implementation.
    execution_engine: "SparkDFExecutionEngine",
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """Collect entire rows flagged unexpected by the condition.

    Honors the "partial_unexpected_count" limit unless result_format is
    COMPLETE.
    """
    (
        unexpected_condition,
        compute_domain_kwargs,
        accessor_domain_kwargs,
    ) = metrics.get("unexpected_condition")
    (df, _, _) = execution_engine.get_compute_domain(
        domain_kwargs=compute_domain_kwargs, domain_type="identity"
    )
    data = df.withColumn("__unexpected", unexpected_condition)
    result_format = metric_value_kwargs["result_format"]
    filtered = data.filter(F.col("__unexpected") == True).drop(F.col("__unexpected"))
    if result_format["result_format"] == "COMPLETE":
        return filtered.collect()
    return filtered.limit(result_format["partial_unexpected_count"]).collect()
class MapMetricProvider(MetricProvider):
    """Base class for "map" metrics, which evaluate a condition or function row-wise.

    Subclasses declare ``condition_metric_name`` (row-level boolean condition)
    and/or ``function_metric_name`` (row-level mapped value) and provide
    engine-specific implementations via decorated methods.
    :meth:`_register_metric_functions` then registers the derived metric family
    (``.condition``, ``.unexpected_count``, ``.unexpected_values``, ...) for
    each supported execution engine.
    """

    # Domain keys for condition metrics; subclasses may extend (e.g. "column").
    condition_domain_keys = (
        "batch_id",
        "table",
        "row_condition",
        "condition_parser",
    )
    # Domain keys for map-function metrics.
    function_domain_keys = (
        "batch_id",
        "table",
        "row_condition",
        "condition_parser",
    )
    # Extra value keys contributed by subclasses; none by default.
    condition_value_keys = tuple()
    function_value_keys = tuple()
    # Pandas-engine default: drop nulls in the domain column before evaluating
    # the condition (consumed via getattr by the _pandas_* metric functions).
    filter_column_isnull = True

    @classmethod
    def _register_metric_functions(cls):
        """Register the derived metrics for every decorated engine implementation."""
        # Abstract intermediates declare no metric names; nothing to register.
        if not hasattr(cls, "function_metric_name") and not hasattr(
            cls, "condition_metric_name"
        ):
            return

        for attr, candidate_metric_fn in cls.__dict__.items():
            if not hasattr(candidate_metric_fn, "metric_engine"):
                # This is not a metric
                continue
            metric_fn_type = getattr(candidate_metric_fn, "metric_fn_type")
            engine = candidate_metric_fn.metric_engine
            if not issubclass(engine, ExecutionEngine):
                raise ValueError(
                    "metric functions must be defined with an Execution Engine"
                )

            if metric_fn_type in [
                MetricPartialFunctionTypes.MAP_CONDITION_SERIES,
                MetricPartialFunctionTypes.MAP_CONDITION_FN,
                MetricPartialFunctionTypes.WINDOW_CONDITION_FN,
            ]:
                if not hasattr(cls, "condition_metric_name"):
                    # BUG FIX: the attribute checked is "condition_metric_name";
                    # the message previously said "metric_condition_name".
                    raise ValueError(
                        "A MapMetricProvider must have a condition_metric_name to have a decorated column_condition_partial method."
                    )
                condition_provider = candidate_metric_fn
                metric_name = cls.condition_metric_name
                metric_domain_keys = cls.condition_domain_keys
                metric_value_keys = cls.condition_value_keys
                metric_definition_kwargs = getattr(
                    condition_provider, "metric_definition_kwargs", dict()
                )
                # Prefer the domain type stamped on the provider by its
                # decorator, then its definition kwargs, then TABLE.
                domain_type = getattr(
                    condition_provider,
                    "domain_type",
                    metric_definition_kwargs.get(
                        "domain_type", MetricDomainTypes.TABLE
                    ),
                )
                if issubclass(engine, PandasExecutionEngine):
                    register_metric(
                        metric_name=metric_name + ".condition",
                        metric_domain_keys=metric_domain_keys,
                        metric_value_keys=metric_value_keys,
                        execution_engine=engine,
                        metric_class=cls,
                        metric_provider=condition_provider,
                        metric_fn_type=metric_fn_type,
                    )
                    register_metric(
                        metric_name=metric_name + ".unexpected_count",
                        metric_domain_keys=metric_domain_keys,
                        metric_value_keys=metric_value_keys,
                        execution_engine=engine,
                        metric_class=cls,
                        metric_provider=_pandas_map_condition_unexpected_count,
                        metric_fn_type=MetricFunctionTypes.VALUE,
                    )
                    register_metric(
                        metric_name=metric_name + ".unexpected_index_list",
                        metric_domain_keys=metric_domain_keys,
                        metric_value_keys=(*metric_value_keys, "result_format"),
                        execution_engine=engine,
                        metric_class=cls,
                        metric_provider=_pandas_map_condition_index,
                        metric_fn_type=MetricFunctionTypes.VALUE,
                    )
                    register_metric(
                        metric_name=metric_name + ".unexpected_rows",
                        metric_domain_keys=metric_domain_keys,
                        metric_value_keys=(*metric_value_keys, "result_format"),
                        execution_engine=engine,
                        metric_class=cls,
                        metric_provider=_pandas_map_condition_rows,
                        metric_fn_type=MetricFunctionTypes.VALUE,
                    )
                    # Column-domain conditions additionally expose the values
                    # and value counts of the offending column entries.
                    if domain_type == MetricDomainTypes.COLUMN:
                        register_metric(
                            metric_name=metric_name + ".unexpected_values",
                            metric_domain_keys=metric_domain_keys,
                            metric_value_keys=(*metric_value_keys, "result_format"),
                            execution_engine=engine,
                            metric_class=cls,
                            metric_provider=_pandas_column_map_condition_values,
                            metric_fn_type=MetricFunctionTypes.VALUE,
                        )
                        register_metric(
                            metric_name=metric_name + ".unexpected_value_counts",
                            metric_domain_keys=metric_domain_keys,
                            metric_value_keys=(*metric_value_keys, "result_format"),
                            execution_engine=engine,
                            metric_class=cls,
                            metric_provider=_pandas_column_map_condition_value_counts,
                            metric_fn_type=MetricFunctionTypes.VALUE,
                        )
                elif issubclass(engine, SqlAlchemyExecutionEngine):
                    register_metric(
                        metric_name=metric_name + ".condition",
                        metric_domain_keys=metric_domain_keys,
                        metric_value_keys=metric_value_keys,
                        execution_engine=engine,
                        metric_class=cls,
                        metric_provider=condition_provider,
                        metric_fn_type=metric_fn_type,
                    )
                    if metric_fn_type == MetricPartialFunctionTypes.MAP_CONDITION_FN:
                        # Plain conditions can be counted via an aggregate
                        # partial; the count value is derived from it
                        # (provider=None).
                        register_metric(
                            metric_name=metric_name + ".unexpected_count.aggregate_fn",
                            metric_domain_keys=metric_domain_keys,
                            metric_value_keys=metric_value_keys,
                            execution_engine=engine,
                            metric_class=cls,
                            metric_provider=_sqlalchemy_map_condition_unexpected_count_aggregate_fn,
                            metric_fn_type=MetricPartialFunctionTypes.AGGREGATE_FN,
                        )
                        register_metric(
                            metric_name=metric_name + ".unexpected_count",
                            metric_domain_keys=metric_domain_keys,
                            metric_value_keys=metric_value_keys,
                            execution_engine=engine,
                            metric_class=cls,
                            metric_provider=None,
                            metric_fn_type=MetricFunctionTypes.VALUE,
                        )
                    elif (
                        metric_fn_type == MetricPartialFunctionTypes.WINDOW_CONDITION_FN
                    ):
                        # Window conditions need the temp-table based counter.
                        register_metric(
                            metric_name=metric_name + ".unexpected_count",
                            metric_domain_keys=metric_domain_keys,
                            metric_value_keys=metric_value_keys,
                            execution_engine=engine,
                            metric_class=cls,
                            metric_provider=_sqlalchemy_map_condition_unexpected_count_value,
                            metric_fn_type=MetricFunctionTypes.VALUE,
                        )
                    register_metric(
                        metric_name=metric_name + ".unexpected_rows",
                        metric_domain_keys=metric_domain_keys,
                        metric_value_keys=(*metric_value_keys, "result_format"),
                        execution_engine=engine,
                        metric_class=cls,
                        metric_provider=_sqlalchemy_map_condition_rows,
                        metric_fn_type=MetricFunctionTypes.VALUE,
                    )
                    if domain_type == MetricDomainTypes.COLUMN:
                        register_metric(
                            metric_name=metric_name + ".unexpected_values",
                            metric_domain_keys=metric_domain_keys,
                            metric_value_keys=(*metric_value_keys, "result_format"),
                            execution_engine=engine,
                            metric_class=cls,
                            metric_provider=_sqlalchemy_column_map_condition_values,
                            metric_fn_type=MetricFunctionTypes.VALUE,
                        )
                        register_metric(
                            metric_name=metric_name + ".unexpected_value_counts",
                            metric_domain_keys=metric_domain_keys,
                            metric_value_keys=(*metric_value_keys, "result_format"),
                            execution_engine=engine,
                            metric_class=cls,
                            metric_provider=_sqlalchemy_column_map_condition_value_counts,
                            metric_fn_type=MetricFunctionTypes.VALUE,
                        )
                elif issubclass(engine, SparkDFExecutionEngine):
                    register_metric(
                        metric_name=metric_name + ".condition",
                        metric_domain_keys=metric_domain_keys,
                        metric_value_keys=metric_value_keys,
                        execution_engine=engine,
                        metric_class=cls,
                        metric_provider=condition_provider,
                        metric_fn_type=metric_fn_type,
                    )
                    if metric_fn_type == MetricPartialFunctionTypes.MAP_CONDITION_FN:
                        register_metric(
                            metric_name=metric_name + ".unexpected_count.aggregate_fn",
                            metric_domain_keys=metric_domain_keys,
                            metric_value_keys=metric_value_keys,
                            execution_engine=engine,
                            metric_class=cls,
                            metric_provider=_spark_map_condition_unexpected_count_aggregate_fn,
                            metric_fn_type=MetricPartialFunctionTypes.AGGREGATE_FN,
                        )
                        register_metric(
                            metric_name=metric_name + ".unexpected_count",
                            metric_domain_keys=metric_domain_keys,
                            metric_value_keys=metric_value_keys,
                            execution_engine=engine,
                            metric_class=cls,
                            metric_provider=None,
                            metric_fn_type=MetricFunctionTypes.VALUE,
                        )
                    elif (
                        metric_fn_type == MetricPartialFunctionTypes.WINDOW_CONDITION_FN
                    ):
                        register_metric(
                            metric_name=metric_name + ".unexpected_count",
                            metric_domain_keys=metric_domain_keys,
                            metric_value_keys=metric_value_keys,
                            execution_engine=engine,
                            metric_class=cls,
                            metric_provider=_spark_map_condition_unexpected_count_value,
                            metric_fn_type=MetricFunctionTypes.VALUE,
                        )
                    register_metric(
                        metric_name=metric_name + ".unexpected_rows",
                        metric_domain_keys=metric_domain_keys,
                        metric_value_keys=(*metric_value_keys, "result_format"),
                        execution_engine=engine,
                        metric_class=cls,
                        metric_provider=_spark_map_condition_rows,
                        metric_fn_type=MetricFunctionTypes.VALUE,
                    )
                    if domain_type == MetricDomainTypes.COLUMN:
                        register_metric(
                            metric_name=metric_name + ".unexpected_values",
                            metric_domain_keys=metric_domain_keys,
                            metric_value_keys=(*metric_value_keys, "result_format"),
                            execution_engine=engine,
                            metric_class=cls,
                            metric_provider=spark_column_map_condition_values,
                            metric_fn_type=MetricFunctionTypes.VALUE,
                        )
                        register_metric(
                            metric_name=metric_name + ".unexpected_value_counts",
                            metric_domain_keys=metric_domain_keys,
                            metric_value_keys=(*metric_value_keys, "result_format"),
                            execution_engine=engine,
                            metric_class=cls,
                            metric_provider=_spark_column_map_condition_value_counts,
                            metric_fn_type=MetricFunctionTypes.VALUE,
                        )
            elif metric_fn_type in [
                MetricPartialFunctionTypes.MAP_SERIES,
                MetricPartialFunctionTypes.MAP_FN,
                MetricPartialFunctionTypes.WINDOW_FN,
            ]:
                if not hasattr(cls, "function_metric_name"):
                    raise ValueError(
                        "A MapMetricProvider must have a function_metric_name to have a decorated column_function_partial method."
                    )
                map_function_provider = candidate_metric_fn
                metric_name = cls.function_metric_name
                metric_domain_keys = cls.function_domain_keys
                metric_value_keys = cls.function_value_keys
                register_metric(
                    metric_name=metric_name + ".map",
                    metric_domain_keys=metric_domain_keys,
                    metric_value_keys=metric_value_keys,
                    execution_engine=engine,
                    metric_class=cls,
                    metric_provider=map_function_provider,
                    metric_fn_type=metric_fn_type,
                )

    @classmethod
    def _get_evaluation_dependencies(
        cls,
        metric: MetricConfiguration,
        configuration: Optional[ExpectationConfiguration] = None,
        execution_engine: Optional[ExecutionEngine] = None,
        runtime_configuration: Optional[dict] = None,
    ):
        """Declare the metrics a derived map metric depends on.

        ``*.unexpected_count`` depends either on an engine aggregate partial
        (when one is registered) or directly on the ``*.condition`` metric;
        the other ``*.unexpected_*`` metrics always depend on ``*.condition``.
        """
        metric_name = metric.metric_name
        # result_format does not change the underlying condition, so it is
        # stripped from dependency kwargs for better metric-cache reuse.
        base_metric_value_kwargs = {
            k: v for k, v in metric.metric_value_kwargs.items() if k != "result_format"
        }
        dependencies = dict()

        metric_suffix = ".unexpected_count"
        if metric_name.endswith(metric_suffix):
            try:
                _ = get_metric_provider(metric_name + ".aggregate_fn", execution_engine)
                has_aggregate_fn = True
            except MetricProviderError:
                has_aggregate_fn = False
            if has_aggregate_fn:
                dependencies["metric_partial_fn"] = MetricConfiguration(
                    metric_name + ".aggregate_fn",
                    metric.metric_domain_kwargs,
                    base_metric_value_kwargs,
                )
            else:
                dependencies["unexpected_condition"] = MetricConfiguration(
                    metric_name[: -len(metric_suffix)] + ".condition",
                    metric.metric_domain_kwargs,
                    base_metric_value_kwargs,
                )

        # MapMetric uses the condition to build unexpected_count.aggregate_fn as well
        metric_suffix = ".unexpected_count.aggregate_fn"
        if metric_name.endswith(metric_suffix):
            dependencies["unexpected_condition"] = MetricConfiguration(
                metric_name[: -len(metric_suffix)] + ".condition",
                metric.metric_domain_kwargs,
                base_metric_value_kwargs,
            )

        for metric_suffix in [
            ".unexpected_values",
            ".unexpected_value_counts",
            ".unexpected_index_list",
            ".unexpected_rows",
        ]:
            if metric_name.endswith(metric_suffix):
                dependencies["unexpected_condition"] = MetricConfiguration(
                    metric_name[: -len(metric_suffix)] + ".condition",
                    metric.metric_domain_kwargs,
                    base_metric_value_kwargs,
                )

        # A registered ".map" partial (row-mapped values) is an optional dependency.
        try:
            _ = get_metric_provider(metric_name + ".map", execution_engine)
            dependencies["metric_map_fn"] = MetricConfiguration(
                metric_name + ".map",
                metric.metric_domain_kwargs,
                metric.metric_value_kwargs,
            )
        except MetricProviderError:
            pass

        return dependencies
class ColumnMapMetricProvider(MapMetricProvider):
    """MapMetricProvider specialization whose domain is a single column.

    Adds "column" to the domain keys for both condition and function metrics;
    value keys remain empty by default.
    """

    condition_domain_keys = (
        "batch_id",
        "table",
        "column",
        "row_condition",
        "condition_parser",
    )
    function_domain_keys = (
        "batch_id",
        "table",
        "column",
        "row_condition",
        "condition_parser",
    )
    condition_value_keys = ()
    function_value_keys = ()
|
hadi1976/python_project | codes/main.py | <reponame>hadi1976/python_project<filename>codes/main.py
from typing import TextIO
from pipes import TypeOfPipe
import flowrate_factors
import performances
import plots
from config import *
"""
Author: <NAME>
Author: <NAME>
The script "main.py" has the function of integrating the outputs from all
the codes, through the implementation of functions. These functions are going
to be called in the "GUI.py" code.
"""
"""
First part of the code: Integrating "flowrate_factors" outputs through
functions, for be referenced in the GUI.
"""
def consumption():
    """Return the per-capita consumption for the selected country (lit/sec.Capita)."""
    return flowrate_factors.get_flow_discharge()
def hourly():
    """Return the maximum hourly flow discharge (lit/hour), formatted to 2 decimals."""
    return f"{flowrate_factors.max_hourly_flow():.2f}"
def daily():
    """Return the maximum daily flow discharge (lit/day), formatted to 2 decimals."""
    return f"{flowrate_factors.max_daily_flow():.2f}"
"""
Second part of the code: Instantiating the objects from the TypeOfPipe class.
And, integrating this class outputs through functions, to be called in the GUI.
"""
# Reading the length, inputted by the user, from the GUI.
with open(transmission_pipe_length) as file:
transmission_pipe_length = float(file.read())
# Instantiating transmission_pipeline as an object from 'TypeOfPipe' class.
transmission_pipeline = TypeOfPipe(flowrate_factors.max_daily_flow(),
transmission_pipe_length)
# Reading the length, inputted by the user, from the GUI.
with open(distribution_pipe_length) as file:
distribution_pipe_length = float(file.read())
# Instantiating distribution_pipeline as an object from 'TypeOfPipe' class.
distribution_pipeline = TypeOfPipe(flowrate_factors.max_hourly_flow(),
distribution_pipe_length)
# Referencing the method 'pipe_dimensioning' from 'TypeOfPipe' class.
transmission_dim = transmission_pipeline.pipe_dimensioning()
distribution_dim = distribution_pipeline.pipe_dimensioning()
# Integrating the pipes dimensions, for be referenced in the GUI.
def diameter_transmission():
    """Return the transmission pipeline diameter in (mm), as a string."""
    diameter_mm = transmission_dim[4]
    return str(diameter_mm)
def velocity_transmission():
    """Return the transmission pipeline velocity in (m/s), as a string."""
    velocity_m_s = transmission_dim[5]
    return str(velocity_m_s)
def reynolds_transmission():
    """Return the transmission pipeline Reynolds number, formatted to 2 decimals."""
    return f"{transmission_dim[0]:.2f}"
def friction_transmission():
    """Return the transmission pipeline friction factor, formatted to 2 decimals."""
    return f"{transmission_dim[1]:.2f}"
def slope_transmission():
    """Return the transmission pipeline slope in (%), formatted to 5 decimals."""
    return f"{transmission_dim[2]:.5f}"
def loss_transmission():
    """Return the transmission pipeline head loss in (m), formatted to 5 decimals."""
    return f"{transmission_dim[3]:.5f}"
def diameter_distribution():
    """Return the distribution pipeline diameter in (mm), as a string."""
    diameter_mm = distribution_dim[4]
    return str(diameter_mm)
def velocity_distribution():
    """Return the distribution pipeline velocity in (m/s), as a string."""
    velocity_m_s = distribution_dim[5]
    return str(velocity_m_s)
def reynolds_distribution():
    """Return the distribution pipeline Reynolds number, formatted to 2 decimals."""
    return f"{distribution_dim[0]:.2f}"
def friction_distribution():
    """Return the distribution pipeline friction factor, formatted to 2 decimals."""
    return f"{distribution_dim[1]:.2f}"
def slope_distribution():
    """Return the distribution pipeline slope in (%), formatted to 5 decimals."""
    return f"{distribution_dim[2]:.5f}"
def head_distribution():
    """Return the distribution pipeline head loss in (m), formatted to 5 decimals."""
    return f"{distribution_dim[3]:.5f}"
"""
Third part of the code: Pumps "Model 2009 1760 RPM" performances graphs:
basic, parallel arrangement, series arrangement and combined arrangement.
Also, the theoretical explanation for the user, about what can be seen in the
plot. This two functions will be called in the GUI.
"""
# Read the elevation (m) the user entered in the GUI; used by plot() and
# explanation() below.  (Previous comment was a copy-paste from the plot code.)
with open(user_elevation) as file:
    elevation = float(file.read())
def plot():
    """Return the pump-arrangement plot that matches the user's elevation and flow.

    Dispatches to one of plots.plot_one() ... plots.plot_five() depending on
    which elevation/flow condition holds.

    NOTE(review): returns None implicitly if no condition matches (e.g. for
    non-positive inputs) -- confirm the GUI tolerates that.  Also, the branches
    mix the locally read `elevation` with `performances.user_elevation`;
    presumably these hold the same value -- verify.
    """
    # Single pump, Model 2009 1760 RPM.
    if performances.max_elevation_one >= elevation > 0 and \
            performances.max_flow_one >= performances.flow_discharge > 0:
        return plots.plot_one()
    # Single pump, Model 8013 1760 RPM.
    elif performances.max_elevation_two >= elevation > 0 and \
            performances.flow_discharge <= performances.max_flow_two:
        return plots.plot_two()
    # Model 8013 1760 RPM pumps arranged in parallel.
    elif performances.user_elevation > performances.max_elevation_two and \
            float(performances.flow_discharge) < \
            float(performances.max_flow_two) and\
            performances.user_elevation > 0:
        return plots.plot_three()
    # Model 8013 1760 RPM pumps arranged in series.
    elif performances.max_elevation_two > elevation > 0 and \
            performances.flow_discharge > performances.max_flow_two:
        return plots.plot_four()
    # Combined (parallel + series) arrangement of Model 8013 pumps.
    elif performances.user_elevation > performances.max_elevation_two and \
            performances.flow_discharge > performances.max_flow_two and \
            performances.user_elevation > 0:
        return plots.plot_five()
def explanation():
    """Return the textual explanation matching the plot chosen by plot().

    The branch conditions mirror plot(); each branch formats a message with the
    user's elevation, flow discharge and (where applicable) the number of pumps
    required.

    NOTE(review): returns None implicitly if no condition matches -- confirm
    the GUI tolerates that.
    """
    # Single pump, Model 2009 1760 RPM.
    if performances.max_elevation_one >= elevation > 0 and \
            performances.max_flow_one >= performances.flow_discharge > 0:
        return ("For an elevation of %d m and flow discharge of %d lit/sec,"
                "is needed one pump of Model 2009 1760 RPM, with the "
                "performance shown in the graph."
                % (performances.user_elevation, performances.flow_discharge))
    # Single pump, Model 8013 1760 RPM.
    elif performances.max_elevation_two >= elevation > 0 and \
            performances.flow_discharge <= performances.max_flow_two:
        return (
            "For an elevation of %d m and flow discharge of %d, "
            "is needed one pump of Model 8013 1760 RPM, with the "
            "performance shown in the graph."
            % (performances.user_elevation,
               performances.flow_discharge))
    # Model 8013 1760 RPM pumps arranged in parallel.
    elif performances.user_elevation > performances.max_elevation_two and \
            float(performances.flow_discharge) \
            < float(performances.max_flow_two) and \
            performances.user_elevation > 0:
        x = 1
        # Find the smallest pump count whose stacked head exceeds the demand.
        for x in range(100000):
            # Breaks when arranged pumps elevation is higher than user one.
            if (
                    x * performances.max_elevation_two) > \
                    performances.user_elevation:
                break
        return (
            "For an elevation of %d m and flow discharge of %d lit/sec, "
            "in the Model 8013 1760 RPM, %d pumps has to be arranged in"
            " parallel, in order to supply the demand. Please observe the "
            "graph shown for a better understanding."
            % (performances.user_elevation,
               performances.flow_discharge, x))
    # Model 8013 1760 RPM pumps arranged in series.
    elif performances.max_elevation_two > elevation > 0 and \
            performances.flow_discharge > performances.max_flow_two:
        x = 1
        # Find the smallest pump count whose combined flow exceeds the demand.
        for x in range(100000):
            # Breaks when arranged pumps flow is higher than the user one.
            if (x * performances.max_flow_two) > performances.flow_discharge:
                break
        return ("For an elevation of %d m and flow discharge of %d lit/sec, "
                "in the Model 8013 1760 RPM, %d pumps has to be arranged in "
                "series, in order to supply the demand. Please observe the "
                "graph shown for a better understanding."
                % (performances.user_elevation,
                   performances.flow_discharge, x))
    # Combined (parallel + series) arrangement of Model 8013 pumps.
    elif performances.user_elevation > performances.max_elevation_two and \
            performances.flow_discharge > performances.max_flow_two and \
            performances.user_elevation > 0:
        x = 1
        z = 1
        # Iterating to find the amount of pumps arranged in parallel.
        for x in range(100000):
            # Breaks when arranged pumps elevation is higher than user one.
            if (
                    x * performances.max_elevation_two) > \
                    performances.user_elevation:
                break
        # ...and the number of series stages needed for the flow.
        for z in range(100000):
            # Breaks when arranged pumps flow is higher than the user one.
            if (z * performances.max_flow_two) > performances.flow_discharge:
                break
        return ("For an elevation of %d m and flow discharge of %d lit/sec, "
                "in the Model 8013 1760 RPM, %d pumps has to be arranged in "
                "parallel with %d series stages, in order to supply the "
                "demand. "
                "Please observe the graph "
                "shown for a better understanding."
                % (performances.user_elevation, performances.flow_discharge,
                   x, z))
|
hadi1976/python_project | codes/plots.py | <filename>codes/plots.py
from config import *
import performances
import pumps_figure
"""
Author: <NAME>
The script "plots" as its name says, has the function of process the data input
by the user, as the outputs from files as: "flowrate_factors", "pumps_figure",
and "performances", in order to elaborate the graph according to the
established conditions.
"""
def plot_one():
    """
    Plot the pump-one performance curve that best fits the user inputs.

    The elevation returned by ``performances.pump_one_performance()``
    selects which of the five Model 2009 1760 RPM curves is drawn; the
    figure is saved as ``pump3.png`` and shown on screen.
    """
    # Best-fit elevation (m) selected by the performance evaluation.
    best_fit = performances.pump_one_performance()
    # Elevation -> (flow-range provider, head-curve provider, legend label).
    curve_by_elevation = {
        13: (pumps_figure.flux_rate1, pumps_figure.Pump1, "1.5HP(1.1kw)"),
        15: (pumps_figure.flux_rate2, pumps_figure.Pump2, "2HP(1.5kw)"),
        17: (pumps_figure.flux_rate3, pumps_figure.Pump3, "3HP(2.2kw)"),
        23: (pumps_figure.flux_rate4, pumps_figure.Pump4, "5HP(3.6kw)"),
        28: (pumps_figure.flux_rate5, pumps_figure.Pump5, "7.5HP(5.6kw)"),
    }
    if best_fit in curve_by_elevation:
        flux, pump, label = curve_by_elevation[best_fit]
        plt.figure(num=None, dpi=120)
        plt.plot(flux.flow_range(), pump.head_formula(), label=label)
    # Plotting legend specifications.
    plt.legend()
    plt.xlabel("flow rate Lit/Sec")
    plt.ylabel("Head (m)")
    plt.title("Model 2009 1760 RPM")
    plt.savefig("pump3.png")
    plt.show()
def plot_two():
    """
    Plot the pump-two performance curve that best fits the user inputs.

    The elevation returned by ``performances.pump_two_performance()``
    selects which of the five Model 8013 1760 RPM curves is drawn; the
    figure is saved as ``pump4.png`` and shown on screen.
    """
    # Best-fit elevation (m) selected by the performance evaluation.
    best_fit = performances.pump_two_performance()
    # Elevation -> (flow-range provider, head-curve provider, legend label).
    curve_by_elevation = {
        22: (pumps_figure.flux_rate2_1, pumps_figure.Pump2_1, "60HP(44kw)"),
        29: (pumps_figure.flux_rate2_2, pumps_figure.Pump2_2, "75HP(56kw)"),
        35: (pumps_figure.flux_rate2_3, pumps_figure.Pump2_3,
             "100HP(74.5kw)"),
        43: (pumps_figure.flux_rate2_4, pumps_figure.Pump2_4,
             "125HP(93.2kw)"),
        55: (pumps_figure.flux_rate2_5, pumps_figure.Pump2_5,
             "150HP(111.85kw)"),
    }
    if best_fit in curve_by_elevation:
        flux, pump, label = curve_by_elevation[best_fit]
        plt.figure(num=None, dpi=120)
        plt.plot(flux.flow_range(), pump.head_formula(), label=label)
    # Plotting legend specifications.
    plt.legend()
    plt.xlabel("flow rate Lit/Sec")
    plt.ylabel("Head (m)")
    plt.title("Model 8013 1760 RPM")
    plt.savefig("pump4.png")
    plt.show()
def plot_three():
    """
    Plot the parallel arrangement used when the user elevation exceeds
    the maximum elevation of the pump-two performance curve.

    The figure is saved as ``pump5.png`` and shown on screen.
    """
    # Smallest count whose combined elevation exceeds the user one
    # (99999 mirrors the original loop exhausting range(100000)).
    count = next(
        (n for n in range(100000)
         if n * performances.max_elevation_two > performances.user_elevation),
        99999,
    )
    flow = pumps_figure.flux_rate2_5.flow_range()
    scale = count + 1
    # Scaled head curve for the arranged pumps.
    head_parallel = (scale * -0.0003) * (flow ** 2) + \
        (scale * 0.0521) * flow + (scale * 53.481)
    # Plotting the arranged performance curves.
    plt.plot(flow, head_parallel, label="150HP(111.85kw)")
    # Plotting legend specifications.
    plt.legend()
    plt.xlabel("flow rate Lit/Sec")
    plt.ylabel("Head (m)")
    plt.title("Model 8013 1760 RPM")
    plt.savefig("pump5.png")
    plt.show()
def plot_four():
    """
    Plot the series arrangement used when the required flow discharge
    exceeds the maximum flow of the pump-two performance curve.

    The figure is saved as ``pump6.png`` and shown on screen.
    """
    # Smallest count whose combined flow exceeds the required discharge
    # (99999 mirrors the original loop exhausting range(100000)).
    count = next(
        (n for n in range(100000)
         if n * performances.max_flow_two > performances.flow_discharge),
        99999,
    )
    flow = pumps_figure.flux_rate2_5.flow_range()
    # Head curve over the stretched flow axis of the arranged pumps.
    head_series = (-0.0003) * ((flow * count) ** 2) + \
        0.0521 * (flow * count) + 53.481
    # Plotting the arranged performance curves.
    plt.plot(flow * (count + 1), head_series, label="150HP(111.85kw)")
    # Plotting legend specifications.
    plt.legend()
    plt.xlabel("flow rate Lit/Sec")
    plt.ylabel("Head (m)")
    plt.title("Model 8013 1760 RPM")
    plt.savefig("pump6.png")
    plt.show()
def plot_five():
    """
    Plot the combined parallel-and-series arrangement used when both the
    user elevation and the flow discharge exceed the pump-two capacity.

    The figure is saved as ``pump6.png`` and shown on screen.
    """
    flow = pumps_figure.flux_rate2_5.flow_range()
    # Smallest count whose combined elevation exceeds the user one
    # (99999 mirrors the original loop exhausting range(100000)).
    n_parallel = next(
        (n for n in range(100000)
         if n * performances.max_elevation_two > performances.user_elevation),
        99999,
    )
    head_parallel = (n_parallel * -0.0003) * (flow ** 2) + \
        (n_parallel * 0.0521) * flow + (n_parallel * 53.481)
    plt.plot(flow, head_parallel, label="150HP(111.85kw)")
    # Smallest count whose combined flow exceeds the required discharge.
    n_series = next(
        (n for n in range(100000)
         if n * performances.max_flow_two > performances.flow_discharge),
        99999,
    )
    head_series = (-0.0003) * ((flow * n_series) ** 2) + \
        0.0521 * (flow * n_series) + 53.481
    # Plotting the arranged performance curves.
    plt.plot(flow * (n_series + 1), head_series, label="150HP(111.85kw)")
    # Plotting legend specifications.
    plt.legend()
    plt.xlabel("flow rate Lit/Sec")
    plt.ylabel("Head (m)")
    plt.title("Model 8013 1760 RPM")
    # NOTE(review): same filename as plot_four -> this overwrites its
    # output when both are generated; confirm whether that is intended.
    plt.savefig("pump6.png")
    plt.show()
|
hadi1976/python_project | codes/diameters.py | <reponame>hadi1976/python_project<gh_stars>0
from config import*
class DiameterReader:
    """
    Author: <NAME>
    Read the 'EconomicDiameter' csv file into a pandas dataframe so the
    economic diameter/velocity table can be reused by other water
    distribution designing codes.
    """
    def __init__(self, csv_file_name="EconomicDiameter.csv", delimiter=","):
        """
        param csv_file_name: csv file with the economic diameter and velocity
        according to the flow discharge.
        param delimiter: separator in the csv file.
        """
        self.sep = delimiter
        # Bug fix: this used to be pre-assigned the *filename* before being
        # overwritten with the dataframe; start from None so the attribute
        # only ever holds a DataFrame (or "not read yet").
        self.size_diameter_velocity = None
        self.read_diameter_data(csv_file_name)
    def read_diameter_data(self, csv_file_name):
        """
        Reading the csv file and creating a pandas dataframe with the
        columns Flow, Diameter and Velocity.
        param: csv_file_name: csv file with the economic diameter and velocity
        according to the flow discharge.
        """
        self.size_diameter_velocity = pd.read_csv(csv_file_name,
                                                  header=0,
                                                  sep=self.sep,
                                                  usecols=[0, 1, 2],
                                                  names=["Flow", "Diameter",
                                                         "Velocity"]
                                                  )
    def __call__(self, csv_file_name, *args, **kwargs):
        """
        Make the instance callable: ``reader(path)`` is a shortcut for
        ``reader.read_diameter_data(path)`` and re-reads the table.
        """
        # Bug fix: this method was a no-op that printed a blank line, with
        # its docstring dead (placed after the print statement). It now
        # performs the re-read its docstring promised.
        self.read_diameter_data(csv_file_name)
hadi1976/python_project | codes/pipes.py | <filename>codes/pipes.py
from config import *
from diameters import DiameterReader
class TypeOfPipe:
    """
    Author: <NAME>
    The class 'TypeOfPipe' dimensions the pipe parameters that are needed
    for supplying the demand in a sewer system, based on the economic
    diameter table read by DiameterReader.
    """
    def __init__(self, flow_discharge, length):
        """
        param: flow_discharge: float of the flow discharge
        (daily or hourly) in (lit/sec).
        param: length: int of the pipe length in (m).
        """
        self.flow_discharge = float(flow_discharge)
        self.length = float(length)
        # Bug fix: these were initialised to the *type objects* int/float,
        # which would leak a class into the arithmetic below if the lookup
        # ever failed to assign them. None is the "not computed" sentinel.
        self.diameter = None  # mm, set by get_diameter_velocity_data().
        self.velocity = None  # m/s, set by get_diameter_velocity_data().
    def get_diameter_velocity_data(self):
        """
        Look up the economic diameter and velocity for the stored flow
        discharge in the 'EconomicDiameter' csv (via DiameterReader).
        :return: int diameter in (mm); also sets ``self.diameter`` and
            ``self.velocity``.
        """
        # Instantiating DiameterReader class.
        economic_diameter_csv = DiameterReader()
        # Renaming the pandas dataframe "size_diameter_velocity" as "df".
        df = economic_diameter_csv.size_diameter_velocity
        # First row whose tabulated flow covers the demand wins; when the
        # demand exceeds every tabulated flow, the 1000 mm / 1.75 m/s
        # fallback written on each pass remains in place.
        for i in df.index:
            if self.flow_discharge <= df["Flow"][i]:
                self.diameter = df['Diameter'][i]
                self.velocity = df['Velocity'][i]
                break
            # The elif here repeated the exact negation of the if above;
            # an unconditional fallback is equivalent.
            self.diameter = 1000
            self.velocity = 1.75
        return self.diameter
    def pipe_dimensioning(self):
        """
        Dimension a pipe using the best-fit diameter from
        ``get_diameter_velocity_data``.
        :return: list [reynolds, friction, slope, head_loss, diameter,
            velocity].
        """
        diam = self.get_diameter_velocity_data()
        # Calculating Reynolds number of the pipe.
        reynolds = (1000 * ((self.flow_discharge / 1000) /
                            ((math.pi * (diam / 1000) ** 2) / 4))) / 0.0089
        # Calculating the friction factor of the pipe.
        friction = (1 / (-2 * math.log10(((0.0001 / (diam / 1000))
                                          / 3.715) + (15 / reynolds)))) ** 2
        # Calculating the slope of the distance traveled by the pipe.
        slope = friction * (((self.flow_discharge / 1000) /
                             ((math.pi * (diam / 1000) ** 2) / 4)) /
                            (2 * 9.82 * diam)) * 1000
        # Calculating the losses in the pipe.
        # NOTE(review): dividing the slope by the length looks suspicious;
        # head loss is conventionally slope * length. Confirm against the
        # design notes before changing it.
        head_loss = (slope / self.length) * 1000
        # Parameters returned in a list format.
        return [reynolds, friction, slope, head_loss, self.diameter,
                self.velocity]
|
hadi1976/python_project | codes/performances.py | from flowrate_factors import max_daily_flow
from config import*
"""" Author: <NAME>
The script 'performances.py' has the function of evaluating the performances
of the two pump capacities for the model 2009 1760 RPM and the model 8013 1760
RPM. This evaluation is according to: elevation (m) and flow discharge
(lit/sec). """
# Read the town elevation entered in the GUI. Note this rebinds the name
# 'user_elevation' from the input-file *path* (star-imported from config)
# to its int value, and runs at import time.
with open(user_elevation) as file:
    user_elevation = int(file.read())
flow_discharge = max_daily_flow()  # Output: flow_discharge.
# Constant parameters of the pumps capacity performance.
# Elevations (m) and flows (lit/sec) of the five pump-one curves,
# listed in ascending order.
pump_one_elevation = [13, 15, 17, 23, 28]
max_elevation_one = max(pump_one_elevation)
pump_one_flow = [12.5, 14, 15, 16, 17.5]
max_flow_one = max(pump_one_flow)
# Elevations (m) and flows (lit/sec) of the five pump-two curves,
# listed in ascending order.
pump_two_elevation = [22, 29, 35, 43, 55]
max_elevation_two = max(pump_two_elevation)
pump_two_flow = [155, 205, 240, 255, 270]
max_flow_two = max(pump_two_flow)
def pump_one_performance():
    """
    Select the best-fit performance curve of pump one (Model 2009
    1760 RPM) for the module-level ``user_elevation`` (m) and
    ``flow_discharge`` (lit/sec).

    :return: int elevation (m) of the best-fit curve, or ``None`` when
        the demand exceeds the capacity of pump one (previously this
        path fell off the end of the function implicitly).
    """
    # Pump one can only serve demands within its maximum capacity;
    # max_* are precomputed from the (ascending) capacity tables.
    if user_elevation > max_elevation_one or flow_discharge > max_flow_one:
        return None
    # Index of the first curve whose elevation exceeds the user's.
    # (The dead "i = 1" / "j = 1" pre-assignments were removed: the
    # tables are non-empty constants, so the loops always bind i and j.)
    for i in range(len(pump_one_elevation)):
        if user_elevation < pump_one_elevation[i]:
            break
    # Index of the first curve whose flow exceeds the demand.
    for j in range(len(pump_one_flow)):
        if flow_discharge < pump_one_flow[j]:
            break
    # The governing (larger) index satisfies both elevation and flow.
    return pump_one_elevation[max(i, j)]
def pump_two_performance():
    """
    Select the best-fit performance curve of pump two (Model 8013
    1760 RPM) for the module-level ``user_elevation`` (m) and
    ``flow_discharge`` (lit/sec).

    :return: int elevation (m) of the best-fit curve, or ``None`` when
        the demand exceeds the capacity of pump two (previously this
        path fell off the end of the function implicitly).
    """
    # Pump two can only serve demands within its maximum capacity;
    # max_* are precomputed from the (ascending) capacity tables.
    if user_elevation > max_elevation_two or flow_discharge > max_flow_two:
        return None
    # Index of the first curve whose elevation exceeds the user's.
    # (The dead "i = 1" / "j = 1" pre-assignments were removed: the
    # tables are non-empty constants, so the loops always bind i and j.)
    for i in range(len(pump_two_elevation)):
        if user_elevation < pump_two_elevation[i]:
            break
    # Index of the first curve whose flow exceeds the demand.
    for j in range(len(pump_two_flow)):
        if flow_discharge < pump_two_flow[j]:
            break
    # The governing (larger) index satisfies both elevation and flow.
    return pump_two_elevation[max(i, j)]
hadi1976/python_project | codes/flowrate_factors.py | from config import *
"""
Author: <NAME>
"""
# read the country name from text file
with open(country) as file:
country_name = file.read()
# read the number of inhabitants from text file
with open(inhabitants) as file:
number_inhabitants = int(file.read())
def get_flow_discharge():
    """
    Build a country -> consumption lookup from the country-codes
    workbook and return the consumption for the user's country.
    :return: Water consumption in Lit/day.capita
    """
    workbook = load_workbook(country_codes)
    sheet = workbook.active
    consumption_by_country = {}
    # Columns B/C hold the country name and its consumption value;
    # rows with an empty name cell are skipped.
    for name_value, consumption_value in sheet.iter_rows(
            min_row=2, min_col=2, max_col=3, values_only=True):
        if name_value is not None:
            consumption_by_country[name_value] = int(consumption_value)
    return consumption_by_country[country_name]
def hour_factor(population=None):
    """
    Return the peak hourly factor used in the pipes design.

    :param population: number of inhabitants; defaults to the GUI input
        read at module import (``number_inhabitants``), so existing
        zero-argument callers are unaffected.
    :return: float hourly factor, or ``None`` when the population
        exceeds one million (the GUI validates inputs below that bound).
    """
    if population is None:
        population = number_inhabitants
    # Design-table thresholds, checked in ascending order.
    if population <= 1000:
        return 5.66
    if population <= 10000:
        return 3.84
    if population <= 100000:
        return 2.61
    if population <= 1000000:
        return 1.77
    # Previously an implicit None; made explicit for clarity.
    return None
def day_factor(population=None):
    """
    Return the peak daily factor used in ``max_daily_flow``.

    :param population: number of inhabitants; defaults to the GUI input
        read at module import (``number_inhabitants``), so existing
        zero-argument callers are unaffected.
    :return: float daily factor, or ``None`` when the population
        exceeds one million (the GUI validates inputs below that bound).
    """
    if population is None:
        population = number_inhabitants
    # Design-table thresholds, checked in ascending order.
    if population <= 1000:
        return 2.32
    if population <= 10000:
        return 1.95
    if population <= 100000:
        return 1.64
    if population <= 1000000:
        return 1.38
    # Previously an implicit None; made explicit for clarity.
    return None
def max_hourly_flow():
    """
    Calculate the maximum hourly flow rate used in the pipes design.
    :return: float flow in lit/sec.
    """
    # Per-capita daily consumption spread over 24 h, scaled by the
    # population and the peak hourly factor, converted to per-second.
    hourly_consumption = get_flow_discharge() / 24
    peak_flow = hourly_consumption * number_inhabitants * hour_factor()
    return float(peak_flow / 3600)
def max_daily_flow():
    """
    Calculate the maximum daily flow rate used in the pipes design.
    :return: float flow in lit/sec.
    """
    # Per-capita consumption scaled by population and the peak daily
    # factor, converted from per-day to per-second (86400 s/day).
    peak_flow = get_flow_discharge() * number_inhabitants * day_factor()
    return float(peak_flow / 86400)
|
hadi1976/python_project | codes/GUI.py | from config import *
"""
Author: <NAME>
in this file I made a stand alone GUI which takes all the inputs from the user,
save it in a text file, and show the results.
"""
class MyApp(tk.Frame):
    """Stand-alone tkinter GUI that collects the design inputs, persists
    them to text files under ``inputs/``, and displays the computed
    results of the water-distribution design."""
    def __init__(self, master=None):
        """Build the main window: entries, combobox, buttons and logo.

        :param master: parent tk widget; tk.Frame creates a default root
            window when omitted.
        """
        tk.Frame.__init__(self, master)
        # Widget handles that the pop-up methods populate later.
        self.Label018 = None
        self.Label013 = None
        self.logo = None
        self.Label017 = None
        self.Label016 = None
        self.Label015 = None
        self.Label014 = None
        self.Label012 = None
        self.Label011 = None
        self.Label010 = None
        self.Label009 = None
        self.Label008 = None
        self.MyImage = None
        self.Label007 = None
        self.Label006 = None
        self.Label005 = None
        self.Label004 = None
        self.Label003 = None
        self.Label002 = None
        self.Label001 = None
        self.pop_up2 = None
        self.Label66 = None
        self.Label55 = None
        self.Label44 = None
        self.Label33 = None
        self.Label22 = None
        self.Label11 = None
        self.Label00 = None
        self.pop_up = None
        self.DistanceSD = None
        self.DistanceSS = None
        # NOTE(review): the next four hold the *type objects* float/int
        # as placeholders until the save_info* methods assign strings.
        self.ElevationsStorage = float
        self.ElevationReservoir = float
        self.ElevationTown = float
        self.NumberOfInhabitants = int
        self.CountryName = None
        self.master.title("GUI project")
        self.master.iconbitmap("gui_pics/pipe.ico")
        # Set geometry: upper-left corner of the window
        ww = 1100 # width
        wh = 600 # height
        wx = (self.master.winfo_screenwidth() - ww) / 2
        wy = (self.master.winfo_screenheight() - wh) / 2
        # assign geometry
        self.master.geometry("%dx%d+%d+%d" % (ww, wh, wx, wy))
        # assign space holders around widgets
        self.dx = 5
        self.dy = 5
        # Image
        logo = tk.PhotoImage(file="gui_pics/image.png")
        logo = logo.subsample(1, 1) # controls size
        self.l_img = tk.Label(master, image=logo)
        # Keep a reference so the image is not garbage-collected.
        self.l_img.image = logo
        self.l_img.grid(row=11, column=1)
        # Label
        self.Label1 = tk.Label(master, text="Number of inhabitants")
        self.Label1.grid(column=0, row=1, padx=self.dx, pady=self.dy)
        self.Label2 = tk.Label(master, text="Elevation of the storage tank")
        self.Label2.grid(column=0, row=2, padx=self.dx, pady=self.dy)
        self.Label5 = tk.Label(master, text="Distance between Source and "
                                            "Storage tank")
        self.Label5.grid(column=0, row=5, padx=self.dx, pady=self.dy)
        self.Label6 = tk.Label(master, text="Distance between Storage tank "
                                            "and Distribution net")
        self.Label6.grid(column=0, row=6, padx=self.dx, pady=self.dy)
        # Entry
        # Number of Inhabitants
        self.Number_of_Inhabitants = tk.IntVar()
        self.Number_of_Inhabitants = tk.Entry(master, width=20)
        self.Number_of_Inhabitants.grid(column=1, row=1, padx=5, pady=5)
        # Elevation of the town
        self.Elevation_town = tk.IntVar()
        self.Elevation_town = tk.Entry(master, width=20)
        self.Elevation_town.grid(column=1, row=2, padx=5, pady=5)
        # Distance between Source and Storage tank
        self.Distance1 = tk.IntVar()
        self.Distance1 = tk.Entry(master, width=20)
        self.Distance1.grid(column=1, row=5, padx=5, pady=5)
        # Distance between Storage tank and Distribution net
        self.Distance2 = tk.IntVar()
        self.Distance2 = tk.Entry(master, width=20)
        self.Distance2.grid(column=1, row=6, padx=5, pady=5)
        # Combobox
        self.selected_country = tk.StringVar()
        self.cbox = ttk.Combobox(master, width=20,
                                 textvariable=self.selected_country)
        self.cbox.grid(column=0, row=0, padx=self.dx, pady=self.dy)
        self.cbox['state'] = 'readonly'
        # The selectable country names come from a plain text file.
        with open(file="Countries.txt", mode="r") as ff:
            self.cbox['values'] = ff.read().splitlines()
        self.cbox.set("Country names")
        self.cbox_selection = self.cbox.get()
        # define Button to assign the values
        self.button_country = tk.Button(master, text="Assign",
                                        command=lambda:
                                        self.save_country_name())
        self.button_country.grid(column=3, row=0, padx=self.dx, pady=self.dy)
        self.button1 = tk.Button(master, text="Assign",
                                 command=lambda: self.save_info1())
        self.button1.grid(column=3, row=1, padx=self.dx, pady=self.dy)
        self.button2 = tk.Button(master, text="Assign",
                                 command=lambda: self.save_info2())
        self.button2.grid(column=3, row=2, padx=self.dx, pady=self.dy)
        self.button5 = tk.Button(master, text="Assign",
                                 command=lambda: self.save_info5())
        self.button5.grid(column=3, row=5, padx=self.dx, pady=self.dy)
        self.button6 = tk.Button(master, text="Assign",
                                 command=lambda: self.save_info6())
        self.button6.grid(column=3, row=6, padx=self.dx, pady=self.dy)
        self.button7 = tk.Button(master, text="Check your Inputs", bg="red",
                                 fg="blue",
                                 command=lambda: self.check_inputs())
        self.button7.grid(column=3, row=7, padx=5, pady=5)
        self.button8 = tk.Button(master, text="Show results", bg="pink",
                                 fg="blue",
                                 command=lambda: self.show_results())
        self.button8.grid(column=3, row=8, padx=5, pady=5)
        # if the user wants to see the original pumps figures
        self.button9 = tk.Button(master, text="Show pumps", bg="black",
                                 fg="white",
                                 command=lambda: self.show_pumps())
        self.button9.grid(column=3, row=9, padx=5, pady=5)
        self.button_quit = tk.Button(master, text="Quit", bg="yellow",
                                     fg="red", command=lambda: self.quit_gui())
        self.button_quit.grid(column=3, row=10, padx=5, pady=5)
    def save_country_name(self):
        """
        This method would insert the user's country in a txt file name,
        country.txt So when the user click on Assign button the country name
        wil be written in text file.
        """
        try:
            with open("inputs/country.txt", "w") as file:
                self.CountryName = self.selected_country.get()
                file.write(self.CountryName)
        except OSError:
            showinfo("Error", "Please insert a country name.")
    def save_info1(self):
        """
        This method would insert the user's number of inhabitants in a txt
        file name, Input1.txt .So when the user click on Assign button the
        input wil be written in text file.
        """
        try:
            # NOTE(review): int(...) raising ValueError is the real guard
            # here; the type(...) == int comparison is always True once the
            # conversion succeeded. Same pattern in the save_info* below.
            if type(int(self.Number_of_Inhabitants.get())) == int:
                if 0 < int(self.Number_of_Inhabitants.get()) < 1000000:
                    with open("inputs/Input1.txt", "w") as file:
                        self.NumberOfInhabitants = \
                            self.Number_of_Inhabitants.get()
                        file.write(self.NumberOfInhabitants)
                else:
                    showinfo("Error",
                             "Please insert a number below one million ")
        except ValueError:
            showinfo("Error", "Please insert an integer number.")
    def save_info2(self):
        """
        This method would insert the user's elevation town in a txt file name,
        Input2.txt .So when the user click on Assign button the input
        will be written in text file.
        """
        try:
            if type(int(self.Elevation_town.get())) == int:
                if (int(self.Elevation_town.get())) > 0:
                    with open("inputs/Input2.txt", "w") as file:
                        self.ElevationTown = self.Elevation_town.get()
                        file.write(self.ElevationTown)
                else:
                    showinfo("Error", "Please insert a positive number.")
        except ValueError:
            showinfo("Error", "Please insert an integer number.")
    def save_info5(self):
        """
        This method would insert the user's pipe length between the reservoir
        and tank in a txt file name, Input5.txt .So when the user click on
        Assign button the input will be written in text file.
        """
        try:
            if type(int(self.Distance1.get())) == int:
                if (int(self.Distance1.get())) > 0:
                    with open("inputs/Input5.txt", "w") as file:
                        self.DistanceSS = self.Distance1.get()
                        file.write(self.DistanceSS)
                else:
                    showinfo("Error", "Please insert a positive number.")
        except ValueError:
            showinfo("Error", "Please insert an integer number.")
    def save_info6(self):
        """
        This method would insert the user's pipe length between the tank
        and town in a txt file name, Input6.txt .So when the user click on
        Assign button the input will be written in text file.
        """
        try:
            if type(int(self.Distance2.get())) == int:
                if (int(self.Distance2.get())) > 0:
                    with open("inputs/Input6.txt", "w") as file:
                        self.DistanceSD = self.Distance2.get()
                        file.write(self.DistanceSD)
                else:
                    showinfo("Error", "Please insert a positive number.")
        except ValueError:
            showinfo("Error", "Please insert an integer number")
    def check_inputs(self):
        """
        In this method when the user click on the respective button(Check
        your Inputs), The program navigate to the text files were all the
        inputs are saved. And It shows the values that the user has entered.
        """
        self.pop_up = tk.Toplevel(master=self)
        # Geometry of pop up window
        self.pop_up.geometry("400x400")
        self.pop_up.title("pop_up Window")
        # add a label for showing the selected country
        # NOTE(review): the reads below use plain open/close pairs;
        # 'with open(...)' blocks would be safer and more idiomatic.
        file1 = open("inputs/country.txt", "r")
        show_country = file1.read()
        file1.close()
        self.Label00 = tk.Label(self.pop_up,
                                text="The name of the "
                                     "Country is: " + show_country)
        self.Label00.grid(column=0, row=1, padx=self.dx, pady=self.dy)
        # add a label for showing the selected country
        file2 = open("inputs/Input1.txt", "r")
        show_inhabitants = file2.read()
        file2.close()
        self.Label11 = tk.Label(self.pop_up,
                                text="Number of "
                                     "inhabitants is: " + show_inhabitants)
        self.Label11.grid(column=0, row=2, padx=self.dx, pady=self.dy)
        # add a label for showing the Elevation town
        file3 = open("inputs/Input2.txt", "r")
        show_elevation = file3.read()
        file3.close()
        self.Label22 = tk.Label(self.pop_up,
                                text="Elevation "
                                     "of the town is: " + show_elevation)
        self.Label22.grid(column=0, row=3, padx=self.dx, pady=self.dy)
        # add a label for showing the Distance between Reservoir and Tank
        file6 = open("inputs/Input5.txt", "r")
        show_distance1 = file6.read()
        file6.close()
        self.Label55 = tk.Label(self.pop_up,
                                text="Distance between Source and "
                                     "Storage tank is: " + show_distance1)
        self.Label55.grid(column=0, row=6, padx=self.dx, pady=self.dy)
        # add a label for showing the Distance between Tank and Town
        file7 = open("inputs/Input6.txt", "r")
        show_distance2 = file7.read()
        file7.close()
        self.Label66 = tk.Label(self.pop_up,
                                text="Distance between Storage"
                                     " tank and "
                                     "Distribution net is: " + show_distance2)
        self.Label66.grid(column=0, row=7, padx=self.dx, pady=self.dy)
    def show_results(self):
        """
        In this Method when the user click on the respective button(Show
        results), It would import all the outputs that we want from this
        project, e.g. The consumption value, The hourly flow discharge,
        Diameter of the pipe,etc.
        """
        # Imported here (not at module top) so the whole design pipeline
        # only runs when the user actually asks for results.
        from main import consumption, hourly, daily, diameter_transmission, \
            velocity_transmission, reynolds_transmission, \
            diameter_distribution, \
            velocity_distribution, reynolds_distribution, \
            friction_transmission, \
            friction_distribution, slope_transmission, slope_distribution, \
            loss_transmission, head_distribution, plot, explanation
        self.pop_up2 = tk.Toplevel(master=self)
        # Geometry of pop up window
        self.pop_up2.geometry("500x500")
        self.pop_up2.title("Show results")
        # Label for country consumption
        self.Label001 = tk.Label(self.pop_up2,
                                 text="The consumption value is: "
                                      + str(consumption()) + " Lit/day.Capita")
        self.Label001.grid(column=0, row=1, padx=self.dx, pady=self.dy)
        self.Label002 = tk.Label(self.pop_up2,
                                 text="The hourly flow discharge : "
                                      + hourly() + " Lit/hour")
        self.Label002.grid(column=0, row=2, padx=self.dx, pady=self.dy)
        self.Label003 = tk.Label(self.pop_up2,
                                 text="The daily flow discharge : "
                                      + daily() + " Lit/day")
        self.Label003.grid(column=0, row=3, padx=self.dx, pady=self.dy)
        # Label for asserting the transmission and distribution
        self.Label004 = tk.Label(self.pop_up2,
                                 text="Transmission pipeline ")
        self.Label004.grid(column=0, row=4, padx=self.dx, pady=self.dy)
        self.Label005 = tk.Label(self.pop_up2,
                                 text="Distribution pipeline ")
        self.Label005.grid(column=1, row=4, padx=self.dx, pady=self.dy)
        # Label Diameter for Transmission pipeline
        self.Label007 = tk.Label(self.pop_up2,
                                 text="Diameter: "
                                      + diameter_transmission() + " mm")
        self.Label007.grid(column=0, row=5, padx=self.dx, pady=self.dy)
        # Label Diameter for Distribution pipeline
        self.Label008 = tk.Label(self.pop_up2,
                                 text="Diameter: "
                                      + diameter_distribution() + " mm")
        self.Label008.grid(column=1, row=5, padx=self.dx, pady=self.dy)
        # Label for velocity number
        # NOTE(review): Label009 is reused for both columns; the second
        # assignment overwrites the handle to the first widget.
        self.Label009 = tk.Label(self.pop_up2,
                                 text="Velocity: "
                                      + velocity_transmission() + " m/sec")
        self.Label009.grid(column=0, row=6, padx=self.dx, pady=self.dy)
        self.Label009 = tk.Label(self.pop_up2,
                                 text="Velocity: "
                                      + velocity_distribution() + " m/sec")
        self.Label009.grid(column=1, row=6, padx=self.dx, pady=self.dy)
        # Label for reynolds
        self.Label010 = tk.Label(self.pop_up2,
                                 text="Reynolds number: "
                                      + reynolds_transmission() + "")
        self.Label010.grid(column=0, row=7, padx=self.dx, pady=self.dy)
        self.Label011 = tk.Label(self.pop_up2,
                                 text="Reynolds number: "
                                      + reynolds_distribution() + "")
        self.Label011.grid(column=1, row=7, padx=self.dx, pady=self.dy)
        # Label Friction loss
        self.Label012 = tk.Label(self.pop_up2,
                                 text="Friction coefficient: "
                                      + friction_transmission() + "")
        self.Label012.grid(column=0, row=8, padx=self.dx, pady=self.dy)
        self.Label013 = tk.Label(self.pop_up2,
                                 text="Friction coefficient: "
                                      + friction_distribution() + "")
        self.Label013.grid(column=1, row=8, padx=self.dx, pady=self.dy)
        # Label Slope
        self.Label014 = tk.Label(self.pop_up2,
                                 text="Slope: "
                                      + slope_transmission() + "%")
        self.Label014.grid(column=0, row=9, padx=self.dx, pady=self.dy)
        self.Label015 = tk.Label(self.pop_up2,
                                 text="Slope: "
                                      + slope_distribution() + "%")
        self.Label015.grid(column=1, row=9, padx=self.dx, pady=self.dy)
        # Label Head loss
        self.Label016 = tk.Label(self.pop_up2,
                                 text="Head loss: "
                                      + loss_transmission() + "")
        self.Label016.grid(column=0, row=10, padx=self.dx, pady=self.dy)
        self.Label017 = tk.Label(self.pop_up2,
                                 text="Head loss: "
                                      + head_distribution() + "")
        self.Label017.grid(column=1, row=10, padx=self.dx, pady=self.dy)
        # plotting the pump that we need
        self.Label018 = tk.Label(self.pop_up2,
                                 text="Amount and type of "
                                      "pump required: " + explanation(),
                                 wraplength=250)
        self.Label018.grid(row=11, padx=self.dx, pady=self.dy)
        plot()
    @staticmethod
    def show_pumps():
        """Display the original (unscaled) performance figures of both
        pump models."""
        from pumps_figure import plot_pump1, plot_pump2
        plot_pump1()
        plot_pump2()
    def quit_gui(self):
        """Destroy the root window and end the mainloop."""
        self.master.destroy()
# Start the GUI event loop when this file is executed as a script.
if __name__ == '__main__':
    MyApp().mainloop()
|
hadi1976/python_project | codes/config.py | """
Author: <NAME>
The "config.py" file, is importing all the libraries and modules that are
required for the correct functionality of this code.As well as, the path
of the GUI inputs.
Also, in case one of the libraries or modules is not properly installed
or the path of the GUI cannot be found. The user will be able to realize
that is an ImportError with its respective message.
"""
try:
    import os
    import logging
    import math
except ImportError:
    # NOTE(review): if this fires, execution continues with undefined
    # names (and 'logging' itself may be the missing module); consider
    # re-raising instead of only logging.
    logging.info("ERROR: Cannot import basic Python libraries.")
try:
    import numpy as np
    import pandas as pd
except ImportError:
    logging.info("ERROR: Cannot import SciPy libraries.")
try:
    import matplotlib.pyplot as plt
except ImportError:
    logging.info("ERROR: Cannot import Matplotlib libraries")
try:
    import tkinter as tk
    from tkinter.messagebox import showinfo
    from tkinter import ttk
except ImportError:
    logging.info("ERROR: Cannot import tkinter modules")
try:
    from openpyxl import load_workbook
except ImportError:
    logging.info("ERROR: Cannot import openpyxl libraries")
try:
    # Paths of the GUI input files, relative to the working directory.
    p = os.path.abspath("")
    country = p + "/inputs/country.txt"
    country_codes = p + "/Country_Codes_and_Names.xlsx"
    inhabitants = p + "/inputs/Input1.txt"
    user_elevation = p + "/inputs/Input2.txt"
    transmission_pipe_length = p + "/inputs/Input5.txt"
    distribution_pipe_length = p + "/inputs/Input6.txt"
except ImportError:
    # NOTE(review): building paths cannot raise ImportError — this
    # handler is effectively dead code; an OSError guard would fit.
    logging.info("ERROR: Cannot import the input path")
# Creating the log file.
# NOTE(review): the logging.info(...) calls above run *before* this
# basicConfig, so early errors may not reach 'my-logfile.log'.
logging.basicConfig(filename="my-logfile.log",
                    format="%(asctime)s - %(message)s",
                    filemode="w", level=logging.DEBUG)
|
hadi1976/python_project | codes/pumps_figure.py | from config import *
class FlowRatePump:
    """
    Author:<NAME>
    Describe a pump's admissible flow interval [start, stop].
    ``flow_range`` expands the interval into 500 evenly spaced sample
    points and caches them on ``name``.
    """
    def __init__(self, start, stop, name):
        self.start = start
        self.stop = stop
        self.name = name
    def flow_range(self):
        """
        Generate 500 evenly spaced flow values between start and stop.
        :return: the generated numpy array (also stored as self.name)
        """
        samples = np.linspace(self.start, self.stop, num=500)
        # Cache on self.name, mirroring the class's original design.
        self.name = samples
        return samples
# Object of FlowRatePump class for the first pump
# (Model 2009 1760 RPM); stop values are each curve's max flow in lit/sec.
flux_rate1 = FlowRatePump(0, 12, "flux_rate1")
flux_rate2 = FlowRatePump(0, 12.5, "flux_rate2")
flux_rate3 = FlowRatePump(0, 14, "flux_rate3")
flux_rate4 = FlowRatePump(0, 16, "flux_rate4")
flux_rate5 = FlowRatePump(0, 17.2, "flux_rate5")
# Object of FlowRatePump class for the second pump
# (Model 8013 1760 RPM); note the much larger flow capacities.
flux_rate2_1 = FlowRatePump(0, 160, "flux_rate1")
flux_rate2_2 = FlowRatePump(0, 200, "flux_rate2")
flux_rate2_3 = FlowRatePump(0, 230, "flux_rate3")
flux_rate2_4 = FlowRatePump(0, 250, "flux_rate4")
flux_rate2_5 = FlowRatePump(0, 285, "flux_rate5")
class HeadPump:
    """Cubic head curve H(Q) = a*Q**3 + b*Q**2 + c*Q + d for one pump size.

    ``flux_rate`` is the flow-rate value (or array) the polynomial is
    evaluated at; ``head_formula`` returns the resulting head and, as in
    the original design, also overwrites ``self.name`` with it.
    """

    def __init__(self, a, b, c, d, flux_rate, name):
        self.a = a
        self.b = b
        self.c = c
        self.d = d
        self.name = name
        self.flux_rate = flux_rate

    def head_formula(self):
        """Evaluate the cubic head polynomial at ``self.flux_rate``.

        :return: the computed head (also stored in ``self.name``)
        """
        q = self.flux_rate
        self.name = self.a * q ** 3 + self.b * q ** 2 + self.c * q + self.d
        return self.name
# Object of HeadPump class for the first pump
# (fitted cubic coefficients per motor size, paired with the matching
# flow-rate axis; plotted by plot_pump1)
Pump1 = HeadPump(-0.0061, -0.01, 0.015, 11.364, flux_rate1.flow_range(),
                 "Pump1")
Pump2 = HeadPump(-0.0051, 0.0121, - 0.0341, 14.619, flux_rate2.flow_range(),
                 "Pump2")
Pump3 = HeadPump(-0.0034, - 0.014, 0.1205, 18.116, flux_rate3.flow_range(),
                 "Pump3")
Pump4 = HeadPump(0, -0.0612, 0.3119, 22.021, flux_rate4.flow_range(), "Pump4")
Pump5 = HeadPump(0, -0.0589, 0.3821, 26.731, flux_rate5.flow_range(), "Pump5")
# Object of HeadPump class for the second pump
# (a=0: these curves are quadratic fits; plotted by plot_pump2)
Pump2_1 = HeadPump(0, -0.001, 0.07, 22.941, flux_rate2_1.flow_range(), "Pump1")
Pump2_2 = HeadPump(0, -0.0006, 0.0648, 28.838, flux_rate2_2.flow_range(),
                   "Pump2")
Pump2_3 = HeadPump(0, -0.0006, 0.0779, 35.279, flux_rate2_3.flow_range(),
                   "Pump3")
Pump2_4 = HeadPump(0, -0.0004, 0.0521, 43.847, flux_rate2_4.flow_range(),
                   "Pump4")
Pump2_5 = HeadPump(0, -0.0003, 0.0521, 53.481, flux_rate2_5.flow_range(),
                   "Pump5")
def plot_pump1():
    """Plot head (m) vs. flow rate (L/s) for every motor size of pump model
    2009, save the figure as pump1.png and display it."""
    curves = [
        (flux_rate1, Pump1, "1.5HP(1.1kw)"),
        (flux_rate2, Pump2, "2HP(1.5kw)"),
        (flux_rate3, Pump3, "3HP(2.2kw)"),
        (flux_rate4, Pump4, "5HP(3.6kw)"),
        (flux_rate5, Pump5, "7.5HP(5.6kw)"),
    ]
    plt.figure(num=None, dpi=120)
    for flow, pump, tag in curves:
        plt.plot(flow.flow_range(), pump.head_formula(), label=tag)
    plt.legend()
    plt.xlabel("flow rate Lit/Sec")
    plt.ylabel("Head (m)")
    plt.title("Model 2009 1760 RPM")
    plt.savefig("pump1.png")
    plt.show()
def plot_pump2():
    """Plot head (m) vs. flow rate (L/s) for every motor size of pump model
    8013, save the figure as pump2.png and display it."""
    curves = [
        (flux_rate2_1, Pump2_1, "60HP(44kw)"),
        (flux_rate2_2, Pump2_2, "75HP(56kw)"),
        (flux_rate2_3, Pump2_3, "100HP(74.5kw)"),
        (flux_rate2_4, Pump2_4, "125HP(93.2kw)"),
        (flux_rate2_5, Pump2_5, "150HP(111.85kw)"),
    ]
    plt.figure(num=None, dpi=120)
    for flow, pump, tag in curves:
        plt.plot(flow.flow_range(), pump.head_formula(), label=tag)
    plt.legend()
    plt.xlabel("flow rate Lit/Sec")
    plt.ylabel("Head (m)")
    plt.title("Model 8013 1760 RPM")
    plt.savefig("pump2.png")
    plt.show()
|
grim13b/testtone | sinewave.py | # -*- coding: utf-8 -*-
__author__ = 'grim3lt.org'
import numpy
import struct
class SineWaveCreator:
    """Write stereo 16-bit PCM sine tones plus a RIFF/WAVE header.

    Usage: call offset_header() first to reserve the 44-byte header,
    write() one or more tones, then write_header() to back-fill the
    header using fp.tell() to determine the data size.
    """

    def offset_header(self, fp):
        """Reserve the 44-byte WAV header area with placeholder bytes."""
        fp.write(b'0' * 44)

    def write(self, fp, a, f0, fs, depth, sec):
        """Append ``sec`` seconds of a sine tone (duplicated on 2 channels).

        a = amplitude, in [0, 1]
        f0 = frequency (Hz)
        fs = sampling frequency (Hz)
        depth = bit depth (samples are packed as signed 16-bit, so use 16)
        sec = seconds

        Returns None without writing when the amplitude exceeds 1.0.
        """
        if a > 1.0:
            return None
        # Full-scale sample value for the given depth, e.g. 32767 for 16 bit.
        dr = 2 ** (depth - 1) - 1
        for n in range(sec * fs):
            y = a * numpy.sin(2 * numpy.pi * f0 * n / fs)
            s = int(y * dr)
            # '<h': explicit little-endian signed 16-bit, as WAV requires.
            sample = struct.pack('<h', s)
            fp.write(sample)  # left channel
            fp.write(sample)  # right channel

    def write_header(self, fp, fs, depth):
        """Back-fill the RIFF/WAVE header at the start of ``fp``.

        Must be called after all audio data is written: fp.tell() is used
        to compute the chunk sizes.
        """
        # PCM byte count: current position minus the 44-byte header area.
        # (Bug fix: the sizes previously included the header offset, making
        # both the RIFF size and the data-chunk size 44+ bytes too large.)
        data_size = fp.tell() - 44
        fp.seek(0, 0)
        fp.write(b'RIFF')
        fp.write(struct.pack('<I', data_size + 36))  # RIFF size = file size - 8
        fp.write(b'WAVE')                    # WAVE format chunk header
        fp.write(b'fmt ')                    # fmt chunk header
        fp.write(struct.pack('<I', 16))      # size of fmt chunk
        fp.write(struct.pack('<H', 0x0001))  # linear PCM
        fp.write(struct.pack('<H', 0x0002))  # channel count (stereo)
        fp.write(struct.pack('<I', fs))      # sampling frequency (unit Hz)
        fp.write(struct.pack('<I', fs * 2 * 2))  # data transfer speed (bytes/s)
        fp.write(struct.pack('<H', 2 * 2))   # data block size (block align)
        fp.write(struct.pack('<H', depth))   # bit depth
        fp.write(b'data')
        fp.write(struct.pack('<I', data_size))  # data chunk size
def main():
    """Write one stepped-frequency stereo test-tone WAV file per band."""
    # Sampling frequency; use values such as 44100 or 96000.
    fs = 192000
    # Bit depth is fixed at 16 bit.
    bitDepth = 16
    # Each row is [StartFreq, EndFreq, StepFreq] in Hz.
    album = [
        [10, 200, 10],
        [200, 2000, 100],
        [2100, 5000, 100],
        [5100, 8000, 100],
        [8100, 10000, 100],
        [10100, 12000, 100],
        [12100, 15000, 100],
        [15100, 18000, 100],
        [18100, 20000, 100],
    ]
    for start, end, step in album:
        filename = '{0}-{1}Hz-{2}Hz.wav'.format(start, end, step)
        with open(filename, 'w+b') as fp:
            writer = SineWaveCreator()
            writer.offset_header(fp)
            # Three seconds of tone at each frequency in the band.
            for freq in range(start, end, step):
                writer.write(fp, 0.7, freq, fs, bitDepth, 3)
            writer.write_header(fp, fs, bitDepth)


if __name__ == '__main__':
    main()
|
taki0112/GDWCT-Tensorflow | GDWCT.py | <gh_stars>10-100
from ops import *
from utils import *
from glob import glob
import time
from tensorflow.contrib.data import prefetch_to_device, shuffle_and_repeat, map_and_batch
class GDWCT(object) :
    """Group-wise Deep Whitening-and-Coloring Transform.

    TF1 graph-mode model for unpaired image-to-image translation between
    two domains (A and B): per-domain content/style encoders, decoders
    that recombine content and style through GDWCT blocks, and
    multi-scale discriminators.
    """
    def __init__(self, sess, args):
        # All hyper-parameters come verbatim from the argparse result.
        self.model_name = 'GDWCT'
        self.sess = sess
        self.checkpoint_dir = args.checkpoint_dir
        self.result_dir = args.result_dir
        self.log_dir = args.log_dir
        self.sample_dir = args.sample_dir
        self.dataset_name = args.dataset
        self.augment_flag = args.augment_flag
        self.epoch = args.epoch
        self.iteration = args.iteration
        self.gan_type = args.gan_type
        self.batch_size = args.batch_size
        self.print_freq = args.print_freq
        self.save_freq = args.save_freq
        self.num_style = args.num_style # for test
        self.guide_img = args.guide_img
        self.direction = args.direction
        self.img_h = args.img_h
        self.img_w = args.img_w
        self.img_ch = args.img_ch
        self.init_lr = args.lr
        self.decay_flag = args.decay_flag
        self.decay_start_epoch = args.decay_start_epoch
        self.decay_step_epoch = args.decay_step_epoch
        self.ch = args.ch
        self.phase = args.phase
        """ Weight """
        self.gan_w = args.gan_w
        self.recon_x_w = args.recon_x_w
        self.recon_s_w = args.recon_s_w
        self.recon_c_w = args.recon_c_w
        self.recon_x_cyc_w = args.recon_x_cyc_w
        self.lambda_w = args.lambda_w
        self.lambda_c = args.lambda_c
        """ Generator """
        self.n_res = args.n_res
        self.group_num = args.group_num
        self.style_dim = args.style_dim
        """ Discriminator """
        self.n_dis = args.n_dis
        self.n_scale = args.n_scale
        self.sn = args.sn
        # NOTE(review): sample_dir is re-assigned here, overriding the raw
        # args.sample_dir stored above.
        self.sample_dir = os.path.join(args.sample_dir, self.model_dir)
        check_folder(self.sample_dir)
        self.trainA_dataset = glob('./dataset/{}/*.*'.format(self.dataset_name + '/trainA'))
        self.trainB_dataset = glob('./dataset/{}/*.*'.format(self.dataset_name + '/trainB'))
        self.dataset_num = max(len(self.trainA_dataset), len(self.trainB_dataset))
        print("##### Information #####")
        print("# gan type : ", self.gan_type)
        print("# dataset : ", self.dataset_name)
        print("# max dataset number : ", self.dataset_num)
        print("# batch_size : ", self.batch_size)
        print("# epoch : ", self.epoch)
        print("# iteration per epoch : ", self.iteration)
        print("# style in test phase : ", self.num_style)
        print()
        print("##### Generator #####")
        print("# residual blocks : ", self.n_res)
        print("# Style dimension : ", self.style_dim)
        print("# group number : ", self.group_num)
        print()
        print("##### Discriminator #####")
        print("# Discriminator layer : ", self.n_dis)
        print("# Multi-scale Dis : ", self.n_scale)
        print("# spectral norm : ", self.sn)
        print()
    ##################################################################################
    # Encoder and Decoders
    ##################################################################################
    def content_encoder(self, x, reuse=False, scope='content_encoder'):
        """Encode an image to a spatial content feature map.

        One 7x7 conv, two stride-2 downsampling convs (channels doubled
        each time), then n_res instance-normalized residual blocks.
        """
        channel = self.ch
        with tf.variable_scope(scope, reuse=reuse) :
            x = conv(x, channel, kernel=7, stride=1, pad=3, pad_type='reflect', sn=self.sn, scope='conv_0')
            x = instance_norm(x, scope='ins_0')
            x = relu(x)
            for i in range(2) :
                x = conv(x, channel*2, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv_'+str(i+1))
                x = instance_norm(x, scope='ins_'+str(i+1))
                x = relu(x)
                channel = channel * 2
            for i in range(self.n_res) :
                x = resblock(x, channel, sn=self.sn, scope='resblock_'+str(i))
            return x
    def style_encoder(self, x, reuse=False, scope='style_encoder'):
        """Encode an image to a style vector via group-normalized convs
        followed by global average pooling."""
        # use group_norm
        channel = self.ch
        with tf.variable_scope(scope, reuse=reuse) :
            x = conv(x, channel, kernel=7, stride=1, pad=3, pad_type='reflect', sn=self.sn, scope='conv_0')
            x = group_norm(x, groups=self.group_num, scope='group_norm_0')
            x = relu(x)
            for i in range(2) :
                x = conv(x, channel*2, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv_'+str(i+1))
                x = group_norm(x, groups=self.group_num, scope='group_norm_' + str(i+1))
                x = relu(x)
                channel = channel * 2
            # Two more stride-2 convs without channel growth.
            for i in range(2) :
                x = conv(x, channel, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv_'+str(i+1+2))
                x = group_norm(x, groups=self.group_num, scope='group_norm_' + str(i+1+2))
                x = relu(x)
            x = global_avg_pooling(x) # global average pooling
            return x
    def generator(self, content, style, reuse=False, scope="decoder"):
        """Decode an image from content features and a style vector.

        GDWCT (whitening/coloring) blocks are interleaved with un-normalized
        residual blocks, followed by two nearest-neighbor upsampling stages.
        Returns the image and the list of coloring directions U (used for
        regularization).
        """
        channel = self.style_dim
        with tf.variable_scope(scope, reuse=reuse) :
            x = content
            U_list = [] # for regularization
            for i in range(self.n_res) :
                if i == 0:
                    # Extra WCT applied to the raw content before the first block.
                    x, U = self.WCT(content, style, sn=self.sn, scope='front_WCT_' + str(i))
                    U_list.append(U)
                x = no_norm_resblock(x, channel, sn=self.sn, scope='no_norm_resblock_' + str(i))
                x, U = self.WCT(x, style, sn=self.sn, scope='back_WCT_' + str(i))
                U_list.append(U)
            for i in range(2) :
                x = up_sample_nearest(x, scale_factor=2)
                x = conv(x, channel//2, kernel=5, stride=1, pad=2, pad_type='reflect', sn=self.sn, scope='conv_'+str(i))
                x = layer_norm(x, scope='layer_norm_' + str(i))
                x = relu(x)
                channel = channel // 2
            x = conv(x, channels=self.img_ch, kernel=7, stride=1, pad=3, pad_type='reflect', sn=self.sn, scope='G_logit')
            x = tanh(x)
            return x, U_list
    def WCT(self, content, style, sn=False, scope='wct'):
        """One whitening-and-coloring transform step.

        Style is mapped by two MLPs to a mean (mu) and a coloring code (ct);
        the transformed content is blended with the input by a learned,
        clipped-to-[0, 1] scalar alpha (initialized at 0.6).
        """
        with tf.variable_scope(scope) :
            mu = self.MLP(style, sn=sn, scope='MLP_mu')
            ct = self.MLP(style, sn=sn, scope='MLP_CT')
            alpha = tf.get_variable('alpha', shape=[1], initializer=tf.constant_initializer(0.6), constraint=lambda v: tf.clip_by_value(v, 0.0, 1.0))
            x, U = GDWCT_block(content, ct, style_mu=mu, group_num=self.group_num)
            x = alpha * x + (1 - alpha) * content
            return x, U
    def MLP(self, x, sn=False, scope='MLP'):
        """Three-layer fully-connected net; output reshaped to [bs, 1, style_dim]."""
        channel = self.style_dim
        with tf.variable_scope(scope) :
            for i in range(2) :
                x = fully_connected(x, channel, sn=sn, scope='linear_' + str(i))
                x = lrelu(x, 0.01)
            x = fully_connected(x, channel, sn=sn, scope='logit')
            x = tf.reshape(x, shape=[-1, 1, channel])
            return x
    ##################################################################################
    # Discriminator
    ##################################################################################
    def discriminator(self, x_init, reuse=False, scope="discriminator"):
        """Multi-scale PatchGAN discriminator.

        Returns a list of logit maps, one per scale; the input is average-
        pool downsampled between scales.
        """
        D_logit = []
        with tf.variable_scope(scope, reuse=reuse) :
            for scale in range(self.n_scale) :
                channel = self.ch
                x = conv(x_init, channel, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='ms_' + str(scale) + '_conv_0')
                x = lrelu(x, 0.01)
                for i in range(1, self.n_dis):
                    x = conv(x, channel * 2, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='ms_' + str(scale) +'_conv_' + str(i))
                    x = lrelu(x, 0.01)
                    channel = channel * 2
                x = conv(x, channels=1, kernel=1, stride=1, sn=self.sn, scope='ms_' + str(scale) + '_D_logit')
                D_logit.append(x)
                x_init = down_sample(x_init)
            return D_logit
    ##################################################################################
    # Model
    ##################################################################################
    def encoder_A(self, x_A, reuse=False):
        """Encode a domain-A image into (content, style)."""
        style_A = self.style_encoder(x_A, reuse=reuse, scope='style_encoder_A')
        content_A = self.content_encoder(x_A, reuse=reuse, scope='content_encoder_A')
        return content_A, style_A
    def encoder_B(self, x_B, reuse=False):
        """Encode a domain-B image into (content, style)."""
        style_B = self.style_encoder(x_B, reuse=reuse, scope='style_encoder_B')
        content_B = self.content_encoder(x_B, reuse=reuse, scope='content_encoder_B')
        return content_B, style_B
    def decoder_A(self, content_B, style_A, reuse=False):
        """Generate a domain-A image from (content, A-style)."""
        x_ba, U_style_A = self.generator(content=content_B, style=style_A, reuse=reuse, scope='decoder_A')
        return x_ba, U_style_A
    def decoder_B(self, content_A, style_B, reuse=False):
        """Generate a domain-B image from (content, B-style)."""
        x_ab, U_style_B = self.generator(content=content_A, style=style_B, reuse=reuse, scope='decoder_B')
        return x_ab, U_style_B
    def discriminate_real(self, x_A, x_B):
        """Discriminator logits for real images of both domains."""
        real_A_logit = self.discriminator(x_A, scope="discriminator_A")
        real_B_logit = self.discriminator(x_B, scope="discriminator_B")
        return real_A_logit, real_B_logit
    def discriminate_fake(self, x_ba, x_ab):
        """Discriminator logits for translated (fake) images of both domains."""
        fake_A_logit = self.discriminator(x_ba, reuse=True, scope="discriminator_A")
        fake_B_logit = self.discriminator(x_ab, reuse=True, scope="discriminator_B")
        return fake_A_logit, fake_B_logit
    def build_model(self):
        """Construct the full training graph: input pipelines, encoders/
        decoders, losses, optimizers, summaries, and the guided-test path."""
        self.lr = tf.placeholder(tf.float32, name='learning_rate')
        """ Input Image"""
        Image_Data_Class = ImageData(self.img_h, self.img_w, self.img_ch, self.augment_flag)
        trainA = tf.data.Dataset.from_tensor_slices(self.trainA_dataset)
        trainB = tf.data.Dataset.from_tensor_slices(self.trainB_dataset)
        gpu_device = '/gpu:0'
        trainA = trainA.\
            apply(shuffle_and_repeat(self.dataset_num)). \
            apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)). \
            apply(prefetch_to_device(gpu_device, None))
        trainB = trainB. \
            apply(shuffle_and_repeat(self.dataset_num)). \
            apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)). \
            apply(prefetch_to_device(gpu_device, None))
        # When using dataset.prefetch, use buffer_size=None to let it detect optimal buffer size
        trainA_iterator = trainA.make_one_shot_iterator()
        trainB_iterator = trainB.make_one_shot_iterator()
        self.domain_A = trainA_iterator.get_next()
        self.domain_B = trainB_iterator.get_next()
        """ Define Encoder, Generator, Discriminator """
        # encode
        content_a, style_a = self.encoder_A(self.domain_A)
        content_b, style_b = self.encoder_B(self.domain_B)
        # decode (cross domain)
        x_ba, U_A = self.decoder_A(content_B=content_b, style_A=style_a)
        x_ab, U_B = self.decoder_B(content_A=content_a, style_B=style_b)
        # decode (within domain)
        x_aa, _ = self.decoder_A(content_B=content_a, style_A=style_a, reuse=True)
        x_bb, _ = self.decoder_B(content_A=content_b, style_B=style_b, reuse=True)
        # encode again
        content_ba, style_ba = self.encoder_A(x_ba, reuse=True)
        content_ab, style_ab = self.encoder_B(x_ab, reuse=True)
        # decode again (if needed)
        x_aba, _ = self.decoder_A(content_B=content_ab, style_A=style_ba, reuse=True)
        x_bab, _ = self.decoder_B(content_A=content_ba, style_B=style_ab, reuse=True)
        real_A_logit, real_B_logit = self.discriminate_real(self.domain_A, self.domain_B)
        fake_A_logit, fake_B_logit = self.discriminate_fake(x_ba, x_ab)
        """ Define Loss """
        G_adv_A = self.gan_w * generator_loss(self.gan_type, fake_A_logit)
        G_adv_B = self.gan_w * generator_loss(self.gan_type, fake_B_logit)
        D_adv_A = self.gan_w * discriminator_loss(self.gan_type, real_A_logit, fake_A_logit)
        D_adv_B = self.gan_w * discriminator_loss(self.gan_type, real_B_logit, fake_B_logit)
        recon_style_A = self.recon_s_w * L1_loss(style_ba, style_a)
        recon_style_B = self.recon_s_w * L1_loss(style_ab, style_b)
        recon_content_A = self.recon_c_w * L1_loss(content_ab, content_a)
        recon_content_B = self.recon_c_w * L1_loss(content_ba, content_b)
        cyc_recon_A = self.recon_x_cyc_w * L1_loss(x_aba, self.domain_A)
        cyc_recon_B = self.recon_x_cyc_w * L1_loss(x_bab, self.domain_B)
        recon_A = self.recon_x_w * L1_loss(x_aa, self.domain_A) # reconstruction
        recon_B = self.recon_x_w * L1_loss(x_bb, self.domain_B) # reconstruction
        # Group-wise whitening/coloring regularizers on the content
        # covariance and the coloring directions U.
        whitening_A, coloring_A = group_wise_regularization(deep_whitening_transform(content_a), U_A, self.group_num)
        whitening_B, coloring_B = group_wise_regularization(deep_whitening_transform(content_b), U_B, self.group_num)
        whitening_A = self.lambda_w * whitening_A
        whitening_B = self.lambda_w * whitening_B
        coloring_A = self.lambda_c * coloring_A
        coloring_B = self.lambda_c * coloring_B
        G_reg_A = regularization_loss('decoder_A') + regularization_loss('encoder_A')
        G_reg_B = regularization_loss('decoder_B') + regularization_loss('encoder_B')
        D_reg_A = regularization_loss('discriminator_A')
        D_reg_B = regularization_loss('discriminator_B')
        Generator_A_loss = G_adv_A + \
                           recon_A + \
                           recon_style_A + \
                           recon_content_A + \
                           cyc_recon_B + \
                           whitening_A + \
                           coloring_A + \
                           G_reg_A
        Generator_B_loss = G_adv_B + \
                           recon_B + \
                           recon_style_B + \
                           recon_content_B + \
                           cyc_recon_A + \
                           whitening_B + \
                           coloring_B + \
                           G_reg_B
        Discriminator_A_loss = D_adv_A + D_reg_A
        Discriminator_B_loss = D_adv_B + D_reg_B
        self.Generator_loss = Generator_A_loss + Generator_B_loss
        self.Discriminator_loss = Discriminator_A_loss + Discriminator_B_loss
        """ Training """
        t_vars = tf.trainable_variables()
        G_vars = [var for var in t_vars if 'decoder' in var.name or 'encoder' in var.name]
        D_vars = [var for var in t_vars if 'discriminator' in var.name]
        self.G_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.Generator_loss, var_list=G_vars)
        self.D_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.Discriminator_loss, var_list=D_vars)
        """" Summary """
        self.all_G_loss = tf.summary.scalar("Generator_loss", self.Generator_loss)
        self.all_D_loss = tf.summary.scalar("Discriminator_loss", self.Discriminator_loss)
        self.G_A_loss = tf.summary.scalar("G_A_loss", Generator_A_loss)
        self.G_B_loss = tf.summary.scalar("G_B_loss", Generator_B_loss)
        self.D_A_loss = tf.summary.scalar("D_A_loss", Discriminator_A_loss)
        self.D_B_loss = tf.summary.scalar("D_B_loss", Discriminator_B_loss)
        self.G_A_adv_loss = tf.summary.scalar("G_A_adv_loss", G_adv_A)
        self.G_A_style_loss = tf.summary.scalar("G_A_style_loss", recon_style_A)
        self.G_A_content_loss = tf.summary.scalar("G_A_content_loss", recon_content_A)
        self.G_A_cyc_loss = tf.summary.scalar("G_A_cyc_loss", cyc_recon_A)
        self.G_A_identity_loss = tf.summary.scalar("G_A_identity_loss", recon_A)
        self.G_A_whitening_loss = tf.summary.scalar("G_A_whitening_loss", whitening_A)
        self.G_A_coloring_loss = tf.summary.scalar("G_A_coloring_loss", coloring_A)
        self.G_B_adv_loss = tf.summary.scalar("G_B_adv_loss", G_adv_B)
        self.G_B_style_loss = tf.summary.scalar("G_B_style_loss", recon_style_B)
        self.G_B_content_loss = tf.summary.scalar("G_B_content_loss", recon_content_B)
        self.G_B_cyc_loss = tf.summary.scalar("G_B_cyc_loss", cyc_recon_B)
        self.G_B_identity_loss = tf.summary.scalar("G_B_identity_loss", recon_B)
        self.G_B_whitening_loss = tf.summary.scalar("G_B_whitening_loss", whitening_B)
        self.G_B_coloring_loss = tf.summary.scalar("G_B_coloring_loss", coloring_B)
        # Track every learned blending coefficient alpha (one per WCT block).
        self.alpha_var = []
        for var in tf.trainable_variables():
            if 'alpha' in var.name:
                self.alpha_var.append(tf.summary.histogram(var.name, var))
                self.alpha_var.append(tf.summary.scalar(var.name, tf.reduce_max(var)))
        G_summary_list = [self.G_A_adv_loss,
                          self.G_A_style_loss, self.G_A_content_loss,
                          self.G_A_cyc_loss, self.G_A_identity_loss,
                          self.G_A_whitening_loss, self.G_A_coloring_loss,
                          self.G_A_loss,
                          self.G_B_adv_loss,
                          self.G_B_style_loss, self.G_B_content_loss,
                          self.G_B_cyc_loss, self.G_B_identity_loss,
                          self.G_B_whitening_loss, self.G_B_coloring_loss,
                          self.G_B_loss,
                          self.all_G_loss]
        G_summary_list.extend(self.alpha_var)
        self.G_loss = tf.summary.merge(G_summary_list)
        self.D_loss = tf.summary.merge([self.D_A_loss, self.D_B_loss, self.all_D_loss])
        """ Image """
        self.fake_A = x_ba
        self.fake_B = x_ab
        self.real_A = self.domain_A
        self.real_B = self.domain_B
        """ Test """
        """ Guided Image Translation """
        self.content_image = tf.placeholder(tf.float32, [1, self.img_h, self.img_w, self.img_ch], name='content_image')
        self.style_image = tf.placeholder(tf.float32, [1, self.img_h, self.img_w, self.img_ch], name='guide_style_image')
        if self.direction == 'a2b' :
            guide_content_A, _ = self.encoder_A(self.content_image, reuse=True)
            _, guide_style_B = self.encoder_B(self.style_image, reuse=True)
            self.guide_fake_B, _ = self.decoder_B(content_A=guide_content_A, style_B=guide_style_B, reuse=True)
        else :
            guide_content_B, _ = self.encoder_B(self.content_image, reuse=True)
            _, guide_style_A = self.encoder_A(self.style_image, reuse=True)
            self.guide_fake_A, _ = self.decoder_A(content_B=guide_content_B, style_A=guide_style_A, reuse=True)
    def train(self):
        """Run the training loop: alternate D and G updates, periodically
        saving sample images and checkpoints; resumes from a checkpoint
        when one exists."""
        # initialize all variables
        tf.global_variables_initializer().run()
        # saver to save model
        self.saver = tf.train.Saver(max_to_keep=20)
        # summary writer
        self.writer = tf.summary.FileWriter(self.log_dir + '/' + self.model_dir, self.sess.graph)
        # restore check-point if it exits
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)
        if could_load:
            start_epoch = (int)(checkpoint_counter / self.iteration)
            start_batch_id = checkpoint_counter - start_epoch * self.iteration
            counter = checkpoint_counter
            print(" [*] Load SUCCESS")
        else:
            start_epoch = 0
            start_batch_id = 0
            counter = 1
            print(" [!] Load failed...")
        # loop for epoch
        start_time = time.time()
        lr = self.init_lr
        for epoch in range(start_epoch, self.epoch):
            # NOTE(review): the condition reads decay_start_epoch > epoch,
            # i.e. the decayed lr is applied BEFORE decay_start_epoch, and
            # (epoch - decay_start_epoch) is then negative — confirm whether
            # `epoch >= self.decay_start_epoch` was intended.
            if self.decay_flag and self.decay_start_epoch > epoch :
                lr = self.init_lr * pow(0.5, (epoch - self.decay_start_epoch) // self.decay_step_epoch)
            for idx in range(start_batch_id, self.iteration):
                train_feed_dict = {
                    self.lr : lr
                }
                # Update D
                _, d_loss, summary_str = self.sess.run([self.D_optim, self.Discriminator_loss, self.D_loss], feed_dict = train_feed_dict)
                self.writer.add_summary(summary_str, counter)
                # Update G
                batch_A_images, batch_B_images, fake_A, fake_B, _, g_loss, summary_str = self.sess.run([self.real_A, self.real_B, self.fake_A, self.fake_B, self.G_optim, self.Generator_loss, self.G_loss], feed_dict = train_feed_dict)
                self.writer.add_summary(summary_str, counter)
                # display training status
                counter += 1
                print("Epoch: [%2d] [%6d/%6d] time: %4.4f d_loss: %.8f, g_loss: %.8f" \
                      % (epoch, idx, self.iteration, time.time() - start_time, d_loss, g_loss))
                if np.mod(idx+1, self.print_freq) == 0 :
                    save_images(batch_A_images, [self.batch_size, 1],
                                './{}/real_A_{:03d}_{:05d}.jpg'.format(self.sample_dir, epoch, idx+1))
                    # save_images(batch_B_images, [self.batch_size, 1],
                    #             './{}/real_B_{:03d}_{:05d}.jpg'.format(self.sample_dir, epoch, idx+1))
                    # save_images(fake_A, [self.batch_size, 1],
                    #             './{}/fake_A_{:03d}_{:05d}.jpg'.format(self.sample_dir, epoch, idx+1))
                    save_images(fake_B, [self.batch_size, 1],
                                './{}/fake_B_{:03d}_{:05d}.jpg'.format(self.sample_dir, epoch, idx+1))
                if np.mod(counter - 1, self.save_freq) == 0 :
                    self.save(self.checkpoint_dir, counter)
            # After an epoch, start_batch_id is set to zero
            # non-zero value is only for the first epoch after loading pre-trained model
            start_batch_id = 0
        # save model for final step
        self.save(self.checkpoint_dir, counter)
    @property
    def model_dir(self):
        """Directory name encoding the model configuration (used for
        checkpoints, logs, samples, and results)."""
        n_dis = str(self.n_scale) + 'multi_' + str(self.n_dis) + 'dis'
        sn = ''
        if self.sn :
            sn = '_sn'
        return "{}_{}_{}_{}_{}adv_{}style_{}content_{}identity_{}cyc_{}color_{}white{}".format(self.model_name, self.dataset_name, self.gan_type,
                                                                                              n_dis,
                                                                                              self.gan_w, self.recon_s_w, self.recon_c_w,
                                                                                              self.recon_x_w, self.recon_x_cyc_w,
                                                                                              self.lambda_c, self.lambda_w, sn)
    def save(self, checkpoint_dir, step):
        """Save a checkpoint for the current graph at global step ``step``."""
        checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        self.saver.save(self.sess, os.path.join(checkpoint_dir, self.model_name + '.model'), global_step=step)
    def load(self, checkpoint_dir):
        """Restore the latest checkpoint, if any.

        Returns (True, counter) on success — the counter is parsed from the
        checkpoint filename suffix — or (False, 0) when none is found.
        """
        print(" [*] Reading checkpoints...")
        checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
            counter = int(ckpt_name.split('-')[-1])
            print(" [*] Success to read {}".format(ckpt_name))
            return True, counter
        else:
            print(" [*] Failed to find a checkpoint")
            return False, 0
    def style_guide_test(self):
        """Translate every test image using one fixed guide style image and
        write the results plus an index.html for visual comparison."""
        tf.global_variables_initializer().run()
        test_A_files = glob('./dataset/{}/*.*'.format(self.dataset_name + '/testA'))
        test_B_files = glob('./dataset/{}/*.*'.format(self.dataset_name + '/testB'))
        style_file = load_test_data(self.guide_img, size_h=self.img_h, size_w=self.img_w)
        self.saver = tf.train.Saver()
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)
        self.result_dir = os.path.join(self.result_dir, self.model_dir, 'guide')
        check_folder(self.result_dir)
        if could_load:
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")
        # write html for visual comparison
        index_path = os.path.join(self.result_dir, 'index.html')
        index = open(index_path, 'w')
        index.write("<html><body><table><tr>")
        index.write("<th>name</th><th>input</th><th>output</th></tr>")
        if self.direction == 'a2b' :
            for sample_file in test_A_files: # A -> B
                print('Processing A image: ' + sample_file)
                sample_image = load_test_data(sample_file, size_h=self.img_h, size_w=self.img_w)
                image_path = os.path.join(self.result_dir, '{}'.format(os.path.basename(sample_file)))
                fake_img = self.sess.run(self.guide_fake_B, feed_dict={self.content_image: sample_image, self.style_image : style_file})
                save_images(fake_img, [1, 1], image_path)
                index.write("<td>%s</td>" % os.path.basename(image_path))
                index.write("<td><img src='%s' width='%d' height='%d'></td>" % (sample_file if os.path.isabs(sample_file) else (
                        '../../..' + os.path.sep + sample_file), self.img_w, self.img_h))
                index.write("<td><img src='%s' width='%d' height='%d'></td>" % (image_path if os.path.isabs(image_path) else (
                        '../../..' + os.path.sep + image_path), self.img_w, self.img_h))
                index.write("</tr>")
        else :
            for sample_file in test_B_files: # B -> A
                print('Processing B image: ' + sample_file)
                sample_image = np.asarray(load_test_data(sample_file, size_h=self.img_h, size_w=self.img_w))
                image_path = os.path.join(self.result_dir, '{}'.format(os.path.basename(sample_file)))
                fake_img = self.sess.run(self.guide_fake_A, feed_dict={self.content_image: sample_image, self.style_image : style_file})
                save_images(fake_img, [1, 1], image_path)
                index.write("<td>%s</td>" % os.path.basename(image_path))
                index.write("<td><img src='%s' width='%d' height='%d'></td>" % (sample_file if os.path.isabs(sample_file) else (
                        '../../..' + os.path.sep + sample_file), self.img_w, self.img_h))
                index.write("<td><img src='%s' width='%d' height='%d'></td>" % (image_path if os.path.isabs(image_path) else (
                        '../../..' + os.path.sep + image_path), self.img_w, self.img_h))
                index.write("</tr>")
        index.close()
|
taki0112/GDWCT-Tensorflow | ops.py | <gh_stars>10-100
import tensorflow as tf
import tensorflow.contrib as tf_contrib
import numpy as np
from utils import pytorch_xavier_weight_factor, pytorch_kaiming_weight_factor
# Kaiming/He-style initializer parameters translated into TF's
# variance-scaling initializer; shared by conv and dense layers below.
factor, mode, uniform = pytorch_kaiming_weight_factor(a=0.0, uniform=False)
weight_init = tf_contrib.layers.variance_scaling_initializer(factor=factor, mode=mode, uniform=uniform)
# weight_init = tf.random_normal_initializer(mean=0.0, stddev=0.02)
# L2 weight decay applied to conv kernels and fully-connected kernels.
weight_regularizer = tf_contrib.layers.l2_regularizer(scale=0.0001)
weight_regularizer_fully = tf_contrib.layers.l2_regularizer(scale=0.0001)
##################################################################################
# Layer
##################################################################################
def conv(x, channels, kernel=4, stride=2, pad=0, pad_type='zero', use_bias=True, sn=False, scope='conv_0'):
    """2-D convolution with manual padding and optional spectral norm.

    pad_type is 'zero' or 'reflect'.  With sn=True the kernel variable is
    created by hand and wrapped in spectral_norm(); otherwise
    tf.layers.conv2d is used.  Padding is always applied before a VALID
    convolution.
    """
    with tf.variable_scope(scope):
        # Discriminator convs use N(0, 0.02) init; all others use the
        # kaiming-style variance-scaling init.  This local assignment
        # deliberately shadows the module-level weight_init.
        if scope.__contains__("discriminator"):
            weight_init = tf.random_normal_initializer(mean=0.0, stddev=0.02)
        else:
            weight_init = tf_contrib.layers.variance_scaling_initializer(factor=factor, mode=mode, uniform=uniform)
        if pad > 0:
            h = x.get_shape().as_list()[1]
            # SAME-style total padding derived from the input height.
            # NOTE(review): only the height is inspected — assumes square
            # spatial dims; confirm for non-square inputs.
            if h % stride == 0:
                pad = pad * 2
            else:
                pad = max(kernel - (h % stride), 0)
            pad_top = pad // 2
            pad_bottom = pad - pad_top
            pad_left = pad // 2
            pad_right = pad - pad_left
            if pad_type == 'zero':
                x = tf.pad(x, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]])
            if pad_type == 'reflect':
                x = tf.pad(x, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]], mode='REFLECT')
        if sn:
            w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels], initializer=weight_init,
                                regularizer=weight_regularizer)
            x = tf.nn.conv2d(input=x, filter=spectral_norm(w),
                             strides=[1, stride, stride, 1], padding='VALID')
            if use_bias:
                bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
                x = tf.nn.bias_add(x, bias)
        else:
            x = tf.layers.conv2d(inputs=x, filters=channels,
                                 kernel_size=kernel, kernel_initializer=weight_init,
                                 kernel_regularizer=weight_regularizer,
                                 strides=stride, use_bias=use_bias)
        return x
def fully_connected(x, units, use_bias=True, sn=False, scope='linear'):
    """Dense layer (input flattened first), optionally with spectral
    normalization applied to the weight matrix."""
    with tf.variable_scope(scope):
        x = flatten(x)
        in_dim = x.get_shape().as_list()[-1]
        if not sn:
            return tf.layers.dense(x, units=units, kernel_initializer=weight_init,
                                   kernel_regularizer=weight_regularizer_fully,
                                   use_bias=use_bias)
        w = tf.get_variable("kernel", [in_dim, units], tf.float32,
                            initializer=weight_init, regularizer=weight_regularizer_fully)
        if use_bias:
            bias = tf.get_variable("bias", [units],
                                   initializer=tf.constant_initializer(0.0))
            return tf.matmul(x, spectral_norm(w)) + bias
        return tf.matmul(x, spectral_norm(w))
def flatten(x) :
    """Collapse all non-batch dimensions of x into one."""
    return tf.layers.flatten(x)
def GDWCT_block(content, style, style_mu, group_num) :
    """Whiten content features, then color them with the group-wise style
    transform and shift by the style mean.

    Returns (transformed features, U) where U holds the coloring
    directions used for regularization.
    """
    whitened = deep_whitening_transform(content)                  # [bs, h, w, ch]
    U, coloring = deep_coloring_transform(style, group_num)       # [bs, 1, ch], [bs, ch, ch]
    bs, h, w, ch = whitened.get_shape().as_list()
    flat = tf.reshape(whitened, shape=[bs, h * w, ch])
    colored = tf.matmul(flat, coloring) + style_mu
    out = tf.reshape(colored, shape=[bs, h, w, ch])
    return out, U
def deep_whitening_transform(c) :
    """Center features by subtracting the per-sample spatial mean."""
    spatial_mean, _ = tf.nn.moments(c, axes=[1, 2], keep_dims=True)
    return c - spatial_mean
def deep_coloring_transform(s, group_num) :
    """Build the block-diagonal group-wise coloring matrix from a style code.

    s: [batch_size, 1, channel] style statistics.
    Returns (U, coloring_matrix): U is the L2-normalized style direction
    [bs, 1, ch]; coloring_matrix is block-diagonal [bs, ch, ch], one block
    per channel group.
    """
    # [batch_size, 1, channel] : S : MLP^CT(s)
    bs, _, ch = s.get_shape().as_list()
    # make U  (unit direction of the style vector)
    l2_norm = tf.norm(s, axis=-1, keepdims=True)
    U = s / l2_norm
    # make D  (diagonal carrying the style magnitude)
    eye = tf.eye(num_rows=ch, num_columns=ch, batch_shape=[bs]) # [batch_size, channel, channel]
    D = l2_norm * eye
    U_block_list = []
    split_num = ch // group_num
    for i in range(group_num) :
        U_ = U[:, :, i * split_num: (i + 1) * split_num]
        D_ = D[:, i * split_num: (i + 1) * split_num, i * split_num: (i + 1) * split_num]
        # NOTE(review): this is an element-wise (broadcast) product chain,
        # not tf.matmul — confirm it matches the intended U D U^T block.
        block_matrix = U_ * D_ * tf.transpose(U_, perm=[0, 2, 1])
        operator_matrix = tf.linalg.LinearOperatorFullMatrix(block_matrix)
        U_block_list.append(operator_matrix)
    U_block_diag_matrix = tf.linalg.LinearOperatorBlockDiag(U_block_list).to_dense()
    return U, U_block_diag_matrix
def group_wise_regularization(c_whitening, U_list, group_num) :
    """ Regularization """
    """ whitening regularization """
    # Penalize covariance outside the allowed group structure of the
    # whitened content features.
    bs, h, w, ch = c_whitening.get_shape().as_list()
    c_whitening = tf.reshape(c_whitening, shape=[bs, h * w, ch])
    c_whitening = tf.matmul(tf.transpose(c_whitening, perm=[0, 2, 1]), c_whitening) # covariance of x [bs, ch, ch]
    bs, ch, _ = c_whitening.get_shape().as_list() # ch1 = ch2
    index_matrix = make_index_matrix(bs, ch, ch, group_num)
    # Keep covariance entries where the mask is 1, zero the rest, then
    # force ones on the diagonal before comparing with the raw covariance.
    group_convariance_x = tf.where(tf.equal(index_matrix, 1.0), c_whitening, tf.zeros_like(c_whitening))
    group_convariance_x = tf.linalg.set_diag(group_convariance_x, tf.ones([bs, ch]))
    whitening_regularization_loss = L1_loss(c_whitening, group_convariance_x)
    """ coloring regularization """
    # Push each group's coloring directions toward orthonormality (U^T U = I).
    split_num = ch // group_num
    coloring_regularization_list = []
    coloring_regularization_loss_list = []
    for U in U_list :
        # [bs, 1, ch]
        for i in range(group_num):
            U_ = U[:, :, i * split_num: (i + 1) * split_num]
            U_TU = tf.matmul(tf.transpose(U_, perm=[0, 2, 1]), U_) # [bs, ch // group_num, ch // group_num]
            coloring_regularization_list.append(L1_loss(U_TU, tf.eye(num_rows=ch // group_num, num_columns=ch // group_num, batch_shape=[bs])))
        # NOTE(review): coloring_regularization_list is never reset between
        # U's, so each appended mean also includes all earlier U's terms —
        # confirm this accumulation is intentional.
        coloring_regularization_loss_list.append(tf.reduce_mean(coloring_regularization_list))
    coloring_regularization_loss = tf.reduce_mean(coloring_regularization_loss_list)
    return whitening_regularization_loss, coloring_regularization_loss
def make_index_matrix(bs, ch1, ch2, group_num):
    """Build a [bs, ch1, ch2] float 0/1 mask for group-wise covariance terms.

    The mask is 1 everywhere except at the off-diagonal entries of the
    group_num x group_num blocks along the main diagonal, which are 0.

    The original implementation remapped values in three in-place steps
    (0 -> -1, 1 -> 0, -1 -> 1), which is just the complement of the 0/1
    block pattern; computed directly here.
    """
    # 1 at the within-block off-diagonal positions, 0 elsewhere.
    block_pattern = np.abs(np.kron(np.eye(ch1 // group_num, ch2 // group_num),
                                   np.eye(group_num, group_num) - 1))
    index_matrix = 1.0 - block_pattern
    # Replicate the same mask for every batch element.
    return np.tile(index_matrix, [bs, 1, 1])
##################################################################################
# Residual-block
##################################################################################
def resblock(x_init, channels, use_bias=True, sn=False, scope='resblock'):
    """Residual block: two 3x3 reflect-padded convs with instance norm,
    ReLU after the first, plus the identity skip connection."""
    with tf.variable_scope(scope):
        with tf.variable_scope('res1'):
            out = conv(x_init, channels, kernel=3, stride=1, pad=1, pad_type='reflect', use_bias=use_bias, sn=sn)
            out = relu(instance_norm(out))
        with tf.variable_scope('res2'):
            out = conv(out, channels, kernel=3, stride=1, pad=1, pad_type='reflect', use_bias=use_bias, sn=sn)
            out = instance_norm(out)
        return x_init + out
def no_norm_resblock(x_init, channels, use_bias=True, sn=False, scope='resblock'):
    """Residual block without any normalization layers.

    NOTE(review): the default scope 'resblock' is the same as resblock()'s;
    callers must pass distinct scopes if both are used in one graph.
    """
    with tf.variable_scope(scope):
        with tf.variable_scope('res1'):
            out = relu(conv(x_init, channels, kernel=3, stride=1, pad=1, pad_type='reflect', use_bias=use_bias, sn=sn))
        with tf.variable_scope('res2'):
            out = conv(out, channels, kernel=3, stride=1, pad=1, pad_type='reflect', use_bias=use_bias, sn=sn)
        return x_init + out
##################################################################################
# Sampling
##################################################################################
def down_sample(x) :
    """Halve spatial resolution with 3x3 average pooling, stride 2, SAME padding."""
    return tf.layers.average_pooling2d(x, pool_size=3, strides=2, padding='SAME')
def up_sample(x, scale_factor=2):
    """Nearest-neighbor upsampling by `scale_factor`.

    NOTE(review): identical to up_sample_nearest below; kept as-is to avoid
    touching existing call sites, but one of the two is redundant.
    """
    _, h, w, _ = x.get_shape().as_list()
    new_size = [h * scale_factor, w * scale_factor]
    return tf.image.resize_nearest_neighbor(x, size=new_size)
def up_sample_nearest(x, scale_factor=2):
    """Nearest-neighbor upsampling by `scale_factor` (duplicate of up_sample)."""
    _, h, w, _ = x.get_shape().as_list()
    new_size = [h * scale_factor, w * scale_factor]
    return tf.image.resize_nearest_neighbor(x, size=new_size)
def global_avg_pooling(x):
    """Global average pooling over H and W; keepdims so the result is [bs, 1, 1, ch]."""
    gap = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
    return gap
##################################################################################
# Activation function
##################################################################################
def lrelu(x, alpha=0.01):
    """Leaky ReLU with negative slope `alpha`."""
    # pytorch alpha is 0.01
    return tf.nn.leaky_relu(x, alpha)
def relu(x):
    """ReLU activation."""
    return tf.nn.relu(x)
def tanh(x):
    """Hyperbolic tangent activation."""
    return tf.tanh(x)
##################################################################################
# Normalization function
##################################################################################
def instance_norm(x, scope='instance_norm'):
    """Instance normalization with learned scale and offset."""
    return tf_contrib.layers.instance_norm(x,
                                           epsilon=1e-05,
                                           center=True, scale=True,
                                           scope=scope)
def layer_norm(x, scope='layer_norm') :
    """Layer normalization with learned scale and offset."""
    return tf_contrib.layers.layer_norm(x,
                                        center=True, scale=True,
                                        scope=scope)
def group_norm(x, groups=8, scope='group_norm'):
    """Group normalization over `groups` channel groups."""
    # NOTE(review): uses tf.contrib directly while the wrappers above use the
    # tf_contrib alias — presumably the same module; confirm the import.
    return tf.contrib.layers.group_norm(x, groups=groups, epsilon=1e-05,
                                        center=True, scale=True,
                                        scope=scope)
def spectral_norm(w, iteration=1):
    """Spectrally normalize weight `w` by its largest singular value.

    Estimates sigma_max via power iteration on the [-1, out_channels]
    flattening of `w`, persisting the left singular vector estimate in a
    non-trainable variable `u` across steps.
    """
    w_shape = w.shape.as_list()
    # Flatten to 2-D: all leading dims collapsed against the output dim.
    w = tf.reshape(w, [-1, w_shape[-1]])
    u = tf.get_variable("u", [1, w_shape[-1]], initializer=tf.random_normal_initializer(), trainable=False)
    u_hat = u
    v_hat = None
    for i in range(iteration):
        """
        power iteration
        Usually iteration = 1 will be enough
        """
        v_ = tf.matmul(u_hat, tf.transpose(w))
        v_hat = tf.nn.l2_normalize(v_)
        u_ = tf.matmul(v_hat, w)
        u_hat = tf.nn.l2_normalize(u_)
    # The singular-vector estimates are treated as constants: no gradients
    # flow through the power iteration itself.
    u_hat = tf.stop_gradient(u_hat)
    v_hat = tf.stop_gradient(v_hat)
    # sigma ~= v^T W u, the dominant singular value.
    sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
    # Persist the updated u before using the normalized weight.
    with tf.control_dependencies([u.assign(u_hat)]):
        w_norm = w / sigma
        w_norm = tf.reshape(w_norm, w_shape)
    return w_norm
##################################################################################
# Loss function
##################################################################################
def discriminator_loss(gan_type, real, fake):
    """Adversarial discriminator loss, summed over all scales.

    real/fake: lists of per-scale discriminator outputs.
    Supported gan_type values: 'lsgan', 'gan' (BCE), 'hinge'.
    An unrecognized type contributes zero loss.
    """
    per_scale = []
    for i in range(len(real)):
        r, f = real[i], fake[i]
        if gan_type == 'lsgan':
            real_term = tf.reduce_mean(tf.squared_difference(r, 1.0))
            fake_term = tf.reduce_mean(tf.square(f))
        elif gan_type == 'gan':
            real_term = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(r), logits=r))
            fake_term = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(f), logits=f))
        elif gan_type == 'hinge':
            # Hinge loss is applied to the last element of each scale's output.
            real_term = -tf.reduce_mean(tf.minimum(r[-1] - 1, 0.0))
            fake_term = -tf.reduce_mean(tf.minimum(-f[-1] - 1, 0.0))
        else:
            real_term = 0
            fake_term = 0
        per_scale.append(real_term + fake_term)
    return tf.reduce_sum(per_scale)
def generator_loss(gan_type, fake):
    """Adversarial generator loss, summed over all scales.

    fake: list of per-scale discriminator outputs on generated samples.
    Supported gan_type values: 'lsgan', 'gan' (BCE), 'hinge'.
    An unrecognized type contributes zero loss.
    """
    per_scale = []
    for f in fake:
        if gan_type == 'lsgan':
            adv = tf.reduce_mean(tf.squared_difference(f, 1.0))
        elif gan_type == 'gan':
            adv = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(f), logits=f))
        elif gan_type == 'hinge':
            # fake_loss = -tf.reduce_mean(relu(fake[i][-1]))
            adv = -tf.reduce_mean(f[-1])
        else:
            adv = 0
        per_scale.append(adv)
    return tf.reduce_sum(per_scale)
def L1_loss(x, y):
    """Mean absolute error between tensors x and y."""
    return tf.reduce_mean(tf.abs(x - y))
def regularization_loss(scope_name) :
    """Sum the graph's REGULARIZATION_LOSSES whose variable name contains `scope_name`.

    If you want to use "Regularization"
    g_loss += regularization_loss('generator')
    d_loss += regularization_loss('discriminator')
    """
    reg_terms = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    matched = [term for term in reg_terms if scope_name in term.name]
    return tf.reduce_sum(matched)
|
altuntasmuhammet/eksisozluk-scraper | altuntas/scraper/models/__init__.py | <gh_stars>1-10
from .eksisozlukbot import Entry |
altuntasmuhammet/eksisozluk-scraper | altuntas/altuntas/celery.py | <gh_stars>1-10
from __future__ import absolute_import
from celery import Celery
from django.conf import settings
import os
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'altuntas.settings')
# Celery application for the project; all CELERY_* keys in Django settings
# are picked up via the 'CELERY' namespace.
app = Celery('altuntas')
app.config_from_object('django.conf:settings', namespace='CELERY')
# Auto-discover tasks modules from every installed Django app.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
altuntasmuhammet/eksisozluk-scraper | altuntas/scraper/migrations/0001_initial.py | <filename>altuntas/scraper/migrations/0001_initial.py
# Generated by Django 3.2.5 on 2021-07-10 17:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration creating the Entry table.
    # Do not edit generated migrations by hand; add a new migration instead.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Entry',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.TextField()),
                ('content', models.TextField()),
                ('favourite_count', models.IntegerField()),
                ('author', models.TextField()),
                ('created_date', models.DateTimeField()),
                ('edited_date', models.DateTimeField(blank=True)),
                ('eksisozluk_entry_id', models.IntegerField(unique=True)),
                ('eksisozluk_author_id', models.IntegerField()),
            ],
            options={
                'verbose_name': 'Eksisozluk Entry',
                'verbose_name_plural': 'Eksisozluk Entries',
                'db_table': 'web_eksisozluk_entries',
            },
        ),
    ]
|
altuntasmuhammet/eksisozluk-scraper | altuntas/scraper/migrations/0002_alter_entry_table.py | <filename>altuntas/scraper/migrations/0002_alter_entry_table.py
# Generated by Django 3.2.5 on 2021-07-10 17:16
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration renaming the Entry table from
    # 'web_eksisozluk_entries' to 'scraper_eksisozluk_entries'.
    dependencies = [
        ('scraper', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelTable(
            name='entry',
            table='scraper_eksisozluk_entries',
        ),
    ]
|
altuntasmuhammet/eksisozluk-scraper | altuntas/scraper/tasks/eksisozlukbot.py | <filename>altuntas/scraper/tasks/eksisozlukbot.py
from celery.decorators import task
from eksisozlukbot.eksisozlukbot.spiders.eksisozluk import EksisozlukSpider
from eksisozlukbot.eksisozlukbot import settings as eksisozlukbot_settings
from scrapy import signals
from scrapy.crawler import Crawler
from scrapy.settings import Settings
from twisted.internet import reactor
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
import os
@task(name="Scrape EksiSozluk", track_started=True)
def scrape_eksisozluk(**kwargs):
    """Celery task: run the EksisozlukSpider in-process via a Scrapy Crawler.

    kwargs are forwarded to the spider's constructor (e.g. keywords=...).
    Blocks until the spider closes.
    """
    # process = CrawlerProcess(settings=get_project_settings())
    # process.crawl(EksisozlukSpider)
    # process.start()
    crawler_settings = Settings()
    crawler_settings.setmodule(eksisozlukbot_settings)
    #Create a crawler
    crawler = Crawler(EksisozlukSpider, settings=crawler_settings)
    # This ensures Twisted Reactor is properly shutdown
    crawler.signals.connect(reactor.stop, signal=signals.spider_closed)
    # Start crawling
    crawler.crawl(**kwargs)
    # add blocking process
    # NOTE(review): Twisted reactors cannot be restarted; a second invocation
    # of this task in the same worker process will fail — confirm worker
    # configuration (e.g. one task per worker) accounts for this.
    reactor.run()
altuntasmuhammet/eksisozluk-scraper | altuntas/scraper/models/eksisozlukbot.py | from django.conf import settings
from django.db import models
class Entry(models.Model):
    # One scraped eksisozluk entry, persisted by EksisozlukbotPipeline.
    title = models.TextField()                # topic title the entry belongs to
    content = models.TextField()              # entry body text
    favourite_count = models.IntegerField()
    author = models.TextField()               # author nickname
    created_date = models.DateTimeField()
    # NOTE(review): blank=True without null=True — the spider can yield
    # edited_date=None, which a non-null column would reject; verify.
    edited_date = models.DateTimeField(blank=True)
    eksisozluk_entry_id = models.IntegerField(unique=True)  # dedup key
    eksisozluk_author_id = models.IntegerField()
    class Meta:
        app_label = "scraper"
        db_table = "scraper_eksisozluk_entries"
        verbose_name_plural = "Eksisozluk Entries"
        verbose_name = "Eksisozluk Entry"
        # abstract=True
|
altuntasmuhammet/eksisozluk-scraper | altuntas/altuntas/settings.py | <reponame>altuntasmuhammet/eksisozluk-scraper<filename>altuntas/altuntas/settings.py<gh_stars>1-10
"""
Django settings for altuntas project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
from kombu import Exchange, Queue
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost']
# Database configuration
# Database connection parameters — all required; startup fails fast if unset.
POSTGRES_DB=os.environ['POSTGRES_DB']
POSTGRES_USER=os.environ['POSTGRES_USER']
POSTGRES_PASSWORD=os.environ['POSTGRES_PASSWORD']
POSTGRES_HOST=os.environ['POSTGRES_HOST']
POSTGRES_PORT=os.environ['POSTGRES_PORT']
# Rabbitmq configuration (broker for Celery); defaults match a stock container.
RABBITMQ_DEFAULT_USER = os.environ.get('RABBITMQ_DEFAULT_USER', 'guest')
RABBITMQ_DEFAULT_PASS = os.environ.get('RABBITMQ_DEFAULT_PASS', 'guest')
RABBITMQ_DEFAULT_HOST = os.environ.get('RABBITMQ_DEFAULT_HOST', 'rabbitmq')
RABBITMQ_DEFAULT_PORT = os.environ.get('RABBITMQ_DEFAULT_PORT', 5672)
RABBITMQ_DEFAULT_MGMT_PORT = os.environ.get('RABBITMQ_DEFAULT_MGMT_PORT', 15672)
RABBITMQ_DEFAULT_VHOST = os.environ.get('RABBITMQ_DEFAULT_VHOST', 'vhost')
# Celery configuration
CELERY_BROKER_URL = f"amqp://{RABBITMQ_DEFAULT_USER}:{RABBITMQ_DEFAULT_PASS}@{RABBITMQ_DEFAULT_HOST}:{RABBITMQ_DEFAULT_PORT}/{RABBITMQ_DEFAULT_VHOST}"
# NOTE(review): this print writes the broker URL — including the RabbitMQ
# password — to stdout/logs; remove or redact before production use.
print("CELERY_BROKER_URL", CELERY_BROKER_URL)
CELERY_RESULT_BACKEND = 'django-db'
CELERY_CACHE_BACKEND = 'django-cache'
CELERY_TASK_SERIALIZER = "json"
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_ALWAYS_EAGER = False
CELERY_ACKS_LATE = True
CELERY_TASK_PUBLISH_RETRY = True
CELERY_DISABLE_RATE_LIMITS = False
# Task modules Celery must import explicitly.
CELERY_IMPORTS = (
    'scraper.tasks.eksisozlukbot',
)
CELERY_DEFAULT_QUEUE = 'default'
CELERY_QUEUES = (
    Queue('default', Exchange('default'), routing_key='default'),
)
# Application definition
INSTALLED_APPS = [
'bootstrap_admin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_celery_beat',
'django_celery_results',
'scraper',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'altuntas.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'altuntas.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# PostgreSQL connection built from the POSTGRES_* environment settings above.
# FIX: the PASSWORD value had been replaced by an anonymization placeholder
# ('<PASSWORD>'), which is not valid Python; restored the settings variable,
# matching how NAME/USER/HOST/PORT are wired.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': POSTGRES_DB,
        'USER': POSTGRES_USER,
        'PASSWORD': POSTGRES_PASSWORD,
        'HOST': POSTGRES_HOST,
        'PORT': POSTGRES_PORT,
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
altuntasmuhammet/eksisozluk-scraper | altuntas/eksisozlukbot/eksisozlukbot/spiders/eksisozluk.py | <gh_stars>1-10
import scrapy
import time
from datetime import datetime
import urllib.parse
EKSISOZLUK_BASEURL = "https://eksisozluk.com"
DATETIME_FORMAT = "%d.%m.%Y %H:%M"
START_DATE = ""
END_DATE = ""
# keywords = ["apache kafka"]
class EksisozlukSpider(scrapy.Spider):
    """Searches eksisozluk.com titles for given keywords and yields one dict per entry."""
    name = 'eksisozluk'
    allowed_domains = ['eksisozluk.com']
    # start_urls = ['http://eksisozluk.com/']
    def __init__(self, keywords="", *args, **kwargs):
        # keywords: comma-separated search terms; each is stripped of whitespace.
        super(EksisozlukSpider, self).__init__(*args, **kwargs)
        self.keywords = [k.strip() for k in keywords.split(",")]
    def start_requests(self):
        """Issue one title-search request per keyword (date-sorted results)."""
        title_query_url_templated = "https://eksisozluk.com/basliklar/ara?SearchForm.Keywords={keyword}&SearchForm.Author=&SearchForm.When.From={start_date}&SearchForm.When.To={end_date}&SearchForm.NiceOnly=false&SearchForm.FavoritedOnly=false&SearchForm.SortOrder=Date&_={timestamp}"
        for keyword in self.keywords:
            encoded_keyword = urllib.parse.quote(keyword)
            # Millisecond timestamp acts as a cache-buster query parameter.
            timestamp = int(time.time() * 1000)
            url = title_query_url_templated.format(keyword=encoded_keyword,
                                                   start_date=START_DATE,
                                                   end_date=END_DATE,
                                                   timestamp=timestamp)
            print("******URL****** - {}".format(url))
            yield scrapy.Request(url=url, callback=self.parse_title_links)
    def parse_title_links(self, response):
        """Extract topic links from a search-results page and follow each."""
        print()
        titles = response.css('div.instapaper_body ul.topic-list li')
        print("********TITLE********:", titles)
        for title in titles:
            title_url = EKSISOZLUK_BASEURL + title.css('a::attr(href)').get()
            title_name = title.css("a::text").get().strip()
            yield scrapy.Request(url=title_url, callback=self.parse_pages, meta={'title_url': title_url, 'title_name': title_name})
    def parse_pages(self, response):
        """Read the topic's page count and request every page of entries."""
        page_count_str = response.css('div.pager::attr(data-pagecount)').get()
        # Topics with a single page have no pager element.
        page_count = int(page_count_str) if page_count_str else 1
        title_url = response.meta['title_url']
        title_name = response.meta['title_name']
        for page_num in range(1, page_count+1):
            url = "{title_url}?p={page_num}".format(title_url=title_url, page_num=page_num)
            yield scrapy.Request(url=url, callback=self.parse_entries_per_page, meta={'title_name':title_name})
    def parse_entries_per_page(self, response):
        """Yield a dict of scraped fields for every entry on one topic page."""
        entries = response.css('ul[id=entry-item-list] li')
        title_name = response.meta['title_name']
        for entry in entries:
            # Content
            content = " ".join(entry.css("div.content *::text").getall()).strip()
            print("******LOG****** - Content:", content)
            # Title
            title = title_name
            # Favourite Count
            favourite_count = int(entry.attrib['data-favorite-count'])
            # Author Name
            author = entry.css("footer div.info a.entry-author::text").get().strip()
            # Created Date and Edited Date
            # Date text is either "dd.mm.YYYY HH:MM" or
            # "dd.mm.YYYY HH:MM ~ [dd.mm.YYYY ]HH:MM" when the entry was edited.
            date = entry.css("footer div.info a.entry-date::text").get().strip()
            if '~' in date:
                created_date_str = date.split('~')[0].strip()
                edited_date_str = date.split('~')[1].strip()
                print("LOG 1", created_date_str, edited_date_str)
                # Same-day edits omit the date part; borrow it from created_date.
                edited_date_str = edited_date_str if len(edited_date_str)==len(created_date_str) else " ".join([created_date_str.split(' ')[0], edited_date_str])
                print("LOG 2", created_date_str, edited_date_str)
                created_date = datetime.strptime(created_date_str, DATETIME_FORMAT)
                edited_date = datetime.strptime(edited_date_str, DATETIME_FORMAT)
            else:
                created_date_str = date.strip()
                created_date = datetime.strptime(created_date_str, DATETIME_FORMAT)
                edited_date = None
            # eksisozluk_entry_id
            eksisozluk_entry_id = int(entry.attrib['data-id'])
            # eksisozluk_author_id
            eksisozluk_author_id = int(entry.attrib['data-author-id'])
            yield {
                "content": content,
                "title": title,
                "favourite_count": favourite_count,
                "author": author,
                "created_date": created_date,
                "edited_date": edited_date,
                "eksisozluk_entry_id": eksisozluk_entry_id,
                "eksisozluk_author_id": eksisozluk_author_id
            }
altuntasmuhammet/eksisozluk-scraper | altuntas/scraper/admin.py | from django.contrib import admin
from .models import Entry
# Register your models here.
class EntryAdmin(admin.ModelAdmin):
    # Columns shown in the admin changelist for scraped entries.
    list_display = ("title", "content", "favourite_count", "author", "created_date", "edited_date")
admin.site.register(Entry, EntryAdmin)
altuntasmuhammet/eksisozluk-scraper | altuntas/eksisozlukbot/__init__.py | from six import iteritems
from eksisozlukbot.eksisozlukbot import spiders
from eksisozlukbot.eksisozlukbot import items
from eksisozlukbot.eksisozlukbot import middlewares
from eksisozlukbot.eksisozlukbot import pipelines
from eksisozlukbot.eksisozlukbot import settings
# Alias the nested eksisozlukbot.eksisozlukbot.* modules as top-level
# eksisozlukbot.* entries in sys.modules, so imports such as
# `eksisozlukbot.spiders` resolve regardless of the doubled package layout.
_SUBMODULES = {
    "spiders": spiders,
    "items": items,
    "middlewares": middlewares,
    "pipelines": pipelines,
    "settings": settings,
}
import sys
for module_name, module in iteritems(_SUBMODULES):
    sys.modules["eksisozlukbot.%s" % module_name] = module
altuntasmuhammet/eksisozluk-scraper | altuntas/eksisozlukbot/eksisozlukbot/settings.py | <filename>altuntas/eksisozlukbot/eksisozlukbot/settings.py
# Scrapy settings for eksisozlukbot project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import os
import sys
import django
# Make the Django project importable from the scrapy process: add the
# package dir and its two parents to sys.path so 'altuntas.settings' resolves.
sys.path.append(
    os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
)
sys.path.append(
    os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "..")
)
sys.path.append(
    os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "../../")
)
os.environ['DJANGO_SETTINGS_MODULE'] = 'altuntas.settings'
# Initialize Django so the pipeline can use the ORM (scraper.models.Entry).
django.setup()
# from django.conf import settings
BOT_NAME = 'eksisozlukbot'
SPIDER_MODULES = ['eksisozlukbot.spiders']
NEWSPIDER_MODULE = 'eksisozlukbot.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'eksisozlukbot (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'eksisozlukbot.middlewares.EksisozlukbotSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'eksisozlukbot.middlewares.EksisozlukbotDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'eksisozlukbot.pipelines.EksisozlukbotPipeline': 300,
}
# # Configure database
# DATABASE = {
# "drivername": "postgresql",
# "host": settings.POSTGRES_HOST,
# "port": settings.POSTGRES_PORT,
# "username": settings.POSTGRES_USER,
# "password": settings.POSTGRES_PASSWORD,
# "database": settings.POSTGRES_DB,
# }
# Configure logging
LOG_LEVEL = "INFO"
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' |
altuntasmuhammet/eksisozluk-scraper | altuntas/eksisozlukbot/eksisozlukbot/pipelines.py | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scraper.models.eksisozlukbot import Entry
class EksisozlukbotPipeline:
    """Persists scraped items into the Django `Entry` table, skipping duplicates."""
    def process_item(self, item, spider):
        """
        Process the item and store to database.

        Deduplicates on eksisozluk_entry_id; always returns the item so any
        downstream pipelines still receive it.
        """
        # .exists() issues a cheap EXISTS query instead of evaluating the
        # queryset (the original truthiness check fetched matching rows).
        already_stored = Entry.objects.filter(
            eksisozluk_entry_id=item['eksisozluk_entry_id']
        ).exists()
        if not already_stored:
            # NOTE: the unique constraint on eksisozluk_entry_id still guards
            # against a concurrent insert racing this check.
            Entry(**item).save()
        return item
|
pawansingh126/yaml_editor | yaml_editor/editor.py | <filename>yaml_editor/editor.py<gh_stars>1-10
##############################################################################
#
# Author : <NAME>
# Email : <EMAIL>
# Date : Oct 2018
#
##############################################################################
import click
import json
import os
import socket
import yaml
from flask import Flask, request, render_template
from flask_bootstrap import Bootstrap
app_path = os.path.dirname(os.path.abspath(__file__))
app = Flask('Yaml Editor!',
template_folder=os.path.join(app_path, 'templates'),
static_folder=os.path.join(app_path, 'static'))
Bootstrap(app)
@app.route('/', methods=['GET', 'POST'])
def index():
    """Renders index page to edit provided yaml file."""
    with open(app.config['YAML_FILE_OBJ']) as file_obj:
        # NOTE(review): yaml.Loader can instantiate arbitrary Python objects;
        # if the edited file may be untrusted, prefer yaml.SafeLoader.
        data = yaml.load(file_obj, Loader=yaml.Loader)
    return render_template('yaml.html',
                           data=json.dumps(data),
                           change_str=app.config['STRING_TO_CHANGE'])
@app.route('/tree', methods=['GET', 'POST'])
def tree():
    """Renders tree view page to edit provided yaml file."""
    with open(app.config['YAML_FILE_OBJ']) as file_obj:
        # NOTE(review): yaml.Loader can instantiate arbitrary Python objects;
        # if the edited file may be untrusted, prefer yaml.SafeLoader.
        data = yaml.load(file_obj, Loader=yaml.Loader)
    # Template receives both the parsed structure and its JSON serialization.
    return render_template('treeyaml.html',
                           data=data, datastr=json.dumps(data),
                           change_str=app.config['STRING_TO_CHANGE'])
@app.route('/save', methods=['POST'])
def save():
    """Save current progress on file.

    Expects JSON body {"yaml_data": ...}; overwrites the configured file.
    """
    out = request.json.get('yaml_data')
    with open(app.config['YAML_FILE_OBJ'], 'w') as file_obj:
        yaml.dump(out, file_obj, default_flow_style=False)
    return "Data saved successfully!"
@app.route('/saveExit', methods=['POST'])
def save_exit():
    """Save current progress on file and shuts down the server."""
    out = request.json.get('yaml_data')
    with open(app.config['YAML_FILE_OBJ'], 'w') as file_obj:
        yaml.dump(out, file_obj, default_flow_style=False)
    # NOTE(review): 'werkzeug.server.shutdown' is only present under the
    # Werkzeug dev server and was removed in newer Werkzeug releases —
    # on other servers this silently does nothing; verify deployment target.
    func = request.environ.get('werkzeug.server.shutdown')
    if func:
        func()
    return "Saved successfully, Shutting down app! You may close the tab!"
@app.errorhandler(404)
def page_not_found(e):
    """Render a minimal HTML body for unknown routes (HTTP 404)."""
    body = '<h1>404: Page not Found!</h1>'
    return body
def run(*args, **kwargs):
    """Start the Flask server, binding all interfaces on kwargs['port'] (default 8161)."""
    port = kwargs.get('port') or 8161
    app.run(host='0.0.0.0', port=port, debug=False)
@click.command()
@click.option(
    '--file',
    '-f',
    required=True,
    type=click.Path(exists=True),
    multiple=False,
    help="Path with file name to the intermediary yaml file."
)
@click.option(
    '--port',
    '-p',
    default=8161,
    type=click.INT,
    multiple=False,
    help="Optional port parameter to run Flask on."
)
@click.option(
    '--string',
    '-s',
    default='#CHANGE_ME',
    type=click.STRING,
    multiple=False,
    help="Text which is required to be changed on yaml file."
)
def main(*args, **kwargs):
    """CLI entry point: configure the editor app and launch the server.

    Click supplies the parsed options as keyword arguments
    ('file', 'port', 'string').
    """
    print("Please go to http://{0}:{1}/ to edit your yaml file.".format(
        socket.gethostbyname(socket.gethostname()), kwargs['port']))
    app.config['YAML_FILE_OBJ'] = kwargs['file']
    app.config['STRING_TO_CHANGE'] = kwargs['string']
    # BUG FIX: run(kwargs) passed the options dict as a single positional
    # argument, so run() never saw 'port' in its **kwargs and always fell
    # back to the default 8161, silently ignoring --port. Unpack instead.
    run(**kwargs)
if __name__=='__main__':
"""Invoked when used as a script."""
main()
|
ankit-vaghela30/sign-language-recognition | sign-language-recognition/image_obj_detect.py | <filename>sign-language-recognition/image_obj_detect.py
#!flask/bin/python
from flask import Flask, render_template, request
from flask_cors import CORS, cross_origin
import base64
import json
# from datetime import datetime
import numpy as np
import sys
import tensorflow as tf
import os
from threading import Thread
from datetime import datetime
import cv2
from collections import defaultdict
from utils import label_map_util
from protos import string_int_label_map_pb2
import multiprocessing
from multiprocessing import Queue, Pool
import time
import datetime
import argparse
import os
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
import tensorflow as tf
from keras.models import load_model
import sklearn
import skimage
from skimage.transform import resize
from keras.backend import clear_session
app = Flask(__name__)
CORS(app, support_credentials=True)
_score_thresh = 0.27
MODEL_NAME = 'hand_inference_graph'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join(MODEL_NAME, 'hand_label_map.pbtxt')
# In[5]:
NUM_CLASSES = 1
# load label map
label_map = label_map_util.load_labelmap('hand_label_map.pbtxt')
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def load_inference_graph():
    """Load the frozen hand-detection graph from PATH_TO_CKPT.

    Returns:
        (detection_graph, sess): the loaded tf.Graph and a tf.Session bound to it.
    """
    # load frozen tensorflow model into memory
    print("> ====== loading HAND frozen graph into memory")
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        # Deserialize the GraphDef protobuf and import it into this graph.
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
        sess = tf.Session(graph=detection_graph)
    print("> ====== Hand Inference graph loaded.")
    return detection_graph, sess
video_source = 0
# help='Device index of the camera.
num_hands=2
# help='Max number of hands to detect.')
fps=1
# help='Show FPS on detection/display visualization')
width=300
# help='Width of the frames in the video stream.')
height=200
# help='Height of the frames in the video stream.')
display=1
# help='Display the detected images using OpenCV. This reduces FPS')
num_workers=4
# help='Number of workers.')
queue_size=5
score_thresh = 0.2
print(">> loading frozen model for worker")
print('Loading model...')
global model_classification
model_classification = load_model('vgg16model.h5')
detection_graph, sess = load_inference_graph()
# global graph2
graph = tf.get_default_graph()
print('model loaded')
@app.route('/upload', methods=['POST'])
@cross_origin(supports_credentials=True)
def upload_base64_img():
global graph
with graph.as_default():
def detect_objects(image_np, detection_graph, sess):
# Definite input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name(
'detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name(
'detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name(
'detection_classes:0')
num_detections = detection_graph.get_tensor_by_name(
'num_detections:0')
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores,
detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
return np.squeeze(boxes), np.squeeze(scores)
def draw_box_on_image(num_hands_detect, score_thresh, scores, boxes, im_width, im_height, image_np):
for i in range(num_hands_detect):
if (scores[i] > score_thresh):
(left, right, top, bottom) = (boxes[i][1] * im_width*0.8, boxes[i][3] * im_width*1.1,
boxes[i][0] * im_height*0.8, boxes[i][2] * im_height*1.1)
p1 = (int(left), int(top))
p2 = (int(right), int(bottom))
x= int(left)
y= int(top)
w=int(right)
h=int(bottom)
roi=image_np[y:y+h,x:x+w]
#cv2.imwrite('image.png',roi)
map_characters = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H', 8: 'I', 9: 'J', 10: 'K', 11: 'L', 12: 'M', 13: 'N', 14: 'O', 15: 'P', 16: 'Q', 17: 'R', 18: 'S', 19: 'T', 20: 'U', 21: 'V', 22: 'W', 23: 'X', 24: 'Y', 25: 'Z', 26: 'del', 27: '-', 28: 'space'}
X_ = []
imageSize=50
img_file_=roi
img_file_ = skimage.transform.resize(img_file_, (imageSize, imageSize, 3))
img_arr_ = np.asarray(img_file_)
X_.append(img_arr_)
X_ = np.asarray(X_)
y_pred = model_classification.predict(X_)
Y_pred_classes = np.argmax(y_pred,axis = 1)
print('Predicting the gesture')
print(map_characters.get(Y_pred_classes[0]))
cv2.imwrite('image.png', roi)
cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1)
def get_pred(image_path):
    """Run hand detection + gesture classification on an image file.

    Args:
        image_path: path to the image to analyze.

    Returns:
        (pred, accuracy): predicted gesture label (or '-' when no hand
        passes the score threshold / the image is unreadable) and the
        classifier confidence in percent (0 when no prediction was made).

    Side effects: writes the last accepted crop to 'roi.jpg'.
    """
    # Classifier output index -> gesture label (built once per call).
    map_characters = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H', 8: 'I', 9: 'J', 10: 'K', 11: 'L', 12: 'M', 13: 'N', 14: 'O', 15: 'P', 16: 'Q', 17: 'R', 18: 'S', 19: 'T', 20: 'U', 21: 'V', 22: 'W', 23: 'X', 24: 'Y', 25: 'Z', 26: 'del', 27: 'nothing', 28: 'space'}
    pred = '-'
    accuracy = 0
    frame = cv2.imread(image_path)
    if frame is None:
        return pred, accuracy
    # RESOURCE FIX: the original created a tf.Session per call and never
    # closed it (sess.close() was commented out).  The context manager
    # releases the session deterministically.
    with tf.Session(graph=detection_graph) as sess:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        im_height, im_width, _ = frame.shape
        # boxes: normalized [ymin, xmin, ymax, xmax]; scores: confidences.
        boxes, scores = detect_objects(frame, detection_graph, sess)
        # num_hands and score_thresh are module-level settings.
        for i in range(num_hands):
            if scores[i] <= score_thresh:
                continue
            # The 0.8 / 1.1 factors pad the crop slightly around the box.
            left = boxes[i][1] * im_width * 0.8
            right = boxes[i][3] * im_width * 1.1
            top = boxes[i][0] * im_height * 0.8
            bottom = boxes[i][2] * im_height * 1.1
            # BUG FIX: the original sliced frame[y:y+h, x:x+w] where
            # w = right and h = bottom were absolute coordinates, not
            # sizes.  Slice directly between the box edges instead.
            roi = frame[int(top):int(bottom), int(left):int(right)]
            # Resize to the classifier's 50x50x3 input, batch of one.
            image_size = 50
            resized = skimage.transform.resize(roi, (image_size, image_size, 3))
            batch = np.asarray([np.asarray(resized)])
            y_pred = model_classification.predict(batch)
            best = np.argmax(y_pred, axis=1)[0]
            accuracy = y_pred[0][best] * 100
            print('Predicting the gesture')
            print(map_characters.get(best))
            pred = map_characters.get(best)
            cv2.imwrite('roi.jpg', roi)
    return pred, accuracy
content = request.get_json()
# checking if the image is present or not.
if 'image' not in content:
# abort(400)
# abort(Response('No Image data received'))
return 'Image not received'
imgdata = base64.b64decode(content['image'])
# (dt, micro) = datetime.utcnow().strftime('%Y%m%d%H%M%S.%f').split('.')
# dt = "%s%03d" % (dt, int(micro) / 1000)
filename = 'webcamImg/some_image.jpg'
with open(filename, 'wb') as f:
f.write(imgdata)
# print("--------------------->>>>>>", filename)
letter, acc = get_pred(filename)
result = {
"hand_object": "http://localhost:8888/ada/roi.jpg",
"letter_detected": str(letter),
"accuracy": acc
}
return json.dumps(result)
app.run(port=5000, debug=True) |
ankit-vaghela30/sign-language-recognition | sign-language-recognition/gesturedetector.py | import skimage
from skimage.transform import resize
from keras.models import load_model
import numpy as np
import cv2
import os
# Force CPU inference; hides all CUDA devices from TensorFlow/Keras.
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
print('Loading model...')
# VGG16-based gesture classifier trained on 50x50x3 crops.
model = load_model('vgg16model.h5')
print('model loaded')
# Classifier output index -> ASL gesture label.
map_characters = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H', 8: 'I', 9: 'J', 10: 'K', 11: 'L', 12: 'M', 13: 'N', 14: 'O', 15: 'P', 16: 'Q', 17: 'R', 18: 'S', 19: 'T', 20: 'U', 21: 'V', 22: 'W', 23: 'X', 24: 'Y', 25: 'Z', 26: 'del', 27: 'nothing', 28: 'space'}
# Poll 'image.png' forever and classify it whenever it is readable.
# NOTE(review): busy loop with no sleep — presumably another process writes
# image.png; consider throttling.  TODO confirm the producer.
while (True):
    X_ = []
    imageSize=50
    img_file_ = cv2.imread('image.png')
    # cv2.imread returns None when the file is missing or unreadable.
    if img_file_ is not None:
        # Resize to the classifier's input shape and build a batch of one.
        img_file_ = skimage.transform.resize(img_file_, (imageSize, imageSize, 3))
        img_arr_ = np.asarray(img_file_)
        X_.append(img_arr_)
        X_ = np.asarray(X_)
        y_pred = model.predict(X_)
        # argmax over class probabilities -> predicted label index.
        Y_pred_classes = np.argmax(y_pred,axis = 1)
        print('Predicting the gesture')
        print(map_characters.get(Y_pred_classes[0]))
|
ankit-vaghela30/sign-language-recognition | web-service/analyze.py | <filename>web-service/analyze.py
#!flask/bin/python
from flask import Flask, render_template, request
from flask_cors import CORS, cross_origin
import base64
import json
#import time
from datetime import datetime
app = Flask(__name__)
CORS(app, support_credentials=True)
@app.route('/upload', methods=['POST'])
@cross_origin(supports_credentials=True)
def upload_base64_img():
    """Accept a base64-encoded image via JSON, save it, and return a stub result.

    Expects a JSON body with an 'image' key holding base64 image data.
    Saves the decoded image under images/<timestamp>.jpg and returns a JSON
    payload echoing the image plus a placeholder "detected" letter.
    """
    content = request.get_json()
    # checking if the image is present or not.
    if 'image' not in content:
        # BUG FIX: the original called abort()/Response(), neither of which
        # is imported here (NameError -> HTTP 500); the second abort() was
        # also unreachable.  Return an explicit 400 response instead.
        return 'No Image data received', 400
    imgdata = base64.b64decode(content['image'])
    # Millisecond-resolution UTC timestamp used as the file name.
    (dt, micro) = datetime.utcnow().strftime('%Y%m%d%H%M%S.%f').split('.')
    dt = "%s%03d" % (dt, int(micro) / 1000)
    filename = 'images/' + dt + '.jpg'
    with open(filename, 'wb') as f:
        f.write(imgdata)
    # Placeholder "detection": derive a letter deterministically from the
    # timestamp.  NOTE(review): int(dt) % 65 + 65 yields char codes 65-129,
    # which runs past 'Z' — confirm the intended range.
    s = int(dt) % 65 + 65
    result = {
        # BUG FIX: str() on the b64encode() bytes leaked the "b'...'" repr
        # into the JSON payload; decode to plain ASCII text instead.
        "hand_object": base64.b64encode(imgdata).decode('ascii'),
        "letter_detected": chr(s)
    }
    return json.dumps(result)
# Development server entry point (single-threaded Flask dev server).
app.run(port=5000)
ramonmeza/raycasting-py | tests/template/features/steps/template.py | @given('some state of an object') # type: ignore # noqa: F821
def step_impl(context) -> None:  # noqa: F811
    # Given-step: seed the behave scenario context with a known string.
    context.test_str = 'change me'
@when('we change that object')  # type: ignore # noqa: F821
def step_impl(context) -> None:  # noqa: F811
    # When-step: mutate the string stored on the context.
    context.test_str = 'you\'ve changed'
@then('assert that the change hasn\'t broken shit')  # type: ignore # noqa: F821,E501
def step_impl(context) -> None:  # noqa: F811
    # Then-step: verify the when-step's mutation took effect.
    assert context.test_str == 'you\'ve changed'
|
LZP4GitHub/ssd.pytorch | data/dota.py | <reponame>LZP4GitHub/ssd.pytorch
"""dota dataset classes
original author: <NAME>
https://github.com/fmassa/vision/blob/voc_dataset/torchvision/datasets/voc.py
updated by: ellis brown, max degroot
for dota file list is:
|data/
|data/trainvalsplit/
|data/testsplit/
|data|trainvalsplit/images/
|data|trainvalsplit/labeltxt/
|data|trainvalsplit/train.txt
the image folder is xxx.jpg
"""
from .config import HOME
import os.path as osp
import sys
import torch
import torch.utils.data as data
import cv2
import numpy as np
import zipfile
import codecs
# The 15 DOTA object categories.  Tuple order defines the label index used
# throughout the dataset pipeline (0 = 'plane', ..., 14 = 'helicopter').
DOTA_CLASSES = (  # always index 0
    'plane', 'baseball-diamond',
    'bridge', 'ground-track-field',
    'small-vehicle', 'large-vehicle',
    'ship', 'tennis-court',
    'basketball-court', 'storage-tank',
    'soccer-ball-field', 'roundabout',
    'harbor', 'swimming-pool',
    'helicopter')
# note: if you used our download scripts, this should be right
#DOTA_ROOT = osp.join(HOME, '/data/dota-split-300/')
# Hard-coded absolute path to the split DOTA dataset root.
# NOTE(review): machine-specific path — adjust per deployment.
DOTA_ROOT = '/home/lzp/data/dota-split-300/'
class DOTAAnnotationTransform(object):
    """Transforms a DOTA annotation into [xmin, ymin, xmax, ymax, label] rows.

    Each source annotation line is a quadrilateral
    (x1 y1 x2 y2 x3 y3 x4 y4 classname difficult); the transform converts it
    into an axis-aligned bounding box with coordinates scaled to [0, 1].

    Arguments:
        class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
            (default: alphabetic indexing of the 15 DOTA classes)
        keep_difficult (bool, optional): keep difficult instances or not
            (default: True)
    """

    def __init__(self, class_to_ind=None, keep_difficult=True):
        self.class_to_ind = class_to_ind or dict(
            zip(DOTA_CLASSES, range(len(DOTA_CLASSES))))
        self.keep_difficult = keep_difficult

    def __call__(self, targets, width, height):
        """
        Arguments:
            targets: iterable of split annotation lines (lists of strings)
            width (int): image width in pixels
            height (int): image height in pixels
        Returns:
            a list of [xmin, ymin, xmax, ymax, label_ind] lists with the
            coordinates normalized to [0, 1]
        """
        res = []
        for target in targets:
            difficult = (int(target[9]) != 0)  # 0 -> easy
            if not self.keep_difficult and difficult:
                continue
            name = target[8].lower().strip()
            # BUG FIX: the original applied min()/max() to the raw string
            # fields, i.e. a lexicographic comparison (min('9', '10') ==
            # '10'), which produced wrong boxes whenever coordinates had
            # different digit counts.  Convert to float before comparing.
            xs = [float(target[i]) for i in (0, 2, 4, 6)]
            ys = [float(target[i]) for i in (1, 3, 5, 7)]
            xmin, xmax = min(xs), max(xs)
            ymin, ymax = min(ys), max(ys)
            # Shift to 0-based pixel coordinates, clamp at 0, scale to [0, 1].
            bndbox = [
                max(xmin - 1.0, 0.0) / width,
                max(ymin - 1.0, 0.0) / height,
                max(xmax - 1.0, 0.0) / width,
                max(ymax - 1.0, 0.0) / height,
            ]
            bndbox.append(self.class_to_ind[name])
            res += [bndbox]  # [xmin, ymin, xmax, ymax, label_ind]
        return res
class DOTADetection(data.Dataset):
    """DOTA detection dataset object.

    Input is an image, target is its annotation (axis-aligned boxes + labels).

    arguments:
        root (string): filepath to the DOTA dataset root folder.
        image_sets (list): imagesets to use (eg. ['trainvalsplit'])
        transform (callable, optional): transformation to perform on the
            input image
        target_transform (callable, optional): transformation to perform on the
            target `annotation`
            (eg: take in caption string, return tensor of word indices)
        dataset_name (string, optional): which dataset to load
            (default: 'DOTA')
        train_test (string): 'train' reads ids from train11.txt,
            anything else reads ids from test.txt
    """
    # NOTE(review): the mutable default image_sets=[...] and the shared
    # DOTAAnnotationTransform() default are evaluated once at import time —
    # confirm no caller mutates them.
    def __init__(self, root,
                 image_sets=['trainvalsplit'],
                 transform=None, target_transform=DOTAAnnotationTransform(),
                 dataset_name='DOTA',
                 train_test="train"):
        self.root = root
        self.image_set = image_sets
        self.transform = transform
        self.target_transform = target_transform
        self.name = dataset_name
        # Both train and test ids live under the trainvalsplit directory;
        # %s placeholders are filled with the per-image id.
        train_rootpath = osp.join(self.root, 'trainvalsplit')
        self._annopath = osp.join(train_rootpath, 'labelTxt', '%s.txt')
        self._imgpath = osp.join(train_rootpath, 'images', '%s.png')
        self.ids = list()
        # NOTE(review): the open() handles below are never explicitly
        # closed; consider 'with open(...)'.
        if train_test == "train":
            for line in open(osp.join(train_rootpath, 'train11.txt')):
                self.ids.append((train_rootpath, line.strip()))
        else:
            for line in open(osp.join(train_rootpath, 'test.txt')):
                print(line)
                self.ids.append((train_rootpath, line.strip()))
        # for line in open(osp.join(train_rootpath, 'train11.txt')):
        #     self.ids.append((train_rootpath, line.strip()))

    def __getitem__(self, index):
        # Returns (image_tensor, target) for DataLoader consumption;
        # height/width from pull_item are discarded here.
        im, gt, h, w = self.pull_item(index)
        return im, gt

    def __len__(self):
        # Number of image ids listed in the split file.
        return len(self.ids)

    def pull_item(self, index):
        """Load one sample: (CHW image tensor, targets, height, width).

        Targets are [xmin, ymin, xmax, ymax, label] rows (normalized by
        target_transform when set); transform (augmentation) is applied to
        both image and boxes when set.
        """
        img_id = self.ids[index]
        # print(img_id)
        anno_file_path = (self._annopath % img_id[1])
        # print(anno_file_path)
        img = cv2.imread(self._imgpath % img_id[1])
        # print("Image Path")
        # print(self._imgpath % img_id[1])
        height, width, channels = img.shape
        # print("Image Channel:")
        # print(height,width,channels)
        #target = et.parse(self._annopath % img_id).getroot()
        f = codecs.open(anno_file_path,"r")
        targets = f.readlines()
        f.close()
        # print("GT is:")
        # print(targets)
        # Each annotation line -> list of whitespace-split string fields.
        targets = [target.strip().strip('\n').split(' ') for target in targets]
        # print("targets11111111.size")
        # print(np.shape(targets))
        if self.target_transform is not None:
            targets = self.target_transform(targets, width, height)
        # print("After target_transform GT is:")
        # print(targets)
        if self.transform is not None:
            targets = np.array(targets)
            # print("After transform GT is:")
            # print(targets)
            # if targets.size < 8: # avoide the null .txt
            # #     return torch.from_numpy(img).permute(2, 0, 1)., [], height, width
            #     return torch.from_numpy(img).permute(2, 0, 1).float(), [], height, width
            img, boxes, labels = self.transform(img, targets[:, 0:4], targets[:,4])
            # to rgb
            img = img[:, :, (2, 1, 0)]
            # img = img.transpose(2, 0, 1)
            targets = np.hstack((boxes, np.expand_dims(labels, axis=1)))
        # return torch.from_numpy(img).permute(2, 0, 1), targets, height, width
        return torch.from_numpy(img).permute(2, 0, 1), targets, height, width # .float()
        # return torch.from_numpy(img), targets, height, width

    def pull_image(self, index):
        '''returns the original image object at index (BGR numpy array)

        note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.
        argument:
            index (int): index of img to show
        return:
            BGR image as returned by cv2.imread
        '''
        img_id = self.ids[index]
        return cv2.imread(self._imgpath % img_id[1], cv2.IMREAD_COLOR) #.float()

    def pull_anno(self, index):
        '''returns the original annotation of image at index

        note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.
        argument:
            index (int): index of img to get annotation of
        return:
            tuple: (img_id, targets) where targets come from
            target_transform applied with width=height=1 (i.e. absolute,
            un-normalized pixel coordinates).
        '''
        img_id = self.ids[index]
        #anno = et.parse(self._annopath % img_id).getroot()
        f = codecs.open(self._annopath % img_id[1],"r")
        anno = f.readlines()
        f.close()
        anno = [target.strip().split(' ') for target in anno]
        gt = self.target_transform(anno, 1, 1)
        return img_id[1], gt

    def pull_tensor(self, index):
        '''returns the original image at an index in tensor form

        note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.
        argument:
            index (int): index of img to show
        return:
            tensorized version of img with a leading batch dimension added
        '''
        return torch.Tensor(self.pull_image(index)).unsqueeze_(0)
|
LZP4GitHub/ssd.pytorch | test.py | <reponame>LZP4GitHub/ssd.pytorch
from __future__ import print_function
import sys
import os
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from torch.autograd import Variable
# from data import VOC_ROOT, VOC_CLASSES as labelmap
from PIL import Image
# from data import DOTA_ROOT, VOC_ROOT, VOC_CLASSES as labelmap
from data import DOTA_ROOT,DOTA_CLASSES as labelmap
from data import DOTAAnnotationTransform, DOTADetection, BaseTransform, DOTA_CLASSES
import torch.utils.data as data
from ssd import build_ssd
import numpy as np
# Command-line configuration for SSD evaluation on the DOTA dataset.
parser = argparse.ArgumentParser(description='Single Shot MultiBox Detection')
parser.add_argument('--trained_model', default='weights/ssd300_DOTA_66800.pth',
                    type=str, help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
                    help='Dir to save results')
parser.add_argument('--visual_threshold', default=0.1, type=float,
                    help='Final confidence threshold')
# NOTE(review): type=bool on argparse does not parse "False" as falsy —
# any non-empty string is True; confirm intended CLI behavior.
parser.add_argument('--cuda', default=True, type=bool,
                    help='Use cuda to train model')
parser.add_argument('--dota_root', default=DOTA_ROOT, help='Location of VOC root directory')
parser.add_argument('-f', default=None, type=str, help="Dummy arg so we can load in Jupyter Notebooks")
args = parser.parse_args()
# Make newly created tensors default to GPU floats when CUDA is requested
# and available; otherwise stay on CPU.
if args.cuda and torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')
# Ensure the output directory for detection result files exists.
if not os.path.exists(args.save_folder):
    os.mkdir(args.save_folder)
def test_net(save_folder, net, cuda, testset, transform, thresh):
    """Run the net over the test set and dump the top-100 detections per image.

    For each image, appends to '<img_id>.txt' under save_folder lines of the
    form 'xmin ymin xmax ymax label score'.

    Args:
        save_folder: directory (joined under a hard-coded project root) for
            per-image result files.
        net: SSD network in eval mode.
        cuda: move inputs to GPU when True.
        testset: dataset exposing pull_image / pull_anno.
        transform: preprocessing transform applied before the forward pass.
        thresh: unused here; kept for interface compatibility with callers.
    """
    num_images = len(testset)
    # CLEANUP: the original shadowed this loop variable `i` with the inner
    # detection loop, left an unused `filename`/`pred_num`, and re-opened
    # the output file once per detection (100x per image).
    for img_idx in range(num_images):
        print('Testing image {:d}/{:d}....'.format(img_idx + 1, num_images))
        img = testset.pull_image(img_idx)
        img_id, annotation = testset.pull_anno(img_idx)
        x = torch.from_numpy(transform(img)[0]).permute(2, 0, 1)
        x = Variable(x.unsqueeze(0))
        filename_i = os.path.join('/home/lzp/Code/ssd_DOTA_rectangle/', save_folder, img_id + '.txt')
        if cuda:
            x = x.cuda()
        y = net(x)  # forward pass
        detections = y.data
        print("detections:", detections.size())
        # Scale normalized box coordinates back up to the source image size.
        scale = np.array([img.shape[1], img.shape[0],
                          img.shape[1], img.shape[0]])
        detections = detections.cpu().numpy()
        # (class, box) indices of the 100 highest-scoring detections
        # across all classes.
        index_top100 = np.unravel_index(
            np.argsort(detections[0, :, :, 0].ravel())[-100:],
            detections[0, :, :, 0].shape)
        print("filename_i", filename_i)
        # Open the per-image result file once and append all detections.
        with open(filename_i, mode='a') as f:
            for k in range(100):
                score = detections[0, index_top100[0][k], index_top100[1][k], 0]
                label_name = labelmap[index_top100[0][k] - 1]
                pt = (detections[0, index_top100[0][k], index_top100[1][k], 1:] * scale)
                coords = (max(pt[0], 0), max(pt[1], 0), max(pt[2], 0), max(pt[3], 0))
                f.write((' '.join(str(c) for c in coords)) + ' ' + label_name + ' '
                        + str(score).strip().strip(')').strip('tensor(') + '\n')
def test_dota():
    """Build the SSD model, load trained weights, and evaluate the DOTA test split."""
    # load net
    num_classes = len(DOTA_CLASSES) + 1 # +1 background
    net = build_ssd('test', 300, num_classes) # initialize SSD
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    print('Finished loading model!')
    # load data
    testset = DOTADetection(root = args.dota_root,train_test ="test")
    if args.cuda:
        net = net.cuda()
        # Let cuDNN pick the fastest conv algorithms for fixed input sizes.
        cudnn.benchmark = True
    # evaluation: (104, 117, 123) is the BGR mean subtracted by BaseTransform.
    test_net(args.save_folder, net, args.cuda, testset,
             BaseTransform(net.size, (104, 117, 123)),
             thresh=args.visual_threshold)
if __name__ == '__main__':
    test_dota()
|
jancervenka/acf | acf/core/computation.py | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2020, <NAME>
import logging
import functools
import multiprocessing as mp
from typing import Any, Callable, Iterable, List, Optional, Tuple
import numpy as np
import pandas as pd
from .preprocessing import create_user_item_matrix
from .utils import (
cast_numeric_greater_than_zero,
check_columns_in_dataframe,
check_feedback_column_numeric,
get_index_position,
drop_warn_na,
)
# Model hyperparameter and training defaults.
DEFAULT_REG_LAMBDA = 0.1
DEFAULT_ALPHA = 40
DEFAULT_N_FACTORS = 10
DEFAULT_N_ITER = 20
DEFAULT_N_JOBS = 1
# TODO: sparse R dataframe/matrix?
# TODO: X/Y init - which distributions?
# TODO: _compute_factors_1d, _get_least_square_sum to algebra.py module?


class Engine:
    """
    Collaborative filtering algorithm based on matrix factorization
    by alternating least squares. Designed for implicit feedback datasets.
    The class exposes two public methods, `fit` to train the model and
    `predict` to produce the recommendations.
    Example:
    ```
    import acf
    import pandas as pd
    # assuming the data are in the following format:
    # | user_id | item_id | feedback |
    # |---------|---------|----------|
    # | 2491    | 129     | 2        |
    interactions = pd.read_csv('interactions.csv')
    engine = acf.Engine(reg_lambda=1, alpha=35, n_factors=2)
    engine.fit(interactions,
               user_column='user_id',
               item_column='item_id',
               feedback_column='feedback',
               n_iter=20,
               n_jobs=4)
    # get the best 20 recommendation for given user
    prediction = engine.predict(user=2491, top_n=20)
    # to print training loss value at every iteration
    print(engine.loss)
    ```
    """

    # COMPAT FIX applied throughout this class: the original annotated with
    # `np.float` (an alias removed in NumPy 1.24 — evaluating it raises
    # AttributeError at class-definition time) and used `np.array` as a type;
    # replaced with the builtin `float` and `np.ndarray`.

    def __init__(
        self,
        reg_lambda: float = DEFAULT_REG_LAMBDA,
        alpha: float = DEFAULT_ALPHA,
        n_factors: int = DEFAULT_N_FACTORS,
        random_state: Optional[int] = None,
    ):
        """
        Initializes the class with model hyperparameters
        and `random_state`.
        Args:
            reg_lambda: regularization parameter
            alpha: confidence control parameter
            n_factors: number of latent factors
            random_state: RNG seed
        """
        self._reg_lambda = cast_numeric_greater_than_zero(
            reg_lambda, "reg_lambda", float
        )
        self._alpha = cast_numeric_greater_than_zero(alpha, "alpha", float)
        self._n_factors = cast_numeric_greater_than_zero(n_factors, "n_factors", int)
        # Seeded RNG when random_state is given, fresh RNG otherwise.
        if random_state:
            self._rng = np.random.RandomState(random_state)
        else:
            self._rng = np.random.RandomState()
        self._X = self._Y = None
        self._user_index = self._item_index = None
        self._loss = []

    @staticmethod
    def _compute_factors_1d(
        feedback_1d: np.ndarray,
        other_factors: np.ndarray,
        other_factors_small: np.ndarray,
        reg_lambda: float,
        alpha: float,
    ) -> np.ndarray:
        """
        Computes a 1-dimensional factor array for either one user
        or one item.
        When computing user factors, `feedback_1d` is an array of interactions
        `r_u` between user `u` and every item, `other_factors` is an item
        factor matrix `Y` and `other_factors_small` is `Y ^ T * Y`.
        When computing the user factors `x_u` for user `u`, the function
        solves a linear system
        ```
        (Y^T * Y + Y^T * (C^u - I) * Y + λ * I) * x_u = Y^T * C^U * p_u
        ```
        where `Y` is an item factor matrix, `C^u` is a diagonal matrix
        containing feedback `r_u` of user `u` for each item `i` (the
        feedback is transformed to `c_ii = 1 + alpha * r_ui`), `p_u`
        is a binary vector of preferences of user `u` for each item,
        and `λ` is regularization lambda.
        The computation is symmetric for item factors.
        Args:
            feedback_1d: one R user-item matrix row or column
            other_factors: either X or Y factor matrix
            other_factors_small: either X^T * X or Y^T * Y matrix
            reg_lambda: regularization parameter
            alpha: confidence control parameter
        Returns:
            1-dimensional factor array for one user/item
        """
        p_ = (feedback_1d > 0).astype("uint8")
        C_ = np.diag(1 + alpha * feedback_1d)
        size, f = other_factors.shape
        M = other_factors_small + np.linalg.multi_dot(
            [other_factors.T, C_ - np.eye(size), other_factors]
        )
        return np.linalg.solve(
            M + reg_lambda * np.eye(f), np.linalg.multi_dot([other_factors.T, C_, p_])
        )

    def _create_worker(
        self, other_factors: np.ndarray, other_factors_small: np.ndarray
    ) -> Callable:
        """
        Creates a callable worker from `Engine._compute_factors_1d` for
        application over `R` rows/columns in multiprocessing map.
        The worker takes only one argument (`R` column/row), other
        parameters are set in this function using `functools.partial`
        and are identical for all given rows/columns.
        Args:
            other_factors: either X or Y factor matrix
            other_factors_small: either X^T * X or Y^T * Y matrix
        Returns:
            callable worker
        """
        return functools.partial(
            self._compute_factors_1d,
            other_factors=other_factors,
            other_factors_small=other_factors_small,
            reg_lambda=self._reg_lambda,
            alpha=self._alpha,
        )

    @staticmethod
    def _feedback_1d_generator(R: pd.DataFrame, axis: int) -> Iterable[np.ndarray]:
        """
        Creates a generator from `R` rows or columns.
        Each item in the generator is a numpy array.
        Args:
            R: user-item feedback matrix
            axis: axis=1 for rows, axis=0 for columns
        Returns:
            R row/column generator
        """
        # COMPAT FIX: DataFrame.iteritems() was removed in pandas 2.0;
        # items() is the long-standing equivalent.
        for _, r in R.iterrows() if axis else R.items():
            yield r.values

    def _compute_factors(
        self, pool, R: pd.DataFrame, other_factors: np.ndarray, axis: int
    ) -> np.ndarray:
        """
        Applies `Engine._compute_factors_1d` over `R` rows/columns
        to compute factors for all user/items.
        Args:
            pool: multiprocessing pool
            R: user-item feedback matrix
            other_factors: either X or Y factor matrix
            axis: axis=1 for rows, axis=0 for columns
        Returns:
            either X or Y factor matrix
        """
        # TODO: axis can be infered from other_factors shape
        other_factors_small = np.dot(other_factors.T, other_factors)
        worker = self._create_worker(other_factors, other_factors_small)
        all_feedback_1d = self._feedback_1d_generator(R, axis)
        return np.vstack(pool.map(worker, all_feedback_1d))

    def _initialize_factors(self, m: int, n: int) -> Tuple[np.ndarray, np.ndarray]:
        """
        Initializes `X`, `Y` matrices with random values.
        Args:
            m: number of users
            n: number of items
        Returns:
            tuple of X, Y factor matrices
        """
        return (self._rng.rand(m, self._n_factors), self._rng.rand(n, self._n_factors))

    def _get_loss(self, X: np.ndarray, Y: np.ndarray, R: np.ndarray) -> float:
        """
        Computes least squares sum for given `X`, `Y`, `R`.
        The loss is defined as `loss := Σ_ui (C * (P - X * Y^T)^2)`
        where `C` is confidence matrix, `P` is preference matrix,
        and `X`, `Y` are factor matrices.
        Args:
            X: user factors matrix
            Y: item factors matrix
            R: user-item feedback matrix
        Returns:
            loss value
        """
        P = (R > 0).astype("uint8")
        C = 1 + self._alpha * R
        return np.sum(C * (P - np.dot(X, Y.T)) ** 2)

    @staticmethod
    def _log_iteration(iteration: int, loss: float) -> None:
        """
        Logs least squares `loss` for given `iteration`.
        Args:
            iteration: training iteration
            loss: value to log
        """
        logging.info(f"Iteration {iteration:04d} Loss Σ = {loss:.5f}")

    def _run_iterations(
        self, pool, R: pd.DataFrame, n_iter: int
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Initializes `X`, `Y` matrices and runs
        alternating least squares iterations.
        Args:
            pool: multiprocessing pool
            R: user-item feedback matrix
            n_iter: number of iterations
        Returns:
            tuple of X, Y factor matrices
        """
        X, Y = self._initialize_factors(*R.shape)
        for i in range(n_iter):
            X = self._compute_factors(pool, R, Y, axis=1)  # rows
            Y = self._compute_factors(pool, R, X, axis=0)  # columns
            self._loss.append(self._get_loss(X, Y, R.values))
            self._log_iteration(i, self._loss[-1])
        return X, Y

    def fit(
        self,
        interactions: pd.DataFrame,
        user_column: str,
        item_column: str,
        feedback_column: str,
        n_iter: int = DEFAULT_N_ITER,
        n_jobs: int = DEFAULT_N_JOBS,
    ) -> None:
        """
        Fits the model by factorizing `interactions` into latent factors.
        Dataframe `interactions` contains implicit feedbacks values for
        user-item pairs in three columns `user_column`, `item_column`,
        `feedback_column`.
        ```
        | user_column | item_column | feedback_column    |
        |-------------|-------------|--------------------|
        | user_id_0   | item_id_0   | 0_1_feedback_value |
        ```
        Args:
            interactions: user-item feedback pairs
            user_column: name of the column containing user ids
            item_column: name of the column containing item ids
            feedback_column: name of the column containing feedbacks values
            n_iter: number of alternating least squares iterations
            n_jobs: number of multiprocessing jobs
        """
        check_columns_in_dataframe(
            interactions, (user_column, item_column, feedback_column)
        )
        check_feedback_column_numeric(interactions, feedback_column)
        interactions = drop_warn_na(
            interactions[[user_column, item_column, feedback_column]]
        )
        n_iter = cast_numeric_greater_than_zero(n_iter, "n_iter", int)
        R = create_user_item_matrix(
            interactions=interactions,
            user_column=user_column,
            item_column=item_column,
            feedback_column=feedback_column,
        )
        self._user_index, self._item_index = R.index, R.columns
        pool = mp.Pool(n_jobs)
        # Always release the worker processes, even when training fails.
        try:
            self._X, self._Y = self._run_iterations(pool, R, n_iter)
        finally:
            pool.close()

    def predict(self, user: Any, top_n: Optional[int] = None) -> pd.Series:
        """
        Computes recommendations for given `user`.
        Args:
            user: target of the recommendations
            top_n: if not `None`, selects only the best n items
        Returns:
            series with predicted score for each item
        """
        row = get_index_position(self._user_index, user)
        prediction = pd.Series(
            np.dot(self._Y, self._X[row, :]), index=self._item_index, name=user
        )
        if top_n:
            return prediction.nlargest(top_n)
        else:
            return prediction

    @property
    def user_factors(self) -> pd.DataFrame:
        """
        User factors property.
        Returns:
            user factors as a dataframe
        """
        return pd.DataFrame(self._X, index=self._user_index)

    @property
    def item_factors(self) -> pd.DataFrame:
        """
        Item factors property.
        Returns:
            item factors as a dataframe
        """
        return pd.DataFrame(self._Y, index=self._item_index)

    @property
    def loss(self) -> List[float]:
        """
        Training loss property.
        Returns:
            training loss by iteration
        """
        return self._loss
|
jancervenka/acf | acf/tests/test_utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2020, <NAME>
import unittest
import numpy as np
import pandas as pd
from ..core import utils
class CheckColumnsInDataFrameTest(unittest.TestCase):
    """
    Tests `utils.check_columns_in_dataframe`.
    """
    def setUp(self) -> None:
        """
        Sets up the tests.
        """
        self._test_df = pd.DataFrame({"x": [1], "y": [1], "z": [1]})
    def test_all_columns_in_dataframe(self) -> None:
        """
        Tests that no exception is raised if a column is present.
        """
        # Covers the empty tuple, a subset, a singleton, and the full set.
        tests = (tuple(), ("x", "y"), ("x",), ("x", "y", "z"))
        for columns in tests:
            utils.check_columns_in_dataframe(self._test_df, columns)
    def test_column_not_in_data_frame(self) -> None:
        """
        Tests that an exception is raised if a column is not present.
        """
        # NOTE(review): ("a") is a bare string, not a 1-tuple — this works
        # only because "a" is iterable; ("a",) was presumably intended.
        for columns in (("a"), ("x", "a")):
            with self.assertRaises(ValueError):
                utils.check_columns_in_dataframe(self._test_df, columns)
class CheckFeedbackColumnNumericTest(unittest.TestCase):
    """
    Tests `utils.check_feedback_column_numeric`.
    """
    def setUp(self) -> None:
        """
        Sets up the tests.
        """
        # "x" is a string column, "y" a numeric one.
        self._test_df = pd.DataFrame({"x": ["a", "b"], "y": [1, 3]})
    def test_feedback_column_is_numeric(self) -> None:
        """
        Tests that no exception is raised if the feedback column is numeric.
        """
        utils.check_feedback_column_numeric(self._test_df, "y")
    def test_feedback_column_not_numeric(self) -> None:
        """
        Tests that an exception is raised if the feedback column is not
        numeric.
        """
        with self.assertRaises(ValueError):
            utils.check_feedback_column_numeric(self._test_df, "x")
class DropWarnNaTest(unittest.TestCase):
    """
    Tests `utils.drop_warn_na`.
    """
    def test_na_present(self) -> None:
        """
        Tests that rows are dropped and warning is raised when NA
        value is present.
        """
        test_df = pd.DataFrame({"x": [1.0, 2.0, np.nan]})
        expected = pd.DataFrame({"x": [1.0, 2.0]})
        with self.assertWarns(UserWarning):
            result = utils.drop_warn_na(test_df)
        pd.testing.assert_frame_equal(result, expected)
    @staticmethod
    def test_na_not_present() -> None:
        """
        Tests that the original dataframe is returned when no NA is present.
        """
        # CLEANUP: the original chained `test_df = expected = ...` and then
        # immediately rebound `expected`, making the first binding pointless
        # and misleading.  Build the two frames independently.
        test_df = pd.DataFrame({"x": [1, 2]})
        expected = pd.DataFrame({"x": [1, 2]})
        result = utils.drop_warn_na(test_df)
        pd.testing.assert_frame_equal(result, expected)
class CastNumericGreaterThanZeroTest(unittest.TestCase):
    """
    Tests `utils.cast_numeric_greater_than_zero`.
    """
    def test_value_converted(self) -> None:
        """
        Tests that compatible `value` are correctly cast to `required_type`.
        """
        # (value, value_name, required_type, expected) — note 1.2 -> int
        # truncates to 1.
        tests = ((1, "test", int, 1), (2.3, "test", float, 2.3), (1.2, "test", int, 1))
        for value, value_name, required_type, expected in tests:
            result = utils.cast_numeric_greater_than_zero(
                value, value_name, required_type
            )
            self.assertEqual(result, expected)
    def test_value_not_numeric(self) -> None:
        """
        Test that an exception is raised when a non-numeric `value`
        cannot be cast to `required_type`.
        """
        with self.assertRaises(ValueError):
            _ = utils.cast_numeric_greater_than_zero("test", "test", float)
    def test_value_not_greater_than_zero(self) -> None:
        """
        Test that an exception is raised when a numeric `value`
        is not greater than zero.
        """
        with self.assertRaises(ValueError):
            _ = utils.cast_numeric_greater_than_zero(-1, "test", float)
class GetIndexPositionTest(unittest.TestCase):
    """
    Tests `utils.get_index_position`.
    """
    def test_index_value_not_unique(self) -> None:
        """
        Tests that an exception is raised when
        `index_value` is not unique in `index`.
        """
        # Value 2 appears twice in the index.
        test_index = pd.Index([1, 3, 2, 2])
        test_index_value = 2
        with self.assertRaises(ValueError):
            _ = utils.get_index_position(test_index, test_index_value)
    def test_index_value_not_found(self) -> None:
        """
        Tests that an exception is raised when
        `index_value` is not present in `index`.
        """
        test_index = pd.Index([1, 3, 2])
        test_index_value = 4
        with self.assertRaises(ValueError):
            _ = utils.get_index_position(test_index, test_index_value)
    def test_index_value_found(self) -> None:
        """
        Tests that correct row position is returned.
        """
        # Value 3 sits at positional index 1.
        test_index = pd.Index([1, 3, 2, 4])
        test_index_value = 3
        result = utils.get_index_position(test_index, test_index_value)
        expected = 1
        self.assertEqual(result, expected)
if __name__ == "__main__":
    unittest.main()
|
jancervenka/acf | acf/__init__.py | <reponame>jancervenka/acf<filename>acf/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2020, <NAME>
from .core.computation import Engine
from .core import metrics
from .version import __version__
__doc__ = """
Lightweight recommender engine for implicit feedback datasets.
The package implements a collaborative filtering algorithm as described
in "Collaborative Filtering for Implicit Feedback Datasets" paper by
<NAME>, <NAME>, <NAME> (https://doi.org/10.1109/ICDM.2008.22).
"""
|
jancervenka/acf | acf/tests/test_metrics.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2020, <NAME>
import unittest
from unittest import mock
import pandas as pd
from ..core.metrics import mean_rank
class MeanRankTest(unittest.TestCase):
    """
    Tests `metrics.mean_rank`.
    """
    def test_mean_rank_correct_value(self) -> None:
        """
        Tests that correct mean rank value is computed based
        on `interactions` and `X`, `Y` factors.
        """
        # Mock engine exposing only the two factor-matrix properties the
        # metric reads.
        engine = mock.Mock()
        engine.user_factors = pd.DataFrame(
            {0: [0.5, 0.1, 0.9], 1: [0.1, 0.2, 0.5]}, index=["u1", "u2", "u3"]
        )
        engine.item_factors = pd.DataFrame(
            {0: [0.1, 0.2, 0.3, 0.3], 1: [0.5, 0.9, 0.9, 0.7]},
            index=["i1", "i2", "i3", "i4"],
        )
        test_interactions = pd.DataFrame(
            {
                "user_id": ["u1", "u1", "u1", "u2", "u2", "u3", "u3", "u3", "u3"],
                "item_id": ["i1", "i2", "i4", "i2", "i3", "i1", "i2", "i3", "i4"],
                "feedback": [4, 2, 1, 5, 10, 9, 8, 12, 8],
            }
        )
        # Hand-computed intermediate values behind the expected result:
        # R =
        # 4 2  0 1
        # 0 5 10 0
        # 9 8 12 8
        # X * Y^T =
        # 1.00 0.75 0.25 0.50
        # 0.11 0.20 0.21 0.17
        # 0.34 0.63 0.72 0.62
        # X * Y^T ranks
        # 1.00 0.75 0.25 0.50
        # 1.00 0.50 0.25 0.75
        # 1.00 0.50 0.25 0.75
        result = mean_rank(
            interactions=test_interactions,
            user_column="user_id",
            item_column="item_id",
            feedback_column="feedback",
            engine=engine,
        )
        # Weighted rank sum / total feedback.
        expected = 33 / 59
        self.assertEqual(result, expected)
if __name__ == "__main__":
    unittest.main()
|
jancervenka/acf | acf/tests/test_preprocessing.py | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2020, <NAME>
import unittest
import numpy as np
import pandas as pd
from ..core.preprocessing import create_user_item_matrix
class CreateUserItemMatrixTest(unittest.TestCase):
    """
    Tests `preprocessing.create_user_item_matrix`.
    """
    @staticmethod
    def test_create_user_item_matrix() -> None:
        """
        Tests that the created user-item matrix is correct and
        that duplicate user-item pairs are correctly aggregated.
        """
        interactions = pd.DataFrame(
            {
                "id_item": list("ABCABADD"),
                "id_user": [1, 1, 1, 2, 2, 3, 4, 4],
                "feedback": [6, 2, 1, 5, 4, 2, 1, 2],
            }
        )
        # user 4 interacted with item D twice (1 + 2) -> R[4, D] == 3
        expected = pd.DataFrame(
            np.array(
                [
                    [6, 2, 1, 0],
                    [5, 4, 0, 0],
                    [2, 0, 0, 0],
                    [0, 0, 0, 3],
                ],
                dtype="float64",
            ),
            index=pd.Index([1, 2, 3, 4], name="id_user"),
            columns=pd.Index(list("ABCD"), name="id_item"),
        )
        result = create_user_item_matrix(interactions, "id_user", "id_item", "feedback")
        pd.testing.assert_frame_equal(result, expected)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
|
jancervenka/acf | acf/core/preprocessing.py | <filename>acf/core/preprocessing.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2020, <NAME>
import pandas as pd
def create_user_item_matrix(
    interactions: pd.DataFrame,
    user_column: str,
    item_column: str,
    feedback_column: str,
    aggfunc: str = "sum",
) -> pd.DataFrame:
    """
    Creates `R` from `interactions` dataframe by pivoting it
    from long to wide format.

    Args:
        interactions: user-item feedback pairs
        user_column: name of the column containing user ids
        item_column: name of the column containing item ids
        feedback_column: name of the column containing feedback values
        aggfunc: aggregation applied to duplicate user-item pairs;
            defaults to "sum" (the original behavior)

    Returns:
        R user-item matrix with missing pairs filled with 0
    """
    # handles user-item duplicates by aggregating the feedback
    return interactions.pivot_table(
        index=user_column, columns=item_column, aggfunc=aggfunc, values=feedback_column
    ).fillna(0)
|
jancervenka/acf | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2021, <NAME>
import setuptools
import pathlib
VERSION = "0.2.3"
def run_setup():
    """
    Runs the package setup.
    """
    # the long description is the project README, rendered as markdown
    readme_path = pathlib.Path(__file__).parent / "README.md"
    setuptools.setup(
        name="acf",
        version=VERSION,
        description="Lightweight recommender engine",
        author="<NAME>",
        author_email="<EMAIL>",
        long_description=readme_path.read_text(),
        long_description_content_type="text/markdown",
        packages=["acf", "acf.core", "acf.tests"],
        python_requires=">=3.7",
        install_requires=["pandas>=1.0", "numpy>=1.16"],
    )
if __name__ == "__main__":
run_setup()
|
jancervenka/acf | acf/tests/test_computation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2020, <NAME>
import multiprocessing as mp
import unittest
from typing import Iterable
import numpy as np
import pandas as pd
from ..core.computation import Engine
class EngineTest(unittest.TestCase):
    """
    Tests `computation.Engine` class.
    """
    def setUp(self) -> None:
        """
        Sets up the tests.
        """
        # fixed random_state keeps the expected values below reproducible
        self._engine = Engine(random_state=0)

    def test_initialize_factors(self) -> None:
        """
        Tests that `Engine._initialize_factors` produce factor
        matrices `X` and `Y` with random values in correct shape.
        """
        test_m, test_n = (15, 25)
        X_expected, Y_expected = self._engine._initialize_factors(test_m, test_n)
        # X is (n_users, n_factors), Y is (n_items, n_factors)
        for expected, size in ((X_expected, test_m), (Y_expected, test_n)):
            self.assertTupleEqual((size, self._engine._n_factors), expected.shape)

    def test_feedback_1d_generator(self) -> None:
        """
        Tests that `Engine._feedback_1d_generator` produces correct
        row/column arrays from `R` based on the `axis` argument.
        """
        def assert_collection_of_numpy_equal(
            x: Iterable[np.array], y: Iterable[np.array]
        ) -> None:
            """
            Asserts that collections `x` and `y` containing
            numpy arrays are identical.
            """
            x, y = list(x), list(y)
            # BUG FIX: previously asserted `len(x) == len(x)` which compared
            # a length with itself and could never fail; a missing or extra
            # array in the result went undetected (zip stops at the shorter).
            assert len(x) == len(y)
            for array_x, array_y in zip(x, y):
                np.testing.assert_array_equal(array_x, array_y)

        test_R = pd.DataFrame({"A": [1, 3], "B": [6, 7]})
        # axis=0 yields the columns of R, axis=1 yields the rows
        tests = (
            ([np.array([1, 3]), np.array([6, 7])], 0),
            ([np.array([1, 6]), np.array([3, 7])], 1),
        )
        for expected, axis in tests:
            result = list(self._engine._feedback_1d_generator(test_R, axis))
            assert_collection_of_numpy_equal(result, expected)

    def test_compute_factors_1d(self) -> None:
        """
        Tests that `Engine._compute_factors` computes correct
        factor row for one user/item.
        """
        np.random.seed(0)
        test_feedback_1d = np.array([5, 4, 0, 0, 0, 4, 0, 0, 0, 0])
        test_other_factors = np.random.rand(10, 5)
        # precomputed Y^T * Y (resp. X^T * X) normally supplied by the caller
        test_other_factors_small = np.dot(test_other_factors.T, test_other_factors)
        result = self._engine._compute_factors_1d(
            feedback_1d=test_feedback_1d,
            other_factors=test_other_factors,
            other_factors_small=test_other_factors_small,
            reg_lambda=1,
            alpha=40,
        )
        expected = np.array([0.2940538, 0.52162197, 0.82403064, -0.2290155, 0.17128187])
        np.testing.assert_array_almost_equal(result, expected, decimal=7)

    def test_compute_factors(self) -> None:
        """
        Tests that `Engine._compute_factors` computes
        correct factor rows for every user/item.
        """
        np.random.seed(0)
        R_test = pd.DataFrame(
            [
                [3, 2, 0, 0, 0, 0, 0, 0, 1, 2],
                [0, 2, 3, 0, 0, 0, 0, 2, 3, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 2],
                [0, 1, 0, 2, 2, 0, 0, 0, 2, 2],
                [1, 0, 0, 0, 2, 0, 3, 0, 0, 0],
                [1, 0, 0, 0, 2, 3, 0, 3, 0, 0],
                [0, 0, 3, 0, 2, 0, 0, 0, 0, 0],
                [0, 0, 1, 3, 3, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            ]
        )
        test_n = R_test.shape[-1]
        # axis=1 test for user factors, other_factors matrix is for items
        test_axis = 1
        test_other_factors = np.random.rand(test_n, 3)
        # single-worker pool keeps the computation deterministic and cheap
        pool = mp.Pool(1)
        try:
            result = self._engine._compute_factors(
                pool=pool, R=R_test, other_factors=test_other_factors, axis=test_axis
            )
        finally:
            pool.close()
        # shape (9, 3): 3 factors for 9 users
        expected = np.array(
            [
                [0.35764017, 1.03013196, 0.27789359],
                [1.21552231, 1.36885415, -0.71711505],
                [1.5414733, -0.52604296, -0.47427519],
                [0.24321584, 1.0214088, 0.40020838],
                [0.04003445, 1.06238047, 0.06082416],
                [-0.51951887, 1.15240956, 1.12119426],
                [-0.79891536, 1.55973535, -0.05766712],
                [-1.09061448, 1.76283214, -0.04719709],
                [0.27658886, 0.94346013, -0.90276183],
            ]
        )
        np.testing.assert_array_almost_equal(result, expected, decimal=7)

    @staticmethod
    def test_predict() -> None:
        """
        Tests that `Engine.predict` computes correct
        recommendation for given `user`.
        """
        engine = Engine()
        engine._X = np.array([[0.50, 0.90, 0.30], [0.05, 0.94, 0.81]])
        engine._Y = np.array(
            [[0.20, 0.12, 0.80], [0.50, 0.97, 0.03], [0.75, 0.02, 0.15]]
        )
        # user 20 likes factors 1 and 2
        # item 4 really belongs to genre 1 and item 3 belongs to genre 2
        # these two items will be recommended
        #
        # user 20 doesnt care about factor 0 where item 5 belongs
        # small recommendation for item 5
        engine._user_index = pd.Index([10, 20], name="user_id")
        engine._item_index = pd.Index([3, 4, 5], name="item_id")
        result = engine.predict(user=20)
        result_top_2 = engine.predict(user=20, top_n=2)
        expected = pd.Series(
            [0.7708, 0.9611, 0.1778], index=engine._item_index, name=20
        )
        # calling .nlargest on series will sort the values (descending)
        expected_top_2 = expected[[True, True, False]].sort_values(ascending=False)
        pd.testing.assert_series_equal(result, expected)
        pd.testing.assert_series_equal(result_top_2, expected_top_2)

    def test_get_loss(self) -> None:
        """
        Tests that `Engine._get_loss` computes correct least squares loss.
        """
        R_test = np.array([[1, 0, 0, 4], [2, 1, 0, 0], [0, 0, 0, 1]])
        X_test = np.array([[0.3, 0.1], [0.9, 0.4], [0.1, 0.5]])
        Y_test = np.array([[0.4, 0.1], [0.9, 0.5], [0.9, 0.9], [0.6, 0.5]])
        result = self._engine._get_loss(X_test, Y_test, R_test)
        expected = 177.73779
        self.assertAlmostEqual(result, expected, places=4)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
|
jancervenka/acf | acf/core/utils.py | <reponame>jancervenka/acf
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2020, <NAME>
import warnings
from typing import Tuple, Any
import numpy as np
import pandas as pd
def check_columns_in_dataframe(df: pd.DataFrame, columns: Tuple[str]) -> None:
    """
    Raises an exception if any column name in `columns` is not
    present in `df` dataframe.

    Args:
        df: dataframe to check
        columns: tuple of columns that required to be present
    """
    available = set(df.columns)
    for name in columns:
        if name not in available:
            raise ValueError(f"Column {name} is not in the dataframe.")
def check_feedback_column_numeric(
    interactions: pd.DataFrame, feedback_column: str
) -> None:
    """
    Raises an exception if `feedback_column` does not contain numeric values.

    Args:
        interactions: user-item interaction dataframe
        feedback_column: name of the column containing feedback values
    """
    column_is_numeric = pd.api.types.is_numeric_dtype(interactions[feedback_column])
    if not column_is_numeric:
        raise ValueError(f'Column "{feedback_column}" must be numeric.')
def drop_warn_na(df: pd.DataFrame) -> pd.DataFrame:
    """
    Drops any rows with NA values and raises a warning.

    Args:
        df: dataframe to check

    Returns:
        df without the NA values
    """
    without_na = df.dropna(how="any")
    n_removed = len(df) - len(without_na)
    if n_removed:
        # warn so silent data loss is visible to the caller
        warnings.warn(
            f"NA values found in the dataframe," f" {n_removed} rows removed."
        )
    return without_na
def cast_numeric_greater_than_zero(
    value: Any, value_name: str, required_type: type
) -> Any:
    """
    Casts `value` to `required_type` and checks that the result is
    greater than zero.

    Raises an exception if the cast value is not greater than zero.

    Args:
        value: numeric value to check
        value_name: name to be included in the error message
        required_type: target type of the value

    Returns:
        value as required type
    """
    # BUG FIX: the return annotation was `-> None` although the function
    # documents and performs a value return; annotated as Any (the concrete
    # type is whatever `required_type` is at the call site).
    if not isinstance(value, required_type):
        value = required_type(value)
    # check after casting, so e.g. 0.4 cast to int is correctly rejected
    if value <= 0:
        raise ValueError(f"Value {value_name} must be greater than zero.")
    return value
def get_index_position(index: pd.Index, index_value: Any) -> np.int64:
    """
    Finds position of `index_value` in `index` array.

    This functions is used to find row number of given
    `user_id` or `item_id`.

    Raises an exception if `index` is not unique or the
    value is not found.

    Args:
        index: array of indeces
        index_value: value to find

    Returns:
        position of index_value as an integer
    """
    matches = np.flatnonzero(index == index_value)
    if len(matches) > 1:
        raise ValueError("Index is not unique.")
    if len(matches) == 0:
        raise ValueError(f"index_value = {index_value} not found in the index.")
    return matches[0]
|
jancervenka/acf | acf/core/metrics.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2020, <NAME>
import numpy as np
import pandas as pd
from . import computation
from .preprocessing import create_user_item_matrix
def mean_rank(
    interactions: pd.DataFrame,
    user_column: str,
    item_column: str,
    feedback_column: str,
    engine: computation.Engine,
) -> float:
    """
    Computes mean rank metric using real `interactions`
    as ground truth and predictions produced by `engine`.

    The metric is defined as
    ```
    mean_rank := Σ_ui r_ui * rank_ui / Σ_ui r_ui
    ```
    where `r_ui` is the feedback value between user `u` and item `i`
    and `rank_ui` is the recommendation rank of item `i` for user `u`.

    `rank_ui` is computed by inverse row-wise percentile ranking of
    values in `X * Y^T` prediction matrix. Value `rank_ui = 0` means
    that item `i` is the first to be recommended for user `u`,
    `rank_ui = 1` is the last to be recommended.

    The metric is a mean rank value weighted by `R` feedbacks.

    Args:
        interactions: user-item feedback pairs
        user_column: name of the column containing user ids
        item_column: name of the column containing item ids
        feedback_column: name of the column containing feedbacks values
        engine: trained model

    Returns:
        computed metric
    """
    # BUG FIX: the return annotation was `np.float`; that alias was removed
    # in NumPy 1.24, so importing this module raised AttributeError there.
    # The builtin `float` is the documented replacement.
    # TODO: column/NA checks
    R = create_user_item_matrix(
        interactions=interactions,
        user_column=user_column,
        item_column=item_column,
        feedback_column=feedback_column,
    )
    user_ids, item_ids = R.index.tolist(), R.columns.tolist()
    # prediction = X * Y ^ T
    # ranks = apply rank row wise on prediction
    ranks = (
        engine.user_factors.loc[user_ids, :]
        .dot(engine.item_factors.loc[item_ids, :].T)
        .rank(pct=True, ascending=False, axis=1)
    )
    # multiply ranks with R element-wise, make a sum and divide by R sum
    return R.mul(ranks).sum().sum() / R.sum().sum()
|
Andre-Vitorino/House-Predict | api/house/House.py | import pandas as pd
import inflection
import math
import datetime
import pickle
import numpy as np
class House(object):
    """
    House-price prediction pipeline used by the API.

    On construction, loads every scaler / label encoder that was fitted and
    pickled during training. The pipeline then runs in three steps --
    `data_cleaning` -> `feature_engineer` -> `data_preparation` -- and
    finally `get_prediction` applies the trained model.
    """

    # Directory containing the pickled, pre-fitted transformers.
    _PARAMETER_DIR = '/home/andre/repos/House-Prices/parameter'

    # Scaler attribute -> pickle file stem, for columns whose names start
    # with a digit (Python attribute names cannot).
    _FILE_STEM_OVERRIDES = {
        'st_flr_sf_scaler': '1st_flr_sf_scaler',
        'nd_flr_sf_scaler': '2nd_flr_sf_scaler',
        'ssn_porch_scaler': '3_ssn_porch_scaler',
    }

    # Column name -> scaler attribute, for the same digit-prefixed columns;
    # every other column maps to `<column>_scaler`.
    _SCALER_ATTR_OVERRIDES = {
        '1st_flr_sf': 'st_flr_sf_scaler',
        '2nd_flr_sf': 'nd_flr_sf_scaler',
        '3_ssn_porch': 'ssn_porch_scaler',
    }

    # Numeric columns rescaled with the pre-fitted numeric scalers.
    _RESCALE_COLUMNS = (
        'year_built', 'ms_sub_class', 'lot_frontage', 'lot_area',
        'overall_cond', 'year_remod_add', 'mas_vnr_area', 'bsmt_fin_sf1',
        'bsmt_fin_sf2', 'bsmt_unf_sf', 'total_bsmt_sf', '1st_flr_sf',
        '2nd_flr_sf', 'low_qual_fin_sf', 'gr_liv_area', 'bsmt_full_bath',
        'bsmt_half_bath', 'full_bath', 'half_bath', 'bedroom_abv_gr',
        'kitchen_abv_gr', 'tot_rms_abv_grd', 'fireplaces', 'garage_yr_blt',
        'garage_cars', 'garage_area', 'wood_deck_sf', 'open_porch_sf',
        'enclosed_porch', '3_ssn_porch', 'screen_porch', 'pool_area',
        'misc_val',
    )

    # Categorical columns transformed with the pre-fitted label encoders.
    # (mas_vnr_type is intentionally absent -- it is dropped in cleaning.)
    _ENCODE_COLUMNS = (
        'ms_zoning', 'street', 'lot_shape', 'land_contour', 'utilities',
        'lot_config', 'land_slope', 'neighborhood', 'condition1',
        'condition2', 'bldg_type', 'house_style', 'overall_qual',
        'roof_style', 'roof_matl', 'exterior1st', 'exterior2nd',
        'exter_qual', 'exter_cond', 'foundation', 'bsmt_qual', 'bsmt_cond',
        'bsmt_exposure', 'bsmt_fin_type1', 'bsmt_fin_type2', 'heating',
        'heating_qc', 'central_air', 'electrical', 'kitchen_qual',
        'functional', 'garage_type', 'garage_finish', 'garage_qual',
        'garage_cond', 'paved_drive', 'sale_type', 'sale_condition',
    )

    def __init__(self):
        # Load one pre-fitted transformer per pipeline column. Attributes
        # keep the original `<column>_scaler` naming, e.g.
        # `self.year_built_scaler`, `self.st_flr_sf_scaler` (for 1st_flr_sf),
        # so existing external references remain valid.
        for column in self._RESCALE_COLUMNS + self._ENCODE_COLUMNS:
            attr = self._SCALER_ATTR_OVERRIDES.get(column, f'{column}_scaler')
            stem = self._FILE_STEM_OVERRIDES.get(attr, attr)
            with open(f'{self._PARAMETER_DIR}/{stem}.pkl', 'rb') as handle:
                setattr(self, attr, pickle.load(handle))

    def _scaler(self, column):
        """Return the pre-fitted transformer for `column`."""
        return getattr(self, self._SCALER_ATTR_OVERRIDES.get(column, f'{column}_scaler'))

    def data_cleaning(self, df1):
        """
        Rename columns to snake_case, drop unusable columns, impute
        missing values and fix column dtypes.

        Args:
            df1: raw dataframe in the original Kaggle column format

        Returns:
            cleaned dataframe
        """
        columns_old = ['Id', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig',
                       'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd',
                       'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'MasVnrArea', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual',
                       'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinSF1', 'BsmtFinType2', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'Heating',
                       'HeatingQC', 'CentralAir', 'Electrical', '1stFlrSF', '2ndFlrSF', 'LowQualFinSF', 'GrLivArea', 'BsmtFullBath', 'BsmtHalfBath', 'FullBath',
                       'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'KitchenQual', 'TotRmsAbvGrd', 'Functional', 'Fireplaces', 'FireplaceQu', 'GarageType',
                       'GarageYrBlt', 'GarageFinish', 'GarageCars', 'GarageArea', 'GarageQual', 'GarageCond', 'PavedDrive', 'WoodDeckSF', 'OpenPorchSF',
                       'EnclosedPorch', '3SsnPorch', 'ScreenPorch', 'PoolArea', 'PoolQC', 'Fence', 'MiscFeature', 'MiscVal', 'MoSold', 'YrSold', 'SaleType',
                       'SaleCondition']
        # rename every column to snake_case
        snakecase = lambda x: inflection.underscore(x)
        cols_new = list(map(snakecase, columns_old))
        df1.columns = cols_new
        # drop the columns that are too sparse to impute
        df1 = df1.drop(['alley', 'pool_qc', 'fence', 'misc_feature', 'fireplace_qu'], axis=1)
        # impute missing values with the typical value for each column
        df1['lot_frontage'].fillna(60, inplace=True)
        df1['bsmt_qual'].fillna('TA', inplace=True)
        df1['bsmt_cond'].fillna('TA', inplace=True)
        df1['bsmt_exposure'].fillna('NO', inplace=True)
        df1['bsmt_fin_type1'].fillna('Unf', inplace=True)
        df1['bsmt_fin_type2'].fillna('Unf', inplace=True)
        # BUG FIX: the drop result was previously discarded (missing
        # assignment), silently keeping mas_vnr_type -- with possible NAs --
        # in the frame.
        df1 = df1.drop('mas_vnr_type', axis=1)
        df1['mas_vnr_area'].fillna(0, inplace=True)
        df1['electrical'].fillna('SBrkr', inplace=True)
        df1['garage_type'].fillna('Attchd', inplace=True)
        df1['garage_yr_blt'].fillna(2005, inplace=True)
        df1['garage_finish'].fillna('Unf', inplace=True)
        df1['garage_qual'].fillna('TA', inplace=True)
        df1['garage_cond'].fillna('TA', inplace=True)
        # year-like and area columns are stored as int by convention
        df1['garage_yr_blt'] = df1['garage_yr_blt'].astype(int)
        df1['mas_vnr_area'] = df1['mas_vnr_area'].astype(int)
        df1['lot_frontage'] = df1['lot_frontage'].astype(int)
        # overall_qual is label-encoded downstream, so keep it as a string
        df1['overall_qual'] = df1['overall_qual'].astype(str)
        return df1

    def feature_engineer(self, df2):
        """
        Derive the sale date from the month/year of sale, drop duplicated
        rows and the now-redundant raw month/year columns.

        Args:
            df2: cleaned dataframe

        Returns:
            dataframe with engineered features
        """
        # combine month + year of sale into a single datetime column
        df2['sale_date'] = df2['mo_sold'].astype(str) + df2['yr_sold'].astype(str)
        df2['sale_date'] = pd.to_datetime(df2['sale_date'], format='%m%Y')
        df2 = df2.drop_duplicates()
        # the raw month/year columns are superseded by sale_date
        df2 = df2.drop(['mo_sold', 'yr_sold'], axis=1)
        return df2

    def data_preparation(self, df1, df5):
        """
        Rescale numeric columns and encode categorical columns with the
        pre-fitted transformers, add cyclic sale-month features and select
        the columns the model was trained on.

        Args:
            df1: cleaned dataframe (still holding `mo_sold`)
            df5: feature-engineered dataframe to transform

        Returns:
            dataframe restricted to the model's input columns
        """
        # BUG FIX: use transform() instead of fit_transform(). These
        # transformers were fitted during training; refitting them on the
        # (small) prediction payload would silently change the scaling and
        # label encodings the model expects.
        for column in self._RESCALE_COLUMNS:
            df5[column] = self._scaler(column).transform(df5[[column]].values)
        for column in self._ENCODE_COLUMNS:
            df5[column] = self._scaler(column).transform(df5[column])
        # cyclic encoding of the sale month
        # NOTE(review): a period of 30 looks odd for a month index (12?);
        # kept as-is to match the training pipeline -- confirm before changing.
        df5['sale_date_sin'] = df1['mo_sold'].apply(lambda x: np.sin(x * (2. * np.pi / 30)))
        df5['sale_date_cos'] = df1['mo_sold'].apply(lambda x: np.cos(x * (2. * np.pi / 30)))
        # columns selected during the training feature-selection step
        cols_selected = ['ms_zoning', 'lot_area', 'neighborhood', 'overall_qual', 'overall_cond', 'year_built', 'year_remod_add', 'exter_qual', 'bsmt_qual', 'bsmt_fin_sf1',
                         'bsmt_unf_sf', 'total_bsmt_sf', '1st_flr_sf', '2nd_flr_sf', 'gr_liv_area', 'full_bath', 'garage_finish', 'garage_cars', 'garage_area']
        return df5[cols_selected]

    def get_prediction(self, model, original_data, test_data):
        """
        Predict sale prices and attach them to the original records.

        Args:
            model: trained regressor (predicts log1p of the sale price)
            original_data: raw input records to annotate
            test_data: prepared model input (`data_preparation` output)

        Returns:
            JSON string of the original records with a `prediction` field
        """
        pred = model.predict(test_data)
        # the target was log1p-transformed during training; invert with expm1
        original_data['prediction'] = np.expm1(pred)
        return original_data.to_json(orient='records', date_format='iso')
|
Andre-Vitorino/House-Predict | api/handler.py | <gh_stars>0
import pandas as pd
from flask import Flask, request, Response
from house.House import House
import pickle
# carregando modelo
model = pickle.load(open('/home/andre/repos/House-Prices/model/house_model.pkl', 'rb'))
app = Flask(__name__)
@app.route('/house/predict', methods=['POST'])
def house_predict():
    """
    Flask endpoint: run the house-price pipeline on the posted JSON payload.

    Accepts either a single JSON object (one house) or a list of objects
    and returns the original records with a `prediction` field appended.
    Returns an empty JSON body when no payload is posted.
    """
    test_json = request.get_json()
    if not test_json:  # no payload -> empty, but valid, JSON response
        return Response('{}', status=200, mimetype='application/json')
    if isinstance(test_json, dict):  # single record
        # BUG FIX: was `pd.Dataframe` (wrong capitalisation), which raised
        # AttributeError for every single-record request.
        test_raw = pd.DataFrame(test_json, index=[0])
    else:  # list of records
        test_raw = pd.DataFrame(test_json, columns=test_json[0].keys())
    # instantiate the pipeline (loads the pickled transformers)
    pipeline = House()
    # data cleaning
    df1 = pipeline.data_cleaning(test_raw)
    # feature engineering
    df2 = pipeline.feature_engineer(df1)
    # data preparation
    df3 = pipeline.data_preparation(df1, df2)
    # prediction (unreachable trailing `print(test_json)` removed)
    return pipeline.get_prediction(model, test_raw, df3)
# Start the Flask development server when run as a script.
if __name__ == '__main__':
    app.run('127.0.0.1')
|
lukemassa/socialmixingmatrix | socialmixing.py | #!/usr/bin/env python3
"""
Groups social mixing data into age ranges
"""
__author__ = "<NAME>"
__license__ = "MIT"
__email__ = "<EMAIL>"
import csv
POP_ESTIMATE_COLUMN = "CENSUS2010POP" # Which column to use from the census data
class AgeMixing():
    """
    Container for social mixing data, functions to manipulate it.

    Reads 'Age-Mixing.csv' and 'US-Age-Sex-Distribution.csv' from the
    current working directory at construction time.
    """

    def __init__(self):
        # Raw mixing matrix keyed by age-group strings, e.g. ret["0-5"]["5-10"].
        self.social_mixing_matrix = self.load_social_mixing_matrix()
        # Map from integer age -> proportion of total population at that age.
        self.age_proportions = self.load_age_proportions()
        # Mixing matrix weighted by the population share of both groups.
        self.scaled_matrix = self.get_scaled_matrix(self.social_mixing_matrix)

    @staticmethod
    def _age_group_to_tuple(age_group):
        """
        Given an age group like "0-5" return (0, 5)
        """
        lower, upper = age_group.split('-')
        return (int(lower), int(upper))

    def _is_group_in_group(self, group1, group2):
        """
        Is group1 (i.e. "5-10") in group2 (i.e. "0-15")
        """
        total_min, total_max = self._age_group_to_tuple(group2)
        my_min, my_max = self._age_group_to_tuple(group1)
        # Containment is inclusive at both endpoints.
        return my_min >= total_min and my_max <= total_max

    def _proportion_in_group(self, group):
        """
        What proportion of the population is in this group (i.e. "0-5")
        """
        ret = 0
        lower, upper = self._age_group_to_tuple(group)
        # NOTE(review): the range is inclusive of `upper`, so adjacent groups
        # like "0-5" and "5-10" would both count age 5 — confirm the input
        # groups never share boundary ages.
        for i in range(lower, upper+1):
            ret+=self.age_proportions[i]
        return ret

    def load_social_mixing_matrix(self):
        """
        Load the social mixing matrix into a dict of dicts
        """
        ret = {}
        table = []
        with open('Age-Mixing.csv', newline='') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                table.append(row)
                group_i = row["Age group"]
                for group_j, value in row.items():
                    # The "Age group" column is the row label, not a cell.
                    if group_j == "Age group":
                        continue
                    if group_i not in ret:
                        ret[group_i] = {}
                    # group_i comes from the first column of this row,
                    # group_j is the column for each cell
                    ret[group_i][group_j] = float(value)
        return ret

    def load_age_proportions(self):
        """
        Load age proportion data into a map from age to what proportion of the pop is that age
        """
        ret = {}
        with open("US-Age-Sex-Distribution.csv", newline='') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                if row["SEX"] == "0": # 0 == both sexes
                    age = int(row["AGE"])
                    # There's a row with age 999 that has the sum of all the data
                    if age == 999:
                        total = float(row[POP_ESTIMATE_COLUMN])
                    else:
                        ret[age] = float(row[POP_ESTIMATE_COLUMN])
        # Scale by the total
        # NOTE(review): raises NameError if the CSV lacks the age-999 total
        # row — confirm the input data always contains it.
        for k, v in ret.items():
            ret[k] = v / total
        return ret

    def get_scaled_matrix(self, social_mixing_matrix):
        """
        Get the matrix, but scaled by proportions of population in both group a and b
        This amounts to multiplying each cell by the proportion of its row * its column * itself
        So if mixing from 0-5 to 10-15, had value of 2, and pop is 5% 0-5 and 8% 10-15, then new value is 2*.05*.08
        """
        ret = {}
        for group_i, row in social_mixing_matrix.items():
            for group_j, value in row.items():
                if group_i not in ret:
                    ret[group_i] = {}
                ret[group_i][group_j] = self._proportion_in_group(group_i) * self._proportion_in_group(group_j) * value
        return ret

    def get_cell_for_new_matrix(self, new_group_i, new_group_j):
        """
        Find value of a cell in the new matrix
        Sums up all the cells covered by the new groupings
        """
        # This is inefficient, just go through all the cells and see if they fit
        ret = 0
        for group_i in self.scaled_matrix:
            for group_j in self.scaled_matrix:
                if self._is_group_in_group(group_i, new_group_i) and self._is_group_in_group(group_j, new_group_j):
                    ret+= self.scaled_matrix[group_i][group_j]
        return ret

    def get_new_matrix(self, new_groups):
        """
        Take the new groups, and the scaled matrix, and return the new matrix
        """
        ret = {}
        for group_i in new_groups:
            for group_j in new_groups:
                if group_i not in ret:
                    ret[group_i] = {}
                ret[group_i][group_j] = self.get_cell_for_new_matrix(group_i, group_j)
        return ret

    def format_matrix(self, matrix):
        """
        Given a matrix, format it as a string (essentially reverse of load_social_mixing_matrix)
        """
        ret = []
        toprow = []
        # Order columns/rows by the lower bound of each age group.
        groups = sorted(matrix.keys(), key=lambda x: self._age_group_to_tuple(x)[0])
        toprow.append("Age group")
        for group in groups:
            toprow.append(group)
        ret.append(",".join(toprow))
        for group_i in groups:
            row = []
            row.append(group_i)
            for group_j in groups:
                row.append(str(matrix[group_i][group_j]))
            ret.append(",".join(row))
        return '\n'.join(ret)
def main():
    """Build the mixing model and print the re-grouped matrix as CSV."""
    mixing = AgeMixing()
    regrouped = mixing.get_new_matrix(("0-20", "21-64", "65-100"))
    print(mixing.format_matrix(regrouped))


if __name__ == "__main__":
    main()
|
rezaeiii/VDSR | main.py | <reponame>rezaeiii/VDSR
import os, argparse
from vdsr import VDSR
# Command-line arguments
parser = argparse.ArgumentParser(description='TensorFlow implementation of VDSR')
# Select GPU
parser.add_argument('--gpu-id', type=int, default=0)
parser.add_argument('--test', action='store_true', default=False)
parser.add_argument('--model-path', type=str, default='checkpoint/VDSR_pretrained')
# Training settings
# Bug fix: help text claimed "default: 60" while the actual default is 10.
parser.add_argument('--epoch', type=int, default=10, help='Number of epoch, default: 10')
parser.add_argument('--batch-size', type=int, default=128, help='Mini-batch size, default: 128')
parser.add_argument('--learning-rate', type=float, default=1e-4, help='Learning rate, default: 0.0001')
# Network setting
parser.add_argument('--layer-depth', type=int, default=20, help='Depth of the network, default: 20')
# Test settings
parser.add_argument('--scale', type=int, default=3, help='Up-scale factor, only for test. default: 3')
parser.add_argument('--print-interval', type=int, default=100)
parser.add_argument('--eval-interval', type=int, default=200)
# Directory path
parser.add_argument('--train-dataset', type=str, default='10')
parser.add_argument('--train-dataset-path', type=str, default='Train')
parser.add_argument('--valid-dataset', type=str, default='Set5')
parser.add_argument('--test-dataset', type=str, default='Set5')
parser.add_argument('--test-dataset-path', type=str, default='Test')
parser.add_argument('--checkpoint-path', type=str, default='checkpoint/VDSR')
parser.add_argument('--result-dir', type=str, default='result')
args = parser.parse_args()

# Pin the requested GPU and silence TensorFlow's info-level logging.
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def main():
    """Entry point: ensure the checkpoint directory exists (train mode) and run VDSR."""
    if not args.test:
        # exist_ok avoids the check-then-create race of the old
        # os.path.exists()/os.makedirs() pair; also removed an unused
        # `checkpoint_path` local that was assigned but never read.
        os.makedirs(args.checkpoint_path, exist_ok=True)

    vdsr = VDSR(args)
    if args.test:
        vdsr.test()
    else:
        vdsr.train()


if __name__ == '__main__':
    main()
|
rezaeiii/VDSR | utils.py | <reponame>rezaeiii/VDSR<filename>utils.py
import tensorflow as tf
import numpy as np
import math
from PIL import Image
from tqdm import tqdm
import os
import h5py
# Read image
def imread(fname):
    # Returns a PIL Image; actual pixel data is loaded lazily by PIL.
    return Image.open(fname)
# Save image
def imsave(image, path, fname):
    """Save a [0, 1] float image array as an RGB file.

    The array is interpreted as YCbCr, scaled to [0, 255], clipped, cast
    to uint8 and converted to RGB before writing to ``path/fname``.
    """
    # Bug fix: clip before the uint8 cast so out-of-range network outputs
    # (e.g. 1.01 -> 257.55) do not wrap around and produce speckle artifacts.
    image = np.clip(image * 255., 0., 255.)
    image = Image.fromarray(image.astype('uint8'), mode='YCbCr')
    image = image.convert('RGB')
    return image.save(os.path.join(path, fname))
# Save ground truth image, bicubic interpolated image and srcnn image
def save_result(path, gt, bicubic, srcnn, i):
    # One (image, filename-suffix) pair per output kind for sample i.
    outputs = ((gt, '_gt.png'), (bicubic, '_bicubic.png'), (srcnn, '_vdsr.png'))
    for img, suffix in outputs:
        imsave(img, path, str(i) + suffix)
# Return true if the h5 sub-images file is exists
def exist_train_data(datasetname):
    # The packed training data lives next to the cwd as '<name>.h5'.
    h5_name = '{}.h5'.format(datasetname)
    return os.path.exists(h5_name)
# Concatenate Y and CrCb channel
def concat_ycrcb(y, crcb):
    # Join along the channel axis: (h, w, 1) + (h, w, 2) -> (h, w, 3).
    return np.concatenate([y, crcb], axis=2)
def psnr(gt, sr, shave=0, max_val=1.):
    """Peak signal-to-noise ratio between ground truth and SR output.

    Args:
        gt: ground-truth image array.
        sr: reconstructed image array, same shape as gt.
        shave: number of border pixels to exclude on every side.
        max_val: maximum possible pixel value (1. for [0, 1] images).

    Returns:
        PSNR in dB; ``inf`` when the two images are identical.
    """
    # Bug fix: with shave == 0 the old code sliced [0:-0] == [0:0], an empty
    # array, so the metric was NaN whenever no border was shaved.
    if shave > 0:
        gt = gt[shave:-shave, shave:-shave]
        sr = sr[shave:-shave, shave:-shave]
    diff = (gt - sr).flatten()
    rmse = math.sqrt(np.mean(diff ** 2))
    if rmse == 0:
        # Identical images: avoid ZeroDivisionError, report perfect score.
        return float('inf')
    return 20 * math.log10(max_val / rmse)
def prepare_data(path, scale, is_valid=False):
    """Load paired low-res / ground-truth images from `path`.

    Inputs come from `<path>/bicubic_<scale>x`, labels from `<path>/gt`;
    both are scaled to [0, 1]. Labels are cropped so their sides are exact
    multiples of `scale`. When `is_valid` is True only the Y channel is
    kept and reshaped to (1, h, w, 1); otherwise full arrays are returned.

    Returns:
        (inputs, labels) — parallel lists of numpy arrays.
    """
    dir_path = os.path.join(os.getcwd(), path)
    path_gt = os.path.join(dir_path, 'gt')
    path_lr = os.path.join(dir_path, 'bicubic_{:d}x'.format(scale))

    # fnames = ['baby_GT.bmp, bird_GT.bmp, ...']
    # Assumes the LR directory contains the same filenames as gt.
    fnames = os.listdir(path_gt)

    inputs = []
    labels = []
    count = 0
    for fname in tqdm(fnames, desc='[*] Generating dataset ... '):
        count += 1
        _input = imread(os.path.join(path_lr, fname))
        _label = imread(os.path.join(path_gt, fname))
        _input = np.array(_input) / 255.
        _label = np.array(_label) / 255.
        # Crop the label so height and width are divisible by `scale`.
        _label = _label[:_label.shape[0] - np.mod(_label.shape[0], scale), :_label.shape[1] - np.mod(_label.shape[1], scale)]
        #_label = _label[:_label.shape[0]//scale, :_label.shape[1]//scale]
        if is_valid:
            # Keep only the luminance channel, batched as (1, h, w, 1).
            h, w, _ = _input.shape
            _input_y = _input[:, :, 0]
            _label_y = _label[:, :, 0]
            _input_y = _input_y.reshape([1, h, w, 1])
            _label_y = _label_y.reshape([1, h, w, 1])
            inputs.append(_input_y)
            labels.append(_label_y)
        else:
            inputs.append(_input)
            labels.append(_label)
    if is_valid:
        print('[*] Successfully prepared {:d} valid images !'.format(count))
    else:
        print('[*] Successfully prepared {:d} test images !'.format(count))
    return inputs, labels
|
rezaeiii/VDSR | dataset.py | <filename>dataset.py<gh_stars>1-10
import tensorflow as tf
import numpy as np
import threading, time
import h5py
class Dataset():
    """Feeds 41x41 training patches from an h5 file into a TF shuffle queue.

    Uses the TensorFlow 1.x queue API: a background thread repeatedly
    enqueues batches read from the h5 file, and training dequeues via
    ``dequeue_many``.
    """

    def __init__(self, data_path, batch_size):
        # Path stem of the h5 file; '<data_path>.h5' is opened below.
        self.data_path = data_path
        # Patch size for training
        self.input_size = 41
        self.label_size = 41
        self.batch_size = batch_size
        # Capacity of the shuffle queue in examples.
        self.queue_size = 3000
        self.open_h5py_file()
        self.make_queue()

    def open_h5py_file(self):
        # Opens read-only; the file handle stays open for the object's lifetime.
        self.h5py_file = h5py.File('{}.h5'.format(self.data_path), 'r')
        self.data_size = self.h5py_file['data'].shape[0]
        # Number of full batches per pass; any remainder is dropped.
        self.data_index = self.data_size // self.batch_size

    def make_queue(self):
        # Placeholders used by the enqueue thread to push numpy batches.
        self.input_t = tf.placeholder(tf.float32, [None, self.input_size, self.input_size, 1])
        self.label_t = tf.placeholder(tf.float32, [None, self.label_size, self.label_size, 1])
        queue = tf.RandomShuffleQueue(
            capacity=self.queue_size,
            min_after_dequeue=self.batch_size,
            dtypes=(tf.float32, tf.float32),
            shapes=((self.input_size, self.input_size, 1), (self.label_size, self.label_size, 1)),
            name = 'random_shuffle_queue'
        )
        self.enqueue_many = queue.enqueue_many([self.input_t, self.label_t])
        self.dequeue_many = queue.dequeue_many(self.batch_size)

    def start_enqueue_deamon(self, sess):
        # NOTE(review): name has a typo ("deamon"); kept for compatibility.
        def enqueue_thread(sess):
            # Loops forever; daemon threads die with the main process.
            while (True):
                for (input_t, label_t) in self.generator():
                    sess.run([self.enqueue_many], feed_dict={
                        self.input_t: input_t,
                        self.label_t: label_t
                    })
                time.sleep(0.0001)
        thread_number = 1
        threads = []
        for i in range(thread_number):
            t = threading.Thread(target=enqueue_thread, args=(sess,), daemon=True)
            t.start()
            threads.append(t)
        return threads

    def generator(self):
        # Yields sequential (input, label) batches straight from the h5 file;
        # shuffling is delegated to the RandomShuffleQueue.
        for i in range(self.data_index):
            input_t = self.h5py_file['data'][i * self.batch_size : (i+1) * self.batch_size]
            label_t = self.h5py_file['label'][i * self.batch_size : (i+1) * self.batch_size]
            yield (input_t, label_t)
|
mikaelengstrom/django-react-templatetags | django_react_templatetags/tests/test_manager.py | # -*- coding: utf-8 -*-
from django.conf import global_settings
from django.template import Context, Template
from django.test import TestCase, modify_settings, override_settings
import responses
from django_react_templatetags.templatetags.react import _get_tag_manager, ReactTagManager
class TestReactTagManager(ReactTagManager):
    """Stub tag manager used to verify the REACT_RENDER_TAG_MANAGER override."""

    def render(self, context):
        # Constant output lets tests assert the override took effect.
        return 'Test'
class ReactIncludeComponentTest(TestCase):
    """Tests for resolving the tag manager class used by {% react_render %}."""

    def setUp(self):
        # Minimal template context with an empty component render queue.
        self.mocked_context = Context({'REACT_COMPONENTS': []})

    def test_tag_manager_not_overridden(self):
        "Test that the default ReactTagManager is used by default"
        self.assertEqual(_get_tag_manager(), ReactTagManager)

    @override_settings(
        REACT_RENDER_TAG_MANAGER="django_react_templatetags.tests.test_manager.TestReactTagManager"
    )
    @responses.activate
    def test_tag_manager_overridden(self):
        "Test that the TestReactTagManager is actually used"
        self.assertEqual(_get_tag_manager(), TestReactTagManager)

        # HTTP mock is registered, but the stub manager's render() returns a
        # constant, so the template output must be 'Test' rather than SSR html.
        responses.add(responses.POST, 'http://react-service.dev',
                      body='Foo Bar', status=200)

        out = Template(
            "{% load react %}"
            "{% react_render component=\"Component\" %}"
        ).render(self.mocked_context)

        self.assertEqual('Test', out)
|
mikaelengstrom/django-react-templatetags | django_react_templatetags/tests/test_ssr.py | <reponame>mikaelengstrom/django-react-templatetags
import json
try:
from unittest import mock
except ImportError:
import mock
from django.urls import reverse
from django.template import Context, Template
from django.test import TestCase, override_settings
import responses
from django_react_templatetags.tests.demosite.models import (
Person, MovieWithContext
)
@override_settings(
    REACT_RENDER_HOST='http://react-service.dev/',
)
class SSRTemplateTest(TestCase):
    """Template-tag tests for server-side rendering against a mocked SSR host.

    Uses `responses` to stub the HTTP calls to REACT_RENDER_HOST.
    Fix: replaced the deprecated ``assertEquals``/``assertTrue(x == y)``
    idioms with ``assertEqual`` — ``assertEquals`` was removed in
    Python 3.12, and ``assertEqual`` gives useful failure diffs.
    """

    def setUp(self):
        self.mocked_context = Context({'REACT_COMPONENTS': []})

    @responses.activate
    def test_verify_404(self):
        "The SSR rendering falls back to client side rendering if 404"
        responses.add(responses.POST, 'http://react-service.dev/',
                      json={'error': 'not found'}, status=404)

        out = Template(
            "{% load react %}"
            "{% react_render component=\"Component\" %}"
        ).render(self.mocked_context)

        self.assertTrue('<div id="Component_' in out)

    @responses.activate
    def test_verify_rendition(self):
        "The SSR returns inner html"
        responses.add(responses.POST, 'http://react-service.dev',
                      body='<h1>Title</h1>', status=200)

        out = Template(
            "{% load react %}"
            "{% react_render component=\"Component\" %}"
        ).render(self.mocked_context)

        self.assertTrue('<h1>Title</h1>' in out)

    @responses.activate
    def test_request_body(self):
        "The SSR request sends the props in a expected way"
        responses.add(responses.POST, 'http://react-service.dev',
                      body='<h1>Title</h1>', status=200)

        person = Person(first_name='Tom', last_name='Waits')
        self.mocked_context["person"] = person
        self.mocked_context["component_data"] = {'album': 'Real gone'}

        Template(
            "{% load react %}"
            "{% react_render component=\"Component\" prop_person=person data=component_data %}"  # NOQA
        ).render(self.mocked_context)

        request_body = {
            'componentName': 'Component',
            'props': {
                'album': 'Real gone',
                'person': {
                    'first_name': 'Tom',
                    'last_name': 'Waits'
                }
            },
            'context': {}
        }

        # assertEqual (not assertTrue ==) so mismatches show a diff.
        self.assertEqual(
            json.loads(responses.calls[0].request.body), request_body
        )

    @responses.activate
    def test_request_body_context(self):
        "The SSR request sends the props in a expected way with context"
        responses.add(responses.POST, 'http://react-service.dev',
                      body='<h1>Title</h1>', status=200)

        movie = MovieWithContext(title='Office space', year=1991)
        self.mocked_context["movie"] = movie
        self.mocked_context["search_term"] = 'Stapler'

        Template(
            "{% load react %}"
            "{% react_render component=\"Component\" prop_movie=movie %}"
        ).render(self.mocked_context)

        request_body = {
            'componentName': 'Component',
            'props': {
                'movie': {
                    'title': 'Office space',
                    'year': 1991,
                    'search_term': 'Stapler',
                }
            },
            'context': {}
        }

        self.assertEqual(
            json.loads(responses.calls[0].request.body),
            request_body
        )

    @responses.activate
    def test_request_body_with_ssr_context(self):
        "The SSR request appends the 'ssr_context' in an expected way"
        responses.add(responses.POST, 'http://react-service.dev',
                      body='<h1>Title</h1>', status=200)

        self.mocked_context["ssr_ctx"] = {"location": "http://localhost"}

        Template(
            "{% load react %}"
            "{% react_render component=\"Component\" ssr_context=ssr_ctx %}"
        ).render(self.mocked_context)

        request_body = {
            'componentName': 'Component',
            'props': {},
            'context': {'location': "http://localhost"}
        }

        self.assertEqual(
            json.loads(responses.calls[0].request.body),
            request_body
        )

    @responses.activate
    def test_default_headers(self):
        "The SSR uses default headers with json as conten type"
        responses.add(responses.POST, 'http://react-service.dev',
                      body='Foo Bar', status=200)

        Template(
            "{% load react %}"
            "{% react_render component=\"Component\" %}"
        ).render(self.mocked_context)

        self.assertEqual(len(responses.calls), 1)
        self.assertEqual(
            responses.calls[0].request.headers['Content-type'],
            'application/json'
        )
        self.assertEqual(
            responses.calls[0].request.headers['Accept'],
            'text/plain'
        )

    @override_settings(
        REACT_RENDER_HEADERS={
            'Authorization': 'Basic 123'
        }
    )
    @responses.activate
    def test_custom_headers(self):
        "The SSR uses custom headers if present"
        responses.add(responses.POST, 'http://react-service.dev',
                      body='Foo Bar', status=200)

        Template(
            "{% load react %}"
            "{% react_render component=\"Component\" %}"
        ).render(self.mocked_context)

        self.assertEqual(len(responses.calls), 1)
        self.assertEqual(
            responses.calls[0].request.headers['Authorization'],
            'Basic 123'
        )

    @responses.activate
    def test_hydrate_if_ssr_present(self):
        "Makes sure ReactDOM.hydrate is used when SSR is active"
        responses.add(responses.POST, 'http://react-service.dev',
                      body='Foo Bar', status=200)

        out = Template(
            "{% load react %}"
            "{% react_render component=\"Component\" %}"
            "{% react_print %}"
        ).render(self.mocked_context)

        self.assertTrue('ReactDOM.hydrate(' in out)
@override_settings(
    REACT_RENDER_HOST='http://react-service.dev/',
)
class SSRViewTest(TestCase):
    """View-level check that the X-DISABLE-SSR header bypasses SSR entirely."""

    @mock.patch("django_react_templatetags.ssr.load_or_empty")
    def test_that_disable_ssr_header_disables_ssr(self, mocked_func):
        self.client.get(
            reverse('static_react_view'),
            HTTP_X_DISABLE_SSR='1',
        )
        # The SSR loader must never be invoked when the disable header is set.
        self.assertEqual(mocked_func.call_count, 0)
|
mikaelengstrom/django-react-templatetags | django_react_templatetags/templatetags/react.py | <filename>django_react_templatetags/templatetags/react.py
# -*- coding: utf-8 -*-
"""
This module contains tags for including react components into templates.
"""
import uuid
import importlib
import json
from django import template
from django.conf import settings
from django.template import Node
from django_react_templatetags import ssr
from django_react_templatetags.encoders import json_encoder_cls_factory
register = template.Library()

# Context key under which queued components are shared between
# {% react_render %} and {% react_print %}.
CONTEXT_KEY = "REACT_COMPONENTS"

# Dotted path of the context processor that must be installed for the tags.
CONTEXT_PROCESSOR = 'django_react_templatetags.context_processors.react_context_processor'  # NOQA

# Headers sent to the SSR service unless REACT_RENDER_HEADERS overrides them.
DEFAULT_SSR_HEADERS = {
    'Content-type': 'application/json',
    'Accept': 'text/plain',
}
def get_uuid():
    """Return a random 32-character hex string for component element ids."""
    return uuid.uuid4().hex
def has_ssr(request):
    """Return truthy when SSR is configured and not disabled for this request.

    A client can opt out via the X-DISABLE-SSR header; otherwise SSR is on
    whenever REACT_RENDER_HOST is set to a non-empty value.
    """
    if request and request.META.get("HTTP_X_DISABLE_SSR"):
        return False
    if not hasattr(settings, 'REACT_RENDER_HOST'):
        return False
    return settings.REACT_RENDER_HOST
def get_ssr_headers():
    """Headers for the SSR HTTP call; REACT_RENDER_HEADERS overrides defaults."""
    return getattr(settings, 'REACT_RENDER_HEADERS', DEFAULT_SSR_HEADERS)
def has_context_processor():
    """Check that the react context processor is registered in TEMPLATES."""
    try:
        processors = settings.TEMPLATES[0]['OPTIONS']['context_processors']
        return CONTEXT_PROCESSOR in processors
    # Any misconfiguration (missing keys, empty TEMPLATES) counts as absent.
    except Exception:  # NOQA
        return False
def load_from_ssr(component, ssr_context=None):
    """Delegate rendering of `component` to the SSR service helper."""
    return ssr.load_or_empty(
        component,
        ssr_context=ssr_context,
        headers=get_ssr_headers(),
    )
class ReactTagManager(Node):
    """
    Handles the printing of react placeholders and queueing, is invoked by
    react_render.
    """

    def __init__(self, identifier, component, data=None, css_class=None,
                 props=None, ssr_context=None):
        # Optional global prefix prepended to every component name.
        component_prefix = ""
        if hasattr(settings, "REACT_COMPONENT_PREFIX"):
            component_prefix = settings.REACT_COMPONENT_PREFIX

        self.identifier = identifier
        self.component = component
        self.component_prefix = component_prefix
        self.data = data
        self.css_class = css_class
        self.props = props
        self.ssr_context = ssr_context

    def render(self, context):
        """Queue the component in the context and emit its placeholder div."""
        if not has_context_processor():
            raise Exception('"react_context_processor must be added to TEMPLATE_CONTEXT_PROCESSORS"')  # NOQA

        qualified_component_name = self.get_qualified_name(context)
        identifier = self.get_identifier(context, qualified_component_name)
        component_props = self.get_component_props(context)

        component = {
            'identifier': identifier,
            'name': qualified_component_name,
            'json': self.props_to_json(component_props, context),
        }

        # Append to the render queue consumed later by {% react_print %}.
        components = context.get(CONTEXT_KEY, [])
        components.append(component)
        context[CONTEXT_KEY] = components

        placeholder_attr = (
            ('id', identifier),
            ('class', self.resolve_template_variable(self.css_class, context)),
        )
        # Drop attributes that resolved to None (e.g. no css class given).
        placeholder_attr = [x for x in placeholder_attr if x[1] is not None]

        component_html = ""
        if has_ssr(context.get("request", None)):
            component_html = load_from_ssr(component, ssr_context=self.get_ssr_context(context))

        return self.render_placeholder(placeholder_attr, component_html)

    def get_qualified_name(self, context):
        # Component name with the optional global prefix applied.
        component_name = self.resolve_template_variable(self.component, context)
        return '{}{}'.format(self.component_prefix, component_name)

    def get_identifier(self, context, qualified_component_name):
        # Explicit id= wins; otherwise generate '<Name>_<uuid>'.
        identifier = self.resolve_template_variable(self.identifier, context)

        if identifier:
            return identifier

        return '{}_{}'.format(qualified_component_name, get_uuid())

    def get_component_props(self, context):
        # data= gives the base dict; individual prop_*= args override/extend it.
        resolved_data = self.resolve_template_variable_else_none(self.data, context)
        resolved_data = resolved_data if resolved_data else {}

        for prop in self.props:
            data = self.resolve_template_variable_else_none(
                self.props[prop],
                context,
            )
            resolved_data[prop] = data

        return resolved_data

    def get_ssr_context(self, context):
        if not self.ssr_context:
            return {}

        return self.resolve_template_variable(self.ssr_context, context)

    @staticmethod
    def resolve_template_variable(value, context):
        # Resolve only if it is a template Variable; pass through literals.
        if isinstance(value, template.Variable):
            return value.resolve(context)

        return value

    @staticmethod
    def resolve_template_variable_else_none(value, context):
        # Missing variables (or a None value with no .resolve) become None.
        try:
            data = value.resolve(context)
        except template.VariableDoesNotExist:
            data = None
        except AttributeError:
            data = None

        return data

    @staticmethod
    def props_to_json(resolved_data, context):
        # Encoder is built per-context so model serialization can use it.
        cls = json_encoder_cls_factory(context)
        return json.dumps(resolved_data, cls=cls)

    @staticmethod
    def render_placeholder(attributes, component_html=''):
        # attributes: iterable of (name, value) pairs already filtered of Nones.
        attr_pairs = map(lambda x: '{}="{}"'.format(*x), attributes)
        return u'<div {}>{}</div>'.format(
            " ".join(attr_pairs),
            component_html,
        )
@register.tag
def react_render(parser, token):
    """
    Renders a react placeholder and adds it to the global render queue.

    Example:
        {% react_render component="ListRestaurants" data=restaurants %}
    """
    manager_cls = _get_tag_manager()
    return manager_cls(**_prepare_args(parser, token))
def _prepare_args(parses, token):
    """
    Normalize token arguments that can be passed along to node renderer.

    Maps the template-tag aliases (id, class, props) onto the manager's
    keyword names and collects ``prop_*`` arguments into the props dict.
    (NOTE: the first parameter is the template parser; the name "parses"
    is a historical typo kept for compatibility.)
    """
    values = {
        "identifier": None,
        "css_class": None,
        "data": None,
        "props": {},
    }

    key_mapping = {
        "id": "identifier",
        "class": "css_class",
        "props": "data",
    }

    args = token.split_contents()
    method = args[0]

    for arg in args[1:]:
        # Bug fix: split on the first '=' only; the old unbounded split
        # raised ValueError for values containing '=' (e.g. filter args).
        key, value = arg.split('=', 1)

        key = key_mapping.get(key, key)
        is_standalone_prop = key.startswith('prop_')
        if is_standalone_prop:
            key = key[5:]

        value = template.Variable(value)

        if is_standalone_prop:
            values['props'][key] = value
        else:
            values[key] = value

    assert "component" in values, "{} is missing component value".format(method)  # NOQA

    return values
def _get_tag_manager():
    """
    Loads a custom React Tag Manager if provided in Django Settings.
    """
    class_path = getattr(settings, 'REACT_RENDER_TAG_MANAGER', '')
    if not class_path:
        # No override configured: fall back to the built-in manager.
        return ReactTagManager

    module_path, class_name = class_path.rsplit('.', 1)
    return getattr(importlib.import_module(module_path), class_name)
@register.inclusion_tag('react_print.html', takes_context=True)
def react_print(context):
    """
    Generates ReactDOM.hydrate calls based on REACT_COMPONENT queue,
    this needs to be run after react has been loaded.
    The queue will be cleared after beeing called.

    Example:
        {% react_print %}
    """
    # Drain the queue so repeated {% react_print %} calls don't re-render.
    queued_components = context[CONTEXT_KEY]
    context[CONTEXT_KEY] = []

    new_context = context.__copy__()
    new_context['ssr_available'] = has_ssr(context.get("request", None))
    new_context['components'] = queued_components

    return new_context
|
dhruvyad/MultimodalGame | model_symmetric.py | import os
import sys
import json
import time
import numpy as np
import random
import h5py
import copy
import functools
import logging
import pickle
import torch
from torch.autograd import Variable as _Variable
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
from torch.nn.parameter import Parameter
import torchvision.datasets as dset
import torchvision.models as models
import torchvision.transforms as transforms
from torchvision.utils import save_image
from sklearn.metrics import confusion_matrix
from agents import Agent
from analyze_messages import convert_tensor_to_string
from dataset_loader import load_shapeworld_dataset
from community_util import sample_agents, build_train_matrix, build_eval_list, get_msg_pairs
from misc import build_mask
from misc import calculate_average_message
from misc import calculate_entropy, calculate_average_entropy, check_entropy
from misc import count_distinct_messages
from misc import read_log_load
from misc import recursively_set_device, torch_save, torch_load, torch_load_communities
from misc import FileLogger
from misc import VisdomLogger as Logger
from misc import xavier_normal
import gflags
FLAGS = gflags.FLAGS
# Closed attribute vocabularies for the ShapeWorld data.
SHAPES = ['circle', 'cross', 'ellipse', 'pentagon', 'rectangle', 'semicircle', 'square', 'triangle']
COLORS = ['blue', 'cyan', 'gray', 'green', 'magenta', 'red', 'yellow']
# "shape_color" combinations — presumably held out for out-of-distribution
# evaluation (as the name suggests); confirm against the training code.
OOD_EXAMPLES = ['square_red', 'triangle_green', 'circle_blue', 'rectangle_yellow', 'cross_magenta', 'ellipse_cyan']
# Cap on how many exemplars store_exemplar_batch writes to disk.
MAX_EXAMPLES_TO_SAVE = 200
def Variable(*args, **kwargs):
    """Create a torch Variable, moved to GPU when FLAGS.cuda is set."""
    wrapped = _Variable(*args, **kwargs)
    return wrapped.cuda() if FLAGS.cuda else wrapped
def loglikelihood(log_prob, target):
    """
    Args: log softmax scores (N, C) where N is the batch size
        and C is the number of classes
    Output: log likelihood (N)
    """
    # Pick out, for each row, the score at the target class index.
    return torch.gather(log_prob, 1, target)
def store_exemplar_batch(data, data_type, logger, flogger):
    '''Writes MAX_EXAMPLES_TO_SAVE examples in the data to file for debugging

    data: dictionary containing data and results
        data = {"masked_im_1": [],
                "masked_im_2": [],
                "msg_1": [],
                "msg_2": [],
                "p": [],
                "target": [],
                "caption": [],
                "shapes": [],
                "colors": [],
                "texts": [],
                }
    data_type: flag giving the name of the data to be stored.
        e.g. "correct", "incorrect"

    Writes image grids (im1.png / im2.png) plus one text file per logged
    field into FLAGS.log_path/<experiment>_<data_type>/.
    '''
    debuglogger.info(f'Num {data_type}: {len(data["masked_im_1"])}')
    debuglogger.info("Writing exemplar batch to file...")
    # All logged fields must be aligned element-wise.
    assert len(data["masked_im_1"]) == len(data["masked_im_2"]) == len(data["p"]) == len(data["caption"]) == len(data["shapes"]) == len(data["colors"]) == len(data["texts"])
    num_examples = min(len(data["shapes"]), MAX_EXAMPLES_TO_SAVE)
    path = FLAGS.log_path
    prefix = FLAGS.experiment_name + "_" + data_type
    if not os.path.exists(path + "/" + prefix):
        os.makedirs(path + "/" + prefix)
    # Save images
    masked_im_1 = torch.stack(data["masked_im_1"][:num_examples], dim=0)
    debuglogger.debug(f'Masked im 1: {type(masked_im_1)}')
    debuglogger.debug(f'Masked im 1: {masked_im_1.size()}')
    save_image(masked_im_1, path + '/' + prefix + '/im1.png', nrow=16, pad_value=0.5)
    masked_im_2 = torch.stack(data["masked_im_2"][:num_examples], dim=0)
    save_image(masked_im_2, path + '/' + prefix + '/im2.png', nrow=16, pad_value=0.5)
    # Save other relevant info
    keys = ['p', 'caption', 'shapes', 'colors']
    for k in keys:
        filename = path + '/' + prefix + '/' + k + '.txt'
        with open(filename, "w") as wf:
            for i in range(num_examples):
                wf.write(f'Example {i+1}: {data[k][i]}\n')
    # Write texts
    filename = path + '/' + prefix + '/texts.txt'
    with open(filename, "w") as wf:
        for i in range(num_examples):
            s = ""
            for t in data["texts"][i]:
                s += t + ", "
            wf.write(f'Example {i+1}: {s}\n')
    # Print average and std p
    np_p = np.array(data["p"])
    debuglogger.info(f'p: mean: {np.mean(np_p)} std: {np.std(np_p)}')
def calc_message_mean_and_std(m_store):
    '''Calculate the mean and std deviation of messages per agent per shape, color and shape-color combination'''
    for key, entry in m_store.items():
        # Stack the list of message tensors into one (count, dim) tensor.
        stacked = torch.stack(entry["message"], dim=0)
        debuglogger.debug(f'Key: {key}, Count: {entry["count"]}, Messages: {stacked.size()}')
        entry["mean"] = torch.mean(stacked, dim=0).cpu()
        entry["std"] = torch.std(stacked, dim=0).cpu()
    return m_store
def _log_category_stats(message_stats, category, labels, logger, flogger,
                        data_type, epoch, step, i_batch):
    """Log count/mean/std per agent and pairwise mean-distances for one
    category ("shape", "color" or "shape_color") — previously triplicated
    inline in log_message_stats."""
    for s in labels:
        num = 0
        if s in message_stats[0][category]:
            num = message_stats[0][category][s]["count"]
        means = []
        stds = []
        for i, m in enumerate(message_stats):
            if s in message_stats[i][category]:
                # Every agent must have seen this label the same number of times.
                assert num == message_stats[i][category][s]["count"]
                means.append(message_stats[i][category][s]["mean"])
                stds.append(message_stats[i][category][s]["std"])
        # Pairwise L2 distances between agents' mean messages.
        dists = []
        assert len(means) != 1
        for i in range(len(means)):
            for j in range(i + 1, len(means)):
                dists.append((i, j, torch.dist(means[i], means[j])))
            if i == len(means) - 2:
                break
        logger.log(key=data_type + ": " + s + " message stats: count: ", val=num, step=step)
        for i in range(len(means)):
            logger.log(key=data_type + ": " + s + " message stats: Agent " + str(i) + ": mean: ", val=means[i], step=step)
            logger.log(key=data_type + ": " + s + " message stats: Agent " + str(i) + ": std: ", val=stds[i], step=step)
            flogger.Log("Epoch: {} Step: {} Batch: {} {} message stats: {} {}: count: {}, agent {}: mean: {}, std: {}".format(
                epoch, step, i_batch, data_type, category, s, num, i, means[i], stds[i]))
        for i in range(len(dists)):
            logger.log(key=data_type + ": " + s + " message stats: distances: [" + str(dists[i][0]) + ":" + str(dists[i][1]) + "]: ", val=dists[i][2], step=step)
        flogger.Log("Epoch: {} Step: {} Batch: {} {} message stats: {} {}: dists: {}".format(
            epoch, step, i_batch, data_type, category, s, dists))


def log_message_stats(message_stats, logger, flogger, data_type, epoch, step, i_batch):
    ''' Helper function to write the message stats to file and log them to stdout
    Logs the mean and std deviation per set of messages per shape, per color and per shape-color for each message set.
    Additionally logs the distances between the mean message for each agent type per shape, color and shape-color'''
    debuglogger.info('Logging message stats')
    shape_colors = [str(s) + "_" + str(c) for s in SHAPES for c in COLORS]
    # The three categories share identical logging logic; output strings are
    # unchanged from the previous inline version.
    _log_category_stats(message_stats, "shape", SHAPES, logger, flogger,
                        data_type, epoch, step, i_batch)
    _log_category_stats(message_stats, "color", COLORS, logger, flogger,
                        data_type, epoch, step, i_batch)
    _log_category_stats(message_stats, "shape_color", shape_colors, logger, flogger,
                        data_type, epoch, step, i_batch)
    # Persist the raw stats for offline analysis.
    path = FLAGS.log_path + "/" + FLAGS.experiment_name + "_" + data_type + "_message_stats.pkl"
    pickle.dump(message_stats, open(path, "wb"))
    debuglogger.info(f'Saved message stats to log file')
def _accumulate_messages(store, key, m):
    """Add all messages from one exchange `m` to `store[key]`.

    Each entry tracks a running "count" and the raw message tensors under
    "message" so that mean/std can be computed later (by
    calc_message_mean_and_std, which is assumed to mutate the store in place).

    Args:
        store: dict mapping key -> {"count": int, "message": [tensor, ...]}
        key: shape, color or shape_color label for this example
        m: list of message variables, one per exchange round (non-empty)
    """
    if key in store:
        # Potentially multiple exchanges per example
        for m_i in m:
            store[key]["count"] += 1
            store[key]["message"].append(m_i.data)
    else:
        store[key] = {"count": 1, "message": [m[0].data]}
        for m_i in m[1:]:
            store[key]["count"] += 1
            store[key]["message"].append(m_i.data)


def run_analyze_messages(data, data_type, logger, flogger, epoch, step, i_batch):
    '''Calculates the mean and std deviation per set of messages per shape, per color and per shape-color for each message set.
    Additionally calculates the distances between the mean message for each agent type per shape, color and shape-color
    Args:
        data: dictionary containing log of data_type examples
        data_type: flag explaining the type of data
            e.g. "correct", "incorrect"
    Each message list should have the same length as the shape and colors lists.
    Also saves the messages and analysis to file.
    '''
    # One stats dict per agent, each keyed by shape, color and shape_color.
    message_stats = []
    messages = [data["msg_1"], data["msg_2"]]
    shapes = data["shapes"]
    colors = data["colors"]
    for m_set in messages:
        assert len(m_set) == len(shapes)
        assert len(m_set) == len(colors)
        message_stats.append({"shape": {},
                              "color": {},
                              "shape_color": {}})
    debuglogger.info(f'Messages: {len(messages[0])}, {len(messages[0][0])}')
    for i, m_set in enumerate(messages):
        s_store = message_stats[i]["shape"]
        c_store = message_stats[i]["color"]
        s_c_store = message_stats[i]["shape_color"]
        # Collect all messages, bucketed three ways for this agent.
        for j, (m, s, c) in enumerate(zip(m_set, shapes, colors)):
            _accumulate_messages(s_store, s, m)
            _accumulate_messages(c_store, c, m)
            _accumulate_messages(s_c_store, str(s) + "_" + str(c), m)
            if j == 5:
                # Spot-check the stores early on for debugging.
                debuglogger.debug(f's_store: {s_store}')
                debuglogger.debug(f'c_store: {c_store}')
                debuglogger.debug(f's_c_store: {s_c_store}')
        # Calculate and log mean and std_dev.
        # NOTE: calc_message_mean_and_std must mutate the store in place for
        # message_stats to pick up the results (the rebinding below is local).
        s_store = calc_message_mean_and_std(s_store)
        c_store = calc_message_mean_and_std(c_store)
        s_c_store = calc_message_mean_and_std(s_c_store)
    log_message_stats(message_stats, logger, flogger,
                      data_type, epoch, step, i_batch)
def _collect_exchange_data(messages, probs, i):
    """Gather example i's messages and probs across all exchange rounds.

    Args:
        messages: list (one per exchange round) of message tensors, indexable by example
        probs: list (one per exchange round) of probability tensors, same layout
        i: example index within the batch
    Returns:
        (messages, probs, entropies, string forms) — four parallel lists with
        one entry per exchange round, all moved to CPU.
    """
    msgs = []   # message
    ps = []     # message probability
    ents = []   # entropy per message
    strs = []   # message as a string
    for exchange_msgs, exchange_probs in zip(messages, probs):
        m = exchange_msgs[i].data.cpu()
        p = exchange_probs[i].data.cpu()
        msgs.append(m)
        ps.append(p)
        ents.append(calculate_entropy(p))
        strs.append(convert_tensor_to_string(m))
    return msgs, ps, ents, strs


def add_data_point(batch, i, data_store, messages_1, messages_2, probs_1, probs_2):
    '''Adds the relevant data from a batch (example i) to a data store to analyze later.

    Mutates and returns data_store; each of its keys holds one parallel list.
    '''
    # Storing images creates a huge slowdown and no need to store them
    # data_store["masked_im_1"].append(batch["masked_im_1"][i])
    # data_store["masked_im_2"].append(batch["masked_im_2"][i])
    data_store["p"].append(batch["p"][i])
    data_store["target"].append(batch["target"][i])
    data_store["caption"].append(batch["caption_str"][i])
    data_store["shapes"].append(batch["shapes"][i])
    data_store["colors"].append(batch["colors"][i])
    data_store["texts"].append(batch["texts_str"][i])
    # Add messages, probs and entropy from each exchange, agent 1 then agent 2.
    m_1, p_1, m_1_ent, m_1_str = _collect_exchange_data(messages_1, probs_1, i)
    data_store["msg_1"].append(m_1)
    data_store["probs_1"].append(p_1)
    data_store["msg_1_str"].append(m_1_str)
    data_store["msg_1_ent"].append(m_1_ent)
    m_2, p_2, m_2_ent, m_2_str = _collect_exchange_data(messages_2, probs_2, i)
    data_store["msg_2"].append(m_2)
    data_store["probs_2"].append(p_2)
    data_store["msg_2_str"].append(m_2_str)
    data_store["msg_2_ent"].append(m_2_ent)
    return data_store
def _pack_message_records(store, correct_flag):
    """Convert one logged data store into a list of per-example dicts.

    Args:
        store: dict of parallel lists as built by add_data_point
        correct_flag: value recorded under "correct" for every example
    Returns:
        list of dicts, one per example in the store.
    """
    records = []
    for i in range(len(store["caption"])):
        records.append({
            "caption": store["caption"][i],
            "msg_1": store["msg_1"][i],
            "msg_2": store["msg_2"][i],
            "probs_1": store["probs_1"][i],
            "probs_2": store["probs_2"][i],
            "shape": store["shapes"][i],
            "color": store["colors"][i],
            "correct": correct_flag,
        })
    return records


def save_messages_and_stats(correct, incorrect, agent_tag):
    '''Saves all messages and message probs between two agents, along with relevant tags such as the shape, color and caption, and whether the message was correct or incorrect
    Data is stored as a list of dicts and pickled. Each dict corresponds to one exchange. The dicts have the following keys
    - msg_1: message(s) sent from agent 1
    - msg_2: message(s) sent from agent 2
    - probs_1: message 1 probs
    - probs_2: message 2 probs
    - caption: the correct caption
    - shape: the shape in the caption
    - color: the color in the caption
    - correct: boolean, whether both agents solved the task after communication
    '''
    num_correct = len(correct["caption"])
    num_incorrect = len(incorrect["caption"])
    # Correct examples first, then incorrect, matching the original ordering.
    message_data = (_pack_message_records(correct, True) +
                    _pack_message_records(incorrect, False))
    debuglogger.info(f'Saving messages...')
    debuglogger.info(f'{num_correct} correct, {num_incorrect} incorrect, {num_correct + num_incorrect} total')
    path = FLAGS.log_path + "/" + FLAGS.experiment_name + "_" + agent_tag + "_message_stats.pkl"
    # Fix: use a context manager so the file handle is always closed
    # (previously pickle.dump(..., open(path, "wb")) leaked the handle).
    with open(path, "wb") as f:
        pickle.dump(message_data, f)
    debuglogger.info(f'Messages saved')
def get_similarity(dataset_path, in_domain_eval, agent1, agent2, a1_group, a2_group, a1_idx, a2_idx, agent_codes_1, agent_codes_2, logger, flogger):
    '''Computes the similarity between two language groups. Can also be used to compute self similarity.

    Plays the communication game on a dev set, then, for each example where
    exactly one agent can see the image, replays the game with the blind-side
    message "corrupted" (one shape/color code subtracted, another added) for the
    changed agent's own codes and for every permutation of the two groups'
    average codes, and records whether the listening agent switches to the new
    target. High accuracy under swapped codes indicates similar languages.

    Args:
        dataset_path: path to the shapeworld dataset used for evaluation
        in_domain_eval: if True evaluate on the "train" split, otherwise on FLAGS.dataset_eval_mode
        agent1: first agent
        agent2: second agent
        a1_group: which group agent 1 belongs to (1 or 2)
        a2_group: which group agent 2 belongs to (1 or 2)
        a1_idx: index of first agent in the codes of the agent's group
        a2_idx: index of second agent in the codes of the agent's group
        agent_codes_1: average codes for each shape and color (from correct, non blank answers) sent by agents in group 1 (one set for each agent)
        agent_codes_2: average codes for each shape and color (from correct, non blank answers) sent by agents in group 2 (one set for each agent)
        logger: metric logger (not referenced in this function's body)
        flogger: file logger used for the summary statistics

    Returns:
        test_language_similarity: dict of parallel lists, one entry per corrupted
        game played, recording correctness, the changed attribute (shape or
        color), the original attributes and which agent had its message changed.
    '''
    # Log agent details
    debuglogger.info(f'Getting similarity for agents: [{a1_idx}/{a2_idx}], group: [{a1_group}/{a2_group}], length codes: [{len(agent_codes_1)}/{len(agent_codes_2)}]')
    # Keep track of labels
    true_labels = []
    pred_labels_1_nc = []
    pred_labels_1_com = []
    pred_labels_2_nc = []
    pred_labels_2_com = []
    # Keep track of number of correct observations
    total = 0
    total_correct_nc = 0
    total_correct_com = 0
    atleast1_correct_nc = 0
    atleast1_correct_com = 0
    # Keep track of score when messages are changed
    test_language_similarity = {"total": 0, "correct": [], "agent_w_changed_msg": [], "shape": [], "color": [], "orig_shape": [], "orig_color": [], "originally_correct": []}
    detail_language_similarity = []
    # Load development images
    if in_domain_eval:
        eval_mode = "train"
        debuglogger.info("Evaluating on in domain validation set")
    else:
        eval_mode = FLAGS.dataset_eval_mode
        debuglogger.info("Evaluating on out of domain validation set")
    dev_loader = load_shapeworld_dataset(dataset_path, FLAGS.glove_path, eval_mode, FLAGS.dataset_size_dev, FLAGS.dataset_type, FLAGS.dataset_name, FLAGS.batch_size_dev, FLAGS.random_seed, FLAGS.shuffle_dev, FLAGS.img_feat, FLAGS.cuda, truncate_final_batch=False)
    _batch_counter = 0
    for batch in dev_loader:
        _batch_counter += 1
        debuglogger.debug(f'Batch {_batch_counter}')
        target = batch["target"]
        im_feats_1 = batch["im_feats_1"]
        im_feats_2 = batch["im_feats_2"]
        p = batch["p"]
        desc = Variable(batch["texts_vec"])
        _batch_size = target.size(0)
        true_labels.append(target.cpu().numpy().reshape(-1))
        # GPU support
        if FLAGS.cuda:
            im_feats_1 = im_feats_1.cuda()
            im_feats_2 = im_feats_2.cuda()
            target = target.cuda()
            desc = desc.cuda()
        data = {"im_feats_1": im_feats_1,
                "im_feats_2": im_feats_2,
                "p": p}
        exchange_args = dict()
        exchange_args["data"] = data
        exchange_args["target"] = target
        exchange_args["desc"] = desc
        exchange_args["train"] = False
        exchange_args["break_early"] = not FLAGS.fixed_exchange
        # Play the uncorrupted game on the full batch first.
        s, message_1, message_2, y_all, r = exchange(
            agent1, agent2, exchange_args)
        s_masks_1, s_feats_1, s_probs_1 = s[0]
        s_masks_2, s_feats_2, s_probs_2 = s[1]
        feats_1, probs_1 = message_1
        feats_2, probs_2 = message_2
        y_nc = y_all[0]
        y = y_all[1]
        # Mask loss if dynamic exchange length
        if FLAGS.fixed_exchange:
            binary_s_masks = None
            binary_agent1_masks = None
            binary_agent2_masks = None
            bas_agent1_masks = None
            bas_agent2_masks = None
            y1_masks = None
            y2_masks = None
            outp_1 = y[0][-1]
            outp_2 = y[1][-1]
        else:
            # TODO
            pass
        # Before communication predictions
        # Obtain predictions, loss and stats agent 1
        (dist_1_nc, maxdist_1_nc, argmax_1_nc, ent_1_nc, nll_loss_1_nc,
         logs_1_nc) = get_classification_loss_and_stats(y_nc[0], target)
        # Obtain predictions, loss and stats agent 2
        (dist_2_nc, maxdist_2_nc, argmax_2_nc, ent_2_nc, nll_loss_2_nc,
         logs_2_nc) = get_classification_loss_and_stats(y_nc[1], target)
        # After communication predictions
        # Obtain predictions, loss and stats agent 1
        (dist_1, maxdist_1, argmax_1, ent_1, nll_loss_1_com,
         logs_1) = get_classification_loss_and_stats(outp_1, target)
        # Obtain predictions, loss and stats agent 2
        (dist_2, maxdist_2, argmax_2, ent_2, nll_loss_2_com,
         logs_2) = get_classification_loss_and_stats(outp_2, target)
        # Store top 1 prediction for confusion matrix
        pred_labels_1_nc.append(argmax_1_nc.cpu().numpy())
        pred_labels_1_com.append(argmax_1.cpu().numpy())
        pred_labels_2_nc.append(argmax_2_nc.cpu().numpy())
        pred_labels_2_com.append(argmax_2.cpu().numpy())
        # Calculate number of correct observations for different types
        accuracy_1_nc, correct_1_nc, top_1_1_nc = calculate_accuracy(
            dist_1_nc, target, FLAGS.batch_size_dev, FLAGS.top_k_dev)
        accuracy_1, correct_1, top_1_1 = calculate_accuracy(
            dist_1, target, FLAGS.batch_size_dev, FLAGS.top_k_dev)
        accuracy_2_nc, correct_2_nc, top_1_2_nc = calculate_accuracy(
            dist_2_nc, target, FLAGS.batch_size_dev, FLAGS.top_k_dev)
        accuracy_2, correct_2, top_1_2 = calculate_accuracy(
            dist_2, target, FLAGS.batch_size_dev, FLAGS.top_k_dev)
        batch_correct_nc = correct_1_nc.float() + correct_2_nc.float()
        batch_correct_com = correct_1.float() + correct_2.float()
        batch_correct_top_1_nc = top_1_1_nc.float() + top_1_2_nc.float()
        batch_correct_top_1_com = top_1_1.float() + top_1_2.float()
        # Update accuracy counts
        total += float(_batch_size)
        total_correct_nc += (batch_correct_nc == 2).sum()
        total_correct_com += (batch_correct_com == 2).sum()
        atleast1_correct_nc += (batch_correct_nc > 0).sum()
        atleast1_correct_com += (batch_correct_com > 0).sum()
        # Get correct indices
        correct_indices_nc = (batch_correct_nc == 2)
        correct_indices_com = (batch_correct_com == 2)
        # Test compositionality, replaying each example as a batch of one.
        for _ in range(_batch_size):
            # Construct batch of size 1
            data = {"im_feats_1": im_feats_1[_].unsqueeze(0),
                    "im_feats_2": im_feats_2[_].unsqueeze(0),
                    "p": p[_]}
            exchange_args = dict()
            exchange_args["data"] = data
            exchange_args["desc"] = desc[_].unsqueeze(0)
            exchange_args["train"] = False
            exchange_args["break_early"] = not FLAGS.fixed_exchange
            exchange_args["test_language_similarity"] = True
            # Construct candidate example to change message to
            debuglogger.info(f'Agent with sight: {batch["non_blank_partition"][_]}')
            # Only select examples where one agent is blind
            # (non_blank_partition == 0 presumably means both agents see — TODO confirm)
            if batch['non_blank_partition'][_] != 0:
                change_agent = batch['non_blank_partition'][_]
                texts = batch["texts_str"][_]
                s = batch["shapes"][_]
                c = batch["colors"][_]
                debuglogger.info(f'i: {_}, caption: {batch["caption_str"][_]}, original target: {target[_]}, Correct? {correct_1[_]}/{correct_2[_]}')
                debuglogger.debug(f'i: {_}, texts: {texts}')
                debuglogger.debug(f'i: {_}, texts_shapes: {batch["texts_shapes"][_]}')
                debuglogger.debug(f'i: {_}, texts_colors: {batch["texts_colors"][_]}')
                for _t, t in enumerate(texts):
                    # Only select examples that are different to the current target
                    if _t != target[_]:
                        st = batch["texts_shapes"][_][_t]
                        ct = batch["texts_colors"][_][_t]
                        # Only select examples where there is a different of either the shape or the color, not both
                        if (st == s and ct != c) or (st != s and ct == c):
                            exchange_args["target"] = _t
                            exchange_args["change_agent"] = change_agent
                            if ct != c:
                                exchange_args["subtract"] = c
                                exchange_args["add"] = ct
                            else:
                                exchange_args["subtract"] = s
                                exchange_args["add"] = st
                            # Discard examples which involve adding or subtracting "None" (no code for this)
                            if exchange_args["subtract"] is None or exchange_args["add"] is None:
                                debuglogger.info(f'Skipping example due to None add or subtract...')
                                continue
                            debuglogger.info(f'i: {_} t: {_t}, subtracting: {exchange_args["subtract"]}, adding: {exchange_args["add"]}, change agent: {exchange_args["change_agent"]}')
                            # Set up to play the game and store results for all permutations
                            example_stats = {'subtract': {'name': exchange_args["subtract"], 'total': 0, 'correct': 0},
                                             'add': {'name': exchange_args["add"], 'total': 0, 'correct': 0},
                                             'own_correct': 0,
                                             'originally_correct': 0,
                                             'correct_permutations': 0,
                                             'total_permutations': 0,
                                             'total_own_codes': 0}
                            # Fix who goes first for all permutations in an example
                            exchange_args["use_given_who_goes_first"] = True
                            if random.random() < 0.5:
                                exchange_args["given_who_goes_first"] = 1
                            else:
                                exchange_args["given_who_goes_first"] = 2
                            # Flip the change agent since the agents receiving the image feats will be flipped
                            change_agent = 1 if change_agent == 2 else 2
                            exchange_args["change_agent"] = change_agent
                            debuglogger.info(f'Given who goes first: {exchange_args["given_who_goes_first"]}, change agent: {exchange_args["change_agent"]}')
                            # NOTE(review): the three "play game / score / record"
                            # sections below are near-identical copies; kept
                            # verbatim to preserve exact behavior.
                            ''' ==========================================================='''
                            # First play game with the change agent's own codes
                            own_codes_flag = True
                            debuglogger.debug(f'Playing game with own codes: {own_codes_flag}')
                            if change_agent == 1:
                                if a1_group == 1:
                                    exchange_args["agent_subtract_dict"] = agent_codes_1[a1_idx]
                                    exchange_args["agent_add_dict"] = agent_codes_1[a1_idx]
                                else:
                                    exchange_args["agent_subtract_dict"] = agent_codes_2[a1_idx]
                                    exchange_args["agent_add_dict"] = agent_codes_2[a1_idx]
                            else:
                                if a2_group == 1:
                                    exchange_args["agent_subtract_dict"] = agent_codes_1[a2_idx]
                                    exchange_args["agent_add_dict"] = agent_codes_1[a2_idx]
                                else:
                                    exchange_args["agent_subtract_dict"] = agent_codes_2[a2_idx]
                                    exchange_args["agent_add_dict"] = agent_codes_2[a2_idx]
                            # Play game, corrupting message
                            _s, message_1, message_2, y_all, r = exchange(
                                agent1, agent2, exchange_args)
                            s_masks_1, s_feats_1, s_probs_1 = _s[0]
                            s_masks_2, s_feats_2, s_probs_2 = _s[1]
                            feats_1, probs_1 = message_1
                            feats_2, probs_2 = message_2
                            y_nc = y_all[0]
                            y = y_all[1]
                            # We only care about after communication predictions when measuring the peformance
                            score = None
                            new_target = torch.zeros(1).fill_(_t).long()
                            debuglogger.debug(f'Old target: {target[_]}')
                            na, argmax_y1 = torch.max(y[0][-1], 1)
                            na, argmax_y2 = torch.max(y[1][-1], 1)
                            debuglogger.debug(f'y1 logits: {y[0][-1].data}, y2 logits: {y[1][-1].data}')
                            debuglogger.debug(f'y1: {argmax_y1.data[0]}, y2: {argmax_y2.data[0]}, new_target: {new_target[0]}')
                            if FLAGS.cuda:
                                new_target = new_target.cuda()
                            # Score the agent that did NOT have its message changed.
                            if change_agent == 1:
                                # Calculate score for agent 2
                                (dist_2_change, na, na, na, na, na) = get_classification_loss_and_stats(y[1][-1], new_target)
                                debuglogger.debug(f'dist: {dist_2_change.data}')
                                na, na, top_1_2_change = calculate_accuracy(
                                    dist_2_change, new_target, 1, FLAGS.top_k_dev)
                                score = top_1_2_change
                            else:
                                # Calculate score for agent 1
                                (dist_1_change, na, na, na, na, na) = get_classification_loss_and_stats(y[0][-1], new_target)
                                debuglogger.debug(f'dist: {dist_1_change.data}')
                                na, na, top_1_1_change = calculate_accuracy(
                                    dist_1_change, new_target, 1, FLAGS.top_k_dev)
                                score = top_1_1_change
                            debuglogger.debug(f'i: {_}_{_t}: New caption: {t}, new target: {_t}, change_agent: {change_agent}, correct: {score[0]}, originally correct: {correct_1[_]}/{correct_2[_]}')
                            # Store results
                            test_language_similarity["total"] += 1
                            test_language_similarity["orig_shape"].append(s)
                            test_language_similarity["orig_color"].append(c)
                            if score[0] == 1:
                                test_language_similarity["correct"].append(1)
                            else:
                                test_language_similarity["correct"].append(0)
                            if change_agent == 2:
                                # The other agent had their message changed
                                test_language_similarity["originally_correct"].append(correct_1[_])
                                test_language_similarity["agent_w_changed_msg"].append(1)
                            else:
                                test_language_similarity["originally_correct"].append(correct_2[_])
                                test_language_similarity["agent_w_changed_msg"].append(2)
                            if ct != c:
                                test_language_similarity["shape"].append(None)
                                test_language_similarity["color"].append(ct)
                            else:
                                test_language_similarity["shape"].append(st)
                                test_language_similarity["color"].append(None)
                            # Track detailed results
                            example_stats['total_permutations'] += 1
                            example_stats['subtract']["total"] += 1
                            example_stats['add']["total"] += 1
                            if score[0] == 1:
                                example_stats["subtract"]["correct"] += 1
                                example_stats["add"]["correct"] += 1
                                example_stats["correct_permutations"] += 1
                            if change_agent == 2:
                                example_stats['originally_correct'] = correct_1[_]
                            else:
                                example_stats['originally_correct'] = correct_2[_]
                            if own_codes_flag:
                                example_stats['total_own_codes'] += 1
                                if score[0] == 1:
                                    example_stats['own_correct'] += 1
                            ''' ==========================================================='''
                            # Now play the game with all permutations of codes
                            for _g1 in range(len(agent_codes_1)):
                                for _g2 in range(len(agent_codes_2)):
                                    # Track if agent is playing with its own codes
                                    own_codes_flag = False
                                    if FLAGS.self_similarity and (change_agent == 1) and (_g1 == _g2 == a1_idx):
                                        own_codes_flag = True
                                    elif FLAGS.self_similarity and (change_agent == 2) and (_g1 == _g2 == a2_idx):
                                        own_codes_flag = True
                                    debuglogger.debug(f'Playing game with own codes: {own_codes_flag}')
                                    ''' ==========================================================='''
                                    # First play game with codes from group 1 subtracting and code from group 2 adding
                                    exchange_args["agent_subtract_dict"] = agent_codes_1[_g1]
                                    exchange_args["agent_add_dict"] = agent_codes_2[_g2]
                                    # Play game, corrupting message
                                    _s, message_1, message_2, y_all, r = exchange(
                                        agent1, agent2, exchange_args)
                                    s_masks_1, s_feats_1, s_probs_1 = _s[0]
                                    s_masks_2, s_feats_2, s_probs_2 = _s[1]
                                    feats_1, probs_1 = message_1
                                    feats_2, probs_2 = message_2
                                    y_nc = y_all[0]
                                    y = y_all[1]
                                    # We only care about after communication predictions when measuring the peformance
                                    score = None
                                    new_target = torch.zeros(1).fill_(_t).long()
                                    debuglogger.debug(f'Old target: {target[_]}')
                                    na, argmax_y1 = torch.max(y[0][-1], 1)
                                    na, argmax_y2 = torch.max(y[1][-1], 1)
                                    debuglogger.debug(f'y1 logits: {y[0][-1].data}, y2 logits: {y[1][-1].data}')
                                    debuglogger.debug(f'y1: {argmax_y1.data[0]}, y2: {argmax_y2.data[0]}, new_target: {new_target[0]}')
                                    if FLAGS.cuda:
                                        new_target = new_target.cuda()
                                    if change_agent == 1:
                                        # Calculate score for agent 2
                                        (dist_2_change, na, na, na, na, na) = get_classification_loss_and_stats(y[1][-1], new_target)
                                        debuglogger.debug(f'dist: {dist_2_change.data}')
                                        na, na, top_1_2_change = calculate_accuracy(
                                            dist_2_change, new_target, 1, FLAGS.top_k_dev)
                                        score = top_1_2_change
                                    else:
                                        # Calculate score for agent 1
                                        (dist_1_change, na, na, na, na, na) = get_classification_loss_and_stats(y[0][-1], new_target)
                                        debuglogger.debug(f'dist: {dist_1_change.data}')
                                        na, na, top_1_1_change = calculate_accuracy(
                                            dist_1_change, new_target, 1, FLAGS.top_k_dev)
                                        score = top_1_1_change
                                    debuglogger.debug(f'i: {_}_{_t}: New caption: {t}, new target: {_t}, change_agent: {change_agent}, correct: {score[0]}, originally correct: {correct_1[_]}/{correct_2[_]}')
                                    # Store results
                                    test_language_similarity["total"] += 1
                                    test_language_similarity["orig_shape"].append(s)
                                    test_language_similarity["orig_color"].append(c)
                                    if score[0] == 1:
                                        test_language_similarity["correct"].append(1)
                                    else:
                                        test_language_similarity["correct"].append(0)
                                    if change_agent == 2:
                                        # The other agent had their message changed
                                        test_language_similarity["originally_correct"].append(correct_1[_])
                                        test_language_similarity["agent_w_changed_msg"].append(1)
                                    else:
                                        test_language_similarity["originally_correct"].append(correct_2[_])
                                        test_language_similarity["agent_w_changed_msg"].append(2)
                                    if ct != c:
                                        test_language_similarity["shape"].append(None)
                                        test_language_similarity["color"].append(ct)
                                    else:
                                        test_language_similarity["shape"].append(st)
                                        test_language_similarity["color"].append(None)
                                    # Track detailed results
                                    example_stats['total_permutations'] += 1
                                    example_stats['subtract']["total"] += 1
                                    example_stats['add']["total"] += 1
                                    if score[0] == 1:
                                        example_stats["subtract"]["correct"] += 1
                                        example_stats["add"]["correct"] += 1
                                        example_stats["correct_permutations"] += 1
                                    if change_agent == 2:
                                        example_stats['originally_correct'] = correct_1[_]
                                    else:
                                        example_stats['originally_correct'] = correct_2[_]
                                    if own_codes_flag:
                                        example_stats['total_own_codes'] += 1
                                        if score[0] == 1:
                                            example_stats['own_correct'] += 1
                                    ''' ==========================================================='''
                                    ''' ==========================================================='''
                                    debuglogger.debug(f'Playing game with own codes: {own_codes_flag}')
                                    # Now play the game with the switched codes
                                    exchange_args["agent_subtract_dict"] = agent_codes_2[_g2]
                                    exchange_args["agent_add_dict"] = agent_codes_1[_g1]
                                    # Play game, corrupting message
                                    _s, message_1, message_2, y_all, r = exchange(
                                        agent1, agent2, exchange_args)
                                    s_masks_1, s_feats_1, s_probs_1 = _s[0]
                                    s_masks_2, s_feats_2, s_probs_2 = _s[1]
                                    feats_1, probs_1 = message_1
                                    feats_2, probs_2 = message_2
                                    y_nc = y_all[0]
                                    y = y_all[1]
                                    # We only care about after communication predictions when measuring the peformance
                                    score = None
                                    new_target = torch.zeros(1).fill_(_t).long()
                                    debuglogger.debug(f'Old target: {target[_]}')
                                    na, argmax_y1 = torch.max(y[0][-1], 1)
                                    na, argmax_y2 = torch.max(y[1][-1], 1)
                                    debuglogger.debug(f'y1 logits: {y[0][-1].data}, y2 logits: {y[1][-1].data}')
                                    debuglogger.debug(f'y1: {argmax_y1.data[0]}, y2: {argmax_y2.data[0]}, new_target: {new_target[0]}')
                                    if FLAGS.cuda:
                                        new_target = new_target.cuda()
                                    if change_agent == 1:
                                        # Calculate score for agent 2
                                        (dist_2_change, na, na, na, na, na) = get_classification_loss_and_stats(y[1][-1], new_target)
                                        debuglogger.debug(f'dist: {dist_2_change.data}')
                                        na, na, top_1_2_change = calculate_accuracy(
                                            dist_2_change, new_target, 1, FLAGS.top_k_dev)
                                        score = top_1_2_change
                                    else:
                                        # Calculate score for agent 1
                                        (dist_1_change, na, na, na, na, na) = get_classification_loss_and_stats(y[0][-1], new_target)
                                        debuglogger.debug(f'dist: {dist_1_change.data}')
                                        na, na, top_1_1_change = calculate_accuracy(
                                            dist_1_change, new_target, 1, FLAGS.top_k_dev)
                                        score = top_1_1_change
                                    debuglogger.debug(f'i: {_}_{_t}: New caption: {t}, new target: {_t}, change_agent: {change_agent}, correct: {score[0]}, originally correct: {correct_1[_]}/{correct_2[_]}')
                                    # Store results
                                    test_language_similarity["total"] += 1
                                    test_language_similarity["orig_shape"].append(s)
                                    test_language_similarity["orig_color"].append(c)
                                    if score[0] == 1:
                                        test_language_similarity["correct"].append(1)
                                    else:
                                        test_language_similarity["correct"].append(0)
                                    if change_agent == 2:
                                        # The other agent had their message changed
                                        test_language_similarity["originally_correct"].append(correct_1[_])
                                        test_language_similarity["agent_w_changed_msg"].append(1)
                                    else:
                                        test_language_similarity["originally_correct"].append(correct_2[_])
                                        test_language_similarity["agent_w_changed_msg"].append(2)
                                    if ct != c:
                                        test_language_similarity["shape"].append(None)
                                        test_language_similarity["color"].append(ct)
                                    else:
                                        test_language_similarity["shape"].append(st)
                                        test_language_similarity["color"].append(None)
                                    # Track detailed results
                                    example_stats['total_permutations'] += 1
                                    example_stats['subtract']["total"] += 1
                                    example_stats['add']["total"] += 1
                                    if score[0] == 1:
                                        example_stats["subtract"]["correct"] += 1
                                        example_stats["add"]["correct"] += 1
                                        example_stats["correct_permutations"] += 1
                                    if change_agent == 2:
                                        example_stats['originally_correct'] = correct_1[_]
                                    else:
                                        example_stats['originally_correct'] = correct_2[_]
                                    if own_codes_flag:
                                        example_stats['total_own_codes'] += 1
                                        if score[0] == 1:
                                            example_stats['own_correct'] += 1
                                    ''' ==========================================================='''
                            debuglogger.info(f'Detailed stats: {example_stats}')
                            detail_language_similarity.append(example_stats)
    debuglogger.info(f'Total msg changed: {test_language_similarity["total"]}, Correct: {sum(test_language_similarity["correct"])}')
    debuglogger.info(f'Eval total size: {total}')
    debuglogger.info(f'Eval total correct com: {total_correct_com}')
    # Aggregate the detailed per-example stats by subtracted code, added code
    # and the (subtract, add) transform pair.
    aggregate_stats = {}
    detail_total = 0
    detail_orig_correct = 0
    detail_total_permutes_not_filtered = 0
    detail_own_total_total = 0
    detail_own_total = 0
    detail_own_total_filt = 0
    detail_own_correct = 0
    permutes_total = 0
    permutes_correct = 0
    for elem in detail_language_similarity:
        detail_total += 1
        detail_total_permutes_not_filtered += elem['total_permutations']
        detail_own_total_total += elem['total_own_codes']
        # Only examples that were originally answered correctly contribute to
        # the similarity numbers below.
        if elem["originally_correct"]:
            detail_orig_correct += 1
            detail_own_total += elem['total_own_codes']
            if elem['own_correct'] > 0:
                detail_own_total_filt += elem['total_own_codes']
                detail_own_correct += elem['own_correct']
                permutes_total += elem['total_permutations']
                permutes_correct += elem['correct_permutations']
                if elem['subtract']['name'] not in aggregate_stats:
                    aggregate_stats[elem['subtract']['name']] = {'total': 0, 'correct': 0, 'own_correct': 0, 'own_total': 0}
                aggregate_stats[elem['subtract']['name']]['total'] += elem['subtract']['total']
                aggregate_stats[elem['subtract']['name']]['correct'] += elem['subtract']['correct']
                aggregate_stats[elem['subtract']['name']]['own_total'] += elem['total_own_codes']
                aggregate_stats[elem['subtract']['name']]['own_correct'] += elem['own_correct']
                if elem['add']['name'] not in aggregate_stats:
                    aggregate_stats[elem['add']['name']] = {'total': 0, 'correct': 0, 'own_correct': 0, 'own_total': 0}
                aggregate_stats[elem['add']['name']]['total'] += elem['add']['total']
                aggregate_stats[elem['add']['name']]['correct'] += elem['add']['correct']
                aggregate_stats[elem['add']['name']]['own_total'] += elem['total_own_codes']
                aggregate_stats[elem['add']['name']]['own_correct'] += elem['own_correct']
                transform = elem['subtract']['name'] + '_' + elem['add']['name']
                if transform not in aggregate_stats:
                    aggregate_stats[transform] = {'total': 0, 'correct': 0, 'own_correct': 0, 'own_total': 0}
                aggregate_stats[transform]['total'] += elem['subtract']['total']
                aggregate_stats[transform]['correct'] += elem['subtract']['correct']
                aggregate_stats[transform]['own_total'] += elem['total_own_codes']
                aggregate_stats[transform]['own_correct'] += elem['own_correct']
    # Log detailed stats results
    flogger.Log(f'Agents {a1_idx + 1},{a2_idx + 1}: Total examples: {detail_total}, orig correct: {detail_orig_correct}, % correct: {detail_orig_correct / detail_total}')
    flogger.Log(f'Agents {a1_idx + 1},{a2_idx + 1}: Total permutations: {detail_total_permutes_not_filtered}, own_codes: {detail_own_total_total}, % own {detail_own_total_total / detail_total_permutes_not_filtered}')
    flogger.Log(f'Agents {a1_idx + 1},{a2_idx + 1}: Total own codes: {detail_own_total}, correct own codes: {detail_own_correct}, % correct {detail_own_correct / detail_own_total}')
    flogger.Log('Filtering for originally correct examples with at least one permutation with own codes correct...')
    flogger.Log(f'Agents {a1_idx + 1},{a2_idx + 1}: Permutations total: {permutes_total}, permutations correct: {permutes_correct}, % {permutes_correct / permutes_total}')
    # "Normalized" removes the own-code games so similarity reflects only
    # cross-group code swaps.
    norm_total = permutes_total - detail_own_total_filt
    norm_correct = permutes_correct - detail_own_correct
    flogger.Log(f'Agents {a1_idx + 1},{a2_idx + 1}: Normalized total: {norm_total} normalized correct: {norm_correct}, SIMILARITY: {norm_correct / norm_total}')
    for key in aggregate_stats:
        _total = aggregate_stats[key]['total']
        _correct = aggregate_stats[key]['correct']
        _own_total = aggregate_stats[key]['own_total']
        _own_correct = aggregate_stats[key]['own_correct']
        _normalized_total = _total - _own_total
        _normalized_correct = _correct - _own_correct
        if _total > 0:
            flogger.Log(f'Agents {a1_idx + 1},{a2_idx + 1}: {key}: total: {_total} correct: {_correct} %: {_correct / _total}')
        if _normalized_total > 0:
            flogger.Log(f'Agents {a1_idx + 1},{a2_idx + 1}: {key}: normalized total: {_normalized_total} normalized correct: {_normalized_correct} SIMILARITY: {_normalized_correct / _normalized_total}')
        else:
            flogger.Log(f'Agents {a1_idx + 1},{a2_idx + 1}: {key}: normalized total: {_normalized_total} normalized correct: {_normalized_correct} SIMILARITY: {0}')
    return test_language_similarity
def eval_dev(dataset_path, top_k, agent1, agent2, logger, flogger, epoch, step, i_batch, in_domain_eval=True, callback=None, store_examples=False, analyze_messages=True, save_messages=False, agent_tag="_", agent_dicts=None, agent_idxs=None, agent_groups=None):
    """
    Function computing development accuracy and other metrics.

    Iterates over the dev set, runs an `exchange` between the two agents on
    each batch, and accumulates:
      - accuracy before ("nc" = no communication) and after ("com") the exchange,
        both for "both agents right" and "at least one agent right";
      - per-shape / per-color / per-shape+color accuracy counts;
      - agent-conditional performance (how agent 1 does given agent 2 got it
        right both before and after comms, and vice versa);
      - conversation lengths, message Hamming-style diversity, confusion
        matrices (written to FLAGS.conf_mat files), and optionally message
        complexity statistics and inter-agent language similarity.

    Args:
        dataset_path: path passed to load_shapeworld_dataset / get_similarity.
        top_k: unused here directly; top-k accuracy is taken from FLAGS.top_k_dev.
        agent1, agent2: the two agent models under evaluation.
        logger, flogger: metric logger and file logger (project types).
        epoch, step, i_batch: training-position identifiers used for logging.
        in_domain_eval: if True evaluate on the "train"-mode (in-domain) split,
            otherwise on FLAGS.dataset_eval_mode (out-of-domain).
        callback: optional callable(agent1, agent2, batch, dict) invoked per batch
            with the full exchange outputs.
        store_examples / analyze_messages / save_messages: enable the respective
            post-hoc analyses on collected correct/incorrect examples.
        agent_tag: tag used when saving messages.
        agent_dicts, agent_idxs, agent_groups: when agent_dicts is not None,
            language similarity between the agents is computed via get_similarity.

    Returns:
        (total_accuracy_nc, total_accuracy_com, atleast1_accuracy_nc,
         atleast1_accuracy_com, extra) where `extra` is a dict of all the
        auxiliary statistics described above.
    """
    extra = dict()
    # Buffers of per-example data for later analysis, split by whether both
    # agents answered correctly after communication.
    correct_to_analyze = {"masked_im_1": [],
                          "masked_im_2": [],
                          "msg_1": [],
                          "msg_1_ent": [],
                          "msg_1_str": [],
                          "msg_2": [],
                          "msg_2_ent": [],
                          "msg_2_str": [],
                          "probs_1": [],
                          "probs_2": [],
                          "p": [],
                          "target": [],
                          "caption": [],
                          "shapes": [],
                          "colors": [],
                          "texts": [],
                          }
    incorrect_to_analyze = {"masked_im_1": [],
                            "masked_im_2": [],
                            "msg_1": [],
                            "msg_1_ent": [],
                            "msg_1_str": [],
                            "msg_2": [],
                            "msg_2_ent": [],
                            "msg_2_str": [],
                            "probs_1": [],
                            "probs_2": [],
                            "p": [],
                            "target": [],
                            "caption": [],
                            "shapes": [],
                            "colors": [],
                            "texts": [],
                            }
    # Keep track of shapes and color accuracy
    shapes_accuracy = {}
    for s in SHAPES:
        shapes_accuracy[s] = {"correct": 0,
                              "total": 0}
    colors_accuracy = {}
    for c in COLORS:
        colors_accuracy[c] = {"correct": 0,
                              "total": 0}
    # Joint shape+color accuracy, keyed "<shape>_<color>".
    shapes_colors_accuracy = {}
    for c in COLORS:
        for s in SHAPES:
            sc = s + '_' + c
            shapes_colors_accuracy[sc] = {"correct": 0,
                                          "total": 0}
    # Keep track of agent specific performance (given other agent gets it both right)
    # Keys encode (right-before-comms, right-after-comms) as two bits.
    agent1_performance = {"11": 0,  # both right
                          "01": 0,  # wrong before comms, right after
                          "10": 0,  # right before comms, wrong after
                          "00": 0,  # both wrong
                          "total": 0}
    agent2_performance = {"11": 0,  # both right
                          "01": 0,  # wrong before comms, right after
                          "10": 0,  # right before comms, wrong after
                          "00": 0,  # both wrong
                          "total": 0}
    # Keep track of conversation lengths
    conversation_lengths_1 = []
    conversation_lengths_2 = []
    # Keep track of message diversity
    hamming_1 = []
    hamming_2 = []
    # Keep track of labels
    true_labels = []
    pred_labels_1_nc = []
    pred_labels_1_com = []
    pred_labels_2_nc = []
    pred_labels_2_com = []
    # Keep track of number of correct observations
    total = 0
    total_correct_nc = 0
    total_correct_com = 0
    atleast1_correct_nc = 0
    atleast1_correct_com = 0
    # Load development images
    if in_domain_eval:
        eval_mode = "train"
        debuglogger.info("Evaluating on in domain validation set")
    else:
        eval_mode = FLAGS.dataset_eval_mode
        debuglogger.info("Evaluating on out of domain validation set")
    dev_loader = load_shapeworld_dataset(dataset_path, FLAGS.glove_path, eval_mode, FLAGS.dataset_size_dev, FLAGS.dataset_type, FLAGS.dataset_name, FLAGS.batch_size_dev, FLAGS.random_seed, FLAGS.shuffle_dev, FLAGS.img_feat, FLAGS.cuda, truncate_final_batch=False)
    _batch_counter = 0
    for batch in dev_loader:
        _batch_counter += 1
        debuglogger.debug(f'Batch {_batch_counter}')
        target = batch["target"]
        im_feats_1 = batch["im_feats_1"]
        im_feats_2 = batch["im_feats_2"]
        p = batch["p"]
        desc = Variable(batch["texts_vec"])
        _batch_size = target.size(0)
        true_labels.append(target.cpu().numpy().reshape(-1))
        # GPU support
        if FLAGS.cuda:
            im_feats_1 = im_feats_1.cuda()
            im_feats_2 = im_feats_2.cuda()
            target = target.cuda()
            desc = desc.cuda()
        data = {"im_feats_1": im_feats_1,
                "im_feats_2": im_feats_2,
                "p": p}
        exchange_args = dict()
        exchange_args["data"] = data
        exchange_args["target"] = target
        exchange_args["desc"] = desc
        exchange_args["train"] = False
        exchange_args["break_early"] = not FLAGS.fixed_exchange
        # Run the communication exchange between the two agents (eval mode).
        s, message_1, message_2, y_all, r = exchange(
            agent1, agent2, exchange_args)
        s_masks_1, s_feats_1, s_probs_1 = s[0]
        s_masks_2, s_feats_2, s_probs_2 = s[1]
        feats_1, probs_1 = message_1
        feats_2, probs_2 = message_2
        # y_all[0]: predictions before communication; y_all[1]: after.
        y_nc = y_all[0]
        y = y_all[1]
        # Mask loss if dynamic exchange length
        if FLAGS.fixed_exchange:
            binary_s_masks = None
            binary_agent1_masks = None
            binary_agent2_masks = None
            bas_agent1_masks = None
            bas_agent2_masks = None
            y1_masks = None
            y2_masks = None
            # Use the predictions from the final exchange step.
            outp_1 = y[0][-1]
            outp_2 = y[1][-1]
        else:
            # TODO
            # NOTE(review): dynamic exchange length is not implemented; on this
            # path outp_1/outp_2 are never bound and the code below would raise
            # NameError — confirm FLAGS.fixed_exchange is always True in eval.
            pass
        # Before communication predictions
        # Obtain predictions, loss and stats agent 1
        (dist_1_nc, maxdist_1_nc, argmax_1_nc, ent_1_nc, nll_loss_1_nc,
         logs_1_nc) = get_classification_loss_and_stats(y_nc[0], target)
        # Obtain predictions, loss and stats agent 2
        (dist_2_nc, maxdist_2_nc, argmax_2_nc, ent_2_nc, nll_loss_2_nc,
         logs_2_nc) = get_classification_loss_and_stats(y_nc[1], target)
        # After communication predictions
        # Obtain predictions, loss and stats agent 1
        (dist_1, maxdist_1, argmax_1, ent_1, nll_loss_1_com,
         logs_1) = get_classification_loss_and_stats(outp_1, target)
        # Obtain predictions, loss and stats agent 2
        (dist_2, maxdist_2, argmax_2, ent_2, nll_loss_2_com,
         logs_2) = get_classification_loss_and_stats(outp_2, target)
        # Store top 1 prediction for confusion matrix
        pred_labels_1_nc.append(argmax_1_nc.cpu().numpy())
        pred_labels_1_com.append(argmax_1.cpu().numpy())
        pred_labels_2_nc.append(argmax_2_nc.cpu().numpy())
        pred_labels_2_com.append(argmax_2.cpu().numpy())
        # Calculate number of correct observations for different types
        accuracy_1_nc, correct_1_nc, top_1_1_nc = calculate_accuracy(
            dist_1_nc, target, FLAGS.batch_size_dev, FLAGS.top_k_dev)
        accuracy_1, correct_1, top_1_1 = calculate_accuracy(
            dist_1, target, FLAGS.batch_size_dev, FLAGS.top_k_dev)
        accuracy_2_nc, correct_2_nc, top_1_2_nc = calculate_accuracy(
            dist_2_nc, target, FLAGS.batch_size_dev, FLAGS.top_k_dev)
        accuracy_2, correct_2, top_1_2 = calculate_accuracy(
            dist_2, target, FLAGS.batch_size_dev, FLAGS.top_k_dev)
        # Summing per-agent correctness: value 2 means both agents right,
        # value > 0 means at least one right.
        batch_correct_nc = correct_1_nc.float() + correct_2_nc.float()
        batch_correct_com = correct_1.float() + correct_2.float()
        batch_correct_top_1_nc = top_1_1_nc.float() + top_1_2_nc.float()
        batch_correct_top_1_com = top_1_1.float() + top_1_2.float()
        debuglogger.debug(f'A1 correct com: {correct_1}')
        debuglogger.debug(f'A2 correct nc: {correct_2}')
        debuglogger.debug(f'eval batch correct com: {batch_correct_com}')
        debuglogger.debug(f'eval batch correct nc: {batch_correct_nc}')
        debuglogger.debug(
            f'eval batch top 1 correct com: {batch_correct_top_1_com}')
        debuglogger.debug(
            f'eval batch top 1 correct nc: {batch_correct_top_1_nc}')
        # Update accuracy counts
        total += float(_batch_size)
        total_correct_nc += (batch_correct_nc == 2).sum()
        total_correct_com += (batch_correct_com == 2).sum()
        atleast1_correct_nc += (batch_correct_nc > 0).sum()
        atleast1_correct_com += (batch_correct_com > 0).sum()
        debuglogger.debug(f'eval total correct com: {total_correct_com}')
        debuglogger.debug(f'eval total correct nc: {total_correct_nc}')
        debuglogger.debug(f'eval atleast1 correct com: {atleast1_correct_com}')
        debuglogger.debug(f'eval atleast1 correct nc: {atleast1_correct_nc}')
        debuglogger.debug(f'batch agent 1 nc correct: {correct_1_nc}')
        debuglogger.debug(f'batch agent 1 com correct: {correct_1}')
        debuglogger.debug(f'batch agent 2 nc correct: {correct_2_nc}')
        debuglogger.debug(f'batch agent 2 com correct: {correct_2}')
        # Track agent specific stats
        # Agent 1 given Agent 2 both correct
        # a2_idx selects examples where agent 2 was right both before and after comms.
        a2_idx = (correct_2_nc.float() + correct_2.float()) == 2
        a1_00 = (a2_idx & ((correct_1_nc.float() + correct_1.float()) == 0)).sum()
        a1_10 = (a2_idx & ((correct_1_nc.float() + (1 - correct_1.float()) == 2))).sum()
        a1_01 = (a2_idx & (((1 - correct_1_nc.float()) + correct_1.float()) == 2)).sum()
        a1_11 = (a2_idx & ((correct_1_nc.float() + correct_1.float()) == 2)).sum()
        a1_tot = a2_idx.sum()
        # The four outcome buckets must partition the selected examples.
        assert a1_tot == (a1_00 + a1_01 + a1_10 + a1_11)
        agent1_performance["11"] += a1_11
        agent1_performance["01"] += a1_01
        agent1_performance["10"] += a1_10
        agent1_performance["00"] += a1_00
        agent1_performance["total"] += a1_tot
        # Agent 2 given Agent 1 both correct
        a1_idx = (correct_1_nc.float() + correct_1.float()) == 2
        a2_00 = (a1_idx & ((correct_2_nc.float() + correct_2.float()) == 0)).sum()
        a2_10 = (a1_idx & ((correct_2_nc.float() + (1 - correct_2.float()) == 2))).sum()
        a2_01 = (a1_idx & (((1 - correct_2_nc.float()) + correct_2.float()) == 2)).sum()
        a2_11 = (a1_idx & ((correct_2_nc.float() + correct_2.float()) == 2)).sum()
        a2_tot = a1_idx.sum()
        assert a2_tot == (a2_00 + a2_01 + a2_10 + a2_11)
        agent2_performance["11"] += a2_11
        agent2_performance["01"] += a2_01
        agent2_performance["10"] += a2_10
        agent2_performance["00"] += a2_00
        agent2_performance["total"] += a2_tot
        debuglogger.debug('Agent 1: total {}, 11: {}, 01: {} 00: {}, 10: {}'.format(
            agent1_performance["total"],
            agent1_performance["11"],
            agent1_performance["01"],
            agent1_performance["00"],
            agent1_performance["10"]))
        if agent1_performance["total"] > 0:
            # Same stats as above but normalized by the conditioning total.
            debuglogger.debug('Agent 1: total {}, 11: {}, 01: {} 00: {}, 10: {}'.format(
                agent1_performance["total"] / agent1_performance["total"],
                agent1_performance["11"] / agent1_performance["total"],
                agent1_performance["01"] / agent1_performance["total"],
                agent1_performance["00"] / agent1_performance["total"],
                agent1_performance["10"] / agent1_performance["total"]))
        debuglogger.debug('Agent 2: total {}, 11: {}, 01: {} 00: {}, 10: {}'.format(
            agent2_performance["total"],
            agent2_performance["11"],
            agent2_performance["01"],
            agent2_performance["00"],
            agent2_performance["10"]))
        if agent2_performance["total"] > 0:
            debuglogger.debug('Agent 2: total {}, 11: {}, 01: {} 00: {}, 10: {}'.format(
                agent2_performance["total"] / agent2_performance["total"],
                agent2_performance["11"] / agent2_performance["total"],
                agent2_performance["01"] / agent2_performance["total"],
                agent2_performance["00"] / agent2_performance["total"],
                agent2_performance["10"] / agent2_performance["total"]))
        # Gather shape and color stats
        correct_indices_nc = (batch_correct_nc == 2)
        correct_indices_com = (batch_correct_com == 2)
        for _i in range(_batch_size):
            if batch['shapes'][_i] is not None:
                shape = batch['shapes'][_i]
                shapes_accuracy[shape]["total"] += 1
                if correct_indices_com[_i]:
                    shapes_accuracy[shape]["correct"] += 1
            if batch['colors'][_i] is not None:
                color = batch['colors'][_i]
                colors_accuracy[color]["total"] += 1
                if correct_indices_com[_i]:
                    colors_accuracy[color]["correct"] += 1
            if (batch['colors'][_i] is not None) and (batch['shapes'][_i] is not None):
                color = batch['colors'][_i]
                shape = batch['shapes'][_i]
                sc = shape + '_' + color
                shapes_colors_accuracy[sc]["total"] += 1
                if correct_indices_com[_i]:
                    shapes_colors_accuracy[sc]["correct"] += 1
            # Time consuming, so only do this if necessary
            if store_examples or analyze_messages or save_messages or FLAGS.report_on_complexity:
                if _batch_counter == 1 and _i == 0:
                    debuglogger.info("Storing message data to analyze later...")
                if correct_indices_com[_i]:
                    correct_to_analyze = add_data_point(batch, _i, correct_to_analyze, feats_1, feats_2, probs_1, probs_2)
                else:
                    incorrect_to_analyze = add_data_point(batch, _i, incorrect_to_analyze, feats_1, feats_2, probs_1, probs_2)
                # One-off debug dump of the collected state partway through a batch.
                if _i == 5:
                    debuglogger.debug(f'Message 1: {feats_1}, probs 1: {probs_1}')
                    debuglogger.debug(f'Message 2: {feats_2}, probs 2: {probs_2}')
                    debuglogger.debug(f'Correct dict: {correct_to_analyze}')
                    debuglogger.debug(f'Incorrect dict: {incorrect_to_analyze}')
                    debuglogger.debug(f'shapes dict: {shapes_accuracy}')
                    debuglogger.debug(f'colors dict: {colors_accuracy}')
        # Keep track of conversation lengths
        # TODO not relevant yet
        conversation_lengths_1 += torch.cat(s_feats_1,
                                            1).data.float().sum(1).view(-1).tolist()
        conversation_lengths_2 += torch.cat(s_feats_2,
                                            1).data.float().sum(1).view(-1).tolist()
        debuglogger.debug(f'Conversation length 1: {conversation_lengths_1}')
        debuglogger.debug(f'Conversation length 2: {conversation_lengths_2}')
        # Keep track of message diversity
        # Mean L1 distance between consecutive messages in the exchange
        # (starting from a zero message); larger means more varied messages.
        mean_hamming_1 = 0
        mean_hamming_2 = 0
        prev_1 = torch.FloatTensor(_batch_size, FLAGS.m_dim).fill_(0)
        prev_2 = torch.FloatTensor(_batch_size, FLAGS.m_dim).fill_(0)
        for msg in feats_1:
            mean_hamming_1 += (msg.data.cpu() - prev_1).abs().sum(1).mean()
            prev_1 = msg.data.cpu()
        mean_hamming_1 = mean_hamming_1 / float(len(feats_1))
        for msg in feats_2:
            mean_hamming_2 += (msg.data.cpu() - prev_2).abs().sum(1).mean()
            prev_2 = msg.data.cpu()
        mean_hamming_2 = mean_hamming_2 / float(len(feats_2))
        hamming_1.append(mean_hamming_1)
        hamming_2.append(mean_hamming_2)
        if callback is not None:
            # Hand the full exchange state to the caller-supplied hook.
            callback_dict = dict(
                s_masks_1=s_masks_1,
                s_feats_1=s_feats_1,
                s_probs_1=s_probs_1,
                s_masks_2=s_masks_2,
                s_feats_2=s_feats_2,
                s_probs_2=s_probs_2,
                feats_1=feats_1,
                feats_2=feats_2,
                probs_1=probs_1,
                probs_2=probs_2,
                y_nc=y_nc,
                y=y)
            callback(agent1, agent2, batch, callback_dict)
    # Optional inter-agent language similarity analysis.
    test_language_similarity = {}
    if agent_dicts is not None:
        test_language_similarity = get_similarity(dataset_path, in_domain_eval, agent1, agent2, agent_groups[0], agent_groups[1], agent_idxs[0], agent_idxs[1], agent_dicts[0], agent_dicts[1], logger, flogger)
        debuglogger.info(f'Total msg changed: {test_language_similarity["total"]}, Correct: {sum(test_language_similarity["correct"])}')
    if store_examples:
        debuglogger.info(f'Finishing iterating through dev set, storing examples...')
        store_exemplar_batch(correct_to_analyze, "correct", logger, flogger)
        store_exemplar_batch(incorrect_to_analyze, "incorrect", logger, flogger)
    if analyze_messages:
        debuglogger.info(f'Analyzing messages...')
        run_analyze_messages(correct_to_analyze, "correct", logger, flogger, epoch, step, i_batch)
    if save_messages:
        debuglogger.info(f'Saving messages...')
        save_messages_and_stats(correct_to_analyze, incorrect_to_analyze, agent_tag)
    # Message complexity report: for each slice (all / correct / incorrect /
    # per-agent) compute average per-message entropy, the entropy of the
    # average message, and distinct-message counts.
    complexity_stats = {}
    if FLAGS.report_on_complexity:
        debuglogger.info(f'Reporting on message complexity')
        # Calc overall stats
        avg_msg = calculate_average_message([correct_to_analyze['msg_1'], incorrect_to_analyze['msg_1'], correct_to_analyze['msg_2'], incorrect_to_analyze['msg_2']])
        avg_ent = calculate_average_entropy([correct_to_analyze['msg_1_ent'], incorrect_to_analyze['msg_1_ent'], correct_to_analyze['msg_2_ent'], incorrect_to_analyze['msg_2_ent']])
        ent_avg_msg = None if avg_msg is None else calculate_entropy(avg_msg)
        print(f'Entropy of average message: {ent_avg_msg}')
        t, d = count_distinct_messages([correct_to_analyze['msg_1_str'], incorrect_to_analyze['msg_1_str'], correct_to_analyze['msg_2_str'], incorrect_to_analyze['msg_2_str']])
        print(f'Total messages: {t}, Num distinct messages: {d}')
        complexity_stats['total'] = {'avg_ent': avg_ent, 'ent_avg_msg': ent_avg_msg, 'num_msg': t, 'distinct_msg': d}
        # Correct answers only
        avg_msg = calculate_average_message([correct_to_analyze['msg_1'], correct_to_analyze['msg_2']])
        avg_ent = calculate_average_entropy([correct_to_analyze['msg_1_ent'], correct_to_analyze['msg_2_ent']])
        ent_avg_msg = None if avg_msg is None else calculate_entropy(avg_msg)
        t, d = count_distinct_messages([correct_to_analyze['msg_1_str'], correct_to_analyze['msg_2_str']])
        complexity_stats['correct'] = {'avg_ent': avg_ent, 'ent_avg_msg': ent_avg_msg, 'num_msg': t, 'distinct_msg': d}
        # Incorrect answers only
        avg_msg = calculate_average_message([incorrect_to_analyze['msg_1'], incorrect_to_analyze['msg_2']])
        avg_ent = calculate_average_entropy([incorrect_to_analyze['msg_1_ent'], incorrect_to_analyze['msg_2_ent']])
        ent_avg_msg = None if avg_msg is None else calculate_entropy(avg_msg)
        t, d = count_distinct_messages([incorrect_to_analyze['msg_1_str'], incorrect_to_analyze['msg_2_str']])
        complexity_stats['incorrect'] = {'avg_ent': avg_ent, 'ent_avg_msg': ent_avg_msg, 'num_msg': t, 'distinct_msg': d}
        # Agent 1 total
        avg_msg = calculate_average_message([correct_to_analyze['msg_1'], incorrect_to_analyze['msg_1']])
        avg_ent = calculate_average_entropy([correct_to_analyze['msg_1_ent'], incorrect_to_analyze['msg_1_ent']])
        ent_avg_msg = None if avg_msg is None else calculate_entropy(avg_msg)
        t, d = count_distinct_messages([correct_to_analyze['msg_1_str'], incorrect_to_analyze['msg_1_str']])
        complexity_stats['a1_total'] = {'avg_ent': avg_ent, 'ent_avg_msg': ent_avg_msg, 'num_msg': t, 'distinct_msg': d}
        # Agent 1 correct
        avg_msg = calculate_average_message([correct_to_analyze['msg_1']])
        avg_ent = calculate_average_entropy([correct_to_analyze['msg_1_ent']])
        ent_avg_msg = None if avg_msg is None else calculate_entropy(avg_msg)
        t, d = count_distinct_messages([correct_to_analyze['msg_1_str']])
        complexity_stats['a1_correct'] = {'avg_ent': avg_ent, 'ent_avg_msg': ent_avg_msg, 'num_msg': t, 'distinct_msg': d}
        # Agent 1 incorrect
        avg_msg = calculate_average_message([incorrect_to_analyze['msg_1']])
        avg_ent = calculate_average_entropy([incorrect_to_analyze['msg_1_ent']])
        ent_avg_msg = None if avg_msg is None else calculate_entropy(avg_msg)
        t, d = count_distinct_messages([incorrect_to_analyze['msg_1_str']])
        complexity_stats['a1_incorrect'] = {'avg_ent': avg_ent, 'ent_avg_msg': ent_avg_msg, 'num_msg': t, 'distinct_msg': d}
        # Agent 2 total
        avg_msg = calculate_average_message([correct_to_analyze['msg_2'], incorrect_to_analyze['msg_2']])
        avg_ent = calculate_average_entropy([correct_to_analyze['msg_2_ent'], incorrect_to_analyze['msg_2_ent']])
        ent_avg_msg = None if avg_msg is None else calculate_entropy(avg_msg)
        t, d = count_distinct_messages([correct_to_analyze['msg_2_str'], incorrect_to_analyze['msg_2_str']])
        complexity_stats['a2_total'] = {'avg_ent': avg_ent, 'ent_avg_msg': ent_avg_msg, 'num_msg': t, 'distinct_msg': d}
        # Agent 2 correct
        avg_msg = calculate_average_message([correct_to_analyze['msg_2']])
        avg_ent = calculate_average_entropy([correct_to_analyze['msg_2_ent']])
        ent_avg_msg = None if avg_msg is None else calculate_entropy(avg_msg)
        t, d = count_distinct_messages([correct_to_analyze['msg_2_str']])
        complexity_stats['a2_correct'] = {'avg_ent': avg_ent, 'ent_avg_msg': ent_avg_msg, 'num_msg': t, 'distinct_msg': d}
        # Agent 2 incorrect
        avg_msg = calculate_average_message([incorrect_to_analyze['msg_2']])
        avg_ent = calculate_average_entropy([incorrect_to_analyze['msg_2_ent']])
        ent_avg_msg = None if avg_msg is None else calculate_entropy(avg_msg)
        t, d = count_distinct_messages([incorrect_to_analyze['msg_2_str']])
        complexity_stats['a2_incorrect'] = {'avg_ent': avg_ent, 'ent_avg_msg': ent_avg_msg, 'num_msg': t, 'distinct_msg': d}
    # Print confusion matrix
    # Flatten per-batch label arrays and write one CSV confusion matrix per
    # agent/phase to files derived from FLAGS.conf_mat.
    true_labels = np.concatenate(true_labels).reshape(-1)
    pred_labels_1_nc = np.concatenate(pred_labels_1_nc).reshape(-1)
    pred_labels_1_com = np.concatenate(pred_labels_1_com).reshape(-1)
    pred_labels_2_nc = np.concatenate(pred_labels_2_nc).reshape(-1)
    pred_labels_2_com = np.concatenate(pred_labels_2_com).reshape(-1)
    np.savetxt(FLAGS.conf_mat + "_1_nc", confusion_matrix(
        true_labels, pred_labels_1_nc), delimiter=',', fmt='%d')
    np.savetxt(FLAGS.conf_mat + "_1_com", confusion_matrix(
        true_labels, pred_labels_1_com), delimiter=',', fmt='%d')
    np.savetxt(FLAGS.conf_mat + "_2_nc", confusion_matrix(
        true_labels, pred_labels_2_nc), delimiter=',', fmt='%d')
    np.savetxt(FLAGS.conf_mat + "_2_com", confusion_matrix(
        true_labels, pred_labels_2_com), delimiter=',', fmt='%d')
    # Compute statistics
    conversation_lengths_1 = np.array(conversation_lengths_1)
    conversation_lengths_2 = np.array(conversation_lengths_2)
    hamming_1 = np.array(hamming_1)
    hamming_2 = np.array(hamming_2)
    extra['conversation_lengths_1_mean'] = conversation_lengths_1.mean()
    extra['conversation_lengths_1_std'] = conversation_lengths_1.std()
    extra['conversation_lengths_2_mean'] = conversation_lengths_2.mean()
    extra['conversation_lengths_2_std'] = conversation_lengths_2.std()
    extra['hamming_1_mean'] = hamming_1.mean()
    extra['hamming_2_mean'] = hamming_2.mean()
    extra['shapes_accuracy'] = shapes_accuracy
    extra['colors_accuracy'] = colors_accuracy
    extra['shapes_colors_accuracy'] = shapes_colors_accuracy
    extra['agent1_performance'] = agent1_performance
    extra['agent2_performance'] = agent2_performance
    extra['test_language_similarity'] = test_language_similarity
    extra['complexity_stats'] = complexity_stats
    debuglogger.info(f'Eval total size: {total}')
    debuglogger.info(f'Eval total correct com: {total_correct_com}')
    # NOTE(review): the correct counts are tensor .sum() results, so these
    # ratios may be 0-dim tensors rather than Python floats — confirm callers
    # handle that.
    total_accuracy_nc = total_correct_nc / total
    total_accuracy_com = total_correct_com / total
    atleast1_accuracy_nc = atleast1_correct_nc / total
    atleast1_accuracy_com = atleast1_correct_com / total
    # Return accuracy
    return total_accuracy_nc, total_accuracy_com, atleast1_accuracy_nc, atleast1_accuracy_com, extra
def get_and_log_dev_performance(agent1, agent2, dataset_path, in_domain_eval, dev_accuracy_log, logger, flogger, domain, epoch, step, i_batch, store_examples, analyze_messages, save_messages, agent_tag, agent_dicts=None, agent_idxs=None, agent_groups=None):
'''Logs performance on the dev set'''
total_accuracy_nc, total_accuracy_com, atleast1_accuracy_nc, atleast1_accuracy_com, extra = eval_dev(
dataset_path, FLAGS.top_k_dev, agent1, agent2, logger, flogger, epoch, step, i_batch, in_domain_eval=in_domain_eval, callback=None, store_examples=store_examples, analyze_messages=analyze_messages, save_messages=save_messages, agent_tag=agent_tag, agent_dicts=agent_dicts, agent_idxs=agent_idxs, agent_groups=agent_groups)
dev_accuracy_log['total_acc_both_nc'].append(total_accuracy_nc)
dev_accuracy_log['total_acc_both_com'].append(total_accuracy_com)
dev_accuracy_log['total_acc_atl1_nc'].append(atleast1_accuracy_nc)
dev_accuracy_log['total_acc_atl1_com'].append(atleast1_accuracy_com)
logger.log(key=domain + " Development Accuracy, both right, no comms", val=dev_accuracy_log['total_acc_both_nc'][-1], step=step)
logger.log(key=domain + "Development Accuracy, both right, after comms", val=dev_accuracy_log['total_acc_both_com'][-1], step=step)
logger.log(key=domain + "Development Accuracy, at least 1 right, no comms", val=dev_accuracy_log['total_acc_atl1_nc'][-1], step=step)
logger.log(key=domain + "Development Accuracy, at least 1 right, after comms", val=dev_accuracy_log['total_acc_atl1_com'][-1], step=step)
logger.log(key=domain + "Conversation Length A1 (avg)",
val=extra['conversation_lengths_1_mean'], step=step)
logger.log(key=domain + "Conversation Length A1 (std)",
val=extra['conversation_lengths_1_std'], step=step)
logger.log(key=domain + "Conversation Length A2 (avg)",
val=extra['conversation_lengths_2_mean'], step=step)
logger.log(key=domain + "Conversation Length A2 (std)",
val=extra['conversation_lengths_2_std'], step=step)
logger.log(key=domain + "Hamming 1 (avg)",
val=extra['hamming_1_mean'], step=step)
logger.log(key=domain + "Hamming 2 (avg)",
val=extra['hamming_2_mean'], step=step)
if extra['agent1_performance']["total"] > 0:
logger.log(key=domain + " Development Accuracy: Agent 1 given Agent 2 both right: 01: ",
val=extra['agent1_performance']["01"] / extra['agent1_performance']["total"], step=step)
logger.log(key=domain + " Development Accuracy: Agent 1 given Agent 2 both right: 11: ",
val=extra['agent1_performance']["11"] / extra['agent1_performance']["total"], step=step)
logger.log(key=domain + " Development Accuracy: Agent 1 given Agent 2 both right: 00: ",
val=extra['agent1_performance']["00"] / extra['agent1_performance']["total"], step=step)
logger.log(key=domain + " Development Accuracy: Agent 1 given Agent 2 both right: 10: ",
val=extra['agent1_performance']["10"] / extra['agent1_performance']["total"], step=step)
else:
logger.log(key=domain + " Development Accuracy: Agent 1 given Agent 2 both right: 0 examples",
val=None, step=step)
if extra['agent2_performance']["total"] > 0:
logger.log(key=domain + " Development Accuracy: Agent 2 given Agent 1 both right: 01: ",
val=extra['agent2_performance']["01"] / extra['agent2_performance']["total"], step=step)
logger.log(key=domain + " Development Accuracy: Agent 2 given Agent 1 both right: 11: ",
val=extra['agent2_performance']["11"] / extra['agent2_performance']["total"], step=step)
logger.log(key=domain + " Development Accuracy: Agent 2 given Agent 1 both right: 00: ",
val=extra['agent2_performance']["00"] / extra['agent2_performance']["total"], step=step)
logger.log(key=domain + " Development Accuracy: Agent 2 given Agent 1 both right: 10: ",
val=extra['agent2_performance']["10"] / extra['agent2_performance']["total"], step=step)
else:
logger.log(key=domain + " Development Accuracy: Agent 1 given Agent 2 both right: 0 examples",
val=None, step=step)
detail_total = 0
detail_correct = 0
for k in extra['shapes_accuracy']:
if extra['shapes_accuracy'][k]['total'] > 0:
logger.log(key=domain + " Development Accuracy: " + k + " ", val=extra['shapes_accuracy'][k]['correct'] / extra['shapes_accuracy'][k]['total'], step=step)
detail_total += extra['shapes_accuracy'][k]['total']
detail_correct += extra['shapes_accuracy'][k]['correct']
logger.log(key=domain + " Development Accuracy: shapes ", val=detail_correct / detail_total, step=step)
detail_total = 0
detail_correct = 0
for k in extra['colors_accuracy']:
if extra['colors_accuracy'][k]['total'] > 0:
logger.log(key=domain + " Development Accuracy: " + k + " ", val=extra['colors_accuracy'][k]['correct'] / extra['colors_accuracy'][k]['total'], step=step)
detail_total += extra['colors_accuracy'][k]['total']
detail_correct += extra['colors_accuracy'][k]['correct']
logger.log(key=domain + " Development Accuracy: colors ", val=detail_correct / detail_total, step=step)
detail_total = 0
detail_correct = 0
for k in extra['shapes_colors_accuracy']:
if extra['shapes_colors_accuracy'][k]['total'] > 0:
logger.log(key=domain + " Development Accuracy: " + k + " ", val=extra['shapes_colors_accuracy'][k]['correct'] / extra['shapes_colors_accuracy'][k]['total'], step=step)
detail_total += extra['shapes_colors_accuracy'][k]['total']
detail_correct += extra['shapes_colors_accuracy'][k]['correct']
logger.log(key=domain + " Development Accuracy: shapes_colors ", val=detail_correct / detail_total, step=step)
flogger.Log("Epoch: {} Step: {} Batch: {} {} Development Accuracy, both right, no comms: {}".format(
epoch, step, i_batch, domain, dev_accuracy_log['total_acc_both_nc'][-1]))
flogger.Log("Epoch: {} Step: {} Batch: {} {} Development Accuracy, both right, after comms: {}".format(
epoch, step, i_batch, domain, dev_accuracy_log['total_acc_both_com'][-1]))
flogger.Log("Epoch: {} Step: {} Batch: {} {} Development Accuracy, at least right, no comms: {}".format(
epoch, step, i_batch, domain, dev_accuracy_log['total_acc_atl1_nc'][-1]))
flogger.Log("Epoch: {} Step: {} Batch: {} {} Development Accuracy, at least 1 right, after comms: {}".format(
epoch, step, i_batch, domain, dev_accuracy_log['total_acc_atl1_com'][-1]))
flogger.Log("Epoch: {} Step: {} Batch: {} {} Development Accuracy CHECK, both right, no comms: {}".format(
epoch, step, i_batch, domain, total_accuracy_nc))
flogger.Log("Epoch: {} Step: {} Batch: {} {} Development Accuracy CHECK, both right, after comms: {}".format(
epoch, step, i_batch, domain, total_accuracy_com))
flogger.Log("Epoch: {} Step: {} Batch: {} {} Development Accuracy CHECK, at least right, no comms: {}".format(
epoch, step, i_batch, domain, atleast1_accuracy_nc))
flogger.Log("Epoch: {} Step: {} Batch: {} {} Development Accuracy CHECK, at least 1 right, after comms: {}".format(
epoch, step, i_batch, domain, atleast1_accuracy_com))
flogger.Log("Epoch: {} Step: {} Batch: {} {} Conversation Length 1 (avg/std): {}/{}".format(
epoch, step, i_batch, domain, extra['conversation_lengths_1_mean'], extra['conversation_lengths_1_std']))
flogger.Log("Epoch: {} Step: {} Batch: {} {} Conversation Length 2 (avg/std): {}/{}".format(
epoch, step, i_batch, domain, extra['conversation_lengths_2_mean'], extra['conversation_lengths_2_std']))
flogger.Log("Epoch: {} Step: {} Batch: {} {} Mean Hamming Distance (1/2): {}/{}"
.format(epoch, step, i_batch, domain, extra['hamming_1_mean'], extra['hamming_2_mean']))
flogger.Log('Agent 1: total {}, 11: {}, 01: {} 00: {}, 10: {}'.format(
extra["agent1_performance"]["total"],
extra["agent1_performance"]["11"],
extra["agent1_performance"]["01"],
extra["agent1_performance"]["00"],
extra["agent1_performance"]["10"]))
if extra["agent1_performance"]["total"] > 0:
flogger.Log('Agent 1: total {}, 11: {}, 01: {} 00: {}, 10: {}'.format(
extra["agent1_performance"]["total"] / extra["agent1_performance"]["total"],
extra["agent1_performance"]["11"] / extra["agent1_performance"]["total"],
extra["agent1_performance"]["01"] / extra["agent1_performance"]["total"],
extra["agent1_performance"]["00"] / extra["agent1_performance"]["total"],
extra["agent1_performance"]["10"] / extra["agent1_performance"]["total"]))
flogger.Log('Agent 2: total {}, 11: {}, 01: {} 00: {}, 10: {}'.format(
extra["agent2_performance"]["total"],
extra["agent2_performance"]["11"],
extra["agent2_performance"]["01"],
extra["agent2_performance"]["00"],
extra["agent2_performance"]["10"]))
if extra["agent2_performance"]["total"] > 0:
flogger.Log('Agent 2: total {}, 11: {}, 01: {} 00: {}, 10: {}'.format(
extra["agent2_performance"]["total"] / extra["agent2_performance"]["total"],
extra["agent2_performance"]["11"] / extra["agent2_performance"]["total"],
extra["agent2_performance"]["01"] / extra["agent2_performance"]["total"],
extra["agent2_performance"]["00"] / extra["agent2_performance"]["total"],
extra["agent2_performance"]["10"] / extra["agent2_performance"]["total"]))
detail_total = 0
detail_correct = 0
for k in extra['shapes_accuracy']:
if extra['shapes_accuracy'][k]['total'] > 0:
flogger.Log('{}: total: {}, correct: {}, accuracy: {}'.format(
k,
extra['shapes_accuracy'][k]['total'],
extra['shapes_accuracy'][k]['correct'],
extra['shapes_accuracy'][k]['correct'] / extra['shapes_accuracy'][k]['total']))
detail_total += extra['shapes_accuracy'][k]['total']
detail_correct += extra['shapes_accuracy'][k]['correct']
flogger.Log('{}: {}: total: {}, correct: {}, accuracy: {}'.format(
domain, 'TOTAL SHAPES', detail_total, detail_correct, detail_correct / detail_total))
detail_total = 0
detail_correct = 0
for k in extra['colors_accuracy']:
if extra['colors_accuracy'][k]['total'] > 0:
flogger.Log('{}: total: {}, correct: {}, accuracy: {}'.format(
k,
extra['colors_accuracy'][k]['total'],
extra['colors_accuracy'][k]['correct'],
extra['colors_accuracy'][k]['correct'] / extra['colors_accuracy'][k]['total']))
detail_total += extra['colors_accuracy'][k]['total']
detail_correct += extra['colors_accuracy'][k]['correct']
flogger.Log('{}: {}: total: {}, correct: {}, accuracy: {}'.format(
domain, 'TOTAL COLORS', detail_total, detail_correct, detail_correct / detail_total))
detail_total = 0
detail_correct = 0
ood_total = 0
ood_correct = 0
id_total = 0
id_correct = 0
for k in extra['shapes_colors_accuracy']:
if extra['shapes_colors_accuracy'][k]['total'] > 0:
flogger.Log('{}: total: {}, correct: {}, accuracy: {}'.format(
k,
extra['shapes_colors_accuracy'][k]['total'],
extra['shapes_colors_accuracy'][k]['correct'],
extra['shapes_colors_accuracy'][k]['correct'] / extra['shapes_colors_accuracy'][k]['total']))
detail_total += extra['shapes_colors_accuracy'][k]['total']
detail_correct += extra['shapes_colors_accuracy'][k]['correct']
if k in OOD_EXAMPLES:
ood_total += extra['shapes_colors_accuracy'][k]['total']
ood_correct += extra['shapes_colors_accuracy'][k]['correct']
else:
id_total += extra['shapes_colors_accuracy'][k]['total']
id_correct += extra['shapes_colors_accuracy'][k]['correct']
flogger.Log('{}: {}: total: {}, correct: {}, accuracy: {}'.format(
domain, 'TOTAL SHAPES_COLORS', detail_total, detail_correct, detail_correct / detail_total))
if id_total > 0:
flogger.Log('{}: {}: total: {}, correct: {}, accuracy: {}'.format(
domain, 'TOTAL SHAPES_COLORS in domain', id_total, id_correct, id_correct / id_total))
if ood_total > 0:
flogger.Log('{}: {}: total: {}, correct: {}, accuracy: {}'.format(
domain, 'TOTAL SHAPES_COLORS out of domain', ood_total, ood_correct, ood_correct / ood_total))
if FLAGS.report_on_complexity:
flogger.Log(f'{domain}: complexity report: total: avg ent: {extra["complexity_stats"]["total"]["avg_ent"]}, ent_avg_msg: {extra["complexity_stats"]["total"]["ent_avg_msg"]}, num_msgs: {extra["complexity_stats"]["total"]["num_msg"]}, distinct msg: {extra["complexity_stats"]["total"]["distinct_msg"]}')
flogger.Log(f'{domain}: complexity report: correct: avg ent: {extra["complexity_stats"]["correct"]["avg_ent"]}, ent_avg_msg: {extra["complexity_stats"]["correct"]["ent_avg_msg"]}, num_msgs: {extra["complexity_stats"]["correct"]["num_msg"]}, distinct msg: {extra["complexity_stats"]["correct"]["distinct_msg"]}')
flogger.Log(f'{domain}: complexity report: incorrect: avg ent: {extra["complexity_stats"]["incorrect"]["avg_ent"]}, ent_avg_msg: {extra["complexity_stats"]["incorrect"]["ent_avg_msg"]}, num_msgs: {extra["complexity_stats"]["incorrect"]["num_msg"]}, distinct msg: {extra["complexity_stats"]["incorrect"]["distinct_msg"]}')
flogger.Log(f'{domain}: complexity report: a1_total: avg ent: {extra["complexity_stats"]["a1_total"]["avg_ent"]}, ent_avg_msg: {extra["complexity_stats"]["a1_total"]["ent_avg_msg"]}, num_msgs: {extra["complexity_stats"]["a1_total"]["num_msg"]}, distinct msg: {extra["complexity_stats"]["a1_total"]["distinct_msg"]}')
flogger.Log(f'{domain}: complexity report: a1_correct: avg ent: {extra["complexity_stats"]["a1_correct"]["avg_ent"]}, ent_avg_msg: {extra["complexity_stats"]["a1_correct"]["ent_avg_msg"]}, num_msgs: {extra["complexity_stats"]["a1_correct"]["num_msg"]}, distinct msg: {extra["complexity_stats"]["a1_correct"]["distinct_msg"]}')
flogger.Log(f'{domain}: complexity report: a1_incorrect: avg ent: {extra["complexity_stats"]["a1_incorrect"]["avg_ent"]}, ent_avg_msg: {extra["complexity_stats"]["a1_incorrect"]["ent_avg_msg"]}, num_msgs: {extra["complexity_stats"]["a1_incorrect"]["num_msg"]}, distinct msg: {extra["complexity_stats"]["a1_incorrect"]["distinct_msg"]}')
flogger.Log(f'{domain}: complexity report: a2_total: avg ent: {extra["complexity_stats"]["a2_total"]["avg_ent"]}, ent_avg_msg: {extra["complexity_stats"]["a2_total"]["ent_avg_msg"]}, num_msgs: {extra["complexity_stats"]["a2_total"]["num_msg"]}, distinct msg: {extra["complexity_stats"]["a2_total"]["distinct_msg"]}')
flogger.Log(f'{domain}: complexity report: a2_correct: avg ent: {extra["complexity_stats"]["a2_correct"]["avg_ent"]}, ent_avg_msg: {extra["complexity_stats"]["a2_correct"]["ent_avg_msg"]}, num_msgs: {extra["complexity_stats"]["a2_correct"]["num_msg"]}, distinct msg: {extra["complexity_stats"]["a2_correct"]["distinct_msg"]}')
flogger.Log(f'{domain}: complexity report: a2_incorrect: avg ent: {extra["complexity_stats"]["a2_incorrect"]["avg_ent"]}, ent_avg_msg: {extra["complexity_stats"]["a2_incorrect"]["ent_avg_msg"]}, num_msgs: {extra["complexity_stats"]["a2_incorrect"]["num_msg"]}, distinct msg: {extra["complexity_stats"]["a2_incorrect"]["distinct_msg"]}')
if agent_dicts is not None:
flogger.Log('Test language similarity performance')
comp_dict = {}
comp_data = extra['test_language_similarity']
correct_orig_correct = 0
correct_orig_incorrect = 0
orig_correct = 0
orig_incorrect = 0
correct = 0
for i in range(len(comp_data['orig_shape'])):
if comp_data['shape'][i] is not None:
transform = comp_data['orig_shape'][i] + '_' + comp_data['shape'][i]
else:
transform = comp_data['orig_color'][i] + '_' + comp_data['color'][i]
if transform not in comp_dict:
comp_dict[transform] = {'total': 0, 'correct': 0, 'p_correct': 0,
'orig_correct': {'total': 0, 'correct': 0, 'p_correct': 0},
'orig_incorrect': {'total': 0, 'correct': 0, 'p_correct': 0}}
comp_dict[transform]['total'] += 1
if comp_data['correct'][i] == 1:
comp_dict[transform]['correct'] += 1
correct += 1
if comp_data['originally_correct'][i] == 1:
comp_dict[transform]['orig_correct']['total'] += 1
orig_correct += 1
if comp_data['correct'][i]:
comp_dict[transform]['orig_correct']['correct'] += 1
correct_orig_correct += 1
else:
comp_dict[transform]['orig_incorrect']['total'] += 1
orig_incorrect += 1
if comp_data['correct'][i]:
comp_dict[transform]['orig_incorrect']['correct'] += 1
correct_orig_incorrect += 1
flogger.Log(f'Test comp: total: {len(comp_data["orig_shape"])} orig correct: {orig_correct} orig incorrect: {orig_incorrect}')
flogger.Log(f'Total correct: {correct}/{correct / len(comp_data["orig_shape"])}, orig_correct_correct {correct_orig_correct}/{correct_orig_correct / orig_correct}, orig_incorrect_correct {correct_orig_incorrect}/{correct_orig_incorrect / orig_incorrect}')
for key in comp_dict:
comp_dict[key]['p_correct'] = comp_dict[key]['correct'] / comp_dict[key]['total']
if comp_dict[key]['orig_correct']['total'] > 0:
comp_dict[key]['orig_correct']['p_correct'] = comp_dict[key]['orig_correct']['correct'] / comp_dict[key]['orig_correct']['total']
if comp_dict[key]['orig_incorrect']['total'] > 0:
comp_dict[key]['orig_incorrect']['p_correct'] = comp_dict[key]['orig_incorrect']['correct'] / comp_dict[key]['orig_incorrect']['total']
flogger.Log(f'Transform {key}: total: {comp_dict[key]["total"]}:{comp_dict[key]["correct"]}/{comp_dict[key]["p_correct"]}')
flogger.Log(f'Transform {key}: orig_correct: {comp_dict[key]["orig_correct"]["total"]}:{comp_dict[key]["orig_correct"]["correct"]}/{comp_dict[key]["orig_correct"]["p_correct"]}')
flogger.Log(f'Transform {key}: orig_incorrect: {comp_dict[key]["orig_incorrect"]["total"]}:{comp_dict[key]["orig_incorrect"]["correct"]}/{comp_dict[key]["orig_incorrect"]["p_correct"]}')
return dev_accuracy_log, total_accuracy_com
def _eval_agent_pair(models_dict, dev_accuracy_log, logger, flogger, epoch, step, i_batch, store_examples, analyze_messages, save_messages, i, j):
    """Evaluate the (i, j) agent pair on the in-domain dev set.

    When i == j the same model would otherwise talk to itself through a shared
    hidden state, so a fresh Agent is constructed and loaded with a copy of
    agent i's weights before evaluation.

    Args:
        models_dict: mapping "agentK" -> Agent instance (1-based K).
        dev_accuracy_log / logger / flogger: accuracy storage plus visdom and
            file loggers, forwarded to get_and_log_dev_performance.
        epoch, step, i_batch: training progress counters used for logging.
        store_examples, analyze_messages, save_messages: evaluation options
            forwarded unchanged.
        i, j: zero-based indices of the two agents to pair up.
    """
    agent1 = models_dict["agent" + str(i + 1)]
    agent2 = models_dict["agent" + str(j + 1)]
    agent_tag = f'A_{i + 1}_{j + 1}'
    if i == j:
        # Create a copy of agents playing with themselves to avoid sharing the hidden state
        agent2 = Agent(im_feature_type=FLAGS.img_feat,
                       im_feat_dim=FLAGS.img_feat_dim,
                       h_dim=FLAGS.h_dim,
                       m_dim=FLAGS.m_dim,
                       desc_dim=FLAGS.desc_dim,
                       num_classes=FLAGS.num_classes,
                       s_dim=FLAGS.s_dim,
                       use_binary=FLAGS.use_binary,
                       use_attn=FLAGS.visual_attn,
                       attn_dim=FLAGS.attn_dim,
                       use_MLP=FLAGS.use_MLP,
                       cuda=FLAGS.cuda,
                       im_from_scratch=FLAGS.improc_from_scratch,
                       dropout=FLAGS.dropout)
        agent2.load_state_dict(agent1.state_dict())
        if FLAGS.cuda:
            agent2.cuda()
    domain = f'In Domain Dev: Agent {i + 1} | Agent {j + 1}, ids [{id(agent1)}]/[{id(agent2)}]: '
    _, _ = get_and_log_dev_performance(agent1, agent2, FLAGS.dataset_indomain_valid_path, True, dev_accuracy_log, logger, flogger, domain, epoch, step, i_batch, store_examples, analyze_messages, save_messages, agent_tag)


def eval_community(eval_list, models_dict, dev_accuracy_log, logger, flogger, epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag=""):
    """Evaluate every scheduled agent pairing for a trained community.

    eval_list holds six entries, one per evaluation category (self play,
    within-pool play, cross-pool play, each split by whether the agents
    trained together).  Each entry is either a flat list of (i, j) index
    pairs or a list of such lists; None entries are placeholders and are
    skipped.  Results are logged via logger/flogger; nothing is returned.

    Note: the previous implementation duplicated the entire pair-evaluation
    body in both the nested-list and flat-list branches; the shared logic now
    lives in _eval_agent_pair.
    """
    eval_type = {1: "Self com, agent connected to multiple pools",
                 2: "Self com, agent connected to just one pool",
                 3: "Within pool com, different agents, trained together",
                 4: "Within pool com, different agents, never trained together",
                 5: "Cross pool com, different agents, trained together",
                 6: "Cross pool com, different agents, never trained together"}
    for num, items in enumerate(eval_list):
        logger.log(key="Dev accuracy: ",
                   val=eval_type[num + 1], step=step)
        flogger.Log(f'Dev accuracy: {eval_type[num + 1]}, step: {step}')
        print(items)
        # Flatten one level when the category holds a list of lists.
        if type(items[0]) is list:
            pairs = [elem for nested in items for elem in nested]
        else:
            pairs = items
        for elem in pairs:
            if elem is None:
                continue
            (i, j) = elem
            _eval_agent_pair(models_dict, dev_accuracy_log, logger, flogger, epoch, step, i_batch, store_examples, analyze_messages, save_messages, i, j)
def corrupt_message(corrupt_region, agent, binary_message):
    """Flip the bits of binary_message inside corrupt_region.

    Args:
        corrupt_region: region spec passed to build_mask describing which
            message bits to corrupt.
        agent: the agent whose message dimensionality (agent.m_dim) sizes
            the mask.
        binary_message: batch of 0/1 message vectors (batch, m_dim).
    Returns:
        The message with every masked bit flipped.
    """
    # Obtain mask
    mask = Variable(build_mask(corrupt_region, agent.m_dim))
    # BUG FIX: the original referenced the undefined name `agent_1` here,
    # which raised NameError whenever corruption was enabled; the parameter
    # is `agent` (already used for build_mask above).
    mask_broadcast = mask.view(1, agent.m_dim).expand_as(binary_message)
    # Subtract the mask to change values, but need to get absolute value
    # to set -1 values to 1 to essentially "flip" all the bits.
    binary_message = (binary_message - mask_broadcast).abs()
    return binary_message
def exchange(a1, a2, exchange_args):
    """Run a batched conversation between two agents.
    There are two parts to an exchange:
    1. Each agent receives part of an image, and uses this to select the corresponding text from a selection of texts
    2. Agents communicate for a number of steps, then each select the corresponding text again from the same selection of texts
    Exchange Args:
        data: Image features
            - dict containing the image features for agent 1 and agent 2, and the percentage of the
              image each agent received
              e.g. { "im_feats_1": im_feats_1,
                     "im_feats_2": im_feats_2,
                     "p": p}
        target: Class labels.
        desc: List of description vectors.
        train: Boolean value indicating training mode (True) or evaluation mode (False).
        break_early: Boolean value. If True, then terminate batched conversation if both agents are satisfied
        test_language_similarity: Boolean: whether to test the language similarity using communication vector arithmetic
    Function Args:
        a1: agent1
        a2: agent2
        exchange_args: Other useful arguments.
    Returns:
        s: Contains all stop bits [a1, a2]
            a1 = agent1 (Masks, Values, Probabilities)
            a2 = agent2 (Masks, Values, Probabilities)
        message_1: All agent_1 messages. (Values, Probabilities)
        message_2: All agent_2 messages. (Values, Probabilities)
        y_all = [y_nc, y]
            y_nc: (y_1_nc, y_2_nc)
                y_1_nc: Agent 1 predictions before communication
                y_2_nc: Agent 2 predictions before communication
            y = (y_1, y_2)
                y_1: All predictions that were made by agent 1 after communication
                y_2: All predictions that were made by agent 2 after communication
        r = (r_1, r_2)
            r_1: Estimated rewards of agent_1.
            r_2: Estimated rewards of agent_2.
    """
    # Randomly select which agent goes first.
    # From here on, local names agent1/agent2 mean "speaks first"/"speaks
    # second"; the globally-labelled agents a1/a2 are recovered at storage
    # time via who_goes_first.
    who_goes_first = None
    use_given_who_goes_first = exchange_args.get("use_given_who_goes_first", False)
    given_who_goes_first = exchange_args.get("given_who_goes_first", -1)
    if FLAGS.randomize_comms:
        if use_given_who_goes_first:
            if given_who_goes_first == -1:
                debuglogger.warn(f'No given agent to go first')
                sys.exit()
            elif given_who_goes_first == 1:
                agent1 = a1
                agent2 = a2
                who_goes_first = 1
                debuglogger.debug(f'Agent 1 communicates first')
            else:
                agent1 = a2
                agent2 = a1
                who_goes_first = 2
                debuglogger.debug(f'Agent 2 communicates first')
        else:
            # Coin flip decides the speaking order for this batch.
            if random.random() < 0.5:
                agent1 = a1
                agent2 = a2
                who_goes_first = 1
                debuglogger.debug(f'Agent 1 communicates first')
            else:
                agent1 = a2
                agent2 = a1
                who_goes_first = 2
                debuglogger.debug(f'Agent 2 communicates first')
    else:
        agent1 = a1
        agent2 = a2
        who_goes_first = 1
        debuglogger.debug(f'Agent 1 communicates first')
    # NOTE(review): when the roles are swapped (who_goes_first == 2), a2
    # receives im_feats_1 and a1 receives im_feats_2 — the image halves
    # follow the speaking order, not the agent identity. Confirm this is
    # intentional.
    data = exchange_args["data"]
    # TODO extend implementation to include data context
    data_context = None
    # NOTE(review): `target` is unpacked here but never used in this
    # function body.
    target = exchange_args["target"]
    desc = exchange_args["desc"]
    train = exchange_args["train"]
    break_early = exchange_args.get("break_early", False)
    corrupt = exchange_args.get("corrupt", False)
    corrupt_region = exchange_args.get("corrupt_region", None)
    test_language_similarity = exchange_args.get("test_language_similarity", False)
    batch_size = data["im_feats_1"].size(0)
    # Pad with one column of ones.
    # stop_mask_*[k] is 1 while the agent is still conversing at step k.
    stop_mask_1 = [Variable(torch.ones(batch_size, 1).byte())]
    stop_feat_1 = []
    stop_prob_1 = []
    stop_mask_2 = [Variable(torch.ones(batch_size, 1).byte())]
    stop_feat_2 = []
    stop_prob_2 = []
    feats_1 = []   # sampled binary messages attributed to global a1
    probs_1 = []   # corresponding Bernoulli probabilities
    feats_2 = []
    probs_2 = []
    y_1_nc = None  # predictions before any communication
    y_2_nc = None
    y_1 = []       # predictions after each communication step
    y_2 = []
    r_1 = []       # estimated (baseline) rewards per step
    r_2 = []
    # First message (default is 0)
    # NOTE(review): `volatile` is pre-0.4 PyTorch autograd API.
    m_binary = Variable(torch.FloatTensor(batch_size, agent1.m_dim).fill_(
        FLAGS.first_msg), volatile=not train)
    if FLAGS.cuda:
        m_binary = m_binary.cuda()
    if train:
        agent1.train()
        agent2.train()
    else:
        agent1.eval()
        agent2.eval()
    agent1.reset_state()
    agent2.reset_state()
    # The message is ignored initially
    use_message = False
    # Run data through both agents
    if data_context is not None:
        # No data context at the moment - # TODO
        debuglogger.warning(f'Data context not supported currently')
        sys.exit()
    else:
        # debuglogger.info(f'Inside exchange: Train status: {train}, Message: {m_binary}')
        # Pre-communication pass: each agent sees only its image half and
        # the dummy first message (use_message=False).
        s_1e, m_1e, y_1e, r_1e = agent1(
            data['im_feats_1'],
            m_binary,
            0,
            desc,
            use_message,
            batch_size,
            train)
        s_2e, m_2e, y_2e, r_2e = agent2(
            data['im_feats_2'],
            m_binary,
            0,
            desc,
            use_message,
            batch_size,
            train)
    # Add no message selections to results
    # Need to be consistent about storing the a1 and a2's even though their roles are randomized during each exchange
    # agent1 and agent2 is a local name that refers to the order of communication. Storage refers to global labels a1 and a2
    if who_goes_first == 1:
        y_1_nc = y_1e
        y_2_nc = y_2e
    else:
        y_1_nc = y_2e
        y_2_nc = y_1e
    for i_exchange in range(FLAGS.max_exchange):
        debuglogger.debug(
            f' ================== EXCHANGE {i_exchange} ====================')
        # The messages are now used
        use_message = True
        # Agent 1's message
        m_1e_binary, m_1e_probs = m_1e
        # Store last but one communication from agent1 to train with
        # Last communication is "communication into the void" since the other
        # agent never gets it.
        m_binary_1, m_probs_1 = m_1e
        # Optionally corrupt agent 1's message
        if corrupt:
            m_1e_binary = corrupt_message(corrupt_region, agent1, m_1e_binary)
        # Optionally change agent 1's message
        # (message-vector arithmetic: subtract one concept code, add another)
        if test_language_similarity:
            if (who_goes_first == 1 and exchange_args["change_agent"] == 1) or (who_goes_first == 2 and exchange_args["change_agent"] == 2):
                debuglogger.debug(f'Inside exchange: Changing agent 1 message...')
                a_dict_add = exchange_args["agent_add_dict"]
                a_dict_sub = exchange_args["agent_subtract_dict"]
                add = a_dict_add[exchange_args["add"]].unsqueeze(0)
                sub = a_dict_sub[exchange_args["subtract"]].unsqueeze(0)
                if FLAGS.cuda:
                    add = add.cuda()
                    sub = sub.cuda()
                new_m_prob = m_1e_probs.data - sub + add
                # Clamp to [0, 1] then round to recover a valid binary message.
                new_m_binary = torch.clamp(new_m_prob, 0, 1).round()
                m_1e_binary = _Variable(new_m_binary)
                debuglogger.debug(f'Old msg: {m_binary_1.data}, old prob: {m_probs_1.data}')
                debuglogger.debug(f'Sub: {sub}, add: {add}')
                debuglogger.debug(f'New msg: {new_m_binary}, new prob: {new_m_prob}')
        # Optionally mute communication channel
        # (replace the real message with the constant first message)
        if FLAGS.no_comms_channel:
            m_1e_binary = m_binary
        # Run data through agent 2
        if data_context is not None:
            # TODO
            debuglogger.warning(f'Data context not supported currently')
            sys.exit()
        else:
            # debuglogger.info(f'Inside exchange: Train status: {train}, Message to agent 2: {m_1e_binary}')
            s_2e, m_2e, y_2e, r_2e = agent2(
                data['im_feats_2'],
                m_1e_binary,
                i_exchange,
                desc,
                use_message,
                batch_size,
                train)
        # Agent 2's message
        m_2e_binary, m_2e_probs = m_2e
        # Optionally corrupt agent 2's message
        if corrupt:
            debuglogger.info(f'Corrupting message...')
            m_2e_binary = corrupt_message(corrupt_region, agent2, m_2e_binary)
        # Optionally change agent 2's message
        if test_language_similarity:
            if (who_goes_first == 1 and exchange_args["change_agent"] == 2) or (who_goes_first == 2 and exchange_args["change_agent"] == 1):
                debuglogger.debug(f'Inside exchange: Changing agent 2 message...')
                a_dict_add = exchange_args["agent_add_dict"]
                a_dict_sub = exchange_args["agent_subtract_dict"]
                add = a_dict_add[exchange_args["add"]].unsqueeze(0)
                sub = a_dict_sub[exchange_args["subtract"]].unsqueeze(0)
                if FLAGS.cuda:
                    add = add.cuda()
                    sub = sub.cuda()
                new_m_prob = m_2e_probs.data - sub + add
                new_m_binary = torch.clamp(new_m_prob, 0, 1).round()
                m_2e_binary = _Variable(new_m_binary)
                # NOTE(review): these two log lines print agent 1's stored
                # message (m_binary_1/m_probs_1), not agent 2's — likely a
                # copy-paste slip in the debug output only.
                debuglogger.debug(f'Old msg: {m_binary_1.data}, old prob: {m_probs_1.data}')
                debuglogger.debug(f'Sub: {sub}, add: {add}')
                debuglogger.debug(f'New msg: {new_m_binary}, new prob: {new_m_prob}')
        # Optionally mute communication channel
        if FLAGS.no_comms_channel:
            m_2e_binary = m_binary
        # Run data through agent 1
        if data_context is not None:
            pass
        else:
            # debuglogger.info(f'Inside exchange: Train status: {train}, Message to agent 1: {m_2e_binary}')
            s_1e, m_1e, y_1e, r_1e = agent1(
                data['im_feats_1'],
                m_2e_binary,
                i_exchange,
                desc,
                use_message,
                batch_size,
                train)
        # Store rest of communication and stop information
        s_binary_1, s_prob_1 = s_1e
        s_binary_2, s_prob_2 = s_2e
        m_binary_2, m_probs_2 = m_2e
        # Save for later
        # TODO check stop mask
        # Need to be consistent about storing the a1 and a2's even though their roles are randomized during each exchange
        # agent1 and agent2 is a local name that refers to the order of communication. Storage refers to global labels a1 and a2
        if who_goes_first == 1:
            # Each mask is the running AND of all stop bits so far.
            stop_mask_1.append(torch.min(stop_mask_1[-1], s_binary_1.byte()))
            stop_mask_2.append(torch.min(stop_mask_2[-1], s_binary_2.byte()))
            stop_feat_1.append(s_binary_1)
            stop_feat_2.append(s_binary_2)
            stop_prob_1.append(s_prob_1)
            stop_prob_2.append(s_prob_2)
            feats_1.append(m_binary_1)
            feats_2.append(m_binary_2)
            probs_1.append(m_probs_1)
            probs_2.append(m_probs_2)
            y_1.append(y_1e)
            y_2.append(y_2e)
            r_1.append(r_1e)
            r_2.append(r_2e)
        else:
            # NOTE(review): suspected bug — these two lines chain each
            # agent's new stop bit onto the OTHER agent's previous mask
            # (stop_mask_2[-1] with s_binary_2, then the just-updated
            # stop_mask_1[-1] with s_binary_1), unlike the who_goes_first==1
            # branch which keeps each chain separate. The pre-existing
            # "TODO check stop mask" above likely refers to this. Left
            # unchanged here; verify before fixing.
            stop_mask_1.append(torch.min(stop_mask_2[-1], s_binary_2.byte()))
            stop_mask_2.append(torch.min(stop_mask_1[-1], s_binary_1.byte()))
            stop_feat_1.append(s_binary_2)
            stop_feat_2.append(s_binary_1)
            stop_prob_1.append(s_prob_2)
            stop_prob_2.append(s_prob_1)
            feats_1.append(m_binary_2)
            feats_2.append(m_binary_1)
            probs_1.append(m_probs_2)
            probs_2.append(m_probs_1)
            y_1.append(y_2e)
            y_2.append(y_1e)
            r_1.append(r_2e)
            r_2.append(r_1e)
        # Terminate exchange if everyone is done conversing
        # NOTE(review): `.data[0]` is pre-0.4 PyTorch scalar indexing.
        if break_early and stop_mask_1[-1].float().sum().data[0] == 0 and stop_mask_2[-1].float().sum().data[0] == 0:
            break
    # The final mask must always be zero.
    stop_mask_1[-1].data.fill_(0)
    stop_mask_2[-1].data.fill_(0)
    s = [(stop_mask_1, stop_feat_1, stop_prob_1),
         (stop_mask_2, stop_feat_2, stop_prob_2)]
    message_1 = (feats_1, probs_1)
    message_2 = (feats_2, probs_2)
    y = (y_1, y_2)
    y_nc = (y_1_nc, y_2_nc)
    y_all = [y_nc, y]
    r = (r_1, r_2)
    return s, message_1, message_2, y_all, r
def get_outp(y, masks):
    """Pick the effective prediction for each example of a batched exchange.

    Args:
        y: list of per-step prediction tensors, each (batch, num_classes).
        masks: list of per-step selection masks, or None. With masks, each
            example's prediction is taken from the step its mask selects;
            without masks the final step's predictions are used.
    Returns:
        (selected_predictions, per_step_negative_entropies)
    """
    def _neg_entropy(logits):
        dist = F.softmax(logits, dim=1)
        return (torch.log(dist + 1e-8) * dist).sum(1).mean()

    # TODO: This is wrong for the dynamic exchange, and we might want a "per example"
    # entropy for either exchange (this version is mean across batch).
    entropies = [_neg_entropy(step_y) for step_y in y]
    # TODO check ok for new agents
    if masks is None:
        return y[-1], entropies
    n_examples = y[0].size(0)
    n_steps = len(masks)
    stacked = torch.cat([step_y.view(n_examples, 1, -1) for step_y in y], 1)
    joined = torch.cat(masks, 1)
    selector = joined.view(n_examples, n_steps, 1).expand_as(stacked)
    picked = torch.masked_select(stacked, selector.detach()).view(n_examples, -1)
    if FLAGS.debug:
        # Each mask index should have exactly 1 true value.
        assert all([mm.data[0] == 1 for mm in joined.sum(1)])
    return picked, entropies
def calculate_loss_binary(binary_features, binary_probs, rewards, baseline_rewards, entropy_penalty):
    """REINFORCE loss for the sampled binary communication vectors.

    Args:
        binary_features: sampled 0/1 message bits, (batch, m_dim).
        binary_probs: Bernoulli probabilities that produced them.
        rewards: observed rewards per example.
        baseline_rewards: learned baseline predictions (detached here).
        entropy_penalty: scalar weight on the negative-entropy regulariser,
            or None to disable it.
    Returns:
        (loss, negentropy) — the policy-gradient loss and the summed
        two-sided negative entropy of the message distribution.
    """
    bits = Variable(binary_features.data)
    # Log-likelihood of the sampled bits under the Bernoulli probabilities.
    log_bernoulli = bits * torch.log(binary_probs + 1e-8) + \
        (1 - bits) * torch.log(1 - binary_probs + 1e-8)
    log_bernoulli = log_bernoulli.sum(1)
    advantage = Variable(rewards) - \
        Variable(baseline_rewards.clone().detach().data)
    if rewards.size(0) > 1:  # Ensures weights are not larger than 1
        advantage = advantage / max(1., torch.std(advantage.data).item())
    loss = torch.mean(-1 * advantage * log_bernoulli)
    # Must do both sides of negent, otherwise is skewed towards 0.
    negent_on = (torch.log(binary_probs + 1e-8) * binary_probs).sum(1).mean()
    negent_off = (torch.log((1. - binary_probs) + 1e-8) * (1. - binary_probs)).sum(1).mean()
    negentropy = negent_on + negent_off
    if entropy_penalty is not None:
        loss = loss + entropy_penalty * negentropy
    return loss, negentropy
def multistep_loss_binary(binary_features, binary_probs, rewards, baseline_rewards, masks, entropy_penalty):
    """Average calculate_loss_binary over every message of a multi-step exchange."""
    if masks is not None:
        # TODO - implement for new agents
        pass
    else:
        per_step = [calculate_loss_binary(feat, prob, rewards, scores, entropy_penalty)
                    for feat, prob, scores in zip(binary_features, binary_probs, baseline_rewards)]
        losses = [step[0] for step in per_step]
        entropies = [step[1] for step in per_step]
        loss = sum(losses) / len(binary_features)
    return loss, entropies
def calculate_loss_bas(baseline_scores, rewards):
    """Mean-squared error between the predicted baselines and the rewards."""
    return nn.MSELoss()(baseline_scores, Variable(rewards))
def multistep_loss_bas(baseline_scores, rewards, masks):
    """Average the baseline MSE loss across the steps of an exchange."""
    if masks is not None:
        # TODO - check for new agents
        pass
    else:
        step_losses = [calculate_loss_bas(scores, rewards) for scores in baseline_scores]
        loss = sum(step_losses) / len(baseline_scores)
    return loss
def calculate_accuracy(prediction_dist, target, batch_size, top_k):
    '''Calculates the prediction accuracy using correct@top_k
    Returns:
        - accuracy: float
        - correct: boolean vector of batch_size elements.
            1 indicates prediction was correct@top_k
        - top_1: boolean vector of batch_size elements.
            1 indicates prediction was correct (top 1)
    '''
    assert batch_size == target.size(0)
    # Rank classes per example (ascending); the last columns hold the best.
    ranked = prediction_dist.data.cpu().numpy().argsort()
    best_k = torch.from_numpy(ranked[:, -top_k:]).long()
    expanded_target = target.view(-1, 1).expand(batch_size, top_k)
    correct = (best_k == expanded_target.cpu()).sum(dim=1)
    best_1 = torch.from_numpy(ranked[:, -1:]).long()
    top_1 = (best_1 == target.view(-1, 1).cpu()).sum(dim=1)
    accuracy = correct.sum() / float(batch_size)
    return accuracy, correct, top_1
def get_classification_loss_and_stats(predictions, targets):
    '''
    Arguments:
        - predictions: predicted logits for the classes
        - targets: correct classes
    Returns:
        - dist: logs of the predicted probability distribution over the classes
        - argmax: predicted class
        - argmax_prob: predicted class probability
        - ent: average entropy of the predicted probability distributions (over the batch)
        - nll_loss: Negative Log Likelihood loss between the predictions and targets
        - logs: Individual log likelihoods across the batch
    '''
    log_dist = F.log_softmax(predictions, dim=1)
    top_logprob, predicted_class = log_dist.data.max(1)
    class_probs = F.softmax(predictions, dim=1)
    # Batch-mean negative entropy; negated below for human-readable logging.
    ent = (torch.log(class_probs + 1e-8) * class_probs).sum(1).mean()
    debuglogger.debug(f'Mean entropy: {-ent.data.item()}')
    nll_loss = nn.NLLLoss()(log_dist, Variable(targets))
    example_logs = loglikelihood(Variable(log_dist.data),
                                 Variable(targets.view(-1, 1)))
    return (log_dist, top_logprob, predicted_class, ent, nll_loss, example_logs)
def run():
flogger = FileLogger(FLAGS.log_file)
logger = Logger(
env=FLAGS.env, experiment_name=FLAGS.experiment_name, enabled=FLAGS.visdom)
flogger.Log("Flag Values:\n" +
json.dumps(FLAGS.FlagValuesDict(), indent=4, sort_keys=True))
if not os.path.exists(FLAGS.json_file):
with open(FLAGS.json_file, "w") as f:
f.write(json.dumps(FLAGS.FlagValuesDict(), indent=4, sort_keys=True))
# Initialize Agents
agents = []
optimizers_dict = {}
models_dict = {}
frozen_agents = {}
# Only used for training agent communities
num_agents_per_community = None
intra_pool_connect_p = None
train_vec_prob = None # Probability distribution over agent pairs, used to sample pairs per batch
agent_idx_list = None # Mapping from train_vec indices to agent pairs
eval_agent_list = None # List of agent pairs to evaluate each dev eval
# Check agent setup
if FLAGS.num_agents > 2 and not (FLAGS.agent_pools or FLAGS.agent_communities):
flogger.Log("{} is too many agents. There can only be two agents if FLAGS.agent_pools and FLAGS.agent_communities are false".format(FLAGS.num_agents))
sys.exit()
if FLAGS.agent_pools and FLAGS.agent_communities:
flogger.Log("You cannot train an agent pool and community at the same time. Please select one of the other")
sys.exit()
if not FLAGS.use_binary:
flogger.Log("Non binary agent communication not implemented yet. Set FLAGS.use_binary to True")
sys.exit()
if FLAGS.agent_communities:
if FLAGS.num_communities != len(FLAGS.num_agents_per_community):
flogger.Log("Number of communities doesn't match length of community size list")
sys.exit()
if FLAGS.num_communities != len(FLAGS.community_checkpoints):
flogger.Log("Number of communities doesn't match length of community checkpoint list. Use None to train from scratch.")
sys.exit()
if FLAGS.num_communities != len(FLAGS.intra_pool_connect_p):
flogger.Log("Number of communities doesn't match length of intra community connectivity list")
sys.exit()
if FLAGS.community_type == "chain" and FLAGS.num_communities <= 2:
flogger.Log("There must be at least 3 communities to train in a chain")
sys.exit()
num_agents_per_community = [int(x) for x in FLAGS.num_agents_per_community]
intra_pool_connect_p = [float(x) for x in FLAGS.intra_pool_connect_p]
if FLAGS.num_agents != sum(num_agents_per_community):
flogger.Log("Total number of agents does not match sum of agents in each community")
sys.exit()
flogger.Log(f'Total number of agents: {FLAGS.num_agents}, Agents per community: {num_agents_per_community}')
if FLAGS.test_language_similarity and not FLAGS.eval_only:
flogger.Log("Can only test language similarity in eval only mode")
sys.exit()
if FLAGS.test_language_similarity:
if len(FLAGS.agent_dicts) != FLAGS.num_agents:
flogger.Log("There must be a code dictionary per agent")
sys.exit()
# Create agents
for _ in range(FLAGS.num_agents):
agent = Agent(im_feature_type=FLAGS.img_feat,
im_feat_dim=FLAGS.img_feat_dim,
h_dim=FLAGS.h_dim,
m_dim=FLAGS.m_dim,
desc_dim=FLAGS.desc_dim,
num_classes=FLAGS.num_classes,
s_dim=FLAGS.s_dim,
use_binary=FLAGS.use_binary,
use_attn=FLAGS.visual_attn,
attn_dim=FLAGS.attn_dim,
use_MLP=FLAGS.use_MLP,
cuda=FLAGS.cuda,
im_from_scratch=FLAGS.improc_from_scratch,
dropout=FLAGS.dropout)
flogger.Log("Agent {} id: {} Architecture: {}".format(_ + 1, id(agent), agent))
total_params = sum([functools.reduce(lambda x, y: x * y, p.size(), 1.0)
for p in agent.parameters()])
flogger.Log("Total Parameters: {}".format(total_params))
agents.append(agent)
# Optimizer
if FLAGS.optim_type == "SGD":
optimizer_agent = optim.SGD(
agent.parameters(), lr=FLAGS.learning_rate)
elif FLAGS.optim_type == "Adam":
optimizer_agent = optim.Adam(
agent.parameters(), lr=FLAGS.learning_rate)
elif FLAGS.optim_type == "RMSprop":
optimizer_agent = optim.RMSprop(
agent.parameters(), lr=FLAGS.learning_rate)
else:
raise NotImplementedError
optim_name = "optimizer_agent" + str(_ + 1)
agent_name = "agent" + str(_ + 1)
optimizers_dict[optim_name] = optimizer_agent
models_dict[agent_name] = agent
if FLAGS.agent_communities:
flogger.Log(f'Training {FLAGS.num_communities} communities of agents, type: {FLAGS.community_type}')
flogger.Log("Number of agents: {}".format(len(agents)))
for k in optimizers_dict:
flogger.Log("Optimizer {}: {}".format(k, optimizers_dict[k]))
# Create additional data structures if training with agent communites
# These handle to sampling of agents during training, and selection of agent pairs when evaluating of the dev set
if FLAGS.agent_communities:
(train_vec_prob, agent_idx_list) = build_train_matrix(num_agents_per_community, FLAGS.community_type, intra_pool_connect_p, FLAGS.inter_pool_connect_p, FLAGS.ratio_adjust, FLAGS.intra_inter_ratio)
eval_agent_list = build_eval_list(num_agents_per_community, FLAGS.community_type, train_vec_prob)
flogger.Log(f'Train matrix: {torch.from_numpy(np.reshape(train_vec_prob, (FLAGS.num_agents, FLAGS.num_agents)))}')
flogger.Log(f'Eval agent list: {eval_agent_list}')
# Training metrics
epoch = 0
step = 0
best_dev_acc = 0
# Optionally load previously saved models
if os.path.exists(FLAGS.checkpoint):
flogger.Log("Loading from: " + FLAGS.checkpoint)
data = torch_load(FLAGS.checkpoint, models_dict, optimizers_dict)
flogger.Log("Loaded at step: {} and best dev acc: {}".format(
data['step'], data['best_dev_acc']))
step = data['step']
best_dev_acc = data['best_dev_acc']
if FLAGS.agent_communities:
# Load train and eval matrices
flogger.Log(f"Train and eval matrices for {FLAGS.checkpoint} exists?: {os.path.isfile(FLAGS.checkpoint + '_train_vec.pkl')}")
if os.path.isfile(FLAGS.checkpoint + '_train_vec.pkl'):
train_vec_prob = pickle.load(open(FLAGS.checkpoint + '_train_vec.pkl', 'rb'))
agent_idx_list = pickle.load(open(FLAGS.checkpoint + '_agent_idx_list.pkl', 'rb'))
eval_agent_list = pickle.load(open(FLAGS.checkpoint + '_eval_agent_list.pkl', 'rb'))
else:
# Try just .pt suffix. Used when evaluating checkpoints saved after xx steps e.g. .pt_50000
# Or for the best checkpoint, .pt_best.
# During normal training the train and eval matrices are only associated with .pt suffix
stripped_checkpoint = FLAGS.checkpoint.split("pt_")[0] + 'pt'
flogger.Log("Loading train and eval matrices from: " + FLAGS.checkpoint + " failed")
flogger.Log("Trying " + stripped_checkpoint)
train_vec_prob = pickle.load(open(stripped_checkpoint + '_train_vec.pkl', 'rb'))
agent_idx_list = pickle.load(open(stripped_checkpoint + '_agent_idx_list.pkl', 'rb'))
eval_agent_list = pickle.load(open(stripped_checkpoint + '_eval_agent_list.pkl', 'rb'))
flogger.Log(f'Train matrix: {torch.from_numpy(np.reshape(train_vec_prob, (FLAGS.num_agents, FLAGS.num_agents)))}')
flogger.Log(f'Eval agent list: {eval_agent_list}')
elif FLAGS.agent_communities:
torch_load_communities(FLAGS.community_checkpoints, num_agents_per_community, models_dict, optimizers_dict)
if FLAGS.agent_communities:
# Save additional community data
pickle.dump(train_vec_prob, open(FLAGS.checkpoint + '_train_vec.pkl', 'wb'))
pickle.dump(agent_idx_list, open(FLAGS.checkpoint + '_agent_idx_list.pkl', 'wb'))
pickle.dump(eval_agent_list, open(FLAGS.checkpoint + '_eval_agent_list.pkl', 'wb'))
# Copy agents to measure how much the language has changed
frozen_agents = copy.deepcopy(models_dict)
frozen_optimizers = copy.deepcopy(optimizers_dict)
if os.path.exists(FLAGS.checkpoint) and FLAGS.agent_communities:
# Load original agents into frozen agents so that self play comparison is with original agents
flogger.Log("Loading original agent pools into frozen agents")
torch_load_communities(FLAGS.community_checkpoints, num_agents_per_community, frozen_agents, frozen_optimizers)
# GPU support
if FLAGS.cuda:
for m in models_dict.values():
m.cuda()
for m in frozen_agents.values():
m.cuda()
for o in optimizers_dict.values():
recursively_set_device(o.state_dict(), gpu=0)
# If training / evaluating with pools of agents or communities of agents sample with each batch
if FLAGS.agent_pools or FLAGS.agent_communities:
agent1 = None
agent2 = None
optimizer_agent1 = None
optimizer_agent2 = None
agent_idxs = [None, None]
# Otherwise keep agents fixed for each batch
else:
if FLAGS.num_agents == 1:
agent1 = agents[0]
agent2 = agents[0]
optimizer_agent1 = optimizers_dict["optimizer_agent1"]
optimizer_agent2 = optimizers_dict["optimizer_agent1"]
agent_idxs = [1, 1]
else:
agent1 = agents[0]
agent2 = agents[1]
optimizer_agent1 = optimizers_dict["optimizer_agent1"]
optimizer_agent2 = optimizers_dict["optimizer_agent2"]
agent_idxs = [1, 2]
# Alternatives to training.
if FLAGS.eval_only:
if not os.path.exists(FLAGS.checkpoint):
raise Exception("Must provide valid checkpoint.")
debuglogger.info("Evaluating on validation set")
step = i_batch = epoch = 0
# Storage for results
dev_accuracy_id = []
dev_accuracy_ood = []
dev_accuracy_self_com = []
for i in range(FLAGS.num_agents):
dev_accuracy_id.append({'total_acc_both_nc': [], # % both agents right before comms
'total_acc_both_com': [], # % both agents right after comms
'total_acc_atl1_nc': [], # % at least 1 agent right before comms
'total_acc_atl1_com': [] # % at least 1 agent right after comms
})
dev_accuracy_ood.append({'total_acc_both_nc': [], # % both agents right before comms
'total_acc_both_com': [], # % both agents right after comms
'total_acc_atl1_nc': [], # % at least 1 agent right before comms
'total_acc_atl1_com': [] # % at least 1 agent right after comms
})
dev_accuracy_self_com.append({'total_acc_both_nc': [], # % both agents right before comms
'total_acc_both_com': [], # % both agents right after comms
'total_acc_atl1_nc': [], # % at least 1 agent right before comms
'total_acc_atl1_com': [] # % at least 1 agent right after comms
})
if FLAGS.eval_xproduct:
# Complete evaluation on all possible pairs of agents
for i in range(FLAGS.num_agents):
for j in range(FLAGS.num_agents):
flogger.Log("Agent 1: {}".format(i + 1))
logger.log(key="Agent 1: ", val=i + 1, step=step)
agent1 = models_dict["agent" + str(i + 1)]
flogger.Log("Agent 2: {}".format(j + 1))
logger.log(key="Agent 2: ", val=j + 1, step=step)
agent2 = models_dict["agent" + str(j + 1)]
if i == j:
# Create a copy of agents playing with themselves to avoid sharing the hidden state
agent2 = Agent(im_feature_type=FLAGS.img_feat,
im_feat_dim=FLAGS.img_feat_dim,
h_dim=FLAGS.h_dim,
m_dim=FLAGS.m_dim,
desc_dim=FLAGS.desc_dim,
num_classes=FLAGS.num_classes,
s_dim=FLAGS.s_dim,
use_binary=FLAGS.use_binary,
use_attn=FLAGS.visual_attn,
attn_dim=FLAGS.attn_dim,
use_MLP=FLAGS.use_MLP,
cuda=FLAGS.cuda,
im_from_scratch=FLAGS.improc_from_scratch,
dropout=FLAGS.dropout)
agent2.load_state_dict(agent1.state_dict())
if FLAGS.cuda:
agent2.cuda()
if i == 0 and j == 0:
# Report in domain development accuracy and store examples
# TODO: Store examples currently disabled. To fix store examples - image saving disabled because it clogs the memory and makes everything very slow
dev_accuracy_id[i], total_accuracy_com = get_and_log_dev_performance(agent1, agent2, FLAGS.dataset_indomain_valid_path, True, dev_accuracy_id[i], logger, flogger, f'In Domain Agents {i + 1},{j + 1}', epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag=f'eval_only_A_{i + 1}_{j + 1}')
# Report out of domain development accuracy and store examples
dev_accuracy_ood[i], total_accuracy_com = get_and_log_dev_performance(agent1, agent2, FLAGS.dataset_outdomain_valid_path, True, dev_accuracy_id[i], logger, flogger, f'Out of Domain Agents {i + 1},{j + 1}', epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag=f'eval_only_A_{i + 1}_{j + 1}')
else:
# Report in domain development accuracy
dev_accuracy_id[i], total_accuracy_com = get_and_log_dev_performance(agent1, agent2, FLAGS.dataset_indomain_valid_path, True, dev_accuracy_id[i], logger, flogger, f'In Domain Agents {i + 1},{j + 1}', epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag=f'eval_only_A_{i + 1}_{j + 1}')
# Report out of domain development accuracy
dev_accuracy_ood[i], total_accuracy_com = get_and_log_dev_performance(agent1, agent2, FLAGS.dataset_outdomain_valid_path, True, dev_accuracy_id[i], logger, flogger, f'Out of Domain Agents {i + 1},{j + 1}', epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag=f'eval_only_A_{i + 1}_{j + 1}')
elif FLAGS.gen_community_messages:
# Get list of agent pairs
agent_pairs = get_msg_pairs(FLAGS.community_structure)
flogger.Log(f"Agent pairs to generate messages for: {agent_pairs}")
for (i, j) in agent_pairs:
flogger.Log("Agent 1: {}".format(i + 1))
logger.log(key="Agent 1: ", val=i + 1, step=step)
agent1 = models_dict["agent" + str(i + 1)]
flogger.Log("Agent 2: {}".format(j + 1))
logger.log(key="Agent 2: ", val=j + 1, step=step)
agent2 = models_dict["agent" + str(j + 1)]
# Report in domain development accuracy
dev_accuracy_id[i], total_accuracy_com = get_and_log_dev_performance(agent1, agent2, FLAGS.dataset_indomain_valid_path, True, dev_accuracy_id[i], logger, flogger, f'In Domain Agents {i + 1},{j + i}', epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=True, agent_tag=f'eval_only_A_{i + 1}_{j + 1}')
# Report out of domain development accuracy
dev_accuracy_ood[i], total_accuracy_com = get_and_log_dev_performance(agent1, agent2, FLAGS.dataset_path, False, dev_accuracy_ood[i], logger, flogger, f'Out of Domain Agents {i + 1},{j + 1}', epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag="")
elif FLAGS.test_language_similarity:
# Load agent dictionaries
code_dicts = []
if FLAGS.self_similarity:
for i in range(FLAGS.num_agents):
_ = pickle.load(open(FLAGS.agent_dicts[i], 'rb'))
code_dicts.append(_)
for i in range(FLAGS.num_agents - 1):
flogger.Log("Agent 1: {}".format(i + 1))
logger.log(key="Agent 1: ", val=i + 1, step=step)
agent1 = models_dict["agent" + str(i + 1)]
flogger.Log("Agent 2: {}".format(i + 2))
logger.log(key="Agent 2: ", val=i + 2, step=step)
agent2 = models_dict["agent" + str(i + 2)]
dev_accuracy_id[i], total_accuracy_com = get_and_log_dev_performance(agent1, agent2, FLAGS.dataset_indomain_valid_path, True, dev_accuracy_id[i], logger, flogger, f'In Domain Agents {i + 1},{i + 2}', epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag=f'eval_only_A_{i + 1}_{i + 2}', agent_dicts=[code_dicts, code_dicts], agent_idxs=[i, i + 1], agent_groups=[1, 2])
elif FLAGS.ancestor_similarity:
# The main difference vs. the other methods for testing language similarity is that both agents always come from group 1. Only the codes for group 2 are present and used to measure the similarity of the current protocol to an ancestor protocol, as well as to trace the ancestry of individual words.
# Load codes for checkpointed agents
debuglogger.info(f'Agent codes: {FLAGS.agent_dicts}')
debuglogger.info(f'Ancestor codes: {FLAGS.agent_supplementary_dicts}')
cd = []
for i in range(FLAGS.num_agents):
_ = pickle.load(open(FLAGS.agent_dicts[i], 'rb'))
cd.append(_)
code_dicts.append(cd)
# Load ancestor codes. The ancestor codes are given through FLAGS.agent_supplementary_dicts
cd = []
for i in range(FLAGS.num_supplementary_agents):
_ = pickle.load(open(FLAGS.agent_supplementary_dicts[i], 'rb'))
cd.append(_)
code_dicts.append(cd)
for i in range(FLAGS.num_agents - 1):
flogger.Log("Agent 1: {}".format(i + 1))
logger.log(key="Agent 1: ", val=i + 1, step=step)
agent1 = models_dict["agent" + str(i + 1)]
flogger.Log("Agent 2: {}".format(i + 2))
logger.log(key="Agent 2: ", val=i + 2, step=step)
agent2 = models_dict["agent" + str(i + 2)]
dev_accuracy_id[i], total_accuracy_com = get_and_log_dev_performance(agent1, agent2, FLAGS.dataset_indomain_valid_path, True, dev_accuracy_id[i], logger, flogger, f'In Domain Agents {i + 1},{i + 2}', epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag=f'eval_only_A_{i + 1}_{i + 2}', agent_dicts=[code_dicts[0], code_dicts[1]], agent_idxs=[i, i + 1], agent_groups=[1, 1])
else:
'''Please note that this is an experimental feature. To be tested further.'''
# Testing similarity between two language pairs.
# The pairs are assumed to be 2 of the pairs in a checkpointed community
# The pairs are specified through FLAGS.compare_language_pairs and are the index into FLAGS.num_agents_per_community
# The code dicts are given in the command line through FLAGS.agent_dicts and FLAGS.agent_supplementary dicts
cd = []
for i in range(FLAGS.num_agents):
_ = pickle.load(open(FLAGS.agent_dicts[i], 'rb'))
cd.append(_)
# Extract codes for group 1
idx = int(FLAGS.compare_language_pairs[0])
start = end = 0
g1_bounds = (None, None)
for i, p in enumerate(FLAGS.num_agents_per_community):
end += int(p)
if idx == i:
g1_bounds = (start, end)
break
start += int(p)
code_dicts.append(cd[start:end])
# Extract codes for group 2
idx = int(FLAGS.compare_language_pairs[1])
start = end = 0
g2_bounds = (None, None)
for i, p in enumerate(FLAGS.num_agents_per_community):
end += int(p)
if idx == i:
g2_bounds = (start, end)
break
start += int(p)
code_dicts.append(cd[start:end])
flogger.Log(f"Comparing groups: {FLAGS.compare_language_pairs}")
flogger.Log(f"G1 bounds: {g1_bounds}, G2 bounds: {g2_bounds}")
# Evaluate on group 1 pairs
for i in range(g1_bounds[0], g1_bounds[1] - 1):
flogger.Log("Agent 1: {}".format(i + 1))
logger.log(key="Agent 1: ", val=i + 1, step=step)
agent1 = models_dict["agent" + str(i + 1)]
flogger.Log("Agent 2: {}".format(i + 2))
logger.log(key="Agent 2: ", val=i + 2, step=step)
agent2 = models_dict["agent" + str(i + 2)]
dev_accuracy_id[i], total_accuracy_com = get_and_log_dev_performance(agent1, agent2, FLAGS.dataset_indomain_valid_path, True, dev_accuracy_id[i], logger, flogger, f'In Domain Agents {i + 1},{i + 2}', epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag=f'eval_only_A_{i + 1}_{i + 2}', agent_dicts=[code_dicts[0], code_dicts[1]], agent_idxs=[i, i + 1], agent_groups=[1, 1])
# Evaluate on group 2 pairs
for i in range(g2_bounds[0], g2_bounds[1] - 1):
flogger.Log("Agent 1: {}".format(i + 1))
logger.log(key="Agent 1: ", val=i + 1, step=step)
agent1 = models_dict["agent" + str(i + 1)]
flogger.Log("Agent 2: {}".format(i + 2))
logger.log(key="Agent 2: ", val=i + 2, step=step)
agent2 = models_dict["agent" + str(i + 2)]
dev_accuracy_id[i], total_accuracy_com = get_and_log_dev_performance(agent1, agent2, FLAGS.dataset_indomain_valid_path, True, dev_accuracy_id[i], logger, flogger, f'In Domain Agents {i + 1},{i + 2}', epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag=f'eval_only_A_{i + 1}_{i + 2}', agent_dicts=[code_dicts[0], code_dicts[1]], agent_idxs=[i - g2_bounds[0], i + 1 - g2_bounds[0]], agent_groups=[2, 2])
elif FLAGS.eval_agent_communities:
eval_community(eval_agent_list, models_dict, dev_accuracy_id[0], logger, flogger, epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=True, agent_tag="no_tag")
else:
# For the pairs of agents calculate results
# Applies to both pools of agents and an agent pair
for i in range(FLAGS.num_agents - 1):
flogger.Log("Agent 1: {}".format(i + 1))
logger.log(key="Agent 1: ", val=i + 1, step=step)
agent1 = models_dict["agent" + str(i + 1)]
flogger.Log("Agent 2: {}".format(i + 2))
logger.log(key="Agent 2: ", val=i + 2, step=step)
agent2 = models_dict["agent" + str(i + 2)]
if i == 0:
# Report in domain development accuracy and analyze messages and store examples
dev_accuracy_id[i], total_accuracy_com = get_and_log_dev_performance(agent1, agent2, FLAGS.dataset_indomain_valid_path, True, dev_accuracy_id[i], logger, flogger, f'In Domain Agents {i + 1},{i + 2}', epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=True, agent_tag=f'eval_only_A_{i + 1}_{i + 2}')
else:
# Report in domain development accuracy
dev_accuracy_id[i], total_accuracy_com = get_and_log_dev_performance(
agent1, agent2, FLAGS.dataset_indomain_valid_path, True, dev_accuracy_id[i], logger, flogger, f'In Domain Agents {i + 1},{i + 2}', epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=True, agent_tag=f'eval_only_A_{i + 1}_{i + 2}')
# Report out of domain development accuracy
dev_accuracy_ood[i], total_accuracy_com = get_and_log_dev_performance(agent1, agent2, FLAGS.dataset_outdomain_valid_path, True, dev_accuracy_id[i], logger, flogger, f'Out of Domain Agents {i + 1},{i + 2}', epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag=f'eval_only_A_{i + 1}_{i + 2}')
# Report in domain development accuracy when agents communicate with themselves
for i in range(FLAGS.num_agents):
agent1 = models_dict["agent" + str(i + 1)]
# Create a copy of agents playing with themselves to avoid sharing the hidden state
agent2 = copy.deepcopy(agent1)
flogger.Log("Agent {} self communication: id {}".format(i + 1, id(agent)))
dev_accuracy_self_com[i], total_accuracy_com = get_and_log_dev_performance(
agent1, agent2, FLAGS.dataset_indomain_valid_path, True, dev_accuracy_self_com[i], logger, flogger, "Agent " + str(i + 1) + " self communication: In Domain", epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag=f'eval_only_self_com_A_{i + 1}')
sys.exit()
# Training loop
while epoch < FLAGS.max_epoch:
flogger.Log("Starting epoch: {}".format(epoch))
# Read dataset randomly into batches
if FLAGS.dataset == "shapeworld":
dataloader = load_shapeworld_dataset(FLAGS.dataset_path, FLAGS.glove_path, FLAGS.dataset_mode, FLAGS.dataset_size_train, FLAGS.dataset_type,
FLAGS.dataset_name, FLAGS.batch_size, FLAGS.random_seed, FLAGS.shuffle_train, FLAGS.img_feat, FLAGS.cuda, truncate_final_batch=False)
else:
raise NotImplementedError
# Keep track of metrics
batch_accuracy = {'total_nc': [], # no communicaton
'total_com': [], # after communication
'rewards_1': [], # agent 1 rewards
'rewards_2': [], # agent 2 rewards
'total_acc_both_nc': [], # % both agents right before comms
'total_acc_both_com': [], # % both agents right after comms
'total_acc_atl1_nc': [], # % at least 1 agent right before comms
'total_acc_atl1_com': [], # % at least 1 agent right after comms
'agent1_nc': [], # no communicaton
'agent2_nc': [], # no communicaton
'agent1_com': [], # after communicaton
'agent2_com': [] # after communicaton
}
dev_accuracy_id = {'total_acc_both_nc': [], # % both agents right before comms
'total_acc_both_com': [], # % both agents right after comms
'total_acc_atl1_nc': [], # % at least 1 agent right before comms
'total_acc_atl1_com': [] # % at least 1 agent right after comms
}
dev_accuracy_ood = {'total_acc_both_nc': [], # % both agents right before comms
'total_acc_both_com': [], # % both agents right after comms
'total_acc_atl1_nc': [], # % at least 1 agent right before comms
'total_acc_atl1_com': [] # % at least 1 agent right after comms
}
dev_accuracy_id_pairs = []
dev_accuracy_self_com = []
for i in range(FLAGS.num_agents):
dev_accuracy_id_pairs.append({'total_acc_both_nc': [], # % both agents right before comms
'total_acc_both_com': [], # % both agents right after comms
'total_acc_atl1_nc': [], # % at least 1 agent right before comms
'total_acc_atl1_com': [] # % at least 1 agent right after comms
})
dev_accuracy_self_com.append({'total_acc_both_nc': [], # % both agents right before comms
'total_acc_both_com': [], # % both agents right after comms
'total_acc_atl1_nc': [], # % at least 1 agent right before comms
'total_acc_atl1_com': [] # % at least 1 agent right after comms
})
# Iterate through batches
for i_batch, batch in enumerate(dataloader):
debuglogger.debug(f'Batch {i_batch}')
# Select agents if training with pools or communities
if FLAGS.agent_pools:
idx = random.randint(0, len(agents) - 1)
agent1 = agents[idx]
optimizer_agent1 = optimizers_dict["optimizer_agent" + str(idx + 1)]
agent_idxs[0] = idx + 1
old_idx = idx
if FLAGS.with_replacement:
# Sampling second agent with replacement
idx = random.randint(0, len(agents) - 1)
else:
# Sampling second agent without replacement
while idx == old_idx:
idx = random.randint(0, len(agents) - 1)
agent2 = agents[idx]
optimizer_agent2 = optimizers_dict["optimizer_agent" + str(idx + 1)]
agent_idxs[1] = idx + 1
elif FLAGS.agent_communities:
(idx1, idx2) = sample_agents(train_vec_prob, agent_idx_list)
agent1 = agents[idx1]
optimizer_agent1 = optimizers_dict["optimizer_agent" + str(idx1 + 1)]
agent_idxs[0] = idx1 + 1
agent2 = agents[idx2]
optimizer_agent2 = optimizers_dict["optimizer_agent" + str(idx2 + 1)]
agent_idxs[1] = idx2 + 1
elif FLAGS.num_agents == 1:
# Training with one agents
agent1 = agents[0]
agent2 = agents[0]
optimizer_agent1 = optimizers_dict["optimizer_agent1"]
optimizer_agent2 = optimizers_dict["optimizer_agent1"]
agent_idxs = [1, 1]
else:
# Training with just a pair of agents
agent1 = agents[0]
agent2 = agents[1]
optimizer_agent1 = optimizers_dict["optimizer_agent1"]
optimizer_agent2 = optimizers_dict["optimizer_agent2"]
agent_idxs = [1, 2]
# Create a copy of agents playing with themselves to avoid sharing the hidden state
if FLAGS.num_agents == 1 or (agent_idxs[0] == agent_idxs[1]):
agent2 = Agent(im_feature_type=FLAGS.img_feat,
im_feat_dim=FLAGS.img_feat_dim,
h_dim=FLAGS.h_dim,
m_dim=FLAGS.m_dim,
desc_dim=FLAGS.desc_dim,
num_classes=FLAGS.num_classes,
s_dim=FLAGS.s_dim,
use_binary=FLAGS.use_binary,
use_attn=FLAGS.visual_attn,
attn_dim=FLAGS.attn_dim,
use_MLP=FLAGS.use_MLP,
cuda=FLAGS.cuda,
im_from_scratch=FLAGS.improc_from_scratch,
dropout=FLAGS.dropout)
agent2.load_state_dict(agent1.state_dict())
if FLAGS.cuda:
agent2.cuda()
debuglogger.debug(f'Agent 1: {agent_idxs[0]}, Agent 1: {agent_idxs[1]}')
# Converted to Variable in get_classification_loss_and_stats
target = batch["target"]
im_feats_1 = batch["im_feats_1"] # Already Variable
im_feats_2 = batch["im_feats_2"] # Already Variable
p = batch["p"]
desc = Variable(batch["texts_vec"])
# GPU support
if FLAGS.cuda:
im_feats_1 = im_feats_1.cuda()
im_feats_2 = im_feats_2.cuda()
target = target.cuda()
desc = desc.cuda()
data = {"im_feats_1": im_feats_1,
"im_feats_2": im_feats_2,
"p": p}
exchange_args = dict()
exchange_args["data"] = data
exchange_args["target"] = target
exchange_args["desc"] = desc
exchange_args["train"] = True
exchange_args["break_early"] = not FLAGS.fixed_exchange
s, message_1, message_2, y_all, r = exchange(
agent1, agent2, exchange_args)
s_masks_1, s_feats_1, s_probs_1 = s[0]
s_masks_2, s_feats_2, s_probs_2 = s[1]
feats_1, probs_1 = message_1
feats_2, probs_2 = message_2
y_nc = y_all[0]
y = y_all[1]
# Mask loss if dynamic exchange length
if FLAGS.fixed_exchange:
binary_s_masks = None
binary_agent1_masks = None
binary_agent2_masks = None
bas_agent1_masks = None
bas_agent2_masks = None
y1_masks = None
y2_masks = None
outp_1 = y[0][-1]
outp_2 = y[1][-1]
else:
# TODO
pass
# Before communication predictions
# Obtain predictions, loss and stats agent 1
(dist_1_nc, maxdist_1_nc, argmax_1_nc, ent_1_nc, nll_loss_1_nc,
logs_1_nc) = get_classification_loss_and_stats(y_nc[0], target)
# Obtain predictions, loss and stats agent 2
(dist_2_nc, maxdist_2_nc, argmax_2_nc, ent_2_nc, nll_loss_2_nc,
logs_2_nc) = get_classification_loss_and_stats(y_nc[1], target)
# After communication predictions
# Obtain predictions, loss and stats agent 1
(dist_1, maxdist_1, argmax_1, ent_1, nll_loss_1_com,
logs_1) = get_classification_loss_and_stats(outp_1, target)
# Obtain predictions, loss and stats agent 2
(dist_2, maxdist_2, argmax_2, ent_2, nll_loss_2_com,
logs_2) = get_classification_loss_and_stats(outp_2, target)
# Store prediction entropies
if FLAGS.fixed_exchange:
ent_agent1_y = [ent_1]
ent_agent2_y = [ent_2]
else:
# TODO - not implemented yet
ent_agent1_y = []
ent_agent2_y = []
# Calculate accuracy
accuracy_1_nc, correct_1_nc, top_1_1_nc = calculate_accuracy(
dist_1_nc, target, FLAGS.batch_size, FLAGS.top_k_train)
accuracy_1, correct_1, top_1_1 = calculate_accuracy(
dist_1, target, FLAGS.batch_size, FLAGS.top_k_train)
accuracy_2_nc, correct_2_nc, top_1_2_nc = calculate_accuracy(
dist_2_nc, target, FLAGS.batch_size, FLAGS.top_k_train)
accuracy_2, correct_2, top_1_2 = calculate_accuracy(
dist_2, target, FLAGS.batch_size, FLAGS.top_k_train)
# Calculate accuracy
total_correct_nc = correct_1_nc.float() + correct_2_nc.float()
total_correct_com = correct_1.float() + correct_2.float()
total_accuracy_nc = (total_correct_nc ==
2).sum() / float(FLAGS.batch_size)
total_accuracy_com = (total_correct_com ==
2).sum() / float(FLAGS.batch_size)
atleast1_accuracy_nc = (
total_correct_nc > 0).sum() / float(FLAGS.batch_size)
atleast1_accuracy_com = (
total_correct_com > 0).sum() / float(FLAGS.batch_size)
            # Calculate rewards
            # rewards = difference between performance before and after communication
            # Only use top 1
            # Per-example count (0, 1 or 2) of agents whose top-1 prediction is
            # correct, before (nc) and after (com) communication.
            total_correct_top_1_nc = top_1_1_nc.float() + top_1_2_nc.float()
            total_correct_top_1_com = top_1_1.float() + top_1_2.float()
            if FLAGS.reward_type == 'cooperative':
                # Shared reward: the pair's improvement after communicating.
                rewards_1 = (total_correct_top_1_com.float() - total_correct_top_1_nc.float())
                rewards_2 = rewards_1
            elif FLAGS.reward_type == 'cooperative_nodiff':
                # Shared reward: the pair's absolute post-communication performance.
                rewards_1 = total_correct_top_1_com.float()
                rewards_2 = rewards_1
            elif FLAGS.reward_type == 'selfish_nodiff':
                # Individual reward: each agent's own post-communication correctness.
                rewards_1 = top_1_1.float()
                rewards_2 = top_1_2.float()
            elif FLAGS.reward_type == 'selfish':
                # Individual reward: each agent's own improvement after communicating.
                rewards_1 = top_1_1.float() - top_1_1_nc.float()
                rewards_2 = top_1_2.float() - top_1_2_nc.float()
            else:
                # Unknown reward type is a configuration error: log it and abort.
                debuglogger.warn(f'Reward type {FLAGS.reward_type} not recognized')
                sys.exit()
debuglogger.debug(
f'total correct top 1 com: {total_correct_top_1_com}')
debuglogger.debug(
f'total correct top 1 nc: {total_correct_top_1_nc}')
debuglogger.debug(f'total correct com: {total_correct_com}')
debuglogger.debug(f'total correct nc: {total_correct_nc}')
debuglogger.debug(f'rewards_1: {rewards_1}')
debuglogger.debug(f'rewards_2: {rewards_2}')
# Store results
batch_accuracy['agent1_nc'].append(accuracy_1_nc)
batch_accuracy['agent2_nc'].append(accuracy_2_nc)
batch_accuracy['agent1_com'].append(accuracy_1)
batch_accuracy['agent2_com'].append(accuracy_2)
batch_accuracy['total_nc'].append(total_correct_nc)
batch_accuracy['total_com'].append(total_correct_com)
batch_accuracy['rewards_1'].append(rewards_1)
batch_accuracy['rewards_1'].append(rewards_1)
batch_accuracy['total_acc_both_nc'].append(total_accuracy_nc)
batch_accuracy['total_acc_both_com'].append(total_accuracy_com)
batch_accuracy['total_acc_atl1_nc'].append(atleast1_accuracy_nc)
batch_accuracy['total_acc_atl1_com'].append(atleast1_accuracy_com)
# Cross entropy loss for each agent
nll_loss_1 = FLAGS.nll_loss_weight_nc * nll_loss_1_nc + \
FLAGS.nll_loss_weight_com * nll_loss_1_com
nll_loss_2 = FLAGS.nll_loss_weight_nc * nll_loss_2_nc + \
FLAGS.nll_loss_weight_com * nll_loss_2_com
loss_agent1 = nll_loss_1
loss_agent2 = nll_loss_2
# If training communication channel
if FLAGS.use_binary:
if not FLAGS.fixed_exchange:
# TODO - fix
# Stop loss
# TODO - check old use of entropy_s
# The receiver might have no z-loss if we stop after first message from sender.
debuglogger.warning(
f'Error: multistep adaptive exchange not implemented yet')
sys.exit()
elif FLAGS.max_exchange == 1:
loss_binary_1, ent_bin_1 = calculate_loss_binary(
feats_1[0], probs_1[0], rewards_1, r[0][0], FLAGS.entropy_agent1)
loss_binary_2, ent_bin_2 = calculate_loss_binary(
feats_2[0], probs_2[0], rewards_2, r[1][0], FLAGS.entropy_agent2)
loss_baseline_1 = calculate_loss_bas(r[0][0], rewards_1)
loss_baseline_2 = calculate_loss_bas(r[1][0], rewards_2)
ent_agent1_bin = [ent_bin_1]
ent_agent2_bin = [ent_bin_2]
elif FLAGS.max_exchange > 1:
loss_binary_1, ent_bin_1 = multistep_loss_binary(
feats_1, probs_1, rewards_1, r[0], binary_agent1_masks, FLAGS.entropy_agent1)
loss_binary_2, ent_bin_2 = multistep_loss_binary(
feats_2, probs_2, rewards_2, r[1], binary_agent2_masks, FLAGS.entropy_agent2)
loss_baseline_1 = multistep_loss_bas(r[0], rewards_1, bas_agent1_masks)
loss_baseline_2 = multistep_loss_bas(r[1], rewards_2, bas_agent2_masks)
ent_agent1_bin = ent_bin_1
ent_agent2_bin = ent_bin_2
debuglogger.debug(f'Loss bin 1: {loss_binary_1} bin 2: {loss_binary_2}')
debuglogger.debug(f'Loss baseline 1: {loss_baseline_1} baseline 2: {loss_baseline_2}')
debuglogger.debug(f'Entropy bin 1: {ent_agent1_bin} Entropy bin 2: {ent_agent1_bin}')
if FLAGS.use_binary:
loss_agent1 += FLAGS.rl_loss_weight * loss_binary_1
loss_agent2 += FLAGS.rl_loss_weight * loss_binary_2
if not FLAGS.fixed_exchange:
# TODO
pass
else:
loss_baseline_1 = Variable(torch.zeros(1))
loss_baseline_2 = Variable(torch.zeros(1))
loss_agent1 += FLAGS.baseline_loss_weight * loss_baseline_1
loss_agent2 += FLAGS.baseline_loss_weight * loss_baseline_2
            # When both roles are played by (copies of) the same agent, gradients from
            # both sides of the exchange must be folded into a single parameter set.
            if FLAGS.num_agents == 1 or (agent_idxs[0] == agent_idxs[1]):
                debuglogger.info(f'Agent 1 and 2 are the same')
                optimizer_agent1.zero_grad()
                # agent2 is a fresh copy with no optimizer of its own (see the
                # Agent(...) + load_state_dict construction above); clear it directly
                agent2.zero_grad()
                # Get gradients from both sides of the exchange
                # retain_graph=True: the two losses share parts of the exchange graph,
                # so the first backward must not free it
                loss_agent1.backward(retain_graph=True)
                nn.utils.clip_grad_norm(agent1.parameters(), max_norm=1.)
                loss_agent2.backward()
                nn.utils.clip_grad_norm(agent2.parameters(), max_norm=1.)
                # Add agent2 gradients to agent1 gradients
                for p1, p2 in zip(agent1.parameters(), agent2.parameters()):
                    if p1.grad is not None:
                        p1.grad += p2.grad
                # Only need to update agent1
                optimizer_agent1.step()
            else:
                debuglogger.debug(f'Different agents')
                # Update agent1
                optimizer_agent1.zero_grad()
                loss_agent1.backward()
                nn.utils.clip_grad_norm(agent1.parameters(), max_norm=1.)
                optimizer_agent1.step()
                # Update agent2
                optimizer_agent2.zero_grad()
                loss_agent2.backward()
                nn.utils.clip_grad_norm(agent2.parameters(), max_norm=1.)
                optimizer_agent2.step()
# Print logs regularly
if step % FLAGS.log_interval == 0:
# Average batch accuracy
avg_batch_acc_total_nc = np.array(
batch_accuracy['total_acc_both_nc'][-FLAGS.log_interval:]).mean()
avg_batch_acc_total_com = np.array(
batch_accuracy['total_acc_both_com'][-FLAGS.log_interval:]).mean()
avg_batch_acc_atl1_nc = np.array(
batch_accuracy['total_acc_atl1_nc'][-FLAGS.log_interval:]).mean()
avg_batch_acc_atl1_com = np.array(
batch_accuracy['total_acc_atl1_com'][-FLAGS.log_interval:]).mean()
# Log accuracy
log_acc = "Epoch: {} Step: {} Batch: {} Agent 1: {} Agent 2: {} Training Accuracy:\nBefore comms: Both correct: {} At least 1 correct: {}\nAfter comms: Both correct: {} At least 1 correct: {}".format(epoch, step, i_batch, agent_idxs[0], agent_idxs[1], avg_batch_acc_total_nc, avg_batch_acc_atl1_nc, avg_batch_acc_total_com, avg_batch_acc_atl1_com)
flogger.Log(log_acc)
# Agent1
log_loss_agent1 = "Epoch: {} Step: {} Batch: {} Loss Agent1: {}".format(
epoch, step, i_batch, loss_agent1.data.item())
flogger.Log(log_loss_agent1)
# Agent 1 breakdown
log_loss_agent1_detail = "Epoch: {} Step: {} Batch: {} Loss Agent1: NLL: {} (BC:{} / AC:{}), RL: {}, Baseline: {} ".format(
epoch, step, i_batch, nll_loss_1.data.item(), nll_loss_1_nc.data.item(), nll_loss_1_com.data.item(), loss_binary_1.data.item(), loss_baseline_1.data.item())
flogger.Log(log_loss_agent1_detail)
# Agent2
log_loss_agent2 = "Epoch: {} Step: {} Batch: {} Loss Agent2: {}".format(
epoch, step, i_batch, loss_agent2.data.item())
flogger.Log(log_loss_agent2)
# Agent 2 breakdown
log_loss_agent2_detail = "Epoch: {} Step: {} Batch: {} Loss Agent2: NLL: {} (BC:{} / AC:{}), RL: {}, Baseline: {} ".format(
epoch, step, i_batch, nll_loss_2.data.item(), nll_loss_2_nc.data.item(), nll_loss_2_com.data.item(), loss_binary_2.data.item(), loss_baseline_2.data.item())
flogger.Log(log_loss_agent2_detail)
# Log predictions
log_pred = "Predictions: Target | Agent1 BC | Agent1 AC | Agent2 BC | Agent2 AC: {}".format(
torch.cat([target, argmax_1_nc, argmax_1, argmax_2_nc, argmax_2], 0).view(-1, FLAGS.batch_size))
flogger.Log(log_pred)
# Log Entropy for both Agents
if FLAGS.use_binary:
if len(ent_agent1_bin) > 0:
log_ent_agent1_bin = "Entropy Agent1 Binary"
for i, ent in enumerate(ent_agent1_bin):
log_ent_agent1_bin += "\n{}. {}".format(
i, -ent.data.item())
log_ent_agent1_bin += "\n"
flogger.Log(log_ent_agent1_bin)
if len(ent_agent2_bin) > 0:
log_ent_agent2_bin = "Entropy Agent2 Binary"
for i, ent in enumerate(ent_agent2_bin):
log_ent_agent2_bin += "\n{}. {}".format(
i, -ent.data.item())
log_ent_agent2_bin += "\n"
flogger.Log(log_ent_agent2_bin)
if len(ent_agent1_y) > 0:
log_ent_agent1_y = "Entropy Agent1 Predictions\n"
log_ent_agent1_y += "No comms entropy {}\n Comms entropy\n".format(
-ent_1_nc.data.item())
for i, ent in enumerate(ent_agent1_y):
log_ent_agent1_y += "\n{}. {}".format(i, -ent.data.item())
log_ent_agent1_y += "\n"
flogger.Log(log_ent_agent1_y)
if len(ent_agent2_y) > 0:
log_ent_agent2_y = "Entropy Agent2 Predictions\n"
log_ent_agent2_y += "No comms entropy {}\n Comms entropy\n".format(
-ent_2_nc.data.item())
for i, ent in enumerate(ent_agent2_y):
log_ent_agent2_y += "\n{}. {}".format(i, -ent.data.item())
log_ent_agent2_y += "\n"
flogger.Log(log_ent_agent2_y)
# Agent 1
logger.log(key="Loss Agent 1 (Total)",
val=loss_agent1.data.item(), step=step)
logger.log(key="Loss Agent 1 (NLL)",
val=nll_loss_1.data.item(), step=step)
logger.log(key="Loss Agent 1 (NLL NC)",
val=nll_loss_1_nc.data.item(), step=step)
logger.log(key="Loss Agent 1 (NLL COM)",
val=nll_loss_1_com.data.item(), step=step)
if FLAGS.use_binary:
logger.log(key="Loss Agent 1 (RL)",
val=loss_binary_1.data.item(), step=step)
logger.log(key="Loss Agent 1 (BAS)",
val=loss_baseline_1.data.item(), step=step)
if not FLAGS.fixed_exchange:
# TODO
pass
# Agent 2
logger.log(key="Loss Agent 2 (Total)",
val=loss_agent2.data.item(), step=step)
logger.log(key="Loss Agent 2 (NLL)",
val=nll_loss_2.data.item(), step=step)
logger.log(key="Loss Agent 2 (NLL NC)",
val=nll_loss_2_nc.data.item(), step=step)
logger.log(key="Loss Agent 2 (NLL COM)",
val=nll_loss_2_com.data.item(), step=step)
if FLAGS.use_binary:
logger.log(key="Loss Agent 2 (RL)",
val=loss_binary_2.data.item(), step=step)
logger.log(key="Loss Agent 2 (BAS)",
val=loss_baseline_2.data.item(), step=step)
if not FLAGS.fixed_exchange:
# TODO
pass
# Accuracy metrics
logger.log(key="Training Accuracy (Total, BC)",
val=avg_batch_acc_total_nc, step=step)
logger.log(key="Training Accuracy (At least 1, BC)",
val=avg_batch_acc_atl1_nc, step=step)
logger.log(key="Training Accuracy (Total, COM)",
val=avg_batch_acc_total_com, step=step)
logger.log(key="Training Accuracy (At least 1, COM)",
val=avg_batch_acc_atl1_com, step=step)
# Report development accuracy
if step % FLAGS.log_dev == 0:
# Report in domain development accuracy and checkpoint if best result
log_agents = "Epoch: {} Step: {} Batch: {} Agent 1: {} Agent 2: {}".format(
epoch, step, i_batch, agent_idxs[0], agent_idxs[1])
flogger.Log(log_agents)
dev_accuracy_id, total_accuracy_com = get_and_log_dev_performance(
agent1, agent2, FLAGS.dataset_indomain_valid_path, True, dev_accuracy_id, logger, flogger, "In Domain", epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag="no_tag")
if step >= FLAGS.save_after and total_accuracy_com > best_dev_acc:
best_dev_acc = total_accuracy_com
flogger.Log(
"Checkpointing with best In Domain Development Accuracy (both right after comms): {}".format(best_dev_acc))
# Optionally store additional information
data = dict(step=step, best_dev_acc=best_dev_acc)
torch_save(FLAGS.checkpoint + "_best", data, models_dict,
optimizers_dict, gpu=0 if FLAGS.cuda else -1)
# Re-run in domain dev performance and log examples and analyze messages for a number of pairs of agents
# Time consuming to run, only run if dev_acc high enough
if best_dev_acc > 0.75 and FLAGS.agent_pools:
for i in range(FLAGS.num_agents - 1):
flogger.Log("Agent 1: {}".format(i + 1))
logger.log(key="Agent 1: ", val=i + 1, step=step)
_agent1 = models_dict["agent" + str(i + 1)]
flogger.Log("Agent 2: {}".format(i + 2))
logger.log(key="Agent 2: ", val=i + 2, step=step)
_agent2 = models_dict["agent" + str(i + 2)]
if i == 0:
# Report in domain development accuracy and store examples
dev_accuracy_id_pairs[i], total_accuracy_com = get_and_log_dev_performance(
_agent1, _agent2, FLAGS.dataset_indomain_valid_path, True, dev_accuracy_id_pairs[i], logger, flogger, f'In Domain: Agents {i + 1},{i + 2}', epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag=f'A_{i + 1}_{i + 2}')
else:
# Report in domain development accuracy
dev_accuracy_id_pairs[i], total_accuracy_com = get_and_log_dev_performance(
_agent1, _agent2, FLAGS.dataset_indomain_valid_path, True, dev_accuracy_id_pairs[i], logger, flogger, f'In Domain: Agents {i + 1},{i + 2}', epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag=f'A_{i + 1}_{i + 2}')
# Report out of domain development accuracy
dev_accuracy_ood, total_accuracy_com = get_and_log_dev_performance(
agent1, agent2, FLAGS.dataset_outdomain_valid_path, True, dev_accuracy_ood, logger, flogger, "Out of Domain", epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag="no_tag")
# Report in domain development accuracy when training agent communities
if FLAGS.agent_communities and step % FLAGS.log_community_eval == 0:
eval_community(eval_agent_list, models_dict, dev_accuracy_id_pairs[0], logger, flogger, epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag="no_tag")
# Log how much the language has changed by measuring the performance of one agent from each pool playing a frozen version of itself.
offset = 0
for _, p in enumerate(num_agents_per_community):
idx = offset + p - 1 # Select last agent from each community to play with itself
key = "agent" + str(idx + 1)
dev_accuracy_id, total_accuracy_com = get_and_log_dev_performance(
models_dict[key], frozen_agents[key], FLAGS.dataset_indomain_valid_path, True, dev_accuracy_id, logger, flogger, f'In Domain, Pool {_ + 1}, agent {idx + 1} playing with frozen version of itself, ids: {id(models_dict[key])}/{id(frozen_agents[key])}', epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag="no_tag")
offset += p
# Report in domain development accuracy when agents communicate with themselves
if (not FLAGS.agent_communities) and step % FLAGS.log_self_com == 0:
for i in range(FLAGS.num_agents):
agent1 = models_dict["agent" + str(i + 1)]
# Create a copy of agents playing with themselves to avoid sharing the hidden state
agent2 = Agent(im_feature_type=FLAGS.img_feat,
im_feat_dim=FLAGS.img_feat_dim,
h_dim=FLAGS.h_dim,
m_dim=FLAGS.m_dim,
desc_dim=FLAGS.desc_dim,
num_classes=FLAGS.num_classes,
s_dim=FLAGS.s_dim,
use_binary=FLAGS.use_binary,
use_attn=FLAGS.visual_attn,
attn_dim=FLAGS.attn_dim,
use_MLP=FLAGS.use_MLP,
cuda=FLAGS.cuda,
im_from_scratch=FLAGS.improc_from_scratch,
dropout=FLAGS.dropout)
agent2.load_state_dict(agent1.state_dict())
if FLAGS.cuda:
agent2.cuda()
flogger.Log("Agent {} self communication: id {}".format(i + 1, id(agent)))
dev_accuracy_self_com[i], total_accuracy_com = get_and_log_dev_performance(
agent1, agent2, FLAGS.dataset_indomain_valid_path, True, dev_accuracy_self_com[i], logger, flogger, "Agent " + str(i + 1) + " self communication: In Domain", epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag=f'self_com_A_{i + 1}')
# Every FLAGS.check_accuracy_interval steps check if the agents have reached an average accuracy of 75%
# If yes, then saved a version
if step > 0 and (step % FLAGS.check_accuracy_interval == 0):
# Only check the first 8 x 8 agents max - otherwise too time consuming
_agent_accuracy = []
for i in range(min(FLAGS.num_agents, 8)):
for j in range(min(FLAGS.num_agents, 8)):
debuglogger.info(f"Accuracy check i:{i} j:{j}")
_agent1 = models_dict["agent" + str(i + 1)]
if i == j:
# Create a copy of agents playing with themselves to avoid sharing the hidden state
_agent2 = Agent(im_feature_type=FLAGS.img_feat,
im_feat_dim=FLAGS.img_feat_dim,
h_dim=FLAGS.h_dim,
m_dim=FLAGS.m_dim,
desc_dim=FLAGS.desc_dim,
num_classes=FLAGS.num_classes,
s_dim=FLAGS.s_dim,
use_binary=FLAGS.use_binary,
use_attn=FLAGS.visual_attn,
attn_dim=FLAGS.attn_dim,
use_MLP=FLAGS.use_MLP,
cuda=FLAGS.cuda,
im_from_scratch=FLAGS.improc_from_scratch,
dropout=FLAGS.dropout)
_agent2.load_state_dict(agent1.state_dict())
if FLAGS.cuda:
_agent2.cuda()
else:
_agent2 = models_dict["agent" + str(j + 1)]
dev_accuracy_id_pairs[i], total_accuracy_com = get_and_log_dev_performance(
_agent1, _agent2, FLAGS.dataset_indomain_valid_path, True, dev_accuracy_id_pairs[i], logger, flogger, f'Average Check: In Domain: Agents {i + 1},{j + 1}', epoch, step, i_batch, store_examples=False, analyze_messages=False, save_messages=False, agent_tag=f'A_{i + 1}_{j + 1}')
_agent_accuracy.append(total_accuracy_com)
if FLAGS.num_agents == 2:
_avg_accuracy = np.mean([_agent_accuracy[1], _agent_accuracy[2]])
else:
_avg_accuracy = np.mean(_agent_accuracy)
flogger.Log(f"Step {step}: Average accuracy: {_avg_accuracy}, Individual accuracies: {_agent_accuracy}")
if _avg_accuracy > 0.80:
flogger.Log(f"Checkpointing a model with {_avg_accuracy} average accuracy at {step} steps")
# Optionally store additional information
data = dict(step=step, best_dev_acc=best_dev_acc)
torch_save(FLAGS.checkpoint + "_{0:.4f}".format(_avg_accuracy), data, models_dict,
optimizers_dict, gpu=0 if FLAGS.cuda else -1)
flogger.Log(f"Accuracy reached at least 75% on average, stopping training at step {step}...")
sys.exit()
# Save model periodically (overwrites most recent)
if step >= FLAGS.save_after and step % FLAGS.save_interval == 0:
flogger.Log("Checkpointing.")
# Optionally store additional information
data = dict(step=step, best_dev_acc=best_dev_acc)
torch_save(FLAGS.checkpoint, data, models_dict,
optimizers_dict, gpu=0 if FLAGS.cuda else -1)
# Save separate copy of model every FLAGS.save_distinct_interval steps
if step >= FLAGS.save_after and step % FLAGS.save_distinct_interval == 0:
flogger.Log(f"Checkpointing a distinct model at {step} steps")
# Optionally store additional information
data = dict(step=step, best_dev_acc=best_dev_acc)
torch_save(FLAGS.checkpoint + "_" + str(step), data, models_dict,
optimizers_dict, gpu=0 if FLAGS.cuda else -1)
# Increment batch step
step += 1
# Increment epoch
epoch += 1
flogger.Log("Finished training.")
"""
Preset Model Configurations
1. Fixed - Fixed conversation length.
2. Adaptive - Adaptive conversation length using STOP bit.
3. FixedAttention - Fixed with Visual Attention.
4. AdaptiveAttention - Adaptive with Visual Attention.
"""
def Fixed():
    """Preset: fixed-length conversation, pooled image features, no attention."""
    FLAGS.visual_attn = False
    FLAGS.fixed_exchange = True
    FLAGS.img_feat_dim = 512
    FLAGS.img_feat = "avgpool_512"
def Adaptive():
    """Preset: adaptive conversation length (STOP bit), pooled features, no attention."""
    FLAGS.visual_attn = False
    FLAGS.fixed_exchange = False
    FLAGS.img_feat_dim = 512
    FLAGS.img_feat = "avgpool_512"
def FixedAttention():
    """Preset: fixed-length conversation with visual attention over conv features."""
    # Attention operates over spatial conv features, not the pooled vector.
    FLAGS.img_feat = "layer4_2"
    FLAGS.img_feat_dim = 512
    FLAGS.fixed_exchange = True
    FLAGS.visual_attn = True
    FLAGS.attn_context_dim = 1000
    FLAGS.attn_extra_context = False
    FLAGS.attn_dim = 256
def AdaptiveAttention():
    """Preset: adaptive conversation length with visual attention and extra context."""
    # Attention operates over spatial conv features, not the pooled vector.
    FLAGS.img_feat = "layer4_2"
    FLAGS.img_feat_dim = 512
    FLAGS.fixed_exchange = False
    FLAGS.visual_attn = True
    FLAGS.attn_context_dim = 1000
    FLAGS.attn_extra_context = True
    FLAGS.attn_dim = 256
def flags():
    """Declare every command-line flag for the experiment (python-gflags).

    Only declares flags; parsing happens later via ``FLAGS(sys.argv)``.
    Grouped by concern: debug, convenience/paths, extraction, performance,
    display, data, model, optimization, and conversation settings.
    """
    # Debug settings
    gflags.DEFINE_string("branch", None, "")
    gflags.DEFINE_string("sha", None, "")
    gflags.DEFINE_boolean("debug", False, "")
    gflags.DEFINE_string("debug_log_level", 'INFO', "")
    # Convenience settings
    gflags.DEFINE_integer("save_after", 1000,
                          "Min step (num batches) after which to save")
    gflags.DEFINE_integer(
        "save_interval", 1000, "How often to save after min batches have been reached")
    gflags.DEFINE_integer(
        "save_distinct_interval", 50000, "How often to save a distinct model after min batches have been reached")
    gflags.DEFINE_integer(
        "check_accuracy_interval", 25000, "How often to check if the accuracy has reached 75%")
    gflags.DEFINE_string("checkpoint", None, "Path to save data")
    gflags.DEFINE_string("conf_mat", None, "Path to save confusion matrix")
    gflags.DEFINE_string("log_path", "./logs", "Path to save logs")
    gflags.DEFINE_string("log_file", None, "")
    gflags.DEFINE_string("id_eval_csv_file", None, "Path to in domain eval log file")
    gflags.DEFINE_string("ood_eval_csv_file", None, "Path to out of domain eval log file")
    gflags.DEFINE_string(
        "json_file", None, "Where to store all flags for an experiment")
    gflags.DEFINE_string("log_load", None, "")
    gflags.DEFINE_boolean("eval_only", False, "")
    gflags.DEFINE_boolean("eval_xproduct", False, "Whether to evaluate the full cross product of agent pairs")
    gflags.DEFINE_boolean("eval_agent_communities", False, "Whether to evaluate on the community evaluation pairs")
    gflags.DEFINE_boolean("test_language_similarity", False,
                          "Whether to test language similarity by changing trained agent messages, eval only option")
    gflags.DEFINE_boolean("self_similarity", False,
                          "Whether the language similarity being evaluated is with itself")
    gflags.DEFINE_boolean("ancestor_similarity", False,
                          "Whether the language similarity being evaluated is with an ancestor language")
    # Extract Settings
    gflags.DEFINE_boolean("binary_only", False,
                          "Only extract binary data (no training)")
    gflags.DEFINE_string("binary_output", None, "Where to store binary data")
    # Performance settings
    gflags.DEFINE_boolean("cuda", False, "")
    # Display settings
    gflags.DEFINE_string("env", "main", "")
    gflags.DEFINE_boolean("visdom", False, "")
    gflags.DEFINE_string("experiment_name", None, "")
    gflags.DEFINE_integer("log_interval", 50, "")
    gflags.DEFINE_integer("log_dev", 1000, "")
    gflags.DEFINE_integer("log_self_com", 10000, "")
    gflags.DEFINE_integer("log_community_eval", 10000, "")
    gflags.DEFINE_boolean("report_on_complexity", False, "")
    # Data settings
    gflags.DEFINE_integer("wv_dim", 100, "Dimension of the word vectors")
    gflags.DEFINE_string("dataset", "shapeworld",
                         "What type of dataset to use")
    gflags.DEFINE_string(
        "dataset_path", "./Shapeworld/data/oneshape_simple_textselect", "Root directory of the dataset")
    gflags.DEFINE_string(
        "dataset_indomain_valid_path", "./Shapeworld/data/oneshape_valid/oneshape_simple_textselect", "Root directory of the in domain validation dataset")
    # BUGFIX: help text previously said "in domain" for the out-of-domain path.
    gflags.DEFINE_string(
        "dataset_outdomain_valid_path", "./Shapeworld/data/oneshape_valid_all_combos/oneshape_simple_textselect", "Root directory of the out of domain validation dataset")
    gflags.DEFINE_string("dataset_mode", "train", "")
    gflags.DEFINE_enum("dataset_eval_mode", "validation",
                       ["validation", "test"], "")
    gflags.DEFINE_string("dataset_type", "agreement", "Task type")
    gflags.DEFINE_string("dataset_name", "oneshape_simple_textselect",
                         "Name of dataset (should correspond to the root directory name automatically generated using ShapeWorld generate.py)")
    gflags.DEFINE_integer("dataset_size_train", 100,
                          "How many examples to use")
    gflags.DEFINE_integer("dataset_size_dev", 100, "How many examples to use")
    gflags.DEFINE_string(
        "glove_path", "./glove.6B/glove.6B.100d.txt", "")
    gflags.DEFINE_boolean("shuffle_train", True, "")
    gflags.DEFINE_boolean("shuffle_dev", False, "")
    gflags.DEFINE_integer("random_seed", 7, "")
    gflags.DEFINE_enum(
        "resnet", "34", ["18", "34", "50", "101", "152"], "Specify Resnet variant.")
    gflags.DEFINE_boolean("improc_from_scratch", False, "Whether to train the image processor from scratch")
    gflags.DEFINE_integer("image_size", 128, "Width and height in pixels of the images to give to the agents")
    gflags.DEFINE_boolean("vertical_mask", False, "Whether to just use a vertical mask on images. Otherwise the mask is random")
    # Model settings
    gflags.DEFINE_enum("model_type", None, [
        "Fixed", "Adaptive", "FixedAttention", "AdaptiveAttention"], "Preset model configurations.")
    gflags.DEFINE_enum("img_feat", "avgpool_512", [
        "layer4_2", "avgpool_512", "fc"], "Specify which layer output to use as image")
    gflags.DEFINE_enum("data_context", "fc", [
        "fc"], "Specify which layer output to use as context for attention")
    gflags.DEFINE_integer("img_feat_dim", 512,
                          "Dimension of the image features")
    gflags.DEFINE_integer(
        "h_dim", 100, "Hidden dimension for all hidden representations in the network")
    gflags.DEFINE_integer("m_dim", 64, "Dimension of the messages")
    gflags.DEFINE_integer(
        "desc_dim", 100, "Dimension of the input description vectors")
    gflags.DEFINE_integer(
        "num_classes", 10, "How many texts the agents have to choose from")
    gflags.DEFINE_integer("s_dim", 1, "Stop probability output dim")
    gflags.DEFINE_boolean("use_binary", True,
                          "Encoding whether agents uses binary features")
    gflags.DEFINE_boolean("no_comms_channel", False,
                          "Whether to mute the communications channel")
    gflags.DEFINE_list("agent_dicts", ['None', 'None'], "list of paths to average message code dictionaries")
    gflags.DEFINE_list("agent_supplementary_dicts", ['None', 'None'], "list of paths to extra average message code dictionaries. Used for testing language similarity between an existing and ancestor community")
    gflags.DEFINE_boolean("randomize_comms", False,
                          "Whether to randomize the order in which agents communicate")
    gflags.DEFINE_string("reward_type", "cooperative", "String describing the reward type to train the message channel")
    gflags.DEFINE_boolean("agent_pools", False,
                          "Whether to have a pool of agents to train instead of two fixed agents")
    gflags.DEFINE_integer("num_agents", 2, "How many agents total (single pool or community)")
    gflags.DEFINE_integer("num_supplementary_agents", 2, "How many agents total in an additional community. Used for testing language similarity between an existing and ancestor community.")
    gflags.DEFINE_list("compare_language_pairs", [0, 1], "Index of the two groups in a community to compare")
    gflags.DEFINE_boolean("with_replacement", False, "Whether to sample agents from pool with replacement")
    gflags.DEFINE_boolean("agent_communities", False,
                          "Whether to have a community of agents to train instead of two fixed agents")
    gflags.DEFINE_integer("num_communities", 2, "How many communities of agents")
    gflags.DEFINE_string("community_type", "dense", "Type of agent community: dense or chain")
    gflags.DEFINE_list("community_checkpoints", ['None', 'None'], "list of checkpoints per community")
    gflags.DEFINE_boolean("gen_community_messages", False, "")
    gflags.DEFINE_string("community_structure", "55555", "String listing the community structure")
    gflags.DEFINE_list("num_agents_per_community", [5, 5], "Number of agents per community. Specify one per community, can be different")
    gflags.DEFINE_list("intra_pool_connect_p", [1.0, 1.0], "Percentage of intra pool connectivity. Specify one value per community, can be different")
    # BUGFIX: "connectvity" typo corrected in the help text below.
    gflags.DEFINE_float("inter_pool_connect_p", 0.1, "Percentage of inter pool connectivity. Specify one value")
    gflags.DEFINE_integer("ratio_adjust", 1, "Whether to adjust the inter:intra training ratio")
    gflags.DEFINE_float("intra_inter_ratio", 1.0, "Ratio of intra to inter pool training. Default is 1.0. Agents across pools and within pools will be trained equally.")
    gflags.DEFINE_float("first_msg", 0, "Value to fill the first message with")
    gflags.DEFINE_boolean("visual_attn", False, "agents attends over image")
    gflags.DEFINE_boolean(
        "use_MLP", False, "use MLP to generate prediction scores")
    gflags.DEFINE_integer("attn_dim", 256, "")
    gflags.DEFINE_boolean("attn_extra_context", False, "")
    gflags.DEFINE_integer("attn_context_dim", 4096, "")
    gflags.DEFINE_float("dropout", 0.3, "How much dropout to apply when training using the original image")
    gflags.DEFINE_integer("top_k_dev", 3, "Top-k error in development")
    gflags.DEFINE_integer("top_k_train", 3, "Top-k error in training")
    # Optimization settings
    gflags.DEFINE_enum("optim_type", "RMSprop", ["Adam", "SGD", "RMSprop"], "")
    gflags.DEFINE_integer("batch_size", 32, "Minibatch size for train set.")
    gflags.DEFINE_integer("batch_size_dev", 50, "Minibatch size for dev set.")
    gflags.DEFINE_float("learning_rate", 1e-4, "Used in optimizer.")
    gflags.DEFINE_integer("max_epoch", 500, "")
    gflags.DEFINE_float("entropy_s", None, "")
    gflags.DEFINE_float("entropy_agent1", None, "")
    gflags.DEFINE_float("entropy_agent2", None, "")
    gflags.DEFINE_float("nll_loss_weight_nc", 1.0, "")
    gflags.DEFINE_float("nll_loss_weight_com", 1.0, "")
    gflags.DEFINE_float("rl_loss_weight", 1.0, "")
    gflags.DEFINE_float("baseline_loss_weight", 1.0, "")
    # Conversation settings
    gflags.DEFINE_integer("max_exchange", 1, "")
    gflags.DEFINE_boolean("fixed_exchange", True, "")
    gflags.DEFINE_boolean(
        "bit_flip", False, "Whether sender's messages are corrupted.")
    gflags.DEFINE_string("corrupt_region", None,
                         "Comma-separated ranges of bit indexes (e.g. ``0:3,5'').")
def default_flags():
    """Fill in derived and unset flags after the initial parse.

    Applies, in order: flag values restored from a previous run's JSON log
    (``log_load``), a preset model configuration (``model_type``), then
    defaults computed from other flags — experiment name, log/checkpoint
    paths, git metadata, and CUDA availability. Mutates the global FLAGS.
    """
    if FLAGS.log_load:
        # Restore flags from a previous run's JSON dump, keeping only
        # names that are actually declared flags.
        with open(FLAGS.log_load) as f:
            log_flags = json.load(f)
        for k in log_flags.keys():
            if k in FLAGS.FlagValuesDict().keys():
                setattr(FLAGS, k, log_flags[k])
        FLAGS(sys.argv)  # Optionally override predefined flags.
    if FLAGS.model_type:
        # Dispatch to the preset configuration by name. An explicit dict
        # lookup replaces eval() on a flag value; model_type is already
        # restricted to these names by DEFINE_enum.
        presets = {
            "Fixed": Fixed,
            "Adaptive": Adaptive,
            "FixedAttention": FixedAttention,
            "AdaptiveAttention": AdaptiveAttention,
        }
        presets[FLAGS.model_type]()
        FLAGS(sys.argv)  # Optionally override predefined flags.
    if not FLAGS.use_binary:
        FLAGS.exchange_samples = 0
    if not FLAGS.experiment_name:
        timestamp = str(int(time.time()))
        FLAGS.experiment_name = "{}-so_{}-wv_{}-bs_{}-{}".format(
            FLAGS.dataset,
            FLAGS.m_dim,
            FLAGS.wv_dim,
            FLAGS.batch_size,
            timestamp,
        )
    # Default every output path under log_path, keyed by experiment name.
    if not FLAGS.conf_mat:
        FLAGS.conf_mat = os.path.join(
            FLAGS.log_path, FLAGS.experiment_name + ".conf_mat.txt")
    if not FLAGS.log_file:
        FLAGS.log_file = os.path.join(
            FLAGS.log_path, FLAGS.experiment_name + ".log")
    if not FLAGS.id_eval_csv_file:
        FLAGS.id_eval_csv_file = os.path.join(
            FLAGS.log_path, FLAGS.experiment_name + ".id_eval.csv")
    if not FLAGS.ood_eval_csv_file:
        FLAGS.ood_eval_csv_file = os.path.join(
            FLAGS.log_path, FLAGS.experiment_name + ".ood_eval.csv")
    if not FLAGS.json_file:
        FLAGS.json_file = os.path.join(
            FLAGS.log_path, FLAGS.experiment_name + ".json")
    if not FLAGS.checkpoint:
        FLAGS.checkpoint = os.path.join(
            FLAGS.log_path, FLAGS.experiment_name + ".pt")
    if not FLAGS.binary_output:
        FLAGS.binary_output = os.path.join(
            FLAGS.log_path, FLAGS.experiment_name + ".bv.hdf5")
    # Record git state for reproducibility if not supplied explicitly.
    if not FLAGS.branch:
        FLAGS.branch = os.popen(
            'git rev-parse --abbrev-ref HEAD').read().strip()
    if not FLAGS.sha:
        FLAGS.sha = os.popen('git rev-parse HEAD').read().strip()
    if not torch.cuda.is_available():
        FLAGS.cuda = False
    if FLAGS.debug:
        # Turn numpy warnings into hard errors while debugging.
        np.seterr(all='raise')
    # silly expanduser
    FLAGS.glove_path = os.path.expanduser(FLAGS.glove_path)
if __name__ == '__main__':
    # Declare all flags, parse the command line, then derive defaults
    # (default_flags may re-parse argv after applying presets).
    flags()
    FLAGS(sys.argv)
    default_flags()
    # Echo the exact invocation for the experiment log.
    print(sys.argv)
    FORMAT = '[%(asctime)s %(levelname)s] %(message)s'
    logging.basicConfig(format=FORMAT)
    debuglogger = logging.getLogger('main_logger')
    debuglogger.setLevel(FLAGS.debug_log_level)
    # Seed all RNGs for reproducibility; random_seed == -1 means use
    # OS-provided entropy instead.
    if FLAGS.random_seed != -1:
        random.seed(FLAGS.random_seed)
        np.random.seed(FLAGS.random_seed)
        torch.manual_seed(FLAGS.random_seed)
    else:
        random.seed()
        np.random.seed()
        # NOTE(review): torch is left unseeded on this branch — presumably
        # intentional since torch.manual_seed requires an explicit seed;
        # confirm this is the desired behavior.
    run()
|
dls-controls/scanpointgenerator | scanpointgenerator/rois/__init__.py | <filename>scanpointgenerator/rois/__init__.py
###
# Copyright (c) 2016, 2017 Diamond Light Source Ltd.
#
# Contributors:
# <NAME> - initial API and implementation and/or initial documentation
# <NAME> - initial API and implementation and/or initial documentation
# <NAME> - initial API and implementation and/or initial documentation
#
###
from scanpointgenerator.rois.circular_roi import CircularROI
from scanpointgenerator.rois.elliptical_roi import EllipticalROI
from scanpointgenerator.rois.linear_roi import LinearROI
from scanpointgenerator.rois.point_roi import PointROI
from scanpointgenerator.rois.polygonal_roi import PolygonalROI
from scanpointgenerator.rois.rectangular_roi import RectangularROI
from scanpointgenerator.rois.sector_roi import SectorROI
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.