repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
inversegraphics | inversegraphics-master/test.py | #!/usr/bin/env python3.4m
import matplotlib
matplotlib.use('Agg')
import scene_io_utils
from blender_utils import *
from generative_models import *
from tabulate import tabulate
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import cv2
plt.ioff()
numpy.random.seed(1)
inchToMeter = 0.0254
rendersDir = '../data/output/'
baseTestDir = '../data/aztest/'
groundTruth, imageFiles, segmentFiles,segmentSingleFiles, unoccludedFiles, prefixes = loadGroundTruth(rendersDir)
width = 110
height = 110
useCycles = False
useGPU = False
distance = 0.45
numSamples = 16
originalLoc = mathutils.Vector((0,-distance , 0))
numpy.random.seed(1)
minThresTemplate = 10
maxThresTemplate = 100
minThresImage = 50
maxThresImage = 150
baseDir = '../databaseFull/models/'
experimentTeapots = [2]
outputExperiments = []
# distanceTypes = ['chamferDataToModel', 'robustChamferDataToModel', 'sqDistImages', 'robustSqDistImages']
distanceTypes = ['negLogLikelihood']
# distanceTypes = ['negLogLikelihood','negLogLikelihoodRobust']
masks = numpy.array([])
segmentImages = []
for segment in segmentSingleFiles:
print(segment)
segmentImg = cv2.imread(segment, cv2.IMREAD_ANYDEPTH)/255.0
segmentImg = segmentImg[..., numpy.newaxis]
segmentImages = segmentImages + [segmentImg]
masks = numpy.concatenate([aux for aux in segmentImages], axis=-1)
layerPrior = globalLayerPrior(masks)
backgroundModels = ['UNOCCLUDED']
# backgroundModels = ['SINGLE', 'UNOCCLUDED']
completeScene = True
if 'FULL' not in backgroundModels:
completeScene = False
# sortedScenesIndices = numpy.argsort(groundTruth[:,6])
# sortedGroundTruth = groundTruth[sortedScenesIndices, :]
# sortedMasks = masks[:,:,sortedScenesIndices]
# sortedImageFiles = imageFiles[sortedScenesIndices]
filterScenes = [19]
indicesOccluded = (groundTruth[:,5] < 0.90) & (groundTruth[:,5] > 0.05)
indicesScenes = (groundTruth[:,6] != 19)
partsOccluded = (groundTruth[:,14] == 1) | (groundTruth[:,13] == 1)
indicesPrefix = [ x==y for (x,y) in zip(prefixes, ['_occluded']*len(prefixes))]
[targetScenes, targetModels, transformations] = scene_io_utils.loadTargetModels(experimentTeapots)
numExperiments = 100
spout = mathutils.Vector((-6.2, -0.16, 6.25))
handle = mathutils.Vector((6.2, 0.2, 5.7))
tip = mathutils.Vector((0, 0, 8))
for teapotIdx, teapotTest in enumerate(experimentTeapots):
teapot = targetModels[teapotIdx]
teapot.layers[0] = True
teapot.layers[1] = True
transformation = transformations[teapotIdx]
spout = transformation * spout
handle = transformation * handle
tip = transformation * tip
print("Experiment on teapot " + teapot.name)
indicesTeapot = (groundTruth[:, 3] == teapotTest)
indices = numpy.where(indicesTeapot & indicesOccluded & indicesScenes & partsOccluded )
selTest = indices[0]
numTests = len(selTest)
print ("Total of " + str(numTests) + " tests")
# expSelTest = [0,1]
numExperiments = min(numTests, numExperiments)
expSelTest = numpy.array(numpy.arange(0,numTests-1,int(numTests-1)/numExperiments), dtype=numpy.int)
# numpy.arange(0,numTests,int(numTests))
print ("Running only " + str(len(expSelTest)) + " tests")
# selTest = numpy.random.permutation(selTest)
currentScene = -1
performance = {}
elevations = {}
groundTruthRelAzimuths = {}
groundTruthAzimuths = {}
bestRelAzimuths= {}
bestAzimuths= {}
occlusions = {}
for backgroundModelOut in backgroundModels:
for distanceTypeOut in distanceTypes:
performance[(backgroundModelOut, distanceTypeOut)] = numpy.array([])
elevations[(backgroundModelOut, distanceTypeOut)] = numpy.array([])
groundTruthRelAzimuths[(backgroundModelOut, distanceTypeOut)] = numpy.array([])
groundTruthAzimuths[(backgroundModelOut, distanceTypeOut)] = numpy.array([])
bestRelAzimuths[(backgroundModelOut, distanceTypeOut)]= numpy.array([])
bestAzimuths[(backgroundModelOut, distanceTypeOut)] = numpy.array([])
occlusions[(backgroundModelOut, distanceTypeOut)] = numpy.array([])
for selTestNum in expSelTest:
test = selTest[selTestNum]
testResults = {}
testAzimuths = {}
groundTruthAz = groundTruth[test,0]
groundTruthObjAz = groundTruth[test,1]
groundTruthRelAz = numpy.arctan2(numpy.sin((groundTruthAz-groundTruthObjAz)*numpy.pi/180), numpy.cos((groundTruthAz-groundTruthObjAz)*numpy.pi/180))*180/numpy.pi
groundTruthEl = groundTruth[test,2]
occlusion = groundTruth[test,5]
sceneNum = int(groundTruth[test,6])
targetIndex= int(groundTruth[test,7])
sampleDir = baseTestDir + 'teapot' + str(teapotTest) + '/' 'test_samples/num' + str(test) + '_azim' + str(int(groundTruthAz)) + '_elev' + str(int(groundTruthEl)) + '_occlusion' + str(occlusion) + '/'
if not os.path.exists(sampleDir):
os.makedirs(sampleDir)
if currentScene != sceneNum:
if currentScene != -1:
# Cleanup
for objnum, obji in enumerate(scene.objects):
if obji.name != teapot.name and obji.type == 'EMPTY' and obji.dupli_type == 'GROUP':
deleteInstance(obji)
elif obji.type == 'LAMP':
obji.data.user_clear()
bpy.data.lamps.remove(obji.data)
obji.user_clear()
bpy.data.objects.remove(obji)
elif obji.name != teapot.name:
obji.user_clear()
bpy.data.objects.remove(obji)
camera.user_clear()
bpy.data.objects.remove(camera)
cam.user_clear()
bpy.data.cameras.remove(cam)
world.user_clear()
bpy.data.worlds.remove(world)
bpy.context.screen.scene = bpy.data.scenes['Scene']
scene.user_clear()
bpy.data.scenes.remove(scene)
currentScene = sceneNum
sceneFileName = '{0:05d}'.format(sceneNum)
print("Experiment on scene " + sceneFileName)
instances = scene_io_utils.getSceneInstancesInfo('../databaseFull/scenes/scene' + sceneFileName + '.txt')
targetParentPosition = instances[targetIndex][2]
targetParentIndex = instances[targetIndex][1]
[blenderScenes, modelInstances] = scene_io_utils.importBlenderScenes(instances, completeScene, targetIndex)
# targetParentInstance = modelInstances[targetParentIndex]
roomName = ''
for model in modelInstances:
reg = re.compile('(room[0-9]+)')
res = reg.match(model.name)
if res:
roomName = res.groups()[0]
scene = scene_io_utils.composeScene(modelInstances, targetIndex)
# bpy.context.scene.use_nodes = True
# tree = bpy.context.scene.node_tree
# rl = tree.nodes[0]
# links = tree.links
# v = tree.nodes.new('CompositorNodeViewer')
# v.location = 750,210
# v.use_alpha = False
# links.new(rl.outputs[0], v.inputs[0]) # link Image output to Viewer input
scene.update()
scene.render.threads = 8
scene.render.threads_mode = 'AUTO'
bpy.context.screen.scene = scene
cycles = bpy.context.scene.cycles
scene.render.tile_x = 55
scene.render.tile_y = 55
cam = bpy.data.cameras.new("MainCamera")
camera = bpy.data.objects.new("MainCamera", cam)
world = bpy.data.worlds.new("MainWorld")
setupScene(scene, targetIndex, roomName, world, distance, camera, width, height, numSamples, useCycles, useGPU)
scene.objects.link(teapot)
teapot.layers[1] = True
scene.update()
teapot.matrix_world = mathutils.Matrix.Translation(targetParentPosition)
center = centerOfGeometry(teapot.dupli_group.objects, teapot.matrix_world)
original_matrix_world = teapot.matrix_world.copy()
teapot.matrix_world = original_matrix_world
# bpy.ops.mesh.primitive_uv_sphere_add(size=0.005, location=spout)
# sphereSpout = scene.objects[0]
# bpy.ops.mesh.primitive_uv_sphere_add(size=0.005, location=handle)
# sphereHandle = scene.objects[0]
# bpy.ops.mesh.primitive_uv_sphere_add(size=0.005, location=tip)
# sphereTip = scene.objects[0]
# sphereTip.layers[1] = True
# sphereSpout.layers[1] = True
# sphereHandle.layers[1] = True
azimuthRot = mathutils.Matrix.Rotation(radians(-groundTruthObjAz), 4, 'Z')
groundTruthTransf = mathutils.Matrix.Translation(original_matrix_world.to_translation()) * azimuthRot * (mathutils.Matrix.Translation(-original_matrix_world.to_translation()))
teapot.matrix_world = groundTruthTransf * original_matrix_world
sphereSpout_matrix_world = teapot.matrix_world * mathutils.Matrix.Translation(spout)
sphereHandle_matrix_world = teapot.matrix_world * mathutils.Matrix.Translation(handle)
sphereTip_matrix_world = teapot.matrix_world * mathutils.Matrix.Translation(tip)
for backgroundModel in backgroundModels:
variances = numpy.ones([height, width, 3])*2/255.0
print("Experiment on background model " + backgroundModel)
scene.layers[1] = True
scene.layers[0] = False
scene.render.layers[0].use = False
scene.render.layers[1].use = True
teapot.layers[1] = True
if backgroundModel == 'FULL':
scene.layers[1] = False
scene.layers[0] = True
scene.render.layers[0].use = True
scene.render.layers[1].use = False
teapot.layers[0] = True
scene.update()
sqDistsSeq = []
for distIdx, distanceType in enumerate(distanceTypes):
print("Experiment on model " + distanceType)
computingSqDists = False
if distIdx == 0:
computingSqDists = True
if not computingSqDists and not backgroundModel == 'FULL':
sqRes = numpy.concatenate([aux[..., numpy.newaxis] for aux in sqDistsSeq], axis=-1)
variances = computeVariances(sqRes)
variances[numpy.where(variances <= 1)] = 2.0/255.0
# robust = not robust
# if robust is False:
# robustScale = 0
scores = []
relAzimuths = []
azimuths = []
directory = baseTestDir + 'teapot' + str(teapotTest) + '/' + backgroundModel + '_' + distanceType
if not os.path.exists('../data/aztest/' + 'teapot' + str(teapotTest) + '/'):
os.makedirs('../data/aztest/' + 'teapot' + str(teapotTest) + '/')
if not os.path.exists(directory + 'test_samples'):
os.makedirs(directory + 'test_samples')
numDir = directory + 'test_samples/num' + str(test) + '_azim' + str(int(groundTruthAz)) + '_elev' + str(int(groundTruthEl)) + '_occlusion' + str(occlusion) + '/'
if not os.path.exists(numDir):
os.makedirs(numDir)
testImage = cv2.imread(imageFiles[test])/255.0
if backgroundModel == 'UNOCCLUDED':
testImage = cv2.imread(unoccludedFiles[test])/255.0
# testImage = cv2.cvtColor(numpy.float32(rgbTestImage*255), cv2.COLOR_RGB2BGR)/255.0
testMask = masks[:,:,test]
testImageEdges = cv2.Canny(numpy.uint8(testImage*255), minThresImage,maxThresImage)
cv2.imwrite(numDir + "image_canny_" + backgroundModel + ".png" , testImageEdges)
cv2.imwrite(numDir + "image_" + backgroundModel + ".png" , numpy.uint8(testImage*255))
cv2.imwrite(sampleDir + "image_canny_" + backgroundModel + ".png" , testImageEdges)
cv2.imwrite(sampleDir + "image_" + backgroundModel + ".png" , numpy.uint8(testImage*255))
score = numpy.finfo(numpy.float64).max
elevation = groundTruthEl
azimuth = 0
elevationRot = mathutils.Matrix.Rotation(radians(-elevation), 4, 'X')
for azimuth in numpy.arange(0,360,5):
azimuthRot = mathutils.Matrix.Rotation(radians(-azimuth), 4, 'Z')
elevationRot = mathutils.Matrix.Rotation(radians(-elevation), 4, 'X')
location = center + azimuthRot * elevationRot * originalLoc
camera.location = location
scene.update()
look_at(camera, center)
scene.update()
# projMat = projection_matrix(scene.camera.data, scene)
# ipdb.set_trace()
spoutlocation = image_project(scene, camera, sphereSpout_matrix_world.to_translation())
handlelocation = image_project(scene, camera,sphereHandle_matrix_world.to_translation())
spherelocation = image_project(scene, camera, sphereTip_matrix_world.to_translation())
# touched = closestCameraIntersection(scene, sphereSpout.matrix_world.to_translation())
# scene.objects.unlink(sphereSpout)
# scene.objects.unlink(sphereHandle)
# scene.objects.unlink(sphereTip)
result, object, matrix, location, normal = scene.ray_cast(scene.camera.location, sphereSpout_matrix_world.to_translation())
scene.render.filepath = numDir + '_blender.png'
bpy.ops.render.render( write_still=True )
# image2 = numpy.flipud(numpy.array(bpy.data.images['Viewer Node'].extract_render(scene=scene)).reshape([256,256,4]))[:,:,0:3]
image = cv2.imread(scene.render.filepath)
# blendImage = bpy.data.images['Render Result']
# #
# image2 = numpy.flipud(numpy.array(blendImage.extract_render(scene=scene)).reshape([height,width,4]))[:,:,0:3]
# # ipdb.set_trace()
# Truncate intensities larger than 1.
# image = image * 1.5
# image2[numpy.where(image2 > 1)] = 1
# ipdb.set_trace()
# image[0:20, 75:100, :] = 0
# image2 = cv2.cvtColor(numpy.float32(image2), cv2.COLOR_RGB2BGR)
image = image/255.0
# image2 = image2/255.0
methodParams = {'backgroundModel': backgroundModel, 'testMask': testMask, 'variances':variances, 'layerPrior': layerPrior, 'minThresImage': minThresImage, 'maxThresImage': maxThresImage, 'minThresTemplate': minThresTemplate, 'maxThresTemplate': maxThresTemplate}
distance = scoreImage(testImage, image, distanceType, methodParams)
# cv2.imwrite(numDir + 'image' + "_az" + '%.1f' % azimuth + '_dist' + '%.1f' % distance + '.png', numpy.uint8(image*255.0))
scores.append(distance)
relAzimuth = numpy.mod(numpy.arctan2(numpy.sin((azimuth-groundTruthObjAz)*numpy.pi/180), numpy.cos((azimuth-groundTruthObjAz)*numpy.pi/180))*180/numpy.pi , 360)
azimuths.append(azimuth)
if distance <= score:
imageEdges = cv2.Canny(numpy.uint8(image*255.0), minThresTemplate,maxThresTemplate)
bestImageEdges = imageEdges
bestImage = image
score = distance
bestRelAzimuth = relAzimuth
bestAzimuth = azimuth
# if robust is False:
# robustScale = 1.4826 * numpy.sqrt(numpy.median(scores))
# error = numpy.arctan2(numpy.sin((groundTruthRelAz-bestRelAzimuth)*numpy.pi/180), numpy.cos((groundTruthRelAz-bestRelAzimuth)*numpy.pi/180))*180/numpy.pi
error = numpy.arctan2(numpy.sin((groundTruthAz-bestAzimuth)*numpy.pi/180), numpy.cos((groundTruthAz-bestAzimuth)*numpy.pi/180))*180/numpy.pi
performance[(backgroundModel, distanceType)] = numpy.append(performance[(backgroundModel, distanceType)], error)
elevations[(backgroundModel, distanceType)] = numpy.append(elevations[(backgroundModel, distanceType)], elevation)
bestAzimuths[(backgroundModel, distanceType)] = numpy.append(bestAzimuths[(backgroundModel, distanceType)], bestAzimuth)
groundTruthAzimuths[(backgroundModel, distanceType)] = numpy.append(groundTruthAzimuths[(backgroundModel, distanceType)], groundTruthAz)
occlusions[(backgroundModel, distanceType)] = numpy.append(occlusions[(backgroundModel, distanceType)], occlusion)
testResults[(backgroundModel, distanceType)] = scores
testAzimuths[(backgroundModel, distanceType)] = azimuths
sqDist = sqDistImages(bestImage, testImage)
disp = cv2.normalize(sqDist, sqDist, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
cv2.imwrite(numDir + 'sqDists' + "_az" + '%.1f' % bestAzimuth + '_dist' + '%.1f' % score + '.png', numpy.uint8(disp))
if computingSqDists:
sqDistsSeq = sqDistsSeq + [sqDist]
if distanceType == 'negLogLikelihoodRobust':
pixLik = pixelLikelihoodRobust(testImage, bestImage, testMask, backgroundModel, layerPrior, variances)
fig = plt.figure()
plt.imshow(pixLik)
plt.colorbar()
fig.savefig(numDir + 'pixelLikelihoodRobustPlot' + "_az" + '%.1f' % bestAzimuth + '_dist' + '%.1f' % score + '.png')
plt.close(fig)
disp = cv2.normalize(pixLik, pixLik, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
cv2.imwrite(numDir + 'pixelLikelihoodRobust' + "_az" + '%.1f' % bestAzimuth + '_dist' + '%.1f' % score + '.png', numpy.uint8(disp))
fgpost, bgpost = layerPosteriorsRobust(testImage, bestImage, testMask, backgroundModel, layerPrior, variances)
# plt.imshow(testImage)
# plt.show()
# plt.imshow(bestImage)
# plt.show()
# z = fgpost + bgpost
# plt.imshow(z)
# plt.show()
# plt.imshow(fgpost)
# plt.show()
# plt.imshow(bgpost)
# plt.show()
# ipdb.set_trace
assert(numpy.abs(numpy.sum(fgpost + bgpost - testMask)) < 0.01)
fig = plt.figure()
plt.imshow(fgpost)
plt.colorbar()
fig.savefig(numDir + 'fgPosteriorPlot' + "_az" + '%.1f' % bestAzimuth + '_dist' + '%.1f' % score + '.png')
plt.close(fig)
fig = plt.figure()
plt.imshow(bgpost)
plt.colorbar()
fig.savefig(numDir + 'bgPosteriorPlot' + "_az" + '%.1f' % bestAzimuth + '_dist' + '%.1f' % score + '.png')
plt.close(fig)
disp = cv2.normalize(fgpost, fgpost, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
cv2.imwrite(numDir + 'fgPosterior' + "_az" + '%.1f' % bestAzimuth + '_dist' + '%.1f' % score + '.png', numpy.uint8(disp))
disp = cv2.normalize(bgpost, bgpost, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
cv2.imwrite(numDir + 'bgPosterior' + "_az" + '%.1f' % bestAzimuth + '_dist' + '%.1f' % score + '.png', numpy.uint8(disp))
if distanceType == 'negLogLikelihood':
pixLik = pixelLikelihood(testImage, bestImage, testMask, backgroundModel, variances)
fig = plt.figure()
plt.imshow(pixLik)
plt.colorbar()
fig.savefig(numDir + 'pixelLikelihoodPlot' + "_az" + '%.1f' % bestAzimuth + '_dist' + '%.1f' % score + '.png')
plt.close(fig)
disp = cv2.normalize(pixLik, pixLik, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
cv2.imwrite(numDir + 'pixelLikelihood' + "_az" + '%.1f' % bestAzimuth + '_dist' + '%.1f' % score + '.png', numpy.uint8(disp))
cv2.imwrite(numDir + 'bestImage' + "_canny_az" + '%.1f' % bestAzimuth + '_dist' + '%.1f' % score + '.png' , bestImageEdges)
cv2.imwrite(numDir + 'bestImage' + "_az" + '%.1f' % bestAzimuth + '_dist' + '%.1f' % score + '.png', numpy.uint8(bestImage*255.0))
imgEdges = cv2.Canny(numpy.uint8(testImage*255), minThresImage,maxThresImage)
bwEdges1 = cv2.distanceTransform(~imgEdges, cv2.DIST_L2, 5)
disp = cv2.normalize(bwEdges1, bwEdges1, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
cv2.imwrite(numDir + 'dist_transform' + '.png', disp)
# ipdb.set_trace()
fig = plt.figure()
plt.plot(azimuths, numpy.array(scores))
plt.xlabel('Azimuth (degrees)')
plt.ylabel('Negative Log Likelihood')
plt.title(distanceType)
plt.axvline(x=bestAzimuth, linewidth=2, color='b', label='Minimum score azimuth')
plt.axvline(x=groundTruthAz, linewidth=2, color='g', label='Ground truth azimuth')
plt.axvline(x=(bestAzimuth + 180) % 360, linewidth=1, color='b', ls='--', label='Minimum distance azimuth + 180')
fontP = FontProperties()
fontP.set_size('small')
x1,x2,y1,y2 = plt.axis()
plt.axis((0,360,y1,y2))
# plt.legend()
fig.savefig(numDir + 'performance.png')
plt.close(fig)
fig = plt.figure()
backgroundModelOut = 'UNOCCLUDED'
distanceTypeOut = 'negLogLikelihood'
maxY = numpy.max(numpy.array(testResults[(backgroundModelOut, distanceTypeOut)]))
minY = numpy.min(numpy.array(testResults[(backgroundModelOut, distanceTypeOut)]))
plt.plot(testAzimuths[(backgroundModelOut, distanceTypeOut)], (numpy.array(testResults[(backgroundModelOut, distanceTypeOut)]) - minY)/ (maxY-minY), color='g', label='Clean model')
plt.axvline(x=testAzimuths[(backgroundModelOut, distanceTypeOut)][numpy.argmin(numpy.array(testResults[(backgroundModelOut, distanceTypeOut)]))], linewidth=1, color='g', ls='--')
backgroundModelOut = 'SINGLE'
distanceTypeOut = 'negLogLikelihood'
maxY = numpy.max(numpy.array(testResults[(backgroundModelOut, distanceTypeOut)]))
minY = numpy.min(numpy.array(testResults[(backgroundModelOut, distanceTypeOut)]))
plt.plot(testAzimuths[(backgroundModelOut, distanceTypeOut)], (numpy.array(testResults[(backgroundModelOut, distanceTypeOut)]) - minY)/ (maxY-minY), color='r', label='Normal model')
plt.axvline(x=testAzimuths[(backgroundModelOut, distanceTypeOut)][numpy.argmin(numpy.array(testResults[(backgroundModelOut, distanceTypeOut)]))], linewidth=1, color='r', ls='--')
backgroundModelOut = 'SINGLE'
distanceTypeOut = 'negLogLikelihoodRobust'
maxY = numpy.max(numpy.array(testResults[(backgroundModelOut, distanceTypeOut)]))
minY = numpy.min(numpy.array(testResults[(backgroundModelOut, distanceTypeOut)]))
plt.plot(testAzimuths[(backgroundModelOut, distanceTypeOut)], (numpy.array(testResults[(backgroundModelOut, distanceTypeOut)])- minY) / (maxY-minY), color='c', label='Robust model')
plt.axvline(x=testAzimuths[(backgroundModelOut, distanceTypeOut)][numpy.argmin(numpy.array(testResults[(backgroundModelOut, distanceTypeOut)]))], linewidth=1, color='c', ls='--')
plt.xlabel('Azimuth (degrees)')
plt.ylabel('Negative Log Likelihood')
fontP = FontProperties()
fontP.set_size('small')
# plt.axvline(x=bestAzimuth, linewidth=2, color='r', label='Minimum score azimuth')
plt.axvline(x=groundTruthAz, linewidth=1, color='b', label='GT azimuth')
# plt.axvline(x=(bestAzimuth + 180) % 360, linewidth=1, color='r', ls='--', label='Minimum distance azimuth + 180')
x1,x2,y1,y2 = plt.axis()
plt.axis((0,360,y1,y2))
lgd = plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
fig.savefig(sampleDir + 'performance.png', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
scene.objects.unlink(teapot)
for backgroundModelOut in backgroundModels:
for distanceTypeOut in distanceTypes:
directory = baseTestDir + 'teapot' + str(teapotTest) + '/' + backgroundModelOut + distanceTypeOut
experiment = {'distanceType':distanceTypeOut, 'backgroundModel':backgroundModelOut, 'rendersDir':rendersDir, 'scores':scores, 'azimuths':azimuths,'teapot':teapotTest, 'bestAzimuths':bestAzimuths[(backgroundModelOut, distanceTypeOut)], 'performance': performance[(backgroundModelOut, distanceTypeOut)], 'elevations':elevations[(backgroundModelOut, distanceTypeOut)], 'groundTruthAzimuths': groundTruthRelAzimuths[(backgroundModelOut, distanceTypeOut)], 'occlusions': occlusions[(backgroundModelOut, distanceTypeOut)],'selTest':selTest, 'test':test}
with open(directory + 'experiment.pickle', 'wb') as pfile:
pickle.dump(experiment, pfile)
fig = plt.figure()
plt.scatter(elevations[(backgroundModelOut, distanceTypeOut)], performance[(backgroundModelOut, distanceTypeOut)])
plt.xlabel('Elevation (degrees)')
plt.ylabel('Angular error')
x1,x2,y1,y2 = plt.axis()
plt.axis((0,90,-180,180))
plt.title('Performance scatter plot')
fig.savefig(directory + '_elev-performance-scatter.png')
plt.close(fig)
fig = plt.figure()
plt.scatter(occlusions[(backgroundModelOut, distanceTypeOut)]*100.0, performance[(backgroundModelOut, distanceTypeOut)])
plt.xlabel('Occlusion (%)')
plt.ylabel('Angular error')
x1,x2,y1,y2 = plt.axis()
plt.axis((0,100,-180,180))
plt.title('Performance scatter plot')
fig.savefig(directory + '_occlusion-performance-scatter.png')
plt.close(fig)
fig = plt.figure()
plt.scatter(groundTruthAzimuths[(backgroundModelOut, distanceTypeOut)], performance[(backgroundModelOut, distanceTypeOut)])
plt.xlabel('Azimuth (degrees)')
plt.ylabel('Angular error')
x1,x2,y1,y2 = plt.axis()
plt.axis((0,360,-180,180))
plt.title('Performance scatter plot')
fig.savefig(directory + '_azimuth-performance-scatter.png')
plt.close(fig)
fig = plt.figure()
plt.hist(performance[(backgroundModelOut, distanceTypeOut)], bins=36)
plt.xlabel('Angular error')
plt.ylabel('Counts')
x1,x2,y1,y2 = plt.axis()
plt.axis((-180,180,y1, y2))
plt.title('Performance histogram')
fig.savefig(directory + '_performance-histogram.png')
plt.close(fig)
# experimentFile = 'aztest/teapotsc7549b28656181c91bff71a472da9153Teapot N311012_cleaned.pickle'
# with open(experimentFile, 'rb') as pfile:
# experiment = pickle.load( pfile)
headers=["Best global fit", ""]
table = [["Mean angular error", numpy.mean(numpy.abs(performance[(backgroundModelOut, distanceTypeOut)]))],["Median angualar error",numpy.median(numpy.abs(performance[(backgroundModelOut, distanceTypeOut)]))]]
performanceTable = tabulate(table, tablefmt="latex", floatfmt=".1f")
with open(directory + 'performance.tex', 'w') as expfile:
expfile.write(performanceTable)
experiment = {'distanceTypes':distanceTypes, 'backgroundModels':backgroundModels, 'rendersDir':rendersDir,'teapot':teapotTest,'testResults':testResults,'testAzimuths':testAzimuths, 'bestAzimuths':bestAzimuths, 'performance': performance, 'elevations':elevations, 'groundTruthAzimuths': groundTruthRelAzimuths, 'occlusions': occlusions}
with open(baseTestDir + 'teapot' + str(teapotTest) + '/' + 'experiment.pickle', 'wb') as pfile:
pickle.dump(experiment, pfile)
print("Finished the experiment")
| 29,478 | 47.645215 | 559 | py |
inversegraphics | inversegraphics-master/differentiable_renderer.py | import chumpy as ch
from chumpy import depends_on, Ch
import cv2
import numpy as np
import scipy.sparse as sp
from chumpy.utils import row, col
from opendr.geometry import Rodrigues
import warnings
#Make simple experiment.
def nanmean(a, axis):
    """Mean of *a* along *axis*, treating NaN entries as missing values."""
    # don't call nan_to_num in here, unless you check that
    # occlusion_test.py still works after you do it!
    return np.nanmean(a, axis=axis)
def nangradients(arr):
    """Image-space gradients of *arr*, ignoring NaN entries.

    Assumes arr is a 3-D array (rows x cols x channels) -- the slicing
    below fixes rank 3.  Interior pixels get the average of the forward
    and backward one-sided differences (NaN neighbours are skipped via
    np.nanmean); border pixels keep the values produced by np.gradient.
    Returns (gy, gx), each shaped like np.atleast_3d(arr).
    """
    with warnings.catch_warnings():
        # np.nanmean warns ("Mean of empty slice") when both one-sided
        # differences are NaN; those entries are handled by the caller.
        warnings.simplefilter("ignore", category=RuntimeWarning)
        # One-sided differences stacked along a new 4th axis, then averaged
        # while ignoring NaNs.
        dy = np.expand_dims(arr[:-1,:,:] - arr[1:,:,:], axis=3)
        dx = np.expand_dims(arr[:,:-1,:] - arr[:, 1:, :], axis=3)
        dy = np.concatenate((dy[1:,:,:], dy[:-1,:,:]), axis=3)
        dy = np.nanmean(dy, axis=3)
        dx = np.concatenate((dx[:,1:,:], dx[:,:-1,:]), axis=3)
        dx = np.nanmean(dx, axis=3)
        # Plain central-difference gradients supply the border rows/columns.
        if arr.shape[2] > 1:
            gy, gx, _ = np.gradient(arr)
        else:
            gy, gx = np.gradient(arr.squeeze())
        # No-op when arr has >1 channel (gy/gx are already 3-D then).
        gy = np.atleast_3d(gy)
        gx = np.atleast_3d(gx)
        # Overwrite the interior with the NaN-aware averaged differences
        # (negated: differences above are computed as value - next).
        gy[1:-1,:,:] = -dy
        gx[:,1:-1,:] = -dx
        return gy, gx
class SQErrorRenderer(Ch):
    """Chumpy node for a renderer's per-pixel squared error against a ground
    truth image, with screen-space predicted derivatives for selected params.

    terms:
        renderer    -- differentiable renderer exposing .errors, .r, .dr_wrt,
                       .color_image, .visibility_image, .barycentric_image,
                       .boundaryid_image, .dEdx, .dEdy, .v, .f, .camera
        params_list -- parameters whose Jacobian is predicted from image-space
                       error gradients instead of the renderer's chain rule
    dterms:
        params      -- differentiable inputs (direct derivative suppressed)

    NOTE(review): compute_dr_wrt also reads self.imageGT, which is not a
    declared term -- callers are expected to set it; confirm against callers.
    """

    terms = ['renderer', 'params_list']
    dterms = ['params']

    def compute_r(self):
        # Forward value: the renderer's squared-error image.
        return self.renderer.errors

    def compute_dr_wrt(self, wrt):
        """Jacobian of the error image wrt *wrt*.

        Returns None for self.params, the predicted screen-space Jacobian for
        parameters registered in params_list, and otherwise the chain rule
        d(err)/d(render) * d(render)/d(wrt).
        """
        if wrt is self.params:
            return None
        # Identity (is) comparison on purpose: chumpy tracks nodes by object.
        if any(wrt is param for param in self.params_list):
            return self.gradient_pred(wrt)
        # d(err)/d(render) = 2 * (render - imageGT), applied elementwise
        # (as a column vector) to the renderer's Jacobian.
        errJac = sp.csc_matrix((2*self.renderer.r - 2*self.imageGT).ravel()[:, None])
        return errJac.multiply(self.renderer.dr_wrt(wrt))

    def gradient_pred(self, paramWrt):
        """Predicted Jacobian of the error image wrt *paramWrt*, combining
        d(image)/d(2D verts) with the camera's d(2D verts)/d(param)."""
        observed = self.renderer.color_image
        boundaryid_image = self.renderer.boundaryid_image
        barycentric = self.renderer.barycentric_image
        visibility = self.renderer.visibility_image
        # 4294967295 (== 2**32 - 1) is the "no triangle here" sentinel.
        visible = np.nonzero(visibility.ravel() != 4294967295)[0]
        jacIm = self.dImage_wrt_2dVerts_predict(observed, paramWrt, visible, visibility, barycentric,
                                                observed.shape[0], observed.shape[1],
                                                self.renderer.v.shape[0], self.renderer.f,
                                                boundaryid_image != 4294967295)
        return jacIm.dot(self.renderer.camera.dr_wrt(paramWrt))

    def dImage_wrt_2dVerts_predict(self, observed, paramWrt, visible, visibility, barycentric, image_width, image_height, num_verts, f, bnd_bool):
        """Construct a sparse jacobian that relates 2D projected vertex positions
        (in the columns) to pixel values (in the rows). This can be done
        in two steps.

        NOTE(review): the caller passes observed.shape[0] (height) as
        image_width and shape[1] as image_height; only their product is used
        for the output shape, so the result is unaffected.
        """
        # Boundary pixels must also be rasterized to count.
        bnd_bool = np.logical_and(bnd_bool, self.renderer.visibility_image != 4294967295)
        # Screen-space derivatives of the error image, precomputed by the
        # renderer.
        xdiff = self.renderer.dEdx
        ydiff = self.renderer.dEdy
        # Recomputed locally; the 'visible' parameter is superseded here.
        visible = np.nonzero(visibility.ravel() != 4294967295)[0]
        n_channels = np.atleast_3d(observed).shape[2]
        shape = visibility.shape
        # Step 1: sparsity structure.  IS: pixel rows; JS: x/y columns of the
        # triangle's vertices at each visible pixel (2 columns per vertex).
        IS = np.tile(col(visible), (1, 2*f.shape[1])).ravel()
        JS = col(f[visibility.ravel()[visible]].ravel())
        JS = np.hstack((JS*2, JS*2+1)).ravel()
        # Flat index -> (row, column) of each visible pixel.
        pxs = np.asarray(visible % shape[1], np.int32)
        pys = np.asarray(np.floor(np.floor(visible) / shape[1]), np.int32)
        if n_channels > 1:
            IS = np.concatenate([IS*n_channels+i for i in range(n_channels)])
            JS = np.concatenate([JS for i in range(n_channels)])
        # Step 2: data, weighted by each pixel's barycentric coordinates.
        datas = []
        bc0 = col(barycentric[pys, pxs, 0])
        bc1 = col(barycentric[pys, pxs, 1])
        bc2 = col(barycentric[pys, pxs, 2])
        for k in range(n_channels):
            dxs = xdiff[pys, pxs, k]
            dys = ydiff[pys, pxs, k]
            if f.shape[1] == 3:
                datas.append(np.hstack((col(dxs)*bc0, col(dys)*bc0, col(dxs)*bc1, col(dys)*bc1, col(dxs)*bc2, col(dys)*bc2)).ravel())
            else:
                datas.append(np.hstack((col(dxs)*bc0, col(dys)*bc0, col(dxs)*bc1, col(dys)*bc1)).ravel())
        data = np.concatenate(datas)
        ij = np.vstack((IS.ravel(), JS.ravel()))
        return sp.csc_matrix((data, ij), shape=(image_width*image_height*n_channels, num_verts*2))
class DifferentiableRenderer(Ch):
    """Chumpy node wrapping a renderer so that derivatives wrt parameters in
    params_list are replaced by screen-space predicted gradients that take
    occlusion boundaries into account (see gradient_pred below)."""

    terms = ['renderer', 'params_list']
    dterms = ['params']

    def compute_r(self):
        # Forward pass: simply the wrapped renderer's image.
        return self.renderer.r
def compute_dr_wrt(self, wrt):
import ipdb
for param in self.params_list:
if wrt is param:
return self.gradient_pred(wrt)
return self.renderer.dr_wrt(wrt)
    def gradients(self):
        """Gather the renderer's current images and compute the screen-space
        gradients and shifted barycentric images used by gradient_pred."""
        # self._call_on_changed()
        observed = self.renderer.color_image
        boundaryid_image = self.renderer.boundaryid_image
        barycentric = self.renderer.barycentric_image
        visibility = self.renderer.visibility_image
        # 4294967295 == 2**32 - 1: "no triangle" sentinel.  NOTE(review):
        # 'visible' is computed here but not used in the return below.
        visible = np.nonzero(visibility.ravel() != 4294967295)[0]
        return self.dImage_wrt_2dVerts_bnd_gradient(observed, barycentric, observed.shape[0], observed.shape[1], boundaryid_image != 4294967295)
    def dImage_wrt_2dVerts_bnd_gradient(self, observed, barycentric, image_width, image_height, bnd_bool):
        """Image-space gradients of *observed*, computed separately away from
        and across occlusion boundaries, plus one-pixel-shifted barycentric
        images and one-sided differences in the four axis directions.

        NOTE(review): callers pass observed.shape[0] as image_width and
        shape[1] as image_height; 'shape' below is only assembled, not used
        in a way that exposes the swap.
        """
        # Only pixels that are both boundary and actually rasterized count.
        bnd_bool = np.logical_and(self.renderer.visibility_image != 4294967295, bnd_bool)
        n_channels = np.atleast_3d(observed).shape[2]
        shape = [image_height, image_width]
        # Poison boundary pixels with NaN so nangradients skips them when
        # differentiating the non-boundary image.
        bndf = bnd_bool.astype(np.float64)
        bnd_nan = bndf.reshape((observed.shape[0], observed.shape[1], -1)).copy()
        bnd_nan.ravel()[bnd_nan.ravel()>0] = np.nan
        bnd_nan += 1
        obs_nonbnd = np.atleast_3d(observed) * bnd_nan
        ydiffnb, xdiffnb = nangradients(obs_nonbnd)
        observed = np.atleast_3d(observed)
        # Plain gradients of the full image (used at/near boundaries).
        if observed.shape[2] > 1:
            ydiffbnd, xdiffbnd, _ = np.gradient(observed)
        else:
            ydiffbnd, xdiffbnd = np.gradient(observed.squeeze())
        # No-op in the multi-channel case (already 3-D).
        ydiffbnd = np.atleast_3d(ydiffbnd)
        xdiffbnd = np.atleast_3d(xdiffbnd)
        # This corrects for a bias imposed boundary differences begin spread over two pixels
        # (by np.gradients or similar) but only counted once (since OpenGL's line
        # drawing spans 1 pixel)
        xdiffbnd *= 2.0
        ydiffbnd *= 2.0
        # Flip the sign convention of both gradient sets.
        xdiffnb = -xdiffnb
        ydiffnb = -ydiffnb
        xdiffbnd = -xdiffbnd
        ydiffbnd = -ydiffbnd
        # Where the NaN-masked gradient is undefined (boundary neighborhood),
        # fall back to the plain doubled gradient.
        idxs = np.isnan(xdiffnb.ravel())
        xdiffnb.ravel()[idxs] = xdiffbnd.ravel()[idxs]
        idxs = np.isnan(ydiffnb.ravel())
        ydiffnb.ravel()[idxs] = ydiffbnd.ravel()[idxs]
        xdiff = xdiffnb
        ydiff = ydiffnb
        # One-sided differences in each direction, zero-padded at the image
        # edge (the [...,3] literal assumes a 3-channel image -- TODO confirm).
        dybt = -np.vstack([np.diff(observed, n=1, axis=0), np.zeros([1,observed.shape[1],3])])
        dytb = np.vstack([np.zeros([1,observed.shape[1],3]), np.flipud(np.diff(np.flipud(observed), n=1, axis=0))])
        dxrl = -np.hstack([np.diff(observed, n=1, axis=1), np.zeros([observed.shape[0],1,3])])
        dxlr = np.hstack([np.zeros([observed.shape[0],1,3]),np.fliplr(np.diff(np.fliplr(observed), n=1, axis=1))])
        # Barycentric image shifted one pixel left/right/up/down.
        bary_sl = np.roll(barycentric , shift=-1, axis=1)
        bary_sr = np.roll(barycentric , shift=1, axis=1)
        bary_st = np.roll(barycentric , shift=-1, axis=0)
        bary_sb = np.roll(barycentric , shift=1, axis=0)
        return xdiff, ydiff, dybt, dxrl, dytb, dxlr, bary_sl, bary_sr, bary_st, bary_sb
    def gradient_pred(self, paramWrt):
        """Predicted Jacobian of the rendered image wrt *paramWrt*: the
        image-vs-2D-vertex Jacobian chained with the camera's derivative.
        (Same structure as SQErrorRenderer.gradient_pred.)"""
        observed = self.renderer.color_image
        boundaryid_image = self.renderer.boundaryid_image
        barycentric = self.renderer.barycentric_image
        visibility = self.renderer.visibility_image
        # 4294967295 == 2**32 - 1: "no triangle here" sentinel.
        visible = np.nonzero(visibility.ravel() != 4294967295)[0]
        jacIm = self.dImage_wrt_2dVerts_predict(observed, paramWrt, visible, visibility, barycentric, observed.shape[0], observed.shape[1], self.renderer.v.shape[0], self.renderer.f, boundaryid_image != 4294967295)
        # Chain rule: d(image)/d(2D verts) . d(2D verts)/d(param).
        return jacIm.dot(self.renderer.camera.dr_wrt(paramWrt))
    def boundary_neighborhood(self):
        """Locate pixels adjacent to the occlusion boundary in each of the
        four axis directions.

        Returns boolean transition masks (pixr, pixl, pixt, pixb) and flat
        pixel indices where "_out" appears to index the neighbour just
        outside the boundary and "_int" the boundary pixel itself -- verify
        against callers.
        """
        # Boundary pixels that are also rasterized.
        boundary = self.renderer.boundaryid_image != 4294967295
        visibility = self.renderer.visibility_image != 4294967295
        boundary = np.logical_and(visibility, boundary)
        shape = boundary.shape
        notboundary = np.logical_not(boundary)  # NOTE(review): unused below
        # +1/-1 transitions of the boundary mask along axis 1/0 mark its
        # left/right and top/bottom edges; zero-pad to keep the image shape.
        horizontal = np.hstack((np.diff(boundary.astype(np.int8),axis=1), np.zeros((shape[0],1), dtype=np.int8)))
        # horizontal = np.hstack((np.diff(boundary.astype(np.int8),axis=1), np.zeros((shape[0],1), dtype=np.int8)))
        vertical = np.vstack((np.diff(boundary.astype(np.int8), axis=0), np.zeros((1,shape[1]), dtype=np.int8)))
        # vertical = np.vstack((np.diff(boundary.astype(np.int8), axis=0), np.zeros((1,shape[1]), dtype=np.int8)))
        pixl = (horizontal == 1)
        pixr = (horizontal == -1)
        pixt = (vertical == 1)
        pixb = (vertical == -1)
        # plt.imshow((pixrl | pixlr | pixtb | pixbt))
        #Quicker, convolve (FFT) and take mask * etc.
        # Flat indices of the outer neighbours (+1 column for right, +1 row
        # i.e. +shape[1] for bottom) ...
        lidxs_out = np.where(pixl.ravel())[0]
        ridxs_out = np.where(pixr.ravel())[0] + 1
        tidxs_out = np.where(pixt.ravel())[0]
        bidxs_out = np.where(pixb.ravel())[0] + shape[1]
        # ... and of the boundary pixels themselves.
        lidxs_int = np.where(pixl.ravel())[0] + 1
        ridxs_int = np.where(pixr.ravel())[0]
        tidxs_int = np.where(pixt.ravel())[0] + shape[1]
        bidxs_int = np.where(pixb.ravel())[0]
        return pixr, pixl, pixt, pixb, lidxs_out, ridxs_out, tidxs_out, bidxs_out, lidxs_int, ridxs_int, tidxs_int, bidxs_int
def dImage_wrt_2dVerts_predict(self, observed, paramWrt, visible, visibility, barycentric, image_width, image_height, num_verts, f, bnd_bool):
    """Construct a sparse jacobian that relates 2D projected vertex positions
    (in the columns) to pixel values (in the rows). This can be done
    in two steps.

    Step 1 propagates image gradients across occlusion boundaries in the
    direction each boundary triangle is moving on screen; step 2 assembles
    the standard barycentric-weighted sparse jacobian over visible pixels.

    NOTE(review): this routine mutates `visibility` and `barycentric`
    (renderer buffers) in place through ravel/reshape views — confirm callers
    do not rely on those buffers afterwards.
    """
    # Only keep boundary pixels that are themselves visible.
    bnd_bool = np.logical_and(bnd_bool, self.renderer.visibility_image != 4294967295)
    camJac = self.renderer.camera.dr_wrt(paramWrt)
    xdiff, ydiff, dybt, dxrl, dytb, dxlr, bary_sl, bary_sr, bary_st, bary_sb = self.gradients()
    pixr, pixl, pixt, pixb, lidxs_out, ridxs_out, tidxs_out, bidxs_out, lidxs_int, ridxs_int, tidxs_int, bidxs_int = self.boundary_neighborhood()
    # NOTE(review): all twelve index arrays from boundary_neighborhood() are
    # immediately overwritten below with plain 4-neighbourhood offsets of
    # every boundary pixel — confirm whether the call above is still needed
    # or is leftover from an earlier version.
    lidxs_out = np.where(bnd_bool.ravel())[0]-1
    ridxs_out = np.where(bnd_bool.ravel())[0]+1
    tidxs_out = np.where(bnd_bool.ravel())[0]-bnd_bool.shape[1]
    bidxs_out = np.where(bnd_bool.ravel())[0]+bnd_bool.shape[1]
    lidxs_int = np.where(bnd_bool.ravel())[0]
    ridxs_int= np.where(bnd_bool.ravel())[0]
    tidxs_int= np.where(bnd_bool.ravel())[0]
    bidxs_int = np.where(bnd_bool.ravel())[0]
    #Where are triangles moving wrt to the image coordinates at the boundaries?
    # camJac rows appear to interleave x (2*v) and y (2*v+1) derivatives per
    # vertex — presumably following the camera's dr_wrt layout; verify.
    lintGrad = camJac[f[visibility.ravel()[lidxs_int]]*2]
    rintGrad = camJac[f[visibility.ravel()[ridxs_int]]*2]
    tintGrad = camJac[f[visibility.ravel()[tidxs_int]]*2+1]
    bintGrad = camJac[f[visibility.ravel()[bidxs_int]]*2+1]
    # Left neighbours: where the boundary moves left on screen (negative
    # x-derivative), copy gradient/barycentric/visibility one pixel outward
    # and swap the interior x-gradient for the right-to-left difference.
    lidx = lintGrad[:,0,0] < -0.0001
    xdiff.reshape([-1,3])[lidxs_out[lidx]] = xdiff.reshape([-1,3])[lidxs_int[lidx]]
    barycentric.reshape([-1,3])[lidxs_out[lidx]] = barycentric.reshape([-1,3])[lidxs_int[lidx]]
    visibility.ravel()[lidxs_out[lidx]] = visibility.ravel()[lidxs_int[lidx]]
    xdiff.reshape([-1,3])[lidxs_int[lidx]] = dxrl.reshape([-1,3])[lidxs_int[lidx]]
    # visible.ravel()[lidxs_out[lidx]] = True
    # Right neighbours: analogous, for boundaries moving right.
    ridx = rintGrad[:,0,0] > 0.0001
    xdiff.reshape([-1,3])[ridxs_out[ridx]] = xdiff.reshape([-1,3])[ridxs_int[ridx]]
    barycentric.reshape([-1,3])[ridxs_out[ridx]] = barycentric.reshape([-1,3])[ridxs_int[ridx]]
    visibility.ravel()[ridxs_out[ridx]] = visibility.ravel()[ridxs_int[ridx]]
    xdiff.reshape([-1,3])[ridxs_int[ridx]] = dxlr.reshape([-1,3])[ridxs_int[ridx]]
    # visible.ravel()[ridxs_out[ridx]] = True
    # Top neighbours: analogous, using the y-gradient.
    tidx = tintGrad[:,0,0] > 0.0001
    ydiff.reshape([-1,3])[tidxs_out[tidx]] = ydiff.reshape([-1,3])[tidxs_int[tidx]]
    barycentric.reshape([-1,3])[tidxs_out[tidx]] = barycentric.reshape([-1,3])[tidxs_int[tidx]]
    visibility.ravel()[tidxs_out[tidx]] = visibility.ravel()[tidxs_int[tidx]]
    ydiff.reshape([-1,3])[tidxs_int[tidx]] = dybt.reshape([-1,3])[tidxs_int[tidx]]
    # visible.ravel()[tidxs_out[tidx]] = True
    # Bottom neighbours: analogous.
    bidx = bintGrad[:,0,0] < -0.0001
    ydiff.reshape([-1,3])[bidxs_out[bidx]] = ydiff.reshape([-1,3])[bidxs_int[bidx]]
    barycentric.reshape([-1,3])[bidxs_out[bidx]] = barycentric.reshape([-1,3])[bidxs_int[bidx]]
    visibility.ravel()[bidxs_out[bidx]] = visibility.ravel()[bidxs_int[bidx]]
    ydiff.reshape([-1,3])[bidxs_int[bidx]] = dytb.reshape([-1,3])[bidxs_int[bidx]]
    # visible.ravel()[bidxs_out[bidx]] = True
    # import ipdb
    # ipdb.set_trace()
    # Re-derive visibility after the in-place edits above may have extended it.
    visible = np.nonzero(visibility.ravel() != 4294967295)[0]
    n_channels = np.atleast_3d(observed).shape[2]
    shape = visibility.shape
    #2: Take the data and copy the corresponding dxs and dys to these new pixels.
    # Step 1: get the structure ready, ie the IS and the JS
    # Row index per visible pixel, repeated for the 2*|face verts| columns.
    IS = np.tile(col(visible), (1, 2*f.shape[1])).ravel()
    JS = col(f[visibility.ravel()[visible]].ravel())
    JS = np.hstack((JS*2, JS*2+1)).ravel()
    pxs = np.asarray(visible % shape[1], np.int32)
    pys = np.asarray(np.floor(np.floor(visible) / shape[1]), np.int32)
    if n_channels > 1:
        IS = np.concatenate([IS*n_channels+i for i in range(n_channels)])
        JS = np.concatenate([JS for i in range(n_channels)])
    datas = []
    # The data is weighted according to barycentric coordinates
    bc0 = col(barycentric[pys, pxs, 0])
    bc1 = col(barycentric[pys, pxs, 1])
    bc2 = col(barycentric[pys, pxs, 2])
    for k in range(n_channels):
        dxs = xdiff[pys, pxs, k]
        dys = ydiff[pys, pxs, k]
        # Triangles contribute 3 barycentric weights; quads only use 2 here.
        if f.shape[1] == 3:
            datas.append(np.hstack((col(dxs)*bc0,col(dys)*bc0,col(dxs)*bc1,col(dys)*bc1,col(dxs)*bc2,col(dys)*bc2)).ravel())
        else:
            datas.append(np.hstack((col(dxs)*bc0,col(dys)*bc0,col(dxs)*bc1,col(dys)*bc1)).ravel())
    data = np.concatenate(datas)
    ij = np.vstack((IS.ravel(), JS.ravel()))
    # Sparse (pixels*channels) x (verts*2) jacobian.
    result = sp.csc_matrix((data, ij), shape=(image_width*image_height*n_channels, num_verts*2))
    return result
| 14,887 | 40.127072 | 214 | py |
inversegraphics | inversegraphics-master/lasagne_nn.py | #!/usr/bin/env python
"""
Usage example employing Lasagne for digit recognition using the MNIST dataset.
This example is deliberately structured as a long flat file, focusing on how
to use Lasagne, instead of focusing on writing maximally modular and reusable
code. It is used as the foundation for the introductory Lasagne tutorial:
http://lasagne.readthedocs.org/en/latest/user/tutorial.html
More in-depth examples and reproductions of paper results are maintained in
a separate repository: https://github.com/Lasagne/Recipes
"""
from __future__ import print_function
import sys
import os
import time
import numpy as np
import theano
import theano.tensor as T
import pickle
# import lasagne.layers.dnn
import lasagne
from lasagne.layers import InputLayer, DenseLayer, DropoutLayer
# from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
from lasagne.layers import Conv2DLayer as ConvLayer
from lasagne.layers import MaxPool2DLayer as PoolLayer
from lasagne.layers import LocalResponseNormalization2DLayer as NormLayer
from lasagne.utils import floatX
from utils import getTriplets
import ipdb
# ##################### Build the neural network model #######################
# This script supports three types of models. For each one, we define a
# function that takes a Theano variable representing the input and returns
# the output layer of a neural network model built in Lasagne.
def build_mlp(input_var=None):
    """Two-hidden-layer MLP for 28x28 single-channel inputs.

    20% dropout on the input, two 800-unit rectified hidden layers each
    followed by 50% dropout, and a 10-way softmax output. Returns the output
    layer; the whole network is reachable through it.
    """
    ll = lasagne.layers
    relu = lasagne.nonlinearities.rectify
    net = ll.InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    net = ll.DropoutLayer(net, p=0.2)
    net = ll.DenseLayer(net, num_units=800, nonlinearity=relu,
                        W=lasagne.init.GlorotUniform())
    net = ll.DropoutLayer(net, p=0.5)
    net = ll.DenseLayer(net, num_units=800, nonlinearity=relu)
    net = ll.DropoutLayer(net, p=0.5)
    return ll.DenseLayer(net, num_units=10,
                         nonlinearity=lasagne.nonlinearities.softmax)
def build_custom_mlp(input_var=None, depth=2, width=800, drop_input=.2,
                     drop_hidden=.5):
    """MLP with configurable depth, width and dropout rates.

    With the defaults this builds the same network as `build_mlp`. Passing a
    falsy dropout rate skips the corresponding dropout layer entirely.
    """
    ll = lasagne.layers
    relu = lasagne.nonlinearities.rectify
    net = ll.InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    if drop_input:
        net = ll.dropout(net, p=drop_input)
    for _ in range(depth):
        net = ll.DenseLayer(net, width, nonlinearity=relu)
        if drop_hidden:
            net = ll.dropout(net, p=drop_hidden)
    return ll.DenseLayer(net, 10, nonlinearity=lasagne.nonlinearities.softmax)
def build_cnn(input_var=None):
    """CNN regressor for 150x150 grayscale input.

    Two conv(32x5x5)+maxpool stages, a 256-unit dropout FC layer, and a
    9-unit linear output (both FC layers get 50% input dropout).
    """
    ll = lasagne.layers
    relu = lasagne.nonlinearities.rectify
    net = ll.InputLayer(shape=(None, 1, 150, 150), input_var=input_var)
    net = ConvLayer(net, num_filters=32, filter_size=(5, 5),
                    nonlinearity=relu, W=lasagne.init.GlorotUniform())
    net = ll.MaxPool2DLayer(net, pool_size=(2, 2))
    net = ConvLayer(net, num_filters=32, filter_size=(5, 5), nonlinearity=relu)
    net = ll.MaxPool2DLayer(net, pool_size=(2, 2))
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=256, nonlinearity=relu)
    return ll.DenseLayer(ll.dropout(net, p=.5), num_units=9,
                         nonlinearity=lasagne.nonlinearities.linear)
def build_cnn_small(input_var=None):
    """Smaller variant of `build_cnn`: 16 filters per conv stage and a
    64-unit FC layer before the 9-unit linear output."""
    ll = lasagne.layers
    relu = lasagne.nonlinearities.rectify
    net = ll.InputLayer(shape=(None, 1, 150, 150), input_var=input_var)
    net = ConvLayer(net, num_filters=16, filter_size=(5, 5),
                    nonlinearity=relu, W=lasagne.init.GlorotUniform())
    net = ll.MaxPool2DLayer(net, pool_size=(2, 2))
    net = ConvLayer(net, num_filters=16, filter_size=(5, 5), nonlinearity=relu)
    net = ll.MaxPool2DLayer(net, pool_size=(2, 2))
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=64, nonlinearity=relu)
    return ll.DenseLayer(ll.dropout(net, p=.5), num_units=9,
                         nonlinearity=lasagne.nonlinearities.linear)
def build_cnn_pose_embedding(input_var=None, sizeIm=150):
    """Pose-embedding network: four 128-filter 5x5 conv layers (one 2x2
    maxpool after the first) feeding a 32-unit linear embedding layer."""
    relu = lasagne.nonlinearities.rectify
    net = lasagne.layers.InputLayer(shape=(None, 1, sizeIm, sizeIm),
                                    input_var=input_var)
    net = ConvLayer(net, num_filters=128, filter_size=(5, 5), nonlinearity=relu)
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=(2, 2))
    # Three further conv layers without pooling.
    for _ in range(3):
        net = ConvLayer(net, num_filters=128, filter_size=(5, 5),
                        nonlinearity=relu)
    return lasagne.layers.DenseLayer(net, num_units=32,
                                     nonlinearity=lasagne.nonlinearities.linear)
def build_cnn2(input_var=None):
    """Like `build_cnn` but with a third conv(32x5x5)+maxpool stage before
    the 256-unit dropout FC layer and 9-unit linear output."""
    ll = lasagne.layers
    relu = lasagne.nonlinearities.rectify
    net = ll.InputLayer(shape=(None, 1, 150, 150), input_var=input_var)
    net = ConvLayer(net, num_filters=32, filter_size=(5, 5),
                    nonlinearity=relu, W=lasagne.init.GlorotUniform())
    net = ll.MaxPool2DLayer(net, pool_size=(2, 2))
    # Two more identical conv+pool stages.
    for _ in range(2):
        net = ConvLayer(net, num_filters=32, filter_size=(5, 5),
                        nonlinearity=relu)
        net = ll.MaxPool2DLayer(net, pool_size=(2, 2))
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=256, nonlinearity=relu)
    return ll.DenseLayer(ll.dropout(net, p=.5), num_units=9,
                         nonlinearity=lasagne.nonlinearities.linear)
def build_cnn_pose(input_var=None):
    """Pose regressor for 150x150 grayscale input.

    Conv stages of 64/64/128 filters (each 5x5 + 2x2 maxpool), FC layers of
    256 and 32 units (50% input dropout each), then a 4-unit linear output.
    """
    ll = lasagne.layers
    relu = lasagne.nonlinearities.rectify
    net = ll.InputLayer(shape=(None, 1, 150, 150), input_var=input_var)
    for nfilt in (64, 64, 128):
        net = ConvLayer(net, num_filters=nfilt, filter_size=(5, 5),
                        nonlinearity=relu)
        net = ll.MaxPool2DLayer(net, pool_size=(2, 2))
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=256, nonlinearity=relu)
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=32, nonlinearity=relu)
    return ll.DenseLayer(net, num_units=4,
                         nonlinearity=lasagne.nonlinearities.linear)
def build_cnn_pose_color(input_var=None):
    """Same architecture as `build_cnn_pose` but for 3-channel (RGB) input:
    conv 64/64/128 + pool stages, FC 256 and 32 with dropout, 4-unit linear
    output."""
    ll = lasagne.layers
    relu = lasagne.nonlinearities.rectify
    net = ll.InputLayer(shape=(None, 3, 150, 150), input_var=input_var)
    for nfilt in (64, 64, 128):
        net = ConvLayer(net, num_filters=nfilt, filter_size=(5, 5),
                        nonlinearity=relu)
        net = ll.MaxPool2DLayer(net, pool_size=(2, 2))
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=256, nonlinearity=relu)
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=32, nonlinearity=relu)
    return ll.DenseLayer(net, num_units=4,
                         nonlinearity=lasagne.nonlinearities.linear)
def build_cnn_appLight(input_var=None):
    """Joint appearance/lighting regressor for 3-channel 150x150 input:
    conv 64/64/128 + pool stages, FC 256 and 32 with dropout, and a 12-unit
    linear output."""
    ll = lasagne.layers
    relu = lasagne.nonlinearities.rectify
    net = ll.InputLayer(shape=(None, 3, 150, 150), input_var=input_var)
    for nfilt in (64, 64, 128):
        net = ConvLayer(net, num_filters=nfilt, filter_size=(5, 5),
                        nonlinearity=relu)
        net = ll.MaxPool2DLayer(net, pool_size=(2, 2))
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=256, nonlinearity=relu)
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=32, nonlinearity=relu)
    return ll.DenseLayer(net, num_units=12,
                         nonlinearity=lasagne.nonlinearities.linear)
def build_cnn_shape(input_var=None):
    """Shape regressor for 150x150 grayscale input: conv 64/64/128 + pool
    stages, FC 256 and 32 with dropout, and a 10-unit linear output."""
    ll = lasagne.layers
    relu = lasagne.nonlinearities.rectify
    net = ll.InputLayer(shape=(None, 1, 150, 150), input_var=input_var)
    for nfilt in (64, 64, 128):
        net = ConvLayer(net, num_filters=nfilt, filter_size=(5, 5),
                        nonlinearity=relu)
        net = ll.MaxPool2DLayer(net, pool_size=(2, 2))
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=256, nonlinearity=relu)
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=32, nonlinearity=relu)
    return ll.DenseLayer(net, num_units=10,
                         nonlinearity=lasagne.nonlinearities.linear)
def build_cnn_shape_k(input_var=None):
    """Deeper shape regressor for 3-channel 150x150 input.

    conv32+pool, conv64 x2 + pool, conv128 x2 + pool, a 128-unit FC layer
    (wrapped in a p=0 dropout layer, kept for graph compatibility), and a
    10-unit linear output.
    """
    ll = lasagne.layers
    relu = lasagne.nonlinearities.rectify
    net = ll.InputLayer(shape=(None, 3, 150, 150), input_var=input_var)
    net = ConvLayer(net, num_filters=32, filter_size=(5, 5), nonlinearity=relu)
    net = ll.MaxPool2DLayer(net, pool_size=(2, 2))
    for nfilt in (64, 128):
        # Two consecutive conv layers per stage, then a pool.
        net = ConvLayer(net, num_filters=nfilt, filter_size=(5, 5),
                        nonlinearity=relu)
        net = ConvLayer(net, num_filters=nfilt, filter_size=(5, 5),
                        nonlinearity=relu)
        net = ll.MaxPool2DLayer(net, pool_size=(2, 2))
    net = ll.DenseLayer(ll.dropout(net, p=.0), num_units=128, nonlinearity=relu)
    return ll.DenseLayer(net, num_units=10,
                         nonlinearity=lasagne.nonlinearities.linear)
def build_cnn_shape_medium(input_var=None):
    """Medium shape regressor for 3-channel 150x150 input: three
    conv(128x5x5)+pool stages, FC 256 and 128 with 50% dropout, and a
    10-unit linear output."""
    ll = lasagne.layers
    relu = lasagne.nonlinearities.rectify
    net = ll.InputLayer(shape=(None, 3, 150, 150), input_var=input_var)
    for _ in range(3):
        net = ConvLayer(net, num_filters=128, filter_size=(5, 5),
                        nonlinearity=relu)
        net = ll.MaxPool2DLayer(net, pool_size=(2, 2))
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=256, nonlinearity=relu)
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=128, nonlinearity=relu)
    return ll.DenseLayer(net, num_units=10,
                         nonlinearity=lasagne.nonlinearities.linear)
def build_cnn_shape_large(input_var=None):
    """Large shape regressor for 3-channel 150x150 input.

    conv256(7x7)+3x3 pool, conv256(5x5)+2x2 pool, two conv256(3x3) layers,
    two 256-unit FC layers with 50% dropout, and a 10-unit linear output.
    """
    ll = lasagne.layers
    relu = lasagne.nonlinearities.rectify
    net = ll.InputLayer(shape=(None, 3, 150, 150), input_var=input_var)
    net = ConvLayer(net, num_filters=256, filter_size=(7, 7),
                    nonlinearity=relu, W=lasagne.init.GlorotUniform())
    net = ll.MaxPool2DLayer(net, pool_size=(3, 3))
    net = ConvLayer(net, num_filters=256, filter_size=(5, 5), nonlinearity=relu)
    net = ll.MaxPool2DLayer(net, pool_size=(2, 2))
    net = ConvLayer(net, num_filters=256, filter_size=(3, 3), nonlinearity=relu)
    net = ConvLayer(net, num_filters=256, filter_size=(3, 3), nonlinearity=relu)
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=256, nonlinearity=relu)
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=256, nonlinearity=relu)
    return ll.DenseLayer(net, num_units=10,
                         nonlinearity=lasagne.nonlinearities.linear)
def build_cnn_light(input_var=None):
    """Lighting regressor for 3-channel 150x150 input: conv 64/64/128 + pool
    stages, FC 256 and 32 with dropout, and a 9-unit linear output."""
    ll = lasagne.layers
    relu = lasagne.nonlinearities.rectify
    net = ll.InputLayer(shape=(None, 3, 150, 150), input_var=input_var)
    for nfilt in (64, 64, 128):
        net = ConvLayer(net, num_filters=nfilt, filter_size=(5, 5),
                        nonlinearity=relu)
        net = ll.MaxPool2DLayer(net, pool_size=(2, 2))
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=256, nonlinearity=relu)
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=32, nonlinearity=relu)
    return ll.DenseLayer(net, num_units=9,
                         nonlinearity=lasagne.nonlinearities.linear)
def build_cnn_app(input_var=None):
    """Appearance regressor for 3-channel 150x150 input: conv 64/64/128 +
    pool stages, FC 256 and 32 with dropout, and a 3-unit linear output."""
    ll = lasagne.layers
    relu = lasagne.nonlinearities.rectify
    net = ll.InputLayer(shape=(None, 3, 150, 150), input_var=input_var)
    for nfilt in (64, 64, 128):
        net = ConvLayer(net, num_filters=nfilt, filter_size=(5, 5),
                        nonlinearity=relu)
        net = ll.MaxPool2DLayer(net, pool_size=(2, 2))
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=256, nonlinearity=relu)
    net = ll.DenseLayer(ll.dropout(net, p=.5), num_units=32, nonlinearity=relu)
    return ll.DenseLayer(net, num_units=3,
                         nonlinearity=lasagne.nonlinearities.linear)
class MeanLayer(lasagne.layers.Layer):
    """Lasagne layer that averages its input over the two trailing axes,
    e.g. collapsing a (batch, H, W) map to one scalar per sample."""
    def get_output_for(self, input, **kwargs):
        # Averaging over axis 1 twice collapses both spatial axes of a
        # (batch, H, W) input, yielding a 1-D (batch,) tensor.
        return input.mean(axis=1).mean(axis=1)
    def get_output_shape_for(self, input_shape):
        # NOTE(review): this declares shape [batch, 1], but get_output_for
        # actually produces a (batch,) tensor; Lasagne also conventionally
        # expects a tuple here, not a list.  Downstream shape inference
        # (ConcatLayer/ReshapeLayer in build_cnn_appmask) may rely on the
        # declared value — confirm before changing either side.
        return [input_shape[0],1]
def build_cnn_mask(input_var=None):
    """Build a CNN that predicts a flattened per-pixel foreground mask.

    A low-resolution 2250-unit mask is regressed from conv features,
    upscaled 10x along its flattened axis to 22500 (= 150*150) entries,
    and squashed through a sigmoid into per-entry probabilities.
    """
    small_mask_units = 2250
    upscale = 10
    net = lasagne.layers.InputLayer(shape=(None, 3, 150, 150),
                                    input_var=input_var)
    # Convolutional feature extractor: three conv + 2x2 pooling stages.
    for n_filters in (64, 64, 128):
        net = ConvLayer(net, num_filters=n_filters, filter_size=(5, 5),
                        nonlinearity=lasagne.nonlinearities.rectify)
        net = lasagne.layers.MaxPool2DLayer(net, pool_size=(2, 2))
    # Dropout-regularized dense layers down to the low-resolution mask.
    net = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(net, p=.5),
        num_units=256,
        nonlinearity=lasagne.nonlinearities.rectify)
    net = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(net, p=.5),
        num_units=225,
        nonlinearity=lasagne.nonlinearities.rectify)
    net = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(net, p=.5),
        num_units=small_mask_units,
        nonlinearity=lasagne.nonlinearities.linear)
    # Upscale the flattened low-resolution mask to full resolution:
    # (batch, 2250) -> (batch, 1, 2250) -> (batch, 1, 22500) -> (batch, 22500).
    net = lasagne.layers.ReshapeLayer(net, ([0], 1, small_mask_units))
    net = lasagne.layers.Upscale1DLayer(incoming=net, scale_factor=upscale)
    net = lasagne.layers.ReshapeLayer(net, ([0], small_mask_units * upscale))
    # Sigmoid turns the raw outputs into per-entry mask probabilities.
    return lasagne.layers.NonlinearityLayer(
        net, nonlinearity=lasagne.nonlinearities.sigmoid)
def build_cnn_mask_large(input_var=None):
    """Build the large mask-prediction CNN.

    Five 256-filter convolution stages (the first three followed by 2x2
    max-pooling, the last two unpooled to keep spatial detail) feed two
    1125-unit dense layers and a 2500-unit linear layer; a final sigmoid
    turns the 2500 outputs into a flattened mask of per-entry
    probabilities.

    Parameters
    ----------
    input_var : optional Theano tensor to use as the network input.

    Returns
    -------
    The output lasagne layer (sigmoid over 2500 units).
    """
    input = lasagne.layers.InputLayer(shape=(None, 3, 150, 150),
                                      input_var=input_var)
    network = ConvLayer(
        input, num_filters=256, filter_size=(5, 5),
        nonlinearity=lasagne.nonlinearities.rectify)
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
    # (removed a leftover `lasagne.layers.get_output_shape(network)` call
    # whose return value was discarded — debugging residue, no effect)
    network = ConvLayer(
        network, num_filters=256, filter_size=(5, 5),
        nonlinearity=lasagne.nonlinearities.rectify)
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
    network = ConvLayer(
        network, num_filters=256, filter_size=(5, 5),
        nonlinearity=lasagne.nonlinearities.rectify)
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
    # Two further convolution stages without pooling.
    network = ConvLayer(
        network, num_filters=256, filter_size=(5, 5),
        nonlinearity=lasagne.nonlinearities.rectify)
    network = ConvLayer(
        network, num_filters=256, filter_size=(5, 5),
        nonlinearity=lasagne.nonlinearities.rectify)
    # Dense head; each layer gets 50% dropout on its inputs.
    network = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(network, p=.5),
        num_units=1125,
        nonlinearity=lasagne.nonlinearities.rectify)
    network = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(network, p=.5),
        num_units=1125,
        nonlinearity=lasagne.nonlinearities.rectify)
    network = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(network),  # lasagne's default p=0.5
        num_units=2500,
        nonlinearity=lasagne.nonlinearities.linear)
    # Sigmoid converts the linear outputs into mask probabilities.
    mask = lasagne.layers.NonlinearityLayer(
        network,
        nonlinearity=lasagne.nonlinearities.sigmoid)
    return mask
def build_cnn_appmask(input_var=None):
    """Build a CNN that jointly predicts a mask and uses it to pool an
    appearance estimate.

    A conv/dense trunk regresses a 22500-unit (150x150) mask; each RGB
    channel of the input is multiplied elementwise by the mask and
    averaged, and the three masked channel means form the 3-unit output.
    """
    in_layer = lasagne.layers.InputLayer(shape=(None, 3, 150, 150),
                                         input_var=input_var)
    # Convolutional trunk: three conv + 2x2 pooling stages.
    net = in_layer
    for n_filters in (64, 64, 128):
        net = ConvLayer(net, num_filters=n_filters, filter_size=(5, 5),
                        nonlinearity=lasagne.nonlinearities.rectify)
        net = lasagne.layers.MaxPool2DLayer(net, pool_size=(2, 2))
    # Dropout-regularized dense layers up to the full-resolution mask.
    net = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(net, p=.5),
        num_units=256,
        nonlinearity=lasagne.nonlinearities.rectify)
    net = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(net, p=.5),
        num_units=2250,
        nonlinearity=lasagne.nonlinearities.rectify)
    mask = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(net, p=.5),
        num_units=22500,
        nonlinearity=lasagne.nonlinearities.rectify)
    mask2d = lasagne.layers.ReshapeLayer(mask, ([0], 150, 150))

    def masked_channel_mean(channel_idx):
        # Multiply one input channel by the predicted mask and average it.
        channel = lasagne.layers.SliceLayer(in_layer, channel_idx, axis=1)
        weighted = lasagne.layers.ElemwiseMergeLayer(
            incomings=[channel, mask2d], merge_function=theano.tensor.mul)
        return MeanLayer(weighted)

    # Concatenate the three masked channel means and expose them as a
    # 3-column output.
    merged = lasagne.layers.ConcatLayer(
        incomings=[masked_channel_mean(0),
                   masked_channel_mean(1),
                   masked_channel_mean(2)], axis=0)
    return lasagne.layers.ReshapeLayer(merged, ([0], 3))
def build_cnn_pose_norm(input_var=None):
    """Build the batch-normalized pose-regression CNN.

    Same topology as the dropout variant — three conv/pool stages and two
    dense layers — but every conv/dense layer is wrapped in batch
    normalization and no dropout is used; ends in a 4-unit linear output.
    """
    bn = lasagne.layers.normalization.batch_norm
    net = lasagne.layers.InputLayer(shape=(None, 1, 150, 150),
                                    input_var=input_var)
    # Batch-normalized conv + 2x2 pooling stages.
    for n_filters in (64, 64, 128):
        net = bn(ConvLayer(net, num_filters=n_filters, filter_size=(5, 5),
                           nonlinearity=lasagne.nonlinearities.rectify))
        net = lasagne.layers.MaxPool2DLayer(net, pool_size=(2, 2))
    # Batch-normalized dense layers.
    for n_units in (256, 32):
        net = bn(lasagne.layers.DenseLayer(
            net,
            num_units=n_units,
            nonlinearity=lasagne.nonlinearities.rectify))
    # Linear 4-unit pose output.
    return lasagne.layers.DenseLayer(
        net, num_units=4, nonlinearity=lasagne.nonlinearities.linear)
def build_cnn_pose_large(input_var=None):
    """Build the large pose-regression CNN on 150x150 grayscale input.

    AlexNet-shaped: a 7x7 conv stage, a 5x5 conv stage, three stacked 3x3
    conv stages, interleaved pooling, then two dropout-regularized dense
    layers and a 4-unit linear output.
    """
    net = lasagne.layers.InputLayer(shape=(None, 1, 150, 150),
                                    input_var=input_var)
    # Stage 1: large 7x7 filters with explicit Glorot initialization.
    net = ConvLayer(net, num_filters=96, filter_size=(7, 7),
                    nonlinearity=lasagne.nonlinearities.rectify,
                    W=lasagne.init.GlorotUniform())
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=(3, 3))
    # Stage 2: 5x5 filters followed by 2x2 pooling.
    net = ConvLayer(net, num_filters=256, filter_size=(5, 5),
                    nonlinearity=lasagne.nonlinearities.rectify)
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=(2, 2))
    # Stages 3-5: three stacked 3x3 convolutions, pooled once at the end.
    for _ in range(3):
        net = ConvLayer(net, num_filters=512, filter_size=(3, 3),
                        nonlinearity=lasagne.nonlinearities.rectify)
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=(3, 3))
    # Dense head: two layers with 50% dropout on their inputs.
    for n_units in (4096, 1024):
        net = lasagne.layers.DenseLayer(
            lasagne.layers.dropout(net, p=.5),
            num_units=n_units,
            nonlinearity=lasagne.nonlinearities.rectify)
    # Linear 4-unit pose output.
    return lasagne.layers.DenseLayer(
        net, num_units=4, nonlinearity=lasagne.nonlinearities.linear)
def build_cnn_pose_large_norm(input_var=None):
    """Batch-normalized variant of the large pose CNN.

    Identical layer topology to build_cnn_pose_large, but every
    conv/dense layer is wrapped in batch normalization and the dense
    layers use no dropout.
    """
    bn = lasagne.layers.normalization.batch_norm
    net = lasagne.layers.InputLayer(shape=(None, 1, 150, 150),
                                    input_var=input_var)
    net = bn(ConvLayer(net, num_filters=96, filter_size=(7, 7),
                       nonlinearity=lasagne.nonlinearities.rectify,
                       W=lasagne.init.GlorotUniform()))
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=(3, 3))
    net = bn(ConvLayer(net, num_filters=256, filter_size=(5, 5),
                       nonlinearity=lasagne.nonlinearities.rectify))
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=(2, 2))
    # Three stacked 3x3 convolution stages, pooled once at the end.
    for _ in range(3):
        net = bn(ConvLayer(net, num_filters=512, filter_size=(3, 3),
                           nonlinearity=lasagne.nonlinearities.rectify))
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=(3, 3))
    # Batch-normalized dense head, no dropout.
    for n_units in (4096, 1024):
        net = bn(lasagne.layers.DenseLayer(
            net,
            num_units=n_units,
            nonlinearity=lasagne.nonlinearities.rectify))
    # Linear 4-unit pose output.
    return lasagne.layers.DenseLayer(
        net, num_units=4, nonlinearity=lasagne.nonlinearities.linear)
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Yield aligned (inputs, targets) minibatches of exactly `batchsize`
    samples.

    A trailing partial batch is dropped.  With shuffle=True the sample
    order is permuted (via np.random) once per call, and the same
    permutation is applied to inputs and targets.
    """
    assert len(inputs) == len(targets)
    order = None
    if shuffle:
        order = np.arange(len(inputs))
        np.random.shuffle(order)
    for begin in range(0, len(inputs) - batchsize + 1, batchsize):
        if shuffle:
            sel = order[begin:begin + batchsize]
        else:
            sel = slice(begin, begin + batchsize)
        yield inputs[sel], targets[sel]
def iterate_minibatches_triplets(inputs, inputs_t1, inputs_t2, batchsize, shuffle=False):
    """Yield aligned minibatch triples (anchor, t1, t2) of `batchsize`
    samples each.

    A trailing partial batch is dropped.  With shuffle=True a single
    random permutation (via np.random) is applied identically to all
    three streams so corresponding samples stay aligned.
    """
    assert len(inputs) == len(inputs_t1) == len(inputs_t2)
    order = None
    if shuffle:
        order = np.arange(len(inputs))
        np.random.shuffle(order)
    for begin in range(0, len(inputs) - batchsize + 1, batchsize):
        if shuffle:
            sel = order[begin:begin + batchsize]
        else:
            sel = slice(begin, begin + batchsize)
        yield inputs[sel], inputs_t1[sel], inputs_t2[sel]
def iterate_minibatches_h5(inputs_h5, trainSet, trainValSet, targets, batchsize, shuffle=False):
    """Yield aligned (inputs, targets) minibatches from an HDF5-style dataset.

    `trainValSet` indexes into `trainSet`, which in turn holds row indices
    into `inputs_h5`; `targets` is aligned with `trainValSet`.  Unlike
    iterate_minibatches, trailing partial batches ARE yielded.

    Fixes over the previous version:
    - `np.bool` (removed in NumPy >= 1.24) replaced with the builtin `bool`
      via plain integer indexing.
    - The old boolean-mask lookup returned rows in ascending index order
      while `targets[excerpt]` kept the shuffled order, mis-pairing inputs
      and targets when shuffle=True.  Rows are now fetched with sorted
      fancy indexing (HDF5 requires increasing indices) and the targets
      are permuted identically, so each yielded pair stays aligned.
    """
    assert len(trainValSet) == len(targets)
    print("Loading minibatch set")
    if shuffle:
        indices = np.arange(len(trainValSet))
        np.random.shuffle(indices)
    for start_idx in range(0, len(trainValSet), batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = np.arange(start_idx, min(start_idx + batchsize, len(trainValSet)))
        # Rows of inputs_h5 addressed by this excerpt.
        rows = np.asarray(trainSet)[np.asarray(trainValSet)[excerpt]]
        # HDF5 fancy indexing needs increasing indices: sort the selection
        # and apply the same permutation to the targets.
        order = np.argsort(rows)
        yield inputs_h5[rows[order], :, :], targets[excerpt][order]
    print("Ended loading minibatch set")
def load_network(modelType='cnn', param_values=[], imgSize=150):
    """Build a network of the requested architecture and optionally load
    pretrained weights into it.

    Parameters
    ----------
    modelType : str
        One of the known architecture names (e.g. 'cnn', 'cnn_pose',
        'cnn_mask', ...) or 'custom_mlp:depth,width,drop_in,drop_hid'.
    param_values : list
        If non-empty, parameter arrays installed via
        lasagne.layers.set_all_param_values.
    imgSize : int
        Input image size; only used by 'cnn_pose_embedding'.

    Returns
    -------
    The output lasagne layer of the constructed network.

    Raises
    ------
    ValueError
        If modelType is not recognized.  (Previously an unknown type only
        printed a message and then crashed with a NameError on the
        undefined `network` variable.)
    """
    # Symbolic 4-D input variable shared by all architectures.
    input_var = T.tensor4('inputs')
    print("Building model and compiling functions...")
    if modelType == 'mlp':
        network = build_mlp(input_var)
    elif modelType.startswith('custom_mlp:'):
        # Format: 'custom_mlp:depth,width,drop_in,drop_hid'
        depth, width, drop_in, drop_hid = modelType.split(':', 1)[1].split(',')
        network = build_custom_mlp(input_var, int(depth), int(width),
                                   float(drop_in), float(drop_hid))
    elif modelType == 'cnn':
        network = build_cnn(input_var)
    elif modelType == 'cnn2':
        network = build_cnn2(input_var)
    elif modelType == 'cnn_pose':
        network = build_cnn_pose(input_var)
    elif modelType == 'cnn_pose_embedding':
        network = build_cnn_pose_embedding(input_var, imgSize)
    elif modelType == 'cnn_pose_large':
        network = build_cnn_pose_large(input_var)
    elif modelType == 'cnn_pose_color':
        network = build_cnn_pose_color(input_var)
    elif modelType == 'cnn_pose_norm':
        network = build_cnn_pose_norm(input_var)
    elif modelType == 'cnn_pose_large_norm':
        network = build_cnn_pose_large_norm(input_var)
    elif modelType == 'cnn_light':
        network = build_cnn_light(input_var)
    elif modelType == 'cnn_shape':
        network = build_cnn_shape(input_var)
    elif modelType == 'cnn_shape_large':
        network = build_cnn_shape_large(input_var)
    elif modelType == 'cnn_shape_medium':
        network = build_cnn_shape_medium(input_var)
    elif modelType == 'cnn_appLight':
        network = build_cnn_appLight(input_var)
    elif modelType == 'cnn_app':
        network = build_cnn_app(input_var)
    elif modelType == 'cnn_appmask':
        network = build_cnn_appmask(input_var)
    elif modelType == 'cnn_mask':
        network = build_cnn_mask(input_var)
    elif modelType == 'cnn_mask_large':
        network = build_cnn_mask_large(input_var)
    else:
        raise ValueError("Unrecognized model type %r." % modelType)
    # Optionally install pretrained parameters.
    if param_values:
        lasagne.layers.set_all_param_values(network, param_values)
    return network
def get_prediction_fun(network):
    """Compile and return a deterministic forward-pass function for
    `network` (dropout and other stochastic layers disabled)."""
    in_var = lasagne.layers.get_all_layers(network)[0].input_var
    out = lasagne.layers.get_output(network, deterministic=True)
    return theano.function([in_var], out)
def get_prediction_fun_nondeterministic(network):
    """Compile and return a stochastic forward-pass function for
    `network` (dropout and other stochastic layers remain active)."""
    in_var = lasagne.layers.get_all_layers(network)[0].input_var
    out = lasagne.layers.get_output(network, deterministic=False)
    return theano.function([in_var], out)
def train_nn_h5(X_h5, trainSetVal, y_train, y_val, meanImage, network, modelType = 'cnn', num_epochs=150, saveModelAtEpoch=True, modelPath='tmp/nnmodel.pickle', param_values=[]):
    """Train `network` on an HDF5-backed dataset with early stopping.

    Parameters
    ----------
    X_h5 : HDF5 dataset (or array-like) with all images; rows
        [0, trainSetVal) are training data, rows [trainSetVal, end) are
        validation data.
    trainSetVal : int row index separating training from validation data.
    y_train, y_val : target matrices aligned with the two row ranges.
    meanImage : mean image subtracted from every input ((H, W) or
        (H, W, C)).
    network : output lasagne layer of the model to train.
    modelType : architecture name; 'cnn_mask' selects a binary
        cross-entropy loss, everything else uses squared error.
    num_epochs : maximum number of epochs.
    saveModelAtEpoch : if True, pickle the best model to `modelPath`
        whenever the validation loss improves.
    param_values : optional pretrained parameters to warm-start from.

    Returns
    -------
    dict with keys 'mean', 'type' and 'params' (best weights found).
    """
    # Load the dataset
    print("Loading validation set")
    # Promote a 2-D (H, W) mean image to (H, W, 1) so the reshape below works.
    if meanImage.ndim == 2:
        meanImage = meanImage[:,:,None]
    # Validation inputs: everything from trainSetVal on, mean-subtracted.
    # The mean is reshaped to (1, C, H, W) to broadcast over the batch axis.
    X_val = X_h5[trainSetVal::].astype(np.float32) - meanImage.reshape([1,meanImage.shape[2], meanImage.shape[0],meanImage.shape[1]]).astype(np.float32)
    print("Ended loading validation set")
    # Everything needed to reuse the model later is collected here.
    model = {}
    model['mean'] = meanImage
    model['type'] = modelType
    # Optionally warm-start from previously trained parameters.
    if param_values:
        lasagne.layers.set_all_param_values(network, param_values)
    input_var = lasagne.layers.get_all_layers(network)[0].input_var
    target_var = T.fmatrix('targets')
    # Stochastic (training) and deterministic (evaluation) outputs.
    prediction = lasagne.layers.get_output(network)
    params = lasagne.layers.get_all_params(network, trainable=True)
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    # Mask prediction is a per-pixel binary problem; everything else is
    # treated as regression with a squared-error loss.
    if modelType == 'cnn_mask':
        loss = lasagne.objectives.binary_crossentropy(prediction, target_var)
        loss = loss.mean()
        test_loss = lasagne.objectives.binary_crossentropy(test_prediction, target_var)
        test_loss = test_loss.mean()
    else:
        loss = lasagne.objectives.squared_error(prediction, target_var)
        loss = loss.mean()
        test_loss = lasagne.objectives.squared_error(test_prediction,
                                                     target_var)
        test_loss = test_loss.mean()
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=0.01, momentum=0.9)
    train_fn = theano.function([input_var, target_var], loss, updates=updates)
    # Compile a second function computing the validation loss and accura# cy:
    val_fn = theano.function([input_var, target_var], test_loss)
    # Finally, launch the training loop.
    print("Starting training...")
    # We iterate over epochs:
    patience = 20  # epochs to wait for a validation improvement
    best_valid = np.inf
    best_valid_epoch = 0
    best_weights = None
    batchSize = 128
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_batches = 0
        start_time = time.time()
        # The training rows are streamed from HDF5 in 20000-row slices to
        # bound memory; each slice is then iterated in shuffled minibatches.
        slicesize = 20000
        sliceidx = 0
        for start_idx in range(0, trainSetVal, slicesize):
            sliceidx += 1
            print("Working on slice " + str(sliceidx) + " of " + str(int(trainSetVal/slicesize)))
            X_train = X_h5[start_idx:min(start_idx + slicesize,trainSetVal)].astype(np.float32) - meanImage.reshape([1,meanImage.shape[2], meanImage.shape[0],meanImage.shape[1]]).astype(np.float32)
            for batch in iterate_minibatches(X_train, y_train[start_idx:min(start_idx + slicesize,trainSetVal)], batchSize, shuffle=True):
                # print("Batch " + str(train_batches))
                inputs, targets = batch
                train_err += train_fn(inputs, targets)
                train_batches += 1
        # And a full pass over the validation data:
        val_err = 0
        val_batches = 0
        for batch in iterate_minibatches(X_val, y_val, batchSize, shuffle=False):
            inputs, targets = batch
            err = val_fn(inputs, targets)
            val_err += err
            val_batches += 1
        # Early-stopping bookkeeping: keep the weights with the lowest
        # summed validation loss; give up after `patience` epochs without
        # improvement.
        if val_err < best_valid:
            best_weights = lasagne.layers.get_all_param_values(network)
            if saveModelAtEpoch:
                model['params'] = best_weights
                with open(modelPath, 'wb') as pfile:
                    pickle.dump(model, pfile)
            best_valid = val_err
            best_valid_epoch = epoch
        elif best_valid_epoch + patience < epoch:
            print("Early stopping.")
            # print("Best valid loss was {:.6f} at epoch {}.".format(
            #     best_valid, best_valid_epoch))
            break
        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
        print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
    model['params'] = best_weights
    return model
from lasagne.regularization import regularize_layer_params_weighted, l2, l1
def train_triplets_h5(X_h5, Xt1_h5, Xt2_h5, trainSetVal, meanImage, network, modelType = 'cnn', num_epochs=150, saveModelAtEpoch=True, modelPath='tmp/nnmodel.pickle', param_values=[]):
    """Train `network` with a ratio-style triplet loss on HDF5-backed data.

    X_h5, Xt1_h5, Xt2_h5 hold aligned anchor / first / second streams;
    rows [0, trainSetVal) are training data, the rest validation.  Each
    forward pass processes a concatenation [anchor | t1 | t2] of three
    equal chunks, and the loss is
        mean( ||p1 - p0||^2 / (||p2 - p0||^2 + m) )
    which pulls the t1 embedding toward the anchor relative to t2.

    Returns a dict with keys 'mean', 'type' and 'params' (best weights
    found under early stopping), mirroring train_nn_h5.
    """
    # Load the dataset
    print("Loading validation set")
    if meanImage.ndim == 2:
        meanImage = meanImage[:,:,None]
    # NOTE(review): unlike train_nn_h5, the mean image is stored in the
    # model below but never subtracted from the data here — confirm that
    # this is intended.
    X_val = X_h5[trainSetVal::].astype(np.float32)
    X_t1_val = Xt1_h5[trainSetVal::].astype(np.float32)
    X_t2_val = Xt2_h5[trainSetVal::].astype(np.float32)
    # height = X_h5[0].shape[2]
    # width = X_h5[0].shape[3]
    print("Ended loading validation set")
    model = {}
    model['mean'] = meanImage
    model['type'] = modelType
    # Optionally warm-start from previously trained parameters.
    if param_values:
        lasagne.layers.set_all_param_values(network, param_values)
    input_var = lasagne.layers.get_all_layers(network)[0].input_var
    # target_var = T.fmatrix('targets')
    predictions = lasagne.layers.get_output(network)
    params = lasagne.layers.get_all_params(network, trainable=True)
    # Each compiled call sees batchSize rows: three aligned chunks of
    # batchStepSize samples each ([anchor | t1 | t2]).
    batchSize = 36
    batchStepSize = int(batchSize / 3)
    test_predictions = lasagne.layers.get_output(network, deterministic=True)
    test_predictions_t0 = test_predictions[0:batchStepSize]
    test_predictions_t1 = test_predictions[batchStepSize:2*batchStepSize]
    test_predictions_t2 = test_predictions[2*batchStepSize::]
    # loss = lasagne.objectives.binary_crossentropy(prediction, prediction_t1, prediction_t2)
    # Small constant keeping the denominator of the ratio away from zero.
    m = 0.001
    predictions_t0 = predictions[0:batchStepSize]
    predictions_t1 = predictions[batchStepSize:2*batchStepSize]
    predictions_t2 = predictions[2*batchStepSize::]
    # Ratio triplet loss (see docstring), averaged over the chunk.
    loss = T.sum((predictions_t1 - predictions_t0)**2, axis=1) / (T.sum((predictions_t2 - predictions_t0)**2, axis=1) + m)
    loss = loss.mean()
    # ipdb.set_trace()
    layers = lasagne.layers.get_all_layers(network)
    regLayers = {layer:0.01 for layer in layers}
    # NOTE(review): l2_penalty is computed but never added to the loss —
    # looks like disabled/leftover regularization; confirm before removing.
    l2_penalty = regularize_layer_params_weighted(regLayers,l2)
    # loss = loss
    test_loss = T.sum((test_predictions_t1 - test_predictions_t0)**2, axis=1) / (T.sum((test_predictions_t2 - test_predictions_t0)**2, axis=1) + m)
    test_loss = test_loss.mean()
    # test_loss = test_loss
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=0.01, momentum=0.9)
    train_fn = theano.function([input_var], loss, updates=updates)
    # Debug helpers exposing each third of the deterministic predictions.
    pred_t0_fn = theano.function([input_var], test_predictions_t0)
    pred_t1_fn = theano.function([input_var], test_predictions_t1)
    pred_t2_fn = theano.function([input_var], test_predictions_t2)
    # Compile a second function computing the validation loss and accura# cy:
    val_fn = theano.function([input_var], test_loss)
    # Finally, launch the training loop.
    print("Starting training...")
    # We iterate over epochs:
    patience = 20  # epochs to wait for a validation improvement
    best_valid = np.inf
    best_valid_epoch = 0
    best_weights = None
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_batches = 0
        start_time = time.time()
        # Stream the three training streams from HDF5 in 10000-row slices.
        slicesize = 10000
        sliceidx = 0
        for start_idx in range(0, trainSetVal, slicesize):
            sliceidx += 1
            print("Working on slice " + str(sliceidx) + " of " + str(int(trainSetVal/slicesize)))
            X_train = X_h5[start_idx:min(start_idx + slicesize,trainSetVal)].astype(np.float32)
            X_t1_train = Xt1_h5[start_idx:min(start_idx + slicesize,trainSetVal)].astype(np.float32)
            X_t2_train = Xt2_h5[start_idx:min(start_idx + slicesize, trainSetVal)].astype(np.float32)
            # parameterVals = y_train[start_idx:min(start_idx + slicesize,trainSetVal)]
            # anchorIdx, closeIdx, farIdx = getTriplets(parameterVals, closeDist=5 * np.pi / 180, farDist=15 * np.pi / 180, normConst=2 * np.pi, chunkSize=10)
            for batch in iterate_minibatches_triplets(X_train, X_t1_train, X_t2_train, batchStepSize, shuffle=True):
                # print("Batch " + str(train_batches))
                inputs, inputs_t1, inputs_t2 = batch
                # Concatenate the three aligned chunks into one forward pass.
                #
                # p0 = pred_t0_fn(np.concatenate([inputs, inputs_t1, inputs_t2], axis=0))
                # p1 = pred_t1_fn(np.concatenate([inputs, inputs_t1, inputs_t2], axis=0))
                # p2 = pred_t2_fn(np.concatenate([inputs, inputs_t1, inputs_t2], axis=0))
                # nperr = np.sum((p1 - p0) ** 2, axis=1) / (np.sum((p2 - p0)**2, axis=1) + m)
                train_err += train_fn(np.concatenate([inputs, inputs_t1, inputs_t2],axis=0))
                # #
                # p0 = pred_t0_fn(np.concatenate([inputs, inputs_t1, inputs_t2], axis=0))
                # p1 = pred_t1_fn(np.concatenate([inputs, inputs_t1, inputs_t2], axis=0))
                # p2 = pred_t2_fn(np.concatenate([inputs, inputs_t1, inputs_t2], axis=0))
                #
                # nperr = np.sum((p1 - p0) ** 2, axis=1) / (np.sum((p2 - p0)**2, axis=1) + m)
                train_batches += 1
        # And a full pass over the validation data:
        val_err = 0
        val_batches = 0
        # NOTE(review): validation iterates with shuffle=True, so the set
        # of dropped trailing samples can differ per epoch — confirm.
        for batch in iterate_minibatches_triplets(X_val, X_t1_val, X_t2_val, batchStepSize, shuffle=True):
            inputs, inputs_t1, inputs_t2 = batch
            err = val_fn(np.concatenate([inputs, inputs_t1, inputs_t2], axis=0))
            val_err += err
            val_batches += 1
        # Early-stopping bookkeeping, mirroring train_nn_h5.
        if val_err < best_valid:
            best_weights = lasagne.layers.get_all_param_values(network)
            if saveModelAtEpoch:
                model['params'] = best_weights
                with open(modelPath, 'wb') as pfile:
                    pickle.dump(model, pfile)
            best_valid = val_err
            best_valid_epoch = epoch
        elif best_valid_epoch + patience < epoch:
            print("Early stopping.")
            # print("Best valid loss was {:.6f} at epoch {}.".format(
            #     best_valid, best_valid_epoch))
            break
        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
        print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
    model['params'] = best_weights
    return model
| 63,328 | 40.122727 | 197 | py |
inversegraphics | inversegraphics-master/densecrf_model.py | """
Usage: python util_inference_example.py image annotations
Adapted from the dense_inference.py to demonstate the usage of the util
functions.
"""
import sys
import numpy as np
import cv2
import pydensecrf.densecrf as dcrf
import matplotlib.pylab as plt
from skimage.segmentation import relabel_sequential
import skimage
from skimage.segmentation import slic
import skimage.segmentation
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
import ipdb
from pydensecrf.utils import compute_unary, create_pairwise_bilateral, \
create_pairwise_gaussian
def unaryOcclusionModel(img, fgMask, probs):
"""
Simple classifier that is 50% certain that the annotation is correct
(same as in the inference example).
"""
fg_energy = -np.log(probs[0])
occ_energy = -np.log(probs[1])
probBackground = 0.6
U = np.zeros((3, fgMask.size), dtype='float32')
U[0, :] = -np.log(0.0001)
U[1, :] = -np.log((1-probBackground))
U[2, :] = -np.log(probBackground)
U[0, fgMask] = fg_energy
U[1, fgMask] = occ_energy
U[2, fgMask] = -np.log(0.0001)
return U
def superpixelUnary(img, U, fgMask, iouProb):
segments = skimage.segmentation.quickshift(img, ratio=1, max_dist=10, convert2lab=True)
# plt.imshow(segments)
fgMaskReshaped = fgMask.reshape([img.shape[0], img.shape[1]])
for seg_i in np.arange(np.max(segments)):
currentSegment = segments == seg_i
masksCat = np.concatenate([fgMaskReshaped[:,:,None], currentSegment[:,:,None]], axis=2)
segmentationIOU = np.sum(np.all(masksCat, axis=2)) / np.sum(currentSegment)
if segmentationIOU > 0.05 and segmentationIOU < 0.95:
U[0, currentSegment.ravel()*fgMask] = -np.log(1 - iouProb)
U[1, currentSegment.ravel()*fgMask] = -np.log(iouProb)
U[2, currentSegment.ravel()*(~fgMask)] = -np.log(1 - iouProb)
U[1, currentSegment.ravel()*(~fgMask)] = -np.log(iouProb)
# # Edge pixels inside the mask (and far from boundary) are occluder pixels. How to choose the right one? (interior pixels)
# if segmentationIOU >= 0.95 :
#
# U[0, currentSegment.ravel()*fgMask] = -np.log(iouProb)
# U[1, currentSegment.ravel()*fgMask] = -np.log(1-iouProb)
# # Segments with edges that match with boundary of the mask are fg.
# if segmentationIOU >= 0.99:
# U[0, currentSegment*fgMask] = 1 - iouProb
# U[1, currentSegment*fgMask] = iouProb
# U[2, currentSegment*(~fgMask)] = 1 - iouProb
# U[1, currentSegment*(~fgMask)] = iouProb
return U
def boundaryUnary(img, U, fgMask, boundProb):
from damascene import damascene
#img must be uint8 0-255
borders, textons, orientations = damascene(np.uint8(img[:,:,0:3]*255), device_num=0)
return U
def crfInference(imageGT, fgMask, probs):
##################################
### Read images and annotation ###
##################################
# img = np.uint8(img*255)
img = skimage.color.rgb2lab(imageGT)
M = 3 # forground, background, occluding object.
###########################
### Setup the CRF model ###
###########################
# Example using the DenseCRF class and the util functions
crfmodel = dcrf.DenseCRF(img.shape[0] * img.shape[1], M)
# get unary potentials (neg log probability)
# U = compute_unary(labels, M)
U = unaryOcclusionModel(img, fgMask, probs)
U = superpixelUnary(imageGT, U, fgMask, 0.8)
crfmodel.setUnaryEnergy(U)
# This creates the color-independent features and then add them to the CRF
feats = create_pairwise_gaussian(sdims=(3, 3), shape=img.shape[:2])
crfmodel.addPairwiseEnergy(feats, compat=3,
kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
# This creates the color-dependent features and then add them to the CRF
feats = create_pairwise_bilateral(sdims=(30, 30), schan=(13, 13, 13),
img=img, chdim=2)
crfmodel.addPairwiseEnergy(feats, compat=10,
kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
####################################
### Do inference and compute map ###
####################################
Q = crfmodel.inference(5)
mapseg = np.argmax(Q, axis=0).reshape(img.shape[:2])
# res = map.astype('float32') * 255 / map.max()
# plt.imshow(res)
# plt.show()
# # Manually inference
# Q, tmp1, tmp2 = crfmodel.startInference()
# for i in range(5):
# print("KL-divergence at {}: {}".format(i, crfmodel.klDivergence(Q)))
# crfmodel.stepInference(Q, tmp1, tmp2)
return mapseg, np.array(Q) | 4,856 | 31.165563 | 131 | py |
inversegraphics | inversegraphics-master/probLineSearch.py | from scipy.special import erf
import numpy as np
from scipy.stats import mvn
import ipdb
def probLineSearch(func, x0, f0, df0, search_direction, alpha0,
                   verbosity, outs, paras, var_f0, var_df0):
    """A probabilistic line search algorithm for nonlinear optimization
    problems with noisy gradients (Python translation of probLineSearch.m).

    Inputs:
        func: handle with signature [f, df, var_f, var_df] = func(x), where
            x is a position (Dx1), f a scalar function value, df the gradient
            (Dx1), and var_f / var_df the estimated noise (variances) of the
            function value and gradient.
        x0: current position of the optimizer (Dx1).
        f0: function value at x0 (scalar; previous output y_tt).
        df0: gradient at x0 (Dx1; previous output dy_tt).
        search_direction: search direction (-df(x0); need not be normalized).
        alpha0: initial step size (scalar; previous output alpha0_out).
        verbosity: level of stdout output; 0 = silent, >0 = print progress.
        outs: dict of collected statistics; must already contain 'counter'.
        paras: extra parameters for func (currently unused in this body).
        var_f0: variance of the function value at x0 (scalar).
        var_df0: variance of the gradient at x0 (Dx1).

    Outputs (tuple):
        outs: updated statistics dict.
        alpha0_out: accepted step size * 1.3 (initial step for the next search).
        y_tt: function value at the accepted position.
        dy_tt: gradient at the accepted position.
        x_tt: accepted position.
        var_f_tt: variance of the function value at x_tt.
        var_df_tt: variance of the gradient at x_tt.
    """
    # -- setup fixed parameters -----------------------------------------------
    outs['alpha_stats'] = alpha0 # running average over accepted step sizes
    limit = 7 # maximum #function evaluations in one line search (+1)
    # constants for Wolfe conditions (must be chosen 0 < c1 < c2 < 1)
    c1 = 0.05 # <---- DECIDED FIXED 0.05
    c2 = 0.8 # <---- DECIDED FIXED 0.8
    # c2 = 0 extends until ascend location reached: lots of extrapolation
    # c2 = 1 accepts any point of increased gradient: almost no extrapolation
    WolfeThreshold = 0.3 # <---- DECIDED FIXED (0.3)
    # the new free parameter of this method relative to sgd:
    # search is terminated when probability(Wolfe conditions hold) > WolfeThreshold
    # not sensitive between 0.1 and 0.8 (0 = accept everything, 1 = accept nothing)
    offset = 10 # off-set, for numerical stability.
    EXT = 1 # extrapolation factor
    tt = 1 # initial step size in scaled space
    # -- set up GP ------------------------------------------------------------
    # create variables with shared scope. Kept from the MATLAB original, where
    # nested functions only discover shared variables that are created first
    # in the enclosing scope; updateGP() rebinds them via `nonlocal`.
    d2m = np.array([]); d3m = np.array([]); V = np.array([]); Vd = np.array([]); dVd = np.array([])
    m0 = np.array([]); dm0 = np.array([]); V0= np.array([]);Vd0 = np.array([]); dVd0= np.array([])
    V0f = np.array([]); Vd0f= np.array([]); V0df=np.array([]); Vd0df = np.array([])
    # kernel (once-integrated Wiener process) and its derivative combinations:
    k = lambda a,b: ((np.minimum(a+offset,b+offset)**3)/3 + 0.5 * np.abs(a-b) * np.minimum(a+offset,b+offset)**2)
    kd = lambda a,b: np.int32(a<b) * ((a+offset)**2)/2 + np.int32(a>=b) * (np.dot(a+offset,b+offset) - 0.5 * (b+offset)**2)
    dk = lambda a,b: np.int32(a>b) * ((b+offset)**2)/2 + np.int32(a<=b) * (np.dot(a+offset,b+offset) - 0.5 * (a+offset)**2)
    dkd= lambda a,b: np.minimum(a+offset,b+offset)
    # further derivatives
    ddk = lambda a,b: np.int32(a<=b) * (b-a)
    ddkd= lambda a,b: np.int32(a<=b)
    dddk= lambda a,b: -np.int32(a<=b)
    # -- helper functions -----------------------------------------------------
    GaussCDF = lambda z: 0.5 * (1 + erf(z/np.sqrt(2)))
    GaussPDF = lambda z: np.exp( - 0.5 * z**2 ) / np.sqrt(2*np.pi)
    # expected improvement of mean m / std s over current best eta
    EI = lambda m,s,eta: (eta - m) * GaussCDF((eta-m)/s) + s * GaussPDF((eta-m)/s)
    # -- scale ----------------------------------------------------------------
    beta = np.abs(np.dot(search_direction.T,df0))
    # scale f and df according to 1/(beta*alpha0)
    # -- scaled noise ---------------------------------------------------------
    sigmaf = np.sqrt(var_f0)/(alpha0*beta)
    sigmadf = np.sqrt(np.dot((search_direction**2).T,var_df0))/beta
    # -- initiate data storage ------------------------------------------------
    # T: probed step sizes (scaled), Y: scaled function values,
    # dY: raw gradients (DxN), dY_projected: gradients projected on the
    # search direction (scaled), Sigmaf/Sigmadf: associated noise.
    T = np.array([0])
    Y = np.array([0])
    dY = np.array(df0)[:,None]
    dY_projected = np.array([np.dot(df0.T,search_direction)/beta])
    Sigmaf = np.array([var_f0])
    Sigmadf = np.array(var_df0)[:,None]
    N = 1
    # GP posterior handles; all of these are (re)bound inside updateGP().
    m = []
    d1m = []
    d2m = []
    d3m = []
    V = []
    Vd = []
    dVd = []
    m0 = []
    dm0 = []
    V0 = []
    Vd0 = []
    dVd0 = []
    V0f = []
    Vd0f = []
    V0df = []
    d0df = [] # NOTE(review): never read; probably meant Vd0df — confirm
    # -- helper functions -----------------------------------------------------
    def updateGP(): # using multiscope variables to construct GP
        """Rebuild the GP posterior from the N observations in T, Y, dY_projected."""
        nonlocal m
        nonlocal d1m
        nonlocal d2m
        nonlocal d3m
        nonlocal V
        nonlocal Vd
        nonlocal dVd
        nonlocal m0
        nonlocal dm0
        nonlocal V0
        nonlocal Vd0
        nonlocal dVd0
        nonlocal V0f
        nonlocal Vd0f
        nonlocal V0df
        nonlocal Vd0df
        # build Gram matrix blocks (value/value, value/derivative, deriv/deriv)
        kTT = np.zeros([N,N]);
        kdTT = np.zeros([N,N]);
        dkdTT = np.zeros([N,N]);
        for i in range(N):
            for j in range(N):
                kTT[i,j] = k(T[i], T[j])
                kdTT[i,j] = kd(T[i], T[j])
                dkdTT[i,j] = dkd(T[i],T[j])
        # build noise matrix (function-value noise first, then gradient noise)
        Sig = sigmaf**2 * np.ones([2*N, 1]); Sig[N::] = sigmadf**2
        # build Gram matrix
        G = np.diag(Sig.ravel()) + np.r_[np.c_[kTT, kdTT], np.c_[kdTT.T, dkdTT]]
        A = np.linalg.solve(G, np.append(Y, dY_projected))
        # posterior mean function and all its derivatives
        m = lambda t: np.dot(np.concatenate([k(t, T.T) , kd(t, T.T)]), A)
        d1m = lambda t: np.dot(np.concatenate([dk(t, T.T) , dkd(t, T.T)]), A)
        d2m = lambda t: np.dot(np.concatenate([ddk(t, T.T) ,ddkd(t, T.T)]), A)
        d3m = lambda t: np.dot(np.concatenate([dddk(t, T.T), np.zeros([N])]), A)
        # posterior marginal covariance between function and first derivative
        V = lambda t: k(t,t) - np.dot(np.concatenate([k(t, T.T) , kd(t, T.T)]), np.linalg.solve(G , np.concatenate([k(t, T.T) , kd(t, T.T)]).T))
        Vd = lambda t: kd(t,t) - np.dot(np.concatenate([k(t, T.T) , kd(t, T.T)]), np.linalg.solve(G , np.concatenate([dk(t, T.T),dkd(t, T.T)]).T))
        dVd = lambda t: dkd(t,t) - np.dot(np.concatenate([dk(t, T.T), dkd(t, T.T)]), np.linalg.solve(G , np.concatenate([dk(t, T.T),dkd(t, T.T)]).T))
        # belief at starting point, used for Wolfe conditions
        m0 = m(0)
        dm0 = d1m(0)
        V0 = V(0)
        Vd0 = Vd(0)
        dVd0 = dVd(0)
        # covariance terms with function (derivative) values at origin
        V0f = lambda t: k(0,t) - np.dot(np.concatenate([k(0, T.T) , kd(0, T.T)]) , np.linalg.solve(G, np.concatenate([k(t, T.T) , kd(t, T.T)]).T))
        Vd0f = lambda t: dk(0,t) - np.dot(np.concatenate([dk(0, T.T), dkd(0, T.T)]) , np.linalg.solve(G , np.concatenate([k(t, T.T) , kd(t, T.T)]).T))
        V0df = lambda t: kd(0,t) - np.dot(np.concatenate([k(0, T.T), kd(0, T.T)]) , np.linalg.solve(G , np.concatenate([dk(t, T.T),dkd(t, T.T)]).T))
        Vd0df = lambda t: dkd(0,t)- np.dot(np.concatenate([dk(0, T.T), dkd(0, T.T)]) , np.linalg.solve(G , np.concatenate([dk(t, T.T),dkd(t, T.T)]).T))
    # -- update GP with new datapoint -----------------------------------------
    updateGP()
    # outputs filled in by make_outs() (shared scope):
    x_tt = []
    y_tt = []
    dy_tt = []
    var_f_tt = []
    var_df_tt = []
    alpha0_out = []
    def make_outs(y, dy, var_f, var_df):
        """Fill the output variables for the accepted (scaled) step tt and
        update the running step-size statistics / next initial step size."""
        nonlocal x_tt
        nonlocal y_tt
        nonlocal dy_tt
        nonlocal var_f_tt
        nonlocal var_df_tt
        nonlocal alpha0_out
        nonlocal outs
        x_tt = x0 + tt*alpha0*search_direction # accepted position
        y_tt = y*(alpha0*beta) + f0 # function value at accepted position (un-scaled)
        dy_tt = dy # gradient at accepted position
        var_f_tt = var_f # variance of function value at accepted position
        var_df_tt = var_df # variance of gradients at accepted position
        # set new step size:
        # next initial step size is 1.3 times larger than last accepted step size
        alpha0_out = tt*alpha0 * 1.3
        # running average for reset in case the step size becomes very small
        # this is a safeguard
        gamma = 0.9
        outs['alpha_stats'] = gamma*outs['alpha_stats'] + (1-gamma)*tt*alpha0;
        # reset NEXT initial step size to average step size if accepted step
        # size is 100 times smaller or larger than average step size
        if (alpha0_out > 1e2*outs['alpha_stats']) or (alpha0_out < 1e-2*outs['alpha_stats']):
            if verbosity > 0:
                print('making a very small step, resetting alpha0')
            alpha0_out = outs['alpha_stats'] # reset step size
    def probWolfe(t): # probability for Wolfe conditions to be fulfilled
        """Return (p, p12): joint probability that the probabilistic Wolfe
        conditions hold at t, plus individual marginals (for debugging)."""
        # marginal for Armijo condition
        ma = m0 - m(t) + c1 * t * dm0
        Vaa = V0 + (c1 * t)**2 * dVd0 + V(t) + 2 * (c1 * t * (Vd0 - Vd0f(t)) - V0f(t))
        # marginal for curvature condition
        mb = d1m(t) - c2 * dm0
        Vbb = c2**2 * dVd0 - 2 * c2 * Vd0df(t) + dVd(t)
        # covariance between conditions
        Vab = -c2 * (Vd0 + c1 * t * dVd0) + V0df(t) + c2 * Vd0f(t) + c1 * t * Vd0df(t) - Vd(t)
        if (Vaa < 1e-9) and (Vbb < 1e-9): # deterministic evaluations
            p = np.int32(ma >= 0) * np.int32(mb >= 0)
            return p, None
        # joint probability; degenerate (non-positive) variances mean "reject"
        if Vaa <= 0 or Vbb <= 0:
            p = 0
            p12 = np.array([0,0,0])
            return p,p12
        rho = Vab / np.sqrt(Vaa * Vbb)
        upper = 2 * c2 * ((np.abs(dm0)+2*np.sqrt(dVd0)-mb)/np.sqrt(Vbb) )
        # bivariate normal probability over the rectangle (MATLAB bvn equivalent)
        _, p, _ = mvn.mvndst(np.array([-ma / np.sqrt(Vaa),-mb / np.sqrt(Vbb)]), np.array([np.inf,upper]), np.array([1, 2]), rho)
        # individual marginal probabilities for each condition (for debugging)
        p12 = np.array([1 - GaussCDF(-ma/np.sqrt(Vaa)), GaussCDF(upper)- GaussCDF(-mb/np.sqrt(Vbb)), Vab / np.sqrt(Vaa * Vbb)])
        return p, p12
    def cubicMinimum(ts):
        """Return the minimizer of the local cubic GP mean around ts
        (np.inf when the cubic has no real roots)."""
        nonlocal d1m
        nonlocal d2m
        nonlocal d3m
        # mean belief at ts is a cubic function. It is defined up to a constant by
        d1mt = d1m(ts)
        d2mt = d2m(ts)
        d3mt = d3m(ts)
        a = 0.5 * d3mt
        b = d2mt - ts * d3mt
        c = d1mt - d2mt * ts + 0.5 * d3mt * ts**2
        if abs(d3mt) < 1e-9: # essentially a quadratic. Single extremum
            tm = - (d1mt - ts * d2mt) / d2mt
            return tm
        # compute the two possible roots:
        detmnt = b**2 - 4*a*c
        if detmnt < 0: # no roots
            tm = np.inf
            return tm
        LR = (-b - np.sign(a) * np.sqrt(detmnt)) / (2*a) # left root
        RR = (-b + np.sign(a) * np.sqrt(detmnt)) / (2*a) # right root
        # and the two values of the cubic at those points (up to constant)
        Ldt = LR - ts # delta t for left root
        Rdt = RR - ts # delta t for right root
        LCV = d1mt * Ldt + 0.5 * d2mt * Ldt**2 + (d3mt * Ldt**3)/6 # left cubic value
        RCV = d1mt * Rdt + 0.5 * d2mt * Rdt**2 + (d3mt * Rdt**3)/6 # right cubic value
        if LCV < RCV:
            tm = LR
        else:
            tm = RR
        return tm
    # -- search (until budget used or converged) ------------------------------
    # NOTE: the loop variable N is also read by updateGP() through the closure.
    for N in range(2,limit+1):
        # -- evaluate function (change minibatch!) ----------------------------
        outs['counter'] = outs['counter'] + 1
        [y, dy, var_f, var_df] = func(x0 + tt*alpha0*search_direction) # y: function value at tt
        if np.isinf(y) or np.isnan(y):
            # this does not happen often, but still needs a fix
            # e.g. if function return inf or nan (probably too large step),
            # evaluate again at 0.1*tt
            print('function values is inf or nan.')
        print("N " + str(N)) # debug output: evaluation count in this search
        # -- scale function output --------------------------------------------
        y = (y - f0)/(alpha0*beta) # substract offset
        dy_projected = np.dot(dy.T,search_direction)/beta # projected gradient
        # -- store output -----------------------------------------------------
        T = np.append(T,tt)
        Y = np.append(Y,y)
        dY = np.hstack([dY,dy.reshape([-1,1])])
        dY_projected = np.append(dY_projected, dy_projected)
        Sigmaf = np.append(Sigmaf, var_f)
        Sigmadf = np.hstack([Sigmadf,var_df.reshape([-1,1])])
        # -- update GP with new datapoint -------------------------------------
        updateGP() # store, update belief
        # -- check last evaluated point for acceptance ------------------------
        (probWolfeVal, _) = probWolfe(tt)
        if probWolfeVal > WolfeThreshold: # are we done?
            if verbosity > 0:
                print('found acceptable point.')
            make_outs(y, dy, var_f, var_df)
            return outs, alpha0_out, y_tt, dy_tt, x_tt, var_f_tt, var_df_tt
        # -- find candidates (set up for EI) ----------------------------------
        # decide where to probe next: evaluate expected improvement and prob
        # Wolfe conditions at a number of candidate points, take the most promising one.
        # lowest mean evaluation, and its gradient (needed for EI):
        M = np.zeros([N,1])
        dM = np.zeros([N,1])
        for l in range(N):
            M[l] = m(T[l])
            dM[l] = d1m(T[l])
        minj = np.argmin(M) # minm: lowest GP mean, minj: index in candidate list
        minm = M[minj]
        tmin = T[minj] # tt of point with lowest GP mean of function values
        dmin = dM[minj] # GP mean of gradient at lowest point
        # -- check this point as well for acceptance --------------------------
        if np.abs(dmin) < 1e-5 and Vd(tmin) < 1e-4: # nearly deterministic
            tt = tmin; dy = dY[:, minj]; y = Y[minj]; var_f = Sigmaf[minj]; var_df = Sigmadf[:, minj];
            print('found a point with almost zero gradient. Stopping, although Wolfe conditions not guaranteed.')
            make_outs(y, dy, var_f, var_df)
            return outs, alpha0_out, y_tt, dy_tt, x_tt, var_f_tt, var_df_tt
        # -- find candidates --------------------------------------------------
        # CANDIDATES 1: minimal means between all evaluations:
        # iterate through all `cells' (O[N]), check minimum mean locations.
        Tcand = np.array([]) # positions of candidate points
        Mcand = np.array([]) # means of candidate points
        Scand = np.array([]) # standard deviation of candidate points
        Tsort = np.sort(T)
        Wolfes = np.array([]) # list of acceptable points.
        reeval = False
        for cel in range(N-1): # loop over cells
            Trep = Tsort[cel] + 1e-6 * (Tsort[cel+1] - Tsort[cel])
            cc = cubicMinimum(Trep)
            # add point to candidate list if minimum lies in between T(cel) and T(cel+1)
            if cc > Tsort[cel] and cc < Tsort[cel+1]:
                Tcand = np.append(Tcand,cc)
                Mcand = np.append(Mcand,m(cc))
                Scand = np.append(Scand, np.sqrt(V(cc)))
            else: # no minimum, just take half-way
                if cel==1 and d1m(0) > 0: # only in first cell
                    # NOTE(review): with 0-based indexing `cel==1` is the
                    # second cell; the MATLAB original's "first cell" would
                    # be cel==0 here — confirm intended behavior.
                    if verbosity > 0:
                        print('function seems very steep, reevaluating close to start.')
                    reeval = True
                    Tcand = np.array([0.1 * (Tsort[cel] + Tsort[cel+1])])
                    Mcand = np.append(Mcand, m(0.1 * (Tsort[cel] + Tsort[cel+1])))
                    Scand = np.append(Scand, np.sqrt(V(0.1 * (Tsort[cel] + Tsort[cel+1]))))
                    break
            # check whether there is an acceptable point among already
            # evaluated points (since this changes each time the GP gets updated)
            probWolfeVal, _ = probWolfe(Tsort[cel])
            if cel > 1 and (probWolfeVal > WolfeThreshold):
                Wolfes = np.append(Wolfes,Tsort[cel]) # list of acceptable points.
        # -- check if at least one point is acceptable ------------------------
        if len(Wolfes) > 0:
            if verbosity > 0:
                print('found acceptable point.')
            # -- chose best point among Wolfes, return. -----------------------
            MWolfes = 0 * Wolfes;
            for o in range(len(Wolfes)):
                MWolfes[o] = m(Wolfes[o]) # compute GP means of acceptable points
            tt = Wolfes[MWolfes == np.min(MWolfes)]
            # find corresponding gradient and variances
            dy = dY[:, T == tt].ravel(); y = Y[T == tt].ravel(); var_f = Sigmaf[T == tt].ravel(); var_df = Sigmadf[:, T==tt].ravel();
            make_outs(y, dy, var_f, var_df)
            return outs, alpha0_out, y_tt, dy_tt, x_tt, var_f_tt, var_df_tt
        # Candidate 2: one extrapolation step
        if not reeval:
            Tcand = np.append(Tcand, np.max(T) + EXT)
            Mcand = np.append(Mcand, m(np.max(T) + EXT))
            Scand = np.append(Scand, np.sqrt(V(np.max(T)+EXT)))
        # -- discriminate candidates through EI and prob Wolfe ----------------
        EIcand = EI(Mcand, Scand, minm) # minm: lowest GP mean of collected evaluations (not candidates)
        PPcand = np.zeros(EIcand.shape)
        for ti in range(len(EIcand)):
            PPcand[ti], _ = probWolfe(Tcand[ti])
        idx_best = np.argmax(EIcand * PPcand) # find best candidate
        if Tcand[idx_best] == tt + EXT: # extrapolating. Extend extrapolation step
            EXT = 2 * EXT
        tt = Tcand[idx_best]
    # -- algorithm reached limit without finding acceptable point. ------------
    # Evaluate a final time, return "best" point (one with lowest function value)
    outs['counter'] = outs['counter'] + 1
    [y, dy, var_f, var_df] = func(x0 + tt*alpha0*search_direction) # y: function value at tt
    if np.isinf(y) or np.isnan(y):
        # this does not happen often, but still needs a fix
        # e.g. if function return inf or nan (probably too large step),
        # evaluate again at 0.1*tt
        print('function values is inf or nan.')
    # -- scale function output ------------------------------------------------
    y = (y - f0)/(alpha0*beta) # substract offset
    dy_projected = np.dot(dy.T,search_direction)/beta # projected gradient at tt;
    # -- store output -----------------------------------------------------
    T = np.append(T,tt)
    Y = np.append(Y,y)
    dY = np.hstack([dY,dy.reshape([-1,1])])
    dY_projected = np.append(dY_projected, dy_projected)
    N = limit + 1
    Sigmaf = np.append(Sigmaf, var_f)
    Sigmadf = np.hstack([Sigmadf,var_df.reshape([-1,1])])
    # -- update GP with new datapoint -----------------------------------------
    updateGP()
    # -- check last evaluated point for acceptance ----------------------------
    probWolfeVal, _ = probWolfe(tt)
    if probWolfeVal > WolfeThreshold: # are we done?
        if verbosity > 0:
            print('found acceptable point right at end of budget. Phew!')
        make_outs(y, dy, var_f, var_df)
        return outs, alpha0_out, y_tt, dy_tt, x_tt, var_f_tt, var_df_tt
    # -- return point with lowest mean ----------------------------------------
    M = np.ones([N,1])*np.inf
    for ii in range(N):
        M[ii] = m(T[ii]) # compute all GP means of all evaluated locations
    minj = np.argmin(M) # index of minimal GP mean
    if verbosity > 0:
        print('reached evaluation limit. Returning ''best'' known point.');
    # find corresponding tt, gradient and noise
    tt = T[minj]; dy = dY[:, minj]; y = Y[minj]; var_f = Sigmaf[minj]; var_df = Sigmadf[:, minj]
    make_outs(y, dy, var_f, var_df)
    return outs, alpha0_out, y_tt, dy_tt, x_tt, var_f_tt, var_df_tt
inversegraphics | inversegraphics-master/render.py | #!/usr/bin/env python3.4m
import matplotlib
# matplotlib.use('Agg')
import scene_io_utils
import re
from blender_utils import *
from collision import *
import matplotlib.pyplot as plt
# ---------------------------------------------------------------------------
# Ground-truth generation script (Blender 2.7x API): for each replaceable
# scene, place the target teapot(s), sample random camera and object poses,
# record 2D keypoint projections and occlusion labels per frame, and render
# the keyframed animation in several layer configurations (object-only EXR,
# full scene, PNG images, and an unoccluded pass).
#
# Fixes vs. the previous revision:
#   * removed a leftover `ipdb.set_trace()` breakpoint that hung headless runs
#   * the scene list file is now read with a `with` block (handle was leaked)
#   * keypoint vectors are no longer mutated in place per teapot (transforms
#     accumulated across loop iterations)
#   * cleanup no longer mutates `scene.objects` while iterating over it
# ---------------------------------------------------------------------------
numpy.random.seed(1)

inchToMeter = 0.0254
outputDir = '../data/output/'
if not os.path.exists(outputDir):
    os.makedirs(outputDir)
prefix = '_tmp'

# Render configuration.
width = 110
height = 110
numSamples = 1024
useCycles = False
useGPU = False
distance = 0.45      # camera distance from the target object (see originalLoc)
numFrames = 200      # pose samples (frames) per teapot
batchSize = 10       # frames rendered per bpy render call
completeScene = True

cam = bpy.data.cameras.new("MainCamera")
camera = bpy.data.objects.new("MainCamera", cam)
world = bpy.data.worlds.new("MainWorld")

renderTeapotsList = [2]
[targetScenes, targetModels, transformations] = scene_io_utils.loadTargetModels(renderTeapotsList)

replaceableScenesFile = '../databaseFull/fields/scene_replaceables.txt'
# Read the scene list once; `with` guarantees the file handle is closed.
with open(replaceableScenesFile) as scenesFile:
    sceneLines = [line.strip() for line in scenesFile]
sceneLineNums = numpy.arange(len(sceneLines))

# Teapot keypoints in the model's canonical coordinate frame.
spout = mathutils.Vector((-6.2, -0.16, 6.25))
handle = mathutils.Vector((6.2, 0.2, 5.7))
tip = mathutils.Vector((0, 0, 8))
occludeHandle = True

for sceneNum in sceneLineNums[0:1]:
    # Each scene line is "<scene file> <target index> ...".
    sceneLine = sceneLines[sceneNum]
    sceneParts = sceneLine.split(' ')
    sceneFile = sceneParts[0]
    sceneNumber = int(re.search('.+?scene([0-9]+)\.txt', sceneFile, re.IGNORECASE).groups()[0])
    sceneFileName = re.search('.+?(scene[0-9]+\.txt)', sceneFile, re.IGNORECASE).groups()[0]
    targetIndex = int(sceneParts[1])
    instances = scene_io_utils.getSceneInstancesInfo('../databaseFull/scenes/' + sceneFileName)
    targetParentPosition = instances[targetIndex][2]
    targetParentIndex = instances[targetIndex][1]

    [blenderScenes, modelInstances] = scene_io_utils.importBlenderScenes(instances, completeScene, targetIndex)
    targetParentInstance = modelInstances[targetParentIndex]
    targetParentInstance.layers[2] = True

    # Find the room object by its naming convention ("roomNN...").
    roomName = ''
    for model in modelInstances:
        reg = re.compile('(room[0-9]+)')
        res = reg.match(model.name)
        if res:
            roomName = res.groups()[0]

    # Resolve the full instance name of the designated occluding object.
    occludingObjName = 'c9fe86bef85fd1d0caeedf6b101df8f6'
    for model in modelInstances:
        reg = re.compile('(.*?' + occludingObjName + '.*?)')
        res = reg.match(model.name)
        if res:
            occludingObjName = res.groups()[0]

    scene = scene_io_utils.composeScene(modelInstances, targetIndex)
    roomInstance = scene.objects[roomName]
    roomInstance.layers[2] = True
    targetParentInstance.layers[2] = True

    # Duplicate the occluder (as a group instance) so it can be moved in
    # front of the teapot's handle without disturbing the original.
    occludingObjInstance = scene.objects[occludingObjName]
    newOccludingObjInstance = bpy.data.objects.new(occludingObjName + '_2', None)
    newOccludingObjInstance.dupli_type = 'GROUP'
    newOccludingObjInstance.dupli_group = occludingObjInstance.dupli_group
    newOccludingObjInstance.matrix_world = occludingObjInstance.matrix_world
    newOccludingObjInstance.pass_index = 0

    scene.update()
    scene.render.threads = 20
    scene.render.threads_mode = 'AUTO'
    bpy.context.screen.scene = scene
    cycles = bpy.context.scene.cycles
    scene.render.tile_x = 25
    scene.render.tile_y = 25
    originalLoc = mathutils.Vector((0,-distance , 0))
    setupScene(scene, targetIndex,roomName, world, distance, camera, width, height, numSamples, useCycles, useGPU)
    bpy.context.user_preferences.system.prefetch_frames = batchSize
    bpy.context.user_preferences.system.memory_cache_limit = 1000

    totalAzimuths = []
    totalObjAzimuths = []
    totalElevations = []
    totalObjectIds = []
    frameStart = 0
    frameEnd = frameStart + numFrames

    for teapotIdx, teapotNum in enumerate(renderTeapotsList):
        director = outputDir
        teapot = targetModels[teapotIdx]
        teapot.layers[1] = True
        teapot.layers[2] = True
        transformation = transformations[teapotIdx]
        # Transform canonical keypoints into this teapot's frame WITHOUT
        # mutating the module-level vectors (previously the transforms
        # accumulated across teapot iterations).
        spoutKp = transformation * spout
        handleKp = transformation * handle
        tipKp = transformation * tip

        # Camera azimuths restricted to avoid looking outside the room.
        azimuths = numpy.mod(numpy.random.uniform(270,450, numFrames), 360)
        objAzimuths = numpy.array([])
        while len(objAzimuths) < numFrames:
            num = numpy.random.uniform(0,360, 1)
            objAzimuths = numpy.append(objAzimuths, num)
        elevations = numpy.random.uniform(0,90, numFrames)

        # Per-frame keypoint projections (image coords) and occlusion flags.
        spoutProjectionsX = numpy.zeros(numFrames)
        spoutProjectionsY = numpy.zeros(numFrames)
        handleProjectionsX = numpy.zeros(numFrames)
        handleProjectionsY = numpy.zeros(numFrames)
        tipProjectionsX = numpy.zeros(numFrames)
        tipProjectionsY = numpy.zeros(numFrames)
        spoutsOccluded = numpy.zeros(numFrames)
        handlesOccluded = numpy.zeros(numFrames)
        tipsOccluded = numpy.zeros(numFrames)

        scene.objects.link(teapot)
        teapot.layers[1] = True
        teapot.layers[2] = True
        teapot.matrix_world = mathutils.Matrix.Translation(targetParentPosition)
        center = centerOfGeometry(teapot.dupli_group.objects, teapot.matrix_world)
        original_matrix_world = teapot.matrix_world.copy()

        # Keyframe camera and teapot poses, record projections/occlusions.
        for frame in range(frameStart, frameEnd):
            azimuth = azimuths[frame - frameStart]
            objAzimuth = objAzimuths[frame - frameStart]
            elevation = elevations[frame - frameStart]
            bpy.context.scene.frame_set(frame)
            # Orbit the camera around the teapot's center.
            azimuthRot = mathutils.Matrix.Rotation(radians(-azimuth), 4, 'Z')
            elevationRot = mathutils.Matrix.Rotation(radians(-elevation), 4, 'X')
            location = center + azimuthRot * elevationRot * originalLoc
            camera.location = location
            # Spin the teapot about its own origin.
            azimuthRot = mathutils.Matrix.Rotation(radians(-objAzimuth), 4, 'Z')
            teapot.matrix_world = mathutils.Matrix.Translation(original_matrix_world.to_translation()) * azimuthRot * (mathutils.Matrix.Translation(-original_matrix_world.to_translation())) * original_matrix_world
            scene.update()
            look_at(camera, center)
            scene.update()
            teapot.keyframe_insert(data_path="rotation_euler", frame=frame, index=-1)
            camera.keyframe_insert(data_path="location", frame=frame, index=-1)
            camera.keyframe_insert(data_path="rotation_euler", frame=frame, index=-1)
            scene.frame_set(frame)
            scene.update()

            # World-space keypoint transforms for this frame.
            sphereSpout_matrix_world = teapot.matrix_world * mathutils.Matrix.Translation(spoutKp)
            sphereHandle_matrix_world = teapot.matrix_world * mathutils.Matrix.Translation(handleKp)
            sphereTip_matrix_world = teapot.matrix_world * mathutils.Matrix.Translation(tipKp)

            spoutlocation = image_project(scene, camera, sphereSpout_matrix_world.to_translation())
            spoutProjectionsX[frame - frameStart] = spoutlocation[0]
            spoutProjectionsY[frame - frameStart] = spoutlocation[1]
            handlelocation = image_project(scene, camera,sphereHandle_matrix_world.to_translation())
            handleProjectionsX[frame - frameStart] = handlelocation[0]
            handleProjectionsY[frame - frameStart] = handlelocation[1]
            tiplocation = image_project(scene, camera, sphereTip_matrix_world.to_translation())
            tipProjectionsX[frame - frameStart] = tiplocation[0]
            tipProjectionsY[frame - frameStart] = tiplocation[1]

            # A keypoint counts as occluded when the camera ray hits an
            # object belonging to a group other than the teapot's.
            result, hitObj, hitMatrix, hitLoc, hitNormal = scene.ray_cast(scene.camera.location, sphereSpout_matrix_world.to_translation())
            spoutOccluded = False
            if result and hitObj.users_group[0].name != teapot.name:
                spoutOccluded = True
            spoutsOccluded[frame - frameStart] = int(spoutOccluded)

            result, hitObj, hitMatrix, hitLoc, hitNormal = scene.ray_cast(scene.camera.location, sphereHandle_matrix_world.to_translation())
            handleOccluded = False
            if result and hitObj.users_group[0].name != teapot.name:
                handleOccluded = True
            handlesOccluded[frame - frameStart] = int(handleOccluded)

            # Artificial handle occlusion: if the handle ray hit nothing and
            # the camera is low enough, drop the duplicated occluder between
            # camera and handle, then re-test occlusion.
            if scene.objects.find(newOccludingObjInstance.name) != -1:
                scene.objects.unlink(newOccludingObjInstance)
            if (not result) and occludeHandle and elevation < 35:
                scene.objects.link(newOccludingObjInstance)
                initLoc = newOccludingObjInstance.location
                handleLoc = sphereHandle_matrix_world.to_translation()
                initLocA = numpy.array(initLoc)
                handleLocA = numpy.array(handleLoc)
                # Place the occluder a quarter of the way from camera to handle.
                newX = ((handleLocA - camera.location)/4 + camera.location)[0]
                newY = ((handleLocA - camera.location)/4 + camera.location)[1]
                newOccludingObjInstance.location = mathutils.Vector((newX, newY, initLoc.z))
                newOccludingObjInstance.keyframe_insert(data_path="location", frame=frame, index=-1)
                scene.frame_set(frame)
                scene.update()
                result, hitObj, hitMatrix, hitLoc, hitNormal = scene.ray_cast(scene.camera.location, sphereHandle_matrix_world.to_translation())
                handleOccluded = False
                if result and hitObj.users_group[0].name != teapot.name:
                    handleOccluded = True
                handlesOccluded[frame - frameStart] = int(handleOccluded)

            result, hitObj, hitMatrix, hitLoc, hitNormal = scene.ray_cast(scene.camera.location, sphereTip_matrix_world.to_translation())
            tipOccluded = False
            if result and hitObj.users_group[0].name != teapot.name:
                tipOccluded = True
            tipsOccluded[frame - frameStart] = int(tipOccluded)

        # Render the keyframed animation in batches, in several passes.
        numBatches = int(numFrames / batchSize)
        for batch in range(numBatches):
            # Append one ground-truth line per frame of this batch.
            with open(director + 'groundtruth.txt', "a") as groundtruth:
                for batch_i in range(batchSize):
                    print(str(azimuths[batch * batchSize + batch_i]) + ' ' + str(objAzimuths[batch * batchSize + batch_i]) + ' ' + str(elevations[batch * batchSize + batch_i]) + ' ' + str(teapotNum) + ' ' + str(batch * batchSize + batch_i + frameStart) + ' ' + str(sceneNumber) + ' ' + str(targetIndex) \
                          + ' ' + str(spoutProjectionsX[batch * batchSize + batch_i]) + ' ' + str(spoutProjectionsY[batch * batchSize + batch_i]) \
                          + ' ' + str(handleProjectionsX[batch * batchSize + batch_i]) + ' ' + str(handleProjectionsY[batch * batchSize + batch_i]) \
                          + ' ' + str(tipProjectionsX[batch * batchSize + batch_i]) + ' ' + str(tipProjectionsY[batch * batchSize + batch_i]) \
                          + ' ' + str(int(spoutsOccluded[batch * batchSize + batch_i])) \
                          + ' ' + str(int(handlesOccluded[batch * batchSize + batch_i])) \
                          + ' ' + str(int(tipsOccluded[batch * batchSize + batch_i])) \
                          + ' ' + prefix, file = groundtruth)
            scene.frame_start = frameStart + batch * batchSize
            scene.frame_end = min(frameStart + batch * batchSize + batchSize - 1, frameEnd)

            # Pass 1: object-only layer, single-sample Cycles, EXR multilayer.
            scene.layers[1] = True
            scene.layers[0] = False
            scene.layers[2] = False
            scene.render.layers[0].use = False
            scene.render.layers[2].use = False
            scene.render.layers[1].use = True
            cycles.samples = 1
            scene.render.engine = 'CYCLES'
            scene.render.image_settings.file_format = 'OPEN_EXR_MULTILAYER'
            scene.render.filepath = director + 'render' + prefix + '_obj' + str(teapotNum) + '_' + 'scene' + str(sceneNumber) + '_target' + str(targetIndex) + '_' + 'single_'
            scene.update()
            bpy.ops.render.render( animation=True )

            # Pass 2: full scene layer, EXR multilayer.
            scene.layers[1] = False
            scene.layers[2] = False
            scene.layers[0] = True
            scene.render.layers[0].use = True
            scene.render.layers[1].use = False
            scene.render.layers[2].use = False
            scene.render.filepath = director + 'render' + prefix + '_obj' + str(teapotNum) + '_' + 'scene' + str(sceneNumber) + '_target' + str(targetIndex) + '_'
            scene.update()
            bpy.ops.render.render( animation=True )

            # Pass 3: full-quality PNG images.
            if useCycles:
                cycles.samples = numSamples
            if not useCycles:
                scene.render.engine = 'BLENDER_RENDER'
            scene.render.image_settings.file_format = 'PNG'
            scene.render.filepath = director + 'images/' + 'render' + prefix + '_obj' + str(teapotNum) + '_' + 'scene' + str(sceneNumber) + '_target' + str(targetIndex) + '_'
            scene.update()
            bpy.ops.render.render( animation=True )

            # Pass 4: unoccluded layer (teapot + room only), PNG.
            scene.layers[1] = False
            scene.layers[2] = True
            scene.layers[0] = False
            scene.render.layers[0].use = False
            scene.render.layers[1].use = False
            scene.render.layers[2].use = True
            scene.render.image_settings.file_format = 'PNG'
            scene.render.filepath = director + 'images/' + 'render' + prefix + '_obj' + str(teapotNum) + '_' + 'scene' + str(sceneNumber) + '_target' + str(targetIndex) + '_unoccluded'
            scene.update()
            bpy.ops.render.render( animation=True )

        scene.objects.unlink(teapot)
        objectIds = [teapotNum]*numFrames
        totalAzimuths = totalAzimuths + azimuths.tolist()
        totalObjAzimuths = totalObjAzimuths + objAzimuths.tolist()
        totalElevations = totalElevations + elevations.tolist()
        totalObjectIds = totalObjectIds + objectIds

    # Cleanup: remove every object except the teapot, then the scene itself.
    # Iterate over a snapshot — removing while iterating the live collection
    # skips elements.
    for obji in list(scene.objects):
        if obji.name != teapot.name:
            obji.user_clear()
            bpy.data.objects.remove(obji)
    scene.user_clear()
    bpy.data.scenes.remove(scene)

print("Renders ended.")
inversegraphics | inversegraphics-master/diffrender_test.py | test__author__ = 'pol'
# from damascene import damascene
import matplotlib
# matplotlib.use('QT4Agg')
import matplotlib.pyplot as plt
plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
import scene_io_utils
import mathutils
from math import radians
import timeit
import time
import opendr
import chumpy as ch
import geometry
import image_processing
import numpy as np
import cv2
import glfw
import generative_models
import recognition_models
from opendr_utils import *
from utils import *
import OpenGL.GL as GL
import light_probes
from OpenGL import contextdata
from light_probes import SHProjection
import theano
theano.config.optimizer='fast_compile'
theano.config.cycle_detection = 'fast'
# theano.sandbox.cuda.use('cpu')
import lasagne
import lasagne_nn
plt.ion()
#########################################
# Test configuration
#########################################
seed = 1
np.random.seed(seed)
parameterRecognitionModels = set(['randForestAzs', 'randForestElevs', 'randForestVColors', 'linearRegressionVColors', 'neuralNetModelSHLight', ])
parameterRecognitionModels = set(['randForestAzs', 'randForestElevs', 'randForestVColors', 'linearRegressionVColors', 'linRegModelSHZernike' ])
parameterRecognitionModels = set(['randForestAzs', 'randForestElevs','linearRegressionVColors','neuralNetModelSHLight' ])
parameterRecognitionModels = set(['neuralNetPose', 'linearRegressionVColors','constantSHLight' ])
parameterRecognitionModels = set(['neuralNetPose', 'neuralNetApperanceAndLight', 'neuralNetVColors' ])
parameterRecognitionModels = set(['neuralNetPose', 'neuralNetModelSHLight', 'neuralNetVColors', 'neuralNetModelShape' ])
# parameterRecognitionModels = set(['neuralNetPose', 'neuralNetApperanceAndLight'])
# parameterRecognitionModels = set(['randForestAzs', 'randForestElevs','randForestVColors','randomForestSHZernike' ])
#
gtPrefix = 'train4_occlusion_shapemodel'
# gtPrefix = 'train4_occlusion_shapemodel_synthetic_10K_test100-1100'
gtPrefix = 'train4_occlusion_shapemodel_photorealistic_10K_test100-1100'
# gtPrefix = 'objectnet3d_teapots'
experimentPrefix = 'train4_occlusion_shapemodel_10k'
# gtPrefix = 'train4_occlusion_multi'
# experimentPrefix = 'train4_occlusion_multi'
trainPrefixPose = 'train4_occlusion_shapemodel_10k'
trainPrefixVColor = 'train4_occlusion_shapemodel_10k'
trainPrefixLightCoeffs = 'train4_occlusion_shapemodel_10k'
trainPrefixShapeParams = 'train4_occlusion_shapemodel_10k'
trainModelsDirAppLight = 'train4_occlusion_shapemodel_10k'
#########################################
# OpenDR Initialization starts here
#########################################
#Main script options:
glModes = ['glfw','mesa']
glMode = glModes[0]
width, height = (150, 150)
win = -1
multiObjects = False
chThError = None
global renderer
if glMode == 'glfw':
#Initialize base GLFW context for the Demo and to share context among all renderers.
glfw.init()
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
# glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL.GL_TRUE)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
glfw.window_hint(glfw.DEPTH_BITS,32)
glfw.window_hint(glfw.VISIBLE, GL.GL_FALSE)
win = glfw.create_window(width, height, "Demo", None, None)
glfw.make_context_current(win)
angle = 60 * 180 / np.pi
clip_start = 0.05
clip_end = 10
frustum = {'near': clip_start, 'far': clip_end, 'width': width, 'height': height}
camDistance = 0.4
teapots = [line.strip() for line in open('teapots.txt')]
# renderTeapotsList = np.arange(len(teapots))
renderTeapotsList = np.arange(len(teapots))[0:1]
hdridx = 0
targetModels = []
v_teapots, f_list_teapots, vc_teapots, vn_teapots, uv_teapots, haveTextures_list_teapots, textures_list_teapots, vflat, varray, center_teapots = scene_io_utils.loadTeapotsOpenDRData(renderTeapotsList, False, False, targetModels)
azimuth = np.pi
chCosAz = ch.Ch([np.cos(azimuth)])
chSinAz = ch.Ch([np.sin(azimuth)])
chAz = 2*ch.arctan(chSinAz/(ch.sqrt(chCosAz**2 + chSinAz**2) + chCosAz))
chAz = ch.Ch([0])
chObjAz = ch.Ch([0])
chAzRel = chAz - chObjAz
elevation = 0
chLogCosEl = ch.Ch(np.log(np.cos(elevation)))
chLogSinEl = ch.Ch(np.log(np.sin(elevation)))
chEl = 2*ch.arctan(ch.exp(chLogSinEl)/(ch.sqrt(ch.exp(chLogCosEl)**2 + ch.exp(chLogSinEl)**2) + ch.exp(chLogCosEl)))
chEl = ch.Ch([0.95993109])
chDist = ch.Ch([camDistance])
chLightSHCoeffs = ch.Ch(np.array([2, 0.25, 0.25, 0.12,-0.17,0.36,0.1,0.,0.]))
# if multiObjects:
chObjDist = ch.Ch([0])
chObjRotation = ch.Ch([0])
chObjAzMug = ch.Ch([0])
chObjDistMug = ch.Ch([0])
chObjRotationMug = ch.Ch([0])
chVColorsMug = ch.Ch([1,0,0])
clampedCosCoeffs = clampedCosineCoefficients()
chComponent = chLightSHCoeffs * clampedCosCoeffs
light_color = ch.ones(3)
chVColors = ch.Ch([0.4,0.4,0.4])
chDisplacement = ch.Ch([0.0, 0.0,0.0])
chScale = ch.Ch([1.0,1.0,1.0])
# vcch[0] = np.ones_like(vcflat[0])*chVColorsGT.reshape([1,3])
renderer_teapots = []
for teapot_i in range(len(renderTeapotsList)):
vmod = v_teapots[teapot_i]
fmod_list = f_list_teapots[teapot_i]
vcmod = vc_teapots[teapot_i]
vnmod = vn_teapots[teapot_i]
uvmod = uv_teapots[teapot_i]
haveTexturesmod_list = haveTextures_list_teapots[teapot_i]
texturesmod_list = textures_list_teapots[teapot_i]
centermod = center_teapots[teapot_i]
vmod, vnmod, _ = transformObject(vmod, vnmod, chScale, chObjAz, ch.Ch([0]), ch.Ch([0]), np.array([0,0,0]))
renderer = createRendererTarget(glMode, chAz, chEl, chDist, centermod, vmod, vcmod, fmod_list, vnmod, light_color, chComponent, chVColors, 0, chDisplacement, width,height, uvmod, haveTexturesmod_list, texturesmod_list, frustum, win )
renderer.overdraw = True
renderer.nsamples = 8
renderer.msaa = True
renderer.initGL()
renderer.initGLTexture()
# renderer.initGL_AnalyticRenderer()
renderer.imageGT = None
renderer.r
renderer_teapots = renderer_teapots + [renderer]
currentTeapotModel = 0
center = center_teapots[currentTeapotModel]
if multiObjects:
mugs = [line.strip() for line in open('mugs.txt')]
renderMugsList = np.arange(len(teapots))[0:1]
v_mugs, f_list_mugs, vc_mugs, vn_mugs, uv_mugs, haveTextures_list_mugs, textures_list_mugs, vflat_mugs, varray_mugs, center_mugs = scene_io_utils.loadMugsOpenDRData(
renderMugsList, False, False, None)
v_mug = v_mugs[0][0]
f_list_mug = f_list_mugs[0][0]
chVColorsMug = ch.Ch([1, 0, 0])
vc_mug = [chVColorsMug * np.ones(v_mug[0].shape)]
vn_mug = vn_mugs[0][0]
uv_mug = uv_mugs[0][0]
haveTextures_list_mug = haveTextures_list_mugs[0][0]
textures_list_mug = textures_list_mugs[0][0]
#########################################
# Initialization ends here
#########################################
#########################################
# Generative model set up
#########################################
global rendererGT
rendererGT = ch.Ch(renderer.r.copy())
numPixels = width*height
E_raw = renderer - rendererGT
SE_raw = ch.sum(E_raw*E_raw, axis=2)
SSqE_raw = ch.SumOfSquares(E_raw)/numPixels
initialPixelStdev = 0.1
# finalPixelStdev = 0.05
stds = ch.Ch([initialPixelStdev])
variances = stds ** 2
globalPrior = ch.Ch([0.9])
negLikModel = -ch.sum(generative_models.LogGaussianModel(renderer=renderer, groundtruth=rendererGT, variances=variances))
negLikModelRobust = -ch.sum(generative_models.LogRobustModel(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variances))
pixelLikelihoodCh = generative_models.LogGaussianModel(renderer=renderer, groundtruth=rendererGT, variances=variances)
pixelLikelihoodRobustCh = generative_models.LogRobustModel(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variances)
post = generative_models.layerPosteriorsRobustCh(rendererGT, renderer, np.array([]), 'MASK', globalPrior, variances)[0]
models = [negLikModel, negLikModelRobust]
pixelModels = [pixelLikelihoodCh, pixelLikelihoodRobustCh]
modelsDescr = ["Gaussian Model", "Outlier model" ]
model = 1
pixelErrorFun = pixelModels[model]
errorFun = models[model]
global iterat
iterat = 0
t = time.time()
#########################################
# Generative model setup ends here.
#########################################
#########################################
# Test code starts here:
#########################################
gtDir = 'groundtruth/' + gtPrefix + '/'
featuresDir = gtDir
experimentDir = 'experiments/' + experimentPrefix + '/'
trainModelsDirPose = 'experiments/' + trainPrefixPose + '/' + gtPrefix
trainModelsDirVColor = 'experiments/' + trainPrefixVColor + '/' + gtPrefix
trainModelsDirLightCoeffs = 'experiments/' + trainPrefixLightCoeffs + '/' + gtPrefix
trainModelsDirShapeParams = 'experiments/' + trainPrefixShapeParams + '/' + gtPrefix
trainModelsDirAppLight = 'experiments/' + trainModelsDirAppLight + '/' + gtPrefix
useCRFOcclusionPred = False
useShapeModel = True
makeVideo = False
reduceVariance = False
getColorFromCRF = False
syntheticGroundtruth = False
evaluateWithGT = True
ignoreGT = True
ignore = []
if os.path.isfile(gtDir + 'ignore.npy'):
ignore = np.load(gtDir + 'ignore.npy')
testSet = np.load(experimentDir + 'test.npy')
rangeTests = np.arange(len(testSet))
rangeTests = np.arange(100,1100)
# rangeTests = np.arange(100,1100)
#
testSet = testSet[rangeTests]
numTests = len(testSet)
if evaluateWithGT:
groundTruthFilename = gtDir + 'groundTruth.h5'
gtDataFile = h5py.File(groundTruthFilename, 'r')
shapeGT = gtDataFile[gtPrefix].shape
# boolTestSet = np.zeros(shapeGT).astype(np.bool)
# boolTestSet[testSet] = True
boolTestSet = np.array([np.any(num == testSet) for num in gtDataFile[gtPrefix]['trainIds']])
dataIds = gtDataFile[gtPrefix][boolTestSet]['trainIds']
dataIdsTestIndices = np.array([np.where(dataIds==num)[0][0] for num in testSet])
# testGroundTruth = gtDataFile[gtPrefix][boolTestSet]
# groundTruthTest = np.zeros(shapeGT, dtype=testGroundTruth.dtype)
# groundTruthTest[boolTestSet] = testGroundTruth
groundTruth = gtDataFile[gtPrefix][boolTestSet][dataIdsTestIndices]
dataTeapotIdsTest = groundTruth['trainTeapotIds']
test = np.arange(len(testSet))
testSet = testSet[test]
print("Reading experiment.")
dataAzsGT = groundTruth['trainAzsGT']
dataObjAzsGT = groundTruth['trainObjAzsGT']
dataElevsGT = groundTruth['trainElevsGT']
dataLightAzsGT = groundTruth['trainLightAzsGT']
dataLightElevsGT = groundTruth['trainLightElevsGT']
dataLightIntensitiesGT = groundTruth['trainLightIntensitiesGT']
dataVColorGT = groundTruth['trainVColorGT']
dataScenes = groundTruth['trainScenes']
dataTeapotIds = groundTruth['trainTeapotIds']
dataEnvMaps = groundTruth['trainEnvMaps']
dataOcclusions = groundTruth['trainOcclusions']
dataTargetIndices = groundTruth['trainTargetIndices']
dataLightCoefficientsGT = groundTruth['trainLightCoefficientsGT']
dataLightCoefficientsGTRel = groundTruth['trainLightCoefficientsGTRel']
dataAmbientIntensityGT = groundTruth['trainAmbientIntensityGT']
dataIds = groundTruth['trainIds']
if multiObjects:
dataObjDistGT = groundTruth['trainObjDistGT']
dataObjRotationGT = groundTruth['trainObjRotationGT']
dataObjDistMug = groundTruth['trainObjDistMug']
dataObjRotationMug = groundTruth['trainObjRotationMug']
dataObjAzMug = groundTruth['trainObjAzMug']
dataVColorsMug = groundTruth['trainVColorsMug']
dataObjAzMugRel = groundTruth['trainObjAzMugRel']
dataObjAzGTRel = groundTruth['trainObjAzGTRel']
dataMugPosOffset = groundTruth['trainMugPosOffset']
dataTeapotPosOffset = groundTruth['trainTeapotPosOffset']
if useShapeModel:
dataShapeModelCoeffsGT = groundTruth['trainShapeModelCoeffsGT']
gtDtype = groundTruth.dtype
testSetFixed = testSet
whereBad = []
for test_it, test_id in enumerate(testSet):
if test_id in ignore:
bad = np.where(testSetFixed==test_id)
testSetFixed = np.delete(testSetFixed, bad)
whereBad = whereBad + [bad]
# testSet = testSetFixed
loadFromHdf5 = False
synthPrefix = '_cycles'
if syntheticGroundtruth:
synthPrefix = ''
if syntheticGroundtruth:
imagesDir = gtDir + 'images_opendr/'
else:
imagesDir = gtDir + 'images/'
if evaluateWithGT:
images = readImages(imagesDir, dataIds, loadFromHdf5)
else:
import glob
imageFiles = glob.glob1(imagesDir, "*.png")
images = np.zeros([len(imageFiles), width, height, 3])
for imageFile_i, imageFile in enumerate(imageFiles):
image = skimage.io.imread(imagesDir + imageFile)
image = skimage.transform.resize(image , [height, width])
images[imageFile_i] = image
testSet = np.arange(len(images))
print("Backprojecting and fitting estimates.")
# testSet = np.arange(len(images))[0:10]
if evaluateWithGT:
testAzsGT = dataAzsGT
testObjAzsGT = dataObjAzsGT
testElevsGT = dataElevsGT
testLightAzsGT = dataLightAzsGT
testLightElevsGT = dataLightElevsGT
testLightIntensitiesGT = dataLightIntensitiesGT
testVColorGT = dataVColorGT
if useShapeModel:
testShapeParamsGT = dataShapeModelCoeffsGT
testLightCoefficientsGTRel = dataLightCoefficientsGTRel * dataAmbientIntensityGT[:,None]
testAzsRel = np.mod(testAzsGT - testObjAzsGT, 2*np.pi)
testOcclusions = dataOcclusions
testIds = dataIds
if multiObjects:
testObjAzMugRel = dataObjAzMugRel
testObjAzGTRel = dataObjAzGTRel
testMugPosOffset = dataMugPosOffset
testTeapotPosOffset = dataTeapotPosOffset
testVColorMug = dataVColorsMug
testObjDistGT = dataObjDistGT
testObjRotationGT = dataObjRotationGT
testObjDistMug = dataObjDistMug
testObjRotationMug = dataObjRotationMug
testObjAzMug = dataObjAzMug
testVColorsMug = dataVColorsMug
else:
#There is no GT.
testAzsGT = np.zeros(len(testSet))
testObjAzsGT = np.zeros(len(testSet))
testElevsGT = np.zeros(len(testSet))
testLightAzsGT = np.zeros(len(testSet))
testLightElevsGT = np.zeros(len(testSet))
testLightIntensitiesGT = np.zeros(len(testSet))
testOcclusions = np.zeros(len(testSet))
testIds = np.zeros(len(testSet))
testVColorGT = np.zeros(len(testSet))
if useShapeModel:
testShapeParamsGT = np.zeros(len(testSet))
testLightCoefficientsGTRel = np.zeros(len(testSet))
testAzsRel = np.zeros(len(testSet))
if multiObjects:
testObjAzMugRel = np.zeros(len(testSet))
testObjAzGTRel = np.zeros(len(testSet))
testMugPosOffset = np.zeros(len(testSet))
testTeapotPosOffset = np.zeros(len(testSet))
testVColorMug = np.zeros(len(testSet))
testObjDistGT = np.zeros(len(testSet))
testObjRotationGT = np.zeros(len(testSet))
testObjDistMug = np.zeros(len(testSet))
testObjRotationMug = np.zeros(len(testSet))
testObjAzMug = np.zeros(len(testSet))
testVColorsMug = np.zeros(len(testSet))
##Read Training set labels
# trainGTPrefix = 'train4_occlusion_shapemodel'
#
# trainGTDir = 'groundtruth/' + trainGTPrefix + '/'
# trainGroundTruthFilename = trainGTDir + 'groundTruth.h5'
# trainGTDataFile = h5py.File(trainGroundTruthFilename, 'r')
#
# trainSet = np.load(experimentDir + 'train.npy')
#
# shapeGT = trainGTDataFile[trainGTPrefix].shape
# boolTrainSet = np.zeros(shapeGT).astype(np.bool)
# boolTrainSet[trainSet] = True
# trainGroundTruth = trainGTDataFile[trainGTPrefix][boolTrainSet]
# groundTruthTrain = np.zeros(shapeGT, dtype=trainGroundTruth.dtype)
# groundTruthTrain[boolTrainSet] = trainGroundTruth
# groundTruthTrain = groundTruthTrain[trainSet]
# dataTeapotIdsTrain = groundTruthTrain['trainTeapotIds']
# train = np.arange(len(trainSet))
#
# testSet = testSet[test]
#
# print("Reading experiment.")
# trainAzsGT = groundTruthTrain['trainAzsGT']
# trainObjAzsGT = groundTruthTrain['trainObjAzsGT']
# trainElevsGT = groundTruthTrain['trainElevsGT']
# trainLightAzsGT = groundTruthTrain['trainLightAzsGT']
# trainLightElevsGT = groundTruthTrain['trainLightElevsGT']
# trainLightIntensitiesGT = groundTruthTrain['trainLightIntensitiesGT']
# trainVColorGT = groundTruthTrain['trainVColorGT']
# trainScenes = groundTruthTrain['trainScenes']
# trainTeapotIds = groundTruthTrain['trainTeapotIds']
# trainEnvMaps = groundTruthTrain['trainEnvMaps']
# trainOcclusions = groundTruthTrain['trainOcclusions']
# trainTargetIndices = groundTruthTrain['trainTargetIndices']
# trainLightCoefficientsGT = groundTruthTrain['trainLightCoefficientsGT']
# trainLightCoefficientsGTRel = groundTruthTrain['trainLightCoefficientsGTRel']
# trainAmbientIntensityGT = groundTruthTrain['trainAmbientIntensityGT']
# trainIds = groundTruthTrain['trainIds']
#
# if useShapeModel:
# trainShapeModelCoeffsGT = groundTruthTrain['trainShapeModelCoeffsGT']
#
# trainLightCoefficientsGTRel = trainLightCoefficientsGTRel * trainAmbientIntensityGT[:,None]
#
# trainAzsRel = np.mod(trainAzsGT - trainObjAzsGT, 2*np.pi)
# latexify(columns=2)
#
# directory = 'tmp/occlusions'
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.hist(trainOcclusions*100, bins=40)
# ax.set_xlabel('Occlusion level (\%)')
# ax.set_ylabel('Counts')
# ax.set_title('Occlusion histogram')
# ax.set_xlim(0,100)
# fig.savefig(directory + '-histogram.pdf', bbox_inches='tight')
# plt.close(fig)
recognitionTypeDescr = ["near", "mean", "sampling"]
recognitionType = 1
optimizationTypeDescr = ["predict", "optimize", "joint"]
optimizationType = 1
computePredErrorFuns = True
method = 5
model = 1
maxiter = 100
numSamples = 1
mintime = time.time()
free_variables = [ chAz, chEl]
boundEl = (-np.pi, np.pi)
boundAz = (-3*np.pi, 3*np.pi)
boundscomponents = (0,None)
bounds = [boundAz,boundEl]
bounds = [(None , None ) for sublist in free_variables for item in sublist]
methods = ['dogleg', 'minimize', 'BFGS', 'L-BFGS-B', 'Nelder-Mead', 'TNC', 'SGDMom', 'probLineSearch']
# options = {'disp':False, 'maxiter':maxiter, 'lr':0.0001, 'momentum':0.1, 'decay':0.99}
options = {'disp':False, 'maxiter':maxiter}
azVar = 0.1
elVar = 0.1
vColorVar = 0.01
shCoeffsVar = 0.01
df_vars = np.concatenate([azVar*np.ones(chAz.shape), elVar*np.ones(chEl.shape), vColorVar*np.ones(chVColors.r.shape), shCoeffsVar*np.ones(chLightSHCoeffs.r.shape)])
options = {'disp':False, 'maxiter':maxiter, 'df_vars':df_vars}
testRenderer = 0
# #Read texture UV mapping
# sceneFilename = 'teapot.blend'
# with bpy.data.libraries.load(filepath=sceneFilename) as (data_from, data_to):
# for attr in dir(data_to):
# setattr(data_to, attr, getattr(data_from, attr))
# teapot = bpy.data.scenes['Teapot'].objects[1]
# vmod, fmod_list, vcmod, vnmod, uvmod, haveTexturesmod_list, texturesmod_list = scene_io_utils.unpackBlenderObject(teapot, '', False)
# color = np.array([0,0,0])
# texturesmod_list[0][0][0] = ch.Ch(np.ones(texturesmod_list[0][0][0].shape)*color[None,None,:])
# testRenderer = 2
if useShapeModel:
import shape_model
#%% Load data
filePath = 'data/teapotModel.pkl'
teapotModel = shape_model.loadObject(filePath)
faces = teapotModel['faces']
#%% Sample random shape Params
latentDim = np.shape(teapotModel['ppcaW'])[1]
shapeParams = np.zeros(latentDim)
chShapeParams = ch.Ch(shapeParams.copy())
meshLinearTransform=teapotModel['meshLinearTransform']
W=teapotModel['ppcaW']
b=teapotModel['ppcaB']
chVertices = shape_model.VerticesModel(chShapeParams=chShapeParams,meshLinearTransform=meshLinearTransform,W = W,b=b)
chVertices.init()
chVertices = ch.dot(geometry.RotateZ(-np.pi/2)[0:3,0:3],chVertices.T).T
smFaces = [[faces]]
smVColors = [chVColors*np.ones(chVertices.shape)]
# smUVs = ch.Ch(np.zeros([chVertices.shape[0],2]))
smUVs = uvmod[0]
# smHaveTextures = [[False]]
smHaveTextures = [[False]]
smTexturesList = [[None]]
# smTexturesList = [[texturesmod_list[0][0][0]]]
chVertices = chVertices - ch.mean(chVertices, axis=0)
minZ = ch.min(chVertices[:,2])
chMinZ = ch.min(chVertices[:,2])
zeroZVerts = chVertices[:,2]- chMinZ
chVertices = ch.hstack([chVertices[:,0:2] , zeroZVerts.reshape([-1,1])])
chVertices = chVertices*0.09
smCenter = ch.array([0,0,0.1])
smVertices = [chVertices]
chNormals = shape_model.chGetNormals(chVertices, faces)
smNormals = [chNormals]
if useShapeModel:
center = smCenter
UVs = smUVs
v = smVertices
vn = smNormals
Faces = smFaces
VColors = smVColors
HaveTextures = smHaveTextures
TexturesList = smTexturesList
else:
v, vn = v_teapots[currentTeapotModel][0], vn_teapots[currentTeapotModel][0]
Faces = f_list_teapots[currentTeapotModel][0]
VColors = vc_teapots[currentTeapotModel][0]
UVs = uv_teapots[currentTeapotModel][0]
HaveTextures = haveTextures_list_teapots[currentTeapotModel][0]
TexturesList = textures_list_teapots[currentTeapotModel][0]
v, vn, teapotPosOffset = transformObject(v, vn, chScale, chObjAz, chObjDist, chObjRotation, np.array([0,0,0]))
verticesCube, facesCube, normalsCube, vColorsCube, texturesListCube, haveTexturesCube = getCubeData()
uvCube = np.zeros([verticesCube.shape[0],2])
chCubePosition = ch.Ch([0, 0, 0])
chCubeScale = ch.Ch([10.0])
chCubeAzimuth = ch.Ch([0])
chCubeVCColors = ch.Ch(np.ones_like(vColorsCube) * 1.0)
v_transf, vn_transf = transformObject2([verticesCube], [normalsCube], chCubeScale, chCubeAzimuth, chCubePosition)
v_scene = [v]
f_list_scene = [smFaces]
vc_scene = [smVColors]
vn_scene = [vn]
uv_scene = [smUVs]
haveTextures_list_scene = [smHaveTextures]
textures_list_scene = [smTexturesList]
addObjectData(v_scene, f_list_scene, vc_scene, vn_scene, uv_scene, haveTextures_list_scene, textures_list_scene, v_transf, [[facesCube]],[chCubeVCColors], vn_transf, [uvCube], haveTexturesCube, texturesListCube)
if multiObjects:
verticesMug, normalsMug, mugPosOffset = transformObject(v_mug, vn_mug, chScale, chObjAzMug + np.pi / 2, chObjDistMug, chObjRotationMug, np.array([0,0,0]))
VerticesB = [v] + [verticesMug]
NormalsB = [vn] + [normalsMug]
FacesB = [Faces] + [f_list_mug]
VColorsB = [VColors] + [vc_mug]
UVsB = [UVs] + [uv_mug]
HaveTexturesB = [HaveTextures] + [haveTextures_list_mug]
TexturesListB = [TexturesList] + [textures_list_mug]
renderer = createRendererTarget(glMode, chAz, chEl, chDist, center, VerticesB, VColorsB, FacesB, NormalsB, light_color,chComponent, chVColors, np.array([0,0,0]), chDisplacement, width, height, UVsB, HaveTexturesB, TexturesListB, frustum, None)
renderer.initGL()
renderer.initGLTexture()
else:
# renderer = createRendererTarget(glMode, chAz, chEl, chDist, smCenter, [v], [smVColors], [smFaces], [vn], light_color, chComponent, chVColors, 0, chDisplacement, width,height, [smUVs], [smHaveTextures], [smTexturesList], frustum, win )
renderer = createRendererTarget(glMode, chAz, chEl, chDist, smCenter, v_scene, vc_scene, f_list_scene, vn_scene, light_color, chComponent, chVColors, 0, chDisplacement, width,height, uv_scene, haveTextures_list_scene, textures_list_scene, frustum, win )
renderer.overdraw = True
renderer.nsamples = 8
renderer.msaa = True
renderer.initGL()
renderer.initGLTexture()
# renderer.initGL_AnalyticRenderer()
renderer.imageGT = None
renderer.r
# chShapeParams[:] = np.zeros([latentDim])
chVerticesMean = chVertices.r.copy()
else:
renderer = renderer_teapots[testRenderer]
# plt.imsave('testrender.png', sqeRenderer.render_image)
loadMask = True
if loadMask:
masksGT = loadMasks(gtDir + '/masks_occlusion/', testSet)
# ### Groundtruth triplets generation
#
# groundTruthFilename = 'groundtruth/' + gtPrefix + '/' '/groundTruth.h5'
# gtDataFileToRender = h5py.File(groundTruthFilename, 'r')
# groundTruthToRender = gtDataFileToRender[gtPrefix]
#
# rangeGT = np.arange(len(groundTruthToRender))
#
# for gtIdx in rangeGT[:]:
# groundTruthToRender['trainEnvMapPhiOffsets'][gtIdx]
#
# color = groundTruthToRender['trainVColorGT'][gtIdx]
# az = groundTruthToRender['trainObjAzsGT'][gtIdx] - groundTruthToRender['trainAzsGT'][gtIdx]
# el = groundTruthToRender['trainElevsGT'][gtIdx]
# lightCoefficientsRel = groundTruthToRender['trainLightCoefficientsGTRel'][gtIdx] * groundTruthToRender['trainAmbientIntensityGT'][gtIdx]
#
# if useShapeModel:
# shapeParams = groundTruthToRender['trainShapeModelCoeffsGT'][gtIdx]
#
# chAz[:] = 0
# chEl[:] = el
# chObjAz[:] = az
# chVColors[:] = color
# chLightSHCoeffs[:] = lightCoefficientsRel
# if useShapeModel:
# chShapeParams[:] = shapeParams
#
# image = renderer.r[:].copy()
# lin2srgb(image)
# cv2.imwrite(gtDir + '/backprojections/im' + str(gtIdx) + '.jpeg', 255 * image[:, :, [2, 1, 0]], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
#
# ipdb.set_trace()
# vis_im = np.array(renderer.indices_image==1).copy().astype(np.bool)
# im = skimage.io.imread('renderergt539.jpeg').astype(np.float32)/255.
# rendererGT[:] = srgb2lin(im.copy())
# post = generative_models.layerPosteriorsRobustCh(rendererGT, renderer, vis_im, 'MASK', globalPrior, variances)[0].r>0.05
# render = renderer.r.copy()
# render[~mask*vis_im] = np.concatenate([np.ones([1000,1000])[:,:,None], np.zeros([1000,1000])[:,:,None],np.zeros([1000,1000])[:,:,None]], axis=2)[~mask*vis_im]
#
# render[renderer.boundarybool_image.astype(np.bool)] = renderer.r[renderer.boundarybool_image.astype(np.bool)]
#
# cv2.imwrite('renderer' + '.jpeg' , 255*lin2srgb(render[:,:,[2,1,0]]), [int(cv2.IMWRITE_JPEG_QUALITY), 100])
# cv2.imwrite('rendererGT' + '.jpeg' , 255*lin2srgb(rendererGT.r[:,:,[2,1,0]]), [int(cv2.IMWRITE_JPEG_QUALITY), 100])
# plt.imsave('renderered.png', lin2srgb(render))
##### Tests with optimizing multiple objects in scene and camera settings.
# test_i = 0
# for test_i in range(len(testSet)):
#
# color = testVColorGT[test_i]
# az = testAzsGT[test_i]
# el = testElevsGT[test_i]
# lightCoefficientsRel = testLightCoefficientsGTRel[test_i]
#
# if useShapeModel:
# shapeParams = testShapeParamsGT[test_i]
#
# chAz[:] = 0
# chEl[:] = el
# chVColors[:] = color
# chLightSHCoeffs[:] = lightCoefficientsRel
# if useShapeModel:
# chShapeParams[:] = shapeParams
#
# chObjAz[:] = testObjAzsGT[test_i] - testAzsGT[test_i]
# chObjDist[:] = testObjDistGT[test_i]
# chObjRotation[:] = testObjRotationGT[test_i] - testAzsGT[test_i]
# chObjAzMug[:] = testObjAzMug[test_i] - testAzsGT[test_i]
# chObjDistMug[:] = testObjDistMug[test_i]
# chObjRotationMug[:] = testObjRotationMug[test_i] - testAzsGT[test_i]
#
# chVColorsMug[:] = testVColorMug[test_i]
#
# image = skimage.transform.resize(images[test_i], [height, width])
# imageSrgb = image.copy()
# rendererGT[:] = srgb2lin(image)
#
# masksGT = loadMasks(gtDir + '/masks_occlusion/', testSet)
# masksMug = loadMasksMug(gtDir + '/masks_occlusion/', testSet)
#
# maskTeapot = masksGT[test_i]
# maskMug = masksMug[test_i]
#
# coords = np.meshgrid(np.arange(width)-width/2, np.arange(height)-height/2)
#
# coordsMugX = coords[1][maskMug]
# coordsMugY = coords[0][maskMug]
#
# bbRendererGT = rendererGT.r.copy()
#
# bbRendererGT[coordsMugX.min()+width/2, coordsMugY.min()+height/2 : coordsMugY.max() + height/2] = np.array([1,0,0])
# bbRendererGT[coordsMugX.max()+width/2, coordsMugY.min()+height/2 : coordsMugY.max() + height/2] = np.array([1,0,0])
# bbRendererGT[coordsMugX.min()+width/2 : coordsMugX.max() + width/2, coordsMugY.min()+height/2] = np.array([1,0,0])
# bbRendererGT[coordsMugX.min()+width/2:coordsMugX.max()+width/2, coordsMugY.max() + height/2] = np.array([1,0,0])
#
# coordsTeapotX = coords[1][maskTeapot]
# coordsTeapotY = coords[0][maskTeapot]
#
# bbRendererGT[coordsTeapotX.min()+width/2, coordsTeapotY.min()+height/2 : coordsTeapotY.max() + height/2] = np.array([1,0,0])
# bbRendererGT[coordsTeapotX.max()+width/2, coordsTeapotY.min()+height/2 : coordsTeapotY.max() + height/2] = np.array([1,0,0])
# bbRendererGT[coordsTeapotX.min()+width/2 : coordsTeapotX.max() + width/2, coordsTeapotY.min()+height/2] = np.array([1,0,0])
# bbRendererGT[coordsTeapotX.min()+width/2:coordsTeapotX.max()+width/2, coordsTeapotY.max() + height/2] = np.array([1,0,0])
#
#
# posMug = np.array([(coordsMugY.min() + coordsMugY.max())/2, (coordsMugX.min() + coordsMugX.max())/2])
# posTeapot = np.array([(coordsTeapotY.min() + coordsTeapotY.max())/2, (coordsTeapotX.min() + coordsTeapotX.max())/2])
#
# # createRendererTarget(glMode, False, chAz, chEl, chDist, center, VerticesB, VColorsB, FacesB, NormalsB, light_color,chComponent, chVColors, np.array([0,0,0]), chDisplacement, width, height, UVsB, HaveTexturesB, TexturesListB, frustum, None)
# chAzCam = ch.Ch([0])
# chElCam = ch.Ch(chEl.r.copy())
#
# relPosTeapotGT = v[0].r.sum(axis=0)/v[0].r.shape[0]
# relPosMugGT = verticesMug[0].r.sum(axis=0)/verticesMug[0].r.shape[0]
#
# bbRendererGT[posMug[1] - 2+width/2:posMug[1] + 2+width/2, posMug[0] - 2 + height/2: posMug[0] + 2 + height/2] = np.array([1,0,0])
# bbRendererGT[posTeapot[1] - 2+width/2:posTeapot[1] + 2+width/2, posTeapot[0] - 2 + height/2: posTeapot[0] + 2 + height/2] = np.array([1,0,0])
#
# _, _, camTransfomMatGT = setupCamera(np.array([0,0,0]), chAz.r, chEl.r, chDist, np.array([0,0,0.1]), width, height)
# camEyeGT = camTransfomMatGT[0:4,0:4].dot(np.array([0,0,0,1]))[0:3]
#
# vecMugToCamGT = camEyeGT - (mugPosOffset + np.array([0,0,0.1]))
# mugCamElGT = 2*ch.arctan(ch.norm(ch.array([0,-1,0])*ch.norm(vecMugToCamGT) - vecMugToCamGT*ch.norm(ch.array([0,-1,0])))/ch.norm(ch.array([0,-1,0])*ch.norm(vecMugToCamGT) + ch.norm(ch.array([0,-1,0]))*vecMugToCamGT))
#
# vecTeapotToCamGT = camEyeGT - (teapotPosOffset + np.array([0,0,0.1]))
# teapotCamElGT = 2*ch.arctan(ch.norm(ch.array([0,-1,0])*ch.norm(vecTeapotToCamGT) - vecTeapotToCamGT*ch.norm(ch.array([0,-1,0])))/ch.norm(ch.array([0,-1,0])*ch.norm(vecTeapotToCamGT) + ch.norm(ch.array([0,-1,0]))*vecTeapotToCamGT))
#
# objDisplacementMat = computeHemisphereTransformation(chObjRotationMug, 0, chObjDistMug, np.array([0, 0, 0.05]))
# pointMug = objDisplacementMat[0:3, 3]
#
# # pointMug = ch.Ch([0,0,0.1])
# imPosMugCam, modelRotation, camTransfomMat = setupCamera(pointMug, chAzCam, chElCam, chDist, np.array([0,0,0.1]), width, height)
# imPosMug = imPosMugCam - np.array([height/2, width/2])
#
# objDisplacementMat = computeHemisphereTransformation(chObjRotation, 0, chObjDist, np.array([0, 0, 0.1]))
# pointTeapot = objDisplacementMat[0:3, 3]
# # pointTeapot = ch.Ch([0,0,0.1])
#
# imPosTeapotCam, modelRotation, camTransfomMat = setupCamera(pointTeapot, chAzCam, chElCam, chDist, np.array([0,0,0.1]), width, height)
# imPosTeapot = imPosTeapotCam - np.array([height/2, width/2])
#
# errMug = ch.sum((imPosMug - posMug)**2)
# errTeapot = ch.sum((imPosTeapot - posTeapot)**2)
#
# camEye = camTransfomMat[0:4,0:4].dot(np.array([0,0,0, 1]))[0:3]
#
# vecMugToCam = camEye - pointMug
# mugCamEl = 2*ch.arctan(ch.norm(ch.array([0,-1,0])*ch.norm(vecMugToCam) - vecMugToCam*ch.norm(ch.array([0,-1,0])))/ch.norm(ch.array([0,-1,0])*ch.norm(vecMugToCam) + ch.norm(ch.array([0,-1,0]))*vecMugToCam))
#
# vecTeapotToCam = camEye - pointTeapot
#
# teapotCamEl = 2*ch.arctan(ch.norm(ch.array([0,-1,0])*ch.norm(vecTeapotToCam) - vecTeapotToCam*ch.norm(ch.array([0,-1,0])))/ch.norm(ch.array([0,-1,0])*ch.norm(vecTeapotToCam) + ch.norm(ch.array([0,-1,0]))*vecTeapotToCam))
#
# chElPredMug = mugCamElGT
# chElPredTeapot = teapotCamElGT
#
# errElMug = ch.sum((chElPredMug*180/np.pi - mugCamEl*180/np.pi)**2)
# errElTeapot = ch.sum((chElPredTeapot*180/np.pi - teapotCamEl*180/np.pi)**2)
#
# # spatialFreeVars = [pointTeapot[0:2], pointMug[0:2], chElCam]
# spatialFreeVars = [chObjRotationMug, chObjRotation, chObjDistMug, chObjDist, chElCam]
#
# spatialErrorFun = errMug + errTeapot + errElMug+ errElTeapot
#
# def cbS(_):
# pass
#
# bbRendererGT[imPosTeapot[1] - 2 +width/2:imPosTeapot[1] + 2+width/2, imPosTeapot[0] - 2 + height/2: imPosTeapot[0] + 2 + height/2] = np.array([0,0,1])
# bbRendererGT[imPosMug[1] - 2+width/2:imPosMug[1] + 2+width/2, imPosMug[0] - 2 + height/2: imPosMug[0] + 2 + height/2] = np.array([0,0,1])
#
# plt.imsave('tmp/rendererPred' + str(test_i) + '.png', renderer.r)
#
# #Dumb initial state (center of table).
# chObjDist[:] = 0
# chObjRotation[:] = 0
# chObjDistMug[:] = 0
# chObjRotationMug[:] = 0
#
# plt.imsave('tmp/rendererInit' + str(test_i) + '.png', renderer.r)
#
# # ch.minimize({'raw': spatialErrorFun }, bounds=[(-0.5,0.5), (-0.5,0.5), (0,np.pi/2)], method=methods[1], x0=spatialFreeVars, callback=cbS, options={'disp':False, 'maxiter':10})
# ch.minimize({'raw': spatialErrorFun }, bounds=[(-2*np.pi,2*np.pi), (-2*np.pi,2*np.pi), (0,0.5), (0,0.5), (0,np.pi/2)], method=methods[2], x0=spatialFreeVars, callback=cbS, options={'disp':True, 'maxiter':50})
#
# bbRendererGT[imPosTeapot[1] - 2+width/2:imPosTeapot[1] + 2+width/2, imPosTeapot[0] - 2 + height/2: imPosTeapot[0] + 2 + height/2] = np.array([0,1,0])
# bbRendererGT[imPosMug[1] - 2+width/2:imPosMug[1] + 2+width/2, imPosMug[0] - 2 + height/2: imPosMug[0] + 2 + height/2] = np.array([0,1,0])
#
# plt.imsave('tmp/bbRendererGT' + str(test_i) + '.png', bbRendererGT)
#
# chEl[:] = chElCam.r.copy()
#
# plt.imsave('tmp/rendererOpt' + str(test_i) + '.png', renderer.r)
import skimage.color
# Small offsets used by experiments that initialize the optimization close to
# the ground truth ("near GT" runs).
nearGTOffsetRelAz = 0.01
nearGTOffsetEl = 0.01
nearGTOffsetLighCoeffs = np.zeros(9)
nearGTOffsetVColor = np.zeros(3)
############ RECOGNITION MODEL PREDICTIONS (COMPUTE AND SAVE, DON'T USE THIS EVERY TIME)
#Load trained recognition models
nnBatchSize = 100  # mini-batch size when running the recognition networks over the test set
azsPredictions = np.array([])
# Flags controlling whether cached predictions/means are recomputed from scratch.
recomputeMeans = False
includeMeanBaseline = False
recomputePredictions = False
recomputePredictionsPose = False
if includeMeanBaseline:
    # Training-set-mean baseline: repeat each training mean once per test case.
    meanTrainLightCoefficientsGTRel = np.repeat(np.mean(trainLightCoefficientsGTRel, axis=0)[None,:], numTests, axis=0)
    meanTrainElevation = np.repeat(np.mean(trainElevsGT, axis=0), numTests, axis=0)
    meanTrainAzimuthRel = np.repeat(0, numTests, axis=0)
    meanTrainShapeParams = np.repeat(np.zeros([latentDim])[None,:], numTests, axis=0)
    meanTrainVColors = np.repeat(np.mean(trainVColorGT, axis=0)[None,:], numTests, axis=0)
    chShapeParams[:] = np.zeros([10])
    meanTrainShapeVertices = np.repeat(chVertices.r.copy()[None,:], numTests, axis=0)
# if recomputeMeans or not os.path.isfile(experimentDir + "meanTrainEnvMapProjections.npy"):
#     envMapTexture = np.zeros([180,360,3])
#     approxProjectionsPredList = []
#     for train_i in range(len(trainSet)):
#         pEnvMap = SHProjection(envMapTexture, np.concatenate([trainLightCoefficientsGTRel[train_i][:,None], trainLightCoefficientsGTRel[train_i][:,None], trainLightCoefficientsGTRel[train_i][:,None]], axis=1))
#         approxProjectionPred = np.sum(pEnvMap, axis=(2,3))
#
#         approxProjectionsPredList = approxProjectionsPredList + [approxProjectionPred[None,:]]
#     approxProjections = np.vstack(approxProjectionsPredList)
#     meanTrainEnvMapProjections = np.mean(approxProjections, axis=0)
#     np.save(experimentDir + 'meanTrainEnvMapProjections.npy', meanTrainEnvMapProjections)
# else:
#     meanTrainEnvMapProjections = np.load(experimentDir + 'meanTrainEnvMapProjections.npy')
# meanTrainEnvMapProjections = np.repeat(meanTrainEnvMapProjections[None,:], numTests, axis=0)
# NOTE(review): the env-map projection baseline above is disabled; the name is
# kept defined so later code that references it does not raise a NameError.
meanTrainEnvMapProjections = None
rangeTests = np.arange(len(testSet))  # indices into the cached prediction arrays for this test range
# Pose recognition: predict azimuth/elevation for every test image with a CNN.
# Predictions are cached on disk under trainModelsDirPose and only recomputed on demand.
if recomputePredictionsPose or not os.path.isfile(trainModelsDirPose + "elevsPred.npy"):
    if 'neuralNetPose' in parameterRecognitionModels:
        poseModel = ""
        with open(experimentDir + 'neuralNetModelPose.pickle', 'rb') as pfile:
            neuralNetModelPose = pickle.load(pfile)
        meanImage = neuralNetModelPose['mean']
        modelType = neuralNetModelPose['type']
        param_values = neuralNetModelPose['params']
        # Grayscale conversion (ITU-R 601 luma weights), add a channel axis, subtract training mean.
        grayTestImages = 0.3*images[:,:,:,0] + 0.59*images[:,:,:,1] + 0.11*images[:,:,:,2]
        grayTestImages = grayTestImages[:,None, :,:]
        grayTestImages = grayTestImages - meanImage.reshape([1,1, grayTestImages.shape[2],grayTestImages.shape[3]])
        network = lasagne_nn.load_network(modelType=modelType, param_values=param_values)
        posePredictionFun = lasagne_nn.get_prediction_fun(network)
        # Network output per image: (cos az, sin az, cos el, sin el); run in mini-batches.
        posePredictions = np.zeros([len(grayTestImages), 4])
        for start_idx in range(0, len(grayTestImages), nnBatchSize):
            posePredictions[start_idx:start_idx + nnBatchSize] = posePredictionFun(grayTestImages.astype(np.float32)[start_idx:start_idx + nnBatchSize])
        # posePredictions = posePredictionFun(grayTestImages.astype(np.float32))
        cosAzsPred = posePredictions[:,0]
        sinAzsPred = posePredictions[:,1]
        cosElevsPred = posePredictions[:,2]
        sinElevsPred = posePredictions[:,3]
        # Recover the angles from their (sin, cos) encoding.
        elevsPred = np.arctan2(sinElevsPred, cosElevsPred)
        azsPred = np.arctan2(sinAzsPred, cosAzsPred)
        np.save(trainModelsDirPose + 'elevsPred.npy', elevsPred)
        np.save(trainModelsDirPose + 'azsPred.npy', azsPred)
        # ##Get predictions with dropout on to get samples.
        # with open(trainModelsDirPose + 'neuralNetModelPose.pickle', 'rb') as pfile:
        #     neuralNetModelPose = pickle.load(pfile)
        #
        # meanImage = neuralNetModelPose['mean']
        # modelType = neuralNetModelPose['type']
        # param_values = neuralNetModelPose['params']
        #
        # # network = lasagne_nn.load_network(modelType=modelType, param_values=param_values)
        # nonDetPosePredictionFun = lasagne_nn.get_prediction_fun_nondeterministic(network)
        # posePredictionsSamples = []
        # cosAzsPredSamples = []
        # sinAzsPredSamples = []
        # cosElevsPredSamples = []
        # sinElevsPredSamples = []
        # for i in range(100):
        #     posePredictionsSample = np.zeros([len(grayTestImages), 4])
        #     for start_idx in range(0, len(grayTestImages), nnBatchSize):
        #         posePredictionsSample[start_idx:start_idx + nnBatchSize] = nonDetPosePredictionFun(grayTestImages.astype(np.float32)[start_idx:start_idx + nnBatchSize])
        #
        #     cosAzsPredSample = posePredictionsSample[:,0]
        #     sinAzsPredSample = posePredictionsSample[:,1]
        #     cosAzsPredSamples = cosAzsPredSamples + [cosAzsPredSample[:,None]]
        #     sinAzsPredSamples = sinAzsPredSamples + [sinAzsPredSample[:,None]]
        #
        #     cosElevsPredSample = posePredictionsSample[:,2]
        #     sinElevsPredSample = posePredictionsSample[:,3]
        #     cosElevsPredSamples = cosElevsPredSamples + [cosElevsPredSample[:,None]]
        #     sinElevsPredSamples = sinElevsPredSamples + [sinElevsPredSample[:,None]]
        #
        # cosAzsPredSamples = np.hstack(cosAzsPredSamples)
        # sinAzsPredSamples = np.hstack(sinAzsPredSamples)
        #
        # cosElevsPredSamples = np.hstack(cosElevsPredSamples)
        # sinElevsPredSamples = np.hstack(sinElevsPredSamples)
        #
        # azsPredictions = np.arctan2(sinAzsPredSamples, cosAzsPredSamples)
        # elevsPredictions = np.arctan2(sinElevsPredSamples, cosElevsPredSamples)
        #
        # np.save(trainModelsDirPose + 'azsPredictions.npy', azsPredictions)
        # np.save(trainModelsDirPose + 'elevsPredictions.npy', elevsPredictions)
        # ##Get predictions with dropout on to get samples.
        # with open(trainModelsDirPose + 'neuralNetModelPose.pickle', 'rb') as pfile:
        #     neuralNetModelPose = pickle.load(pfile)
        #
else:
    # Load the cached predictions, restricted to the current test range.
    elevsPred = np.load(trainModelsDirPose + 'elevsPred.npy')[rangeTests]
    azsPred = np.load(trainModelsDirPose + 'azsPred.npy')[rangeTests]
    # azsPredictions = np.load(trainModelsDirPose + 'azsPredictions.npy')[rangeTests]
    azsPredictions = None
    # elevsPredictions = np.load(trainModelsDirPose + 'elevsPredictions.npy')[rangeTests]
    elevsPredictions = None
# Appearance (vertex color) recognition. Several alternative predictors are
# supported, selected via parameterRecognitionModels; results are cached in
# trainModelsDirVColor/vColorsPred.npy.
if recomputePredictions or not os.path.isfile(trainModelsDirVColor + "vColorsPred.npy"):
    if 'neuralNetVColors' in parameterRecognitionModels:
        import theano
        # theano.sandbox.cuda.use('cpu')
        import lasagne
        import lasagne_nn
        nnModel = ""
        with open(experimentDir + 'neuralNetModelAppearance.pickle', 'rb') as pfile:
            neuralNetModelAppearance = pickle.load(pfile)
        meanImage = neuralNetModelAppearance['mean']
        modelType = neuralNetModelAppearance['type']
        param_values = neuralNetModelAppearance['params']
        # NOTE(review): NHWC -> NCHW is done with reshape (not transpose);
        # assumed to match how the network was trained -- confirm.
        testImages = images.reshape([images.shape[0],3,images.shape[1],images.shape[2]]) - meanImage.reshape([1,meanImage.shape[2], meanImage.shape[0],meanImage.shape[1]]).astype(np.float32)
        network = lasagne_nn.load_network(modelType=modelType, param_values=param_values)
        appPredictionFun = lasagne_nn.get_prediction_fun(network)
        # Predict one RGB color per test image, in mini-batches.
        appPredictions = np.zeros([len(testSet), 3])
        for start_idx in range(0, len(testSet), nnBatchSize):
            appPredictions[start_idx:start_idx + nnBatchSize] = appPredictionFun(testImages.astype(np.float32)[start_idx:start_idx + nnBatchSize])
        vColorsPred = appPredictions
        # Fix: save under trainModelsDirVColor (was trainModelsDirPose), so the
        # os.path.isfile guard above and the np.load in the else branch actually
        # find the cached file instead of recomputing every run.
        np.save(trainModelsDirVColor + 'vColorsPred.npy', vColorsPred)
    if 'randForestVColors' in parameterRecognitionModels:
        with open(trainModelsDirVColor + 'randForestModelVColor.pickle', 'rb') as pfile:
            randForestModelVColor = pickle.load(pfile)
        colorWindow = 30
        image = images[0]
        # Fix: integer division (//) for slice bounds -- '/' yields a float in
        # Python 3 and floats are invalid slice indices.
        croppedImages = images[:,image.shape[0]//2-colorWindow:image.shape[0]//2+colorWindow,image.shape[1]//2-colorWindow:image.shape[1]//2+colorWindow,:]
        vColorsPred = recognition_models.testRandomForest(randForestModelVColor, croppedImages.reshape([len(testSet),-1]))
    if 'linearRegressionVColors' in parameterRecognitionModels:
        with open(trainModelsDirVColor + 'linearRegressionModelVColor.pickle', 'rb') as pfile:
            linearRegressionModelVColor = pickle.load(pfile)
        colorWindow = 30
        image = images[0]
        # Fix: integer division (//) for slice bounds, as above.
        croppedImages = images[:,image.shape[0]//2-colorWindow:image.shape[0]//2+colorWindow,image.shape[1]//2-colorWindow:image.shape[1]//2+colorWindow,:]
        vColorsPred = recognition_models.testLinearRegression(linearRegressionModelVColor, croppedImages.reshape([len(testSet),-1]))
    if 'medianVColors' in parameterRecognitionModels:
        # recognition_models.medianColor(image, win)
        colorWindow = 30
        # Fix: integer division (//) for slice bounds, as above.
        imagesWin = images[:,images.shape[1]//2-colorWindow:images.shape[1]//2+colorWindow,images.shape[2]//2-colorWindow:images.shape[2]//2+colorWindow,:]
        vColorsPred = np.median(imagesWin.reshape([images.shape[0],-1,3]), axis=1)/1.4
        # return color
else:
    # Load the cached predictions, restricted to the current test range.
    vColorsPred = np.load(trainModelsDirVColor + 'vColorsPred.npy')[rangeTests]
SHModel = ""
# Theano is needed by the (mostly disabled) neural-network error functions below.
import theano
import theano.tensor as T
## Theano NN error function 1.
# import lasagne_nn
# import lasagne
#
# with open(trainModelsDirPose + 'neuralNetModelAppearance.pickle', 'rb') as pfile:
# neuralNetModelPose = pickle.load(pfile)
#
# meanImage = neuralNetModelPose['mean'].reshape([150,150,3])
#
# modelType = neuralNetModelPose['type']
# param_values = neuralNetModelPose['params']
# network = lasagne_nn.load_network(modelType=modelType, param_values=param_values)
# layer = lasagne.layers.get_all_layers(network)[6]
# inputLayer = lasagne.layers.get_all_layers(network)[0]
# layer_output = lasagne.layers.get_output(layer, deterministic=True)
# dim_output= layer.output_shape[1]
#
# networkGT = lasagne_nn.load_network(modelType=modelType, param_values=param_values)
# layerGT = lasagne.layers.get_all_layers(networkGT)[6]
# inputLayerGT = lasagne.layers.get_all_layers(networkGT)[0]
# layer_outputGT = lasagne.layers.get_output(layerGT, deterministic=True)
#
# rendererGray = 0.3*renderer[:,:,0] + 0.59*renderer[:,:,1] + 0.11*renderer[:,:,2]
# rendererGrayGT = 0.3*rendererGT[:,:,0] + 0.59*rendererGT[:,:,1] + 0.11*rendererGT[:,:,2]
#
# chThError = TheanoFunOnOpenDR(theano_input=inputLayer.input_var, theano_output=layer_output, opendr_input=renderer - meanImage, dim_output = dim_output,
# theano_input_gt=inputLayerGT.input_var, theano_output_gt=layer_outputGT, opendr_input_gt=rendererGT - meanImage)
#
# chThError.compileFunctions(layer_output, theano_input=inputLayer.input_var, dim_output=dim_output, theano_input_gt=inputLayerGT.input_var, theano_output_gt=layer_outputGT)
#
# chThError.r
#
## Theano NN error function finite differences.
# with open(trainModelsDirPose + 'neuralNetModelAzimuthTriplet.pickle', 'rb') as pfile:
# neuralNetModelPose = pickle.load(pfile)
#meanImage = neuralNetModelPose['mean'].reshape([150,150])
# modelType = neuralNetModelPose['type']
# param_values = neuralNetModelPose['params']
#
# network = lasagne_nn.load_network(modelType=modelType, param_values=param_values, imgSize=75)
#
# # layer = lasagne.layers.get_all_layers(network)[-2]
# inputLayer = lasagne.layers.get_all_layers(network)[0]
# layer_output = lasagne.layers.get_output(network, deterministic=True)
# dim_output= network.output_shape[1]
#
# networkGT = lasagne_nn.load_network(modelType=modelType, param_values=param_values, imgSize=75)
# # layerGT = lasagne.layers.get_all_layers(networkGT)[-2]
# inputLayerGT = lasagne.layers.get_all_layers(networkGT)[0]
# # layer_outputGT = lasagne.layers.get_output(layerGT, deterministic=True)
# layer_outputGT = lasagne.layers.get_output(networkGT, deterministic=True)
#
# rendererGray = 0.3*renderer[:,:,0] + 0.59*renderer[:,:,1] + 0.11*renderer[:,:,2]
# rendererGrayGT = 0.3*rendererGT[:,:,0] + 0.59*rendererGT[:,:,1] + 0.11*rendererGT[:,:,2]
# chThError = TheanoFunFiniteDiff(theano_input=inputLayer.input_var, theano_output=layer_output, opendr_input=rendererGray, dim_output = dim_output,
# theano_input_gt=inputLayerGT.input_var, theano_output_gt=layer_outputGT, opendr_input_gt=rendererGrayGT, imSize=75)
#
# chThError.compileFunctions(layer_output, theano_input=inputLayer.input_var, dim_output=dim_output, theano_input_gt=inputLayerGT.input_var, theano_output_gt=layer_outputGT)
#
# chThError.r
# Illumination recognition: predict 9 spherical-harmonic lighting coefficients
# per test image with a CNN; cached in trainModelsDirLightCoeffs.
if recomputePredictions or not os.path.isfile(trainModelsDirLightCoeffs + "relLightCoefficientsPred.npy"):
    if 'neuralNetModelSHLight' in parameterRecognitionModels:
        import theano
        # theano.sandbox.cuda.use('cpu')
        import lasagne
        import lasagne_nn
        nnModel = ""
        with open(experimentDir + 'neuralNetModelLight.pickle', 'rb') as pfile:
            neuralNetModelSHLight = pickle.load(pfile)
        meanImage = neuralNetModelSHLight['mean']
        modelType = neuralNetModelSHLight['type']
        param_values = neuralNetModelSHLight['params']
        # NOTE(review): NHWC -> NCHW via reshape (not transpose); assumed to match training -- confirm.
        testImages = images.reshape([images.shape[0],3,images.shape[1],images.shape[2]]) - meanImage.reshape([1,meanImage.shape[2], meanImage.shape[0],meanImage.shape[1]]).astype(np.float32)
        network = lasagne_nn.load_network(modelType=modelType, param_values=param_values)
        lightPredictionFun = lasagne_nn.get_prediction_fun(network)
        # One 9-vector of SH coefficients per image, predicted in mini-batches.
        lightPredictions = np.zeros([len(testImages), 9])
        for start_idx in range(0, len(testImages), nnBatchSize):
            lightPredictions[start_idx:start_idx + nnBatchSize] = lightPredictionFun(testImages.astype(np.float32)[start_idx:start_idx + nnBatchSize])
        relLightCoefficientsPred = lightPredictions
        np.save(trainModelsDirLightCoeffs + 'relLightCoefficientsPred.npy', relLightCoefficientsPred)
else:
    # Load the cached predictions, restricted to the current test range.
    relLightCoefficientsPred = np.load(trainModelsDirLightCoeffs + 'relLightCoefficientsPred.npy')[rangeTests]
# Shape recognition: predict latentDim shape-model parameters per test image
# with a CNN (grayscale input); cached in trainModelsDirShapeParams.
if recomputePredictions or not os.path.isfile(trainModelsDirShapeParams + "shapeParamsPred.npy"):
    if 'neuralNetModelShape' in parameterRecognitionModels and useShapeModel:
        import theano
        # theano.sandbox.cuda.use('cpu')
        import lasagne
        import lasagne_nn
        nnModel = ""
        with open(experimentDir + 'neuralNetModelShape.pickle', 'rb') as pfile:
            neuralNetModelSHLight = pickle.load(pfile)
        meanImage = neuralNetModelSHLight['mean']
        modelType = neuralNetModelSHLight['type']
        param_values = neuralNetModelSHLight['params']
        # Grayscale conversion (ITU-R 601 luma weights), channel axis, mean subtraction.
        grayTestImages = 0.3*images[:,:,:,0] + 0.59*images[:,:,:,1] + 0.11*images[:,:,:,2]
        grayTestImages = grayTestImages[:,None, :,:]
        grayTestImages = grayTestImages - meanImage.reshape([1,1, grayTestImages.shape[2],grayTestImages.shape[3]])
        network = lasagne_nn.load_network(modelType=modelType, param_values=param_values)
        shapePredictionFun = lasagne_nn.get_prediction_fun(network)
        shapePredictions = np.zeros([len(grayTestImages), latentDim])
        for start_idx in range(0, len(grayTestImages), nnBatchSize):
            shapePredictions[start_idx:start_idx + nnBatchSize] = shapePredictionFun(grayTestImages.astype(np.float32)[start_idx:start_idx + nnBatchSize])
        shapeParamsPred = shapePredictions
        np.save(trainModelsDirShapeParams + 'shapeParamsPred.npy', shapeParamsPred)
        # #Samples:
        # shapeParamsPredSamples = []
        # shapeParamsNonDetFun = lasagne_nn.get_prediction_fun_nondeterministic(network)
        #
        # for i in range(100):
        #     shapeParamsPredictionsSample = np.zeros([len(grayTestImages), 10])
        #     for start_idx in range(0, len(grayTestImages), nnBatchSize):
        #         shapeParamsPredictionsSample[start_idx:start_idx + nnBatchSize] = shapeParamsNonDetFun(grayTestImages.astype(np.float32)[start_idx:start_idx + nnBatchSize])
        #
        #     shapeParamsPredSamples = shapeParamsPredSamples + [shapeParamsPredictionsSample[:,:][:,:,None]]
        #
        # shapeParamsPredSamples = np.concatenate(shapeParamsPredSamples, axis=2)
        #
        # np.save(trainModelsDirShapeParams + 'shapeParamsPredSamples.npy', shapeParamsPredSamples)
else:
    # Load the cached predictions, restricted to the current test range.
    shapeParamsPred = np.load(trainModelsDirShapeParams + 'shapeParamsPred.npy')[rangeTests]
    # shapeParamsPredSamples = np.load(trainModelsDirShapeParams + 'shapeParamsPredSamples.npy')[rangeTests]
# if recomputePredictions or not os.path.isfile(trainModelsDirShapeParams + "neuralNetModelMaskLarge.npy"):
# if 'neuralNetModelMask' in parameterRecognitionModels:
#
# import theano
# # theano.sandbox.cuda.use('cpu')
# import lasagne
# import lasagne_nn
#
#
# nnModel = ""
# with open(trainModelsDirLightCoeffs + 'neuralNetModelMaskLarge.pickle', 'rb') as pfile:
# neuralNetModelSHLight = pickle.load(pfile)
#
# meanImage = neuralNetModelSHLight['mean']
# modelType = neuralNetModelSHLight['type']
# param_values = neuralNetModelSHLight['params']
#
# testImages = images.reshape([images.shape[0],3,images.shape[1],images.shape[2]]) - meanImage.reshape([1,meanImage.shape[2], meanImage.shape[0],meanImage.shape[1]]).astype(np.float32)
#
# network = lasagne_nn.load_network(modelType=modelType, param_values=param_values)
# maskPredictionFun = lasagne_nn.get_prediction_fun(network)
#
# maskPredictions = np.zeros([len(testImages), 50*50])
# for start_idx in range(0, len(testImages), nnBatchSize):
# maskPredictions[start_idx:start_idx + nnBatchSize] = maskPredictionFun(testImages.astype(np.float32)[start_idx:start_idx + nnBatchSize])
#
# maskPredictions = np.reshape(maskPredictions, [len(testImages), 50,50])
#
# np.save(trainModelsDirShapeParams + 'maskPredictions.npy', maskPredictions)
#
# # # #Samples:
# # maskSamples = []
# # maskPredNonDetFun = lasagne_nn.get_prediction_fun_nondeterministic(network)
# #
# # for i in range(100):
# # maskPredictionsSamples = np.zeros([len(testImages), 50*50])
# # for start_idx in range(0, len(testImages),nnBatchSize):
# # maskPredictionsSamples[start_idx:start_idx + nnBatchSize] = maskPredNonDetFun(testImages.astype(np.float32)[start_idx:start_idx + nnBatchSize])
# #
# # maskSamples = maskSamples + [maskPredictionsSamples[:,:][:,:,None]]
# #
# # maskSamples = np.concatenate(maskSamples, axis=2)
# # loadMask = True
# #
# # gtDirMask = 'groundtruth/train4_occlusion_mask/'
# #
# # masksDir = gtDirMask + 'masks_occlusion/'
# # if loadMask:
# # masksGT = loadMasks(masksDir, testSet)
# else:
# maskPredictions = np.load(trainModelsDirShapeParams + 'maskPredictions.npy')[rangeTests]
# Optionally load ground-truth occlusion masks for the test set.
loadMask = False
if loadMask:
    masksGT = loadMasks(gtDir + '/masks_occlusion/', testSet)
print("Finished loading and compiling recognition models")
# Precomputed spherical-harmonic coefficients of the environment maps (name -> coeffs).
envMapDic = {}
SHFilename = 'data/LightSHCoefficients.pickle'
with open(SHFilename, 'rb') as pfile:
    envMapDic = pickle.load(pfile)
hdritems = list(envMapDic.items())[:]
analyzeSamples = False
# def analyzeHue(figurePath, rendererGT, renderer, sampleEl, sampleAz, sampleSH, sampleVColorsPredictions=None, sampleStds=0.1):
# global stds
# global chAz
# global test_i
# global samplingMode
#
# plt.ioff()
# fig = plt.figure()
#
# stds[:] = sampleStds
# negLikModel = -ch.sum(generative_models.LogGaussianModel(renderer=renderer, groundtruth=rendererGT, variances=variances))
# negLikModelRobust = -ch.sum(generative_models.LogRobustModel(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variances))
# stds = ch.Ch([initialPixelStdev])
#
# models = [negLikModel, negLikModelRobust, negLikModelRobust]
# errorFunRobust = models[1]
# errorFunGaussian = models[0]
#
# vColorsPredSamplesHSV = cv2.cvtColor(np.uint8(sampleVColorsPredictions.reshape([1, 100, 3])*255), cv2.COLOR_RGB2HSV)[0,:,0]
#
# plt.hist(vColorsPredSamplesHSV, bins=30, alpha=0.2)
#
# hueGT = cv2.cvtColor(np.uint8(testVColorGT[test_i][None,None,:]*255), cv2.COLOR_RGB2HSV)[0,0,0]
# huePred = cv2.cvtColor(np.uint8(vColorsPred[test_i][None,None,:]*255), cv2.COLOR_RGB2HSV)[0,0,0]
#
# chAz[:] = sampleAz
# chEl[:] = sampleEl
# currentVColors = chVColors.r
# currentHSV = cv2.cvtColor(np.uint8(currentVColors[None,None,:]*255), cv2.COLOR_RGB2HSV)[0,0]
#
# chLightSHCoeffs[:] = sampleSH
#
# trainingTeapots = [0]
# hueRange = np.arange(0,255,5)
# # chThErrors = np.zeros([len(trainingTeapots), len(hueRange)])
#
# robustErrors = np.array([])
# gaussianErrors = np.array([])
# hues = np.array([])
# for hue_i, hue in enumerate(hueRange):
# hues = np.append(hues, hue)
#
# color = cv2.cvtColor(np.array([hue, currentHSV[1],currentHSV[2]])[None,None,:].astype(np.uint8), cv2.COLOR_HSV2RGB)/255
# chVColors[:] = color
#
# for idx, renderer_idx in enumerate(trainingTeapots):
# renderer_i = renderer_teapots[renderer_idx]
# rendererGray = 0.3*renderer_i[:,:,0] + 0.59*renderer_i[:,:,1] + 0.11*renderer_i[:,:,2]
# # chThError.opendr_input = rendererGray
# # chThErrors[idx, az_i] = chThError.r
#
# robustErrors = np.append(robustErrors, errorFunRobust.r)
# gaussianErrors = np.append(gaussianErrors, errorFunGaussian.r)
#
# x1,x2,y1,y2 = plt.axis()
#
# robustErrors = robustErrors - np.min(robustErrors)
# gaussianErrors = gaussianErrors - np.min(gaussianErrors)
# # chThErrors = chThErrors - np.min(chThErrors)
# plt.plot(hues, robustErrors*y2/np.max(robustErrors), c='brown')
# plt.plot(hues, gaussianErrors*y2/np.max(gaussianErrors), c='purple')
#
# chThError.opendr_input = renderer
# lineStyles = ['-', '--', '-.', ':']
#
# # plt.axvline(np.mod(bestAzNormal*180/np.pi,360), linewidth=2, c='purple')
# # plt.axvline(np.mod(bestAzRobust*180/np.pi,360), linewidth=2, c='brown')
# # plt.axvline(bestHue, linewidth=2, c='y')
# plt.axvline(hueGT, linewidth=2,c='g')
# plt.axvline(huePred, linewidth=2,c='r')
#
# # plt.axvline(np.mod(currentAz*180/np.pi, 360), linewidth=2, linestyle='--',c='b')
#
# plt.xlabel('Sample')
# plt.ylabel('Angular error')
#
# plt.axis((0,255,y1,y2))
# plt.title('Neuralnet multiple predictions')
# fig.savefig(figurePath + 'sample' + '.png', bbox_inches='tight')
# plt.close(fig)
#
# chVColors[:] = currentVColors
# cv2.imwrite(figurePath + '_render.png', cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy())*255), cv2.COLOR_RGB2BGR))
#
# def analyzeAz(figurePath, rendererGT, renderer, sampleEl, sampleVColor, sampleSH, sampleAzsPredictions=None, sampleStds=0.1):
# global stds
# global chAz
# global test_i
# global samplingMode
#
# plt.ioff()
# fig = plt.figure()
#
# stds[:] = sampleStds
# negLikModel = -ch.sum(generative_models.LogGaussianModel(renderer=renderer, groundtruth=rendererGT, variances=variances))
# negLikModelRobust = -ch.sum(generative_models.LogRobustModel(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variances))
# models = [negLikModel, negLikModelRobust, negLikModelRobust]
# errorFunRobust = models[1]
# errorFunGaussian = models[0]
#
# plt.hist(np.mod(sampleAzsPredictions*180/np.pi,360), bins=30, alpha=0.2)
#
# bestAz = testAzsRel[test_i]
#
# currentAz = chAz.r.copy()
# chEl[:] = sampleEl
# chVColors[:] = sampleVColor
# chLightSHCoeffs[:] = sampleSH
#
# trainingTeapots = [0,14,20,25,26,1]
# trainingTeapots = [0]
# azRange = np.arange(0,2*np.pi,5*np.pi/180)
# chThErrors = np.zeros([len(trainingTeapots), len(azRange)])
#
# robustErrors = np.array([])
# gaussianErrors = np.array([])
# angles = np.array([])
# for az_i, az in enumerate(azRange):
# angles = np.append(angles, az*180/np.pi)
# chAz[:] = az
# # for idx, renderer_idx in enumerate(trainingTeapots):
# # renderer_i = renderer_teapots[renderer_idx]
# # rendererGray = 0.3*renderer_i[:,:,0] + 0.59*renderer_i[:,:,1] + 0.11*renderer_i[:,:,2]
# # chThError.opendr_input = rendererGray
# chThErrors[0, az_i] = chThError.r
#
# robustErrors = np.append(robustErrors, errorFunRobust.r)
# gaussianErrors = np.append(gaussianErrors, errorFunGaussian.r)
#
# x1,x2,y1,y2 = plt.axis()
#
# robustErrors = robustErrors - np.min(robustErrors)
# gaussianErrors = gaussianErrors - np.min(gaussianErrors)
# chThErrors = chThErrors - np.min(chThErrors)
# plt.plot(angles, robustErrors*y2/np.max(robustErrors), c='brown')
# plt.plot(angles, gaussianErrors*y2/np.max(gaussianErrors), c='purple')
#
# # chThError.opendr_input = renderer
# lineStyles = ['-', '--', '-.', ':']
# for renderer_idx in range(len(trainingTeapots)):
# plt.plot(angles, chThErrors[renderer_idx]*y2/np.max(chThErrors[renderer_idx]), linestyle=lineStyles[np.mod(renderer_idx,4)], c='y')
#
# if len(trainingTeapots) > 1:
# prodErrors = np.prod(chThErrors, axis=0)
# plt.plot(angles, prodErrors*y2/np.max(prodErrors), linestyle='-', c='black')
# meanErrors = np.mean(chThErrors, axis=0)
# plt.plot(angles, meanErrors*y2/np.max(meanErrors), linestyle='--', c='black')
# # plt.plot(angles, gaussianErrors*robustErrors*y2/np.max(gaussianErrors*robustErrors), linestyle='--', c='black')
#
# # plt.axvline(np.mod(bestAzNormal*180/np.pi,360), linewidth=2, c='purple')
# # plt.axvline(np.mod(bestAzRobust*180/np.pi,360), linewidth=2, c='brown')
# plt.axvline(np.mod(bestAz*180/np.pi,360), linewidth=2, c='y')
# plt.axvline(testAzsRel[test_i]*180/np.pi, linewidth=2,c='g')
# plt.axvline(np.mod(azsPred[test_i]*180/np.pi, 360), linewidth=2,c='r')
#
# plt.axvline(np.mod(currentAz*180/np.pi, 360), linewidth=2, linestyle='--',c='b')
#
# plt.xlabel('Sample')
# plt.ylabel('Angular error')
#
# # if samplingMode == False:
# #
# # scaleAzSamples = np.array(errorFunAzSamples)
# # scaleAzSamples = scaleAzSamples - np.min(scaleAzSamples) + 1
# # scaleAzSamples = scaleAzSamples*0.25*y2/np.max(scaleAzSamples)
# # for azSample_i, azSample in enumerate(scaleAzSamples):
# # plt.plot(np.mod(totalAzSamples[azSample_i]*180/np.pi, 360), azSample, marker='o', ms=20., c='r')
# #
# # scaleAzSamples = np.array(errorFunGaussianAzSamples)
# # scaleAzSamples = scaleAzSamples - np.min(scaleAzSamples) + 1
# # scaleAzSamples = scaleAzSamples*0.4*y2/np.max(scaleAzSamples)
# # for azSample_i, azSample in enumerate(scaleAzSamples):
# # plt.plot(np.mod(totalAzSamples[azSample_i]*180/np.pi, 360), azSample, marker='o', ms=20., c='g')
# #
# # scaleAzSamples = np.array(errorFunAzSamplesPred)
# # scaleAzSamples = scaleAzSamples - np.min(scaleAzSamples) + 1
# # scaleAzSamples = scaleAzSamples*0.65*y2/np.max(scaleAzSamples)
# # for azSample_i, azSample in enumerate(scaleAzSamples):
# # plt.plot(np.mod(totalAzSamples[azSample_i]*180/np.pi, 360), azSample, marker='o', ms=20., c='b')
# #
# # scaleAzSamples = np.array(errorFunGaussianAzSamplesPred)
# # scaleAzSamples = scaleAzSamples - np.min(scaleAzSamples) + 1
# # scaleAzSamples = scaleAzSamples*0.75*y2/np.max(scaleAzSamples)
# # for azSample_i, azSample in enumerate(scaleAzSamples):
# # plt.plot(np.mod(totalAzSamples[azSample_i]*180/np.pi, 360), azSample, marker='o', ms=20., c='y')
#
# plt.axis((0,360,y1,y2))
# plt.title('Neuralnet multiple predictions')
# fig.savefig(figurePath + 'sample' + '.png', bbox_inches='tight')
# plt.close(fig)
#
# chAz[:] = currentAz
# cv2.imwrite(figurePath + '_render.png', cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy())*255), cv2.COLOR_RGB2BGR))
#Fit:
print("Fitting predictions")
# NOTE(review): models/modelsDescr/pixelModels here still refer to definitions
# made earlier in the file; they are rebuilt with the current renderers a few
# lines below and errorFun is re-assigned afterwards.
print("Using " + modelsDescr[model])
errorFun = models[model]
pixelErrorFun = pixelModels[model]
testSamples = 1
if recognitionType == 2:
    testSamples = numSamples
predSamples = 50
# Reset scene transformation parameters before fitting.
chDisplacement[:] = np.array([0.0, 0.0,0.0])
chScale[:] = np.array([1.0,1.0,1.0])
chObjAz[:] = 0
shapeIm = [height, width]
#Update all error functions with the right renderers.
print("Using " + modelsDescr[model])
# Negative log-likelihood models (normalized by pixel count): plain Gaussian
# and a robust (outlier-aware) variant.
negLikModel = -ch.sum(generative_models.LogGaussianModel(renderer=renderer, groundtruth=rendererGT, variances=variances))/ numPixels
negLikModelRobust = -ch.sum(generative_models.LogRobustModel(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variances))/ numPixels
# Per-pixel likelihood terms (not summed), used for visualization/diagnostics.
pixelLikelihoodCh = generative_models.LogGaussianModel(renderer=renderer, groundtruth=rendererGT, variances=variances)
pixelLikelihoodRobustCh = generative_models.LogRobustModel(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variances)
# pixelLikelihoodRobustSQErrorCh = generative_models.LogRobustSQErrorModel(sqeRenderer, foregroundPrior=globalPrior, variances=variances)
# Per-pixel foreground posterior under the robust model.
post = generative_models.layerPosteriorsRobustCh(rendererGT, renderer, np.array([]), 'FULL', globalPrior, variances)[0]
# postSqerror = generative_models.layerPosteriorsRobustSQErrorCh(sqeRenderer, np.array([]), 'MASK', globalPrior, variances)[0].r>0.5
# negLikModelRobustSQError = generative_models.NLLRobustSQErrorModel(sqeRenderer=sqeRenderer, Q=globalPrior.r*np.ones([height, width]),variances=variances) / numPixels
models = [negLikModel, negLikModelRobust]
pixelModels = [pixelLikelihoodCh, pixelLikelihoodRobustCh]
modelsDescr = ["Gaussian Model", "Outlier model" ]
errorFun = models[model]
# Experiment naming and per-run settings (one entry per run in stdsTests).
testRangeStr = str(testSet[0]) + '-' + str(testSet[-1])
testDescription = 'photorealistic-replicateopendr_server-' + testRangeStr
testPrefix = experimentPrefix + '_' + testDescription + '_' + optimizationTypeDescr[optimizationType] + '_' + str(len(testSet)) + 'samples_'
testPrefixBase = testPrefix
runExp = True
shapePenaltyTests = [0,0,0,0]
# shapePenaltyTests = [0,0,0,0]
stdsTests = [0.03]
# stdsTests = [0.03]
modelTests = len(stdsTests)*[1]
# modelTests = [1]
methodTests = len(stdsTests)*[1]
maxOptIters = len(stdsTests)*[40]
# Optional video output of the fitting process: a 2x4 panel figure (the 8th
# axis is removed) written with the ffmpeg animation writer.
if makeVideo:
    plt.ioff()
    import matplotlib.animation as animation
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=1, metadata=dict(title='Fitting process', artist=''), bitrate=1800)
    figvid, ((vax1, vax2, vax3, vax4), (vax5, vax6, vax7, vax8)) = plt.subplots(2, 4, figsize=(12, 5))
    figvid.delaxes(vax8)
    # Configure the seven used panels: hide ticks and set a title on each.
    panelTitles = ["Ground truth", "Recognition", "Fit", "Posterior",
                   "Env Map GT", "Env Map Recognition", "Env Map Fit"]
    for panelAxis, panelTitle in zip((vax1, vax2, vax3, vax4, vax5, vax6, vax7), panelTitles):
        panelAxis.axes.get_xaxis().set_visible(False)
        panelAxis.axes.get_yaxis().set_visible(False)
        panelAxis.set_title(panelTitle)
    plt.tight_layout()
    vidImgs = []
# Optional nearest-neighbour baseline: requires the raw training images in memory.
nearestNeighbours = False
if nearestNeighbours:
    trainImages = readImages(imagesDir, trainIds, loadFromHdf5)
    trainImagesR = trainImages.reshape([len(trainImages), -1])
# Method labels and plot colors used when comparing the different predictors.
methodsPred = ["Mean Baseline", "Nearest Neighbours", "Recognition", "Fit" ]
plotColors = ['k', 'm', 'b', 'r']
segmentVColorError = np.array([])
useSegmentation = True
segmentVColorsList = []
# NOTE(review): 'global' at module scope is a no-op; this just initializes annot_t.
global annot_t
annot_t = None
def cb(_):
    """Per-iteration callback passed to ch.minimize during fitting.

    Prints timing and error diagnostics, optionally re-estimates the object
    vertex color from the CRF segmentation (when getColorFromCRF is set),
    and optionally captures a composite frame for the fitting video (when
    makeVideo is set).  It communicates with the driving script entirely
    through module-level globals; the positional argument (current parameter
    vector from the optimizer) is ignored.
    """
    global t
    global samplingMode
    # Time since the driver last reset `t`, i.e. duration of this iteration.
    elapsed_time = time.time() - t
    print("Ended interation in " + str(elapsed_time))
    # if samplingMode:
    #     analyzeAz(resultDir + 'az_samples/test' + str(test_i) +'/azNum' + str(sampleAzNum) + '_it' + str(iterat) , rendererGT, renderer, chEl.r, chVColors.r, chLightSHCoeffs.r, azsPredictions[test_i], sampleStds=stds.r)
    # else:
    #     analyzeAz(resultDir + 'az_samples/test' + str(test_i) +'/min_azNum' + str(sampleAzNum) + '_it' + str(iterat) , rendererGT, renderer, chEl.r, chVColors.r, chLightSHCoeffs.r, azsPredictions[test_i], sampleStds=stds.r)
    global pixelErrorFun
    global errorFun
    global iterat
    iterat = iterat + 1
    print("Callback! " + str(iterat))
    print("Sq Error: " + str(errorFun.r))
    if chThError is not None:
        print("Theano Error: " + str(chThError.r))
    global negLikModelRobustSmallStd
    global bestShapeParamsSmallStd
    global bestRobustSmallStdError
    # if minimizingShape and useShapeModel:
    #     # if negLikModelRobustSmallStd.r < bestRobustSmallStdError:
    #     #     bestRobustSmallStdError = negLikModelRobustSmallStd.r.copy()
    #     #     bestShapeParamsSmallStd = chShapeParams.r.copy()
    #     maxShapeSize = 2.5
    #     largeShapeParams = np.abs(chShapeParams.r) > maxShapeSize
    #     if np.any(largeShapeParams):
    #         print("Warning: found large shape parameters to fix!")
    #         chShapeParams[largeShapeParams] = np.sign(chShapeParams.r[largeShapeParams])*maxShapeSize
    if getColorFromCRF:
        global chVColors
        global Q
        global color
        # Hard segmentation: per-pixel argmax over the CRF label marginals Q.
        segmentation = np.argmax(Q.r, axis=0).reshape(renderer.r.shape[:2])
        if np.sum(segmentation == 0) == 0:
            # No pixels labeled as object (label 0): keep the current color.
            vColor = color
        else:
            segmentRegion = segmentation == 0
            # Robust color estimate: median GT color over the segmented
            # region, brightened and renormalized into [0, 1].
            vColor = np.median(rendererGT.reshape([-1, 3])[segmentRegion.ravel()], axis=0) * 1.4
            vColor = vColor / max(np.max(vColor), 1.)
            # NOTE(review): np.bool is a deprecated alias removed in
            # NumPy >= 1.24; plain `bool` would be the modern spelling.
            vis_im = np.array(renderer.indices_image == 1).copy().astype(np.bool)
            chVColors[:] = vColor
            # Pixels that are rendered, segmented as object, and visible:
            # used to rescale the color so the render matches GT brightness.
            colorRegion = np.all(renderer.r != 0,axis=2).ravel() * segmentRegion.ravel() * vis_im.ravel()
            vColor = vColor * np.mean(
                rendererGT.r.reshape([-1, 3])[colorRegion] / renderer.r.reshape([-1, 3])[colorRegion])
            chVColors[:] = vColor
    if makeVideo:
        global im1
        global im2
        global vidImgs
        global writer
        global writer_i
        global rendererRecognition
        plt.figure(figvid.number)
        # Panels 1-3: ground truth, recognition estimate, current fit.
        im1 = vax1.imshow(lin2srgb(rendererGT.r.copy()))
        bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.8)
        im2 = vax2.imshow(lin2srgb(rendererRecognition.copy()))
        im3 = vax3.imshow(lin2srgb(renderer.r.copy()))
        # Temporarily tighten stds to compute a crisp foreground posterior
        # mask, then restore the optimizer's stds.
        stdsOld = stds.r
        stds[:] = 0.05
        vis_im = np.array(renderer.indices_image==1).copy().astype(np.bool)
        post = generative_models.layerPosteriorsRobustCh(rendererGT, renderer, vis_im, 'MASK', globalPrior, variances)[0].r>0.5
        stds[:] = stdsOld
        im4 = vax4.imshow(post.copy())
        # plt.colorbar(im4, ax=vax4, use_gridspec=True)
        # Project the current SH lighting coefficients onto the env map
        # texture (same coefficients replicated over the 3 color channels).
        pEnvMap = SHProjection(envMapTexture, np.concatenate([chLightSHCoeffs.r[:,None], chLightSHCoeffs.r[:,None], chLightSHCoeffs.r[:,None]], axis=1))
        approxProjectionFitted = np.sum(pEnvMap, axis=(2,3))
        # approxProjectionFitted[approxProjectionFitted<0] = 0
        #
        # approxProjectionGT[approxProjectionGT<0] = 0
        # approxProjectionPred[approxProjectionPred<0] = 0
        # cv2.imwrite(resultDir + 'approxProjectionGT.jpeg' , 255*np.concatenate([approxProjectionGT[...,None], approxProjectionGT[...,None], approxProjectionGT[...,None]], axis=2)[:,:,[2,1,0]])
        # cv2.imwrite(resultDir + 'approxProjectionPred.jpeg' , 255*np.concatenate([approxProjectionPred[...,None], approxProjectionPred[...,None], approxProjectionPred[...,None]], axis=2)[:,:,[2,1,0]])
        # cv2.imwrite(resultDir + 'approxProjectionFitted.jpeg' , 255*np.concatenate([approxProjectionFitted[...,None], approxProjectionFitted[...,None], approxProjectionFitted[...,None]], axis=2)[:,:,[2,1,0]])
        # Round-trip the three env-map projections through JPEG files so the
        # displayed panels match what is written to disk (channel order is
        # swapped to BGR for cv2.imwrite).
        cv2.imwrite(resultDir + 'approxProjectionGT.jpeg' , 255*np.sum(pEnvMapGT, axis=3)[:,:,[2,1,0]])
        cv2.imwrite(resultDir + 'approxProjectionPred.jpeg' , 255*np.sum(pEnvMapPred, axis=3)[:,:,[2,1,0]])
        cv2.imwrite(resultDir + 'approxProjectionFitted.jpeg' , 255*np.sum(pEnvMap, axis=3)[:,:,[2,1,0]])
        approxProjectionGTlocal = skimage.io.imread(resultDir +'approxProjectionGT.jpeg').astype(np.float32)/255.
        approxProjectionPredlocal = skimage.io.imread(resultDir +'approxProjectionPred.jpeg').astype(np.float32)/255.
        approxProjectionFittedlocal = skimage.io.imread(resultDir +'approxProjectionFitted.jpeg').astype(np.float32)/255.
        approxProjectionGTlocal = skimage.transform.resize(approxProjectionGTlocal, [75,150])
        approxProjectionPredlocal = skimage.transform.resize(approxProjectionPredlocal, [75,150])
        approxProjectionFittedlocal = skimage.transform.resize(approxProjectionFittedlocal, [75,150])
        im5 = vax5.imshow(approxProjectionGTlocal.copy())
        im6 = vax6.imshow(approxProjectionPredlocal.copy())
        im7 = vax7.imshow(approxProjectionFittedlocal.copy())
        # if annot_t is not None:
        #     annot_t.remove()
        # NOTE(review): annot_t is local here (no `global annot_t` in cb),
        # so it shadows the module-level annot_t rather than updating it.
        annot_t = vax3.annotate("Fitting iter: " + str(iterat), xy=(1, 0), xycoords='axes fraction', fontsize=16,
                    xytext=(-20, 5), textcoords='offset points', ha='right', va='bottom', bbox=bbox_props)
        plt.tight_layout()
        vidImgs.append([im1,im2, im3,im4, im5, im6, im7, annot_t])
        # Duplicate the first frame so it lingers at the start of the video.
        if iterat == 1:
            vidImgs.append([im1,im2, im3,im4, im5, im6, im7, annot_t])
            vidImgs.append([im1,im2, im3,im4, im5, im6, im7, annot_t])
        # figvid.savefig(resultDir + 'videos/' + 'lastFig.png')
        # writer_i.grab_frame()
    # Remaining globals declared for the driving script's benefit; none of
    # them is written in the body above (declarations are no-ops here).
    global imagegt
    global gradAz
    global gradEl
    global performance
    global azimuths
    global elevations
    global shapeParams
# Wall-clock reference used by cb() to report per-iteration timing.
t = time.time()
# When False, tests whose fitted shape parameters already exist on disk
# are skipped instead of being recomputed.
replaceExisting = False
for testSetting, model in enumerate(modelTests):
model = modelTests[testSetting]
method = methodTests[testSetting]
shapePenalty = shapePenaltyTests[testSetting]
stds[:] = stdsTests[testSetting]
testPrefix = testPrefixBase + '_method' + str(method) + 'errorFun' + str(model) + '_std' + str(stds.r[0]) + '_shapePen'+ str(shapePenalty)
resultDir = 'results/' + testPrefix + '/'
if not os.path.exists(resultDir + 'imgs/'):
os.makedirs(resultDir + 'results/')
if not os.path.exists(resultDir + 'imgs/'):
os.makedirs(resultDir + 'imgs/')
if not os.path.exists(resultDir + 'imgs/samples/'):
os.makedirs(resultDir + 'imgs/samples/')
## NN individual prediction samples analysis.
if not os.path.exists(resultDir + 'nn_samples/'):
os.makedirs(resultDir + 'nn_samples/')
if not os.path.exists(resultDir + 'az_samples/'):
os.makedirs(resultDir + 'az_samples/')
if not os.path.exists(resultDir + 'hue_samples/'):
os.makedirs(resultDir + 'hue_samples/')
if makeVideo:
if not os.path.exists(resultDir + 'videos/'):
os.makedirs(resultDir + 'videos/')
azimuths = []
elevations = []
vColors = []
lightCoeffs = []
approxProjections = []
likelihoods = []
shapeParams = []
posteriors = []
startTime = time.time()
samplingMode = False
fittedVColorsList = []
fittedRelLightCoeffsList = []
approxProjectionsFittedList = []
approxProjectionsGTList = []
approxProjectionsPredList = []
fittedAzs = np.array([])
fittedElevs = np.array([])
fittedRelLightCoeffs = []
fittedShapeParamsList = []
fittedVColors = []
fittedPosteriorsList = []
revertedSamples = np.array([])
errorFunAzSamples = []
errorFunAzSamplesPred = []
errorFunGaussianAzSamplesPred = []
errorFunGaussianAzSamples = []
if nearestNeighbours:
azimuthNearestNeighboursList = []
elevationNearestNeighboursList = []
vColorNearestNeighboursList = []
shapeParamsNearestNeighboursList = []
lightCoeffsNearestNeighboursList = []
nearestNeighboursErrorFuns = np.array([])
approxProjectionsNearestNeighbourList = []
nearestNeighboursPosteriorsList = []
errorsShapeVerticesSoFar = np.array([])
predictedErrorFuns = np.array([])
predictedPosteriorsList = []
if includeMeanBaseline:
meanBaselineErrorFuns = np.array([])
meanBaselinePosteriorList = []
errorsFittedShapeVertices = np.array([])
fittedErrorFuns = np.array([])
fittedShapeParams = np.array([])
if (computePredErrorFuns and optimizationType == 0) or optimizationType != 0:
for test_i in range(len(testAzsRel)):
print("************** Minimizing loss of prediction " + str(test_i) + "of " + str(len(testAzsRel)))
resultDir = 'results/' + testPrefix + '/results/'
testDir = resultDir + str(test_i) + '/'
if not os.path.exists(testDir):
os.makedirs(testDir)
if not replaceExisting and os.path.isfile(testDir + 'fitted_' + 'shapeParams'+ 'npy.npy'):
continue
bestFittedAz = chAz.r
bestFittedEl = chEl.r
bestModelLik = np.finfo('f').max
bestVColors = chVColors.r
bestLightSHCoeffs = chLightSHCoeffs.r
if useShapeModel:
bestShapeParams = chShapeParams.r
testId = testIds[test_i]
image = skimage.transform.resize(images[test_i], [height,width])
imageSrgb = image.copy()
rendererGT[:] = srgb2lin(image)
# sqeRenderer.imageGT = ch.Ch(image)
negLikModel = -ch.sum(generative_models.LogGaussianModel(renderer=renderer, groundtruth=rendererGT, variances=variances))/numPixels
# negLikModelRobust = -ch.sum(generative_models.LogRobustModel(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variances))/numPixels
# negLikModelRobust = generative_models.NLLRobustModel(renderer=renderer, groundtruth=rendererGT, Q=globalPrior.r*np.ones([height, width]),variances=variances) / numPixels
negLikModelRobust = -ch.sum(generative_models.LogRobustModel(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variances))/ numPixels
# negLikModelRobustSQError = generative_models.NLLRobustSQErrorModel(sqeRenderer=sqeRenderer, Q=globalPrior.r*np.ones([height, width]),
# variances=variances) / numPixels
models = [negLikModel, negLikModelRobust]
stds[:] = stdsTests[testSetting]
if makeVideo:
writer_i = Writer(fps=1, metadata=dict(title='', artist=''), bitrate=1800)
writer_i.setup(figvid, resultDir + 'videos/vid_'+ str(test_i) + '.mp4', dpi=70)
vidImgs = []
if nearestNeighbours:
onenn_i = one_nn(trainImagesR, imageSrgb.ravel())
nearesTrainImage = trainImages[onenn_i]
cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/id' + str(testId) +'_nearestneighbour' + '.png', cv2.cvtColor(np.uint8(nearesTrainImage[0].copy()*255), cv2.COLOR_RGB2BGR))
azimuthNearestNeighbour = trainAzsRel[onenn_i]
elevationNearestNeighbour = trainElevsGT[onenn_i]
vColorNearestNeighbour = trainVColorGT[onenn_i]
if useShapeModel:
shapeParamsNearestNeighbour = trainShapeModelCoeffsGT[onenn_i]
lightCoeffsNearestNeighbour = trainLightCoefficientsGTRel[onenn_i]
azimuthNearestNeighboursList = azimuthNearestNeighboursList + [azimuthNearestNeighbour]
elevationNearestNeighboursList = elevationNearestNeighboursList + [elevationNearestNeighbour]
vColorNearestNeighboursList = vColorNearestNeighboursList + [vColorNearestNeighbour]
shapeParamsNearestNeighboursList = shapeParamsNearestNeighboursList + [shapeParamsNearestNeighbour]
lightCoeffsNearestNeighboursList = lightCoeffsNearestNeighboursList + [lightCoeffsNearestNeighbour]
chAz[:] = azimuthNearestNeighbour
chEl[:] = elevationNearestNeighbour
chVColors[:] = vColorNearestNeighbour
chLightSHCoeffs[:] = lightCoeffsNearestNeighbour
if useShapeModel:
chShapeParams[:] = shapeParamsNearestNeighbour
nearestNeighboursErrorFuns = np.append(nearestNeighboursErrorFuns, errorFun.r)
# nearestNeighboursPosteriorsList = nearestNeighboursPosteriorsList + [np.array(renderer.indices_image==1).copy().astype(np.bool)]
if includeMeanBaseline:
chAz[:] = meanTrainAzimuthRel[test_i]
chEl[:] = meanTrainElevation[test_i]
chVColors[:] = meanTrainVColors[test_i]
chLightSHCoeffs[:] = meanTrainLightCoefficientsGTRel[test_i]
if useShapeModel:
chShapeParams[:] = meanTrainShapeParams[test_i]
# meanBaselinePosteriorList = meanBaselinePosteriorList + [np.array(renderer.indices_image==1).copy().astype(np.bool)[None,:]]
meanBaselineErrorFuns = np.append(meanBaselineErrorFuns, errorFun.r)
stdsSmall = ch.Ch([0.01])
variancesSmall = stdsSmall ** 2
negLikModelRobustSmallStd = -ch.sum(generative_models.LogRobustModel(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variancesSmall))/numPixels
if not os.path.exists(resultDir + 'imgs/test'+ str(test_i) + '/'):
os.makedirs(resultDir + 'imgs/test'+ str(test_i) + '/')
if not os.path.exists(resultDir + 'imgs/test'+ str(test_i) + '/crf/'):
os.makedirs(resultDir + 'imgs/test'+ str(test_i) + '/crf/')
if not os.path.exists(resultDir + 'imgs/crf/'):
os.makedirs(resultDir + 'imgs/crf/')
if not os.path.exists(resultDir + 'imgs/test'+ str(test_i) + '/SH/'):
os.makedirs(resultDir + 'imgs/test'+ str(test_i) + '/SH/')
cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/id' + str(testId) +'_groundtruth' + '.png', cv2.cvtColor(np.uint8(lin2srgb(rendererGT.r.copy())*255), cv2.COLOR_RGB2BGR))
for sample in range(testSamples):
if recognitionType == 0:
#Prediction from (near) ground truth.
color = testVColorGT[test_i] + nearGTOffsetVColor
az = testAzsRel[test_i] + nearGTOffsetRelAz
el = testElevsGT[test_i] + nearGTOffsetEl
lightCoefficientsRel = testLightCoefficientsGTRel[test_i]
if useShapeModel:
shapeParams = testShapeParamsGT[test_i]
elif recognitionType == 1 or recognitionType == 2:
#Recognition estimate:
az = azsPred[test_i]
el = min(max(elevsPred[test_i],radians(1)), np.pi/2-radians(1))
color = vColorsPred[test_i]
lightCoefficientsRel = relLightCoefficientsPred[test_i]
if useShapeModel:
shapeParams = shapeParamsPred[test_i]
chAz[:] = az
chEl[:] = el
chVColors[:] = color
chLightSHCoeffs[:] = lightCoefficientsRel
if useShapeModel:
chShapeParams[:] = shapeParams
rendererRecognition = renderer.r.copy()
cv2.imwrite(resultDir + 'imgs/test' + str(test_i) + '/sample' + str(sample) + '_predicted' + '.png', cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy()) * 255), cv2.COLOR_RGB2BGR))
np.save(testDir + 'pred_' + 'az' + 'npy', az)
np.save(testDir + 'pred_' + 'el'+ 'npy', el)
np.save(testDir + 'pred_' + 'color'+ 'npy', color)
np.save(testDir + 'pred_' + 'lightCoefficientsRel'+ 'npy', lightCoefficientsRel)
np.save(testDir + 'pred_' + 'shapeParams'+ 'npy', shapeParams)
if not os.path.exists(resultDir + 'az_samples/'):
os.makedirs(resultDir + 'az_samples/')
#
# analyzeAz(resultDir + 'az_samples/test' + str(test_i), rendererGT, renderer, chEl.r, chVColors.r, chLightSHCoeffs.r, azsPredictions[test_i], sampleStds=stds.r)
# Dense CRF prediction of labels:
vis_im = np.array(renderer.indices_image==1).copy().astype(np.bool)
bound_im = renderer.boundarybool_image.astype(np.bool)
# if optimizationTypeDescr[optimizationType] != 'optimize':
# import densecrf_model
# #
# # plt.imsave(resultDir + 'imgs/test'+ str(test_i) + '/crf/renderer', renderer.r)
# #
# sample_i = test_i
# segmentation, Q = densecrf_model.crfInference(rendererGT.r, vis_im, bound_im, [0.75,0.25,0.01], resultDir + 'imgs/crf/Q_' + str(sample_i))
# if np.sum(segmentation==0) == 0:
# segmentVColorsList = segmentVColorsList + [color]
# else:
#
# segmentRegion = segmentation==0
# segmentColor = np.median(rendererGT.reshape([-1,3])[segmentRegion.ravel()], axis=0)
# segmentVColorsList = segmentVColorsList + [segmentColor]
#
# optLightSHCoeffs = chLightSHCoeffs.r
if evaluateWithGT:
hdridx = dataEnvMaps[test_i]
envMapTexture = np.zeros([360,180,3])
envMapFound = False
for hdrFile, hdrValues in hdritems:
if hdridx == hdrValues[0]:
envMapCoeffsGT = hdrValues[1]
envMapFilename = hdrFile
try:
envMapTexture = np.array(imageio.imread(envMapFilename))[:,:,0:3]
except:
envMapTexture = np.zeros([360,180,3])
envMapTexture = cv2.resize(src=envMapTexture, dsize=(360,180))
envMapFound = True
break
if not envMapFound:
ipdb.set_trace()
pEnvMapGT = SHProjection(envMapTexture, np.concatenate([testLightCoefficientsGTRel[test_i][:,None], testLightCoefficientsGTRel[test_i][:,None], testLightCoefficientsGTRel[test_i][:,None]], axis=1))
approxProjectionGT = np.sum(pEnvMapGT, axis=(2,3))
approxProjectionsGTList = approxProjectionsGTList + [approxProjectionGT[None,:]]
cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/SH/' + str(hdridx) + '_GT.jpeg' , 255*np.sum(pEnvMapGT, axis=3)[:,:,[2,1,0]])
pEnvMapPred = SHProjection(envMapTexture, np.concatenate([relLightCoefficientsPred[test_i][:,None], relLightCoefficientsPred[test_i][:,None], relLightCoefficientsPred[test_i][:,None]], axis=1))
approxProjectionPred = np.sum(pEnvMapPred, axis=(2,3))
approxProjectionsPredList = approxProjectionsPredList + [approxProjectionPred[None,:]]
predictedPosteriorsList = predictedPosteriorsList + [np.array(renderer.indices_image==1).copy().astype(np.bool)[None,:]]
if nearestNeighbours:
pEnvMap = SHProjection(envMapTexture, np.concatenate([lightCoeffsNearestNeighbour.ravel()[:,None], lightCoeffsNearestNeighbour.ravel()[:,None], lightCoeffsNearestNeighbour.ravel()[:,None]], axis=1))
approxProjectionNearestNeighbour = np.sum(pEnvMap, axis=(2,3))
approxProjectionsNearestNeighbourList = approxProjectionsNearestNeighbourList + [approxProjectionNearestNeighbour[None,:]]
cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/SH/' + str(hdridx) + '_NearestNeighbour.jpeg' , 255*np.sum(pEnvMap, axis=3)[:,:,[2,1,0]])
cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/SH/' + str(hdridx) + '_Pred.jpeg' , 255*np.sum(pEnvMapPred, axis=3)[:,:,[2,1,0]])
## RecognitionType =2 : Use samples from neural net to explore the space better.
if recognitionType == 2:
errorFunAzSamples = []
errorFunAzSamplesPred = []
errorFunGaussianAzSamplesPred = []
errorFunGaussianAzSamples = []
if not os.path.exists(resultDir + 'az_samples/test' + str(test_i) + '/'):
os.makedirs(resultDir + 'az_samples/test' + str(test_i) + '/')
if not os.path.exists(resultDir + 'hue_samples/test' + str(test_i) + '/'):
os.makedirs(resultDir + 'hue_samples/test' + str(test_i) + '/')
samplingMode = True
cv2.imwrite(resultDir + 'az_samples/test' + str(test_i) + '_gt.png', cv2.cvtColor(np.uint8(lin2srgb(rendererGT.r.copy())*255), cv2.COLOR_RGB2BGR))
# stds[:] = 0.1
#
# azSampleStdev = np.std(azsPredictions[test_i])
azSampleStdev = np.sqrt(-np.log(np.min([np.mean(sinAzsPredSamples[test_i])**2 + np.mean(cosAzsPredSamples[test_i])**2,1])))
predAz = chAz.r
numSamples = max(int(np.ceil(azSampleStdev*180./(np.pi*25.))),1)
azSamples = np.linspace(0, azSampleStdev, numSamples)
totalAzSamples = predAz + np.concatenate([azSamples, -azSamples[1:]])
sampleAzNum = 0
# model = 1
# errorFun = models[model]
bestPredAz = chAz.r
# bestPredEl = chEl.r
bestPredEl = min(max(chEl.r.copy(),radians(1)), np.pi/2-radians(1))
bestPredVColors = chVColors.r.copy()
bestPredLightSHCoeffs = chLightSHCoeffs.r.copy()
if useShapeModel:
bestPredShapeParams = chShapeParams.r.copy()
bestModelLik = np.finfo('f').max
bestPredModelLik = np.finfo('f').max
# analyzeAz(resultDir + 'az_samples/test' + str(test_i) +'/pre' , rendererGT, renderer, chEl.r, chVColors.r, chLightSHCoeffs.r, azsPredictions[test_i], sampleStds=stds.r)
# analyzeHue(resultDir + 'hue_samples/test' + str(test_i) +'/pre', rendererGT, renderer, chEl.r, chAz.r, chLightSHCoeffs.r, vColorsPredSamples[test_i], sampleStds=stds.r)
for sampleAz in totalAzSamples:
iterat = 0
sampleAzNum += 1
chAz[:] = sampleAz
# chEl[:] = elsample
print("Minimizing first step")
model = 1
errorFun = models[model]
# method = 5
chLightSHCoeffs[:] = lightCoefficientsRel
chVColors[:] = color
#Todo test with adding chEl.
free_variables = [chLightSHCoeffs]
options={'disp':False, 'maxiter':2}
errorFunAzSamplesPred = errorFunAzSamplesPred + [errorFun.r]
errorFunGaussianAzSamplesPred = errorFunGaussianAzSamplesPred + [models[0].r]
# analyzeAz(resultDir + 'az_samples/test' + str(test_i) +'/azNum' + str(sampleAzNum), rendererGT, renderer, chEl.r, chVColors.r, chLightSHCoeffs.r, azsPredictions[test_i], sampleStds=stds.r)
if models[1].r.copy() < bestPredModelLik:
print("Found best angle!")
# bestPredModelLik = errorFun.r.copy()
bestPredModelLik = models[1].r.copy()
bestPredAz = sampleAz
bestPredEl = min(max(chEl.r.copy(),radians(1)), np.pi/2-radians(1))
bestPredVColors = chVColors.r.copy()
bestPredLightSHCoeffs = chLightSHCoeffs.r.copy()
if useShapeModel:
bestPredShapeParams = chShapeParams.r.copy()
bestModelLik = errorFun.r.copy()
# cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/best_predSample' + str(numPredSamples) + '.png', cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy())*255), cv2.COLOR_RGB2BGR))
errorFunAzSamples = errorFunAzSamples + [errorFun.r]
errorFunGaussianAzSamples = errorFunGaussianAzSamples + [models[0].r]
color = bestPredVColors
lightCoefficientsRel = bestPredLightSHCoeffs
az = bestPredAz
if useShapeModel:
shapeParams = bestShapeParams
# previousAngles = np.vstack([previousAngles, np.array([[azsample, elsample],[chAz.r.copy(), chEl.r.copy()]])])
samplingMode = False
chAz[:] = az
chAz[:] = az
chEl[:] = min(max(el,radians(1)), np.pi/2-radians(1))
chVColors[:] = color.copy()
chLightSHCoeffs[:] = lightCoefficientsRel.copy()
if useShapeModel:
chShapeParams[:] = shapeParams.copy()
cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/best_sample' + '.png', cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy())*255), cv2.COLOR_RGB2BGR))
predictedErrorFuns = np.append(predictedErrorFuns, errorFun.r)
# analyzeAz(resultDir + 'az_samples/test' + str(test_i) + '_samples', rendererGT, renderer, chEl.r, color, lightCoefficientsRel,
# azsPredictions[test_i], sampleStds=stds.r)
iterat = 0
sampleAzNum = 0
sys.stdout.flush()
## Finally: Do Fitting!
ignore = False
reverted = False
if optimizationTypeDescr[optimizationType] == 'optimize':
#Get VCOLOR using CRF:
print("** Minimizing from initial predicted parameters. **")
globalPrior[:] = 0.9
errorFun = models[model]
vColor = color
if useCRFOcclusionPred:
priorProbs = [0.75, 0.25, 0.01]
test_sample = test_i
Q = recognition_models.segmentCRFModel(renderer=renderer, groundtruth=rendererGT,
priorProbs=priorProbs, resultDir=resultDir,
test_i=test_sample)
getColorFromCRF = False
segmentation = np.argmax(Q.r, axis=0).reshape(renderer.r.shape[:2])
if np.sum(segmentation == 0) == 0:
ignore = True
vColor = color
else:
segmentRegion = segmentation == 0
vColor = np.median(rendererGT.reshape([-1, 3])[segmentRegion.ravel()], axis=0) * 1.4
vColor = vColor / max(np.max(vColor), 1.)
vis_im = np.array(renderer.indices_image == 1).copy().astype(np.bool)
chVColors[:] = vColor
colorRegion = np.all(renderer.r != 0,axis=2).ravel() * segmentRegion.ravel() * vis_im.ravel()
if colorRegion.sum() == 0:
ignore = True
else:
vColor = vColor * np.median(rendererGT.r.reshape([-1, 3])[colorRegion] / renderer.r.reshape([-1, 3])[colorRegion])
if not ignore:
color = vColor
chVColors[:] = vColor
## Bayes Optimization
### Bayesian Optimization tests.
if useCRFOcclusionPred:
free_variables = [ chAz, chEl]
free_variables_app_light = [ chLightSHCoeffs]
azSampleStdev = np.sqrt(-np.log(np.min([np.mean(np.sin(azsPredictions)[test_i])**2 + np.mean(np.cos(azsPredictions)[test_i])**2,1])))
azMean = azsPred[test_i]
elMean = elevsPred[test_i]
elSampleStdev = np.sqrt(-np.log(np.min([np.mean(np.sin(elevsPredictions)[test_i])**2 + np.mean(np.cos(elevsPredictions)[test_i])**2,1])))
elBound = np.min(elSampleStdev, np.pi/4)
elMean = min(max(elevsPred[test_i],radians(1)), np.pi/2-radians(1))
shapeStdevs = np.min(np.vstack([2*np.sqrt(np.cov(shapeParamsPredSamples[test_i]).diagonal()), np.ones([latentDim])*3.5]), 0)
azBound = min(azSampleStdev, np.pi)
optBounds = [(azMean - azBound, azMean + azBound), (min(max(elMean - elBound,0), np.pi/2), min(max(elMean + elBound,0), np.pi/2))] + [(max(shapeParams[shape_i] - shapeStdevs[shape_i],-3.5), min(shapeParams[shape_i] + shapeStdevs[shape_i],3.5)) for shape_i in range(len(shapeStdevs))]
optBounds = [(azMean - azBound, azMean + azBound), (min(max(elMean - elBound,0), np.pi/2), min(max(elMean + elBound,0), np.pi/2))]
numSamples = max(int(np.ceil(2*azSampleStdev*180./(np.pi*20.))),1)
# azSamples = np.linspace(0, azSampleStdev, numSamples)
azSamples = np.linspace(0, np.pi, 10)
numSamples = max(int(np.ceil(2*elSampleStdev*180./(np.pi*10.))),1)
elSamples = np.linspace(0, 2*elSampleStdev, numSamples)
elSamples = chEl.r + np.concatenate([elSamples, -elSamples[1:]])
totalAzSamples = chAz.r + np.concatenate([azSamples, -azSamples[1:]])
if len(totalAzSamples) == 0:
totalAzSamples = np.array([chAz.r])
if azSampleStdev < 20*np.pi/180:
totalAzSamples = np.array([chAz.r])
totalElSamples = np.array([elSample for elSample in elSamples if elSample > 0 and elSample < np.pi/2])
if len(totalElSamples) == 0:
totalElSamples = np.array([chEl.r])
totalElSamples = np.array([chEl.r])
# totalElSamples = np.array([testElevsGT[test_i]])
# chShapeParams[:] = testShapeParamsGT[test_i]
# chLightSHCoeffs[:] = testLightCoefficientsGTRel[test_i]
# chVColors[:] = testVColorGT[test_i]
# vColor = testVColorGT[test_i]
# lightCoefficientsRel = testLightCoefficientsGTRel[test_i]
totalSamples = np.meshgrid(totalAzSamples, totalElSamples)
totalSamples = np.hstack([totalSamples[0].reshape([-1,1]),totalSamples[1].reshape([-1,1])])
import diffrender_opt
# objFun = diffrender_opt.opendrObjectiveFunction(errorFun, free_variables)
# res = diffrender_opt.bayesOpt(objFun, objBounds)
methodOpt = methods[method]
stds[:] = 0.1
# crfObjFun = diffrender_opt.opendrObjectiveFunctionCRF(free_variables, rendererGT, renderer, vColor, chVColors, chLightSHCoeffs, lightCoefficientsRel, free_variables_app_light, resultDir, test_i, stds.r, methodOpt, False, False)
cv2.imwrite(
resultDir + 'imgs/test' + str(test_i) + '/sample' + str(sample) + '_predictedCRF' + '.png',
cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy()) * 255), cv2.COLOR_RGB2BGR))
# errorFunCRF = -ch.sum(generative_models.LogCRFModel(renderer=renderer, groundtruth=rendererGT, Q=Q.r.reshape([3,height, width]),
# variances=variances)) / numPixels
errorFunNLLCRF = generative_models.NLLCRFModel(renderer=renderer, groundtruth=rendererGT, Q=Q.r.reshape([3,height, width]),
variances=variances) / numPixels
# errorFun = models[1]
# samplesEvals = []
# for azSample in totalSamples[:,0]:
# chAz[:] = azSample
# samplesEvals = samplesEvals + [errorFunNLLCRF.r]
#
# samplesEvals = np.array(samplesEvals)
#
# # samplesEvals = crfObjFun(totalSamples)
#
# #Start Optimiaztion
#
# # res = diffrender_opt.bayesOpt(crfObjFun, totalSamples, samplesEvals, optBounds)
# #
# # optAz = res.x_opt[0]
# # optEl = res.x_opt[1]
# # # optShapeParams = res.x_opt[2:]
# bestSample = np.argmin(samplesEvals)
# optAz = totalSamples[bestSample][0]
# optEl = totalSamples[bestSample][1]
#
# azfig = plt.figure()
# ax = azfig.add_subplot(111)
# ax.plot(np.mod(totalSamples[:,0], 2*np.pi), samplesEvals, 'ro')
# y1, y2 = ax.get_ylim()
# ax.vlines(testAzsRel[test_i], y1, y2, 'g', label = 'Groundtruth Az')
# ax.vlines(np.mod(optAz, 2*np.pi), y1, y2, 'r', label = 'Optimum CRF Az')
# ax.vlines(np.mod(az, 2*np.pi), y1, y2, 'b', label = 'Recognition Az')
# ax.set_xlim((0, np.pi*2))
# legend = ax.legend()
# azfig.savefig(resultDir + 'imgs/test' + str(test_i) + '/samplesAzPlot_' + str(int(azSampleStdev*180/np.pi)) + '.png')
# plt.close(azfig)
#
# legend = ax.legend()
#
# chAz[:] = optAz
# chEl[:] = optEl
# #### Local search:
#
# cv2.imwrite(
# resultDir + 'imgs/test' + str(test_i) + '/sample' + str(sample) + '_optimizedCRFPose' + '.png',
# cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy()) * 255), cv2.COLOR_RGB2BGR))
free_variables = [chShapeParams, chAz, chEl, chLightSHCoeffs, chVColors]
#
# stds[:] = 0.03
# shapePenalty = 0.0001
stds[:] = stdsTests[testSetting]
shapePenalty = 0.0001
options={'disp':False, 'maxiter':100}
# options={'disp':False, 'maxiter':2}
minimizingShape = True
# method = 5
ch.minimize({'raw': errorFun }, bounds=None, method=methods[method], x0=free_variables, callback=cb, options=options)
maxShapeSize = 4
largeShapeParams = np.abs(chShapeParams.r) > maxShapeSize
if np.any(largeShapeParams) or chEl.r > np.pi/2 + radians(10) or chEl.r < -radians(15) or np.linalg.norm(chVertices.r - chVerticesMean) >= 3.5:
print("Warning: found large shape parameters to fix!")
reverted = True
# chShapeParams[largeShapeParams] = np.sign(chShapeParams.r[largeShapeParams])*maxShapeSize
cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/it1'+ '.png', cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy())*255), cv2.COLOR_RGB2BGR))
# if reverted:
# cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/reverted1'+ '.png', cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy())*255), cv2.COLOR_RGB2BGR))
# reverted = False
# chAz[:] = az
# chEl[:] = min(max(el,radians(1)), np.pi/2-radians(1))
# chVColors[:] = color.copy()
# chLightSHCoeffs[:] = lightCoefficientsRel.copy()
# if useShapeModel:
# chShapeParams[:] = shapeParams.copy()
#
# cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/it1'+ '.png', cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy())*255), cv2.COLOR_RGB2BGR))
#
# free_variables = [chVColors, chLightSHCoeffs]
# stds[:] = 0.01
# shapePenalty = 0.0
# options={'disp':False, 'maxiter':50}
# # options={'disp':False, 'maxiter':2}
# # free_variables = [chShapeParams ]
# minimizingShape = False
# getColorFromCRF = False
# ch.minimize({'raw': errorFunNLLCRF }, bounds=None, method=methods[method], x0=free_variables, callback=cb, options=options)
# largeShapeParams = np.abs(chShapeParams.r) > maxShapeSize
# if np.any(largeShapeParams) or chEl.r > np.pi/2 + radians(10) or chEl.r < -radians(15) or np.linalg.norm(chVertices.r - chVerticesMean) >= 3.5:
# print("Warning: found large shape parameters to fix!")
# reverted = True
# cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/reverted2'+ '.png', cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy())*255), cv2.COLOR_RGB2BGR))
#
# cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/it2'+ '.png', cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy())*255), cv2.COLOR_RGB2BGR))
else:
## *** If not using CRF ***
free_variables = [chShapeParams, chAz, chEl, chLightSHCoeffs, chVColors]
#
# shapePenalty = 0.0001
stds[:] = stdsTests[testSetting]
shapePenalty = shapePenaltyTests[testSetting]
options = {'disp': False, 'maxiter': maxOptIters[testSetting]}
# options={'disp':False, 'maxiter':2}
minimizingShape = True
errorFun = models[1]
# errorFunFast = generative_models.NLLRobustModel(renderer=renderer, groundtruth=rendererGT, Q=globalPrior.r*np.ones([height, width]),
# variances=variances) / numPixels
#
# plt.imsave('errorscolors1.png', sqeRenderer.render_image)
#
# chVColors[:] = testVColorGT[test_i] + nearGTOffsetVColor
# chAz[:]= testAzsRel[test_i] + nearGTOffsetRelAz
# chEl[:] = testElevsGT[test_i] + nearGTOffsetEl
# chLightSHCoeffs[:] = testLightCoefficientsGTRel[test_i]
# chShapeParams[:] = testShapeParamsGT[test_i]
# # chLightSHCoeffs[:] = 0
# # chLightSHCoeffs[2] = 0.5
# # chLightSHCoeffs[0] = 1
# plt.imsave('render.png', renderer.r)
#
# imageGT = renderer.r.copy()
#
# chEl[:] = chEl[:].r + 0.2
#
# SQError = (rendererGT.r - renderer)**2
#
# sqeRenderer.imageGT = ch.Ch(imageGT)
#
# plt.imsave('errors.png', sqeRenderer.r)
# plt.imsave('errorscolors.png', sqeRenderer.render_image)
# # plt.imsave('renderer2.png', sqeRenderer.render_dedx)
# # plt.imsave('errorsdx.png', sqeRenderer.render_dedx, cmap=matplotlib.cm.coolwarm, vmin=-1, vmax=1)
# drAz = np.sum(sqeRenderer.dr_wrt(chEl).toarray().reshape([height,width, 3]), axis=2)
#
# drAzErrorFun = -pixelLikelihoodRobustSQErrorCh.dr_wrt(chEl).toarray().reshape([height, width])
#
# plt.ioff()
# fig = plt.figure()
# ax = fig.add_subplot(111, aspect='equal')
# ims = ax.imshow(drAz, cmap=matplotlib.cm.coolwarm, vmin=-4, vmax=4)
# fig.colorbar(ims)
# fig.savefig('drAzfig.png', bbox_inches='tight')
# plt.close(fig)
# plt.ioff()
#
# fig = plt.figure()
# ax = fig.add_subplot(111, aspect='equal')
# ims = ax.imshow(drAzErrorFun, cmap=matplotlib.cm.coolwarm, vmin=-300, vmax=300)
# fig.colorbar(ims)
# fig.savefig('drAzfigErrFun.png', bbox_inches='tight')
# plt.close(fig)
#
#
# plt.imsave('errorsdr_wrtaz.png', drAz, cmap=matplotlib.cm.coolwarm, vmin=drAz.min(), vmax=drAz.max())
#
# plt.imsave('SQerrorsdr_wrtaz.png', SQError.dr_wrt(chEl).toarray().reshape([height,width, 3]), cmap=matplotlib.cm.coolwarm, vmin=-4, vmax=4)
# plt.imsave('errorsgt.png', sqeRenderer.imageGT.r)
#
# sys.exit()
ch.minimize({'raw': errorFun}, bounds=None, method=methods[method], x0=free_variables, callback=cb, options=options)
maxShapeSize = 4
largeShapeParams = np.abs(chShapeParams.r) > maxShapeSize
if np.any(largeShapeParams) or chEl.r > np.pi / 2 + radians(10) or chEl.r < -radians(15) or np.linalg.norm(
chVertices.r - chVerticesMean) >= 3.5:
print("Warning: found large shape parameters to fix!")
reverted = True
# chShapeParams[largeShapeParams] = np.sign(chShapeParams.r[largeShapeParams])*maxShapeSize
cv2.imwrite(resultDir + 'imgs/test' + str(test_i) + '/it1' + '.png',
cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy()) * 255), cv2.COLOR_RGB2BGR))
# if reverted:
# cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/reverted1'+ '.png', cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy())*255), cv2.COLOR_RGB2BGR))
# reverted = False
# chAz[:] = az
# chEl[:] = min(max(el,radians(1)), np.pi/2-radians(1))
# chVColors[:] = color.copy()
# chLightSHCoeffs[:] = lightCoefficientsRel.copy()
# if useShapeModel:
# chShapeParams[:] = shapeParams.copy()
# #
# cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/it1'+ '.png', cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy())*255), cv2.COLOR_RGB2BGR))
#
# free_variables = [chVColors]
# stds[:] = 0.01
# shapePenalty = 0.0
# options = {'disp': False, 'maxiter': 50}
# # options={'disp':False, 'maxiter':2}
# # free_variables = [chShapeParams ]
# minimizingShape = False
# getColorFromCRF = False
# ch.minimize({'raw': errorFun}, bounds=None, method=methods[method], x0=free_variables, callback=cb, options=options)
#
# largeShapeParams = np.abs(chShapeParams.r) > maxShapeSize
# if np.any(largeShapeParams) or chEl.r > np.pi/2 + radians(10) or chEl.r < -radians(15) or np.linalg.norm(chVertices.r - chVerticesMean) >= 3.5:
# print("Warning: found large shape parameters to fix!")
# reverted = True
# cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/reverted2'+ '.png', cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy())*255), cv2.COLOR_RGB2BGR))
#
# cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/it2'+ '.png', cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy())*255), cv2.COLOR_RGB2BGR))
if not reverted:
bestFittedAz = chAz.r.copy()
bestFittedEl = min(max(chEl.r.copy(),radians(1)), np.pi/2-radians(1))
bestVColors = chVColors.r.copy()
bestLightSHCoeffs = chLightSHCoeffs.r.copy()
if useShapeModel:
bestShapeParams = chShapeParams.r.copy()
else:
revertedSamples = np.append(revertedSamples, test_i)
bestFittedAz = az
bestFittedEl = min(max(el,radians(1)), np.pi/2-radians(1))
bestVColors = color.copy()
bestLightSHCoeffs = lightCoefficientsRel.copy()
if useShapeModel:
bestShapeParams = shapeParams.copy()
chAz[:] = bestFittedAz
chEl[:] = min(max(bestFittedEl,radians(1)), np.pi/2-radians(1))
chVColors[:] = bestVColors
chLightSHCoeffs[:] = bestLightSHCoeffs
if useShapeModel:
chShapeParams[:] = bestShapeParams
if makeVideo:
if len(vidImgs) > 0:
im_ani = animation.ArtistAnimation(figvid, vidImgs, interval=2000, repeat_delay=5000, repeat=True, blit=False)
im_ani.save(resultDir + 'videos/fitting_'+ str(testSet[test_i]) + '.mp4', fps=None, writer=writer, codec='mp4')
vidImgs[-1][7].remove()
writer_i.finish()
cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/fitted'+ '.png',cv2.cvtColor(np.uint8(lin2srgb(renderer.r.copy())*255), cv2.COLOR_RGB2BGR))
np.save(testDir + 'fitted_' + 'az' + 'npy', chAz.r)
np.save(testDir + 'fitted_' + 'el'+ 'npy', chEl.r)
np.save(testDir + 'fitted_' + 'color'+ 'npy', chVColors.r)
np.save(testDir + 'fitted_' + 'lightCoefficientsRel'+ 'npy', chLightSHCoeffs.r)
np.save(testDir + 'fitted_' + 'shapeParams'+ 'npy', chShapeParams.r)
if optimizationTypeDescr[optimizationType] != 'predict':
if evaluateWithGT:
fittedErrorFuns = np.append(fittedErrorFuns, bestModelLik)
fittedAzs = np.append(fittedAzs, bestFittedAz)
fittedElevs = np.append(fittedElevs, bestFittedEl)
fittedVColorsList = fittedVColorsList + [bestVColors]
fittedRelLightCoeffsList = fittedRelLightCoeffsList + [bestLightSHCoeffs]
if useShapeModel:
fittedShapeParamsList = fittedShapeParamsList + [bestShapeParams]
pEnvMap = SHProjection(envMapTexture, np.concatenate([bestLightSHCoeffs[:,None], bestLightSHCoeffs[:,None], bestLightSHCoeffs[:,None]], axis=1))
approxProjectionFitted = np.sum(pEnvMap, axis=(2,3))
approxProjectionsFittedList = approxProjectionsFittedList + [approxProjectionFitted[None,:]]
cv2.imwrite(resultDir + 'imgs/test'+ str(test_i) + '/SH/' + str(hdridx) + '_Fitted.jpeg' , 255*np.sum(pEnvMap, axis=3)[:,:,[2,1,0]])
#Best std for posterior recognition.
stds[:] = 0.1
vis_im = np.array(renderer.indices_image==1).copy().astype(np.bool)
post = generative_models.layerPosteriorsRobustCh(rendererGT, renderer, vis_im, 'MASK', globalPrior, variances)[0].r>0.5
# postSqerror = generative_models.layerPosteriorsRobustSQErrorCh(sqeRenderer, np.array([]), 'MASK', globalPrior, variances)[0].r>0.5
fittedPosteriorsList = fittedPosteriorsList + [post[None,:]]
stds[:] = stdsTests[testSetting]
plt.imsave(resultDir + 'imgs/test'+ str(test_i) + '/' + str(hdridx) + '_Outlier.jpeg', np.tile(post.reshape(shapeIm[0],shapeIm[1],1), [1,1,3]).astype(np.float32))
#Every now and then (or after the final test case), produce plots to keep track of work accross different levels of occlusion.
experimentDic = {'model':model, 'method':method, 'shapePenalty':shapePenalty, 'stds':stds.r, 'dataIds': testIds, 'gtPrefix': gtPrefix, 'trainPrefixPose': trainPrefixPose, 'trainPrefixVColor': trainPrefixVColor,
'trainPrefixLightCoeffs': trainPrefixLightCoeffs, 'trainPrefixShapeParams': trainPrefixShapeParams,
'trainModelsDirAppLight': trainModelsDirAppLight, 'experimentDir': experimentDir, 'testSet': testSet,
'methodsPred': methodsPred, 'testOcclusions': testOcclusions, 'likelihoods': likelihoods,
'testPrefixBase': testPrefixBase, 'parameterRecognitionModels': parameterRecognitionModels, 'azimuths': azimuths,
'elevations': elevations, 'vColors': vColors, 'lightCoeffs': lightCoeffs, 'shapeParams': shapeParams, 'evaluateWithGT':evaluateWithGT}
with open(resultDir + 'experiment.pickle', 'wb') as pfile:
pickle.dump(experimentDic, pfile)
if evaluateWithGT:
if np.mod(test_i+1,100) == 0 or test_i + 1 >= len(testSet):
if approxProjectionsPredList:
approxProjectionsPred = np.vstack(approxProjectionsPredList)
if approxProjectionsGTList:
approxProjectionsGT = np.vstack(approxProjectionsGTList)
if predictedPosteriorsList:
predictedPosteriors = np.vstack(predictedPosteriorsList)
if nearestNeighbours:
approxProjectionsNearestNeighbours = np.vstack(approxProjectionsNearestNeighbourList)
if optimizationTypeDescr[optimizationType] != 'predict':
if fittedVColorsList:
fittedVColors = np.vstack(fittedVColorsList)
if fittedRelLightCoeffsList:
fittedRelLightCoeffs = np.vstack(fittedRelLightCoeffsList)
if approxProjectionsFittedList:
approxProjectionsFitted = np.vstack(approxProjectionsFittedList)
if fittedShapeParamsList and useShapeModel:
fittedShapeParams = np.vstack(fittedShapeParamsList)
if fittedPosteriorsList:
fittedPosteriors = np.vstack(fittedPosteriorsList)
if nearestNeighbours:
azimuthNearestNeighbours = np.concatenate(azimuthNearestNeighboursList)
elevationNearestNeighbours = np.concatenate(elevationNearestNeighboursList)
vColorNearestNeighbours = np.vstack(vColorNearestNeighboursList)
if useShapeModel:
shapeParamsNearestNeighbours = np.vstack(shapeParamsNearestNeighboursList)
lightCoeffsNearestNeighbours = np.vstack(lightCoeffsNearestNeighboursList)
# nearestNeighboursPosteriors = np.vstack(nearestNeighboursPosteriorsList)
if optimizationTypeDescr[optimizationType] != 'predict':
numFitted = range(len(fittedAzs))
else:
numFitted = range(test_i+1)
if includeMeanBaseline:
# meanBaselinePosteriors = np.vstack(meanBaselinePosteriorList)
azimuths = [meanTrainAzimuthRel]
elevations= [meanTrainElevation]
vColors= [meanTrainVColors]
lightCoeffs= [meanTrainLightCoefficientsGTRel]
if useShapeModel:
shapeParams = [meanTrainShapeParams]
else:
shapeParams = [None]
approxProjections= [meanTrainEnvMapProjections]
likelihoods = [meanBaselineErrorFuns]
segmentations = [None]
ipdb.set_trace()
else:
azimuths = [None]
elevations= [None]
vColors= [None]
lightCoeffs= [None]
approxProjections= [None]
shapeParams = [None]
likelihoods = [None]
segmentations = [None]
if nearestNeighbours:
azimuths = azimuths + [azimuthNearestNeighbours]
elevations = elevations + [elevationNearestNeighbours]
vColors = vColors + [vColorNearestNeighbours]
lightCoeffs = lightCoeffs + [lightCoeffsNearestNeighbours]
approxProjections = approxProjections + [approxProjectionsNearestNeighbours]
if useShapeModel:
shapeParams = shapeParams + [shapeParamsNearestNeighbours]
else:
shapeParams = shapeParams + [None]
segmentations = segmentations + [None]
else:
azimuths = azimuths + [None]
elevations= elevations + [None]
vColors= vColors + [None]
lightCoeffs= lightCoeffs + [None]
approxProjections = approxProjections + [None]
likelihoods = likelihoods + [None]
shapeParams = shapeParams + [None]
segmentations = segmentations + [None]
azimuths = azimuths + [azsPred]
elevations = elevations + [elevsPred]
vColors = vColors + [vColorsPred]
lightCoeffs = lightCoeffs + [relLightCoefficientsPred]
approxProjections = approxProjections + [approxProjectionsPred]
likelihoods = likelihoods + [predictedErrorFuns]
segmentations = segmentations + [predictedPosteriors]
if useShapeModel:
shapeParams = shapeParams + [shapeParamsPred]
else:
shapeParams = shapeParams + [None]
if optimizationTypeDescr[optimizationType] != 'predict':
azimuths = azimuths + [fittedAzs]
elevations = elevations + [fittedElevs]
vColors = vColors + [fittedVColors]
lightCoeffs = lightCoeffs + [fittedRelLightCoeffs]
approxProjections = approxProjections + [approxProjectionsFitted]
if useShapeModel:
shapeParams = shapeParams + [fittedShapeParams]
else:
shapeParams = shapeParams + [None]
likelihoods = likelihoods + [fittedErrorFuns]
segmentations = segmentations + [fittedPosteriors]
else:
azimuths = azimuths + [None]
elevations = elevations + [None]
vColors = vColors + [None]
lightCoeffs = lightCoeffs + [None]
approxProjections = approxProjections + [None]
likelihoods = likelihoods + [None]
segmentations = segmentations + [None]
shapeParams = shapeParams + [None]
errorsPosePredList, errorsLightCoeffsList, errorsShapeParamsList, errorsShapeVerticesList, errorsEnvMapList, errorsLightCoeffsCList, errorsVColorsEList, errorsVColorsCList, errorsVColorsSList, errorsSegmentationList \
= computeErrors(numFitted, azimuths, testAzsRel, elevations, testElevsGT, vColors, testVColorGT, lightCoeffs, testLightCoefficientsGTRel, approxProjections, approxProjectionsGT, shapeParams, testShapeParamsGT, useShapeModel, chShapeParams, chVertices, segmentations, masksGT)
meanAbsErrAzsList, meanAbsErrElevsList, meanErrorsLightCoeffsList, meanErrorsShapeParamsList, meanErrorsShapeVerticesList, meanErrorsLightCoeffsCList, meanErrorsEnvMapList, meanErrorsVColorsEList, meanErrorsVColorsCList, meanErrorsVColorsCList, meanErrorsSegmentationList \
= computeErrorAverages(np.mean, numFitted, useShapeModel, errorsPosePredList, errorsLightCoeffsList, errorsShapeParamsList, errorsShapeVerticesList, errorsEnvMapList, errorsLightCoeffsCList, errorsVColorsEList, errorsVColorsCList, errorsVColorsSList, errorsSegmentationList)
medianAbsErrAzsList, medianAbsErrElevsList,medianErrorsLightCoeffsList, medianErrorsShapeParamsList, medianErrorsShapeVerticesList, medianErrorsLightCoeffsCList, medianErrorsEnvMapList, medianErrorsVColorsEList, medianErrorsVColorsCList, medianErrorsVColorsCList, medianErrorsSegmentationList \
= computeErrorAverages(np.median, numFitted, useShapeModel, errorsPosePredList, errorsLightCoeffsList, errorsShapeParamsList, errorsShapeVerticesList, errorsEnvMapList, errorsLightCoeffsCList, errorsVColorsEList, errorsVColorsCList, errorsVColorsSList, errorsSegmentationList)
#Write statistics to file.
with open(resultDir + 'performance.txt', 'w') as expfile:
for method_i in range(len(azimuths)):
# expfile.write(str(z))
expfile.write("Mean Azimuth Error " + methodsPred[method_i] + " " + str(meanAbsErrAzsList) + '\n')
expfile.write("Mean Elevation Error " + methodsPred[method_i] + " " + str(meanAbsErrElevsList[method_i])+ '\n')
expfile.write("Mean SH Components Error " + methodsPred[method_i] + " " + str(meanErrorsLightCoeffsList[method_i])+ '\n')
expfile.write("Mean SH Components Error " + methodsPred[method_i] + " " + str(meanErrorsLightCoeffsCList[method_i])+ '\n')
expfile.write("Mean Vertex Colors Error E " + methodsPred[method_i] + " " + str(meanErrorsVColorsEList[method_i])+ '\n')
expfile.write("Mean Vertex Colors Error C " + methodsPred[method_i] + " " + str(meanErrorsVColorsCList[method_i])+ '\n')
expfile.write("Mean Vertex Colors Error S " + methodsPred[method_i] + " " + str(meanErrorsVColorsCList[method_i]) + '\n')
expfile.write("Mean Shape Error " + methodsPred[method_i] + " " + str(meanErrorsShapeParamsList[method_i])+ '\n')
expfile.write("Mean Shape Vertices Error " + methodsPred[method_i] + " " + str(meanErrorsShapeVerticesList[method_i])+ '\n')
expfile.write("Mean Segmentation Error " + methodsPred[method_i] + " " + str(meanErrorsSegmentationList[method_i])+ '\n\n')
#Write statistics to file.
with open(resultDir + 'median-performance.txt', 'w') as expfile:
for method_i in range(len(azimuths)):
# expfile.write(str(z))
expfile.write("Median Azimuth Error " + methodsPred[method_i] + " " + str(medianAbsErrAzsList) + '\n')
expfile.write("Median Elevation Error " + methodsPred[method_i] + " " + str(medianAbsErrElevsList[method_i])+ '\n')
expfile.write("Median SH Components Error " + methodsPred[method_i] + " " + str(medianErrorsLightCoeffsList[method_i])+ '\n')
expfile.write("Median SH Components Error " + methodsPred[method_i] + " " + str(medianErrorsLightCoeffsCList[method_i])+ '\n')
expfile.write("Median Vertex Colors Error E " + methodsPred[method_i] + " " + str(medianErrorsVColorsEList[method_i])+ '\n')
expfile.write("Median Vertex Colors Error C " + methodsPred[method_i] + " " + str(medianErrorsVColorsCList[method_i])+ '\n')
expfile.write("Mean Vertex Colors Error S " + methodsPred[method_i] + " " + str(meanErrorsVColorsCList[method_i]) + '\n')
expfile.write("Median Shape Error " + methodsPred[method_i] + " " + str(medianErrorsShapeParamsList[method_i])+ '\n')
expfile.write("Median Shape Vertices Error " + methodsPred[method_i] + " " + str(medianErrorsShapeVerticesList[method_i])+ '\n')
expfile.write("Median Segmentation Error " + methodsPred[method_i] + " " + str(medianErrorsSegmentationList[method_i])+ '\n\n')
if not os.path.exists(resultDir + 'stats/'):
os.makedirs(resultDir + 'stats/')
with open(resultDir + 'stats/' + 'performance' + str(test_i) + '.txt', 'w') as expfile:
for method_i in range(len(azimuths)):
expfile.write("Mean Azimuth Error " + methodsPred[method_i] + " " + str(meanAbsErrAzsList) + '\n')
expfile.write("Mean Elevation Error " + methodsPred[method_i] + " " + str(meanAbsErrElevsList[method_i])+ '\n')
expfile.write("Mean SH Components Error " + methodsPred[method_i] + " " + str(meanErrorsLightCoeffsList[method_i])+ '\n')
expfile.write("Mean SH Components Error " + methodsPred[method_i] + " " + str(meanErrorsLightCoeffsCList[method_i])+ '\n')
expfile.write("Mean Vertex Colors Error E " + methodsPred[method_i] + " " + str(meanErrorsVColorsEList[method_i])+ '\n')
expfile.write("Mean Vertex Colors Error C " + methodsPred[method_i] + " " + str(meanErrorsVColorsCList[method_i])+ '\n')
expfile.write("Mean Shape Error " + methodsPred[method_i] + " " + str(meanErrorsShapeParamsList[method_i])+ '\n')
expfile.write("Mean Shape Vertices Error " + methodsPred[method_i] + " " + str(meanErrorsShapeVerticesList[method_i])+ '\n')
expfile.write("Mean Segmentation Error " + methodsPred[method_i] + " " + str(meanErrorsSegmentationList[method_i])+ '\n\n')
segmentationsDic = {'segmentations':segmentations}
with open(resultDir + 'segmentations.pickle', 'wb') as pfile:
pickle.dump(segmentationsDic, pfile)
np.save(resultDir + 'reverted.npy', revertedSamples)
envMapDic = {'approxProjections':approxProjections, 'approxProjectionsGT':approxProjectionGT}
with open(resultDir + 'approxProjections.pickle', 'wb') as pfile:
pickle.dump(envMapDic, pfile)
# ipdb.set_trace()
totalTime = time.time() - startTime
print("Took " + str(totalTime/test_i) + " time per instance.")
experimentErrorsDic = {'errorsPosePredList':errorsPosePredList, 'errorsLightCoeffsList':errorsLightCoeffsList, 'errorsShapeParamsLis':errorsShapeParamsList, 'errorsShapeVerticesList':errorsShapeVerticesList, 'errorsEnvMapList':errorsEnvMapList, 'errorsLightCoeffsCList':errorsLightCoeffsCList, 'errorsVColorsEList':errorsVColorsEList, 'errorsVColorsCList':errorsVColorsCList, 'errorsVColorsCList':errorsVColorsCList, 'errorsSegmentationList':errorsSegmentationList}
#
with open(resultDir + 'experiment_errors.pickle', 'wb') as pfile:
pickle.dump(experimentErrorsDic, pfile)
| 134,439 | 47.586917 | 485 | py |
inversegraphics | inversegraphics-master/export_groundtruth.py | import save_exr_images
from save_exr_images import exportExrImages
import os
print ("Reading xml ")
outputDir = '../data/output/'
imgDir = outputDir + "images/"
lines = [line.strip() for line in open(outputDir + 'groundtruth.txt')]
if not os.path.exists(imgDir):
os.makedirs(imgDir)
for instance in lines:
parts = instance.split(' ')
teapot = int(parts[3])
frame = int(parts[4])
sceneNum = int(parts[5])
targetIndex = int(parts[6])
prefix = ''
if len(parts) == 17:
prefix = parts[16]
try:
exportExrImages(outputDir, imgDir, teapot, frame, sceneNum, targetIndex, prefix)
except Exception as e:
print(e)
| 678 | 19.575758 | 88 | py |
inversegraphics | inversegraphics-master/diffrender_groundtruth_multi.py | __author__ = 'pol'
import matplotlib
# matplotlib.use('Qt4Agg')
import bpy
import scene_io_utils
import mathutils
from math import radians
import timeit
import time
import opendr
import chumpy as ch
import geometry
import image_processing
import pdb
import numpy as np
import cv2
from blender_utils import *
import generative_models
import matplotlib.pyplot as plt
from opendr_utils import *
import OpenGL.GL as GL
import light_probes
import imageio
from OpenGL import contextdata
from light_probes import SHProjection
import collision
import copy
plt.ion()
#########################################
# Initialization starts here
#########################################
prefix = 'new_scenes'
previousGTPrefix = ''
#Main script options:
renderFromPreviousGT = False
useShapeModel = True
useOpenDR = False
useBlender = True
renderBlender = True
captureEnvMapFromBlender = False
parseSceneInstantiations = False
loadBlenderSceneFile = True
useCycles = True
unpackModelsFromBlender = False
unpackSceneFromBlender = False
loadSavedSH = False
generateTriplets = False
replaceNewGroundtruth = True
renderOcclusions = False
occlusionMin = 0.0
occlusionMax = 0.9
renderTeapots = True
renderMugs = False
showMug = False
centeredObject = True
fixedCamDistance = True
useDirectionalLight = False
renderBackground = True
teapotSceneIndex = 0
mugSceneIndex = 0
if renderTeapots and renderMugs:
mugSceneIndex = 1
glModes = ['glfw','mesa']
glMode = glModes[0]
width, height = (64, 64)
win = -1
coords = np.meshgrid(np.arange(width) - width / 2, np.arange(height) - height / 2)
coordsMugX = np.array([0])
coordsMugY = np.array([0])
coordsTeapotX = np.array([0])
coordsTeapotY = np.array([0])
if useOpenDR:
if glMode == 'glfw':
import glfw
#Initialize base GLFW context for the Demo and to share context among all renderers.
glfw.init()
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL.GL_TRUE)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
glfw.window_hint(glfw.DEPTH_BITS,32)
glfw.window_hint(glfw.VISIBLE, GL.GL_FALSE)
# win = glfw.create_window(width, height, "Demo", None, None)
# glfw.make_context_current(win)
angle = 60 * 180 / numpy.pi
clip_start = 0.05
clip_end = 10
frustum = {'near': clip_start, 'far': clip_end, 'width': width, 'height': height}
camDistance = 0.4
teapotsFile = 'teapots.txt'
teapots = [line.strip() for line in open(teapotsFile)]
renderTeapotsList = np.arange(len(teapots))[0:1]
mugs = []
renderMugsList = np.array([])
if renderMugs:
mugs = [line.strip() for line in open('mugs.txt')]
renderMugsList = np.arange(len(mugs))[0:1]
sceneIdx = 0
replaceableScenesFile = 'data/scene_replaceables_backup_new.txt'
sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndices, targetPositions = scene_io_utils.getSceneInformation(sceneIdx, replaceableScenesFile)
sceneDicFile = 'data/scene' + str(sceneNumber) + '.pickle'
targetParentIdx = 0
targetIndex = targetIndices[targetParentIdx]
targetParentPosition = targetPositions[targetParentIdx]
targetPosition = targetParentPosition
tex_srgb2lin = True
v, f_list, vc, vn, uv, haveTextures_list, textures_list = scene_io_utils.loadSavedScene(sceneDicFile, tex_srgb2lin)
removeObjectData(int(targetIndex), v, f_list, vc, vn, uv, haveTextures_list, textures_list)
targetModels = []
mugModels = []
blender_teapots = []
blender_mugs = []
if useBlender:
selection = [ teapots[i] for i in renderTeapotsList]
scene_io_utils.loadTargetsBlendData()
for teapotIdx, teapotName in enumerate(selection):
teapot = bpy.data.scenes[teapotName[0:63]].objects['teapotInstance' + str(renderTeapotsList[teapotIdx])]
teapot.layers[1] = True
teapot.layers[2] = True
targetModels = targetModels + [teapot]
blender_teapots = blender_teapots + [teapot]
if renderMugs:
selectionMugs = [ mugs[i] for i in renderMugsList]
scene_io_utils.loadMugsBlendData()
for mugIdx, mugName in enumerate(selectionMugs):
mug = bpy.data.scenes[mugName[0:63]].objects['mugInstance' + str(renderMugsList[mugIdx])]
mug.layers[1] = True
mug.layers[2] = True
mugModels = mugModels + [mug]
blender_mugs = blender_mugs + [mug]
v_teapots, f_list_teapots, vc_teapots, vn_teapots, uv_teapots, haveTextures_list_teapots, textures_list_teapots, vflat, varray, center_teapots = scene_io_utils.loadTeapotsOpenDRData(renderTeapotsList, useBlender, unpackModelsFromBlender, targetModels)
if renderMugs:
v_mugs, f_list_mugs, vc_mugs, vn_mugs, uv_mugs, haveTextures_list_mugs, textures_list_mugs, vflat_mugs, varray_mugs, center_mugs = scene_io_utils.loadMugsOpenDRData(renderMugsList, useBlender, unpackModelsFromBlender, mugModels)
v_mug = v_mugs[0][0]
f_list_mug = f_list_mugs[0][0]
chVColorsMug = ch.Ch([1,0,0])
vc_mug = [chVColorsMug * np.ones(v_mug[0].shape)]
vn_mug = vn_mugs[0][0]
uv_mug = uv_mugs[0][0]
haveTextures_list_mug = haveTextures_list_mugs[0][0]
textures_list_mug = textures_list_mugs[0][0]
chObjAz = ch.Ch([0])
chDist = ch.Ch([camDistance])
chObjAzGT = ch.Ch([0])
chAzGT = ch.Ch([0])
chElGT = ch.Ch([0])
chAzRelGT = chAzGT - chObjAzGT
chDistGT = ch.Ch([camDistance])
chComponentGT = ch.Ch(np.array([2, 0.25, 0.25, 0.12,-0.17,0.36,0.1,0.,0.]))
chObjDistGT = ch.Ch([0])
chObjRotationGT = ch.Ch([0])
chObjAzMug = ch.Ch([0])
chObjDistMug = ch.Ch([0])
chObjRotationMug = ch.Ch([0])
light_colorGT = ch.ones(3)
chVColorsGT = ch.Ch([0.8,0.8,0.8])
shCoefficientsFile = 'data/sceneSH' + str(sceneIdx) + '.pickle'
chAmbientIntensityGT = ch.Ch([0.1])
clampedCosCoeffs = clampedCosineCoefficients()
chAmbientSHGT = ch.zeros([9])
envMapDic = {}
SHFilename = 'data/LightSHCoefficients.pickle'
with open(SHFilename, 'rb') as pfile:
envMapDic = pickle.load(pfile)
phiOffset = ch.Ch([0])
totalOffset = phiOffset + chObjAzGT
envMapCoeffs = ch.Ch(list(envMapDic.items())[0][1][1])
chLightRadGT = ch.Ch([0.1])
chLightDistGT = ch.Ch([0.5])
chLightIntensityGT = ch.Ch([1])
chLightAzGT = ch.Ch([0])
chLightElGT = ch.Ch([0])
chGlobalConstantGT = ch.Ch([0.5])
angleGT = ch.arcsin(chLightRadGT/chLightDistGT)
zGT = chZonalHarmonics(angleGT)
shDirLightGT = chZonalToSphericalHarmonics(zGT, np.pi/2 - chLightElGT, chLightAzGT - np.pi/2)
envMapCoeffsRotated = ch.Ch(np.dot(light_probes.chSphericalHarmonicsZRotation(totalOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]])
envMapCoeffsRotatedRel = ch.Ch(np.dot(light_probes.chSphericalHarmonicsZRotation(phiOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]])
shCoeffsRGB = envMapCoeffsRotated
shCoeffsRGBRel = envMapCoeffsRotatedRel
chShCoeffs = 0.3*shCoeffsRGB[:,0] + 0.59*shCoeffsRGB[:,1] + 0.11*shCoeffsRGB[:,2]
chShCoeffsRel = 0.3*shCoeffsRGBRel[:,0] + 0.59*shCoeffsRGBRel[:,1] + 0.11*shCoeffsRGBRel[:,2]
chAmbientSHGT = chShCoeffs.ravel() * chAmbientIntensityGT * clampedCosCoeffs
chAmbientSHGTRel = chShCoeffsRel.ravel() * chAmbientIntensityGT * clampedCosCoeffs
chComponentGT = chAmbientSHGT
chComponentGTRel = chAmbientSHGTRel
chDisplacementGT = ch.Ch([0.0,0.0,0.0])
chScaleGT = ch.Ch([1, 1.,1.])
currentTeapotModel = 0
currentMugModel = 0
if renderTeapots:
addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, v_teapots[currentTeapotModel][0], f_list_teapots[currentTeapotModel][0], vc_teapots[currentTeapotModel][0], vn_teapots[currentTeapotModel][0], uv_teapots[currentTeapotModel][0], haveTextures_list_teapots[currentTeapotModel][0], textures_list_teapots[currentTeapotModel][0])
if renderMugs:
addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, v_mugs[currentMugModel][0], f_list_mugs[currentMugModel][0], vc_mugs[currentMugModel][0], vn_mugs[currentMugModel][0], uv_mugs[currentMugModel][0], haveTextures_list_mugs[currentMugModel][0], textures_list_mugs[currentMugModel][0])
center = center_teapots[currentTeapotModel]
if useOpenDR:
rendererGT = createRendererGT(glMode, chAzGT, chElGT, chDistGT, center, v, vc, f_list, vn, light_colorGT, chComponentGT, chVColorsGT, targetPosition[:].copy(), chDisplacementGT, width,height, uv, haveTextures_list, textures_list, frustum, None )
rendererGT.overdraw = False
rendererGT.nsamples = 1
rendererGT.msaa = False
rendererGT.initGL()
rendererGT.initGLTexture()
vis_gt = np.array(rendererGT.indices_image!=1).copy().astype(np.bool)
vis_mask = np.array(rendererGT.indices_image==1).copy().astype(np.bool)
shapeIm = vis_gt.shape
numPixels = height * width
## For blender parallell rendering.
import multiprocessing
numTileAxis = np.ceil(np.sqrt(multiprocessing.cpu_count())/2)
numTileAxis = 3
smCenterGT = ch.array([0,0,0.1])
shapeVerticesScaling = 0.09
teapotFilePath = 'data/teapotModel.pkl'
if useShapeModel:
teapot_i = -1
import shape_model
#%% Load data
teapotModel = shape_model.loadObject(teapotFilePath)
faces = teapotModel['faces']
#%% Sample random shape Params
latentDim = np.shape(teapotModel['ppcaW'])[1]
shapeParams = np.random.randn(latentDim)
chShapeParamsGT = ch.Ch(shapeParams)
meshLinearTransform=teapotModel['meshLinearTransform']
W=teapotModel['ppcaW']
b=teapotModel['ppcaB']
chVerticesGT = shape_model.VerticesModel(chShapeParams=chShapeParamsGT,meshLinearTransform=meshLinearTransform,W = W,b=b)
chVerticesGT.init()
chVerticesGT = ch.dot(geometry.RotateZ(-np.pi/2)[0:3,0:3],chVerticesGT.T).T
chNormalsGT = shape_model.chGetNormals(chVerticesGT, faces)
smNormalsGT = [chNormalsGT]
smFacesGT = [[faces]]
smVColorsGT = [chVColorsGT*np.ones(chVerticesGT.shape)]
smUVsGT = [ch.Ch(np.zeros([chVerticesGT.shape[0],2]))]
smHaveTexturesGT = [[False]]
smTexturesListGT = [[None]]
chVerticesGT = chVerticesGT - ch.mean(chVerticesGT, axis=0)
minZ = ch.min(chVerticesGT[:,2])
chMinZ = ch.min(chVerticesGT[:,2])
zeroZVerts = chVerticesGT[:,2]- chMinZ
chVerticesGT = ch.hstack([chVerticesGT[:,0:2] , zeroZVerts.reshape([-1,1])])
chVerticesGT = chVerticesGT*shapeVerticesScaling
smCenterGT = ch.array([0,0,0.1])
smVerticesGT = [chVerticesGT]
chNormalsGT = shape_model.chGetNormals(chVerticesGT, faces)
smNormalsGT = [chNormalsGT]
else:
latentDim = 1
chShapeParamsGT = ch.array([0])
if useShapeModel:
teapotMesh = createMeshFromData('teapotShapeModelMesh', chVerticesGT.r.tolist(),
faces.astype(np.int32).tolist())
teapotMesh.layers[0] = True
teapotMesh.layers[1] = True
teapotMesh.pass_index = 1
targetGroup = bpy.data.groups.new('teapotShapeModelGroup')
targetGroup.objects.link(teapotMesh)
teapot = bpy.data.objects.new('teapotShapeModel', None)
teapot.dupli_type = 'GROUP'
teapot.dupli_group = targetGroup
teapot.pass_index = 1
mat = makeMaterial('teapotMat', (0, 0, 0), (0, 0, 0), 1)
setMaterial(teapotMesh, mat)
### Renderer (only teapot)
teapotToRender = -1
# renderTeapotsList = np.arange(len(teapots))
renderTeapotsList = np.arange(len(teapots))[0:1]
targetModels = []
chAz = ch.Ch([0])
chObjAz = ch.Ch([0])
chAzRel = chAz - chObjAz
chEl = ch.Ch([0.0])
chDist = ch.Ch([camDistance])
#Initialize to a random set of SH coefficients
chLightSHCoeffs = ch.Ch(np.array([2, 0.25, 0.25, 0.12,-0.17,0.36,0.1,0.,0.]))
# if multiObjects:
chObjDist = ch.Ch([0])
chObjRotation = ch.Ch([0])
chObjAzMug = ch.Ch([0])
chObjDistMug = ch.Ch([0])
chObjRotationMug = ch.Ch([0])
chVColorsMug = ch.Ch([1,0,0])
chComponent = chLightSHCoeffs * clampedCosCoeffs
light_color = ch.ones(3)
chVColors = ch.Ch([0.4,0.4,0.4])
chDisplacement = ch.Ch([0.0, 0.0,0.0])
chScale = ch.Ch([1.0,1.0,1.0])
if useShapeModel:
import shape_model
#%% Load data
shapeParams = np.zeros(latentDim)
chShapeParams = ch.Ch(shapeParams.copy())
chVertices = shape_model.VerticesModel(chShapeParams=chShapeParams, meshLinearTransform=meshLinearTransform,W = W,b=b)
chVertices.init()
chVertices = ch.dot(geometry.RotateZ(-np.pi/2)[0:3,0:3],chVertices.T).T
smFaces = [[faces]]
smVColors = [chVColors*np.ones(chVertices.shape)]
smUVs = ch.Ch(np.zeros([chVertices.shape[0],2]))
smHaveTextures = [[False]]
smTexturesList = [[None]]
chVertices = chVertices - ch.mean(chVertices, axis=0)
minZ = ch.min(chVertices[:,2])
chMinZ = ch.min(chVertices[:,2])
zeroZVerts = chVertices[:,2]- chMinZ
chVertices = ch.hstack([chVertices[:,0:2] , zeroZVerts.reshape([-1,1])])
chVertices = chVertices*0.09
smCenter = ch.array([0,0,0.1])
smVertices = [chVertices]
chNormals = shape_model.chGetNormals(chVertices, faces)
smNormals = [chNormals]
if useShapeModel:
center = smCenter
UVs = smUVs
v = smVertices
vn = smNormals
Faces = smFaces
VColors = smVColors
UVs = smUVs
HaveTextures = smHaveTextures
TexturesList = smTexturesList
else:
v, vn = v_teapots[currentTeapotModel][0], vn_teapots[currentTeapotModel][0]
Faces = f_list_teapots[currentTeapotModel][0]
VColors = vc_teapots[currentTeapotModel][0]
UVs = uv_teapots[currentTeapotModel][0]
HaveTextures = haveTextures_list_teapots[currentTeapotModel][0]
TexturesList = textures_list_teapots[currentTeapotModel][0]
v, vn, teapotPosOffset = transformObject(v, vn, chScale, chObjAz, chObjDist, chObjRotation, np.array([0,0,0]))
if renderMugs:
verticesMug, normalsMug, mugPosOffset = transformObject(v_mug, vn_mug, chScale, chObjAzMug + np.pi / 2, chObjDistMug, chObjRotationMug, np.array([0,0,0]))
VerticesB = [v] + [verticesMug]
NormalsB = [vn] + [normalsMug]
FacesB = [Faces] + [f_list_mug]
VColorsB = [VColors] + [vc_mug]
UVsB = [UVs] + [uv_mug]
HaveTexturesB = [HaveTextures] + [haveTextures_list_mug]
TexturesListB = [TexturesList] + [textures_list_mug]
renderer = createRendererTarget(glMode, chAz, chEl, chDist, center, VerticesB, VColorsB, FacesB, NormalsB, light_color,chComponent, chVColors, np.array([0,0,0]), chDisplacement, width, height, UVsB, HaveTexturesB, TexturesListB, frustum, None)
else:
renderer = createRendererTarget(glMode, chAz, chEl, chDist, smCenter, [v], [smVColors], [smFaces], [vn], light_color, chComponent, chVColors, 0, chDisplacement, width,height, [smUVs], [smHaveTextures], [smTexturesList], frustum, None )
renderer.msaa = False
renderer.overdraw = True
# chShapeParams[:] = np.zeros([latentDim])
chVerticesMean = chVertices.r.copy()
# else:
# renderer = renderer_teapots[teapotToRender]
########################################################
####### Initialization ends here
########################################################
print("Creating Ground Truth")
# --- Accumulators for all ground-truth labels; one entry per rendered sample. ---
# They are concatenated to as samples are generated and later written to HDF5.
trainAzsGT = np.array([])
trainObjAzsGT = np.array([])
trainElevsGT = np.array([])
trainLightAzsGT = np.array([])
trainLightElevsGT = np.array([])
trainLightIntensitiesGT = np.array([])
trainVColorGT = np.array([])
trainScenes = np.array([], dtype=np.uint8)
trainTeapotIds = np.array([], dtype=np.uint8)
trainEnvMaps = np.array([], dtype=np.uint8)
trainOcclusions = np.array([])
trainTargetIndices = np.array([], dtype=np.uint8)
trainIds = np.array([], dtype=np.uint32)
#Multi
# Labels for the multi-object (teapot + mug) setting.
trainObjDistGT = np.array([])
trainObjRotationGT = np.array([])
trainObjDistMug = np.array([])
trainObjRotationMug = np.array([])
trainObjAzMug = np.array([])
trainVColorsMug = np.array([])
trainTeapotElRel = np.array([])
trainMugElRel = np.array([])
trainMugPosOffset = np.array([])
trainTeapotPosOffset = np.array([])
# Spherical-harmonics lighting coefficients (9 per sample, absolute and camera-relative).
trainLightCoefficientsGT = np.array([]).reshape([0,9])
trainLightCoefficientsGTRel = np.array([]).reshape([0,9])
trainAmbientIntensityGT = np.array([])
trainEnvMapPhiOffsets = np.array([])
trainShapeModelCoeffsGT = np.array([]).reshape([0,latentDim])
# 2D bounding boxes (4 ints per sample).  NOTE(review): np.bool is deprecated in
# modern numpy; this script targets an older numpy release.
trainBBMug = np.array([],dtype=np.int8).reshape([0,4])
trainBBTeapot = np.array([],dtype=np.int8).reshape([0,4])
trainTeapotPresent = np.array([], dtype=np.bool)
trainMugPresent = np.array([], dtype=np.bool)
# Output directory layout for this ground-truth run.
gtDir = 'groundtruth/' + prefix + '/'
if not os.path.exists(gtDir + 'images/'):
os.makedirs(gtDir + 'images/')
if not os.path.exists(gtDir + 'sphericalharmonics/'):
os.makedirs(gtDir + 'sphericalharmonics/')
if not os.path.exists(gtDir + 'images_opendr/'):
os.makedirs(gtDir + 'images_opendr/')
if not os.path.exists(gtDir + 'masks_occlusion/'):
os.makedirs(gtDir + 'masks_occlusion/')
if generateTriplets:
if not os.path.exists(gtDir + 'triplets1/'):
os.makedirs(gtDir + 'triplets1/')
if not os.path.exists(gtDir + 'triplets2/'):
os.makedirs(gtDir + 'triplets2/')
print("Generating renders")
# Scenes whose target object can be replaced by our synthetic teapot/mug.
sceneLines = [line.strip() for line in open(replaceableScenesFile)]
scenesToRender = range(len(sceneLines))[:]
# NOTE(review): this line overrides the previous one, restricting the run to a
# single scene (index 2) — looks like a debugging/partial-run setting.
scenesToRender = range(len(sceneLines))[2:3]
# scenesToRender = np.random.shuffle(scenesToRender)
trainSize = 10000
# Only the first teapot model is rendered in this configuration.
renderTeapotsList = np.arange(len(teapots))[0:1]
# Filter out environment maps that were manually flagged as bad.
ignoreEnvMaps = np.loadtxt('data/bad_envmaps.txt')
hdritems = list(envMapDic.items())[:]
hdrstorender = []
phiOffsets = [0, np.pi/2, np.pi, 3*np.pi/2]
for hdrFile, hdrValues in hdritems:
hdridx = hdrValues[0]
envMapCoeffs = hdrValues[1]
if hdridx not in ignoreEnvMaps:
hdrstorender = hdrstorender + [(hdrFile,hdrValues)]
#     if not os.path.exists('light_probes/envMap' + str(hdridx)):
#         os.makedirs('light_probes/envMap' + str(hdridx))
#
#     for phiOffset in phiOffsets:
#
#         # phiOffset = np.random.uniform(0,2*np.pi, 1)
#         from numpy.random import choice
#         objAzGT = np.pi/2
#         chObjAzGT[:] = 0
#         totalOffset = phiOffset + chObjAzGT.r
#         envMapCoeffsRotated = np.dot(light_probes.sphericalHarmonicsZRotation(totalOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]].copy()
#         envMapCoeffsRotatedRel = np.dot(light_probes.sphericalHarmonicsZRotation(phiOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]].copy()
#         shCoeffsRGB = envMapCoeffsRotated.copy()
#         shCoeffsRGBRel = envMapCoeffsRotatedRel.copy()
#         chShCoeffs = 0.3*shCoeffsRGB[:,0] + 0.59*shCoeffsRGB[:,1] + 0.11*shCoeffsRGB[:,2]
#         chShCoeffsRel = 0.3*shCoeffsRGBRel[:,0] + 0.59*shCoeffsRGBRel[:,1] + 0.11*shCoeffsRGBRel[:,2]
#         chAmbientSHGT = chShCoeffs * chAmbientIntensityGT * clampedCosCoeffs
#         chAmbientSHGTRel = chShCoeffsRel * chAmbientIntensityGT * clampedCosCoeffs
#         chComponentGT[:] = chAmbientSHGT.r[:].copy()
#         chComponentGTRel[:] = chAmbientSHGTRel.r[:].copy()
#         cv2.imwrite('light_probes/envMap' + str(hdridx) + '/opendr_' + str(np.int(180*phiOffset/np.pi)) + '.png' , 255*rendererGT.r[:,:,[2,1,0]])
# sys.exit("")
# Structured numpy dtype describing one ground-truth record; field dtypes are
# derived from the (empty) accumulator arrays declared above so the two stay in sync.
gtDtype = [('trainIds', trainIds.dtype.name), ('trainAzsGT', trainAzsGT.dtype.name),('trainObjAzsGT', trainObjAzsGT.dtype.name),('trainElevsGT', trainElevsGT.dtype.name),
('trainLightAzsGT', trainLightAzsGT.dtype.name),('trainLightElevsGT', trainLightElevsGT.dtype.name),('trainLightIntensitiesGT', trainLightIntensitiesGT.dtype.name),
('trainVColorGT', trainVColorGT.dtype.name, (3,) ),('trainScenes', trainScenes.dtype.name),('trainTeapotIds', trainTeapotIds.dtype.name),
('trainEnvMaps', trainEnvMaps.dtype.name),('trainOcclusions', trainOcclusions.dtype.name),('trainTargetIndices', trainTargetIndices.dtype.name),
('trainLightCoefficientsGT',trainLightCoefficientsGT.dtype, (9,)), ('trainLightCoefficientsGTRel', trainLightCoefficientsGTRel.dtype, (9,)),
('trainAmbientIntensityGT', trainAmbientIntensityGT.dtype), ('trainEnvMapPhiOffsets', trainEnvMapPhiOffsets.dtype),
('trainShapeModelCoeffsGT', trainShapeModelCoeffsGT.dtype, (latentDim,)),
('trainObjDistGT', trainObjDistGT.dtype),
('trainObjRotationGT', trainObjRotationGT.dtype),
('trainObjDistMug', trainObjDistMug.dtype),
('trainObjRotationMug', trainObjRotationMug.dtype),
('trainObjAzMug', trainObjAzMug.dtype),
('trainVColorsMug', trainVColorsMug.dtype, (3,)),
('trainTeapotElRel', trainTeapotElRel.dtype),
('trainMugElRel', trainMugElRel.dtype),
('trainMugPosOffset', trainMugPosOffset.dtype,(3,)),
('trainTeapotPosOffset', trainTeapotPosOffset.dtype,(3,)),
('trainBBMug', trainBBMug.dtype, (4,)),
('trainBBTeapot', trainBBTeapot.dtype, (4,)),
('trainTeapotPresent', trainTeapotPresent.dtype),
('trainMugPresent', trainMugPresent.dtype)]
# Empty structured array used only to create the HDF5 dataset with the right dtype.
groundTruth = np.array([], dtype = gtDtype)
groundTruthFilename = gtDir + 'groundTruth.h5'
# gtDirToRender = 'groundtruth/' + previousGTPrefix + '/'
# gtDataFileToRender = h5py.File(gtDirToRender + 'groundTruth.h5', 'w')
# gtDatasetToRender = gtDataFileToRender.create_dataset(previousGTPrefix, data=groundTruth, maxshape=(None,))
# Open (or create) the HDF5 ground-truth dataset and decide where sample
# numbering should resume.  `nextId` continues after the last stored id when
# appending, otherwise starts at 0.
nextId = 0
if not replaceNewGroundtruth:
    # Append mode: reuse the existing dataset for this prefix if present.
    gtDataFile = h5py.File(groundTruthFilename, 'a')
    try:
        gtDataset = gtDataFile[prefix]
        if gtDataset.size > 0:
            # Resume numbering after the last stored ground-truth id.
            nextId = gtDataset['trainIds'][-1] + 1
    except KeyError:
        # h5py raises KeyError when the named dataset does not exist yet.
        # (Was a bare `except:`, which also hid unrelated I/O errors and typos.)
        gtDataset = gtDataFile.create_dataset(prefix, data=groundTruth, maxshape=(None,))
else:
    # Overwrite mode: truncate the file and create the dataset anew.
    gtDataFile = h5py.File(groundTruthFilename, 'w')
    gtDataset = gtDataFile.create_dataset(prefix, data=groundTruth, maxshape=(None,))
train_i = nextId
#Re-producible groundtruth generation.
# Reset the RNG only for fresh runs so resumed runs don't repeat samples.
if train_i == 0:
np.random.seed(1)
# np.random.seed(2)
unlinkedObj = None
scenesToRenderOcclusions = []
scenes = []
lenScenes = 0
#Compute how many different locations can the teapot be instantiated across all scenes.
for sceneIdx in scenesToRender:
sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndices, targetPositions = scene_io_utils.getSceneInformation(sceneIdx, replaceableScenesFile)
sceneDicFile = 'data/scene' + str(sceneNumber) + '.pickle'
# if renderOcclusions:
#     targetIndicesNew = []
#     occlusionSceneFile = 'data/occlusions/occlusionScene' + str(sceneNumber) + '.pickle'
#     with open(occlusionSceneFile, 'rb') as pfile:
#         occlusions = pickle.load(pfile)
#
#     for targetidx, targetIndex in enumerate(targetIndices):
#         if not occlusions[targetIndex][1]:
#             print("Scene idx " + str(sceneIdx) + " at index " + str(targetIndex) + " has no proper occlusion.")
#         else:
#             targetIndicesNew = targetIndicesNew + [targetIndex]
#     targetIndices = targetIndicesNew
#
#     collisionSceneFile = 'data/collisions/collisionScene' + str(sceneNumber) + '.pickle'
#     scenes = scenes + [targetIndices]
#     with open(collisionSceneFile, 'rb') as pfile:
#         collisions = pickle.load(pfile)
#
#     for targetidx, targetIndex in enumerate(targetIndices):
#         if not collisions[targetIndex][1]:
#             print("Scene idx " + str(sceneIdx) + " at index " + str(targetIndex) + " collides everywhere.")
scenes = scenes + [targetIndices]
# lenScenes is used below to size the per-teapot rejection-sampling budget.
lenScenes += len(targetIndices)
# Snapshot of every configuration flag for this run, persisted next to the data
# so a run can be reproduced/inspected later.
groundTruthInfo = {'prefix':prefix, 'previousGTPrefix':previousGTPrefix, 'renderFromPreviousGT':renderFromPreviousGT, 'useShapeModel':useShapeModel, 'renderOcclusions':renderOcclusions, 'useOpenDR':useOpenDR, 'useBlender':useBlender, 'renderBlender':renderBlender, 'captureEnvMapFromBlender':captureEnvMapFromBlender, 'parseSceneInstantiations':parseSceneInstantiations, 'loadBlenderSceneFile':loadBlenderSceneFile, 'useCycles':useCycles, 'unpackModelsFromBlender':unpackModelsFromBlender, 'unpackSceneFromBlender':unpackSceneFromBlender, 'loadSavedSH':loadSavedSH, 'renderTeapots':renderTeapots, 'renderMugs':renderMugs, 'width':width, 'height':height, 'angle':angle, 'clip_start':clip_start, 'clip_end':clip_end, 'camDistance':camDistance, 'renderTeapotsList':renderTeapotsList, 'renderMugsList':renderMugsList, 'replaceableScenesFile':replaceableScenesFile, 'teapotsFile':teapotsFile, 'SHFilename':SHFilename, 'light_colorGT':light_colorGT, 'chDisplacement':chDisplacement, 'chDisplacementGT':chDisplacementGT, 'chScale':chScale, 'chScaleGT':chScaleGT}
with open(gtDir + 'gtInfo.pickle', 'wb') as pfile:
pickle.dump(groundTruthInfo, pfile)
#Generate GT labels before rendering them.
if not renderFromPreviousGT:
# --- Main generation loop: one iteration per replaceable scene. ---
for scene_i, sceneIdx in enumerate(scenesToRender):
sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndicesScene, targetPositions = scene_io_utils.getSceneInformation(sceneIdx, replaceableScenesFile)
print("Generating groundtruth for scene: " + str(sceneNumber))
# targetIndices = scenes[scene_i]
# if not targetIndices:
#     continue
targetIndices = targetIndicesScene
sceneDicFile = 'data/scene' + str(sceneNumber) + '.pickle'
# Precomputed object-object collision data for this scene.
collisionSceneFile = 'data/collisions/collisionScene' + str(sceneNumber) + '.pickle'
with open(collisionSceneFile, 'rb') as pfile:
collisions = pickle.load(pfile)
# if renderOcclusions:
#     occlusionSceneFile = 'data/occlusions/occlusionScene' + str(sceneNumber) + '.pickle'
#     with open(occlusionSceneFile, 'rb') as pfile:
#         occlusions = pickle.load(pfile)
# Pristine copy of the scene geometry; deep-copied per target below.
v2, f_list2, vc2, vn2, uv2, haveTextures_list2, textures_list2 = scene_io_utils.loadSavedScene(sceneDicFile, tex_srgb2lin)
if useBlender and not loadBlenderSceneFile:
# Build the Blender scene from scratch.
bpy.ops.wm.read_factory_settings()
scene = scene_io_utils.loadBlenderScene(sceneIdx, replaceableScenesFile)
scene_io_utils.setupScene(scene, roomInstanceNum, scene.world, scene.camera, width, height, 16, useCycles, True)
scene.update()
# Save barebones scene.
elif useBlender and loadBlenderSceneFile:
# Load a previously saved .blend for this scene.
bpy.ops.wm.read_factory_settings()
scene_io_utils.loadSceneBlendData(sceneIdx, replaceableScenesFile)
scene = bpy.data.scenes['Main Scene']
#bpy.ops.wm.save_as_mainfile(filepath="/home/pol/sceneload.blend")
# Configure scene
if useBlender:
if renderTeapots:
# Link the selected teapot instances into the Blender scene (layers 1/2 are
# the render layers used for segmentation passes).
targetModels = []
blender_teapots = []
teapots = [line.strip() for line in open('teapots.txt')]
selection = [teapots[i] for i in renderTeapotsList]
scene_io_utils.loadTargetsBlendData()
for teapotIdx, teapotName in enumerate(selection):
teapot = bpy.data.scenes[teapotName[0:63]].objects['teapotInstance' + str(renderTeapotsList[teapotIdx])]
teapot.layers[1] = True
teapot.layers[2] = True
targetModels = targetModels + [teapotIdx]
blender_teapots = blender_teapots + [teapot]
if renderMugs:
blender_mugs = []
selectionMugs = [mugs[i] for i in renderMugsList]
scene_io_utils.loadMugsBlendData()
for mugIdx, mugName in enumerate(selectionMugs):
mug = bpy.data.scenes[mugName[0:63]].objects['mugInstance' + str(renderMugsList[mugIdx])]
mug.layers[1] = True
mug.layers[2] = True
mugModels = mugModels + [mug]
blender_mugs = blender_mugs + [mug]
# setupSceneGroundtruth(scene, width, height, clip_start, 2000, 'CUDA', 'CUDA_MULTI_2')
# bpy.ops.wm.save_as_mainfile(filepath="/home/pol/sceneload1.blend")
setupSceneGroundtruth(scene, width, height, clip_start, 2000)
# bpy.ops.wm.save_as_mainfile(filepath="/home/pol/sceneload2.blend")
treeNodes = scene.world.node_tree
links = treeNodes.links
# Simplified cube-proxy scene used for fast collision queries.
cubeScene = createCubeScene(scene)
setEnviornmentMapStrength(20, cubeScene)
unlinkedObj = None
unlinkedCubeObj = None
envMapFilename = None
# --- Per-target loop: each replaceable object position in the scene. ---
for targetidx, targetIndex in enumerate(targetIndices):
targetPosition = targetPositions[np.where(targetIndex==np.array(targetIndicesScene))[0][0]]
# if sceneIdx != currentScene or targetIndex != currentTargetIndex:
# targetPosition = targetPositions[np.where(targetIndex == np.array(targetIndicesScene))[0]]
# Work on a fresh deep copy of the scene so removals don't accumulate.
v, f_list, vc, vn, uv, haveTextures_list, textures_list = copy.deepcopy(v2), copy.deepcopy(f_list2), copy.deepcopy(vc2), copy.deepcopy(vn2)\
, copy.deepcopy(uv2), copy.deepcopy(haveTextures_list2), copy.deepcopy(textures_list2)
# Remove the original target object (meshes are stored in reverse order here).
removeObjectData(len(v) - 1 - targetIndex, v, f_list, vc, vn, uv, haveTextures_list, textures_list)
# if sceneIdx != currentScene or targetIndex != currentTargetIndex:
parentIdx = instances[targetIndex][1]
if useBlender:
# Mirror the removal in Blender: unlink this target (relinking the one
# unlinked for the previous target, if any) in both the full and cube scenes.
if unlinkedObj != None:
scene.objects.link(unlinkedObj)
cubeScene.objects.link(unlinkedCubeObj)
unlinkedObj = scene.objects[str(targetIndex)]
unlinkedCubeObj = cubeScene.objects['cube' + str(targetIndex)]
scene.objects.unlink(unlinkedObj)
cubeScene.objects.unlink(unlinkedCubeObj)
# Estimate the usable radius of the supporting surface (e.g. table top).
parentSupportObj = scene.objects[str(parentIdx)]
supportWidthMax, supportWidthMin = modelWidth(parentSupportObj.dupli_group.objects,
parentSupportObj.matrix_world)
supportDepthMax, supportDepthMin = modelDepth(parentSupportObj.dupli_group.objects,
parentSupportObj.matrix_world)
# supportRad = min(0.5 * np.sqrt((supportDepthMax - supportDepthMin) ** 2),0.5 * np.min((supportWidthMax - supportWidthMin) ** 2))
supportRad = np.sqrt(0.5 * (supportDepthMax - supportDepthMin) ** 2 + 0.5 * (supportWidthMax - supportWidthMin) ** 2)
distRange = min(supportRad, 0.3)
# Discretised (distance, rotation) bins of collision-free placements, cached on disk.
sceneCollisions = {}
collisionsFile = 'data/collisions/discreteCollisions_scene' + str(sceneNumber) + '_targetIdx' + str(targetIndex) + '.pickle'
distInterval = 0.05
rotationRange = 2 * np.pi
rotationInterval = 10 * np.pi / 180
if not parseSceneInstantiations and os.path.exists(collisionsFile):
# Load cached placement bins.
with open(collisionsFile, 'rb') as pfile:
sceneCollisions = pickle.load(pfile)
supportRad = sceneCollisions['supportRad']
instantiationBinsTeapot = sceneCollisions['instantiationBinsTeapot']
distRange = sceneCollisions['distRange']
rotationRange = sceneCollisions['rotationRange']
rotationInterval = sceneCollisions['rotationInterval']
distInterval = sceneCollisions['distInterval']
instantiationBinsMug = sceneCollisions['instantiationBinsMug']
totalBinsTeapot = sceneCollisions['totalBinsTeapot']
totalBinsMug = sceneCollisions['totalBinsMug']
else:
# Parsing placements requires the Blender cube-proxy scene.
assert(useBlender)
print("Parsing collisions for scene " + str(sceneNumber))
placeCamera(cubeScene.camera, 0,
45, chDistGT.r[0].copy(),
center[:].copy() + targetPosition[:].copy())
# Coarse bounding cubes standing in for the teapot (0.2m) and mug (0.1m).
scaleZ = 0.2
scaleY = 0.2
scaleX = 0.2
cubeTeapot = createCube(scaleX, scaleY, scaleZ, 'cubeTeapot')
cubeTeapot.matrix_world = mathutils.Matrix.Translation(targetPosition)
# cubeTeapot = getCubeObj(teapot)
# cubeMug = getCubeObj(mug)
scaleZ = 0.1
scaleY = 0.1
scaleX = 0.1
cubeMug = createCube(scaleX, scaleY, scaleZ, 'cubeMug')
cubeMug.matrix_world = mathutils.Matrix.Translation(targetPosition)
cubeScene.objects.link(cubeTeapot)
cubeParentSupportObj = cubeScene.objects['cube' + str(parentIdx)]
cubeRoomObj = cubeScene.objects['cube' + str(roomInstanceNum)]
cubeScene.update()
objDisplacementMat = computeHemisphereTransformation(chObjRotationGT, 0, chObjDistGT, np.array([0, 0, 0]))
objOffset = objDisplacementMat[0:3, 3]
# Sweep (distance, rotation) bins and record which are collision-free.
instantiationBinsTeapot, totalBinsTeapot = collision.parseSceneCollisions(gtDir, scene_i, targetIndex, cubeTeapot,
cubeScene, objOffset, chObjDistGT,
chObjRotationGT, cubeParentSupportObj,
cubeRoomObj, distRange, rotationRange,
distInterval, rotationInterval)
cubeScene.objects.unlink(cubeTeapot)
cubeScene.objects.link(cubeMug)
instantiationBinsMug, totalBinsMug = collision.parseSceneCollisions(gtDir, scene_i, targetIndex, cubeMug,
cubeScene, objOffset,
chObjDistGT, chObjRotationGT,
cubeParentSupportObj, cubeRoomObj,
distRange,
rotationRange, distInterval,
rotationInterval)
cubeScene.objects.unlink(cubeMug)
deleteObject(cubeTeapot)
deleteObject(cubeMug)
# Cache the result for future runs.
sceneCollisions = {'totalBinsTeapot': totalBinsTeapot, 'totalBinsMug': totalBinsMug,
'supportRad': supportRad, 'instantiationBinsTeapot': instantiationBinsTeapot,
'distRange': distRange, 'rotationRange': rotationRange,
'distInterval': distInterval, 'rotationInterval': rotationInterval,
'instantiationBinsMug': instantiationBinsMug}
with open(collisionsFile, 'wb') as pfile:
pickle.dump(sceneCollisions, pfile)
#Go to next target Index or scene if there are no plausible place instantiations for teapot or mug.
if instantiationBinsTeapot.sum() < 1 or instantiationBinsMug.sum() < 1:
print("This scene position has not place to instantiate the objects.")
continue
# if useShapeModel
# --- Per-teapot-model loop (teapot_i == -1 selects the morphable shape model). ---
for teapot_i in renderTeapotsList:
if useShapeModel:
teapot_i = -1
else:
currentTeapotModel = teapot_i
center = center_teapots[teapot_i]
print("Ground truth on new teapot" + str(teapot_i))
##Destroy and create renderer
# Tear down the previous GT renderer to release its GL context/resources.
if useOpenDR:
rendererGT.makeCurrentContext()
rendererGT.clear()
contextdata.cleanupContext(contextdata.getContext())
if glMode == 'glfw':
glfw.destroy_window(rendererGT.win)
del rendererGT
if renderTeapots:
currentTeapotModel = teapot_i
center = center_teapots[teapot_i]
if useShapeModel:
# Morphable shape-model ("sm") mesh attributes.
center = smCenterGT
UVs = smUVsGT
vGT = smVerticesGT
vnGT = smNormalsGT
Faces = smFacesGT
VColors = smVColorsGT
UVs = smUVsGT
HaveTextures = smHaveTexturesGT
TexturesList = smTexturesListGT
else:
# Fixed teapot mesh attributes for the selected model.
vGT, vnGT = v_teapots[currentTeapotModel][0], vn_teapots[currentTeapotModel][0]
Faces = f_list_teapots[currentTeapotModel][0]
VColors = vc_teapots[currentTeapotModel][0]
UVs = uv_teapots[currentTeapotModel][0]
HaveTextures = haveTextures_list_teapots[currentTeapotModel][0]
TexturesList = textures_list_teapots[currentTeapotModel][0]
# Pose the GT teapot at the scene target position with the GT latents.
vGT, vnGT, teapotPosOffset = transformObject(vGT, vnGT, chScaleGT, chObjAzGT, chObjDistGT, chObjRotationGT, targetPosition)
if renderMugs:
# NOTE(review): the mug uses chScale (fitting scale), not chScaleGT — confirm intended.
verticesMug, normalsMug, mugPosOffset = transformObject(v_mug, vn_mug, chScale, chObjAzMug + np.pi / 2, chObjDistMug, chObjRotationMug, targetPosition)
VerticesMug = [verticesMug]
NormalsMug = [normalsMug]
FacesMug = [f_list_mug]
VColorsMug = [vc_mug]
UVsMug = [uv_mug]
HaveTexturesMug = [haveTextures_list_mug]
TexturesListMug = [textures_list_mug]
else:
VerticesMug = []
NormalsMug = []
FacesMug = []
VColorsMug = []
UVsMug = []
HaveTexturesMug = []
TexturesListMug = []
if renderTeapots:
VerticesTeapot = [vGT]
NormalsTeapot = [vnGT]
FacesTeapot = [Faces]
VColorsTeapot = [VColors]
UVsTeapot = [UVs]
HaveTexturesTeapot = [HaveTextures]
TexturesListTeapot = [TexturesList]
else:
VerticesTeapot = []
NormalsTeapot = []
FacesTeapot = []
VColorsTeapot = []
UVsTeapot = []
HaveTexturesTeapot = []
TexturesListTeapot = []
# addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, verticesMug, f_list_mug, vc_mug, normalsMug, uv_mug, haveTextures_list_mug, textures_list_mug)
# addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, vGT, Faces, VColors, vnGT, UVs, HaveTextures, TexturesList)
# Override the teapot's per-vertex colours with the sampled uniform GT colour.
# BUG FIX: guard on renderTeapots — when teapots are disabled, VColorsTeapot is
# the empty list (see the else-branch above) and VColorsTeapot[0][0] raised IndexError.
if renderTeapots:
    VColorsTeapot = [[np.ones_like(VColorsTeapot[0][0]) * chVColorsGT.reshape([1, 3])]]
# Collect per-mesh colours/normals (with or without the background scene meshes)
# and bake the lighting into vertex colours for the GT renderer.
if renderBackground:
vc_scene = VColorsTeapot + VColorsMug + vc
vn_scene = NormalsTeapot + NormalsMug + vn
else:
vc_scene = VColorsTeapot + VColorsMug
vn_scene = NormalsTeapot + NormalsMug
vc_scene_illum = []
for mesh in range(len(vc_scene)):
if useDirectionalLight:
# Single directional light plus a global ambient constant.
vc_scene_illum += [computeGlobalAndDirectionalLighting(vn_scene[mesh], vc_scene[mesh], chLightAzGT, chLightElGT, chLightIntensityGT, chGlobalConstantGT)]
else:
# Spherical-harmonics environment lighting.
vc_scene_illum += [computeSphericalHarmonics(vn_scene[mesh], vc_scene[mesh], light_colorGT, chComponentGT)]
# Assemble the full geometry lists passed to the GT renderer.
# BUG FIX: in the renderBackground branch the UV / texture concatenations were
# computed but never assigned, leaving uvs_scene, haveTextures_scene and
# textures_list_scene stale (or undefined on the first iteration); assign them,
# mirroring the object-only branch below.
if renderBackground:
    v_scene = VerticesTeapot + VerticesMug + v
    f_scene = FacesTeapot + FacesMug + f_list
    uvs_scene = UVsTeapot + UVsMug + uv
    haveTextures_scene = HaveTexturesTeapot + HaveTexturesMug + haveTextures_list
    textures_list_scene = TexturesListTeapot + TexturesListMug + textures_list
else:
    v_scene = VerticesTeapot + VerticesMug
    f_scene = FacesTeapot + FacesMug
    uvs_scene = UVsTeapot + UVsMug
    haveTextures_scene = HaveTexturesTeapot + HaveTexturesMug
    textures_list_scene = TexturesListTeapot + TexturesListMug
# Create the OpenDR ground-truth renderer over the assembled scene.
rendererGT = createRendererGT(glMode, chAzGT, chElGT, chDistGT, center, v_scene, vc_scene_illum, f_scene, vn_scene, light_colorGT, chComponentGT, chVColorsGT, targetPosition.copy(), chDisplacementGT, width, height, uvs_scene, haveTextures_scene, textures_list_scene, frustum, None)
rendererGT.overdraw = True
rendererGT.nsamples = 1
rendererGT.msaa = False
rendererGT.initGL()
rendererGT.initGLTexture()
## Blender: Unlink and link new teapot.
if useBlender:
if renderTeapots:
# if currentScene != -1 and currentTargetIndex != -1 and currentTeapot != -1 and teapot != None:
# Swap in the current teapot model (fixed mesh or shape-model instance).
if teapot.name in scene.objects:
scene.objects.unlink(teapot)
if useShapeModel:
deleteInstance(teapot)
if not useShapeModel:
teapot = blender_teapots[currentTeapotModel]
else:
# Build a fresh Blender mesh from the current shape-model vertices.
teapotMesh = createMeshFromData('teapotShapeModelMesh', chVerticesGT.r.tolist(),
faces.astype(np.int32).tolist())
teapotMesh.layers[0] = True
teapotMesh.layers[1] = True
teapotMesh.pass_index = 1
targetGroup = bpy.data.groups.new('teapotShapeModelGroup')
targetGroup.objects.link(teapotMesh)
teapot = bpy.data.objects.new('teapotShapeModel', None)
teapot.dupli_type = 'GROUP'
teapot.dupli_group = targetGroup
teapot.pass_index = 1
mat = makeMaterial('teapotMat', (0, 0, 0), (0, 0, 0), 1)
setMaterial(teapotMesh, mat)
# center = centerOfGeometry(teapot.dupli_group.objects, teapot.matrix_world)
placeNewTarget(scene, teapot, targetPosition[:].copy())
teapot.layers[0] = True
teapot.layers[1] = True
original_matrix_world = teapot.matrix_world.copy()
if renderMugs:
if mug.name in scene.objects:
scene.objects.unlink(mug)
# deleteInstance(mug)
# NOTE(review): indexes blender_mugs with currentTeapotModel (not a mug index)
# — works while both lists have one entry; confirm if more models are enabled.
mug = blender_mugs[currentTeapotModel]
placeNewTarget(scene, mug, targetPosition[:].copy())
mug.layers[1] = True
mug.layers[0] = True
original_matrix_world_mug = mug.matrix_world.copy()
# LIGHT RANDOMIZATION
# Environment-map choice; the hard-coded 0 overrides the random pick (debug setting).
hdrchoice = np.random.choice(len(hdrstorender))
hdrchoice = 0
hdrValues = hdrstorender[hdrchoice][1]
hdrFile = hdrstorender[hdrchoice][0]
hdridx = hdrValues[0]
envMapCoeffs = hdrValues[1]
envMapFilename = hdrFile
# if not useBlender:
envMapTexture = np.array(imageio.imread(envMapFilename))[:,:,0:3]
# --- Rejection-sampling loop: draw random configurations until the per-scene
# budget is met, bailing out if the acceptance rate drops too low. ---
numTeapotTrain = 0
numAttempts = 0 #Rejection sampling
maxAttempts = max(int(trainSize/(lenScenes*len(renderTeapotsList))),1)
exitInstantiationLoop = False
while numTeapotTrain < maxAttempts and not exitInstantiationLoop:
numAttempts = numAttempts + 1
# Give up on this configuration if fewer than ~1 in 20 attempts are accepted.
if numAttempts > 50 and numAttempts/(numTeapotTrain + 1) > 20:
exitInstantiationLoop = True
ignore = False
# Ambient intensity normalised by the env map's luminance (offset disabled: reset to 0).
meanValIntensityOffset = np.random.uniform(-0.5,0.5)
meanValIntensityOffset = 0
chAmbientIntensityGTVals = (0.8 + meanValIntensityOffset)/(0.3*envMapCoeffs[0,0] + 0.59*envMapCoeffs[0,1]+ 0.11*envMapCoeffs[0,2])
# chAmbientIntensityGTVals = chGlobalConstantGT.r
#Light randomization
phiOffsetVals = np.random.uniform(0,2*np.pi, 1)
# phiOffsetVals = 0
# phiOffset[:] = 0
from numpy.random import choice
# Camera azimuth restricted to the frontal half-circle; elevation in (0.05, pi/2).
chAzGTVals = np.mod(np.random.uniform(0,np.pi, 1) - np.pi/2, 2*np.pi)
chElGTVals = np.random.uniform(0.05,np.pi/2, 1)
chLightAzGTVals = np.random.uniform(0,2*np.pi, 1)
# chLightAzGTVals = chLightAzGT.r
chLightElGTVals = np.random.uniform(0,np.pi/2, 1)
# chLightElGTVals = chLightElGT.r
# chLightIntensityGTVals = chLightIntensityGT.r
chLightIntensityGTVals = np.random.uniform(0,1)
chGlobalConstantGTVals = np.random.uniform(0.1,0.9)
# chAmbientIntensityGTVals = chGlobalConstantGTVals
chVColorsGTVals = np.random.uniform(0.0,1.0, [1, 3])
# SH coefficients rotated by the env-map phi offset (indices [0,3,2,1,...] swap
# SH band-1 components between coordinate conventions).
# NOTE(review): these are computed from chumpy expressions totalOffset/phiOffset
# *before* phiOffset is updated below — confirm whether the result is lazy
# (re-evaluated later) or intentionally uses the previous offset.
envMapCoeffsRotatedVals = np.dot(light_probes.chSphericalHarmonicsZRotation(totalOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
envMapCoeffsRotatedRelVals = np.dot(light_probes.chSphericalHarmonicsZRotation(phiOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
# SHAPE RANDOMIZATION
# Shape-model latent coefficients drawn from a standard normal prior.
shapeParams = np.random.randn(latentDim)
# shapeParams = np.zeros(latentDim)
# shapeParams[0:4] = np.random.randn(4)
chShapeParamsGTVals = shapeParams
## Update renderer scene latent variables.
# chAmbientIntensityGT[:] = chGlobalConstantGTVals
chAmbientIntensityGT[:] = chGlobalConstantGT.r
phiOffset[:] = phiOffsetVals
chAzGT[:] = chAzGTVals
chElGT[:] = chElGTVals
chLightAzGT[:] = chLightAzGTVals
chLightElGT[:] = chLightElGTVals
teapotCamElGT = 0
teapotPosOffsetVals = 0
if useOpenDR:
# Camera eye position in world coordinates, recovered from the view matrix.
cameraEye = np.linalg.inv(np.r_[rendererGT.camera.view_mtx, np.array([[0, 0, 0, 1]])])[0:3, 3]
vecToCenter = targetPosition - cameraEye
vecToCenter = vecToCenter / np.linalg.norm(vecToCenter)
rightCamVec = np.cross(vecToCenter, np.array([0, 0, 1]))
chObjAzGTVals = 0
chObjDistGTVals = 0
chObjRotationGTVals = 0
if renderTeapots:
# Sample the teapot's azimuth and distance on the supporting surface.
chObjAzGTVals = np.random.uniform(0, np.pi * 2)
chObjDistGTVals = np.random.uniform(0, np.minimum(supportRad, 0.3))
chObjAzGT[:] = chObjAzGTVals
chVColorsGT[:] = chVColorsGTVals
try:
chShapeParamsGT[:] = shapeParams
except:
chShapeParamsGT[:] = np.random.randn(latentDim)
#Instantiate such that we always see the handle if only a bit!
chObjRotationGTVals = np.random.uniform(0,np.pi*2)
if not centeredObject:
# Pick a collision-free (distance, rotation) bin and jitter within it.
teapotPlacement = np.random.choice(instantiationBinsTeapot.sum())
chObjDistGTVals = totalBinsTeapot[0].ravel()[instantiationBinsTeapot][teapotPlacement]
chObjRotationGTVals = totalBinsTeapot[1].ravel()[instantiationBinsTeapot][teapotPlacement]
chObjDistGTVals = np.random.uniform(chObjDistGTVals - distInterval/2, chObjDistGTVals + distInterval/2)
chObjRotationGTVals = np.mod(np.random.uniform(chObjRotationGTVals - rotationInterval/2, chObjRotationGTVals + rotationInterval/2), 2*np.pi)
else:
chObjDistGTVals = 0
chObjRotationGTVals = 0
chObjDistGT[:] = chObjDistGTVals
chObjRotationGT[:] = chObjRotationGTVals
if not centeredObject:
# Angle between the view axis and the teapot, used for the relative azimuth label.
vecToTeapot = targetPosition + teapotPosOffset.r - cameraEye
vecToTeapot = vecToTeapot / np.linalg.norm(vecToTeapot)
teapotPosRight = np.sign(rightCamVec.dot(vecToTeapot))
angleToTeapot = np.arccos(vecToTeapot.dot(vecToCenter))
if np.isnan(angleToTeapot):
angleToMug = 0
ignore = True
chObjAzGTRel = chAzGT.r - teapotPosRight * angleToTeapot
teapotPosOffsetVals = teapotPosOffset.r
chVColorsMugVals = np.random.uniform(0.0, 1, [1, 3])
if renderMugs:
chObjDistMugVals = np.random.uniform(0,np.minimum(supportRad, 0.4))
chObjRotationMugVals = np.random.uniform(0,np.pi*2)
instantiationBinsMugUpdated = instantiationBinsMug
if renderTeapots:
if not centeredObject:
ys = -np.cos(totalBinsMug[1]) * totalBinsMug[0]
xs = np.sin(totalBinsMug[1]) * totalBinsMug[0]
y = -np.cos(chObjRotationGTVals) * chObjDistGTVals
x = np.sin(chObjRotationGTVals) * chObjDistGTVals
instantiationBinsMugUpdated = instantiationBinsMug.copy()
# instantiationBinsMugUpdated[(totalBinsMug[0] > chObjDistGTVals - 0.15) & (totalBinsMug[0] < chObjDistGTVals + 0.15) & (totalBinsMug[1] > chObjRotationGTVals - 15*np.pi/180) & (totalBinsMug[1] < chObjRotationGTVals + 15*np.pi/180)] = False
instantiationBinsMugUpdated[np.sqrt((ys-y)**2 + (xs-x)**2) < 0.15] = False
if instantiationBinsMugUpdated.sum() == 0:
ignore = True
mugPlacement = np.random.choice(instantiationBinsMugUpdated.sum())
chObjDistMugVals = totalBinsMug[0].ravel()[instantiationBinsMugUpdated][mugPlacement]
chObjRotationMugVals = totalBinsMug[1].ravel()[instantiationBinsMugUpdated][mugPlacement]
chObjDistMugVals = np.random.uniform(chObjDistMugVals - distInterval/2, chObjDistMugVals + distInterval/2)
chObjRotationMugVals = np.mod(np.random.uniform(chObjRotationMugVals - rotationInterval/2, chObjRotationMugVals + rotationInterval/2), 2*np.pi)
else:
chObjDistGTVals = 0
chObjRotationGTVals = 0
chObjDistMug[:] = chObjDistMugVals
chObjRotationMug[:] = chObjRotationMugVals
vecToMug = targetPosition + mugPosOffset.r - cameraEye
vecToMug = vecToMug / np.linalg.norm(vecToMug)
mugPosRight = np.sign(rightCamVec.dot(vecToMug))
angleToMug = np.arccos(vecToMug.dot(vecToCenter))
if np.isnan(angleToMug):
angleToMug = 0
ignore = True
chObjAzMugVals = np.random.uniform(chAzGT.r - mugPosRight*angleToMug - 2 * np.pi / 3, chAzGT.r - mugPosRight * angleToMug + 2 * np.pi / 3)
chObjAzMugRel = chAzGT.r - mugPosRight*angleToMug
chObjAzMug[:] = chObjAzMugVals
chVColorsMug[:] = chVColorsMugVals
vecMugToCamGT = cameraEye - (mugPosOffset + center)
mugCamElGT = 2 * ch.arctan(ch.norm(ch.array([0, -1, 0]) * ch.norm(vecMugToCamGT) - vecMugToCamGT * ch.norm(ch.array([0, -1, 0]))) / ch.norm(ch.array([0, -1, 0]) * ch.norm(vecMugToCamGT) + ch.norm(ch.array([0, -1, 0])) * vecMugToCamGT))
vecTeapotToCamGT = cameraEye - (teapotPosOffset + center)
teapotCamElGT = 2 * ch.arctan(ch.norm(ch.array([0, -1, 0]) * ch.norm(vecTeapotToCamGT) - vecTeapotToCamGT * ch.norm(ch.array([0, -1, 0]))) / ch.norm(ch.array([0, -1, 0]) * ch.norm(vecTeapotToCamGT) + ch.norm(
ch.array([0, -1, 0])) * vecTeapotToCamGT))
mugPosOffsetVals = mugPosOffset.r
# --- Mirror the sampled configuration into the Blender scene. ---
if useBlender and not ignore:
updateEnviornmentMap(envMapFilename, scene)
rotateEnviornmentMap(totalOffset.r.copy(), scene)
# Blender uses degrees and the opposite azimuth sign convention to OpenDR here.
placeCamera(scene.camera, -chAzGT.r[:].copy() * 180 / np.pi,
chElGT.r[:].copy() * 180 / np.pi, chDistGT.r[0].copy(),
center[:].copy() + targetPosition[:].copy())
if renderTeapots:
# Rotate about the object's own origin: translate to origin, rotate, translate back.
azimuthRot = mathutils.Matrix.Rotation(chObjAzGT.r[:].copy(), 4, 'Z')
teapot.matrix_world = mathutils.Matrix.Translation(original_matrix_world.to_translation() + mathutils.Vector(teapotPosOffset.r)) * azimuthRot * (mathutils.Matrix.Translation(-original_matrix_world.to_translation())) * original_matrix_world
setObjectDiffuseColor(teapot, chVColorsGT.r.copy())
if useShapeModel:
# Push the deformed shape-model vertices into the Blender mesh.
mesh = teapot.dupli_group.objects[0]
for vertex_i, vertex in enumerate(mesh.data.vertices):
vertex.co = mathutils.Vector(chVerticesGT.r[vertex_i])
if renderMugs:
setObjectDiffuseColor(mug, chVColorsMug.r.copy())
azimuthRotMug = mathutils.Matrix.Rotation(chObjAzMug.r[:].copy() - np.pi / 2, 4, 'Z')
mug.matrix_world = mathutils.Matrix.Translation(original_matrix_world_mug.to_translation() + mathutils.Vector(mugPosOffset.r)) * azimuthRotMug * (mathutils.Matrix.Translation(-original_matrix_world_mug.to_translation())) * original_matrix_world_mug
scene.update()
## Some validation checks:
# Occlusion / visibility tests used to reject degenerate samples.
if useOpenDR and not ignore:
if renderTeapots:
occlusion = getOcclusionFraction(rendererGT, id=teapotSceneIndex)
vis_occluded = np.array(rendererGT.indices_image == teapotSceneIndex+1).copy().astype(np.bool)
# NOTE(review): the two np.ones_like overrides below disable the visibility
# masks entirely (everything counts as visible) — debug setting, confirm.
vis_occluded = np.ones_like(vis_occluded)
vis_im = np.array(rendererGT.image_mesh_bool([teapotSceneIndex])).copy().astype(np.bool)
vis_im = np.ones_like(vis_im)
if renderMugs:
occlusionMug = getOcclusionFraction(rendererGT, id=mugSceneIndex)
vis_occluded_mug = np.array(rendererGT.indices_image == mugSceneIndex+1).copy().astype(np.bool)
vis_im_mug = np.array(rendererGT.image_mesh_bool([mugSceneIndex])).copy().astype(np.bool)
if renderTeapots:
if occlusion < occlusionMin or occlusion > occlusionMax:
ignore = True
# if occlusion > 0.9 or vis_occluded.sum() < 10 or np.isnan(occlusion):
#Cian change:
# Very strict: reject any sample with more than 1% teapot occlusion.
if occlusion > 0.01 or vis_occluded.sum() < 10 or np.isnan(occlusion):
ignore = True
# Reject if the teapot touches the image border (partially out of frame).
if np.sum(vis_im[:,0]) > 1 or np.sum(vis_im[0,:]) > 1 or np.sum(vis_im[:,-1]) > 1 or np.sum(vis_im[-1,:]) > 1:
ignore = True
if renderMugs and not showMug:
if occlusionMug > 0.9 or vis_occluded_mug.sum() < 10 or np.isnan(occlusionMug):
ignore = True
#Check that objects are not only partly in the viewing plane of the camera:
if np.sum(vis_im_mug[:,0]) > 1 or np.sum(vis_im_mug[0,:]) > 1 or np.sum(vis_im_mug[:,-1]) > 1 or np.sum(vis_im_mug[-1,:]) > 1:
if showMug:
ignore = True
#Don't take into account "ignore" variable for now...
# NOTE(review): this override discards all the validation above — every sample
# is accepted regardless of the checks; confirm this is the intended mode.
ignore = False
# if not ignore:
# # Ignore if camera collides with occluding object as there are inconsistencies with OpenDR and Blender.
# cameraEye = np.linalg.inv(np.r_[rendererGT.camera.view_mtx, np.array([[0, 0, 0, 1]])])[0:3,3]
#
# vDists = rendererGT.v.r[rendererGT.f[rendererGT.visibility_image[
# rendererGT.visibility_image != 4294967295].ravel()].ravel()] - cameraEye
#
# minDistToObjects = 0.2
# maxDistToObjects = 0.6
#
# #Ignore when teapot or mug is up to 10 cm to the camera eye, or too far (more than 1 meter).
#
# if np.min(np.linalg.norm(vDists, axis=1)) <= clip_start:
# ignore = True
#
# if renderTeapots:
# vDistsTeapot = rendererGT.v.r[rendererGT.f[rendererGT.visibility_image[vis_occluded].ravel()].ravel()] - cameraEye
# if np.min(np.linalg.norm(vDistsTeapot, axis=1)) <= minDistToObjects or np.min(np.linalg.norm(vDistsTeapot, axis=1)) > maxDistToObjects:
# ignore = True
#
# if renderMugs:
# vDistsMug = rendererGT.v.r[rendererGT.f[rendererGT.visibility_image[vis_occluded_mug].ravel()].ravel()] - cameraEye
# if np.min(np.linalg.norm(vDistsMug, axis=1)) <= minDistToObjects or np.min(np.linalg.norm(vDistsMug, axis=1)) > maxDistToObjects:
# ignore = True
#
# if useBlender:
# if renderTeapots:
# cubeTeapot = getCubeObj(teapot)
# cubeScene.objects.link(cubeTeapot)
#
# if renderMugs:
# cubeMug = getCubeObj(mug)
# cubeScene.objects.link(cubeMug)
#
# cubeParentSupportObj = cubeScene.objects['cube'+str(parentIdx)]
# cubeRoomObj = cubeScene.objects['cube' + str(roomInstanceNum)]
# cubeScene.update()
#
# if renderTeapots:
# if collision.targetCubeSceneCollision(cubeTeapot, cubeScene, 'cube'+str(roomInstanceNum), cubeParentSupportObj):
# print("Teapot intersects with an object.")
# ignore = True
#
# if not ignore and collision.instancesIntersect(mathutils.Matrix.Translation(mathutils.Vector((0, 0, +0.02))), [cubeTeapot], mathutils.Matrix.Identity(4), [cubeParentSupportObj]):
# print("Teapot interesects supporting object.")
# ignore = True
#
#
# if not ignore and collision.instancesIntersect(mathutils.Matrix.Identity(4), [cubeTeapot], mathutils.Matrix.Identity(4), [cubeRoomObj]):
# print("Teapot intersects room")
# ignore = True
#
# if not ignore and not ignore and not collision.instancesIntersect(mathutils.Matrix.Translation(mathutils.Vector((0, 0, -0.02))), [cubeTeapot], mathutils.Matrix.Identity(4), [cubeParentSupportObj]):
# print("Teapot not on table.")
# ignore = True
#
# if renderMugs:
# if not ignore and collision.targetCubeSceneCollision(cubeMug, cubeScene, 'cube' + str(roomInstanceNum), cubeParentSupportObj):
# print("Mug intersects with an object.")
# ignore = True
#
# if not ignore and collision.instancesIntersect(mathutils.Matrix.Translation(mathutils.Vector((0, 0, +0.02))), [cubeMug], mathutils.Matrix.Identity(4), [cubeParentSupportObj]):
# print("Mug intersects supporting object")
# ignore = True
#
# if not ignore and collision.instancesIntersect(mathutils.Matrix.Identity(4), [cubeMug], mathutils.Matrix.Identity(4), [cubeRoomObj]):
# print("Mug intersects room")
# ignore = True
#
# if not ignore and not collision.instancesIntersect(mathutils.Matrix.Translation(mathutils.Vector((0, 0, -0.02))), [cubeMug], mathutils.Matrix.Identity(4), [cubeParentSupportObj]):
# print("Mug not on table.")
# ignore = True
#
# if not ignore:
# print("No collision issues")
#
# if renderTeapots:
# cubeScene.objects.unlink(cubeTeapot)
# deleteObject(cubeTeapot)
#
# if renderMugs:
# cubeScene.objects.unlink(cubeMug)
# deleteObject(cubeMug)
## Environment map update if using Cycles.
if not ignore and useBlender:
if captureEnvMapFromBlender:
envMapCoeffs = captureSceneEnvMap(scene, envMapTexture, roomInstanceNum,
totalOffset.r.copy(), links, treeNodes, teapot, center,
targetPosition, width, height, 2000, gtDir, train_i)
placeCamera(scene.camera, -chAzGT.r[:].copy() * 180 / np.pi,
chElGT.r[:].copy() * 180 / np.pi, chDistGT.r[0].copy(),
center[:].copy() + targetPosition[:].copy())
if useBlender:
envMapCoeffsRotated[:] = np.dot(light_probes.chSphericalHarmonicsZRotation(0),
envMapCoeffs[[0, 3, 2, 1, 4, 5, 6, 7, 8]])[[0, 3, 2, 1, 4, 5, 6, 7, 8]]
envMapCoeffsRotatedRel[:] = np.dot(light_probes.chSphericalHarmonicsZRotation(-chObjAzGT.r),
envMapCoeffs[[0, 3, 2, 1, 4, 5, 6, 7, 8]])[[0, 3, 2, 1, 4, 5, 6, 7, 8]]
else:
envMapCoeffsRotated[:] = np.dot(light_probes.chSphericalHarmonicsZRotation(totalOffset),
envMapCoeffs[[0, 3, 2, 1, 4, 5, 6, 7, 8]])[[0, 3, 2, 1, 4, 5, 6, 7, 8]]
envMapCoeffsRotatedRel[:] = np.dot(light_probes.chSphericalHarmonicsZRotation(phiOffset),
envMapCoeffs[[0, 3, 2, 1, 4, 5, 6, 7, 8]])[[0, 3, 2, 1, 4, 5, 6, 7, 8]]
envMapCoeffsRotatedVals = envMapCoeffsRotated.r
envMapCoeffsRotatedRelVals = envMapCoeffsRotatedRel.r
if renderBlender and not ignore:
bpy.context.screen.scene = scene
scene.render.filepath = gtDir + 'images/im' + str(train_i) + '.jpeg'
# image = np.array(imageio.imread(scene.render.filepath))[:, :, 0:3]
# image[image > 1] = 1
# blenderRender = image
# lin2srgb(blenderRender)
# cv2.imwrite(gtDir + 'images/im' + str(train_i) + '.jpeg', 255 * blenderRender[:, :, [2, 1, 0]], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
scene.render.image_settings.file_format = 'JPEG'
scene.render.filepath = gtDir + 'images/im' + str(train_i) + '.jpeg'
scene.render.layers[0].use = True
nt = bpy.context.scene.node_tree
nt.nodes['Render Layers'].layer = 'RenderLayer'
scene.render.layers['RenderLayer'].use_pass_combined = True
scene.render.layers['RenderLayer'].use_pass_environment = True
backgroundNode = scene.world.node_tree.nodes['WorldBackground']
backgroundNode.inputs[1].default_value = 2
bpy.ops.render.render(write_still=True)
image = np.array(imageio.imread(scene.render.filepath))[:, :, 0:3]
blenderRender = srgb2lin(image)
if captureEnvMapFromBlender and useOpenDR:
    # Luma of the Blender reference render (ITU-R BT.601 weights).
    blenderRenderGray = 0.3 * blenderRender[:, :, 0] + 0.59 * blenderRender[:, :, 1] + 0.11 * blenderRender[:, :, 2]
    # For some reason OpenDR needs several fixed-point iterations before its
    # mean intensity matches Blender's, so rescale the ambient term repeatedly.
    # The original code unrolled exactly five identical iterations; keep that
    # count so the numerical result is unchanged.
    for _ in range(5):
        rendererGTGray = 0.3 * rendererGT[:, :, 0].r[:] + 0.59 * rendererGT[:, :, 1].r[:] + 0.11 * rendererGT[:, :, 2].r[:]
        meanIntensityScale = np.mean(blenderRenderGray[vis_occluded]) / np.mean(rendererGTGray[vis_occluded]).copy()
        chAmbientIntensityGT[:] = chAmbientIntensityGT.r[:].copy() * meanIntensityScale
    # Discard samples whose OpenDR render is nearly black. As in the original
    # unrolled code, rendererGTGray here is the luma computed *before* the
    # final ambient rescale (it is not recomputed after the last update).
    if np.mean(rendererGTGray, axis=(0, 1)) < 0.01:
        ignore = True
if useOpenDR:
image = rendererGT.r[:].copy()
lin2srgb(image)
if not ignore:
if useOpenDR:
cv2.imwrite(gtDir + 'images_opendr/im' + str(train_i) + '.jpeg', 255 * image[:, :, [2, 1, 0]], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
if renderTeapots:
np.save(gtDir + 'masks_occlusion/mask' + str(train_i) + '.npy_occluded', vis_occluded)
np.save(gtDir + 'masks_occlusion/mask' + str(train_i) + '.npy', vis_im)
coordsTeapotX = coords[1][vis_im]
coordsTeapotY = coords[0][vis_im]
if renderMugs:
np.save(gtDir + 'masks_occlusion/mask' + str(train_i) + '_mug_occluded.npy', vis_occluded_mug)
np.save(gtDir + 'masks_occlusion/mask' + str(train_i) + '_mug.npy', vis_im_mug)
coordsMugX = coords[1][vis_im_mug]
coordsMugY = coords[0][vis_im_mug]
#Add groundtruth to arrays
trainAzsGT = chAzGTVals
trainObjAzsGT = chObjAzGTVals
trainElevsGT = chElGTVals
trainLightAzsGT = chLightAzGTVals
trainLightElevsGT = chLightElGTVals
trainLightIntensitiesGT = chLightIntensityGTVals
trainVColorGT = chVColorsGTVals
lightCoeffs = envMapCoeffsRotatedVals[None, :].copy().squeeze()
lightCoeffs = 0.3*lightCoeffs[:,0] + 0.59*lightCoeffs[:,1] + 0.11*lightCoeffs[:,2]
trainLightCoefficientsGT = lightCoeffs
lightCoeffsRel = envMapCoeffsRotatedRelVals[None, :].copy().squeeze()
lightCoeffsRel = 0.3*lightCoeffsRel[:,0] + 0.59*lightCoeffsRel[:,1] + 0.11*lightCoeffsRel[:,2]
trainLightCoefficientsGTRel = lightCoeffsRel
trainAmbientIntensityGT = chAmbientIntensityGTVals
trainEnvMapPhiOffsets = phiOffset
trainScenes = sceneNumber
trainTeapotIds = teapot_i
trainEnvMaps = hdridx
trainShapeModelCoeffsGT = chShapeParamsGTVals.copy()
trainOcclusions = -1
trainIds = train_i
trainTargetIndices = targetIndex
trainObjDistGT = chObjDistGTVals
trainObjRotationGT = chObjRotationGTVals
trainObjDistMug = chObjDistMugVals
trainObjRotationMug = chObjRotationMugVals
trainObjAzMug = chObjAzMugVals
trainVColorsMug = chVColorsMugVals
trainMugElRel = mugCamElGT
trainTeapotElRel = teapotCamElGT
trainMugPosOffset = mugPosOffsetVals
trainTeapotPosOffset = teapotPosOffsetVals
trainBBMug = np.array([coordsMugX.min(),coordsMugX.max(),coordsMugY.min(),coordsMugY.max(),])
trainBBTeapot = np.array([coordsTeapotX.min(),coordsTeapotX.max(),coordsTeapotY.min(),coordsTeapotY.max(),])
trainTeapotPresent = renderTeapots
trainMugPresent = renderMugs
gtDataset.resize(gtDataset.shape[0] + 1, axis=0)
gtDataset[-1] = np.array([(trainIds, trainAzsGT, trainObjAzsGT, trainElevsGT,
trainLightAzsGT, trainLightElevsGT,
trainLightIntensitiesGT, trainVColorGT, trainScenes,
trainTeapotIds, trainEnvMaps, trainOcclusions,
trainTargetIndices, trainLightCoefficientsGT,
trainLightCoefficientsGTRel, trainAmbientIntensityGT,
phiOffsetVals, trainShapeModelCoeffsGT,
trainObjDistGT,
trainObjRotationGT,
trainObjDistMug,
trainObjRotationMug,
trainObjAzMug,
trainVColorsMug,
trainTeapotElRel,
trainMugElRel,
trainMugPosOffset,
trainTeapotPosOffset,
trainBBMug,
trainBBTeapot,
trainTeapotPresent,
trainMugPresent
)], dtype=gtDtype)
gtDataFile.flush()
train_i = train_i + 1
numTeapotTrain = numTeapotTrain + 1
if np.mod(train_i, 100) == 0:
print("Generated " + str(train_i) + " GT instances.")
print("Generating groundtruth. Iteration of " + str(range(int(trainSize/(lenScenes*len(hdrstorender)*len(renderTeapotsList))))) + " teapots")
# Re-rendering mode: read the parameters of a previously generated ground-truth
# set from its HDF5 file and render every sample again.
if renderFromPreviousGT:
    groundTruthFilename = 'groundtruth/' + previousGTPrefix + '/groundTruth.h5'
    gtDataFileToRender = h5py.File(groundTruthFilename, 'r')
    groundTruthToRender = gtDataFileToRender[previousGTPrefix]
else:
    # This script only supports re-rendering from a previous ground truth.
    exit()
# Sentinels: force scene / target / teapot (re)loading on the first iteration.
currentScene = -1
currentTeapot = -1
currentTargetIndex = -1
teapot = None
# The original if/else assigned the identical expression in both branches (and
# the else branch was unreachable anyway: exit() is called above when
# renderFromPreviousGT is False), so collapse it to a single assignment.
rangeGT = np.arange(len(groundTruthToRender))
teapot_i = 0
# experimentPrefix = 'train4_occlusion_shapemodel_10k'
# experimentDir = 'experiments/' + experimentPrefix + '/'
# subsetToRender = np.load(experimentDir + 'test.npy')[np.arange(0,100)]
# Render every sample of the previous ground truth (a commented-out variant
# above restricted this to an experiment's test split).
subsetToRender = np.arange(len(rangeGT))
# subsetToRender = np.arange(len(rangeGT))
if useShapeModel:
    # -1 marks "use the morphable shape model" instead of a fixed teapot mesh.
    teapot_i = -1
# addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, v_teapots[currentTeapotModel][0], f_list_teapots[currentTeapotModel][0], vc_teapots[currentTeapotModel][0], vn_teapots[currentTeapotModel][0], uv_teapots[currentTeapotModel][0], haveTextures_list_teapots[currentTeapotModel][0], textures_list_teapots[currentTeapotModel][0])
idxToRender = rangeGT[:][subsetToRender]
# Group the samples by scene, and within each scene by target index, so that
# expensive scene/target reloads happen as rarely as possible in the render loop.
sceneIdxsToRender = groundTruthToRender['trainScenes'][idxToRender]
sortedSceneIndices = np.argsort(sceneIdxsToRender)
sortedSceneAndTargetIdxs = np.arange(len(idxToRender))
for sceneIdx in np.unique(sceneIdxsToRender[sortedSceneIndices]):
    # Sort this scene's samples by their target index, then write the reordered
    # sample ids back into the slots belonging to this scene.
    sortedTargetIdxs = np.argsort(groundTruthToRender['trainTargetIndices'][idxToRender][np.where(sceneIdxsToRender[sortedSceneIndices]==sceneIdx)])
    sortedSceneAndTargetIdx = idxToRender[sortedSceneIndices][np.where(sceneIdxsToRender[sortedSceneIndices]==sceneIdx)][sortedTargetIdxs]
    sortedSceneAndTargetIdxs[np.where(sceneIdxsToRender[sortedSceneIndices]==sceneIdx)] = sortedSceneAndTargetIdx
if renderFromPreviousGT:
for gtIdx in sortedSceneAndTargetIdxs:
if not replaceNewGroundtruth:
if gtIdx in gtDataset['trainIds']:
continue
sceneNumber = groundTruthToRender['trainScenes'][gtIdx]
# train_i = np.where(idxToRender==gtIdx)[0][0]
train_i = gtIdx
sceneIdx = scene_io_utils.getSceneIdx(sceneNumber, replaceableScenesFile)
print("Rendering scene: " + str(sceneIdx))
sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndicesScene, targetPositions = scene_io_utils.getSceneInformation(sceneIdx, replaceableScenesFile)
sceneDicFile = 'data/scene' + str(sceneNumber) + '.pickle'
if sceneIdx != currentScene:
# v, f_list, vc, vn, uv, haveTextures_list, textures_list = sceneimport.loadSavedScene(sceneDicFile)
import copy
v2, f_list2, vc2, vn2, uv2, haveTextures_list2, textures_list2 = scene_io_utils.loadSavedScene(sceneDicFile, tex_srgb2lin)
if sceneIdx != currentScene:
if useBlender and not loadBlenderSceneFile:
bpy.ops.wm.read_factory_settings()
scene = scene_io_utils.loadBlenderScene(sceneIdx, replaceableScenesFile)
scene_io_utils.setupScene(scene, roomInstanceNum, scene.world, scene.camera, width, height, 16, useCycles, True)
scene.update()
#Save barebones scene.
elif useBlender and loadBlenderSceneFile:
bpy.ops.wm.read_factory_settings()
scene_io_utils.loadSceneBlendData(sceneIdx, replaceableScenesFile)
scene = bpy.data.scenes['Main Scene']
# Configure scene
if useBlender:
if renderTeapots:
targetModels = []
blender_teapots = []
teapots = [line.strip() for line in open('teapots.txt')]
selection = [teapots[i] for i in renderTeapotsList]
scene_io_utils.loadTargetsBlendData()
for teapotIdx, teapotName in enumerate(selection):
teapot = bpy.data.scenes[teapotName[0:63]].objects['teapotInstance' + str(renderTeapotsList[teapotIdx])]
teapot.layers[1] = True
teapot.layers[2] = True
targetModels = targetModels + [teapotIdx]
blender_teapots = blender_teapots + [teapot]
if renderMugs:
blender_mugs = []
selectionMugs = [mugs[i] for i in renderMugsList]
scene_io_utils.loadMugsBlendData()
for mugIdx, mugName in enumerate(selectionMugs):
mug = bpy.data.scenes[mugName[0:63]].objects['mugInstance' + str(renderMugsList[mugIdx])]
mug.layers[1] = True
mug.layers[2] = True
mugModels = mugModels + [mug]
blender_mugs = blender_mugs + [mug]
# setupSceneGroundtruth(scene, width, height, clip_start, 5000, 'CUDA', 'CUDA_MULTI_2')
setupSceneGroundtruth(scene, width, height, clip_start, 5000)
treeNodes = scene.world.node_tree
links = treeNodes.links
unlinkedObj = None
envMapFilename = None
targetIndex = groundTruthToRender['trainTargetIndices'][gtIdx]
if sceneIdx != currentScene and not renderMugs and sceneIdx == 44 and sceneNumber == 114:
if len(v) > 1:
removeObjectData(1, v, f_list, vc, vn, uv, haveTextures_list, textures_list)
if sceneIdx != currentScene or targetIndex != currentTargetIndex:
targetPosition = targetPositions[np.where(targetIndex==np.array(targetIndicesScene))[0][0]]
import copy
v, f_list, vc, vn, uv, haveTextures_list, textures_list = copy.deepcopy(v2), copy.deepcopy(f_list2), copy.deepcopy(vc2), copy.deepcopy(vn2), copy.deepcopy(uv2), copy.deepcopy(haveTextures_list2), copy.deepcopy(textures_list2)
removeObjectData(len(v) -1 - targetIndex, v, f_list, vc, vn, uv, haveTextures_list, textures_list)
if sceneIdx != currentScene or targetIndex != currentTargetIndex:
if useBlender:
if unlinkedObj != None:
scene.objects.link(unlinkedObj)
try:
unlinkedObj = scene.objects[str(targetIndex)]
scene.objects.unlink(unlinkedObj)
except:
pass
if sceneIdx == 44 and sceneNumber == 114:
try:
scene.objects.unlink(scene.objects['fc035f7d732166b97b6fd5468f603b31_cleaned'])
except:
pass
teapot_i = groundTruthToRender['trainTeapotIds'][gtIdx]
if useShapeModel:
teapot_i = -1
if sceneIdx != currentScene or targetIndex != currentTargetIndex or teapot_i != currentTeapot:
##Destroy and create renderer
if useOpenDR:
rendererGT.makeCurrentContext()
rendererGT.clear()
contextdata.cleanupContext(contextdata.getContext())
if glMode == 'glfw':
glfw.destroy_window(rendererGT.win)
del rendererGT
if generateTriplets:
renderer.makeCurrentContext()
renderer.clear()
contextdata.cleanupContext(contextdata.getContext())
if glMode == 'glfw':
glfw.destroy_window(renderer.win)
del renderer
if renderTeapots:
currentTeapotModel = teapot_i
center = center_teapots[teapot_i]
if useShapeModel:
center = smCenterGT
UVs = smUVsGT
vGT = smVerticesGT
vnGT = smNormalsGT
Faces = smFacesGT
VColors = smVColorsGT
UVs = smUVsGT
HaveTextures = smHaveTexturesGT
TexturesList = smTexturesListGT
else:
vGT, vnGT = v_teapots[currentTeapotModel][0], vn_teapots[currentTeapotModel][0]
Faces = f_list_teapots[currentTeapotModel][0]
VColors = vc_teapots[currentTeapotModel][0]
UVs = uv_teapots[currentTeapotModel][0]
HaveTextures = haveTextures_list_teapots[currentTeapotModel][0]
TexturesList = textures_list_teapots[currentTeapotModel][0]
vGT, vnGT, teapotPosOffset = transformObject(vGT, vnGT, chScaleGT, chObjAzGT, chObjDistGT, chObjRotationGT, targetPosition)
if renderMugs:
verticesMug, normalsMug, mugPosOffset = transformObject(v_mug, vn_mug, chScale, chObjAzMug + np.pi / 2, chObjDistMug,
chObjRotationMug, targetPosition)
VerticesMug = [verticesMug]
NormalsMug = [normalsMug]
FacesMug = [f_list_mug]
VColorsMug = [vc_mug]
UVsMug = [uv_mug]
HaveTexturesMug = [haveTextures_list_mug]
TexturesListMug = [textures_list_mug]
else:
VerticesMug = []
NormalsMug = []
FacesMug = []
VColorsMug = []
UVsMug = []
HaveTexturesMug = []
TexturesListMug = []
if renderTeapots:
VerticesTeapot = [vGT]
NormalsTeapot = [vnGT]
FacesTeapot = [Faces]
VColorsTeapot = [VColors]
UVsTeapot = [UVs]
HaveTexturesTeapot = [HaveTextures]
TexturesListTeapot = [TexturesList]
else:
VerticesTeapot = []
NormalsTeapot = []
FacesTeapot = []
VColorsTeapot = []
UVsTeapot = []
HaveTexturesTeapot = []
TexturesListTeapot = []
# addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, verticesMug, f_list_mug, vc_mug, normalsMug, uv_mug, haveTextures_list_mug, textures_list_mug)
# addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, vGT, Faces, VColors, vnGT, UVs, HaveTextures, TexturesList)
rendererGT = createRendererGT(glMode, chAzGT, chElGT, chDistGT, center, VerticesTeapot + VerticesMug + v,
VColorsTeapot + VColorsMug + vc, FacesTeapot + FacesMug + f_list, NormalsTeapot + NormalsMug + vn,
light_colorGT, chComponentGT, chVColorsGT, targetPosition.copy(), chDisplacementGT, width, height,
UVsTeapot + UVsMug + uv, HaveTexturesTeapot + HaveTexturesMug + haveTextures_list,
TexturesListTeapot + TexturesListMug + textures_list, frustum, None)
rendererGT.overdraw = False
rendererGT.nsamples = 1
rendererGT.msaa = False
rendererGT.initGL()
rendererGT.initGLTexture()
rendererGT.makeCurrentContext()
if generateTriplets:
if useShapeModel:
center = smCenter
UVs = smUVs
v = smVertices
vn = smNormals
Faces = smFaces
VColors = smVColors
UVs = smUVs
HaveTextures = smHaveTextures
TexturesList = smTexturesList
else:
v, vn = v_teapots[currentTeapotModel][0], vn_teapots[currentTeapotModel][0]
Faces = f_list_teapots[currentTeapotModel][0]
VColors = vc_teapots[currentTeapotModel][0]
UVs = uv_teapots[currentTeapotModel][0]
HaveTextures = haveTextures_list_teapots[currentTeapotModel][0]
TexturesList = textures_list_teapots[currentTeapotModel][0]
v, vn, teapotPosOffset = transformObject(v, vn, chScale, chObjAz, chObjDist, chObjRotation, np.array([0, 0, 0]))
if renderMugs:
verticesMug, normalsMug, mugPosOffset = transformObject(v_mug, vn_mug, chScale, chObjAzMug + np.pi / 2, chObjDistMug,
chObjRotationMug, np.array([0, 0, 0]))
VerticesB = [v] + [verticesMug]
NormalsB = [vn] + [normalsMug]
FacesB = [Faces] + [f_list_mug]
VColorsB = [VColors] + [vc_mug]
UVsB = [UVs] + [uv_mug]
HaveTexturesB = [HaveTextures] + [haveTextures_list_mug]
TexturesListB = [TexturesList] + [textures_list_mug]
renderer = createRendererTarget(glMode, chAz, chEl, chDist, center, VerticesB, VColorsB, FacesB, NormalsB, light_color,
chComponent, chVColors, np.array([0, 0, 0]), chDisplacement, width, height, UVsB,
HaveTexturesB, TexturesListB, frustum, None)
else:
renderer = createRendererTarget(glMode, chAz, chEl, chDist, smCenter, [v], [smVColors], [smFaces], [vn], light_color,
chComponent, chVColors, 0, chDisplacement, width, height, [smUVs], [smHaveTextures],
[smTexturesList], frustum, None)
rendererGT.makeCurrentContext()
## Blender: Unlink and link new teapot.
if useBlender:
if renderTeapots:
# if currentScene != -1 and currentTargetIndex != -1 and currentTeapot != -1 and teapot != None:
if teapot.name in scene.objects:
scene.objects.unlink(teapot)
if useShapeModel:
deleteInstance(teapot)
if not useShapeModel:
teapot = blender_teapots[currentTeapotModel]
else:
teapotMesh = createMeshFromData('teapotShapeModelMesh', chVerticesGT.r.tolist(),
faces.astype(np.int32).tolist())
teapotMesh.layers[0] = True
teapotMesh.layers[1] = True
teapotMesh.pass_index = 1
targetGroup = bpy.data.groups.new('teapotShapeModelGroup')
targetGroup.objects.link(teapotMesh)
teapot = bpy.data.objects.new('teapotShapeModel', None)
teapot.dupli_type = 'GROUP'
teapot.dupli_group = targetGroup
teapot.pass_index = 1
mat = makeMaterial('teapotMat', (0, 0, 0), (0, 0, 0), 1)
setMaterial(teapotMesh, mat)
# center = centerOfGeometry(teapot.dupli_group.objects, teapot.matrix_world)
placeNewTarget(scene, teapot, targetPosition[:].copy())
teapot.layers[0] = True
teapot.layers[1] = True
original_matrix_world = teapot.matrix_world.copy()
if renderMugs:
if mug.name in scene.objects:
scene.objects.unlink(mug)
# deleteInstance(mug)
mug = blender_mugs[currentTeapotModel]
placeNewTarget(scene, mug, targetPosition[:].copy())
mug.layers[0] = True
mug.layers[1] = True
original_matrix_world_mug = mug.matrix_world.copy()
# Look up the environment map recorded for this sample: hdritems pairs each
# HDR file name with (index, spherical-harmonic coefficients).
hdridx = groundTruthToRender['trainEnvMaps'][gtIdx]
envMapFilename = ""
for hdrFile, hdrValues in hdritems:
    if hdridx == hdrValues[0]:
        # Matching env map found: install its SH coefficients and load the texture.
        envMapCoeffs[:] = hdrValues[1]
        envMapFilename = hdrFile
        # updateEnviornmentMap(envMapFilename, scene)
        envMapTexture = np.array(imageio.imread(envMapFilename))[:,:,0:3]
        break
# NOTE(review): assert is stripped under `python -O`; an explicit raise would be
# safer if this script is ever run optimized.
assert(envMapFilename != "")
# NOTE(review): log message is missing a space before "of".
print("Render " + str(gtIdx) + "of " + str(len(groundTruthToRender)))
ignore = False
# chAmbientIntensityGT[:] = groundTruthToRender['trainAmbientIntensityGT'][gtIdx]
chAmbientIntensityGT[:] = groundTruthToRender['trainAmbientIntensityGT'][gtIdx]
phiOffset[:] = groundTruthToRender['trainEnvMapPhiOffsets'][gtIdx]
chObjAzGT[:] = groundTruthToRender['trainObjAzsGT'][gtIdx]
chAzGT[:] = groundTruthToRender['trainAzsGT'][gtIdx]
chElGT[:] = groundTruthToRender['trainElevsGT'][gtIdx]
chLightAzGT[:] = groundTruthToRender['trainLightAzsGT'][gtIdx]
chLightElGT[:] = groundTruthToRender['trainLightElevsGT'][gtIdx]
# chLightIntensityGT[:] = np.random.uniform(5,10, 1)
chVColorsGT[:] = groundTruthToRender['trainVColorGT'][gtIdx]
# Restore per-sample parameters from the previous ground-truth file. Older
# ground-truth files may lack some datasets, so fall back to defaults when a
# column is missing. The original code used bare `except:`, which also swallows
# KeyboardInterrupt/SystemExit; narrowed to `except Exception:`.
try:
    chShapeParamsGT[:] = groundTruthToRender['trainShapeModelCoeffsGT'][gtIdx]
except Exception:
    # No stored shape coefficients: sample fresh ones from the standard-normal prior.
    chShapeParamsGT[:] = np.random.randn(latentDim)
try:
    chObjDistGT[:] = groundTruthToRender['trainObjDistGT'][gtIdx]
except Exception:
    chObjDistGT[:] = 0
try:
    chObjRotationGT[:] = groundTruthToRender['trainObjRotationGT'][gtIdx]
except Exception:
    chObjRotationGT[:] = 0
if renderMugs:
    # Mug parameters are only stored/needed when mugs are rendered.
    chObjDistMug[:] = groundTruthToRender['trainObjDistMug'][gtIdx]
    chObjRotationMug[:] = groundTruthToRender['trainObjRotationMug'][gtIdx]
    chObjAzMug[:] = groundTruthToRender['trainObjAzMug'][gtIdx]
    chVColorsMug[:] = groundTruthToRender['trainVColorsMug'][gtIdx]
if captureEnvMapFromBlender and not ignore and useBlender:
envMapCoeffs = captureSceneEnvMap(scene, envMapTexture, roomInstanceNum, totalOffset.r.copy(), links, treeNodes, teapot, center, targetPosition, width, height, 2000, gtDir, train_i)
if useBlender and captureEnvMapFromBlender:
envMapCoeffsRotated[:] = np.dot(light_probes.chSphericalHarmonicsZRotation(0), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
envMapCoeffsRotatedRel[:] = np.dot(light_probes.chSphericalHarmonicsZRotation(-chObjAzGT.r), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
else:
envMapCoeffsRotated[:] = np.dot(light_probes.chSphericalHarmonicsZRotation(totalOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
envMapCoeffsRotatedRel[:] = np.dot(light_probes.chSphericalHarmonicsZRotation(phiOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
envMapCoeffsRotatedVals = envMapCoeffsRotated.r
envMapCoeffsRotatedRelVals = envMapCoeffsRotatedRel.r
if useOpenDR and not ignore:
if renderTeapots:
occlusion = getOcclusionFraction(rendererGT, id=teapotSceneIndex)
vis_occluded = np.array(rendererGT.indices_image == teapotSceneIndex + 1).copy().astype(np.bool)
vis_im = np.array(rendererGT.image_mesh_bool([teapotSceneIndex])).copy().astype(np.bool)
if renderBlender and useBlender and not ignore:
placeCamera(scene.camera, -chAzGT.r[:].copy() * 180 / np.pi,
chElGT.r[:].copy() * 180 / np.pi, chDistGT.r[0].copy(),
center[:].copy() + targetPosition[:].copy())
if renderTeapots:
azimuthRot = mathutils.Matrix.Rotation(chObjAzGT.r[:].copy(), 4, 'Z')
teapot.matrix_world = mathutils.Matrix.Translation(
original_matrix_world.to_translation() + mathutils.Vector(teapotPosOffset.r)) * azimuthRot * (
mathutils.Matrix.Translation(-original_matrix_world.to_translation())) * original_matrix_world
setObjectDiffuseColor(teapot, chVColorsGT.r.copy())
if useShapeModel:
mesh = teapot.dupli_group.objects[0]
for vertex_i, vertex in enumerate(mesh.data.vertices):
vertex.co = mathutils.Vector(chVerticesGT.r[vertex_i])
if renderMugs:
setObjectDiffuseColor(mug, chVColorsMug.r.copy())
azimuthRotMug = mathutils.Matrix.Rotation(chObjAzMug.r[:].copy() - np.pi / 2, 4, 'Z')
mug.matrix_world = mathutils.Matrix.Translation(
original_matrix_world_mug.to_translation() + mathutils.Vector(mugPosOffset.r)) * azimuthRotMug * (
mathutils.Matrix.Translation(-original_matrix_world_mug.to_translation())) * original_matrix_world_mug
scene.update()
scene.render.filepath = gtDir + 'images/im' + str(train_i) + '.jpeg'
bpy.ops.render.render(write_still=True)
# image = np.array(imageio.imread(scene.render.filepath))[:, :, 0:3]
# image[image > 1] = 1
# blenderRender = image
# lin2srgb(blenderRender)
# cv2.imwrite(gtDir + 'images/im' + str(train_i) + '.jpeg', 255 * blenderRender[:, :, [2, 1, 0]], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
image = np.array(imageio.imread(scene.render.filepath))[:, :, 0:3]
blenderRender = image
blenderRenderGray = 0.3*blenderRender[:,:,0] + 0.59*blenderRender[:,:,1] + 0.11*blenderRender[:,:,2]
#For some reason I need to correct average intensity in OpenDR a few times before it gets it right:
# rendererGTGray = 0.3*rendererGT[:,:,0].r[:] + 0.59*rendererGT[:,:,1].r[:] + 0.11*rendererGT[:,:,2].r[:]
# meanIntensityScale = np.mean(blenderRenderGray[vis_occluded])/np.mean(rendererGTGray[vis_occluded]).copy()
# chAmbientIntensityGT[:] = chAmbientIntensityGT.r[:].copy()*meanIntensityScale
# rendererGTGray = 0.3*rendererGT[:,:,0].r[:] + 0.59*rendererGT[:,:,1].r[:] + 0.11*rendererGT[:,:,2].r[:]
# meanIntensityScale2 = np.mean(blenderRenderGray[vis_occluded])/np.mean(rendererGTGray[vis_occluded]).copy()
# chAmbientIntensityGT[:] = chAmbientIntensityGT.r[:].copy()*meanIntensityScale2
# rendererGTGray = 0.3*rendererGT[:,:,0].r[:] + 0.59*rendererGT[:,:,1].r[:] + 0.11*rendererGT[:,:,2].r[:]
# meanIntensityScale3 = np.mean(blenderRenderGray[vis_occluded])/np.mean(rendererGTGray[vis_occluded]).copy()
# chAmbientIntensityGT[:] = chAmbientIntensityGT.r[:].copy()*meanIntensityScale3
# rendererGTGray = 0.3*rendererGT[:,:,0].r[:] + 0.59*rendererGT[:,:,1].r[:] + 0.11*rendererGT[:,:,2].r[:]
# meanIntensityScale4 = np.mean(blenderRenderGray[vis_occluded])/np.mean(rendererGTGray[vis_occluded]).copy()
# chAmbientIntensityGT[:] = chAmbientIntensityGT.r[:].copy()*meanIntensityScale4
# rendererGTGray = 0.3*rendererGT[:,:,0].r[:] + 0.59*rendererGT[:,:,1].r[:] + 0.11*rendererGT[:,:,2].r[:]
# meanIntensityScale5 = np.mean(blenderRenderGray[vis_occluded])/np.mean(rendererGTGray[vis_occluded]).copy()
# chAmbientIntensityGT[:] = chAmbientIntensityGT.r[:].copy()*meanIntensityScale5
# lin2srgb(blenderRender)
if useOpenDR:
image = rendererGT.r[:].copy()
lin2srgb(image)
if not ignore:
if generateTriplets:
lightCoeffsRel = envMapCoeffsRotatedRel.r[None, :].copy().squeeze()
lightCoeffsRel = 0.3 * lightCoeffsRel[:, 0] + 0.59 * lightCoeffsRel[:, 1] + 0.11 * lightCoeffsRel[:, 2]
chLightSHCoeffs[:] = lightCoeffsRel * chAmbientIntensityGT.r
chObjAz[:] = 0
chAz[:] = chAzRelGT.r + np.random.choice([-1,1]) * np.random.uniform(0,10) * np.pi / 180
chEl[:] = chElGT.r
chVColors[:] = chVColorsGT.r
chShapeParams[:] = chShapeParamsGT.r
renderer.makeCurrentContext()
if useOpenDR:
cv2.imwrite(gtDir + 'triplets1/im' + str(train_i) + '.jpeg' , 255*lin2srgb(renderer.r.copy())[:,:,[2,1,0]], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
chAz[:] = chAzRelGT.r + np.random.choice([-1,1]) * np.random.uniform(10,40) * np.pi / 180
if useOpenDR:
cv2.imwrite(gtDir + 'triplets2/im' + str(train_i) + '.jpeg', 255 * lin2srgb(renderer.r.copy())[:, :, [2, 1, 0]], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
rendererGT.makeCurrentContext()
if useBlender and renderBlender:
cv2.imwrite(gtDir + 'images/im' + str(train_i) + '.jpeg' , 255*blenderRender[:,:,[2,1,0]], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
if useOpenDR:
cv2.imwrite(gtDir + 'images_opendr/im' + str(train_i) + '.jpeg' , 255*image[:,:,[2,1,0]], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
# cv2.imwrite(gtDir + 'images_opendr/im' + str(train_i) + '.jpeg' , 255*image[:,:,[2,1,0]], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
if useOpenDR:
np.save(gtDir + 'masks_occlusion/mask' + str(train_i)+ '.npy', vis_occluded)
#Add groundtruth to arrays
trainAzsGT = chAzGT.r
trainObjAzsGT = chObjAzGT.r
trainElevsGT = chElGT.r
trainLightAzsGT = chLightAzGT.r
trainLightElevsGT = chLightElGT.r
trainLightIntensitiesGT = groundTruthToRender['trainLightIntensitiesGT'][gtIdx]
trainVColorGT = chVColorsGT.r
lightCoeffs = envMapCoeffsRotated.r[None, :].copy().squeeze()
lightCoeffs = 0.3*lightCoeffs[:,0] + 0.59*lightCoeffs[:,1] + 0.11*lightCoeffs[:,2]
trainLightCoefficientsGT = lightCoeffs
lightCoeffsRel = envMapCoeffsRotatedRel.r[None, :].copy().squeeze()
lightCoeffsRel = 0.3*lightCoeffsRel[:,0] + 0.59*lightCoeffsRel[:,1] + 0.11*lightCoeffsRel[:,2]
trainLightCoefficientsGTRel = lightCoeffsRel
trainAmbientIntensityGT = chAmbientIntensityGT.r
trainEnvMapPhiOffsets = phiOffset
trainScenes = sceneNumber
trainTeapotIds = teapot_i
trainEnvMaps = hdridx
trainShapeModelCoeffsGT = chShapeParamsGT.r.copy()
trainOcclusions = occlusion
trainIds = train_i
trainTargetIndices = targetIndex
trainObjDistGT = chObjDistGT.r
trainObjRotationGT = chObjRotationGT.r
trainObjDistMug = chObjDistMug.r
trainObjRotationMug = chObjRotationMug.r
trainObjAzMug = chObjAzMug.r
trainVColorsMug = chVColorsMug.r
# Copy optional per-sample annotations from the previous ground-truth file,
# defaulting when a dataset is missing (older files lack these columns).
# Bare `except:` narrowed to `except Exception:` so KeyboardInterrupt /
# SystemExit still propagate.
try:
    trainMugElRel = groundTruthToRender['trainMugElRel'][gtIdx]
except Exception:
    trainMugElRel = -1
try:
    trainTeapotElRel = groundTruthToRender['trainTeapotElRel'][gtIdx]
except Exception:
    trainTeapotElRel = -1
try:
    trainMugPosOffset = groundTruthToRender['trainMugPosOffset'][gtIdx]
except Exception:
    trainMugPosOffset = np.array([0,0,0])
try:
    trainTeapotPosOffset = groundTruthToRender['trainTeapotPosOffset'][gtIdx]
except Exception:
    trainTeapotPosOffset = np.array([0,0,0])
try:
    trainBBMug = groundTruthToRender['trainBBMug'][gtIdx]
except Exception:
    trainBBMug = np.array([0,0,0,0])
try:
    trainBBTeapot = groundTruthToRender['trainBBTeapot'][gtIdx]
except Exception:
    trainBBTeapot = np.array([0,0,0,0])
# Presence flags: which target objects appear in this sample.
trainTeapotPresent = renderTeapots
trainMugPresent = renderMugs
gtDataset.resize(gtDataset.shape[0] + 1, axis=0)
gtDataset[-1] = np.array([(trainIds, trainAzsGT, trainObjAzsGT, trainElevsGT,
trainLightAzsGT, trainLightElevsGT,
trainLightIntensitiesGT, trainVColorGT, trainScenes,
trainTeapotIds, trainEnvMaps, trainOcclusions,
trainTargetIndices, trainLightCoefficientsGT,
trainLightCoefficientsGTRel, trainAmbientIntensityGT,
trainEnvMapPhiOffsets, trainShapeModelCoeffsGT,
trainObjDistGT,
trainObjRotationGT,
trainObjDistMug,
trainObjRotationMug,
trainObjAzMug,
trainVColorsMug,
trainTeapotElRel,
trainMugElRel,
trainMugPosOffset,
trainTeapotPosOffset,
trainBBMug,
trainBBTeapot,
trainTeapotPresent,
trainMugPresent
)], dtype=gtDtype)
train_i = train_i + 1
currentScene = sceneIdx
currentTargetIndex = targetIndex
currentTeapot = teapot_i
gtDataFile.flush()
# np.savetxt(gtDir + 'data.txt',np.array(np.hstack([trainIds[:,None], trainAzsGT[:,None], trainObjAzsGT[:,None], trainElevsGT[:,None], phiOffsets[:,None], trainOcclusions[:,None]])), fmt="%g")
# Close both HDF5 files: the newly written ground truth and the previous one
# that was re-rendered from.
gtDataFile.close()
gtDataFileToRender.close()
| 106,833 | 48.232258 | 1,055 | py |
inversegraphics | inversegraphics-master/utils.py | import numpy as np
import os
import skimage
import skimage.io
import h5py
import ipdb
import scipy.spatial.distance
import image_processing
import matplotlib
__author__ = 'pol'
import recognition_models
import pickle
def joinExperiments(range1, range2, testSet1,methodsPred1,testOcclusions1,testPrefixBase1,parameterRecognitionModels1,azimuths1,elevations1,vColors1,lightCoeffs1,likelihoods1,shapeParams1,segmentations1, approxProjections1, approxProjectionsGT1, testSet2,methodsPred2,testOcclusions2,testPrefixBase2,parameterRecognitionModels2,azimuths2,elevations2,vColors2,lightCoeffs2,likelihoods2,shapeParams2,segmentations2, approxProjections2, approxProjectionsGT2):
testSet = np.append(testSet1, testSet2)
parameterRecognitionModels = [parameterRecognitionModels1] + [parameterRecognitionModels2]
testPrefixBase = [testPrefixBase1] + [testPrefixBase2]
methodsPred = methodsPred1
testOcclusions = np.append(testOcclusions1, testOcclusions2)
azimuths = []
elevations = []
vColors = []
lightCoeffs = []
shapeParams = []
likelihoods = []
segmentations = []
approxProjections = []
approxProjectionsGT = []
for method in range(len(methodsPred)):
if azimuths1[method] is not None and azimuths2[method] is not None:
azimuths = azimuths + [np.append(azimuths1[method][range1], azimuths2[method][range2])]
else:
azimuths = azimuths + [None]
if elevations1[method] is not None and elevations2[method] is not None:
elevations = elevations + [np.append(elevations1[method][range1], elevations2[method][range2])]
else:
elevations = elevations + [None]
if vColors1[method] is not None and vColors2[method] is not None:
vColors = vColors + [np.vstack([vColors1[method][range1], vColors2[method][range2]])]
else:
vColors = vColors + [None]
if lightCoeffs1[method] is not None and lightCoeffs2[method] is not None:
lightCoeffs = lightCoeffs + [np.vstack([lightCoeffs1[method][range1], lightCoeffs2[method][range2]])]
else:
lightCoeffs = lightCoeffs + [None]
if shapeParams1[method] is not None and shapeParams2[method] is not None:
shapeParams = shapeParams + [np.vstack([shapeParams1[method][range1], shapeParams2[method][range2]])]
else:
shapeParams = shapeParams + [None]
if likelihoods1[method] is not None and likelihoods2[method] is not None:
likelihoods = likelihoods + [np.append(likelihoods1[method][range1], likelihoods2[method][range2])]
else:
likelihoods = likelihoods + [None]
if segmentations1[method] is not None and segmentations2[method] is not None:
segmentations = segmentations + [np.vstack([segmentations1[method][range1], segmentations2[method][range2]])]
else:
segmentations = segmentations + [None]
if approxProjections1 is not None and approxProjections2 is not None:
if approxProjections1[method] is not None and approxProjections2[method] is not None:
approxProjections = approxProjections + [np.vstack([approxProjections1[method][range1], approxProjections2[method][range2]])]
else:
approxProjections = approxProjections + [None]
if approxProjectionsGT1 is not None and approxProjectionsGT2 is not None:
if approxProjectionsGT1[method] is not None and approxProjectionsGT2[method] is not None:
approxProjectionsGT = approxProjectionsGT + [np.vstack([approxProjectionsGT1[method][range1], approxProjectionsGT2[method][range2]])]
else:
approxProjectionsGT = approxProjectionsGT + [None]
return testSet, parameterRecognitionModels, testPrefixBase, methodsPred, testOcclusions, azimuths, elevations, vColors, lightCoeffs, shapeParams, likelihoods, segmentations, approxProjections, approxProjectionsGT
def latexify(fig_width=None, fig_height=None, columns=1):
    """Set up matplotlib's RC params for LaTeX plotting.

    Call this before plotting a figure.

    Parameters
    ----------
    fig_width : float, optional, inches
    fig_height : float, optional, inches
    columns : {1, 2}
    """
    # code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples

    # Width and max height in inches for IEEE journals taken from
    # computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf
    assert(columns in [1,2])

    if fig_width is None:
        fig_width = 3.39 if columns == 1 else 6.9  # width in inches

    if fig_height is None:
        golden_mean = (np.sqrt(5)-1.0)/2.0    # Aesthetic ratio
        fig_height = fig_width*golden_mean  # height in inches

    MAX_HEIGHT_INCHES = 8.0
    if fig_height > MAX_HEIGHT_INCHES:
        # BUG FIX: the original concatenated str with float here, which raised
        # TypeError whenever this warning path was taken.
        print("WARNING: fig_height too large:" + str(fig_height) +
              "so will reduce to" + str(MAX_HEIGHT_INCHES) + "inches.")
        fig_height = MAX_HEIGHT_INCHES

    params = {'backend': 'pdf',
              'axes.labelsize': 12,  # fontsize for x and y labels (was 10)
              'axes.titlesize': 12,
              'font.size': 12,  # was 10
              'legend.fontsize': 12,  # was 10
              'xtick.labelsize': 14,
              'ytick.labelsize': 14,
              'text.usetex': True,
              'figure.figsize': [fig_width,fig_height],
              'font.family': 'serif',
              'lines.linewidth':2
              }

    matplotlib.rcParams.update(params)
def saveScatterPlots(resultDir, testOcclusions, useShapeModel, errorsPosePred, errorsPoseFitted,errorsLightCoeffsC,errorsFittedLightCoeffsC,errorsEnvMap,errorsFittedEnvMap,errorsLightCoeffs,errorsFittedLightCoeffs,errorsShapeParams,errorsFittedShapeParams,errorsShapeVertices,errorsFittedShapeVertices,errorsVColorsE,errorsFittedVColorsE,errorsVColorsC,errorsFittedVColorsC, errorsVColorsS,errorsFittedVColorsS):
    """Save a battery of recognition-vs-fitted error scatter plots as PDFs.

    Each figure plots a predicted (recognition) error on the x-axis against
    the corresponding fitted error on the y-axis, with a dashed identity
    reference line; points are coloured by ground-truth occlusion
    (``testOcclusions`` scaled to 0-100%). On some plots, samples whose
    fitted error is markedly worse than the predicted one are annotated with
    their test-sample index.

    ``errorsPosePred``/``errorsPoseFitted`` are indexed [0]=azimuth,
    [1]=elevation. The env-map and shape-vertex plots are skipped when their
    error pairs are None; the shape-parameter/vertex plots only run when
    ``useShapeModel`` is truthy.

    NOTE(review): relies on module-level ``plt`` (presumably
    matplotlib.pyplot) which is not visible in this module's import block —
    confirm it is imported elsewhere.
    """
    latexify(columns=2)

    # --- Azimuth errors: recognition vs fitted ---
    directory = resultDir + 'pred-azimuth-errors_fitted-azimuth-error'
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal')
    scat = ax.scatter(abs(errorsPosePred[0]), abs(errorsPoseFitted[0]), s=20, vmin=0, vmax=100,
                      c=testOcclusions * 100, cmap=matplotlib.cm.plasma)
    cbar = fig.colorbar(scat, ticks=[0, 50, 100])
    cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
    ax.set_xlabel('Predicted azimuth errors')
    ax.set_ylabel('Fitted azimuth errors')
    x1, x2 = ax.get_xlim()
    y1, y2 = ax.get_ylim()
    # Square up the axes and draw the y=x reference line.
    ax.set_xlim((0, max(x2,y2)))
    ax.set_ylim((0, max(x2,y2)))
    ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
    # Annotate samples where fitting worsened the azimuth error by more than 10.
    for test_i in range(len(errorsPosePred[0])):
        if abs(errorsPoseFitted[0][test_i]) > abs(errorsPosePred[0][test_i]) + 10:
            plt.annotate(
                str(test_i),
                xy=(abs(errorsPosePred[0][test_i]), abs(errorsPoseFitted[0][test_i])), xytext=(0, 15),
                textcoords='offset points', ha='right', va='bottom',
                arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'), size=8)
            # bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
    ax.set_title('Recognition vs fitted azimuth errors')
    fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
    plt.close(fig)

    # --- Elevation errors: recognition vs fitted ---
    directory = resultDir + 'pred-elevation-errors_fitted-elevation-error'
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal')
    scat = ax.scatter(abs(errorsPosePred[1]), abs(errorsPoseFitted[1]), s=20, vmin=0, vmax=100,
                      c=testOcclusions * 100, cmap=matplotlib.cm.plasma)
    # Annotate samples where fitting worsened the elevation error by more than 10.
    for test_i in range(len(errorsPosePred[1])):
        if abs(errorsPoseFitted[1][test_i]) > abs(errorsPosePred[1][test_i]) + 10:
            plt.annotate(
                str(test_i),
                xy=(abs(errorsPosePred[1][test_i]), abs(errorsPoseFitted[1][test_i])), xytext=(15, 15),
                textcoords='offset points', ha='right', va='bottom',
                arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'), size=8)
            # bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
    cbar = fig.colorbar(scat, ticks=[0, 50, 100])
    cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
    ax.set_xlabel('Predicted elevation errors')
    ax.set_ylabel('Fitted elevation errors')
    x1, x2 = ax.get_xlim()
    y1, y2 = ax.get_ylim()
    ax.set_xlim((0, max(x2,y2)))
    ax.set_ylim((0, max(x2,y2)))
    ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
    ax.set_title('Recognition vs fitted azimuth errors')
    fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
    plt.close(fig)

    # --- SH lighting coefficients (C variant), mean error per sample ---
    directory = resultDir + 'shcoeffsserrorsC_fitted-shcoeffsserrorsC'
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal')
    scat = ax.scatter(np.mean(errorsLightCoeffsC, axis=1), np.mean(errorsFittedLightCoeffsC, axis=1), s=20,
                      vmin=0, vmax=100, c=testOcclusions * 100, cmap=matplotlib.cm.plasma)
    for test_i in range(len(errorsLightCoeffsC)):
        if np.mean(errorsFittedLightCoeffsC, axis=1)[test_i] > np.mean(errorsLightCoeffsC, axis=1)[test_i] + 0.1:
            plt.annotate(
                str(test_i),
                xy=(np.mean(errorsLightCoeffsC, axis=1)[test_i], np.mean(errorsFittedLightCoeffsC, axis=1)[test_i]), xytext=(0, 15),
                textcoords='offset points', ha='right', va='bottom',
                arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'), size=8)
            # bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
    cbar = fig.colorbar(scat, ticks=[0, 50, 100])
    cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
    ax.set_xlabel('Predicted SH coefficients errors')
    ax.set_ylabel('Fitted SH coefficients errors')
    x1, x2 = ax.get_xlim()
    y1, y2 = ax.get_ylim()
    ax.set_xlim((0, max(x2,y2)))
    ax.set_ylim((0, max(x2,y2)))
    ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
    ax.set_title('Recognition vs fitted SH coefficients errors')
    fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
    plt.close(fig)

    # --- SH environment-map errors (optional) ---
    if errorsEnvMap is not None and errorsFittedEnvMap is not None:
        directory = resultDir + 'shcoeffsserrorsC_fitted-shEnvMap'
        fig = plt.figure()
        ax = fig.add_subplot(111, aspect='equal')
        scat = ax.scatter(errorsEnvMap, errorsFittedEnvMap, s=20,
                          vmin=0, vmax=100, c=testOcclusions * 100, cmap=matplotlib.cm.plasma)
        cbar = fig.colorbar(scat, ticks=[0, 50, 100])
        cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
        ax.set_xlabel('Predicted SH coefficients errors')
        ax.set_ylabel('Fitted SH coefficients errors')
        x1, x2 = ax.get_xlim()
        y1, y2 = ax.get_ylim()
        ax.set_xlim((0, max(x2,y2)))
        ax.set_ylim((0, max(x2,y2)))
        ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
        ax.set_title('Recognition vs fitted SH Environment map errors')
        fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
        plt.close(fig)

    # --- SH lighting coefficients, mean error per sample ---
    directory = resultDir + 'shcoeffsserrors_fitted-shcoeffsserrors'
    # Show scatter correlations with occlusions.
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal')
    scat = ax.scatter(np.mean(errorsLightCoeffs, axis=1), np.mean(errorsFittedLightCoeffs, axis=1), s=20,
                      vmin=0, vmax=100, c=testOcclusions * 100, cmap=matplotlib.cm.plasma)
    cbar = fig.colorbar(scat, ticks=[0, 50, 100])
    cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
    ax.set_xlabel('Predicted SH coefficients errors')
    ax.set_ylabel('Fitted SH coefficients errors')
    x1, x2 = ax.get_xlim()
    y1, y2 = ax.get_ylim()
    ax.set_xlim((0, max(x2,y2)))
    ax.set_ylim((0, max(x2,y2)))
    ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
    ax.set_title('Recognition vs fitted SH coefficients errors')
    fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
    plt.close(fig)

    # --- Shape model plots (only when a shape model was used) ---
    if useShapeModel:
        directory = resultDir + 'shapeparamserrors_fitted-shapeparamserrors'
        # Show scatter correlations with occlusions.
        fig = plt.figure()
        ax = fig.add_subplot(111, aspect='equal')
        scat = ax.scatter(np.mean(errorsShapeParams, axis=1), np.mean(errorsFittedShapeParams, axis=1), s=20,
                          vmin=0, vmax=100, c=testOcclusions * 100, cmap=matplotlib.cm.plasma)
        cbar = fig.colorbar(scat, ticks=[0, 50, 100])
        cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
        ax.set_xlabel('Predicted shape parameters errors')
        ax.set_ylabel('Fitted shape parameters errors')
        x1, x2 = ax.get_xlim()
        y1, y2 = ax.get_ylim()
        ax.set_xlim((0, max(x2,y2)))
        ax.set_ylim((0, max(x2,y2)))
        ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
        ax.set_title('Recognition vs fitted Shape parameters errors')
        fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
        plt.close(fig)

    # --- Shape vertex errors (optional) ---
    if errorsShapeVertices is not None and errorsFittedShapeVertices is not None:
        directory = resultDir + 'shapeverticeserrors_fitted-shapeverticesserrors'
        # Show scatter correlations with occlusions.
        fig = plt.figure()
        ax = fig.add_subplot(111, aspect='equal')
        scat = ax.scatter(errorsShapeVertices, errorsFittedShapeVertices, s=20, vmin=0, vmax=100,
                          c=testOcclusions * 100, cmap=matplotlib.cm.plasma)
        # NOTE(review): the loop bound uses len(errorsLightCoeffsC) but indexes
        # the shape-vertex arrays — confirm these arrays always have the same
        # length, otherwise this can raise or silently skip samples.
        for test_i in range(len(errorsLightCoeffsC)):
            if errorsFittedShapeVertices[test_i] > errorsShapeVertices[test_i] + 0.75:
                plt.annotate(
                    str(test_i),
                    xy=(errorsShapeVertices[test_i], errorsFittedShapeVertices[test_i]),
                    xytext=(15, 15),
                    textcoords='offset points', ha='right', va='bottom',
                    arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'), size=8)
        cbar = fig.colorbar(scat, ticks=[0, 50, 100])
        cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
        ax.set_xlabel('Predicted shape vertices errors')
        ax.set_ylabel('Fitted shape vertices errors')
        x1, x2 = ax.get_xlim()
        y1, y2 = ax.get_ylim()
        ax.set_xlim((0, max(x2,y2)))
        ax.set_ylim((0, max(x2,y2)))
        ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
        ax.set_title('Recognition vs fitted Shape parameters errors')
        fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
        plt.close(fig)

    # --- Vertex colour errors, measure E ---
    directory = resultDir + 'vColorsE_fitted-vColorsE'
    # Show scatter correlations with occlusions.
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal')
    scat = ax.scatter(errorsVColorsE, errorsFittedVColorsE, s=20, vmin=0, vmax=100, c=testOcclusions * 100,
                      cmap=matplotlib.cm.plasma)
    cbar = fig.colorbar(scat, ticks=[0, 50, 100])
    cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
    ax.set_xlabel('Predicted VColor E coefficients errors')
    ax.set_ylabel('Fitted VColor E coefficients errors')
    x1, x2 = ax.get_xlim()
    y1, y2 = ax.get_ylim()
    ax.set_xlim((0, max(x2,y2)))
    ax.set_ylim((0, max(x2,y2)))
    ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
    ax.set_title('Recognition vs fitted vertex color errors')
    fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
    plt.close(fig)

    # --- Vertex colour errors, measure C ---
    directory = resultDir + 'vColorsC_fitted-vColorsC'
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal')
    scat = ax.scatter(errorsVColorsC, errorsFittedVColorsC, s=20, vmin=0, vmax=100, c=testOcclusions * 100,
                      cmap=matplotlib.cm.plasma)
    cbar = fig.colorbar(scat, ticks=[0, 50, 100])
    cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
    ax.set_xlabel('Predicted vertex color errors')
    ax.set_ylabel('Fitted vertex color errors')
    x1, x2 = ax.get_xlim()
    y1, y2 = ax.get_ylim()
    ax.set_xlim((0, max(x2,y2)))
    ax.set_ylim((0, max(x2,y2)))
    ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
    ax.set_title('Recognition vs fitted vertex color errors')
    fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
    plt.close(fig)

    # --- Vertex colour errors, measure S (with annotations) ---
    directory = resultDir + 'vColorsS_fitted-vColorsS'
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal')
    scat = ax.scatter(errorsVColorsS, errorsFittedVColorsS, s=20, vmin=0, vmax=100, c=testOcclusions * 100,
                      cmap=matplotlib.cm.plasma)
    for test_i in range(len(errorsVColorsS)):
        if errorsFittedVColorsS[test_i] > errorsVColorsS[test_i] + 0.1:
            plt.annotate(
                str(test_i),
                xy=(errorsVColorsS[test_i], errorsFittedVColorsS[test_i]),
                xytext=(15, 15),
                textcoords='offset points', ha='right', va='bottom',
                arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'), size=8)
    cbar = fig.colorbar(scat, ticks=[0, 50, 100])
    cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
    ax.set_xlabel('Predicted VColor coefficients errors')
    ax.set_ylabel('Fitted VColor coefficients errors')
    x1, x2 = ax.get_xlim()
    y1, y2 = ax.get_ylim()
    ax.set_xlim((0, max(x2, y2)))
    ax.set_ylim((0, max(x2, y2)))
    ax.plot([0, max(x2, y2)], [0, max(x2, y2)], ls="--", c=".3")
    ax.set_title('Recognition vs fitted vertex color errors')
    fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
    plt.close(fig)
def saveScatterPlotsMethodFit(methodFitNum, resultDir, testOcclusions, useShapeModel, errorsPosePred, errorsPoseFitted,errorsLightCoeffsC,errorsFittedLightCoeffsC,errorsEnvMap,errorsFittedEnvMap,errorsLightCoeffs,errorsFittedLightCoeffs,errorsShapeParams,errorsFittedShapeParams,errorsShapeVertices,errorsFittedShapeVertices,errorsVColorsE,errorsFittedVColorsE,errorsVColorsC,errorsFittedVColorsC, errorsVColorsS,errorsFittedVColorsS):
    """Per-fit-method variant of ``saveScatterPlots``.

    Identical plotting logic to ``saveScatterPlots`` except that every output
    filename is suffixed with ``-<methodFitNum>-`` so results from several fit
    methods can live in the same ``resultDir``. See ``saveScatterPlots`` for
    the meaning of the error-array parameters.

    NOTE(review): near-duplicate of ``saveScatterPlots`` — a future refactor
    could fold both into one function with an optional suffix argument.
    NOTE(review): relies on module-level ``plt`` (presumably
    matplotlib.pyplot), not visible in this module's import block — confirm.
    """
    latexify(columns=2)

    # --- Azimuth errors: recognition vs fitted ---
    directory = resultDir + 'pred-azimuth-errors_fitted-azimuth-error-' + str(methodFitNum) + '-'
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal')
    scat = ax.scatter(abs(errorsPosePred[0]), abs(errorsPoseFitted[0]), s=20, vmin=0, vmax=100,
                      c=testOcclusions * 100, cmap=matplotlib.cm.plasma)
    cbar = fig.colorbar(scat, ticks=[0, 50, 100])
    cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
    ax.set_xlabel('Predicted azimuth errors')
    ax.set_ylabel('Fitted azimuth errors')
    x1, x2 = ax.get_xlim()
    y1, y2 = ax.get_ylim()
    ax.set_xlim((0, max(x2,y2)))
    ax.set_ylim((0, max(x2,y2)))
    ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
    # Annotate samples where fitting worsened the azimuth error by more than 10.
    for test_i in range(len(errorsPosePred[0])):
        if abs(errorsPoseFitted[0][test_i]) > abs(errorsPosePred[0][test_i]) + 10:
            plt.annotate(
                str(test_i),
                xy=(abs(errorsPosePred[0][test_i]), abs(errorsPoseFitted[0][test_i])), xytext=(0, 15),
                textcoords='offset points', ha='right', va='bottom',
                arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'), size=8)
            # bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
    ax.set_title('Recognition vs fitted azimuth errors')
    fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
    plt.close(fig)

    # --- Elevation errors: recognition vs fitted ---
    directory = resultDir + 'pred-elevation-errors_fitted-elevation-error-' + str(methodFitNum) + '-'
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal')
    scat = ax.scatter(abs(errorsPosePred[1]), abs(errorsPoseFitted[1]), s=20, vmin=0, vmax=100,
                      c=testOcclusions * 100, cmap=matplotlib.cm.plasma)
    for test_i in range(len(errorsPosePred[1])):
        if abs(errorsPoseFitted[1][test_i]) > abs(errorsPosePred[1][test_i]) + 10:
            plt.annotate(
                str(test_i),
                xy=(abs(errorsPosePred[1][test_i]), abs(errorsPoseFitted[1][test_i])), xytext=(15, 15),
                textcoords='offset points', ha='right', va='bottom',
                arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'), size=8)
            # bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
    cbar = fig.colorbar(scat, ticks=[0, 50, 100])
    cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
    ax.set_xlabel('Predicted elevation errors')
    ax.set_ylabel('Fitted elevation errors')
    x1, x2 = ax.get_xlim()
    y1, y2 = ax.get_ylim()
    ax.set_xlim((0, max(x2,y2)))
    ax.set_ylim((0, max(x2,y2)))
    ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
    ax.set_title('Recognition vs fitted azimuth errors')
    fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
    plt.close(fig)

    # --- SH lighting coefficients (C variant), mean error per sample ---
    directory = resultDir + 'shcoeffsserrorsC_fitted-shcoeffsserrorsC-' + str(methodFitNum) + '-'
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal')
    scat = ax.scatter(np.mean(errorsLightCoeffsC, axis=1), np.mean(errorsFittedLightCoeffsC, axis=1), s=20,
                      vmin=0, vmax=100, c=testOcclusions * 100, cmap=matplotlib.cm.plasma)
    for test_i in range(len(errorsLightCoeffsC)):
        if np.mean(errorsFittedLightCoeffsC, axis=1)[test_i] > np.mean(errorsLightCoeffsC, axis=1)[test_i] + 0.1:
            plt.annotate(
                str(test_i),
                xy=(np.mean(errorsLightCoeffsC, axis=1)[test_i], np.mean(errorsFittedLightCoeffsC, axis=1)[test_i]), xytext=(0, 15),
                textcoords='offset points', ha='right', va='bottom',
                arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'), size=8)
            # bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
    cbar = fig.colorbar(scat, ticks=[0, 50, 100])
    cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
    ax.set_xlabel('Predicted SH coefficients errors')
    ax.set_ylabel('Fitted SH coefficients errors')
    x1, x2 = ax.get_xlim()
    y1, y2 = ax.get_ylim()
    ax.set_xlim((0, max(x2,y2)))
    ax.set_ylim((0, max(x2,y2)))
    ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
    ax.set_title('Recognition vs fitted SH coefficients errors')
    fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
    plt.close(fig)

    # --- SH environment-map errors (optional) ---
    if errorsEnvMap is not None and errorsFittedEnvMap is not None:
        directory = resultDir + 'shcoeffsserrorsC_fitted-shEnvMap-' + str(methodFitNum) + '-'
        fig = plt.figure()
        ax = fig.add_subplot(111, aspect='equal')
        scat = ax.scatter(errorsEnvMap, errorsFittedEnvMap, s=20,
                          vmin=0, vmax=100, c=testOcclusions * 100, cmap=matplotlib.cm.plasma)
        cbar = fig.colorbar(scat, ticks=[0, 50, 100])
        cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
        ax.set_xlabel('Predicted SH coefficients errors')
        ax.set_ylabel('Fitted SH coefficients errors')
        x1, x2 = ax.get_xlim()
        y1, y2 = ax.get_ylim()
        ax.set_xlim((0, max(x2,y2)))
        ax.set_ylim((0, max(x2,y2)))
        ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
        ax.set_title('Recognition vs fitted SH Environment map errors')
        fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
        plt.close(fig)

    # --- SH lighting coefficients, mean error per sample ---
    directory = resultDir + 'shcoeffsserrors_fitted-shcoeffsserrors-' + str(methodFitNum) + '-'
    # Show scatter correlations with occlusions.
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal')
    scat = ax.scatter(np.mean(errorsLightCoeffs, axis=1), np.mean(errorsFittedLightCoeffs, axis=1), s=20,
                      vmin=0, vmax=100, c=testOcclusions * 100, cmap=matplotlib.cm.plasma)
    cbar = fig.colorbar(scat, ticks=[0, 50, 100])
    cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
    ax.set_xlabel('Predicted SH coefficients errors')
    ax.set_ylabel('Fitted SH coefficients errors')
    x1, x2 = ax.get_xlim()
    y1, y2 = ax.get_ylim()
    ax.set_xlim((0, max(x2,y2)))
    ax.set_ylim((0, max(x2,y2)))
    ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
    ax.set_title('Recognition vs fitted SH coefficients errors')
    fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
    plt.close(fig)

    # --- Shape model plots (only when a shape model was used) ---
    if useShapeModel:
        directory = resultDir + 'shapeparamserrors_fitted-shapeparamserrors-' + str(methodFitNum) + '-'
        # Show scatter correlations with occlusions.
        fig = plt.figure()
        ax = fig.add_subplot(111, aspect='equal')
        scat = ax.scatter(np.mean(errorsShapeParams, axis=1), np.mean(errorsFittedShapeParams, axis=1), s=20,
                          vmin=0, vmax=100, c=testOcclusions * 100, cmap=matplotlib.cm.plasma)
        cbar = fig.colorbar(scat, ticks=[0, 50, 100])
        cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
        ax.set_xlabel('Predicted shape parameters errors')
        ax.set_ylabel('Fitted shape parameters errors')
        x1, x2 = ax.get_xlim()
        y1, y2 = ax.get_ylim()
        ax.set_xlim((0, max(x2,y2)))
        ax.set_ylim((0, max(x2,y2)))
        ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
        ax.set_title('Recognition vs fitted Shape parameters errors')
        fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
        plt.close(fig)

    # --- Shape vertex errors (optional) ---
    if errorsShapeVertices is not None and errorsFittedShapeVertices is not None:
        directory = resultDir + 'shapeverticeserrors_fitted-shapeverticesserrors-' + str(methodFitNum) + '-'
        # Show scatter correlations with occlusions.
        fig = plt.figure()
        ax = fig.add_subplot(111, aspect='equal')
        scat = ax.scatter(errorsShapeVertices, errorsFittedShapeVertices, s=20, vmin=0, vmax=100,
                          c=testOcclusions * 100, cmap=matplotlib.cm.plasma)
        # NOTE(review): loop bound uses len(errorsLightCoeffsC) but indexes the
        # shape-vertex arrays — confirm these arrays always have equal length.
        for test_i in range(len(errorsLightCoeffsC)):
            if errorsFittedShapeVertices[test_i] > errorsShapeVertices[test_i] + 0.75:
                plt.annotate(
                    str(test_i),
                    xy=(errorsShapeVertices[test_i], errorsFittedShapeVertices[test_i]),
                    xytext=(15, 15),
                    textcoords='offset points', ha='right', va='bottom',
                    arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'), size=8)
        cbar = fig.colorbar(scat, ticks=[0, 50, 100])
        cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
        ax.set_xlabel('Predicted shape vertices errors')
        ax.set_ylabel('Fitted shape vertices errors')
        x1, x2 = ax.get_xlim()
        y1, y2 = ax.get_ylim()
        ax.set_xlim((0, max(x2,y2)))
        ax.set_ylim((0, max(x2,y2)))
        ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
        ax.set_title('Recognition vs fitted Shape parameters errors')
        fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
        plt.close(fig)

    # --- Vertex colour errors, measure E ---
    directory = resultDir + 'vColorsE_fitted-vColorsE-' + str(methodFitNum) + '-'
    # Show scatter correlations with occlusions.
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal')
    scat = ax.scatter(errorsVColorsE, errorsFittedVColorsE, s=20, vmin=0, vmax=100, c=testOcclusions * 100,
                      cmap=matplotlib.cm.plasma)
    cbar = fig.colorbar(scat, ticks=[0, 50, 100])
    cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
    ax.set_xlabel('Predicted VColor E coefficients errors')
    ax.set_ylabel('Fitted VColor E coefficients errors')
    x1, x2 = ax.get_xlim()
    y1, y2 = ax.get_ylim()
    ax.set_xlim((0, max(x2,y2)))
    ax.set_ylim((0, max(x2,y2)))
    ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
    ax.set_title('Recognition vs fitted vertex color errors')
    fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
    plt.close(fig)

    # --- Vertex colour errors, measure C ---
    directory = resultDir + 'vColorsC_fitted-vColorsC-' + str(methodFitNum) + '-'
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal')
    scat = ax.scatter(errorsVColorsC, errorsFittedVColorsC, s=20, vmin=0, vmax=100, c=testOcclusions * 100,
                      cmap=matplotlib.cm.plasma)
    cbar = fig.colorbar(scat, ticks=[0, 50, 100])
    cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
    ax.set_xlabel('Predicted vertex color errors')
    ax.set_ylabel('Fitted vertex color errors')
    x1, x2 = ax.get_xlim()
    y1, y2 = ax.get_ylim()
    ax.set_xlim((0, max(x2,y2)))
    ax.set_ylim((0, max(x2,y2)))
    ax.plot([0, max(x2,y2)], [0, max(x2,y2)], ls="--", c=".3")
    ax.set_title('Recognition vs fitted vertex color errors')
    fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
    plt.close(fig)

    # --- Vertex colour errors, measure S (with annotations) ---
    directory = resultDir + 'vColorsS_fitted-vColorsS-' + str(methodFitNum) + '-'
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal')
    scat = ax.scatter(errorsVColorsS, errorsFittedVColorsS, s=20, vmin=0, vmax=100, c=testOcclusions * 100,
                      cmap=matplotlib.cm.plasma)
    for test_i in range(len(errorsVColorsS)):
        if errorsFittedVColorsS[test_i] > errorsVColorsS[test_i] + 0.1:
            plt.annotate(
                str(test_i),
                xy=(errorsVColorsS[test_i], errorsFittedVColorsS[test_i]),
                xytext=(15, 15),
                textcoords='offset points', ha='right', va='bottom',
                arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'), size=8)
    cbar = fig.colorbar(scat, ticks=[0, 50, 100])
    cbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
    ax.set_xlabel('Predicted VColor coefficients errors')
    ax.set_ylabel('Fitted VColor coefficients errors')
    x1, x2 = ax.get_xlim()
    y1, y2 = ax.get_ylim()
    ax.set_xlim((0, max(x2, y2)))
    ax.set_ylim((0, max(x2, y2)))
    ax.plot([0, max(x2, y2)], [0, max(x2, y2)], ls="--", c=".3")
    ax.set_title('Recognition vs fitted vertex color errors')
    fig.savefig(directory + '-performance-scatter.pdf', bbox_inches='tight')
    plt.close(fig)
def saveLikelihoodPlots(resultDir, occlusions, methodsPred, plotColors, plotMethodsIndices, meanLikelihoodArr):
    """Plot mean negative log-likelihood curves against occlusion level.

    Produces two PDFs in ``resultDir``: one comparing the ground-truth and
    robust-fit NLL curves (``meanLikelihoodArr[0]``/``[1]``), and one for the
    Gaussian variant (``meanLikelihoodArr[2]``/``[3]``).

    ``methodsPred``, ``plotColors`` and ``plotMethodsIndices`` are accepted
    for interface compatibility but unused here.
    """
    latexify(columns=2)

    def _save_nll_pair(basename, gtCurve, fitCurve, gtLabel, yLabel):
        # One figure: ground-truth curve in blue, robust-fit curve in red.
        outPath = resultDir + basename
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(occlusions, gtCurve, c='b', label=gtLabel)
        ax.plot(occlusions, fitCurve, c='r', label='Robust Fit NLL')
        legend = ax.legend(loc='best')
        ax.set_xlabel('Occlusion (\%)')
        ax.set_ylabel(yLabel)
        x1, x2 = ax.get_xlim()
        y1, y2 = ax.get_ylim()
        ax.set_title('Cumulative prediction per occlusion level')
        fig.savefig(outPath + '-plot.pdf', bbox_inches='tight')
        plt.close(fig)

    _save_nll_pair('predictionLikelihood-FitLikelihood',
                   meanLikelihoodArr[0], meanLikelihoodArr[1],
                   'Ground-truth NLL', 'Mean Likelihood')
    _save_nll_pair('predictionLikelihoodGaussian-FitLikelihoodGaussian',
                   meanLikelihoodArr[2], meanLikelihoodArr[3],
                   'Groundtruth NLL', 'Mean likelihood')
def saveLikelihoodScatter(resultDirOcclusion, testSet, testOcclusions, likelihoods):
    """Scatter-plot ground-truth vs recognition+fit robust NLL per sample.

    Points are coloured by the ground-truth occlusion fraction (scaled to
    0-100%); a dashed identity line marks equal likelihoods. The Gaussian
    variant of this plot was previously disabled and is not produced.
    """
    outPath = resultDirOcclusion + 'robustLikelihood_fitted-robustLikelihood'
    figure = plt.figure()
    axes = figure.add_subplot(111, aspect='equal')
    points = axes.scatter(likelihoods[0][testSet], likelihoods[1][testSet], s=20, vmin=0, vmax=100,
                          c=testOcclusions * 100, cmap=matplotlib.cm.plasma)
    colorbar = figure.colorbar(points, ticks=[0, 50, 100])
    colorbar.ax.set_yticklabels(['0%', '50%', '100%'])  # vertically oriented colorbar
    axes.set_xlabel('Ground-truth NLL (avg)')
    axes.set_ylabel('Recognition+Fit NLL (avg)')
    xLow, xHigh = axes.get_xlim()
    yLow, yHigh = axes.get_ylim()
    lower = min(xLow, yLow)
    upper = max(xHigh, yHigh)
    axes.set_xlim((lower, upper))
    axes.set_ylim((lower, upper))
    axes.plot([lower, upper], [lower, upper], ls="--", c=".3")  # identity reference
    axes.set_title('GT vs Recognition+Robust fit negative log-likelihood')
    figure.savefig(outPath + '-performance-scatter.pdf', bbox_inches='tight')
    plt.close(figure)
def saveHistograms():
    """Placeholder: histogram plotting is currently disabled.

    An earlier implementation rendered azimuth/elevation error histograms and
    occlusion scatter plots here; it was commented out and has been removed
    from the runtime path. The stub keeps the call-site contract intact
    (no arguments, returns None, no side effects).
    """
    pass
def saveConditionalHistograms(resultDir, occlusions, methodsPred, variablesDescr, plotMethodsIndices, errorsList):
    # For every (variable, method) pair, render a 2D image whose columns are
    # occlusion-level bins and whose rows are a normalized histogram of the
    # errors falling into that bin, then save it as a PDF under resultDir.
    # `latexify` presumably configures matplotlib for LaTeX-styled figures
    # (defined elsewhere in this file) -- TODO confirm.
    latexify(columns=2)
    numBins = 20      # occlusion bins over [0, 0.9]
    numErrBins = 40   # error-histogram bins per occlusion bin
    for variable_i in range(len(variablesDescr)):
        variableDescr = variablesDescr[variable_i]
        errors = errorsList[variable_i]
        directory = resultDir + variableDescr
        # Shared error range across methods so the per-method images are comparable.
        maxErr = np.max(np.array([np.max(errors[method_i]) if errors[method_i] is not None else 0 for method_i in plotMethodsIndices]))
        for method_i in plotMethodsIndices:
            condHists = []
            if errors[method_i] is not None and len(errors[method_i]) > 0:
                fig = plt.figure()
                ax = fig.add_subplot(111)
                bins = np.linspace(0,0.9,numBins)
                # Boolean masks selecting the samples inside each occlusion bin.
                # NOTE(review): the strict < / > comparisons drop samples lying
                # exactly on a bin edge -- confirm this is intended.
                occlusionBinIndices = np.array([(occlusions < bins[i]) & (occlusions > bins[i-1]) for i in np.arange(1,len(bins))])
                # ax.plot(occlusions, errorsPosePredList[method_i], c=plotColors[method_i], linestyle=plotStyles[method_i], label=methodsPred[method_i])
                legend = ax.legend(loc='best')
                ax.set_xlabel('Occlusion (\%)')
                ax.set_ylabel('Error Cond. Histogram')
                # x1, x2 = ax.get_xlim()
                # y1, y2 = ax.get_ylim()
                if variableDescr == 'Illumination':
                    maxErr = 0.2
                # One density histogram of the errors per occlusion bin.
                for i in range(len(bins)-1):
                    hist, bin_edges = np.histogram(errors[method_i][occlusionBinIndices[i]], numErrBins, range=(0,maxErr), density=True)
                    condHists = condHists + [hist]
                # ipdb.set_trace()
                # Stack to (numErrBins, numBins-1) and flip so low errors sit at the bottom.
                histMat = np.flipud(np.vstack(condHists).T)
                maxHistProb = np.max(histMat)
                ax.matshow(histMat, cmap=matplotlib.cm.gray, interpolation='none', extent=[0,90,0,45], vmin=0, vmax=maxHistProb)
                # locs, labels = ax.xticks()
                # ax.set_xticks(bins)
                ax.set_xlim((0, 90))
                ax.set_ylim((0, 45))
                # Relabel the y axis from image coordinates back to error values.
                yticks = np.linspace(0,45,10)
                ytickslabels = ["{:1.2f}".format(num) for num in np.linspace(0, maxErr, 10)]
                ax.set_yticks(yticks)
                ax.set_yticklabels(ytickslabels)
                # ax.set_xlim((0, 100))
                # ax.set_ylim((-0.0, y2))
                # ax.set_title('Cumulative segmentation accuracy per occlusion level')
                ax.tick_params(axis='both', which='major', labelsize=8)
                ax.tick_params(axis='both', which='minor', labelsize=8)
                fig.savefig(directory + '-' + methodsPred[method_i] + '-conditional-histogram.pdf', bbox_inches='tight')
                plt.close(fig)
def saveOcclusionPlots(resultDir, prefix, occlusions, methodsPred, plotColors, plotStyles, plotMethodsIndices, useShapeModel, meanAbsErrAzsArr, meanAbsErrElevsArr, meanErrorsVColorsCArr, meanErrorsVColorsEArr, meanErrorsVColorsSArr, meanErrorsLightCoeffsArr, meanErrorsShapeParamsArr, meanErrorsShapeVerticesArr, meanErrorsLightCoeffsCArr, meanErrorsEnvMapArr, meanErrorsSegmentationArr):
    """Save one cumulative mean-error-vs-occlusion PDF per error measure.

    Each *Arr argument is a per-method list of mean-error curves (or None /
    empty when a method does not produce that measure). One figure per
    measure is written to resultDir + prefix + <suffix> +
    '-performance-plot.pdf'. Shape-parameter and shape-vertex plots are only
    produced when useShapeModel is True.

    This is a deduplicated rewrite: the original repeated the same plotting
    stanza eleven times; behavior (file names, labels, axes, scales) is
    unchanged.
    """
    latexify(columns=2)

    def plotCurves(suffix, errorArrs, ylabel, logScale=False, keepYMin=False):
        # One figure: one curve per method in plotMethodsIndices, drawn on a
        # linear or log-y axis, clamped to the 0-90% occlusion range.
        directory = resultDir + prefix + suffix
        fig = plt.figure()
        ax = fig.add_subplot(111)
        drawCurve = ax.semilogy if logScale else ax.plot
        for method_i in plotMethodsIndices:
            if errorArrs[method_i] is not None and len(errorArrs[method_i]) > 0:
                drawCurve(occlusions, errorArrs[method_i], c=plotColors[method_i], linestyle=plotStyles[method_i], label=methodsPred[method_i])
        ax.legend(loc='best')
        ax.set_xlabel('Occlusion (\%)')
        ax.set_ylabel(ylabel)
        x1, x2 = ax.get_xlim()
        y1, y2 = ax.get_ylim()
        ax.set_xlim((0, 90))
        # Log-scale plots keep the automatic lower limit; linear ones clamp at 0.
        ax.set_ylim((y1 if keepYMin else -0.0, y2))
        fig.savefig(directory + '-performance-plot.pdf', bbox_inches='tight')
        plt.close(fig)

    plotCurves('predictionMeanError-Segmentation', meanErrorsSegmentationArr, 'Segmentation accuracy')
    plotCurves('predictionMeanError-Azimuth', meanAbsErrAzsArr, 'Angular error', logScale=True, keepYMin=True)
    plotCurves('predictionMeanError-Elev', meanAbsErrElevsArr, 'Angular error')
    plotCurves('predictionMeanError-VColors-C', meanErrorsVColorsCArr, 'Appearance error')
    plotCurves('predictionMeanError-VColors-E', meanErrorsVColorsEArr, 'Appearance error')
    plotCurves('predictionMeanError-VColors-S', meanErrorsVColorsSArr, 'Appearance error')
    plotCurves('predictionMeanError-SH', meanErrorsLightCoeffsArr, 'Illumination error')
    if useShapeModel:
        plotCurves('predictionMeanError-ShapeParams', meanErrorsShapeParamsArr, 'Shape parameters error')
        plotCurves('predictionMeanError-ShapeVertices', meanErrorsShapeVerticesArr, 'Shape vertices error', logScale=True, keepYMin=True)
    plotCurves('predictionMeanError-SH-C', meanErrorsLightCoeffsCArr, 'Illumination error')
    plotCurves('predictionMeanError-SH-EnvMap', meanErrorsEnvMapArr, 'Illumination error')
from numpy.core.umath_tests import matrix_multiply
def scaleInvariantMSECoeff(x_pred, x_target):
    """Per-row least-squares scale factor between predictions and targets.

    Rows: test samples. Cols: target variables.
    Returns, for each row i, the scalar s_i minimizing
    ||s_i * x_pred[i] - x_target[i]||^2, i.e.
    <x_pred[i], x_target[i]> / <x_pred[i], x_pred[i]>.

    Fix: the original used numpy.core.umath_tests.matrix_multiply, which has
    been removed from modern NumPy; np.einsum computes the same per-row
    inner products.
    """
    numerators = np.einsum('ij,ij->i', x_pred, x_target)
    denominators = np.einsum('ij,ij->i', x_pred, x_pred)
    return numerators / denominators
def euclidean(X,Y):
    """
    X: matrix (N, D), each row is a datapoint
    Y: a single datapoint, vector of shape (D,)
    Returns a (1, N) matrix with the Euclidean distance from Y to each row of X.

    Fix: the previous docstring claimed Y was an (M, D) matrix returning
    (M, N) distances; the code promotes Y to shape (1, D) via Y[None, :],
    so it only ever handles one query point.
    """
    # cdist requires two 2-D inputs, hence the promotion of Y to (1, D).
    return scipy.spatial.distance.cdist(X, Y[None,:], 'euclidean').T
def one_nn(x_train, x_test, distance_f=euclidean):
    """Return, for each test point, the index of its nearest training point
    under the given distance function (Euclidean by default)."""
    distMatrix = distance_f(x_train, x_test)
    return np.argmin(distMatrix, 1)
def shapeVertexErrors(chShapeParams, chVertices, testShapeParamsGT, shapeParamsPred):
    """Euclidean distance between the model vertices produced by ground-truth
    and predicted shape parameters, one value per test sample.

    chShapeParams is temporarily overwritten so that chVertices re-evaluates
    under each parameter vector; the original parameter values are restored
    before returning.
    """
    savedParams = chShapeParams.r.copy()
    vertexDistances = []
    for test_i in range(len(testShapeParamsGT)):
        chShapeParams[:] = testShapeParamsGT[test_i].copy()
        gtVerts = chVertices.r.copy()
        chShapeParams[:] = shapeParamsPred[test_i].copy()
        predVerts = chVertices.r.copy()
        vertexDistances.append(np.sqrt(np.sum((predVerts - gtVerts) ** 2)))
    chShapeParams[:] = savedParams
    return np.array(vertexDistances)
def computeErrors(setTest, azimuths, testAzsRel, elevations, testElevsGT, vColors, testVColorGT, lightCoeffs, testLightCoefficientsGTRel, approxProjections, approxProjectionsGT, shapeParams, testShapeParamsGT, useShapeModel, chShapeParams, chVertices, posteriors, masksGT):
    """Compute per-sample error arrays for every prediction method.

    Each of azimuths/elevations/vColors/lightCoeffs/... holds one entry per
    method (None when that method does not predict the quantity); a None
    prediction produces a None entry in the matching output list. Returns
    ten lists, one element per method.
    """
    errorsPosePredList = []
    errorsLightCoeffsList = []
    errorsShapeParamsList = []
    errorsShapeVerticesList = []
    errorsEnvMapList = []
    errorsLightCoeffsCList = []
    errorsVColorsEList = []
    errorsVColorsCList = []
    errorsVColorsSList = []
    errorsSegmentation = []
    for method in range(len(azimuths)):
        print("Computing errors for method " + str(method))
        azsPred = azimuths[method]
        elevsPred = elevations[method]
        vColorsPred = vColors[method]
        posteriorsPred = posteriors[method]
        relLightCoefficientsPred = lightCoeffs[method]
        if approxProjectionsGT is not None:
            approxProjectionsPred = approxProjections[method]
        if useShapeModel:
            shapeParamsPred = shapeParams[method]
        # Pose errors (azimuth, elevation).
        if azsPred is not None:
            errorsPosePredList.append(recognition_models.evaluatePrediction(
                testAzsRel[setTest], testElevsGT[setTest], azsPred[setTest], elevsPred[setTest]))
        else:
            errorsPosePredList.append(None)
        # Squared spherical-harmonics coefficient errors.
        if relLightCoefficientsPred is not None:
            errorsLightCoeffsList.append((testLightCoefficientsGTRel[setTest] - relLightCoefficientsPred[setTest]) ** 2)
        else:
            errorsLightCoeffsList.append(None)
        # Shape errors: squared parameter error plus vertex distance.
        if useShapeModel and shapeParamsPred is not None:
            errorsShapeParamsList.append((testShapeParamsGT[setTest] - shapeParamsPred[setTest]) ** 2)
            errorsShapeVerticesList.append(shapeVertexErrors(chShapeParams, chVertices, testShapeParamsGT[setTest], shapeParamsPred[setTest]))
        else:
            errorsShapeParamsList.append(None)
            errorsShapeVerticesList.append(None)
        # Scale-invariant environment-map projection error.
        if approxProjectionsGT is not None and approxProjectionsPred is not None:
            envMapProjScaling = scaleInvariantMSECoeff(approxProjectionsPred.reshape([len(approxProjectionsPred), -1])[setTest], approxProjectionsGT.reshape([len(approxProjectionsGT), -1])[setTest])
            errorsEnvMapList.append(np.mean((approxProjectionsGT[setTest] - envMapProjScaling[:, None, None] * approxProjectionsPred[setTest]) ** 2, axis=(1, 2)))
        else:
            errorsEnvMapList.append(None)
        # Scale-invariant SH coefficient error.
        if relLightCoefficientsPred is not None:
            envMapScaling = scaleInvariantMSECoeff(relLightCoefficientsPred[setTest], testLightCoefficientsGTRel[setTest])
            errorsLightCoeffsCList.append((testLightCoefficientsGTRel[setTest] - envMapScaling[:, None] * relLightCoefficientsPred[setTest]) ** 2)
        else:
            errorsLightCoeffsCList.append(None)
        # Vertex-colour errors under three colour-difference measures.
        if vColorsPred is not None:
            errorsVColorsEList.append(image_processing.eColourDifference(testVColorGT[setTest], vColorsPred[setTest]))
            errorsVColorsCList.append(image_processing.cColourDifference(testVColorGT[setTest], vColorsPred[setTest]))
            errorsVColorsSList.append(image_processing.scaleInvariantColourDifference(testVColorGT[setTest], vColorsPred[setTest]))
        else:
            errorsVColorsEList.append(None)
            errorsVColorsCList.append(None)
            errorsVColorsSList.append(None)
        # Segmentation: intersection-over-union of the GT and posterior masks.
        if posteriorsPred is not None:
            masksCat = np.concatenate([masksGT[:, :, :, None][setTest], posteriorsPred[:, :, :, None][setTest]], axis=3)
            errorsSegmentation.append(np.sum(np.all(masksCat, axis=3), axis=(1, 2)) / np.sum(np.any(masksCat, axis=3), axis=(1, 2)))
        else:
            errorsSegmentation.append(None)
    return errorsPosePredList, errorsLightCoeffsList, errorsShapeParamsList, errorsShapeVerticesList, errorsEnvMapList, errorsLightCoeffsCList, errorsVColorsEList, errorsVColorsCList, errorsVColorsSList, errorsSegmentation
def computeErrorAverages(averageFun, testSet, useShapeModel, errorsPosePredList, errorsLightCoeffsList, errorsShapeParamsList, errorsShapeVerticesList, errorsEnvMapList, errorsLightCoeffsCList, errorsVColorsEList, errorsVColorsCList, errorsVColorsSList, errorsSegmentation):
    """Aggregate per-sample error arrays into one scalar per method.

    averageFun is a reducer such as np.mean or np.median; testSet selects the
    samples to aggregate over. A None entry in an input list (method does not
    predict that quantity) propagates as None. When useShapeModel is False
    the two shape-related output lists are returned empty, matching the
    original behavior.

    Rewrite of eleven copy-pasted if/else-append loops into a single helper;
    the per-metric reductions are unchanged.
    """
    def _reduce(errorList, aggregate):
        # Apply `aggregate` to each method's errors, keeping None placeholders.
        return [aggregate(err) if err is not None else None for err in errorList]

    meanAbsErrAzsList = _reduce(errorsPosePredList, lambda e: averageFun(np.abs(e[0][testSet])))
    meanAbsErrElevsList = _reduce(errorsPosePredList, lambda e: averageFun(np.abs(e[1][testSet])))
    meanErrorsLightCoeffsList = _reduce(errorsLightCoeffsList, lambda e: averageFun(averageFun(e[testSet], axis=1), axis=0))
    if useShapeModel:
        meanErrorsShapeParamsList = _reduce(errorsShapeParamsList, lambda e: averageFun(np.mean(e[testSet], axis=1), axis=0))
        meanErrorsShapeVerticesList = _reduce(errorsShapeVerticesList, lambda e: averageFun(e[testSet], axis=0))
    else:
        meanErrorsShapeParamsList = []
        meanErrorsShapeVerticesList = []
    meanErrorsLightCoeffsCList = _reduce(errorsLightCoeffsCList, lambda e: averageFun(np.mean(e[testSet], axis=1), axis=0))
    meanErrorsEnvMapList = _reduce(errorsEnvMapList, lambda e: averageFun(e[testSet]))
    meanErrorsVColorsEList = _reduce(errorsVColorsEList, lambda e: averageFun(e[testSet], axis=0))
    meanErrorsVColorsCList = _reduce(errorsVColorsCList, lambda e: averageFun(e[testSet], axis=0))
    meanErrorsVColorsSList = _reduce(errorsVColorsSList, lambda e: averageFun(e[testSet], axis=0))
    meanErrorsSegmentation = _reduce(errorsSegmentation, lambda e: averageFun(e[testSet], axis=0))
    return meanAbsErrAzsList, meanAbsErrElevsList, meanErrorsLightCoeffsList, meanErrorsShapeParamsList, meanErrorsShapeVerticesList, meanErrorsLightCoeffsCList, meanErrorsEnvMapList, meanErrorsVColorsEList, meanErrorsVColorsCList, meanErrorsVColorsSList, meanErrorsSegmentation
def writeImagesHdf5(imagesDir, writeDir, imageSet, writeGray=False ):
    # Pack the JPEG files 'im<id>.jpeg' listed in imageSet into an appendable
    # HDF5 dataset: writeDir/images.h5 (uint8 RGB frames) or, when writeGray
    # is True, writeDir/images_gray.h5 (float32 luminance in [0, 1]).
    print("Writing HDF5 file")
    # Read one image up front just to discover the frame size.
    image = skimage.io.imread(imagesDir + 'im' + str(imageSet[0]) + '.jpeg')
    imDtype = image.dtype  # NOTE(review): unused -- candidate for removal
    width = image.shape[1]
    height = image.shape[0]
    if not writeGray:
        gtDataFile = h5py.File(writeDir + 'images.h5', 'w')
        # Empty array with a per-frame compound dtype; maxshape=None on axis 0
        # allows the dataset to grow one frame at a time in the loop below.
        images = np.array([], dtype = np.dtype('('+ str(height)+','+ str(width) +',3)uint8'))
        gtDataset = gtDataFile.create_dataset("images", data=images, maxshape=(None,height,width, 3))
        # images = np.zeros([len(imageSet), height, width, 3], dtype=np.uint8)
    else:
        imageGray = 0.3*image[:,:,0] + 0.59*image[:,:,1] + 0.11*image[:,:,2]
        grayDtype = imageGray.dtype  # NOTE(review): unused -- candidate for removal
        gtDataFile = h5py.File(writeDir + 'images_gray.h5', 'w')
        # images = np.zeros([], dtype=np.float32)
        images = np.array([], dtype = np.dtype('('+ str(height)+','+ str(width) +')f'))
        gtDataset = gtDataFile.create_dataset("images", data=images, maxshape=(None,height,width))
    for imageit, imageid in enumerate(imageSet):
        # Grow the dataset by one frame and write into the new last slot.
        gtDataset.resize(gtDataset.shape[0]+1, axis=0)
        image = skimage.io.imread(imagesDir + 'im' + str(imageid) + '.jpeg').astype(np.uint8)
        if not writeGray:
            gtDataset[-1] = image
        else:
            # ITU-R 601 luma weights applied to the [0, 1]-scaled image.
            image = image.astype(np.float32)/255.0
            gtDataset[-1] = 0.3*image[:,:,0] + 0.59*image[:,:,1] + 0.11*image[:,:,2]
        gtDataFile.flush()
    gtDataFile.close()
    print("Ended writing HDF5 file")
def loadMasks(imagesDir, maskSet):
    """Load 'mask<id>.npy' for every id in maskSet and stack them along a new
    leading axis; ids with no mask file on disk are silently skipped."""
    loaded = []
    for maskId in maskSet:
        maskPath = imagesDir + 'mask' + str(maskId) + '.npy'
        if os.path.isfile(maskPath):
            loaded.append(np.load(maskPath)[None, :, :])
    return np.vstack(loaded)
def loadMasksMug(imagesDir, maskSet):
    """Load 'mask<id>_mug.npy' for every id in maskSet and stack them along a
    new leading axis; ids with no mask file on disk are silently skipped."""
    loaded = []
    for maskId in maskSet:
        maskPath = imagesDir + 'mask' + str(maskId) + '_mug.npy'
        if os.path.isfile(maskPath):
            loaded.append(np.load(maskPath)[None, :, :])
    return np.vstack(loaded)
def readImages(imagesDir, imageSet, loadGray=False, loadFromHdf5=False):
    """Read images as float32 arrays scaled to [0, 1].

    Parameters
    ----------
    imagesDir : directory containing 'im<id>.jpeg' files and/or the HDF5
        stores written by writeImagesHdf5.
    imageSet : sequence of integer image ids (or dataset row indices).
    loadGray : return (N, H, W) luminance instead of (N, H, W, 3) RGB.
    loadFromHdf5 : read from images.h5 / images_gray.h5; returns None when
        the store file does not exist (original behavior, kept).

    Fix: np.bool (an alias removed in NumPy 1.24) replaced by the builtin
    bool in the two mask constructions.
    """
    if loadFromHdf5:
        if not loadGray:
            if os.path.isfile(imagesDir + 'images.h5'):
                gtDataFile = h5py.File(imagesDir + 'images.h5', 'r')
                # h5py fancy row selection wants a boolean mask.
                boolSet = np.zeros(gtDataFile["images"].shape[0]).astype(bool)
                boolSet[imageSet] = True
                return gtDataFile["images"][boolSet,:,:,:].astype(np.float32)/255.0
        else:
            if os.path.isfile(imagesDir + 'images_gray.h5'):
                gtDataFile = h5py.File(imagesDir + 'images_gray.h5', 'r')
                boolSet = np.zeros(gtDataFile["images"].shape[0]).astype(bool)
                boolSet[imageSet] = True
                return gtDataFile["images"][boolSet,:,:].astype(np.float32)
    else:
        # Probe the first image for the frame size.
        image = skimage.io.imread(imagesDir + 'im' + str(imageSet[0]) + '.jpeg')
        width = image.shape[1]
        height = image.shape[0]
        if not loadGray:
            images = np.zeros([len(imageSet), height, width, 3], dtype=np.float32)
        else:
            images = np.zeros([len(imageSet), height, width], dtype=np.float32)
        for imageit, imageid in enumerate(imageSet):
            if os.path.isfile(imagesDir + 'im' + str(imageid) + '.jpeg'):
                image = skimage.io.imread(imagesDir + 'im' + str(imageid) + '.jpeg')
            else:
                print("Image " + str(imageid) + " does not exist!")
                # Fall back to a black frame shaped like the last read image.
                image = np.zeros_like(image)
            image = image/255.0
            if not loadGray:
                images[imageit, :, :, :] = image
            else:
                # ITU-R 601 luma weights.
                images[imageit, :, :] = 0.3*image[:,:,0] + 0.59*image[:,:,1] + 0.11*image[:,:,2]
        return images
def readImagesHdf5(imagesDir, loadGray=False):
    """Open the colour (images.h5) or grayscale (images_gray.h5) HDF5 store
    and return its 'images' dataset.

    Returns None when the store file does not exist. The file handle is left
    open on purpose so the returned dataset remains readable.
    """
    storeName = 'images_gray.h5' if loadGray else 'images.h5'
    if os.path.isfile(imagesDir + storeName):
        dataFile = h5py.File(imagesDir + storeName, 'r')
        return dataFile["images"]
def getTriplets(parameterVals, closeDist = 5*np.pi/180, farDist=15*np.pi/180, normConst = 2*np.pi, chunkSize=10):
    """Build (anchor, close, far) index triplets over a circular parameter
    (e.g. azimuth) for metric learning.

    Samples are rank-ordered by parameterVals; for each anchor, the 'close'
    and 'far' partners sit roughly closeDist / farDist away on the parameter
    circle (normConst is the full period). Ranks are shuffled within chunks
    of chunkSize so the pairing is not deterministic.

    Returns three arrays of indices into parameterVals, each of length
    len(parameterVals).

    Fix: the chunk shuffle previously sliced a permutation of
    range(chunkSize) with [:maxi], which produced wrong-length assignments
    (ValueError) and out-of-range ranks whenever len(parameterVals) was not
    a multiple of chunkSize; it now permutes exactly the ranks of the chunk.
    """
    numTrainSet = len(parameterVals)
    initIdx = np.random.permutation(np.arange(numTrainSet))
    # Sort samples so rank distance approximates parameter distance.
    orderedIdx = np.argsort(parameterVals)
    shuffledIdx = np.arange(numTrainSet)
    for i in np.arange(0, numTrainSet, chunkSize):
        maxi = min(numTrainSet, i + chunkSize)
        shuffledIdx[i:maxi] = np.random.permutation(np.arange(i, maxi))
    # Rank offsets corresponding to the requested angular distances.
    idxDist5 = np.roll(np.arange(numTrainSet), int(numTrainSet * closeDist / normConst) )
    idxDist15 = np.roll(np.arange(numTrainSet), int(numTrainSet * farDist / normConst) )
    anchorIdx = orderedIdx[initIdx]
    closeIdx = orderedIdx[shuffledIdx[idxDist5][initIdx]]
    farIdx = orderedIdx[shuffledIdx[idxDist15][initIdx]]
    return anchorIdx, closeIdx, farIdx
def generateExperiment(size, experimentDir, ratio, seed):
    """Randomly partition range(size) into train/test splits and save them.

    Writes experimentDir + 'train.npy' (first ratio fraction of a shuffled
    permutation) and experimentDir + 'test.npy' (the remainder), creating
    experimentDir if needed. The split is reproducible via `seed`.

    Fix: np.int (an alias removed in NumPy 1.24) replaced by the builtin int.
    """
    np.random.seed(seed)
    data = np.arange(size)
    np.random.shuffle(data)
    splitPoint = int(size * ratio)
    train = data[0:splitPoint]
    test = data[splitPoint:]
    if not os.path.exists(experimentDir):
        os.makedirs(experimentDir)
    np.save(experimentDir + 'train.npy', train)
    np.save(experimentDir + 'test.npy', test)
# saveScatter(xaxis*180/np.pi, yaxis[1], 'Azimuth error (ground-truth)', Azimuth (predicted), filename)
import matplotlib.pyplot as plt
def saveScatter(xaxis, yaxis, xlabel, ylabel, filename):
    """Save a scatter plot of yaxis against xaxis to `filename`.

    Fix: the xlabel/ylabel arguments were previously ignored in favour of
    hard-coded 'Elevation (degrees)' / 'Angular error' labels (see the
    commented-out call site above, which passes per-plot labels).
    """
    plt.ioff()
    fig = plt.figure()
    plt.scatter(xaxis, yaxis)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # Fixed view: parameter in degrees on x, signed angular error on y.
    plt.axis((0,90,-90,90))
    plt.title('Performance scatter plot')
    fig.savefig(filename)
    plt.close(fig)
#Method from https://github.com/adamlwgriffiths/Pyrr/blob/master/pyrr/geometry.py
def create_cube(scale=(1.0,1.0,1.0), st=False, rgba=np.array([1.,1.,1.,1.]), dtype='float32', type='triangles'):
    """Return (vertex_data, indices) describing a cube ready for rendering.

    Adapted from Pyrr (github.com/adamlwgriffiths/Pyrr, pyrr/geometry.py).

    Parameters
    ----------
    scale : (width, height, depth) extents of the cube.
    st : texture coordinates -- False/empty for none, True for the defaults,
        a scalar to scale the defaults, or an array of shape (2,2) (min/max),
        (4,2) (per-face corners) or (6,2).
    rgba : vertex colours -- True for default white, False for none, a scalar
        to scale the defaults, or an array of shape (3,), (4,), (4,3), (4,4),
        (6,3), (6,4), (24,3) or (24,4).
    dtype : numpy dtype of the returned vertex data.
    type : index layout; only 'triangles' is implemented -- any other value
        makes the function return None (original behavior, kept).

    Returns
    -------
    data : (24, 3 [+2 st] [+3/4 rgba]) array of per-vertex position plus
        optional texture coordinates and colour.
    indices : (36,) int array -- two counter-clockwise triangles per face.

    Fixes: `if st:` raised ValueError on multi-element arrays (ambiguous
    truth value) and `len(rgba) > 0` raised TypeError on scalars/bools,
    which made several documented branches unreachable. Array/list inputs
    behave exactly as before.
    """
    shape = [24, 3]
    rgba_offset = 3
    width, height, depth = scale
    # half the dimensions
    width /= 2.0
    height /= 2.0
    depth /= 2.0
    vertices = np.array([
        # front
        # top right
        ( width, height, depth,),
        # top left
        (-width, height, depth,),
        # bottom left
        (-width,-height, depth,),
        # bottom right
        ( width,-height, depth,),
        # right
        # top right
        ( width, height,-depth),
        # top left
        ( width, height, depth),
        # bottom left
        ( width,-height, depth),
        # bottom right
        ( width,-height,-depth),
        # back
        # top right
        (-width, height,-depth),
        # top left
        ( width, height,-depth),
        # bottom left
        ( width,-height,-depth),
        # bottom right
        (-width,-height,-depth),
        # left
        # top right
        (-width, height, depth),
        # top left
        (-width, height,-depth),
        # bottom left
        (-width,-height,-depth),
        # bottom right
        (-width,-height, depth),
        # top
        # top right
        ( width, height,-depth),
        # top left
        (-width, height,-depth),
        # bottom left
        (-width, height, depth),
        # bottom right
        ( width, height, depth),
        # bottom
        # top right
        ( width,-height, depth),
        # top left
        (-width,-height, depth),
        # bottom left
        (-width,-height,-depth),
        # bottom right
        ( width,-height,-depth),
    ], dtype=dtype)
    st_values = None
    rgba_values = None
    # ndarray-safe replacement for `if st:` (see docstring).
    if isinstance(st, np.ndarray):
        include_st = st.size > 0
    else:
        include_st = bool(st)
    if include_st:
        # default st values
        st_values = np.tile(
            np.array([
                (1.0, 1.0,),
                (0.0, 1.0,),
                (0.0, 0.0,),
                (1.0, 0.0,),
            ], dtype=dtype),
            (6,1,)
        )
        if isinstance(st, bool):
            pass
        elif isinstance(st, (int, float)):
            st_values *= st
        elif isinstance(st, (list, tuple, np.ndarray)):
            st = np.array(st, dtype=dtype)
            if st.shape == (2,2,):
                # min / max
                st_values *= st[1] - st[0]
                st_values += st[0]
            elif st.shape == (4,2,):
                # per face st values specified manually
                st_values[:] = np.tile(st, (6,1,))
            elif st.shape == (6,2,):
                # st values specified manually
                st_values[:] = st
            else:
                raise ValueError('Invalid shape for st')
        else:
            raise ValueError('Invalid value for st')
        shape[-1] += st_values.shape[-1]
        rgba_offset += st_values.shape[-1]
    # Scalar/bool-safe replacement for `len(rgba) > 0` (see docstring).
    if isinstance(rgba, bool):
        include_rgba = rgba
    elif isinstance(rgba, (int, float)):
        include_rgba = True
    else:
        include_rgba = len(rgba) > 0
    if include_rgba:
        # default rgba values
        rgba_values = np.tile(np.array([1.0, 1.0, 1.0, 1.0], dtype=dtype), (24,1,))
        if isinstance(rgba, bool):
            pass
        elif isinstance(rgba, (int, float)):
            # int / float expands to RGBA with all values == value
            rgba_values *= rgba
        elif isinstance(rgba, (list, tuple, np.ndarray)):
            rgba = np.array(rgba, dtype=dtype)
            if rgba.shape == (3,):
                rgba_values = np.tile(rgba, (24,1,))
            elif rgba.shape == (4,):
                rgba_values[:] = np.tile(rgba, (24,1,))
            elif rgba.shape == (4,3,):
                rgba_values = np.tile(rgba, (6,1,))
            elif rgba.shape == (4,4,):
                rgba_values = np.tile(rgba, (6,1,))
            elif rgba.shape == (6,3,):
                rgba_values = np.repeat(rgba, 4, axis=0)
            elif rgba.shape == (6,4,):
                rgba_values = np.repeat(rgba, 4, axis=0)
            elif rgba.shape == (24,3,):
                rgba_values = rgba
            elif rgba.shape == (24,4,):
                rgba_values = rgba
            else:
                raise ValueError('Invalid shape for rgba')
        else:
            raise ValueError('Invalid value for rgba')
        shape[-1] += rgba_values.shape[-1]
    data = np.empty(shape, dtype=dtype)
    data[:,:3] = vertices
    if st_values is not None:
        data[:,3:5] = st_values
    if rgba_values is not None:
        data[:,rgba_offset:] = rgba_values
    if type == 'triangles':
        # counter clockwise
        # top right -> top left -> bottom left
        # top right -> bottom left -> bottom right
        indices = np.tile(np.array([0, 1, 2, 0, 2, 3], dtype='int'), (6,1))
        for face in range(6):
            indices[face] += (face * 4)
        indices.shape = (-1,)
        return data, indices
| 70,535 | 44.477756 | 456 | py |
inversegraphics | inversegraphics-master/extract.py | import matplotlib
# matplotlib.use('Agg')
# Blender script: render the current scene at 110x110 and pull the rendered
# image (plus an object-index pass for the 'Cube' object) back as numpy
# arrays through the compositor.
import bpy
import numpy
import matplotlib.pyplot as plt
width = 110
height = 110
scene = bpy.data.scenes[0]
scene.render.resolution_x = width #perhaps set resolution in code
scene.render.resolution_y = height
scene.render.resolution_percentage = 100
# Tag the cube so it appears in the object-index render pass.
bpy.data.objects['Cube'].pass_index = 1
scene.render.layers[0].use_pass_object_index = True
# Build a small compositor graph: render layers -> viewer node (image) and
# render layers -> file-output node (the IndexOB pass).
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
rl = tree.nodes[1]
links = tree.links
v = tree.nodes.new('CompositorNodeViewer')
v.location = 750,210
v.use_alpha = False
links.new(rl.outputs['Image'], v.inputs['Image']) # link Image output to Viewer input
outnode = tree.nodes.new('CompositorNodeOutputFile')
outnode.base_path = 'indexob.png'
links.new(rl.outputs['IndexOB'], outnode.inputs['Image'])
bpy.data.objects['Cube'].pass_index = 1
bpy.ops.render.render( write_still=False )
blendImage = bpy.data.images['Render Result']
# extract_render is not part of the stock Blender Python API -- presumably a
# patched Blender build used by this project; TODO confirm.
# Flip vertically (Blender's buffer is bottom-up) and drop the alpha channel.
image = numpy.flipud(numpy.array(blendImage.extract_render(scene=scene)).reshape([height,width,4]))[:,:,0:3]
#
# blendImage2 = bpy.data.images['Viewer Node']
#
# image2 = numpy.flipud(numpy.array(blendImage2.extract_render(scene=scene)).reshape([256,256,4]))[:,:,0:3]
print("DOne") | 1,234 | 24.204082 | 108 | py |
inversegraphics | inversegraphics-master/lasagne_visualize.py | from itertools import product
from lasagne.layers import get_output
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
def plot_loss(net):
    """Plot training vs. validation loss per epoch from net.train_history_."""
    history = net.train_history_
    plt.plot([epoch['train_loss'] for epoch in history], label='train loss')
    plt.plot([epoch['valid_loss'] for epoch in history], label='valid loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend(loc='best')
def plot_conv_weights(layer, figsize=(6, 6)):
    """Visualize a convolutional layer's filters, one figure per input channel.

    Only really makes sense with convolutional layers.

    Parameters
    ----------
    layer : lasagne.layers.Layer
    """
    weights = layer.W.get_value()
    num_filters = weights.shape[0]
    grid_size = np.ceil(np.sqrt(num_filters)).astype(int)
    for channel in range(weights.shape[1]):
        figs, axes = plt.subplots(grid_size, grid_size, figsize=figsize)
        # Hide ticks and frames up front; cells beyond num_filters stay blank.
        for ax in axes.flatten():
            ax.set_xticks([])
            ax.set_yticks([])
            ax.axis('off')
        for i, (row, col) in enumerate(product(range(grid_size), range(grid_size))):
            if i >= num_filters:
                break
            axes[row, col].imshow(weights[i, channel], cmap='gray',
                                  interpolation='nearest')
def plot_conv_activity(layer, x, figsize=(6, 8)):
    """Plot the activation maps a 2D layer produces for one input sample.

    Only really makes sense with layers that work on 2D data (2D
    convolutional layers, 2D pooling layers ...).

    Parameters
    ----------
    layer : lasagne.layers.Layer
    x : numpy.ndarray
        A single sample, i.e. x.shape[0] == 1.
    """
    if x.shape[0] != 1:
        raise ValueError("Only one sample can be plotted at a time.")
    # Compile a theano function mapping the network input to this layer's output.
    input_var = T.tensor4('xs').astype(theano.config.floatX)
    forward = theano.function([input_var], get_output(layer, input_var))
    activity = forward(x)
    num_maps = activity.shape[1]
    grid_size = np.ceil(np.sqrt(num_maps)).astype(int)
    figs, axes = plt.subplots(grid_size + 1, grid_size, figsize=figsize)
    # Show the (inverted) original input centred in the extra top row.
    axes[0, grid_size // 2].imshow(1 - x[0][0], cmap='gray',
                                   interpolation='nearest')
    axes[0, grid_size // 2].set_title('original')
    for ax in axes.flatten():
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')
    for i, (row, col) in enumerate(product(range(grid_size), range(grid_size))):
        if i >= num_maps:
            break
        ndim = activity[0][i].ndim
        if ndim != 2:
            raise ValueError("Wrong number of dimensions, image data should "
                             "have 2, instead got {}".format(ndim))
        axes[row + 1, col].imshow(-activity[0][i], cmap='gray',
                                  interpolation='nearest')
def occlusion_heatmap(net, x, target, square_length=7):
    """An occlusion test that checks an image for its critical parts.

    Slides a square occluder (zeros) over the image and records, for every
    centre position (i, j), the probability the net still assigns to the
    true class `target`. A drop in that probability marks image regions the
    prediction depends on; no drop anywhere may indicate overfitting.
    Currently all colour channels are occluded at once. Can be slow, since
    one prediction is made per pixel. See paper: Zeiler, Fergus 2013.

    Parameters
    ----------
    net : NeuralNet instance
        The neural net to test.
    x : np.array
        Input data of shape (1, c, x, y). Only makes sense with image data.
    target : int
        The true class; its predicted probability is what gets recorded.
    square_length : int (default=7)
        Side length of the occluding square; must be odd.

    Results
    -------
    heat_array : np.array (with same size as image)
        heat_array[i, j] is the predicted probability of the correct class
        when the occluder is centred at (i, j).
    """
    if (x.ndim != 4) or x.shape[0] != 1:
        raise ValueError("This function requires the input data to be of "
                         "shape (1, c, x, y), instead got {}".format(x.shape))
    if square_length % 2 == 0:
        raise ValueError("Square length has to be an odd number, instead "
                         "got {}.".format(square_length))
    num_classes = net.layers_[-1].num_units
    img = x[0].copy()
    _, channels, rows, cols = x.shape
    pad = square_length // 2 + 1
    occluded_batch = np.zeros((cols, channels, rows, cols), dtype=img.dtype)
    probs = np.zeros((rows, cols, num_classes))
    for i in range(rows):
        # Occlude one full row of centre positions and predict them as a batch.
        for j in range(cols):
            padded = np.pad(img, ((0, 0), (pad, pad), (pad, pad)), 'constant')
            padded[:, i:i + square_length, j:j + square_length] = 0.
            occluded_batch[j] = padded[:, pad:-pad, pad:-pad]
        probs[i] = net.predict_proba(occluded_batch).reshape(cols, num_classes)
    # Keep only the target-class probability at each centre position.
    return probs[:, :, target].copy()
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None)):
    """Plot which parts of an image are particularly important for the
    net to classify the image correctly.

    See paper: Zeiler, Fergus 2013

    Parameters
    ----------
    net : NeuralNet instance
        The neural net to test.
    X : numpy.array
        The input data, should be of shape (b, c, 0, 1). Only makes
        sense with image data.
    target : list or numpy.array of ints
        The true values of the image. If the net makes several
        predictions, say 10 classes, this indicates which one to look
        at. If more than one sample is passed to X, each of them needs
        its own target.
    square_length : int (default=7)
        The length of the side of the square that occludes the image.
        Must be an odd number.
    figsize : tuple (int, int)
        Size of the figure.  A height of None scales with the number of
        images.

    Plots
    -----
    Figure with 3 subplots per image: the original image, the occlusion
    heatmap, and both images super-imposed.
    """
    if (X.ndim != 4):
        raise ValueError("This function requires the input data to be of "
                         "shape (b, c, x, y), instead got {}".format(X.shape))
    num_images = X.shape[0]
    if figsize[1] is None:
        figsize = (figsize[0], num_images * figsize[0] / 3)
    figs, axes = plt.subplots(num_images, 3, figsize=figsize)
    for ax in axes.flatten():
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')
    for n in range(num_images):
        heat_img = occlusion_heatmap(
            net, X[n:n + 1, :, :, :], target[n], square_length
        )
        # with a single image, subplots returns a 1D axes array
        ax = axes if num_images == 1 else axes[n]
        # collapse color channels to a single grayscale image
        img = X[n, :, :, :].mean(0)
        ax[0].imshow(-img, interpolation='nearest', cmap='gray')
        ax[0].set_title('image')
        ax[1].imshow(-heat_img, interpolation='nearest', cmap='Reds')
        ax[1].set_title('critical parts')
        ax[2].imshow(-img, interpolation='nearest', cmap='gray')
        ax[2].imshow(-heat_img, interpolation='nearest', cmap='Reds',
                     alpha=0.6)
        ax[2].set_title('super-imposed')
| 7,737 | 31.512605 | 78 | py |
inversegraphics | inversegraphics-master/light_probes.py | from math import sin, cos, ceil, floor, pi
import importlib
import bpy
import numpy as np
import mathutils
from contextlib import contextmanager
from uuid import uuid4
from bpy.utils import register_module, unregister_module
from bpy import props as p
import json
import ipdb
import matplotlib.pyplot as plt
# bl_info = {
# "name": "Lightprobe",
# "description": "Gives ability to add light probes to a cycles render. \
# Light probes sample incoming light at that location and generate 9 \
# coefficients that can be used to quickly simluate that lighting in a real-time \
# game engine.",
# "category": "Object",
# "author": "Andrew Moffat",
# "version": (1, 0),
# "blender": (2, 7, 1)
# }
JSON_FILE_NAME = "lightprobes.json"
FAILSAFE_OFFSET = 0.00001
import cv2
# Constant prefactors of the 9 real spherical-harmonics basis functions of
# bands l = 0..2 (Ramamoorthi & Hanrahan, "An Efficient Representation for
# Irradiance Environment Maps").  The index order matches the basis order
# used by SHProjection / getEnvironmentMapCoefficients below.
spherical_harmonics_coeffs = np.array([
                    0.282095,
                    0.488603 ,
                    0.488603 ,
                    0.488603,
                    1.092548,
                    1.092548,
                    0.315392 ,
                    1.092548 ,
                    0.546274 ])
def processSphericalEnvironmentMap(envMapTexture):
    """Zero out pixels outside the circular (angular/mirror-ball) region of
    a spherical environment-map image, in place, and return the texture
    together with the mean of the pixels inside the circle.

    NOTE(review): np.int is removed in NumPy >= 1.24 -- confirm the pinned
    NumPy version before running on a modern stack.
    """
    # distance transform from the image centre: marks each pixel with its
    # distance to the single zeroed centre pixel
    mask = np.ones([envMapTexture.shape[0],envMapTexture.shape[1]]).astype(np.uint8)
    mask[np.int(mask.shape[0]/2), np.int(mask.shape[1]/2)] = 0
    distMask = cv2.distanceTransform(mask, cv2.DIST_L2, 5)
    # everything farther than the inscribed-circle radius is invalid
    envMapTexture[distMask > mask.shape[0]/2,:] = 0
    envMapMean = envMapTexture[distMask <= mask.shape[0]/2].mean()
    # NOTE(review): this assignment is a no-op (assigns the region to
    # itself); possibly a normalisation by envMapMean was intended --
    # verify against callers before changing.
    envMapTexture[distMask <= mask.shape[0]/2, :] = envMapTexture[distMask <= mask.shape[0]/2]
    return envMapTexture, envMapMean
def is_lightprobe(ob):
    """Return True when *ob* is one of the light-probe objects created by
    this module (identified by its name prefix)."""
    prefix = "lightprobe-"
    return ob.name.startswith(prefix)
def all_active_lightprobes():
    """Yield every light-probe object present in the current scene."""
    scene_objects = bpy.context.scene.objects
    yield from (ob for ob in scene_objects if is_lightprobe(ob))
def get_all_lightprobe_data():
    """Collect a record (location, name, SH coefficients) for every probe
    in the scene that has baked coefficients; unbaked probes are skipped.

    Locations are scaled by the scene's unit scale.
    """
    scale = bpy.context.scene.unit_settings.scale_length
    records = []
    for probe in all_active_lightprobes():
        coeffs = get_coeff_prop(probe)
        if not coeffs:
            continue
        records.append({
            "loc": [coord * scale for coord in probe.location],
            "name": probe.name or None,
            "coeffs": coeffs,
        })
    return records
def get_or_create_probe_file():
    """Return the text datablock used to store probe data, creating it on
    first use."""
    texts = bpy.data.texts
    if JSON_FILE_NAME not in texts:
        texts.new(JSON_FILE_NAME)
    return texts[JSON_FILE_NAME]
def write_lightprobe_data(data):
    """Serialize *data* as pretty-printed JSON and overwrite the probe
    text datablock with it."""
    serialized = json.dumps(data, indent=4, sort_keys=True)
    out = get_or_create_probe_file()
    out.clear()
    out.write(serialized)
# def fetch_integration_callback(name):
# parts = name.split(".")
# fn_name = parts[-1]
# module_name = ".".join(parts[:-1])
#
# try:
# module = importlib.import_module(module_name)
# except:
# return None
# else:
# fn = getattr(module, fn_name, None)
# return fn
#
# name, theta_res, phi_res, samples =
# # def pre_bake_hook(name, context, probe):
# fn = fetch_integration_callback(name)
# ret = None
# if fn:
# ret = fn(context, probe)
# return ret
#
# # def post_bake_hook(name, context, data, pre_bake_data):
# fn = fetch_integration_callback(name)
# if fn:
# fn(context, data, pre_bake_data)
def hide_object(ob):
    """Disable every Cycles ray-visibility flag on *ob* so it neither
    appears in renders nor influences shading."""
    visibility = ob.cycles_visibility
    for flag in ("shadow", "camera", "glossy", "diffuse",
                 "transmission", "scatter"):
        setattr(visibility, flag, False)
def set_coeff_prop(ob, coeffs):
    """Store the SH coefficients on the object datablock.

    Datablocks cannot hold arbitrary nested Python dicts, so the mapping
    is flattened to a JSON string first.
    """
    serialized = json.dumps(coeffs)
    ob["lightprobe_coeffs"] = serialized
def get_coeff_prop(ob):
    """Read back the SH coefficients stored by set_coeff_prop.

    Returns the unserialized data, or None when the object has no baked
    coefficients yet.
    """
    raw = ob.get("lightprobe_coeffs", None)
    if raw:
        return json.loads(raw)
    return None
def setup_material(ob):
    """Build the bake material for a probe: a white diffuse BSDF plus an
    image-texture node (the bake target) wired to the probe's "lightmap"
    UV layer, appended to *ob*'s material slots."""
    scene = bpy.context.scene
    mat = bpy.data.materials.new(ob.name)
    mat.use_nodes = True
    tree = mat.node_tree
    # start from an empty node tree
    for node in list(tree.nodes):
        tree.nodes.remove(node)
    #diffuse = tree.nodes.new("ShaderNodeBsdfGlossy")
    diffuse = tree.nodes.new("ShaderNodeBsdfDiffuse")
    diffuse.inputs["Color"].default_value = (1, 1, 1, 1)
    diffuse.inputs["Roughness"].default_value = 1.0
    diffuse_out = diffuse.outputs["BSDF"]
    output = tree.nodes.new("ShaderNodeOutputMaterial")
    # output.color_space = 'NONE'
    output_in = output.inputs["Surface"]
    tree.links.new(diffuse_out, output_in)
    # image-texture node that Cycles bakes into (raw/linear color space so
    # the SH projection reads unmanaged values)
    bake_node = tree.nodes.new("ShaderNodeTexImage")
    bake_node.label = bake_node.name
    bake_node.color_space = 'NONE'
    bake_out = bake_node.outputs["Color"]
    texture = create_lightmap_image(ob, 128, 128)
    bake_node.image = texture
    bake_node.image.colorspace_settings.name = 'Raw'
    ob.data.uv_textures["lightmap"].active = True
    # drive the texture lookup through the probe's lightmap UV layer
    color_uvmap = tree.nodes.new("ShaderNodeUVMap")
    color_uvmap.uv_map = "lightmap"
    tree.links.new(color_uvmap.outputs["UV"], bake_node.inputs["Vector"])
    ob.data.materials.append(mat)
@contextmanager
def no_interfere_ctx():
    """Context manager that snapshots the current selection and active
    object and restores them when the block exits.

    The restore happens in a ``finally`` so that an exception raised
    inside the managed block does not leave the user's selection state
    clobbered (the original code skipped restoration on error).
    """
    old_selected_objects = bpy.context.selected_objects
    active_object = bpy.context.active_object
    try:
        yield
    finally:
        for obj in bpy.context.selected_objects:
            obj.select = False
        for obj in old_selected_objects:
            obj.select = True
        bpy.context.scene.objects.active = active_object
@contextmanager
def active(ob):
    """Temporarily make *ob* the selected and active object; the previous
    selection is restored on exit via no_interfere_ctx."""
    with no_interfere_ctx():
        ob.select = True
        bpy.context.scene.objects.active = ob
        yield
def override_ctx(**kwargs):
    """Return a copy of the current Blender context dict with the given
    keys overridden (for passing to operators)."""
    merged = dict(bpy.context.copy())
    merged.update(kwargs)
    return merged
def create_lightmap_image(ob, width, height):
    """Create a blank image datablock named after *ob*, to be used as the
    probe's bake target, and return it."""
    name = ob.name
    bpy.ops.image.new(override_ctx(object=ob), name=name, width=width,
            height=height, alpha=False, float=False)
    return bpy.data.images[name]
def get_lightmap(ob):
    """Look up the bake-target image associated with *ob* (stored under
    the object's name)."""
    images = bpy.data.images
    return images[ob.name]
def add_lightprobe():
    """Create a new light-probe object: a subdivided, triangulated,
    smooth-shaded cube-sphere with a single "lightmap" UV layer, named
    with a unique "lightprobe-" prefix.  Returns the new object."""
    with no_interfere_ctx():
        bpy.ops.mesh.primitive_cube_add()
        probe = bpy.context.object
        # drop any default UV layers, then add exactly one for the lightmap
        for _ in range(len(probe.data.uv_layers)):
            bpy.ops.mesh.uv_texture_remove()
        bpy.ops.mesh.uv_texture_add()
        probe.data.uv_layers[0].name = "lightmap"
        bpy.ops.uv.lightmap_pack(PREF_CONTEXT="ALL_FACES",
                PREF_PACK_IN_ONE=True, PREF_NEW_UVLAYER=False,
                PREF_APPLY_IMAGE=False, PREF_IMG_PX_SIZE=512, PREF_BOX_DIV=12,
                PREF_MARGIN_DIV=0.1)
        # subdivide the cube into a sphere-like mesh and triangulate it so
        # the ray/triangle sampling code below can work on tessfaces
        bpy.ops.object.modifier_add(type="SUBSURF")
        bpy.context.object.modifiers["Subsurf"].levels = 4
        bpy.ops.object.modifier_add(type="TRIANGULATE")
        bpy.ops.object.convert(target='MESH')
        probe.scale = mathutils.Vector((0.3, 0.3, 0.3))
        bpy.ops.object.shade_smooth()
        # unique suffix so multiple probes (and their images) never collide
        probe.name = "lightprobe-" + uuid4().hex
    return probe
def bake(ob):
    """Bake combined lighting into *ob*'s lightmap texture.

    Temporarily raises the Cycles sample count to the probe's configured
    quality and restores the user's previous value afterwards -- in a
    ``finally`` so a failed bake cannot leave the scene settings changed.
    """
    with active(ob):
        cycles = bpy.context.scene.cycles
        old_samples = cycles.samples
        name, theta_res, phi_res, samples = probeProperties()
        cycles.samples = samples
        try:
            bpy.ops.object.bake(type="COMBINED")
        finally:
            cycles.samples = old_samples
def get_lightprobe_coefficients(probe, theta_res, phi_res):
    """Bake the probe's lightmap, then project it onto the spherical
    harmonics basis, returning the coefficient mapping."""
    probe.data.calc_tessface()
    bake(probe)
    baked_map = get_lightmap(probe)
    return get_all_coefficients(probe, baked_map, theta_res, phi_res)
def sample_image(channels, width, height, pixel_data, loc):
    """Sample the pixel at integer location *loc* = (x, y) from a flat
    Blender pixel buffer and return it as an RGB Color."""
    column, row = loc[0], loc[1]
    base = int(row * width * channels + column * channels)
    r, g, b = pixel_data[base], pixel_data[base + 1], pixel_data[base + 2]
    return mathutils.Color((r, g, b))
def bilinear_interpolate(image, uv):
    """ performs bilinear interpolation of a blender image using texture-space
    uv coordinates. the boundary conditions are to extend the edges """
    lightmap_size = mathutils.Vector(image.size)
    width, height = lightmap_size[0], lightmap_size[1]
    px_x, px_y = 1.0/width, 1.0/height
    half_px_x, half_px_y = 1.0/(2*width), 1.0/(2*height)
    # indices of the four texels surrounding the sample point (pixel
    # centers sit at half-pixel offsets)
    left_coord = ceil(width * (uv[0] - half_px_x) - 1)
    right_coord = ceil(width * (uv[0] + half_px_x) - 1)
    bottom_coord = ceil(height * (uv[1] - half_px_y) - 1)
    top_coord = ceil(height * (uv[1] + half_px_y) - 1)
    # these are asking how much of 1-pixel (in uv space) has our uv coordinate
    # traversed, starting at the left/bottom pixel boundary
    lerp_x = (uv[0] - (left_coord + 0.5) / width) / px_x
    lerp_y = (uv[1] - (bottom_coord + 0.5) / height) / px_y
    # boundary conditions: clamp by duplicating the in-range neighbour
    if right_coord + 1 > width:
        right_coord = left_coord
    if left_coord < 0:
        left_coord = right_coord
    if top_coord + 1 > height:
        top_coord = bottom_coord
    if bottom_coord < 0:
        bottom_coord = top_coord
    ll_uv = mathutils.Vector((left_coord, bottom_coord))
    lr_uv = mathutils.Vector((right_coord, bottom_coord))
    ur_uv = mathutils.Vector((right_coord, top_coord))
    ul_uv = mathutils.Vector((left_coord, top_coord))
    # copy the pixel buffer once; per-element access on image.pixels is slow
    pixel_data = image.pixels[:]
    chan = image.channels
    width, height = image.size
    lower_left = mathutils.Vector(sample_image(chan, width, height, pixel_data, ll_uv))
    lower_right = mathutils.Vector(sample_image(chan, width, height, pixel_data, lr_uv))
    upper_right = mathutils.Vector(sample_image(chan, width, height, pixel_data, ur_uv))
    upper_left = mathutils.Vector(sample_image(chan, width, height, pixel_data, ul_uv))
    del pixel_data
    # lerp horizontally along top and bottom rows, then vertically
    top = upper_left.lerp(upper_right, lerp_x)
    bottom = lower_left.lerp(lower_right, lerp_x)
    color = mathutils.Color(bottom.lerp(top, lerp_y))
    return color
# http://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm
def triangle_intersection(v1, v2, v3, ray, origin):
    """ performs moller-trumbore ray-triangle intersection and returns
    barycentric coordinates if an intersection exists, None otherwise """
    epsilon = 0.000001
    edge1 = v2 - v1
    edge2 = v3 - v1
    P = ray.cross(edge2)
    det = edge1.dot(P)
    # determinant near zero: ray is parallel to the triangle plane
    if det > -epsilon and det < epsilon:
        return None
    inv_det = 1.0 / det
    T = origin - v1
    # u barycentric coordinate; outside [0, 1] means no hit
    u = T.dot(P) * inv_det
    if u < 0 or u > 1:
        return None
    Q = T.cross(edge1)
    # v barycentric coordinate; u + v must stay within the triangle
    v = ray.dot(Q) * inv_det
    if v < 0 or u + v > 1:
        return None
    # t: distance along the ray; only hits in front of the origin count
    t = edge2.dot(Q) * inv_det
    if t > epsilon:
        w = 1 - u - v
        return mathutils.Vector((u, v, w))
    return None
def get_glsl_coefficients(coeffs):
    """Render SH coefficients as GLSL `const vec3` declarations.

    Convenience helper matching the lighting shader from the OpenGL
    Orange Book, second edition; negative m values are spelled with an
    "m" (e.g. L1m1).
    """
    template = "const vec3 L%d%s%d = vec3(%f, %f, %f);"
    declarations = []
    for l, per_m in coeffs.items():
        for m, rgb in per_m.items():
            sign = "m" if m < 0 else ""
            declarations.append(
                template % (l, sign, abs(m), rgb[0], rgb[1], rgb[2]))
    return "\n".join(declarations)
def get_all_coefficients(ob, lightmap, theta_res, phi_res):
    """Compute SH coefficients for every (l, m) basis function.

    theta_res / phi_res are the sampling resolutions for theta (zenith,
    0..pi) and phi (azimuth, 0..2*pi).  Returns {l: {m: (r, g, b)}}.
    """
    coefficients = {}
    for l, m in spherical_harmonics.keys():
        rgb = get_coefficients(ob, lightmap, l, m, theta_res, phi_res)
        coefficients.setdefault(l, {})[m] = rgb
    return coefficients
def get_coefficients(ob, lightmap, l, m, theta_res, phi_res):
    """ returns the RGB spherical harmonic coefficients for a given
    l and m """
    c = mathutils.Color((0, 0, 0))
    harmonic = spherical_harmonics[(l, m)]
    # plt.imsave("lightmap.png", lightmap)
    # Monte-Carlo-style Riemann sum over the sphere: sample the baked
    # lightmap in every (theta, phi) direction and weight by the basis
    # function value and the sin(theta) solid-angle factor.
    for theta in (pi * y / float(theta_res) for y in range(theta_res)):
        for phi in (pi * 2 * x / float(phi_res) for x in range(phi_res)):
            color = sample_icosphere_color(ob, lightmap, theta, phi)
            c += (color * harmonic(theta, phi) * sin(theta)
                    / (theta_res * phi_res))
    return c.r, c.g, c.b
# http://cseweb.ucsd.edu/~ravir/papers/envmap/envmap.pdf
# Real spherical-harmonics basis functions for bands l = 0..2, keyed by
# (l, m).  Each entry maps (theta = zenith, phi = azimuth) to the basis
# function value, constants included.
spherical_harmonics = {
    (0, 0): lambda theta, phi: 0.282095,
    (1, 1): lambda theta, phi: 0.488603 * sin(theta) * cos(phi),
    (1, 0): lambda theta, phi: 0.488603 * cos(theta),
    (1, -1): lambda theta, phi: 0.488603 * sin(theta) * sin(phi),
    (2, -2): lambda theta, phi: 1.092548 * sin(theta) * cos(phi) * sin(theta) * sin(phi),
    (2, -1): lambda theta, phi: 1.092548 * sin(theta) * sin(phi) * cos(theta),
    (2, 0): lambda theta, phi: 0.315392 * (3 * cos(theta)**2 - 1),
    (2, 1): lambda theta, phi: 1.092548 * sin(theta) * cos(phi) * cos(theta),
    (2, 2): lambda theta, phi: 0.546274 * (((sin(theta) * cos(phi)) ** 2) - ((sin(theta) * sin(phi)) ** 2))
}
def SHProjection(envMap, shCoefficients):
    """Back-project 9 SH coefficients onto an equirectangular environment
    map, returning each band's per-pixel contribution.

    Parameters
    ----------
    envMap : array of shape (H, W, 3)
        Only the shape is used (to build the theta/phi grids).
    shCoefficients : sequence of 9 RGB coefficient triples.

    Returns
    -------
    array of shape (H, W, 3, 9) with band k's contribution in [..., k].
    """
    # Backprojected.
    # Fix: use np.pi instead of ch.pi -- the value is a plain scalar
    # constant, and chumpy is only imported further down this module,
    # after this function is defined.
    phis = 2*np.pi*np.tile((np.roll(np.arange(envMap.shape[1])[::-1], np.int(envMap.shape[1]/2))/envMap.shape[1]).reshape([1,envMap.shape[1],1]), [envMap.shape[0],1,3])
    thetas = np.pi*np.tile((np.arange(envMap.shape[0])/envMap.shape[0]).reshape([envMap.shape[0],1,1]), [1,envMap.shape[1],3])
    phis = np.mod(phis, 2*np.pi)
    normalize = 1
    pEnvMap = np.zeros([envMap.shape[0],envMap.shape[1], 3, 9])
    # one slice per basis function, same basis order as
    # spherical_harmonics_coeffs / getEnvironmentMapCoefficients
    pEnvMap[:,:,:,0] = shCoefficients[0].reshape([1,1,3]) * spherical_harmonics_coeffs[0] * normalize
    pEnvMap[:,:,:,1] = shCoefficients[1].reshape([1,1,3]) * spherical_harmonics_coeffs[1]*np.sin(thetas) * np.cos(phis) * normalize
    pEnvMap[:,:,:,2] = shCoefficients[2].reshape([1,1,3]) * spherical_harmonics_coeffs[2]*np.cos(thetas) * normalize
    pEnvMap[:,:,:,3] = shCoefficients[3].reshape([1,1,3]) * spherical_harmonics_coeffs[3]*np.sin(thetas)*np.sin(phis) * normalize
    pEnvMap[:,:,:,4] = shCoefficients[4].reshape([1,1,3]) * spherical_harmonics_coeffs[4]*np.sin(thetas) * np.cos(phis) * np.sin(phis)* np.sin(thetas) * normalize
    pEnvMap[:,:,:,5] = shCoefficients[5].reshape([1,1,3]) * spherical_harmonics_coeffs[5]*np.sin(thetas) * np.sin(phis) * np.cos(thetas) * normalize
    pEnvMap[:,:,:,6] = shCoefficients[6].reshape([1,1,3]) * spherical_harmonics_coeffs[6]*(3 * np.cos(thetas)**2 - 1) * normalize
    pEnvMap[:,:,:,7] = shCoefficients[7].reshape([1,1,3]) * spherical_harmonics_coeffs[7]*np.sin(thetas) * np.cos(phis) * np.cos(thetas) * normalize
    pEnvMap[:,:,:,8] = shCoefficients[8].reshape([1,1,3]) * spherical_harmonics_coeffs[8]*(((np.sin(thetas) * np.cos(phis)) ** 2) - ((np.sin(thetas) * np.sin(phis)) ** 2)) * normalize
    return pEnvMap
def sphericalHarmonicsZRotation(angle):
    """Return the 9x9 matrix rotating SH band-0..2 coefficient vectors by
    *angle* radians about the Z axis.

    Band 0 is invariant; band-1 pairs (1,3) and band-2 pairs (5,7) rotate
    by angle; the band-2 pair (4,8) rotates by 2*angle.
    """
    c, s = np.cos(angle), np.sin(angle)
    c2, s2 = np.cos(2 * angle), np.sin(2 * angle)
    rot = np.eye(9)
    rot[1, 1] = c;  rot[1, 3] = s
    rot[3, 1] = -s; rot[3, 3] = c
    rot[5, 5] = c;  rot[5, 7] = s
    rot[7, 5] = -s; rot[7, 7] = c
    rot[4, 4] = c2;  rot[4, 8] = s2
    rot[8, 4] = -s2; rot[8, 8] = c2
    return rot
import chumpy as ch
from chumpy import depends_on, Ch
class SphericalHarmonicsZRotation(Ch):
    """Chumpy graph node producing the 9x9 Z-axis rotation matrix for SH
    band-0..2 coefficient vectors (same closed form as
    sphericalHarmonicsZRotation above), parameterized by `angle`."""
    # chumpy differentiable terms
    dterms = 'angle'
    def compute_r(self):
        return np.array([[1,0,0,0,0,0,0,0,0],[0, np.cos(self.angle), 0, np.sin(self.angle), 0,0,0,0,0],[0,0,1,0,0,0,0,0,0],[0, -np.sin(self.angle), 0, np.cos(self.angle), 0,0,0,0,0],[0,0,0,0,np.cos(2*self.angle),0,0,0,np.sin(2*self.angle)],[0,0,0,0,0,np.cos(self.angle), 0, np.sin(self.angle),0],[0,0,0,0,0,0,1,0,0],[0,0,0,0,0, -np.sin(self.angle),0, np.cos(self.angle),0],[0,0,0,0,-np.sin(2*self.angle),0,0,0,np.cos(2*self.angle)]])
    #No need to make it differentiable for now.
    def compute_dr_wrt(self, wrt):
        return None
import chumpy as ch
def chSphericalHarmonicsZRotation(angle):
    """Build the differentiable chumpy node for the SH Z-rotation matrix
    at *angle*."""
    rotation = SphericalHarmonicsZRotation(angle=angle)
    return rotation
def getEnvironmentMapCoefficients(envMap, normalize, phiOffset, type):
    """Project an environment map onto the 9 SH basis functions.

    Returns a (9, 3) array of RGB coefficients, or None (after printing a
    message) for an unrecognized *type*.  `type` is 'equirectangular'
    (lat-long layout) or 'spherical' (angular/mirror-ball layout);
    phiOffset rotates the azimuth before projection.

    NOTE(review): np.int is removed in NumPy >= 1.24 -- confirm the
    pinned NumPy version before running on a modern stack.
    """
    if type == 'equirectangular':
        # per-pixel azimuth/zenith grids for a lat-long image
        phis = 2*np.pi*np.tile((np.roll(np.arange(envMap.shape[1])[::-1], np.int(envMap.shape[1]/2))/envMap.shape[1]).reshape([1,envMap.shape[1],1]), [envMap.shape[0],1,3])
        thetas = np.pi*np.tile((np.arange(envMap.shape[0])/envMap.shape[0]).reshape([envMap.shape[0],1,1]), [1,envMap.shape[1],3])
    elif type == 'spherical':
        # angular map: angle from centre encodes phi, polar angle in the
        # image plane encodes theta
        vcoords = (-envMap.shape[0]/2 + np.tile(np.arange(envMap.shape[0]).reshape([envMap.shape[0], 1,1]), [1,envMap.shape[1],3]))/(envMap.shape[0]/2)
        ucoords = (-envMap.shape[1]/2 + np.tile(np.arange(envMap.shape[1]).reshape([1,envMap.shape[1],1]), [envMap.shape[0],1,3]))/(envMap.shape[1]/2)
        thetas=np.arctan2(vcoords,ucoords)
        phis=pi*np.sqrt(ucoords*ucoords + vcoords*vcoords)
    else:
        # NOTE(review): best-effort behaviour -- prints and returns None
        # rather than raising; callers appear to rely on this.
        print("Environment map format not recognized")
        return
    phis = np.mod(phis + phiOffset, 2*np.pi)
    L = np.zeros([9,3])
    # ipdb.set_trace()
    # each row: integral of envMap * basis_k over the sphere, with the
    # sin(theta) solid-angle factor folded into every term
    L[0] = np.sum(envMap * spherical_harmonics_coeffs[0] *np.sin(thetas) , axis=(0,1))
    L[1] = np.sum(envMap *spherical_harmonics_coeffs[1]*np.sin(thetas)*np.sin(thetas) * np.cos(phis), axis=(0,1))
    L[2] = np.sum(envMap *spherical_harmonics_coeffs[2]*np.sin(thetas)*np.cos(thetas), axis=(0,1))
    L[3] = np.sum(envMap *spherical_harmonics_coeffs[3]*np.sin(thetas)*np.sin(thetas)*np.sin(phis), axis=(0,1))
    L[4] = np.sum(envMap *spherical_harmonics_coeffs[4]*np.sin(thetas)*np.sin(thetas) * np.cos(phis) * np.sin(phis)* np.sin(thetas), axis=(0,1))
    L[5] = np.sum(envMap *spherical_harmonics_coeffs[5]*np.sin(thetas)*np.sin(thetas) * np.sin(phis) * np.cos(thetas) , axis=(0,1))
    L[6] = np.sum(envMap *spherical_harmonics_coeffs[6]*np.sin(thetas)*(3 * np.cos(thetas)**2 - 1), axis=(0,1))
    L[7] = np.sum(envMap *spherical_harmonics_coeffs[7]*np.sin(thetas) * np.cos(phis) * np.cos(thetas)* np.sin(thetas), axis=(0,1))
    L[8] = np.sum(envMap *spherical_harmonics_coeffs[8]*np.sin(thetas)*(((np.sin(thetas) * np.cos(phis)) ** 2) - ((np.sin(thetas) * np.sin(phis)) ** 2)), axis=(0,1))
    # discretization weight: solid angle per pixel, divided by the caller's
    # normalization constant
    L = 2*np.pi*L*np.pi/(envMap.shape[0]*envMap.shape[1]*normalize)
    return L
def sample_icosphere_color(ob, lightmap, theta, phi):
    """ takes a theta and phi and casts a ray out from the center of an
    icosphere, bilinearly sampling the surface where the ray intersects

    Returns the interpolated lightmap Color in direction (theta, phi).
    """
    ray = angle_to_ray(theta, phi)
    # we extend the ray arbitrarily so it's guaranteed to intersect with the
    # icosphere, instead of falling short
    ray *= 100
    face, location = find_intersecting_face(ob, ray)
    # it is possible that we couldn't find an intersecting face if the ray
    # we shot aligns perfectly with a vertex. in this case, we'll offset the
    # ray slightly and try again. this should not fail a second time.
    if face is None:
        ray.x += FAILSAFE_OFFSET
        ray.y += FAILSAFE_OFFSET
        ray.z += FAILSAFE_OFFSET
        face, location = find_intersecting_face(ob, ray)
        assert(face is not None)
    color = sample_lightmap(ob, lightmap, face, location)
    return color
def angle_to_ray(theta, phi):
    """Convert spherical angles (theta = zenith, phi = azimuth) to a unit
    direction vector in cartesian coordinates."""
    sin_theta = sin(theta)
    direction = mathutils.Vector((sin_theta * cos(phi),
                                  sin_theta * sin(phi),
                                  cos(theta)))
    return direction.normalized()
def find_intersecting_face(ob, ray):
    """Find the face where *ray*, shot from the icosphere centre, hits the
    mesh.

    Returns (face, barycentric_coords), or (None, None) when the ray
    grazes a vertex exactly -- the caller retries with a nudged ray.
    """
    mesh = ob.data
    origin = mathutils.Vector()
    # we'll use scale to ensure that our transform applies to our vertices.
    # assume scale is uniform and just use x scale
    scale = ob.scale[0]
    for face in mesh.tessfaces:
        verts = face.vertices
        corner_a = mesh.vertices[verts[0]].co * scale
        corner_b = mesh.vertices[verts[1]].co * scale
        corner_c = mesh.vertices[verts[2]].co * scale
        hit = triangle_intersection(corner_a, corner_b, corner_c, ray, origin)
        if hit:
            return face, hit
    return None, None
def sample_lightmap(ob, lightmap, face, loc):
    """Sample the lightmap colour at the point on *face* given by the
    barycentric coordinates *loc*, by blending the face's corner UVs and
    bilinearly interpolating the image there."""
    mesh = ob.data
    uvs = mesh.tessface_uv_textures[0].data[face.index]
    # barycentric blend of the three corner UVs
    location_uv = loc[0] * uvs.uv1 + loc[1] * uvs.uv2 + loc[2] * uvs.uv3
    return bilinear_interpolate(lightmap, location_uv)
def bakeOp(context):
    """Bake the active object (a light probe), store its SH coefficients
    on the object, and rewrite the JSON text datablock with the data of
    all probes.  Returns the aggregated probe data."""
    probe = context.object
    name, theta_res, phi_res, samples = probeProperties()
    # ret = pre_bake_hook(scene_settings.pre_bake_hook, context, probe)
    coeffs = get_lightprobe_coefficients(probe, theta_res,
            phi_res)
    set_coeff_prop(probe, coeffs)
    lp_data = get_all_lightprobe_data()
    write_lightprobe_data(lp_data)
    # post_bake_hook(scene_settings.post_bake_hook, context, lp_data, ret)
    return lp_data
# class BakeAllOp(context):
# override = context.copy()
# for probe in all_active_lightprobes():
# override["object"] = probe
# bpy.ops.object.bake_lightprobe(override)
#
#
def resizeAll(context, size=1.0):
    """Set every light probe in the scene to a cube of *size* Blender
    units per side.

    Bug fix: the original body (leftover from a bpy operator class)
    referenced ``self.size`` and assigned an unused ``FloatProperty``,
    raising NameError on any call.  The size is now a plain keyword
    argument defaulting to the operator's old default of 1.
    """
    dimensions = mathutils.Vector((size, size, size))
    for probe in all_active_lightprobes():
        probe.dimensions = dimensions
def lightProbeOp(context):
    """Create a new light probe: add the probe mesh, hide it from Cycles
    rays, show it as x-ray in the viewport, and attach its bake material."""
    probe = add_lightprobe()
    hide_object(probe)
    probe.show_x_ray = True
    with active(probe):
        setup_material(probe)
def probeProperties():
    """Return the hard-coded probe bake settings as a tuple:
    (name, theta_res, phi_res, cycles_samples)."""
    return "", 20, 40, 1000
| 22,272 | 33.639191 | 433 | py |
inversegraphics | inversegraphics-master/collision.py | __author__ = 'pol'
#From http://blender.stackexchange.com/a/9080
import bpy
import bmesh
from blender_utils import *
import blender_utils
def bmesh_copy_from_object(obj, objTransf, transform=True, triangulate=True, apply_modifiers=False):
    """Return a standalone BMesh copy of *obj*'s mesh data.

    objTransf is an extra transform applied on top of the object's own
    matrix_world when *transform* is True.  Custom data layers are
    stripped to save memory, and faces are triangulated by default so the
    result is suitable for BVH intersection tests.
    """
    assert (obj.type == 'MESH')
    if apply_modifiers and obj.modifiers:
        # evaluate the modifier stack into a temporary mesh
        me = obj.to_mesh(bpy.context.scene, True, 'PREVIEW', calc_tessface=False)
        bm = bmesh.new()
        bm.from_mesh(me)
        bpy.data.meshes.remove(me)
    else:
        me = obj.data
        if obj.mode == 'EDIT':
            # in edit mode the authoritative data lives in the edit BMesh
            bm_orig = bmesh.from_edit_mesh(me)
            bm = bm_orig.copy()
        else:
            bm = bmesh.new()
            bm.from_mesh(me)
    # Remove custom data layers to save memory
    for elem in (bm.faces, bm.edges, bm.verts, bm.loops):
        for layers_name in dir(elem.layers):
            if not layers_name.startswith("_"):
                layers = getattr(elem.layers, layers_name)
                for layer_name, layer in layers.items():
                    layers.remove(layer)
    if transform:
        bm.transform(objTransf * obj.matrix_world)
    if triangulate:
        bmesh.ops.triangulate(bm, faces=bm.faces)
    return bm
def aabb_intersect(matrix_world1, instanceObjs1, matrix_world2, instanceObjs2):
    """Axis-aligned bounding-box overlap test between two groups of mesh
    objects (each transformed by its group's world matrix)."""
    bounds1 = (blender_utils.modelWidth(instanceObjs1, matrix_world1),
               blender_utils.modelDepth(instanceObjs1, matrix_world1),
               blender_utils.modelHeight(instanceObjs1, matrix_world1))
    bounds2 = (blender_utils.modelWidth(instanceObjs2, matrix_world2),
               blender_utils.modelDepth(instanceObjs2, matrix_world2),
               blender_utils.modelHeight(instanceObjs2, matrix_world2))
    # boxes intersect iff the intervals overlap on every axis
    return all(hi1 > lo2 and lo1 < hi2
               for (lo1, hi1), (lo2, hi2) in zip(bounds1, bounds2))
def bmesh_check_intersect_objects(obj, objTransf, obj2, obj2Transf, selectface=False):
    """
    Check if any faces intersect with the other object
    returns a boolean

    Both meshes are transformed into world space (extra transform * own
    matrix_world) and compared with a BVH-tree overlap query.  When
    *selectface* is True, the overlapping faces are additionally selected
    on both objects for visual inspection.
    """
    from mathutils.bvhtree import BVHTree
    assert(obj != obj2)
    # Triangulate
    bm = bmesh_copy_from_object(obj, objTransf, transform=True, triangulate=True)
    bm2 = bmesh_copy_from_object(obj2, obj2Transf, transform=True, triangulate=True)
    intersect = False
    BMT1 = BVHTree.FromBMesh(bm)
    BMT2 = BVHTree.FromBMesh(bm2)
    # pairs of (face_index_obj1, face_index_obj2) that overlap
    overlap_pairs = BMT1.overlap(BMT2)
    if len(overlap_pairs) > 0:
        intersect = True
        if selectface:
            # deselect everything for both objects
            bpy.context.scene.objects.active = obj
            bpy.ops.object.mode_set(mode='EDIT', toggle=False)
            bpy.ops.mesh.select_all(action='DESELECT')
            bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
            bpy.context.scene.objects.active = obj2
            bpy.ops.object.mode_set(mode='EDIT', toggle=False)
            bpy.ops.mesh.select_all(action='DESELECT')
            bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
            # mark the overlapping faces on each mesh
            for each in overlap_pairs:
                obj.data.polygons[each[0]].select = True
                obj.update_from_editmode()
                obj2.data.polygons[each[1]].select = True
                obj2.update_from_editmode()
    bm.free()
    bm2.free()
    return intersect
def instancesIntersect(matrix_world1, instanceObjs1, matrix_world2, instanceObjs2):
    """True when any mesh of group 1 intersects any mesh of group 2.

    A cheap AABB rejection test runs first; exact BVH mesh/mesh
    intersection is only attempted when the boxes overlap.
    """
    if not aabb_intersect(matrix_world1, instanceObjs1, matrix_world2, instanceObjs2):
        return False
    return any(
        bmesh_check_intersect_objects(mesh1, matrix_world1, mesh2, matrix_world2)
        for mesh1 in instanceObjs1
        for mesh2 in instanceObjs2)
def targetSceneCollision(target, scene, roomName, targetParentInstance):
    """True when *target* intersects any other group instance (EMPTY) in
    the scene; the room and the target's supporting parent are excluded."""
    for instance in scene.objects:
        if instance.type != 'EMPTY':
            continue
        if instance == target or instance.name == roomName or instance == targetParentInstance:
            continue
        if instancesIntersect(target.matrix_world, target.dupli_group.objects,
                              instance.matrix_world, instance.dupli_group.objects):
            return True
    return False
def targetCubeSceneCollision(target, scene, roomName, targetParentInstance):
    """Like targetSceneCollision, but tests raw MESH objects already in
    world space (identity transforms), e.g. a proxy cube for the target."""
    identity = mathutils.Matrix.Identity(4)
    for instance in scene.objects:
        if instance.type != 'MESH':
            continue
        if instance == target or instance.name == roomName or instance == targetParentInstance:
            continue
        if instancesIntersect(identity, [target], identity, [instance]):
            return True
    return False
def parseSceneCollisions(gtDir, scene_i, target_i, target, scene, targetPosOffset, chObjDistGT, chObjRotationGT, targetParentInstance, roomObj, distRange, rotationRange, distInterval, rotationInterval):
    """Sweep the target over a (distance, rotation) grid and record which
    placements are physically valid (no collisions, resting on its
    supporting object, inside the room).

    chObjDistGT / chObjRotationGT appear to be chumpy variables whose
    update drives targetPosOffset.r -- TODO confirm against the caller.
    Returns (boolBins, totalBins): a validity flag per grid cell plus the
    flattened (distance, rotation) values (with a leading (0, 0) cell).
    """
    scene.cycles.samples = 500
    original_matrix_world = target.matrix_world.copy()
    # grid of candidate offsets: distances and rotations at the requested
    # intervals, flattened, with the untouched pose prepended
    distBins = np.linspace(distInterval, distRange, distRange/distInterval)
    rotBins = np.linspace(0, rotationRange, rotationRange / rotationInterval)
    totalBins = np.meshgrid(distBins, rotBins)
    totalBins[0] = np.append([0], totalBins[0].ravel())
    totalBins[1] = np.append([0], totalBins[1].ravel())
    boolBins = np.zeros(len(totalBins[0].ravel())).astype(np.bool)
    scene.update()
    for bin_i in range(len(totalBins[0].ravel())):
        dist = totalBins[0].ravel()[bin_i]
        rot = totalBins[1].ravel()[bin_i]
        # in-place update so dependent expressions (targetPosOffset) refresh
        chObjDistGT[:]= dist
        chObjRotationGT[:]= rot
        ignore = False
        azimuthRot = mathutils.Matrix.Rotation(0, 4, 'Z')
        # move the target by the candidate offset (rotation about its own
        # origin, then translation)
        target.matrix_world = mathutils.Matrix.Translation(original_matrix_world.to_translation() + mathutils.Vector(targetPosOffset.r)) * azimuthRot * (mathutils.Matrix.Translation(-original_matrix_world.to_translation())) * original_matrix_world
        if targetCubeSceneCollision(target, scene, roomObj.name, targetParentInstance):
            # print("Teapot intersects with an object.")
            ignore = True
            # pass
        # nudged 1cm down it must touch the support, i.e. it is resting on it
        if not instancesIntersect(mathutils.Matrix.Translation(mathutils.Vector((0, 0, -0.01))), [target], mathutils.Matrix.Identity(4), [targetParentInstance]):
            # print("Teapot not on table.")
            ignore = True
        # nudged 2cm up it must NOT intersect the support (not embedded in it)
        if instancesIntersect(mathutils.Matrix.Translation(mathutils.Vector((0, 0, +0.02))), [target], mathutils.Matrix.Identity(4), [targetParentInstance]):
            # print("Teapot interesects supporting object.")
            ignore = True
        # ipdb.set_trace()
        if instancesIntersect(mathutils.Matrix.Identity(4), [target], mathutils.Matrix.Identity(4), [roomObj]):
            # print("Teapot intersects room")
            ignore = True
        boolBins[bin_i] = not ignore
        # bpy.ops.render.render(write_still=True)
        #
        # import imageio
        #
        # image = np.array(imageio.imread(scene.render.filepath))[:, :, 0:3]
        #
        # from blender_utils import lin2srgb
        # blender_utils.lin2srgb(image)
        #
        # cv2.imwrite(gtDir + 'images/scene' + str(scene_i) + '_' + 'targetidx' + str(target_i) + 'bin' + str(bin_i) + '_ignore' + str(ignore) + '.jpeg', 255 * image[:, :, [2, 1, 0]], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
    return boolBins, totalBins
| 7,497 | 37.649485 | 247 | py |
inversegraphics | inversegraphics-master/blender_utils.py | import bpy
import bpy_extras
import numpy
import numpy as np
import mathutils
from math import radians
import h5py
import scipy.io
import cv2
import sys
import io
import os
import light_probes
import imageio
try:
import cPickle as pickle
except:
import pickle
import ipdb
import re
from collision import instancesIntersect
inchToMeter = 0.0254
def createMeshFromData(name, verts, faces):
    """Build a new mesh object from raw vertex and face lists.

    The object is created but not linked to any scene; the mesh datablock
    is named "<name>Mesh".
    """
    mesh = bpy.data.meshes.new(name + 'Mesh')
    # fill the mesh from the given geometry (no explicit edges)
    mesh.from_pydata(verts, [], faces)
    mesh.update()
    return bpy.data.objects.new(name, mesh)
def addLamp(scene, lightAz, lightEl, lightDist, center, lightIntensity):
    """Add a point lamp to *scene*, positioned via getRelativeLocation and
    with its emission strength set to lightIntensity.

    NOTE(review): getRelativeLocation is called with lightAz for both of
    its first two arguments and lightDist is never used -- verify against
    getRelativeLocation's signature whether an object azimuth and the
    distance were meant to be passed here.
    """
    #Add directional light to match spherical harmonics
    lamp_data = bpy.data.lamps.new(name="point", type='POINT')
    lamp = bpy.data.objects.new(name="point", object_data=lamp_data)
    # make the lamp visible on render layers 1 and 2 as well
    lamp.layers[1] = True
    lamp.layers[2] = True
    lampLoc = getRelativeLocation(lightAz, lightAz, lightEl, center)
    lamp.location = mathutils.Vector((lampLoc[0],lampLoc[1],lampLoc[2]))
    lamp.data.cycles.use_multiple_importance_sampling = True
    lamp.data.use_nodes = True
    # Emission node input 1 is the strength
    lamp.data.node_tree.nodes['Emission'].inputs[1].default_value = lightIntensity
    scene.objects.link(lamp)
def loadData():
    """Load the precomputed dataset from hard-coded paths under ../data/.

    Returns (data, images, experiments_data): ground-truth metadata
    (scipy .mat), the image archive (open h5py File -- lazily read), and
    the cross-validation experiment splits.
    """
    #data
    # fdata = h5py.File('../data/data-all-flipped-cropped-512.mat','r')
    # data = fdata["data"]
    data = scipy.io.loadmat('../data/data-all-flipped-cropped-512-scipy.mat')['data']
    images = h5py.File('../data/images-all-flipped-cropped-512-color.mat','r')
    # images = f["images"]
    # imgs = numpy.array(images)
    # N = imgs.shape[0]
    # imgs = imgs.transpose(0,2,3,1)
    # f = h5py.File('../data/all-flipped-cropped-512-crossval6div2_py-experiment.mat')
    # experiments = f["experiments_data"]
    # f = h5py.File('../data/all-flipped-cropped-512-crossval6div2_py-experiment.mat')
    experiments = scipy.io.loadmat('../data/all-flipped-cropped-512-crossval6all2-experiment.mat')
    return data, images, experiments['experiments_data']
def loadGroundTruth(rendersDir):
    """Load the render ground truth stored under *rendersDir*.

    Parses 'groundtruth.txt' (one whitespace-separated record per frame),
    keeping only records whose rendered image exists on disk, then fills
    the per-record occlusion fraction (column 5) from 'occlusions.txt'.

    Returns:
        (groundTruth, imageFiles, segmentFiles, segmentSingleFiles,
         unoccludedFiles, prefixes) where groundTruth is an (N, 17) array:
        [az, objAz, el, objIndex, frame, occlusion, sceneNum, targetIndex,
         spoutPosX, spoutPosY, handlePosX, handlePosY, tipPosX, tipPosY,
         spoutOccluded, handleOccluded, tipOccluded].
    """
    lines = [line.strip() for line in open(rendersDir + 'groundtruth.txt')]
    groundTruthLines = []
    imageFiles = []
    segmentFiles = []
    segmentSingleFiles = []
    unoccludedFiles = []
    prefixes = []
    for instance in lines:
        parts = instance.split(' ')
        framestr = '{0:04d}'.format(int(parts[4]))
        # An optional 17th field carries a filename prefix.
        prefix = parts[16] if len(parts) == 17 else ''
        az = float(parts[0])
        objAz = float(parts[1])
        el = float(parts[2])
        objIndex = int(parts[3])
        frame = int(parts[4])
        sceneNum = int(parts[5])
        targetIndex = int(parts[6])
        spoutPosX = int(float(parts[7]))
        spoutPosY = int(float(parts[8]))
        handlePosX = int(float(parts[9]))
        handlePosY = int(float(parts[10]))
        tipPosX = int(float(parts[11]))
        tipPosY = int(float(parts[12]))
        spoutOccluded = int(float(parts[13]))
        handleOccluded = int(float(parts[14]))
        tipOccluded = int(float(parts[15]))
        base = "render" + prefix + "_obj" + str(objIndex) + "_scene" + str(sceneNum) + '_target' + str(targetIndex)
        outfilename = base + '_' + framestr
        outfilenamesingle = base + '_single_' + framestr
        outfilenameunoccluded = base + '_unoccluded' + framestr
        imageFile = rendersDir + "images/" + outfilename + ".png"
        # Keep the record only when the composited render exists on disk.
        if os.path.isfile(imageFile):
            imageFiles.append(imageFile)
            segmentFiles.append(rendersDir + "images/" + outfilename + "_segment.png")
            segmentSingleFiles.append(rendersDir + "images/" + outfilenamesingle + "_segment.png")
            unoccludedFiles.append(rendersDir + "images/" + outfilenameunoccluded + ".png")
            prefixes.append(prefix)
            # Column 5 (occlusion fraction) is filled in below.
            groundTruthLines.append([az, objAz, el, objIndex, frame, 0.0, sceneNum, targetIndex, spoutPosX, spoutPosY, handlePosX, handlePosY, tipPosX, tipPosY, spoutOccluded, handleOccluded, tipOccluded])
    groundTruth = numpy.array(groundTruthLines)
    lines = [line.strip() for line in open(rendersDir + 'occlusions.txt')]
    for instance in lines:
        parts = instance.split(' ')
        prefix = parts[5] if len(parts) == 6 else ''
        eqPrefixes = [x == y for (x, y) in zip(prefixes, [prefix] * len(prefixes))]
        try:
            # Match on (objIndex, frame, sceneNum, targetIndex, prefix).
            index = numpy.where((groundTruth[:, 3] == int(parts[0])) & (groundTruth[:, 4] == int(parts[1])) & (groundTruth[:, 6] == int(parts[2])) & (groundTruth[:, 7] == int(parts[3])) & (eqPrefixes))[0][0]
            groundTruth[index, 5] = float(parts[4])
        # Narrowed from a bare `except:`: IndexError when no row matches,
        # ValueError for malformed numeric fields.
        except (IndexError, ValueError):
            print("Problem!")
    return groundTruth, imageFiles, segmentFiles, segmentSingleFiles, unoccludedFiles, prefixes
def modifySpecular(scene, delta):
    """Switch every mesh material in *scene* to Phong specular shading,
    raise its specular intensity by *delta* and quarter its hardness."""
    meshes = (obj for obj in scene.objects if obj.type == 'MESH')
    for mesh in meshes:
        for mat in mesh.data.materials:
            mat.specular_shader = 'PHONG'
            mat.specular_intensity += delta
            mat.specular_hardness /= 4.0
def makeMaterial(name, diffuse, specular, alpha):
    """Create and return a Blender-Internal material with Lambert diffuse
    and Cook-Torrance specular shading."""
    settings = {
        'diffuse_color': diffuse,
        'diffuse_shader': 'LAMBERT',
        'diffuse_intensity': 1.0,
        'specular_color': specular,
        'specular_shader': 'COOKTORR',
        'specular_intensity': 0.5,
        'alpha': alpha,
        'ambient': 1,
    }
    mat = bpy.data.materials.new(name)
    for attr, value in settings.items():
        setattr(mat, attr, value)
    return mat
def setMaterial(ob, mat):
    """Append *mat* to the material slots of object *ob*."""
    ob.data.materials.append(mat)
def look_at(obj_camera, point):
    """Rotate *obj_camera* so its -Z axis points at *point*, keeping its
    local +Y axis as the up direction."""
    target = mathutils.Vector((point[0], point[1], point[2]))
    heading = target - obj_camera.location
    # Track -Z towards the target with Y up, then store as euler angles.
    obj_camera.rotation_euler = heading.to_track_quat('-Z', 'Y').to_euler()
def look_atZ(obj_camera, point):
    """Rotate *obj_camera* so its +Y axis points at *point*, keeping its
    local +Z axis as the up direction."""
    target = mathutils.Vector((point[0], point[1], point[2]))
    heading = target - obj_camera.location
    obj_camera.rotation_euler = heading.to_track_quat('Y', 'Z').to_euler()
def modelHeight(objects, transform):
    """Return (minZ, maxZ) over all mesh vertices of *objects*, with each
    vertex mapped through ``transform * model.matrix_world``.

    The combined matrix is hoisted out of the vertex loop (it was
    previously recomputed up to twice per vertex).  When *objects*
    contains no mesh vertices the untouched sentinels (99999, -999999)
    are returned, matching the original behavior.
    """
    maxZ = -999999
    minZ = 99999
    for model in objects:
        if model.type != 'MESH':
            continue
        # Loop-invariant matrix product: hoisted for performance.
        toWorld = transform * model.matrix_world
        for v in model.data.vertices:
            z = (toWorld * v.co).z
            if z > maxZ:
                maxZ = z
            if z < minZ:
                minZ = z
    return minZ, maxZ
def modelDepth(objects, transform):
    """Return (minY, maxY) over all mesh vertices of *objects*, with each
    vertex mapped through ``transform * model.matrix_world``.

    The combined matrix is hoisted out of the vertex loop.  With no mesh
    vertices the untouched sentinels (99999, -999999) are returned,
    matching the original behavior.
    """
    maxY = -999999
    minY = 99999
    for model in objects:
        if model.type != 'MESH':
            continue
        # Loop-invariant matrix product: hoisted for performance.
        toWorld = transform * model.matrix_world
        for v in model.data.vertices:
            y = (toWorld * v.co).y
            if y > maxY:
                maxY = y
            if y < minY:
                minY = y
    return minY, maxY
def modelWidth(objects, transform):
    """Return (minX, maxX) over all mesh vertices of *objects*, with each
    vertex mapped through ``transform * model.matrix_world``.

    The combined matrix is hoisted out of the vertex loop.  With no mesh
    vertices the untouched sentinels (99999, -999999) are returned,
    matching the original behavior.
    """
    maxX = -999999
    minX = 99999
    for model in objects:
        if model.type != 'MESH':
            continue
        # Loop-invariant matrix product: hoisted for performance.
        toWorld = transform * model.matrix_world
        for v in model.data.vertices:
            x = (toWorld * v.co).x
            if x > maxX:
                maxX = x
            if x < minX:
                minX = x
    return minX, maxX
def centerOfGeometry(objects, transform):
    """Return the mean world-space position of all mesh vertices in
    *objects*, mapped through ``transform * model.matrix_world``.

    The combined matrix is hoisted out of the vertex loop (it was
    previously recomputed per vertex).  Raises ZeroDivisionError when
    there are no mesh vertices, unchanged from the original behavior.
    """
    center = mathutils.Vector((0.0, 0.0, 0.0))
    numVertices = 0.0
    for model in objects:
        if model.type == 'MESH':
            numVertices = numVertices + len(model.data.vertices)
            # Loop-invariant matrix product: hoisted for performance.
            toWorld = transform * model.matrix_world
            for v in model.data.vertices:
                center = center + (toWorld * v.co)
    return center / numVertices
def setEulerRotation(scene, eulerVectorRotation):
    """Assign *eulerVectorRotation* as the euler rotation of every mesh
    object in *scene*, then refresh the scene."""
    for obj in scene.objects:
        if obj.type != 'MESH':
            continue
        obj.rotation_euler = eulerVectorRotation
    scene.update()
def rotateMatrixWorld(scene, rotationMat):
    """Left-multiply the world matrix of every mesh object in *scene* by
    *rotationMat*, then refresh the scene."""
    meshObjects = [obj for obj in scene.objects if obj.type == 'MESH']
    for obj in meshObjects:
        obj.matrix_world = rotationMat * obj.matrix_world
    scene.update()
def AutoNodeOff():
    """Disable node-based shading on every material in the blend file."""
    for material in bpy.data.materials:
        material.use_nodes = False
def AutoNode():
    """Convert every material in the blend file to a Cycles node setup.

    For each material: enable nodes, strip existing image-texture /
    RGB-to-BW nodes, pick up any existing shader and output nodes, then
    (a) replace the shader with a glossy BSDF when the Blender-Internal
    raytrace mirror is enabled, (b) create a diffuse BSDF plus output if
    no shader exists, and (c) re-wire the material's image texture slots
    into the node graph (color -> shader input 0, normal via RGB-to-BW
    into the output's displacement input).
    """
    mats = bpy.data.materials
    for cmat in mats:
        cmat.use_nodes=True
        TreeNodes=cmat.node_tree
        links = TreeNodes.links
        shader=''
        # Scan the existing node graph: drop texture helpers, remember the
        # output node and whichever shader node is present.
        for n in TreeNodes.nodes:
            if n.type == 'ShaderNodeTexImage' or n.type == 'RGBTOBW':
                TreeNodes.nodes.remove(n)
            if n.type == 'OUTPUT_MATERIAL':
                shout = n
            if n.type == 'BACKGROUND':
                shader=n
            if n.type == 'BSDF_DIFFUSE':
                shader=n
            if n.type == 'BSDF_GLOSSY':
                shader=n
            if n.type == 'BSDF_GLASS':
                shader=n
            if n.type == 'BSDF_TRANSLUCENT':
                shader=n
            if n.type == 'BSDF_TRANSPARENT':
                shader=n
            if n.type == 'BSDF_VELVET':
                shader=n
            if n.type == 'EMISSION':
                shader=n
            if n.type == 'HOLDOUT':
                shader=n
        # Mirror materials become a glossy BSDF.
        if cmat.raytrace_mirror.use and cmat.raytrace_mirror.reflect_factor>0.001:
            print("MIRROR")
            if shader:
                if not shader.type == 'BSDF_GLOSSY':
                    print("MAKE MIRROR SHADER NODE")
                    TreeNodes.nodes.remove(shader)
                    shader = TreeNodes.nodes.new('BSDF_GLOSSY')
                    shader.location = 0,450
                    links.new(shader.outputs[0],shout.inputs[0])
        # No shader found at all: build a default diffuse + output pair.
        if not shader:
            shader = TreeNodes.nodes.new('BSDF_DIFFUSE')
            shader.location = 0,450
            shout = TreeNodes.nodes.new('OUTPUT_MATERIAL')
            shout.location = 200,400
            links.new(shader.outputs[0],shout.inputs[0])
        # Re-create texture nodes from the material's texture slots.
        if shader:
            textures = cmat.texture_slots
            for tex in textures:
                if tex:
                    if tex.texture.type=='IMAGE':
                        img = tex.texture.image
                        shtext = TreeNodes.nodes.new('ShaderNodeTexImage')
                        shtext.location = -200,400
                        shtext.image=img
                        if tex.use_map_color_diffuse:
                            links.new(shtext.outputs[0],shader.inputs[0])
                        if tex.use_map_normal:
                            t = TreeNodes.nodes.new('RGBTOBW')
                            t.location = -0,300
                            links.new(t.outputs[0],shout.inputs[2])
                            links.new(shtext.outputs[0],t.inputs[0])
def cleanBPYScene(scene):
    """Remove every scene other than *scene* whose name starts with
    'teapots' followed by at least one more character."""
    doomed = [s for s in bpy.data.scenes
              if s != scene and len(s.name) > 7 and s.name[0:7] == 'teapots']
    for s in doomed:
        bpy.data.scenes.remove(s)
def addEnvironmentMapWorld(scene):
    """Build the world shader graph used for environment-map lighting.

    Graph: TexCoord -> Mapping -> EnvironmentTexture -> RGBToBW ->
    Background ("WorldBackground") -> World Output.  Because the texture
    is converted to BW before feeding the Background color input, the
    environment map contributes luminance only.  The actual image is
    assigned later by updateEnviornmentMap().
    """
    scene.world.use_nodes = True
    treeNodes=scene.world.node_tree
    envTextureNode = treeNodes.nodes.new('ShaderNodeTexEnvironment')
    mappingNode = treeNodes.nodes.new('ShaderNodeMapping')
    links = treeNodes.links
    links.new(mappingNode.outputs[0],envTextureNode.inputs[0])
    texCoordNode = treeNodes.nodes.new('ShaderNodeTexCoord')
    links.new(texCoordNode.outputs[0],mappingNode.inputs[0])
    rgbToBWNode = treeNodes.nodes.new('ShaderNodeRGBToBW')
    links.new(envTextureNode.outputs[0],rgbToBWNode.inputs[0])
    colorBackgroundNode = treeNodes.nodes.new('ShaderNodeBackground')
    colorBackgroundNode.name = "WorldBackground"
    # Default (pre-link) color; overridden by the BW link below.
    colorBackgroundNode.inputs[0].default_value[0:3] = (1.0,1.0,1.0)
    links.new(rgbToBWNode.outputs[0],colorBackgroundNode.inputs[0])
    output_node = treeNodes.nodes['World Output']
    links.new(colorBackgroundNode.outputs["Background"],output_node.inputs["Surface"])
    # Treat the HDR pixels as raw data (no color-space conversion).
    envTextureNode.color_space="NONE"
def setEnviornmentMapStrength(strength, scene):
    """Set the strength input of the world's 'Background' shader node."""
    scene.world.node_tree.nodes['Background'].inputs[1].default_value = strength
def updateEnviornmentMap(envMapFilename, scene):
    """Load *envMapFilename* into the world's 'Environment Texture' node.

    Any previously assigned image is unlinked and removed first, so
    stale datablocks do not accumulate in bpy.data.images.
    """
    envTextureNode = scene.world.node_tree.nodes['Environment Texture']
    if envTextureNode.image != None:
        envTextureNode.image.user_clear()
        bpy.data.images.remove(envTextureNode.image)
    image = bpy.data.images.load(envMapFilename)
    envTextureNode.image = image
    # Treat the pixel data as raw (no color-space conversion).
    envTextureNode.color_space="NONE"
def rotateEnviornmentMap(angle, scene):
    """Rotate the world environment map about the vertical (Z) axis by
    *angle* via the Mapping node."""
    scene.world.node_tree.nodes['Mapping'].rotation[2] = angle
def cameraLookingInsideRoom(cameraAzimuth):
    """Return True when the camera azimuth (degrees) faces the room
    interior, i.e. it is strictly greater than 270 or strictly less
    than 90.

    Fixed: the original used 'and', a condition no azimuth can satisfy,
    so the function always returned False.
    """
    if cameraAzimuth > 270 or cameraAzimuth < 90:
        return True
    return False
def deleteInstance(instance):
    """Fully delete a dupli-group instance: the group's member objects,
    the group itself, and finally the instancing object.

    user_clear() drops remaining users first so bpy.data considers each
    datablock unreferenced and actually removes it.
    """
    for mesh in instance.dupli_group.objects:
        mesh.user_clear()
        bpy.data.objects.remove(mesh)
    instance.dupli_group.user_clear()
    bpy.data.groups.remove(instance.dupli_group)
    instance.user_clear()
    bpy.data.objects.remove(instance)
def deleteObject(object):
    """Remove *object* from the blend file, clearing its users first so
    bpy.data will actually free the datablock."""
    object.user_clear()
    bpy.data.objects.remove(object)
def placeNewTarget(scene, target, targetPosition):
    """Link *target* into *scene* on layers 1 and 2 and move it to
    *targetPosition* (pure translation, no rotation/scale)."""
    for layerIdx in (1, 2):
        target.layers[layerIdx] = True
    scene.objects.link(target)
    target.matrix_world = mathutils.Matrix.Translation(targetPosition)
    scene.update()
def placeCamera(camera, azimuth, elevation, camDistance, center):
    """Put *camera* on a sphere of radius *camDistance* around *center*
    at the given azimuth/elevation (degrees) and aim it at *center*."""
    camera.location = getRelativeLocation(azimuth, elevation, camDistance, center)
    look_at(camera, center)
def getRelativeLocation(azimuth, elevation, distance, center):
    """Return the world position at spherical offset (azimuth, elevation,
    distance) from *center*; angles are in degrees."""
    rotZ = mathutils.Matrix.Rotation(radians(-azimuth), 4, 'Z')
    rotX = mathutils.Matrix.Rotation(radians(-elevation), 4, 'X')
    # Base offset points along -Y before the two rotations are applied.
    baseOffset = mathutils.Vector((0, -distance, 0))
    return center + rotZ * rotX * baseOffset
def srgb2lin(im):
    """Convert sRGB values to linear light, modifying *im* in place and
    returning it."""
    dark = im <= 0.04045
    bright = ~dark
    im[dark] = im[dark] / 12.92
    im[bright] = ((im[bright] + 0.055) / 1.055) ** 2.4
    return im
def lin2srgb(im):
    """Convert linear-light values to sRGB, modifying *im* in place and
    returning it."""
    dark = im <= 0.0031308
    bright = ~dark
    im[dark] = im[dark] * 12.92
    im[bright] = im[bright] ** (1 / 2.4) * 1.055 - 0.055
    return im
def sortface(f):
    """Reorder a quad face's vertices so they form a proper loop instead
    of a bow-tie.  Non-quads are returned unchanged.
    """
    if len(f) != 4:
        return f
    v=[mathutils.Vector(list(p)) for p in f]
    v2m0=v[2]-v[0]
    # The normal of the plane
    v1m0 = v[1] - v[0]
    n=v1m0.cross(v2m0)
    # (A coplanarity test used to live here; it would be a good hint to
    # split the face into triangles instead.)
    # Get a vector pointing along the plane perpendicular to v[0]-v[2]
    n2=n.cross(v2m0)
    # Get the respective distances along that line
    k=[p.dot(n2) for p in v[1:]]
    # Check if the vertices are on the proper side
    cmp = lambda x, y: (x > y) - (x < y)  # Python-2-style three-way compare
    if cmp(k[1],k[0]) == cmp(k[1],k[2]):
        # NOTE(review): assigning .v on *f* looks suspect — a plain list
        # would reject attribute assignment, so f is presumably a mesh
        # face object exposing a .v field; confirm before reuse.
        f.v=[f[0],f[2],f[3],f[1]]
def createCube(scaleX, scaleY, scaleZ, name):
    """Create a cube of size (scaleX, scaleY, scaleZ) with its base at
    z=0, named *name*, and return it unlinked from the current scene."""
    bpy.ops.mesh.primitive_cube_add()
    cube = bpy.context.object
    cube.name = name
    halfScale = mathutils.Matrix([[scaleX / 2, 0, 0, 0],
                                  [0, scaleY / 2, 0, 0],
                                  [0, 0, scaleZ / 2, 0],
                                  [0, 0, 0, 1]])
    # Shift the unit cube up by one before scaling so the base sits at z=0.
    shiftUp = mathutils.Matrix.Translation(mathutils.Vector((0, 0, 1)))
    cube.data.transform(halfScale * shiftUp)
    bpy.context.scene.objects.unlink(cube)
    return cube
def getCubeObj(instance):
    """Return a new (unlinked) mesh object that is the bounding box of
    *instance*, positioned with the instance's world matrix.

    NOTE(review): the box is built from the bound_box of the FIRST object
    in the dupli-group only; the min/max extents computed below are
    currently unused — confirm whether multi-object groups need the full
    extents instead.
    """
    minX1, maxX1 = modelWidth(instance.dupli_group.objects, mathutils.Matrix.Identity(4))
    minY1, maxY1 = modelDepth(instance.dupli_group.objects, mathutils.Matrix.Identity(4))
    minZ1, maxZ1 = modelHeight(instance.dupli_group.objects, mathutils.Matrix.Identity(4))
    ob = instance.dupli_group.objects[0]
    bbox = [ob.matrix_world * mathutils.Vector(corner) for corner in ob.bound_box]
    mesh = bpy.data.meshes.new('meshCube' + instance.name)
    cubeObj = bpy.data.objects.new('cube' + instance.name, mesh)
    import bmesh
    bm = bmesh.new()
    bm.verts.ensure_lookup_table()
    # One bmesh vertex per bound-box corner, in Blender's corner order.
    bm.verts.new(bbox[0])
    bm.verts.new(bbox[1])
    bm.verts.new(bbox[2])
    bm.verts.new(bbox[3])
    bm.verts.new(bbox[4])
    bm.verts.new(bbox[5])
    bm.verts.new(bbox[6])
    bm.verts.new(bbox[7])
    # The six quads of the box (vertex indices into bbox).
    faces = [(3, 7, 6, 2),
             (6, 5, 4, 7),
             (5, 1, 0, 4),
             (4, 7, 3, 0),
             (5, 6, 2, 1),
             (1, 3, 2, 0),
             ]
    bm.verts.ensure_lookup_table()
    bm.faces.ensure_lookup_table()
    for f_idx in faces:
        bm.faces.new([bm.verts[i] for i in f_idx])
    # Finish up, write the bmesh back to the mesh
    bm.to_mesh(cubeObj.data)
    cubeObj.matrix_world = instance.matrix_world
    cubeObj.data.update()
    cubeObj.data.show_double_sided = True
    return cubeObj
def createCubeScene(scene):
    """Build and return a parallel 'CubeScene' that contains a bounding-box
    cube for every dupli-group (EMPTY) instance in *scene*, sharing the
    original camera and world."""
    bpy.ops.scene.new(type='EMPTY')
    cubeScene = bpy.context.scene
    cubeScene.name = "CubeScene"
    cubeScene.camera = scene.camera
    cubeScene.world = scene.world
    for inst in scene.objects:
        if inst.type == 'EMPTY':
            cubeScene.objects.link(getCubeObj(inst))
    cubeScene.update()
    return cubeScene
def captureSceneEnvMap(scene, envMapTexture, roomInstanceNum, rotationOffset, links, treeNodes, teapot, center, targetPosition, width, height, cyclesSamples=3000, gtDir='', train_i=0):
    """Render a 360-degree panorama of *scene* from the target position and
    fit spherical-harmonic lighting coefficients to it.

    Steps: project the input *envMapTexture* onto SH and install that
    approximation as the world environment map; temporarily switch the
    camera to an equirectangular panorama, hide the room and the target,
    render with Cycles, fit SH coefficients to the render, then restore
    the original perspective setup.

    Returns the SH coefficients of the rendered panorama.  Side effects:
    writes several debug images under gtDir and mutates the scene's
    camera/render settings (restored before returning).
    """
    bpy.context.screen.scene = scene
    envMapTexture = cv2.resize(src=envMapTexture, dsize=(360, 180))
    # Luminance-only, mean-normalized copy of the input environment map.
    envMapGray = 0.3 * envMapTexture[:, :, 0] + 0.59 * envMapTexture[:, :, 1] + 0.11 * envMapTexture[:, :, 2]
    envMapGrayMean = np.mean(envMapGray, axis=(0, 1))
    envMapGrayRGB = np.concatenate([envMapGray[..., None], envMapGray[..., None], envMapGray[..., None]],
                                   axis=2) / envMapGrayMean
    envMapCoeffsNew = light_probes.getEnvironmentMapCoefficients(envMapGrayRGB, 1, 0, 'equirectangular')
    pEnvMap = light_probes.SHProjection(envMapTexture, envMapCoeffsNew)
    approxProjection = np.sum(pEnvMap, axis=3).astype(np.float32)
    # Clamp negative lobes of the SH reconstruction.
    approxProjection[approxProjection < 0] = 0
    cv2.imwrite(gtDir + 'im.exr', approxProjection)
    updateEnviornmentMap(gtDir + 'im.exr', scene)
    rotateEnviornmentMap(-rotationOffset, scene)
    cv2.imwrite(gtDir + 'sphericalharmonics/envMapProjOr' + str(train_i) + '.jpeg',
                255 * approxProjection[:, :, [2, 1, 0]])
    cv2.imwrite(gtDir + 'sphericalharmonics/envMapGrayOr' + str(train_i) + '.jpeg',
                255 * envMapGrayRGB[:, :, [2, 1, 0]])
    links.remove(treeNodes.nodes['lightPathNode'].outputs[0].links[0])
    scene.world.cycles_visibility.camera = True
    # Switch to a low-resolution equirectangular panoramic camera.
    scene.camera.data.type = 'PANO'
    scene.camera.data.cycles.panorama_type = 'EQUIRECTANGULAR'
    scene.render.resolution_x = 360
    scene.render.resolution_y = 180
    # Hide the room and the target so only incoming light is captured.
    roomInstance = scene.objects[str(roomInstanceNum)]
    roomInstance.cycles_visibility.camera = False
    roomInstance.cycles_visibility.shadow = False
    teapot.cycles_visibility.camera = False
    #NOt sure if should set to True or False
    teapot.cycles_visibility.shadow = False
    scene.render.image_settings.file_format = 'OPEN_EXR'
    scene.render.filepath = gtDir + 'sphericalharmonics/envMap' + str(train_i) + '.exr'
    scene.cycles.samples = 1000
    scene.camera.up_axis = 'Z'
    scene.camera.location = center[:].copy() + targetPosition[:].copy()
    look_at(scene.camera, center[:].copy() + targetPosition[:].copy() + mathutils.Vector((1, 0, 0)))
    scene.update()
    bpy.ops.render.render(write_still=True)
    imageEnvMap = np.array(imageio.imread(scene.render.filepath))[:, :, 0:3]
    cv2.imwrite(gtDir + 'sphericalharmonics/envMapCycles' + str(train_i) + '.jpeg', 255 * imageEnvMap[:, :, [2, 1, 0]])
    # Fit SH coefficients to the rendered panorama — this is the result.
    envMapCoeffs = light_probes.getEnvironmentMapCoefficients(imageEnvMap, 1, 0, 'equirectangular')
    pEnvMap = light_probes.SHProjection(envMapTexture, envMapCoeffs)
    approxProjection = np.sum(pEnvMap, axis=3)
    cv2.imwrite(gtDir + 'sphericalharmonics/envMapCyclesProjection' + str(train_i) + '.jpeg',
                255 * approxProjection[:, :, [2, 1, 0]])
    # Restore the normal perspective rendering setup.
    links.new(treeNodes.nodes['lightPathNode'].outputs[0], treeNodes.nodes['mixShaderNode'].inputs[0])
    scene.cycles.samples = cyclesSamples
    scene.render.filepath = 'opendr_blender.exr'
    roomInstance.cycles_visibility.camera = True
    scene.render.image_settings.file_format = 'OPEN_EXR'
    scene.render.resolution_x = width
    scene.render.resolution_y = height
    scene.camera.data.type = 'PERSP'
    scene.world.cycles_visibility.camera = True
    scene.camera.data.cycles.panorama_type = 'FISHEYE_EQUISOLID'
    teapot.cycles_visibility.camera = True
    teapot.cycles_visibility.shadow = True
    return envMapCoeffs
def setupSceneGroundtruth(scene, width, height, clip_start, cyclesSamples, device_type=None, compute_device=None):
    """Configure *scene* for ground-truth rendering.

    Sets render size and tiling, Cycles sample count, the environment-map
    world shader, JPEG output, optional GPU compute preferences, the
    camera near clip, and the combined render pass on layer 0.
    """
    scene.render.resolution_x = width
    scene.render.resolution_y = height
    scene.render.tile_x = height
    scene.render.tile_y = width
    scene.cycles.samples = cyclesSamples
    bpy.context.screen.scene = scene
    addEnvironmentMapWorld(scene)
    bpy.context.scene.render.image_settings.quality = 100
    scene.render.image_settings.file_format = 'JPEG'
    scene.sequencer_colorspace_settings.name = 'sRGB'
    if device_type is not None:
        bpy.context.user_preferences.system.compute_device_type = device_type
    if compute_device is not None:
        # Fixed: previously assigned the undefined name 'compute_Device',
        # raising NameError whenever a compute device was supplied.
        bpy.context.user_preferences.system.compute_device = compute_device
    bpy.ops.wm.save_userpref()
    scene.world.horizon_color = mathutils.Color((1.0, 1.0, 1.0))
    scene.camera.data.clip_start = clip_start
    scene.render.layers['RenderLayer'].use_pass_combined = True
    scene.layers[0] = True
    scene.camera.layers[0] = True
    scene.render.layers.active = scene.render.layers['RenderLayer']
    nt = bpy.context.scene.node_tree
    nt.nodes['Render Layers'].layer = 'RenderLayer'
def setupScene(scene, roomInstanceNum, world, camera, width, height, numSamples, useCycles, useGPU, device_type=None, compute_device=None):
    """One-time render setup for *scene*: choose the engine (Cycles or
    Blender Internal), set sampling/bounce budgets, camera optics, world
    lighting, and create three render layers (0: combined, 1: object
    index, 2: spare).

    NOTE(review): *numSamples* is accepted but never used — Cycles samples
    are hard-coded to 2000 below; confirm intent.
    """
    if useCycles:
        #Switch Engine to Cycles
        scene.render.engine = 'CYCLES'
        if useGPU:
            bpy.context.scene.cycles.device = 'GPU'
            bpy.context.user_preferences.system.compute_device_type = device_type
            bpy.context.user_preferences.system.compute_device = compute_device
        scene.use_nodes = True
        AutoNode()
        cycles = bpy.context.scene.cycles
        cycles.samples = 2000
        cycles.max_bounces = 36
        cycles.min_bounces = 4
        cycles.caustics_reflective = False
        cycles.caustics_refractive = False
        cycles.diffuse_bounces = 36
        cycles.glossy_bounces = 12
        cycles.transmission_bounces = 2
        cycles.volume_bounces = 12
        cycles.transparent_min_bounces = 2
        cycles.transparent_max_bounces = 2
        world.cycles_visibility.camera = True
        world.use_nodes = True
        # Importance-sample the environment map as a light source.
        world.cycles.sample_as_light = True
        world.cycles.sample_map_resolution = 2048
    scene.render.threads = 4
    scene.render.tile_x = height/2
    scene.render.tile_y = width/2
    scene.render.image_settings.compression = 0
    scene.render.resolution_x = width #perhaps set resolution in code
    scene.render.resolution_y = height
    scene.render.resolution_percentage = 100
    scene.camera = camera
    camera.up_axis = 'Y'
    # NOTE(review): 60 * 180 / numpy.pi looks inverted for a degrees-to-
    # radians conversion (expected 60 * numpy.pi / 180); confirm.
    camera.data.angle = 60 * 180 / numpy.pi
    camera.data.clip_start = 0.001
    camera.data.clip_end = 10
    roomInstance = scene.objects[str(roomInstanceNum)]
    if useCycles:
        roomInstance.cycles_visibility.shadow = False
    scene.world = world
    scene.world.light_settings.distance = 0.1
    if not useCycles:
        # Blender Internal: approximate GI via ambient occlusion + one
        # indirect bounce instead of raytracing.
        scene.render.use_raytrace = False
        scene.render.use_shadows = False
        scene.world.light_settings.use_ambient_occlusion = True
        scene.world.light_settings.ao_blend_type = 'ADD'
        scene.world.light_settings.use_indirect_light = True
        scene.world.light_settings.indirect_bounces = 1
        scene.world.light_settings.use_cache = True
        scene.world.light_settings.ao_factor = 1
        scene.world.light_settings.indirect_factor = 1
        scene.world.light_settings.gather_method = 'APPROXIMATE'
    world.light_settings.use_environment_light = False
    world.light_settings.environment_energy = 0.0
    world.horizon_color = mathutils.Color((1.0,1.0,1.0))
    scene.sequencer_colorspace_settings.name = 'Linear'
    scene.display_settings.display_device = 'None'
    scene.update()
    # Layer 0 renders the combined pass; layer 1 the object-index pass.
    bpy.ops.scene.render_layer_add()
    bpy.ops.scene.render_layer_add()
    camera.layers[1] = True
    scene.render.layers[0].use_pass_combined = True
    scene.render.layers[1].use_pass_object_index = True
    scene.render.layers[1].use_pass_combined = True
    camera.layers[2] = True
    scene.layers[1] = False
    scene.layers[2] = False
    scene.layers[0] = True
    scene.render.layers[0].use = True
    scene.render.layers[1].use = False
    scene.render.layers[2].use = False
    scene.render.use_sequencer = False
def addAmbientLightingScene(scene, useCycles):
    """Add a row of rectangular ceiling area lamps spanning the room.

    The room object is located by the name pattern 'room<N>'.  Lamps are
    spaced roughly every 1.2m across the room's width, sized to its
    depth, placed at ceiling height, and enabled on layers 0-2.
    """
    roomName = ''
    for model in scene.objects:
        reg = re.compile('(room[0-9]+)')
        res = reg.match(model.name)
        if res:
            roomName = res.groups()[0]
    roomInstance = scene.objects[roomName]
    # Room extents in world space.
    ceilMinX, ceilMaxX = modelWidth(roomInstance.dupli_group.objects, roomInstance.matrix_world)
    ceilWidth = (ceilMaxX - ceilMinX)
    ceilMinY, ceilMaxY = modelDepth(roomInstance.dupli_group.objects, roomInstance.matrix_world)
    ceilDepth = (ceilMaxY - ceilMinY)
    ceilMinZ, ceilMaxZ = modelHeight(roomInstance.dupli_group.objects, roomInstance.matrix_world)
    ceilPos = mathutils.Vector(((ceilMaxX + ceilMinX) / 2.0, (ceilMaxY + ceilMinY) / 2.0 , ceilMaxZ))
    # One lamp per ~1.2m of width, with a 0.2m margin.
    numLights = int(numpy.floor((ceilWidth-0.2)/1.2))
    lightInterval = ceilWidth/numLights
    for light in range(numLights):
        lightXPos = light*lightInterval + lightInterval/2.0
        lamp_data = bpy.data.lamps.new(name="Rect", type='AREA')
        lamp = bpy.data.objects.new(name="Rect", object_data=lamp_data)
        lamp.data.size = 0.2
        lamp.data.size_y = ceilDepth - 0.2
        lamp.data.shape = 'RECTANGLE'
        lamp.location = mathutils.Vector((ceilPos.x - ceilWidth/2.0 + lightXPos, ceilPos.y, ceilMaxZ))
        lamp.data.energy = 0.0025
        if useCycles:
            lamp.data.cycles.use_multiple_importance_sampling = True
            lamp.data.use_nodes = True
            lamp.data.node_tree.nodes['Emission'].inputs[1].default_value = 30
        scene.objects.link(lamp)
        lamp.layers[1] = True
        lamp.layers[2] = True
def view_plane(camd, winx, winy, xasp, yasp):
    """Compute the camera's near-plane rectangle (xmin, xmax, ymin, ymax).

    Port of Blender's internal view-plane computation: picks a per-pixel
    size from either the orthographic scale or the sensor/lens geometry,
    resolves the sensor fit, and applies lens shift.  Field rendering and
    the extra zoom factor of the C original are disabled/neutral here.
    """
    aspectCorrection = yasp / xasp

    # Per-pixel size on the near plane.
    if camd.type == 'ORTHO':
        # Orthographic: scale 1.0 means an exact 1:1 mapping.
        pixsize = camd.ortho_scale
    else:
        # Perspective: sensor size fitted through the lens at the near clip.
        if camd.sensor_fit == 'VERTICAL':
            sensor_size = camd.sensor_height
        else:
            # Both 'HORIZONTAL' and 'AUTO' use the sensor width here.
            sensor_size = camd.sensor_width
        pixsize = (sensor_size * camd.clip_start) / camd.lens

    # Resolve 'AUTO' sensor fit from the aspect-corrected window size.
    if camd.sensor_fit == 'AUTO':
        fit = 'HORIZONTAL' if (xasp * winx) >= (yasp * winy) else 'VERTICAL'
    else:
        fit = camd.sensor_fit

    viewfac = winx if fit == 'HORIZONTAL' else aspectCorrection * winy
    pixsize /= viewfac

    # Lens shift, expressed in (pre-pixsize) window units.
    dx = camd.shift_x * viewfac
    dy = camd.shift_y * viewfac

    # Fully centered view plane, then shifted and scaled to world units.
    xmin = (-0.5 * winx + dx) * pixsize
    xmax = (0.5 * winx + dx) * pixsize
    ymin = (-0.5 * aspectCorrection * winy + dy) * pixsize
    ymax = (0.5 * aspectCorrection * winy + dy) * pixsize
    return xmin, xmax, ymin, ymax
def projection_matrix(camd, scene):
    """Build the 4x4 OpenGL-style perspective projection matrix for the
    camera data *camd* at the scene's render resolution.

    Entries are written transposed (mirroring Blender's column-major C
    code) and the matrix is transposed once at the end.
    """
    r = scene.render
    left, right, bottom, top = view_plane(camd, r.resolution_x, r.resolution_y, 1, 1)
    farClip, nearClip = camd.clip_end, camd.clip_start
    Xdelta = right - left
    Ydelta = top - bottom
    Zdelta = farClip - nearClip
    mat = [[0]*4 for i in range(4)]
    mat[0][0] = nearClip * 2 / Xdelta
    mat[1][1] = nearClip * 2 / Ydelta
    mat[2][0] = (right + left) / Xdelta #/* note: negate Z */
    mat[2][1] = (top + bottom) / Ydelta
    mat[2][2] = -(farClip + nearClip) / Zdelta
    mat[2][3] = -1
    mat[3][2] = (-2 * nearClip * farClip) / Zdelta
    projMat = mathutils.Matrix(mat)
    return projMat.transposed()
def image_projection(scene, point):
    """Project a 3D *point* to pixel coordinates using the scene camera's
    projection matrix (manual homogeneous pipeline)."""
    homog = mathutils.Vector.Fill(4, 1)
    homog.x = point.x
    homog.y = point.y
    homog.z = point.z
    proj = projection_matrix(scene.camera.data, scene) * scene.camera.matrix_world.inverted() * homog
    # Perspective divide to NDC, then map [-1, 1] to pixel coordinates.
    ndcX = proj.x / proj.w
    ndcY = proj.y / proj.w
    return [scene.render.resolution_x * (ndcX + 1) / 2,
            scene.render.resolution_y * (ndcY + 1) / 2]
def image_project(scene, camera, point):
    """Project *point* to rounded pixel coordinates via Blender's
    world_to_camera_view helper, honoring the resolution percentage."""
    ndc = bpy_extras.object_utils.world_to_camera_view(scene, camera, point)
    scale = scene.render.resolution_percentage / 100
    pixelW = int(scene.render.resolution_x * scale)
    pixelH = int(scene.render.resolution_y * scale)
    return (round(ndc.x * pixelW), round(ndc.y * pixelH))
#Need to verify!
def closestCameraIntersection(scene, point):
    """Return True if some dupli-group instance's geometry intersects the
    ray from the camera towards *point* (the camera's view is blocked).

    NOTE(review): the cheap pretest compares the instance ORIGIN distance
    against the point distance, so large meshes whose origin lies beyond
    *point* are skipped; verify this approximation is acceptable.
    """
    for instance in scene.objects:
        if instance.type == 'EMPTY' and instance.dupli_type == 'GROUP':
            instanceLoc = numpy.array(instance.location)
            camLoc = numpy.array(scene.camera.location)
            pointLoc = numpy.array(point)
            invInstanceTransf = instance.matrix_world.inverted()
            localCamTmp = invInstanceTransf * scene.camera.location
            # Only consider instances closer than the point and roughly in
            # the same direction from the camera.
            if numpy.linalg.norm(instanceLoc - camLoc) < numpy.linalg.norm(pointLoc - camLoc) and (instanceLoc - camLoc).dot(pointLoc - camLoc) > 0:
                for mesh in instance.dupli_group.objects:
                    if mesh.type == 'MESH':
                        # ray_cast operates in the mesh's local space.
                        invMeshTransf = mesh.matrix_world.inverted()
                        localCam = invMeshTransf * localCamTmp
                        localPoint = invMeshTransf * invInstanceTransf * point
                        location, normal, index = mesh.ray_cast(localCam, localPoint)
                        if index != -1:
                            #Success.
                            return True
    return False
def sceneIntersection(scene, point):
    """Return True when a ray cast from the camera towards *point* hits
    any geometry in *scene*."""
    hit = scene.ray_cast(scene.camera.location, point)
    # scene.ray_cast returns (result, object, matrix, location, normal);
    # only the boolean result matters here.
    return hit[0]
# def flattenMesh(mesh, transform):
# return
# def flattenInstance(instance, transform):
def setupBlender(teapot, width, height, angle, clip_start, clip_end, camDistance):
    """Create a minimal self-contained scene ('Main Scene') for rendering a
    single *teapot* instance: black background, two point lamps above and
    below the origin, and a camera at azimuth 90 / elevation 25 degrees
    looking at the object's center of geometry from *camDistance*.

    Returns the new scene, which is also made the active scene.
    """
    cam = bpy.data.cameras.new("MainCamera")
    camera = bpy.data.objects.new("MainCamera", cam)
    world = bpy.data.worlds.new("MainWorld")
    bpy.ops.scene.new()
    bpy.context.scene.name = 'Main Scene'
    scene = bpy.context.scene
    scene.objects.link(teapot)
    scene.camera = camera
    camera.up_axis = 'Y'
    camera.data.angle = angle
    camera.data.clip_start = clip_start
    camera.data.clip_end = clip_end
    scene.world = world
    # Disable ambient environment light; background is pure black.
    world.light_settings.use_environment_light = False
    world.light_settings.environment_energy = 0.0
    world.horizon_color = mathutils.Color((0.0,0.0,0.0))
    scene.render.resolution_x = width #perhaps set resolution in code
    scene.render.resolution_y = height
    scene.render.resolution_percentage = 100
    scene.update()
    # Key light above the object...
    lamp_data2 = bpy.data.lamps.new(name="LampBotData", type='POINT')
    lamp2 = bpy.data.objects.new(name="LampBot", object_data=lamp_data2)
    lamp2.location = mathutils.Vector((0,0,1.5))
    lamp2.data.energy = 1
    scene.objects.link(lamp2)
    # ...and a dimmer fill light below it.
    lamp_data2 = bpy.data.lamps.new(name="LampBotData2", type='POINT')
    lamp2 = bpy.data.objects.new(name="LampBot2", object_data=lamp_data2)
    lamp2.location = mathutils.Vector((0,0,-1.5))
    lamp2.data.energy = 0.5
    scene.objects.link(lamp2)
    bpy.context.screen.scene = scene
    center = centerOfGeometry(teapot.dupli_group.objects, teapot.matrix_world)
    # Place the camera on a sphere around the center and aim it there.
    azimuth = 90
    elevation = 25
    azimuthRot = mathutils.Matrix.Rotation(radians(-azimuth), 4, 'Z')
    elevationRot = mathutils.Matrix.Rotation(radians(-elevation), 4, 'X')
    originalLoc = mathutils.Vector((0,-camDistance, 0))
    location = center + azimuthRot * elevationRot * originalLoc
    camera.location = location
    scene.update()
    look_at(camera, center)
    scene.update()
    return scene
###########################################################
#
# Round values of the 3D vector
#
###########################################################
def r3d(v):
    """Round the first three components of *v* to 6 decimal places."""
    return tuple(round(component, 6) for component in v[:3])
###########################################################
#
# Round values of the 2D vector
#
###########################################################
def r2d(v):
    """Round the first two components of *v* to 6 decimal places."""
    return tuple(round(component, 6) for component in v[:2])
###########################################################
#
# Convert object name to be suitable for C definition
#
###########################################################
def clearName(name):
    """Upper-case *name* and replace characters unsuitable for a C
    identifier with underscores."""
    cleaned = []
    for ch in name.upper():
        cleaned.append("_" if ch in " ./\-+#$%^!@" else ch)
    return "".join(cleaned)
###########################################################
#
# Build data for each object (MESH)
#
###########################################################
def getDrWrtAzimuth(SqE_raw, rotation):
    """Derivative of the squared-error term SqE_raw w.r.t. the azimuth.

    ``rotation`` is a Rodrigues vector.  The rotation matrix is
    decomposed into Euler angles (a = azimuth about Z, b, g), an
    analytic dR/d(azimuth) is assembled, and it is chained with the
    Rodrigues jacobian and SqE_raw's derivative w.r.t. ``rotation``.
    Returns ``(drazimuth, angles)`` where ``angles = [a, g, b]`` (note
    the a-g-b ordering).
    """
    rot, rot_dr = cv2.Rodrigues(np.array(rotation))
    # Euler angles via mathutils; a2/b2/g2 are hand-computed arctan
    # cross-checks that are never used afterwards.
    a = mathutils.Matrix(rot).to_euler()[2]
    a2 = np.arctan(rot[1,0]/rot[0,0])
    b = mathutils.Matrix(rot).to_euler()[1]
    b2 = np.arctan(-rot[2,0]/np.sqrt(rot[2,1]**2 + rot[2,2]**2))
    g = mathutils.Matrix(rot).to_euler()[0]
    g2 = np.arctan(rot[2,1]/rot[2,2])
    # Entries of dR/da for R = Rz(a)Ry(b)Rx(g); the bottom row of R does
    # not depend on the azimuth, hence the zero third row.
    # NOTE(review): dra22/dra23 do not match the textbook derivative of
    # the ZYX composition (sign/term differences) -- verify before reuse.
    dra11 = -np.sin(a)*np.cos(b)
    dra12 = -np.sin(a)*np.sin(b)*np.sin(g) - np.sin(a)*np.cos(g)
    dra13 = -np.sin(a)*np.sin(b)*np.cos(g) + np.cos(a)*np.sin(g)
    dra21 = np.cos(a)*np.cos(b)
    dra22 = np.cos(a)*np.sin(b)*np.sin(g) - np.sin(a)*np.cos(g)
    dra23 = np.cos(a)*np.sin(b)*np.cos(g) + np.sin(a)*np.cos(g)
    dra31 = 0
    dra32 = 0
    dra33 = 0
    rotwrtaz = np.array([[dra11,dra12,dra13], [dra21,dra22,dra23], [dra31,dra32,dra33]])
    # ipdb.set_trace()
    # Chain rule: dE/daz = dE/dR . dR/d(rodrigues) . d(rodrigues)/daz,
    # scaled by 1/(2*400*400) -- presumably 2 * (400x400 image pixels);
    # TODO confirm the hard-coded image size.
    drazimuth = np.dot(SqE_raw.dr_wrt(rotation), np.dot(rot_dr , rotwrtaz.ravel()))/(2*400*400)
    return drazimuth, np.array([a,g,b])
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
    """A FancyArrowPatch whose endpoints are 3D data coordinates,
    projected to 2D at draw time so the arrow tracks the 3D view.

    NOTE(review): uses proj3d.proj_transform(..., renderer.M), which
    newer matplotlib releases have deprecated/removed -- confirm against
    the installed matplotlib version.
    """
    def __init__(self, xs, ys, zs, *args, **kwargs):
        # Start with a dummy 2D arrow; real positions are set in draw().
        FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
        self._verts3d = xs, ys, zs
    def draw(self, renderer):
        xs3d, ys3d, zs3d = self._verts3d
        # Project the stored 3D endpoints with the current view matrix.
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
        FancyArrowPatch.draw(self, renderer)
def setObjectDiffuseColor(object, color):
    """Set the diffuse color on every material of every MESH object in
    the given object's dupli-group (Blender API)."""
    meshes = (obj for obj in object.dupli_group.objects if obj.type == 'MESH')
    for mesh in meshes:
        for material in mesh.data.materials:
            material.diffuse_color = color
inversegraphics | inversegraphics-master/shape_model.py | import numpy as np
import pickle
import chumpy as ch
import ipdb
from chumpy import depends_on, Ch
import scipy.sparse as sp
#%% Helper functions
def longToPoints3D(pointsLong):
    """Reshape a flat x,y,z,x,y,z,... array into an (N, 3) point array.

    Raises if the input size is not a multiple of 3.
    """
    nPointsLong = np.size(pointsLong)
    # // : the original used true division, which under Python 3 yields
    # a float and makes np.reshape raise TypeError.
    return np.reshape(pointsLong, [nPointsLong // 3, 3])
def shapeParamsToVerts(shapeParams, teapotModel):
    """Decode PPCA shape parameters into mesh vertex positions.

    Landmarks are reconstructed as shapeParams . W^T + b, reshaped to
    (N, 3), then mapped to the full mesh by the model's linear
    transform.  ``teapotModel`` must provide 'ppcaW', 'ppcaB' and
    'meshLinearTransform'.
    """
    landmarksLong = shapeParams.dot(teapotModel['ppcaW'].T) + teapotModel['ppcaB']
    # Reshape inline (-1, 3): the longToPoints3D helper computed the
    # row count with true division, which breaks under Python 3.
    landmarks = np.reshape(landmarksLong, [-1, 3])
    vertices = teapotModel['meshLinearTransform'].dot(landmarks)
    return vertices
def chShapeParamsToVerts(landmarks, meshLinearTransform):
    """Chumpy (differentiable) version of the landmark-to-vertex map:
    vertices = meshLinearTransform @ landmarks."""
    vertices = ch.dot(meshLinearTransform,landmarks)
    return vertices
class VerticesModel(Ch):
    """Chumpy node mapping PPCA shape parameters to mesh vertices.

    In chumpy convention ``terms`` are fixed inputs and ``dterms`` are
    differentiable ones; here only ``chShapeParams`` is differentiated.
    The jacobian is constant (the whole map is linear), so it is
    precomputed once in init().
    """
    terms = 'meshLinearTransform', 'W', 'b'
    dterms = 'chShapeParams'
    def init(self):
        # d(vertices)/d(params): fold W into per-landmark blocks, apply
        # the mesh transform, and flatten back to (3*numVerts, numParams).
        self.jac = self.meshLinearTransform.dot(self.W.reshape([self.meshLinearTransform.shape[1], -1, len(self.chShapeParams)]).transpose((1,0,2))).reshape([-1,len(self.chShapeParams)])
    def compute_r(self):
        # Same decode as shapeParamsToVerts: params . W^T + b -> (N, 3)
        # landmarks -> mesh vertices.
        landmarks = np.dot(self.chShapeParams.r,self.W.T) + self.b
        landmarks = landmarks.reshape([-1,3])
        return np.dot(self.meshLinearTransform, landmarks)
    def compute_dr_wrt(self,wrt):
        if self.chShapeParams is wrt:
            # ipdb.set_trace()
            return self.jac
        return None
def chShapeParamsToNormals(N, landmarks, linT):
    """Transform base normals N by the inverse transpose of each
    landmark's local 3x3 transform (differentiable, chumpy).

    linT maps landmark positions to a stack of 4x4 transforms (one per
    landmark); normals must be transformed by inverse-transpose to stay
    perpendicular under non-rigid deformation.
    """
    T = ch.dot(linT,landmarks)
    invT = []
    nLandmarks = landmarks.r.shape[0]
    for i in range(nLandmarks):
        # 3x3 linear part of landmark i's transform (transposed).
        R = T[4*i:4*i+3,:3].T
        # inv(R.T) == inverse-transpose of the untransposed block.
        invR = ch.linalg.inv(R.T)
        invT = invT + [invR]
    invT = ch.vstack(invT)
    newNormals = ch.dot(N, invT)
    import opendr.geometry
    # NOTE(review): the normalized result `n` is computed but never
    # used -- the function returns the UNnormalized normals.
    n = opendr.geometry.NormalizedNx3(newNormals)
    return newNormals
def getT(targetPoints, linT):
    """Apply the landmark-to-transform linear map: linT @ targetPoints."""
    return linT.dot(targetPoints)
def shapeParamsToNormals(shapeParams, teapotModel):
    """Numpy counterpart of chShapeParamsToNormals: recompute mesh
    normals for the shape described by *shapeParams*.

    Reconstructs the landmarks from the PPCA model, builds each
    landmark's local transform via teapotModel['linT'], and transforms
    the base normals teapotModel['N'] by the inverse of each 3x3 block
    (inverse-transpose rule for normals), then re-normalizes.
    """
    landmarksLong = shapeParams.dot(teapotModel['ppcaW'].T) + teapotModel['ppcaB']
    landmarks = longToPoints3D(landmarksLong)
    T = getT(landmarks, teapotModel['linT'])
    nLandmarks = np.shape(landmarks)[0]
    invT = np.empty([3*nLandmarks, 3])
    for i in range(nLandmarks):
        # Transposed 3x3 linear part of landmark i's 4x4 transform.
        R = T[4*i:4*i+3,:3].T
        invT[3*i:3*(i+1),:] = np.linalg.inv(R)
    newNormals = np.array(teapotModel['N'].dot(invT))
    # In-place normalization to unit length.
    normalize_v3(newNormals)
    return newNormals
def saveObj(vertices, faces, normals, filePath):
    """Write a minimal Wavefront OBJ file.

    Emits a header comment, `v` records (4 decimals), `vn` records
    (4 decimals), and `f` records with 1-based vertex indices.
    """
    lines = ["# OBJ file"]
    lines.extend("v %.4f %.4f %.4f" % (vert[0], vert[1], vert[2]) for vert in vertices)
    lines.extend("vn %.4f %.4f %.4f" % (nrm[0], nrm[1], nrm[2]) for nrm in normals)
    for face in faces:
        # OBJ indices are 1-based.
        lines.append("f" + "".join(" %d" % (idx + 1) for idx in face))
    with open(filePath, 'w') as out:
        out.write("\n".join(lines) + "\n")
def loadObject(fileName):
    """Unpickle and return the object stored in *fileName*.

    NOTE: pickle.load on untrusted files can execute arbitrary code;
    only use with files produced by this project.
    """
    with open(fileName, 'rb') as handle:
        data = pickle.load(handle)
    return data
def normalize_v3(arr):
    """Normalize the xyz columns of an (N, 3) float array to unit
    length, in place, and return the same array object.

    Rows of zero length divide by zero and become nan/inf, matching the
    original behavior.
    """
    lengths = np.sqrt(np.einsum('ij,ij->i', arr[:, :3], arr[:, :3]))
    arr[:, :3] /= lengths[:, np.newaxis]
    return arr
def getNormals(vertices, faces):
    """Per-vertex normals: normalized sum of the unit normals of all
    faces adjacent to each vertex.

    vertices: (V, 3) float array; faces: (F, 3) int index array.
    """
    norm = np.zeros( vertices.shape, dtype=vertices.dtype )
    tris = vertices[faces]
    # Unit face normals from two triangle edge vectors.
    n = np.cross( tris[::,1 ] - tris[::,0] , tris[::,2 ] - tris[::,0] )
    normalize_v3(n)
    # np.add.at accumulates ALL contributions, including faces that
    # repeat a vertex index within the same column.  The original
    # fancy-index `norm[faces[:,0]] += n` silently kept only ONE
    # contribution per duplicated index (NumPy buffered assignment),
    # biasing normals on any shared vertex.
    np.add.at(norm, faces[:, 0], n)
    np.add.at(norm, faces[:, 1], n)
    np.add.at(norm, faces[:, 2], n)
    normalize_v3(norm)
    return norm
def chGetNormals(vertices, faces):
    """Differentiable per-vertex normals via OpenDR's VertNormals,
    reshaped to (N, 3)."""
    import opendr.geometry
    return opendr.geometry.VertNormals(vertices, faces).reshape((-1,3))
| 3,538 | 28.491667 | 187 | py |
inversegraphics | inversegraphics-master/opendr_utils.py | __author__ = 'pol'
from utils import *
import opendr
import chumpy as ch
import geometry
import bpy
import mathutils
import numpy as np
from math import radians
from opendr.camera import ProjectPoints
from opendr.renderer import TexturedRenderer
from opendr.lighting import SphericalHarmonics
from opendr.lighting import LambertianPointLight
from opendr.renderer import ResidualRendererOpenDR
import ipdb
import light_probes
import scene_io_utils
from blender_utils import *
import imageio
import chumpy as ch
from chumpy import depends_on, Ch
import scipy.sparse as sp
class TheanoFunOnOpenDR(Ch):
    """Chumpy node bridging an OpenDR render and a Theano network.

    ``opendr_input`` (differentiable) and ``opendr_input_gt`` (fixed)
    are images; compute_r evaluates the summed squared difference
    between the network outputs for the two images, and compute_dr_wrt
    back-propagates that scalar error to the rendered image through
    Theano's symbolic gradient.

    NOTE(review): compileFunctions requires the Theano graph pieces as
    arguments, yet the lazy `self.compileFunctions()` fallbacks below
    call it with no arguments (TypeError) -- callers presumably must
    call compileFunctions(...) explicitly before evaluation.
    """
    terms = 'opendr_input_gt'   # fixed (non-differentiated) input
    dterms = 'opendr_input'     # differentiated input
    initialized = False
    def compileFunctions(self, theano_output, theano_input, dim_output, theano_input_gt, theano_output_gt):
        """Compile the forward, ground-truth and gradient functions."""
        import theano
        import theano.tensor as T
        self.prediction_fn = theano.function([theano_input], theano_output)
        # self.J, updates = theano.scan(lambda i, y,x : T.grad(y[i], x), sequences=T.arange(y.shape[0]), non_sequences=[y,x])
        # self.J, updates = theano.scan(lambda i, y,x : T.grad(y[i], x), sequences=T.arange(y.shape[0]), non_sequences=[y,x])
        self.prediction_fn_gt = theano.function([theano_input_gt], theano_output_gt)
        x = theano_input
        gt_output = T.vector('gt_output')
        # self.error = T.sum(theano_output + gt_output)
        # Scalar error: sum of squared differences of the two outputs.
        self.error = T.sum(T.pow(theano_output.ravel() - theano_output_gt.ravel(),2))
        self.errorGrad = T.grad(self.error, x)
        from theano.compile.nanguardmode import NanGuardMode
        self.errorGrad_fun = theano.function([theano_input, theano_input_gt], self.errorGrad)
        # self.error_fun = theano.function([theano_input, theano_input_gt], self.error, mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True))
        self.error_fun = theano.function([theano_input, theano_input_gt], self.error)
        # self.grad = theano.function([x], self.J, updates=updates, mode='FAST_RUN')
        self.initialized = True
    def compute_r(self):
        """Evaluate the scalar error on the current render vs GT."""
        if not self.initialized:
            self.compileFunctions()
        n_channels = 1
        if len(self.opendr_input.r.shape) == 3:
            n_channels = self.opendr_input.r.shape[2]
        # Reshape to Theano's NCHW layout.  NOTE(review): h is taken
        # from axis 1 and w from axis 0, the reverse of the usual
        # (rows, cols) convention -- confirm intended for square inputs.
        h = self.opendr_input.r.shape[1]
        w = self.opendr_input.r.shape[0]
        x = self.opendr_input.r.reshape([1,n_channels,h,w]).astype(np.float32)
        x_gt = self.opendr_input_gt.r.reshape([1,n_channels,h,w]).astype(np.float32)
        # out_gt = self.predict_input_gt().ravel().astype(np.float32)
        output = np.array(self.error_fun(x, x_gt))
        return output.ravel()
    def predict_input(self):
        """Run the network forward on the current (rendered) input."""
        n_channels = 1
        if len(self.opendr_input.r.shape) == 3:
            n_channels = self.opendr_input.r.shape[2]
        h = self.opendr_input.r.shape[1]
        w = self.opendr_input.r.shape[0]
        x = self.opendr_input.r.reshape([1,n_channels,h,w]).astype(np.float32)
        output = self.prediction_fn(x)
        return output.ravel()
    def predict_input_gt(self):
        """Run the GT branch of the network on the ground-truth image."""
        n_channels = 1
        if len(self.opendr_input.r.shape) == 3:
            n_channels = self.opendr_input.r.shape[2]
        h = self.opendr_input.r.shape[1]
        w = self.opendr_input.r.shape[0]
        x_gt = self.opendr_input_gt.r.reshape([1, n_channels, h, w]).astype(np.float32)
        output = self.prediction_fn_gt(x_gt)
        return output.ravel()
    def compute_dr_wrt(self,wrt):
        """Jacobian of the scalar error w.r.t. the rendered image."""
        if self.opendr_input is wrt:
            if not self.initialized:
                self.compileFunctions()
            n_channels = 1
            if len(self.opendr_input.r.shape) == 3:
                n_channels = self.opendr_input.r.shape[2]
            h = self.opendr_input.r.shape[1]
            w = self.opendr_input.r.shape[0]
            x = self.opendr_input.r.reshape([1, n_channels, h, w]).astype(np.float32)
            x_gt = self.opendr_input_gt.r.reshape([1, n_channels, h, w]).astype(np.float32)
            # Chumpy expects a sparse (1, numPixels) jacobian row.
            jac = np.array(self.errorGrad_fun(x,x_gt)).squeeze().reshape([1,self.opendr_input.r.size])
            return sp.csr.csr_matrix(jac)
        return None
    def old_grads(self):
        """Per-output gradients, one compiled function per output dim.

        NOTE(review): references self.theano_output / self.theano_input
        / self.dim_output, which are never stored by compileFunctions --
        dead code kept for reference.
        """
        import theano
        import theano.tensor as T
        self.grad_fns = [theano.function([self.theano_input], theano.gradient.grad(self.theano_output.flatten()[grad_i], self.theano_input)) for grad_i in range(self.dim_output)]
        x = self.opendr_input.r
        jac = [sp.lil_matrix(np.array(grad_fun(x[None,None, :,:].astype(np.float32))).ravel()) for grad_fun in self.grad_fns]
        return sp.vstack(jac).tocsr()
class TheanoFunFiniteDiff(Ch):
    """Variant of TheanoFunOnOpenDR that intends to differentiate the
    embedding distance by finite differences instead of T.grad.

    NOTE(review): several unresolved names make this class look
    unfinished/dead: compute_r uses `skimage` and `self.imSize` (neither
    imported/defined here), compute_dr_wrt iterates the BUILTIN `vars`
    and calls `self.error_fun`, whose construction is commented out in
    compileFunctions.  Verify before use.
    """
    terms = 'opendr_input_gt'   # fixed input
    dterms = 'opendr_input'     # differentiated input
    initialized = False
    def compileFunctions(self, theano_output, theano_input, dim_output, theano_input_gt, theano_output_gt):
        """Compile only the two forward functions (gradient code is
        intentionally commented out in this variant)."""
        import theano
        import theano.tensor as T
        self.prediction_fn = theano.function([theano_input], theano_output)
        # self.J, updates = theano.scan(lambda i, y,x : T.grad(y[i], x), sequences=T.arange(y.shape[0]), non_sequences=[y,x])
        # self.J, updates = theano.scan(lambda i, y,x : T.grad(y[i], x), sequences=T.arange(y.shape[0]), non_sequences=[y,x])
        self.prediction_fn_gt = theano.function([theano_input_gt], theano_output_gt)
        # x = theano_input
        # gt_output = T.vector('gt_output')
        # # self.error = T.sum(theano_output + gt_output)
        # self.error = T.sum(T.pow(theano_output.ravel() - theano_output_gt.ravel(),2))
        #
        # self.errorGrad = T.grad(self.error, x)
        #
        # from theano.compile.nanguardmode import NanGuardMode
        # self.errorGrad_fun = theano.function([theano_input, theano_input_gt], self.errorGrad)
        # # self.error_fun = theano.function([theano_input, theano_input_gt], self.error, mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True))
        # self.error_fun = theano.function([theano_input, theano_input_gt], self.error)
        # # self.grad = theano.function([x], self.J, updates=updates, mode='FAST_RUN')
        self.initialized = True
    def compute_r(self):
        """Squared distance between embeddings of the resized render
        and the resized ground-truth image."""
        if not self.initialized:
            self.compileFunctions()
        x = self.opendr_input.r.copy()
        x_gt = self.opendr_input_gt.r.copy()
        # Resize both images to the network's input size (NCHW, 1x1).
        xs = skimage.transform.resize(x, [self.imSize, self.imSize])[None,None,:,:].astype(np.float32)
        x_gts = skimage.transform.resize(x_gt, [self.imSize, self.imSize])[None,None,:,:].astype(np.float32)
        emb = self.prediction_fn(xs)
        emb_gt = self.prediction_fn_gt(x_gts)
        diff = np.sum((emb - emb_gt)**2)
        return diff
    def predict_input(self):
        """Forward pass on the rendered image (no resizing)."""
        x = self.opendr_input.r[None,None,:,:].astype(np.float32)
        output = self.prediction_fn(x)
        return output.ravel()
    def predict_input_gt(self):
        """Forward pass on the ground-truth image (no resizing)."""
        x = self.opendr_input_gt.r[None,None,:,:].astype(np.float32)
        output = self.prediction_fn_gt(x)
        return output.ravel()
    def compute_dr_wrt(self,wrt):
        """Finite-difference jacobian of the error w.r.t. free
        variables.  NOTE(review): see class docstring -- `vars` and
        `self.error_fun` are undefined here, so this will fail as-is."""
        if self.opendr_input is wrt:
            if not self.initialized:
                self.compileFunctions()
            delta = 0.01
            x = self.opendr_input.r[None,None,:,:].astype(np.float32)
            x_gt = self.opendr_input_gt.r[None,None,:,:].astype(np.float32)
            # jac = np.array(self.errorGrad_fun(x,x_gt)).squeeze().reshape([1,self.opendr_input.r.size])
            #Finite differences:
            approxjacs = []
            for idx, freevar in enumerate(vars):
                f0 = self.error_fun(x, x_gt)
                oldvar = freevar.r[:].copy()
                freevar[:] = freevar.r[:] + delta
                f1 = self.error_fun(x, x_gt)
                # Forward difference (f(x+d) - f(x)) / |d|.
                diff = (f1 - f0) / np.abs(delta)
                freevar[:] = oldvar.copy()
                approxjacs = approxjacs + [diff]
            approxjacs = np.concatenate(approxjacs)
            return sp.csr.csr_matrix(approxjacs)
        return None
    def old_grads(self):
        """Dead code kept for reference; see TheanoFunOnOpenDR.old_grads."""
        import theano
        import theano.tensor as T
        self.grad_fns = [theano.function([self.theano_input], theano.gradient.grad(self.theano_output.flatten()[grad_i], self.theano_input)) for grad_i in range(self.dim_output)]
        x = self.opendr_input.r
        jac = [sp.lil_matrix(np.array(grad_fun(x[None,None, :,:].astype(np.float32))).ravel()) for grad_fun in self.grad_fns]
        return sp.vstack(jac).tocsr()
def recoverAmbientIntensities(hdritems, gtDataset, clampedCosCoeffs):
    """Recover the per-sample ambient light intensity from the ground
    truth dataset.

    For each training sample, the environment map's SH coefficients are
    rotated about Z by (env map offset + object azimuth), converted to
    luminance (0.3R + 0.59G + 0.11B), and the ambient intensity is the
    ratio of the stored GT component to that luminance times the
    clamped-cosine constant.  The same ratio is recomputed from SH
    coefficient 1 as a consistency check.
    """
    hdrscoeffs = np.zeros([100, 9,3])
    for hdrFile, hdrValues in hdritems:
        hdridx = hdrValues[0]
        hdrscoeffs[hdridx] = hdrValues[1]
    trainEnvMapCoeffs = hdrscoeffs[gtDataset['trainEnvMaps']]
    trainTotaloffsets = gtDataset['trainEnvMapPhiOffsets'] + gtDataset['trainObjAzsGT']
    rotations = np.vstack([light_probes.sphericalHarmonicsZRotation(trainTotaloffset)[None,:] for trainTotaloffset in trainTotaloffsets])
    rotationsRel = np.vstack([light_probes.sphericalHarmonicsZRotation(trainEnvMapPhiOffsets)[None,:] for trainEnvMapPhiOffsets in gtDataset['trainEnvMapPhiOffsets']])
    # The index permutation [0,3,2,1,...] swaps SH band-1 ordering to
    # match the rotation matrix convention, and swaps it back after.
    envMapCoeffsRotated = np.vstack([np.dot(rotations[i], trainEnvMapCoeffs[i,[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]][None,:] for i in range(len(rotations))])
    envMapCoeffsRotatedRel = np.vstack([np.dot(rotationsRel[i], trainEnvMapCoeffs[i,[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]][None,:] for i in range(len(rotations))])
    trainAmbientIntensityGT = gtDataset['trainComponentsGT'][:,0]/((0.3*envMapCoeffsRotated[:,0,0] + 0.59*envMapCoeffsRotated[:,0,1] + 0.11*envMapCoeffsRotated[:,0,2])*clampedCosCoeffs[0])
    trainAmbientIntensityGT1 = gtDataset['trainComponentsGT'][:,1]/((0.3*envMapCoeffsRotated[:,1,0] + 0.59*envMapCoeffsRotated[:,1,1] + 0.11*envMapCoeffsRotated[:,1,2])*clampedCosCoeffs[1])
    # NOTE(review): exact float != comparison -- a tolerance (allclose)
    # would be more robust for this sanity check.
    if np.any(trainAmbientIntensityGT != trainAmbientIntensityGT1):
        print("Problem with recovery of intensities")
    return trainAmbientIntensityGT
def SHSpherePlot():
    """Measure how much environment-map variance the first 9 SH
    coefficients explain, and write per-map approximation images.

    For each (non-blacklisted) HDR environment map: resize to 600x300,
    project onto the SH basis, write the tone-mapped original and the
    cumulative band approximations (1..9 coefficients), then compute an
    ANOVA-style explained-variance ratio across all maps.

    NOTE(review): contains a leftover ipdb.set_trace() breakpoint near
    the end -- remove before running unattended.
    """
    #Visualize plots
    ignoreEnvMaps = np.loadtxt('data/bad_envmaps.txt')
    envMapDic = {}
    SHFilename = 'data/LightSHCoefficients.pickle'
    with open(SHFilename, 'rb') as pfile:
        envMapDic = pickle.load(pfile)
    # Only the first 10 maps are processed here.
    hdritems = list(envMapDic.items())[0:10]
    pEnvMapsList = []
    envMapsList = []
    width = 600
    height = 300
    for hdrFile, hdrValues in hdritems:
        hdridx = hdrValues[0]
        if hdridx not in ignoreEnvMaps:
            if not os.path.exists('light_probes/envMap' + str(hdridx)):
                os.makedirs('light_probes/envMap' + str(hdridx))
            envMapCoeffs = hdrValues[1]
            envMap = np.array(imageio.imread(hdrFile))[:,:,0:3]
            # normalize = envMap.mean()*envMap.shape[0]*envMap.shape[1]
            # envMap = 0.3*envMap[:,:,0] + 0.59*envMap[:,:,1] + 0.11*envMap[:,:,2]
            # if envMap.shape[0] != height or envMap.shape[1] != width:
            envMap = cv2.resize(src=envMap, dsize=(width,height))
            print("Processing hdridx" + str(hdridx))
            # envMapCoeffs = 0.3*envMapCoeffs[:,0] + 0.59*envMapCoeffs[:,1] + 0.11*envMapCoeffs[:,2]
            # envMapCoeffs *= normalize
            # pEnvMap = SHProjection(envMap, envMapCoeffs)
            envMapMean = envMap.mean()
            # Recompute coefficients from the resized map rather than
            # trusting the pickled ones.
            envMapCoeffs2 = light_probes.getEnvironmentMapCoefficients(envMap, 1, 0, 'equirectangular')
            # envMapCoeffs2 = 0.3*envMapCoeffs2[:,0] + 0.59*envMapCoeffs2[:,1] + 0.11*envMapCoeffs2[:,2]
            pEnvMap = SHProjection(envMap, envMapCoeffs2)
            tm = cv2.createTonemapDrago(gamma=2.2)
            tmEnvMap = tm.process(envMap)
            # [2,1,0]: RGB -> BGR for OpenCV's imwrite.
            cv2.imwrite('light_probes/envMap' + str(hdridx) + '/texture.jpeg' , 255*tmEnvMap[:,:,[2,1,0]])
            approxProjections = []
            for coeffApprox in np.arange(9) + 1:
                # Cumulative reconstruction from the first k coefficients,
                # clamped to non-negative for display.
                approxProjection = np.sum(pEnvMap[:,:,:,0:coeffApprox], axis=3)
                approxProjectionPos = approxProjection.copy()
                approxProjectionPos[approxProjection<0] = 0
                # tm = cv2.createTonemapDrago(gamma=2.2)
                # tmApproxProjection = tm.process(approxProjection)
                approxProjections = approxProjections + [approxProjection[None, :,:,:,None]]
                cv2.imwrite('light_probes/envMap' + str(hdridx) + '/approx' + str(coeffApprox-1) + '.jpeg', 255 * approxProjectionPos[:,:,[2,1,0]])
            approxProjections = np.concatenate(approxProjections, axis=4)
            pEnvMapsList = pEnvMapsList + [approxProjections]
            envMapsList = envMapsList + [envMap[None,:,:,:]]
    pEnvMaps = np.concatenate(pEnvMapsList, axis=0)
    envMaps = np.concatenate(envMapsList, axis=0)
    #Total sum of squares
    envMapsMean = np.mean(envMaps, axis=0)
    pEnvMapsMean = np.mean(pEnvMaps, axis=0)
    tsq = np.sum((envMaps - envMapsMean)**2)/(envMapsMean.shape[0]*envMapsMean.shape[1])
    # Explained and unexplained sums of squares per coefficient count.
    ess = np.sum((pEnvMaps - envMapsMean[None,:,:,:,None])**2, axis=(0,1,2,3))/(envMapsMean.shape[0]*envMapsMean.shape[1])
    uess = np.sum((pEnvMaps - envMaps[:,:,:,:,None])**2, axis=(0,1,2,3))/(envMapsMean.shape[0]*envMapsMean.shape[1])
    explainedVar = ess/tsq
    tsq2 = uess + ess
    ipdb.set_trace()
    #Do something with standardized residuals.
    stdres = (pEnvMaps - pEnvMapsMean)/np.sqrt(np.sum((pEnvMaps - pEnvMapsMean)**2,axis=0))
    return explainedVar
def exportEnvMapSHImages(shCoeffsRGB, useBlender, scene, width, height, rendererGT):
    """Render comparison images (Blender vs OpenDR) for each HDR map in
    data/hdr/dataset at four azimuth offsets.

    Writes the raw texture plus, per offset, a Blender Cycles render and
    an OpenDR render lit by the map's SH coefficients.  Mutates
    ``shCoeffsRGB`` in place so the OpenDR renderer picks up each map's
    lighting.
    """
    import glob
    for hdridx, hdrFile in enumerate(glob.glob("data/hdr/dataset/*")):
        envMapFilename = hdrFile
        envMapTexture = np.array(imageio.imread(envMapFilename))[:,:,0:3]
        phiOffset = 0
        phiOffsets = [0, np.pi/2, np.pi, 3*np.pi/2]
        # phiOffsets = [0, np.pi/2, np.pi, 3*np.pi/2]
        if not os.path.exists('light_probes/envMap' + str(hdridx)):
            os.makedirs('light_probes/envMap' + str(hdridx))
        # [2,1,0]: RGB -> BGR for OpenCV's imwrite.
        cv2.imwrite('light_probes/envMap' + str(hdridx) + '/texture.png' , 255*envMapTexture[:,:,[2,1,0]])
        for phiOffset in phiOffsets:
            envMapMean = envMapTexture.mean()
            # Note the NEGATED offset for the SH projection vs the
            # positive rotation applied to Blender's world below.
            envMapCoeffs = light_probes.getEnvironmentMapCoefficients(envMapTexture, envMapMean, -phiOffset, 'equirectangular')
            shCoeffsRGB[:] = envMapCoeffs
            if useBlender:
                updateEnviornmentMap(envMapFilename, scene)
                setEnviornmentMapStrength(0.3/envMapMean, scene)
                rotateEnviornmentMap(phiOffset, scene)
            scene.render.filepath = 'light_probes/envMap' + str(hdridx) + '/blender_' + str(np.int(180*phiOffset/np.pi)) + '.png'
            bpy.ops.render.render(write_still=True)
            cv2.imwrite('light_probes/envMap' + str(hdridx) + '/opendr_' + str(np.int(180*phiOffset/np.pi)) + '.png' , 255*rendererGT.r[:,:,[2,1,0]])
def exportEnvMapSHLightCoefficients():
    """Compute SH lighting coefficients for every HDR map in
    data/hdr/dataset and pickle them to data/LightSHCoefficients.pickle.

    Each map is normalized by its mean luminance before projection, so
    the stored coefficients are brightness-independent.  The dictionary
    maps hdr file path -> [index, (9, 3) coefficient array].
    """
    import glob
    envMapDic = {}
    hdrs = glob.glob("data/hdr/dataset/*")
    hdrstorender = hdrs
    sphericalMap = False
    for hdridx, hdrFile in enumerate(hdrstorender):
        print("Processing env map" + str(hdridx))
        envMapFilename = hdrFile
        envMapTexture = np.array(imageio.imread(envMapFilename))[:,:,0:3]
        phiOffset = 0
        if not os.path.exists('light_probes/envMap' + str(hdridx)):
            os.makedirs('light_probes/envMap' + str(hdridx))
        # cv2.imwrite('light_probes/envMap' + str(hdridx) + '/texture.png' , 255*envMapTexture[:,:,[2,1,0]])
        if sphericalMap:
            envMapTexture, envMapMean = light_probes.processSphericalEnvironmentMap(envMapTexture)
            envMapCoeffs = light_probes.getEnvironmentMapCoefficients(envMapTexture, 1, 0, 'spherical')
        else:
            # Normalize by mean luminance (0.3R + 0.59G + 0.11B).
            envMapGray = 0.3*envMapTexture[:,:,0] + 0.59*envMapTexture[:,:,1] + 0.11*envMapTexture[:,:,2]
            envMapGrayMean = np.mean(envMapGray, axis=(0,1))
            envMapTexture = envMapTexture/envMapGrayMean
            # envMapTexture = 4*np.pi*envMapTexture/np.sum(envMapTexture, axis=(0,1))
            envMapCoeffs = light_probes.getEnvironmentMapCoefficients(envMapTexture, 1, 0, 'equirectangular')
        # Reconstruction computed only as a sanity check; not saved.
        pEnvMap = SHProjection(envMapTexture, envMapCoeffs)
        approxProjection = np.sum(pEnvMap, axis=3)
        # envMapCoeffs = light_probes.getEnvironmentMapCoefficients(envMapTexture, envMapMean, phiOffset, 'equirectangular')
        envMapDic[hdrFile] = [hdridx, envMapCoeffs]
    SHFilename = 'data/LightSHCoefficients.pickle'
    with open(SHFilename, 'wb') as pfile:
        pickle.dump(envMapDic, pfile)
def transformObject(v, vn, chScale, chObjAz, chObjDisplacement, chObjRotation, targetPosition):
    """Scale, rotate and place an object's meshes (differentiable).

    v/vn are lists of per-mesh vertex/normal arrays.  Vertices get
    rotate(Z, -chObjAz) . scale, plus a displacement on a hemisphere
    (radius chObjDisplacement at angle chObjRotation) and the fixed
    targetPosition.  Normals are transformed by the inverse transpose
    and renormalized.  Returns (vertices, normals, newPos).
    """
    scaleMat = geometry.Scale(x=chScale[0], y=chScale[1],z=chScale[2])[0:3,0:3]
    chRotAzMat = geometry.RotateZ(a=-chObjAz)[0:3,0:3]
    transformation = ch.dot(chRotAzMat, scaleMat)
    # Inverse transpose for normal vectors (non-uniform scale safe).
    invTranspModel = ch.transpose(ch.linalg.inv(transformation))
    objDisplacementMat = computeHemisphereTransformation(chObjRotation, 0, chObjDisplacement, np.array([0,0,0]))
    newPos = objDisplacementMat[0:3, 3]
    vtransf = []
    vntransf = []
    for mesh_i, mesh in enumerate(v):
        vtransf = vtransf + [ch.dot(v[mesh_i], transformation) + newPos + targetPosition]
        # ipdb.set_trace()
        # vtransf = vtransf + [v[mesh_i] + chPosition]
        vndot = ch.dot(vn[mesh_i], invTranspModel)
        # Renormalize after the (possibly non-orthogonal) transform.
        vndot = vndot/ch.sqrt(ch.sum(vndot**2,1))[:,None]
        vntransf = vntransf + [vndot]
    return vtransf, vntransf, newPos
def createRendererTarget(glMode, chAz, chEl, chDist, center, v, vc, f_list, vn, light_color, chComponent, chVColors, targetPosition, chDisplacement, width,height, uv, haveTextures_list, textures_list, frustum, win ):
    """Build a TexturedRenderer for the target object: flatten the
    per-scene mesh lists, shade vertex colors with spherical harmonics,
    set up the hemisphere camera, and wire everything into the renderer.
    """
    renderer = TexturedRenderer()
    renderer.set(glMode=glMode)
    # Flatten list-of-lists of meshes into one flat mesh list.
    vflat = [item for sublist in v for item in sublist]
    rangeMeshes = range(len(vflat))
    vnflat = [item for sublist in vn for item in sublist]
    vcflat = [item for sublist in vc for item in sublist]
    # vcch = [np.ones_like(vcflat[mesh])*chVColors.reshape([1,3]) for mesh in rangeMeshes]
    vc_list = computeSphericalHarmonics(vnflat, vcflat, light_color, chComponent)
    if len(vflat)==1:
        vstack = vflat[0]
    else:
        vstack = ch.vstack(vflat)
    camera, modelRotation, _ = setupCamera(vstack, chAz, chEl, chDist, center + targetPosition + chDisplacement, width, height)
    setupTexturedRenderer(renderer, vstack, vflat, f_list, vc_list, vnflat, uv, haveTextures_list, textures_list, camera, frustum, win)
    return renderer
def createNewRendererTarget(glMode, chAz, chEl, chDist, center, v, vc, f_list, vn, light_color, chComponent, chVColors, targetPosition, chDisplacement, width,height, uv, haveTextures_list, textures_list, frustum, win ):
    """Identical to createRendererTarget except it instantiates a
    ResidualRendererOpenDR instead of a plain TexturedRenderer.
    (NOTE(review): near-duplicate -- consider factoring the renderer
    class out as a parameter.)
    """
    renderer = ResidualRendererOpenDR()
    renderer.set(glMode=glMode)
    # Flatten list-of-lists of meshes into one flat mesh list.
    vflat = [item for sublist in v for item in sublist]
    rangeMeshes = range(len(vflat))
    vnflat = [item for sublist in vn for item in sublist]
    vcflat = [item for sublist in vc for item in sublist]
    # vcch = [np.ones_like(vcflat[mesh])*chVColors.reshape([1,3]) for mesh in rangeMeshes]
    vc_list = computeSphericalHarmonics(vnflat, vcflat, light_color, chComponent)
    if len(vflat)==1:
        vstack = vflat[0]
    else:
        vstack = ch.vstack(vflat)
    camera, modelRotation, _ = setupCamera(vstack, chAz, chEl, chDist, center + targetPosition + chDisplacement, width, height)
    setupTexturedRenderer(renderer, vstack, vflat, f_list, vc_list, vnflat, uv, haveTextures_list, textures_list, camera, frustum, win)
    return renderer
def getCubeData(scale=(2,2,2), st=False, rgb=np.array([0.0, 0.0, 0.0])):
    """Build chumpy-wrapped cube geometry for rendering.

    Returns (vertices, faces, normals, vertexColors, texturesList,
    haveTextures), with normals flipped inward (negated) and no
    textures.  ``rgb`` is the per-vertex color; alpha is fixed to 1.
    """
    # Pass the caller's arguments through: the original hard-coded
    # scale=(2,2,2) and st=False, silently ignoring both parameters.
    # Defaults are unchanged, so existing callers see identical output.
    dataCube, facesCube = create_cube(scale=scale, st=st, rgba=np.array([rgb[0], rgb[1], rgb[2], 1.0]), dtype='float32', type='triangles')
    verticesCube = ch.Ch(dataCube[:,0:3])
    # NOTE(review): UVsCube is computed but never returned.
    UVsCube = ch.Ch(np.zeros([verticesCube.shape[0],2]))
    facesCube = facesCube.reshape([-1,3])
    import shape_model
    normalsCube = -shape_model.chGetNormals(verticesCube, facesCube)
    haveTexturesCube = [[False]]
    texturesListCube = [[None]]
    vColorsCube = ch.Ch(dataCube[:,3:6])
    return verticesCube, facesCube, normalsCube, vColorsCube, texturesListCube, haveTexturesCube
def createRendererGT(glMode, chAz, chEl, chDist, center, v, vc, f_list, vn, light_color, chComponent, chVColors, targetPosition, chDisplacement, width,height, uv, haveTextures_list, textures_list, frustum, win ):
    """Build the ground-truth TexturedRenderer.

    Unlike createRendererTarget, vertex colors are used as-is (no
    spherical-harmonics shading term is applied here).
    """
    renderer = TexturedRenderer()
    renderer.set(glMode=glMode)
    # Flatten list-of-lists of meshes into one flat mesh list.
    vflat = [item for sublist in v for item in sublist]
    rangeMeshes = range(len(vflat))
    # vch = [ch.array(vflat[mesh]) for mesh in rangeMeshes]
    vch = vflat
    if len(vch)==1:
        vstack = vch[0]
    else:
        vstack = ch.vstack(vch)
    camera, modelRotation, _ = setupCamera(vstack, chAz, chEl, chDist, center + targetPosition + chDisplacement, width, height)
    vnflat = [item for sublist in vn for item in sublist]
    # vnch = [ch.array(vnflat[mesh]) for mesh in rangeMeshes]
    # vnchnorm = [vnch[mesh]/ch.sqrt(vnch[mesh][:,0]**2 + vnch[mesh][:,1]**2 + vnch[mesh][:,2]**2).reshape([-1,1]) for mesh in rangeMeshes]
    vcflat = [item for sublist in vc for item in sublist]
    vcch = [vcflat[mesh] for mesh in rangeMeshes]
    # vc_list = computeGlobalAndPointLighting(vch, vnch, vcch, lightPosGT, chGlobalConstantGT, light_colorGT)
    setupTexturedRenderer(renderer, vstack, vch, f_list, vcch, vnflat, uv, haveTextures_list, textures_list, camera, frustum, win)
    return renderer
#Old method
def generateSceneImages(width, height, envMapFilename, envMapMean, phiOffset, chAzGT, chElGT, chDistGT, light_colorGT, chComponentGT, glMode):
    """Render every replaceable scene/target with both Blender and
    OpenDR and save the images under scenes/<sceneNumber>/.

    NOTE(review): this "old method" looks stale and will fail as-is:
    (1) rendererGT is cleared/deleted before it is ever assigned
    (NameError on the first target); (2) setupCamera in this file
    returns THREE values but is unpacked into two here (ValueError);
    (3) `frustum` and `win` are free names, not parameters.  Kept
    byte-identical; treat as reference code.
    """
    replaceableScenesFile = '../databaseFull/fields/scene_replaceables_backup.txt'
    sceneLines = [line.strip() for line in open(replaceableScenesFile)]
    for sceneIdx in np.arange(len(sceneLines)):
        sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndices, targetPositions = scene_io_utils.getSceneInformation(sceneIdx, replaceableScenesFile)
        sceneDicFile = 'data/scene' + str(sceneNumber) + '.pickle'
        # Reset Blender to factory state before loading each scene.
        bpy.ops.wm.read_factory_settings()
        scene_io_utils.loadSceneBlendData(sceneIdx, replaceableScenesFile)
        scene = bpy.data.scenes['Main Scene']
        bpy.context.screen.scene = scene
        scene_io_utils.setupScene(scene, roomInstanceNum, scene.world, scene.camera, width, height, 16, True, False)
        scene.update()
        scene.render.resolution_x = width #perhaps set resolution in code
        scene.render.resolution_y = height
        scene.render.tile_x = height/2
        scene.render.tile_y = width
        scene.cycles.samples = 100
        addEnvironmentMapWorld(envMapFilename, scene)
        setEnviornmentMapStrength(0.3/envMapMean, scene)
        rotateEnviornmentMap(phiOffset, scene)
        if not os.path.exists('scenes/' + str(sceneNumber)):
            os.makedirs('scenes/' + str(sceneNumber))
        for targetIdx, targetIndex in enumerate(targetIndices):
            targetPosition = targetPositions[targetIdx]
            # NOTE(review): used before first assignment -- see docstring.
            rendererGT.clear()
            del rendererGT
            v, f_list, vc, vn, uv, haveTextures_list, textures_list = scene_io_utils.loadSavedScene(sceneDicFile)
            # removeObjectData(targetIndex, v, f_list, vc, vn, uv, haveTextures_list, textures_list)
            # addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, v_teapots[currentTeapotModel][0], f_list_teapots[currentTeapotModel][0], vc_teapots[currentTeapotModel][0], vn_teapots[currentTeapotModel][0], uv_teapots[currentTeapotModel][0], haveTextures_list_teapots[currentTeapotModel][0], textures_list_teapots[currentTeapotModel][0])
            vflat = [item for sublist in v for item in sublist]
            rangeMeshes = range(len(vflat))
            vch = [ch.array(vflat[mesh]) for mesh in rangeMeshes]
            # vch[0] = ch.dot(vch[0], scaleMatGT) + targetPosition
            if len(vch)==1:
                vstack = vch[0]
            else:
                vstack = ch.vstack(vch)
            cameraGT, modelRotationGT = setupCamera(vstack, chAzGT, chElGT, chDistGT, targetPosition, width, height)
            # cameraGT, modelRotationGT = setupCamera(vstack, chAzGT, chElGT, chDistGT, center + targetPosition, width, height)
            vnflat = [item for sublist in vn for item in sublist]
            vnch = [ch.array(vnflat[mesh]) for mesh in rangeMeshes]
            # Normalize normals per mesh before SH shading.
            vnchnorm = [vnch[mesh]/ch.sqrt(vnch[mesh][:,0]**2 + vnch[mesh][:,1]**2 + vnch[mesh][:,2]**2).reshape([-1,1]) for mesh in rangeMeshes]
            vcflat = [item for sublist in vc for item in sublist]
            vcch = [ch.array(vcflat[mesh]) for mesh in rangeMeshes]
            vc_list = computeSphericalHarmonics(vnchnorm, vcch, light_colorGT, chComponentGT)
            rendererGT = TexturedRenderer()
            rendererGT.set(glMode=glMode)
            setupTexturedRenderer(rendererGT, vstack, vch, f_list, vc_list, vnchnorm, uv, haveTextures_list, textures_list, cameraGT, frustum, win)
            # [2,1,0]: RGB -> BGR for OpenCV's imwrite.
            cv2.imwrite('scenes/' + str(sceneNumber) + '/opendr_' + str(targetIndex) + '.png' , 255*rendererGT.r[:,:,[2,1,0]])
            placeCamera(scene.camera, -chAzGT[0].r*180/np.pi, chElGT[0].r*180/np.pi, chDistGT, targetPosition)
            scene.update()
            scene.render.filepath = 'scenes/' + str(sceneNumber) + '/blender_' + str(targetIndex) + '.png'
            bpy.ops.render.render(write_still=True)
def getOcclusionFraction(renderer, id=0):
    """Fraction of mesh `id` that is occluded in the current render.

    NOTE(review): the real computation (commented out) is disabled --
    this currently ALWAYS returns 0. and the two masks are computed but
    unused.  Also `np.bool` was removed in NumPy >= 1.24.
    """
    vis_occluded = np.array(renderer.indices_image==id+1).copy().astype(np.bool)
    vis_im = np.array(renderer.image_mesh_bool([id])).copy().astype(np.bool)
    # return 1. - np.sum(vis_occluded)/np.sum(vis_im)
    return 0.
#From http://www.ppsloan.org/publications/StupidSH36.pdf
def chZonalHarmonics(a):
    """Zonal-harmonic coefficients (bands 0-2) of a spherical cap of
    angular radius *a*, as differentiable chumpy expressions.

    Formulas from Sloan, "Stupid Spherical Harmonics Tricks"
    (http://www.ppsloan.org/publications/StupidSH36.pdf).
    """
    zl0 = -ch.sqrt(ch.pi)*(-1.0 + ch.cos(a))
    zl1 = 0.5*ch.sqrt(3.0*ch.pi)*ch.sin(a)**2
    zl2 = -0.5*ch.sqrt(5.0*ch.pi)*ch.cos(a)*(-1.0 + ch.cos(a))*(ch.cos(a)+1.0)
    z = [zl0, zl1, zl2]
    return ch.concatenate(z)
# http://cseweb.ucsd.edu/~ravir/papers/envmap/envmap.pdf
# Real spherical-harmonic basis functions Y_lm(theta, phi) for bands
# l = 0..2, keyed by (l, m), built from differentiable chumpy ops.
# Constants follow Ramamoorthi & Hanrahan's irradiance environment map
# paper (http://cseweb.ucsd.edu/~ravir/papers/envmap/envmap.pdf).
chSpherical_harmonics = {
    (0, 0): lambda theta, phi: ch.Ch([0.282095]),
    (1, -1): lambda theta, phi: 0.488603 * ch.sin(theta) * ch.sin(phi),
    (1, 0): lambda theta, phi: 0.488603 * ch.cos(theta),
    (1, 1): lambda theta, phi: 0.488603 * ch.sin(theta) * ch.cos(phi),
    (2, -2): lambda theta, phi: 1.092548 * ch.sin(theta) * ch.cos(phi) * ch.sin(theta) * ch.sin(phi),
    (2, -1): lambda theta, phi: 1.092548 * ch.sin(theta) * ch.sin(phi) * ch.cos(theta),
    (2, 0): lambda theta, phi: 0.315392 * (3 * ch.cos(theta)**2 - 1),
    (2, 1): lambda theta, phi: 1.092548 * ch.sin(theta) * ch.cos(phi) * ch.cos(theta),
    (2, 2): lambda theta, phi: 0.546274 * (((ch.sin(theta) * ch.cos(phi)) ** 2) - ((ch.sin(theta) * ch.sin(phi)) ** 2))
}
#From http://www.ppsloan.org/publications/StupidSH36.pdf
def chZonalToSphericalHarmonics(z, theta, phi):
    """Rotate zonal-harmonic coefficients *z* to the direction
    (theta, phi), producing full SH coefficients (chumpy).

    Uses sqrt(4*pi/(2l+1)) * z_l * Y_lm(theta, phi) per Sloan's
    "Stupid SH Tricks"; band-1 entries are then swapped to match this
    codebase's coefficient ordering.
    """
    sphCoeffs = []
    for l in np.arange(len(z)):
        for m in np.arange(np.int(-(l*2+1)/2),np.int((l*2+1)/2) + 1):
            ylm_d = chSpherical_harmonics[(l,m)](theta,phi)
            sh = np.sqrt(4*np.pi/(2*l + 1))*z[l]*ylm_d
            sphCoeffs = sphCoeffs + [sh]
    #Correct order in band l=1.
    sphCoeffs[1],sphCoeffs[3] = sphCoeffs[3],sphCoeffs[1]
    chSphCoeffs = ch.concatenate(sphCoeffs)
    return chSphCoeffs
#From http://www.ppsloan.org/publications/StupidSH36.pdf
def clampedCosineCoefficients():
    """Return the 9 per-coefficient constants A_l of the clamped-cosine
    (Lambertian) kernel for SH bands l = 0..2.

    Following Ramamoorthi & Hanrahan: A_0 = pi, A_1 = 2*pi/3, odd
    bands above l = 1 vanish, and even bands use the analytic formula
    (A_2 = pi/4 ~= 0.785398).  Band l contributes 2l+1 identical
    entries, giving a length-9 array.
    """
    import math
    constants = []
    for l in range(3):
        for _ in range(2 * l + 1):  # band l has 2l+1 coefficients
            if l == 0:
                normConstant = np.pi
            elif l == 1:
                normConstant = 2.0 * np.pi / 3.0
            elif l % 2 == 1:
                # Odd bands above l=1 are zero for the clamped cosine.
                # (The original tested `l % 1 == 0`, which is always
                # true; the later even-l branch happened to overwrite it
                # so the result was still correct for l < 3.)
                normConstant = 0.0
            else:
                # math.factorial with an int argument: the original used
                # np.math.factorial(l/2) -- np.math is removed in modern
                # NumPy and float factorials are removed in Python 3.12.
                normConstant = (2.0 * np.pi
                                * (((-1.0) ** (l / 2.0 - 1.0)) / ((l + 2) * (l - 1)))
                                * (math.factorial(l) / ((2 ** l) * (math.factorial(l // 2) ** 2))))
            constants.append(normConstant)
    return np.array(constants)
def setupCamera(v, chAz, chEl, chDist, objCenter, width, height):
    """Build an OpenDR ProjectPoints camera on the azimuth/elevation
    hemisphere around objCenter.

    Returns (camera, modelRotation, chMVMat).  The camera pose is the
    inverse of the hemisphere model-view matrix, split into a Rodrigues
    rotation and a translation; focal length is the fixed 1.12*width
    used throughout this project.
    """
    chCamModelWorld = computeHemisphereTransformation(chAz, chEl, chDist, objCenter)
    # Extra -90deg X rotation converts between Blender's and OpenDR's
    # camera conventions.
    chMVMat = ch.dot(chCamModelWorld, np.array(mathutils.Matrix.Rotation(radians(270), 4, 'X')))
    chInvCam = ch.linalg.inv(chMVMat)
    modelRotation = chInvCam[0:3,0:3]
    chRod = opendr.geometry.Rodrigues(rt=modelRotation).reshape(3)
    chTranslation = chInvCam[0:3,3]
    translation, rotation = (chTranslation, chRod)
    camera = ProjectPoints(v=v, rt=rotation, t=translation, f= 1.12*ch.array([width,width]), c=ch.array([width,height])/2.0, k=ch.zeros(5))
    # Flip for OpenGL's Y-down image coordinates.
    camera.openglMat = np.array(mathutils.Matrix.Rotation(radians(180), 4, 'X'))
    return camera, modelRotation, chMVMat
def computeHemisphereTransformation(chAz, chEl, chDist, objCenter):
    """4x4 transform placing a camera on the hemisphere around
    objCenter at azimuth chAz, elevation chEl and radius chDist
    (differentiable): translate(objCenter) . Rz(az) . Rx(-el) .
    translate(0, -dist, 0)."""
    chDistMat = geometry.Translate(x=ch.Ch(0), y=-chDist, z=ch.Ch(0))
    chToObjectTranslate = geometry.Translate(x=objCenter[0], y=objCenter[1], z=objCenter[2])
    chRotAzMat = geometry.RotateZ(a=chAz)
    chRotElMat = geometry.RotateX(a=-chEl)
    chCamModelWorld = ch.dot(chToObjectTranslate, ch.dot(chRotAzMat, ch.dot(chRotElMat,chDistMat)))
    return chCamModelWorld
def computeSphericalHarmonics(vn, vc, light_color, components, ignoreFirstMesh=True):
    """Per-mesh spherical-harmonics shading.

    Returns, for each mesh, its vertex colors scaled by an SH
    irradiance term built from its normals.  When ignoreFirstMesh is
    True and there is more than one mesh, mesh 0 keeps its raw colors
    (shading factor 1.0), e.g. an unshaded background.
    """
    skipFirst = ignoreFirstMesh and len(vn) > 1
    shadingTerms = []
    for meshIdx, meshNormals in enumerate(vn):
        if skipFirst and meshIdx == 0:
            shadingTerms.append(1.)
        else:
            shadingTerms.append(SphericalHarmonics(vn=meshNormals, components=components, light_color=light_color))
    return [shading * colors for shading, colors in zip(shadingTerms, vc)]
def computeGlobalAndDirectionalLighting(vn, vc, chLightAzimuth, chLightElevation, chLightIntensity, chGlobalConstant):
    """Shade each mesh with a single directional light plus a constant
    ambient term (differentiable).

    The light direction is -Rz(az) Rx(el) [0,0,-1]; each mesh's colors
    become vc * (intensity * max(n.l, 0) + globalConstant).
    """
    # Construct point light source
    rangeMeshes = range(len(vn))
    vc_list = []
    chRotAzMat = geometry.RotateZ(a=chLightAzimuth)[0:3,0:3]
    chRotElMat = geometry.RotateX(a=chLightElevation)[0:3,0:3]
    chLightVector = -ch.dot(chRotAzMat, ch.dot(chRotElMat, np.array([0,0,-1])))
    for mesh in rangeMeshes:
        # Lambertian term, clamped at zero for back-facing normals.
        l1 = ch.maximum(ch.dot(vn[mesh], chLightVector).reshape((-1,1)), 0.)
        vcmesh = vc[mesh] * (chLightIntensity * l1 + chGlobalConstant)
        vc_list = vc_list + [vcmesh]
    return vc_list
def computeGlobalAndPointLighting(v, vn, vc, light_pos, globalConstant, light_color):
    """Shade each mesh with a Lambertian point light plus a constant
    ambient term: colors * (pointLight + globalConstant)."""
    shaded = []
    for verts, normals, colors in zip(v, vn, vc):
        pointTerm = LambertianPointLight(
            v=verts,
            vn=normals,
            num_verts=len(verts),
            light_pos=light_pos,
            vc=colors,
            light_color=light_color)
        shaded.append(colors * (pointTerm + globalConstant))
    return shaded
def setupTexturedRenderer(renderer, vstack, vch, f_list, vc_list, vnch, uv, haveTextures_list, textures_list, camera, frustum, sharedWin=None):
    """Flatten the nested per-object/per-mesh geometry lists and configure the
    renderer in place via renderer.set().

    Face indices are offset so they address the single stacked vertex array;
    normals, colours, UVs and textures are flattened/stacked one level.
    """
    f = []
    # Flatten the outer (per-object) nesting of the face lists.
    f_listflat = [item for sublist in f_list for item in sublist]
    lenMeshes = 0
    for mesh_i, mesh in enumerate(f_listflat):
        polygonLen = 0
        for polygons in mesh:
            # Offset indices by the vertex count of all preceding meshes.
            f = f + [polygons + lenMeshes]
            polygonLen += len(polygons)
        lenMeshes += len(vch[mesh_i])
    fstack = np.vstack(f)
    # vnflat = [item for sublist in vnch for item in sublist]
    # With a single mesh, skip the needless ch.vstack graph node.
    if len(vnch)==1:
        vnstack = vnch[0]
    else:
        vnstack = ch.vstack(vnch)
    # vc_listflat = [item for sublist in vc_list for item in sublist]
    if len(vc_list)==1:
        vcstack = vc_list[0]
    else:
        vcstack = ch.vstack(vc_list)
    uvflat = [item for sublist in uv for item in sublist]
    ftstack = np.vstack(uvflat)
    texturesch = []
    textures_listflat = [item for sublist in textures_list for item in sublist]
    # Collect only the textures that actually exist (entries may be None).
    for texture_list in textures_listflat:
        if texture_list is not None:
            for texture in texture_list:
                if texture is not None:
                    texturesch = texturesch + [ch.array(texture)]
    # Concatenate every texture into one flat chumpy vector for the renderer.
    if len(texturesch) == 0:
        texture_stack = ch.Ch([])
    elif len(texturesch) == 1:
        texture_stack = texturesch[0].ravel()
    else:
        texture_stack = ch.concatenate([tex.ravel() for tex in texturesch])
    haveTextures_listflat = [item for sublist in haveTextures_list for item in sublist]
    renderer.set(camera=camera, frustum=frustum, v=vstack, f=fstack, vn=vnstack, vc=vcstack, ft=ftstack, texture_stack=texture_stack, v_list=vch, f_list=f_listflat, vc_list=vc_list, ft_list=uvflat, textures_list=textures_listflat, haveUVs_list=haveTextures_listflat, bgcolor=ch.ones(3), overdraw=True)
    renderer.msaa = True
    renderer.sharedWin = sharedWin
    # renderer.clear()
    # renderer.useShaderErrors = True
    # renderer.initGL()
    # renderer.initGLTexture()
def addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, vmod, fmod_list, vcmod, vnmod, uvmod, haveTexturesmod_list, texturesmod_list):
    """Prepend one object's data to each per-scene list (modifies them in place)."""
    sceneLists = (v, f_list, vc, vn, uv, haveTextures_list, textures_list)
    newItems = (vmod, fmod_list, vcmod, vnmod, uvmod, haveTexturesmod_list, texturesmod_list)
    for lst, item in zip(sceneLists, newItems):
        lst.insert(0, item)
def addObjectDataLast(v, f_list, vc, vn, uv, haveTextures_list, textures_list, vmod, fmod_list, vcmod, vnmod, uvmod, haveTexturesmod_list, texturesmod_list):
    """Insert one object's data just BEFORE the final entry of each per-scene
    list (in place).  Note list.insert(-1, ...) does not append: the last slot
    keeps its position — presumably reserved (e.g. a background mesh)."""
    sceneLists = (v, f_list, vc, vn, uv, haveTextures_list, textures_list)
    newItems = (vmod, fmod_list, vcmod, vnmod, uvmod, haveTexturesmod_list, texturesmod_list)
    for lst, item in zip(sceneLists, newItems):
        lst.insert(-1, item)
def removeObjectData(objIdx, v, f_list, vc, vn, uv, haveTextures_list, textures_list):
    """Delete the object at index objIdx from every per-scene list (in place)."""
    for lst in (v, f_list, vc, vn, uv, haveTextures_list, textures_list):
        del lst[objIdx]
def transformObject2(v, vn, chScale, chObjAz, chPosition):
    """Scale, rotate about Z by chObjAz and translate each mesh.

    chScale may hold 1 (uniform), 2 (xy, z) or 3 (x, y, z) components.
    Normals are transformed with the inverse transpose and re-normalised.
    Returns (transformed vertices, transformed normals), one entry per mesh.
    """
    # Expand chScale into per-axis factors.
    if chScale.size == 1:
        sx, sy, sz = chScale[0], chScale[0], chScale[0]
    elif chScale.size == 2:
        sx, sy, sz = chScale[0], chScale[0], chScale[1]
    else:
        sx, sy, sz = chScale[0], chScale[1], chScale[2]
    scaleMat = geometry.Scale(x=sx, y=sy, z=sz)[0:3, 0:3]
    rotZ = geometry.RotateZ(a=chObjAz)[0:3, 0:3]
    rotX = geometry.RotateX(a=0)[0:3, 0:3]
    transformation = ch.dot(ch.dot(rotZ, rotX), scaleMat)
    # Normals transform with the inverse transpose of the linear part.
    invTransp = ch.transpose(ch.linalg.inv(transformation))
    vtransf = []
    vntransf = []
    for verts, normals in zip(v, vn):
        vtransf.append(ch.dot(verts, transformation) + chPosition)
        newNormals = ch.dot(normals, invTransp)
        newNormals = newNormals / ch.sqrt(ch.sum(newNormals**2, 1))[:, None]
        vntransf.append(newNormals)
    return vtransf, vntransf
def getCubeData(scale=(2,2,2), st=False, rgb=np.array([1.0, 1.0, 1.0])):
    """Build a unit triangle cube: returns (vertices, faces, normals,
    vertex colours, textures list, have-textures flags) in the list format
    used by the renderer setup helpers.

    NOTE(review): the `scale` and `st` parameters are accepted but ignored —
    create_cube is called with hard-coded scale=(1,1,1) and st=False.  Confirm
    whether callers rely on the hard-coded values before wiring them through.
    """
    dataCube, facesCube = create_cube(scale=(1,1,1), st=False, rgba=np.array([rgb[0], rgb[1], rgb[2], 1.0]), dtype='float32', type='triangles')
    # Columns 0:3 are positions, 3:6 are per-vertex colours.
    verticesCube = ch.Ch(dataCube[:,0:3])
    UVsCube = ch.Ch(np.zeros([verticesCube.shape[0],2]))
    facesCube = facesCube.reshape([-1,3])
    normalsCube = geometry.chGetNormals(verticesCube, facesCube)
    # Untextured: one mesh, no texture data.
    haveTexturesCube = [[False]]
    texturesListCube = [[None]]
    vColorsCube = ch.Ch(dataCube[:,3:6])
    return verticesCube, facesCube, normalsCube, vColorsCube, texturesListCube, haveTexturesCube
| 36,041 | 42.11244 | 359 | py |
inversegraphics | inversegraphics-master/image_processing.py | __author__ = 'pol'
from skimage.feature import hog
from skimage import data, color, exposure
import numpy as np
import ipdb
import skimage.color
from numpy.core.umath_tests import matrix_multiply
# def xyz2labCh(xyz, illuminant="D65", observer="2"):
# """XYZ to CIE-LAB color space conversion.
# Parameters
# ----------
# xyz : array_like
# The image in XYZ format, in a 3- or 4-D array of shape
# ``(.., ..,[ ..,] 3)``.
# illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
# The name of the illuminant (the function is NOT case sensitive).
# observer : {"2", "10"}, optional
# The aperture angle of the observer.
# Returns
# -------
# out : ndarray
# The image in CIE-LAB format, in a 3- or 4-D array of shape
# ``(.., ..,[ ..,] 3)``.
# Raises
# ------
# ValueError
# If `xyz` is not a 3-D array of shape ``(.., ..,[ ..,] 3)``.
# ValueError
# If either the illuminant or the observer angle is unsupported or
# unknown.
# Notes
# -----
# By default Observer= 2A, Illuminant= D65. CIE XYZ tristimulus values
# x_ref=95.047, y_ref=100., z_ref=108.883. See function `get_xyz_coords` for
# a list of supported illuminants.
# References
# ----------
# .. [1] http://www.easyrgb.com/index.php?X=MATH&H=07#text7
# .. [2] http://en.wikipedia.org/wiki/Lab_color_space
# Examples
# --------
#
# """
# arr = xyz
#
# xyz_ref_white = skimage.color.get_xyz_coords(illuminant, observer)
#
# # scale by CIE XYZ tristimulus values of the reference white point
# arr = arr / xyz_ref_white
#
# # Nonlinear distortion and linear transformation
# mask = arr > 0.008856
# notmask = arr <= 0.008856
# arr[mask] = ch.power(arr[mask], 1. / 3.)
# arr[notmask] = 7.787 * arr[notmask] + 16. / 116.
#
# x, y, z = arr[..., 0], arr[..., 1], arr[..., 2]
#
# # Vector scaling
# L = (116. * y) - 16.
# a = 500.0 * (x - y)
# b = 200.0 * (y - z)
#
# return ch.concatenate([x[..., np.newaxis] for x in [L, a, b]], axis=-1)
#
# def rgb2xyz(rgb):
# """RGB to XYZ color space conversion.
# Parameters
# ----------
# rgb : array_like
# The image in RGB format, in a 3- or 4-D array of shape
# ``(.., ..,[ ..,] 3)``.
# Returns
# -------
# out : ndarray
# The image in XYZ format, in a 3- or 4-D array of shape
# ``(.., ..,[ ..,] 3)``.
# Raises
# ------
# ValueError
# If `rgb` is not a 3- or 4-D array of shape ``(.., ..,[ ..,] 3)``.
# Notes
# -----
# The CIE XYZ color space is derived from the CIE RGB color space. Note
# however that this function converts from sRGB.
# References
# ----------
# .. [1] http://en.wikipedia.org/wiki/CIE_1931_color_space
# Examples
# --------
#
# """
# # Follow the algorithm from http://www.easyrgb.com/index.php
# # except we don't multiply/divide by 100 in the conversion
# arr = rgb
# mask = arr > 0.04045
# notmask = arr <= 0.04045
# arr[mask] = ch.power((arr[mask] + 0.055) / 1.055, 2.4)
# arr[notmask] /= 12.92
# return _convert(xyz_from_rgb, arr)
def scaleInvariantMSECoeff(x_pred, x_target):
    """Per-row least-squares scale s_i minimising ||s_i * x_pred[i] - x_target[i]||^2.

    Rows index test samples, columns target variables.
    Returns a 1-D array with one optimal scale per row:
    s_i = <pred_i, target_i> / <pred_i, pred_i>.
    """
    # Row-wise dot products via einsum; replaces the private (and since
    # removed) numpy.core.umath_tests.matrix_multiply batched call.
    num = np.einsum('ij,ij->i', x_pred, x_target)
    den = np.einsum('ij,ij->i', x_pred, x_pred)
    return num / den
def scaleInvariantColourDifference(rgbGT, rgbPred):
    """Per-pixel L2 colour error after optimally rescaling each predicted colour."""
    perPixelScale = scaleInvariantMSECoeff(rgbPred, rgbGT)
    residual = rgbGT - perPixelScale[:, None] * rgbPred
    return np.sqrt(np.sum(residual**2, axis=1))
def cColourDifference(rgb1, rgb2):
    """Chromatic distance between two RGB colour sets: Euclidean distance over
    the a/b channels only of OpenCV Lab space (lightness ignored)."""
    def toLab(rgb):
        return cv2.cvtColor(np.uint8(rgb.reshape([-1, 1, 3]) * 255), cv2.COLOR_RGB2LAB) / 255
    # Drop channel 0 (L); compare only the chroma channels.
    chromaDiff = toLab(rgb1)[:, 0, 1:] - toLab(rgb2)[:, 0, 1:]
    return np.sqrt(np.sum(chromaDiff**2, axis=1))
def eColourDifference(rgb1, rgb2):
    """Full Euclidean distance between two RGB colour sets in OpenCV Lab space
    (all three channels, including lightness)."""
    def toLab(rgb):
        return cv2.cvtColor(np.uint8(rgb.reshape([-1, 1, 3]) * 255), cv2.COLOR_RGB2LAB) / 255
    labDiff = toLab(rgb1)[:, 0] - toLab(rgb2)[:, 0]
    return np.sqrt(np.sum(labDiff**2, axis=1))
# conf.cellSize = cellSize;
# conf.numOrientations = 9;
def computeHoG(image):
    """Flattened 9-orientation, 8x8-cell HOG descriptor of an RGB image.

    NOTE: passes the legacy `visualise` keyword; newer scikit-image renamed it
    to `visualize` — confirm the pinned skimage version before upgrading.
    """
    grayImage = color.rgb2gray(image)
    hog_descr, _hog_vis = hog(grayImage, orientations=9, pixels_per_cell=(8, 8),
                              cells_per_block=(1, 1), visualise=True)
    return hog_descr
def computeHoGFeatures(images):
    """Stack per-image HOG descriptors into a (numImages, numFeatures) array."""
    return np.vstack([computeHoG(image)[None, :] for image in images])
def computeIllumFeatures(images, numFreq):
    """Stack the first numFreq Fourier illumination features (40-px window) of
    each image into a (numImages, numFeatures) array."""
    window = 40
    rows = [featuresIlluminationDirection(image, window)[:numFreq, :].ravel()[None, :]
            for image in images]
    return np.vstack(rows)
def featuresIlluminationDirection(image, win):
    """FFT magnitude/phase features of the central (2*win x 2*win) crop.

    Returns an array of shape (4*win*win, 2): column 0 magnitudes,
    column 1 phases.

    Bug fix: the original used `shape / 2` as slice bounds, which is a float
    under Python 3 and raises TypeError; integer // division is used instead.
    """
    gray = color.rgb2gray(image)
    cy = gray.shape[0] // 2
    cx = gray.shape[1] // 2
    coeffs = np.fft.fft2(gray[cy - win:cy + win, cx - win:cx + win])
    magnitudes = np.abs(coeffs)  # == sqrt(real^2 + imag^2)
    phases = np.angle(coeffs)
    return np.hstack([magnitudes.ravel()[:, None], phases.ravel()[:, None]])
from chumpy import depends_on, Ch
import chumpy as ch
from math import radians
import cv2
import scipy
import matplotlib.pyplot as plt
def dr_wrt_convolution(x, filter):
    """Sparse Jacobian of a 'same' 2-D convolution of x with filter, w.r.t. x.

    Row (i*W + j) holds the raveled filter footprint contributing to output
    pixel (i, j).  Returns a CSC matrix of shape (H*W, H*W).

    Fix: np.int was removed in NumPy 1.24; the builtin int is equivalent.
    """
    print("Computing convolution gradients")
    heightRolls = x.shape[0]
    widthRolls = x.shape[1]
    tmpShape = [x.shape[0] + filter.shape[0], x.shape[1] + filter.shape[1]]
    # Padded template with the filter planted in its top-left corner.
    template = np.zeros(tmpShape)
    template[0:filter.shape[0], 0:filter.shape[1]] = filter
    halfH = int(filter.shape[0] / 2)
    halfW = int(filter.shape[1] / 2)
    jacs = []
    for i in range(heightRolls):
        for j in range(widthRolls):
            # Shift the filter footprint to output position (i, j).
            templateRolled = np.roll(template, shift=i, axis=0)
            templateRolled = np.roll(templateRolled, shift=j, axis=1)
            templateGrad = templateRolled[tmpShape[0] - x.shape[0] - halfH: tmpShape[0] - halfH,
                                          tmpShape[1] - x.shape[1] - halfW: tmpShape[1] - halfW]
            jacs.append(scipy.sparse.coo_matrix(templateGrad.ravel()))
    return scipy.sparse.vstack(jacs).tocsc()
class convolve2D(Ch):
    """Differentiable 2-D convolution node: convolves the chumpy input `x`
    with a fixed `filter` using reflect padding.  The Jacobian w.r.t. `x` is
    not derived here — a precomputed sparse matrix must be supplied as the
    `convolve2DDr` attribute (see dr_wrt_convolution)."""
    terms = 'filter'
    dterms = 'x'
    def compute_r(self):
        # convolved = scipy.signal.convolve2d(self.x, self.filter, mode='same')
        convolved = scipy.ndimage.convolve(self.x, self.filter, mode='reflect')
        # return convolved[np.int((convolved.shape[0]-self.x.shape[0])/2):np.int((convolved.shape[0]-self.x.shape[0])/2) + self.x.shape[1], np.int((convolved.shape[1]-self.x.shape[1])/2):np.int((convolved.shape[1]-self.x.shape[1])/2) + self.x.shape[1]]
        return convolved
    def compute_dr_wrt(self, wrt):
        # Constant precomputed sparse Jacobian for x; None for anything else.
        if wrt is self.x:
            return self.convolve2DDr
        else:
            return None
class HogImage(Ch):
    """Non-differentiable visualisation node: renders a HOG descriptor (`hog`,
    shaped cellsY x cellsX x numOrient) as a line-sketch image the same size
    as `image`, one oriented line per cell/orientation weighted by the HOG
    magnitude.  compute_dr_wrt returns None, so this is display-only."""
    terms = 'numOrient', 'cwidth', 'cheight'
    dterms = 'image', 'hog'
    def compute_r(self):
        from skimage import draw
        sy,sx, _ = self.image.shape
        # Line half-length that fits inside one cell.
        radius = min(self.cwidth, self.cheight) // 2 - 1
        orientations_arr = np.arange(self.numOrient)
        # Line direction for each orientation bin (spread over 180 degrees).
        dx_arr = radius * np.cos(orientations_arr / self.numOrient * np.pi)
        dy_arr = radius * np.sin(orientations_arr / self.numOrient * np.pi)
        cr2 = self.cheight + self.cheight
        cc2 = self.cwidth + self.cwidth
        hog_image = np.zeros((sy, sx), dtype=float)
        n_cellsx = int(np.floor(sx // self.cwidth)) # number of cells in x
        n_cellsy = int(np.floor(sy // self.cheight)) # number of cells in y
        for x in range(n_cellsx):
            for y in range(n_cellsy):
                for o, dx, dy in zip(orientations_arr, dx_arr, dy_arr):
                    centre = tuple([y * cr2 // 2, x * cc2 // 2])
                    rr, cc = draw.line(int(centre[0] + dy),
                                       int(centre[1] + dx),
                                       int(centre[0] - dy),
                                       int(centre[1] - dx))
                    # Accumulate line intensity weighted by the HOG bin value.
                    hog_image[rr, cc] += self.hog[y, x, o]
        return hog_image
    def compute_dr_wrt(self, wrt):
        return None
import skimage
def diffHog(image, drconv=None, numOrient = 9, cwidth=8, cheight=8):
    """Differentiable HOG descriptor of an RGB chumpy image.

    Returns (v, hog_image, drconv): the L2-normalised cell descriptor, a
    HogImage visualisation node, and the (reusable) sparse convolution
    Jacobian.  Pass `drconv` back in on subsequent calls to skip its
    expensive recomputation.

    Fix: np.int was removed in NumPy 1.24; the builtin int is equivalent.
    """
    # Luma-weighted grayscale conversion.
    imagegray = 0.3*image[:,:,0] + 0.59*image[:,:,1] + 0.11*image[:,:,2]
    sy,sx = imagegray.shape
    # Central-difference gradients, zero-padded at the borders.
    gx = imagegray[:, 2:] - imagegray[:, :-2]
    gx = ch.hstack([np.zeros([sy,1]), gx, np.zeros([sy,1])])
    gy = imagegray[2:, :] - imagegray[:-2, :]
    gy = ch.vstack([np.zeros([1,sx]), gy, np.zeros([1,sx])])
    gx += 1e-5  # avoid division by zero in gy/gx below
    # Triangular spatial weighting filter built from a distance transform.
    distFilter = np.ones([2*cheight,2*cwidth], dtype=np.uint8)
    distFilter[int(2*cheight/2), int(2*cwidth/2)] = 0
    distFilter = (cv2.distanceTransform(distFilter, cv2.DIST_L2, 3)- np.max(cv2.distanceTransform(distFilter, cv2.DIST_L2, 3)))/(-np.max(cv2.distanceTransform(distFilter, cv2.DIST_L2, 3)))
    magn = ch.sqrt(gy**2 + gx**2)*180/np.sqrt(2)
    angles = ch.arctan(gy/gx)*180/np.pi + 90
    orientations_arr = np.arange(numOrient)
    meanOrient = orientations_arr / numOrient * 180
    # Soft (linear) assignment of each pixel angle to orientation bins 1..n-1.
    fb_resttmp = 1 - ch.abs(ch.expand_dims(angles[:,:],2) - meanOrient[1:].reshape([1,1,numOrient-1]))*numOrient/180
    zeros_rest = np.zeros([sy,sx, numOrient-1, 1])
    fb_rest = ch.max(ch.concatenate([fb_resttmp[:,:,:,None], zeros_rest],axis=3), axis=3)
    # Bin 0 wraps around (0 and 180 degrees are the same orientation).
    chMinOrient0 = ch.min(ch.concatenate([ch.abs(ch.expand_dims(angles[:,:],2) - meanOrient[0].reshape([1,1,1]))[:,:,:,None], ch.abs(180 - ch.expand_dims(angles[:,:],2) - meanOrient[0].reshape([1,1,1]))[:,:,:,None]], axis=3), axis=3)
    zeros_fb0 = np.zeros([sy,sx, 1])
    fb0_tmp = ch.concatenate([1 - chMinOrient0[:,:]*numOrient/180, zeros_fb0],axis=2)
    fb_0 = ch.max(fb0_tmp,axis=2)
    fb = ch.concatenate([fb_0[:,:,None], fb_rest],axis=2)
    # Magnitude-weighted orientation responses.
    Fb = ch.expand_dims(magn,2)*fb
    if drconv is None:
        drconv = dr_wrt_convolution(Fb[:,:,0], distFilter)
    # Spatially pool each orientation channel with the shared Jacobian.
    Fs_list = [convolve2D(x=Fb[:,:,Fbi], filter=distFilter, convolve2DDr=drconv).reshape([Fb.shape[0], Fb.shape[1],1]) for Fbi in range(numOrient)]
    Fs = ch.concatenate(Fs_list, axis=2)
    # Sample one response per cell.
    Fcells = Fs[0:Fs.shape[0] :cheight,0:Fs.shape[1] :cwidth,:]
    epsilon = 1e-5
    v = Fcells/ch.sqrt(ch.sum(Fcells**2) + epsilon)
    hog_image = HogImage(image=image, hog=Fcells, numOrient=numOrient, cwidth=cwidth, cheight=cheight)
    return v, hog_image, drconv
import zernike
def zernikeProjection(images, numCoeffs, win=50):
    """Project the central (2*win x 2*win) crop of each image, per colour
    channel, onto the first numCoeffs Zernike polynomials.

    Returns coefficients of shape (numImages, numChannels, numCoeffs).

    Bug fix: the original used `shape / 2` as slice bounds, which is a float
    under Python 3 and raises TypeError; integer // division is used instead.
    """
    cy = images.shape[1] // 2
    cx = images.shape[2] // 2
    croppedImages = images[:, cy - win:cy + win, cx - win:cx + win, :]
    zpolys = zernikePolynomials(imageSize=(croppedImages.shape[1], croppedImages.shape[2]), numCoeffs=numCoeffs)
    coeffs = np.sum(np.sum(croppedImages[:,:,:,:,None]*zpolys.reshape([1,zpolys.shape[0], zpolys.shape[1], 1, -1]), axis=2), axis=1)
    return coeffs
def zernikeProjectionGray(images, numCoeffs, win=50):
    """Grayscale variant of zernikeProjection: RGB stacks are luma-converted
    first, then the central crop is projected onto the Zernike basis.

    Bug fix: the original used `shape / 2` as slice bounds, which is a float
    under Python 3 and raises TypeError; integer // division is used instead.
    """
    if images.shape[3] == 3:
        images = 0.3*images[:,:,:,0] + 0.59*images[:,:,:,1] + 0.11*images[:,:,:,2]
    cy = images.shape[1] // 2
    cx = images.shape[2] // 2
    croppedImages = images[:, cy - win:cy + win, cx - win:cx + win]
    zpolys = zernikePolynomials(imageSize=(croppedImages.shape[1], croppedImages.shape[2]), numCoeffs=numCoeffs)
    coeffs = np.sum(np.sum(croppedImages[:,:,:,None]*zpolys.reshape([1,zpolys.shape[0], zpolys.shape[1], -1]), axis=2), axis=1)
    return coeffs
def chZernikeProjection(image, numCoeffs=20, win=50):
    """Differentiable (chumpy) Zernike projection of one image's central crop.

    Returns (coeffs, imageProjections): per-channel coefficients and the
    intermediate per-pixel projections.

    Bug fix: the original used `shape / 2` as slice bounds, which is a float
    under Python 3 and raises TypeError; integer // division is used instead.
    """
    cy = image.shape[0] // 2
    cx = image.shape[1] // 2
    croppedImage = image[cy - win:cy + win, cx - win:cx + win, :]
    zpolys = zernikePolynomials(imageSize=(croppedImage.shape[0],croppedImage.shape[1]), numCoeffs=numCoeffs)
    imageProjections = croppedImage[:,:,:,None]*zpolys.reshape([zpolys.shape[0], zpolys.shape[1], 1, -1])
    coeffs = ch.sum(ch.sum(imageProjections, axis=0), axis=0)
    return coeffs, imageProjections
def zernikePolynomials(imageSize=(100, 100), numCoeffs=20):
    """First numCoeffs Zernike basis images over the disc inscribed in an
    imageSize grid; returns an array of shape (sy, sx, numCoeffs).

    Fix: np.bool was removed in NumPy 1.24; the builtin bool dtype is
    identical.
    """
    sy, sx = imageSize
    ones = np.ones([sy, sx], dtype=bool)
    imgind = np.where(ones)
    # Polar coordinates of every pixel about the image centre.
    dy = imgind[0] - sy // 2
    dx = imgind[1] - sx // 2
    pixaz = np.arctan2(dy, dx)
    pixrad = np.sqrt(dy**2 + dx**2)
    imaz = np.zeros([sy, sx])
    imrad = np.zeros([sy, sx])
    imaz[imgind] = pixaz
    imrad[imgind] = pixrad
    # Zero out everything outside the inscribed circle, then normalise radius.
    outcircle = imrad >= sy / 2
    imrad[outcircle] = 0
    imaz[outcircle] = 0
    imrad /= np.max(imrad)
    zpolys = [zernike.zernikel(j, imrad, imaz)[:, :, None] for j in range(numCoeffs)]
    return np.concatenate(zpolys, axis=2)
inversegraphics | inversegraphics-master/generative_models.py | import cv2
import numpy as np
import matplotlib.pyplot as plt
import ipdb
import scipy
import chumpy as ch
from chumpy.ch import MatVecMult, Ch, depends_on
def scoreImage(img, template, method, methodParams):
    """Dissimilarity score between an observed image and a rendered template.

    `method` selects the metric; `methodParams` carries its settings (Canny
    thresholds, robust scale, likelihood variances, ...).  Unknown methods
    score 0.
    """
    chamferMethods = ('chamferModelToData', 'robustChamferModelToData',
                      'chamferDataToModel', 'robustChamferDataToModel')
    if method in chamferMethods:
        thres = (methodParams['minThresImage'], methodParams['maxThresImage'],
                 methodParams['minThresTemplate'], methodParams['maxThresTemplate'])
        if method == 'chamferModelToData':
            return np.sum(chamferDistanceModelToData(img, template, *thres))
        if method == 'robustChamferModelToData':
            return robustDistance(np.sum(chamferDistanceModelToData(img, template, *thres)),
                                  methodParams['scale'])
        if method == 'chamferDataToModel':
            return np.sum(chamferDistanceDataToModel(img, template, *thres))
        return robustDistance(np.sum(chamferDistanceDataToModel(img, template, *thres)),
                              methodParams['scale'])
    if method == 'sqDistImages':
        return np.sum(sqDistImages(img, template)) / template.size
    if method == 'ignoreSqDistImages':
        # Only score pixels where the template is non-zero.
        residuals = sqDistImages(img, template)
        return np.sum(residuals * (template > 0)) / np.sum(template > 0)
    if method == 'robustSqDistImages':
        return robustDistance(sqDistImages(img, template), methodParams['scale'])
    if method == 'negLogLikelihoodRobust':
        return -modelLogLikelihoodRobust(img, template, methodParams['testMask'],
                                         methodParams['backgroundModel'],
                                         methodParams['layerPrior'],
                                         methodParams['variances'])
    if method == 'negLogLikelihood':
        return -modelLogLikelihood(img, template, methodParams['testMask'],
                                   methodParams['backgroundModel'],
                                   methodParams['variances'])
    return 0
def chamferDistanceModelToData(img, template, minThresImage, maxThresImage, minThresTemplate, maxThresTemplate):
    """Mean distance from template (model) edge pixels to the nearest image
    (data) edge — a model-to-data chamfer score."""
    imgEdges = cv2.Canny(np.uint8(img*255), minThresImage,maxThresImage)
    tempEdges = cv2.Canny(np.uint8(template*255), minThresTemplate, maxThresTemplate)
    # Distance of every pixel to the nearest image edge.
    distToImgEdges = cv2.distanceTransform(~imgEdges, cv2.DIST_L2, 5)
    tempEdgeMask = tempEdges/255
    return np.sum(tempEdgeMask * distToImgEdges) / np.sum(tempEdges/255.0)
def chamferDistanceDataToModel(img, template, minThresImage, maxThresImage, minThresTemplate, maxThresTemplate):
    """Per-pixel chamfer contributions from image (data) edges to the nearest
    template (model) edge; unlike the model-to-data variant this returns the
    un-summed map (callers apply np.sum)."""
    imgEdges = cv2.Canny(np.uint8(img*255), minThresImage,maxThresImage)
    tempEdges = cv2.Canny(np.uint8(template*255), minThresTemplate, maxThresTemplate)
    # Distance of every pixel to the nearest template edge.
    distToTempEdges = cv2.distanceTransform(~tempEdges, cv2.DIST_L2, 5)
    imgEdgeMask = imgEdges/255.0
    return imgEdgeMask * distToTempEdges / np.sum(imgEdgeMask)
def sqDistImages(img, template):
    """Element-wise squared residuals between img and template."""
    return (img - template) ** 2
def computeVariances(sqResiduals):
    """Per-pixel variance estimate: average the squared residuals over axis 3
    (the sample axis)."""
    numSamples = sqResiduals.shape[-1]
    return sqResiduals.sum(axis=3) / numSamples
def pixelLayerPriors(masks):
    """Per-pixel foreground prior: fraction of the masks (stacked along
    axis 2) that are set at each pixel."""
    numMasks = masks.shape[-1]
    return masks.sum(axis=2) / numMasks
def globalLayerPrior(masks):
    """Scalar foreground prior: overall fraction of mask pixels that are set."""
    return np.mean(masks)
def modelLogLikelihoodRobust(image, template, testMask, backgroundModel, layerPriors, variances):
    """Total robust log-likelihood of image under the template (NumPy version):
    sum of log per-pixel robust likelihoods."""
    perPixel = pixelLikelihoodRobust(image, template, testMask, backgroundModel, layerPriors, variances)
    return np.log(perPixel).sum()
def modelLogLikelihoodRobustCh(image, template, testMask, backgroundModel, layerPriors, variances):
    """Differentiable (chumpy) total robust log-likelihood: sum of log
    per-pixel robust likelihoods."""
    perPixel = pixelLikelihoodRobustCh(image, template, testMask, backgroundModel, layerPriors, variances)
    return ch.sum(ch.log(perPixel))
def modelLogLikelihoodRobustRegionCh(image, template, testMask, backgroundModel, layerPriors, variances):
    """Differentiable total robust log-likelihood using the region (blurred)
    per-pixel likelihood variant."""
    perPixel = pixelLikelihoodRobustRegionCh(image, template, testMask, backgroundModel, layerPriors, variances)
    return ch.sum(ch.log(perPixel))
def modelLogLikelihood(image, template, testMask, backgroundModel, variances):
    """Total Gaussian log-likelihood of image under the template model.

    Bug fix: the original computed the sum but fell off the end of the
    function and returned None, which broke callers (e.g. scoreImage negates
    the result for its 'negLogLikelihood' method).
    """
    likelihood = pixelLikelihood(image, template, testMask, backgroundModel, variances)
    return np.sum(np.log(likelihood))
def modelLogLikelihoodCh(image, template, testMask, backgroundModel, variances):
    """Differentiable (chumpy) total Gaussian log-likelihood: sum of the
    per-pixel log-likelihood map."""
    perPixelLog = logPixelLikelihoodCh(image, template, testMask, backgroundModel, variances)
    return ch.sum(perPixelLog)
def pixelLikelihoodRobust(image, template, testMask, backgroundModel, layerPrior, variances):
    """Per-pixel robust likelihood: a per-channel Gaussian foreground term
    (weighted by layerPrior, multiplied across channels) mixed with a
    (1 - layerPrior) outlier term.  Pixels outside the mask score 1; with
    backgroundModel == 'FULL' every pixel is scored."""
    stddev = np.sqrt(variances)
    if backgroundModel == 'FULL':
        mask = np.ones(image.shape[0:2])
    else:
        mask = testMask
    outlierTerm = 1 - np.tile(layerPrior, image.shape[0:2])
    normConst = 1 / (stddev * np.sqrt(2 * np.pi))
    channelProbs = normConst * np.exp(-(image - template) ** 2 / (2 * variances)) * layerPrior
    foregroundProbs = np.prod(channelProbs, axis=2) + outlierTerm
    return foregroundProbs * mask + (1 - mask)
def pixelLikelihoodRobustSQErrorCh(sqeRenderer, testMask, backgroundModel, layerPrior, variances):
    """Differentiable per-pixel robust likelihood computed directly from a
    squared-error renderer node (sqeRenderer holds per-channel squared
    residuals): Gaussian foreground term mixed with a (1 - layerPrior)
    outlier term; unmasked pixels score 1."""
    sigma = ch.sqrt(variances)
    mask = testMask
    # 'FULL' background model scores every pixel.
    if backgroundModel == 'FULL':
        mask = np.ones(sqeRenderer.r.shape[0:2])
    # mask = np.repeat(mask[..., np.newaxis], 3, 2)
    repPriors = ch.tile(layerPrior, sqeRenderer.r.shape[0:2])
    # sum = np.sum(np.log(layerPrior * scipy.stats.norm.pdf(image, location = template, scale=np.sqrt(variances) ) + (1 - repPriors)))
    # uniformProbs = np.ones(image.shape)
    # Per-channel Gaussian density of the squared residuals.
    probs = ch.exp( - (sqeRenderer) / (2 * variances)) * (1./(sigma * np.sqrt(2 * np.pi)))
    # Product over the three channels, prior-weighted, plus outlier mass.
    foregroundProbs = (probs[:,:,0] * probs[:,:,1] * probs[:,:,2]) * layerPrior + (1 - repPriors)
    return foregroundProbs * mask + (1-mask)
def pixelLikelihoodRobustCh(image, template, testMask, backgroundModel, layerPrior, variances):
    """Differentiable (chumpy) per-pixel robust likelihood: per-channel
    Gaussian foreground density multiplied across channels and weighted by
    layerPrior, plus a (1 - layerPrior) outlier term; unmasked pixels
    score 1."""
    sigma = ch.sqrt(variances)
    mask = testMask
    # 'FULL' background model scores every pixel.
    if backgroundModel == 'FULL':
        mask = np.ones(image.shape[0:2])
    # mask = np.repeat(mask[..., np.newaxis], 3, 2)
    repPriors = ch.tile(layerPrior, image.shape[0:2])
    # sum = np.sum(np.log(layerPrior * scipy.stats.norm.pdf(image, location = template, scale=np.sqrt(variances) ) + (1 - repPriors)))
    # uniformProbs = np.ones(image.shape)
    probs = ch.exp( - (image - template)**2 / (2 * variances)) * (1./(sigma * np.sqrt(2 * np.pi)))
    foregroundProbs = (probs[:,:,0] * probs[:,:,1] * probs[:,:,2]) * layerPrior + (1 - repPriors)
    return foregroundProbs * mask + (1-mask)
def pixelLikelihoodRobustRegionCh(image, template, testMask, backgroundModel, layerPrior, variances):
    """Region variant of the robust per-pixel likelihood: the image is
    Gaussian-blurred (3x3, sigma 1) before comparison against the template.

    NOTE(review): blurred_template is computed but never used — the residual
    compares the blurred image against the *raw* template.  Confirm whether
    that asymmetry is intentional.
    """
    sigma = ch.sqrt(variances)
    mask = testMask
    # 'FULL' background model scores every pixel.
    if backgroundModel == 'FULL':
        mask = np.ones(image.shape[0:2])
    # mask = np.repeat(mask[..., np.newaxis], 3, 2)
    repPriors = ch.tile(layerPrior, image.shape[0:2])
    # sum = np.sum(np.log(layerPrior * scipy.stats.norm.pdf(image, location = template, scale=np.sqrt(variances) ) + (1 - repPriors)))
    # uniformProbs = np.ones(image.shape)
    imshape = image.shape
    from opendr.filters import filter_for
    from opendr.filters import GaussianKernel2D
    # Sparse blur operator applied via matrix-vector product (differentiable).
    blur_mtx = filter_for(imshape[0], imshape[1], imshape[2] if len(imshape)>2 else 1, kernel = GaussianKernel2D(3, 1))
    blurred_image = MatVecMult(blur_mtx, image).reshape(imshape)
    blurred_template = MatVecMult(blur_mtx, template).reshape(imshape)
    probs = ch.exp( - (blurred_image - template)**2 / (2 * variances)) * (1./(sigma * np.sqrt(2 * np.pi)))
    foregroundProbs = (probs[:,:,0] * probs[:,:,1] * probs[:,:,2]) * layerPrior + (1 - repPriors)
    return foregroundProbs * mask + (1-mask)
import chumpy as ch
from chumpy import depends_on, Ch
class EdgeFilter(Ch):
    """Chumpy node selecting the Gaussian-blurred residual (renderer minus
    rendererGT) only at the renderer's boundary pixels; the derivative flows
    through the same blurred-difference expression."""
    dterms = ['renderer', 'rendererGT']
    def compute_r(self):
        return self.blurredDiff()
    def compute_dr_wrt(self, wrt):
        # Differentiate only w.r.t. the renderer (ground truth is fixed).
        if wrt is self.renderer:
            return self.blurredDiff().dr_wrt(self.renderer)
    def blurredDiff(self):
        # Boolean mask of silhouette/boundary pixels from the renderer.
        edges = self.renderer.boundarybool_image
        imshape = self.renderer.shape
        # Replicate the mask across the three colour channels.
        # (np.bool is an alias removed in NumPy >= 1.24 — builtin bool works.)
        rgbEdges = np.tile(edges.reshape([imshape[0],imshape[1],1]),[1,1,3]).astype(np.bool)
        from opendr.filters import filter_for
        from opendr.filters import GaussianKernel2D
        # 3x3 sigma-1 Gaussian blur as a sparse matrix product.
        blur_mtx = filter_for(imshape[0], imshape[1], imshape[2] if len(imshape)>2 else 1, kernel = GaussianKernel2D(3, 1))
        blurred_diff = MatVecMult(blur_mtx, self.renderer - self.rendererGT).reshape(imshape)
        return blurred_diff[rgbEdges]
class NLLRobustModel(Ch):
    """Negative log-likelihood of the rendering under a robust per-pixel model:
    a Gaussian foreground term mixed (with weight map Q) against a uniform
    outlier term; background pixels score 1.  The gradient w.r.t. the renderer
    is written out analytically in compute_dr_wrt."""
    terms = ['Q', 'variances']
    dterms = ['renderer', 'groundtruth']
    def compute_r(self):
        # Scalar NLL summed over all pixels.
        return -np.sum(np.log(self.prob))
    def compute_dr_wrt(self, wrt):
        if wrt is self.renderer:
            # Foreground pixels: those covered by the first mesh.
            # (np.bool is an alias removed in NumPy >= 1.24.)
            fgMask = np.array(self.renderer.image_mesh_bool([0])).astype(np.bool)
            # Chain rule of -log(prob) through the channel-product Gaussian.
            dr = (-1./(self.prob) * fgMask * self.fgProb[:,:,0]*self.fgProb[:,:,1]*self.fgProb[:,:,2] * self.Q[:, :])[:, :, None] * ((self.groundtruth.r - self.renderer.r)/self.variances.r)
            return dr.ravel()
    @depends_on(dterms)
    def fgProb(self):
        # Per-channel Gaussian density of the rendering against ground truth.
        return np.exp(- (self.renderer.r - self.groundtruth.r) ** 2 / (2 * self.variances.r)) * (1. / (np.sqrt(self.variances.r) * np.sqrt(2 * np.pi)))
    @depends_on(dterms)
    def prob(self):
        h = self.renderer.r.shape[0]
        w = self.renderer.r.shape[1]
        occProb = np.ones([h, w])
        bgProb = np.ones([h, w])
        fgMask = np.array(self.renderer.image_mesh_bool([0])).astype(np.bool)
        # Mixture: Q-weighted Gaussian product vs. uniform; background -> 1.
        errorFun = fgMask[:, :]*(self.Q[:, :] * self.fgProb[:,:,0]*self.fgProb[:,:,1]*self.fgProb[:,:,2] + (1-self.Q[:, :]))+ (1- fgMask[:, :])
        return errorFun
    # @depends_on(dterms)
    # def prob(self):
    #     h = self.renderer.r.shape[0]
    #     w = self.renderer.r.shape[1]
    #
    #     occProb = np.ones([h, w])
    #     bgProb = np.ones([h, w])
    #
    #     fgMask = np.array(self.renderer.image_mesh_bool([0])).astype(np.bool)
    #
    #     errorFun = fgMask[:, :, None] * ((self.Q[0][:, :, None] * self.fgProb) + (self.Q[1] * occProb + self.Q[2] * bgProb)[:, :, None]) + (1 - fgMask[:, :, None])
    #
    #     return errorFun
class NLLRobustSQErrorModel(Ch):
    """Robust NLL variant driven by a squared-error renderer node: sqeRenderer
    exposes per-channel squared residuals directly, so the Gaussian density is
    evaluated on sqeRenderer.r rather than on an image difference."""
    terms = ['Q', 'variances']
    dterms = ['sqeRenderer']
    def compute_r(self):
        # Scalar NLL summed over all pixels.
        return -np.sum(np.log(self.prob))
    def compute_dr_wrt(self, wrt):
        if wrt is self.sqeRenderer:
            # (np.bool is an alias removed in NumPy >= 1.24.)
            fgMask = np.array(self.sqeRenderer.image_mesh_bool([0])).astype(np.bool)
            # dr = (-1./(self.prob) * fgMask * self.fgProb[:,:,0]*self.fgProb[:,:,1]*self.fgProb[:,:,2] * self.Q[:, :])[:, :, None] * ((self.sqeRenderer.imageGT.r - self.sqeRenderer.render_image)/self.variances.r)
            # Derivative of -log(prob) w.r.t. the squared residuals.
            dr = np.tile((-1./(self.prob) * fgMask * self.fgProb[:,:,0]*self.fgProb[:,:,1]*self.fgProb[:,:,2] * self.Q[:, :])[:, :, None], [1,1,3]) * (-0.5/self.variances.r)
            # dr = (-1./(self.prob[:, :,None]) * fgMask[:, :,None] * self.fgProb * self.Q[:, :,None]) * (-0.5/self.variances.r)
            return dr.ravel()
    @depends_on(dterms)
    def fgProb(self):
        # Per-channel Gaussian density from the squared residuals.
        return np.exp(- (self.sqeRenderer.r) / (2 * self.variances.r)) * (1. / (np.sqrt(self.variances.r) * np.sqrt(2 * np.pi)))
    @depends_on(dterms)
    def prob(self):
        h = self.sqeRenderer.r.shape[0]
        w = self.sqeRenderer.r.shape[1]
        occProb = np.ones([h, w])
        bgProb = np.ones([h, w])
        fgMask = np.array(self.sqeRenderer.image_mesh_bool([0])).astype(np.bool)
        # Mixture: Q-weighted Gaussian product vs. uniform; background -> 1.
        errorFun = fgMask[:, :]*(self.Q[:, :] * self.fgProb[:,:,0]*self.fgProb[:,:,1]*self.fgProb[:,:,2] + (1-self.Q[:, :]))+ (1- fgMask[:, :])
        return errorFun
class NLLCRFModel(Ch):
    """Negative log-likelihood under a CRF-style three-component mixture:
    per-pixel weights Q[0]/Q[1]/Q[2] blend a Gaussian foreground density with
    uniform occluder and background components; pixels outside the foreground
    mask score 1.

    Fixes relative to the original:
    - the class defined `prob` twice; the first definition was dead code
      (immediately shadowed by the second) and has been removed;
    - np.bool (removed in NumPy 1.24) replaced with the builtin bool.
    """
    terms = ['Q', 'variances']
    dterms = ['renderer', 'groundtruth']

    def compute_r(self):
        # Scalar NLL summed over all pixels.
        return -np.sum(np.log(self.prob))

    def compute_dr_wrt(self, wrt):
        if wrt is self.renderer:
            fgMask = np.array(self.renderer.image_mesh_bool([0])).astype(bool)
            # Chain rule of -log(prob) through the Gaussian foreground term.
            dr = -1./(self.prob) * fgMask[:, :, None] * self.fgProb * self.Q[0][:, :, None] * ((self.groundtruth.r - self.renderer.r)/self.variances.r)
            return dr.ravel()

    @depends_on(dterms)
    def fgProb(self):
        # Per-channel Gaussian density of the rendering against ground truth.
        return np.exp(- (self.renderer.r - self.groundtruth.r) ** 2 / (2 * self.variances.r)) * (1. / (np.sqrt(self.variances.r) * np.sqrt(2 * np.pi)))

    @depends_on(dterms)
    def prob(self):
        h = self.renderer.r.shape[0]
        w = self.renderer.r.shape[1]
        # Occluder and background components are uniform (value 1).
        occProb = np.ones([h, w])
        bgProb = np.ones([h, w])
        fgMask = np.array(self.renderer.image_mesh_bool([0])).astype(bool)
        errorFun = fgMask[:, :, None] * ((self.Q[0][:, :, None] * self.fgProb) + (self.Q[1] * occProb + self.Q[2] * bgProb)[:, :, None]) + (1 - fgMask[:, :, None])
        return errorFun
class LogCRFModel(Ch):
    """Per-pixel log-likelihood under the CRF-style three-component mixture
    (Q[0] Gaussian foreground, Q[1]/Q[2] uniform occluder/background),
    expressed entirely as a chumpy graph so derivatives come for free."""
    terms = ['groundtruth', 'Q', 'variances']
    dterms = ['renderer']
    def compute_r(self):
        return self.logProb()
    def compute_dr_wrt(self, wrt):
        # Delegate differentiation to the chumpy expression itself.
        if wrt is self.renderer:
            return self.logProb().dr_wrt(self.renderer)
    def logProb(self):
        # Per-channel Gaussian density (differentiable in the renderer).
        fgProb = ch.exp(- (self.renderer - self.groundtruth.r) ** 2 / (2 * self.variances.r)) * (1. / (np.sqrt(self.variances.r)* np.sqrt(2 * np.pi)))
        h = self.renderer.r.shape[0]
        w = self.renderer.r.shape[1]
        occProb = np.ones([h, w])
        bgProb = np.ones([h, w])
        # Foreground = pixels covered by the first mesh.
        # (np.bool is an alias removed in NumPy >= 1.24.)
        fgMask = np.array(self.renderer.image_mesh_bool([0])).astype(np.bool)
        errorFun = ch.log(fgMask[:, :, None]*((self.Q[0][:, :, None] * fgProb) + (self.Q[1] * occProb + self.Q[2] * bgProb)[:, :, None]) + (1- fgMask[:, :, None]))
        return errorFun
class LogRobustModelOld(Ch):
    """Older robust per-pixel log-likelihood node: masks to the first mesh's
    footprint and defers to pixelLikelihoodRobustCh.  Superseded by
    LogRobustModel (which adds the useMask switch)."""
    dterms = ['renderer', 'groundtruth', 'foregroundPrior', 'variances']
    def compute_r(self):
        return self.logProb()
    def compute_dr_wrt(self, wrt):
        # Delegate differentiation to the chumpy expression itself.
        if wrt is self.renderer:
            return self.logProb().dr_wrt(self.renderer)
    def logProb(self):
        visibility = self.renderer.visibility_image
        visible = visibility != 4294967295
        # The mesh-footprint mask immediately overwrites the visibility mask.
        # (np.bool is an alias removed in NumPy >= 1.24.)
        visible = np.array(self.renderer.image_mesh_bool([0])).copy().astype(np.bool)
        return ch.log(pixelLikelihoodRobustCh(self.groundtruth, self.renderer, visible, 'MASK', self.foregroundPrior, self.variances))
class LogRobustModel(Ch):
    """Robust per-pixel log-likelihood node.  When useMask is set, pixels are
    scored only where the renderer's indices_image is non-zero (index 0 is
    presumably a background mesh — confirm); otherwise every pixel counts."""
    terms = ['useMask']
    dterms = ['renderer', 'groundtruth', 'foregroundPrior', 'variances']
    def compute_r(self):
        return self.logProb()
    def compute_dr_wrt(self, wrt):
        # Delegate differentiation to the chumpy expression itself.
        if wrt is self.renderer:
            return self.logProb().dr_wrt(self.renderer)
    def logProb(self):
        # visibility = self.renderer.visibility_image
        # visible = visibility != 4294967295
        # Default useMask to False if the term was never supplied.
        try:
            self.useMask
        except:
            self.useMask = False
        if self.useMask:
            visible = self.renderer.indices_image != 0
        else:
            visible = np.ones_like(self.renderer.indices_image.astype(np.bool))
        # visible = np.array(self.renderer.image_mesh_bool([0])).copy().astype(np.bool)
        return ch.log(pixelLikelihoodRobustCh(self.groundtruth, self.renderer, visible, 'MASK', self.foregroundPrior, self.variances))
class LogRobustSQErrorModel(Ch):
    """Robust per-pixel log-likelihood node driven by a squared-error renderer
    (sqeRenderer exposes per-channel squared residuals directly); masks to the
    first mesh's footprint."""
    dterms = ['sqeRenderer', 'foregroundPrior', 'variances']
    def compute_r(self):
        return self.logProb()
    def compute_dr_wrt(self, wrt):
        # Delegate differentiation to the chumpy expression itself.
        if wrt is self.sqeRenderer:
            return self.logProb().dr_wrt(self.sqeRenderer)
    def logProb(self):
        visibility = self.sqeRenderer.visibility_image
        # visible = visibility != 4294967295
        # (np.bool is an alias removed in NumPy >= 1.24.)
        visible = np.array(self.sqeRenderer.image_mesh_bool([0])).copy().astype(np.bool)
        return ch.log(pixelLikelihoodRobustSQErrorCh(self.sqeRenderer, visible, 'MASK', self.foregroundPrior, self.variances))
class LogRobustModelRegion(Ch):
    """Region-based robust log-likelihood of a rendered image vs. ground truth.

    Same contract as LogRobustModelOld but delegates to the region variant of
    the robust pixel likelihood.
    """
    dterms = ['renderer', 'groundtruth', 'foregroundPrior', 'variances']

    def compute_r(self):
        return self.logProb()

    def compute_dr_wrt(self, wrt):
        # Only the renderer is differentiated; other dterms get no gradient.
        if wrt is self.renderer:
            return self.logProb().dr_wrt(self.renderer)

    def logProb(self):
        # Pixels covered by mesh 0 count as visible.
        # Fix: np.bool was removed in NumPy 1.24; also dropped the dead
        # visibility-based assignments that were immediately overwritten.
        visible = np.array(self.renderer.image_mesh_bool([0])).copy().astype(bool)
        return ch.log(pixelLikelihoodRobustRegionCh(self.groundtruth, self.renderer, visible, 'MASK', self.foregroundPrior, self.variances))
class LogGaussianModelOld(Ch):
    """Per-pixel Gaussian log-likelihood of a rendered image vs. ground truth.

    Differentiable chumpy node; gradients flow only through the renderer.
    """
    dterms = ['renderer', 'groundtruth', 'variances']

    def compute_r(self):
        return self.logProb()

    def compute_dr_wrt(self, wrt):
        # Only the renderer is differentiated; other dterms get no gradient.
        if wrt is self.renderer:
            return self.logProb().dr_wrt(self.renderer)

    def logProb(self):
        # Pixels covered by mesh 0 count as visible.
        # Fix: np.bool was removed in NumPy 1.24; dropped the dead
        # visibility-based assignments that were immediately overwritten.
        visible = np.array(self.renderer.image_mesh_bool([0])).copy().astype(bool)
        return logPixelLikelihoodCh(self.groundtruth, self.renderer, visible, 'MASK', self.variances)
class LogGaussianModel(Ch):
    """Per-pixel Gaussian log-likelihood of a rendered image vs. ground truth.

    The optional term 'useMask' selects whether visibility is taken from the
    renderer's index image (True) or all pixels are treated as visible
    (False, the default).
    """
    terms = ['useMask']
    dterms = ['renderer', 'groundtruth', 'variances']

    def compute_r(self):
        return self.logProb()

    def compute_dr_wrt(self, wrt):
        # Only the renderer is differentiated; other dterms get no gradient.
        if wrt is self.renderer:
            return self.logProb().dr_wrt(self.renderer)

    def logProb(self):
        # 'useMask' is optional; default it to False the first time.
        # Fix: narrowed the bare except to AttributeError so unrelated
        # errors are no longer silently swallowed.
        try:
            self.useMask
        except AttributeError:
            self.useMask = False
        if self.useMask:
            # Assumes the first mesh is the background cube, so nonzero
            # indices mark foreground pixels.
            visible = self.renderer.indices_image != 0
        else:
            # Fix: np.bool was removed in NumPy 1.24; request bool directly.
            visible = np.ones_like(self.renderer.indices_image, dtype=bool)
        return logPixelLikelihoodCh(self.groundtruth, self.renderer, visible, 'MASK', self.variances)
def pixelLikelihood(image, template, testMask, backgroundModel, variances):
    """Per-pixel Gaussian likelihood of `image` under `template`.

    The channel likelihoods (product over axis 2) are kept where `mask` is 1;
    pixels outside the mask contribute probability 1. When backgroundModel is
    'FULL' the whole image is treated as masked-in.

    :param image: H x W x 3 observed image.
    :param template: H x W x 3 predicted image (broadcastable against image).
    :param testMask: H x W mask of pixels to evaluate (1 = evaluate).
    :param backgroundModel: 'FULL' to ignore the mask, anything else to use it.
    :param variances: Gaussian variance(s), scalar or broadcastable.
    :returns: H x W array of per-pixel likelihoods.
    """
    sigma = np.sqrt(variances)
    mask = testMask
    if backgroundModel == 'FULL':
        mask = np.ones(image.shape[0:2])
    # (Removed a dead `uniformProbs` local that was never used.)
    normalProbs = np.prod((1/(sigma * np.sqrt(2 * np.pi)) * np.exp(- (image - template)**2 / (2 * variances))), axis=2)
    return normalProbs * mask + (1-mask)
def logPixelLikelihoodCh(image, template, testMask, backgroundModel, variances):
    """Per-pixel Gaussian log-likelihood, summed over the 3 color channels.

    Differentiable chumpy expression. Pixels outside the mask contribute 0
    log-likelihood; 'FULL' treats the whole image as masked-in.

    :returns: H x W chumpy expression of per-pixel log-likelihoods.
    """
    sigma = ch.sqrt(variances)
    mask = testMask
    if backgroundModel == 'FULL':
        mask = np.ones(image.shape[0:2])
    # (Removed a dead `uniformProbs` local that was never used.)
    logprobs = (-(image - template)**2 / (2. * variances)) - ch.log((sigma * np.sqrt(2.0 * np.pi)))
    pixelLogProbs = logprobs[:,:,0] + logprobs[:,:,1] + logprobs[:,:,2]
    return pixelLogProbs * mask
def pixelLikelihoodCh(image, template, testMask, backgroundModel, layerPrior, variances):
    """Per-pixel Gaussian likelihood (product over channels) as a chumpy node.

    Pixels outside the mask contribute probability 1; 'FULL' treats the whole
    image as masked-in. `layerPrior` is kept for interface compatibility with
    the robust variant but is not used by the plain Gaussian likelihood.

    :returns: H x W chumpy expression of per-pixel likelihoods.
    """
    sigma = ch.sqrt(variances)
    mask = testMask
    if backgroundModel == 'FULL':
        mask = np.ones(image.shape[0:2])
    # (Removed a dead `repPriors = ch.tile(...)` node that was never used.)
    probs = ch.exp( - (image - template)**2 / (2 * variances)) * (1./(sigma * np.sqrt(2 * np.pi)))
    foregroundProbs = (probs[:,:,0] * probs[:,:,1] * probs[:,:,2])
    return foregroundProbs * mask + (1-mask)
def layerPosteriorsRobust(image, template, testMask, backgroundModel, layerPrior, variances):
    """Posterior responsibilities of foreground vs. outlier layers (NumPy).

    Under the robust mixture, each pixel is explained either by the Gaussian
    foreground (weighted by `layerPrior`) or by a uniform outlier layer
    (weight 1 - layerPrior). Responsibilities are the layer terms divided by
    the total robust likelihood.

    :returns: (foregroundPosterior, outlierPosterior), each H x W.
    """
    sigma = np.sqrt(variances)
    mask = testMask
    if backgroundModel == 'FULL':
        mask = np.ones(image.shape[0:2])
    repPriors = np.tile(layerPrior, image.shape[0:2])
    foregroundProbs = np.prod(1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (image - template)**2 / (2 * variances)) * layerPrior, axis=2)
    outlierProbs = (1-repPriors)
    # (Removed a dead `backgroundProbs` local that was never used.)
    lik = pixelLikelihoodRobust(image, template, testMask, backgroundModel, layerPrior, variances)
    return foregroundProbs*mask/lik, outlierProbs*mask/lik
def layerPosteriorsRobustCh(image, template, testMask, backgroundModel, layerPrior, variances):
    """Posterior responsibilities of foreground vs. outlier layers (chumpy).

    Differentiable counterpart of layerPosteriorsRobust: foreground term is a
    per-channel Gaussian weighted by `layerPrior`; the outlier term carries
    weight 1 - layerPrior; both are normalized by the robust likelihood.

    :returns: (foregroundPosterior, outlierPosterior) chumpy expressions, H x W.
    """
    sigma = ch.sqrt(variances)
    mask = testMask
    if backgroundModel == 'FULL':
        mask = np.ones(image.shape[0:2])
    repPriors = ch.tile(layerPrior, image.shape[0:2])
    probs = ch.exp( - (image - template)**2 / (2 * variances)) * (1/(sigma * np.sqrt(2 * np.pi)))
    foregroundProbs = probs[:,:,0] * probs[:,:,1] * probs[:,:,2] * layerPrior
    outlierProbs = ch.Ch(1-repPriors)
    # (Removed a dead `backgroundProbs` local that was never used.)
    lik = pixelLikelihoodRobustCh(image, template, testMask, backgroundModel, layerPrior, variances)
    return foregroundProbs*mask/lik, outlierProbs*mask/lik
def layerPosteriorsRobustSQErrorCh(sqeRenderer, testMask, backgroundModel, layerPrior, variances):
    """Posterior responsibilities of foreground vs. outlier layers from a
    precomputed squared-error renderer (chumpy).

    Same mixture as layerPosteriorsRobustCh, but the per-pixel squared error
    is read directly from `sqeRenderer` instead of (image - template)**2.

    :returns: (foregroundPosterior, outlierPosterior) chumpy expressions, H x W.
    """
    sigma = ch.sqrt(variances)
    mask = testMask
    if backgroundModel == 'FULL':
        mask = np.ones(sqeRenderer.r.shape[0:2])
    repPriors = ch.tile(layerPrior, sqeRenderer.r.shape[0:2])
    probs = ch.exp( - (sqeRenderer) / (2 * variances)) * (1/(sigma * np.sqrt(2 * np.pi)))
    foregroundProbs = probs[:,:,0] * probs[:,:,1] * probs[:,:,2] * layerPrior
    outlierProbs = ch.Ch(1-repPriors)
    # (Removed a dead `backgroundProbs` local that was never used.)
    lik = pixelLikelihoodRobustSQErrorCh(sqeRenderer, testMask, backgroundModel, layerPrior, variances)
    return foregroundProbs*mask/lik, outlierProbs*mask/lik
def robustDistance(sqResiduals, scale):
    """Sum of robustified squared residuals: each term is r2 / (r2 + scale**2),
    saturating toward 1 for large residuals (Geman-McClure-style weighting)."""
    damped = sqResiduals / (sqResiduals + scale ** 2)
    return np.sum(damped)
def testImageMatching():
    """Cross-compare six teapot test images with the robust squared-distance
    score and save the resulting confusion matrix.

    Side effects: reads test/teapotN.png, writes test/teapotN_can.png (Canny
    edge maps) and test/confusion.png. Relies on the module-level `scoreImage`
    helper.
    """
    minThresTemplate = 10
    maxThresTemplate = 100
    methodParams = {'scale': 85000, 'minThresImage': minThresTemplate, 'maxThresImage': maxThresTemplate, 'minThresTemplate': minThresTemplate, 'maxThresTemplate': maxThresTemplate}
    teapots = ["test/teapot1", "test/teapot2", "test/teapot3", "test/teapot4", "test/teapot5", "test/teapot6"]
    images = []
    edges = []
    for teapot in teapots:
        im = cv2.imread(teapot + ".png")
        can = cv2.Canny(im, minThresTemplate, maxThresTemplate)
        images.append(im)
        edges.append(can)
        cv2.imwrite(teapot + "_can.png", can)
    confusion = np.zeros([6, 6])
    # Only the upper triangle is filled; the score is evaluated pairwise.
    # Fix: dropped the redundant `dist = distance = ...` double assignment,
    # which shadowed the module-level `distance` name, and use plain range()
    # instead of np.arange for loop indices.
    for tp1 in range(1, 7):
        for tp2 in range(tp1, 7):
            dist = scoreImage(images[tp1 - 1], images[tp2 - 1], 'robustSqDistImages', methodParams)
            print(dist)
            confusion[tp1 - 1, tp2 - 1] = dist
    plt.matshow(confusion)
    plt.colorbar()
    plt.savefig('test/confusion.png')
# elif method == 'chamferDataToModel':
# sqDists = chamferDistanceDataToModel(img, template, methodParams['minThresImage'], methodParams['maxThresImage'], methodParams['minThresTemplate'],methodParams['maxThresTemplate'])
# score = np.sum(sqDists)
# elif method == 'robustChamferDataToModel':
# sqDists = np.sum(chamferDistanceDataToModel(img, template, methodParams['minThresImage'], methodParams['maxThresImage'], methodParams['minThresTemplate'],methodParams['maxThresTemplate']))
# score = robustDistance(sqDists, methodParams['robustScale'])
# elif method == 'sqDistImages':
# sqDists = sqDistImages(img, template)
# score = np.sum(sqDists)
# elif method == 'robustSqDistImages': | 25,302 | 38.910095 | 213 | py |
inversegraphics | inversegraphics-master/var_inf.py | __author__ = 'pol'
import ipdb
import matplotlib
matplotlib.use('Qt4Agg')
from math import radians
import chumpy as ch
import numpy as np
import cv2
import matplotlib.pyplot as plt
from sklearn import mixture
from numpy.random import choice

plt.ion()

# Load the ground-truth rendering; convert BGR uint8 -> RGB float in [0, 1].
image = cv2.imread('opendr_GT.png')
image = np.float64(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))/255.0

nComps = 5     # generative mixture components
nRecComps = 4  # recognition-model components
gmm = mixture.GMM(n_components=nComps, covariance_type='spherical')

win = 40
# Fix: slice bounds must be integers -- image.shape[0]/2 is a float on
# Python 3 and raises TypeError when used as an index. Use floor division.
halfH = image.shape[0] // 2
halfW = image.shape[1] // 2
# Central (2*win) x (2*win) crop, flattened to one RGB row per pixel.
colors = image[halfH - win:halfH + win, halfW - win:halfW + win, :].reshape([4*win*win, 3])
gmm.fit(colors)

imshape = [win*2, win*2, 3]
numPixels = win*2 * win*2

chInput = ch.Ch(colors)
numVars = chInput.size

# Recognition model: component weights are a softmax of a linear map of the input.
recSoftmaxW = ch.Ch(np.random.uniform(0, 1, [nRecComps, numVars])/numVars)
chRecLogistic = ch.exp(ch.dot(recSoftmaxW, chInput.reshape([numVars, 1])))
chRecSoftmax = chRecLogistic.ravel()/ch.sum(chRecLogistic)

chZRecComps = ch.zeros([numVars, nRecComps])
chZ = ch.zeros([numVars])
recMeans = ch.Ch(np.random.uniform(0, 1, [3, nRecComps]))
recCovars = 0.2
chRecLogLikelihoods = - 0.5*(chZ.reshape([numPixels, 3, 1]) - ch.tile(recMeans, [numPixels, 1, 1]))**2 - ch.log((2 * recCovars) * (1/(ch.sqrt(recCovars) * np.sqrt(2 * np.pi))))

# Generative model: Gaussian mixture over the per-pixel latent colors Z.
genZCovars = 0.2
chGenComponentsProbs = ch.Ch(gmm.weights_)
chCompMeans = ch.zeros([nComps, 3])
for comp in range(nComps):
    chCompMeans[comp, :] = gmm.means_[comp]
chPZComp = ch.exp( - (ch.tile(chZ.reshape([numPixels, 3, 1]), [1, 1, nComps]) - chCompMeans.reshape([1, 3, nComps]))**2 / (2 * genZCovars)) * (1/(ch.sqrt(genZCovars) * np.sqrt(2 * np.pi)))
# Fix: the first reshape dimension was a hard-coded 5; it must track nComps.
chPZ = ch.dot(chGenComponentsProbs.reshape([1, nComps]), chPZComp.reshape([nComps, numVars]))

prec = 0.5
covars = np.eye(colors.size, colors.size)
detCov = 1  # identity covariance -> unit determinant

chResiduals = chInput.ravel() - chZ.ravel()
covar = 0.2
ipdb.set_trace()  # debug breakpoint kept from the original script
# Log joint: log p(Z) + isotropic-Gaussian log-likelihood of the residuals.
chLogJoint = ch.log(chPZ.ravel()) - 0.5*covar*ch.dot(chResiduals, chResiduals) - 0.5*(ch.log(detCov) + numVars*ch.log((2 * np.pi)))

ipdb.set_trace()  # debug breakpoint kept from the original script

gmmRec = mixture.GMM(n_components=nRecComps, covariance_type='spherical')
gmmRec.covars_ = gmm.covars_.copy()

# Update the means of the recognition gaussians and the mixing weights.
methods = ['dogleg', 'minimize', 'BFGS', 'L-BFGS-B', 'Nelder-Mead']
free_vars = [recMeans.ravel(), recSoftmaxW]

print("Beginning optimization.")
while True:
    gmmRec.weights_ = np.array(chRecSoftmax.r)
    gmmRec.means_ = np.array(ch.concatenate(recMeans))
    # Reparameterized sample of Z for a randomly chosen component u.
    epsilon = np.random.randn(numVars)
    u = choice(nRecComps, size=1, p=chRecSoftmax.r)
    chZ[:] = chZRecComps[:, u].r.ravel() + recCovars*epsilon.ravel()
    pu = chRecSoftmax
    # Stochastic estimate of the variational lower bound for component u.
    L = ch.log(pu[u]) + ch.sum(chLogJoint.ravel()) - ch.sum(chRecLogLikelihoods[:, :, u].ravel())
    drL = L.dr_wrt(recMeans)/numPixels
    alpha = 0.1  # gradient-ascent step size
    recSoftmaxW[:] = recSoftmaxW.r[:] + alpha*L.dr_wrt(recSoftmaxW).reshape(recSoftmaxW.shape)/numPixels
    ipdb.set_trace()  # debug breakpoint kept from the original script
    chZ[:] = chZ.r[:] + alpha*L.dr_wrt(chZ).reshape(chZ.r.shape)/numPixels
    chZRecComps[:, u] = chZ.r[:]
| 3,755 | 32.837838 | 186 | py |
inversegraphics | inversegraphics-master/geometry.py | import chumpy as ch
from chumpy import depends_on, Ch
import cv2
import numpy as np
import scipy.sparse as sp
from chumpy.utils import row, col
from opendr.geometry import Rodrigues
class RotateZ(Ch):
    """4x4 homogeneous rotation matrix about the Z axis by angle `a` (radians)."""
    dterms = 'a'

    def compute_r(self):
        cosA = np.cos(self.a.r)
        sinA = np.sin(self.a.r)
        return np.array([[cosA, -sinA, 0, 0],
                         [sinA, cosA, 0, 0],
                         [0, 0, 1, 0],
                         [0, 0, 0, 1]])

    def compute_dr_wrt(self, wrt):
        # The matrix only depends on the angle; any other wrt gets None.
        if wrt is self.a:
            cosA = np.cos(self.a.r)[0]
            sinA = np.sin(self.a.r)[0]
            # Element-wise derivative of the matrix, flattened to (16, 1).
            return np.array([[-sinA, -cosA, 0, 0],
                             [cosA, -sinA, 0, 0],
                             [0, 0, 0, 0],
                             [0, 0, 0, 0]]).reshape(16, 1)
class RotateX(Ch):
    """4x4 homogeneous rotation matrix about the X axis by angle `a` (radians)."""
    dterms = 'a'

    def compute_r(self):
        cosA = np.cos(self.a.r)
        sinA = np.sin(self.a.r)
        return np.array([[1, 0, 0, 0],
                         [0, cosA, -sinA, 0],
                         [0, sinA, cosA, 0],
                         [0, 0, 0, 1]])

    def compute_dr_wrt(self, wrt):
        # The matrix only depends on the angle; any other wrt gets None.
        if wrt is self.a:
            cosA = np.cos(self.a.r)[0]
            sinA = np.sin(self.a.r)[0]
            # Element-wise derivative of the matrix, flattened to (16, 1).
            return np.array([[0, 0, 0, 0],
                             [0, -sinA, -cosA, 0],
                             [0, cosA, -sinA, 0],
                             [0, 0, 0, 0]]).reshape(16, 1)
class Scale(Ch):
    """4x4 homogeneous scaling matrix diag(x, y, z, 1)."""
    dterms = 'x', 'y', 'z'

    def compute_r(self):
        return np.array([[self.x.r, 0, 0, 0], [0, self.y.r, 0, 0], [0, 0, self.z.r, 0], [0, 0, 0, 1]])

    # Fix: the original defined compute_dr_wrt twice; the first definition
    # (which always returned None) was dead code, silently shadowed by this
    # one. Only the meaningful definition is kept.
    def compute_dr_wrt(self, wrt):
        if wrt is not self.x and wrt is not self.y and wrt is not self.z:
            return
        # Each scalar affects exactly one diagonal entry; the derivative is a
        # constant indicator matrix, flattened to chumpy's (16, 1) convention.
        if wrt is self.x:
            return np.array([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]).reshape(16, 1)
        if wrt is self.y:
            return np.array([[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]).reshape(16, 1)
        if wrt is self.z:
            return np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 0]]).reshape(16, 1)
class Translate(Ch):
    """4x4 homogeneous translation matrix by (x, y, z)."""
    dterms = 'x', 'y', 'z'

    def compute_r(self):
        return np.array([[1, 0, 0, self.x.r],
                         [0, 1, 0, self.y.r],
                         [0, 0, 1, self.z.r],
                         [0, 0, 0, 1]])

    def compute_dr_wrt(self, wrt):
        # Each scalar affects exactly one entry of the last column; the
        # derivative is a constant indicator matrix flattened to (16, 1).
        if wrt is self.x:
            return np.array([[0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]).reshape(16, 1)
        if wrt is self.y:
            return np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]]).reshape(16, 1)
        if wrt is self.z:
            return np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0]]).reshape(16, 1)
        # Anything else gets no gradient.
        return None
def getNormals(vertices, faces):
    """Per-vertex normals: scatter each face's unit normal onto its three
    vertices, then renormalize. `normalize_v3` is the in-place module helper.

    :param vertices: V x 3 array of vertex positions.
    :param faces: F x 3 integer array of vertex indices.
    :returns: V x 3 array of unit vertex normals.
    """
    vertexNormals = np.zeros(vertices.shape, dtype=vertices.dtype)
    corners = vertices[faces]
    faceNormals = np.cross(corners[:, 1] - corners[:, 0], corners[:, 2] - corners[:, 0])
    normalize_v3(faceNormals)
    # NOTE: fancy-index += does not accumulate repeated indices within one
    # assignment (NumPy buffering), matching the original behavior exactly.
    vertexNormals[faces[:, 0]] += faceNormals
    vertexNormals[faces[:, 1]] += faceNormals
    vertexNormals[faces[:, 2]] += faceNormals
    normalize_v3(vertexNormals)
    return vertexNormals
def chGetNormals(vertices, faces):
    """Differentiable per-vertex normals via opendr's VertNormals node,
    reshaped to one 3-vector per vertex."""
    from opendr.geometry import VertNormals
    return VertNormals(vertices, faces).reshape((-1, 3))
| 3,023 | 30.831579 | 171 | py |
inversegraphics | inversegraphics-master/diffrender_analyze.py | __author__ = 'pol'
import matplotlib
matplotlib.use('Qt4Agg')
import scene_io_utils
from math import radians
import timeit
import time
import opendr
import chumpy as ch
import geometry
import image_processing
import numpy as np
import cv2
import generative_models
import recognition_models
import matplotlib.pyplot as plt
from opendr_utils import *
from utils import *
import OpenGL.GL as GL
import glfw
useShapeModel = True
#########################################
# OpenDR Initialization starts here
#########################################
#Main script options:
glModes = ['glfw','mesa']
glMode = glModes[0]
width, height = (150, 150)
win = -1
if glMode == 'glfw':
#Initialize base GLFW context for the Demo and to share context among all renderers.
glfw.init()
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
# glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL.GL_TRUE)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
glfw.window_hint(glfw.DEPTH_BITS,32)
glfw.window_hint(glfw.VISIBLE, GL.GL_FALSE)
win = glfw.create_window(width, height, "Demo", None, None)
glfw.make_context_current(win)
angle = 60 * 180 / np.pi
clip_start = 0.05
clip_end = 10
frustum = {'near': clip_start, 'far': clip_end, 'width': width, 'height': height}
camDistance = 0.4
teapots = [line.strip() for line in open('teapots.txt')]
# renderTeapotsList = np.arange(len(teapots))
renderTeapotsList = np.arange(len(teapots))[0:1]
targetModels = []
v_teapots, f_list_teapots, vc_teapots, vn_teapots, uv_teapots, haveTextures_list_teapots, textures_list_teapots, vflat, varray, center_teapots = scene_io_utils.loadTeapotsOpenDRData(renderTeapotsList, False, False, targetModels)
azimuth = np.pi
chCosAz = ch.Ch([np.cos(azimuth)])
chSinAz = ch.Ch([np.sin(azimuth)])
chAz = 2*ch.arctan(chSinAz/(ch.sqrt(chCosAz**2 + chSinAz**2) + chCosAz))
chAz = ch.Ch([0])
chObjAz = ch.Ch([0])
chAzRel = chAz - chObjAz
elevation = 0
chLogCosEl = ch.Ch(np.log(np.cos(elevation)))
chLogSinEl = ch.Ch(np.log(np.sin(elevation)))
chEl = 2*ch.arctan(ch.exp(chLogSinEl)/(ch.sqrt(ch.exp(chLogCosEl)**2 + ch.exp(chLogSinEl)**2) + ch.exp(chLogCosEl)))
chEl = ch.Ch([0.95993109])
chDist = ch.Ch([camDistance])
chLightSHCoeffs = ch.Ch(np.array([2, 0.25, 0.25, 0.12,-0.17,0.36,0.1,0.,0.]))
clampedCosCoeffs = clampedCosineCoefficients()
chComponent = chLightSHCoeffs * clampedCosCoeffs
chPointLightIntensity = ch.Ch([1])
chLightAz = ch.Ch([0.0])
chLightEl = ch.Ch([np.pi/2])
chLightDist = ch.Ch([0.5])
light_color = ch.ones(3)*chPointLightIntensity
chVColors = ch.Ch([0.4,0.4,0.4])
chDisplacement = ch.Ch([0.0, 0.0,0.0])
chScale = ch.Ch([1.0,1.0,1.0])
# vcch[0] = np.ones_like(vcflat[0])*chVColorsGT.reshape([1,3])
renderer_teapots = []
for teapot_i in range(len(renderTeapotsList)):
vmod = v_teapots[teapot_i]
fmod_list = f_list_teapots[teapot_i]
vcmod = vc_teapots[teapot_i]
vnmod = vn_teapots[teapot_i]
uvmod = uv_teapots[teapot_i]
haveTexturesmod_list = haveTextures_list_teapots[teapot_i]
texturesmod_list = textures_list_teapots[teapot_i]
centermod = center_teapots[teapot_i]
vmod, vnmod, _ = transformObject(vmod, vnmod, chScale, chObjAz, ch.Ch([0]), ch.Ch([0]), np.array([0, 0, 0]))
renderer = createRendererTarget(glMode, chAz, chEl, chDist, centermod, vmod, vcmod, fmod_list, vnmod, light_color, chComponent, chVColors, 0, chDisplacement, width,height, uvmod, haveTexturesmod_list, texturesmod_list, frustum, win )
renderer.overdraw = True
renderer.nsamples = 8
renderer.msaa = False
renderer.initGL()
renderer.initGLTexture()
# renderer.initGL_AnalyticRenderer()
renderer.imageGT = None
renderer.r
renderer_teapots = renderer_teapots + [renderer]
currentTeapotModel = 0
center = center_teapots[currentTeapotModel]
if useShapeModel:
import shape_model
#%% Load data
filePath = 'data/teapotModel.pkl'
teapotModel = shape_model.loadObject(filePath)
faces = teapotModel['faces']
#%% Sample random shape Params
latentDim = np.shape(teapotModel['ppcaW'])[1]
shapeParams = np.random.randn(latentDim)
chShapeParams = ch.Ch(shapeParams)
meshLinearTransform=teapotModel['meshLinearTransform']
W=teapotModel['ppcaW']
b=teapotModel['ppcaB']
chVertices = shape_model.VerticesModel(chShapeParams=chShapeParams,meshLinearTransform=meshLinearTransform,W = W,b=b)
chVertices.init()
chVertices = ch.dot(geometry.RotateZ(-np.pi/2)[0:3,0:3],chVertices.T).T
chNormals = shape_model.chGetNormals(chVertices, faces)
smNormals = [chNormals]
smFaces = [[faces]]
smVColors = [chVColors*np.ones(chVertices.shape)]
smUVs = [ch.Ch(np.zeros([chVertices.shape[0],2]))]
smHaveTextures = [[False]]
smTexturesList = [[None]]
chVertices = chVertices - ch.mean(chVertices, axis=0)
minZ = ch.min(chVertices[:,2])
chMinZ = ch.min(chVertices[:,2])
zeroZVerts = chVertices[:,2]- chMinZ
chVertices = ch.hstack([chVertices[:,0:2] , zeroZVerts.reshape([-1,1])])
chVertices = chVertices*0.09
smCenter = ch.array([0,0,0.1])
smVertices = [chVertices]
chNormals = shape_model.chGetNormals(chVertices, faces)
smNormals = [chNormals]
smVertices, smNormals, _ = transformObject(smVertices, smNormals, chScale, chObjAz, ch.Ch([0]), ch.Ch([0]), np.array([0, 0, 0]))
renderer = createRendererTarget(glMode, chAz, chEl, chDist, smCenter, [smVertices], [smVColors], [smFaces], [smNormals], light_color, chComponent, chVColors, 0, chDisplacement, width,height, [smUVs], [smHaveTextures], [smTexturesList], frustum, win )
renderer.overdraw = True
renderer.nsamples = 8
renderer.msaa = False
renderer.initGL()
renderer.initGLTexture()
# renderer.initGL_AnalyticRenderer()
renderer.imageGT = None
renderer.r
chShapeParams[:] = np.zeros([latentDim])
chVerticesMean = chVertices.r.copy()
else:
renderer = renderer_teapots[0]
##GT Renderer
chObjAzGT = ch.Ch([0])
chAzGT = ch.Ch([0])
chAzRelGT = chAzGT - chObjAzGT
chElGT = ch.Ch(chEl.r[0])
chDistGT = ch.Ch([camDistance])
phiOffset = ch.Ch([0])
totalOffset = phiOffset + chObjAzGT
chVColorsGT = ch.Ch([0.8,0.8,0.8])
chAmbientIntensityGT = ch.Ch([0.025])
clampedCosCoeffs = clampedCosineCoefficients()
SHFilename = 'data/LightSHCoefficients.pickle'
with open(SHFilename, 'rb') as pfile:
envMapDic = pickle.load(pfile)
hdritems = list(envMapDic.items())
envMapCoeffs = ch.Ch(list(envMapDic.items())[0][1][1])
envMapCoeffsRotated = ch.Ch(np.dot(light_probes.chSphericalHarmonicsZRotation(totalOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]])
envMapCoeffsRotatedRel = ch.Ch(np.dot(light_probes.chSphericalHarmonicsZRotation(phiOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]])
shCoeffsRGB = envMapCoeffsRotated
shCoeffsRGBRel = envMapCoeffsRotatedRel
chShCoeffs = 0.3*shCoeffsRGB[:,0] + 0.59*shCoeffsRGB[:,1] + 0.11*shCoeffsRGB[:,2]
chShCoeffsRel = 0.3*shCoeffsRGBRel[:,0] + 0.59*shCoeffsRGBRel[:,1] + 0.11*shCoeffsRGBRel[:,2]
chAmbientSHGT = chShCoeffs.ravel() * chAmbientIntensityGT * clampedCosCoeffs
chAmbientSHGTRel = chShCoeffsRel.ravel() * chAmbientIntensityGT * clampedCosCoeffs
chComponentGT = chAmbientSHGT
chComponentGTRel = chAmbientSHGTRel
# shapeParams = np.random.randn(latentDim)
if useShapeModel:
shapeParams = np.random.randn(latentDim)
chShapeParamsGT = ch.Ch(shapeParams)
chVerticesGT = shape_model.VerticesModel(chShapeParams =chShapeParamsGT,meshLinearTransform=meshLinearTransform,W=W,b=b)
chVerticesGT.init()
chVerticesGT = ch.dot(geometry.RotateZ(-np.pi/2)[0:3,0:3],chVerticesGT.T).T
# chNormalsGT = shape_model.chShapeParamsToNormals(teapotModel['N'], landmarks, teapotModel['linT'])
# chNormalsGT = shape_model.shapeParamsToNormals(shapeParams, teapotModel)
chNormalsGT = shape_model.chGetNormals(chVerticesGT, faces)
smNormalsGT = [chNormalsGT]
smFacesGT = [[faces]]
smVColorsGT = [chVColorsGT*np.ones(chVerticesGT.shape)]
smUVsGT = [ch.Ch(np.zeros([chVerticesGT.shape[0],2]))]
smHaveTexturesGT = [[False]]
smTexturesListGT = [[None]]
smCenterGT = ch.mean(chVerticesGT, axis=0)
chVerticesGT = chVerticesGT - ch.mean(chVerticesGT, axis=0)
minZ = ch.min(chVerticesGT[:,2])
chMinZ = ch.min(chVerticesGT[:,2])
zeroZVerts = chVerticesGT[:,2]- chMinZ
chVerticesGT = ch.hstack([chVerticesGT[:,0:2] , zeroZVerts.reshape([-1,1])])
chVerticesGT = chVerticesGT*0.09
smCenterGT = ch.array([0,0,0.1])
smVerticesGT = [chVerticesGT]
#########################################
# Initialization ends here
#########################################
#########################################
# Generative model set up
#########################################
rendererGT = ch.Ch(renderer.r.copy())
numPixels = width*height
E_raw = renderer - rendererGT
SE_raw = ch.sum(E_raw*E_raw, axis=2)
SSqE_raw = ch.SumOfSquares(E_raw)/numPixels
initialPixelStdev = 0.01
reduceVariance = False
# finalPixelStdev = 0.05
stds = ch.Ch([initialPixelStdev])
variances = stds ** 2
globalPrior = ch.Ch([0.9])
negLikModel = -ch.sum(generative_models.LogGaussianModel(renderer=renderer, groundtruth=rendererGT, variances=variances))
negLikModelRobust = -ch.sum(generative_models.LogRobustModel(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variances))
pixelLikelihoodCh = generative_models.LogGaussianModel(renderer=renderer, groundtruth=rendererGT, variances=variances)
pixelLikelihoodRobustCh = generative_models.LogRobustModel(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variances)
# negLikModel = -generative_models.modelLogLikelihoodCh(rendererGT, renderer, np.array([]), 'FULL', variances)/numPixels
# negLikModelRobust = -generative_models.modelLogLikelihoodRobustCh(rendererGT, renderer, np.array([]), 'FULL', globalPrior, variances)/numPixels
# pixelLikelihoodCh = generative_models.logPixelLikelihoodCh(rendererGT, renderer, np.array([]), 'FULL', variances)
# pixelLikelihoodRobustCh = ch.log(generative_models.pixelLikelihoodRobustCh(rendererGT, renderer, np.array([]), 'FULL', globalPrior, variances))
post = generative_models.layerPosteriorsRobustCh(rendererGT, renderer, np.array([]), 'MASK', globalPrior, variances)[0]
# modelLogLikelihoodRobustRegionCh = -ch.sum(generative_models.LogRobustModelRegion(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variances))/numPixels
#
# pixelLikelihoodRobustRegionCh = generative_models.LogRobustModelRegion(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variances)
# models = [negLikModel, negLikModelRobust, hogError]
models = [negLikModel, negLikModelRobust]
pixelModels = [pixelLikelihoodCh, pixelLikelihoodRobustCh]
modelsDescr = ["Gaussian Model", "Outlier model" ]
model = 1
pixelErrorFun = pixelModels[model]
errorFun = models[model]
############
# Experiments
############
seed = 1
np.random.seed(seed)
gtPrefix = 'train4_occlusion_shapemodel_photorealistic_10K_test100-1100'
# gtPrefix = 'train4_occlusion_shapemodel'
gtDir = 'groundtruth/' + gtPrefix + '/'
featuresDir = gtDir
experimentPrefix = 'train4_occlusion_shapemodel_10k'
experimentDir = 'experiments/' + experimentPrefix + '/'
ignore = []
if os.path.isfile(gtDir + 'ignore.npy'):
ignore = np.load(gtDir + 'ignore.npy')
groundTruthFilename = gtDir + 'groundTruth.h5'
gtDataFile = h5py.File(groundTruthFilename, 'r')
rangeTests = np.arange(100,1100)
idsInRange = np.arange(len(rangeTests))
testSet = np.load(experimentDir + 'test.npy')[rangeTests][idsInRange]
shapeGT = gtDataFile[gtPrefix].shape
boolTestSet = np.array([np.any(num == testSet) for num in gtDataFile[gtPrefix]['trainIds']])
dataIds = gtDataFile[gtPrefix][boolTestSet]['trainIds']
dataIdsTestIndices = np.array([np.where(dataIds==num)[0][0] for num in testSet])
groundTruth = gtDataFile[gtPrefix][boolTestSet][dataIdsTestIndices]
dataTeapotIdsTest = groundTruth['trainTeapotIds']
test = np.arange(len(testSet))
testSet = testSet[test]
print("Reading experiment.")
dataAzsGT = groundTruth['trainAzsGT']
dataObjAzsGT = groundTruth['trainObjAzsGT']
dataElevsGT = groundTruth['trainElevsGT']
dataLightAzsGT = groundTruth['trainLightAzsGT']
dataLightElevsGT = groundTruth['trainLightElevsGT']
dataLightIntensitiesGT = groundTruth['trainLightIntensitiesGT']
dataVColorGT = groundTruth['trainVColorGT']
dataScenes = groundTruth['trainScenes']
dataTeapotIds = groundTruth['trainTeapotIds']
dataEnvMaps = groundTruth['trainEnvMaps']
dataOcclusions = groundTruth['trainOcclusions']
dataTargetIndices = groundTruth['trainTargetIndices']
dataLightCoefficientsGT = groundTruth['trainLightCoefficientsGT']
dataLightCoefficientsGTRel = groundTruth['trainLightCoefficientsGTRel']
dataEnvMapPhiOffsets = groundTruth['trainEnvMapPhiOffsets']
dataAmbientIntensityGT = groundTruth['trainAmbientIntensityGT']
dataIds = groundTruth['trainIds']
gtDtype = groundTruth.dtype
testSetFixed = testSet
whereBad = []
for test_it, test_id in enumerate(testSet):
if test_id in ignore:
bad = np.where(testSetFixed==test_id)
testSetFixed = np.delete(testSetFixed, bad)
whereBad = whereBad + [bad]
# testSet = testSetFixed
loadFromHdf5 = False
useShapeModel = True
syntheticGroundtruth = False
synthPrefix = '_cycles'
if syntheticGroundtruth:
synthPrefix = ''
if syntheticGroundtruth:
imagesDir = gtDir + 'images_opendr/'
else:
imagesDir = gtDir + 'images/'
images = readImages(imagesDir, dataIds, loadFromHdf5)
# testSet = np.arange(len(images))[0:10]
testAzsGT = dataAzsGT
testObjAzsGT = dataObjAzsGT
testElevsGT = dataElevsGT
testLightAzsGT = dataLightAzsGT
testLightElevsGT = dataLightElevsGT
testLightIntensitiesGT = dataLightIntensitiesGT
testVColorGT = dataVColorGT
testOcclusions = dataOcclusions
if useShapeModel:
dataShapeModelCoeffsGT = groundTruth['trainShapeModelCoeffsGT']
testShapeParamsGT = dataShapeModelCoeffsGT
testLightCoefficientsGTRel = dataLightCoefficientsGTRel * dataAmbientIntensityGT[:,None]
testAzsRel = np.mod(testAzsGT - testObjAzsGT, 2*np.pi)
##### Load Results data:
# testPrefix = 'train4_occlusion_shapemodel_10k_ECCVNEW-JOINED-ALL2018'
testPrefix = 'train4_occlusion_shapemodel_10k_ECCV-PHOTOREALISTIC-JOINT2018'
resultDir = 'results/' + testPrefix + '/'
with open(resultDir + 'experiment.pickle', 'rb') as pfile:
experimentDic = pickle.load(pfile)
testSet = experimentDic['testSet']
methodsPred = experimentDic['methodsPred']
testOcclusions = experimentDic['testOcclusions']
# testOcclusions = testOcclusions[dataIdsTestIndices] #Change depending on the source of GT.
# import ipdb; ipdb.set_trace()
testPrefixBase = experimentDic[ 'testPrefixBase']
parameterRecognitionModels = experimentDic[ 'parameterRecognitionModels']
azimuths = experimentDic[ 'azimuths']
azimuths = [azimuths[method][dataIdsTestIndices] if azimuths[method] is not None else None for method in range(len(azimuths))]
elevations = experimentDic[ 'elevations']
elevations = [elevations[method][dataIdsTestIndices] if elevations[method] is not None else None for method in range(len(elevations))]
vColors = experimentDic[ 'vColors']
vColors = [vColors[method][dataIdsTestIndices] if vColors[method] is not None else None for method in range(len(vColors))]
lightCoeffs = experimentDic[ 'lightCoeffs']
lightCoeffs = [lightCoeffs[method][dataIdsTestIndices] if lightCoeffs[method] is not None else None for method in range(len(lightCoeffs))]
likelihoods = []
# likelihoods = experimentDic['likelihoods']
# likelihoods = [likelihoods[method][dataIdsTestIndices] if likelihoods[method] is not None else None for method in range(len(likelihoods))]
shapeParams = experimentDic['shapeParams']
shapeParams = [shapeParams[method][dataIdsTestIndices] if shapeParams[method] is not None else None for method in range(len(shapeParams))]
if os.path.isfile(resultDir + 'segmentations.pickle'):
with open(resultDir + 'segmentations.pickle', 'rb') as pfile:
segmentationsDic = pickle.load(pfile)
segmentations = segmentationsDic['segmentations']
segmentations = [segmentations[method][dataIdsTestIndices] if segmentations[method] is not None else None for method in range(len(segmentations))]
else:
segmentations = [None]*len(methodsPred)
testOcclusionsFull = testOcclusions.copy()
loadMask = True
if loadMask:
masksGT = loadMasks(gtDir + '/masks_occlusion/', dataIds)
#
# with open(resultDir + 'approxProjections.pickle', 'rb') as pfile:
# approxProjectionsDic = pickle.load(pfile)
#
# approxProjections = approxProjectionsDic['approxProjections']
# approxProjectionsGT = approxProjectionsDic['approxProjectionsGT']
#
# envMapTexture = np.zeros([180,360,3])
# approxProjections = []
# for method in range(len(methodsPred)):
# print("Approx projection on method " + str(method))
# approxProjectionsFittedList = []
# for test_i in range(len(testSet)):
# pEnvMap = SHProjection(envMapTexture, np.concatenate([lightCoeffs[4][test_i][:,None], lightCoeffs[4][test_i][:,None], lightCoeffs[4][test_i][:,None]], axis=1))
# approxProjection = np.sum(pEnvMap, axis=(2,3))
# approxProjectionsFittedList = approxProjectionsFittedList + [approxProjection[None,:]]
# approxProjections = approxProjections + [np.vstack(approxProjectionsFittedList)]
#
#
# approxProjectionsGTList = []
# for test_i in range(len(testSet)):
# pEnvMap = SHProjection(envMapTexture, np.concatenate([testLightCoefficientsGTRel[test_i][:,None], testLightCoefficientsGTRel[test_i][:,None], testLightCoefficientsGTRel[test_i][:,None]], axis=1))
# approxProjectionGT = np.sum(pEnvMap, axis=(2,3))
# approxProjectionsGTList = approxProjectionsGTList + [approxProjectionGT[None,:]]
# approxProjectionsGT = np.vstack(approxProjectionsGTList)
approxProjections = None
approxProjectionsGT = None
# #
# # ##### Load Results data:
# #
#
# testPrefix2 = 'train4_occlusion_shapemodel_10k_ECCV-PHOTREALISTIC-MEANBASELINE-2530-17944_predict_1000samples__method1errorFun1_std0.05_shapePen0'
# resultDir2 = 'results/' + testPrefix2 + '/'
# with open(resultDir2 + 'experiment.pickle', 'rb') as pfile:
# experimentDic2 = pickle.load(pfile)
# testSet2 = experimentDic2['testSet']
# methodsPred2 = experimentDic2['methodsPred']
# testOcclusions2 = experimentDic2[ 'testOcclusions']
# testPrefixBase2 = experimentDic2[ 'testPrefixBase']
# parameterRecognitionModels2 = experimentDic2[ 'parameterRecognitionModels']
# azimuths2 = experimentDic2['azimuths']
# elevations2 = experimentDic2[ 'elevations']
# vColors2 = experimentDic2[ 'vColors']
# lightCoeffs2 = experimentDic2[ 'lightCoeffs']
# likelihoods2 = experimentDic2['likelihoods']
# shapeParams2 = experimentDic2['shapeParams']
# if os.path.isfile(resultDir2 + 'segmentations.pickle'):
# with open(resultDir2 + 'segmentations.pickle', 'rb') as pfile:
# segmentationsDic2 = pickle.load(pfile)
# segmentations2 = segmentationsDic2['segmentations']
# else:
# segmentations2 = [None]*len(methodsPred2)
#
# testOcclusionsFull2 = testOcclusions2.copy()
#
#
# testPrefix3 = 'train4_occlusion_shapemodel_10k_ECCV-SYNTH-FIX-2530-17944_optimize_1000samples__method1errorFun1_std0.03_shapePen0'
# resultDir3 = 'results/' + testPrefix3 + '/'
# with open(resultDir3 + 'experiment.pickle', 'rb') as pfile:
# experimentDic3 = pickle.load(pfile)
# testSet3 = experimentDic3['testSet']
# methodsPred3 = experimentDic3['methodsPred']
# testOcclusions3 = experimentDic3[ 'testOcclusions']
# testPrefixBase3 = experimentDic3[ 'testPrefixBase']
# parameterRecognitionModels3 = experimentDic3[ 'parameterRecognitionModels']
# azimuths3 = experimentDic3['azimuths']
# elevations3 = experimentDic3[ 'elevations']
# vColors3 = experimentDic3[ 'vColors']
# lightCoeffs3 = experimentDic3[ 'lightCoeffs']
# likelihoods3 = experimentDic3['likelihoods']
# shapeParams3 = experimentDic3['shapeParams']
# if os.path.isfile(resultDir3 + 'segmentations.pickle'):
# with open(resultDir3 + 'segmentations.pickle', 'rb') as pfile:
# segmentationsDic3 = pickle.load(pfile)
# segmentations3 = segmentationsDic3['segmentations']
# else:
# segmentations3 = [None]*len(methodsPred3)
#
# testOcclusionsFull3 = testOcclusions3.copy()
#
# # with open(resultDir2 + 'approxProjections.pickle', 'rb') as pfile:
# # approxProjectionsDic2 = pickle.load(pfile)
# #
# # approxProjections2 = approxProjectionsDic2['approxProjections']
# # approxProjectionsGT2 = approxProjectionsDic2['approxProjectionsGT']
#
# approxProjections2 = None
# approxProjectionsGT2 = None
#
# testPrefix3 = 'train4_occlusion_shapemodel_10k_ECCV-SYNTHETIC-GAUSSIAN11664-17944_optimize_300samples__method1errorFun1_std0.05_shapePen0'
# resultDir3 = 'results/' + testPrefix3 + '/'
# with open(resultDir3 + 'experiment.pickle', 'rb') as pfile:
# experimentDic3 = pickle.load(pfile)
# testSet3 = experimentDic3['testSet']
# methodsPred3 = experimentDic3['methodsPred']
# testOcclusions3 = experimentDic3[ 'testOcclusions']
# testPrefixBase3 = experimentDic3[ 'testPrefixBase']
# parameterRecognitionModels3 = experimentDic3[ 'parameterRecognitionModels']
# azimuths3 = experimentDic3['azimuths']
# elevations3 = experimentDic3[ 'elevations']
# vColors3 = experimentDic3[ 'vColors']
# lightCoeffs3 = experimentDic3[ 'lightCoeffs']
# likelihoods3 = experimentDic3['likelihoods']
# shapeParams3 = experimentDic3['shapeParams']
# if os.path.isfile(resultDir3 + 'segmentations.pickle'):
# with open(resultDir3 + 'segmentations.pickle', 'rb') as pfile:
# segmentationsDic3 = pickle.load(pfile)
# segmentations3 = segmentationsDic3['segmentations']
# else:
# segmentations3 = [None]*len(methodsPred3)
#
# testOcclusionsFull3 = testOcclusions3.copy()
#
# # with open(resultDir3 + 'approxProjections.pickle', 'rb') as pfile:
# # approxProjectionsDic3 = pickle.load(pfile)
# #
# # approxProjections3 = approxProjectionsDic3['approxProjections']
# # approxProjectionsGT3 = approxProjectionsDic3['approxProjectionsGT']
#
# approxProjections3 = None
# approxProjectionsGT3 = None
#
# ipdb.set_trace()
#
# range1 = np.arange(len(azimuths2[3]))
# range2 = np.arange(-1000 + len(azimuths2[3]) + len(azimuths3[3]),len(azimuths3[3]))
#
# testSet, parameterRecognitionModels, testPrefixBase, methodsPred, testOcclusions, azimuths, elevations, vColors, lightCoeffs, shapeParams, likelihoods, segmentations, approxProjections, approxProjectionsGT = \
# joinExperiments(range1, range2, testSet2,methodsPred2,testOcclusions2,testPrefixBase2,parameterRecognitionModels2,azimuths2,elevations2,vColors2,lightCoeffs2,likelihoods2,shapeParams2,segmentations2, approxProjections2, approxProjectionsGT3, testSet3,methodsPred3,testOcclusions3,testPrefixBase3,parameterRecognitionModels3,azimuths3,elevations3,vColors3,lightCoeffs3,likelihoods3,shapeParams3,segmentations3, approxProjections3, approxProjectionsGT3)
#
# testSet = testSet2
# testOcclusions = testOcclusions2
# testPrefix = 'train4_occlusion_shapemodel_10k_ECCV-SYNTHETIC-GAUSSIAN2530-17944_optimize_1000samples__method1errorFun1_std0.05_shapePen0-FINAL'
# resultDir = 'results/' + testPrefix + '/'
#
# experimentDic = {'testSet':testSet, 'methodsPred':methodsPred, 'testOcclusions':testOcclusions, 'likelihoods':likelihoods, 'testPrefixBase':testPrefixBase, 'parameterRecognitionModels':parameterRecognitionModels, 'azimuths':azimuths, 'elevations':elevations, 'vColors':vColors, 'lightCoeffs':lightCoeffs, 'shapeParams':shapeParams}
#
# with open(resultDir + 'experiment.pickle', 'wb') as pfile:
# pickle.dump(experimentDic, pfile)
# #
# #
#
# methodsPred = methodsPred + ['Gaussian (OpenGL)']
# azimuths = azimuths + [azimuths2[3]]
# elevations = elevations + [elevations2[3]]
# vColors = vColors + [vColors2[3]]
# lightCoeffs = lightCoeffs + [lightCoeffs2[3]]
# likelihoods = likelihoods + [likelihoods2[3]]
# shapeParams = shapeParams + [shapeParams2[3]]
# segmentations = segmentations + [segmentations2[3]]
#
# methodsPred[0] = methodsPred2[0]
# azimuths[0] = azimuths2[0]
# elevations[0] = elevations2[0]
# vColors[0] = vColors2[0]
# lightCoeffs[0] = lightCoeffs2[0]
# likelihoods[0] = likelihoods2[0]
# shapeParams[0] = shapeParams2[0]
# segmentations[0] = segmentations2[0]
# Ground-truth renderer state.  The renderer object itself is created lazily
# (see the commented-out per-scene loop below); only the differentiable
# chumpy parameters are set up here.
rendererGT = None
# Point-light intensity for the ground-truth render (scalar chumpy variable).
chPointLightIntensityGT = ch.Ch([1])
# White light colour scaled by the point-light intensity.
light_colorGT = ch.ones(3)*chPointLightIntensityGT
# Object displacement and (x, y, z) scale for the ground-truth object.
chDisplacementGT = ch.Ch([0.0,0.0,0.0])
chScaleGT = ch.Ch([1, 1.,1.])
# Catalogue of scenes whose target object can be swapped out.
replaceableScenesFile = '../databaseFull/fields/scene_replaceables_backup.txt'
# likelihoods = [np.array([]), np.array([])]
# NOTE(review): mid-file import, kept in place because OpenGL context creation
# may need to happen after the GL mode is configured earlier in the script.
from OpenGL import contextdata
# #
# segmentations[2] = np.zeros([len(testSet), 150,150])
# segmentations[3] = np.zeros([len(testSet), 150,150])
# segmentations[4] = np.zeros([len(testSet), 150,150])
# segmentations[5] = np.zeros([len(testSet), 150,150])
#
#
# for test_i in range(len(testSet)):
#
# # sceneNumber = dataScenes[test_i]
# # sceneIdx = scene_io_utils.getSceneIdx(sceneNumber, replaceableScenesFile)
# # sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndicesScene, targetPositions = scene_io_utils.getSceneInformation(sceneIdx, replaceableScenesFile)
# # # sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndices, targetPositions = scene_io_utils.getSceneInformation(sceneIdx, replaceableScenesFile)
# #
# # targetIndex = dataTargetIndices[test_i]
# # sceneDicFile = 'data/scene' + str(sceneNumber) + '.pickle'
# # v, f_list, vc, vn, uv, haveTextures_list, textures_list = scene_io_utils.loadSavedScene(sceneDicFile, True)
# #
# # removeObjectData(len(v) -1 - targetIndex, v, f_list, vc, vn, uv, haveTextures_list, textures_list)
# #
# # addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, smVerticesGT, smFacesGT, smVColorsGT, smNormalsGT, smUVsGT, smHaveTexturesGT, smTexturesListGT)
# #
# # if rendererGT is not None:
# # rendererGT.makeCurrentContext()
# # rendererGT.clear()
# # contextdata.cleanupContext(contextdata.getContext())
# # if glMode == 'glfw':
# # glfw.destroy_window(rendererGT.win)
# # del rendererGT
# #
# # targetPosition = targetPositions[np.where(targetIndex==np.array(targetIndicesScene))[0]]
# #
# # rendererGT = createRendererGT(glMode, chAzGT, chObjAzGT, chElGT, chDistGT, center, v, vc, f_list, vn, light_colorGT, chComponentGT, chVColorsGT, targetPosition.copy(), chDisplacementGT, chScaleGT, width,height, uv, haveTextures_list, textures_list, frustum, None )
# #
# # for hdrFile, hdrValues in hdritems:
# # hdridx = hdrValues[0]
# # envMapCoeffs = hdrValues[1]
# # if hdridx == dataEnvMaps[test_i]:
# # break
# # envMapFilename = hdrFile
# #
# # phiOffset[:] = dataEnvMapPhiOffsets[test_i]
# # chObjAzGT[:] = testObjAzsGT[test_i]
# # chAzGT[:] = testAzsGT[test_i]
# # chElGT[:] = testElevsGT[test_i]
# # chVColorsGT[:] = testVColorGT[test_i]
# # envMapCoeffsRotated[:] = np.dot(light_probes.chSphericalHarmonicsZRotation(totalOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
# # envMapCoeffsRotatedRel[:] = np.dot(light_probes.chSphericalHarmonicsZRotation(phiOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
# # chShapeParamsGT[:] = dataShapeModelCoeffsGT[test_i]
#
# im = images[test_i]
# rendererGT = srgb2lin(im.copy())
#
# chLightSHCoeffs[:] = testLightCoefficientsGTRel[test_i]
# chObjAz[:] = 0
# chAz[:] = testAzsRel[test_i]
# chEl[:] = testElevsGT[test_i]
# chVColors[:] = testVColorGT[test_i]
# chShapeParams[:] = testShapeParamsGT[test_i]
#
# stds[:] = 0.05
#
# negLikModelRobust = -ch.sum(generative_models.LogRobustModel(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variances))/numPixels
#
# # likelihoods[0] = np.append(likelihoods[0], negLikModelRobust.r)
#
# chLightSHCoeffs[:] = lightCoeffs[3][idsInRange[test_i]]
# chObjAz[:] = 0
# chAz[:] = azimuths[3][idsInRange[test_i]]
# chEl[:] = elevations[3][idsInRange[test_i]]
# chVColors[:] = vColors[3][idsInRange[test_i]]
# chShapeParams[:] = shapeParams[3][idsInRange[test_i]]
#
# # likelihoods[1] = np.append(likelihoods[1], negLikModelRobust.r)
#
# #masksGT
# #render[~mask*vis_im] = np.concatenate([np.ones([1000,1000])[:,:,None], np.zeros([1000,1000])[:,:,None],np.zeros([1000,1000])[:,:,None]], axis=2)[~mask*vis_im]
#
# vis_im = np.array(renderer.indices_image==1).copy().astype(np.bool)
# post = generative_models.layerPosteriorsRobustCh(rendererGT, renderer, vis_im, 'MASK', globalPrior, variances)[0].r>0.5
# render = ~post.copy()
# mask = masksGT[test_i]
# render[~vis_im] = 1
#
# segmentations[3][test_i] = post
#
#
# chLightSHCoeffs[:] = lightCoeffs[2][idsInRange[test_i]]
# chObjAz[:] = 0
# chAz[:] = azimuths[2][idsInRange[test_i]]
# chEl[:] = elevations[2][idsInRange[test_i]]
# chVColors[:] = vColors[2][idsInRange[test_i]]
# chShapeParams[:] = shapeParams[2][idsInRange[test_i]]
#
# # likelihoods[2] = np.append(likelihoods[2], negLikModelRobust.r)
#
# # masksGT
# # render[~mask*vis_im] = np.concatenate([np.ones([1000,1000])[:,:,None], np.zeros([1000,1000])[:,:,None],np.zeros([1000,1000])[:,:,None]], axis=2)[~mask*vis_im]
#
# post = np.array(renderer.indices_image == 1).copy().astype(np.bool)
#
# segmentations[2][test_i] = post
#
#
# chLightSHCoeffs[:] = lightCoeffs[5][idsInRange[test_i]]
# chObjAz[:] = 0
# chAz[:] = azimuths[5][idsInRange[test_i]]
# chEl[:] = elevations[5][idsInRange[test_i]]
# chVColors[:] = vColors[5][idsInRange[test_i]]
# chShapeParams[:] = shapeParams[5][idsInRange[test_i]]
#
# # likelihoods[1] = np.append(likelihoods[1], negLikModelRobust.r)
#
# # masksGT
# # render[~mask*vis_im] = np.concatenate([np.ones([1000,1000])[:,:,None], np.zeros([1000,1000])[:,:,None],np.zeros([1000,1000])[:,:,None]], axis=2)[~mask*vis_im]
#
# post = np.array(renderer.indices_image == 1).copy().astype(np.bool)
#
# segmentations[5][test_i] = post
# #
# # chLightSHCoeffs[:] = lightCoeffs[4][idsInRange[test_i]]
# # chObjAz[:] = 0
# # chAz[:] = azimuths[4][idsInRange[test_i]]
# # chEl[:] = elevations[4][idsInRange[test_i]]
# # chVColors[:] = vColors[4][idsInRange[test_i]]
# # chShapeParams[:] = shapeParams[4][idsInRange[test_i]]
#
# # likelihoods[1] = np.append(likelihoods[1], negLikModelRobust.r)
#
# # masksGT
# # render[~mask*vis_im] = np.concatenate([np.ones([1000,1000])[:,:,None], np.zeros([1000,1000])[:,:,None],np.zeros([1000,1000])[:,:,None]], axis=2)[~mask*vis_im]
#
# # vis_im = np.array(renderer.indices_image == 1).copy().astype(np.bool)
# # post = generative_models.layerPosteriorsRobustCh(rendererGT, renderer, vis_im, 'MASK', globalPrior, variances)[0].r > 0.5
# # render = ~post.copy()
# # mask = masksGT[test_i]
# # render[~vis_im] = 1
# #
# # segmentations[4][test_i] = post
#
# # renderRGB = np.concatenate([render[:,:,None], render[:,:,None], render[:,:,None]], axis=2)
#
# # cv2.imwrite('tmp/SH/renderergt' + str(test_i) + '.jpeg' , 255*lin2srgb(rendererGT[:,:,[2,1,0]]), [int(cv2.IMWRITE_JPEG_QUALITY), 100])
# # plt.imsave('tmp/SH/mask' + str(test_i) + '.jpeg', mask)
# # cv2.imwrite('tmp/SH/post' + str(test_i) + '.jpeg' , 255*post, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
# # cv2.imwrite('tmp/SH/post' + str(test_i) + '.jpeg' , 255*render, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
# # plt.imsave('tmp/SH/renderer' + str(test_i) + '.png' , lin2srgb(renderer.r.copy()))
# # ipdb.set_trace()
# # # #
#
# methodsPred[3] = 'Robust Fit'
#
# methodsPred[4] = 'Robust (OpenGL)'
#
# methodsPred[5] = 'Recognition (OpenGL)'
#
# methodsPred[6] = 'Gaussian Fit'
#
# segmentations[4] = segmentations3[3]
#
# errorsPosePredList, errorsLightCoeffsList, errorsShapeParamsList, errorsShapeVerticesList, errorsEnvMapList, errorsLightCoeffsCList, errorsVColorsEList, errorsVColorsCList, errorsVColorsSList, errorsSegmentationList \
# = computeErrors(np.arange(len(rangeTests)), azimuths, testAzsRel, elevations, testElevsGT, vColors, testVColorGT, lightCoeffs, testLightCoefficientsGTRel, approxProjections, approxProjectionsGT, shapeParams, testShapeParamsGT, useShapeModel, chShapeParams, chVertices, segmentations, masksGT)
# envMapTexture = np.zeros([180,360,3])
# approxProjectionsFittedList = []
# for test_i in range(len(testSet)):
# pEnvMap = SHProjection(envMapTexture, np.concatenate([lightCoeffs[4][test_i][:,None], lightCoeffs[4][test_i][:,None], lightCoeffs[4][test_i][:,None]], axis=1))
# approxProjection = np.sum(pEnvMap, axis=(2,3))
# approxProjectionsFittedList = approxProjectionsFittedList + [approxProjection[None,:]]
# approxProjectionsFitted = np.vstack(approxProjectionsFittedList)
#
# envMapTexture = np.zeros([180,360,3])
# approxProjectionsGTList = []
# for test_i in range(len(testSet)):
# pEnvMap = SHProjection(envMapTexture, np.concatenate([testLightCoefficientsGTRel[test_i][:,None], testLightCoefficientsGTRel[test_i][:,None], testLightCoefficientsGTRel[test_i][:,None]], axis=1))
# approxProjectionGT = np.sum(pEnvMap, axis=(2,3))
# approxProjectionsGTList = approxProjectionsGTList + [approxProjectionGT[None,:]]
# approxProjectionsGT = np.vstack(approxProjectionsGTList)
# with open(resultDir + 'experiment_errors.pickle', 'rb') as pfile:
# experimentErrorsDic = pickle.load(pfile)
#
# errorsPosePredList = experimentErrorsDic['errorsPosePredList']
# errorsLightCoeffsList = experimentErrorsDic['errorsLightCoeffsList']
# errorsShapeParamsList = experimentErrorsDic['errorsShapeParamsLis']
# errorsShapeVerticesList = experimentErrorsDic['errorsShapeVerticesList']
# errorsEnvMapList = experimentErrorsDic['errorsEnvMapList']
# errorsLightCoeffsCList = experimentErrorsDic['errorsLightCoeffsCList']
# errorsVColorsEList = experimentErrorsDic['errorsVColorsEList']
# errorsVColorsCList = experimentErrorsDic['errorsVColorsCList']
# errorsVColorsSList = experimentErrorsDic['errorsVColorsSList']
# errorsSegmentationList = experimentErrorsDic['errorsSegmentationList']
# Experiment whose results are analysed below; all tables and plots are
# written under results/<testPrefix>/.
testPrefix = 'train4_occlusion_shapemodel_10k_ECCV-PHOTOREALISTIC-JOINT2018'
resultDir = 'results/' + testPrefix + '/'
# experimentDic = {'testSet':testSet, 'methodsPred':methodsPred, 'testOcclusions':testOcclusions, 'likelihoods':likelihoods, 'testPrefixBase':testPrefixBase, 'parameterRecognitionModels':parameterRecognitionModels, 'azimuths':azimuths, 'elevations':elevations, 'vColors':vColors, 'lightCoeffs':lightCoeffs, 'shapeParams':shapeParams}
#
# with open(resultDir + 'experiment.pickle', 'wb') as pfile:
# pickle.dump(experimentDic, pfile)
# #
# experimentErrorsDic = {'errorsPosePredList':errorsPosePredList, 'errorsLightCoeffsList':errorsLightCoeffsList, 'errorsShapeParamsLis':errorsShapeParamsList, 'errorsShapeVerticesList':errorsShapeVerticesList, 'errorsEnvMapList':errorsEnvMapList, 'errorsLightCoeffsCList':errorsLightCoeffsCList, 'errorsVColorsEList':errorsVColorsEList, 'errorsVColorsCList':errorsVColorsCList, 'errorsVColorsSList':errorsVColorsSList,'errorsSegmentationList':errorsSegmentationList}
#
#
# with open(resultDir + 'experiment_errors.pickle', 'wb') as pfile:
# pickle.dump(experimentErrorsDic, pfile)
# Load the precomputed per-test-sample error arrays for every method.
with open(resultDir + 'experiment_errors.pickle', 'rb') as pfile:
    experimentErrorsDic = pickle.load(pfile)

errorsPosePredList = experimentErrorsDic['errorsPosePredList']
errorsLightCoeffsList = experimentErrorsDic['errorsLightCoeffsList']
# NOTE: the key 'errorsShapeParamsLis' (missing 't') matches the misspelled key
# used when the dictionary was written (see the commented-out dump above), so
# it must stay misspelled for backwards compatibility with existing pickles.
errorsShapeParamsList = experimentErrorsDic['errorsShapeParamsLis']
errorsShapeVerticesList = experimentErrorsDic['errorsShapeVerticesList']
errorsEnvMapList = experimentErrorsDic['errorsEnvMapList']
errorsLightCoeffsCList = experimentErrorsDic['errorsLightCoeffsCList']
errorsVColorsEList = experimentErrorsDic['errorsVColorsEList']
errorsVColorsCList = experimentErrorsDic['errorsVColorsCList']
errorsVColorsSList = experimentErrorsDic['errorsVColorsSList']
errorsSegmentationList = experimentErrorsDic['errorsSegmentationList']

# Per-method mean of every error variable over the full test set.
# BUG FIX: the unpacking previously bound 'meanErrorsVColorsCList' twice, so
# the colour-C slot was silently overwritten by the colour-S averages, and the
# last name ('meanErrorsSegmentation') disagreed with the rest of the file.
# The names now match the 11 values returned by computeErrorAverages at its
# other call sites below.
meanAbsErrAzsList, meanAbsErrElevsList, meanErrorsLightCoeffsList, meanErrorsShapeParamsList, meanErrorsShapeVerticesList, meanErrorsLightCoeffsCList, meanErrorsEnvMapList, meanErrorsVColorsEList, meanErrorsVColorsCList, meanErrorsVColorsSList, meanErrorsSegmentationList \
    = computeErrorAverages(np.mean, np.arange(len(rangeTests)), useShapeModel, errorsPosePredList, errorsLightCoeffsList, errorsShapeParamsList, errorsShapeVerticesList, errorsEnvMapList, errorsLightCoeffsCList, errorsVColorsEList, errorsVColorsCList, errorsVColorsSList, errorsSegmentationList)
# A nearest-neighbour baseline gets an extra entry (magenta, solid) at the
# front of the per-method plot colour/style tables.
nearestNeighbours = 'Nearest Neighbours' in methodsPred

# Per-method line colours for the occlusion plots.
plotColors = ['k']
if nearestNeighbours:
    # methodsPred = methodsPred + ["Nearest Neighbours"]
    plotColors.append('m')
plotColors += ['b', 'r', 'r', 'b', 'g', 'g']

# Per-method line styles, parallel to plotColors.
plotStyles = ['solid']
if nearestNeighbours:
    # methodsPred = methodsPred + ["Nearest Neighbours"]
    plotStyles.append('solid')
plotStyles += ['solid', 'solid', 'dashed', 'dashed', 'solid', 'dashed']
# Methods (indices into methodsPred) that appear in the comparison plots.
plotMethodsIndices = [2,5,6, 7, 3,4]
# plotMethodsIndices = [0,2,6,3]

# BUG FIX: removed a leftover 'import ipdb; ipdb.set_trace()' debugging
# breakpoint here that halted unattended runs of this analysis script.

recognitionIdx = 2  # index of the recognition-only method in methodsPred
robustIdx = 3       # index of the robust-fit method in methodsPred

# Split the per-method pose errors (tuple of azimuth/elevation arrays) into
# one list per variable; methods with no pose prediction stay None.
errorsAzimuthList = [errorsPosePredList[i][0] if errorsPosePredList[i] is not None else None for i in range(len(methodsPred))]
errorsElevationList = [errorsPosePredList[i][1] if errorsPosePredList[i] is not None else None for i in range(len(methodsPred))]

# Variable names and matching error lists for the conditional histograms.
variablesDescr = ['Azimuth', 'Elevation', 'VColor', 'Illumination', 'Shape', 'Segmentation']
errorsList = [errorsAzimuthList,errorsElevationList, errorsVColorsCList, errorsLightCoeffsCList, errorsShapeVerticesList, errorsSegmentationList]

saveConditionalHistograms(resultDir, testOcclusions, methodsPred, variablesDescr, plotMethodsIndices, errorsList)

# BUG FIX: corrected the "occlusin" typo in the progress message.
print("Printing occlusion-likelihood plots!")
# meanLikelihoodArr = [np.array([]), np.array([]), np.array([]), np.array([])]
# occlusions = []
# for occlusionLevel in range(100):
#
# setUnderOcclusionLevel = testOcclusionsFull * 100 < occlusionLevel
#
# if np.any(setUnderOcclusionLevel):
# occlusions = occlusions + [occlusionLevel]
# testOcclusions = testOcclusionsFull[setUnderOcclusionLevel]
#
# if likelihoods[0] is not None:
# meanLikelihoodArr[0] = np.append(meanLikelihoodArr[0], np.mean(likelihoods[0][setUnderOcclusionLevel]))
# if likelihoods[1] is not None:
# meanLikelihoodArr[1] = np.append(meanLikelihoodArr[1], np.mean(likelihoods[1][setUnderOcclusionLevel]))
# if likelihoods[2] is not None:
# meanLikelihoodArr[2] = np.append(meanLikelihoodArr[2], np.mean(likelihoods[2][setUnderOcclusionLevel]))
# if likelihoods[3] is not None:
# meanLikelihoodArr[3] = np.append(meanLikelihoodArr[3], np.mean(likelihoods[3][setUnderOcclusionLevel]))
# saveLikelihoodPlots(resultDir, occlusions, methodsPred, plotColors, plotMethodsIndices, meanLikelihoodArr)
#
# print("Computing means!")
#
# ---------------------------------------------------------------------------
# Mean error vs. maximum-occlusion curves.
# For each occlusion threshold in [0, 100) select the test images whose
# ground-truth occlusion percentage is below the threshold and append, for
# every method, the mean error of each estimated variable on that subset.
# ---------------------------------------------------------------------------
# One empty accumulator per method; None-handling for segmentation happens
# inside the loop below.
meanAbsErrAzsArr = [np.array([]) for _ in range(len(methodsPred))]
meanAbsErrElevsArr = [np.array([]) for _ in range(len(methodsPred))]
meanErrorsLightCoeffsArr = [np.array([]) for _ in range(len(methodsPred))]
meanErrorsEnvMapArr = [np.array([]) for _ in range(len(methodsPred))]
meanErrorsShapeParamsArr = [np.array([]) for _ in range(len(methodsPred))]
meanErrorsShapeVerticesArr = [np.array([]) for _ in range(len(methodsPred))]
meanErrorsLightCoeffsCArr = [np.array([]) for _ in range(len(methodsPred))]
meanErrorsVColorsEArr = [np.array([]) for _ in range(len(methodsPred))]
meanErrorsVColorsCArr = [np.array([]) for _ in range(len(methodsPred))]
meanErrorsVColorsSArr = [np.array([]) for _ in range(len(methodsPred))]
meanErrorsSegmentationArr = [np.array([]) for _ in range(len(methodsPred))]

occlusions = []

# BUG FIX: corrected the "occlusin" typo in the progress message.
print("Printing occlusion-error plots!")

for occlusionLevel in range(100):

    setUnderOcclusionLevel = testOcclusionsFull * 100 < occlusionLevel

    if np.any(setUnderOcclusionLevel):
        occlusions = occlusions + [occlusionLevel]
        testOcclusions = testOcclusionsFull[setUnderOcclusionLevel]
        colors = matplotlib.cm.plasma(testOcclusions)

        for method_i in range(len(methodsPred)):
            if errorsPosePredList[method_i] is not None:
                meanAbsErrAzsArr[method_i] = np.append(meanAbsErrAzsArr[method_i], np.mean(np.abs(errorsPosePredList[method_i][0][setUnderOcclusionLevel])))
                meanAbsErrElevsArr[method_i] = np.append(meanAbsErrElevsArr[method_i], np.mean(np.abs(errorsPosePredList[method_i][1][setUnderOcclusionLevel])))
            if errorsLightCoeffsList[method_i] is not None:
                meanErrorsLightCoeffsArr[method_i] = np.append(meanErrorsLightCoeffsArr[method_i],np.mean(np.mean(errorsLightCoeffsList[method_i][setUnderOcclusionLevel], axis=1), axis=0))
            if errorsLightCoeffsCList[method_i] is not None:
                meanErrorsLightCoeffsCArr[method_i] = np.append(meanErrorsLightCoeffsCArr[method_i],np.mean(np.mean(errorsLightCoeffsCList[method_i][setUnderOcclusionLevel], axis=1), axis=0))
            # Shape errors only exist when a parametric shape model was used.
            if useShapeModel:
                if errorsShapeParamsList[method_i] is not None:
                    meanErrorsShapeParamsArr[method_i] = np.append(meanErrorsShapeParamsArr[method_i],np.mean(np.mean(errorsShapeParamsList[method_i][setUnderOcclusionLevel], axis=1), axis=0))
                if errorsShapeVerticesList[method_i] is not None:
                    meanErrorsShapeVerticesArr[method_i] = np.append(meanErrorsShapeVerticesArr[method_i], np.mean(errorsShapeVerticesList[method_i][setUnderOcclusionLevel], axis=0))
            if errorsEnvMapList[method_i] is not None:
                meanErrorsEnvMapArr[method_i] = np.append(meanErrorsEnvMapArr[method_i], np.mean(errorsEnvMapList[method_i][setUnderOcclusionLevel]))
            if errorsVColorsEList[method_i] is not None:
                meanErrorsVColorsEArr[method_i] = np.append(meanErrorsVColorsEArr[method_i], np.mean(errorsVColorsEList[method_i][setUnderOcclusionLevel], axis=0))
            if errorsVColorsCList[method_i] is not None:
                meanErrorsVColorsCArr[method_i] = np.append(meanErrorsVColorsCArr[method_i], np.mean(errorsVColorsCList[method_i][setUnderOcclusionLevel], axis=0))
            if errorsVColorsSList[method_i] is not None:
                meanErrorsVColorsSArr[method_i] = np.append(meanErrorsVColorsSArr[method_i], np.mean(errorsVColorsSList[method_i][setUnderOcclusionLevel], axis=0))
            if errorsSegmentationList[method_i] is not None:
                meanErrorsSegmentationArr[method_i] = np.append(meanErrorsSegmentationArr[method_i], np.mean(errorsSegmentationList[method_i][setUnderOcclusionLevel], axis=0))
            else:
                meanErrorsSegmentationArr[method_i] = None

# BUG FIX: this message previously said "median" even though the *mean* curves
# are being saved here (the median pass follows below).
print("Printing occlusion-error plots - mean!")

saveOcclusionPlots(resultDir, 'mean',occlusions, methodsPred, plotColors, plotStyles, plotMethodsIndices, useShapeModel, meanAbsErrAzsArr, meanAbsErrElevsArr, meanErrorsVColorsCArr, meanErrorsVColorsEArr, meanErrorsVColorsSArr, meanErrorsLightCoeffsArr, meanErrorsShapeParamsArr, meanErrorsShapeVerticesArr, meanErrorsLightCoeffsCArr, meanErrorsEnvMapArr, meanErrorsSegmentationArr)
# ---------------------------------------------------------------------------
# Median error vs. maximum-occlusion curves (same structure as the mean pass
# above, but aggregating with np.median).
# ---------------------------------------------------------------------------
medianAbsErrAzsArr = [np.array([]) for _ in range(len(methodsPred))]
medianAbsErrElevsArr = [np.array([]) for _ in range(len(methodsPred))]
medianErrorsLightCoeffsArr = [np.array([]) for _ in range(len(methodsPred))]
medianErrorsEnvMapArr = [np.array([]) for _ in range(len(methodsPred))]
medianErrorsShapeParamsArr = [np.array([]) for _ in range(len(methodsPred))]
medianErrorsShapeVerticesArr = [np.array([]) for _ in range(len(methodsPred))]
medianErrorsLightCoeffsCArr = [np.array([]) for _ in range(len(methodsPred))]
medianErrorsVColorsEArr = [np.array([]) for _ in range(len(methodsPred))]
medianErrorsVColorsCArr = [np.array([]) for _ in range(len(methodsPred))]
medianErrorsVColorsSArr = [np.array([]) for _ in range(len(methodsPred))]
medianErrorsSegmentationArr = [np.array([]) for _ in range(len(methodsPred))]

occlusions = []

# BUG FIX: corrected the "occlusin" typo in the progress message.
print("Printing occlusion-error plots!")

for occlusionLevel in range(100):

    setUnderOcclusionLevel = testOcclusionsFull * 100 < occlusionLevel

    if np.any(setUnderOcclusionLevel):
        occlusions = occlusions + [occlusionLevel]
        testOcclusions = testOcclusionsFull[setUnderOcclusionLevel]
        colors = matplotlib.cm.plasma(testOcclusions)

        for method_i in range(len(methodsPred)):
            if errorsPosePredList[method_i] is not None:
                medianAbsErrAzsArr[method_i] = np.append(medianAbsErrAzsArr[method_i], np.median(np.abs(errorsPosePredList[method_i][0][setUnderOcclusionLevel])))
                medianAbsErrElevsArr[method_i] = np.append(medianAbsErrElevsArr[method_i], np.median(np.abs(errorsPosePredList[method_i][1][setUnderOcclusionLevel])))
            # NOTE(review): light coefficients use an inner *mean* over the 9 SH
            # bands before the outer median, while shape params below use an
            # inner *median* — looks inconsistent, confirm intent before changing.
            if errorsLightCoeffsList[method_i] is not None:
                medianErrorsLightCoeffsArr[method_i] = np.append(medianErrorsLightCoeffsArr[method_i],np.median(np.mean(errorsLightCoeffsList[method_i][setUnderOcclusionLevel], axis=1), axis=0))
            if errorsLightCoeffsCList[method_i] is not None:
                medianErrorsLightCoeffsCArr[method_i] = np.append(medianErrorsLightCoeffsCArr[method_i],np.median(np.mean(errorsLightCoeffsCList[method_i][setUnderOcclusionLevel], axis=1), axis=0))
            # Shape errors only exist when a parametric shape model was used.
            if useShapeModel:
                if errorsShapeParamsList[method_i] is not None:
                    medianErrorsShapeParamsArr[method_i] = np.append(medianErrorsShapeParamsArr[method_i],np.median(np.median(errorsShapeParamsList[method_i][setUnderOcclusionLevel], axis=1), axis=0))
                if errorsShapeVerticesList[method_i] is not None:
                    medianErrorsShapeVerticesArr[method_i] = np.append(medianErrorsShapeVerticesArr[method_i], np.median(errorsShapeVerticesList[method_i][setUnderOcclusionLevel], axis=0))
            if errorsEnvMapList[method_i] is not None:
                medianErrorsEnvMapArr[method_i] = np.append(medianErrorsEnvMapArr[method_i], np.median(errorsEnvMapList[method_i][setUnderOcclusionLevel]))
            if errorsVColorsEList[method_i] is not None:
                medianErrorsVColorsEArr[method_i] = np.append(medianErrorsVColorsEArr[method_i], np.median(errorsVColorsEList[method_i][setUnderOcclusionLevel], axis=0))
            if errorsVColorsCList[method_i] is not None:
                medianErrorsVColorsCArr[method_i] = np.append(medianErrorsVColorsCArr[method_i], np.median(errorsVColorsCList[method_i][setUnderOcclusionLevel], axis=0))
            if errorsVColorsSList[method_i] is not None:
                medianErrorsVColorsSArr[method_i] = np.append(medianErrorsVColorsSArr[method_i], np.median(errorsVColorsSList[method_i][setUnderOcclusionLevel], axis=0))
            if errorsSegmentationList[method_i] is not None:
                medianErrorsSegmentationArr[method_i] = np.append(medianErrorsSegmentationArr[method_i], np.median(errorsSegmentationList[method_i][setUnderOcclusionLevel], axis=0))
            else:
                medianErrorsSegmentationArr[method_i] = None

saveOcclusionPlots(resultDir, 'median', occlusions,methodsPred, plotColors, plotStyles, plotMethodsIndices, useShapeModel, medianAbsErrAzsArr, medianAbsErrElevsArr, medianErrorsVColorsCArr, medianErrorsVColorsEArr, medianErrorsVColorsSArr, medianErrorsLightCoeffsArr, medianErrorsShapeParamsArr, medianErrorsShapeVerticesArr, medianErrorsLightCoeffsCArr, medianErrorsEnvMapArr, medianErrorsSegmentationArr)
SHModel = ""
for occlusionLevel in [25,75,100]:
resultDirOcclusion = 'results/' + testPrefix + '/occlusion' + str(occlusionLevel) + '/'
if not os.path.exists(resultDirOcclusion):
os.makedirs(resultDirOcclusion)
setUnderOcclusionLevel = testOcclusionsFull * 100 < occlusionLevel
testOcclusions = testOcclusionsFull[setUnderOcclusionLevel]
#
#
errorsPosePred = [errorsPosePredList[recognitionIdx][0][setUnderOcclusionLevel], errorsPosePredList[recognitionIdx][1][setUnderOcclusionLevel]]
errorsLightCoeffs = errorsLightCoeffsList[recognitionIdx][setUnderOcclusionLevel]
errorsShapeParams = errorsShapeParamsList[recognitionIdx][setUnderOcclusionLevel]
errorsShapeVertices= errorsShapeVerticesList[recognitionIdx][setUnderOcclusionLevel]
if errorsEnvMapList[recognitionIdx] is not None:
errorsEnvMap= errorsEnvMapList[recognitionIdx][setUnderOcclusionLevel]
else:
errorsEnvMap = None
errorsLightCoeffsC= errorsLightCoeffsCList[recognitionIdx][setUnderOcclusionLevel]
errorsVColorsE= errorsVColorsEList[recognitionIdx][setUnderOcclusionLevel]
errorsVColorsC= errorsVColorsCList[recognitionIdx][setUnderOcclusionLevel]
errorsVColorsS= errorsVColorsSList[recognitionIdx][setUnderOcclusionLevel]
# errorsSegmentation = errorsSegmentationList[recognitionIdx][setUnderOcclusionLevel]
errorsSegmentation = None
errorsPoseFitted = [errorsPosePredList[robustIdx][0][setUnderOcclusionLevel], errorsPosePredList[robustIdx][1][setUnderOcclusionLevel]]
errorsFittedLightCoeffs = errorsLightCoeffsList[robustIdx][setUnderOcclusionLevel]
errorsFittedShapeParams = errorsShapeParamsList[robustIdx][setUnderOcclusionLevel]
errorsFittedShapeVertices= errorsShapeVerticesList[robustIdx][setUnderOcclusionLevel]
if errorsEnvMapList[recognitionIdx] is not None:
errorsFittedEnvMap = errorsEnvMapList[robustIdx][setUnderOcclusionLevel]
else:
errorsFittedEnvMap = None
errorsFittedLightCoeffsC= errorsLightCoeffsCList[robustIdx][setUnderOcclusionLevel]
errorsFittedVColorsE = errorsVColorsEList[robustIdx][setUnderOcclusionLevel]
errorsFittedVColorsC = errorsVColorsCList[robustIdx][setUnderOcclusionLevel]
errorsFittedVColorsS = errorsVColorsSList[robustIdx][setUnderOcclusionLevel]
# errorsFittedSegmentation = errorsSegmentationList[recognitionIdx][setUnderOcclusionLevel]
errorsFittedSegmentation = None
saveScatterPlots(resultDirOcclusion, testOcclusions, useShapeModel, errorsPosePred, errorsPoseFitted,errorsLightCoeffsC,errorsFittedLightCoeffsC,errorsEnvMap,errorsFittedEnvMap,errorsLightCoeffs,errorsFittedLightCoeffs,errorsShapeParams,errorsFittedShapeParams,errorsShapeVertices,errorsFittedShapeVertices,errorsVColorsE,errorsFittedVColorsE,errorsVColorsC,errorsFittedVColorsC, errorsVColorsS,errorsFittedVColorsS)
errorsPoseFitted = [errorsPosePredList[5][0][setUnderOcclusionLevel], errorsPosePredList[5][1][setUnderOcclusionLevel]]
errorsFittedLightCoeffs = errorsLightCoeffsList[5][setUnderOcclusionLevel]
errorsFittedShapeParams = errorsShapeParamsList[5][setUnderOcclusionLevel]
errorsFittedShapeVertices= errorsShapeVerticesList[5][setUnderOcclusionLevel]
if errorsEnvMapList[recognitionIdx] is not None:
errorsFittedEnvMap = errorsEnvMapList[5][setUnderOcclusionLevel]
else:
errorsFittedEnvMap = None
errorsFittedLightCoeffsC= errorsLightCoeffsCList[5][setUnderOcclusionLevel]
errorsFittedVColorsE= errorsVColorsEList[5][setUnderOcclusionLevel]
# --- Occlusion-level report tail: gather the "fitted" method's errors (index 5
# of every *List — presumably the final fitted estimates; TODO confirm against
# the methodsPred ordering), plot them against the predictions, and write
# LaTeX summary tables. ---
errorsFittedVColorsC= errorsVColorsCList[5][setUnderOcclusionLevel]
errorsFittedVColorsS= errorsVColorsSList[5][setUnderOcclusionLevel]
errorsFittedSegmentation = errorsSegmentationList[5][setUnderOcclusionLevel]
# Scatter plots of predicted-vs-fitted error for every estimated quantity.
saveScatterPlotsMethodFit(4, resultDirOcclusion, testOcclusions, useShapeModel, errorsPosePred, errorsPoseFitted,errorsLightCoeffsC,errorsFittedLightCoeffsC,errorsEnvMap,errorsFittedEnvMap,errorsLightCoeffs,errorsFittedLightCoeffs,errorsShapeParams,errorsFittedShapeParams,errorsShapeVertices,errorsFittedShapeVertices,errorsVColorsE,errorsFittedVColorsE,errorsVColorsC,errorsFittedVColorsC, errorsVColorsS,errorsFittedVColorsS)
# saveLikelihoodScatter(resultDirOcclusion, setUnderOcclusionLevel, testOcclusions, likelihoods)
# if len(stdevsFull) > 0:
# stdevs = stdevsFull[setUnderOcclusionLevel]
colors = matplotlib.cm.plasma(testOcclusions)
# Mean and median error summaries restricted to the current occlusion subset,
# one list entry per prediction method.
meanAbsErrAzsList, meanAbsErrElevsList, meanErrorsLightCoeffsList, meanErrorsShapeParamsList, meanErrorsShapeVerticesList, meanErrorsLightCoeffsCList, meanErrorsEnvMapList, meanErrorsVColorsEList, meanErrorsVColorsCList, meanErrorsVColorsSList, meanErrorsSegmentationList \
    = computeErrorAverages(np.mean, setUnderOcclusionLevel, useShapeModel, errorsPosePredList, errorsLightCoeffsList, errorsShapeParamsList, errorsShapeVerticesList, errorsEnvMapList, errorsLightCoeffsCList, errorsVColorsEList, errorsVColorsCList, errorsVColorsSList, errorsSegmentationList)
medianAbsErrAzsList, medianAbsErrElevsList, medianErrorsLightCoeffsList, medianErrorsShapeParamsList, medianErrorsShapeVerticesList, medianErrorsLightCoeffsCList, medianErrorsEnvMapList, medianErrorsVColorsEList, medianErrorsVColorsCList, medianErrorsVColorsSList, medianErrorsSegmentationList \
    = computeErrorAverages(np.median, setUnderOcclusionLevel, useShapeModel, errorsPosePredList, errorsLightCoeffsList, errorsShapeParamsList, errorsShapeVerticesList, errorsEnvMapList, errorsLightCoeffsCList, errorsVColorsEList, errorsVColorsCList, errorsVColorsSList, errorsSegmentationList)
# Write statistics to file.
# Rebinds the name 'tabulate' to the module object: the top of this file does
# 'from tabulate import tabulate', but the calls below use tabulate.tabulate().
import tabulate
headers = ["Errors"] + methodsPred
table = [["Azimuth"] + meanAbsErrAzsList,
         ["Elevation"] + meanAbsErrElevsList,
         ["VColor C"] + meanErrorsVColorsCList,
         ["VColor S"] + meanErrorsVColorsSList,
         ["SH Light"] + meanErrorsLightCoeffsList,
         ["SH Light C"] + meanErrorsLightCoeffsCList,
         ["SH Env Map"] + meanErrorsEnvMapList,
         ["Shape Params"] + meanErrorsShapeParamsList,
         ["Shape Vertices"] + meanErrorsShapeVerticesList,
         ["Segmentation"] + meanErrorsSegmentationList
         ]
performanceTable = tabulate.tabulate(table, headers=headers, tablefmt="latex", floatfmt=".3f")
with open(resultDirOcclusion + 'performance.tex', 'w') as expfile:
    expfile.write(performanceTable)
table = [["Azimuth"] + medianAbsErrAzsList,
         ["Elevation"] + medianAbsErrElevsList,
         ["VColor C"] + medianErrorsVColorsCList,
         ["VColor S"] + medianErrorsVColorsSList,
         ["SH Light"] + medianErrorsLightCoeffsList,
         ["SH Light C"] + medianErrorsLightCoeffsCList,
         ["SH Env Map"] + medianErrorsEnvMapList,
         ["Shape Params"] + medianErrorsShapeParamsList,
         ["Shape Vertices"] + medianErrorsShapeVerticesList,
         ["Segmentation"] + medianErrorsSegmentationList
         ]
performanceTable = tabulate.tabulate(table, headers=headers, tablefmt="latex", floatfmt=".3f")
with open(resultDirOcclusion + 'median-performance.tex', 'w') as expfile:
    expfile.write(performanceTable)
# Per-spherical-harmonic-coefficient error tables, one file per method.
headers = ["", "l=0", "SH $l=0,m=-1$", "SH $l=1,m=0$", "SH $l=1,m=1$", "SH $l=1,m=-2$", "SH $l=2,m=-1$",
           "SH $l=2,m=0$", "SH $l=2,m=1$", "SH $l=2,m=2$"]
for method_i in plotMethodsIndices:
    SMSE_SH = np.mean(errorsLightCoeffsList[method_i][setUnderOcclusionLevel], axis=0)
    table = [
        [SHModel, SMSE_SH[0], SMSE_SH[1], SMSE_SH[2], SMSE_SH[3], SMSE_SH[4], SMSE_SH[5], SMSE_SH[6], SMSE_SH[7],
         SMSE_SH[8]],
        ]
    performanceTable = tabulate.tabulate(table, headers=headers, tablefmt="latex", floatfmt=".3f")
    # NOTE(review): these per-method tables go to resultDir while the aggregate
    # tables above use resultDirOcclusion — confirm that is intentional.
    with open(resultDir + 'performance_SH_' + methodsPred[method_i] + '.tex', 'w') as expfile:
        expfile.write(performanceTable)
    # expfile.write(performanceTable)
    SMSE_SH = np.mean(errorsLightCoeffsCList[method_i][setUnderOcclusionLevel], axis=0)
    table = [
        [SHModel, SMSE_SH[0], SMSE_SH[1], SMSE_SH[2], SMSE_SH[3], SMSE_SH[4], SMSE_SH[5], SMSE_SH[6], SMSE_SH[7],
         SMSE_SH[8]],
        ]
    performanceTable = tabulate.tabulate(table, headers=headers, tablefmt="latex", floatfmt=".3f")
    with open(resultDir + 'performance_SH_C_' + methodsPred[method_i] + '.tex', 'w') as expfile:
        expfile.write(performanceTable)
    if useShapeModel:
        # NOTE(review): reuses the 10-column SH headers for an 11-column row.
        SMSE_SHAPE_PARAMS = np.mean(errorsShapeParamsList[method_i][setUnderOcclusionLevel], axis=0)
        table = [[SHModel, SMSE_SHAPE_PARAMS[0], SMSE_SHAPE_PARAMS[1], SMSE_SHAPE_PARAMS[2], SMSE_SHAPE_PARAMS[3],
                  SMSE_SHAPE_PARAMS[4], SMSE_SHAPE_PARAMS[5], SMSE_SHAPE_PARAMS[6], SMSE_SHAPE_PARAMS[7],
                  SMSE_SHAPE_PARAMS[8], SMSE_SHAPE_PARAMS[9]]]
        performanceTable = tabulate.tabulate(table, headers=headers, tablefmt="latex", floatfmt=".3f")
        with open(resultDir + 'performance_ShapeParams_' + methodsPred[method_i] + '.tex', 'w') as expfile:
            expfile.write(performanceTable)
# Restore interactive plotting (disabled with plt.ioff() at the top of the file).
plt.ion()
print("Finished.")
| 58,258 | 45.127474 | 466 | py |
inversegraphics | inversegraphics-master/export_collisions.py | #!/usr/bin/env python3.4m
import scene_io_utils
import re
from blender_utils import *
from collision import *
# Detect, per scene and per target location, which azimuth rotations of a
# target-sized proxy cube are collision-free, and pickle the resulting free
# intervals to data/collisions_new/collisionScene<N>.pickle.
numpy.random.seed(1)
inchToMeter = 0.0254
outputDir = 'data/'
width = 150
height = 150
numSamples = 100
useCycles = False
distance = 0.75
scene_io_utils.loadTargetsBlendData()
sceneCollisions = {}
replaceableScenesFile = '../databaseFull/fields/scene_replaceables_backup_new.txt'
sceneLines = [line.strip() for line in open(replaceableScenesFile)]
teapots = [line.strip() for line in open('teapots.txt')]
renderTeapotsList = np.arange(len(teapots))
# Proxy bounding box (metres) standing in for the real target meshes during
# the collision tests; matches the target normalisation used in scene_io_utils.
scaleZ = 0.265
scaleY = 0.18
scaleX = 0.35
cubeScale = mathutils.Matrix([[scaleX/2, 0,0,0], [0,scaleY/2,0 ,0] ,[0,0,scaleZ/2,0],[0,0,0,1]])
for sceneIdx in range(len(sceneLines))[:]:
    sceneLine = sceneLines[sceneIdx]
    sceneParts = sceneLine.split(' ')
    sceneFile = sceneParts[0]
    sceneNumber = int(re.search('.+?scene([0-9]+)\.txt', sceneFile, re.IGNORECASE).groups()[0])
    scene_io_utils.loadTargetsBlendData()
    bpy.ops.wm.read_factory_settings()
    scene_io_utils.loadSceneBlendData(sceneIdx, replaceableScenesFile)
    scene = bpy.data.scenes['Main Scene']
    scene.render.resolution_x = width #perhaps set resolution in code
    scene.render.resolution_y = height
    scene.render.tile_x = height/2
    scene.render.tile_y = width/2
    scene.cycles.samples = 10
    sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndices, targetPositions = scene_io_utils.getSceneInformation(sceneIdx, replaceableScenesFile)
    # Find the room instance's name among the scene objects.
    roomName = ''
    for model in scene.objects:
        reg = re.compile('(room[0-9]+)')
        res = reg.match(model.name)
        if res:
            roomName = res.groups()[0]
    # Borrow the default 'Cube' object as the collision proxy; unlink any stale
    # copy from 'Main Scene' first (best-effort, hence the bare except).
    cubeTarget = bpy.data.scenes['Scene'].objects['Cube']
    try:
        bpy.data.scenes['Main Scene'].objects.unlink(bpy.data.scenes['Scene'].objects['Cube'])
    except:
        pass
    scene.objects.link(cubeTarget)
    targetCollisions = {}
    for targetIdx in range(len(targetIndices)):
        targetParentIndex = targetIndices[targetIdx]
        targetParentPosition = np.array(targetPositions[targetIdx])
        # Remove the object currently occupying the target spot.
        scene.objects.unlink(scene.objects[str(targetParentIndex)])
        director = outputDir
        cubeTarget.layers[1] = True
        # objAzimuths = numpy.arange(0,360, 5) # Map it to non colliding rotations.
        objAzimuths = numpy.array([])
        intersections = []
        translationMat = mathutils.Matrix.Translation(targetParentPosition)
        azimuthRot = mathutils.Matrix.Rotation(radians(0), 4, 'Z')
        cubeTarget.matrix_world = translationMat * azimuthRot * cubeScale * mathutils.Matrix.Translation(mathutils.Vector((0,0,1)))
        # Render one example frame of the proxy cube for visual inspection.
        scene.render.filepath = 'scenes/' + str(sceneNumber) + '/collisionCubeExample_' + str(targetIdx) + '.png'
        placeCamera(scene.camera, -90, 25, 0.75, targetParentPosition)
        bpy.ops.render.render(write_still=True)
    collisionInterval = 5
        # Sample azimuths every collisionInterval degrees and record whether the
        # rotated proxy cube intersects any other (non-room, non-parent) instance.
        for objAzimuth in numpy.arange(0,360,collisionInterval):
            azimuthRot = mathutils.Matrix.Rotation(radians(-objAzimuth), 4, 'Z')
            cubeTarget.matrix_world = translationMat * azimuthRot * cubeScale * mathutils.Matrix.Translation(mathutils.Vector((0,0,1)))
            # scene.render.filepath = 'scenes/' + str(sceneNumber) + '/' + str(objAzimuth) + 'collisionCubeExample_' + str(targetIdx) + '.png'
            # bpy.ops.render.render(write_still=True)
            intersect = False
            for sceneInstanceIdx, sceneInstance in enumerate(scene.objects):
                if sceneInstance.type == 'EMPTY' and sceneInstance != cubeTarget and sceneInstance.name != str(roomInstanceNum) and sceneInstance.name != str(instances[targetParentIndex][1]):
                    intersect = instancesIntersect(mathutils.Matrix.Identity(4), [cubeTarget], sceneInstance.matrix_world, sceneInstance.dupli_group.objects)
                    if intersect:
                        print("Intersects with " + sceneInstance.name)
                        break
            intersections = intersections + [[objAzimuth, intersect]]
        # Convert the per-azimuth hit list into [start, end] azimuth intervals
        # that are free of collisions.
        startInterval = True
        intervals = []
        initInterval = 0
        endInterval = 0
        for idx, intersection in enumerate(intersections):
            if not intersection[1]:
                if startInterval:
                    initInterval = intersection[0]
                    startInterval = False
            else:
                if not startInterval:
                    if idx >= 1 and intersections[idx-1][0] != initInterval:
                        endInterval = intersection[0] - collisionInterval
                        intervals = intervals + [[initInterval, endInterval]]
                    startInterval = True
            if not intersection[1]:
                endInterval = intersection[0]
        # Close the trailing interval; if azimuth 0 was free, extend by one step
        # to account for the 360-degree wrap-around.
        if not intersections[0][1]:
            intervals = intervals + [[initInterval, endInterval+collisionInterval]]
        else:
            intervals = intervals + [[initInterval, endInterval]]
        targetCollisions[targetParentIndex] = (targetParentPosition, intervals)
    sceneCollisions[sceneNumber] = targetCollisions
    with open('data/collisions_new/collisionScene' + str(sceneNumber) + '.pickle', 'wb') as pfile:
        pickle.dump(targetCollisions, pfile)
print("Collision detection ended.")
# with open('data/collisions/collisions.pickle', 'wb') as pfile:
# pickle.dump(sceneCollisions, pfile)
# Cleanup
# for scene in bpy.data.scenes:
# # for objnum, obji in enumerate(scene.objects):
# #
# # obji.user_clear()
# # bpy.data.objects.remove(obji)
# # scene = bpy.data.scenes['Main Scene']
# if scene.name != 'Scene':
# scene.user_clear()
# bpy.data.scenes.remove(scene)
# bpy.ops.scene.delete()
| 5,875 | 37.657895 | 191 | py |
inversegraphics | inversegraphics-master/scene_io_utils.py | from blender_utils import *
from sklearn.preprocessing import normalize
from collections import OrderedDict
def loadTeapotsOpenDRData(renderTeapotsList, useBlender, unpackModelsFromBlender, targetModels):
    """Collect OpenDR geometry for each selected teapot.

    Geometry is either unpacked live from the Blender models (and cached to
    'data/target<N>.pickle') or read back from those pickles. Returns per-teapot
    lists of vertices, faces, colours, normals, UVs, texture flags and textures,
    plus the flattened/stacked vertex data of the *last* teapot processed and
    each teapot's vertex centroid.
    """
    v_teapots = []
    f_list_teapots = []
    vc_teapots = []
    vn_teapots = []
    uv_teapots = []
    haveTextures_list_teapots = []
    textures_list_teapots = []
    blender_teapots = []
    center_teapots = []
    for listIdx, teapotNum in enumerate(renderTeapotsList):
        pickleFile = 'data/target' + str(teapotNum) + '.pickle'
        if useBlender:
            # Keep a handle on the Blender model and expose it on layers 1 and 2.
            teapot = targetModels[listIdx]
            teapot.layers[1] = True
            teapot.layers[2] = True
            blender_teapots.append(teapot)
        if unpackModelsFromBlender:
            unpacked = unpackBlenderObject(teapot, pickleFile, True)
        else:
            unpacked = loadSavedObject(pickleFile)
        vmod, fmod_list, vcmod, vnmod, uvmod, haveTexturesmod_list, texturesmod_list = unpacked
        v_teapots.append(vmod)
        f_list_teapots.append(fmod_list)
        vc_teapots.append(vcmod)
        vn_teapots.append(vnmod)
        uv_teapots.append(uvmod)
        haveTextures_list_teapots.append(haveTexturesmod_list)
        textures_list_teapots.append(texturesmod_list)
        # Flatten the nested vertex lists and compute this model's centroid.
        vflat = [vertex for group in vmod for vertex in group]
        varray = np.vstack(vflat)
        center_teapots.append(varray.sum(axis=0) / len(varray))
    return v_teapots, f_list_teapots, vc_teapots, vn_teapots, uv_teapots, haveTextures_list_teapots, textures_list_teapots, vflat, varray, center_teapots
def loadMugsOpenDRData(mugFiles, useBlender, unpackModelsFromBlender, mugModels=None):
    """Collect OpenDR geometry for each selected mug.

    Mirrors loadTeapotsOpenDRData: unpack from Blender (caching to
    'data/mug<N>.pickle') or load the cached pickles. Returns per-mug geometry
    lists, the flattened/stacked vertex data of the *last* mug processed and
    each mug's vertex centroid.
    """
    v_mugs = []
    f_list_mugs = []
    vc_mugs = []
    vn_mugs = []
    uv_mugs = []
    haveTextures_list_mugs = []
    textures_list_mugs = []
    blender_mugs = []
    center_mugs = []
    for listIdx, mugNum in enumerate(mugFiles):
        pickleFile = 'data/mug' + str(mugNum) + '.pickle'
        if useBlender:
            # Keep a handle on the Blender model and expose it on layers 1 and 2.
            mug = mugModels[listIdx]
            mug.layers[1] = True
            mug.layers[2] = True
            blender_mugs.append(mug)
        if unpackModelsFromBlender:
            unpacked = unpackBlenderObject(mug, pickleFile, True)
        else:
            unpacked = loadSavedObject(pickleFile)
        vmod, fmod_list, vcmod, vnmod, uvmod, haveTexturesmod_list, texturesmod_list = unpacked
        v_mugs.append(vmod)
        f_list_mugs.append(fmod_list)
        vc_mugs.append(vcmod)
        vn_mugs.append(vnmod)
        uv_mugs.append(uvmod)
        haveTextures_list_mugs.append(haveTexturesmod_list)
        textures_list_mugs.append(texturesmod_list)
        # Flatten the nested vertex lists and compute this model's centroid.
        vflat = [vertex for group in vmod for vertex in group]
        varray = np.vstack(vflat)
        center_mugs.append(varray.sum(axis=0) / len(varray))
    return v_mugs, f_list_mugs, vc_mugs, vn_mugs, uv_mugs, haveTextures_list_mugs, textures_list_mugs, vflat, varray, center_mugs
def getSceneInstancesInfo(sceneFile):
    """Parse a scene description text file into per-model instance records.

    Returns a list of [modelId, parentIndex, parentContactPosition, transform],
    with positions and translations converted from inches to metres. A record
    is appended whenever a 'transform' line is reached, using the most recent
    'newModel'/'parentContactPosition'/'parentIndex' lines seen before it.

    NOTE(review): assumes every line has at least one whitespace-separated
    token; a blank line would raise IndexError on parts[0].
    """
    sceneLines = [line.strip() for line in open(sceneFile)]
    numModels = sceneLines[2].split()[1]  # declared model count; currently unused
    instances = []
    for line in sceneLines:
        parts = line.split()
        if parts[0] == 'newModel':
            modelId = parts[2]
        if parts[0] == 'parentContactPosition':
            parentContactPosition = mathutils.Vector([float(parts[1])*inchToMeter, float(parts[2])*inchToMeter, float(parts[3])*inchToMeter])
        if parts[0] == 'parentIndex':
            parentIndex = int(parts[1])
        if parts[0] == 'transform':
            # The 16 floats are read row-wise and then transposed; afterwards
            # the translation entries (rows 0-2, column 3) are rescaled.
            transform = mathutils.Matrix([[float(parts[1]), float(parts[2]), float(parts[3]), float(parts[4])], [float(parts[5]), float(parts[6]), float(parts[7]), float(parts[8])], [ float(parts[9]), float(parts[10]), float(parts[11]), float(parts[12])], [float(parts[13]), float(parts[14]), float(parts[15]), float(parts[16])]]).transposed()
            # ipdb.set_trace()
            transform[0][3] = transform[0][3]*inchToMeter
            transform[1][3] = transform[1][3]*inchToMeter
            transform[2][3] = transform[2][3]*inchToMeter
            # ipdb.set_trace()
            instances.append([modelId, parentIndex, parentContactPosition, transform])
    return instances
def composeScene(modelInstances):
    """Link all dupli-group instances into the active Blender scene.

    Renames the active scene to 'Main Scene', switches it to metric units,
    names each instance after its list position, and links it in. Returns the
    composed scene.
    """
    scene = bpy.context.scene
    scene.name = 'Main Scene'
    scene.unit_settings.system = 'METRIC'
    for position, instance in enumerate(modelInstances):
        instance.name = str(position)
        scene.objects.link(instance)
    return scene
def importBlenderScenes(instances, completeScene):
    """Import the OBJ model behind each scene instance and wrap it in a
    dupli-group empty placed at the instance's world transform.

    instances: rows of [modelId, parentIndex, parentContactPosition, transform]
        as produced by getSceneInstancesInfo.
    completeScene: when False, only room models (id matching 'room[0-9]+') are
        imported; every other instance is skipped.
    Returns (blenderScenes, modelInstances): one Blender scene per imported
    model, and the dupli-group empties for linking into a composed scene.
    """
    baseDir = '../COLLADA/'
    blenderScenes = []
    modelInstances = []
    modelNum = 0
    for instance in instances:
        modelId = instance[0]
        reg = re.compile('(room[0-9]+)')
        isRoom = reg.match(modelId)
        if completeScene or isRoom:
            transform = instance[3]
            modelPath = baseDir + modelId + '_cleaned.obj'
            print('Importing ' + modelPath )
            # if modelNum != targetIndex:
            bpy.ops.scene.new()
            bpy.context.scene.name = modelId
            scene = bpy.context.scene
            # scene.unit_settings.system = 'METRIC'
            # bpy.utils.collada_import(modelPath)
            bpy.ops.import_scene.obj(filepath=modelPath, split_mode='OFF', use_split_objects=True, use_split_groups=False)
            # ipdb.set_trace()
            sceneGroup = bpy.data.groups.new(modelId)
            scene.update()
            # scaleMat is currently unused; all of its applications below are
            # commented out (scaling happens elsewhere in the pipeline).
            scaleMat = mathutils.Matrix.Scale(inchToMeter, 4)
            # xrotation = mathutils.Matrix.Rotation(-90,4, 'X')
            for mesh in scene.objects:
                if mesh.type == 'MESH':
                    sceneGroup.objects.link(mesh)
                    # ipdb.set_trace()
                    # mesh_transform = mesh.matrix_world
                    # mesh.matrix_world = transform * mesh.matrix_world
                    mesh.pass_index = 0
                    # mesh.matrix_world[0][3] = mesh.matrix_world[0][3]*inchToMeter
                    # mesh.matrix_world[1][3] = mesh.matrix_world[1][3]*inchToMeter
                    # mesh.matrix_world[2][3] = mesh.matrix_world[2][3]*inchToMeter
                    # mesh.matrix_world = scaleMat * mesh.matrix_world
                    # ipdb.set_trace()
                    # mesh.data.show_double_sided = True
                    mesh.data.update()
            modelInstance = bpy.data.objects.new(modelId, None)
            modelInstance.dupli_type = 'GROUP'
            modelInstance.dupli_group = sceneGroup
            modelInstance.matrix_world = transform
            modelInstance.pass_index = 0
            modelInstances.append(modelInstance)
            modelNum = modelNum + 1
            # ipdb.set_trace()
            scene.update()
            blenderScenes.append(scene)
    return blenderScenes, modelInstances
import os.path
def createTargetsBlendFile():
    """Bake every teapot target model into a packed 'data/targets.blend'.

    Resets Blender to factory settings, imports all teapots listed in
    teapots.txt via loadTargetModels, packs external data, saves the library,
    then resets Blender again so the session is left clean.
    """
    bpy.ops.wm.read_factory_settings()
    # bpy.ops.wm.read_homefile(load_ui=False)
    teapots = [line.strip() for line in open('teapots.txt')]
    renderTeapotsList = np.arange(len(teapots))
    [targetScenes, targetModels, transformations] = loadTargetModels(renderTeapotsList)
    # bpy.data.scenes.remove(bpy.data.scenes['Scene'])
    bpy.ops.file.pack_all()
    bpy.ops.wm.save_as_mainfile(filepath='data/targets.blend')
    bpy.ops.wm.read_factory_settings()
def createMugsBlendFile():
    """Bake every mug model into a packed 'data/mugs.blend'.

    Mirrors createTargetsBlendFile, reading the model list from mugs.txt and
    importing via loadMugsModels.
    """
    bpy.ops.wm.read_factory_settings()
    # bpy.ops.wm.read_homefile(load_ui=False)
    mugs = [line.strip() for line in open('mugs.txt')]
    renderMugsList = np.arange(len(mugs))
    [targetScenes, targetModels, transformations] = loadMugsModels(renderMugsList)
    # bpy.data.scenes.remove(bpy.data.scenes['Scene'])
    bpy.ops.file.pack_all()
    bpy.ops.wm.save_as_mainfile(filepath='data/mugs.blend')
    bpy.ops.wm.read_factory_settings()
def createSceneBlendFiles(overwrite=True):
    """Pre-bake each replaceable scene into 'data/scene<N>.blend'.

    overwrite: when False, scenes whose .blend file already exists are skipped.
    Each scene is imported with loadBlenderScene, configured via setupScene,
    packed, saved, and Blender is reset before the next scene.
    """
    replaceableScenesFile = '../databaseFull/fields/scene_replaceables_backup.txt'
    sceneLines = [line.strip() for line in open(replaceableScenesFile)]
    for sceneIdx in range(len(sceneLines)):
        sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndices, targetPositions = getSceneInformation(sceneIdx, replaceableScenesFile)
        blendFilename = 'data/scene' + str(sceneNumber) + '.blend'
        if overwrite or not os.path.isfile(blendFilename):
            print("Importing Scene " + sceneFileName)
            scene = loadBlenderScene(sceneIdx, replaceableScenesFile)
            setupScene(scene, roomInstanceNum, scene.world, scene.camera, 100, 100, 16, True, False)
            scene.update()
            # bpy.data.scenes.remove(bpy.data.scenes['Scene'])
            bpy.ops.file.pack_all()
            bpy.ops.wm.save_as_mainfile(filepath=blendFilename)
            bpy.ops.wm.read_factory_settings()
    # bpy.ops.wm.read_homefile(load_ui=False)
    # bpy.ops.wm.open_mainfile(filepath='data/Scene.blend')
def createSceneOpenDRFiles(overwrite=True):
    """Unpack each pre-baked scene .blend into 'data/scene<N>.pickle' of
    OpenDR-ready arrays (via unpackBlenderScene).

    overwrite: when False, scenes whose pickle already exists are skipped.
    """
    replaceableScenesFile = '../databaseFull/fields/scene_replaceables_backup.txt'
    sceneLines = [line.strip() for line in open(replaceableScenesFile)]
    for sceneIdx in range(len(sceneLines)):
        sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndices, targetPositions = getSceneInformation(sceneIdx, replaceableScenesFile)
        pickleFilename = 'data/scene' + str(sceneNumber) + '.pickle'
        if overwrite or not os.path.isfile(pickleFilename):
            print("Unpacking Scene " + str(sceneNumber))
            loadSceneBlendData(sceneIdx, replaceableScenesFile)
            scene = bpy.data.scenes['Main Scene']
            v, f_list, vc, vn, uv, haveTextures_list, textures_list = unpackBlenderScene(scene, pickleFilename, True)
            bpy.ops.wm.read_factory_settings()
def loadSceneBlendData(sceneIdx, replaceableScenesFile):
    """Load the pre-baked 'data/scene<N>.blend' for row *sceneIdx* of the
    replaceable-scenes index into the current Blender session and make its
    'Main Scene' the active scene.

    sceneIdx: 0-based row index into the index file.
    replaceableScenesFile: path to the index whose first column names the
        original scene<N>.txt file; N selects the baked .blend to load.
    """
    # Cleanup: the original body started with a pointless self-assignment of
    # replaceableScenesFile; behaviour is otherwise unchanged.
    sceneLines = [line.strip() for line in open(replaceableScenesFile)]
    sceneFile = sceneLines[sceneIdx].split(' ')[0]
    # The scene number is embedded in the entry's file name, e.g. 'scene23.txt'.
    sceneNumber = int(re.search('.+?scene([0-9]+)\.txt', sceneFile, re.IGNORECASE).groups()[0])
    sceneFilename = 'data/scene' + str(sceneNumber) + '.blend'
    with bpy.data.libraries.load(filepath=sceneFilename) as (data_from, data_to):
        # Pull every datablock category across from the library wholesale.
        for attr in dir(data_to):
            setattr(data_to, attr, getattr(data_from, attr))
    bpy.context.screen.scene = bpy.data.scenes['Main Scene']
def loadTargetsBlendData():
    """Append all datablocks from the packed targets library
    ('data/targets.blend') into the current Blender session.

    Returns the library's destination handle with every category populated.
    """
    with bpy.data.libraries.load(filepath='data/targets.blend') as (data_from, data_to):
        # Copy every datablock category (objects, meshes, groups, ...) wholesale.
        for category in dir(data_to):
            setattr(data_to, category, getattr(data_from, category))
    return data_to
def loadMugsBlendData():
    """Append all datablocks from the packed mugs library ('data/mugs.blend')
    into the current Blender session.

    Returns the library's destination handle with every category populated.
    """
    with bpy.data.libraries.load(filepath='data/mugs.blend') as (data_from, data_to):
        # Copy every datablock category (objects, meshes, groups, ...) wholesale.
        for category in dir(data_to):
            setattr(data_to, category, getattr(data_from, category))
    return data_to
def loadTargetModels(experimentTeapots):
    """Import the selected teapot OBJs and normalise them for use as targets.

    For each index in *experimentTeapots* (into teapots.txt): import the OBJ
    into its own scene, scale it into a 0.265 m (height) x 0.18 m (depth)
    envelope, rotate 90 degrees about Z, centre it, and rest it on z=0; then
    wrap the meshes in a dupli-group instance named 'teapotInstance<N>'.

    Returns (blenderTeapots, targetInstances, transformations): the per-teapot
    scenes, the dupli-group empties, and the accumulated normalising 4x4
    transform applied to each model.
    """
    teapots = [line.strip() for line in open('teapots.txt')]
    targetModels = []  # unused; kept as-is
    baseDir = '../databaseFull/models/'
    targetInstances = []
    blenderTeapots = []
    transformations = []
    modelNum = 0  # unused; kept as-is
    selection = [ teapots[i] for i in experimentTeapots]
    for teapotidx, teapot in enumerate(selection):
        targetGroup = bpy.data.groups.new(teapot)
        fullTeapot = baseDir + teapot + '.obj'
        modelPath = fullTeapot
        bpy.ops.scene.new()
        bpy.context.scene.name = teapot
        scene = bpy.context.scene
        scene.unit_settings.system = 'METRIC'
        print("Importing " + modelPath)
        # bpy.utils.collada_import(modelPath)
        bpy.ops.import_scene.obj(filepath=modelPath, split_mode='OFF', use_split_objects=True, use_split_groups=False)
        scene.update()
        # modifySpecular(scene, 0.3)
        matrix_world = mathutils.Matrix.Identity(4)
        minZ, maxZ = modelHeight(scene.objects, mathutils.Matrix.Identity(4))
        minY, maxY = modelDepth(scene.objects, mathutils.Matrix.Identity(4))
        # Choose the scale so the tighter of height/depth constraints binds.
        scaleZ = 0.265/(maxZ-minZ)
        scaleY = 0.18/(maxY-minY)
        ratio = (maxZ-minZ)/(maxY-minY)
        if ratio > 0.265/0.18:
            scale = scaleZ
        else:
            scale = scaleY
        # scale = min(scaleZ, scaleY)
        scaleMat = mathutils.Matrix.Scale(scale, 4)
        for mesh in scene.objects:
            if mesh.type == 'MESH':
                mesh.matrix_world = scaleMat * mesh.matrix_world
                mesh.data.update()
        matrix_world = scaleMat * matrix_world
        rot = mathutils.Matrix.Rotation(radians(90), 4, 'Z')
        rotateMatrixWorld(scene, rot )
        matrix_world = rot * matrix_world
        minZ, maxZ = modelHeight(scene.objects, mathutils.Matrix.Identity(4))
        # Centre the model at the origin.
        center = centerOfGeometry(scene.objects, mathutils.Matrix.Identity(4))
        for mesh in scene.objects:
            if mesh.type == 'MESH':
                mesh.matrix_world = mathutils.Matrix.Translation(-center) * mesh.matrix_world
        matrix_world = mathutils.Matrix.Translation(-center) * matrix_world
        # Lift the model so its lowest point sits on the z=0 plane.
        minZ, maxZ = modelHeight(scene.objects, mathutils.Matrix.Identity(4))
        for mesh in scene.objects:
            if mesh.type == 'MESH':
                mesh.matrix_world = mathutils.Matrix.Translation(mathutils.Vector((0,0,-minZ))) * mesh.matrix_world
        matrix_world = mathutils.Matrix.Translation(mathutils.Vector((0,0,-minZ))) * matrix_world
        transformations = transformations + [matrix_world]
        for mesh in scene.objects:
            # mesh.update()
            targetGroup.objects.link(mesh)
            mesh.pass_index = 1
        targetInstance = bpy.data.objects.new(teapot, None)
        targetInstance.dupli_type = 'GROUP'
        targetInstance.dupli_group = targetGroup
        targetInstance.pass_index = 1
        targetInstances.append(targetInstance)
        targetInstance.name = 'teapotInstance' + str(experimentTeapots[teapotidx])
        scene.objects.link(targetInstance)
        scene.update()
        blenderTeapots.append(scene)
        # ipdb.set_trace()
    return blenderTeapots, targetInstances, transformations
def loadMugsModels(experimentMugs):
    """Import the selected mug OBJs and normalise them for use as targets.

    Mirrors loadTargetModels but scales each mug to a fixed 0.1 m height
    (depth is unconstrained). Returns (blenderMugs, targetInstances,
    transformations) with instances named 'mugInstance<N>'.
    """
    mugs = [line.strip() for line in open('mugs.txt')]
    targetModels = []  # unused; kept as-is
    baseDir = '../databaseFull/models/'
    targetInstances = []
    blenderMugs = []
    transformations = []
    modelNum = 0  # unused; kept as-is
    selection = [ mugs[i] for i in experimentMugs]
    for mugidx, mug in enumerate(selection):
        targetGroup = bpy.data.groups.new(mug)
        fullTeapot = baseDir + mug + '.obj'
        modelPath = fullTeapot
        bpy.ops.scene.new()
        bpy.context.scene.name = mug
        scene = bpy.context.scene
        scene.unit_settings.system = 'METRIC'
        print("Importing " + modelPath)
        # bpy.utils.collada_import(modelPath)
        bpy.ops.import_scene.obj(filepath=modelPath, split_mode='OFF', use_split_objects=True, use_split_groups=False)
        scene.update()
        # modifySpecular(scene, 0.3)
        matrix_world = mathutils.Matrix.Identity(4)
        minZ, maxZ = modelHeight(scene.objects, mathutils.Matrix.Identity(4))
        minY, maxY = modelDepth(scene.objects, mathutils.Matrix.Identity(4))
        # Uniform scale fixing the mug height to 0.1 m.
        scaleZ = 0.1/(maxZ-minZ)
        ratio = (maxZ-minZ)/(maxY-minY)
        scale = scaleZ
        # scale = min(scaleZ, scaleY)
        scaleMat = mathutils.Matrix.Scale(scale, 4)
        for mesh in scene.objects:
            if mesh.type == 'MESH':
                mesh.matrix_world = scaleMat * mesh.matrix_world
                mesh.data.update()
        matrix_world = scaleMat * matrix_world
        rot = mathutils.Matrix.Rotation(radians(90), 4, 'Z')
        rotateMatrixWorld(scene, rot )
        matrix_world = rot * matrix_world
        minZ, maxZ = modelHeight(scene.objects, mathutils.Matrix.Identity(4))
        # Centre the model at the origin.
        center = centerOfGeometry(scene.objects, mathutils.Matrix.Identity(4))
        for mesh in scene.objects:
            if mesh.type == 'MESH':
                mesh.matrix_world = mathutils.Matrix.Translation(-center) * mesh.matrix_world
        matrix_world = mathutils.Matrix.Translation(-center) * matrix_world
        # Lift the model so its lowest point sits on the z=0 plane.
        minZ, maxZ = modelHeight(scene.objects, mathutils.Matrix.Identity(4))
        for mesh in scene.objects:
            if mesh.type == 'MESH':
                mesh.matrix_world = mathutils.Matrix.Translation(mathutils.Vector((0,0,-minZ))) * mesh.matrix_world
        matrix_world = mathutils.Matrix.Translation(mathutils.Vector((0,0,-minZ))) * matrix_world
        transformations = transformations + [matrix_world]
        for mesh in scene.objects:
            # mesh.update()
            targetGroup.objects.link(mesh)
            mesh.pass_index = 1
        targetInstance = bpy.data.objects.new(mug, None)
        targetInstance.dupli_type = 'GROUP'
        targetInstance.dupli_group = targetGroup
        targetInstance.pass_index = 1
        targetInstances.append(targetInstance)
        targetInstance.name = 'mugInstance' + str(experimentMugs[mugidx])
        scene.objects.link(targetInstance)
        scene.update()
        blenderMugs.append(scene)
        # ipdb.set_trace()
    return blenderMugs, targetInstances, transformations
def getSceneInformation(sceneIdx, scenesFile):
    """Resolve one row of the replaceable-scenes index file.

    Returns (sceneNumber, sceneFileName, instances, roomName, roomInstanceNum,
    targetIndices, targetPositions): the numeric id and file name parsed from
    the row's first field, the instance records from getSceneInstancesInfo,
    the (last) room instance found, and the parent contact position of each
    listed target index.
    """
    rows = [line.strip() for line in open(scenesFile)]
    row = rows[numpy.arange(len(rows))[sceneIdx]]
    fields = row.split(' ')
    sceneFile = fields[0]
    sceneNumber = int(re.search('.+?scene([0-9]+)\.txt', sceneFile, re.IGNORECASE).groups()[0])
    sceneFileName = re.search('.+?(scene[0-9]+\.txt)', sceneFile, re.IGNORECASE).groups()[0]
    instances = getSceneInstancesInfo('../databaseFull/scenes/' + sceneFileName)
    # Remaining fields are the indices of the replaceable target instances.
    targetIndices = [int(field) for field in fields[1::]]
    targetPositions = [np.array(instances[idx][2]) for idx in targetIndices]
    roomName = ''
    roomInstanceNum = 0
    roomPattern = re.compile('(room[0-9]+)')
    for modelIdx, model in enumerate(instances):
        match = roomPattern.match(model[0])
        if match:
            roomName = match.groups()[0]
            roomInstanceNum = modelIdx
    return sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndices, targetPositions
def getSceneIdx(sceneNumber, scenesFile):
    """Return the 0-based row index within *scenesFile* whose scene file name
    carries *sceneNumber*, or -2 when no row matches (sentinel kept for
    existing callers)."""
    scenePattern = re.compile('.+?scene([0-9]+)\.txt', re.IGNORECASE)
    entries = [line.strip() for line in open(scenesFile)]
    for rowIdx in numpy.arange(len(entries)):
        firstField = entries[rowIdx].split(' ')[0]
        if int(scenePattern.search(firstField).groups()[0]) == sceneNumber:
            return rowIdx
    return -2
def loadBlenderScene(sceneIdx, replaceableScenesFile):
    """Build a composed Blender scene ('Main Scene') for row *sceneIdx* of the
    replaceable-scenes index: imports every instance via importBlenderScenes,
    links the instances with composeScene, and attaches a fresh camera and
    world. Returns the composed scene."""
    sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndices, targetPositions = getSceneInformation(sceneIdx, replaceableScenesFile)
    # targetParentPosition = instances[targetIndex][2]
    # targetParentIndex = instances[targetIndex][1]
    cam = bpy.data.cameras.new("MainCamera")
    camera = bpy.data.objects.new("MainCamera", cam)
    world = bpy.data.worlds.new("MainWorld")
    [blenderScenes, modelInstances] = importBlenderScenes(instances, True)
    # targetParentInstance = modelInstances[targetParentIndex]
    scene = composeScene(modelInstances)
    scene.world = world
    scene.camera = camera
    # roomInstance = scene.objects[roomName]
    # roomInstance.layers[2] = True
    # targetParentInstance.layers[2] = True
    return scene
def loadSavedScene(sceneDicFile, tex_srgb2lin):
    """Deserialize a pickled scene dictionary.

    sceneDicFile: path to a pickle produced by unpackBlenderScene.
    tex_srgb2lin: when truthy, every texture found in the nested texture lists
        is converted from sRGB to linear in place via srgb2lin.
    Returns (v, f_list, vc, vn, uv, haveTextures_list, textures_list).
    """
    with open(sceneDicFile, 'rb') as handle:
        sceneDic = pickle.load(handle)
    v, f_list, vc = sceneDic['v'], sceneDic['f_list'], sceneDic['vc']
    vn, uv = sceneDic['vn'], sceneDic['uv']
    haveTextures_list = sceneDic['haveTextures_list']
    textures_list = sceneDic['textures_list']
    if tex_srgb2lin:
        # Walk the nested per-object texture lists, skipping missing entries.
        for texture_list in (entry for group in textures_list for entry in group):
            if texture_list is not None:
                for texture in texture_list:
                    if texture is not None:
                        srgb2lin(texture)
    print("Loaded serialized scene!")
    return v, f_list, vc, vn, uv, haveTextures_list, textures_list
def unpackBlenderScene(scene, sceneDicFile, serializeScene):
    """Unpack every dupli-group instance of *scene* into flat OpenDR-style
    lists, optionally pickling the result to *sceneDicFile*.

    Returns (v, f_list, vc, vn, uv, haveTextures_list, textures_list) with one
    entry per unpacked object, concatenated across all instances.
    """
    # bpy.ops.render.render( write_still=True )
    # ipdb.set_trace()
    # v,f_list, vc, vn, uv, haveTextures_list, textures_list = unpackObjects(teapot)
    v = []
    f_list = []
    vc = []
    vn = []
    uv = []
    haveTextures_list = []
    textures_list = []
    print("Unpacking blender data for OpenDR.")
    for modelInstance in scene.objects:
        if modelInstance.dupli_group != None:
            vmod,f_listmod, vcmod, vnmod, uvmod, haveTextures_listmod, textures_listmod = unpackBlenderObject(modelInstance, '', False)
            # gray = np.dot(np.array([0.3, 0.59, 0.11]), vcmod[0].T).T
            # sat = 0.5
            # vcmod[0][:,0] = vcmod[0][:,0] * sat + (1-sat) * gray
            # vcmod[0][:,1] = vcmod[0][:,1] * sat + (1-sat) * gray
            # vcmod[0][:,2] = vcmod[0][:,2] * sat + (1-sat) * gray
            v = v + vmod
            f_list = f_list + f_listmod
            vc = vc + vcmod
            vn = vn + vnmod
            uv = uv + uvmod
            haveTextures_list = haveTextures_list + haveTextures_listmod
            textures_list = textures_list + textures_listmod
    #Serialize
    if serializeScene:
        sceneDic = {'v':v,'f_list':f_list,'vc':vc,'uv':uv,'haveTextures_list':haveTextures_list,'vn':vn,'textures_list': textures_list}
        with open(sceneDicFile, 'wb') as pfile:
            pickle.dump(sceneDic, pfile)
        print("Serialized scene!")
    return v, f_list, vc, vn, uv, haveTextures_list, textures_list
def loadSavedObject(objectDicFile):
    """Deserialize a pickled target-object dictionary.

    Returns ([v], [f_list], [vc], [vn], [uv], [haveTextures_list],
    [textures_list]) — each component wrapped in a one-element list, matching
    the shape produced by unpackBlenderObject.
    """
    with open(objectDicFile, 'rb') as handle:
        targetDic = pickle.load(handle)
    keys = ('v', 'f_list', 'vc', 'vn', 'uv', 'haveTextures_list', 'textures_list')
    v, f_list, vc, vn, uv, haveTextures_list, textures_list = (targetDic[key] for key in keys)
    print("Loaded serialized target!")
    return [v], [f_list], [vc], [vn], [uv], [haveTextures_list], [textures_list]
def unpackBlenderObject(object, objectDicFile, saveData):
    """Convert a Blender dupli-group object into OpenDR-style arrays.

    For every MESH in the object's dupli-group, builds faces/vertices/colours/
    normals/UVs/texture data via buildData, transforms vertices by the combined
    instance*mesh world matrix (normals by its inverse-transpose), and collects
    the results. Optionally pickles the collected dictionary to *objectDicFile*.

    Returns ([v], [f_list], [vc], [vn], [uv], [haveTextures_list],
    [textures_list]) — each per-mesh list wrapped in a one-element list.
    """
    f_list = []
    v = []
    vc = []
    vn = []
    uv = []
    # BUG FIX: this used to be rebuilt each iteration as
    # 'haveTextures + [haveTexture]' with 'haveTextures' never updated, so it
    # only ever held the LAST mesh's texture flags (out of step with v/f_list/
    # vc/...) and was undefined for objects containing no MESH. It now
    # accumulates one entry per mesh like every other list. (The unused
    # 'vertexMeshIndex' counter was also removed.)
    haveTextures_list = []
    textures_list = []
    for mesh in object.dupli_group.objects:
        if mesh.type == 'MESH':
            # mesh.data.validate(verbose=True, clean_customdata=True)
            fmesh, vmesh, vcmesh, nmesh, uvmesh, haveTexture, textures = buildData(mesh.data)
            f_list = f_list + [fmesh]
            vc = vc + [vcmesh]
            # Compose the instance and mesh transforms, then apply them to the
            # homogeneous vertex coordinates.
            transf = np.array(np.dot(object.matrix_world, mesh.matrix_world))
            vmesh = np.hstack([vmesh, np.ones([vmesh.shape[0], 1])])
            vmesh = (np.dot(transf, vmesh.T)).T[:, 0:3]
            v = v + [vmesh]
            # Normals transform with the inverse-transpose, then re-normalise.
            transInvMat = np.linalg.inv(transf).T
            nmesh = np.hstack([nmesh, np.ones([nmesh.shape[0], 1])])
            nmesh = (np.dot(transInvMat, nmesh.T)).T[:, 0:3]
            vn = vn + [normalize(nmesh, axis=1)]
            uv = uv + [uvmesh]
            haveTextures_list = haveTextures_list + [haveTexture]
            textures_list = textures_list + [textures]
    # Serialize
    if saveData:
        targetDic = {'v': v, 'f_list': f_list, 'vc': vc, 'uv': uv,
                     'haveTextures_list': haveTextures_list, 'vn': vn,
                     'textures_list': textures_list}
        with open(objectDicFile, 'wb') as pfile:
            pickle.dump(targetDic, pfile)
        print("Serialized object!")
    return [v], [f_list], [vc], [vn], [uv], [haveTextures_list], [textures_list]
def buildData (msh):
    """Convert a Blender mesh datablock into deduplicated OpenDR arrays.

    Walks every tessellated polygon, dedupes vertices by (position, normal, uv)
    key, triangulates quads, and groups faces by texture (textured groups first,
    sorted by texture name, then one untextured group).

    Returns (f_list, v, vc, n, uv, haveTextures, textures): per-group face
    index arrays, stacked vertex positions/colours/normals/uvs, and parallel
    per-group texture flags and texture images (None for the untextured group).

    NOTE(review): textureNames/haveUVs get one entry per polygon while lfl can
    get two entries per quad, so the 'textureNames[idx]' lookups over
    range(len(lfl)) below can misalign/IndexError on meshes with quads — the
    bare except/ipdb guard suggests this has been hit; confirm and fix upstream.
    """
    lvdic = {} # local dictionary
    lfl = [] # lcoal faces index list
    lvl = [] # local vertex list
    lvcl = []  # local vertex colour list
    lnl = [] # local normal list
    luvl = [] # local uv list
    lvcnt = 0 # local vertices count
    isSmooth = False
    texdic = {} # local dictionary
    msh.calc_tessface()
    # if len(msh.tessfaces) == 0 or msh.tessfaces is None:
    # msh.calc_tessface()
    textureNames = []
    haveUVs = []
    for i,f in enumerate(msh.polygons):
        isSmooth = f.use_smooth
        tmpfaces = []
        hasUV = False # true by default, it will be verified below
        texture = None
        texname = None
        if (len(msh.tessface_uv_textures)>0):
            activeUV = msh.tessface_uv_textures.active.data
            if msh.tessface_uv_textures.active.data[i].image is not None:
                # ipdb.set_trace()
                texname = msh.tessface_uv_textures.active.data[i].image.name
                hasUV = True
                texture = texdic.get(texname)
                if (texture is None): # vertex not found
                    # print("Image: " + texname)
                    # print("Clamp x: " + str(msh.tessface_uv_textures.active.data[i].image.use_clamp_x))
                    # print("Clamp y: " + str(msh.tessface_uv_textures.active.data[i].image.use_clamp_y))
                    # print("Tile x: " + str(msh.tessface_uv_textures.active.data[i].image.tiles_x))
                    # print("Tile y: " + str(msh.tessface_uv_textures.active.data[i].image.tiles_y))
                    # First load of this texture: flip vertically, drop alpha,
                    # and convert sRGB -> linear.
                    texture = np.flipud(np.array(msh.tessface_uv_textures.active.data[i].image.pixels).reshape([msh.tessface_uv_textures.active.data[i].image.size[1],msh.tessface_uv_textures.active.data[i].image.size[0],4])[:,:,:3])
                    texture = srgb2lin(texture)
                    # Reject invalid textures (NaNs / out-of-range / empty) and
                    # fall back to treating this polygon as untextured.
                    if np.any(np.isnan(texture)) or np.any(texture<0) or np.any(texture>1) or texture.size == 0:
                        print("Problem with texture from Blender")
                        texture = np.flipud(np.array(msh.tessface_uv_textures.active.data[i].image.pixels).reshape([msh.tessface_uv_textures.active.data[i].image.size[1],msh.tessface_uv_textures.active.data[i].image.size[0],4])[:,:,:3])
                        hasUV = False
                        texture = None
                        texname = None
                    if hasUV:
                        texdic[texname] = texture
        textureNames = textureNames + [texname]
        haveUVs = haveUVs + [hasUV]
        for j,v in enumerate(f.vertices):
            vec = msh.vertices[v].co
            vec = r3d(vec)
            if (isSmooth): # use vertex normal
                nor = msh.vertices[v].normal
            else: # use face normal
                nor = f.normal
            vcolor = msh.materials[f.material_index].diffuse_color[:]
            # Fall back to the specular colour when diffuse is pure black.
            if vcolor == (0.0,0.0,0.0) and msh.materials[f.material_index].specular_color[:] != (0.0,0.0,0.0):
                vcolor = msh.materials[f.material_index].specular_color[:]
                # print("Using specular!")
            nor = r3d(nor)
            co = (0.0, 0.0)
            if hasUV:
                co = activeUV[i].uv[j]
                co = r2d(co)
                # Textured polygons get white vertex colour (texture dominates).
                vcolor = (1.0,1.0,1.0)
            # Dedupe vertices on (position, normal, uv).
            key = vec, nor, co
            vinx = lvdic.get(key)
            if (vinx is None): # vertex not found
                lvdic[key] = lvcnt
                lvl.append(vec)
                lnl.append(nor)
                lvcl.append(vcolor)
                luvl.append(co)
                tmpfaces.append(lvcnt)
                lvcnt+=1
            else:
                inx = lvdic[key]
                tmpfaces.append(inx)
        # Triangulate: quads are split into two triangles.
        if (len(tmpfaces)==3):
            lfl.append(tmpfaces)
        else:
            lfl.append([tmpfaces[0], tmpfaces[1], tmpfaces[2]])
            lfl.append([tmpfaces[0], tmpfaces[2], tmpfaces[3]])
    # vtx.append(lvdic)
    # Group faces by texture, in sorted texture-name order for determinism.
    textures = []
    haveTextures = []
    f_list = []
    orderedtexs = OrderedDict(sorted(texdic.items(), key=lambda t: t[0]))
    for texname, texture in orderedtexs.items():
        fidxs = [lfl[idx] for idx in range(len(lfl)) if textureNames[idx] == texname]
        f_list = f_list + [np.vstack(fidxs)]
        textures = textures + [texture]
        haveTextures = haveTextures + [True]
    try:
        fidxs = [lfl[idx] for idx in range(len(lfl)) if haveUVs[idx] == False]
    except:
        ipdb.set_trace()
    if fidxs != None and fidxs != []:
        f_list = f_list + [np.vstack(fidxs)]
        textures = textures + [None]
        haveTextures = haveTextures + [False]
    #update global lists and dictionaries
    v = np.vstack(lvl)
    vc = np.vstack(lvcl)
    n = np.vstack(lnl)
    uv = np.vstack(luvl)
    return f_list, v, vc, n, uv, haveTextures, textures
| 29,860 | 39.905479 | 343 | py |
inversegraphics | inversegraphics-master/torch_nn.py | import ipdb
import PyTorchAug
import PyTorch
nn = PyTorch.Nn()
lua = PyTorchAug.lua
lua.getGlobal("require")
lua.pushString('modules/LinearCR')
lua.call(1, 0)
lua = PyTorchAug.lua
lua.getGlobal("require")
lua.pushString('modules/Reparametrize')
lua.call(1, 0)
lua = PyTorchAug.lua
lua.getGlobal("require")
lua.pushString('modules/SelectiveOutputClamp')
lua.call(1, 0)
lua = PyTorchAug.lua
lua.getGlobal("require")
lua.pushString('modules/SelectiveGradientFilter')
lua.call(1, 0)
dim_hidden = 200
feature_maps = 96
filter_size = 5
colorchaPyTorchAugels = 1
# ipdb.set_trace()
encoder = PyTorchAug.Sequential()
encoder.add(PyTorchAug.SpatialConvolution(colorchaPyTorchAugels,feature_maps,filter_size,filter_size))
encoder.add(PyTorchAug.SpatialMaxPooling(2,2,2,2))
encoder.add(PyTorchAug.Threshold(0,1e-6))
encoder.add(PyTorchAug.SpatialConvolution(feature_maps,feature_maps/2,filter_size,filter_size))
encoder.add(PyTorchAug.SpatialMaxPooling(2,2,2,2))
encoder.add(PyTorchAug.Threshold(0,1e-6))
encoder.add(PyTorchAug.SpatialConvolution(feature_maps/2,feature_maps/4,filter_size,filter_size))
encoder.add(PyTorchAug.SpatialMaxPooling(2,2,2,2))
encoder.add(PyTorchAug.Threshold(0,1e-6))
encoder.add(PyTorchAug.Reshape((feature_maps/4)*15*15))
z = PyTorchAug.ConcatTable()
mu = PyTorchAug.Sequential()
mu.add(PyTorchAug.LinearCR((feature_maps/4)*15*15, dim_hidden))
mu.add(PyTorchAug.SelectiveGradientFilter())
mu.add(PyTorchAug.SelectiveOutputClamp())
z.add(mu)
sigma = PyTorchAug.Sequential()
sigma.add(PyTorchAug.LinearCR((feature_maps/4)*15*15, dim_hidden))
sigma.add(PyTorchAug.SelectiveGradientFilter())
sigma.add(PyTorchAug.SelectiveOutputClamp())
z.add(sigma)
encoder.add(z)
decoder = PyTorchAug.Sequential()
decoder.add(PyTorchAug.LinearCR(dim_hidden, (feature_maps/4)*15*15 ))
decoder.add(PyTorchAug.Threshold(0,1e-6))
decoder.add(PyTorchAug.Reshape((feature_maps/4),15,15))
decoder.add(PyTorchAug.SpatialUpSamplingNearest(2))
decoder.add(PyTorchAug.SpatialConvolution(feature_maps/4,feature_maps/2, 7, 7))
decoder.add(PyTorchAug.Threshold(0,1e-6))
decoder.add(PyTorchAug.SpatialUpSamplingNearest(2))
decoder.add(PyTorchAug.SpatialConvolution(feature_maps/2,feature_maps,7,7))
decoder.add(PyTorchAug.Threshold(0,1e-6))
decoder.add(PyTorchAug.SpatialUpSamplingNearest(2))
decoder.add(PyTorchAug.SpatialConvolution(feature_maps,feature_maps,7,7))
decoder.add(PyTorchAug.Threshold(0,1e-6))
decoder.add(PyTorchAug.SpatialUpSamplingNearest(2))
decoder.add(PyTorchAug.SpatialConvolution(feature_maps,1,7,7))
decoder.add(PyTorchAug.Sigmoid())
model = PyTorchAug.Sequential()
model.add(encoder)
model.add(PyTorchAug.Reparametrize(dim_hidden))
model.add(decoder)
model.cuda()
nn.collectgarbage() | 2,724 | 25.980198 | 102 | py |
inversegraphics | inversegraphics-master/diffrender_experiment.py | __author__ = 'pol'
import matplotlib
matplotlib.use('Qt4Agg')
from math import radians
import timeit
import time
import numpy as np
from utils import *
import matplotlib.pyplot as plt
plt.ion()
import h5py
import ipdb
import pickle
#########################################
# Initialization ends here
#########################################
seed = 1
np.random.seed(seed)
gtPrefix = 'objectnet3d_teapots'
experimentPrefix = 'objectnet3d_teapots'
experimentDescr = 'The real teapots'
gtDir = 'groundtruth/' + gtPrefix + '/'
experimentDir = 'experiments/' + experimentPrefix + '/'
# groundTruthFilename = gtDir + 'groundTruth.h5'
# gtDataFile = h5py.File(groundTruthFilename, 'r')
#
# onlySynthetic = False
#
#
# print("Reading experiment data.")
#
# shapeGT = gtDataFile[gtPrefix].shape
#
# groundTruth = gtDataFile[gtPrefix]
#
# dataAzsGT = groundTruth['trainAzsGT']
# dataObjAzsGT = groundTruth['trainObjAzsGT']
# dataElevsGT = groundTruth['trainElevsGT']
# dataLightAzsGT = groundTruth['trainLightAzsGT']
# dataLightElevsGT = groundTruth['trainLightElevsGT']
# dataLightIntensitiesGT = groundTruth['trainLightIntensitiesGT']
# dataVColorGT = groundTruth['trainVColorGT']
# dataScenes = groundTruth['trainScenes']
# dataTeapotIds = groundTruth['trainTeapotIds']
# dataEnvMaps = groundTruth['trainEnvMaps']
# dataOcclusions = groundTruth['trainOcclusions']
# dataTargetIndices = groundTruth['trainTargetIndices']
# dataLightCoefficientsGT = groundTruth['trainLightCoefficientsGT']
# dataLightCoefficientsGTRel = groundTruth['trainLightCoefficientsGTRel']
# dataAmbientIntensityGT = groundTruth['trainAmbientIntensityGT']
# dataIds = groundTruth['trainIds']
#
# gtDtype = groundTruth.dtype
#
# allDataIds = gtDataFile[gtPrefix]['trainIds']
########## Check if there is anything wrong with the renders:
# print("Reading images.")
# # images = readImages(imagesDir, trainSet, False, loadFromHdf5)
# writeHdf5 = False
# writeGray = False
# if writeHdf5:
# writeImagesHdf5(gtDir, gtDir, allDataIds, writeGray)
# if onlySynthetic:
# imagesDir = gtDir + 'images_opendr/'
# else:
# imagesDir = gtDir + 'images/'
#
# loadGray = True
# imagesAreH5 = False
# loadGrayFromHdf5 = False
#
# if not imagesAreH5:
# grayImages = readImages(imagesDir, allDataIds, loadGray, loadGrayFromHdf5)
# else:
# grayImages = h5py.File(imagesDir + 'images_gray.h5', 'r')["images"]
#
# badImages = np.where(np.mean(grayImages, (1,2)) < 0.01)[0]
#
# for id, badImage in enumerate(grayImages[badImages]):
# print("There are bad images!")
# plt.imsave('tmp/check/badImage' + str(badImages[id]) + '.png', np.tile(badImage[:,:,None], [1,1,3]))
#
size = 375
# if not os.path.isfile(experimentDir + 'train.npy'):
np.random.seed(seed)
data = np.arange(size)
np.random.shuffle(data)
generateExperiment(size, experimentDir, 0, 1)
# if not os.path.exists(experimentDir):
# os.makedirs(experimentDir)
#
# np.save(experimentDir + 'train.npy', train)
# np.save(experimentDir + 'test.npy', test)
########## Out of sample selections.
# testSamplesIds= [2,4]
# trainSamplesIds = [0,14,20,25,26,1]
#
# dataIdx = np.arange(shapeGT[0])
# train = np.array([],dtype=np.uint16)
# test = np.array([],dtype=np.uint16)
# for testId in testSamplesIds:
# test = np.append(test, np.where(dataTeapotIds == testId))
#
# for trainId in trainSamplesIds:
# train = np.append(train, np.where(dataTeapotIds == trainId))
#
# # boolTrainSet = np.ones(shapeGT[0]).astype(np.bool)
# # boolTrainSet[test] = False
# # train = dataIdx[boolTrainSet]
#
# np.random.shuffle(train)
# np.random.shuffle(test)
#
# if not os.path.exists(experimentDir):
# os.makedirs(experimentDir)
#
# np.save(experimentDir + 'train.npy', train)
# np.save(experimentDir + 'test.npy', test)
with open(experimentDir + 'description.txt', 'w') as expfile:
expfile.write(experimentDescr)
| 3,854 | 26.733813 | 106 | py |
inversegraphics | inversegraphics-master/diffrender_train.py | __author__ = 'pol'
import matplotlib
matplotlib.use('Qt4Agg')
from math import radians
import timeit
import time
import image_processing
import numpy as np
import cv2
from utils import *
import generative_models
import matplotlib.pyplot as plt
plt.ion()
import recognition_models
import skimage
import h5py
import ipdb
import pickle
import lasagne_nn
import lasagne
import theano
#########################################
# Initialization ends here
#########################################
seed = 1
np.random.seed(seed)
gtPrefix = 'train4_occlusion_shapemodel_triplets'
experimentPrefix = 'train4_occlusion_shapemodel_10k'
gtDir = 'groundtruth/' + gtPrefix + '/'
experimentDir = 'experiments/' + experimentPrefix + '/'
groundTruthFilename = gtDir + 'groundTruth.h5'
gtDataFile = h5py.File(groundTruthFilename, 'r')
allDataIds = gtDataFile[gtPrefix]['trainIds']
onlySynthetic = True
# trainSet = np.load(experimentDir + 'train.npy')[:12800]
trainSet = np.load(experimentDir + 'train.npy')[:]
print("Reading experiment data.")
shapeGT = gtDataFile[gtPrefix].shape
boolTestSet = np.zeros(shapeGT).astype(np.bool)
boolTestSet[trainSet] = True
trainGroundTruth = gtDataFile[gtPrefix][boolTestSet]
groundTruth = np.zeros(shapeGT, dtype=trainGroundTruth.dtype)
groundTruth[boolTestSet] = trainGroundTruth
groundTruth = groundTruth[trainSet]
dataAzsGT = groundTruth['trainAzsGT']
dataObjAzsGT = groundTruth['trainObjAzsGT']
dataElevsGT = groundTruth['trainElevsGT']
# dataLightAzsGT = groundTruth['trainLightAzsGT']
# dataLightElevsGT = groundTruth['trainLightElevsGT']
# dataLightIntensitiesGT = groundTruth['trainLightIntensitiesGT']
dataVColorGT = groundTruth['trainVColorGT']
# dataScenes = groundTruth['trainScenes']
dataTeapotIds = groundTruth['trainTeapotIds']
# dataEnvMaps = groundTruth['trainEnvMaps']
dataOcclusions = groundTruth['trainOcclusions']
# dataTargetIndices = groundTruth['trainTargetIndices']
# dataLightCoefficientsGT = groundTruth['trainLightCoefficientsGT']
dataLightCoefficientsGTRel = groundTruth['trainLightCoefficientsGTRel']
dataAmbientIntensityGT = groundTruth['trainAmbientIntensityGT']
dataShapeModelCoeffsGT = groundTruth['trainShapeModelCoeffsGT']
dataIds = groundTruth['trainIds']
gtDtype = groundTruth.dtype
# gtDtype = [('trainIds', trainIds.dtype.name), ('trainAzsGT', trainAzsGT.dtype.name),('trainObjAzsGT', trainObjAzsGT.dtype.name),('trainElevsGT', trainElevsGT.dtype.name),('trainLightAzsGT', trainLightAzsGT.dtype.name),('trainLightElevsGT', trainLightElevsGT.dtype.name),('trainLightIntensitiesGT', trainLightIntensitiesGT.dtype.name),('trainVColorGT', trainVColorGT.dtype.name, (3,) ),('trainScenes', trainScenes.dtype.name),('trainTeapotIds', trainTeapotIds.dtype.name),('trainEnvMaps', trainEnvMaps.dtype.name),('trainOcclusions', trainOcclusions.dtype.name),('trainTargetIndices', trainTargetIndices.dtype.name), ('trainComponentsGT', trainComponentsGT.dtype, (9,)),('trainComponentsGTRel', trainComponentsGTRel.dtype, (9,))]
# Create experiment simple sepratation.
#
trainAzsRel = np.mod(dataAzsGT - dataObjAzsGT, 2*np.pi)
trainElevsGT = dataElevsGT
# trainComponentsGTRel = dataComponentsGTRel
trainVColorGT = dataVColorGT
loadFromHdf5 = False
print("Reading images.")
if onlySynthetic:
imagesDir = gtDir + 'images_opendr/'
else:
imagesDir = gtDir + 'images/'
loadGray = False
imagesAreH5 = False
loadGrayFromHdf5 = False
trainTriplets = True
filter = np.ones(len(trainSet)).astype(np.bool)
filter = np.array(tuple(dataOcclusions >= 0) and tuple(dataOcclusions < 0.9))
# filter = dataOcclusions < 0.3
trainSet = trainSet[filter]
trainLightCoefficientsGTRel = dataLightCoefficientsGTRel[filter]
trainShapeModelCoeffsGT = dataShapeModelCoeffsGT[filter]
trainAzsRel=trainAzsRel[filter]
trainElevsGT =trainElevsGT[filter]
trainVColorGT=trainVColorGT[filter]
trainAmbientIntensityGT = dataAmbientIntensityGT[filter]
grayImages = readImages(imagesDir, trainSet, True, loadGrayFromHdf5)
numTrainSet = len(grayImages)
if trainTriplets:
imagesDir_t1 = gtDir + 'triplets1/'
imagesDir_t2 = gtDir + 'triplets2/'
grayImages_t1 = readImages(imagesDir_t1, trainSet, True, loadGrayFromHdf5)
grayImages_t2 = readImages(imagesDir_t2, trainSet, True, loadGrayFromHdf5)
grayImages_75 = np.zeros([numTrainSet, 75,75])
grayImages_t1_75 = np.zeros([numTrainSet, 75,75])
grayImages_t2_75 = np.zeros([numTrainSet, 75, 75])
for imidx in range(numTrainSet):
grayImages_75[imidx] = skimage.transform.resize(grayImages[imidx], [75, 75])
grayImages_t1_75[imidx] = skimage.transform.resize(grayImages_t1[imidx], [75, 75])
grayImages_t2_75[imidx] = skimage.transform.resize(grayImages_t2[imidx], [75, 75])
# images = readImages(imagesDir, trainSet, False, False)
# grayImages = 0.3*images[:,:,:,0] + 0.59*images[:,:,:,1] + 0.11*images[:,:,:,2]
# backprojectionsDir = gtDir + 'backprojections/'
# backprojections = readImages(backprojectionsDir, trainSet, True, False)
loadMask = False
gtDirMask = 'groundtruth/train4_occlusion_mask/'
masksDir = gtDirMask + 'masks_occlusion/'
if loadMask:
masksGT = loadMasks(masksDir, trainSet)
loadHogFeatures = False
loadFourierFeatures = False
loadZernikeFeatures = False
synthPrefix = '_cycles'
if onlySynthetic:
synthPrefix = ''
# if loadHogFeatures:
# hogfeatures = np.load(gtDir + 'hog' + synthPrefix + '.npy')
# else:
# print("Extracting Hog features .")
# hogfeatures = image_processing.computeHoGFeatures(images)
# np.save(gtDir + 'hog' + synthPrefix + '.npy', hogfeatures)
# if loadIllumFeatures:
# illumfeatures = np.load(experimentDir + 'illum.npy')
# else:
# print("Extracting Illumination features (FFT.")
# illumfeatures = image_processing.computeIllumFeatures(images, images[0].size/12)
# np.save(experimentDir + 'illum.npy', illumfeatures)
# print("Extracting Zernike features.")
#
# numCoeffs=200
# # numTrees = 400
# win=40
# if loadZernikeFeatures:
# trainZernikeCoeffs = np.load(gtDir + 'zernike_numCoeffs' + str(numCoeffs) + '_win' + str(win) + '.npy')[trainSet]
# else:
# print("Extracting Zernike features.")
# batchSize = 1000
#
# trainZernikeCoeffs = np.empty([images.shape[0], numCoeffs])
# for batch in range(int(images.shape[0]/batchSize)):
# trainZernikeCoeffs[batchSize*batch:batchSize*batch + batchSize] = image_processing.zernikeProjectionGray(images[batchSize*batch:batchSize*batch + batchSize], numCoeffs=numCoeffs, win=win)
# np.save(gtDir + 'zernike_numCoeffs' + str(numCoeffs) + '_win' + str(win) + synthPrefix + '.npy', trainZernikeCoeffs)
# trainZernikeCoeffs = trainZernikeCoeffs[trainSet]
#
# trainHogfeatures = hogfeatures[trainSet]
# trainIllumfeatures = illumfeatures[trainSet]
parameterTrainSet = set(['azimuthsRF', 'elevationsRF', 'vcolorsRF'])
# parameterTrainSet = set(['vcolorsRF', 'spherical_harmonicsRF'])
# parameterTrainSet = set(['spherical_harmonicsZernike'])
# parameterTrainSet = set(['spherical_harmonicsZernike'])
# parameterTrainSet = set(['spherical_harmonicsNN'])
parameterTrainSet = set(['appearanceAndLightNN'])
parameterTrainSet = set(['appearanceNN'])
parameterTrainSet = set(['neuralNetModelLight'])
parameterTrainSet = set(['neuralNetModelShape'])
parameterTrainSet = set(['poseNN'])
parameterTrainSet = set(['azimuthTripletNN'])
# parameterTrainSet = set(['appearanceNN'])
# parameterTrainSet = set(['poseNN'])
print("Training recognition models.")
if 'azimuthTripletNN' in parameterTrainSet:
modelType = 'cnn_pose_embedding'
network = lasagne_nn.load_network(modelType=modelType, param_values=[], imgSize=75)
print("Training NN Pose Components")
validRatio = 0.9
trainValSet = np.arange(len(trainSet))[:np.uint(len(trainSet)*validRatio)]
validSet = np.arange(len(trainSet))[np.uint(len(trainSet)*validRatio)::]
# modelPath = experimentDir + 'neuralNetModelRelSHComponents.npz'
param_values = []
fineTune = False
pretrainedExperimentDir = 'experiments/train3_test/'
if fineTune:
pretrainedModelFile = pretrainedExperimentDir + 'neuralNetModelAzimuthTriplet.pickle'
with open(pretrainedModelFile, 'rb') as pfile:
neuralNetTripletAzimuth = pickle.load(pfile)
meanImage = neuralNetTripletAzimuth['mean']
# ipdb.set_trace()
modelType = neuralNetTripletAzimuth['type']
param_values = neuralNetTripletAzimuth['params']
else:
meanImage = np.mean(grayImages, axis=0)
meanImage = np.zeros(grayImages.shape)
modelPath=experimentDir + 'neuralNetModelAzimuthTriplet.pickle'
poseGT = np.hstack([np.cos(trainAzsRel)[:,None] , np.sin(trainAzsRel)[:,None], np.cos(trainElevsGT)[:,None], np.sin(trainElevsGT)[:,None]])
grayImagesR = grayImages.reshape([grayImages.shape[0], 1, grayImages.shape[1], grayImages.shape[2]])
grayImages_t1_R = grayImages_t1.reshape([grayImages_t1.shape[0], 1, grayImages_t1.shape[1], grayImages_t1.shape[2]])
grayImages_t2_R = grayImages_t2.reshape([grayImages_t2.shape[0], 1, grayImages_t2.shape[1], grayImages_t2.shape[2]])
grayImages_75_R = grayImages_75.reshape([grayImages_75.shape[0], 1, grayImages_75.shape[1], grayImages_75.shape[2]])
grayImages_t1_75_R = grayImages_t1_75.reshape([grayImages_t1_75.shape[0], 1, grayImages_t1_75.shape[1], grayImages_t1_75.shape[2]])
grayImages_t2_75_R = grayImages_t2_75.reshape([grayImages_t2_75.shape[0], 1, grayImages_t2_75.shape[1], grayImages_t2_75.shape[2]])
tripletsModel = lasagne_nn.train_triplets_h5(grayImages_75_R, grayImages_t1_75_R, grayImages_t2_75_R, len(trainValSet), meanImage=meanImage, network=network, modelType=modelType, num_epochs=150, saveModelAtEpoch=True, modelPath=modelPath, param_values=param_values)
# poseNNmodel = lasagne_nn.train_nn(grayImages, trainSet, validSet, len(trainValSet), poseGT[trainValSet].astype(np.float32), poseGT[validSet].astype(np.float32), meanImage=meanImage, network=network, modelType=modelType, num_epochs=10, saveModelAtEpoch=True, modelPath=modelPath, param_values=param_values)
# np.savez(modelPath, *SHNNparams)
with open(modelPath, 'wb') as pfile:
pickle.dump(tripletsModel, pfile)
if 'poseNN' in parameterTrainSet:
modelType = 'cnn_pose'
network = lasagne_nn.load_network(modelType=modelType, param_values=[])
print("Training NN Pose Components")
validRatio = 0.9
trainValSet = np.arange(len(trainSet))[:np.uint(len(trainSet)*validRatio)]
validSet = np.arange(len(trainSet))[np.uint(len(trainSet)*validRatio)::]
# modelPath = experimentDir + 'neuralNetModelRelSHComponents.npz'
# grayTrainImages = grayImages[trainValSet][:,:,:]
# grayValidImages = grayImages[validSet][:,:,:]
# grayTrainImages = grayTrainImages[:,None, :,:]
# grayValidImages = grayValidImages[:,None, :,:]
# import sys
# sys.exit("NN")
param_values = []
fineTune = False
pretrainedExperimentDir = 'experiments/train3_test/'
if fineTune:
pretrainedModelFile = pretrainedExperimentDir + 'neuralNetModelPose.pickle'
with open(pretrainedModelFile, 'rb') as pfile:
neuralNetModelPose = pickle.load(pfile)
meanImage = neuralNetModelPose['mean']
# ipdb.set_trace()
modelType = neuralNetModelPose['type']
param_values = neuralNetModelPose['params']
else:
meanImage = np.mean(grayImages, axis=0)
modelPath=experimentDir + 'neuralNetModelPose3.pickle'
poseGT = np.hstack([np.cos(trainAzsRel)[:,None] , np.sin(trainAzsRel)[:,None], np.cos(trainElevsGT)[:,None], np.sin(trainElevsGT)[:,None]])
poseNNmodel = lasagne_nn.train_nn_h5(grayImages.reshape([grayImages.shape[0],1,grayImages.shape[1],grayImages.shape[2]]), len(trainValSet), poseGT[trainValSet].astype(np.float32), poseGT[validSet].astype(np.float32), meanImage=meanImage, network=network, modelType=modelType, num_epochs=150, saveModelAtEpoch=True, modelPath=modelPath, param_values=param_values)
# poseNNmodel = lasagne_nn.train_nn(grayImages, trainSet, validSet, len(trainValSet), poseGT[trainValSet].astype(np.float32), poseGT[validSet].astype(np.float32), meanImage=meanImage, network=network, modelType=modelType, num_epochs=10, saveModelAtEpoch=True, modelPath=modelPath, param_values=param_values)
# np.savez(modelPath, *SHNNparams)
with open(modelPath, 'wb') as pfile:
pickle.dump(poseNNmodel, pfile)
if 'poseNNColor' in parameterTrainSet:
modelType = 'cnn_pose_color'
network = lasagne_nn.load_network(modelType=modelType, param_values=[])
print("Training NN Pose Components")
validRatio = 0.9
trainValSet = np.arange(len(trainSet))[:np.uint(len(trainSet)*validRatio)]
validSet = np.arange(len(trainSet))[np.uint(len(trainSet)*validRatio)::]
# modelPath = experimentDir + 'neuralNetModelRelSHComponents.npz'
# grayTrainImages = grayImages[trainValSet][:,:,:]
# grayValidImages = grayImages[validSet][:,:,:]
# grayTrainImages = grayTrainImages[:,None, :,:]
# grayValidImages = grayValidImages[:,None, :,:]
# import sys
# sys.exit("NN")
param_values = []
fineTune = False
pretrainedExperimentDir = 'experiments/train3_test/'
if fineTune:
pretrainedModelFile = pretrainedExperimentDir + 'neuralNetModelPoseColor.pickle'
with open(pretrainedModelFile, 'rb') as pfile:
neuralNetModelPose = pickle.load(pfile)
meanImage = neuralNetModelPose['mean']
# ipdb.set_trace()
modelType = neuralNetModelPose['type']
param_values = neuralNetModelPose['params']
else:
meanImage = np.mean(images, axis=0)
modelPath=experimentDir + 'neuralNetModelPoseColor.pickle'
poseGT = np.hstack([np.cos(trainAzsRel)[:,None] , np.sin(trainAzsRel)[:,None], np.cos(trainElevsGT)[:,None], np.sin(trainElevsGT)[:,None]])
poseNNmodel = lasagne_nn.train_nn_h5(images.reshape([images.shape[0],3,images.shape[1],images.shape[2]]), len(trainValSet), poseGT[trainValSet].astype(np.float32), poseGT[validSet].astype(np.float32), meanImage=meanImage, network=network, modelType=modelType, num_epochs=150, saveModelAtEpoch=True, modelPath=modelPath, param_values=param_values)
# poseNNmodel = lasagne_nn.train_nn(grayImages, trainSet, validSet, len(trainValSet), poseGT[trainValSet].astype(np.float32), poseGT[validSet].astype(np.float32), meanImage=meanImage, network=network, modelType=modelType, num_epochs=10, saveModelAtEpoch=True, modelPath=modelPath, param_values=param_values)
# np.savez(modelPath, *SHNNparams)
with open(modelPath, 'wb') as pfile:
pickle.dump(poseNNmodel, pfile)
if 'appearanceAndLightNN' in parameterTrainSet:
modelType = 'cnn_appLight'
network = lasagne_nn.load_network(modelType=modelType, param_values=[])
print("Training NN Appereance and Light Components")
validRatio = 0.9
trainValSet = np.arange(len(trainSet))[:np.uint(len(trainSet)*validRatio)]
validSet = np.arange(len(trainSet))[np.uint(len(trainSet)*validRatio)::]
# modelPath = experimentDir + 'neuralNetModelRelSHComponents.npz'
# grayTrainImages = grayImages[trainValSet][:,:,:]
# grayValidImages = grayImages[validSet][:,:,:]
# grayTrainImages = grayTrainImages[:,None, :,:]
# grayValidImages = grayValidImages[:,None, :,:]
# import sys
# sys.exit("NN")
param_values = []
fineTune = True
pretrainedExperimentDir = 'experiments/train4_occlusion_10k/'
if fineTune:
pretrainedModelFile = pretrainedExperimentDir + 'neuralNetModelAppLight.pickle'
with open(pretrainedModelFile, 'rb') as pfile:
neuralNetModelAppLight = pickle.load(pfile)
meanImage = neuralNetModelAppLight['mean']
# ipdb.set_trace()
modelType = neuralNetModelAppLight['type']
param_values = neuralNetModelAppLight['params']
else:
meanImage = np.mean(images, axis=0)
modelPath=experimentDir + 'neuralNetModelAppLight.pickle'
appLightGT = np.hstack([trainLightCoefficientsGTRel*trainAmbientIntensityGT[:,None] , trainVColorGT])
appLightNNmodel = lasagne_nn.train_nn_h5(images.reshape([images.shape[0],3,images.shape[1],images.shape[2]]), len(trainValSet), appLightGT[trainValSet].astype(np.float32), appLightGT[validSet].astype(np.float32), meanImage=meanImage, network=network, modelType=modelType, num_epochs=150, saveModelAtEpoch=True, modelPath=modelPath, param_values=param_values)
# poseNNmodel = lasagne_nn.train_nn(grayImages, trainSet, validSet, len(trainValSet), poseGT[trainValSet].astype(np.float32), poseGT[validSet].astype(np.float32), meanImage=meanImage, network=network, modelType=modelType, num_epochs=10, saveModelAtEpoch=True, modelPath=modelPath, param_values=param_values)
# np.savez(modelPath, *SHNNparams)
with open(modelPath, 'wb') as pfile:
pickle.dump(appLightNNmodel, pfile)
if 'appearanceNN' in parameterTrainSet:
modelType = 'cnn_app'
network = lasagne_nn.load_network(modelType=modelType, param_values=[])
print("Training NN Appeareance Components")
validRatio = 0.9
trainValSet = np.arange(len(trainSet))[:np.uint(len(trainSet)*validRatio)]
validSet = np.arange(len(trainSet))[np.uint(len(trainSet)*validRatio)::]
# modelPath = experimentDir + 'neuralNetModelRelSHComponents.npz'
# grayTrainImages = grayImages[trainValSet][:,:,:]
# grayValidImages = grayImages[validSet][:,:,:]
# grayTrainImages = grayTrainImages[:,None, :,:]
# grayValidImages = grayValidImages[:,None, :,:]
# import sys
# sys.exit("NN")
param_values = []
fineTune = False
pretrainedExperimentDir = 'experiments/train3_test/'
if fineTune:
pretrainedModelFile = pretrainedExperimentDir + 'neuralNetModelAppearance.pickle'
with open(pretrainedModelFile, 'rb') as pfile:
neuralNetModelAppearance = pickle.load(pfile)
meanImage = neuralNetModelAppearance['mean']
# ipdb.set_trace()
modelType = neuralNetModelAppearance['type']
param_values = neuralNetModelAppearance['params']
else:
meanImage = np.mean(images, axis=0)
modelPath=experimentDir + 'neuralNetModelAppearance.pickle'
appNNmodel = lasagne_nn.train_nn_h5(images.reshape([images.shape[0],3,images.shape[1],images.shape[2]]), len(trainValSet), trainVColorGT[trainValSet].astype(np.float32), trainVColorGT[validSet].astype(np.float32), meanImage=meanImage, network=network, modelType=modelType, num_epochs=150, saveModelAtEpoch=True, modelPath=modelPath, param_values=param_values)
# poseNNmodel = lasagne_nn.train_nn(grayImages, trainSet, validSet, len(trainValSet), poseGT[trainValSet].astype(np.float32), poseGT[validSet].astype(np.float32), meanImage=meanImage, network=network, modelType=modelType, num_epochs=10, saveModelAtEpoch=True, modelPath=modelPath, param_values=param_values)
# np.savez(modelPath, *SHNNparams)
with open(modelPath, 'wb') as pfile:
pickle.dump(appNNmodel, pfile)
if 'maskNN' in parameterTrainSet:
modelType = 'cnn_mask_large'
network = lasagne_nn.load_network(modelType=modelType, param_values=[])
print("Training NN Appeareance Components")
validRatio = 0.9
trainValSet = np.arange(len(trainSet))[:np.uint(len(trainSet)*validRatio)]
validSet = np.arange(len(trainSet))[np.uint(len(trainSet)*validRatio)::]
# modelPath = experimentDir + 'neuralNetModelRelSHComponents.npz'
# grayTrainImages = grayImages[trainValSet][:,:,:]
# grayValidImages = grayImages[validSet][:,:,:]
# grayTrainImages = grayTrainImages[:,None, :,:]
# grayValidImages = grayValidImages[:,None, :,:]
# import sys
# sys.exit("NN")
param_values = []
fineTune = True
pretrainedExperimentDir = experimentDir
if fineTune:
pretrainedModelFile = pretrainedExperimentDir + 'neuralNetModelMaskLarge.pickle'
with open(pretrainedModelFile, 'rb') as pfile:
neuralNetModelMask = pickle.load(pfile)
meanImage = neuralNetModelMask['mean']
# ipdb.set_trace()
modelType = neuralNetModelMask['type']
param_values = neuralNetModelMask['params']
else:
meanImage = np.mean(images, axis=0)
modelPath=experimentDir + 'neuralNetModelMaskLarge.pickle'
# masksGT = masksGT.reshape([-1, 150,150])
rsMasksGt = np.zeros([len(masksGT), 50,50])
for mask_i, mask in enumerate(masksGT):
rsMasksGt[mask_i] = skimage.transform.resize(mask, [50,50])
meanImage = np.mean(images, axis=0)
rsMasksGt = rsMasksGt.reshape([rsMasksGt.shape[0], 50*50])
maskNNmodel = lasagne_nn.train_nn_h5(images.reshape([images.shape[0],3,images.shape[1],images.shape[2]]), len(trainValSet), rsMasksGt[trainValSet].astype(np.float32), rsMasksGt[validSet].astype(np.float32), meanImage=meanImage, network=network, modelType=modelType, num_epochs=150, saveModelAtEpoch=True, modelPath=modelPath, param_values=param_values)
# poseNNmodel = lasagne_nn.train_nn(grayImages, trainSet, validSet, len(trainValSet), poseGT[trainValSet].astype(np.float32), poseGT[validSet].astype(np.float32), meanImage=meanImage, network=network, modelType=modelType, num_epochs=10, saveModelAtEpoch=True, modelPath=modelPath, param_values=param_values)
# np.savez(modelPath, *SHNNparams)
with open(modelPath, 'wb') as pfile:
pickle.dump(maskNNmodel, pfile)
if 'azimuthsRF' in parameterTrainSet:
print("Training RFs Cos Azs")
randForestModelCosAzs = recognition_models.trainRandomForest(trainHogfeatures, np.cos(trainAzsRel))
trainedModel = {'randForestModelCosAzs':randForestModelCosAzs}
with open(experimentDir + 'randForestModelCosAzs05.pickle', 'wb') as pfile:
pickle.dump(trainedModel, pfile)
print("Training RFs Sin Azs")
randForestModelSinAzs = recognition_models.trainRandomForest(trainHogfeatures, np.sin(trainAzsRel))
trainedModel = {'randForestModelSinAzs':randForestModelSinAzs}
with open(experimentDir + 'randForestModelSinAzs05.pickle', 'wb') as pfile:
pickle.dump(trainedModel, pfile)
if 'elevationsRF' in parameterTrainSet:
print("Training RFs Cos Elevs")
randForestModelCosElevs = recognition_models.trainRandomForest(trainHogfeatures, np.cos(trainElevsGT))
trainedModel = {'randForestModelCosElevs':randForestModelCosElevs}
with open(experimentDir + 'randForestModelCosElevs05.pickle', 'wb') as pfile:
pickle.dump(trainedModel, pfile)
print("Training RFs Sin Elevs")
randForestModelSinElevs = recognition_models.trainRandomForest(trainHogfeatures, np.sin(trainElevsGT))
trainedModel = {'randForestModelSinElevs':randForestModelSinElevs}
with open(experimentDir + 'randForestModelSinElevs05.pickle', 'wb') as pfile:
pickle.dump(trainedModel, pfile)
if 'neuralNetModelLight' in parameterTrainSet:
modelType = 'cnn_light'
network = lasagne_nn.load_network(modelType=modelType, param_values=[])
print("Training NN SH Components")
validRatio = 0.9
trainValSet = np.arange(len(trainSet))[:np.uint(len(trainSet)*validRatio)]
validSet = np.arange(len(trainSet))[np.uint(len(trainSet)*validRatio)::]
# modelPath = experimentDir + 'neuralNetModelRelSHComponents.npz'
param_values = []
fineTune = False
pretrainedExperimentDir = 'experiments/train3_test/'
if fineTune:
pretrainedModelFile = pretrainedExperimentDir + 'neuralNetModelLight.pickle'
with open(pretrainedModelFile, 'rb') as pfile:
neuralNetModelLight = pickle.load(pfile)
meanImage = neuralNetModelLight['mean']
# ipdb.set_trace()
modelType = neuralNetModelLight['type']
param_values = neuralNetModelLight['params']
else:
meanImage = np.mean(images, axis=0)
lightGT = trainLightCoefficientsGTRel*trainAmbientIntensityGT[:,None]
modelPath=experimentDir + 'neuralNetModelLight.pickle'
lightNNmodel = lasagne_nn.train_nn_h5(images.reshape([images.shape[0],3,images.shape[1],images.shape[2]]), len(trainValSet), lightGT[trainValSet].astype(np.float32), lightGT[validSet].astype(np.float32), meanImage=meanImage, network=network, modelType=modelType, num_epochs=150, saveModelAtEpoch=True, modelPath=modelPath, param_values=param_values)
# np.savez(modelPath, *SHNNparams)
with open(modelPath, 'wb') as pfile:
pickle.dump(lightNNmodel, pfile)
if 'neuralNetModelShape' in parameterTrainSet:
modelType = 'cnn_shape'
network = lasagne_nn.load_network(modelType=modelType, param_values=[])
print("Training NN Shape Components")
validRatio = 0.9
trainValSet = np.arange(len(trainSet))[:np.uint(len(trainSet)*validRatio)]
validSet = np.arange(len(trainSet))[np.uint(len(trainSet)*validRatio)::]
# modelPath = experimentDir + 'neuralNetModelRelSHComponents.npz'
param_values = []
fineTune = True
pretrainedExperimentDir = 'experiments/train4_occlusion_shapemodel_10k/'
if fineTune:
pretrainedModelFile = pretrainedExperimentDir + 'neuralNetModelShape.pickle'
with open(pretrainedModelFile, 'rb') as pfile:
neuralNetModelShape = pickle.load(pfile)
meanImage = neuralNetModelShape['mean']
# ipdb.set_trace()
modelType = neuralNetModelShape['type']
param_values = neuralNetModelShape['params']
else:
meanImage = np.mean(grayImages, axis=0)
modelPath=experimentDir + 'neuralNetModelShape.pickle'
shapeNNmodel = lasagne_nn.train_nn_h5(grayImages.reshape([grayImages.shape[0],1,grayImages.shape[1],grayImages.shape[2]]), len(trainValSet), trainShapeModelCoeffsGT[trainValSet].astype(np.float32), trainShapeModelCoeffsGT[validSet].astype(np.float32), meanImage=meanImage, network=network, modelType=modelType, num_epochs=150, saveModelAtEpoch=True, modelPath=modelPath, param_values=param_values)
# np.savez(modelPath, *SHNNparams)
with open(modelPath, 'wb') as pfile:
pickle.dump(shapeNNmodel, pfile)
if 'spherical_harmonicsZernike' in parameterTrainSet:
print("Training on Zernike features")
# linRegModelZernikeSH = recognition_models.trainLinearRegression(trainZernikeCoeffs,dataLightCoefficientsGTRel * dataAmbientIntensityGT[:,None])
# with open(experimentDir + 'linRegModelZernike' + str(numCoeffs) +'_win' + str(win) + '.pickle', 'wb') as pfile:
# pickle.dump(linRegModelZernikeSH, pfile)
trainZernikeCoeffs[trainZernikeCoeffs >= 1000] = 35
randForestModelRelZernikeSH = recognition_models.trainRandomForest(trainZernikeCoeffs, trainLightCoefficientsGTRel * trainAmbientIntensityGT[:,None])
with open(experimentDir + 'randomForestModelZernike400' + str(numCoeffs) + '_win' + str(win) + '.pickle', 'wb') as pfile:
pickle.dump(randForestModelRelZernikeSH, pfile)
if 'vcolorsRF' in parameterTrainSet:
print("Training RF on Vertex Colors")
numTrainSet = images.shape[0]
colorWindow = 30
image = images[0]
croppedImages = images[:,image.shape[0]/2-colorWindow:image.shape[0]/2+colorWindow,image.shape[1]/2-colorWindow:image.shape[1]/2+colorWindow,:]
randForestModelVColor = recognition_models.trainRandomForest(croppedImages.reshape([numTrainSet,-1]), trainVColorGT)
with open(experimentDir + 'randForestModelVColor05.pickle', 'wb') as pfile:
pickle.dump(randForestModelVColor, pfile)
if 'vcolorsLR' in parameterTrainSet:
print("Training LR on Vertex Colors")
numTrainSet = images.shape[0]
colorWindow = 30
image = images[0]
croppedImages = images[:,image.shape[0]/2-colorWindow:image.shape[0]/2+colorWindow,image.shape[1]/2-colorWindow:image.shape[1]/2+colorWindow,:]
linRegModelVColor = recognition_models.trainLinearRegression(croppedImages.reshape([numTrainSet,-1]), trainVColorGT)
with open(experimentDir + 'linearRegressionModelVColor.pickle', 'wb') as pfile:
pickle.dump(linRegModelVColor, pfile)
#
# imagesStack = np.vstack([image.reshape([1,-1]) for image in images])
# randForestModelLightIntensity = recognition_models.trainRandomForest(imagesStack, trainLightIntensitiesGT)
#
#
# # # print("Training LR")
# # # linRegModelCosAzs = recognition_models.trainLinearRegression(hogfeatures, np.cos(trainAzsGT))
# # # linRegModelSinAzs = recognition_models.trainLinearRegression(hogfeatures, np.sin(trainAzsGT))
# # # linRegModelCosElevs = recognition_models.trainLinearRegression(hogfeatures, np.cos(trainElevsGT))
# # # linRegModelSinElevs = recognition_models.trainLinearRegression(hogfeatures, np.sin(trainElevsGT))
# #
#
# print("Finished training recognition models.") | 28,634 | 44.524642 | 730 | py |
inversegraphics | inversegraphics-master/zernike.py | """
@file py102-example2-zernike.py
@brief Fitting a surface in Python example for Python 102 lecture
@author Tim van Werkhoven (t.i.m.vanwerkhoven@gmail.com)
@url http://python101.vanwerkhoven.org
@date 20111012
Created by Tim van Werkhoven (t.i.m.vanwerkhoven@xs4all.nl) on 2011-10-12
Copyright (c) 2011 Tim van Werkhoven. All rights reserved.
This file is licensed under the Creative Commons Attribution-Share Alike
license versions 3.0 or higher, see
http://creativecommons.org/licenses/by-sa/3.0/
"""
### Libraries
import numpy as N
from scipy.misc import factorial as fac
### Init functions
def zernike_rad(m, n, rho):
    """
    Calculate the radial component R_n^m of Zernike polynomial (m, n)
    given a grid of radial coordinates rho.

    m, n -- azimuthal and radial degree (non-negative integers, m <= n)
    rho  -- scalar or numpy array of radial coordinates
    Returns a value of the same shape as rho.
    Raises ValueError when (m, n) is not a valid index pair.
    """
    # math.factorial replaces the removed scipy.misc.factorial; all of its
    # arguments below are exact integers thanks to the parity check.
    from math import factorial
    if (n < 0 or m < 0 or abs(m) > n):
        raise ValueError("require 0 <= m <= n (got m=%r, n=%r)" % (m, n))
    # When n - m is odd the radial polynomial is identically zero.
    if ((n - m) % 2):
        return rho * 0.0
    m, n = int(m), int(n)
    # R_n^m(rho) = sum_k (-1)^k (n-k)! / (k! ((n+m)/2-k)! ((n-m)/2-k)!) rho^(n-2k)
    def pre_fac(k):
        return ((-1.0) ** k * factorial(n - k)
                / (factorial(k)
                   * factorial((n + m) // 2 - k)
                   * factorial((n - m) // 2 - k)))
    return sum(pre_fac(k) * rho ** (n - 2.0 * k) for k in range((n - m) // 2 + 1))
def zernike(m, n, rho, phi):
    """
    Evaluate Zernike polynomial Z_n^m on a grid of radial coordinates
    rho and azimuthal coordinates phi.
    """
    # Radial part is shared; the azimuthal factor depends on the sign of m.
    radial = zernike_rad(abs(m), n, rho)
    if m == 0:
        return radial
    elif m > 0:
        return radial * N.cos(m * phi)
    else:
        return radial * N.sin(-m * phi)
def zernikel(j, rho, phi):
    """
    Evaluate the Zernike polynomial with single (Noll-style) index j on a
    grid of radial coordinates rho and azimuthal coordinates phi.
    """
    # Unfold the flat index j into the radial degree n and the position
    # of j within that degree, then recover the azimuthal frequency m.
    n, remainder = 0, j
    while remainder > n:
        n += 1
        remainder -= n
    m = 2 * remainder - n
    return zernike(m, n, rho, phi)
inversegraphics | inversegraphics-master/save_exr_images.py | #!/usr/bin/python
import OpenEXR
import Imath
from PIL import Image
import sys
import numpy as np
def exportExrImages(annotationdir, imgdir, numTeapot, frame, sceneNum, target, prefix):
    """Convert a rendered OpenEXR frame (and its single-object variant) to PNGs.

    Reads render<prefix>_obj<numTeapot>_scene<sceneNum>_target<target>_<frame>.exr
    from annotationdir, writes a segmentation PNG for the full scene plus a
    tonemapped PNG and segmentation PNG for the "_single_" render into imgdir,
    and appends the visibility ratio (complete / single pixel counts) to
    annotationdir/occlusions.txt.

    annotationdir -- directory holding the input .exr files and occlusions.txt
    imgdir        -- output directory for the .png images
    numTeapot, frame, sceneNum, target -- identifiers used in the file names
    prefix        -- string tag inserted into the file names
    """
    # Zero-padded frame number, e.g. frame 7 -> "0007".
    framestr = '{0:04d}'.format(frame)
    outfilename = "render" + prefix + "_obj" + str(numTeapot) + "_scene" + str(sceneNum) + "_target" + str(target) + "_" + framestr
    exrfile = OpenEXR.InputFile(annotationdir + outfilename + ".exr")
    # Read every channel as 32-bit float.
    pt = Imath.PixelType(Imath.PixelType.FLOAT)
    # Image dimensions come from the EXR data window.
    dw = exrfile.header()['dataWindow']
    size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
    quality_val = 100
    # (disabled) export of the shading-normal pass:
    # rgbf = [Image.fromstring("F", size, exrfile.channel("shNormal." + c, pt)) for c in "RGB"]
    # extrema = [im.getextrema() for im in rgbf]
    # darkest = min([lo for (lo,hi) in extrema])
    # lighest = max([hi for (lo,hi) in extrema])
    # scale = 255 / (lighest - darkest)
    # def normalize_0_255(v):
    # return (v * scale) + darkest
    # rgb8 = [im.point(normalize_0_255).convert("L") for im in rgbf]
    # Image.merge("RGB", rgb8).save(jpgfilename + "_normal.jpg", "JPEG")
    # Full-scene combined pass: clamp the HDR values to [0,1], scale to 8-bit.
    rgbf = [Image.fromstring("F", size, exrfile.channel("RenderLayer.Combined." + c, pt)) for c in "RGB"]
    pix = [np.array(im) for im in rgbf]
    pix[0][pix[0]>1] = 1
    pix[1][pix[1]>1] = 1
    pix[2][pix[2]>1] = 1
    pix[0] = pix[0]*255
    pix[1] = pix[1]*255
    pix[2] = pix[2]*255
    imgr = Image.fromarray(pix[0].astype('uint8'))
    imgg = Image.fromarray(pix[1].astype('uint8'))
    imgb = Image.fromarray(pix[2].astype('uint8'))
    finalimg = Image.merge("RGB", (imgr, imgg, imgb))
    # The full-scene color PNG export is intentionally disabled; only the
    # segmentation mask below is written for the complete render.
    # finalimg.save(imgdir + outfilename + ".png", "PNG", quality=quality_val)
    # NOTE(review): 'distance' is read from the object-index channel and never
    # used; presumably a depth channel (e.g. a ".Z" pass) was intended - confirm.
    distancestr = exrfile.channel('RenderLayer.IndexOB.X', pt)
    distance = Image.fromstring("F", size, distancestr)
    # Object-index pass acts as a binary mask of the target object.
    shapeIndexstr = exrfile.channel('RenderLayer.IndexOB.X', pt)
    shapeIndex = Image.fromstring("F", size, shapeIndexstr)
    segment = np.array(shapeIndex)
    # Pixel count of the target visible in the complete (occluded) scene.
    sumComplete = np.sum(segment)
    segmentimg = Image.fromarray(segment.astype('uint8')*255)
    segmentimg.save(imgdir + outfilename + "_segment.png", "PNG", quality=quality_val)
    # Single-object render of the same frame (channels live under "RenderLayer.001").
    singlefile = "render" + prefix + "_obj" + str(numTeapot) + "_scene" + str(sceneNum) + "_target" + str(target) + "_single_" + framestr
    exrfile = OpenEXR.InputFile(annotationdir + singlefile + ".exr")
    rgbf = [Image.fromstring("F", size, exrfile.channel("RenderLayer.001.Combined." + c, pt)) for c in "RGB"]
    pix = [np.array(im) for im in rgbf]
    pix[0][pix[0]>1] = 1
    pix[1][pix[1]>1] = 1
    pix[2][pix[2]>1] = 1
    pix[0] = pix[0]*255
    pix[1] = pix[1]*255
    pix[2] = pix[2]*255
    imgr = Image.fromarray(pix[0].astype('uint8'))
    imgg = Image.fromarray(pix[1].astype('uint8'))
    imgb = Image.fromarray(pix[2].astype('uint8'))
    finalimg = Image.merge("RGB", (imgr, imgg, imgb))
    finalimg.save(imgdir + singlefile + ".png", "PNG", quality=quality_val)
    shapeIndexstrsingle = exrfile.channel('RenderLayer.001.IndexOB.X', pt)
    shapeIndexsingle = Image.fromstring("F", size, shapeIndexstrsingle)
    segmentsingle = np.array(shapeIndexsingle)
    segmentimgsingle = Image.fromarray(segmentsingle.astype('uint8')*255)
    segmentimgsingle.save(imgdir + singlefile + "_segment.png", "PNG", quality=quality_val)
    # Pixel count of the target rendered alone (unoccluded upper bound).
    sumSingle = np.sum(segmentsingle)
    print "Sum Complete " + str(sumComplete)
    print "Sum Single " + str(sumSingle)
    # Append the visibility ratio; a value < 1 means the target is occluded.
    with open(annotationdir + 'occlusions' + ".txt", "a") as myfile:
        myfile.write(str(numTeapot) + ' ' + str(frame) + ' ' + str(sceneNum) + " " + str(target) + " " + str(sumComplete/sumSingle) + ' ' + prefix + "\n")
    return
| 3,659 | 40.590909 | 154 | py |
inversegraphics | inversegraphics-master/diffrender_demo.py | __author__ = 'pol'
import matplotlib
matplotlib.use('Qt4Agg')
import bpy
import scene_io_utils
import mathutils
from math import radians
import timeit
import time
import opendr
import chumpy as ch
import geometry
import image_processing
import numpy as np
import cv2
from blender_utils import *
import glfw
import generative_models
import matplotlib.pyplot as plt
from opendr_utils import *
import OpenGL.GL as GL
import light_probes
import imageio
from utils import *
plt.ion()
#__GL_THREADED_OPTIMIZATIONS
#Main script options:
useBlender = False
loadBlenderSceneFile = False
groundTruthBlender = False
useShapeModel = True
datasetGroundtruth = False
syntheticGroundtruth = True
useCycles = False
demoMode = False
showSubplots = False
unpackModelsFromBlender = False
unpackSceneFromBlender = False
loadSavedSH = False
useGTasBackground = False
refreshWhileMinimizing = True
computePerformance = False
savePerformance = False
glModes = ['glfw','mesa']
glMode = glModes[0]
sphericalMap = False
np.random.seed(1)
width, height = (150, 150)
win = -1
if glMode == 'glfw':
#Initialize base GLFW context for the Demo and to share context among all renderers.
glfw.init()
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
# glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL.GL_TRUE)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
glfw.window_hint(glfw.DEPTH_BITS,32)
if demoMode:
glfw.window_hint(glfw.VISIBLE, GL.GL_TRUE)
else:
glfw.window_hint(glfw.VISIBLE, GL.GL_FALSE)
win = glfw.create_window(width, height, "Demo", None, None)
glfw.make_context_current(win)
angle = 60 * 180 / numpy.pi
clip_start = 0.05
clip_end = 10
frustum = {'near': clip_start, 'far': clip_end, 'width': width, 'height': height}
camDistance = 0.4
gtPrefix = 'train4_occlusion_shapemodel'
gtDirPref = 'train4_occlusion_shapemodel'
gtDir = 'groundtruth/' + gtDirPref + '/'
groundTruthFilename = gtDir + 'groundTruth.h5'
gtDataFile = h5py.File(groundTruthFilename, 'r')
groundTruth = gtDataFile[gtPrefix]
dataAzsGT = groundTruth['trainAzsGT']
dataObjAzsGT = groundTruth['trainObjAzsGT']
dataElevsGT = groundTruth['trainElevsGT']
# dataLightAzsGT = groundTruth['trainLightAzsGT']
# dataLightElevsGT = groundTruth['trainLightElevsGT']
# dataLightIntensitiesGT = groundTruth['trainLightIntensities']
dataVColorGT = groundTruth['trainVColorGT']
dataScenes = groundTruth['trainScenes']
dataTeapotIds = groundTruth['trainTeapotIds']
dataEnvMaps = groundTruth['trainEnvMaps']
dataOcclusions = groundTruth['trainOcclusions']
dataTargetIndices = groundTruth['trainTargetIndices']
dataIds = groundTruth['trainIds']
dataLightCoefficientsGT = groundTruth['trainLightCoefficientsGT']
dataLightCoefficientsGTRel = groundTruth['trainLightCoefficientsGTRel']
dataAmbientIntensityGT = groundTruth['trainAmbientIntensityGT']
dataEnvMapPhiOffsets = groundTruth['trainEnvMapPhiOffsets']
dataShapeModelCoeffsGT = groundTruth['trainShapeModelCoeffsGT']
readDataId = 10045
readDataId = 6102
import shape_model
if useShapeModel:
import shape_model
#%% Load data
filePath = 'data/teapotModel.pkl'
teapotModel = shape_model.loadObject(filePath)
faces = teapotModel['faces']
#%% Sample random shape Params
latentDim = np.shape(teapotModel['ppcaW'])[1]
shapeParams = np.random.randn(latentDim)
shapeParams = dataShapeModelCoeffsGT[readDataId]
chShapeParams = ch.Ch(shapeParams)
meshLinearTransform=teapotModel['meshLinearTransform']
W=teapotModel['ppcaW']
b=teapotModel['ppcaB']
teapots = [line.strip() for line in open('teapots.txt')]
renderTeapotsList = np.arange(len(teapots))[0:1]
sceneNumber = dataScenes[readDataId]
replaceableScenesFile = '../databaseFull/fields/scene_replaceables_backup.txt'
sceneIdx = scene_io_utils.getSceneIdx(sceneNumber, replaceableScenesFile)
sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndices, targetPositions = scene_io_utils.getSceneInformation(sceneIdx, replaceableScenesFile)
sceneDicFile = 'data/scene' + str(sceneNumber) + '.pickle'
targetParentIdx = 0
targetIndex = targetIndices[targetParentIdx]
targetIndex = dataTargetIndices[readDataId]
for targetParentIdx, targetParentIndex in enumerate(targetIndices):
if targetParentIndex == targetIndex:
#Now targetParentIdx has the right idx of the list of parent indices.
break
targetParentPosition = targetPositions[targetParentIdx]
targetPosition = targetParentPosition
if useBlender and not loadBlenderSceneFile:
scene = scene_io_utils.loadBlenderScene(sceneIdx, replaceableScenesFile)
scene_io_utils.setupScene(scene, roomInstanceNum, scene.world, scene.camera, width, height, 16, useCycles, False)
scene.update()
targetPosition = np.array(targetPosition)
#Save barebones scene.
elif useBlender and loadBlenderSceneFile:
scene_io_utils.loadSceneBlendData(sceneIdx, replaceableScenesFile)
scene = bpy.data.scenes['Main Scene']
scene.render.resolution_x = width #perhaps set resolution in code
scene.render.resolution_y = height
scene.render.tile_x = height/2
scene.render.tile_y = width/2
scene.cycles.samples = 1024
scene.sequencer_colorspace_settings.name = 'Linear'
scene.display_settings.display_device = 'None'
bpy.context.screen.scene = scene
tex_srgb2lin = True
if unpackSceneFromBlender:
v, f_list, vc, vn, uv, haveTextures_list, textures_list = scene_io_utils.unpackBlenderScene(scene, sceneDicFile, True)
else:
v, f_list, vc, vn, uv, haveTextures_list, textures_list = scene_io_utils.loadSavedScene(sceneDicFile, tex_srgb2lin)
removeObjectData(len(v) -1 - targetIndex, v, f_list, vc, vn, uv, haveTextures_list, textures_list)
targetModels = []
if useBlender and not loadBlenderSceneFile:
[targetScenes, targetModels, transformations] = scene_io_utils.loadTargetModels(renderTeapotsList)
elif useBlender:
teapots = [line.strip() for line in open('teapots.txt')]
selection = [ teapots[i] for i in renderTeapotsList]
scene_io_utils.loadTargetsBlendData()
for teapotIdx, teapotName in enumerate(selection):
targetModels = targetModels + [bpy.data.scenes[teapotName[0:63]].objects['teapotInstance' + str(renderTeapotsList[teapotIdx])]]
v_teapots, f_list_teapots, vc_teapots, vn_teapots, uv_teapots, haveTextures_list_teapots, textures_list_teapots, vflat, varray, center_teapots = scene_io_utils.loadTeapotsOpenDRData(renderTeapotsList, useBlender, unpackModelsFromBlender, targetModels)
mugs = [line.strip() for line in open('mugs.txt')]
renderMugsList = np.arange(len(mugs))[:]
# [mugScenes, mugModels, transformations] = scene_io_utils.loadMugsModels(renderMugsList)
v_mugs, f_list_mugs, vc_mugs, vn_mugs, uv_mugs, haveTextures_list_mugs, textures_list_mugs, vflat, varray, center_mugs = scene_io_utils.loadMugsOpenDRData(renderMugsList, False, False, None)
azimuth = np.pi
chCosAz = ch.Ch([np.cos(azimuth)])
chSinAz = ch.Ch([np.sin(azimuth)])
chAz = 2*ch.arctan(chSinAz/(ch.sqrt(chCosAz**2 + chSinAz**2) + chCosAz))
elevation = 0
chLogCosEl = ch.Ch(np.log(np.cos(elevation)))
chLogSinEl = ch.Ch(np.log(np.sin(elevation)))
chEl = 2*ch.arctan(ch.exp(chLogSinEl)/(ch.sqrt(ch.exp(chLogCosEl)**2 + ch.exp(chLogSinEl)**2) + ch.exp(chLogCosEl)))
chDist = ch.Ch([camDistance])
chPointLightIntensity = ch.Ch([1])
chPointLightIntensityGT = ch.Ch([1])
chLightAz = ch.Ch([0.0])
chLightEl = ch.Ch([0])
chLightDist = ch.Ch([0.5])
chLightDistGT = ch.Ch([0.5])
chLightAzGT = ch.Ch([0.0])
chLightElGT = ch.Ch([np.pi/4])
ligthTransf = computeHemisphereTransformation(chLightAz, chLightEl, chLightDist, targetPosition)
ligthTransfGT = computeHemisphereTransformation(chLightAzGT, chLightElGT, chLightDistGT, targetPosition)
lightPos = ch.dot(ligthTransf, ch.Ch([0.,0.,0.,1.]))[0:3]
lightPos = ch.Ch([targetPosition[0]+0.5,targetPosition[1],targetPosition[2] + 0.5])
lightPosGT = ch.dot(ligthTransfGT, ch.Ch([0.,0.,0.,1.]))[0:3]
chGlobalConstant = ch.Ch([0.5])
chGlobalConstantGT = ch.Ch([0.5])
light_color = ch.ones(3)*chPointLightIntensity
light_colorGT = ch.ones(3)*chPointLightIntensityGT
chVColors = ch.Ch(dataVColorGT[readDataId])
chVColorsGT = ch.Ch(dataVColorGT[readDataId])
shCoefficientsFile = 'data/sceneSH' + str(sceneIdx) + '.pickle'
clampedCosCoeffs = clampedCosineCoefficients()
# envMapFilename = 'data/hdr/dataset/TropicalRuins_3k.hdr'
SHFilename = 'data/LightSHCoefficients.pickle'
with open(SHFilename, 'rb') as pfile:
envMapDic = pickle.load(pfile)
hdritems = list(envMapDic.items())
hdrstorender = []
phiOffsets = [0, np.pi/2, np.pi, 3*np.pi/2]
for hdrFile, hdrValues in hdritems:
hdridx = hdrValues[0]
envMapCoeffs = hdrValues[1]
if hdridx == dataEnvMaps[readDataId]:
break
envMapFilename = hdrFile
envMapTexture = np.array(imageio.imread(envMapFilename))[:,:,0:3]
envMapGray = 0.3*envMapTexture[:,:,0] + 0.59*envMapTexture[:,:,1] + 0.11*envMapTexture[:,:,2]
envMapGrayMean = np.mean(envMapGray, axis=(0,1))
# if sphericalMap:
# envMapTexture, envMapMean = light_probes.processSphericalEnvironmentMap(envMapTexture)
# envMapCoeffsGT = light_probes.getEnvironmentMapCoefficients(envMapTexture, 1, 0, 'spherical')
# else:
# envMapMean = np.mean(envMapTexture,axis=(0,1))[None,None,:]
# envMapGray = 0.3*envMapTexture[:,:,0] + 0.59*envMapTexture[:,:,1] + 0.11*envMapTexture[:,:,2]
# envMapGrayMean = np.mean(envMapGray, axis=(0,1))
# envMapTexture = envMapTexture/envMapGrayMean
#
# # envMapTexture = 4*np.pi*envMapTexture/np.sum(envMapTexture, axis=(0,1))
# envMapCoeffsGT = light_probes.getEnvironmentMapCoefficients(envMapTexture, 1, 0, 'equirectangular')
# pEnvMap = SHProjection(envMapTexture, envMapCoeffsGT)
# approxProjection = np.sum(pEnvMap, axis=3)
# imageio.imwrite("tmp.exr", approxProjection)
envMapCoeffsGT = ch.Ch(envMapCoeffs)
# rotation = ch.Ch([0.0])
phiOffsetGT = ch.Ch(dataEnvMapPhiOffsets[readDataId])
phiOffset = ch.Ch(dataEnvMapPhiOffsets[readDataId])
chObjAzGT = ch.Ch(dataObjAzsGT[readDataId])
# chObjAzGT[:] = 0
chAzGT = ch.Ch(dataAzsGT[readDataId])
# chAzGT[:] = 0
chAzRelGT = chAzGT - chObjAzGT
chElGT = ch.Ch(dataElevsGT[readDataId])
# chElGT[:] = 0
chDistGT = ch.Ch([camDistance])
totalOffsetGT = phiOffsetGT + chObjAzGT
chAmbientIntensityGT = ch.Ch(dataAmbientIntensityGT[readDataId])
# chAmbientIntensityGT = ch.Ch([0.125])
shCoeffsRGBGT = ch.dot(light_probes.chSphericalHarmonicsZRotation(totalOffsetGT), envMapCoeffsGT[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
shCoeffsRGBGTRel = ch.dot(light_probes.chSphericalHarmonicsZRotation(phiOffsetGT), envMapCoeffsGT[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
chShCoeffsGT = 0.3*shCoeffsRGBGT[:,0] + 0.59*shCoeffsRGBGT[:,1] + 0.11*shCoeffsRGBGT[:,2]
chShCoeffsGTRel = 0.3*shCoeffsRGBGTRel[:,0] + 0.59*shCoeffsRGBGTRel[:,1] + 0.11*shCoeffsRGBGTRel[:,2]
chAmbientSHGT = chShCoeffsGT.ravel() * chAmbientIntensityGT * clampedCosCoeffs
chAmbientSHGTRel = chShCoeffsGTRel.ravel() * chAmbientIntensityGT * clampedCosCoeffs
chLightRadGT = ch.Ch([0.1])
chLightDistGT = ch.Ch([0.5])
chLightIntensityGT = ch.Ch([0])
chLightAzGT = ch.Ch([0])
chLightElGT = ch.Ch([0])
angleGT = ch.arcsin(chLightRadGT/chLightDistGT)
zGT = chZonalHarmonics(angleGT)
shDirLightGTOriginal = np.array(chZonalToSphericalHarmonics(zGT, np.pi/2 - chLightElGT, chLightAzGT - np.pi/2).r[:]).copy()
shDirLightGT = ch.Ch(shDirLightGTOriginal.copy())
chComponentGTOriginal = ch.array(np.array(chAmbientSHGT + shDirLightGT*chLightIntensityGT * clampedCosCoeffs).copy())
# chComponentGT = chAmbientSHGT + shDirLightGT*chLightIntensityGT * clampedCosCoeffs
chComponentGT = chAmbientSHGT
# chComponentGT = ch.Ch([0.2,0,0,0,0,0,0,0,0])
chAz = ch.Ch(dataAzsGT[readDataId]) - ch.Ch(dataObjAzsGT[readDataId])
chObjAz = 0
chEl = ch.Ch(dataElevsGT[readDataId])
chAzRel = chAz - chObjAz
totalOffset = phiOffset + chObjAz
chAmbientIntensity = ch.Ch(dataAmbientIntensityGT[readDataId])
shCoeffsRGB = ch.dot(light_probes.chSphericalHarmonicsZRotation(totalOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
shCoeffsRGBRel = ch.dot(light_probes.chSphericalHarmonicsZRotation(phiOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
chShCoeffs = 0.3*shCoeffsRGB[:,0] + 0.59*shCoeffsRGB[:,1] + 0.11*shCoeffsRGB[:,2]
chShCoeffs = ch.Ch(0.3*shCoeffsRGB.r[:,0] + 0.59*shCoeffsRGB.r[:,1] + 0.11*shCoeffsRGB.r[:,2])
chShCoeffsRel = 0.3*shCoeffsRGBRel[:,0] + 0.59*shCoeffsRGBRel[:,1] + 0.11*shCoeffsRGBRel[:,2]
chAmbientSH = chShCoeffs.ravel() * chAmbientIntensity * clampedCosCoeffs
chLightRad = ch.Ch([0.1])
chLightDist = ch.Ch([0.5])
chLightIntensity = ch.Ch([0])
chLightAz = ch.Ch([np.pi/2])
chLightEl = ch.Ch([0])
angle = ch.arcsin(chLightRad/chLightDist)
z = chZonalHarmonics(angle)
shDirLight = chZonalToSphericalHarmonics(z, np.pi/2 - chLightEl, chLightAz - np.pi/2) * clampedCosCoeffs
chComponent = chAmbientSH + shDirLight*chLightIntensity
# chComponent = chComponentGT
if useBlender:
addEnvironmentMapWorld(scene)
updateEnviornmentMap(envMapFilename, scene)
setEnviornmentMapStrength(1./envMapGrayMean, scene)
rotateEnviornmentMap(-totalOffset, scene)
chDisplacement = ch.Ch([0.0, 0.0,0.0])
chDisplacementGT = ch.Ch([0.0,0.0,0.0])
chScale = ch.Ch([1.0,1.0,1.0])
chScaleGT = ch.Ch([1, 1.,1.])
# vcch[0] = np.ones_like(vcflat[0])*chVColorsGT.reshape([1,3])
renderer_teapots = []
blender_teapots = []
teapots = [line.strip() for line in open('teapots.txt')]
selection = [ teapots[i] for i in renderTeapotsList]
scene_io_utils.loadTargetsBlendData()
for teapotIdx, teapotName in enumerate(selection):
teapot = bpy.data.scenes[teapotName[0:63]].objects['teapotInstance' + str(renderTeapotsList[teapotIdx])]
teapot.layers[1] = True
teapot.layers[2] = True
targetModels = targetModels + [teapot]
blender_teapots = blender_teapots + [teapot]
setObjectDiffuseColor(teapot, chVColorsGT.r.copy())
# for teapot_i in range(len(renderTeapotsList)):
# if useBlender:
# teapot = blender_teapots[teapot_i]
# teapot.matrix_world = mathutils.Matrix.Translation(targetPosition)
#
# vmod = v_teapots[teapot_i]
# fmod_list = f_list_teapots[teapot_i]
# vcmod = vc_teapots[teapot_i]
# vnmod = vn_teapots[teapot_i]
# uvmod = uv_teapots[teapot_i]
# haveTexturesmod_list = haveTextures_list_teapots[teapot_i]
# texturesmod_list = textures_list_teapots[teapot_i]
# centermod = center_teapots[teapot_i]
#
# vmod, vnmod, _ = transformObject(vmod, vnmod, chScale, np.pi/2, ch.Ch([0]), ch.Ch([0]), targetPosition)
# renderer = createRendererTarget(glMode, False, chAz, chEl, chDist, centermod, vmod, vcmod, fmod_list, vnmod, light_color, chComponent, chVColors, targetPosition, chDisplacement, width,height, uvmod, haveTexturesmod_list, texturesmod_list, frustum, win )
#
# renderer.msaa = True
# renderer.overdraw = True
# renderer.r
# renderer_teapots = renderer_teapots + [renderer]
if useShapeModel:
shapeParams = np.random.randn(latentDim)
shapeParams = dataShapeModelCoeffsGT[readDataId]
chShapeParams = ch.Ch(shapeParams)
# landmarksLong = ch.dot(chShapeParams,teapotModel['ppcaW'].T) + teapotModel['ppcaB']
# landmarks = landmarksLong.reshape([-1,3])
# chVertices = shape_model.chShapeParamsToVerts(landmarks, teapotModel['meshLinearTransform'])
chVertices = shape_model.VerticesModel(chShapeParams =chShapeParams,meshLinearTransform=meshLinearTransform,W=W,b=b)
chVertices.init()
chVertices = ch.dot(geometry.RotateZ(-np.pi/2)[0:3,0:3],chVertices.T).T
# teapotNormals = teapotModel['N']
# chNormals = shape_model.chShapeParamsToNormals(teapotNormals, landmarks, teapotModel['linT'])
# rot = mathutils.Matrix.Rotation(radians(90), 4, 'X')
# chNormals= ch.dot(np.array(rot)[0:3, 0:3], chNormals.T).T
# chNormals2 = ch.array(shape_model.shapeParamsToNormals(shapeParams, teapotModel))
chNormals = shape_model.chGetNormals(chVertices, faces)
smNormals = [chNormals]
smFaces = [[faces]]
smVColors = [chVColors*np.ones(chVertices.shape)]
smUVs = ch.Ch(np.zeros([chVertices.shape[0],2]))
smHaveTextures = [[False]]
smTexturesList = [[None]]
chVertices = chVertices - ch.mean(chVertices, axis=0)
minZ = ch.min(chVertices[:,2])
chMinZ = ch.min(chVertices[:,2])
# chVertices[:,2] = chVertices[:,2] - minZ
zeroZVerts = chVertices[:,2]- chMinZ
chVertices = ch.hstack([chVertices[:,0:2] , zeroZVerts.reshape([-1,1])])
chVertices = chVertices*0.09
smCenter = ch.array([0,0,0.1])
smVertices = [chVertices]
smFacesB = [smFaces]
smVColorsB = [smVColors]
smUVsB = [[smUVs]]
smHaveTexturesB = [smHaveTextures]
smTexturesListB = [smTexturesList]
smVertices, smNormals, _ = transformObject(smVertices, smNormals, chScale, chObjAz, ch.Ch([0.0]), ch.Ch([0]), np.array([0,0,0]))
v_mug = v_mugs[0][0]
f_list_mug = f_list_mugs[0]
vc_mug = [[np.array([1,0,0])*np.ones(v_mug[0][0].shape)]]
vn_mug = vn_mugs[0][0]
uv_mug = uv_mugs[0]
haveTextures_list_mug = haveTextures_list_mugs[0]
textures_list_mug = textures_list_mugs[0]
verticesMug, normalsMug, _ = transformObject(v_mug, vn_mug, chScale, chObjAz - np.pi/2, ch.Ch([0.2]), ch.Ch([0]), np.array([0,0,0]))
if False:
VerticesB = [smVertices ] + [verticesMug]
NormalsB = [smNormals] + [normalsMug]
FacesB = smFacesB + f_list_mug
VColorsB = smVColorsB + vc_mug
UVsB = smUVsB + uv_mug
HaveTexturesB = smHaveTexturesB + haveTextures_list_mug
TexturesListB = smTexturesListB + textures_list_mug
else:
VerticesB = [smVertices ]
NormalsB = [smNormals]
FacesB = smFacesB
VColorsB = smVColorsB
UVsB = smUVsB
HaveTexturesB = smHaveTexturesB
TexturesListB = smTexturesListB
# renderer = createRendererTarget(glMode, True, chAz, chEl, chDist, smCenter, [smVertices], smVColorsB, smFacesB, [smNormals], light_color, chComponent, chVColors, targetPosition, chDisplacement, width,height,smUVsB, smHaveTexturesB, smTexturesListB, frustum, win )
renderer = createRendererTarget(glMode, chAz, chEl, chDist, smCenter, VerticesB, VColorsB, FacesB, NormalsB, light_color, chComponent, chVColors, np.array([0,0,0]), chDisplacement, width,height, UVsB, HaveTexturesB, TexturesListB, frustum, win )
renderer.overdraw = True
renderer.nsamples = 8
renderer.msaa = True
renderer.initGL()
renderer.initGLTexture()
chAz2 = ch.Ch(chAz.r[:] - 0.3)
chEl2 = ch.Ch(chEl.r[:]- 0.3)
chVColors2 = ch.Ch(np.array([1,0,0.5]))
light_color2 = light_color + np.array([0.2,-0.4,-0.4])
renderer2 = createRendererTarget(glMode, chAz2, chEl2, chDist, smCenter, VerticesB, VColorsB, FacesB, NormalsB, light_color2, chComponent, chVColors2, np.array([0,0,0]), chDisplacement, width,height, UVsB, HaveTexturesB, TexturesListB, frustum, win )
renderer2.overdraw = True
renderer2.nsamples = 8
renderer2.msaa = True
renderer2.initGL()
renderer2.initGLTexture()
# plt.imsave('errors.png', sqeRenderer.r)
# plt.imsave('errorscolors.png', sqeRenderer.render_image)
# # plt.imsave('renderer2.png', sqeRenderer.render_dedx)
# plt.imsave('errorsdx.png', sqeRenderer.render_dedx,cmap=matplotlib.cm.coolwarm, vmin=-1, vmax=1)
# plt.imsave('errorsdy.png', sqeRenderer.render_dedy,cmap=matplotlib.cm.coolwarm, vmin=-1, vmax=1)
#
sys.exit()
# # # Funky theano stuff
# import lasagne_nn
# import lasagne
# import theano
# import theano.tensor as T
# with open('experiments/train4/neuralNetModelRelSHLight.pickle', 'rb') as pfile:
# neuralNetModelSHLight = pickle.load(pfile)
# meanImage = neuralNetModelSHLight['mean']
# modelType = neuralNetModelSHLight['type']
# param_values = neuralNetModelSHLight['params']
# rendererGray = 0.3*renderer[:,:,0] + 0.59*renderer[:,:,1] + 0.11*renderer[:,:,2]
# input = rendererGray.r[None,None, :,:]
# input_var = T.tensor4('inputs')
# network = lasagne_nn.build_cnn(input_var)
# network_small = lasagne_nn.build_cnn_small(input_var)
# lasagne.layers.set_all_param_values(network, param_values)
# prediction = lasagne.layers.get_output(network)
# chThFun = TheanoFunOnOpenDR(theano_input=input_var, theano_output=prediction, opendr_input=renderer, dim_output = 9)
# sys.exit(0)
currentTeapotModel = 0
if not useShapeModel:
renderer = renderer_teapots[currentTeapotModel]
# shapeParams = np.random.randn(latentDim)
if useShapeModel:
shapeParams = np.random.randn(latentDim)
shapeParams = dataShapeModelCoeffsGT[readDataId]
chShapeParamsGT = ch.Ch(shapeParams)
chVerticesGT = shape_model.VerticesModel(chShapeParams =chShapeParamsGT,meshLinearTransform=meshLinearTransform,W=W,b=b)
chVerticesGT.init()
chVerticesGT = ch.dot(geometry.RotateZ(-np.pi/2)[0:3,0:3],chVerticesGT.T).T
# chNormalsGT = shape_model.chShapeParamsToNormals(teapotModel['N'], landmarks, teapotModel['linT'])
# chNormalsGT = shape_model.shapeParamsToNormals(shapeParams, teapotModel)
chNormalsGT = shape_model.chGetNormals(chVerticesGT, faces)
smNormalsGT = [chNormalsGT]
smFacesGT = [[faces]]
smVColorsGT = [chVColorsGT*np.ones(chVerticesGT.shape)]
smUVsGT = [ch.Ch(np.zeros([chVerticesGT.shape[0],2]))]
smHaveTexturesGT = [[False]]
smTexturesListGT = [[None]]
smCenterGT = ch.mean(chVerticesGT, axis=0)
chVerticesGT = chVerticesGT - ch.mean(chVerticesGT, axis=0)
minZ = ch.min(chVerticesGT[:,2])
chMinZ = ch.min(chVerticesGT[:,2])
zeroZVerts = chVerticesGT[:,2]- chMinZ
chVerticesGT = ch.hstack([chVerticesGT[:,0:2] , zeroZVerts.reshape([-1,1])])
chVerticesGT = chVerticesGT*0.09
smCenterGT = ch.array([0,0,0.1])
smVerticesGT = [chVerticesGT]
smVerticesGT, smNormalsGT, _ = transformObject(smVerticesGT, smNormalsGT, chScaleGT, chObjAzGT, ch.Ch([0]), ch.Ch([0]), targetPosition)
if useShapeModel:
addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, smVerticesGT, smFacesGT, smVColorsGT, smNormalsGT, smUVsGT, smHaveTexturesGT, smTexturesListGT)
else:
addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, v_teapots[currentTeapotModel][0], f_list_teapots[currentTeapotModel][0], vc_teapots[currentTeapotModel][0], vn_teapots[currentTeapotModel][0], uv_teapots[currentTeapotModel][0], haveTextures_list_teapots[currentTeapotModel][0], textures_list_teapots[currentTeapotModel][0])
center = center_teapots[currentTeapotModel]
if not useShapeModel:
smCenterGT = ch.array([0, 0, 0.1])
rendererGT = createRendererGT(glMode, chAzGT, chElGT, chDistGT, smCenterGT, v, vc, f_list, vn, light_colorGT, chComponentGT, chVColorsGT,
targetPosition, chDisplacementGT, width,height, uv, haveTextures_list, textures_list, frustum, win )
rendererGT.useShaderErrors = False
rendererGT.msaa = True
rendererGT.nsamples = 8
rendererGT.overdraw = True
rendererGT.initGL()
rendererGT.initGLTexture()
cv2.imwrite('renderergt' + str(readDataId) + '.jpeg' , 255*lin2srgb(rendererGT.r[:,:,[2,1,0]]), [int(cv2.IMWRITE_JPEG_QUALITY), 100])
ipdb.set_trace()
render = renderer.r.copy()
red = np.zeros(render.shape)
red[:,:,0] = 1
vis_occluded = np.array(rendererGT.indices_image == 0 + 1).copy().astype(np.bool)
vis_im = np.array(rendererGT.image_mesh_bool([0])).copy().astype(np.bool)
occlusion = (vis_im & (1 - vis_occluded)).astype(np.bool)
render[occlusion] = render[occlusion]*0.1 + red[occlusion] *0.9
cv2.imwrite('renderer' + str(readDataId) + '.jpeg' , 255*lin2srgb(render[:,:,[2,1,0]]), [int(cv2.IMWRITE_JPEG_QUALITY), 100])
ipdb.set_trace()
if useGTasBackground:
for teapot_i in range(len(renderTeapotsList)):
renderer = renderer_teapots[teapot_i]
renderer.set(background_image=rendererGT.r)
currentTeapotModel = 0
if not useShapeModel:
renderer = renderer_teapots[currentTeapotModel]
import differentiable_renderer
paramsList = [chAz, chEl]
# diffRenderer = differentiable_renderer.DifferentiableRenderer(renderer=renderer, params_list=paramsList, params=ch.concatenate(paramsList))
diffRenderer = renderer
vis_gt = np.array(rendererGT.indices_image!=1).copy().astype(np.bool)
vis_mask = np.array(rendererGT.indices_image==1).copy().astype(np.bool)
vis_im = np.array(renderer.indices_image!=1).copy().astype(np.bool)
oldChAz = chAz[0].r
oldChEl = chEl[0].r
# Show it
shapeIm = vis_gt.shape
numPixels = shapeIm[0] * shapeIm[1]
shapeIm3D = [vis_im.shape[0], vis_im.shape[1], 3]
if useBlender:
center = centerOfGeometry(teapot.dupli_group.objects, teapot.matrix_world)
# addLamp(scene, center, chLightAzGT.r, chLightElGT.r, chLightDistGT, chLightIntensityGT.r)
#Add ambient lighting to scene (rectangular lights at even intervals).
# addAmbientLightingScene(scene, useCycles)
teapot = blender_teapots[currentTeapotModel]
teapotGT = blender_teapots[currentTeapotModel]
placeNewTarget(scene, teapot, targetPosition)
teapot.layers[1]=True
# scene.layers[0] = False
# scene.layers[1] = True
scene.objects.unlink(scene.objects[str(targetIndex)])
placeCamera(scene.camera, -chAzGT.r[:].copy()*180/np.pi, chElGT.r[:].copy()*180/np.pi, chDistGT, center)
azimuthRot = mathutils.Matrix.Rotation(chObjAzGT.r[:].copy(), 4, 'Z')
original_matrix_world = teapot.matrix_world.copy()
teapot.matrix_world = mathutils.Matrix.Translation(original_matrix_world.to_translation()) * azimuthRot * (mathutils.Matrix.Translation(-original_matrix_world.to_translation())) * original_matrix_world
scene.update()
scene.render.image_settings.file_format = 'OPEN_EXR'
scene.render.filepath = 'opendr_blender.exr'
# bpy.ops.file.pack_all()
# bpy.ops.wm.save_as_mainfile(filepath='data/scene' + str(sceneIdx) + '_complete.blend')
# scene.render.filepath = 'blender_envmap_render.exr'
def imageGT():
    """
    Return the current ground-truth image as a float64 array, choosing the
    source by the module-level flags: the dataset image when
    datasetGroundtruth is set, the Blender render when groundTruthBlender
    is set, and the OpenDR ground-truth renderer otherwise.
    """
    # The globals are only read here; the declarations mirror how the rest
    # of the script toggles these flags at module level.
    global groundTruthBlender
    global rendererGT
    global blenderRender
    global datasetGroundtruth
    if datasetGroundtruth:
        source = imageDataset
    elif groundTruthBlender:
        # Blender render is returned as-is, without copying or casting.
        return blenderRender
    else:
        source = rendererGT.r
    return np.copy(np.array(source)).astype(np.float64)
# --- Ground-truth image loading and generative error models -----------------
# NOTE(review): a module-level `global` statement is a no-op; presumably left
# over from moving this code out of a function.
global datasetGroundtruth
# Pick the image directory depending on whether the ground truth was rendered
# synthetically with OpenDR or comes from the pre-rendered image set.
if syntheticGroundtruth:
    imagesDir = gtDir + 'images_opendr/'
else:
    imagesDir = gtDir + 'images/'
import utils
import skimage.transform
# Load the single ground-truth image for readDataId and resize it to the
# working resolution if needed.
image = utils.readImages(imagesDir, [readDataId], False)[0]
if image.shape[0] != height or image.shape[1] != width:
    image = skimage.transform.resize(image, [height,width])
# Convert sRGB to linear so it matches the (linear) renderer output.
imageDataset = srgb2lin(image)
# Current ground-truth image (selection logic in imageGT()).
imagegt = imageGT()
chImage = ch.array(imagegt)
# E_raw_simple = renderer - rendererGT
# White-out everything outside the GT visibility mask (used for comparisons
# against an unoccluded target).
negVisGT = ~vis_gt
imageWhiteMask = imagegt.copy()
imageWhiteMask[np.tile(negVisGT.reshape([shapeIm[0],shapeIm[1],1]),[1,1,3]).astype(np.bool)] = 1
chImageWhite = ch.Ch(imageWhiteMask)
# Raw per-pixel residual between the fitted render and the GT render, plus
# its per-pixel squared norm and the mean sum of squares.
E_raw = renderer - rendererGT
SE_raw = ch.sum(E_raw*E_raw, axis=2)
SSqE_raw = ch.SumOfSquares(E_raw)/numPixels
# Pixel noise model parameters: std dev is a chumpy variable so it can be
# annealed during minimization (see reduceVariance in the callback).
initialPixelStdev = 0.01
reduceVariance = False
# finalPixelStdev = 0.05
stds = ch.Ch([initialPixelStdev])
variances = stds ** 2
# Prior probability that a pixel belongs to the foreground (robust model).
globalPrior = ch.Ch([0.8])
# Negative mean log-likelihoods under the Gaussian and robust (outlier)
# models, plus region-based and per-pixel variants.
negLikModel = -ch.sum(generative_models.LogGaussianModel(renderer=renderer, groundtruth=rendererGT, variances=variances))/numPixels
negLikModelRobust = -ch.sum(generative_models.LogRobustModel(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variances))/numPixels
modelLogLikelihoodRobustRegionCh = -ch.sum(generative_models.LogRobustModelRegion(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variances))/numPixels
pixelLikelihoodRobustRegionCh = generative_models.LogRobustModelRegion(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variances)
pixelLikelihoodCh = generative_models.LogGaussianModel(renderer=renderer, groundtruth=rendererGT, variances=variances)
pixelLikelihoodRobustCh = generative_models.LogRobustModel(renderer=renderer, groundtruth=rendererGT, foregroundPrior=globalPrior, variances=variances)
# Per-pixel posterior probability of the foreground layer under the robust model.
post = generative_models.layerPosteriorsRobustCh(rendererGT, renderer, vis_im, 'FULL', globalPrior, variances)[0]
# Recompute the fitted-render visibility mask (==1 here, unlike the earlier
# !=1 version above).
vis_im = np.array(renderer.indices_image==1).copy().astype(np.bool)
# hogGT, hogImGT, drconv = image_processing.diffHog(rendererGT)
# hogRenderer, hogImRenderer, _ = image_processing.diffHog(renderer, drconv)
#
# hogE_raw = hogGT - hogRenderer
# hogCellErrors = ch.sum(hogE_raw*hogE_raw, axis=2)
# hogError = -ch.dot(hogGT.ravel(),hogRenderer.ravel())/(ch.sqrt(ch.SumOfSquares(hogGT))*ch.sqrt(ch.SumOfSquares(hogGT)))
# import opendr.filters
# robPyr = opendr.filters.gaussian_pyramid(renderer - rendererGT, n_levels=6, normalization=None)/numPixels
# robPyrSum = -ch.sum(ch.log(ch.exp(-0.5*robPyr**2/variances) + 1))
#
# edgeErrorPixels = generative_models.EdgeFilter(rendererGT=rendererGT, renderer=renderer)**2
# edgeError = ch.sum(edgeErrorPixels)
# --- Model selection and interactive-minimization state ---------------------
# Parallel lists: scalar objectives, per-pixel likelihood images, and
# human-readable names, indexed by `model`.
models = [negLikModel, negLikModelRobust]
pixelModels = [pixelLikelihoodCh, pixelLikelihoodRobustCh]
modelsDescr = ["Gaussian Model", "Outlier model"]
# , negLikModelPyr, negLikModelRobustPyr, SSqE_raw
# negLikModel2 = -generative_models.modelLogLikelihoodCh(rendererGT, diffRenderer, vis_im, 'FULL', variances)/numPixels
#
# negLikModelRobust2 = -generative_models.modelLogLikelihoodRobustCh(rendererGT, diffRenderer, vis_im, 'FULL', globalPrior, variances)/numPixels
#
# pixelLikelihoodCh2 = generative_models.logPixelLikelihoodCh(rendererGT, diffRenderer, vis_im, 'FULL', variances)
#
# pixelLikelihoodRobustCh2 = ch.log(generative_models.pixelLikelihoodRobustCh(rendererGT, diffRenderer, vis_im, 'FULL', globalPrior, variances))
#
# post2 = generative_models.layerPosteriorsRobustCh(rendererGT, diffRenderer, vis_im, 'FULL', globalPrior, variances)[0]
# pixelModels2 = [pixelLikelihoodCh2, pixelLikelihoodRobustCh2]
# models2 = [negLikModel2, negLikModelRobust2]
# Active model index: 1 = robust/outlier model.
model = 1
pixelErrorFun = pixelModels[model]
errorFun = models[model]
# pixelErrorFun2 = pixelModels2[model]
# errorFun2 = models2[model]
# zpolys = image_processing.zernikePolynomials(image=rendererGT.r.copy(), numCoeffs=20)
# Flags and counters driven by the keyboard handler and callbacks.
iterat = 0
changedGT = False
refresh = True
drawSurf = False
makeVideo = True
updateErrorFunctions = True
pendingCyclesRender = True
# Per-(model, GT-azimuth, GT-elevation) traces recorded during minimization
# and during error-surface exploration.
performance = {}
elevations = {}
azimuths = {}
gradEl = {}
gradAz = {}
performanceSurf = {}
elevationsSurf = {}
azimuthsSurf = {}
gradElSurf = {}
gradAzSurf = {}
gradFinElSurf = {}
gradFinAzSurf = {}
# Frames accumulated for the minimization video (see makeVideo below).
ims = []
# free_variables = [chCosAz, chSinAz, chLogCosEl, chLogSinEl]
# Parameters exposed to the optimizer.
free_variables = [chAz, chEl, chVColors, chShCoeffs]
# free_variables = [chShapeParams]
# Per-parameter scaling used to weight gradient steps (azimuth/elevation get
# unit scale; colors and SH coefficients a much smaller one).
azVar = 1
elVar = 1
vColorVar = 0.00001
shCoeffsVar = 0.00001
df_vars = np.concatenate([azVar*np.ones(chAz.shape), elVar*np.ones(chEl.shape), vColorVar*np.ones(chVColors.r.shape), shCoeffsVar*np.ones(chShCoeffs.r.shape)])
# NOTE(review): when the shape model is active df_vars is replaced by ones
# over the shape parameters only -- confirm free_variables should not be
# replaced accordingly.
if useShapeModel:
    df_vars = np.concatenate([np.ones(chShapeParams.shape)])
# Optimizer configuration.
maxiter = 20
method=1
options={'disp':False, 'maxiter':maxiter}
mintime = time.time()
boundEl = (0, np.pi/2.0)
boundAz = (0, None)
boundscomponents = (0,None)
# NOTE(review): the az/el bounds built on the next line are immediately
# overwritten with unbounded (None, None) pairs, one per scalar in
# free_variables -- presumably intentional (bounds disabled), verify.
bounds = [boundAz,boundEl]
bounds = [(None , None ) for sublist in free_variables for item in sublist]
methods=['dogleg', 'minimize', 'BFGS', 'L-BFGS-B', 'Nelder-Mead', 'SGDMom', 'probLineSearch']
# NOTE(review): `exit` and `minimize` shadow the builtin / common names; they
# are plain flags toggled by the keyboard handler.
exit = False
minimize = False
plotMinimization = False
changeRenderer = False
printStatsBool = False
beginTraining = False
createGroundTruth = False
beginTesting = False
exploreSurfaceBool = False
newTeapotAsGT = False
# Saved camera pose so the 'Z'/'X' keys can stash and restore it
# (module-level `global` statements here are no-ops).
global chAzSaved
global chElSaved
global chComponentSaved
chAzSaved = chAz.r[0]
chElSaved = chEl.r[0]
# chComponentSaved = chComponent.r[0]
if useShapeModel:
    chShapeParamsSaved = chShapeParams.r[:]
# --- Figure, video, and performance-surface setup ---------------------------
# NOTE(review): everything from the subplot creation down to plt.pause()
# appears at top level here, i.e. NOT inside the `if showSubplots:` guard on
# the next line -- this looks like a lost indent; confirm against the
# original file before relying on showSubplots to disable the plots.
if showSubplots:
# 3x2 grid: GT render, fitted render, two gradient-check panels, and two raw
# derivative panels.
f, ((ax1, ax2), (ax3, ax4), (ax5,ax6)) = plt.subplots(3, 2, subplot_kw={'aspect':'equal'}, figsize=(9, 12))
pos1 = ax1.get_position()
pos5 = ax5.get_position()
pos5.x0 = pos1.x0
ax5.set_position(pos5)
f.tight_layout()
ax1.set_title("Ground Truth")
ax2.set_title("Backprojection")
# Convert linear render to sRGB for display.
rendererIm = lin2srgb(renderer.r.copy())
pim2 = ax2.imshow(rendererIm)
# Overlay the fitted render's silhouette edges (in white) on the GT image.
edges = renderer.boundarybool_image
gtoverlay = imageGT().copy()
gtoverlay = lin2srgb(gtoverlay)
gtoverlay[np.tile(edges.reshape([shapeIm[0],shapeIm[1],1]),[1,1,3]).astype(np.bool)] = 1
pim1 = ax1.imshow(gtoverlay)
#
# extent = ax1.get_window_extent().transformed(f.dpi_scale_trans.inverted())
# f.savefig('ax1_figure.png', bbox_inches=extent)
# ax3.set_title("Pixel negative log probabilities")
# pim3 = ax3.imshow(-pixelErrorFun.r)
# cb3 = plt.colorbar(pim3, ax=ax3,use_gridspec=True)
# cb3.mappable = pim3
#
# ax4.set_title("Posterior probabilities")
# pim4 = ax4.imshow(np.tile(post.reshape(shapeIm[0],shapeIm[1],1), [1,1,3]))
# cb4 = plt.colorbar(pim4, ax=ax4,use_gridspec=True)
# Parameters whose per-pixel derivatives are visualized (pose, or the first
# two shape parameters when the shape model is active).
paramWrt1 = chAz
paramWrt2 = chEl
if useShapeModel:
    paramWrt1 = chShapeParams[0]
    paramWrt2 = chShapeParams[1]
# Finite-difference directional derivatives used to sanity-check the sign of
# the analytic per-pixel gradients.
diffAz = -ch.optimization.gradCheckSimple(pixelErrorFun, paramWrt1, 0.1)
diffEl = -ch.optimization.gradCheckSimple(pixelErrorFun, paramWrt2, 0.1)
ax3.set_title("Dr wrt. Azimuth Checkgrad")
# Analytic gradient signed by the finite-difference direction: positive
# (agreeing) vs negative (disagreeing) pixels.
drazsum = np.sign(-diffAz.reshape(shapeIm[0],shapeIm[1],1))*pixelErrorFun.dr_wrt(paramWrt1).reshape(shapeIm[0],shapeIm[1],1)
# Gradient split into non-boundary and boundary contributions (computed but
# only drazsum is displayed).
drazsumnobnd = -pixelErrorFun.dr_wrt(paramWrt1).reshape(shapeIm[0],shapeIm[1],1)*(1-renderer.boundarybool_image.reshape(shapeIm[0],shapeIm[1],1))
drazsumbnd = -pixelErrorFun.dr_wrt(paramWrt1).reshape(shapeIm[0],shapeIm[1],1)*(renderer.boundarybool_image.reshape(shapeIm[0],shapeIm[1],1))
drazsumnobnddiff = diffAz.reshape(shapeIm[0],shapeIm[1],1)*(1-renderer.boundarybool_image.reshape(shapeIm[0],shapeIm[1],1))
drazsumbnddiff = diffAz.reshape(shapeIm[0],shapeIm[1],1)*(renderer.boundarybool_image.reshape(shapeIm[0],shapeIm[1],1))
img3 = ax3.imshow(drazsum.squeeze(),cmap=matplotlib.cm.coolwarm, vmin=-1, vmax=1)
cb3 = plt.colorbar(img3, ax=ax3,use_gridspec=True)
cb3.mappable = img3
ax4.set_title("Dr wrt. param 2 Checkgrad")
drazsum = np.sign(-diffEl.reshape(shapeIm[0],shapeIm[1],1))*pixelErrorFun.dr_wrt(paramWrt2).reshape(shapeIm[0],shapeIm[1],1)
img4 = ax4.imshow(drazsum.squeeze(),cmap=matplotlib.cm.coolwarm, vmin=-1, vmax=1)
cb4 = plt.colorbar(img4, ax=ax4,use_gridspec=True)
cb4.mappable = img4
# Raw (negated) per-pixel derivatives w.r.t. each parameter.
ax5.set_title("Dr wrt. param 1")
drazsum = -pixelErrorFun.dr_wrt(paramWrt1).reshape(shapeIm[0],shapeIm[1],1).reshape(shapeIm[0],shapeIm[1],1)
img5 = ax5.imshow(drazsum.squeeze(),cmap=matplotlib.cm.coolwarm, vmin=-1, vmax=1)
cb5 = plt.colorbar(img5, ax=ax5,use_gridspec=True)
cb5.mappable = img5
ax6.set_title("Dr wrt. param 2")
drazsum = -pixelErrorFun.dr_wrt(paramWrt2).reshape(shapeIm[0],shapeIm[1],1).reshape(shapeIm[0],shapeIm[1],1)
img6 = ax6.imshow(drazsum.squeeze(),cmap=matplotlib.cm.coolwarm, vmin=-1, vmax=1)
cb6 = plt.colorbar(img6, ax=ax6,use_gridspec=True)
cb6.mappable = img6
# Re-align ax5 under ax1 after the colorbars shifted the layout (repeated;
# presumably to force the layout to settle).
pos1 = ax1.get_position()
pos5 = ax5.get_position()
pos5.x0 = pos1.x0
ax5.set_position(pos5)
pos1 = ax1.get_position()
pos5 = ax5.get_position()
pos5.x0 = pos1.x0
ax5.set_position(pos5)
plt.show()
plt.pause(0.01)
# Iteration timer read by the minimization callback.
t = time.time()
if makeVideo:
    # Side-by-side GT/backprojection animation saved on the 'V' key.
    import matplotlib.animation as animation
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=1, metadata=dict(title='', artist=''), bitrate=1800)
    figvid, (vax1, vax2) = plt.subplots(1, 2, sharey=True, subplot_kw={'aspect':'equal'}, figsize=(12, 6))
    vax1.axes.get_xaxis().set_visible(False)
    vax1.axes.get_yaxis().set_visible(False)
    vax1.set_title("Ground truth")
    vax2.axes.get_xaxis().set_visible(False)
    vax2.axes.get_yaxis().set_visible(False)
    vax2.set_title("Backprojection")
    plt.tight_layout()
if computePerformance:
    # 3D surface figure for the error landscape over azimuth/elevation, plus
    # empty traces for this (model, GT pose) configuration.
    from mpl_toolkits.mplot3d import Axes3D
    global figperf
    figperf = plt.figure()
    global axperf
    axperf = figperf.add_subplot(111, projection='3d')
    from matplotlib.font_manager import FontProperties
    fontP = FontProperties()
    fontP.set_size('small')
    # x1,x2,y1,y2 = plt.axis()
    # plt.axis((0,360,0,90))
    performance[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
    azimuths[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
    elevations[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
    gradAz[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
    gradEl[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
    performanceSurf[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
    azimuthsSurf[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
    elevationsSurf[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
    gradElSurf[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
    gradAzSurf[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
    gradFinAzSurf[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
    gradFinElSurf[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
def refreshSubplots():
    """Redraw all six diagnostic subplots against the current renderer state.

    Updates (in the module-level figure `f`): the GT image with silhouette
    edges overlaid, the current backprojection, the two finite-difference
    gradient-check panels, and the two raw per-pixel derivative panels.
    Reads/writes module-level matplotlib artists (pim1, pim2, cb3..cb6,
    ax3..ax6) created in the setup section above.
    """
    #Other subplots visualizing renders and its pixel derivatives
    # GT image with the fitted render's silhouette edges whited out.
    edges = renderer.boundarybool_image
    imagegt = imageGT()
    gtoverlay = imageGT().copy()
    gtoverlay = lin2srgb(gtoverlay)
    gtoverlay[np.tile(edges.reshape([shapeIm[0],shapeIm[1],1]),[1,1,3]).astype(np.bool)] = 1
    pim1.set_data(gtoverlay)
    rendererIm = lin2srgb(renderer.r.copy())
    pim2.set_data(rendererIm)
    # Visualized parameters: camera pose, or first two shape params.
    paramWrt1 = chAz
    paramWrt2 = chEl
    if useShapeModel:
        paramWrt1 = chShapeParams[0]
        paramWrt2 = chShapeParams[1]
    global model
    if model != 2:
        # Finite-difference directional derivatives for the sign check.
        diffAz = -ch.optimization.gradCheckSimple(pixelErrorFun, paramWrt1, 0.1)
        diffEl = -ch.optimization.gradCheckSimple(pixelErrorFun, paramWrt2, 0.1)
        # ax3.set_title("Pixel negative log probabilities")
        # pim3 = ax3.imshow(-pixelErrorFun.r)
        # cb3.mappable = pim3
        # cb3.update_normal(pim3)
        ax3.set_title("Dr wrt. Parameter 0 Checkgrad")
        drazsum = -np.sign(diffAz.reshape(shapeIm[0],shapeIm[1],1))*pixelErrorFun.dr_wrt(paramWrt1).reshape(shapeIm[0],shapeIm[1],1)
        # drazsum = drazsum*(renderer.boundarybool_image.reshape(shapeIm[0],shapeIm[1],1))
        img3 = ax3.imshow(drazsum.squeeze(),cmap=matplotlib.cm.coolwarm, vmin=-1, vmax=1)
        cb3.mappable = img3
    # else:
    # ax3.set_title("HoG Image GT")
    # pim3 = ax3.imshow(hogImGT.r)
    # cb3.mappable = pim3
    # cb3.update_normal(pim3)
    # ax4.set_title("Posterior probabilities")
    # ax4.imshow(np.tile(post.reshape(shapeIm[0],shapeIm[1],1), [1,1,3]))
    # NOTE(review): this panel uses diffEl, which is only assigned inside the
    # `model != 2` branch above -- with model == 2 this would raise a
    # NameError; confirm model 2 is never used with these subplots.
    ax4.set_title("Dr wrt. Parameter 1 Checkgrad")
    drazsum = -np.sign(diffEl.reshape(shapeIm[0],shapeIm[1],1))*pixelErrorFun.dr_wrt(paramWrt2).reshape(shapeIm[0],shapeIm[1],1).reshape(shapeIm[0],shapeIm[1],1)
    img4 = ax4.imshow(drazsum.squeeze(),cmap=matplotlib.cm.coolwarm, vmin=-1, vmax=1)
    cb4.mappable = img4
    # else:
    # ax4.set_title("HoG image renderer")
    # ax4.set_title("HoG Image GT")
    # pim4 = ax4.imshow(hogImRenderer.r)
    # cb4.mappable = pim4
    # cb4.update_normal(pim4)
    # Raw (negated) per-pixel derivatives w.r.t. each parameter.
    sdy, sdx = pixelErrorFun.shape
    drazsum = -pixelErrorFun.dr_wrt(paramWrt1).reshape(sdy,sdx,1).reshape(sdy,sdx,1)
    # drazsum = drazsum*(renderer.boundarybool_image.reshape(shapeIm[0],shapeIm[1],1))
    img5 = ax5.imshow(drazsum.squeeze(),cmap=matplotlib.cm.coolwarm, vmin=-1, vmax=1)
    cb5.mappable = img5
    cb5.update_normal(img5)
    drazsum = -pixelErrorFun.dr_wrt(paramWrt2).reshape(sdy, sdx,1).reshape(sdy,sdx,1)
    img6 = ax6.imshow(drazsum.squeeze(),cmap=matplotlib.cm.coolwarm, vmin=-1, vmax=1)
    cb6.mappable = img6
    cb6.update_normal(img6)
    f.canvas.draw()
    plt.pause(0.01)
def plotSurface(model):
    """Draw/update the 3D error landscape over azimuth and elevation.

    When `drawSurf` is set, interpolates the recorded error-surface samples
    (performanceSurf/azimuthsSurf/elevationsSurf for this model and GT pose)
    onto a grid and plots the surface with gradient arrows at each sample
    (blue = analytic, green/red = finite-difference agreeing/opposing).
    Always plots the current estimate as a yellow diamond with its gradient
    arrows; when `plotMinimization` is set, also overlays the minimization
    trajectory. Operates entirely on module-level state (figperf/axperf,
    chAz/chEl, the trace dictionaries).

    Side effect: the finite-difference check temporarily perturbs chAz/chEl
    and restores them afterwards.
    """
    global figperf
    global axperf
    global surf
    global line
    global drawSurf
    global computePerformance
    global plotMinimization
    global chAz
    global chEl
    global chDist
    global chAzGT
    global chElGT
    global chDistGT
    global scene
    # Start from a clean figure unless we are accumulating a trajectory or a
    # surface across calls.
    if not plotMinimization and not drawSurf:
        figperf.clear()
        global axperf
        axperf = figperf.add_subplot(111, projection='3d')
    plt.figure(figperf.number)
    axperf.clear()
    from matplotlib.font_manager import FontProperties
    fontP = FontProperties()
    fontP.set_size('small')
    scaleSurfGrads = 0
    if drawSurf:
        print("Drawing gardient surface.")
        # Interpolate the scattered (az, el, error) samples onto a regular
        # grid (degrees) for the surface plot.
        from scipy.interpolate import griddata
        x1 = np.linspace((azimuthsSurf[(model, chAzGT.r[0], chElGT.r[0])]*180./np.pi).min(), (azimuthsSurf[(model, chAzGT.r[0], chElGT.r[0])]*180./np.pi).max(), len((azimuthsSurf[(model, chAzGT.r[0], chElGT.r[0])]*180./np.pi)))
        y1 = np.linspace((elevationsSurf[(model, chAzGT.r[0], chElGT.r[0])]*180./np.pi).min(), (elevationsSurf[(model, chAzGT.r[0], chElGT.r[0])]*180./np.pi).max(), len((elevationsSurf[(model, chAzGT.r[0], chElGT.r[0])]*180./np.pi)))
        x2, y2 = np.meshgrid(x1, y1)
        z2 = griddata(((azimuthsSurf[(model, chAzGT.r[0], chElGT.r[0])]*180./np.pi), (elevationsSurf[(model, chAzGT.r[0], chElGT.r[0])]*180./np.pi)), performanceSurf[(model, chAzGT.r[0], chElGT.r[0])], (x2, y2), method='cubic')
        from matplotlib import cm, colors
        surf = axperf.plot_surface(x2, y2, z2, rstride=3, cstride=3, cmap=cm.coolwarm, linewidth=0.1, alpha=0.85)
        # scaleSurfGrads = 5./avgSurfGradMagnitudes
        # One arrow pair per recorded sample: analytic gradient (blue) and
        # finite-difference gradient (green, red if it opposes the analytic
        # one); both normalized to a fixed on-plot length.
        for point in range(len(performanceSurf[(model, chAzGT.r[0], chElGT.r[0])])):
            perfi = performanceSurf[(model, chAzGT.r[0], chElGT.r[0])][point]
            azi = azimuthsSurf[(model, chAzGT.r[0], chElGT.r[0])][point]
            eli = elevationsSurf[(model, chAzGT.r[0], chElGT.r[0])][point]
            gradAzi = -gradAzSurf[(model, chAzGT.r[0], chElGT.r[0])][point]
            gradEli = -gradElSurf[(model, chAzGT.r[0], chElGT.r[0])][point]
            scaleGrad = np.sqrt(gradAzi**2+gradEli**2) / 5
            arrowGrad = Arrow3D([azi*180./np.pi, azi*180./np.pi + gradAzi/scaleGrad], [eli*180./np.pi, eli*180./np.pi + gradEli/scaleGrad], [perfi, perfi], mutation_scale=10, lw=1, arrowstyle="-|>", color="b")
            axperf.add_artist(arrowGrad)
            diffAzi = -gradFinAzSurf[(model, chAzGT.r[0], chElGT.r[0])][point]
            diffEli = -gradFinElSurf[(model, chAzGT.r[0], chElGT.r[0])][point]
            scaleDiff = np.sqrt(diffAzi**2+diffEli**2) / 5
            colorArrow = 'g'
            if diffAzi * gradAzi + diffEli * gradEli < 0:
                colorArrow = 'r'
            arrowGradDiff = Arrow3D([azi*180./np.pi, azi*180./np.pi + diffAzi/scaleDiff], [eli*180./np.pi, eli*180./np.pi + diffEli/scaleDiff], [perfi, perfi], mutation_scale=10, lw=1, arrowstyle="-|>", color=colorArrow)
            # axperf.add_artist(arrowGradDiff)
        # Vertical dashed line marking the GT pose.
        axperf.plot([chAzGT.r[0]*180./np.pi, chAzGT.r[0]*180./np.pi], [chElGT.r[0]*180./np.pi,chElGT.r[0]*180./np.pi], [z2.min(), z2.max()], 'b--', linewidth=1)
    # Current estimate: yellow diamond plus analytic and finite-difference
    # gradient arrows at the current (az, el).
    errorFun = models[model]
    axperf.plot(chAz.r*180./np.pi, chEl.r*180./np.pi, errorFun.r[0], 'yD')
    import scipy.sparse as sp
    if sp.issparse(errorFun.dr_wrt(chAz)):
        drAz = -errorFun.dr_wrt(chAz).toarray()[0][0]
    else:
        drAz = -errorFun.dr_wrt(chAz)[0][0]
    if sp.issparse(errorFun.dr_wrt(chEl)):
        drEl = -errorFun.dr_wrt(chEl).toarray()[0][0]
    else:
        drEl = -errorFun.dr_wrt(chEl)[0][0]
    scaleDr = np.sqrt(drAz**2+drEl**2) / 5
    # gradCheckSimple perturbs the variables in place; restore them after.
    chAzOldi = chAz.r[0]
    chElOldi = chEl.r[0]
    diffAz = -ch.optimization.gradCheckSimple(errorFun, chAz, 0.01745)
    diffEl = -ch.optimization.gradCheckSimple(errorFun, chEl, 0.01745)
    scaleDiff = np.sqrt(diffAz**2+diffEl**2) / 5
    chAz[0] = chAzOldi
    chEl[0] = chElOldi
    arrowGrad = Arrow3D([chAz.r[0]*180./np.pi, chAz.r[0]*180./np.pi + drAz/scaleDr], [chEl.r[0]*180./np.pi, chEl.r[0]*180./np.pi + drEl/scaleDr], [errorFun.r[0], errorFun.r[0]], mutation_scale=10, lw=1, arrowstyle="-|>", color="b")
    axperf.add_artist(arrowGrad)
    colorArrow = 'g'
    if diffAz * drAz + diffEl * drEl < 0:
        colorArrow = 'r'
    arrowGradDiff = Arrow3D([chAz.r[0]*180./np.pi, chAz.r[0]*180./np.pi + diffAz/scaleDiff], [chEl.r[0]*180./np.pi, chEl.r[0]*180./np.pi + diffEl/scaleDiff], [errorFun.r[0], errorFun.r[0]], mutation_scale=10, lw=1, arrowstyle="-|>", color=colorArrow)
    axperf.add_artist(arrowGradDiff)
    # Overlay the recorded minimization trajectory, if any.
    if plotMinimization:
        if azimuths.get((model, chAzGT.r[0], chElGT.r[0])) != None:
            axperf.plot(azimuths[(model, chAzGT.r[0], chElGT.r[0])]*180./np.pi, elevations[(model, chAzGT.r[0], chElGT.r[0])]*180./np.pi, performance[(model, chAzGT.r[0], chElGT.r[0])], color='g', linewidth=1.5)
            axperf.plot(azimuths[(model, chAzGT.r[0], chElGT.r[0])]*180./np.pi, elevations[(model, chAzGT.r[0], chElGT.r[0])]*180./np.pi, performance[(model, chAzGT.r[0], chElGT.r[0])], 'rD')
    axperf.set_xlabel('Azimuth (degrees)')
    axperf.set_ylabel('Elevation (degrees)')
    if model == 2:
        axperf.set_zlabel('Squared Error')
    plt.title('Model: ' + modelsDescr[model])
    plt.pause(0.01)
    plt.draw()
def printStats():
    """Print diagnostic statistics for the current fit to stdout.

    Reports camera pose (estimate vs GT), analytic derivatives of the active
    error function, the occlusion fraction of the GT render, and -- when an
    error surface has been sampled (`drawSurf`) -- summary statistics
    comparing analytic against finite-difference gradients plus the pose
    difference to the surface minimum. Clears the `printStatsBool` flag when
    done. Reads module-level state only.
    """
    print("**** Statistics ****" )
    print("Relative Azimuth: " + str(chAzRel))
    print("GT Relative Azimuth: " + str(chAzRelGT))
    print("GT Cam Azimuth: " + str(chAzGT))
    print("Cam Azimuth: " + str(chAz))
    print("GT Cam Elevation: " + str(chElGT))
    print("Cam Elevation: " + str(chEl))
    print("Dr wrt cam Azimuth: " + str(errorFun.dr_wrt(chAz)))
    print("Dr wrt cam Elevation: " + str(errorFun.dr_wrt(chEl)))
    if useShapeModel:
        print("Dr wrt Shape Param 0: " + str(errorFun.dr_wrt(chShapeParams[0])))
        print("Dr wrt Shape Param 1: " + str(errorFun.dr_wrt(chShapeParams[1])))
    # print("Dr wrt Distance: " + str(errorFun.dr_wrt(chDist)))
    print("Occlusion is " + str(getOcclusionFraction(rendererGT)*100) + " %")
    if drawSurf:
        # Mean Euclidean distance between analytic and finite-difference
        # gradients over all surface samples.
        avgError = np.mean(np.sqrt((gradAzSurf[(model, chAzGT.r[0], chElGT.r[0])] - gradFinAzSurf[(model, chAzGT.r[0], chElGT.r[0])])**2 + (gradElSurf[(model, chAzGT.r[0], chElGT.r[0])] - gradFinElSurf[(model, chAzGT.r[0], chElGT.r[0])])**2))
        print("** Approx gradients - finite differenes." )
        print("Avg Eucl. distance :: " + str(avgError))
        # Mean angle between the two gradient fields, and how many samples
        # point in opposite directions (negative dot product).
        norm2Grad = np.sqrt((gradAzSurf[(model, chAzGT.r[0], chElGT.r[0])])**2 + (gradElSurf[(model, chAzGT.r[0], chElGT.r[0])])**2)
        norm2Diff = np.sqrt((gradFinAzSurf[(model, chAzGT.r[0], chElGT.r[0])])**2 + (gradFinElSurf[(model, chAzGT.r[0], chElGT.r[0])])**2)
        avgAngle = np.arccos((gradFinAzSurf[(model, chAzGT.r[0], chElGT.r[0])]*gradAzSurf[(model, chAzGT.r[0], chElGT.r[0])] + gradFinElSurf[(model, chAzGT.r[0], chElGT.r[0])]*gradElSurf[(model, chAzGT.r[0], chElGT.r[0])])/(norm2Grad*norm2Diff))
        print("Avg Angle.: " + str(np.mean(avgAngle)))
        print("Num opposite (red) gradients: " + str(np.sum((gradFinAzSurf[(model, chAzGT.r[0], chElGT.r[0])]*gradAzSurf[(model, chAzGT.r[0], chElGT.r[0])] + gradFinElSurf[(model, chAzGT.r[0], chElGT.r[0])]*gradElSurf[(model, chAzGT.r[0], chElGT.r[0])]) < 0)))
        # Angular (wrapped) pose difference between GT and the sampled
        # surface minimum.
        idxmin = np.argmin(performanceSurf[(model, chAzGT.r[0], chElGT.r[0])])
        azDiff = np.arctan2(np.arcsin(chAzGT - azimuthsSurf[(model, chAzGT.r[0], chElGT.r[0])][idxmin]), np.arccos(chAzGT - azimuthsSurf[(model, chAzGT.r[0], chElGT.r[0])][idxmin]))
        elDiff = np.arctan2(np.arcsin(chElGT - elevationsSurf[(model, chAzGT.r[0], chElGT.r[0])][idxmin]), np.arccos(chElGT - elevationsSurf[(model, chAzGT.r[0], chElGT.r[0])][idxmin]))
        print("Minimum Azimuth difference of " + str(azDiff*180/np.pi))
        print("Minimum Elevation difference of " + str(elDiff*180/np.pi))
    azDiff = np.arctan2(np.arcsin(chAzGT - chAz.r[0]), np.arccos(chAzGT - chAz.r[0]))
    elDiff = np.arctan2(np.arcsin(chElGT - chEl.r[0]), np.arccos(chElGT - chEl.r[0]))
    # print("Current Azimuth difference of " + str(azDiff*180/np.pi))
    # print("Current Elevation difference of " + str(elDiff*180/np.pi))
    global printStatsBool
    printStatsBool = False
def cb(errorFunMin):
    """Minimal per-iteration callback for the optimizer.

    Receives the optimizer's current state (unused) and simply announces
    that an iteration finished; the full bookkeeping lives in cb2.
    """
    print("Callback! " )
def cb2(_):
    """Full per-iteration callback: logging, annealing, video, perf traces.

    Called once per optimizer iteration (argument ignored). Prints timing
    and the current objective value, optionally anneals the pixel noise std,
    refreshes the diagnostic subplots, appends a frame to the video artist
    list, and records (error, az, el, gradients) into the per-(model, GT
    pose) performance traces before redrawing the error surface. Operates
    on module-level state throughout.
    """
    global t
    elapsed_time = time.time() - t
    print("Ended interation in " + str(elapsed_time))
    global pixelErrorFun
    global errorFun
    global iterat
    iterat = iterat + 1
    print("Callback! " + str(iterat))
    print("Model Log Likelihood: " + str(errorFun.r))
    global imagegt
    global renderer
    global gradAz
    global gradEl
    global performance
    global azimuths
    global elevations
    # Anneal the pixel noise: shrink std in place so the chumpy graph sees it.
    if reduceVariance:
        stds[:] = stds.r[:]*0.9
    if demoMode and refreshWhileMinimizing:
        refreshSubplots()
    if makeVideo:
        # Append one (GT overlay, render, caption) frame for ArtistAnimation.
        # NOTE(review): `t` is declared global above, so the annotation handle
        # assigned here overwrites the global timer until it is reset at the
        # end of this function -- fragile but apparently intended.
        plt.figure(figvid.number)
        im1 = vax1.imshow(gtoverlay)
        bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.8)
        t = vax1.annotate("Minimization iteration: " + str(iterat), xy=(1, 0), xycoords='axes fraction', fontsize=16,
                    xytext=(-20, 5), textcoords='offset points', ha='right', va='bottom', bbox=bbox_props)
        im2 = vax2.imshow(renderer.r)
        ims.append([im1, im2, t])
    if computePerformance and demoMode:
        # Lazily create the trace arrays for this (model, GT pose) key, then
        # append the current objective, pose, and scalar gradients.
        if performance.get((model, chAzGT.r[0], chElGT.r[0])) == None:
            performance[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
            azimuths[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
            elevations[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
            gradAz[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
            gradEl[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
        performance[(model, chAzGT.r[0], chElGT.r[0])] = numpy.append(performance[(model, chAzGT.r[0], chElGT.r[0])], errorFun.r)
        azimuths[(model, chAzGT.r[0], chElGT.r[0])] = numpy.append(azimuths[(model, chAzGT.r[0], chElGT.r[0])], chAz.r)
        elevations[(model, chAzGT.r[0], chElGT.r[0])] = numpy.append(elevations[(model, chAzGT.r[0], chElGT.r[0])], chEl.r)
        # dr_wrt may return sparse or dense; normalize to a scalar either way.
        import scipy.sparse as sp
        if sp.issparse(errorFun.dr_wrt(chAz)):
            drAz = errorFun.dr_wrt(chAz).toarray()[0][0]
        else:
            drAz = errorFun.dr_wrt(chAz)[0][0]
        if sp.issparse(errorFun.dr_wrt(chEl)):
            drEl = errorFun.dr_wrt(chEl).toarray()[0][0]
        else:
            drEl = errorFun.dr_wrt(chEl)[0][0]
        gradAz[(model, chAzGT.r[0], chElGT.r[0])] = numpy.append(gradAz[(model, chAzGT.r[0], chElGT.r[0])], drAz)
        gradEl[(model, chAzGT.r[0], chElGT.r[0])] = numpy.append(gradEl[(model, chAzGT.r[0], chElGT.r[0])], drEl)
        plotSurface(model)
    if drawSurf and demoMode and refreshWhileMinimizing:
        # plt.pause(0.1)
        plt.show()
        plt.draw()
        plt.pause(0.01)
    # Restart the iteration timer for the next callback.
    t = time.time()
def readKeys(window, key, scancode, action, mods):
print("Reading keys...")
global exit
global refresh
global chAz
global chEl
global phiOffsetGT
global chComponent
global changedGT
refresh = False
if mods!=glfw.MOD_SHIFT and key == glfw.KEY_ESCAPE and action == glfw.RELEASE:
glfw.set_window_should_close(window, True)
exit = True
if mods!=glfw.MOD_SHIFT and key == glfw.KEY_LEFT and action == glfw.RELEASE:
refresh = True
chAz[:] = chAz.r[0] - radians(5)
azimuth = chAz.r[0] - radians(5)
# chCosAz[:] = np.cos(azimuth)
# chSinAz[:] = np.sin(azimuth)
if mods!=glfw.MOD_SHIFT and key == glfw.KEY_RIGHT and action == glfw.RELEASE:
refresh = True
chAz[:] = chAz.r[0] + radians(5)
azimuth = chAz.r[0] + radians(5)
# chCosAz[:] = np.cos(azimuth)
# chSinAz[:] = np.sin(azimuth)
if mods!=glfw.MOD_SHIFT and key == glfw.KEY_DOWN and action == glfw.RELEASE:
refresh = True
chEl[:] = chEl[0].r - radians(5)
elevation = chEl[0].r - radians(5)
if elevation <= 0:
elevation = 0.0001
chLogCosEl[:] = np.log(np.cos(elevation))
chLogSinEl[:] = np.log(np.sin(elevation))
refresh = True
if mods!=glfw.MOD_SHIFT and key == glfw.KEY_UP and action == glfw.RELEASE:
refresh = True
chEl[:] = chEl[0].r + radians(5)
elevation = chEl[0].r + radians(5)
if elevation >= np.pi/2 - 0.0001:
elevation = np.pi/2 - 0.0002
chLogCosEl[:] = np.log(np.cos(elevation))
chLogSinEl[:] = np.log(np.sin(elevation))
if mods==glfw.MOD_SHIFT and key == glfw.KEY_LEFT and action == glfw.RELEASE:
refresh = True
chAz[:] = chAz.r[0] - radians(1)
# azimuth = chAz.r[0] - radians(1)
# chCosAz[:] = np.cos(azimuth)
# chSinAz[:] = np.sin(azimuth)
if mods==glfw.MOD_SHIFT and key == glfw.KEY_RIGHT and action == glfw.RELEASE:
refresh = True
chAz[:] = chAz.r[0] + radians(1)
# azimuth = chAz.r[0] + radians(1)
# chCosAz[:] = np.cos(azimuth)
# chSinAz[:] = np.sin(azimuth)
# if mods==glfw.MOD_SHIFT and key == glfw.KEY_LEFT and action == glfw.RELEASE:
# print("Left modifier!")
# refresh = True
# chAzGT[0] = chAzGT[0].r - radians(1)
# if mods==glfw.MOD_SHIFT and key == glfw.KEY_RIGHT and action == glfw.RELEASE:
# refresh = True
# # chAz[0] = chAz[0].r + radians(1)
# rotation[:] = rotation.r[0] + np.pi/4
# # rotation[:] = np.pi/2
# shCoeffsRGBGT[:] = np.dot(light_probes.sphericalHarmonicsZRotation(totalOffsetGT.r[:]), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
# chAzGT[:] = rotation.r[:]
# # ipdb.set_trace()
# shOriginal = chComponentGTOriginal[[0,3,2,1,4,5,6,7,8]]
# shOriginalDir = shDirLightGTOriginal[[0,3,2,1,4,5,6,7,8]]
# # chComponentGT[:] = np.dot(light_probes.sphericalHarmonicsZRotation(rotation), shOriginal)[[0,3,2,1,4,5,6,7,8]]
# # shDirLightGT[:] = np.dot(light_probes.sphericalHarmonicsZRotation(rotation), shOriginalDir)[[0,3,2,1,4,5,6,7,8]]
# # shDirLightGT[:] = np.dot(shDirLightGTOriginal.T, light_probes.sphericalHarmonicsZRotation(rotation)).T[:]
# # shDirLightGT[:] = np.sum(np.array(light_probes.sphericalHarmonicsZRotation(rotation) * shDirLightGTOriginal[:,None]), axis=1)
# # shDirLightGT[:] = chZonalToSphericalHarmonics(zGT, np.pi/2 - chLightElGT, chLightAzGT + rotation[:] - np.pi/2).r[:]
# print("Original: " + str(shDirLightGTOriginal))
# print(str(shCoeffsRGBGT.r))
# print(str(shDirLightGT.r))
# print(str(rendererGT.tn[0]))
if mods==glfw.MOD_SHIFT and key == glfw.KEY_DOWN and action == glfw.RELEASE:
refresh = True
chEl[0] = chEl[0].r - radians(1)
refresh = True
if mods==glfw.MOD_SHIFT and key == glfw.KEY_UP and action == glfw.RELEASE:
refresh = True
chEl[0] = chEl[0].r + radians(1)
# if mods!=glfw.MOD_SHIFT and key == glfw.KEY_X and action == glfw.RELEASE:
# refresh = True
# chScale[0] = chScale[0].r + 0.05
#
# if mods==glfw.MOD_SHIFT and key == glfw.KEY_X and action == glfw.RELEASE:
# refresh = True
# chScale[0] = chScale[0].r - 0.05
# if mods!=glfw.MOD_SHIFT and key == glfw.KEY_Y and action == glfw.RELEASE:
# refresh = True
# chScale[1] = chScale[1].r + 0.05
#
# if mods==glfw.MOD_SHIFT and key == glfw.KEY_Y and action == glfw.RELEASE:
# refresh = True
# chScale[1] = chScale[1].r - 0.05
# if mods!=glfw.MOD_SHIFT and key == glfw.KEY_Z and action == glfw.RELEASE:
# refresh = True
# chScale[2] = chScale[2].r + 0.05
#
# if mods==glfw.MOD_SHIFT and key == glfw.KEY_Z and action == glfw.RELEASE:
# refresh = True
# chScale[2] = chScale[2].r - 0.05
global errorFun
if mods != glfw.MOD_SHIFT and key == glfw.KEY_C and action == glfw.RELEASE:
print("Azimuth grad check: ")
jacs, approxjacs, check = ch.optimization.gradCheck(errorFun, [chAz], [1.49e-08])
print("Grad check jacs: " + "%.2f" % jacs)
print("Grad check fin jacs: " + "%.2f" % approxjacs)
print("Grad check check: " + "%.2f" % check)
# print("Scipy grad check: " + "%.2f" % ch.optimization.scipyGradCheck({'raw': errorFun}, [chAz]))
print("Elevation grad check: ")
jacs, approxjacs, check = ch.optimization.gradCheck(errorFun, [chEl], [1])
print("Grad check jacs: " + "%.2f" % jacs)
print("Grad check fin jacs: " + "%.2f" % approxjacs)
print("Grad check check: " + "%.2f" % check)
# print("Scipy grad check: " + "%.2f" % ch.optimization.scipyGradCheck({'raw': errorFun}, [chEl]))
print("Red VColor grad check: ")
jacs, approxjacs, check = ch.optimization.gradCheck(errorFun, [chVColors[0]], [0.01])
print("Grad check jacs: " + "%.2f" % jacs)
print("Grad check fin jacs: " + "%.2f" % approxjacs)
print("Grad check check: " + "%.2f" % check)
# print("Scipy grad check: " + "%.2f" % ch.optimization.scipyGradCheck({'raw': errorFun}, [chVColors]))
if key == glfw.KEY_D:
refresh = True
# chComponent[0] = chComponent[0].r + 0.1
if mods == glfw.MOD_SHIFT and glfw.KEY_D:
refresh = True
# chComponent[0] = chComponent[0].r - 0.1
global drawSurf
global model
global models
global updateErrorFunctions
if key == glfw.KEY_G and action == glfw.RELEASE:
refresh = True
changedGT = True
updateErrorFunctions = True
global targetPosition
global center
global cameraGT
global rendererGT
global renderer
global teapotGT
global teapot
global newTeapotAsGT
if mods==glfw.MOD_SHIFT and key == glfw.KEY_G and action == glfw.RELEASE:
newTeapotAsGT = True
global groundTruthBlender
global blenderRender
if mods != glfw.MOD_SHIFT and key == glfw.KEY_B and action == glfw.RELEASE:
if useBlender:
updateErrorFunctions = True
groundTruthBlender = not groundTruthBlender
# changedGT = True
# if groundTruthBlender:
# bpy.ops.render.render( write_still=True )
# image = cv2.imread(scene.render.filepath)
# image = np.float64(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))/255.0
# blenderRender = image
refresh = True
#Compute in order to plot the surface neighouring the azimuth/el of the gradients and error function.
global exploreSurfaceBool
if key == glfw.KEY_E and action == glfw.RELEASE:
print("Key E pressed?")
exploreSurfaceBool = True
if key == glfw.KEY_P and action == glfw.RELEASE:
ipdb.set_trace()
refresh = True
if key == glfw.KEY_N and action == glfw.RELEASE:
print("Back to GT!")
chAz[:] = chAzGT.r[:]
chEl[:] = chElGT.r[:]
chShapeParams[:] = chShapeParamsGT.r[:]
# chComponent[:] = chComponentGT.r[:]
refresh = True
global chAzSaved
global chElSaved
global chComponentSaved
global chShapeParamsSaved
if key == glfw.KEY_Z and action == glfw.RELEASE:
print("Saved!")
chAzSaved = chAz.r[0]
chElSaved = chEl.r[0]
chShapeParamsSaved = chShapeParams.r[:]
# chComponentSaved = chComponent.r[0]
if key == glfw.KEY_X and action == glfw.RELEASE:
print("Back to Saved!")
chAz[0] = chAzSaved
chEl[0] = chElSaved
# chComponent[0] = chComponentSaved
chShapeParams[:] = chShapeParamsSaved
refresh = True
global printStatsBool
if mods!=glfw.MOD_SHIFT and key == glfw.KEY_S and action == glfw.RELEASE:
printStatsBool = True
if key == glfw.KEY_V and action == glfw.RELEASE:
global ims
if makeVideo:
im_ani = animation.ArtistAnimation(figvid, ims, interval=2000, repeat_delay=3000, repeat=False, blit=True)
im_ani.save('minimization_demo.mp4', fps=None, writer=writer, codec='mp4')
ims = []
global stds
global globalPrior
global plotMinimization
if mods != glfw.MOD_CONTROL and mods != glfw.MOD_SHIFT and key == glfw.KEY_KP_1 and action == glfw.RELEASE:
stds[:] = stds.r[0]/1.5
print("New standard devs of " + str(stds.r))
refresh = True
drawSurf = False
plotMinimization = False
if mods != glfw.MOD_CONTROL and mods != glfw.MOD_SHIFT and key == glfw.KEY_KP_2 and action == glfw.RELEASE:
stds[:] = stds.r[0]*1.5
print("New standard devs of " + str(stds.r))
refresh = True
drawSurf = False
plotMinimization = False
if mods != glfw.MOD_CONTROL and mods != glfw.MOD_SHIFT and key == glfw.KEY_KP_4 and action == glfw.RELEASE:
globalPrior[0] = globalPrior.r[0] - 0.05
print("New foreground prior of" + str(globalPrior.r))
refresh = True
drawSurf = False
plotMinimization = False
if mods != glfw.MOD_CONTROL and mods != glfw.MOD_SHIFT and key == glfw.KEY_KP_5 and action == glfw.RELEASE:
globalPrior[0] = globalPrior.r[0] + 0.05
print("New foreground prior of " + str(globalPrior.r))
refresh = True
drawSurf = False
plotMinimization = False
global changeRenderer
global currentTeapotModel
changeRenderer = False
if mods != glfw.MOD_SHIFT and key == glfw.KEY_KP_7 and action == glfw.RELEASE:
currentTeapotModel = (currentTeapotModel - 1) % len(renderTeapotsList)
changeRenderer = True
if mods != glfw.MOD_SHIFT and key == glfw.KEY_KP_8 and action == glfw.RELEASE:
currentTeapotModel = (currentTeapotModel + 1) % len(renderTeapotsList)
changeRenderer = True
global renderer
global chShapeParams
if mods == glfw.MOD_SHIFT and key == glfw.KEY_KP_1 and action == glfw.RELEASE:
refresh = True
chShapeParams[1] = chShapeParams.r[1] + 0.2
if mods == glfw.MOD_SHIFT and key == glfw.KEY_KP_2 and action == glfw.RELEASE:
refresh = True
chShapeParams[2] = chShapeParams.r[2] + 0.2
if mods == glfw.MOD_SHIFT and key == glfw.KEY_KP_3 and action == glfw.RELEASE:
chShapeParams[3] = chShapeParams.r[3] + 0.2
refresh = True
if mods == glfw.MOD_SHIFT and key == glfw.KEY_KP_4 and action == glfw.RELEASE:
chShapeParams[4] = chShapeParams.r[4] + 0.2
refresh = True
if mods == glfw.MOD_SHIFT and key == glfw.KEY_KP_5 and action == glfw.RELEASE:
chShapeParams[5] = chShapeParams.r[5] + 0.2
refresh = True
if mods == glfw.MOD_SHIFT and mods == glfw.MOD_SHIFT and key == glfw.KEY_KP_6 and action == glfw.RELEASE:
chShapeParams[6] = chShapeParams.r[6] + 0.2
refresh = True
if mods == glfw.MOD_SHIFT and key == glfw.KEY_KP_8 and action == glfw.RELEASE:
chShapeParams[7] = chShapeParams.r[7] + 0.2
refresh = True
if mods == glfw.MOD_SHIFT and key == glfw.KEY_KP_9 and action == glfw.RELEASE:
chShapeParams[9] = chShapeParams.r[9] + 0.2
refresh = True
if mods == glfw.MOD_SHIFT and key == glfw.KEY_KP_8 and action == glfw.RELEASE:
chShapeParams[8] = chShapeParams.r[8] + 0.2
refresh = True
if mods == glfw.MOD_SHIFT and key == glfw.KEY_KP_0 and action == glfw.RELEASE:
chShapeParams[0] = chShapeParams.r[0] + 0.2
refresh = True
if mods == glfw.MOD_CONTROL and key == glfw.KEY_KP_1 and action == glfw.RELEASE:
refresh = True
chShapeParams[1] = chShapeParams.r[1] - 0.2
if mods == glfw.MOD_CONTROL and key == glfw.KEY_KP_2 and action == glfw.RELEASE:
refresh = True
chShapeParams[2] = chShapeParams.r[2] - 0.2
if mods == glfw.MOD_CONTROL and key == glfw.KEY_KP_3 and action == glfw.RELEASE:
chShapeParams[3] = chShapeParams.r[3] - 0.2
refresh = True
if mods == glfw.MOD_CONTROL and key == glfw.KEY_KP_4 and action == glfw.RELEASE:
chShapeParams[4] = chShapeParams.r[4] - 0.2
refresh = True
if mods == glfw.MOD_CONTROL and key == glfw.KEY_KP_5 and action == glfw.RELEASE:
chShapeParams[5] = chShapeParams.r[5] - 0.2
refresh = True
if mods == glfw.MOD_CONTROL and mods == glfw.MOD_CONTROL and key == glfw.KEY_KP_6 and action == glfw.RELEASE:
chShapeParams[6] = chShapeParams.r[6] - 0.2
refresh = True
if mods == glfw.MOD_CONTROL and key == glfw.KEY_KP_8 and action == glfw.RELEASE:
chShapeParams[7] = chShapeParams.r[7] - 0.2
refresh = True
if mods == glfw.MOD_CONTROL and key == glfw.KEY_KP_9 and action == glfw.RELEASE:
chShapeParams[9] = chShapeParams.r[9] - 0.2
refresh = True
if mods == glfw.MOD_CONTROL and key == glfw.KEY_KP_8 and action == glfw.RELEASE:
chShapeParams[8] = chShapeParams.r[8] - 0.2
refresh = True
if mods == glfw.MOD_CONTROL and key == glfw.KEY_KP_0 and action == glfw.RELEASE:
chShapeParams[0] = chShapeParams.r[0] - 0.2
refresh = True
if key == glfw.KEY_R and action == glfw.RELEASE:
refresh = True
if mods == glfw.MOD_CONTROL and key == glfw.KEY_R and action == glfw.RELEASE:
refresh = True
chShapeParams[:] = np.random.randn(latentDim)
chShapeParamsGT[:] = np.random.randn(latentDim)
global pixelErrorFun
global pixelErrorFun2
global errorFun2
global models2
global pixelModels2
global model
global models
global modelsDescr
global pixelModels
global reduceVariance
if key == glfw.KEY_O and action == glfw.RELEASE:
# drawSurf = False
model = (model + 1) % len(models)
print("Using " + modelsDescr[model])
errorFun = models[model]
pixelErrorFun = pixelModels[model]
# errorFun2 = models2[model]
# pixelErrorFun2 = pixelModels2[model]
# if model == 2:
# reduceVariance = True
# else:
# reduceVariance = False
refresh = True
global method
global methods
global options
global maxiter
if key == glfw.KEY_1 and action == glfw.RELEASE:
method = 0
options={'disp':False, 'maxiter':maxiter}
print("Changed to minimizer: " + methods[method])
if key == glfw.KEY_2 and action == glfw.RELEASE:
method = 1
maxiter = 20
options={'disp':False, 'maxiter':maxiter}
print("Changed to minimizer: " + methods[method])
if key == glfw.KEY_3 and action == glfw.RELEASE:
method = 2
options={'disp':False, 'maxiter':maxiter}
print("Changed to minimizer: " + methods[method])
if key == glfw.KEY_4 and action == glfw.RELEASE:
print("Changed to minimizer: " + methods[method])
method = 3
options={'disp':False, 'maxiter':maxiter}
if key == glfw.KEY_5 and action == glfw.RELEASE:
method = 4
maxiter = 1000
options={'disp':False, 'maxiter':maxiter}
print("Changed to minimizer: " + methods[method])
if key == glfw.KEY_6 and action == glfw.RELEASE:
method = 5
maxiter = 200
options = {'disp':False, 'maxiter':maxiter, 'lr':0.01, 'momentum':0.5, 'decay':0.99}
print("Changed to minimizer: " + methods[method])
if key == glfw.KEY_7 and action == glfw.RELEASE:
maxiter = 50
method = 6
options={'disp':False, 'maxiter':maxiter, 'df_vars':df_vars}
print("Changed to minimizer: " + methods[method])
global minimize
global free_variables
global df_vars
if mods==glfw.MOD_SHIFT and key == glfw.KEY_M and action == glfw.RELEASE:
# free_variables = [renderer.v.a.a]
minimize = True
if mods!=glfw.MOD_SHIFT and key == glfw.KEY_M and action == glfw.RELEASE:
# free_variables = [chAz, chEl, chVColors, chShCoeffs]
minimize = True
def timeRendering(iterations):
    """Benchmark the forward render: nudge the azimuth and re-render
    `iterations` times, then print the average wall-clock time per render."""
    start = time.time()
    for _ in range(iterations):
        # Perturb the azimuth slightly so every render recomputes instead of caching.
        chAz[:] = chAz.r[0] + radians(0.001)
        renderer.r
    elapsed = time.time() - start
    print("Per iteration time of " + str(elapsed / iterations))
def timeGradients(iterations):
    """Benchmark the backward pass: nudge the azimuth and recompute the error
    function's derivative w.r.t. azimuth `iterations` times; print the average."""
    start = time.time()
    for _ in range(iterations):
        # Small azimuth change invalidates cached derivatives before each timing step.
        chAz[:] = chAz.r[0] + radians(0.001)
        errorFun.dr_wrt(chAz)
    elapsed = time.time() - start
    print("Per iteration time of " + str(elapsed / iterations))
def exploreSurface():
    """Sweep azimuth/elevation around the ground-truth pose and record, for every
    model in `models`: the cost value, the analytic gradients w.r.t. az/el, and
    finite-difference gradients. Results go into the module-level *Surf dicts,
    keyed by (model index, GT azimuth, GT elevation). Restores chAz/chEl afterwards.
    NOTE(review): mutates the module-global `errorFun` via the loop variable."""
    global drawSurf
    global errorFun
    global refresh
    global model
    if computePerformance:
        print("Estimating cost function surface and gradients...")
        drawSurf = True
        # Remember the current pose so it can be restored after the sweep.
        chAzOld = chAz.r[0]
        chElOld = chEl.r[0]
        # Reset the accumulators for every model at this GT pose.
        for model_num, errorFun in enumerate(models):
            performanceSurf[(model_num, chAzGT.r[0], chElGT.r[0])] = np.array([])
            azimuthsSurf[(model_num, chAzGT.r[0], chElGT.r[0])] = np.array([])
            elevationsSurf[(model_num, chAzGT.r[0], chElGT.r[0])] = np.array([])
            gradAzSurf[(model_num, chAzGT.r[0], chElGT.r[0])] = np.array([])
            gradElSurf[(model_num, chAzGT.r[0], chElGT.r[0])] = np.array([])
            gradFinAzSurf[(model_num, chAzGT.r[0], chElGT.r[0])] = np.array([])
            gradFinElSurf[(model_num, chAzGT.r[0], chElGT.r[0])] = np.array([])
        # 20x10 grid: azimuth within +-60 deg of GT (clamped to [0, 2pi]),
        # elevation within +-90 deg of GT (clamped to [0, pi/2]).
        for chAzi in np.linspace(max(chAzGT.r[0]-np.pi/3.,0), min(chAzGT.r[0] + np.pi/3., 2.*np.pi), num=20):
            for chEli in np.linspace(max(chElGT.r[0]-np.pi/2,0), min(chElGT.r[0]+np.pi/2, np.pi/2), num=10):
                for model_num, errorFun in enumerate(models):
                    chAz[:] = chAzi
                    chEl[:] = chEli
                    performanceSurf[(model_num, chAzGT.r[0], chElGT.r[0])] = numpy.append(performanceSurf[(model_num, chAzGT.r[0], chElGT.r[0])], errorFun.r)
                    azimuthsSurf[(model_num, chAzGT.r[0], chElGT.r[0])] = numpy.append(azimuthsSurf[(model_num, chAzGT.r[0], chElGT.r[0])], chAzi)
                    elevationsSurf[(model_num, chAzGT.r[0], chElGT.r[0])] = numpy.append(elevationsSurf[(model_num, chAzGT.r[0], chElGT.r[0])], chEli)
                    import scipy.sparse as sp
                    # dr_wrt may return dense or sparse depending on the model; normalize to a scalar.
                    if sp.issparse(errorFun.dr_wrt(chAz)):
                        drAz = errorFun.dr_wrt(chAz).toarray()[0][0]
                    else:
                        drAz = errorFun.dr_wrt(chAz)[0][0]
                    if sp.issparse(errorFun.dr_wrt(chEl)):
                        drEl = errorFun.dr_wrt(chEl).toarray()[0][0]
                    else:
                        drEl = errorFun.dr_wrt(chEl)[0][0]
                    gradAzSurf[(model_num, chAzGT.r[0], chElGT.r[0])] = numpy.append(gradAzSurf[(model_num, chAzGT.r[0], chElGT.r[0])], drAz)
                    gradElSurf[(model_num, chAzGT.r[0], chElGT.r[0])] = numpy.append(gradElSurf[(model_num, chAzGT.r[0], chElGT.r[0])], drEl)
                    # gradCheckSimple perturbs chAz/chEl internally (step ~1 degree = 0.01745 rad),
                    # so save and restore the pose around the finite-difference check.
                    chAzOldi = chAz.r[0]
                    chElOldi = chEl.r[0]
                    diffAz = ch.optimization.gradCheckSimple(errorFun, chAz, 0.01745)
                    diffEl = ch.optimization.gradCheckSimple(errorFun, chEl, 0.01745)
                    chAz[:] = chAzOldi
                    chEl[:] = chElOldi
                    gradFinAzSurf[(model_num, chAzGT.r[0], chElGT.r[0])] = numpy.append(gradFinAzSurf[(model_num, chAzGT.r[0], chElGT.r[0])], diffAz)
                    gradFinElSurf[(model_num, chAzGT.r[0], chElGT.r[0])] = numpy.append(gradFinElSurf[(model_num, chAzGT.r[0], chElGT.r[0])], diffEl)
        # Restore the currently-selected model's error function and the original pose.
        errorFun = models[model]
        chAz[:] = chAzOld
        chEl[:] = chElOld
        refresh = True
# Dump per-model gradient/surface statistics to stats/statistics.txt and save
# one surface plot per model. Runs only when savePerformance is enabled.
if savePerformance:
    def writeStats(model):
        """Append gradient-vs-finite-difference statistics for one model to the stats file."""
        with open('stats/statistics.txt', 'a') as statsFile:
            statsFile.write("**** Statistics for " + modelsDescr[model] + " ****" + '\n')
            if drawSurf:
                # Mean Euclidean distance between analytic and finite-difference gradients.
                avgError = np.mean(np.sqrt((gradAzSurf[(model, chAzGT.r[0], chElGT.r[0])] - gradFinAzSurf[(model, chAzGT.r[0], chElGT.r[0])])**2 + (gradElSurf[(model, chAzGT.r[0], chElGT.r[0])] - gradFinElSurf[(model, chAzGT.r[0], chElGT.r[0])])**2))
                statsFile.write("** Approx gradients - finite differenes." + '\n')
                statsFile.write("Avg Eucl. distance :: " + str(avgError) + '\n')
                norm2Grad = np.sqrt((gradAzSurf[(model, chAzGT.r[0], chElGT.r[0])])**2 + (gradElSurf[(model, chAzGT.r[0], chElGT.r[0])])**2)
                norm2Diff = np.sqrt((gradFinAzSurf[(model, chAzGT.r[0], chElGT.r[0])])**2 + (gradFinElSurf[(model, chAzGT.r[0], chElGT.r[0])])**2)
                # Angle between the two gradient fields (cosine via normalized dot product).
                avgAngle = np.arccos((gradFinAzSurf[(model, chAzGT.r[0], chElGT.r[0])]*gradAzSurf[(model, chAzGT.r[0], chElGT.r[0])] + gradFinElSurf[(model, chAzGT.r[0], chElGT.r[0])]*gradElSurf[(model, chAzGT.r[0], chElGT.r[0])])/(norm2Grad*norm2Diff))
                statsFile.write("Avg Angle.: " + str(np.mean(avgAngle)) + '\n')
                statsFile.write("Num opposite (red) gradients: " + str(np.sum((gradFinAzSurf[(model, chAzGT.r[0], chElGT.r[0])]*gradAzSurf[(model, chAzGT.r[0], chElGT.r[0])] + gradFinElSurf[(model, chAzGT.r[0], chElGT.r[0])]*gradElSurf[(model, chAzGT.r[0], chElGT.r[0])]) < 0)) + '\n')
                # Angular distance from GT to the pose where the cost surface is minimal
                # (arctan2/arcsin/arccos combination wraps the difference to (-pi, pi]).
                idxmin = np.argmin(performanceSurf[(model, chAzGT.r[0], chElGT.r[0])])
                azDiff = np.arctan2(np.arcsin(chAzGT - azimuthsSurf[(model, chAzGT.r[0], chElGT.r[0])][idxmin]), np.arccos(chAzGT - azimuthsSurf[(model, chAzGT.r[0], chElGT.r[0])][idxmin]))
                elDiff = np.arctan2(np.arcsin(chElGT - elevationsSurf[(model, chAzGT.r[0], chElGT.r[0])][idxmin]), np.arccos(chElGT - elevationsSurf[(model, chAzGT.r[0], chElGT.r[0])][idxmin]))
                statsFile.write("Minimum Azimuth difference of " + str(azDiff*180/np.pi) + '\n')
                statsFile.write("Minimum Elevation difference of " + str(elDiff*180/np.pi) + '\n')
            # Always log the current (possibly minimized) pose error relative to GT.
            azDiff = np.arctan2(np.arcsin(chAzGT - chAz.r[0]), np.arccos(chAzGT - chAz.r[0]))
            elDiff = np.arctan2(np.arcsin(chElGT - chEl.r[0]), np.arccos(chElGT - chEl.r[0]))
            statsFile.write("Current Azimuth difference of " + str(azDiff*180/np.pi) + '\n')
            statsFile.write("Current Elevation difference of " + str(elDiff*180/np.pi) + '\n\n')
    for model_idx, model_i in enumerate(models):
        writeStats(model_idx)
        plotSurface(model_idx)
        plt.savefig('stats/surfaceModel' + modelsDescr[model_idx] + '.png')
    print("Finshed estimating.")
# Interactive demo loop: polls GLFW key events (handled by readKeys) and reacts
# to the flags those handlers set (newTeapotAsGT, changedGT, minimize, refresh, ...).
if demoMode:
    glfw.set_key_callback(win, readKeys)
    while not exit:
        # Poll for and process events
        glfw.make_context_current(win)
        glfw.poll_events()
        # Swap the currently selected teapot model in as the new ground truth scene.
        if newTeapotAsGT:
            rendererGT.makeCurrentContext()
            rendererGT.clear()
            del rendererGT
            removeObjectData(len(v) - targetIndex - 1, v, f_list, vc, vn, uv, haveTextures_list, textures_list)
            addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, v_teapots[currentTeapotModel][0], f_list_teapots[currentTeapotModel][0], vc_teapots[currentTeapotModel][0], vn_teapots[currentTeapotModel][0], uv_teapots[currentTeapotModel][0], haveTextures_list_teapots[currentTeapotModel][0], textures_list_teapots[currentTeapotModel][0])
            rendererGT = createRendererGT(glMode, chAzGT, chObjAzGT, chElGT, chDistGT, center, v, vc, f_list, vn, light_colorGT, chComponentGT, chVColorsGT, targetPosition, chDisplacementGT, chScaleGT, width,height, uv, haveTextures_list, textures_list, frustum, win )
            updateErrorFunctions = True
            refresh = True
            changedGT = True
            #Unlink and place the new teapot for Blender.
            if useBlender:
                scene.objects.unlink(teapotGT)
                teapot.matrix_world = mathutils.Matrix.Translation(targetPosition)
                teapotGT = blender_teapots[currentTeapotModel]
                placeNewTarget(scene, teapotGT, targetPosition)
                # Blender azimuth is negated relative to the chumpy parameterization.
                placeCamera(scene.camera, -chAzGT.r[:].copy()*180/np.pi, chElGT.r[:].copy()*180/np.pi, chDistGT, center)
                scene.update()
            newTeapotAsGT = False
        if printStatsBool:
            printStats()
        # The current pose was adopted as the new ground truth: reset all recorded
        # performance/gradient traces and re-sync GT parameters from the live ones.
        if changedGT:
            drawSurf = False
            plotMinimization = False
            imagegt = imageGT()
            chImage[:,:,:] = imagegt[:,:,:]
            chAzGT[:] = chAz.r[:]
            chElGT[:] = chEl.r[:]
            chDistGT[:] = chDist.r[:]
            # chComponentGT[:] = chComponent.r[:]
            # chVColorsGT[:] = chVColors.r[:]
            if makeVideo:
                ims = []
            performance[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
            azimuths[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
            elevations[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
            gradAz[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
            gradEl[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
            performanceSurf[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
            azimuthsSurf[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
            elevationsSurf[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
            gradElSurf[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
            gradAzSurf[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
            gradFinAzSurf[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
            gradFinElSurf[(model, chAzGT.r[0], chElGT.r[0])] = np.array([])
            if useBlender:
                print("Updating Ground Truth blender camera!")
                scene.update()
                center = centerOfGeometry(teapotGT.dupli_group.objects, teapotGT.matrix_world)
                placeCamera(scene.camera, -chAzGT.r[:].copy()*180/np.pi, chElGT.r[:].copy()*180/np.pi, chDistGT, center)
                scene.update()
            if useBlender:
                pendingCyclesRender = True
            if useGTasBackground:
                # Use the GT render as the background of every candidate renderer.
                for teapot_i in range(len(renderTeapotsList)):
                    renderer_i = renderer_teapots[teapot_i]
                    renderer_i.set(background_image=rendererGT.r)
            changedGT = False
        if exploreSurfaceBool:
            print("Explores surface is true??")
            exploreSurface()
            exploreSurfaceBool = False
        # A Cycles (Blender) ground-truth render was requested and is still pending.
        if groundTruthBlender and pendingCyclesRender:
            scene.update()
            # scene.layers[0] = False
            # scene.layers[1] = True
            bpy.ops.render.render( write_still=True )
            # image = cv2.imread(scene.render.filepath)
            # image = np.float64(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))/255.0
            image = np.array(imageio.imread(scene.render.filepath))[:,:,0:3]
            plt.imsave('blenderImage.png', lin2srgb(image))
            # Clamp HDR values so the image is a valid [0,1] ground truth.
            image[image>1]=1
            blenderRender = image
            # blenderRenderGray = 0.3*blenderRender[:,:,0] + 0.59*blenderRender[:,:,1] + 0.11*blenderRender[:,:,2]
            # rendererGTGray = 0.3*rendererGT[:,:,0] + 0.59*rendererGT[:,:,1] + 0.11*rendererGT[:,:,2]
            # chAmbientIntensityGT[:] = chAmbientIntensityGT.r*(np.mean(blenderRenderGray,axis=(0,1))/np.mean(rendererGTGray.r,axis=(0,1)))
            pendingCyclesRender = False
        # Switch the fitted renderer to a different teapot model.
        if changeRenderer:
            print("New teapot model " + str(currentTeapotModel))
            drawSurf = False
            plotMinimization = False
            refresh = True
            renderer = renderer_teapots[currentTeapotModel]
            updateErrorFunctions = True
            if useBlender:
                teapot = blender_teapots[currentTeapotModel]
            changeRenderer = False
        # Rebuild all error functions against the current ground-truth image.
        if updateErrorFunctions:
            # currentGT = rendererGT
            # if useBlender and groundTruthBlender:
            #     image = np.array(imageio.imread(scene.render.filepath))[:,:,0:3]
            #     image[image>1]=1
            #     blenderRender = image
            #     currentGT = blenderRender
            currentGT = ch.Ch(imageGT())
            negLikModel = -ch.sum(generative_models.LogGaussianModel(renderer=renderer, groundtruth=currentGT, variances=variances))/numPixels
            negLikModelRobust = -ch.sum(generative_models.LogRobustModel(renderer=renderer, groundtruth=currentGT, foregroundPrior=globalPrior, variances=variances))/numPixels
            pixelLikelihoodCh = generative_models.LogGaussianModel(renderer=renderer, groundtruth=currentGT, variances=variances)
            pixelLikelihoodRobustCh = generative_models.LogRobustModel(renderer=renderer, groundtruth=currentGT, foregroundPrior=globalPrior, variances=variances)
            modelLogLikelihoodRobustRegionCh = -ch.sum(generative_models.LogRobustModelRegion(renderer=renderer, groundtruth=currentGT, foregroundPrior=globalPrior, variances=variances))/numPixels
            pixelLikelihoodRobustRegionCh = generative_models.LogRobustModelRegion(renderer=renderer, groundtruth=currentGT, foregroundPrior=globalPrior, variances=variances)
            post = generative_models.layerPosteriorsRobustCh(currentGT, renderer, vis_im, 'FULL', globalPrior, variances)[0]
            edgeErrorPixels = generative_models.EdgeFilter(rendererGT=currentGT, renderer=renderer)**2
            edgeError = ch.sum(edgeErrorPixels)
            # hogGT, hogImGT, _ = image_processing.diffHog(currentGT, drconv)
            # hogRenderer, hogImRenderer, _ = image_processing.diffHog(renderer, drconv)
            #
            # hogE_raw = hogGT - hogRenderer
            # hogCellErrors = ch.sum(hogE_raw*hogE_raw, axis=2)
            # hogError = -ch.dot(hogGT.ravel(),hogRenderer.ravel())/(ch.sqrt(ch.SumOfSquares(hogGT))*ch.sqrt(ch.SumOfSquares(hogGT)))
            import opendr.filters
            robPyr = opendr.filters.gaussian_pyramid(renderer - currentGT, n_levels=6, normalization=None)/numPixels
            robPyrSum = -ch.sum(ch.log(ch.exp(-0.5*robPyr**2/variances) + 1))
            # edgeErrorPixels = generative_models.EdgeFilter(rendererGT=currentGT, renderer=renderer)**2
            # edgeError = ch.sum(edgeErrorPixels)
            # NOTE(review): modelsDescr has 4 entries but models/pixelModels only 3 —
            # the "Region Robust" description has no matching model; verify intent.
            models = [negLikModel, negLikModelRobust, robPyrSum]
            pixelModels = [pixelLikelihoodCh, pixelLikelihoodRobustCh, robPyr]
            modelsDescr = ["Gaussian Model", "Outlier model", "Region Robust", "Pyr Error" ]
            pixelErrorFun = pixelModels[model]
            errorFun = models[model]
            updateErrorFunctions = False
        # Run the currently selected scipy/chumpy minimizer on the current error function.
        if minimize:
            iterat = 0
            print("Minimizing with method " + methods[method])
            ch.minimize({'raw': errorFun}, bounds=bounds, method=methods[method], x0=free_variables, callback=cb2, options=options)
            plotMinimization = True
            minimize = False
        # Redraw the UI after any state change.
        if refresh:
            print("Model Log Likelihood: " + str(errorFun.r))
            if showSubplots:
                refreshSubplots()
            if computePerformance and drawSurf:
                plotSurface(model)
            # if demoMode or drawSurf:
            #     plt.pause(0.1)
            #     plt.draw()
            refresh = False
refreshSubplots() | 84,241 | 42.002552 | 357 | py |
sa-nmt | sa-nmt-master/Loss.py | """
This file handles the details of the loss function during training.
This includes: loss criterion, training statistics, and memory optimizations.
"""
from __future__ import division
import time
import sys
import math
import torch
import torch.nn as nn
def nmt_criterion(vocab_size, pad_id=0):
    """
    Construct the standard NMT criterion: negative log-likelihood summed
    over tokens, with the padding token excluded from the loss.

    Args:
        vocab_size: size of the target vocabulary.
        pad_id: index of the padding token (its class weight is zeroed).

    Returns:
        An ``nn.NLLLoss`` instance that sums (not averages) over tokens.
    """
    weight = torch.ones(vocab_size)
    weight[pad_id] = 0
    # reduction='sum' is the modern equivalent of the deprecated size_average=False.
    crit = nn.NLLLoss(weight, reduction='sum')
    return crit
class Statistics:
    """
    Training loss function statistics: accumulates summed loss, token counts
    and correct-prediction counts, and reports accuracy / perplexity / speed.
    """
    def __init__(self, loss=0, n_words=0, n_correct=0):
        self.loss = loss            # summed NLL over tokens
        self.n_words = n_words      # number of non-padding target tokens
        self.n_correct = n_correct  # number of correctly predicted tokens
        self.start_time = time.time()

    def update(self, stat):
        """Accumulate another Statistics object into this one."""
        self.loss += stat.loss
        self.n_words += stat.n_words
        self.n_correct += stat.n_correct

    def accuracy(self):
        """Token-level accuracy in percent."""
        return 100 * (self.n_correct / self.n_words)

    def ppl(self):
        """Per-token perplexity; exponent is capped at 100 to avoid overflow."""
        # Removed a leftover debug print of (loss, n_words) that polluted stdout.
        return math.exp(min(self.loss / self.n_words, 100))

    def elapsed_time(self):
        return time.time() - self.start_time

    def output(self, epoch, uidx, max_updates, start):
        """Print a one-line progress report; `start` is the training start time."""
        t = self.elapsed_time()
        print(("Epoch %2d, %5d/%5d; acc: %6.2f; ppl: %6.2f; " +
               "%3.0f tgt tok/s; %6.0f s elapsed") %
              (epoch, uidx, max_updates,
               self.accuracy(),
               self.ppl(),
               self.n_words / (t + 1e-5),
               time.time() - start))
        sys.stdout.flush()

    def log(self, prefix, experiment, optim):
        """Record the current statistics on an external experiment tracker."""
        t = self.elapsed_time()
        experiment.add_scalar_value(prefix + "_ppl", self.ppl())
        experiment.add_scalar_value(prefix + "_accuracy", self.accuracy())
        experiment.add_scalar_value(prefix + "_tgtper", self.n_words / t)
        experiment.add_scalar_value(prefix + "_lr", optim.lr)

    @staticmethod
    def score(loss, scores, targ, pad):
        """Build a Statistics from raw scores/targets, ignoring `pad` tokens."""
        pred = scores.max(1)[1]
        non_padding = targ.ne(pad)
        num_correct = pred.eq(targ) \
                          .masked_select(non_padding).int() \
                          .sum().item()
        return Statistics(loss, non_padding.int().sum().item(), num_correct)
def filter_gen_state(state):
    """Iterate over the (key, value) items of `state`, skipping None values."""
    return ((k, v) for k, v in state.items() if v is not None)
def new_split(x, size):
    """Split `x` into chunks of `size` rows, detaching each chunk from the graph
    while preserving its requires_grad flag, so per-shard gradients can be
    accumulated and back-propagated manually later."""
    def _detached(chunk):
        piece = chunk.detach()
        if chunk.requires_grad:
            piece.requires_grad_(True)
        return piece

    return tuple(_detached(chunk) for chunk in torch.split(x, size))
def shards(state, shard_size, eval=False):
    """
    Yield the tensors in `state` split into shards of `shard_size` rows, so the
    caller can compute the loss piecewise (a memory optimization). Each yielded
    dict maps the original keys to detached shard tensors. After the caller has
    back-propagated through every shard, the accumulated shard gradients are
    concatenated and propagated into the original (unsplit) tensors.

    In eval mode the state is yielded whole and no backward pass is run.
    """
    if eval:
        yield state
    else:
        non_none = dict(filter_gen_state(state))
        # keys: tensor names; values: per-key tuples of detached, grad-tracking shards.
        keys, values = zip(*((k, new_split(v, shard_size))
                             for k, v in non_none.items()))
        for shard_tensors in zip(*values):
            yield dict(zip(keys, shard_tensors))
        # Assumed backprop'd
        variables = []
        for i, k in enumerate(keys):
            # Only shards the caller actually back-propagated through have .grad.
            dv = [v.grad for v in values[i] if v.grad is not None]
            if dv:
                dv = torch.cat(dv)
                variables += [(state[k], dv)]
        inputs, grads = zip(*variables)
        torch.autograd.backward(inputs, grads)
class LossCompute:
    """Computes NLL loss and token-level statistics for decoder outputs."""

    def __init__(self, generator, crit):
        self.generator = generator  # maps decoder states to log-probabilities
        self.crit = crit            # the NLL criterion

    def make_loss_batch(self, outputs, targets):
        """
        Create all the variables that need to be sharded.
        This needs to match compute loss exactly.
        """
        return {"out": outputs, "target": targets}

    def compute_loss(self, out, target):
        """Return (loss, Statistics) for one (possibly sharded) batch."""
        # Flatten (time, batch, dim) outputs to (time*batch, dim) for the generator.
        flat_out = out.view(-1, out.size(2))
        flat_target = target.view(-1)
        scores = self.generator(flat_out)
        loss = self.crit(scores, flat_target)
        stats = Statistics.score(loss.item(), scores.detach(),
                                 flat_target.clone(), 0)
        return loss, stats
| 4,092 | 27.227586 | 77 | py |
sa-nmt | sa-nmt-master/Iterator.py | import numpy
import random
import pickle as pkl
import gzip
from tempfile import mkstemp
import os
import string
def fopen(filename, mode='r'):
    """
    Open `filename`, transparently decompressing it when it ends in '.gz'.

    Fix: gzip.open defaults to *binary* mode for 'r'/'w', while the builtin
    open defaults to text mode — so callers iterating lines would get bytes
    from a .gz file. Force text mode for gzip unless 'b' was requested.
    """
    if filename.endswith('.gz'):
        if 'b' not in mode and 't' not in mode:
            mode += 't'
        return gzip.open(filename, mode)
    return open(filename, mode)
class TextIterator:
    """Simple Bitext iterator.

    Shuffles the parallel source/target files on disk (writing '.shuf' copies),
    then yields batches of (source, target) sentences as lists of word indices.
    Batches are capped either by sentence count (`batch_size`) or, if
    `batch_tokens` > 0, by total token count. Sentences longer than
    `max_seq_length` are dropped; target sentences get BOS(2)/EOS(3) markers
    and out-of-vocabulary words map to UNK(1).
    """
    def __init__(self, source_file, target_file,
                 source_dict, target_dict,
                 batch_size=32,
                 batch_tokens=0,
                 max_seq_length=50,
                 src_vocab_size=-1,
                 tgt_vocab_size=-1):
        self.source_file = source_file
        self.target_file = target_file
        # Shuffle on disk first, then read from the shuffled copies.
        self.shuffle([source_file, target_file])
        self.source = fopen(source_file + '.shuf', 'r')
        self.target = fopen(target_file + '.shuf', 'r')
        with open(source_dict, 'rb') as f:
            self.source_dict = pkl.load(f)
        with open(target_dict, 'rb') as f:
            self.target_dict = pkl.load(f)
        # Indices of punctuation tokens in the source vocabulary (currently unused here).
        self.src_punct = set()
        for p in set(string.punctuation):
            if p in self.source_dict:
                self.src_punct.add(self.source_dict[p])
        self.batch_tokens = batch_tokens
        self.batch_size = batch_size
        self.max_seq_length = max_seq_length
        self.src_vocab_size = src_vocab_size
        self.tgt_vocab_size = tgt_vocab_size
        # Negative vocab size means "use the full dictionary".
        if src_vocab_size < 0:
            self.src_vocab_size = len(self.source_dict)
        if tgt_vocab_size < 0:
            self.tgt_vocab_size = len(self.target_dict)
        self.source_buffer = []
        self.target_buffer = []
        # Read-ahead buffer size (in sentences) used for length-sorting.
        self.k = batch_size * 20
        self.end_of_data = False
    def __iter__(self):
        return self
    @staticmethod
    def shuffle(files):
        """Filter and jointly shuffle the parallel files, writing '<name>.shuf' copies.

        Drops pairs where: one side is more than twice the other's length, a side
        is empty, or the first file's line is more than 1/4 punctuation.
        """
        print('shuffle %s | %s' % (files[0], files[1]))
        tf_os, tpath = mkstemp()
        tf = open(tpath, 'w')
        fds = [open(ff) for ff in files]
        punct = set(string.punctuation)
        def punct_count(ws):
            p = [w for w in ws if w in punct]
            return len(p)
        # Iterate the first file; read the parallel line from the others in lockstep.
        for l in fds[0]:
            lines = [l.strip()] + [ff.readline().strip() for ff in fds[1:]]
            tokens = [l.split() for l in lines]
            lengths = [len(t) for t in tokens]
            if min(lengths) * 2 < max(lengths):
                continue
            if min(lengths) < 1:
                continue
            # remove lines that have many punctuations
            n_punct = punct_count(tokens[0])
            if n_punct * 4 > lengths[0]:
                continue
            # Joined with '|||' so parallel lines stay aligned through the shuffle.
            print("|||".join(lines), file=tf)
        [ff.close() for ff in fds]
        tf.close()
        tf = open(tpath, 'r')
        lines = tf.readlines()
        random.shuffle(lines)
        fds = [open(ff + '.shuf', 'w') for ff in files]
        for l in lines:
            s = l.strip().split('|||')
            for ii, fd in enumerate(fds):
                print(s[ii], file=fd)
        [ff.close() for ff in fds]
        os.remove(tpath)
        return
    def reset(self):
        """Re-shuffle the corpus and rewind the open '.shuf' handles for a new epoch."""
        self.shuffle([self.source_file, self.target_file])
        # shuffle() truncates and rewrites the same '.shuf' paths, so seeking the
        # existing handles to 0 re-reads the freshly shuffled content.
        self.source.seek(0)
        self.target.seek(0)
    def __next__(self):
        """Return the next (source_batch, target_batch) pair of index lists."""
        if self.end_of_data:
            self.end_of_data = False
            self.reset()
            raise StopIteration
        source = []
        target = []
        batch_tokens = 0
        # fill buffer, if it's empty
        assert len(self.source_buffer) == len(self.target_buffer)
        if len(self.source_buffer) == 0:
            for k_ in range(self.k):
                ss = self.source.readline()
                if ss == "":
                    break
                tt = self.target.readline()
                if tt == "":
                    break
                self.source_buffer.append(ss.strip().split())
                self.target_buffer.append(tt.strip().split())
            # sort by target buffer
            tlen = numpy.array([len(t) for t in self.target_buffer])
            tidx = tlen.argsort()
            _sbuf = [self.source_buffer[i] for i in tidx]
            _tbuf = [self.target_buffer[i] for i in tidx]
            self.source_buffer = _sbuf
            self.target_buffer = _tbuf
            if len(self.source_buffer) == 0 or len(self.target_buffer) == 0:
                self.end_of_data = False
                self.reset()
                raise StopIteration
        try:
            # actual work here
            while True:
                # read from source file and map to word index
                try:
                    ss = self.source_buffer.pop()
                except IndexError:
                    break
                ss = [self.source_dict.get(w, 1) for w in ss]
                # Clip out-of-range ids to UNK (1) when a vocab limit is set.
                if self.src_vocab_size > 0:
                    ss = [w if w < self.src_vocab_size else 1 for w in ss]
                # read from source file and map to word index
                tt = self.target_buffer.pop()
                # append bos and eos to target
                tt = [2] + [self.target_dict.get(w, 1) for w in tt] + [3]
                if self.tgt_vocab_size > 0:
                    tt = [w if w < self.tgt_vocab_size else 1 for w in tt]
                if len(ss) > self.max_seq_length or \
                        len(tt) > self.max_seq_length:
                    continue
                source.append(ss)
                target.append(tt)
                batch_tokens += len(ss) + len(tt)
                # Stop at the token budget if one is set, otherwise at batch_size sentences.
                if self.batch_tokens > 0:
                    if batch_tokens >= self.batch_tokens:
                        break
                else:
                    if len(source) >= self.batch_size:
                        break
        except IOError:
            self.end_of_data = True
        if len(source) <= 0 or len(target) <= 0:
            self.end_of_data = False
            self.reset()
            raise StopIteration
        return source, target
| 5,911 | 29.474227 | 75 | py |
sa-nmt | sa-nmt-master/opts.py | import argparse
def model_opts(parser):
    """
    These options are passed to the construction of the model.
    Be careful with these as they will be used during translation.
    """
    # Model options

    # Embedding Options
    parser.add_argument('-word_vec_size', type=int, default=512,
                        help='Word embedding for both.')
    parser.add_argument('-share_decoder_embeddings', action='store_true',
                        help='Share the word and out embeddings for decoder.')

    # RNN Options
    parser.add_argument('-encoder_type', type=str, default='brnn',
                        choices=['brnn', 'sabrnn', 'fabrnn'],
                        help="""Type of encoder layer to use.""")
    parser.add_argument('-decoder_type', type=str, default='rnn',
                        choices=['rnn'],
                        help='Type of decoder layer to use.')
    parser.add_argument('-layers', type=int, default=2,
                        help='Number of layers in enc/dec.')
    parser.add_argument('-rnn_size', type=int, default=512,
                        help='Size of LSTM hidden states')
    parser.add_argument('-input_feed', type=int, default=1,
                        help="""Feed the context vector at each time step as
                        additional input (via concatenation with the word
                        embeddings) to the decoder.""")
    parser.add_argument('-rnn_type', type=str, default='LSTM',
                        choices=['LSTM'],
                        help="""The gate type to use in the RNNs""")
    # Structured/self-attention variants of the encoder.
    parser.add_argument('-encode_multi_key', action='store_true',
                        help="""Using multi keys encoding of source""")
    parser.add_argument('-share_attn', action='store_true',
                        help="""sharing attention weights""")
def train_opts(parser):
    """Add training options: checkpointing, GPU selection, optimization
    hyper-parameters, learning-rate schedule and reporting intervals."""
    parser.add_argument('-save_model', default='model',
                        help="""Model filename (the model will be saved as
                        <save_model>_epochN_PPL.pt where PPL is the
                        validation perplexity""")
    parser.add_argument('-train_from', default='', type=str,
                        help="""If training from a checkpoint then this is the
                        path to the pretrained model's state_dict.""")
    # GPU
    parser.add_argument('-gpuid', default=[], nargs='+', type=int,
                        help="Use CUDA on the listed devices.")
    parser.add_argument('-seed', type=int, default=42,
                        help="""Random seed used for the experiments
                        reproducibility.""")
    parser.add_argument('-max_updates', type=int, default=1000000,
                        help="""max number of updates""")
    # Init options
    parser.add_argument('-start_epoch', type=int, default=1,
                        help='The epoch from which to start')
    parser.add_argument('-param_init', type=float, default=0.1,
                        help="""Parameters are initialized over uniform distribution
                        with support (-param_init, param_init).
                        Use 0 to not use initialization""")
    # Optimization options
    parser.add_argument('-batch_size', type=int, default=32,
                        help='Maximum batch size')
    parser.add_argument('-max_generator_batches', type=int, default=32,
                        help="""Maximum batches of words in a sequence to run
                        the generator on in parallel. Higher is faster, but
                        uses more memory.""")
    parser.add_argument('-epochs', type=int, default=13,
                        help='Number of training epochs')
    parser.add_argument('-optim', default='adam',
                        choices=['sgd', 'adagrad', 'adadelta', 'adam'],
                        help="""Optimization method.""")
    parser.add_argument('-max_grad_norm', type=float, default=5,
                        help="""If the norm of the gradient vector exceeds this,
                        renormalize it to have the norm equal to
                        max_grad_norm""")
    parser.add_argument('-dropout', type=float, default=0.3,
                        help="Dropout probability; applied in LSTM stacks.")
    # learning rate
    parser.add_argument('-learning_rate', type=float, default=1.0,
                        help="""Starting learning rate. If adagrad/adadelta/adam
                        is used, then this is the global learning rate.
                        Recommended settings: sgd = 1, adagrad = 0.1,
                        adadelta = 1, adam = 0.001""")
    parser.add_argument('-learning_rate_decay', type=float, default=0.5,
                        help="""If update_learning_rate, decay learning rate by
                        this much if (i) perplexity does not decrease on the
                        validation set or (ii) epoch has gone past
                        start_decay_at""")
    parser.add_argument('-max_decay_step', type=int, default=5,
                        help='maximum number of decay step for learning rate')
    # Checkpointing / reporting cadence.
    parser.add_argument('-start_checkpoint_at', type=int, default=0,
                        help="""Start checkpointing every epoch after and including
                        this epoch""")
    parser.add_argument('-start_eval_checkpoint_at', type=int, default=0,
                        help="""Start checkpointing every eval after and including
                        this eval""")
    parser.add_argument('-report_every', type=int, default=50,
                        help="Print stats at this interval.")
    parser.add_argument('-eval_every', type=int, default=10000,
                        help="Evaluate at this interval.")
    # Structured-attention score clipping and hard-attention switch.
    parser.add_argument('-min_thres', type=float, default=-5.0,
                        help="clip the value lower than this point.")
    parser.add_argument('-max_thres', type=float, default=7.0,
                        help="clip the value larger than this point.")
    parser.add_argument('-hard', action='store_true',
                        help="using hard structured attention.")
def preprocess_opts(parser):
    """Register data preprocessing options on *parser*.

    Covers vocabulary sizes, sequence-length truncation and shuffling.
    """
    add = parser.add_argument
    # Vocabulary sizes; -1 means "keep the full vocabulary"
    # (train.py replaces negative values with the dictionary size).
    add('-src_vocab_size', type=int, default=-1,
        help="Size of the source vocabulary")
    add('-tgt_vocab_size', type=int, default=-1,
        help="Size of the target vocabulary")
    # Sentence-length truncation.
    add('-max_seq_length', type=int, default=100,
        help="Maximum sequence length")
    # Integer used as a boolean flag.
    add('-shuffle', type=int, default=1,
        help="Shuffle data")
def add_md_help_argument(parser):
    """Add the ``-md`` flag, which prints Markdown-formatted help and exits
    (see MarkdownHelpAction below)."""
    parser.add_argument('-md', action=MarkdownHelpAction,
                        help='print Markdown-formatted help text and exit.')
# MARKDOWN boilerplate
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class MarkdownHelpFormatter(argparse.HelpFormatter):
    """Bare-bones argparse help formatter that emits valid Markdown.

    Produces output shaped like::

        usage
        # **section heading**:
        ## **--argument-one**
        ```
        argument-one help text
        ```
    """
    def _format_usage(self, usage, actions, groups, prefix):
        # Wrap the plain usage string in a fenced code block.
        plain = super(MarkdownHelpFormatter, self)._format_usage(
            usage, actions, groups, prefix)
        return '\n```\n%s\n```\n\n' % plain

    def format_help(self):
        # The program name becomes the top-level Markdown heading.
        self._root_section.heading = '# %s' % self._prog
        return super(MarkdownHelpFormatter, self).format_help()

    def start_section(self, heading):
        # Argument-group names become second-level Markdown headings.
        super(MarkdownHelpFormatter, self).start_section('## **%s**' % heading)

    def _format_action(self, action):
        # Each action: a third-level heading, then its help fenced as code.
        parts = ['### **%s** ' % self._format_action_invocation(action)]
        if action.help:
            parts.append('')
            parts.append('```')
            parts.extend(self._split_lines(self._expand_help(action), 80))
            parts.append('```')
        parts.extend(['', ''])
        return '\n'.join(parts)
class MarkdownHelpAction(argparse.Action):
    """Argparse action that prints Markdown help and exits the parser."""

    def __init__(self, option_strings,
                 dest=argparse.SUPPRESS, default=argparse.SUPPRESS,
                 **kwargs):
        # nargs=0: the flag consumes no command-line values.
        super(MarkdownHelpAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            default=default,
            nargs=0,
            **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # Swap in the Markdown formatter, print, and terminate.
        parser.formatter_class = MarkdownHelpFormatter
        parser.print_help()
        parser.exit()
| 8,871 | 44.968912 | 84 | py |
sa-nmt | sa-nmt-master/translate.py | import argparse
import torch
import modelx as models
import infer
import string
# build args parser
parser = argparse.ArgumentParser(description='Training NMT')
# NOTE(review): "checkpoit" typo below is in a runtime help string; left as-is.
parser.add_argument('-checkpoint', required=True,
                    help='saved checkpoit.')
parser.add_argument('-input', required=True,
                    help='Text file to translate.')
parser.add_argument('-output', default='trans.bpe', help='output file')
parser.add_argument('-beam_size', default=5, type=int,
                    help="Beam size.")
parser.add_argument('-gpuid', default=[], nargs='+', type=int,
                    help="Use CUDA")
opt = parser.parse_args()
use_gpu = len(opt.gpuid) > 0
# Warn when a CUDA device is present but was not requested.
if torch.cuda.is_available() and not use_gpu:
    print("so you should probably run with -gpus 0")
# NOTE(review): torch.load without map_location fails on CPU-only hosts when
# the checkpoint was saved from GPU — confirm intended usage.
checkpoint = torch.load(opt.checkpoint)
train_opt = checkpoint['opt']
print('| train configuration')
# Clamp range for structured-attention edge scores (matches opts defaults).
train_opt.min_thres = -5.0
train_opt.max_thres = 7.0
#if train_opt.hard is None:
    #train_opt.hard = False
print(train_opt)
# `models` is the `modelx` alias imported at the top of this file; its
# make_base_model takes (opt, use_gpu, checkpoint).
model = models.make_base_model(train_opt, use_gpu, checkpoint)
# For structured-attention encoders, register punctuation token ids so the
# encoder can mask them in its attention.
if train_opt.encoder_type == "sabrnn":
    punct_idx = set()
    for p in set(string.punctuation):
        if p in model.dicts[0]:
            punct_idx.add(model.dicts[0][p])
    model.encoder.punct(punct_idx)
# over-write some options
train_opt.beam_size = opt.beam_size
agent = infer.Beam(train_opt, model)
agent.translate(opt.input, opt.output)
| 1,416 | 28.520833 | 71 | py |
def aeq(*args):
    """Assert that all arguments are equal to one another."""
    it = iter(args)
    ref = next(it)
    assert all(v == ref for v in it), \
        "Not all arguments have the same value: " + str(args)
def use_gpu(opt):
    """Return True when the options namespace requests at least one GPU."""
    wants_gpuid = hasattr(opt, 'gpuid') and len(opt.gpuid) > 0
    wants_gpu = hasattr(opt, 'gpu') and opt.gpu > -1
    return wants_gpuid or wants_gpu
| 388 | 26.785714 | 62 | py |
sa-nmt | sa-nmt-master/extract_tree.py | import argparse
import torch
from torch.autograd import Variable
import modelx as models
import networkx as nx
from networkx.algorithms.tree import maximum_spanning_arborescence
import string
# build args parser
parser = argparse.ArgumentParser(description='Training NMT')
# NOTE(review): "checkpoit" typo below is in a runtime help string; left as-is.
parser.add_argument('-checkpoint', required=True,
                    help='saved checkpoit.')
parser.add_argument('-input', required=True,
                    help='Text file to translate.')
parser.add_argument('-output', default='tree.txt', help='output file')
parser.add_argument('-gpuid', default=[], nargs='+', type=int,
                    help="Use CUDA")
opt = parser.parse_args()
# NOTE(review): use_gpu is hard-coded True here (ignoring -gpuid) and is
# overwritten a few lines below from the checkpoint's options — confirm.
use_gpu = True
if torch.cuda.is_available() and not use_gpu:
    print("so you should probably run with -gpus 0")
checkpoint = torch.load(opt.checkpoint)
train_opt = checkpoint['opt']
print('| train configuration')
print(train_opt)
use_gpu = len(train_opt.gpuid) > 0
# `models` is the `modelx` alias imported at the top of this file.
model = models.make_base_model(train_opt, use_gpu, checkpoint)
# Register punctuation ids so the encoder attention can mask them.
if train_opt.encoder_type in ["sabrnn", "fabrnn"]:
    punct_idx = set()
    for p in set(string.punctuation):
        if p in model.dicts[0]:
            punct_idx.add(model.dicts[0][p])
    print('Add punctuation constraint')
    model.encoder.punct(punct_idx)
# get the encoder
encoder = model.encoder
dicts = model.dicts
# Tensor factory namespace used by encode_string below.
tt = torch.cuda if use_gpu else torch
def encode_string(ss):
    """Map a whitespace-tokenized sentence to a (len, 1) LongTensor Variable.

    Unknown tokens map to index 1.  Uses the module-level ``dicts`` and
    ``tt`` globals set up above.
    """
    token_ids = [dicts[0].get(tok, 1) for tok in ss.split()]
    return Variable(tt.LongTensor(token_ids).view(-1, 1),
                    volatile=True)
def collapse_bpe(s, score):
    """Merge BPE sub-token rows/columns of an attention score matrix.

    Args:
        s: a BPE-segmented sentence; tokens ending in "@@" are non-final
            sub-tokens of a word.
        score: 2D tensor of p(z|s) over the tokens of ``s``.

    Returns:
        (sentence with the "@@ " markers removed,
         score matrix with sub-token columns, then rows, summed per word)
    """
    # Fix: dropped the dead `punct` / `punct_idx` bookkeeping of the old
    # version — it was computed but never used.
    tokens = s.split()
    # Group consecutive positions: a run of "x@@" tokens plus the following
    # plain token forms one word.  (A trailing unterminated run is dropped,
    # matching the original behavior.)
    groups = []
    run = []
    for i, tok in enumerate(tokens):
        if tok.endswith("@@"):
            run.append(i)
        elif run:
            run.append(i)  # final sub-token of the word
            groups.append(run)
            run = []
        else:
            groups.append([i])
    # (1) collapse the head (column) dimension.
    cols = [score[:, g].sum(1).view(-1, 1) for g in groups]
    merged_cols = torch.cat(cols, 1)
    # (2) collapse the child (row) dimension.
    rows = [merged_cols[g, :].sum(0).view(1, -1) for g in groups]
    collapsed_score = torch.cat(rows, 0)
    return s.replace("@@ ", ""), collapsed_score
def build_graph(score):
    """Build a weighted digraph from a potential score matrix.

    Args:
        score: FloatTensor (n, n); score[i, j] scores head j -> child i,
            with root scores on the diagonal.
    Returns:
        nx.DiGraph with node 0 as artificial root and nodes 1..n for
        tokens; each edge (head, child) carries its score as weight.
    """
    # return a list of (parent, child, weight)
    arcs = []
    n = score.size(0)
    # find the root first: the best diagonal entry becomes root's child.
    # NOTE(review): `val[0]` / `idx[0]` assume max(0) returns 1-element
    # tensors (older torch); on newer torch these are 0-dim — confirm.
    val, idx = score.diag().max(0)
    arcs.append((0, idx[0] + 1, val[0]))
    for i in range(n):
        for j in range(n):
            if i == j:  # root on the diagonal
                continue
            # arcs.append([0, i+1, score[i, j]])
            else:
                arcs.append([j+1, i+1, score[i, j]])
    g = nx.DiGraph()
    g.add_weighted_edges_from(arcs)
    return g
def mst(score):
    """Extract the maximum spanning arborescence of ``score``.

    Returns the tree as space-joined 'head->child' arc strings.
    """
    graph = build_graph(score)
    arborescence = maximum_spanning_arborescence(graph)
    arcs = ['%s->%s' % (head, child) for head, child in arborescence.edges()]
    return ' '.join(arcs)
def renorm(m):
    """Softmax each row of ``m`` (exp then row-normalize), returned in log space."""
    z = m.exp()
    return (z / z.sum(1, keepdim=True)).log()
def collapse():
    """Write '<sentence> ||| <head->child arcs>' lines for every input line.

    Reads the module-level globals (opt, model, train_opt) set up above.
    """
    fw = open(opt.output, 'w')
    for line in open(opt.input):
        x = encode_string(line)
        # Run the encoder for its side effect of caching attention scores.
        model.encoder(x)
        if train_opt.encoder_type == 'sabrnn':
            # Structured attention caches the pre-marginal edge scores.
            score = model.encoder.tree_attn.edge_score.squeeze(0)
        else:
            # Flat attention caches normalized scores; take log for parity.
            score = model.encoder.attn.score.squeeze(0).log()
        s, score = collapse_bpe(line, score.data)
        try:
            tree = mst(score)
            out = '%s ||| %s\n' % (s.strip(), tree)
            fw.write(out)
        except:
            # NOTE(review): bare except silently drops sentences whose MST
            # extraction fails — consider logging them.
            pass
    fw.close()
collapse()
| 4,276 | 26.242038 | 70 | py |
sa-nmt | sa-nmt-master/models.py | import torch
import torch.nn as nn
from torch.autograd import Variable
from attention import GlobalAttention, SelfAttention
from Utils import aeq
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
import math
class EncoderBase(nn.Module):
    """Shared base class for the encoders in this file.

    Subclasses implement ``forward``; ``_check_args`` provides a common
    sanity check on the batch dimensions.
    """
    def _check_args(self, input, lengths=None, hidden=None):
        # The batch dimension of `input` must agree with `lengths`.
        _, n_batch = input.size()
        if lengths is not None:
            aeq(n_batch, len(lengths))

    def forward(self, input, lengths=None, hidden=None):
        """
        Args:
            input (LongTensor): len x batch x nfeat.
            lengths (LongTensor): batch
            hidden: Initial hidden state.
        Returns:
            hidden_t (Variable): pair of layers x batch x rnn_size -
                final encoder state
            outputs (FloatTensor): len x batch x rnn_size - memory bank
        """
        raise NotImplementedError
class Encoder(EncoderBase):
    """Standard (optionally bidirectional) LSTM encoder over embeddings."""

    def __init__(self, word_vocab_size, embedding_size, word_padding_idx,
                 num_layers, hidden_size, dropout, bidirectional=True):
        super(Encoder, self).__init__()
        directions = 2 if bidirectional else 1
        # Split the hidden size across directions so the concatenated
        # forward/backward states have exactly `hidden_size` units.
        assert hidden_size % directions == 0
        self.embeddings = nn.Embedding(word_vocab_size,
                                       embedding_size,
                                       padding_idx=word_padding_idx)
        self.rnn = nn.LSTM(input_size=embedding_size,
                           hidden_size=hidden_size // directions,
                           num_layers=num_layers,
                           dropout=dropout,
                           bidirectional=bidirectional)

    def forward(self, input, lengths=None, hidden=None):
        """See EncoderBase.forward() for args and returns."""
        self._check_args(input, lengths, hidden)
        emb = self.embeddings(input)
        self.word_embs = emb  # cached so callers can inspect embeddings later
        rnn_input = pack(emb, lengths) if lengths is not None else emb
        outputs, hidden = self.rnn(rnn_input, hidden)
        if lengths is not None:
            outputs = unpack(outputs)[0]
        return outputs, hidden
# Structured Attention and our models
class MatrixTree(nn.Module):
    """Matrix-tree theorem for marginals of non-projective dependency
    parsing, as used in the paper "Learning Structured Text
    Representations."

    Input potentials are (batch, n, n) with arc scores off the diagonal
    and root scores on the diagonal; the output holds the corresponding
    marginal probabilities in the same layout.
    """
    def __init__(self, eps=1e-5, device=torch.device('cpu')):
        # NOTE(review): `eps` is stored but never used below.
        self.eps = eps
        super(MatrixTree, self).__init__()
        self.device = device
    def forward(self, input, lengths=None):
        laplacian = input.exp()
        output = input.clone()
        output.data.fill_(0)
        for b in range(input.size(0)):
            # Effective sentence length (ignore padded columns).
            lx = lengths[b] if lengths is not None else input.size(1)
            input_b = input[b, :lx, :lx]
            # Zero the self-arcs, then form the Laplacian L = D - A.
            lap = laplacian[b, :lx, :lx].masked_fill(
                torch.eye(lx).to(self.device).ne(0), 0)
            lap = -lap + torch.diag(lap.sum(0))
            # store roots on diagonal
            lap[0] = input_b.diag().exp()
            inv_laplacian = lap.inverse()
            # Marginals from the inverse Laplacian (matrix-tree theorem).
            factor = inv_laplacian.diag().unsqueeze(1)\
                .expand(lx, lx).transpose(0, 1)
            term1 = input_b.exp().mul(factor).clone()
            term2 = input_b.exp().mul(inv_laplacian.transpose(0, 1)).clone()
            term1[:, 0] = 0
            term2[0] = 0
            output_b = term1 - term2
            # Root-attachment marginals go back on the diagonal.
            roots_output = input_b.diag().exp().mul(
                inv_laplacian.transpose(0, 1)[0])
            output[b, :lx, :lx] = output_b + torch.diag(roots_output)
        return output
class TreeAttention(nn.Module):
    """Structured attention over latent dependency trees.

    Pairwise edge scores are turned into marginal probabilities with the
    matrix-tree theorem (``MatrixTree``) and the value vectors are then
    averaged under those marginals.
    """
    def __init__(self, dim, min_thres=-5, max_thres=7, hard=False,
                 device=torch.device('cpu')):
        super(TreeAttention, self).__init__()
        self.q = nn.Linear(dim, dim, bias=False)
        self.k = nn.Linear(dim, dim, bias=False)
        self.v = nn.Linear(dim, dim, bias=False)
        # Learned query scored against every key to rate "root-ness".
        self.root_query = nn.Parameter(torch.randn(dim))
        self.scale = math.sqrt(1 / dim)
        self.dtree = MatrixTree()
        # Scores are clamped into [min_thres, max_thres] before the
        # matrix-tree computation (stability of the exp/inverse).
        self.min_thres = min_thres
        self.max_thres = max_thres
        self.hard = hard
        self.device = device
    def forward(self, input, punct_mask=None, lengths=None):
        s_len, batch, dim = input.size()
        input = input.contiguous().transpose(0, 1) \
            .contiguous().view(-1, dim)
        q = self.q(input).view(batch, s_len, -1)  # (batch, s_len, dim)
        k = self.k(input).view(batch, s_len, -1)  # (batch, s_len, dim)
        v = self.v(input).view(batch, s_len, -1)  # (batch, s_len, dim)
        _score = torch.bmm(q, k.transpose(1, 2))  # (batch, s_len, s_len)
        # compute root
        r_ = self.root_query.view(1, -1, 1).expand(batch, dim, 1)
        root = torch.bmm(k, r_).squeeze(-1)  # (batch, s_len)
        mask = torch.eye(s_len).to(self.device)
        score = _score.clone()
        # NOTE(review): multiplying by the identity keeps ONLY the diagonal
        # of _score before adding the root scores, zeroing all pairwise arc
        # scores — `(1 - mask)` may have been intended; confirm.
        for b in range(batch):
            score[b] = _score[b] * mask + torch.diag(root[b])
        # normalized
        score *= self.scale
        if punct_mask is not None:
            # Punctuation positions get -inf so they are never chosen.
            punct_mask = punct_mask.transpose(0, 1)
            punct_mask = punct_mask[:, None, :].expand(batch, s_len, s_len) \
                .transpose(1, 2)
            score.data.masked_fill_(punct_mask, -math.inf)
        score = score.clamp(self.min_thres, self.max_thres)
        # Cached for tree extraction (see extract_tree.py).
        self.edge_score = score.transpose(1, 2)
        edge_score = self.dtree(score, lengths).transpose(1, 2)
        # edge_score.sum(2) == 1
        if self.hard:
            # Straight-through: forward pass uses the one-hot argmax tree,
            # gradients flow through the soft marginals.
            y = edge_score.data.new(edge_score.size()).fill_(0)
            _, max_idx = edge_score.data.max(2)
            y.scatter_(2, max_idx[:, :, None], 1)
            hard_edge = (Variable(y) - edge_score).detach() + edge_score
            edge_score = hard_edge
        return torch.bmm(edge_score, v)
class SAEncoder(Encoder):
    """LSTM encoder augmented with structured (dependency-tree) attention."""

    def __init__(self, word_vocab_size, embedding_size, word_padding_idx,
                 num_layers, hidden_size, dropout, bidirectional=True,
                 encode_multi_key=True, min_thres=-5, max_thres=7, hard=False):
        super(SAEncoder, self).__init__(word_vocab_size, embedding_size,
                                        word_padding_idx,
                                        num_layers, hidden_size, dropout,
                                        bidirectional=True)
        self.tree_attn = TreeAttention(hidden_size, min_thres, max_thres, hard)
        self.encode_multi_key = encode_multi_key
        if not self.encode_multi_key:
            # Gate used to fold tree features back into the RNN states.
            self.linear = nn.Linear(hidden_size, hidden_size, bias=False)
        self.punct_idx = None

    def punct(self, punct_idx):
        """Register token ids to be masked in the structured attention."""
        self.punct_idx = punct_idx

    def _punct_mask(self, input):
        # Mask of punctuation positions, or None when not applicable.
        if self.punct_idx is None:
            return None
        present = set(input.data.contiguous().view(-1).tolist())
        present &= self.punct_idx
        if len(present) == 0:
            return None
        mask = 0
        for p in present:
            mask = mask + input.data.eq(p)
        return mask

    def forward(self, input, lengths=None):
        outputs, hidden = Encoder.forward(self, input, lengths)
        tree_outputs = self.tree_attn(outputs, self._punct_mask(input),
                                      lengths)
        # tree_outputs: (batch, s_len, hidden_size)
        if not self.encode_multi_key:
            # Gate the tree features and add them to the RNN outputs.
            z = self.linear(outputs.view(-1, outputs.size(2))).sigmoid()
            gated = z.view_as(outputs) * tree_outputs.transpose(0, 1)
            return outputs + gated, hidden
        return (outputs, tree_outputs), hidden
class FAEncoder(Encoder):
    """LSTM encoder augmented with flat self-attention.

    Counterpart of ``SAEncoder`` that uses an unstructured self-attention
    layer instead of matrix-tree structured attention.
    """
    def __init__(self, word_vocab_size, embedding_size, word_padding_idx,
                 num_layers, hidden_size, dropout, bidirectional=True,
                 encode_multi_key=True):
        super(FAEncoder, self).__init__(word_vocab_size, embedding_size,
                                        word_padding_idx,
                                        num_layers, hidden_size, dropout,
                                        bidirectional=True)
        self.attn = SelfAttention(hidden_size)
        self.encode_multi_key = encode_multi_key
        # Bug fix: initialize punct_idx so forward() no longer raises
        # AttributeError when punct() was never called (SAEncoder already
        # initializes it this way).
        self.punct_idx = None

    def punct(self, punct_idx):
        """Register token ids to be masked out of the self-attention."""
        self.punct_idx = punct_idx

    def forward(self, input, lengths=None):
        # Padding positions (id 0) are masked inside the attention layer.
        mask = input.data.eq(0).t()
        outputs, hidden = Encoder.forward(self, input, lengths)
        punct_mask = None
        if self.punct_idx is not None:
            # Only build a mask for punctuation ids actually present.
            punct = set(input.data.contiguous().view(-1).tolist())
            punct &= self.punct_idx
            if len(punct) > 0:
                punct_mask = 0
                for p in punct:
                    punct_mask += input.data.eq(p)
        self_attn_outputs = self.attn(outputs, mask, punct_mask)
        if not self.encode_multi_key:
            outputs = outputs + self_attn_outputs.transpose(0, 1)
            return outputs, hidden
        return (outputs, self_attn_outputs), hidden
class StackedLSTM(nn.Module):
    """Multi-layer LSTM built from individual LSTMCells.

    The decoder needs this (rather than nn.LSTM) because input feeding
    steps the recurrence one timestep at a time.
    """
    def __init__(self, num_layers, input_size, rnn_size, dropout):
        super(StackedLSTM, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        self.layers = nn.ModuleList()
        # First cell reads the raw input; the rest read rnn_size states.
        in_sizes = [input_size] + [rnn_size] * (num_layers - 1)
        for in_size in in_sizes:
            self.layers.append(nn.LSTMCell(in_size, rnn_size))

    def forward(self, input, hidden):
        h_prev, c_prev = hidden
        next_h, next_c = [], []
        x = input
        for depth, cell in enumerate(self.layers):
            h_i, c_i = cell(x, (h_prev[depth], c_prev[depth]))
            next_h.append(h_i)
            next_c.append(c_i)
            x = h_i
            # No dropout after the topmost layer.
            if depth + 1 != self.num_layers:
                x = self.dropout(x)
        return x, (torch.stack(next_h), torch.stack(next_c))
class Decoder(nn.Module):
    """Input-feeding LSTM decoder with global attention."""

    def __init__(self, word_vocab_size, embedding_size, word_padding_idx,
                 num_layers, hidden_size, dropout, multi_key=False,
                 shared_attn=False):
        super(Decoder, self).__init__()
        self.hidden_size = hidden_size
        self.embeddings = nn.Embedding(word_vocab_size, embedding_size,
                                       padding_idx=word_padding_idx)
        # Input feeding: the previous attentional output is concatenated
        # with the current target embedding at every step.
        self.rnn = StackedLSTM(num_layers, embedding_size + hidden_size,
                               hidden_size, dropout)
        self.attn = GlobalAttention(hidden_size, multi_key, shared_attn)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input, hidden, context, mask=None, init_output=None):
        emb = self.embeddings(input)
        batch_size = input.size(1)
        # Start input feeding from zeros unless a previous output is given.
        if init_output is None:
            output = Variable(
                emb.data.new(batch_size, self.hidden_size).zero_(),
                requires_grad=False)
        else:
            output = init_output
        if mask is not None:
            self.attn.apply_mask(mask)
        outputs, attns = [], []
        for emb_t in emb.split(1):
            step_in = torch.cat([emb_t.squeeze(0), output], 1)
            rnn_output, hidden = self.rnn(step_in, hidden)
            attn_output, attn = self.attn(rnn_output,
                                          context)
            output = self.dropout(attn_output)
            outputs.append(output)
            attns.append(attn)
        return torch.stack(outputs), hidden, torch.stack(attns)
class NMT(nn.Module):
    """Encoder-decoder wrapper tying the pieces into one seq2seq model."""

    def __init__(self, encoder, decoder):
        super(NMT, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def _fix_enc_hidden(self, h):
        """Reshape a (layers*directions, batch, dim) encoder state into
        (layers, batch, directions*dim) by concatenating direction pairs."""
        forward_states = h[0:h.size(0):2]
        backward_states = h[1:h.size(0):2]
        return torch.cat([forward_states, backward_states], 2)

    def _init_hidden(self, enc_hidden):
        # Convert both the h and c halves of the LSTM state.
        return (self._fix_enc_hidden(enc_hidden[0]),
                self._fix_enc_hidden(enc_hidden[1]))

    def forward(self, src, tgt, src_lengths):
        context, enc_hidden = self.encoder(src, src_lengths)
        # Multi-key encoders return (rnn_context, tree_context).
        if isinstance(context, tuple):
            context_ = (context[0].transpose(0, 1), context[1])
        else:
            context_ = context.transpose(0, 1)
        dec_state = self._init_hidden(enc_hidden)
        src_pad_mask = src.data.eq(0).t()
        out, dec_hidden, attn = self.decoder(tgt, dec_state,
                                             context_, src_pad_mask)
        return out
def make_encoder(opt):
    """Build the encoder selected by ``opt.encoder_type``.

    Args:
        opt: the option namespace in the current environment.
    """
    common = (opt.src_vocab_size, opt.word_vec_size, 0,
              opt.layers, opt.rnn_size, opt.dropout)
    if opt.encoder_type == "sabrnn":
        # Structured (tree) attention encoder.
        return SAEncoder(*common,
                         bidirectional=True,
                         encode_multi_key=opt.encode_multi_key,
                         min_thres=opt.min_thres, max_thres=opt.max_thres,
                         hard=opt.hard)
    if opt.encoder_type == "fabrnn":
        # Flat self-attention encoder.
        return FAEncoder(*common,
                         bidirectional=True,
                         encode_multi_key=opt.encode_multi_key)
    # Default: plain bidirectional LSTM encoder.
    return Encoder(*common)
def make_decoder(opt):
    """Build the input-feeding attentional decoder configured by *opt*.

    Args:
        opt: the option namespace in the current environment; word padding
            index is hard-coded to 0.
    """
    return Decoder(opt.tgt_vocab_size, opt.word_vec_size, 0,
                   opt.layers, opt.rnn_size, opt.dropout,
                   opt.encode_multi_key, opt.share_attn)
def make_base_model(model_opt, checkpoint=None):
    """Assemble the full NMT model (encoder + decoder + generator).

    Args:
        model_opt: option namespace (possibly loaded from a checkpoint).
        checkpoint: optional snapshot dict with 'model' and 'dicts' keys.
    Returns:
        the NMT module, with ``generator`` (and ``dicts`` when restored).
    """
    model = NMT(make_encoder(model_opt), make_decoder(model_opt))
    # Generator projects decoder states onto target-vocabulary log-probs.
    generator = nn.Sequential(
        nn.Linear(model_opt.rnn_size, model_opt.tgt_vocab_size),
        nn.LogSoftmax(dim=-1))
    if model_opt.share_decoder_embeddings:
        # Tie the output projection to the decoder embedding matrix.
        generator[0].weight = model.decoder.embeddings.weight
    model.generator = generator
    # Load the model states from checkpoint.
    if checkpoint is not None:
        print('Loading model')
        model.load_state_dict(checkpoint['model'])
        model.dicts = checkpoint['dicts']
    return model
| 15,669 | 36.488038 | 79 | py |
sa-nmt | sa-nmt-master/infer.py | import torch
from torch.autograd import Variable
import pickle as pkl
import math
# TODO: documentation of functions
class Beam(object):
    r"""Beam-search decoder for NMT.

    A simple beam-search object: it takes a trained model (used to score
    the next output token) and the model's dictionaries (used to map word
    indices back to strings).

    Args:
        opt (argparse.Namespace): contains gpuid/beam_size/src_vocab_size.
        model: trained network exposing encoder, decoder, generator,
            _init_hidden and dicts.
    """
    def __init__(self, opt, model):
        self.opt = opt
        # Tensor factory: CUDA tensors when at least one GPU id was given.
        self.tt = torch.cuda if len(opt.gpuid) > 0 else torch
        self.model = model
        self.model.eval()
        self.dicts = model.dicts
        # create an inverse map from int->word for target side
        self.idx2w = {}
        for w, idx in self.dicts[1].items():
            self.idx2w[idx] = w
        self.bos_idx = self.dicts[1]['<bos>']
        self.eos_idx = self.dicts[1]['<eos>']
        self.pad_idx = self.dicts[1]['<pad>']
    def encode_string(self, ss):
        # Map a source sentence to a (len, 1) LongTensor Variable; unknown
        # and over-vocabulary tokens map to index 1.
        # NOTE(review): Variable(..., volatile=True) is pre-0.4 PyTorch API.
        ss = ss.split()
        ss = [self.dicts[0].get(w, 1) for w in ss]
        if self.opt.src_vocab_size > 0:
            ss = [w if w < self.opt.src_vocab_size else 1 for w in ss]
        ss = Variable(self.tt.LongTensor(ss).view(-1, 1),
                      volatile=True)
        return ss
    def decode_string(self, tidx):
        # Convert target indices back to a string, stopping at <eos>.
        ts = []
        for i in list(tidx):
            if i == self.eos_idx:
                break
            else:
                ts += [self.idx2w[i]]
        return ' '.join(ts)
    def beam_search(self, input):
        """
        Run beam search for a single source sentence.

        Args:
            input: LongTensor (src_len x 1) of source word indices.
        Returns:
            the best hypothesis as a string.
        """
        k = self.opt.beam_size
        completed_hyps = []
        # Replicate the source k times so the beam decodes as one batch.
        input = input.expand(input.size(0), k)
        max_len = int(input.size(0) * 1.5)
        # NOTE(review): fill value 2 assumes <bos> id == 2 — confirm against
        # the target vocabulary (self.bos_idx is available but unused here).
        hypos = self.tt.LongTensor(max_len, k).fill_(2)
        init_target = self.tt.LongTensor(1, k).fill_(2)
        init_target = Variable(init_target, volatile=True)
        # Only beam 0 starts alive (-inf elsewhere) so the first topk
        # expands a single hypothesis.
        scores = self.tt.FloatTensor(k).fill_(-math.inf)
        scores[0] = 0
        #lengths = [input.size(0) for i in range(k)]
        context, enc_hidden = self.model.encoder(input)
        init_hidden = self.model._init_hidden(enc_hidden)
        # alias
        decoder = self.model.decoder
        generator = self.model.generator
        init_output = None
        # Multi-key encoders return a (rnn_context, tree_context) pair.
        if isinstance(context, tuple):
            context = (context[0].transpose(0, 1), context[1])
        else:
            context = context.transpose(0, 1)
        decoder.attn.mask = None
        for t in range(max_len):
            out, dec_hidden, attn = decoder(init_target, init_hidden, context,
                                            init_output=init_output)
            log_probs = generator(out.squeeze(0)).data
            # Top-k continuations per live hypothesis ...
            scores_t, idx_t = log_probs.topk(k, 1)
            scores_t = scores_t + scores.view(-1, 1).expand_as(scores_t)
            # ... then top-k over all k*k flattened candidates.
            scores, k_idx = scores_t.view(-1).topk(k)
            # Parent hypothesis of each survivor (integer division).
            # NOTE(review): on newer torch, div() on ints returns floats;
            # floor_divide would be needed there — confirm torch version.
            next_hp = k_idx.div(k)
            next_ys = idx_t.view(-1).index_select(0, k_idx)
            done_y = next_ys.eq(self.eos_idx)
            if done_y.sum() > 0 and t > 0:
                # Retire finished hypotheses with length-normalized scores.
                for i in range(k):
                    if next_ys[i] == self.eos_idx:
                        j = next_hp[i]
                        text = self.decode_string(hypos[0:t, j])
                        completed_hyps.append((text, scores[i] / (t+1)))
                        k -= 1
                if k > 0:
                    # Shrink all beam state to the surviving hypotheses.
                    cont_y = next_ys.ne(self.eos_idx)
                    next_ys = next_ys.masked_select(cont_y)
                    next_hp = next_hp.masked_select(cont_y)
                    if isinstance(context, tuple):
                        context = (context[0][:k], context[1][:k])
                    else:
                        context = context[:k]
                    scores = scores.masked_select(cont_y)
            if k == 0:
                break
            # Reorder histories to follow the surviving parent beams.
            hypos = hypos.index_select(1, next_hp)
            hypos[t] = next_ys
            init_target = Variable(next_ys.view(1, -1), volatile=True)
            next_hp = Variable(next_hp)
            init_output = out.squeeze(0).index_select(0, next_hp)
            init_hidden = [h.index_select(1, next_hp) for h in dec_hidden]
        if len(completed_hyps) > 0:
            # Best length-normalized score wins.
            completed_hyps.sort(key=lambda tup: tup[1])
            best_h = completed_hyps.pop()
            return best_h[0]
        else:
            # Nothing finished within max_len: fall back to the best live beam.
            best_s, idx = scores.topk(1)
            best_h = hypos.index_select(1, idx).view(-1)
            return self.decode_string(best_h)
    def translate(self, text_file, out_file='output.txt'):
        """Translate every line of *text_file*, writing results to *out_file*."""
        fw = open(out_file, 'w')
        for line in open(text_file):
            src_idx = self.encode_string(line)
            s = self.beam_search(src_idx)
            fw.write(s + '\n')
        fw.close()
| 4,938 | 35.316176 | 78 | py |
sa-nmt | sa-nmt-master/attention.py | import torch
import torch.nn as nn
from Utils import aeq
import math
import torch.nn.functional as F
class SelfAttention(nn.Module):
    """Scaled dot-product self-attention over a (s_len, batch, dim) input."""

    def __init__(self, dim):
        super(SelfAttention, self).__init__()
        self.q = nn.Linear(dim, dim, bias=False)
        self.k = nn.Linear(dim, dim, bias=False)
        self.v = nn.Linear(dim, dim, bias=False)
        self.scale = math.sqrt(1 / dim)

    def forward(self, input, mask=None, punct_mask=None):
        """
        input (FloatTensor): s_len x batch x dim
        mask (ByteTensor): batch x s_len, set at padding positions
        punct_mask (ByteTensor): s_len x batch, set at punctuation positions
        """
        s_len, batch, dim = input.size()
        flat = input.contiguous().transpose(0, 1) \
                    .contiguous().view(-1, dim)
        queries = self.q(flat).view(batch, s_len, -1)
        keys = self.k(flat).view(batch, s_len, -1)
        values = self.v(flat).view(batch, s_len, -1)
        # (batch, s_len, s_len) raw attention logits.
        logits = torch.bmm(queries, keys.transpose(1, 2)) * self.scale
        if mask is not None:
            padded = mask[:, None, :].expand(batch, s_len, s_len)
            logits.data.masked_fill_(padded, -math.inf)
        if punct_mask is not None:
            pm = punct_mask.transpose(0, 1)  # batch, s_len
            pm = pm[:, None, :].expand(batch, s_len, s_len)
            logits.data.masked_fill_(pm, -math.inf)
        attn = F.softmax(logits.view(-1, s_len), dim=-1) \
                .view(-1, s_len, s_len)
        self.score = attn  # cached for later inspection (tree extraction)
        return torch.bmm(attn, values)
class GlobalAttention(nn.Module):
    """
    Luong-style global (general) attention, optionally with a second,
    gated "syntactic" context (multi_key) whose attention weights can be
    shared with the semantic attention or computed separately.
    """
    def __init__(self, dim, multi_key=False, share_attn=False):
        """
        dim (Int): dimension of input vector
        multi_key (Boolean): using a multi-key encoder
        share_attn (Boolean): share attention weights between the
            semantic and syntactic annotations
        """
        super(GlobalAttention, self).__init__()
        # make a local copy of hyper-parameters
        self.dim = dim
        self.share_attn = share_attn
        self.multi_key = multi_key
        self.linear_in = nn.Linear(dim, dim, bias=False)
        # NOTE(review): duplicate assignment kept from the original.
        self.share_attn = share_attn
        if multi_key:
            if not share_attn:  # using a separate attention
                self.linear_sa = nn.Linear(dim, dim, bias=False)
            # Output mixes [context, input, syntax context].
            self.linear_out = nn.Linear(dim*3, dim, bias=False)
            self.gate = nn.Linear(dim, dim)
        else:
            self.linear_out = nn.Linear(dim*2, dim, bias=False)
        self.mask = None
    def apply_mask(self, mask):
        # mask (ByteTensor): batch x src_len, set at padding positions.
        self.mask = mask
    def score(self, h_t, h_s, sa_attn=False):
        """
        h_t (FloatTensor): batch x tgt_len x dim
        h_s (FloatTensor): batch x src_len x dim
        sa_attn (Boolean): use the separate syntax projection (linear_sa)
        returns scores (FloatTensor): batch x tgt_len x src_len:
            raw attention scores for each src index
        """
        # Check input sizes
        src_batch, src_len, src_dim = h_s.size()
        tgt_batch, tgt_len, tgt_dim = h_t.size()
        aeq(src_batch, tgt_batch)
        aeq(src_dim, tgt_dim)
        aeq(self.dim, src_dim)
        h_t_ = h_t.view(tgt_batch*tgt_len, tgt_dim)
        if sa_attn:
            h_t_ = self.linear_sa(h_t_)
        else:
            h_t_ = self.linear_in(h_t_)
        h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
        h_s_ = h_s.transpose(1, 2)
        # (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)
        return torch.bmm(h_t, h_s_)
    def forward(self, input, context):
        """
        input (FloatTensor): batch x tgt_len x dim: decoder's rnn's output.
        context (FloatTensor): batch x src_len x dim: src hidden states,
            or a (context, tree_context) pair for multi-key encoders.
        """
        # one step input
        if isinstance(context, tuple):
            context, tree_context = context
        if input.dim() == 2:
            one_step = True
            input = input.unsqueeze(1)
        else:
            one_step = False
        batch, sourceL, dim = context.size()
        batch_, targetL, dim_ = input.size()
        aeq(batch, batch_)
        aeq(dim, dim_)
        aeq(self.dim, dim)
        # compute attention scores, as in Luong et al.
        align = self.score(input, context)
        if self.mask is not None:
            mask_ = self.mask[:, None, :]
            align.data.masked_fill_(mask_, -math.inf)
        # Softmax to normalize attention weights
        align_vectors = F.softmax(align, dim=-1)
        # each context vector c_t is the weighted average
        # over all the source hidden states
        c = torch.bmm(align_vectors, context)
        if self.multi_key:
            # sharing attention weight
            if self.share_attn:
                sc = torch.bmm(align_vectors, tree_context)
            else:
                # computing attention scores for syntax
                tree_align = self.score(input, tree_context, True)
                if self.mask is not None:
                    tree_align.data.masked_fill_(self.mask[:, None, :],
                                                 -math.inf)
                tree_align_vectors = F.softmax(tree_align, dim=-1)
                sc = torch.bmm(tree_align_vectors, tree_context)
            # Gate the syntactic context by the decoder state.
            # NOTE(review): F.sigmoid/F.tanh are deprecated on newer torch.
            z = F.sigmoid(self.gate(input))  # batch x tgt_len x dim
            self.z = z  # for visualization
            sc = sc * z
            concat_c = torch.cat([c, input, sc], 2).view(batch*targetL, dim*3)
        else:
            concat_c = torch.cat([c, input], 2).view(batch*targetL, dim*2)
        attn_h = self.linear_out(concat_c).view(batch, targetL, dim)
        attn_h = F.tanh(attn_h)
        if one_step:
            attn_h = attn_h.squeeze(1)
            align_vectors = align_vectors.squeeze(1)
            # Check output sizes
            batch_, dim_ = attn_h.size()
            aeq(batch, batch_)
            aeq(dim, dim_)
            batch_, sourceL_ = align_vectors.size()
            aeq(batch, batch_)
            aeq(sourceL, sourceL_)
        else:
            attn_h = attn_h.transpose(0, 1).contiguous()
            align_vectors = align_vectors.transpose(0, 1).contiguous()
            # Check output sizes
            targetL_, batch_, dim_ = attn_h.size()
            aeq(targetL, targetL_)
            aeq(batch, batch_)
            aeq(dim, dim_)
            targetL_, batch_, sourceL_ = align_vectors.size()
            aeq(targetL, targetL_)
            aeq(batch, batch_)
            aeq(sourceL, sourceL_)
        return attn_h, align_vectors
| 6,737 | 35.032086 | 78 | py |
sa-nmt | sa-nmt-master/train.py | import argparse
import torch
from Iterator import TextIterator
import models
from itertools import zip_longest
import random
import Loss
import opts
import os
import math
import subprocess
from infer import Beam
import re
from torch.optim.lr_scheduler import ReduceLROnPlateau
# Command-line interface: data paths here, everything else via opts.py.
parser = argparse.ArgumentParser(description='train.py')
# Data and loading options
parser.add_argument('-datasets', required=True, default=[],
                    nargs='+', type=str,
                    help='source_file target_file.')
parser.add_argument('-valid_datasets', required=True, default=[],
                    nargs='+', type=str,
                    help='valid_source valid target files.')
parser.add_argument('-beam_size', default=12, type=int, help="beam size")
# dictionaries
parser.add_argument('-dicts', required=True, default=[],
                    nargs='+',
                    help='source_vocab.pkl target_vocab.pkl files.')
# opts.py
opts.add_md_help_argument(parser)
opts.model_opts(parser)
opts.train_opts(parser)
opts.preprocess_opts(parser)
opt = parser.parse_args()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# for reproducibility
torch.manual_seed(opt.seed)
random.seed(opt.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(opt.seed)
print(opt)
# batch preparation
def prepare_data(seqs_x, seqs_y):
    """Sort a minibatch by source length (desc.) and pad into LongTensors.

    Returns (xs, ys, lengths_x) where xs/ys are (max_len, batch) tensors
    on the module-level ``device`` and padding index is 0.
    """
    pairs = [(seqs_x[i], seqs_y[i]) for i in range(len(seqs_x))]
    pairs.sort(key=lambda p: len(p[0]), reverse=True)
    xs = torch.LongTensor(
        list(zip_longest(*(p[0] for p in pairs), fillvalue=0))).to(device)
    ys = torch.LongTensor(
        list(zip_longest(*(p[1] for p in pairs), fillvalue=0))).to(device)
    lengths_x = [len(p[0]) for p in pairs]
    return xs, ys, lengths_x
def eval(model, criterion, valid_data):
    """Compute validation loss statistics for *model* over *valid_data*.

    Note: the name shadows the builtin ``eval``; kept for compatibility
    with existing callers.
    """
    stats = Loss.Statistics()
    model.eval()
    loss = Loss.LossCompute(model.generator, criterion)
    for src, tgt in valid_data:
        # Bug fix: prepare_data() takes exactly two arguments; the old
        # three-argument call `prepare_data(src, tgt, True)` raised a
        # TypeError as soon as evaluation ran.
        src, tgt, src_lengths = prepare_data(src, tgt)
        outputs = model(src, tgt[:-1], src_lengths)
        gen_state = loss.make_loss_batch(outputs, tgt[1:])
        _, batch_stats = loss.compute_loss(**gen_state)
        stats.update(batch_stats)
    # Restore training mode for the caller.
    model.train()
    return stats
def init_uniform(model, init_range=0.04):
    """Initialize every parameter of *model* uniformly in [-init_range, init_range]."""
    for param in model.parameters():
        param.data.uniform_(-init_range, init_range)
def tally_parameters(model):
    """Print parameter counts: total, encoder, and decoder+generator.

    Returns:
        (n_params, enc, dec) so callers/tests can check the tally.  The
        original returned None, so the added return value is backward
        compatible.
    """
    n_params = sum(p.nelement() for p in model.parameters())
    print('* number of parameters: %d' % n_params)
    enc = 0
    dec = 0
    for name, param in model.named_parameters():
        if 'encoder' in name:
            enc += param.nelement()
        # Bug fix: the old condition `'decoder' or 'generator' in name` was
        # always truthy (non-empty string), so EVERY non-encoder parameter
        # was counted as decoder.
        elif 'decoder' in name or 'generator' in name:
            dec += param.nelement()
    print('encoder: ', enc)
    print('decoder: ', dec)
    return n_params, enc, dec
def check_model_path():
    """Ensure the directory for ``opt.save_model`` exists, creating it if needed."""
    save_model_path = os.path.abspath(opt.save_model)
    model_dirname = os.path.dirname(save_model_path)
    # exist_ok avoids the check-then-create race of the previous
    # `if not os.path.exists(...)` guard.
    os.makedirs(model_dirname, exist_ok=True)
def train(opt):
print('| build data iterators')
train = TextIterator(*opt.datasets, *opt.dicts,
src_vocab_size=opt.src_vocab_size,
tgt_vocab_size=opt.tgt_vocab_size,
batch_size=opt.batch_size,
max_seq_length=opt.max_seq_length)
valid = TextIterator(*opt.valid_datasets, *opt.dicts,
src_vocab_size=opt.src_vocab_size,
tgt_vocab_size=opt.tgt_vocab_size,
batch_size=opt.batch_size,
max_seq_length=opt.max_seq_length)
if opt.src_vocab_size < 0:
opt.src_vocab_size = len(train.source_dict)
if opt.tgt_vocab_size < 0:
opt.tgt_vocab_size = len(train.target_dict)
print('| vocabulary size. source = %d; target = %d' %
(opt.src_vocab_size, opt.tgt_vocab_size))
dicts = [train.source_dict, train.target_dict]
crit = Loss.nmt_criterion(opt.tgt_vocab_size, 0).to(device)
if opt.train_from != '':
print('| Load trained model!')
checkpoint = torch.load(opt.train_from)
model = models.make_base_model(opt, checkpoint)
else:
model = models.make_base_model(opt)
init_uniform(model)
model.to(device)
if opt.encoder_type in ["sabrnn", "fabrnn"]:
print('Add punctuation constrain!')
model.encoder.punct(train.src_punct)
print(model)
model.dicts = dicts
check_model_path()
tally_parameters(model)
optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)
scheduler = ReduceLROnPlateau(optimizer, 'min',
factor=opt.learning_rate_decay,
patience=0)
uidx = 0 # number of updates
estop = False
min_lr = opt.learning_rate * math.pow(opt.learning_rate_decay, 5)
best_bleu = -1
for eidx in range(1, opt.epochs + 1):
closs = Loss.LossCompute(model.generator, crit)
tot_loss = 0
total_stats = Loss.Statistics()
report_stats = Loss.Statistics()
for x, y in train:
model.zero_grad()
src, tgt, lengths_x = prepare_data(x, y)
out = model(src, tgt[:-1], lengths_x)
gen_state = closs.make_loss_batch(out, tgt[1:])
shard_size = opt.max_generator_batches
batch_size = len(lengths_x)
batch_stats = Loss.Statistics()
for shard in Loss.shards(gen_state, shard_size):
loss, stats = closs.compute_loss(**shard)
loss.div(batch_size).backward()
batch_stats.update(stats)
tot_loss += loss.item()
torch.nn.utils.clip_grad_norm_(model.parameters(),
opt.max_grad_norm)
optimizer.step()
total_stats.update(batch_stats)
report_stats.update(batch_stats)
uidx += 1
if uidx % opt.report_every == 0:
report_stats.output(eidx, uidx, opt.max_updates,
total_stats.start_time)
report_stats = Loss.Statistics()
if uidx % opt.eval_every == 0:
valid_stats = eval(model, crit, valid)
# maybe adjust learning rate
scheduler.step(valid_stats.ppl())
cur_lr = optimizer.param_groups[0]['lr']
print('Validation perplexity %d: %g' %
(uidx, valid_stats.ppl()))
print('Learning rate: %g' % cur_lr)
if cur_lr < min_lr:
print('Reaching minimum learning rate. Stop training!')
estop = True
break
model_state_dict = model.state_dict()
if eidx >= opt.start_checkpoint_at:
checkpoint = {
'model': model_state_dict,
'opt': opt,
'dicts': dicts
}
# evaluate with BLEU score
inference = Beam(opt, model)
output_bpe = opt.save_model + '.bpe'
output_txt = opt.save_model + '.txt'
inference.translate(opt.valid_datasets[0], output_bpe)
model.train()
subprocess.call("sed 's/@@ //g' {:s} > {:s}"
.format(output_bpe, output_txt),
shell=True)
ref = opt.valid_datasets[1][:-4]
subprocess.call("sed 's/@@ //g' {:s} > {:s}"
.format(opt.valid_datasets[1], ref),
shell=True)
cmd = "perl data/multi-bleu.perl {} < {}" \
.format(ref, output_txt)
p = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE) \
.stdout.read().decode('utf-8')
bleu = re.search("[\d]+.[\d]+", p)
bleu = float(bleu.group())
print('Validation BLEU %d: %g' % (uidx, bleu))
if bleu > best_bleu:
best_bleu = bleu
torch.save(checkpoint, '%s_best.pt' % opt.save_model)
print('Saved model: %d | BLEU %.2f' % (uidx, bleu))
if uidx >= opt.max_updates:
print('Finishing after {:d} iterations!'.format(uidx))
estop = True
break
if estop:
break
train(opt)
| 8,797 | 36.598291 | 77 | py |
sa-nmt | sa-nmt-master/data/filter.py | import os
import sys
import random
from tempfile import mkstemp
from subprocess import call
def main(files):
tf_os, tpath = mkstemp()
tf = open(tpath, 'w')
fds = [open(ff) for ff in files]
for l in fds[0]:
lines = [l.strip()] + [ff.readline().strip() for ff in fds[1:]]
lengths = [len(l.split()) for l in lines]
if min(lengths) * 2 < max(lengths):
continue
if max(lengths) > 50 or min(lengths) < 2:
continue
print >>tf, "|||".join(lines)
[ff.close() for ff in fds]
tf.close()
tf = open(tpath, 'r')
lines = tf.readlines()
random.shuffle(lines)
fds = [open(ff+'.shuf','w') for ff in files]
for l in lines:
s = l.strip().split('|||')
for ii, fd in enumerate(fds):
print >>fd, s[ii]
[ff.close() for ff in fds]
os.remove(tpath)
if __name__ == '__main__':
main(sys.argv[1:])
| 933 | 18.458333 | 71 | py |
sa-nmt | sa-nmt-master/data/shuffle.py | from __future__ import print_function
import os
import sys
import random
from tempfile import mkstemp
from subprocess import call
def main(files):
tf_os, tpath = mkstemp()
tf = open(tpath, 'w')
fds = [open(ff) for ff in files]
for l in fds[0]:
lines = [l.strip()] + [ff.readline().strip() for ff in fds[1:]]
lengths = [len(l.split()) for l in lines]
if min(lengths) * 2 < max(lengths):
continue
if max(lengths) > 50 or min(lengths) < 1:
continue
print("|||".join(lines), file=tf)
[ff.close() for ff in fds]
tf.close()
tf = open(tpath, 'r')
lines = tf.readlines()
random.shuffle(lines)
fds = [open(ff+'.shuf','w') for ff in files]
for l in lines:
s = l.strip().split('|||')
for ii, fd in enumerate(fds):
print(s[ii], file=fd)
[ff.close() for ff in fds]
os.remove(tpath)
if __name__ == '__main__':
main(sys.argv[1:])
| 982 | 17.903846 | 71 | py |
sa-nmt | sa-nmt-master/data/shuffle.bak.py | from __future__ import print_function
import os
import sys
import random
from tempfile import mkstemp
from subprocess import call
def main(files):
tf_os, tpath = mkstemp()
tf = open(tpath, 'w')
fds = [open(ff) for ff in files]
for l in fds[0]:
lines = [l.strip()] + [ff.readline().strip() for ff in fds[1:]]
print("|||".join(lines), file=tf)
[ff.close() for ff in fds]
tf.close()
tf = open(tpath, 'r')
lines = tf.readlines()
random.shuffle(lines)
fds = [open(ff+'.shuf','w') for ff in files]
for l in lines:
s = l.strip().split('|||')
for ii, fd in enumerate(fds):
print(s[ii], file=fd)
[ff.close() for ff in fds]
os.remove(tpath)
if __name__ == '__main__':
main(sys.argv[1:])
| 800 | 16.042553 | 71 | py |
sa-nmt | sa-nmt-master/data/scan_example.py | from __future__ import print_function
import numpy
import theano
from theano import tensor
# some numbers
n_steps = 10
n_samples = 5
dim = 10
input_dim = 20
output_dim = 2
# one step function that will be used by scan
def oneStep(x_t, h_tm1, W_x, W_h, W_o):
h_t = tensor.tanh(tensor.dot(x_t, W_x) +
tensor.dot(h_tm1, W_h))
o_t = tensor.dot(h_t, W_o)
return h_t, o_t
# spawn theano tensor variable, our symbolic input
# a 3D tensor (n_steps, n_samples, dim)
x = tensor.tensor3(dtype='float32')
# initial state of our rnn
init_state = tensor.alloc(0., n_samples, dim)
# create parameters that we will use,
# note that, parameters are theano shared variables
# parameters for input to hidden states
W_x_ = numpy.random.randn(input_dim, dim).astype('float32')
W_x = theano.shared(W_x_)
# parameters for hidden state transition
W_h_ = numpy.random.randn(dim, dim).astype('float32')
W_h = theano.shared(W_h_)
# parameters from hidden state to output
W_o_ = numpy.random.randn(dim, output_dim).astype('float32')
W_o = theano.shared(W_o_)
# scan function
([h_vals, o_vals], updates) = theano.scan(
fn=oneStep,
sequences=[x],
outputs_info=[init_state, None],
non_sequences=[W_x, W_h, W_o],
n_steps=n_steps,
strict=True)
# let us now compile a function to get the output
f = theano.function([x], [h_vals, o_vals])
# now we will call the compiled function with actual input
actual_input = numpy.random.randn(
n_steps, n_samples, input_dim).astype('float32')
h_vals_, o_vals_ = f(actual_input)
# print the shapes
print('shape of input :', actual_input.shape)
print('shape of h_vals:', h_vals_.shape)
print('shape of o_vals:', o_vals_.shape)
| 1,709 | 23.782609 | 60 | py |
sa-nmt | sa-nmt-master/data/strip_sgml.py | from __future__ import print_function
import sys
import re
def main():
fin = sys.stdin
fout = sys.stdout
for l in fin:
line = l.strip()
text = re.sub('<[^<]+>', "", line).strip()
if len(text) == 0:
continue
print(text, file=fout)
if __name__ == "__main__":
main()
| 346 | 15.52381 | 50 | py |
sa-nmt | sa-nmt-master/data/build_dictionary.py | from __future__ import print_function
import numpy
import pickle as pkl
import sys
from collections import OrderedDict
def main():
for filename in sys.argv[1:]:
print('Processing', filename)
word_freqs = OrderedDict()
with open(filename, 'r') as f:
for line in f:
words_in = line.strip().split(' ')
for w in words_in:
if w not in word_freqs:
word_freqs[w] = 0
word_freqs[w] += 1
words = list(word_freqs.keys())
freqs = list(word_freqs.values())
sorted_idx = numpy.argsort(freqs)
sorted_words = [words[ii] for ii in sorted_idx[::-1]]
worddict = OrderedDict()
worddict['<pad>'] = 0
worddict['<unk>'] = 1
worddict['<bos>'] = 2
worddict['<eos>'] = 3
for ii, ww in enumerate(sorted_words):
worddict[ww] = ii+4
with open('%s.pkl' % filename, 'wb') as f:
pkl.dump(worddict, f)
print('Done')
if __name__ == '__main__':
main()
| 1,087 | 24.904762 | 61 | py |
sa-nmt | sa-nmt-master/data/extract_files.py | #!/usr/bin/python
import argparse
import logging
import os
import tarfile
TRAIN_DATA_URL = 'http://www.statmt.org/europarl/v7/fr-en.tgz'
VALID_DATA_URL = 'http://matrix.statmt.org/test_sets/newstest2011.tgz'
parser = argparse.ArgumentParser(
description="""
This script donwloads parallel corpora given source and target pair language
indicators. Adapted from,
https://github.com/orhanf/blocks-examples/tree/master/machine_translation
""", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-s", "--source", type=str, help="Source language",
default="fr")
parser.add_argument("-t", "--target", type=str, help="Target language",
default="en")
parser.add_argument("--source-dev", type=str, default="newstest2011.fr",
help="Source language dev filename")
parser.add_argument("--target-dev", type=str, default="newstest2011.en",
help="Target language dev filename")
parser.add_argument("--outdir", type=str, default=".",
help="Output directory")
def extract_tar_file_to(file_to_extract, extract_into, names_to_look):
extracted_filenames = []
try:
logger.info("Extracting file [{}] into [{}]"
.format(file_to_extract, extract_into))
tar = tarfile.open(file_to_extract, 'r')
src_trg_files = [ff for ff in tar.getnames()
if any([ff.find(nn) > -1 for nn in names_to_look])]
if not len(src_trg_files):
raise ValueError("[{}] pair does not exist in the archive!"
.format(src_trg_files))
for item in tar:
# extract only source-target pair
if item.name in src_trg_files:
file_path = os.path.join(extract_into, item.path)
if not os.path.exists(file_path):
logger.info("...extracting [{}] into [{}]"
.format(item.name, file_path))
tar.extract(item, extract_into)
else:
logger.info("...file exists [{}]".format(file_path))
extracted_filenames.append(
os.path.join(extract_into, item.path))
except Exception as e:
logger.error("{}".format(str(e)))
return extracted_filenames
def main():
train_data_file = os.path.join(args.outdir, 'train_data.tgz')
valid_data_file = os.path.join(args.outdir, 'valid_data.tgz')
# Download europarl v7 and extract it
extract_tar_file_to(
train_data_file, os.path.dirname(train_data_file),
["{}-{}".format(args.source, args.target)])
# Download development set and extract it
extract_tar_file_to(
valid_data_file, os.path.dirname(valid_data_file),
[args.source_dev, args.target_dev])
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('prepare_data')
args = parser.parse_args()
main()
| 2,999 | 36.974684 | 76 | py |
sa-nmt | sa-nmt-master/data/length.py | from __future__ import print_function
import numpy
import sys
for name in sys.argv[1:]:
lens = []
with open(name, 'r') as f:
for ll in f:
lens.append(len(ll.strip().split(' ')))
print(name, ' max ', numpy.max(lens), ' min ', numpy.min(lens), ' average ', numpy.mean(lens))
| 310 | 19.733333 | 98 | py |
qsft | qsft-master/synt_exp/run-tests-complexity-vs-size.py | import numpy as np
import sys
import pandas as pd
import uuid
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 504)
pd.set_option('display.width', 1000)
sys.path.append("..")
sys.path.append("../src")
import argparse
from pathlib import Path
from synt_src.synthetic_helper import SyntheticHelper
from qsft.parallel_tests import run_tests
from synt_src.synthetic_signal import generate_signal_w
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--debug', type=bool, default=False)
parser.add_argument('--num_subsample', type=int, nargs="+")
parser.add_argument('--num_repeat', type=int, nargs="+")
parser.add_argument('--b', type=int, nargs="+")
parser.add_argument('--a', type=int)
parser.add_argument('--snr', type=float)
parser.add_argument('--n', type=int, nargs="+")
parser.add_argument('--q', type=int)
parser.add_argument('--t', type=int)
parser.add_argument('--sparsity', type=int)
parser.add_argument('--iters', type=int, default=1)
parser.add_argument('--subsampling', type=int, default=True)
parser.add_argument('--jobid', type=int)
args = parser.parse_args()
debug = args.debug
if debug:
args.num_subsample = [2]
args.num_repeat = [2]
args.b = [6]
args.a = 1
args.n = np.linspace(40, 40, num=1, dtype=int)
args.q = 3
args.t = 5
args.sparsity = 10
args.snr = 50
args.iters = 1
args.jobid = "debug-" + str(uuid.uuid1())[:8]
args.subsampling = True
if debug:
exp_dir_base = Path(f"results/{str(args.jobid)}")
else:
exp_dir_base = Path(f"/global/scratch/users/erginbas/qsft/synt-exp-results/{str(args.jobid)}")
exp_dir_base.mkdir(parents=True, exist_ok=True)
(exp_dir_base / "figs").mkdir(exist_ok=True)
print("Parameters :", args, flush=True)
methods = ["qsft_coded"]
dataframes = []
print("Starting the tests...", flush=True)
for n_idx in range(len(args.n)):
n = int(args.n[n_idx])
noise_sd = np.sqrt((args.sparsity * args.a ** 2) / (10 ** (args.snr / 10)))
print(fr"n = {n}, N = {args.q ** n:.2e}, sigma = {noise_sd}")
b_valid = [b for b in args.b if b <= n]
subsampling_args = {
"num_subsample": max(args.num_subsample),
"num_repeat": max(args.num_repeat),
"b": max(b_valid),
"all_bs": b_valid
}
test_args = {
"n_samples": 200000
}
for it in range(args.iters):
exp_dir = exp_dir_base / f"n{n}_i{it}"
exp_dir.mkdir(parents=True, exist_ok=True)
_, locq, strengths = generate_signal_w(n, args.q, args.sparsity, args.a, args.a,
full=False, max_weight=args.t)
signal_args = {
"n": n,
"q": args.q,
"t": args.t,
"locq": locq,
"strengths": strengths,
}
helper = SyntheticHelper(signal_args=signal_args, methods=methods, subsampling=args.subsampling,
exp_dir=exp_dir, subsampling_args=subsampling_args, test_args=test_args)
for method in methods:
if method == "lasso" and args.q ** n > 8000:
pass
else:
dataframes.append(run_tests(method, helper, 1, args.num_subsample, args.num_repeat,
b_valid, [noise_sd], parallel=False))
results_df = pd.concat(dataframes, ignore_index=True)
results_df.to_pickle(exp_dir_base / "result.pkl") | 3,745 | 31.017094 | 109 | py |
qsft | qsft-master/synt_exp/plot-complexity-vs-size.py | import numpy as np
import matplotlib.pyplot as plt
import sys
import pandas as pd
from matplotlib import ticker
import matplotlib
from mpl_toolkits.axes_grid1 import make_axes_locatable
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 504)
pd.set_option('display.width', 1000)
sys.path.append("..")
sys.path.append("../src")
from pathlib import Path
if __name__ == '__main__':
font = {'family': 'sans',
'size': 12}
matplotlib.rc('font', **font)
exp_dir = Path(f"results/debug-510ff35e")
results_df = pd.read_pickle(exp_dir / "result.pkl")
group_by = ["method", "n", "q", "num_subsample", "num_repeat", "b", "noise_sd"]
means = results_df.groupby(group_by, as_index=False).mean()
stds = results_df.groupby(group_by, as_index=False).std()
print(means)
q = results_df["q"][0]
# fig, ax = plt.subplots(2, 2)
colorMap = plt.get_cmap('cividis_r')
sampleAlpha = 0.6
timeAlpha = 0.8
methods = ["qsft_coded"]
method_names = ["q-SFT Coded"]
sample_comp = [[] for _ in methods]
time_comp = [[] for _ in methods]
for i in means.index:
mean_row = means.iloc[i]
m = np.where(mean_row["method"] == np.array(methods))[0][0]
if m != -1:
sample_comp[m].append((mean_row["n"], mean_row["n_samples"], mean_row["nmse"]))
time_comp[m].append((mean_row["n"], mean_row["runtime"], mean_row["nmse"]))
min_samples = np.min(np.array(sample_comp[0]), axis=0)[1]
max_samples = np.max(np.array(sample_comp[0]), axis=0)[1]
sample_bin_count = 10
ns = np.unique(np.array(sample_comp[0])[:, 0])
sample_bins = np.linspace(np.log10(min_samples), np.log10(max_samples), sample_bin_count + 1)[1:]
(exp_dir / "figs").mkdir(exist_ok=True)
for m in range(len(methods)):
fig, ax = plt.subplots(1, 1, figsize=(4.3, 3.2), dpi=300)
sample_comp_m = np.array(sample_comp[m])
sample_bin_totals = np.zeros((len(ns), sample_bin_count))
sample_bin_counts = np.zeros((len(ns), sample_bin_count))
for row in sample_comp_m:
sample_bin = np.where(sample_bins >= np.log10(row[1]))[0][0]
n_bin = np.where(int(row[0]) == ns)[0][0]
sample_bin_totals[n_bin, sample_bin] += row[2]
sample_bin_counts[n_bin, sample_bin] += 1
sample_bin_avg = sample_bin_totals / sample_bin_counts
for n_idx in range(len(ns)):
sample_row = sample_bin_avg[n_idx]
if len(np.where(~np.isnan(sample_row))[0]) > 0:
first_non_nan = np.where(~np.isnan(sample_row))[0][0]
sample_row[:first_non_nan] = sample_row[first_non_nan]
last_non_nan = np.where(~np.isnan(sample_row))[0][-1]
sample_row[last_non_nan+1:] = sample_row[last_non_nan]
sample_bin_avg[n_idx] = sample_row
sample_bin_avg = sample_bin_avg.T
sample_bin_avg = np.minimum(sample_bin_avg, 1)
masked_sample_bin_avg = np.ma.array(sample_bin_avg, mask=np.isnan(sample_bin_avg))
colorMap.set_bad('lightgrey')
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
data = ax.pcolormesh(ns, 10 ** sample_bins, masked_sample_bin_avg, cmap=colorMap)
ax.set_yscale("log")
ax.set_xlabel("n\nN")
ax.set_ylabel('Sample Complexity')
ax.set_xticks(ns[1::3])
# ax.set_yticks([10, 10**2, 10**3, 10**4, 10**5])
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter(rf"${{%d}}$"))
ax.xaxis.set_label_coords(-0.03, -.04)
secax = ax.secondary_xaxis('bottom', functions=(lambda x: x, lambda x: x))
secax.xaxis.set_major_formatter(ticker.FormatStrFormatter("".join(["\n", rf"${q}^{{%d}}$"])))
secax.set_xticks(ns[1::3])
cbar = fig.colorbar(data, cax=cax, orientation='vertical')
cbar.ax.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0])
cbar.set_label('NMSE', rotation=270, labelpad=15)
plt.tight_layout()
plt.savefig(exp_dir / f'figs/complexity-vs-n-{methods[m]}.pdf', bbox_inches='tight',
transparent="True", pad_inches=0)
plt.show()
_, ax = plt.subplots(1, 1, figsize=(4, 3.2), dpi=300)
for m in range(len(methods)):
time_comp_m = np.array(time_comp[m])
total_success_time = np.zeros(len(ns))
count_success_time = np.zeros(len(ns))
for row in time_comp_m:
if row[2] < 0.1:
n_bin = np.where(int(row[0]) == ns)[0][0]
total_success_time[n_bin] += row[1]
count_success_time[n_bin] += 1
avg_success_time = total_success_time / count_success_time
ax.plot(ns, avg_success_time, "o-", label=method_names[m])
ax.set_xlabel("n\nN")
ax.set_ylabel('Runtime Complexity (sec)', fontsize=11)
ax.set_yscale("log")
ax.grid(True)
ax.set_yticks([0.01, 0.1, 1, 10, 100])
ax.set_xticks(ns[1::3])
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter(rf"${{%d}}$"))
ax.xaxis.set_label_coords(-0.03, -.04)
secax = ax.secondary_xaxis('bottom', functions=(lambda x: x, lambda x: x))
secax.xaxis.set_major_formatter(ticker.FormatStrFormatter("".join(["\n", rf"${q}^{{%d}}$"])))
secax.set_xticks(ns[1::3])
ax.legend()
plt.tight_layout()
plt.savefig(exp_dir / f'figs/complexity-vs-n-runtime.pdf', bbox_inches='tight',
transparent="True", pad_inches=0.1)
plt.show()
| 5,591 | 36.033113 | 101 | py |
qsft | qsft-master/synt_exp/run-tests-nmse-vs-snr.py | import numpy as np
import sys
import pandas as pd
import uuid
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 504)
pd.set_option('display.width', 1000)
sys.path.append("..")
import argparse
from pathlib import Path
from synt_exp.synt_src.synthetic_helper import SyntheticHelper
from qsft.parallel_tests import run_tests
from synt_exp.synt_src.synthetic_signal import generate_signal_w
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--debug', type=bool, default=False)
parser.add_argument('--num_subsample', type=int, nargs="+")
parser.add_argument('--num_repeat', type=int, nargs="+")
parser.add_argument('--b', type=int, nargs="+")
parser.add_argument('--a', type=int)
parser.add_argument('--snr', type=float, nargs="+")
parser.add_argument('--n', type=int)
parser.add_argument('--q', type=int)
parser.add_argument('--sparsity', type=int, nargs="+")
parser.add_argument('--iters', type=int, default=1)
parser.add_argument('--subsampling', type=int, default=True)
parser.add_argument('--jobid', type=int)
args = parser.parse_args()
debug = args.debug
if debug:
args.num_subsample = [3]
args.num_repeat = [1]
args.b = [7]
args.a = 1
args.n = 20
args.q = 4
args.sparsity = [100, 250, 1000]
args.iters = 5
args.jobid = "debug-" + str(uuid.uuid1())[:8]
args.subsampling = True
args.snr = np.linspace(-25, 5, num=30)
if debug:
exp_dir_base = Path(f"results/{str(args.jobid)}")
else:
exp_dir_base = Path(f"/global/scratch/users/erginbas/qsft/synt-exp-results/{str(args.jobid)}")
exp_dir_base.mkdir(parents=True, exist_ok=True)
(exp_dir_base / "figs").mkdir(exist_ok=True)
print("Parameters :", args, flush=True)
methods = ["qsft"]
dataframes = []
print("Starting the tests...", flush=True)
subsampling_args = {
"num_subsample": max(args.num_subsample),
"num_repeat": max(args.num_repeat),
"b": max(args.b),
"all_bs": args.b
}
test_args = {
"n_samples": 50000
}
print()
print("n = {}, N = {:.2e}".format(args.n, args.q ** args.n))
for s in range(len(args.sparsity)):
sparsity = args.sparsity[s]
noise_sd = np.sqrt((sparsity * args.a ** 2) / (args.q ** args.n * 10 ** (np.array(args.snr) / 10)))
for it in range(args.iters):
exp_dir = exp_dir_base / f"s{sparsity}_i{it}"
exp_dir.mkdir(parents=True, exist_ok=True)
_, locq, strengths = generate_signal_w(args.n, args.q, sparsity, args.a, args.a, full=False)
signal_args = {
"n": args.n,
"q": args.q,
"locq": locq,
"strengths": strengths,
}
helper = SyntheticHelper(signal_args=signal_args, methods=methods, subsampling=args.subsampling,
exp_dir=exp_dir, subsampling_args=subsampling_args, test_args=test_args)
for method in methods:
run_df = run_tests(method, helper, 1, args.num_subsample, args.num_repeat,
args.b, noise_sd, parallel=False)
run_df["sparsity"] = sparsity
dataframes.append(run_df)
results_df = pd.concat(dataframes, ignore_index=True)
results_df['snr'] = 10 * np.log10((results_df['sparsity'] / (results_df["q"] ** results_df["n"])) * \
(args.a ** 2 / results_df["noise_sd"] ** 2))
print(results_df)
results_df.to_pickle(exp_dir_base / "result.pkl")
| 3,712 | 32.45045 | 109 | py |
qsft | qsft-master/synt_exp/qsft-sample-vs-nmse.py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import sys
import pandas as pd
import uuid
sys.path.append("..")
sys.path.append("../src")
from qsft.utils import best_convex_underestimator
import argparse
from pathlib import Path
from synt_exp.synt_src.synthetic_helper import SyntheticHelper
from qsft.parallel_tests import run_tests
parser = argparse.ArgumentParser()
parser.add_argument('--debug', type=bool, default=False)
parser.add_argument('--num_subsample', type=int, nargs="+")
parser.add_argument('--num_repeat', type=int, nargs="+")
parser.add_argument('--b', type=int, nargs="+")
parser.add_argument('--a', type=int)
parser.add_argument('--noise_sd', type=float, nargs="+")
parser.add_argument('--n', type=int)
parser.add_argument('--q', type=int)
parser.add_argument('--sparsity', type=int)
parser.add_argument('--iters', type=int, default=1)
parser.add_argument('--subsampling', type=int, default=True)
parser.add_argument('--jobid', type=int)
args = parser.parse_args()
debug = args.debug
if debug:
args.num_subsample = [3]
args.num_repeat = [2]
args.b = [4]
args.a = 1
args.n = 6
args.q = 4
args.sparsity = 1
args.noise_sd = [1e-3]
args.iters = 1
args.jobid = "debug-" + str(uuid.uuid1())[:8]
args.subsampling = True
if debug:
exp_dir = Path(f"results/{str(args.jobid)}")
else:
exp_dir = Path(f"/global/scratch/users/erginbas/qsft/synt-exp-results/{str(args.jobid)}")
print("Parameters :", args, flush=True)
query_args = {
"query_method": "complex",
"delays_method": "nso",
"num_subsample": max(args.num_subsample),
"num_repeat": max(args.num_repeat),
"b": max(args.b),
"all_bs": args.b
}
methods = ["qsft", "lasso"]
colors = ["red", "blue"]
test_args = {
"n_samples": 50000
}
print("Loading/Calculating data...", flush=True)
exp_dir.mkdir(parents=True, exist_ok=True)
(exp_dir / "figs").mkdir(exist_ok=True)
helper = SyntheticHelper(args.n, args.q, noise_sd=args.noise_sd[0], sparsity=args.sparsity,
a_min=args.a, a_max=args.a,
baseline_methods=methods, subsampling=args.subsampling,
exp_dir=exp_dir, query_args=query_args, test_args=test_args)
print("n = {}, N = {:.2e}".format(args.n, args.q ** args.n))
print("Starting the tests...", flush=True)
fig, ax = plt.subplots()
for m in range(len(methods)):
# Test QSFT with different parameters
# Construct a grid of parameters. For each entry, run multiple test rounds.
# Compute the average for each parameter selection.
results_df = run_tests(methods[m], helper, args.iters, args.num_subsample, args.num_repeat,
args.b, args.noise_sd, parallel=False)
# results_df.to_csv(f'results/{str(args.jobid)}/results_df_{methods[m]}.csv')
means = results_df.groupby(["num_subsample", "num_repeat", "b", "noise_sd"], as_index=False).mean()
stds = results_df.groupby(["num_subsample", "num_repeat", "b", "noise_sd"], as_index=False).std()
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
print(results_df)
x_values = []
y_values = []
labels = []
all_points = []
for i in means.index:
mean_row = means.iloc[i]
std_row = stds.iloc[i]
ax.errorbar(mean_row['n_samples'], mean_row['nmse'],
xerr=std_row['n_samples'], yerr=std_row['nmse'], fmt="o", color=colors[m])
all_points.append([mean_row['n_samples'], mean_row['nmse']])
label = f'({int(mean_row["b"])},{int(mean_row["num_subsample"])},{int(mean_row["num_repeat"])})'
labels.append(label)
for i in range(len(all_points)):
ax.annotate(labels[i], xy=all_points[i], xycoords='data',
xytext=(20, 10), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
shrinkA=0, shrinkB=5,
connectionstyle="arc3,rad=0.4",
color='blue'), )
try:
if len(all_points) > 3:
bcue = best_convex_underestimator(np.array(all_points))
ax.plot(bcue[:, 0], bcue[:, 1], 'r--', lw=1.5, label="Best Cvx Underest.")
except:
pass
ax.set_xlabel('Total Samples')
ax.set_ylabel('Test NMSE')
plt.legend()
plt.grid()
plt.savefig(exp_dir / f'figs/nmse-vs-sample.png')
plt.show()
| 4,482 | 30.131944 | 104 | py |
qsft | qsft-master/synt_exp/plot-nmse-vs-snr.py | import numpy as np
import matplotlib.pyplot as plt
import sys
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 504)
pd.set_option('display.width', 1000)
sys.path.append("..")
sys.path.append("../src")
from pathlib import Path
if __name__ == '__main__':
exp_dir = Path(f"results_final/debug-e51c0b3c")
results_df = pd.read_pickle(exp_dir / "result.pkl")
group_by = ["method", "n", "q", "num_subsample", "num_repeat", "b", "noise_sd", "sparsity"]
means = results_df.groupby(group_by, as_index=False).mean()
stds = results_df.groupby(group_by, as_index=False).std()
_, ax = plt.subplots(1, 1, figsize=(3.5, 3), dpi=300)
sparsity_list = np.unique(results_df["sparsity"])
snr_values = [[] for _ in range(len(sparsity_list))]
for i in means.index:
mean_row = means.iloc[i]
s_idx = np.where(mean_row["sparsity"] == sparsity_list)[0][0]
snr_values[s_idx].append((mean_row["snr"], mean_row["nmse"]))
for s in range(len(sparsity_list)):
ax.plot(*zip(*snr_values[s]), "o-", label=f"S = {sparsity_list[s]}", markersize=4)
ax.set_xlabel('SNR (dB)')
ax.set_ylabel('NMSE')
ax.legend()
ax.grid()
(exp_dir / "figs").mkdir(exist_ok=True)
plt.tight_layout()
plt.savefig(exp_dir / f'figs/nmse-vs-snr.pdf', bbox_inches='tight',
transparent="True", pad_inches=0)
plt.show()
| 1,435 | 27.156863 | 95 | py |
qsft | qsft-master/synt_exp/quick_example.py | import numpy as np
from qsft.qsft import QSFT
from qsft.query import get_reed_solomon_dec
from synt_exp.synt_src.synthetic_signal import get_random_subsampled_signal
if __name__ == '__main__':
np.random.seed(20)
q = 3
n = 40
N = q ** n
sparsity = 100
a_min = 1
a_max = 1
b = 4
noise_sd = 1
num_subsample = 3
num_repeat = 1
t = 4
decoder = get_reed_solomon_dec(n, t, q)
delays_method_source = "identity"
delays_method_channel = "identity"
query_args = {
"query_method": "complex",
"num_subsample": num_subsample,
"delays_method_source": delays_method_source,
"subsampling_method": "qsft",
"delays_method_channel": delays_method_channel,
"num_repeat": num_repeat,
"b": b,
"t": t
}
qsft_args = {
"num_subsample": num_subsample,
"num_repeat": num_repeat,
"reconstruct_method_source": delays_method_source,
"reconstruct_method_channel": delays_method_channel,
"b": b,
"noise_sd": noise_sd,
"source_decoder": decoder
}
'''
Generate a Signal Object
'''
test_signal = get_random_subsampled_signal(n=n,
q=q,
sparsity=sparsity,
a_min=a_min,
a_max=a_max,
noise_sd=noise_sd,
query_args=query_args,
max_weight=t)
'''
Create a QSFT instance and perform the transformation
'''
sft = QSFT(**qsft_args)
result = sft.transform(test_signal, verbosity=10, timing_verbose=True, report=True, sort=True)
'''
Display the Reported Results
'''
gwht = result.get("gwht")
loc = result.get("locations")
n_used = result.get("n_samples")
peeled = result.get("locations")
avg_hamming_weight = result.get("avg_hamming_weight")
max_hamming_weight = result.get("max_hamming_weight")
print("found non-zero indices QSFT: ")
print(peeled)
print("True non-zero indices: ")
print(test_signal.locq.T)
print("Total samples = ", n_used)
print("Total sample ratio = ", n_used / q ** n)
signal_w_diff = test_signal.signal_w.copy()
for key in gwht.keys():
signal_w_diff[key] = signal_w_diff.get(key, 0) - gwht[key]
print("NMSE SPRIGHT= ",
np.sum(np.abs(list(signal_w_diff.values())) ** 2) / np.sum(np.abs(list(test_signal.signal_w.values())) ** 2))
print("AVG Hamming Weight of Nonzero Locations = ", avg_hamming_weight)
print("Max Hamming Weight of Nonzero Locations = ", max_hamming_weight)
| 2,810 | 32.86747 | 118 | py |
qsft | qsft-master/synt_exp/__init__.py | 0 | 0 | 0 | py | |
qsft | qsft-master/synt_exp/synt_src/synthetic_signal.py | import numpy as np
from qsft.utils import igwht_tensored, random_signal_strength_model, qary_vec_to_dec, sort_qary_vecs
from qsft.input_signal import Signal
from qsft.input_signal_subsampled import SubsampledSignal
from qsft.utils import dec_to_qary_vec
from multiprocess import Pool
import time
def generate_signal_w(n, q, sparsity, a_min, a_max, noise_sd=0, full=True, max_weight=None):
"""
Generates a sparse fourier transform
"""
max_weight = n if max_weight is None else max_weight
N = q ** n
if max_weight == n:
locq = sort_qary_vecs(np.random.randint(q, size=(n, sparsity)).T).T
else:
non_zero_idx_vals = np.random.randint(q-1, size=(max_weight, sparsity))+1
non_zero_idx_pos = np.random.choice(a=n, size=(sparsity, max_weight))
locq = np.zeros((n, sparsity), dtype=int)
for i in range(sparsity):
locq[non_zero_idx_pos[i, :], i] = non_zero_idx_vals[:, i]
locq = sort_qary_vecs(locq.T).T
loc = qary_vec_to_dec(locq, q)
strengths = random_signal_strength_model(sparsity, a_min, a_max)
if full:
wht = np.zeros((N,), dtype=complex)
for l, s in zip(loc, strengths):
wht[l] = s
signal_w = wht + np.random.normal(0, noise_sd, size=(N, 2)).view(np.complex).reshape(N)
return np.reshape(signal_w, [q] * n), locq, strengths
else:
signal_w = dict(zip(list(map(tuple, locq.T)), strengths))
return signal_w, locq, strengths
def get_random_signal(n, q, noise_sd, sparsity, a_min, a_max):
    """
    Computes a full random time-domain signal, which is sparse in the frequency domain.
    This function is only suitable for small n since for large n, storing all q^n
    symbols is not tractable.

    Returns:
        SyntheticSignal wrapping both the time- and frequency-domain representations.
    """
    # BUG FIX: the arguments were previously passed positionally in the wrong
    # order (noise_sd landed in generate_signal_w's `sparsity` slot, sparsity
    # in `a_min`, etc.). Pass them by keyword to match the actual signature.
    signal_w, locq, strengths = generate_signal_w(n, q, sparsity, a_min, a_max,
                                                  noise_sd=noise_sd, full=True)
    signal_t = igwht_tensored(signal_w, q, n)
    signal_params = {
        "n": n,
        "q": q,
        "noise_sd": noise_sd,
        "signal_t": signal_t,
        "signal_w": signal_w,
        "folder": "test_data"
    }
    return SyntheticSignal(locq, strengths, **signal_params)
class SyntheticSignal(Signal):
    """
    This is essentially just a signal object, except the strengths and locations of the non-zero indices are known, and
    included as attributes.
    """
    def __init__(self, locq, strengths, **kwargs):
        """
        Args:
            locq: (n, sparsity) array; each column is a q-ary non-zero frequency location.
            strengths: coefficient value for each column of locq.
            **kwargs: forwarded unchanged to Signal.__init__.
        """
        super().__init__(**kwargs)
        self.locq = locq
        self.strengths = strengths
def get_random_subsampled_signal(n, q, noise_sd, sparsity, a_min, a_max, query_args, max_weight=None):
    """
    Like get_random_signal, but returns a SyntheticSubsampledSignal instead of a
    SyntheticSignal. A subsampled signal does not materialize the time-domain
    signal at construction; samples are computed on the fly, which is the right
    choice when n is large or sampling is expensive.
    """
    t0 = time.time()
    signal_w, locq, strengths = generate_signal_w(n, q, sparsity, a_min, a_max, noise_sd,
                                                  full=False, max_weight=max_weight)
    print(f"Generation Time:{time.time() - t0}", flush=True)
    return SyntheticSubsampledSignal(signal_w=signal_w, locq=locq, strengths=strengths,
                                     noise_sd=noise_sd, n=n, q=q, query_args=query_args)
class SyntheticSubsampledSignal(SubsampledSignal):
    """
    This is a Subsampled signal object, except it implements the unimplemented 'subsample' function.
    """
    def __init__(self, **kwargs):
        self.q = kwargs["q"]
        self.n = kwargs["n"]
        self.locq = kwargs["locq"]
        self.noise_sd = kwargs["noise_sd"]
        # Precompute 2*pi*j*locq/q so each sample is exp(<query, freq>) @ strengths.
        freq_normalized = 2j * np.pi * kwargs["locq"] / kwargs["q"]
        strengths = kwargs["strengths"]

        def sampling_function(query_batch):
            # Evaluate the sparse exponential-sum model at a batch of decimal indices.
            query_indices_qary_batch = np.array(dec_to_qary_vec(query_batch, self.q, self.n)).T
            return np.exp(query_indices_qary_batch @ freq_normalized) @ strengths

        self.sampling_function = sampling_function

        super().__init__(**kwargs)

    def subsample(self, query_indices):
        """
        Computes the signal/function values at the queried indices on the fly.
        """
        batch_size = 10000
        batches = np.array_split(query_indices, len(query_indices)//batch_size + 1)
        # Collect batch results and concatenate once at the end; the previous
        # per-batch np.concatenate was accidentally quadratic.
        res = []
        with Pool() as pool:
            for new_res in pool.imap(self.sampling_function, batches):
                res.append(new_res)
        return np.concatenate(res)

    def get_MDU(self, ret_num_subsample, ret_num_repeat, b, trans_times=False):
        """
        Wraps the get_MDU method from SubsampledSignal to add synthetic noise.
        """
        mdu = super().get_MDU(ret_num_subsample, ret_num_repeat, b, trans_times)
        # Noise level is invariant across the loops below, so compute it once.
        nu = self.noise_sd / np.sqrt(2 * self.q ** b)
        for i in range(len(mdu[2])):
            for j in range(len(mdu[2][i])):
                size = np.array(mdu[2][i][j]).shape
                # BUG FIX: np.complex was removed in NumPy 1.24; use builtin complex.
                mdu[2][i][j] += np.random.normal(0, nu, size=size + (2,)).view(complex).reshape(size)
        return mdu
| 5,257 | 39.137405 | 124 | py |
qsft | qsft-master/synt_exp/synt_src/synthetic_helper.py | from qsft.test_helper import TestHelper
from synt_exp.synt_src.synthetic_signal import SyntheticSubsampledSignal
class SyntheticHelper(TestHelper):
    """Test helper that supplies synthetic subsampled signals to the qsft test harness."""
    def generate_signal(self, signal_args):
        """Construct a SyntheticSubsampledSignal from the stored signal arguments."""
        return SyntheticSubsampledSignal(**signal_args)
| 249 | 34.714286 | 72 | py |
qsft | qsft-master/rna_exp/run-tests-complexity-vs-size.py | import numpy as np
import pandas as pd
import uuid
import argparse
from pathlib import Path
from rna_exp.rna_src.rna_helper import RNAHelper
from qsft.parallel_tests import run_tests
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 504)
pd.set_option('display.width', 1000)
if __name__ == '__main__':
    # Parse experiment parameters from the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', type=bool, default=False)
    parser.add_argument('--num_subsample', type=int, nargs="+")
    parser.add_argument('--num_repeat', type=int, nargs="+")
    parser.add_argument('--b', type=int, nargs="+")
    parser.add_argument('--noise_sd', type=float, nargs="+")
    parser.add_argument('--n', type=int, nargs="+")
    parser.add_argument('--iters', type=int, default=1)
    parser.add_argument('--subsampling', type=int, default=True)
    parser.add_argument('--jobid', type=int)
    args = parser.parse_args()
    debug = args.debug
    # In debug mode, override the CLI with a small fixed configuration.
    if debug:
        args.num_subsample = [4]
        args.num_repeat = [3]
        args.b = [7]
        args.n = [15]
        args.iters = 1
        args.jobid = "debug-" + str(uuid.uuid1())[:8]
        args.subsampling = True
        args.noise_sd = np.logspace(-3, -5, num=10)
    # Results go to a local folder in debug mode, otherwise to scratch storage.
    if debug:
        exp_dir_base = Path(f"results/{str(args.jobid)}")
    else:
        exp_dir_base = Path(f"/global/scratch/users/erginbas/qsft/synt-exp-results/{str(args.jobid)}")
    args.q = 4  # RNA alphabet size (A, U, C, G)
    exp_dir_base.mkdir(parents=True, exist_ok=True)
    (exp_dir_base / "figs").mkdir(exist_ok=True)
    print("Parameters :", args, flush=True)
    methods = ["qsft"]
    dataframes = []
    print("Starting the tests...", flush=True)
    for n_idx in range(len(args.n)):
        n = args.n[n_idx]
        # Only subsampling sizes b <= n are meaningful for a length-n signal.
        b_valid = [b for b in args.b if b <= n]
        subsampling_args = {
            "num_subsample": max(args.num_subsample),
            "num_repeat": max(args.num_repeat),
            "b": max(b_valid),
            "all_bs": b_valid
        }
        test_args = {
            "n_samples": 5000
        }
        print()
        print(fr"n = {n}, N = {args.q ** n}, sigma = {args.noise_sd}")
        for it in range(args.iters):
            exp_dir = exp_dir_base / f"i{it}"
            exp_dir.mkdir(parents=True, exist_ok=True)
            signal_args = {
                "n": n,
                "q": args.q
            }
            helper = RNAHelper(signal_args=signal_args, methods=methods, subsampling=args.subsampling,
                               exp_dir=exp_dir, subsampling_args=subsampling_args, test_args=test_args)
            for method in methods:
                # LASSO is intractable for large input spaces; skip it there.
                if method == "lasso" and args.q ** n > 3000:
                    pass
                else:
                    dataframes.append(run_tests(method, helper, 1, args.num_subsample, args.num_repeat,
                                                b_valid, args.noise_sd, parallel=False))
    # Aggregate results over all n and iterations and persist them.
    results_df = pd.concat(dataframes, ignore_index=True)
    print()
    print(results_df)
    results_df.to_pickle(exp_dir_base / "result.pkl")
| 3,056 | 29.57 | 103 | py |
qsft | qsft-master/rna_exp/plot-complexity-vs-size.py | import numpy as np
import matplotlib.pyplot as plt
import sys
import pandas as pd
from scipy import interpolate
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 504)
pd.set_option('display.width', 1000)
sys.path.append("..")
sys.path.append("../src")
from pathlib import Path
from qsft.utils import best_convex_underestimator
import scipy
if __name__ == '__main__':
    # Job ids of the experiment runs whose pickled results are aggregated here.
    exp_nums = ["13844137", "13844138", "13846802", "13846805"]
    dfs = []
    for exp_id in exp_nums:
        exp_dir = Path(f"results_final/{exp_id}")
        dfs.append(pd.read_pickle(exp_dir / "result.pkl"))
    results_df = pd.concat(dfs, ignore_index=True)
    # Keep the best (minimum) row for each unique parameter setting.
    group_by = ["method", "n", "q", "num_subsample", "num_repeat", "b"]
    results_df = results_df.groupby(group_by, as_index=False).min()
    # fig, ax = plt.subplots(2, 2)
    colorMap = plt.get_cmap('cividis_r')
    sampleAlpha = 0.6
    timeAlpha = 0.8
    methods = ["qsft"]
    _, ax = plt.subplots(1, 1, figsize=(4, 3))
    ns = results_df["n"].unique()
    # One scatter + fitted trend per problem size n: NMSE vs sample complexity.
    for n in ns:
        df_n = results_df.loc[results_df['n'] == n]
        xs = df_n["n_samples"] / 10
        ys = 10 * df_n["nmse"] / np.log(df_n["n_samples"])
        all_points = list(zip(xs, ys))
        # Saturating-exponential trend fitted in log10(sample-count) space.
        model = lambda t, a, b: a + b * np.exp(-t)
        a, _ = scipy.optimize.curve_fit(model, np.log10(xs), ys)
        polyline_points = np.logspace(2.6, 7, 50)
        plt.plot(polyline_points, [model(np.log10(p), a[0], a[1]) for p in polyline_points])
        # if len(all_points) > 3:
        #     bcue = best_convex_underestimator(np.array(all_points))
        #     ax.plot(bcue[:, 0], bcue[:, 1], lw=1.5, label=f"n={n}")
        ax.scatter(*zip(*all_points), s=10)
    ax.set_xscale("log")
    ax.set_xlabel('Sample Complexity')
    ax.set_ylabel('Test NMSE')
    ax.legend()
    ax.grid()
    # ax.set_xticks(ns)
    # for m in range(len(methods)):
    #     time_comp_m = np.array(time_comp[m])
    #     total_success_time = np.zeros(len(ns))
    #     count_success_time = np.zeros(len(ns))
    #     for row in time_comp_m:
    #         if row[2] < 0.1:
    #             n_bin = np.where(int(row[0]) == ns)[0][0]
    #             total_success_time[n_bin] += row[1]
    #             count_success_time[n_bin] += 1
    #
    #     avg_success_time = total_success_time / count_success_time
    #
    #     print(total_success_time)
    #
    #     ax[-1].plot(ns, avg_success_time, "o-")
    #     ax[-1].set_xlabel('n')
    #     ax[-1].set_ylabel('Runtime Complexity (sec)')
    #     ax[-1].set_yscale("log")
    #     ax[-1].grid(True)
    #     ax[-1].set_xticks(ns)
    plt.tight_layout()
    Path(f"figs/").mkdir(exist_ok=True)
    plt.savefig('figs/complexity-vs-n-rna.pdf', bbox_inches='tight',
                transparent="True", pad_inches=0.1)
    plt.show()
| 2,831 | 30.120879 | 92 | py |
qsft | qsft-master/rna_exp/qspright-sample-vs-nmse.py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append("..")
sys.path.append("../src")
import pandas as pd
import uuid
from rna_exp.rna_src.rna_helper import RNAHelper
from qsft.utils import best_convex_underestimator
from qsft.parallel_tests import run_tests
import argparse
from pathlib import Path
# Parse experiment parameters from the command line.
parser = argparse.ArgumentParser()
parser.add_argument('--debug', type=bool, default=False)
parser.add_argument('--num_subsample', type=int, nargs="+")
parser.add_argument('--num_repeat', type=int, nargs="+")
parser.add_argument('--b', type=int, nargs="+")
parser.add_argument('--noise_sd', type=float, nargs="+")
parser.add_argument('--n', type=int)
parser.add_argument('--iters', type=int, default=1)
parser.add_argument('--subsampling', type=int, default=True)
parser.add_argument('--jobid', type=int)
args = parser.parse_args()
debug = args.debug
# In debug mode, override the CLI with a small fixed configuration.
if debug:
    args.num_subsample = [2, 3, 4]
    args.num_repeat = [4, 6]
    # args.b = [7, 8]
    # args.n = 15
    # args.noise_sd = np.logspace(-3.7, -4.3, num=3)
    args.b = [2, 3, 4]
    args.n = 8
    args.noise_sd = np.logspace(-2, -3.5, num=5)
    args.iters = 1
    args.jobid = "debug-" + str(uuid.uuid1())[:8]
    args.subsampling = True
    exp_dir = Path(f"results/{str(args.jobid)}")
else:
    exp_dir = Path(f"/global/scratch/users/erginbas/qsft/rna-exp-results/{str(args.jobid)}")
print(exp_dir)
print("Parameters :", args, flush=True)
# Query design: the largest requested sizes are generated once and reused.
query_args = {
    "query_method": "complex",
    "delays_method": "nso",
    "num_subsample": max(args.num_subsample),
    "num_repeat": max(args.num_repeat),
    "b": max(args.b),
    "all_bs": args.b
}
methods = ["qsft", "lasso"]
colors = ["red", "blue", "green", "purple"]
test_args = {
    "n_samples": 50000
}
print("Loading/Calculating data...", flush=True)
exp_dir.mkdir(parents=True, exist_ok=True)
(exp_dir / "figs").mkdir(exist_ok=True)
print(exp_dir)
# NOTE(review): this call signature (positional n, baseline_methods, query_args)
# differs from RNAHelper.__init__ in rna_helper.py — verify which version is current.
helper = RNAHelper(args.n, baseline_methods=methods, subsampling=args.subsampling,
                   query_args=query_args, test_args=test_args, exp_dir=exp_dir)
n = helper.n
q = 4
print("n = {}, N = {:.2e}".format(n, q ** n))
print("Starting the tests...", flush=True)
fig, ax = plt.subplots()
for m in range(len(methods)):
    # Test QSFT with different parameters
    # Construct a grid of parameters. For each entry, run multiple test rounds.
    # Compute the average for each parameter selection.
    results_df = run_tests(methods[m], helper, args.iters, args.num_subsample, args.num_repeat,
                           args.b, args.noise_sd, parallel=False)
    # results_df.to_csv(f'results/{str(args.jobid)}/results_df_{methods[m]}.csv')
    means = results_df.groupby(["num_subsample", "num_repeat", "b", "noise_sd"], as_index=False).mean()
    stds = results_df.groupby(["num_subsample", "num_repeat", "b", "noise_sd"], as_index=False).std()
    pd.set_option('display.max_rows', None)
    pd.set_option('display.max_columns', None)
    print(results_df)
    x_values = []
    y_values = []
    labels = []
    all_points = []
    # Plot mean +/- std of (sample count, NMSE) for every parameter combination.
    for i in means.index:
        mean_row = means.iloc[i]
        std_row = stds.iloc[i]
        ax.errorbar(mean_row['n_samples'], mean_row['nmse'],
                    xerr=std_row['n_samples'], yerr=std_row['nmse'], fmt="o", color=colors[m])
        all_points.append([mean_row['n_samples'], mean_row['nmse']])
        label = f'({int(mean_row["b"])},{int(mean_row["num_subsample"])},{int(mean_row["num_repeat"])})'
        labels.append(label)
    # Annotate each point with its (b, num_subsample, num_repeat) setting.
    for i in range(len(all_points)):
        ax.annotate(labels[i], xy=all_points[i], xycoords='data',
                    xytext=(20, 10), textcoords='offset points',
                    arrowprops=dict(arrowstyle="->",
                                    shrinkA=0, shrinkB=5,
                                    connectionstyle="arc3,rad=0.4",
                                    color='blue'), )
    # Trace the best convex underestimator of the point cloud, if enough points.
    try:
        if len(all_points) > 3:
            bcue = best_convex_underestimator(np.array(all_points))
            ax.plot(bcue[:, 0], bcue[:, 1], 'r--', lw=1.5, label="Best Cvx Underest.")
    except:
        pass
ax.set_xlabel('Total Samples')
ax.set_ylabel('Test NMSE')
plt.legend()
plt.grid()
plt.savefig(exp_dir / f'figs/nmse-vs-sample.png')
plt.show() | 4,317 | 30.289855 | 104 | py |
qsft | qsft-master/rna_exp/__init__.py | 0 | 0 | 0 | py | |
qsft | qsft-master/rna_exp/rna_src/query_iterator.py | import numpy as np
from qsft.utils import dec_to_qary_vec
class QueryIterator(object):
    """
    Iterates over (index, decimal index) query pairs, yielding
    (decimal index, full sequence string) with the queried nucleotides
    written into the base sequence at the configured positions.
    """
    nucs = np.array(["A", "U", "C", "G"])
    q = 4

    def __init__(self, base_seq, positions, query_indices, q):
        self.base_seq = np.array(list(base_seq))
        self.positions = positions
        self.full = self.base_seq.copy()  # reusable scratch buffer
        self.length = len(query_indices)
        self.q = q
        self.query_indices = query_indices
        self.i = 0

    def __iter__(self):
        return self

    def __next__(self):
        # Dispatch on the alphabet size of the query encoding.
        if self.q == 4:
            return self._next_q4()
        elif self.q == 2:
            return self._next_q2()

    def _next_q4(self):
        if self.i == self.length:
            raise StopIteration
        entry = self.query_indices[self.i]
        self.i += 1
        self.full[:] = self.base_seq
        self.full[self.positions] = self.nucs[entry[0]]
        return entry[1], "".join(self.full)

    def _next_q2(self):
        if self.i == self.length:
            raise StopIteration
        entry = self.query_indices[self.i]
        self.i += 1
        # Decode the decimal index back into base-4 nucleotide codes.
        quaternary = dec_to_qary_vec([entry[1]], 4, len(entry[0]) // 2)[:, 0]
        self.full[:] = self.base_seq
        self.full[self.positions] = self.nucs[quaternary]
        return entry[1], "".join(self.full)

    def __len__(self):
        return self.length
qsft | qsft-master/rna_exp/rna_src/utils.py | import numpy as np
from tqdm import tqdm
from itertools import chain, combinations
def divisors(num):
    """Returns the proper divisors of *num* (every x in [1, num) that divides num)."""
    return [x for x in range(1, num) if num % x == 0]
def powerset(iterable):
    """Returns an iterator over all subsets of *iterable*, as tuples, by increasing size."""
    items = list(iterable)
    return chain.from_iterable(combinations(items, size) for size in range(len(items) + 1))
def get_all_interactions(L, index_1=False):
    """
    Returns a list of all epistatic interactions (subsets of positions) for a
    sequence of length L. This fixes the ordering used for beta coefficients
    throughout the code. With index_1=True the positions are 1-indexed.
    """
    positions = range(1, L + 1) if index_1 else range(L)
    return list(powerset(positions))
def complete_graph_evs(q):
    """
    Returns a q x q orthogonal matrix whose columns are eigenvectors of the
    complete graph on q vertices, built as the Householder reflection that
    maps the all-ones vector onto sqrt(q) * e1.
    """
    ones = np.ones(q)
    e1 = np.eye(q)[0]
    u = ones - np.linalg.norm(ones, ord=2) * e1
    u = u / np.linalg.norm(u, ord=2)
    return np.eye(q) - 2 * np.outer(u, u)
def fourier_basis_recursive(L, q):
    """
    Recursively builds the Fourier basis of the Hamming graph H(L, q) (length-L
    sequences over an alphabet of size q). Since H(L+1, q) is the Cartesian
    product H(L, q) x K_q, its eigenvectors are Kronecker products of the
    factors' eigenvectors. This is much faster than encoding each sequence
    individually (as fourier_for_seqs does), but the column order does NOT
    match the epistatic-interaction order used for beta — do not multiply this
    basis by a sample of beta.
    """
    Pq = complete_graph_evs(q)
    phi = Pq.copy()
    for _ in range(L - 1):
        phi = np.kron(Pq, phi)
    return phi
def get_encodings(qs):
    """
    Returns a length-L list of arrays; entry i holds the encoding vectors for
    each alphabet element at position i, given the alphabet size qs[i].
    """
    encodings = []
    for qi in qs:
        Pq = complete_graph_evs(qi) * np.sqrt(qi)
        encodings.append(Pq[:, 1:])  # drop the constant eigenvector
    return encodings
def fourier_for_seq(int_seq, encodings):
    """
    Returns an M x 1 array containing the Fourier encoding of a sequence,
    given the integer representation of the sequence and the encodings returned
    by get_encodings, where M = prod(qs) and qs is the alphabet size at each position.
    """
    L = len(int_seq)
    all_U = get_all_interactions(L)
    # Mutable copies: each interaction's positions are popped off as they are consumed.
    all_U = [list(U) for U in all_U]
    epi_encs = []
    # Seed each interaction's encoding with position 0's factor, if position 0 participates.
    enc_1 = encodings[0][int_seq[0]]
    for U in all_U:
        if len(U) > 0 and 0 == U[0]:
            U_enc = enc_1
            U.pop(0)
        else:
            U_enc = np.array([1])  # multiplicative identity for interactions without position 0
        epi_encs.append(U_enc)
    # Fold in the remaining positions one at a time via Kronecker products.
    for l in range(1, L):
        enc_l = encodings[l][int_seq[l]]
        for k, U in enumerate(all_U):
            U_enc = epi_encs[k]
            if len(U) > 0 and l==U[0]:
                U_enc = np.kron(U_enc,enc_l)
                U.pop(0)
            epi_encs[k] = U_enc
    all_enc = np.concatenate(epi_encs)
    return all_enc
def fourier_from_seqs(int_seqs, qs):
    """
    Returns an N x M array containing the Fourier encodings of N integer
    sequences with per-position alphabet sizes qs (a single int means a
    uniform alphabet at every position).
    """
    if type(qs) == int:
        qs = [qs] * len(int_seqs[0])
    M = np.prod(qs)
    encodings = get_encodings(qs)
    phi = np.zeros((len(int_seqs), M))
    root_M = np.sqrt(M)
    for row, seq in enumerate(tqdm(int_seqs)):
        phi[row] = fourier_for_seq(seq, encodings) / root_M
    return phi
def convert_01_bin_seqs(bin_seqs):
    """Converts a numpy array of {0, 1} binary sequences to {-1, 1} sequences, in place."""
    assert type(bin_seqs) == np.ndarray
    zero_mask = bin_seqs == 0
    bin_seqs[zero_mask] = -1
    return bin_seqs
def walsh_hadamard_from_seqs(bin_seqs):
    """
    Returns an N x 2^L array containing the Walsh-Hadamard encodings of N
    binary ({0,1}) sequences. Produces the same array as
    fourier_from_seqs(bin_seqs, [2]*L), but much faster.
    """
    pm_seqs = convert_01_bin_seqs(np.array(bin_seqs))  # {0,1} -> {-1,1}
    L = len(pm_seqs[0])
    all_U = get_all_interactions(L)
    M = 2 ** L
    N = len(bin_seqs)
    X = np.zeros((N, len(all_U)))
    for col, U in enumerate(all_U):
        if len(U) == 0:
            X[:, col] = 1  # constant term for the empty interaction
        else:
            X[:, col] = np.prod(pm_seqs[:, U], axis=1)
    return X / np.sqrt(M)
def calc_frac_var_explained(beta):
    """
    Returns the cumulative fraction of total variance captured by the largest
    (in squared magnitude) elements of *beta*, in decreasing order.
    """
    mags = np.abs(beta) ** 2
    mags /= np.sum(mags)
    descending = sorted(mags, reverse=True)
    return np.cumsum(descending)
def calc_frac_var_explained_from_beta_var(beta_var, samples=1000, up_to=None):
    """
    Numerically calculates the fraction variance explained by the largest elements in
    samples of a normally distributed random vector with variances given by beta_var.
    Let S be the number of largest elements to consider. For each value of S between 0
    and up_to (default up_to is len(beta_var)), this method returns the mean and std.
    dev. of the fraction variance explained by the largest S elements in samples
    of the random vector.

    NOTE(review): uses the global NumPy RNG without seeding, so results vary run to run.
    """
    beta_var_nz = beta_var[np.nonzero(beta_var)]  # drop exactly-zero variances
    M = len(beta_var_nz)
    if up_to is None:
        up_to = M
    fv = np.zeros(up_to)
    fv_std = np.zeros(up_to)
    # Draw `samples` Gaussian vectors with the given per-element variances.
    samples = np.random.randn(samples, M) * np.sqrt(beta_var_nz).reshape(1, M)
    sorted_beta_sq = -np.sort(-samples**2, axis=1)  # squared magnitudes, descending per sample
    mags = np.sum(sorted_beta_sq, axis=1)
    for i in range(up_to):
        # NOTE(review): when i > M these placeholder values are immediately
        # overwritten by the computation below; a `continue` may have been intended.
        if i > M:
            fv[i] = 1
            fv_std[i] = 1
        if i == 0:
            continue
        all_fv = np.sum(sorted_beta_sq[:, :i], axis=1) / mags
        fv[i] = np.mean(all_fv)
        fv_std[i] = np.std(all_fv)
    return fv, fv_std
qsft | qsft-master/rna_exp/rna_src/data_utils.py | import pandas as pd
import numpy as np
import itertools
from Bio import PDB
from tqdm import tqdm
from sklearn.linear_model import Lasso
from rna_exp.rna_src import utils
from rna_exp.rna_src import gnk_model
from rna_exp.rna_src import structure_utils
"""
Utility functions for loading and processing empirical fitness function data
"""
# global variables
# Mutated residue positions used by the mTagBFP2 fitness data (see load_mtagbfp_data).
MTAGBFP_POSITIONS = [20, 45, 63, 127, 143, 158, 168, 172, 174, 197, 206, 207, 227]
# Number of residue variants kept at each of the 11 His3p positions in the "big" dataset.
HIS3P_BIG_QS = [2, 2, 3, 2, 2, 3, 3, 4, 2, 4, 4]
# Mutated residue positions used by the His3p fitness data (see load_his3p_small_data).
HIS3P_POSITIONS = [145, 147, 148, 151, 152, 154, 164, 165, 168, 169, 170]
def load_mtagbfp_data():
    """
    Loads mTagBFP2 blue fluorescence fitness data from Poelwijk et. al. (2019).
    Returns the data as (X, y), where X is a matrix of Walsh-Hadamard encodings
    of sequences and y is an array of corresponding fitness values. Raw data
    is taken from Supplementary Data 3 of Poelwijk et. al. (2019) at
    https://www.nature.com/articles/s41467-019-12130-8#Sec20
    """
    df = pd.read_csv("../data/mtagbfp_raw_data.csv")
    # The first row is a header artifact; skip it in both columns.
    y = np.array(df['brightness.1'][1:]).astype(float)
    # Parse binary strings like "[0101...]" (strip the surrounding brackets)
    # into rows of 0/1 integers. (Replaces a manual append loop; an unused
    # local `L` was also removed.)
    bin_seqs = np.array([[int(c != '0') for c in s[1:-1]] for s in df['binary'][1:]])
    X = utils.walsh_hadamard_from_seqs(bin_seqs)
    return X, y
def load_his3p_small_data():
    """
    Loads His3p fitness data from Pokusaeva et. al. (2019) for binary sequences.
    Returns the data as (X, y), where X is a matrix of Walsh-Hadamard encodings
    of sequences and y is an array of corresponding fitness values. Raw data is
    taken from https://github.com/Lcarey/HIS3InterspeciesEpistasis/tree/master/Data.
    """
    df = pd.read_csv("../data/his3p_raw_data.csv")
    extract = lambda x: "".join([x[i-1] for i in HIS3P_POSITIONS])
    mut_seqs = df['seq'].apply(extract)  # extract subsequences at the mutated positions
    # Split into one column per character (raw string avoids an invalid-escape warning).
    seqs_split = mut_seqs.str.split(pat=r"\s*", expand=True)
    seqs_split = seqs_split.iloc[:, 1:-1]  # drop empty edge columns produced by the split
    # find the two most frequently occurring alphabet elements at each position
    best = []
    for i in range(1, 12):
        cts = seqs_split[i].value_counts()
        best.append(list(cts.iloc[:2].index))
    bin_seqs_ = list(itertools.product((0, 1), repeat=11))
    best_combos = []
    for bs in bin_seqs_:
        best_combos.append("".join([best[i][bs[i]] for i in range(len(bs))]))
    bin_match = []
    y_match = []
    # collect fitness values (an unused `num` counter was removed)
    for i, s in enumerate(best_combos):
        matches = mut_seqs.loc[mut_seqs == s]
        if len(matches) == 0:
            continue
        # calculate mean in the exponential domain if there are multiple fitness values
        fitness = np.log(np.mean(np.exp(df['log_fitness'].loc[matches.index])))
        y_match.append(fitness)
        bin_match.append(bin_seqs_[i])
    X = utils.walsh_hadamard_from_seqs(np.array(bin_match))
    return X, np.array(y_match)
#######################
### His3p(big) data ###
#######################
"""
Loading and processing the His3p(big) data takes much more time and memory than
the smaller datasets above, so the pipeline is split into multiple functions, whose
results are saved for further use. In order to load this data from scratch, one
must run find_his3p_big_sequences(), followed by build_his3p_fourier(), and then
load_his3p_big_fourier().
"""
def find_his3p_big_sequences(save=False):
    """
    Searches through the His3p raw data to find sequences with fitness data that
    correspond to combinations of extant amino acids at each position (i.e. the
    most frequently occurring amino acids at each position in the data). See page
    4 of Pokusaeva et. al. (2019) for more information about these extant sequences.
    Returns a dictionary containing the found sequences, corresponding fitness values,
    and indices in the raw data. This takes some time to run, so there is an option
    to save the dictionary into the results folder by setting save=True.
    """
    df = pd.read_csv("../data/his3p_raw_data.csv")
    qs = HIS3P_BIG_QS
    extract = lambda x: "".join([x[i-1] for i in HIS3P_POSITIONS])
    mut_seqs = df['seq'].apply(extract)  # subsequences at the mutated positions
    seqs_split = mut_seqs.str.split(pat ="\s*", expand = True)
    seqs_split = seqs_split.iloc[:, 1:-1]  # drop empty edge columns produced by the split
    # For each position, keep the qs[i-1] most frequent amino acids.
    best = []
    for i in range(1, 12):
        pos = seqs_split[i]
        cts = pos.value_counts()
        if i == 6:
            # Position 6 skips the third most frequent residue — TODO confirm why.
            best.append(list(cts.iloc[[0,1,3]].index))
        else:
            best.append(list(cts.iloc[:qs[i-1]].index))
    # Enumerate every combination of the selected residues.
    sizes = [list(range(q)) for q in qs]
    int_seqs = list(itertools.product(*sizes))
    best_combos = []
    for bs in int_seqs:
        seq = []
        for i in range(len(bs)):
            seq.append(best[i][bs[i]])
        seq = "".join(seq)
        best_combos.append(seq)
    num = 0
    int_match =[]
    y_match = []
    idx_match = []
    print("Finding sequences in data...")
    for i, s in enumerate(tqdm(best_combos)):
        matches = mut_seqs.loc[mut_seqs == s]
        if len(matches) == 0:
            continue
        # Average replicate measurements in the exponential domain.
        fitness = np.log(np.mean(np.exp(df['log_fitness'].loc[matches.index])))
        y_match.append(fitness)
        int_match.append(int_seqs[i])
        idx_match.append(i)
        num += 1
    out_dict = {"seq": int_match, "y": y_match, "idx": idx_match}
    if save:
        np.save("../results/his3p_big_data.npy", out_dict)
    return out_dict
def build_his3p_big_fourier(save=False):
    """
    Converts the His3p(big) sequences into Fourier encodings and returns the matrix.
    Tries to load the dict produced by find_his3p_big_sequences, running that
    method otherwise. When save=True, the sequence dict is saved by
    find_his3p_big_sequences and the resulting Fourier matrix is saved to
    ../results/his3p_big_fourier.npy (the file load_his3p_big_data expects).
    """
    qs = HIS3P_BIG_QS
    try:
        save_dict = np.load("../results/his3p_big_data.npy",allow_pickle=True).item()
    except FileNotFoundError:
        # BUG FIX: `save` was previously undefined here (NameError), and callers
        # (load_his3p_big_data) pass save= — it is now an explicit parameter.
        save_dict = find_his3p_big_sequences(save=save)
    int_seqs = save_dict['seq']
    M = np.prod(qs)
    N = len(int_seqs)
    phi = np.zeros((N, M))
    encodings = utils.get_encodings(qs)
    print("Calculating Fourier encoding for each sequence...")
    for i, seq in enumerate(tqdm(int_seqs)):
        phi[i] = utils.fourier_for_seq(seq, encodings) / np.sqrt(M)
    if save:
        # Persist the matrix so load_his3p_big_data can reuse it next time.
        np.save("../results/his3p_big_fourier.npy", phi)
    return phi
def load_his3p_big_data(save=False):
    """
    Loads His3p(big) fitness data from Pokusaeva, et. al. (2019). Returns
    the data as (X, y), where X is a matrix of Fourier encodings of sequences
    and y is an array of corresponding fitness values. Will try to load dictionary
    from find_his3p_big_sequences, but will otherwise run that method
    (if save=True, then the dictionary will be saved for future use).
    """
    try:
        save_dict = np.load("../results/his3p_big_data.npy",allow_pickle=True).item()
    except FileNotFoundError:
        save_dict = find_his3p_big_sequences(save=save)
    y = np.array(save_dict['y'])
    try:
        # Reuse a previously computed Fourier matrix if one was saved.
        X = np.load("../results/his3p_big_fourier.npy")
    except FileNotFoundError:
        X = build_his3p_big_fourier(save=save)
    return X, y
def _get_contact_map(which_data):
    """
    Returns the contact map corresponding to either the TagBFP
    (which_data='mtagbfp') or His3p data (which_data='his3p'), restricted to
    the mutated positions of the respective fitness dataset.

    Raises:
        ValueError: if which_data is not one of the supported datasets.
    """
    if which_data == 'mtagbfp':
        name = '3m24'
        pos = MTAGBFP_POSITIONS
    elif which_data == 'his3p':
        name = 'his3_itasser'
        pos = HIS3P_POSITIONS
    else:
        # Fail fast instead of hitting a NameError on `name`/`pos` below.
        raise ValueError("unknown dataset: %s" % which_data)
    structure = PDB.PDBParser().get_structure(name, '../data/%s.pdb' % name)
    chains = structure.get_chains()
    chain1 = next(chains)
    contact_map, resid2idx = structure_utils.calc_min_dist_contact_map(chain1)
    pos_in_cm = [resid2idx[p] for p in pos]
    cm_sub = contact_map[pos_in_cm][:, pos_in_cm]
    return cm_sub
def _get_binarized_contact_map(which_data, threshold=4.5):
    """
    Returns the binarized contact map for the mTagBFP (which_data='mtagbfp')
    or His3p (which_data='his3p') data, thresholded at the given distance.
    """
    contact_map = _get_contact_map(which_data)
    return structure_utils.binarize_contact_map(contact_map, threshold=threshold)
def get_mtagbfp_contact_map():
    """
    Returns the contact map of the TagBFP structure, for the positions in
    the empirical fitness function of Poelwijk, et. al. (2019).
    Convenience wrapper around _get_contact_map('mtagbfp').
    """
    return _get_contact_map('mtagbfp')
def get_his3p_contact_map():
    """
    Returns the contact map of the His3p I-TASSER predicted structure, for the
    positions in the empirical fitness function of Pokusaeva, et. al. (2019).
    Convenience wrapper around _get_contact_map('his3p').
    """
    return _get_contact_map('his3p')
def get_mtagbfp_binarized_contact_map(threshold=4.5):
    """
    Returns the binarized contact map of the TagBFP structure, for the positions in
    the empirical fitness function of Poelwijk, et. al. (2019).
    Contacts closer than `threshold` are marked as 1.
    """
    return _get_binarized_contact_map('mtagbfp', threshold=threshold)
def get_his3p_binarized_contact_map(threshold=4.5):
    """
    Returns the binarized contact map of the His3p I-TASSER predicted structure,
    for the positions in the empirical fitness function of Pokusaeva, et. al. (2019).
    Contacts closer than `threshold` are marked as 1.
    """
    return _get_binarized_contact_map('his3p', threshold=threshold)
def _calculate_wh_coefficients_complete(which_data, save=False):
    """
    Calculate the WH coefficients of the complete mTagBFP (which_data='mtagbfp'),
    His3p(small) (which_data='his3p_small') or His3p(big) (which_data='his3p_big')
    empirical fitness functions.
    """
    alpha = 1e-12  # effectively unregularized: Lasso is used as a numerically stable fit
    if which_data == 'mtagbfp':
        X, y = load_mtagbfp_data()
    elif which_data == 'his3p_small':
        X, y = load_his3p_small_data()
    elif which_data == 'his3p_big':
        try:
            # Reuse previously fitted coefficients if they were saved.
            beta = np.load("../results/his3p_big_beta.npy")
            print("Loaded saved beta array.")
            return beta
        except FileNotFoundError:
            X, y = load_his3p_big_data()
        alpha = 1e-10 # slightly higher because data is less complete than others
    model = Lasso(alpha=alpha)
    print("Fitting Fourier coefficients (this may take some time)...")
    model.fit(X, y)
    beta = model.coef_
    beta[0] = model.intercept_  # the constant term lives in the intercept, not coef_[0]
    if which_data == 'his3p_big' and save:
        np.save("../results/his3p_big_beta.npy", beta)
    return beta
def calculate_mtagbfp_wh_coefficients():
    """
    Calculate the WH coefficients of the mTagBFP2 fitness functions.
    Convenience wrapper around _calculate_wh_coefficients_complete.
    """
    return _calculate_wh_coefficients_complete('mtagbfp')
def calculate_his3p_small_wh_coefficients():
    """
    Calculate the Walsh-Hadamard coefficients of the His3p(small) fitness functions.
    Convenience wrapper around _calculate_wh_coefficients_complete.
    """
    return _calculate_wh_coefficients_complete('his3p_small')
def calculate_his3p_big_fourier_coefficients(save=False):
    """
    Calculate the Fourier coefficients of the His3p(big) fitness functions.
    Since this takes a long time, there is an option to save the beta
    coefficients in the results folder.
    """
    return _calculate_wh_coefficients_complete('his3p_big', save=save)
def calculate_mtagbfp_gnk_wh_coefficient_vars(return_neighborhoods=False):
    """
    Returns the variances of WH coefficients of GNK fitness functions whose
    Structural neighborhoods come from the TagBFP contact map. When
    return_neighborhoods is True, the neighborhoods are returned as well.
    """
    bin_cm = get_mtagbfp_binarized_contact_map()
    neighborhoods = structure_utils.contact_map_to_neighborhoods(bin_cm)
    # L=13 mutated positions, binary (q=2) alphabet.
    beta_var = gnk_model.calc_beta_var(13, 2, neighborhoods)
    return (beta_var, neighborhoods) if return_neighborhoods else beta_var
def calculate_his3p_small_gnk_wh_coefficient_vars(return_neighborhoods=False):
    """
    Returns the variances of WH coefficients of GNK fitness functions whose
    Structural neighborhoods come from the His3p contact map. When
    return_neighborhoods is True, the neighborhoods are returned as well.
    """
    bin_cm = get_his3p_binarized_contact_map()
    neighborhoods = structure_utils.contact_map_to_neighborhoods(bin_cm)
    # L=11 mutated positions, binary (q=2) alphabet.
    beta_var = gnk_model.calc_beta_var(11, 2, neighborhoods)
    return (beta_var, neighborhoods) if return_neighborhoods else beta_var
qsft | qsft-master/rna_exp/rna_src/rna_helper.py | import numpy as np
import json
from qsft.test_helper import TestHelper
from qsft.utils import NpEncoder
from rna_exp.rna_src.input_rna_signal_subsampled import RnaSubsampledSignal
from rna_exp.rna_src.rna_utils import get_rna_base_seq
class RNAHelper(TestHelper):
    """Test helper that wires a subsampled RNA fitness signal into the qsft test harness.

    Persists the experiment configuration (signal/subsampling arguments and the
    randomly chosen mutation positions) to <exp_dir>/config.json so repeated
    runs against the same directory reuse an identical setup.
    """
    mfe_base = 0  # baseline minimum free energy; not used in this class — presumably read elsewhere, TODO confirm
    base_seq_list = None
    positions = None
    def __init__(self, signal_args, methods, subsampling_args, test_args, exp_dir, subsampling=False):
        config_path = exp_dir / "config.json"
        config_exists = config_path.is_file()
        if config_exists:
            # Reuse the stored configuration so the experiment is reproducible.
            with open(config_path) as f:
                config_dict = json.load(f)
            subsampling_args = config_dict["subsampling_args"]
            signal_args = config_dict["signal_args"]
        else:
            # Pick n random (sorted, distinct) positions in the base RNA sequence to mutate.
            positions = list(np.sort(np.random.choice(len(get_rna_base_seq()), size=signal_args["n"], replace=False)))
            signal_args.update({
                "base_seq": get_rna_base_seq(),
                "positions": positions
            })
            config_dict = {"signal_args": signal_args, "subsampling_args": subsampling_args}
            with open(config_path, "w") as f:
                json.dump(config_dict, f, cls=NpEncoder)
        print("Positions: ", signal_args["positions"])
        super().__init__(signal_args, methods, subsampling_args, test_args, exp_dir, subsampling)
    def generate_signal(self, signal_args):
        """Construct the RNA subsampled signal from the stored signal arguments."""
        return RnaSubsampledSignal(**signal_args)
| 1,466 | 36.615385 | 118 | py |
qsft | qsft-master/rna_exp/rna_src/rna_utils.py | import RNA
import itertools
import utils
import linecache
import tracemalloc
"""
Utility functions for loading and processing the quasi-empirical RNA fitness function.
"""
def dna_to_rna(seq):
    """
    Converts a DNA sequence to its RNA counterpart (every T becomes U).
    """
    return "".join("U" if base == "T" else base for base in seq)
def insert(base_seq, positions, sub_seq):
    """
    Return a copy of `base_seq` with the characters of `sub_seq` written at
    the given 1-indexed `positions`.

    Parameters
    ----------
    base_seq : str
        Sequence to copy and modify.
    positions : sequence of int
        1-indexed target positions, one per character of `sub_seq`.
    sub_seq : str
        Replacement characters.

    Returns
    -------
    str
        The modified sequence.
    """
    chars = list(base_seq)
    for idx, pos in enumerate(positions):
        chars[pos - 1] = sub_seq[idx]
    return "".join(chars)
def get_rna_base_seq():
    """
    Returns the sequence of RFAM: AANN01066007.1

    The sequence is stored as DNA and converted to RNA on return.
    NOTE(review): the active literal is a truncated prefix of the
    commented-out longer sequence — presumably shortened deliberately for
    faster folding; confirm before relying on the full-length sequence.
    """
    # base_seq = "CTGAGCCGTTACCTGCAGCTGATGAGCTCCAAAAAGAGCGAAACCTGCTAGGTCCTGCAGTACTGGCTTAAGAGGCT"
    base_seq = "CTGAGCCGTTACCTGCAGCTGATGAGCTCCAAAAAGA"
    return dna_to_rna(base_seq)
def sample_structures_and_find_pairs(base_seq, positions, samples=10000):
    """
    Samples secondary structures from the Boltzmann distribution and finds
    pairs of positions that are paired in any of the sampled structures.

    Parameters
    ----------
    base_seq : str
        RNA sequence to fold.
    positions : collection of int
        Positions of interest; only pairs with both ends in this set are kept.
    samples : int
        Number of structures to sample from the Boltzmann ensemble.

    Returns
    -------
    set of tuple
        Pairs (i, j) of positions that were paired in at least one sample.
    """
    md = RNA.md()
    md.uniq_ML = 1
    fc = RNA.fold_compound(base_seq, md)
    (ss, mfe) = fc.mfe()
    # Rescale partition-function parameters around the MFE for numerical
    # stability before Boltzmann sampling.
    fc.exp_params_rescale(mfe)
    fc.pf()
    important_pairs = set()
    # BUG FIX: the `samples` argument was previously ignored (10000 was
    # hard-coded in the pbacktrack call); honor the caller-supplied count.
    for s in fc.pbacktrack(samples):
        pairs = find_pairs(s)
        for p in pairs:
            if p[0] in positions and p[1] in positions:
                if p[0] > p[1]:
                    print(p, s)
                important_pairs.add(tuple(p))
    return important_pairs
def pairs_to_neighborhoods(positions, pairs):
    """
    Converts pairs of interacting positions into one neighborhood per
    position, expressed in 1-indexed coordinates relative to `positions`.

    Parameters
    ----------
    positions : list
        Positions of interest (defines the 1-indexed coordinate system).
    pairs : iterable of 2-tuples
        Interacting position pairs.

    Returns
    -------
    list of list of int
        Sorted neighborhood for each position.
    """
    neighborhoods = []
    for idx, pos in enumerate(positions):
        neighborhood = [idx + 1]
        for a, b in pairs:
            # The partner of `pos` in this pair, if any (first slot wins).
            partner = b if a == pos else (a if b == pos else None)
            if partner is not None:
                neighborhood.append(positions.index(partner) + 1)
        neighborhoods.append(sorted(neighborhood))
    return neighborhoods
def find_pairs(ss):
    """
    Finds interacting (i, j) index pairs in a dot-bracket RNA secondary
    structure string.

    Parameters
    ----------
    ss : str
        Dot-bracket notation; '(' opens a pair, ')' closes the most recent
        unmatched one. Other characters are ignored.

    Returns
    -------
    list of tuple
        0-indexed (open, close) pairs, in closing order.
    """
    pairs = []
    open_stack = []
    for idx, ch in enumerate(ss):
        if ch == '(':
            open_stack.append(idx)
        elif ch == ')':
            pairs.append((open_stack.pop(), idx))
    return pairs
def generate_householder_matrix(positions, n):
    """
    Builds the Fourier (basis) matrix over all quaternary sequences at the
    given positions.

    Enumerates every nucleotide combination of length len(positions), maps
    nucleotides to integers 0..3, and delegates to utils.fourier_from_seqs.
    NOTE(review): the alphabet-size list is [4] * n while the enumerated
    sequences have length len(positions) — this assumes n == len(positions);
    confirm against callers.
    """
    nucs = ["A", "U", "C", "G"]
    nucs_idx = {nucs[i]: i for i in range(len(nucs))}
    seqs_as_list = list(itertools.product(nucs, repeat=len(positions)))
    int_seqs = [[nucs_idx[si] for si in s] for s in seqs_as_list]
    print("Constructing Fourier matrix...")
    X = utils.fourier_from_seqs(int_seqs, [4] * n)
    return X
'''
def calculate_rna_gnk_wh_coefficient_vars(pairs_from_scratch=False, return_neighborhoods=False):
"""
Returns the variances of WH coefficients in GNK fitness functions with
Structural neighborhoods corresponding to RNA secondary structure. If pairs_from_scratch
is True, then structures are sampled to find paired positions, otherwise pre-calculated
pairs are used.
"""
L = 8
q = 4
if pairs_from_scratch:
important_pairs = sample_structures_and_find_pairs(data_utils.get_rna_base_seq(),
positions, samples=10000) # uncomment to calculate from scratch
else:
important_pairs = {(21, 52), (20, 44), (20, 52), (20, 43)} # pre-calculated
# add adjacent pairs
important_pairs = important_pairs.union({(20, 21), (43, 44)})
V = pairs_to_neighborhoods(self.positions, important_pairs)
gnk_beta_var = gnk_model.calc_beta_var(L, q, V)
if return_neighborhoods:
return gnk_beta_var, V
else:
return gnk_beta_var
'''
def display_top(snapshot, key_type='lineno', limit=10):
    """
    Pretty-prints the top memory-allocating source lines of a tracemalloc
    snapshot (this is the standard recipe from the tracemalloc docs).

    Parameters
    ----------
    snapshot : tracemalloc.Snapshot
        Snapshot to analyze.
    key_type : str
        Grouping key passed to Snapshot.statistics (e.g. 'lineno').
    limit : int
        Number of top entries to display individually.
    """
    # Drop importlib bootstrap and unknown frames so they don't dominate.
    snapshot = snapshot.filter_traces((
        tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
        tracemalloc.Filter(False, "<unknown>"),
    ))
    top_stats = snapshot.statistics(key_type)
    print("----------------------")
    print("Top %s lines" % limit)
    for index, stat in enumerate(top_stats[:limit], 1):
        frame = stat.traceback[0]
        print("#%s: %s:%s: %.1f KiB"
              % (index, frame.filename, frame.lineno, stat.size / 1024))
        # Show the offending source line when it can be read from disk.
        line = linecache.getline(frame.filename, frame.lineno).strip()
        if line:
            print(' %s' % line)
    # Aggregate everything beyond the top `limit` into one line.
    other = top_stats[limit:]
    if other:
        size = sum(stat.size for stat in other)
        print("%s other: %.1f KiB" % (len(other), size / 1024))
    total = sum(stat.size for stat in top_stats)
    print("Total allocated size: %.1f KiB" % (total / 1024))
    print("----------------------")
| 4,803 | 27.093567 | 122 | py |
qsft | qsft-master/rna_exp/rna_src/__init__.py | 0 | 0 | 0 | py | |
qsft | qsft-master/rna_exp/rna_src/input_rna_signal_subsampled.py | from qsft.input_signal_subsampled import SubsampledSignal
import numpy as np
from multiprocessing import Pool
from qsft.utils import dec_to_qary_vec, qary_vec_to_dec
import RNA
class RnaSubsampledSignal(SubsampledSignal):
    # Quaternary alphabet: integer value i maps to nucleotide nucs[i].
    nucs = np.array(["A", "U", "C", "G"])
    def __init__(self, **kwargs):
        """
        Signal whose samples are RNA folding energies relative to the base
        sequence. A worker pool is created up-front (each worker caches the
        base sequence, positions and base MFE via worker_init) and closed
        once the parent class has finished subsampling.
        """
        self.base_seq = kwargs.get("base_seq")
        self.positions = kwargs.get("positions")
        self.sampling_function = kwargs.get("sampling_function")
        self.q = kwargs.get("q")
        self.n = kwargs.get("n")
        # MFE of the unmutated sequence; all samples are reported relative to it.
        (_, mfe_base) = RNA.fold("".join(self.base_seq))
        self.mfe_base = mfe_base
        self.pool = Pool(initializer=worker_init, initargs=(self.base_seq, self.positions, self.mfe_base))
        super().__init__(**kwargs)
        self.pool.close()
    def subsample(self, query_indices):
        """
        Evaluate the fitness function at the given decimal query indices,
        fanning batches of 250 out to the worker pool.
        """
        batch_size = 250
        res = np.zeros(len(query_indices))
        counter = 0
        # pbar = tqdm(total=len(query_indices), miniters=batch_size, position=0)
        query_batches = np.array_split(query_indices, len(query_indices)//batch_size)
        for new_res in self.pool.imap(sampling_function, query_batches):
            res[counter: counter+len(new_res)] = new_res
            counter += len(new_res)
            # pbar.update(len(new_res))
        return res
def worker_init(base_seq, positions_input, mfe_base_input):
    """
    Pool initializer: caches the base sequence (as a char array), the mutated
    positions and the base MFE in module-level globals of each worker process
    so sampling_function can reuse them without re-pickling per task.
    """
    global base_seq_list
    global positions
    global mfe_base
    base_seq_list = np.array(list(base_seq))
    mfe_base = mfe_base_input
    positions = positions_input
def sampling_function(query_batch):
    """
    Worker task: for each decimal query index, build the mutated sequence,
    fold it, and return its MFE relative to the base sequence's MFE.

    Relies on the globals set by worker_init.
    """
    global base_seq_list
    global positions
    global mfe_base
    # Decode decimal indices into length-len(positions) quaternary vectors.
    query_batch_q = np.array(dec_to_qary_vec(query_batch, 4, len(positions))).T
    y = []
    for query_index in query_batch_q:
        full = next_q4(base_seq_list, positions, query_index)
        fc = RNA.fold_compound(full)
        (_, mfe) = fc.mfe()
        y.append(mfe - mfe_base)
    return y
nucs = np.array(["A", "U", "C", "G"])
def next_q4(base_seq_list, positions, query_index):
    """
    Write the quaternary query (as nucleotides) into `base_seq_list` at the
    0-indexed `positions` and return the joined string.

    NOTE: mutates `base_seq_list` in place — callers (the worker loop) reuse
    the same buffer between calls, so only the queried positions change.
    """
    seq = nucs[query_index]
    base_seq_list[positions] = seq
    return "".join(base_seq_list)
def next_q2(base_seq_list, positions, query_index):
    """
    Binary-alphabet variant of next_q4: reinterprets a q=2 query vector as a
    quaternary vector of half the length (two bits per nucleotide), then
    writes the corresponding nucleotides into `base_seq_list` in place.
    """
    idx = dec_to_qary_vec([qary_vec_to_dec(query_index, 2)], 4, len(query_index)//2)[:, 0]
    seq = nucs[idx]
    base_seq_list[positions] = seq
    return "".join(base_seq_list)
| 2,382 | 28.060976 | 106 | py |
qsft | qsft-master/rna_exp/rna_src/structure_utils.py | import numpy as np
from Bio import PDB
"""
Various utility functions for working with PDB structures.
"""
def binarize_contact_map(contact_map, threshold=8.0):
    """
    Returns the binary version of a contact map: True wherever the distance
    is strictly below `threshold` (default 8 Angstroms).
    """
    binary_map = np.less(contact_map, threshold)
    return binary_map
def calc_min_dist(res1, res2):
    """
    Returns the minimum pairwise distance between any atom of res1 and any
    atom of res2 (biopython atoms define `-` as Euclidean distance).

    Returns -1 when either residue is empty, matching the original sentinel.
    """
    return min((atom1 - atom2 for atom1 in res1 for atom2 in res2), default=-1)
def calc_min_dist_contact_map(chain1, chain2=None):
    """
    Calculates the minimum distance between any two atoms in every pair of residues
    in chain1 and (optionally) chain2. Returns the array of distances along with a
    dictionary that maps residue IDs to indices in the array.

    When chain2 is None the map is computed within chain1: only the upper
    triangle is evaluated and mirrored, and a single resid->index dict is
    returned. Otherwise the full chain1 x chain2 map and two dicts are
    returned. Residue pairs never evaluated keep the sentinel -1 and are
    replaced at the end by (max distance + 1).
    """
    resid_to_idx1 = {}
    if chain2 is None:
        chain2 = chain1
        one_chain = True
    else:
        one_chain = False
        resid_to_idx2 = {}
    l1 = len(chain1)
    l2 = len(chain2)
    # -1 marks "not yet computed"; real distances are non-negative.
    contact_map = np.ones((l1, l2)) * -1
    for i, res1 in enumerate(chain1):
        resid_to_idx1[int(res1.id[1])] = i
        for j, res2 in enumerate(chain2):
            if one_chain:
                # Symmetric map: skip the lower triangle, mirror below.
                if j < i:
                    continue
            contact_map[i, j] = calc_min_dist(res1, res2)
            if one_chain:
                contact_map[j, i] = contact_map[i, j]
                resid_to_idx1[int(res2.id[1])] = j
            else:
                resid_to_idx2[int(res2.id[1])] = j
    # Set missing distances larger than maximum distance in structure.
    missing_value = np.amax(contact_map) + 1.0
    contact_map[np.less(contact_map, 0)] = missing_value
    if one_chain:
        return contact_map, resid_to_idx1
    else:
        return contact_map, resid_to_idx1, resid_to_idx2
def contact_map_to_neighborhoods(contact_map):
    """Convert a binarized contact map to a set of neighborhoods.

    Each position i gets a sorted, 1-indexed neighborhood containing itself
    plus every j != i with contact_map[i, j] truthy.
    """
    L = contact_map.shape[0]
    V = []
    for i in range(L):
        Vi = [i+1]
        for j in range(L):
            if i == j:
                continue
            elif contact_map[i, j]:
                Vi.append(j+1)
        V.append(sorted(Vi))
    return V | 2,281 | 28.636364 | 84 | py |
qsft | qsft-master/rna_exp/rna_src/gnk_model.py | import numpy as np
import itertools
from scipy.special import binom
from math import factorial
from rna_exp.rna_src import utils
def get_neighborhood_powerset(V):
    """Returns the union of powersets of a set of neighborhoods.

    Each neighborhood is sorted first so that identical subsets from
    different neighborhoods compare (and deduplicate) as equal tuples.
    """
    Vs = [sorted(Vk) for Vk in V]
    powersets = [tuple(utils.powerset(Vs[i])) for i in range(len(Vs))]
    T = set().union(*powersets)
    return T
def calculate_sparsity(L, q, V):
    """Calculates sparsity given any neighborhoods V=[V1,V2,...,VL].

    Sums (q-1)^|U| over every distinct subset U of any neighborhood.
    Note: the L parameter is unused here; sparsity depends only on q and V.
    """
    T = get_neighborhood_powerset(V)
    sparsity = 0
    for U in T:
        sparsity += (q-1)**len(U)
    return sparsity
def calc_bn_sparsity(L, q, K):
    """Calculates sparsity of the Block Neighborhood scheme."""
    num_blocks = L / K
    return num_blocks * (q ** K - 1) + 1
def calc_an_sparsity(L, q, K):
    """Calculates sparsity of the Adjacent Neighborhood scheme."""
    per_position = (q - 1) * q ** (K - 1)
    return 1 + L * per_position
def _calc_set_prob(r, L, K):
"""Calculates p(r) for a set of size r, for use with 'calc_mean_rn_sparsity'"""
if r == 0 or r == 1:
return 1
else:
ar = (factorial(K-1) / factorial(L-1)) * (factorial(L-r) / factorial(K-r))
br = ar * ((K-r) / (L-r))
term1 = (1-ar)**r
term2 = (1-br)**(L-r)
return 1-term1*term2
def calc_mean_rn_sparsity(L, q, K):
    """Calculates expected sparsity of Random Neighborhood scheme.

    Sums, over set sizes r = 0..K, the number of size-r sets times the
    probability p(r) that such a set is covered times its (q-1)^r weight.
    """
    sparsity = 0
    for r in range(K+1):
        pr = _calc_set_prob(r, L, K)
        sparsity += binom(L, r)*pr *(q-1)**r
    return sparsity
def calc_max_rn_sparsity(L, q, K):
    """Calculates an upper bound on the sparsity of the Random Neighborhood scheme."""
    # Empty set and all first-order terms, ...
    bound = 1 + L * (q - 1)
    # ... plus at most L * C(K, r) size-r sets per order r >= 2.
    bound += sum(L * binom(K, r) * (q - 1) ** r for r in range(2, K + 1))
    return bound
def build_adj_neighborhoods(L, K, symmetric=True):
    """Build Adjacent Neighborhoods with periodic boundary conditions.

    Each position i gets the K positions centered on it (symmetric=True)
    or starting at it (symmetric=False), wrapping around the sequence.
    Neighborhoods are 1-indexed.
    """
    half_width = (K - 1) / 2
    V = []
    for i in range(L):
        start = np.floor(i - half_width) if symmetric else i
        V.append([int(((start + j) % L) + 1) for j in range(K)])
    return V
def build_block_neighborhoods(L, K):
    """Build neighborhoods according to the Block Neighborhood scheme.

    Positions are grouped into contiguous blocks of size K; each position's
    neighborhood is the (1-indexed) block containing it.

    Parameters
    ----------
    L : int
        Sequence length; must be divisible by K.
    K : int
        Block size.

    Returns
    -------
    list of list of int
        One sorted, 1-indexed neighborhood per position.
    """
    assert L % K == 0
    V = []
    for j in range(L):
        # First (0-indexed) position of the block containing position j.
        # Integer arithmetic replaces the previous int(K*np.floor(j/K))
        # float round-trip; the unused `block_size` local was removed.
        block_start = K * (j // K)
        V.append(list(range(block_start + 1, block_start + K + 1)))
    return V
def sample_random_neighborhoods(L, K):
    """Sample neighborhoods according to the Random Neighborhood scheme.

    Each position i keeps itself and draws K-1 distinct other positions
    uniformly at random; neighborhoods are sorted and 1-indexed.
    """
    V = []
    for i in range(L):
        candidates = [pos + 1 for pos in range(L) if pos != i]
        neighborhood = list(np.random.choice(candidates, size=K - 1, replace=False))
        neighborhood.append(i + 1)
        V.append(sorted(neighborhood))
    return V
def calc_beta_var(L, qs, V):
    """
    Calculates the variance of beta coefficients for a given sequence length, L,
    list of alphabet sizes, and neighborhoods V. The returned coefficients are ordered
    by degree of epistatic interaction.
    """
    if type(qs) is int:
        # A scalar alphabet size applies to every position.
        qs = [qs]*L
    all_U = utils.get_all_interactions(L, index_1=True)  # index by 1 to match neighborhoods
    # Normalization constant: total number of sequences.
    z = np.prod(qs)
    beta_var_U = []
    # Precompute each neighborhood's weight, prod_k 1/q_k over its members.
    facs = []
    for j, Vj in enumerate(V):
        fac = 1
        for k in Vj:
            fac *= 1/qs[k-1]
        facs.append(fac)
    for i, U in enumerate(all_U):
        # Number of coefficients associated with interaction set U.
        sz = np.prod([qs[k-1]-1 for k in U])
        bv = 0
        # A neighborhood contributes iff it contains all of U.
        for j, Vj in enumerate(V):
            Uset = set(U)
            Vj_set = set(Vj)
            if Uset.issubset(Vj_set):
                bv += facs[j]
        bv *= z
        # Every coefficient indexed by U shares the same variance.
        bv_expand = bv*np.ones(int(sz))
        beta_var_U.append(bv_expand)
    beta_var = np.concatenate(beta_var_U)
    return beta_var
def sample_gnk_fitness_function(L, qs, V='random', K=None):
    """
    Sample a GNK fitness function given the sequence length, alphabet sizes
    and neighborhoods. If V='random', V='block', or V='adjacent', then
    the neighborhoods will be set to the corresponding standard neighborhood
    scheme (K required). Otherwise, V must be a list of neighborhoods.
    Returns the sampled fitness values over all q^L sequences.
    """
    if type(V) is str:
        assert K is not None
        if V == 'random':
            V = sample_random_neighborhoods(L, K)
        elif V == 'adjacent':
            V = build_adj_neighborhoods(L, K)
        elif V == 'block':
            V = build_block_neighborhoods(L, K)
    beta_var = calc_beta_var(L, qs, V)
    # Binary alphabets use the Walsh-Hadamard basis; otherwise the general
    # Fourier basis is constructed.
    use_wh = False
    if type(qs) is int:
        if qs == 2:
            use_wh = True
        qs = [qs]*L
    alphs = [list(range(q)) for q in qs]
    seqs = list(itertools.product(*alphs))
    if use_wh:
        phi = utils.walsh_hadamard_from_seqs(seqs)
    else:
        phi = utils.fourier_from_seqs(seqs, qs)
    # Gaussian coefficients scaled to the GNK variances, mapped to fitness.
    beta = np.random.randn(len(beta_var))*np.sqrt(beta_var)
    f = np.dot(phi, beta)
    return f | 4,787 | 28.018182 | 91 | py |
qsft | qsft-master/rna_exp/rna_src/input_rna_signal.py | from qsft.input_signal import Signal
import numpy as np
import itertools
from rna_exp.rna_src.rna_utils import insert
from multiprocessing import Pool
from tqdm import tqdm
from functools import partial
tqdm = partial(tqdm, position=0, leave=True)
class RnaSignal(Signal):
    def __init__(self, **kwargs):
        """
        Exhaustively-sampled RNA signal: stores the base sequence and the
        mutated positions, then defers to the Signal base class.
        """
        self.base_seq = kwargs.get("base_seq")
        self.positions = kwargs.get("positions")
        self.n = len(self.positions)
        self.sampling_function = kwargs.get("sampling_function")
        super().__init__(**kwargs)
    def sample(self):
        """
        Evaluate the sampling function over ALL 4^n mutated sequences.
        Currently disabled: the NotImplementedError below is raised before
        the pool is used, so the trailing code is dead until updated.
        """
        nucs = ["A", "U", "C", "G"]
        seqs_as_list = list(itertools.product(nucs, repeat=len(self.positions)))
        seqs = np.array(["".join(s) for s in seqs_as_list])
        query = []
        for i, s in enumerate(seqs):
            full = insert(self.base_seq, self.positions, s)
            query.append(full)
        raise NotImplementedError("things need to be changed")
        with Pool() as pool:
            y = list(tqdm(pool.imap(self.sampling_function, query), total=len(seqs)))
        self._signal_t = np.array(y) | 1,107 | 31.588235 | 85 | py |
qsft | qsft-master/qsft/spright.py | '''
SPRIGHT decoding main file. Logic flow:
1. Generate a signal from src/input_signal.py
2. Subsample from src/query.py
3. Peel using src/reconstruct.py
'''
import numpy as np
import galois
import sys
import tqdm
import time
sys.path.append("../src")
from archive.qsft_rand import dec_to_bin, bin_to_dec, qary_vec_to_dec, binary_ints, qary_ints
from archive.qsft_rand import compute_delayed_wht, compute_delayed_gwht, get_Ms, get_b, get_D
from archive.qsft_rand import singleton_detection
class SPRIGHT:
    '''
    Class to store encoder/decoder configurations and carry out encoding/decoding.

    Attributes
    ----------
    query_method : str
        The method to generate the sparsity coefficient and the subsampling matrices.
        Currently implemented methods:
            "simple" : choose some predetermined matrices based on problem size.

    delays_method : str
        The method to generate the matrix of delays.
        Currently implemented methods:
            "identity_like" : return a zero row and the identity matrix vertically stacked together.
            "random" : make a random set of delays.

    reconstruct_method : str
        The method to detect singletons.
        Currently implemented methods:
            "noiseless" : decode according to [2], section 4.2, with the assumption the signal is noiseless.
            "mle" : naive noisy decoding; decode by taking the maximum-likelihood singleton that could be at that bin.
    '''
    def __init__(self, query_method, delays_method, reconstruct_method):
        self.query_method = query_method
        self.delays_method = delays_method
        self.reconstruct_method = reconstruct_method
    def transform(self, signal, verbose=False, report=False):
        '''
        Full SPRIGHT encoding and decoding. Implements Algorithms 1 and 2 from [2].
        (numbers) in the comments indicate equation numbers in [2].

        Arguments
        ---------
        signal : Signal object.
            The signal to be transformed / compared to.

        verbose : boolean
            Whether to print intermediate steps.

        report : boolean
            When True, also return the number of samples used and the set of
            recovered coefficient locations.

        Returns
        -------
        wht : ndarray
            The WHT constructed by subsampling and peeling.
        '''
        GF = galois.GF(signal.q)
        # check the condition for p_failure > eps
        # upper bound on the number of peeling rounds, error out after that point
        num_peeling = 0
        result = []
        wht = np.zeros_like(signal.signal_t)
        b = get_b(signal, method=self.query_method)
        peeling_max = 2 ** b
        Ms = get_Ms(signal.n, b, signal.q, method=self.query_method)
        Us, Ss = [], []
        singletons = {}
        multitons = []
        if report:
            used = set()
        if self.delays_method != "nso":
            num_delays = signal.n + 1
        else:
            num_delays = signal.n * int(np.log2(signal.n)) # idk
        if signal.q == 2:
            K = binary_ints(signal.n)
        else:
            K = GF(qary_ints(signal.n, signal.q))
        # subsample, make the observation [U] and offset signature [S] matrices
        for M in Ms:
            D = get_D(signal.n, method=self.delays_method, num_delays=num_delays, q=signal.q)
            if verbose:
                print("------")
                print("a delay matrix")
                print(D)
            if signal.q == 2:
                U, used_i = compute_delayed_wht(signal, M, D)
            else:
                U, used_i = compute_delayed_gwht(signal, M, D, signal.q)
            Us.append(U)
            Ss.append((-1) ** (D @ K)) # offset signature matrix
            if report:
                used = used.union(used_i)
        cutoff = 2 * signal.noise_sd ** 2 * (2 ** (signal.n - b)) * num_delays # noise threshold
        if verbose:
            print('cutoff: {}'.format(cutoff))
        # K is the binary representation of all integers from 0 to 2 ** n - 1.
        if signal.q == 2:
            select_froms = np.array([[bin_to_dec(row) for row in M.T.dot(K).T] for M in Ms])
        else:
            select_froms = np.array([qary_vec_to_dec(-M.T @ K, signal.q) for M in Ms])
        # `select_froms` is the collection of 'j' values and associated indices
        # so that we can quickly choose from the coefficient locations such that M.T @ k = j as in (20)
        # example: ball j goes to bin at "select_froms[i][j]"" in stage i
        # begin peeling
        # index convention for peeling: 'i' goes over all M/U/S values
        # i.e. it refers to the index of the subsampling group (zero-indexed - off by one from the paper).
        # 'j' goes over all columns of the WHT subsample matrix, going from 0 to 2 ** b - 1.
        # e.g. (i, j) = (0, 2) refers to subsampling group 0, and aliased bin 2 (10 in binary)
        # which in the example of section 3.2 is the multiton X[0110] + X[1010] + W1[10]
        # a multiton will just store the (i, j)s in a list
        # a singleton will map from the (i, j)s to the true (binary) values k.
        # e.g. the singleton (0, 0), which in the example of section 3.2 is X[0100] + W1[00]
        # would be stored as the dictionary entry (0, 0): array([0, 1, 0, 0]).
        there_were_multitons = True
        while there_were_multitons and num_peeling < peeling_max:
            if verbose:
                print('-----')
                print('the measurement matrix')
                for U in Us:
                    print(U)
            # first step: find all the singletons and multitons.
            singletons = {} # dictionary from (i, j) values to the true index of the singleton, k.
            multitons = [] # list of (i, j) values indicating where multitons are.
            if signal.q > 2:
                # TODO
                selection = 0
            else:
                for i, (U, S, select_from) in enumerate(zip(Us, Ss, select_froms)):
                    for j, col in enumerate(U.T):
                        # note that np.inner(x, x) is used as norm-squared: marginally faster than taking norm and squaring
                        if np.inner(col, col) > cutoff:
                            selection = np.where(select_from == j)[0] # pick all the k such that M.T @ k = j
                            k, sgn = singleton_detection(
                                col,
                                method=self.reconstruct_method,
                                selection=selection,
                                S_slice=S[:, selection],
                                n=signal.n
                            ) # find the best fit singleton
                            k_dec = bin_to_dec(k)
                            rho = np.dot(S[:,k_dec], col)*sgn/len(col)
                            residual = col - sgn * rho * S[:,k_dec]
                            if verbose:
                                print((i, j), np.inner(residual, residual))
                            if np.inner(residual, residual) > cutoff:
                                multitons.append((i, j))
                            else: # declare as singleton
                                singletons[(i, j)] = (k, rho, sgn)
                                if verbose:
                                    print('amplitude: {}'.format(rho))
            # all singletons and multitons are discovered
            if verbose:
                print('singletons:')
                for ston in singletons.items():
                    print("\t{0} {1}\n".format(ston, bin_to_dec(ston[1][0])))
                print("Multitons : {0}\n".format(multitons))
            # raise RuntimeError("stop")
            # WARNING: this is not a correct thing to do
            # in the last iteration of peeling, everything will be singletons and there
            # will be no multitons
            if len(multitons) == 0: # no more multitons, and can construct final WHT
                there_were_multitons = False
            # balls to peel
            balls_to_peel = set()
            ball_values = {}
            ball_sgn = {}
            for (i, j) in singletons:
                k, rho, sgn = singletons[(i, j)]
                ball = bin_to_dec(k)
                balls_to_peel.add(ball)
                ball_values[ball] = rho
                ball_sgn[ball] = sgn
            if verbose:
                print('these balls will be peeled')
                print(balls_to_peel)
            # peel
            for ball in balls_to_peel:
                num_peeling += 1
                k = dec_to_bin(ball, signal.n)
                potential_peels = [(l, bin_to_dec(M.T.dot(k))) for l, M in enumerate(Ms)]
                result.append((k, ball_sgn[ball]*ball_values[ball]))
                for peel in potential_peels:
                    signature_in_stage = Ss[peel[0]][:,ball]
                    to_subtract = ball_sgn[ball] * ball_values[ball] * signature_in_stage
                    Us[peel[0]][:,peel[1]] -= to_subtract
                    if verbose:
                        print('this is subtracted:')
                        print(to_subtract)
                        print("Peeled ball {0} off bin {1}".format(bin_to_dec(k), peel))
        loc = set()
        for k, value in result: # iterating over (i, j)s
            idx = bin_to_dec(k) # converting 'k's of singletons to decimals
            loc.add(idx)
            if wht[idx] == 0:
                wht[idx] = value
            else:
                wht[idx] = (wht[idx] + value) / 2
                # average out noise; e.g. in the example in 3.2, U1[11] and U2[11] are the same singleton,
                # so averaging them reduces the effect of noise.
        wht /= 2 ** (signal.n - b)
        if not report:
            return wht
        else:
            return wht, len(used), loc
    def method_test(self, signal, num_runs=10):
        '''
        Tests a method on a signal and reports its average execution time and sample efficiency.

        Returns (avg seconds per run, success ratio, avg fraction of samples used).
        '''
        time_start = time.time()
        samples = 0
        successes = 0
        for i in tqdm.trange(num_runs):
            wht, num_samples, loc = self.transform(signal, report=True)
            if loc == set(signal.loc):
                successes += 1
            samples += num_samples
        return (time.time() - time_start) / num_runs, successes / num_runs, samples / (num_runs * 2 ** signal.n)
    def method_report(self, signal, num_runs=10):
        '''
        Prints the results of a method_test (time, success ratio, sample ratio).
        '''
        print(
            "Testing SPRIGHT with query method {0}, delays method {1}, reconstruct method {2}."
            .format(self.query_method, self.delays_method, self.reconstruct_method)
        )
        t, s, sam = self.method_test(signal, num_runs)
        print("Average time in seconds: {}".format(t))
        print("Success ratio: {}".format(s))
        print("Average sample ratio: {}".format(sam))
if __name__ == "__main__":
    # Smoke-test driver: builds small synthetic signals and runs SPRIGHT
    # either once (test_one_method) or across several configurations.
    np.random.seed(10)
    from archive.qsft_rand import Signal
    test_signal = Signal(8, [4, 6, 10, 15, 24, 37, 48, 54], q=2, strengths=[2, 4, 1, 1, 1, 3, 8, 1], noise_sd=0)
    test_signal_complex = Signal(3, [4, 6, 10, 15, 24, 37, 48, 54], q=4, strengths=[2, 4, 1, 1, 1, 3, 8, 1], noise_sd=0)
    test_one_method = False
    if test_one_method:
        spright = SPRIGHT(
            query_method="simple",
            delays_method="complex",
            reconstruct_method="noiseless"
        )
        # Residual energy should be ~0 for a noiseless exact recovery.
        residual = spright.transform(test_signal, verbose=False, report=False) - test_signal.signal_w
        print("Residual energy: {0}".format(np.inner(residual, residual)))
    else:
        configs = [
            {"query_method" : "simple", "delays_method" : "identity_like", "reconstruct_method" : "mle"},
            {"query_method" : "simple", "delays_method" : "random" , "reconstruct_method" : "mle"},
            {"query_method" : "simple", "delays_method" : "nso" , "reconstruct_method" : "mle"},
            {"query_method" : "simple", "delays_method" : "nso" , "reconstruct_method" : "nso"}
        ]
        for config in configs:
            # Ctrl-C skips to the next configuration instead of aborting.
            try:
                SPRIGHT(**config).method_report(test_signal)
            except KeyboardInterrupt:
                continue
| 12,417 | 42.118056 | 123 | py |
qsft | qsft-master/qsft/input_signal_subsampled.py | from qsft.utils import qary_ints, qary_vec_to_dec, gwht, load_data, save_data
from qsft.input_signal import Signal
from qsft.query import get_Ms_and_Ds
from pathlib import Path
from math import floor
from tqdm import tqdm
import numpy as np
import random
import time
class SubsampledSignal(Signal):
    """
    A shell Class for input signal/functions that are too large and cannot be stored in their entirety. In addition to
    the signal itself, this must also contain information about the M and D matricies that are used for subsampling

    Attributes
    ----------
    query_args : dict
        Parameters that determine the structure of the Ms and Ds used for subsampling:
        b : int
            The max dimension of subsampling (subsampled signals have length q^b).
        all_bs : list, optional
            All b values that should be subsampled; useful for repeating an
            experiment over several b to compare efficiency.
        num_subsample, num_repeat, delays_method_source, delays_method_channel :
            See the QSFT class docstring.
        subsampling_method :
            "simple" gives block-wise-identity M matricies; "complex" draws
            entries uniformly from 0..q-1 (full rank w.h.p. for large n, b).
    L : np.array
        Enumerates all q^b q-ary vectors of length b (built lazily).
    foldername : str
        If set, Ms/Ds, raw samples and per-b transforms are cached under this
        folder and reloaded on subsequent runs.
    """
    def _set_params(self, **kwargs):
        self.n = kwargs.get("n")
        self.q = kwargs.get("q")
        self.N = self.q ** self.n
        self.signal_w = kwargs.get("signal_w")
        self.query_args = kwargs.get("query_args")
        self.b = self.query_args.get("b")
        self.all_bs = self.query_args.get("all_bs", [self.b])   # all b values to sample/transform at
        self.num_subsample = self.query_args.get("num_subsample")
        if "num_repeat" not in self.query_args:
            self.query_args["num_repeat"] = 1
        self.num_repeat = self.query_args.get("num_repeat")
        self.subsampling_method = self.query_args.get("subsampling_method")
        self.delays_method_source = self.query_args.get("delays_method_source")
        self.delays_method_channel = self.query_args.get("delays_method_channel")
        self.L = None  # List of all length b qary vectors
        self.foldername = kwargs.get("folder")

    def _init_signal(self):
        # Any unrecognized subsampling_method falls through to the qsft path.
        if self.subsampling_method == "uniform":
            self._subsample_uniform()
        elif self.subsampling_method == "qsft":
            self._set_Ms_and_Ds_qsft()
            self._subsample_qsft()
        else:
            self._set_Ms_and_Ds_qsft()
            self._subsample_qsft()

    def _check_transforms_qsft(self):
        """
        Returns
        -------
        True if the transform is already computed and saved for all values of b, else False
        """
        if self.foldername:
            Path(f"{self.foldername}/transforms/").mkdir(exist_ok=True)
            for b in self.all_bs:
                for i in range(len(self.Ms)):
                    Us_path = Path(f"{self.foldername}/transforms/U{i}_b{b}.pickle")
                    if not Us_path.is_file():
                        return False
            return True
        else:
            return False

    def _set_Ms_and_Ds_qsft(self):
        """
        Sets the values of Ms and Ds, either by loading from folder if exists, otherwise it loaded from query_args
        """
        if self.foldername:
            Path(f"{self.foldername}").mkdir(exist_ok=True)
            Ms_and_Ds_path = Path(f"{self.foldername}/Ms_and_Ds.pickle")
            if Ms_and_Ds_path.is_file():
                self.Ms, self.Ds = load_data(Ms_and_Ds_path)
            else:
                self.Ms, self.Ds = get_Ms_and_Ds(self.n, self.q, **self.query_args)
                save_data((self.Ms, self.Ds), f"{self.foldername}/Ms_and_Ds.pickle")
        else:
            self.Ms, self.Ds = get_Ms_and_Ds(self.n, self.q, **self.query_args)

    def _subsample_qsft(self):
        """
        Subsamples and computes the sparse fourier transform for each subsampling group if the samples are not already
        present in the folder
        """
        self.Us = [[{} for j in range(len(self.Ds[i]))] for i in range(len(self.Ms))]
        self.transformTimes = [[{} for j in range(len(self.Ds[i]))] for i in range(len(self.Ms))]

        if self.foldername:
            Path(f"{self.foldername}/samples").mkdir(exist_ok=True)
            Path(f"{self.foldername}/transforms/").mkdir(exist_ok=True)

        pbar = tqdm(total=0, position=0)
        for i in range(len(self.Ms)):
            for j in range(len(self.Ds[i])):
                transform_file = Path(f"{self.foldername}/transforms/U{i}_{j}.pickle")
                if self.foldername and transform_file.is_file():
                    self.Us[i][j], self.transformTimes[i][j] = load_data(transform_file)
                    pbar.total = len(self.Ms) * len(self.Ds[0]) * len(self.Us[i][j])
                    pbar.update(len(self.Us[i][j]))
                else:
                    sample_file = Path(f"{self.foldername}/samples/M{i}_D{j}.pickle")
                    if self.foldername and sample_file.is_file():
                        samples = load_data(sample_file)
                        pbar.total = len(self.Ms) * len(self.Ds[0]) * len(samples)
                        pbar.update(len(samples))
                    else:
                        query_indices = self._get_qsft_query_indices(self.Ms[i], self.Ds[i][j])
                        block_length = len(query_indices[0])
                        # BUG FIX: `np.complex` was a deprecated alias removed in
                        # NumPy 1.24; the builtin `complex` is the documented
                        # replacement and yields the same complex128 dtype.
                        samples = np.zeros((len(query_indices), block_length), dtype=complex)
                        pbar.total = len(self.Ms) * len(self.Ds[0]) * len(query_indices)
                        # Large blocks are queried row-by-row to bound memory;
                        # small ones are batched into a single subsample call.
                        if block_length > 10000:
                            for k in range(len(query_indices)):
                                samples[k] = self.subsample(query_indices[k])
                                pbar.update()
                        else:
                            all_query_indices = np.concatenate(query_indices)
                            all_samples = self.subsample(all_query_indices)
                            for k in range(len(query_indices)):
                                samples[k] = all_samples[k * block_length: (k+1) * block_length]
                                pbar.update()
                        if self.foldername:
                            save_data(samples, sample_file)
                    for b in self.all_bs:
                        start_time = time.time()
                        self.Us[i][j][b] = self._compute_subtransform(samples, b)
                        self.transformTimes[i][j][b] = time.time() - start_time
                    if self.foldername:
                        save_data((self.Us[i][j], self.transformTimes[i][j]), transform_file)

    def _subsample_uniform(self):
        """
        Uniformly subsamples the signal. Useful when you are solving via LASSO
        """
        if self.foldername:
            Path(f"{self.foldername}").mkdir(exist_ok=True)

        sample_file = Path(f"{self.foldername}/signal_t.pickle")
        if self.foldername and sample_file.is_file():
            signal_t = load_data(sample_file)
        else:
            query_indices = self._get_random_query_indices(self.query_args["n_samples"])
            samples = self.subsample(query_indices)
            signal_t = dict(zip(query_indices, samples))
            if self.foldername:
                save_data(signal_t, sample_file)
        self.signal_t = signal_t

    def get_all_qary_vectors(self):
        # Built lazily and cached; used by every _get_qsft_query_indices call.
        if self.L is None:
            self.L = np.array(qary_ints(self.b, self.q))  # List of all length b qary vectors
        return self.L

    def subsample(self, query_indices):
        # Subclasses must implement the actual function evaluation.
        raise NotImplementedError

    def _get_qsft_query_indices(self, M, D_sub):
        """
        Gets the indicies to be queried for a given M and D

        Parameters
        ----------
        M
        D_sub

        Returns
        -------
        base_inds_dec : list
            The i-th element in the list is the affine space {Mx + d_i, forall x}, but in a decimal index, because it is
            more efficient, where d_i is the i-th row of D_sub.
        """
        b = M.shape[1]
        L = self.get_all_qary_vectors()
        ML = (M @ L) % self.q
        base_inds = [(ML + np.outer(d, np.ones(self.q ** b, dtype=int))) % self.q for d in D_sub]
        base_inds = np.array(base_inds)
        base_inds_dec = []
        for i in range(len(base_inds)):
            base_inds_dec.append(qary_vec_to_dec(base_inds[i], self.q))
        return base_inds_dec

    def _get_random_query_indices(self, n_samples):
        """
        Returns random indicies to be sampled.

        Parameters
        ----------
        n_samples

        Returns
        -------
        base_inds_dec
            Indicies to be queried in decimal representation
        """
        # n_samples = np.minimum(n_samples, self.N)
        base_inds_dec = [floor(random.uniform(0, 1) * self.N) for _ in range(n_samples)]
        return base_inds_dec

    def get_MDU(self, ret_num_subsample, ret_num_repeat, b, trans_times=False):
        """
        Allows the QSFT Class to get the effective Ms, Ds and Us (subsampled transforms).

        Parameters
        ----------
        ret_num_subsample
        ret_num_repeat
        b
        trans_times

        Returns
        -------
        Ms_ret
        Ds_ret
        Us_ret
        """
        Ms_ret = []
        Ds_ret = []
        Us_ret = []
        Ts_ret = []
        if ret_num_subsample <= self.num_subsample and ret_num_repeat <= self.num_repeat and b <= self.b:
            # Draw a random subset of the precomputed groups/delays.
            subsample_idx = np.random.choice(self.num_subsample, ret_num_subsample, replace=False)
            delay_idx = np.random.choice(self.num_repeat, ret_num_repeat, replace=False)
            for i in subsample_idx:
                Ms_ret.append(self.Ms[i][:, :b])
                Ds_ret.append([])
                Us_ret.append([])
                Ts_ret.append([])
                for j in delay_idx:
                    Ds_ret[-1].append(self.Ds[i][j])
                    Us_ret[-1].append(self.Us[i][j][b])
                    Ts_ret[-1].append(self.transformTimes[i][j][b])
            if trans_times:
                return Ms_ret, Ds_ret, Us_ret, Ts_ret
            else:
                return Ms_ret, Ds_ret, Us_ret
        else:
            raise ValueError("There are not enough Ms or Ds.")

    def _compute_subtransform(self, samples, b):
        # Stride the length-q^self.b sample rows down to length q^b, then
        # take the q-ary WHT of each row.
        transform = [gwht(row[::(self.q ** (self.b - b))], self.q, b) for row in samples]
        return transform

    def get_source_parity(self):
        # Number of delay rows, i.e. the source parity-check count.
        return self.Ds[0][0].shape[0]
| 11,744 | 42.5 | 122 | py |
qsft | qsft-master/qsft/qsft.py | '''
Class for computing the q-ary fourier transform of a function/signal
'''
import time
import numpy as np
from qsft.reconstruct import singleton_detection
from qsft.input_signal_subsampled import SubsampledSignal
from qsft.utils import bin_to_dec, qary_vec_to_dec, sort_qary_vecs, calc_hamming_weight, dec_to_qary_vec
from synt_exp.synt_src.synthetic_signal import SyntheticSubsampledSignal
class QSFT:
'''
Class to encapsulate the configuration of our fourier algorithm.
Attributes
---------
reconstruct_method_source : str
method of reconstruction for source coding: "identity" - default setting, should be used unless you know that all
indicies have low hamming weight
"coded" - Currently only supports prime q, if you know the max hamming
weight of less than t this option should be used and will greatly reduce
complexity. Note a source_decoder object must also be passed
reconstruct_method_channel : str
Method of reconstruction for channel coding: "mle" - exact MLE computation. Fine for small problems but not
recommended it is exponential in n
"nso" - symbol-wise recovery suitable when a repetition type code is used
"identity" - no channel coding, only use when there is no noise
num_subsamples : int
The number of different subsampling groups M used
num_repeat : int
When a repetition code is used for channel coding, (NSO) this is the number of repetitions
b : int
Size of the sub-sampling signal. In general, we need q^b = O(K) where K is the number of nonzero terms in the
transform. In practice, any q^b > K typically works well.
noise_sd : scalar
A noise parameter. Roughly, the standard deviation of the noise if it was an additive gaussian.
source_decoder : function
A function that takes in a source coded index, and returns decoded value of that index. Only needed when
reconstruct_method_source = "coded"
'''
def __init__(self, **kwargs):
self.reconstruct_method_source = kwargs.get("reconstruct_method_source")
self.reconstruct_method_channel = kwargs.get("reconstruct_method_channel")
self.num_subsample = kwargs.get("num_subsample")
self.num_repeat = kwargs.get("num_repeat")
self.b = kwargs.get("b")
self.source_decoder = kwargs.get("source_decoder", None)
def transform(self, signal, verbosity=0, report=False, timing_verbose=False, **kwargs):
    """
    Computes the q-ary Fourier transform of a signal object.
    Arguments
    ---------
    signal : Signal
        Signal object to be transformed.
    verbosity : int
        Larger numbers lead to increased number of printouts.
    timing_verbose : Boolean
        If set to True, outputs detailed information about the amount of time each transform step takes.
    report : Boolean
        If set to True this function returns optional outputs "runtime": transform_time + peeling_time,
        "n_samples": total number of samples, "locations": locations of nonzero indices, "avg_hamming_weight":
        average hamming weight of non-zero indices and "max_hamming_weight": the maximum hamming weight of a
        nonzero index.
    Returns
    -------
    gwht : dict
        Fourier transform (WHT) of the input signal.
    runtime : scalar
        transform time + peeling time.
    n_samples : int
        number of samples used in computing the transform.
    locations : list
        List of nonzero indices in the transform.
    avg_hamming_weight : scalar
        Average hamming weight of non-zero indices.
    max_hamming_weight : int
        Max hamming weight among the non-zero indices.
    """
    q = signal.q
    n = signal.n
    b = self.b
    omega = np.exp(2j * np.pi / q)
    result = []
    gwht = {}
    gwht_counts = {}
    peeling_max = q ** n
    peeled = set([])
    if isinstance(signal, SubsampledSignal):
        Ms, Ds, Us, Ts = signal.get_MDU(self.num_subsample, self.num_repeat, self.b, trans_times=True)
    else:
        raise NotImplementedError("QSFT currently only supports signals that inherit from SubsampledSignal")
    # Stack the per-repeat delay matrices / subsampled transforms of each group
    # into a single matrix per subsampling group.
    for i in range(len(Ds)):
        Us[i] = np.vstack(Us[i])
        Ds[i] = np.vstack(Ds[i])
    transform_time = np.sum(Ts)
    if timing_verbose:
        print(f"Transform Time:{transform_time}", flush=True)
    Us = np.array(Us)
    # print(Us)
    # Energy threshold separating zerotons from single/multitons; it scales
    # with the per-bin noise power (noise_sd**2 / q**b) plus a small floor.
    gamma = 0.5
    cutoff = 1e-9 + (1 + gamma) * (signal.noise_sd ** 2) / (q ** b) # noise threshold
    cutoff = kwargs.get("cutoff", cutoff)
    if verbosity >= 2:
        print("cutoff = ", cutoff, flush=True)
    # begin peeling
    # index convention for peeling: 'i' goes over all M/U/S values
    # i.e. it refers to the index of the subsampling group (zero-indexed - off by one from the paper).
    # 'j' goes over all columns of the WHT subsample matrix, going from 0 to 2 ** b - 1.
    # e.g. (i, j) = (0, 2) refers to subsampling group 0, and aliased bin 2 (10 in binary)
    # which in the example of section 3.2 is the multiton X[0110] + X[1010] + W1[10]
    # a multiton will just store the (i, j)s in a list
    # a singleton will map from the (i, j)s to the true (binary) values k.
    # e.g. the singleton (0, 0), which in the example of section 3.2 is X[0100] + W1[00]
    # would be stored as the dictionary entry (0, 0): array([0, 1, 0, 0]).
    max_iter = 15
    iter_step = 0
    cont_peeling = True
    num_peeling = 0
    peeling_start = time.time()
    if timing_verbose:
        start_time = time.time()
    while cont_peeling and num_peeling < peeling_max and iter_step < max_iter:
        iter_step += 1
        if verbosity >= 2:
            print('-----')
            print("iter ", iter_step, flush=True)
        # print('the measurement matrix')
        # for U in Us:
        #     print(U)
        # first step: find all the singletons and multitons.
        singletons = {}  # dictionary from (i, j) values to the true index of the singleton, k.
        multitons = []  # list of (i, j) values indicating where multitons are.
        for i, (U, M, D) in enumerate(zip(Us, Ms, Ds)):
            for j, col in enumerate(U.T):
                # Bins whose energy stays below the cutoff are treated as zerotons.
                if np.linalg.norm(col) ** 2 > cutoff * len(col):
                    k = singleton_detection(
                        col,
                        method_channel=self.reconstruct_method_channel,
                        method_source=self.reconstruct_method_source,
                        q=q,
                        source_parity=signal.get_source_parity(),
                        nso_subtype="nso1",
                        source_decoder=self.source_decoder
                    )
                    # Verify the singleton hypothesis: (1) the residual after removing
                    # the candidate contribution must fall below the cutoff, and
                    # (2) k must actually alias into this bin (M^T k == j in base q).
                    signature = omega ** (D @ k)
                    rho = np.dot(np.conjugate(signature), col) / D.shape[0]
                    residual = col - rho * signature
                    j_qary = dec_to_qary_vec([j], q, b).T[0]
                    bin_matching = np.all((M.T @ k) % q == j_qary)
                    if verbosity >= 5:
                        print((i, j), np.linalg.norm(residual) ** 2, cutoff * len(col))
                    if (not bin_matching) or np.linalg.norm(residual) ** 2 > cutoff * len(col):
                        multitons.append((i, j))
                        if verbosity >= 6:
                            print("We have a Multiton")
                    else:  # declare as singleton
                        singletons[(i, j)] = (k, rho)
                        if verbosity >= 3:
                            print("We have a Singleton at " + str(k))
                else:
                    if verbosity >= 6:
                        print("We have a Zeroton")
        # all singletons and multitons are discovered
        if verbosity >= 5:
            print('singletons:')
            for ston in singletons.items():
                print("\t{0} {1}\n".format(ston, bin_to_dec(ston[1][0])))
            print("Multitons : {0}\n".format(multitons))
        # if there were no multi-tons or single-tons, decrease cutoff
        if len(multitons) == 0 or len(singletons) == 0:
            cont_peeling = False
        # balls to peel
        balls_to_peel = set()
        ball_values = {}
        for (i, j) in singletons:
            k, rho = singletons[(i, j)]
            ball = tuple(k)  # Must be a hashable type
            # qary_vec_to_dec(k, q)
            balls_to_peel.add(ball)
            ball_values[ball] = rho
            result.append((k, ball_values[ball]))
        if verbosity >= 5:
            print('these balls will be peeled')
            print(balls_to_peel)
        # peel
        for ball in balls_to_peel:
            num_peeling += 1
            k = np.array(ball)[..., np.newaxis]
            # Subtract the recovered coefficient from every bin it aliases into.
            potential_peels = [(l, qary_vec_to_dec(M.T.dot(k) % q, q)[0]) for l, M in enumerate(Ms)]
            if verbosity >= 6:
                k_dec = qary_vec_to_dec(k, q)
                peeled.add(int(k_dec))
                print("Processing Singleton {0}".format(k_dec))
                print(k)
                for (l, j) in potential_peels:
                    print("The singleton appears in M({0}), U({1})".format(l, j))
            for peel in potential_peels:
                signature_in_stage = omega ** (Ds[peel[0]] @ k)
                to_subtract = ball_values[ball] * signature_in_stage.reshape(-1, 1)
                # print(np.linalg.norm(Us[peel[0]][:, peel[1]]), np.linalg.norm(to_subtract))
                if verbosity >= 6:
                    print("Peeled ball {0} off bin {1}".format(qary_vec_to_dec(k, q), peel))
                Us[peel[0]][:, peel[1]] -= np.array(to_subtract)[:, 0]
        if verbosity >= 5:
            print("Iteration Complete: The peeled indicies are:")
            print(np.sort(list(peeled)))
    loc = set()
    # A location may be recovered in several peeling rounds; average the estimates.
    for k, value in result:  # iterating over (i, j)s
        loc.add(tuple(k))
        if tuple(k) in gwht_counts:
            gwht[tuple(k)] = (gwht[tuple(k)] * gwht_counts[tuple(k)] + value) / (gwht_counts[tuple(k)] + 1)
            gwht_counts[tuple(k)] = gwht_counts[tuple(k)] + 1
        else:
            gwht[tuple(k)] = value
            gwht_counts[tuple(k)] = 1
    if timing_verbose:
        print(f"Peeling Time:{time.time() - start_time}", flush=True)
    peeling_time = time.time() - peeling_start
    if not report:
        return gwht
    else:
        n_samples = np.prod(np.shape(np.array(Us)))
        if len(loc) > 0:
            loc = list(loc)
            if kwargs.get("sort", False):
                loc = sort_qary_vecs(loc)
            avg_hamming_weight = np.mean(calc_hamming_weight(loc))
            max_hamming_weight = np.max(calc_hamming_weight(loc))
        else:
            loc, avg_hamming_weight, max_hamming_weight = [], 0, 0
        result = {
            "gwht": gwht,
            "runtime": transform_time + peeling_time,
            "n_samples": n_samples,
            "locations": loc,
            "avg_hamming_weight": avg_hamming_weight,
            "max_hamming_weight": max_hamming_weight
        }
        return result
| 12,144 | 42.067376 | 122 | py |
qsft | qsft-master/qsft/ReedSolomon.py | import galois
from galois._codes._reed_solomon import decode_jit
import numpy as np
import math
class ReedSolomon(galois.ReedSolomon):
    """
    Class that extends galois.ReedSolomon. Mainly it is needed to implement syndrome decoding.
    Attributes
    ---------
    prime_field : GF.field
        The galois field of base q
    s : int
        Extension degree: number of base-q digits used to represent one
        Reed-Solomon (extension field) symbol.
    """
    def __init__(self, n: int, t: int, q: int):
        # Choose the smallest s with n <= q**s - 1 and pad the code length up to
        # q**s - 1 (one step larger when n does not fit); ns remembers the
        # un-padded message length.
        self.prime_field = galois.GF(q)
        self.s = math.ceil(math.log(n) / math.log(q))
        nt = (q ** self.s) - 1 if n <= (q ** self.s) - 1 else (q ** (self.s + 1)) - 1
        self.s = self.s if n <= (q ** self.s) - 1 else self.s + 1
        self.ns = n
        super().__init__(n=nt, k=nt-2*t)
    def syndrome_decode(self, syndrome):
        """
        Syndrome Decoding for the RS decoder
        Parameters
        ----------
        syndrome : self.prime_field.array()
            The syndrome as represented in the prime field. (i.e., the raw output of D*k)
        Returns
        -------
        err : prime_field.array
            The decoded value of k
        n_errors : the number of decoding errors
        """
        # Invoke the JIT compiled function
        q = self.field.characteristic
        codeword = self.field.Zeros(self.n)
        # Re-pack the base-q syndrome into 2t extension-field symbols of s digits each.
        syndrome_qs = self.field.Vector([syndrome[self.s * i:self.s * (i + 1)] for i in range(2 * self.t)])
        dec_codeword, n_errors = decode_jit(self.field)(codeword[:, np.newaxis].T, syndrome_qs[:, np.newaxis].T, self.c,
                                                        self.t, int(self.field.primitive_element))
        # Only the last ns positions carry the message; the rest is padding.
        return -dec_codeword[:, -self.ns:], n_errors
    def get_delay_matrix(self):
        """
        Generates the parity check matrix of the constructed ReedSolomon code in the prime field
        Returns
        -------
        D : GF.array
            Coded delay matrix
        """
        # Expand each extension-field parity-check entry into its s base-q digits;
        # row 0 of D is left as the all-zero reference delay.
        Hvec = self.H[:, -self.ns:].vector()
        p = self.get_parity_length()
        D = self.prime_field.Zeros((p+1, self.ns))
        for i in range(self.ns):
            for j in range(2 * self.t):
                D[(self.s * j + 1):(self.s * (j + 1) + 1), i] = Hvec[j, i, :]
        return D
    def get_parity_length(self):
        """
        Returns the number of parity symbols in the code
        Returns
        -------
        P
        """
        return 2*self.t*self.s
| 2,356 | 30.426667 | 120 | py |
qsft | qsft-master/qsft/lasso.py | import numpy as np
from group_lasso import GroupLasso
from sklearn.linear_model import Ridge
import time
from group_lasso._fista import ConvergenceWarning
from sklearn.utils._testing import ignore_warnings
from qsft.utils import calc_hamming_weight, dec_to_qary_vec, qary_ints
@ignore_warnings(category=ConvergenceWarning)
@ignore_warnings(category=ConvergenceWarning)
def lasso_decode(signal, n_samples, noise_sd=0, refine=True, verbose=False, report=True):
    """
    Implements Complex LASSO via Fast Iterative Soft Thresholding (FISTA) with optional Ridge Regression refinement
    Parameters
    ---------
    signal : Signal
        Signal object to be transformed.
    n_samples : int
        number of samples used in computing the transform.
    noise_sd : scalar
        Noise standard deviation.
    refine : bool
        If True Ridge Regression refinement is used.
    verbose : bool
        If True printouts are increased.
    report : bool
        If False only the transform dictionary is returned.
    Returns
    -------
    gwht : dict
        Fourier transform (WHT) of the input signal
    runtime : scalar
        LASSO fit (+ refinement) time.
    locations : list
        List of nonzero indices in the transform.
    avg_hamming_weight : scalar
        Average hamming weight of non-zero indices.
    max_hamming_weight : int
        Max hamming weight among the non-zero indices.
    """
    q = signal.q
    n = signal.n
    N = q ** n
    # uint8 keeps the dense index matrices small when the digit values fit.
    dtype = int if (q ** 2)*n > 255 else np.uint8
    start_time = time.time()
    if verbose:
        print("Setting up LASSO problem")
    (sample_idx_dec, y) = list(signal.signal_t.keys()), list(signal.signal_t.values())
    sample_idx_dec = sample_idx_dec[:n_samples]
    y = y[:n_samples]
    # WARNING: ADD NOISE ONLY FOR SYNTHETIC SIGNALS
    if signal.is_synt:
        # Fix: np.complex was removed in NumPy 1.24; the builtin `complex`
        # is the same dtype (complex128) and is the supported spelling.
        y += np.random.normal(0, noise_sd / np.sqrt(2), size=(len(y), 2)).view(complex).reshape(len(y))
    sample_idx = dec_to_qary_vec(sample_idx_dec, q, n, dtype=dtype)
    # Real-valued embedding of the complex system: stack real and imaginary parts.
    y = np.concatenate((np.real(y), np.imag(y)))
    freqs = np.array(sample_idx).T @ qary_ints(n, q, dtype=dtype)
    X = np.exp(2j*np.pi*freqs/q).astype(np.csingle)
    X_ext = np.concatenate((np.concatenate((np.real(X), -np.imag(X)), axis=1), np.concatenate((np.imag(X), np.real(X)), axis=1)))
    # Group the real and imaginary coefficient of each frequency together.
    groups = [i % N for i in range(2*N)]
    lasso_start = time.time()
    if verbose:
        print(f"Setup Time:{time.time() - start_time}sec")
        print("Running Iterations...")
    start_time = time.time()
    lasso = GroupLasso(groups=groups,
                       group_reg=0.1,
                       l1_reg=0,
                       tol=1e-8,
                       n_iter=25,
                       supress_warning=True,
                       fit_intercept=False)
    lasso.fit(X_ext, y)
    if verbose:
        print(f"LASSO fit time:{time.time() - start_time}sec")
    w = lasso.coef_
    non_zero = np.nonzero(w[:, 0])[0]
    # Debias the support found by LASSO with a small ridge regression.
    if len(non_zero) > 0 and refine:
        ridge = Ridge(alpha=1e-2, tol=1e-8)
        ridge.fit(X_ext[:, non_zero], y)
        w[non_zero] = ridge.coef_[:, np.newaxis]
    gwht = w[0:N] + 1j*w[N:(2*N)]
    gwht = np.reshape(gwht, [q] * n)
    gwht_dict = {}
    non_zero_pos = np.array(np.nonzero(gwht)).T
    for p in non_zero_pos:
        gwht_dict[tuple(p)] = gwht[tuple(p)]
    runtime = time.time() - lasso_start
    if not report:
        return gwht_dict
    else:
        if len(non_zero_pos) > 0:
            loc = list(non_zero_pos)
            avg_hamming_weight = np.mean(calc_hamming_weight(loc))
            max_hamming_weight = np.max(calc_hamming_weight(loc))
        else:
            loc, avg_hamming_weight, max_hamming_weight = [], 0, 0
        result = {
            "gwht": gwht_dict,
            "n_samples": n_samples,
            "locations": loc,
            "runtime": runtime,
            "avg_hamming_weight": avg_hamming_weight,
            "max_hamming_weight": max_hamming_weight
        }
        return result
| 3,870 | 28.549618 | 129 | py |
qsft | qsft-master/qsft/utils.py | '''
Utility functions.
'''
import numpy as np
import scipy.fft as fft
from group_lasso import GroupLasso
from sklearn.linear_model import Ridge
import itertools
import math
import random
import time
from scipy.spatial import ConvexHull
import zlib
import pickle
import json
import matplotlib.pyplot as plt
def fwht(x):
    """Unnormalized fast Walsh-Hadamard transform, iterative butterfly form."""
    N = x.shape[0]
    if N == 1:
        return x
    out = np.array(x)
    half = 1
    while half < N:
        for start in range(0, N, 2 * half):
            top = out[start:start + half].copy()
            bottom = out[start + half:start + 2 * half].copy()
            out[start:start + half] = top + bottom
            out[start + half:start + 2 * half] = top - bottom
        half *= 2
    return out
def gwht(x, q, n):
    """Computes the GWHT of a flat length-q**n signal with forward 1/q**n scaling."""
    tensor = np.reshape(x, [q] * n)
    spectrum = fft.fftn(tensor) / (q ** n)
    return np.reshape(spectrum, [q ** n])
def gwht_tensored(x, q, n):
    """Computes the GWHT of an already-tensored signal with forward 1/q**n scaling."""
    scale = q ** n
    return fft.fftn(x) / scale
def igwht(x, q, n):
    """Computes the IGWHT of a flat length-q**n spectrum (inverse of gwht)."""
    tensor = np.reshape(x, [q] * n)
    signal = fft.ifftn(tensor) * (q ** n)
    return np.reshape(signal, [q ** n])
def igwht_tensored(x, q, n):
    """Computes the IGWHT of an already-tensored spectrum (inverse of gwht_tensored)."""
    scale = q ** n
    return fft.ifftn(x) * scale
def bin_to_dec(x):
    """Convert a binary vector (MSB first) to its integer value.

    Fix: np.int was removed in NumPy 1.24; the builtin int is the
    equivalent (platform default integer) dtype.
    """
    n = len(x)
    c = 2**(np.arange(n)[::-1])
    return c.dot(x).astype(int)
def nth_roots_unity(n):
    """Return exp(-2*pi*i*k/n) for k = 0..n-1 (clockwise n-th roots of unity)."""
    k = np.arange(n)
    return np.exp(-2j * np.pi / n * k)
def near_nth_roots(ratios, q, eps):
    """True iff every entry of `ratios` is within squared distance eps of some q-th root of unity."""
    sq_dists = np.square(np.abs(ratios[..., np.newaxis] - nth_roots_unity(q)))
    is_singleton = (sq_dists < eps).any(axis=-1).all()
    return is_singleton
def qary_vec_to_dec(x, q):
    """Interpret each column of x (most significant digit first) as a base-q integer."""
    n = x.shape[0]
    # Object dtype keeps exact arbitrary-precision Python ints for large q**n.
    place_values = np.array([q ** p for p in range(n - 1, -1, -1)], dtype=object)
    return place_values @ np.array(x, dtype=object)
def dec_to_qary_vec(x, q, n, dtype=int):
    """Convert decimal values to length-n base-q digit columns (MSB first).

    Fix: callers (e.g. lasso.py) pass `dtype=` to save memory, which the old
    signature rejected. `dtype` defaults to int, preserving old behavior.

    Parameters
    ----------
    x : iterable of non-negative ints
    q : base
    n : number of digits
    dtype : dtype of the returned array
    """
    qary_vec = []
    for i in range(n):
        # Object dtype keeps exact big-int arithmetic while extracting digits.
        qary_vec.append(np.array([a // (q ** (n - (i + 1))) for a in x], dtype=object))
        x = x - (q ** (n-(i + 1))) * qary_vec[i]
    return np.array(qary_vec, dtype=dtype)
def dec_to_bin(x, num_bits):
    """Return x as a numpy array of num_bits binary digits, MSB first."""
    assert x < 2**num_bits, "number of bits are not enough"
    bit_string = format(x, "0{}b".format(num_bits))
    return np.array([int(c) for c in bit_string])
def binary_ints(m):
    '''
    Returns a matrix whose column 'i' is dec_to_bin(i, m), for i from 0 to 2 ** m - 1.
    From https://stackoverflow.com/questions/28111051/create-a-matrix-of-binary-representation-of-numbers-in-python.
    '''
    values = np.arange(2 ** m, dtype=int).reshape(1, -1)
    bit_positions = np.arange(m - 1, -1, -1, dtype=int).reshape(-1, 1)
    return ((values >> bit_positions) & 1).astype(int)
def angle_q(x, q):
    """Quantize the phase of x onto {0, ..., q-1} (nearest multiple of 2*pi/q, clockwise)."""
    wrapped = np.angle(x) % (2 * np.pi)
    return (((wrapped // (np.pi / q)) + 1) // 2) % q  # Can be made much faster
def qary_ints(m, q, dtype=int):
    """All length-m q-ary vectors as columns, in lexicographic order."""
    combos = itertools.product(np.arange(q), repeat=m)
    return np.array(list(combos), dtype=dtype).T
def comb(n, k):
    """Binomial coefficient C(n, k).

    Uses math.comb (Python 3.8+) instead of the factorial formula: exact,
    faster, and gracefully returns 0 for k > n instead of raising from a
    negative factorial.
    """
    return math.comb(n, k)
def qary_ints_low_order(m, q, order):
    """All length-m q-ary vectors of Hamming weight <= order, as columns."""
    total = sum(math.comb(m, o) * ((q - 1) ** o) for o in range(order + 1))
    K = np.zeros((total, m))
    row = 0
    for o in range(order + 1):
        # For each support of size o, enumerate every assignment of nonzero digits.
        for pos in itertools.combinations(np.arange(m), o):
            block = (q - 1) ** o
            K[row:row + block, pos] = np.array(list(itertools.product(1 + np.arange(q - 1), repeat=o)))
            row += block
    return K.T
def base_ints(q, m):
    '''
    Returns a matrix where row 'i' is the base-q representation of i, for i from 0 to q ** m - 1.
    Covers the functionality of binary_ints when n = 2, but binary_ints is faster for that case.

    Fix: np.vstack on a bare generator was deprecated and now raises a
    TypeError on modern NumPy; arrays must be passed as a sequence.
    '''
    get_row = lambda i: np.array([int(j) for j in np.base_repr(i, base=q).zfill(m)])
    return np.vstack([get_row(i) for i in range(q ** m)])
def polymod(p1, p2, q, m):
    '''
    Computes p1 modulo p2, and takes the coefficients modulo q.
    Coefficients are highest-degree first; the result is padded to length m + 1.
    '''
    remainder = np.trim_zeros(p1, trim='f')
    divisor = np.trim_zeros(p2, trim='f')
    # Long division: repeatedly cancel the leading term of the remainder.
    while len(remainder) >= len(divisor) and len(remainder) > 0:
        remainder -= remainder[0] // divisor[0] * np.pad(divisor, (0, len(remainder) - len(divisor)))
        remainder = np.trim_zeros(remainder, trim='f')
    return np.pad(np.mod(remainder, q), (m + 1 - len(remainder), 0))
def rref(A, b, q):
    '''
    Row reduction, to easily solve finite field systems.
    '''
    # Placeholder: Gaussian elimination over GF(q) has not been implemented yet.
    raise NotImplementedError()
def sign(x):
    '''
    Replacement for np.sign that matches the convention (footnote 2 on page 11):
    maps positives and zero to 0, negatives to 1.
    '''
    raw = np.sign(x)
    return (1 - raw) // 2
def flip(x):
    '''
    Flip all bits in the binary array x.
    '''
    return x ^ 1
def random_signal_strength_model(sparsity, a, b):
    """Draw `sparsity` complex coefficients with magnitude ~ U[a, b] and uniform phase."""
    radii = np.random.uniform(a, b, sparsity)
    angles = np.random.uniform(0, 2 * np.pi, sparsity)
    return radii * np.exp(1j * angles)
def best_convex_underestimator(points):
    """Return the hull vertices forming the chain from min-x to max-x vertex.

    Walks the convex hull vertex cycle from the vertex with smallest x to the
    vertex with largest x, wrapping around the cycle when needed.
    """
    hull = ConvexHull(points)
    vertices = points[hull.vertices]
    first_point_idx = np.argmin(vertices[:, 0])
    last_point_idx = np.argmax(vertices[:, 0])

    # NOTE(review): argmax always returns an index < vertices.shape[0], so this
    # branch looks unreachable; possibly `vertices.shape[0] - 1` was intended —
    # confirm with the author before changing.
    if last_point_idx == vertices.shape[0]:
        return vertices[first_point_idx:]
    if first_point_idx < last_point_idx:
        return vertices[first_point_idx:last_point_idx+1]
    else:
        # Wrap around the end of the vertex cycle.
        return np.concatenate((vertices[first_point_idx:], vertices[:last_point_idx+1]))
def sort_qary_vecs(qary_vecs):
    """Sort q-ary vectors (rows) lexicographically, left digit most significant."""
    arr = np.array(qary_vecs)
    # lexsort treats the LAST key as primary, hence the reversed row order.
    order = np.lexsort(arr.T[::-1, :])
    return arr[order]
def calc_hamming_weight(qary_vecs):
    """Hamming weight (number of nonzero entries) of each row."""
    return np.count_nonzero(np.array(qary_vecs), axis=1)
def save_data(data, filename):
    """Pickle `data` at the highest protocol, zlib-compress it (level 9), and write to `filename`."""
    payload = zlib.compress(pickle.dumps(data, pickle.HIGHEST_PROTOCOL), 9)
    with open(filename, 'wb') as f:
        f.write(payload)
def load_data(filename):
    """Load an object previously written by save_data (zlib-compressed pickle).

    Fix: removed the unused `start = time.time()` leftover.
    """
    with open(filename, 'rb') as f:
        return pickle.loads(zlib.decompress(f.read()))
class NpEncoder(json.JSONEncoder):
    """JSON encoder that converts numpy scalars and arrays to native Python types."""

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Anything else falls through to the base class (which raises TypeError).
        return super().default(obj)
qsft | qsft-master/qsft/test_helper.py | import numpy as np
from qsft.lasso import lasso_decode
from qsft.qsft import QSFT
from qsft.utils import gwht, dec_to_qary_vec, NpEncoder
import json
from qsft.query import get_reed_solomon_dec
class TestHelper:
def __init__(self, signal_args, methods, subsampling_args, test_args, exp_dir, subsampling=True):
    """
    Load (or lazily generate) the training and test signals required by the
    requested recovery methods.

    Parameters
    ----------
    signal_args : dict
        Must contain at least "n" and "q"; forwarded to generate_signal.
    methods : iterable of str
        Any of "qsft", "qsft_binary", "qsft_coded", "lasso"; only the training
        data those methods need is loaded.
    subsampling_args : dict
        Query parameters (num_subsample, num_repeat, b, ...).
    test_args : dict
        Test-signal parameters, e.g. {"n_samples": ...}.
    exp_dir : pathlib.Path
        Experiment directory used to cache signals and config.json.
    subsampling : bool
        When False, one full signal is loaded and reused as the test signal.
    """
    self.n = signal_args["n"]
    self.q = signal_args["q"]
    self.exp_dir = exp_dir
    self.subsampling = subsampling

    # Persist the query configuration once per experiment directory.
    config_path = self.exp_dir / "config.json"
    config_exists = config_path.is_file()

    if not config_exists:
        config_dict = {"query_args": subsampling_args}
        with open(config_path, "w") as f:
            json.dump(config_dict, f, cls=NpEncoder)

    self.signal_args = signal_args
    self.subsampling_args = subsampling_args
    self.test_args = test_args

    if self.subsampling:
        # Only load the (potentially expensive) training data that the
        # requested methods actually use.
        if len(set(methods).intersection(["qsft"])) > 0:
            self.train_signal = self.load_train_data()
            # print("Quaternary Training data loaded.", flush=True)
        if len(set(methods).intersection(["qsft_binary"])) > 0:
            self.train_signal_binary = self.load_train_data_binary()
            # print("Binary Training data loaded.", flush=True)
        if len(set(methods).intersection(["lasso"])) > 0:
            self.train_signal_uniform = self.load_train_data_uniform()
            # print("Uniform Training data loaded.", flush=True)
        if len(set(methods).intersection(["qsft_coded"])) > 0:
            self.train_signal_coded = self.load_train_data_coded()
            # print("Uniform Training data loaded.", flush=True)
        self.test_signal = self.load_test_data()
        # print("Test data loaded.", flush=True)
    else:
        self.train_signal = self.load_full_data()
        self.test_signal = self.train_signal
        if any([m.startswith("binary") for m in methods]):
            raise NotImplementedError  # TODO: implement the conversion
        # print("Full data loaded.", flush=True)
def generate_signal(self, signal_args):
    # Abstract hook: subclasses construct and return a Signal for the given args.
    raise NotImplementedError
def load_train_data(self):
signal_args = self.signal_args.copy()
query_args = self.subsampling_args.copy()
query_args.update({
"subsampling_method": "qsft",
"query_method": "complex",
"delays_method_source": "identity",
"delays_method_channel": "nso"
})
signal_args["folder"] = self.exp_dir / "train"
signal_args["query_args"] = query_args
return self.generate_signal(signal_args)
def load_train_data_coded(self):
signal_args = self.signal_args.copy()
query_args = self.subsampling_args.copy()
query_args.update({
"subsampling_method": "qsft",
"query_method": "complex",
"delays_method_source": "coded",
"delays_method_channel": "nso",
"t": signal_args["t"]
})
signal_args["folder"] = self.exp_dir / "train_coded"
signal_args["query_args"] = query_args
return self.generate_signal(signal_args)
def load_train_data_binary(self):
    # Binary-expanded training data is currently disabled and returns None;
    # the commented-out code below sketches the intended q -> 2 re-encoding.
    return None
    # signal_args = self.signal_args.copy()
    # query_args = signal_args["query_args"]
    # signal_args["n_orig"] = signal_args["n"]
    # signal_args["q_orig"] = signal_args["q"]
    # factor = round(np.log(signal_args["q"]) / np.log(2))
    # signal_args["n"] = factor * signal_args["n"]
    # signal_args["q"] = 2
    # signal_args["query_args"]["subsampling_method"] = "qsft"
    # signal_args["query_args"]["b"] = factor * query_args["b"]
    # signal_args["query_args"]["all_bs"] = [factor * b for b in query_args["all_bs"]]
    # signal_args["query_args"]["num_repeat"] = max(1, query_args["num_repeat"] // factor)
    # signal_args["folder"] = self.exp_dir / "train_binary"
    # return self.generate_signal(signal_args)
def load_train_data_uniform(self):
signal_args = self.signal_args.copy()
query_args = self.subsampling_args.copy()
n_samples = query_args["num_subsample"] * (signal_args["q"] ** query_args["b"]) *\
query_args["num_repeat"] * (signal_args["n"] + 1)
query_args = {"subsampling_method": "uniform", "n_samples": n_samples}
signal_args["folder"] = self.exp_dir / "train_uniform"
signal_args["query_args"] = query_args
return self.generate_signal(signal_args)
def load_test_data(self):
signal_args = self.signal_args.copy()
(self.exp_dir / "test").mkdir(exist_ok=True)
signal_args["query_args"] = {"subsampling_method": "uniform", "n_samples": self.test_args.get("n_samples")}
signal_args["folder"] = self.exp_dir / "test"
signal_args["noise_sd"] = 0
return self.generate_signal(signal_args)
def load_full_data(self):
    # TODO: implement
    # Placeholder for the non-subsampled (full signal) path; currently unsupported.
    return None
def compute_model(self, method, model_kwargs, report=False, verbosity=0):
if method == "gwht":
return self._calculate_gwht(model_kwargs, report, verbosity)
elif method == "qsft":
return self._calculate_qsft(model_kwargs, report, verbosity)
elif method == "qsft_binary":
return self._calculate_qsft_binary(model_kwargs, report, verbosity)
elif method == "qsft_coded":
return self._calculate_qsft_coded(model_kwargs, report, verbosity)
elif method == "lasso":
return self._calculate_lasso(model_kwargs, report, verbosity)
else:
raise NotImplementedError()
def test_model(self, method, **kwargs):
if method == "qsft" or method == "qsft_coded" or method == "lasso":
return self._test_qary(**kwargs)
elif method == "qsft_binary":
return self._test_binary(**kwargs)
else:
raise NotImplementedError()
def _calculate_gwht(self, model_kwargs, report=False, verbosity=0):
    """
    Calculates all GWHT coefficients of the training signal by a dense transform.

    Fixes: the alphabet size was hard-coded to q=4 although self.q is set in
    __init__ and used everywhere else in this class; the completion printout
    was also emitted regardless of verbosity while the start printout was
    guarded.
    """
    if verbosity >= 1:
        print("Finding all GWHT coefficients")

    beta = gwht(self.train_signal, q=self.q, n=self.n)
    if verbosity >= 1:
        print("Found GWHT coefficients")
    return beta
def _calculate_qsft(self, model_kwargs, report=False, verbosity=0):
    """
    Calculates GWHT coefficients of the training signal using QSFT
    (identity source coding, NSO channel decoding).
    """
    if verbosity >= 1:
        print("Estimating GWHT coefficients with QSFT")
    qsft = QSFT(
        reconstruct_method_source="identity",
        reconstruct_method_channel="nso",
        num_subsample=model_kwargs["num_subsample"],
        num_repeat=model_kwargs["num_repeat"],
        b=model_kwargs["b"]
    )
    # noise_sd drives the peeling cutoff inside QSFT.transform.
    self.train_signal.noise_sd = model_kwargs["noise_sd"]
    out = qsft.transform(self.train_signal, verbosity=verbosity, timing_verbose=(verbosity >= 1), report=report)
    if verbosity >= 1:
        print("Found GWHT coefficients")
    return out
def _calculate_qsft_coded(self, model_kwargs, report=False, verbosity=0):
    """
    Calculates GWHT coefficients of the training signal using QSFT with
    Reed-Solomon ("coded") source decoding for low-Hamming-weight indices.
    """
    if verbosity >= 1:
        print("Estimating GWHT coefficients with QSFT")
    # Reed-Solomon decoder matching the delay matrix used to build the coded signal.
    decoder = get_reed_solomon_dec(self.signal_args["n"], self.signal_args["t"], self.signal_args["q"])
    qsft = QSFT(
        reconstruct_method_source="coded",
        reconstruct_method_channel="nso",
        num_subsample=model_kwargs["num_subsample"],
        num_repeat=model_kwargs["num_repeat"],
        b=model_kwargs["b"],
        source_decoder=decoder
    )
    # noise_sd drives the peeling cutoff inside QSFT.transform.
    self.train_signal_coded.noise_sd = model_kwargs["noise_sd"]
    out = qsft.transform(self.train_signal_coded, verbosity=verbosity, timing_verbose=(verbosity >= 1), report=report)
    if verbosity >= 1:
        print("Found GWHT coefficients")
    return out
def _calculate_qsft_binary(self, model_kwargs, report=False, verbosity=0):
    """
    Calculates GWHT coefficients of the training signal using QSFT on the
    binary-expanded signal (each q-ary symbol becomes `factor` bits).
    """
    # factor = log2(q): expansion ratio from the q-ary to the binary alphabet.
    factor = round(np.log(self.q) / np.log(2))
    if verbosity >= 1:
        print("Estimating GWHT coefficients with QSFT")
    qsft = QSFT(
        reconstruct_method_source="identity",
        reconstruct_method_channel="nso",
        num_subsample=model_kwargs["num_subsample"],
        num_repeat=max(1, model_kwargs["num_repeat"] // factor),
        b=factor * model_kwargs["b"],
    )
    # Noise is spread over `factor` binary symbols per original symbol.
    self.train_signal_binary.noise_sd = model_kwargs["noise_sd"] / factor
    out = qsft.transform(self.train_signal_binary, verbosity=verbosity, timing_verbose=(verbosity >= 1), report=report)
    if verbosity >= 1:
        print("Found GWHT coefficients")
    return out
def _calculate_lasso(self, model_kwargs, report=False, verbosity=0):
    """
    Calculates Fourier coefficients of the uniformly-sampled training signal
    using (group) LASSO; see qsft.lasso.lasso_decode for details.
    """
    if verbosity > 0:
        print("Finding Fourier coefficients with LASSO")

    # noise_sd is both stored on the signal and forwarded to the decoder.
    self.train_signal_uniform.noise_sd = model_kwargs["noise_sd"]
    out = lasso_decode(self.train_signal_uniform, model_kwargs["n_samples"], noise_sd=model_kwargs["noise_sd"])

    if verbosity > 0:
        print("Found Fourier coefficients")

    return out
def _test_qary(self, beta):
    """
    Normalized mean squared error of the recovered q-ary transform on the test signal.

    :param beta: dict mapping q-ary index tuples to Fourier coefficients
    :return: NMSE of the model predictions; 1 when beta is empty
    """
    if len(beta.keys()) > 0:
        test_signal = self.test_signal.signal_t
        (sample_idx_dec, samples) = list(test_signal.keys()), list(test_signal.values())
        batch_size = 10000

        beta_keys = list(beta.keys())
        beta_values = list(beta.values())

        y_hat = []
        # Evaluate in batches to bound the size of the dense Fourier matrix H.
        for i in range(0, len(sample_idx_dec), batch_size):
            sample_idx_dec_batch = sample_idx_dec[i:i + batch_size]
            sample_idx_batch = dec_to_qary_vec(sample_idx_dec_batch, self.q, self.n)
            freqs = np.array(sample_idx_batch).T @ np.array(beta_keys).T
            H = np.exp(2j * np.pi * freqs / self.q)
            y_hat.append(H @ np.array(beta_values))

        y_hat = np.concatenate(y_hat)

        return np.linalg.norm(y_hat - samples) ** 2 / np.linalg.norm(samples) ** 2
    else:
        return 1
def _test_binary(self, beta):
    """
    Normalized mean squared error of the recovered binary-expanded transform
    on the test signal (indices live in {0,1}^(2n)).

    :param beta: dict mapping binary index tuples to Fourier coefficients
    :return: NMSE of the model predictions; 1 when beta is empty
    """
    if len(beta.keys()) > 0:
        test_signal = self.test_signal.signal_t
        (sample_idx_dec, samples) = list(test_signal.keys()), list(test_signal.values())
        batch_size = 10000

        beta_keys = list(beta.keys())
        beta_values = list(beta.values())

        y_hat = []
        # Evaluate in batches to bound the size of the dense Fourier matrix H.
        for i in range(0, len(sample_idx_dec), batch_size):
            sample_idx_dec_batch = sample_idx_dec[i:i + batch_size]
            sample_idx_batch = dec_to_qary_vec(sample_idx_dec_batch, 2, 2 * self.n)
            freqs = np.array(sample_idx_batch).T @ np.array(beta_keys).T
            H = np.exp(2j * np.pi * freqs / 2)
            y_hat.append(H @ np.array(beta_values))

        # TODO: Write with an if clause
        y_hat = np.abs(np.concatenate(y_hat))

        return np.linalg.norm(y_hat - samples) ** 2 / np.linalg.norm(samples) ** 2
    else:
        return 1
| 12,024 | 40.608997 | 123 | py |
qsft | qsft-master/qsft/parallel_tests.py | import itertools
from multiprocessing import Pool
from tqdm import tqdm
from functools import partial
import numpy as np
import pandas as pd
from qsft.test_helper import TestHelper
tqdm = partial(tqdm, position=0, leave=True)
def _test(i):
    """
    Runs a single instance of a test
    Parameters
    ----------
    i
        index of the test (row of the module-level test_df)
    Returns
    -------
    result : dict
        Result of a test, containing a variety of important quantities
    """
    # test_df, helper_obj and method are module-level globals set by run_tests
    # so this function can be dispatched through multiprocessing.Pool.imap.
    df_row = test_df.iloc[i]
    num_subsample, num_repeat, b, noise_sd = int(df_row["num_subsample"]), int(df_row["num_repeat"]), int(df_row["b"]), df_row["noise_sd"]

    # set model arguments
    model_kwargs = {}
    model_kwargs["num_subsample"] = num_subsample
    model_kwargs["num_repeat"] = num_repeat
    model_kwargs["b"] = b
    model_kwargs["noise_sd"] = noise_sd
    # Sample budget matched to the qsft pipeline: C * q**b * P1 * (n + 1).
    model_kwargs["n_samples"] = num_subsample * (helper_obj.q ** b) * num_repeat * (helper_obj.n + 1)

    model_result = helper_obj.compute_model(method=method, model_kwargs=model_kwargs, report=True, verbosity=0)
    test_kwargs = {}
    test_kwargs["beta"] = model_result.get("gwht")
    # Drop the bulky entries before summarizing the run.
    del model_result['gwht']
    del model_result['locations']

    nmse = helper_obj.test_model(method=method, **test_kwargs)

    result = {}
    result["n"] = helper_obj.n
    result["q"] = helper_obj.q
    result["runtime"] = model_result.get("runtime")
    result["found_sparsity"] = len(test_kwargs["beta"])
    result["n_samples"] = model_result.get("n_samples")
    result["ratio_samples"] = model_result.get("n_samples") / (helper_obj.q ** helper_obj.n)
    result["max_hamming_weight"] = model_result.get("max_hamming_weight")
    result["nmse"] = nmse
    result["method"] = method

    return result
def run_tests(test_method, helper: TestHelper, iters, num_subsample_list, num_repeat_list, b_list, noise_sd_list, parallel=True):
    """
    Run an experiment over the full cross product of the given parameter lists.
    Parameters
    ----------
    test_method : str
        Recovery method name, forwarded to helper.compute_model.
    helper : TestHelper
    iters : int
        Number of repetitions per parameter combination.
    num_subsample_list, num_repeat_list, b_list, noise_sd_list : list
        Parameter grids to sweep.
    parallel : bool
        When True, tests are distributed over a multiprocessing.Pool.
    Returns
    -------
    results_df : pandas.DataFrame
        One row per test: the swept parameters joined with the _test outputs.
    """
    # _test reads these as module-level globals (required for Pool.imap dispatch).
    global test_df
    global helper_obj
    global method

    helper_obj = helper
    method = test_method
    test_params_list = list(itertools.product(num_subsample_list, num_repeat_list, b_list, noise_sd_list, range(iters)))
    test_df = pd.DataFrame(data=test_params_list, columns=["num_subsample", "num_repeat", "b", "noise_sd", "iter"])
    exp_count = len(test_df)

    pred = []

    if parallel:
        with Pool() as pool:
            # run the tests in parallel
            pbar = tqdm(pool.imap(_test, range(exp_count)), total=exp_count)
            best_result = np.inf
            for result in pbar:
                pred.append(result)
                best_result = min(best_result, result['nmse'])
                pbar.set_postfix({"min NMSE": best_result})
    else:
        pbar = tqdm(range(exp_count))
        best_result = np.inf
        for i in pbar:
            result = _test(i)
            pred.append(result)
            if result['nmse'] < best_result:
                best_result = result['nmse']
                df_row = test_df.iloc[i]
                num_subsample, num_repeat, b = int(df_row["num_subsample"]), int(df_row["num_repeat"]), int(df_row["b"])
            pbar.set_postfix({"min NMSE": best_result, "b": b, "C": num_subsample, "P1": num_repeat})

    results_df = pd.DataFrame(data=pred)
    results_df = pd.concat([test_df, results_df], axis=1, join="inner")

    return results_df
| 3,555 | 29.921739 | 138 | py |
qsft | qsft-master/qsft/__init__.py | 0 | 0 | 0 | py | |
qsft | qsft-master/qsft/query.py | '''
Methods for the query generator: specifically, to
1. generate sparsity coefficients b and subsampling matrices M
2. get the indices of a signal subsample
3. compute a subsampled and delayed Walsh-Hadamard transform.
'''
import time
import numpy as np
from qsft.utils import fwht, gwht, bin_to_dec, binary_ints, qary_ints
from qsft.ReedSolomon import ReedSolomon
def get_Ms_simple(n, b, q, num_to_get=None):
    '''
    Builds block-identity subsampling matrices: the returned list is
    Ms[0] = [... 0 I], Ms[1] = [... I 0], down to Ms[-1] = [I 0 ...],
    i.e. the identity block walks from the last b rows to the first.
    See get_Ms for the full signature.
    '''
    matrices = []
    for block in reversed(range(num_to_get)):
        M = np.zeros((n, b), dtype=np.int32)
        M[b * block : b * (block + 1), :] = np.eye(b)
        matrices.append(M)
    return matrices
def get_Ms_complex(n, b, q, num_to_get=None):
    """
    Draws each subsampling matrix i.i.d. uniformly over Z_q^{n x b}.
    See get_Ms for the full signature.
    """
    # TODO Prevent duplicate M (Not a problem for large n, m )
    return [np.random.randint(q, size=(n, b)) for _ in range(num_to_get)]
def get_Ms(n, b, q, num_to_get=None, method="simple"):
'''
Gets subsampling matrices for different sparsity levels.
Arguments
---------
n : int
log_q of the signal length (number of inputs to function).
b : int
subsampling dimension.
num_to_get : int
The number of M matrices to return.
method : str
The method to use. All methods referenced must use this signature (minus "method".)
Returns
-------
Ms : list of numpy.ndarrays, shape (n, b)
The list of subsampling matrices.
'''
if num_to_get is None:
num_to_get = max(n // b, 3)
if method == "simple" and num_to_get > n // b:
raise ValueError("When query_method is 'simple', the number of M matrices to return cannot be larger than n // b")
return {
"simple": get_Ms_simple,
"complex": get_Ms_complex
}.get(method)(n, b, q, num_to_get)
def get_D_identity(n, **kwargs):
    """Returns the (n + 1) x n delay matrix [0; I] as an integer array."""
    stacked = np.vstack((np.zeros((1, n)), np.eye(n)))
    return stacked.astype(int)
def get_D_random(n, **kwargs):
    '''
    Samples a (num_delays, n) delay matrix with i.i.d. uniform entries in Z_q.
    See get_D for the full signature.
    '''
    shape = (kwargs.get("num_delays"), n)
    return np.random.choice(kwargs.get("q"), shape)
def get_D_source_coded(n, **kwargs):
    '''
    Builds a delay matrix from a Reed-Solomon construction over GF(q).

    Expects kwargs "q" (alphabet size) and "t" (max hamming weight the code
    must handle; forwarded to ReedSolomon as t_max).
    '''
    q = kwargs.get("q")
    t_max = kwargs.get("t")
    D = ReedSolomon(n, t_max, q).get_delay_matrix()
    return np.array(D, dtype=int)
def get_D_nso(n, D_source, **kwargs):
    '''
    Repetition-code (NSO-SPRIGHT) delays: draws num_repeat random offset rows
    and returns one modulated copy of D_source per offset. See get_D.
    '''
    q = kwargs.get("q")
    offsets = get_D_random(n, q=q, num_delays=kwargs.get("num_repeat"))
    return [(offset - D_source) % q for offset in offsets]
def get_D_channel_coded(n, D, **kwargs):
    """Placeholder for channel-coded delay generation; not implemented yet."""
    raise NotImplementedError("One day this might be implemented")
def get_D_channel_identity(n, D, **kwargs):
    """No channel coding: wraps the source delays (reduced mod q) in a list."""
    modulus = kwargs.get("q")
    return [D % modulus]
def get_D(n, **kwargs):
'''
Delay generator: gets a delays matrix.
Arguments
---------
n : int
number of bits: log2 of the signal length.
Returns
-------
D : numpy.ndarray of binary ints, dimension (num_delays, n).
The delays matrix; if num_delays is not specified in kwargs, see the relevant sub-function for a default.
'''
delays_method_source = kwargs.get("delays_method_source", "random")
D = {
"random": get_D_random,
"identity": get_D_identity,
"coded": get_D_source_coded
}.get(delays_method_source)(n, **kwargs)
delays_method_channel = kwargs.get("delays_method_channel", "identity")
D = {
"nso": get_D_nso,
"coded": get_D_channel_coded,
"identity": get_D_channel_identity
}.get(delays_method_channel)(n, D, **kwargs)
return D
def subsample_indices(M, d):
    '''
    Query generator: creates indices for signal subsamples.

    Note: this is the binary-only variant — the modulus 2 is hard-coded here,
    unlike the q-ary sampling done in compute_delayed_gwht.

    Arguments
    ---------
    M : numpy.ndarray, shape (n, b)
        The subsampling matrix; takes on binary values.

    d : numpy.ndarray, shape (n,)
        The subsampling offset; takes on binary values.

    Returns
    -------
    indices : numpy.ndarray, shape (B,)
        The (decimal) subsample indices. Mostly for debugging purposes.
    '''
    # Each column of L is one length-b binary vector; M @ L lifts it to n bits
    # and d offsets it (all arithmetic mod 2).
    L = binary_ints(M.shape[1])
    inds_binary = np.mod(np.dot(M, L).T + d, 2).T
    return bin_to_dec(inds_binary)
def compute_delayed_gwht(signal, M, D, q):
    """
    Computes the Fourier transform of the delayed signal for some M and for each row in the delay matrix D

    Returns (transform, used_inds): one length-q**b GWHT per delay row of D,
    plus the sampled index vectors reshaped to (n, num_delays * q**b).
    """
    b = M.shape[1]
    L = np.array(qary_ints(b, q)) # List of all length b qary vectors
    # For each delay d: sample the signal at (M @ l + d) mod q for every l.
    base_inds = [(M @ L + np.outer(d, np.ones(q ** b, dtype=int))) % q for d in D]
    used_inds = np.swapaxes(np.array(base_inds), 0, 1)
    used_inds = np.reshape(used_inds, (used_inds.shape[0], -1))
    samples_to_transform = signal.get_time_domain(base_inds)
    transform = np.array([gwht(row, q, b) for row in samples_to_transform])
    return transform, used_inds
def get_Ms_and_Ds(n, q, **kwargs):
    """
    Generates the subsampling matrices Ms (per kwargs "query_method", "b",
    "num_subsample") and a delay matrix D shared by every M.
    Optionally prints generation timings when kwargs "timing_verbose" is set.
    """
    timing_verbose = kwargs.get("timing_verbose", False)
    tic = time.time() if timing_verbose else None
    Ms = get_Ms(n, kwargs.get("b"), q,
                method=kwargs.get("query_method"),
                num_to_get=kwargs.get("num_subsample"))
    if timing_verbose:
        print(f"M Generation:{time.time() - tic}")
        tic = time.time()
    D = get_D(n, q=q, **kwargs)
    if timing_verbose:
        print(f"D Generation:{time.time() - tic}")
    # Every M shares the same delay matrix object (as in the original loop).
    Ds = [D] * len(Ms)
    return Ms, Ds
def compute_delayed_wht(signal, M, D):
    '''
    Subsamples the signal according to M and each delay in D, and returns the
    subsampled WHTs along with the set of indices used.

    Arguments
    ---------
    signal : Signal object
        The signal to subsample, delay, and compute the WHT of.
    M : numpy.ndarray, shape (n, b)
        The subsampling matrix; takes on binary values.
    D : iterable of numpy.ndarray, shape (n,)
        The delay vectors to apply.

    Returns
    -------
    (transforms, used_inds) : (numpy.ndarray, set)
        One small WHT per delay row, and the set of sampled decimal indices.
    '''
    inds = np.array([subsample_indices(M, d) for d in D])
    used_inds = set(np.unique(inds))
    # Fix: reuse `inds` instead of calling subsample_indices a second time for
    # every delay (the original recomputed the identical array here).
    samples_to_transform = signal.signal_t[inds]  # subsample to allow small WHTs
    return np.array([fwht(row) for row in samples_to_transform]), used_inds  # compute the small WHTs
def get_reed_solomon_dec(n, t_max, q):
    """
    Gets a suitable reed solomon decoder

    Returns
    -------
    A Reed Solomon syndrome decoder for a t_max error correcting code.

    Raises
    ------
    NotImplementedError
        If q is not a prime number under 30.
    """
    # Bug fix: 15 (= 3 * 5) was previously included in this list even though
    # it is not prime, so q = 15 would have been accepted incorrectly.
    primeset = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    if q in primeset:
        return ReedSolomon(n, t_max, q).syndrome_decode
    else:
        raise NotImplementedError("q is not a prime number under 30!")
| 7,267 | 28.786885 | 122 | py |
qsft | qsft-master/qsft/reconstruct.py | '''
Methods for the reconstruction engine; specifically, to:
1. carry out singleton detection
2. get the cardinalities of all bins in a subsampling group (debugging only).
'''
import numpy as np
from qsft.utils import angle_q
def singleton_detection_noiseless(U_slice, **kwargs):
    '''
    Recovers the q-ary singleton index from noiseless measurements,
    assuming P = n + 1 delays with D = [0; I]: each phase difference
    relative to delay 0 encodes one symbol of k.

    Arguments
    ---------
    U_slice : numpy.ndarray, (P,).
        The WHT component of a subsampled bin, with element i corresponding to delay i.

    Returns
    -------
    k : numpy.ndarray
        Index of the corresponding right node, in q-ary form.
    '''
    q = kwargs.get('q')
    phases = np.angle(U_slice)
    # Phase step of 2*pi/q per unit symbol value; round to the nearest symbol.
    scaled = q * (phases[1:] - phases[0]) / (2 * np.pi)
    return np.rint(scaled).astype(int) % q
def singleton_detection_coded(k, **kwargs):
    '''
    Maps a raw symbol estimate to the decoded index using the Reed-Solomon
    syndrome decoder supplied via kwargs["source_decoder"]. Assumes the
    delays matrix was generated by the matching code.

    Arguments
    ---------
    k : numpy.ndarray
        The raw (channel-decoded) symbol vector.

    Returns
    -------
    k : numpy.ndarray
        The decoded index, as an int32 vector.
    '''
    syndrome_decode = kwargs.get('source_decoder')
    decoded = syndrome_decode(list(k))
    return np.array(decoded[0][0, :], dtype=np.int32)
def singleton_detection_mle(U_slice, **kwargs):
    '''
    MLE singleton search under noise: for each candidate index, fit the best
    scalar coefficient to its delay signature and keep the candidate whose
    residual against the observed bin is smallest.

    Arguments
    ---------
    U_slice : numpy.ndarray, (P,).
        The WHT component of a subsampled bin, with element i corresponding to delay i.
    kwargs["selection"] : numpy.ndarray
        Candidate singleton indices (decimal preimage of the bin).
    kwargs["S_slice"] : numpy.ndarray, (P, len(selection))
        Delay signatures for each candidate.

    Returns
    -------
    (k, signature) : the winning candidate index and its signature column.
    '''
    selection = kwargs.get("selection")
    S_slice = kwargs.get("S_slice")
    P = S_slice.shape[0]
    # Least-squares coefficient for every candidate signature at once.
    alphas = np.dot(np.conjugate(S_slice).T, U_slice) / P
    residuals = np.linalg.norm(U_slice - (alphas * S_slice).T, ord=2, axis=1)
    best = np.argmin(residuals)
    return selection[best], S_slice[:, best]
def singleton_detection_nso(U_slice, **kwargs):
    """
    Singleton detection via the NSO algorithm.

    nso1 - multiplying by conjugate ("soft decoding")
    nso2 - quantized angle ("hard decoding")

    Raises
    ------
    ValueError
        If kwargs["nso_subtype"] is neither "nso1" nor "nso2" (previously an
        unknown subtype silently returned None).
    """
    nso_type = kwargs.get("nso_subtype", "nso1")
    if nso_type == "nso1":
        return singleton_detection_nso1(U_slice, **kwargs)
    if nso_type == "nso2":
        return singleton_detection_nso2(U_slice, **kwargs)
    raise ValueError(f"Unknown NSO subtype: {nso_type!r}")
def singleton_detection_nso1(U_slice, **kwargs):
    """
    Soft-decision NSO decoding: each q-ary symbol is estimated from the
    average of (zero-offset repetitions) * conj(i-th offset repetitions),
    whose phase is then snapped to the nearest q-th root of unity.
    Expects kwargs "q" and "source_parity" (p1, the repetition stride).
    """
    q = kwargs.get("q")
    p1 = kwargs.get("source_parity")
    roots = 2 * np.pi * np.arange(q + 1) / q
    reference = U_slice[0::p1]
    symbols = []
    for offset in range(1, p1):
        rotated = reference * np.conjugate(U_slice[offset::p1])
        phase = np.angle(np.mean(rotated)) % (2 * np.pi)
        symbols.append(int(np.abs(roots - phase).argmin() % q))
    return np.array(symbols, dtype=int)
def singleton_detection_nso2(U_slice, **kwargs):
    """
    Hard-decision NSO decoding: quantize every repetition's phase to a q-ary
    symbol first (via angle_q), then average the symbol differences against
    the zero-offset reference and round.
    Expects kwargs "q" and "source_parity" (p1, the repetition stride).
    """
    q = kwargs.get("q")
    p1 = kwargs.get("source_parity")
    ref_symbols = angle_q(U_slice[0::p1], q)
    decoded = np.zeros((p1 - 1,), dtype=int)
    for offset in range(1, p1):
        offset_symbols = angle_q(U_slice[offset::p1], q)
        decoded[offset - 1] = np.round(np.mean((ref_symbols - offset_symbols) % q)) % q
    return decoded
def singleton_detection(U_slice, method_source="identity", method_channel="identity", **kwargs):
"""
Recovers the index value k of a singleton.
Parameters
----------
U_slice : np.array
The relevant subsampled fourier transform to be considered
method_source
method of reconstruction for source coding: "identity" - default setting, should be used unless you know that all
indicies have low hamming weight
"coded" - Currently only supports prime q, if you know the max hamming
weight of less than t this option should be used and will greatly reduce
complexity. Note a source_decoder object must also be passed
method_channel
Method of reconstruction for channel coding: "mle" - exact MLE computation. Fine for small problems but not
recommended it is exponential in n
"nso" - symbol-wise recovery suitable when a repetition type code is used
"identity" - no channel coding, only use when there is no noise
Returns
-------
Value of the computed singleton index k
"""
# Split detection into two phases, channel and source decoding
k = {
"mle": singleton_detection_mle,
"nso": singleton_detection_nso,
"identity": singleton_detection_noiseless,
}.get(method_channel)(U_slice, **kwargs)
if method_source != "identity":
k = {
"coded": singleton_detection_coded
}.get(method_source)(k, **kwargs)
return k | 5,932 | 34.315476 | 131 | py |
qsft | qsft-master/qsft/input_signal.py | """
A shell Class for common interface to an input signal. This class should be extended when implemented
"""
import numpy as np
from qsft.utils import gwht_tensored, igwht_tensored, save_data, load_data
from pathlib import Path
class Signal:
    """
    Class to encapsulate a time domain signal over [q]^n and its q-ary
    Fourier transform.

    Attributes
    ---------
    n : int
        number of bits: number of function inputs.
    q : int
        alphabet size per input; the signal length is N = q ** n.
    noise_sd : scalar
        The standard deviation of the added noise.
    signal_t
        Time domain representation of the signal.
    signal_w
        Frequency domain representation of the signal.
    calc_w
        If True and signal_w is not included, it is computed based on signal_t.
    foldername
        If signal_t is not provided, the signal will be read from
        {foldername}/signal_t.pickle. If signal_t is provided (or freshly
        sampled), a copy of the signal is written to
        {foldername}/signal_t.pickle.
    """
    def __init__(self, **kwargs):
        self._set_params(**kwargs)
        self._init_signal()

    def _set_params(self, **kwargs):
        # Pull all configuration out of kwargs; missing keys default to None.
        self.n = kwargs.get("n")
        self.q = kwargs.get("q")
        self.noise_sd = kwargs.get("noise_sd", 0)
        self.N = self.q ** self.n
        self.signal_t = kwargs.get("signal_t")
        self.signal_w = kwargs.get("signal_w")
        self.calc_w = kwargs.get("calc_w", False)
        self.foldername = kwargs.get("folder")
        self.is_synt = False

    def _init_signal(self):
        # Load the cached time-domain signal if one exists; otherwise sample
        # it (subclass responsibility) and cache it to disk.
        if self.signal_t is None:
            signal_path = Path(f"{self.foldername}/signal_t.pickle")
            if signal_path.is_file():
                self.signal_t = load_data(Path(f"{self.foldername}/signal_t.pickle"))
            else:
                self.sample()
                Path(f"{self.foldername}").mkdir(exist_ok=True)
                save_data(self.signal_t, Path(f"{self.foldername}/signal_t.pickle"))
        if self.calc_w and self.signal_w is None:
            self.signal_w = gwht_tensored(self.signal_t, self.q, self.n)
            # Round-trip sanity check: inverse transform should recover
            # signal_t (prints only; a failed check is not raised).
            if np.linalg.norm(self.signal_t - igwht_tensored(self.signal_w, self.q, self.n)) / self.N < 1e-5:
                print("verified transform")

    def sample(self):
        # Subclasses must implement the actual signal generation.
        raise NotImplementedError

    def shape(self):
        '''
        shape: returns the shape of the time domain signal.

        Returns
        -------
        shape of time domain signal: (q,) * n
        '''
        return tuple([self.q for i in range(self.n)])

    def get_time_domain(self, inds):
        '''
        Arguments
        ---------
        inds: tuple of 1d n-element arrays that represent the indicies to be
            queried; a 3-d array is treated as a batch of such index sets.

        Returns
        -------
        indices : linear output of the queried indicies (a list of arrays for
            a batch, a single array otherwise; None for other ranks)
        '''
        inds = np.array(inds)
        if len(inds.shape) == 3:
            return [self.signal_t[tuple(inds)] for inds in inds]
        elif len(inds.shape) == 2:
            return self.signal_t[tuple(inds)]
| 3,034 | 30.28866 | 118 | py |
sort | sort-master/sort.py | """
SORT: A Simple, Online and Realtime Tracker
Copyright (C) 2016-2020 Alex Bewley alex@bewley.ai
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function
import os
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from skimage import io
import glob
import time
import argparse
from filterpy.kalman import KalmanFilter
np.random.seed(0)
def linear_assignment(cost_matrix):
  """
  Solves the min-cost assignment problem for `cost_matrix`, returning an
  array of matched (row, col) index pairs. Uses lap.lapjv when available,
  otherwise falls back to scipy's Hungarian implementation.
  """
  try:
    import lap
    _, row_to_col, col_to_row = lap.lapjv(cost_matrix, extend_cost=True)
    pairs = [[col_to_row[j], j] for j in row_to_col if j >= 0]
    return np.array(pairs) #
  except ImportError:
    from scipy.optimize import linear_sum_assignment
    rows, cols = linear_sum_assignment(cost_matrix)
    return np.array(list(zip(rows, cols)))
def iou_batch(bb_test, bb_gt):
  """
  From SORT: computes the pairwise IOU between two sets of boxes in the form
  [x1,y1,x2,y2]; returns a (len(bb_test), len(bb_gt)) matrix.
  """
  gt = np.expand_dims(bb_gt, 0)
  det = np.expand_dims(bb_test, 1)
  # Intersection rectangle, clamped at zero when the boxes do not overlap.
  inter_w = np.maximum(0., np.minimum(det[..., 2], gt[..., 2]) - np.maximum(det[..., 0], gt[..., 0]))
  inter_h = np.maximum(0., np.minimum(det[..., 3], gt[..., 3]) - np.maximum(det[..., 1], gt[..., 1]))
  intersection = inter_w * inter_h
  area_det = (det[..., 2] - det[..., 0]) * (det[..., 3] - det[..., 1])
  area_gt = (gt[..., 2] - gt[..., 0]) * (gt[..., 3] - gt[..., 1])
  return intersection / (area_det + area_gt - intersection)
def convert_bbox_to_z(bbox):
  """
  Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
  [x,y,s,r] as a (4, 1) column vector, where (x, y) is the centre of the
  box, s is the scale/area and r is the aspect ratio w/h.
  """
  width = bbox[2] - bbox[0]
  height = bbox[3] - bbox[1]
  centre_x = bbox[0] + width/2.
  centre_y = bbox[1] + height/2.
  area = width * height    #scale is just area
  ratio = width / float(height)
  return np.array([centre_x, centre_y, area, ratio]).reshape((4, 1))
def convert_x_to_bbox(x,score=None):
  """
  Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
  [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right.
  If a detection score is given it is appended as a fifth column.
  """
  w = np.sqrt(x[2] * x[3])
  h = x[2] / w
  # Fix: use 'is None' rather than '== None' — the latter is non-idiomatic
  # and is evaluated elementwise (ambiguously) for array-valued scores.
  if(score is None):
    return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4))
  else:
    return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5))
class KalmanBoxTracker(object):
  """
  This class represents the internal state of individual tracked objects observed as bbox.
  The 7-d state is [u, v, s, r, du, dv, ds]: box centre (u, v), scale/area s,
  aspect ratio r, plus velocities for u, v and s (r is modelled as constant).
  """
  # Class-wide counter used to hand out unique track ids.
  count = 0
  def __init__(self,bbox):
    """
    Initialises a tracker using initial bounding box.
    """
    #define constant velocity model
    self.kf = KalmanFilter(dim_x=7, dim_z=4)
    # F: each position component is advanced by its velocity; H: only the
    # first four state components (u, v, s, r) are observed.
    self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0],  [0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]])
    self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]])
    # Inflate measurement noise on s and r; they are less reliable.
    self.kf.R[2:,2:] *= 10.
    self.kf.P[4:,4:] *= 1000. #give high uncertainty to the unobservable initial velocities
    self.kf.P *= 10.
    self.kf.Q[-1,-1] *= 0.01
    self.kf.Q[4:,4:] *= 0.01

    self.kf.x[:4] = convert_bbox_to_z(bbox)
    self.time_since_update = 0
    self.id = KalmanBoxTracker.count
    KalmanBoxTracker.count += 1
    self.history = []        # predicted boxes since the last observation
    self.hits = 0            # total number of matched detections
    self.hit_streak = 0      # consecutive frames with a matched detection
    self.age = 0             # total number of predict() calls

  def update(self,bbox):
    """
    Updates the state vector with observed bbox.
    """
    self.time_since_update = 0
    self.history = []
    self.hits += 1
    self.hit_streak += 1
    self.kf.update(convert_bbox_to_z(bbox))

  def predict(self):
    """
    Advances the state vector and returns the predicted bounding box estimate.
    """
    # Prevent the scale from going non-positive: zero its velocity first.
    if((self.kf.x[6]+self.kf.x[2])<=0):
      self.kf.x[6] *= 0.0
    self.kf.predict()
    self.age += 1
    if(self.time_since_update>0):
      self.hit_streak = 0
    self.time_since_update += 1
    self.history.append(convert_x_to_bbox(self.kf.x))
    return self.history[-1]

  def get_state(self):
    """
    Returns the current bounding box estimate.
    """
    return convert_x_to_bbox(self.kf.x)
def associate_detections_to_trackers(detections,trackers,iou_threshold = 0.3):
  """
  Assigns detections to tracked object (both represented as bounding boxes)

  Returns 3 lists of matches, unmatched_detections and unmatched_trackers
  (matches as an (N, 2) array of [detection_idx, tracker_idx] pairs).
  """
  if(len(trackers)==0):
    return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int)

  iou_matrix = iou_batch(detections, trackers)

  if min(iou_matrix.shape) > 0:
    a = (iou_matrix > iou_threshold).astype(np.int32)
    # If each detection/tracker has at most one candidate above threshold,
    # the assignment is trivial; otherwise solve it optimally (maximizing
    # total IOU by minimizing its negation).
    if a.sum(1).max() == 1 and a.sum(0).max() == 1:
        matched_indices = np.stack(np.where(a), axis=1)
    else:
      matched_indices = linear_assignment(-iou_matrix)
  else:
    matched_indices = np.empty(shape=(0,2))

  # Collect detections and trackers that received no assignment at all.
  unmatched_detections = []
  for d, det in enumerate(detections):
    if(d not in matched_indices[:,0]):
      unmatched_detections.append(d)
  unmatched_trackers = []
  for t, trk in enumerate(trackers):
    if(t not in matched_indices[:,1]):
      unmatched_trackers.append(t)

  #filter out matched with low IOU
  matches = []
  for m in matched_indices:
    if(iou_matrix[m[0], m[1]]<iou_threshold):
      unmatched_detections.append(m[0])
      unmatched_trackers.append(m[1])
    else:
      matches.append(m.reshape(1,2))
  if(len(matches)==0):
    matches = np.empty((0,2),dtype=int)
  else:
    matches = np.concatenate(matches,axis=0)

  return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
class Sort(object):
  # Online multi-object tracker: maintains one KalmanBoxTracker per target
  # and associates incoming detections to them frame by frame.
  def __init__(self, max_age=1, min_hits=3, iou_threshold=0.3):
    """
    Sets key parameters for SORT
    """
    self.max_age = max_age            # frames a track may go unmatched before removal
    self.min_hits = min_hits          # matched frames required before a track is reported
    self.iou_threshold = iou_threshold
    self.trackers = []
    self.frame_count = 0

  def update(self, dets=np.empty((0, 5))):
    """
    Params:
      dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
    Requires: this method must be called once for each frame even with empty detections (use np.empty((0, 5)) for frames without detections).
    Returns the a similar array, where the last column is the object ID.

    NOTE: The number of objects returned may differ from the number of detections provided.
    """
    self.frame_count += 1
    # get predicted locations from existing trackers.
    trks = np.zeros((len(self.trackers), 5))
    to_del = []
    ret = []
    for t, trk in enumerate(trks):
      pos = self.trackers[t].predict()[0]
      trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
      if np.any(np.isnan(pos)):
        to_del.append(t)
    # Drop rows (and trackers) whose prediction diverged to NaN.
    trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
    for t in reversed(to_del):
      self.trackers.pop(t)
    matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets,trks, self.iou_threshold)

    # update matched trackers with assigned detections
    for m in matched:
      self.trackers[m[1]].update(dets[m[0], :])

    # create and initialise new trackers for unmatched detections
    for i in unmatched_dets:
        trk = KalmanBoxTracker(dets[i,:])
        self.trackers.append(trk)
    i = len(self.trackers)
    for trk in reversed(self.trackers):
        d = trk.get_state()[0]
        # Report a track only if it was just updated and is either past its
        # probation (min_hits) or we are still in the first few frames.
        if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
          ret.append(np.concatenate((d,[trk.id+1])).reshape(1,-1)) # +1 as MOT benchmark requires positive
        i -= 1
        # remove dead tracklet
        if(trk.time_since_update > self.max_age):
          self.trackers.pop(i)
    if(len(ret)>0):
      return np.concatenate(ret)
    return np.empty((0,5))
def parse_args():
    """Builds and evaluates the command-line interface for the SORT demo."""
    parser = argparse.ArgumentParser(description='SORT demo')
    parser.add_argument('--display', dest='display', action='store_true',
                        help='Display online tracker output (slow) [False]')
    parser.add_argument("--seq_path", type=str, default='data',
                        help="Path to detections.")
    parser.add_argument("--phase", type=str, default='train',
                        help="Subdirectory in seq_path.")
    parser.add_argument("--max_age", type=int, default=1,
                        help="Maximum number of frames to keep alive a track without associated detections.")
    parser.add_argument("--min_hits", type=int, default=3,
                        help="Minimum number of associated detections before track is initialised.")
    parser.add_argument("--iou_threshold", type=float, default=0.3,
                        help="Minimum IOU for match.")
    return parser.parse_args()
if __name__ == '__main__':
  # Demo entry point: runs SORT over every det.txt sequence under
  # {seq_path}/{phase}/*/det/ and writes MOT-format results to output/.
  # all train
  args = parse_args()
  display = args.display
  phase = args.phase
  total_time = 0.0
  total_frames = 0
  colours = np.random.rand(32, 3) #used only for display
  if(display):
    if not os.path.exists('mot_benchmark'):
      print('\n\tERROR: mot_benchmark link not found!\n\n    Create a symbolic link to the MOT benchmark\n    (https://motchallenge.net/data/2D_MOT_2015/#download). E.g.:\n\n    $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n')
      exit()
    plt.ion()
    fig = plt.figure()
    ax1 = fig.add_subplot(111, aspect='equal')

  if not os.path.exists('output'):
    os.makedirs('output')
  pattern = os.path.join(args.seq_path, phase, '*', 'det', 'det.txt')
  for seq_dets_fn in glob.glob(pattern):
    mot_tracker = Sort(max_age=args.max_age, 
                       min_hits=args.min_hits,
                       iou_threshold=args.iou_threshold) #create instance of the SORT tracker
    seq_dets = np.loadtxt(seq_dets_fn, delimiter=',')
    # Sequence name is the wildcard component of the matched path.
    seq = seq_dets_fn[pattern.find('*'):].split(os.path.sep)[0]
    
    with open(os.path.join('output', '%s.txt'%(seq)),'w') as out_file:
      print("Processing %s."%(seq))
      for frame in range(int(seq_dets[:,0].max())):
        frame += 1 #detection and frame numbers begin at 1
        dets = seq_dets[seq_dets[:, 0]==frame, 2:7]
        dets[:, 2:4] += dets[:, 0:2] #convert to [x1,y1,w,h] to [x1,y1,x2,y2]
        total_frames += 1

        if(display):
          fn = os.path.join('mot_benchmark', phase, seq, 'img1', '%06d.jpg'%(frame))
          im =io.imread(fn)
          ax1.imshow(im)
          plt.title(seq + ' Tracked Targets')

        # Only the tracker update is timed (drawing/IO excluded).
        start_time = time.time()
        trackers = mot_tracker.update(dets)
        cycle_time = time.time() - start_time
        total_time += cycle_time

        for d in trackers:
          print('%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1'%(frame,d[4],d[0],d[1],d[2]-d[0],d[3]-d[1]),file=out_file)
          if(display):
            d = d.astype(np.int32)
            ax1.add_patch(patches.Rectangle((d[0],d[1]),d[2]-d[0],d[3]-d[1],fill=False,lw=3,ec=colours[d[4]%32,:]))

        if(display):
          fig.canvas.flush_events()
          plt.draw()
          ax1.cla()

  # NOTE(review): this divides by total_time, which would raise
  # ZeroDivisionError if no frames were processed — confirm data is present.
  print("Total Tracking took: %.3f seconds for %d frames or %.1f FPS" % (total_time, total_frames, total_frames / total_time))

  if(display):
    print("Note: to get real runtime results run without the option: --display")
| 11,739 | 34.468278 | 242 | py |
cogcn | cogcn-main/cogcn/utils.py | import pickle as pkl
import os
import networkx as nx
import numpy as np
import scipy.sparse as sp
import torch
import pandas as pd
from sklearn.metrics import roc_auc_score, average_precision_score
from matplotlib import pyplot as plt
def load_data_cma(dataset):
    """
    Loads a dataset directory containing struct.csv (adjacency matrix,
    headerless) and content.csv (node features, headerless).

    Returns (adj, features): adj as a scipy sparse adjacency matrix,
    features as a torch.FloatTensor.
    """
    adj_file = os.path.join(dataset, "struct.csv")
    feat_file = os.path.join(dataset, "content.csv")

    # Load and create adjacency matrix
    adj = pd.read_csv(adj_file, header=None)
    adj = adj.values
    # NOTE(review): nx.from_numpy_matrix builds an *undirected* Graph and was
    # removed in networkx 3.0 (from_numpy_array there) — confirm the intended
    # networkx version and that struct.csv is symmetric.
    adj = nx.from_numpy_matrix(adj)
    adj = nx.adjacency_matrix(adj)
    print("Adjacency matrix shape:", adj.shape)

    # Load features
    feat = pd.read_csv(feat_file, header=None)
    feat = feat.values
    features = torch.FloatTensor(feat)
    print("Features shape:", features.shape)

    return adj, features
def parse_index_file(filename):
    """Reads a file with one integer per line into a list of ints."""
    # Fix: the original iterated over a bare open() and never closed the
    # file handle; the context manager guarantees it is released.
    index = []
    with open(filename) as handle:
        for line in handle:
            index.append(int(line.strip()))
    return index
def sparse_to_tuple(sparse_mx):
    """Decomposes a scipy sparse matrix into (coords, values, shape)."""
    mx = sparse_mx if sp.isspmatrix_coo(sparse_mx) else sparse_mx.tocoo()
    # coords: one (row, col) pair per stored entry.
    coords = np.vstack((mx.row, mx.col)).transpose()
    return coords, mx.data, mx.shape
def preprocess_graph(adj):
    """
    Symmetrically normalizes the adjacency matrix with self-loops,
    D^{-1/2} (A + I) D^{-1/2}, and returns it as a torch sparse tensor.
    """
    adj_with_loops = sp.coo_matrix(adj) + sp.eye(adj.shape[0])
    degrees = np.array(adj_with_loops.sum(1))
    inv_sqrt_deg = sp.diags(np.power(degrees, -0.5).flatten())
    normalized = adj_with_loops.dot(inv_sqrt_deg).transpose().dot(inv_sqrt_deg).tocoo()
    return sparse_mx_to_torch_sparse_tensor(normalized)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse float tensor."""
    coo = sparse_mx.tocoo().astype(np.float32)
    row_col = np.vstack((coo.row, coo.col)).astype(np.int64)
    return torch.sparse.FloatTensor(
        torch.from_numpy(row_col),
        torch.from_numpy(coo.data),
        torch.Size(coo.shape),
    )
def plot_losses(losses, epoch_mark):
    # Plots the first four loss components (columns of `losses`) in a 2x2
    # grid. The red/green vertical lines mark epoch_mark and 2 * epoch_mark
    # (the training-phase boundaries).
    for i in range(4):
        plt.subplot(2,2,i+1)
        plt.plot(losses[:,i])
        plt.axvline(epoch_mark, color='r')
        plt.axvline(epoch_mark*2, color='g')
    plt.show()
| 2,240 | 29.69863 | 95 | py |
cogcn | cogcn-main/cogcn/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from layers import GraphConvolution
class GCNAE(nn.Module):
    """
    Graph-convolutional autoencoder: two GCN layers encode node features into
    a latent space, and two mirrored GCN layers decode back to feature space.
    """
    def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout):
        super(GCNAE, self).__init__()
        # Encoder: input -> hidden1 (ReLU) -> hidden2 (linear).
        self.encgc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout, act=F.relu)
        self.encgc2 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
        # Decoder: hidden2 -> hidden1 (ReLU) -> input (linear).
        self.decgc1 = GraphConvolution(hidden_dim2, hidden_dim1, dropout, act=F.relu)
        self.decgc2 = GraphConvolution(hidden_dim1, input_feat_dim, dropout, act=lambda x: x)

    def encode(self, x, adj):
        # x: node features; adj: (normalized) adjacency shared by all layers.
        hidden1 = self.encgc1(x, adj)
        hidden2 = self.encgc2(hidden1, adj)
        return hidden2

    def decode(self, hidden, adj):
        hidden1 = self.decgc1(hidden, adj)
        recon = self.decgc2(hidden1, adj)
        return recon

    def forward(self, x, adj):
        # Returns (reconstruction, embedding).
        enc = self.encode(x, adj)
        dec = self.decode(enc, adj)
        return dec, enc
class InnerProductDecoder(nn.Module):
    """Decoder for using inner product for prediction."""

    def __init__(self, dropout, act=torch.sigmoid):
        super(InnerProductDecoder, self).__init__()
        self.dropout = dropout
        self.act = act

    def forward(self, z):
        """Applies dropout to the embeddings, then returns act(z @ z.T)."""
        dropped = F.dropout(z, self.dropout, training=self.training)
        return self.act(torch.mm(dropped, dropped.t()))
| 1,415 | 31.930233 | 93 | py |
cogcn | cogcn-main/cogcn/kmeans.py | import sys
import torch
import torch.nn as nn
from sklearn.cluster import KMeans
class Clustering(object):
    """
    KMeans wrapper used during training: fits clusters on detached embeddings
    and exposes a differentiable distance-to-center loss on the live tensor.
    """
    def __init__(self, K, n_init=5, max_iter=250):
        self.K = K                # number of clusters
        self.n_init = n_init      # number of KMeans restarts
        self.max_iter = max_iter  # KMeans iteration cap
        self.u = None             # dict: cluster label -> center tensor
        self.M = None             # per-point cluster labels

    def cluster(self, embed):
        """Fits KMeans on the (detached) embeddings; caches labels and centers."""
        embed_np = embed.detach().cpu().numpy()
        clustering = KMeans(n_clusters=self.K, n_init=self.n_init, max_iter=self.max_iter)
        clustering.fit(embed_np)
        self.M = clustering.labels_
        self.u = self._compute_centers(self.M, embed_np)

    def get_loss(self, embed):
        """
        Sum of squared distances from each embedding to its assigned center.
        Vectorized over all points (resolves the previous per-point Python
        loop flagged by the old TODO); returns a shape-(1,) tensor as before.
        """
        if len(self.M) == 0:
            return torch.zeros(1)
        centers = torch.stack([self.u[label] for label in self.M])
        diff = embed - centers
        return torch.sum(diff * diff).reshape(1)

    def get_membership(self):
        """Returns the cached per-point cluster labels."""
        return self.M

    def _compute_centers(self, labels, embed):
        """
        sklearn kmeans may not give accurate cluster centers in some cases (see doc), so we compute ourselves
        """
        clusters = {}
        for i, lbl in enumerate(labels):
            if clusters.get(lbl) is None:
                clusters[lbl] = []
            clusters[lbl].append(torch.FloatTensor(embed[i]))
        centers = {}
        for k in clusters:
            all_embed = torch.stack(clusters[k])
            centers[k] = torch.mean(all_embed, 0)
        return centers
cogcn | cogcn-main/cogcn/layers.py | import torch
import torch.nn.functional as F
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907:
    computes act(adj @ (dropout(input) @ W)).
    """

    def __init__(self, in_features, out_features, dropout=0., act=F.relu):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.dropout = dropout
        self.act = act
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        self.reset_parameters()

    def reset_parameters(self):
        """Glorot/Xavier-uniform initialization of the weight matrix."""
        torch.nn.init.xavier_uniform_(self.weight)

    def forward(self, input, adj):
        dropped = F.dropout(input, self.dropout, self.training)
        projected = torch.mm(dropped, self.weight)
        aggregated = torch.spmm(adj, projected)
        return self.act(aggregated)

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'
| 1,110 | 30.742857 | 77 | py |
cogcn | cogcn-main/cogcn/train.py | from __future__ import division
from __future__ import print_function
import argparse
import time
import sys
import os
import pickle
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
from torch import optim
from matplotlib import pyplot as plt
from model import GCNAE
from optimizer import compute_structure_loss, compute_attribute_loss, update_o1, update_o2
from utils import load_data_cma, preprocess_graph, plot_losses
from kmeans import Clustering
# Command-line configuration for COGCN training. Note: parsed at import time
# (script style), so importing this module consumes sys.argv.
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='gcn_vae', help="models used")
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
# 5000 epochs is where the pretrain loss starts hovering around 5.0 - 6.0
parser.add_argument('--k', type=int, default=6, help='Number of clusters.')
parser.add_argument('--preepochs', type=int, default=350, help='Number of epochs to pre-train.')
parser.add_argument('--clusepochs', type=int, default=1, help='Number of epochs to pre-train for clustering.')
parser.add_argument('--epochs', type=int, default=300, help='Number of epochs to train.')
parser.add_argument('--hidden1', type=int, default=64, help='Number of units in hidden layer 1.')
parser.add_argument('--hidden2', type=int, default=32, help='Number of units in hidden layer 2.')
parser.add_argument('--lr', type=float, default=0.01, help='Initial learning rate.')
# lambda1/lambda2 are renormalized to sum to 1 before use in training.
parser.add_argument('--lambda1', type=float, default=0.1, help='Structure loss weight.')
parser.add_argument('--lambda2', type=float, default=0.1, help='Attribute loss weight.')
parser.add_argument('--lambda3', type=float, default=0.8, help='Clustering loss weight.')
parser.add_argument('--dropout', type=float, default=0.2, help='Dropout rate (1 - keep probability).')
parser.add_argument('--dataset-str', type=str, default=None, help='type of dataset.')
parser.add_argument('--outfile', type=str, default='embeddings', help='output embeddings file.')

args = parser.parse_args()
def gae_for(args):
    """Train the GCN autoencoder with outlier-aware structure/attribute losses.

    Pipeline: (1) pretrain the embedding network on the structure and
    attribute reconstruction losses, (2) initialize k-means clusters on the
    pretrained embeddings, (3) jointly train with the clustering loss while
    re-estimating per-node outlier scores each epoch, and (4) dump the
    embeddings, outlier scores and cluster memberships as pickle files under
    ``args.dataset_str``.
    """
    print("Using {} dataset".format(args.dataset_str))
    adj, features = load_data_cma(args.dataset_str)
    n_nodes, feat_dim = features.shape

    # Some preprocessing
    adj_norm = preprocess_graph(adj)

    model = GCNAE(feat_dim, args.hidden1, args.hidden2, args.dropout)
    # Single optimizer instance. The scheduler below is bound to this exact
    # object; the previous version created a second optim.Adam afterwards,
    # which silently detached the scheduler from the optimizer actually used
    # for training (the learning rate never decayed).
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    gamma = 0.98
    schedule_update_interval = 400
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=gamma)

    # initialize all outlier scores with equal values summing to 1
    init_value = [1. / n_nodes] * n_nodes
    o_1 = torch.FloatTensor(init_value)  # structural outlier score per node
    o_2 = torch.FloatTensor(init_value)  # attribute outlier score per node

    lossfn = nn.MSELoss(reduction='none')

    # Normalize the two reconstruction-loss weights for the pretrain phase.
    lambda1 = args.lambda1 / (args.lambda1 + args.lambda2)
    lambda2 = args.lambda2 / (args.lambda1 + args.lambda2)

    kmeans = Clustering(args.k)

    # PRETRAIN ON STRUCTURE AND ATTRIBUTE LOSSES, NO OUTLIER LOSS
    for epoch in range(args.preepochs):
        model.train()
        optimizer.zero_grad()
        recon, embed = model(features, adj_norm)
        structure_loss = compute_structure_loss(adj_norm, embed, o_1)
        attribute_loss = compute_attribute_loss(lossfn, features, recon, o_2)
        loss = lambda1 * structure_loss + lambda2 * attribute_loss
        # Update the functions F and G (embedding network)
        loss.backward()
        cur_loss = loss.item()
        optimizer.step()
        if (epoch + 1) % 100 == 0:
            print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(cur_loss), "lr=", "{:.5f}".format(scheduler.get_last_lr()[0]))

    # Initialize clusters from the pretrained embeddings.
    recon, embed = model(features, adj_norm)
    kmeans.cluster(embed)

    # TRAIN ON ALL THREE LOSSES WITH OUTLIER UPDATES
    for epoch in range(args.epochs):
        # Re-estimate the outlier scores from the previous reconstruction.
        o_1 = update_o1(adj_norm, embed)
        o_2 = update_o2(features, recon)

        if (epoch + 1) % schedule_update_interval == 0:
            scheduler.step()

        model.train()
        optimizer.zero_grad()
        recon, embed = model(features, adj_norm)
        kmeans.cluster(embed)
        structure_loss = compute_structure_loss(adj_norm, embed, o_1)
        attribute_loss = compute_attribute_loss(lossfn, features, recon, o_2)
        clustering_loss = kmeans.get_loss(embed)
        loss = (args.lambda1 * structure_loss) + (args.lambda2 * attribute_loss) + (args.lambda3 * clustering_loss)
        # Update the functions F and G (embedding network)
        loss.backward()
        cur_loss = loss.item()
        optimizer.step()
        if (epoch + 1) % 100 == 0:
            print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(cur_loss), "lr=", "{:.5f}".format(scheduler.get_last_lr()[0]))

    # Extract embeddings and persist all artifacts.
    adj_norm = preprocess_graph(adj)
    recon, embed = model(features, adj_norm)
    embed = embed.detach().cpu().numpy()
    embfile = os.path.join(args.dataset_str, args.outfile + ".pkl")
    with open(embfile, "wb") as f:
        pickle.dump(embed, f)

    o_1 = o_1.detach().cpu().numpy()
    o_2 = o_2.detach().cpu().numpy()
    outlfile = os.path.join(args.dataset_str, args.outfile + "_outliers.pkl")
    with open(outlfile, "wb") as f:
        pickle.dump([o_1, o_2], f)

    membfile = os.path.join(args.dataset_str, args.outfile + "_membership.pkl")
    with open(membfile, "wb") as f:
        pickle.dump(kmeans.get_membership(), f)
# Entry point: train using the command-line configuration parsed above.
if __name__ == '__main__':
    gae_for(args)
| 5,980 | 39.412162 | 157 | py |
cogcn | cogcn-main/cogcn/optimizer.py | import sys
import torch
import torch.nn as nn
import torch.nn.modules.loss
import torch.nn.functional as F
from sklearn.cluster import KMeans
def compute_attribute_loss(lossfn, features, recon, outlier_wt):
    """Outlier-weighted attribute reconstruction loss.

    The element-wise reconstruction error is summed per node, and each
    node's error is scaled by log(1 / outlier_wt) so that suspected
    outliers (large outlier_wt) contribute less to the total.
    """
    per_node_error = lossfn(features, recon).sum(dim=1)
    node_weights = torch.log(outlier_wt.reciprocal())
    return (node_weights * per_node_error).sum()
def compute_structure_loss(adj, embed, outlier_wt):
    """Outlier-weighted structure reconstruction loss.

    Reconstructs the adjacency matrix as the Gram matrix of the
    embeddings (F(x_i) . F(x_j)), computes the squared residual per row,
    and scales each node's error by log(1 / outlier_wt).
    """
    gram = embed @ embed.t()
    residual = adj.to_dense() - gram
    row_error = residual.pow(2).sum(dim=1)
    node_weights = torch.log(outlier_wt.reciprocal())
    return (node_weights * row_error).sum()
def update_o1(adj, embed):
    """Recompute structural outlier scores.

    Each node's score is its squared adjacency-reconstruction error
    (rows of A - F F^T), normalized so that the scores sum to 1.
    """
    emb = embed.data  # detach from the autograd graph
    residual = adj.to_dense() - emb @ emb.t()
    per_node_error = residual.pow(2).sum(dim=1)
    return per_node_error / per_node_error.sum()
def update_o2(features, recon):
    """Recompute attribute outlier scores.

    Each node's score is its squared feature-reconstruction error
    (x - F(G(x)))^2 summed over features, normalized to sum to 1.
    """
    residual = features.data - recon.data  # detached tensors
    per_node_error = residual.pow(2).sum(dim=1)
    return per_node_error / per_node_error.sum()
| 1,768 | 23.915493 | 64 | py |
deepglo | deepglo-master/DeepGLO/DeepGLO.py | from __future__ import print_function
import torch, h5py
import numpy as np
from scipy.io import loadmat
from torch.nn.utils import weight_norm
import torch.nn as nn
import torch.optim as optim
import numpy as np
# import matplotlib
from torch.autograd import Variable
import sys
import itertools
import torch.nn.functional as F
import copy
import os
import gc
from DeepGLO.data_loader import *
from sklearn.decomposition import NMF
use_cuda = True #### Assuming you have a GPU ######
from DeepGLO.utilities import *
from DeepGLO.LocalModel import *
from DeepGLO.metrics import *
import copy
import random
import pickle
np.random.seed(111)
torch.cuda.manual_seed(111)
torch.manual_seed(111)
random.seed(111)
def get_model(A, y, lamb=0):
    """Ridge-regularized least squares via the normal equations.

    Solves (A^T A + lamb * I) x = A^T y and returns the full
    ``np.linalg.lstsq`` result tuple (solution, residuals, rank,
    singular values).
    """
    gram = A.T @ A + lamb * np.identity(A.shape[1])
    rhs = A.T @ y
    return np.linalg.lstsq(gram, rhs, rcond=None)
class DeepGLO(object):
def __init__(
self,
Ymat,
vbsize=150,
hbsize=256,
num_channels_X=[32, 32, 32, 32, 1],
num_channels_Y=[32, 32, 32, 32, 1],
kernel_size=7,
dropout=0.2,
rank=64,
kernel_size_Y=7,
lr=0.0005,
val_len=24,
end_index=20000,
normalize=False,
start_date="2016-1-1",
freq="H",
covariates=None,
use_time=True,
dti=None,
svd=False,
period=None,
forward_cov=False,
):
self.start_date = start_date
self.freq = freq
self.covariates = covariates
self.use_time = use_time
self.dti = dti
self.dropout = dropout
self.period = period
self.forward_cov = forward_cov
self.Xseq = TemporalConvNet(
num_inputs=1,
num_channels=num_channels_X,
kernel_size=kernel_size,
dropout=dropout,
init=True,
)
if normalize:
self.s = np.std(Ymat[:, 0:end_index], axis=1)
# self.s[self.s == 0] = 1.0
self.s += 1.0
self.m = np.mean(Ymat[:, 0:end_index], axis=1)
self.Ymat = (Ymat - self.m[:, None]) / self.s[:, None]
self.mini = np.abs(np.min(self.Ymat))
self.Ymat = self.Ymat + self.mini
else:
self.Ymat = Ymat
self.normalize = normalize
n, T = self.Ymat.shape
t0 = end_index + 1
if t0 > T:
self.Ymat = np.hstack([self.Ymat, self.Ymat[:, -1].reshape(-1, 1)])
if svd:
indices = np.random.choice(self.Ymat.shape[0], rank, replace=False)
X = self.Ymat[indices, 0:t0]
mX = np.std(X, axis=1)
mX[mX == 0] = 1.0
X = X / mX[:, None]
Ft = get_model(X.transpose(), self.Ymat[:, 0:t0].transpose(), lamb=0.1)
F = Ft[0].transpose()
self.X = torch.from_numpy(X).float()
self.F = torch.from_numpy(F).float()
else:
R = torch.zeros(rank, t0).float()
X = torch.normal(R, 0.1)
C = torch.zeros(n, rank).float()
F = torch.normal(C, 0.1)
self.X = X.float()
self.F = F.float()
self.vbsize = vbsize
self.hbsize = hbsize
self.num_channels_X = num_channels_X
self.num_channels_Y = num_channels_Y
self.kernel_size_Y = kernel_size_Y
self.rank = rank
self.kernel_size = kernel_size
self.lr = lr
self.val_len = val_len
self.end_index = end_index
self.D = data_loader(
Ymat=self.Ymat,
vbsize=vbsize,
hbsize=hbsize,
end_index=end_index,
val_len=val_len,
shuffle=False,
)
def tensor2d_to_temporal(self, T):
T = T.view(1, T.size(0), T.size(1))
T = T.transpose(0, 1)
return T
def temporal_to_tensor2d(self, T):
T = T.view(T.size(0), T.size(2))
return T
def calculate_newX_loss_vanilla(self, Xn, Fn, Yn, Xf, alpha):
Yout = torch.mm(Fn, Xn)
cr1 = nn.L1Loss()
cr2 = nn.MSELoss()
l1 = cr2(Yout, Yn) / torch.mean(Yn ** 2)
l2 = cr2(Xn, Xf) / torch.mean(Xf ** 2)
return (1 - alpha) * l1 + alpha * l2
def recover_future_X(
self,
last_step,
future,
cpu=True,
num_epochs=50,
alpha=0.5,
vanilla=True,
tol=1e-7,
):
rg = max(
1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels_X) - 1),
1 + 2 * (self.kernel_size_Y - 1) * 2 ** (len(self.num_channels_Y) - 1),
)
X = self.X[:, last_step - rg : last_step]
X = self.tensor2d_to_temporal(X)
outX = self.predict_future(model=self.Xseq, inp=X, future=future, cpu=cpu)
outX = self.temporal_to_tensor2d(outX)
Xf = outX[:, -future::]
Yn = self.Ymat[:, last_step : last_step + future]
Yn = torch.from_numpy(Yn).float()
if cpu:
self.Xseq = self.Xseq.cpu()
else:
Yn = Yn.cuda()
Xf = Xf.cuda()
Fn = self.F
Xt = torch.zeros(self.rank, future).float()
Xn = torch.normal(Xt, 0.1)
if not cpu:
Xn = Xn.cuda()
lprev = 0
for i in range(num_epochs):
Xn = Variable(Xn, requires_grad=True)
optim_Xn = optim.Adam(params=[Xn], lr=self.lr)
optim_Xn.zero_grad()
loss = self.calculate_newX_loss_vanilla(
Xn, Fn.detach(), Yn.detach(), Xf.detach(), alpha
)
loss.backward()
optim_Xn.step()
# Xn = torch.clamp(Xn.detach(), min=0)
if np.abs(lprev - loss.cpu().item()) <= tol:
break
if i % 1000 == 0:
print("Recovery Loss: " + str(loss.cpu().item()))
lprev = loss.cpu().item()
self.Xseq = self.Xseq.cuda()
return Xn.detach()
def step_factX_loss(self, inp, out, last_vindex, last_hindex, reg=0.0):
Xout = self.X[:, last_hindex + 1 : last_hindex + 1 + out.size(2)]
Fout = self.F[self.D.I[last_vindex : last_vindex + out.size(0)], :]
if use_cuda:
Xout = Xout.cuda()
Fout = Fout.cuda()
Xout = Variable(Xout, requires_grad=True)
out = self.temporal_to_tensor2d(out)
optim_X = optim.Adam(params=[Xout], lr=self.lr)
Hout = torch.matmul(Fout, Xout)
optim_X.zero_grad()
loss = torch.mean(torch.pow(Hout - out.detach(), 2))
l2 = torch.mean(torch.pow(Xout, 2))
r = loss.detach() / l2.detach()
loss = loss + r * reg * l2
loss.backward()
optim_X.step()
# Xout = torch.clamp(Xout, min=0)
self.X[:, last_hindex + 1 : last_hindex + 1 + inp.size(2)] = Xout.cpu().detach()
return loss
def step_factF_loss(self, inp, out, last_vindex, last_hindex, reg=0.0):
Xout = self.X[:, last_hindex + 1 : last_hindex + 1 + out.size(2)]
Fout = self.F[self.D.I[last_vindex : last_vindex + out.size(0)], :]
if use_cuda:
Xout = Xout.cuda()
Fout = Fout.cuda()
Fout = Variable(Fout, requires_grad=True)
optim_F = optim.Adam(params=[Fout], lr=self.lr)
out = self.temporal_to_tensor2d(out)
Hout = torch.matmul(Fout, Xout)
optim_F.zero_grad()
loss = torch.mean(torch.pow(Hout - out.detach(), 2))
l2 = torch.mean(torch.pow(Fout, 2))
r = loss.detach() / l2.detach()
loss = loss + r * reg * l2
loss.backward()
optim_F.step()
self.F[
self.D.I[last_vindex : last_vindex + inp.size(0)], :
] = Fout.cpu().detach()
return loss
def step_temporal_loss_X(self, inp, last_vindex, last_hindex):
Xin = self.X[:, last_hindex : last_hindex + inp.size(2)]
Xout = self.X[:, last_hindex + 1 : last_hindex + 1 + inp.size(2)]
for p in self.Xseq.parameters():
p.requires_grad = False
if use_cuda:
Xin = Xin.cuda()
Xout = Xout.cuda()
Xin = Variable(Xin, requires_grad=True)
Xout = Variable(Xout, requires_grad=True)
optim_out = optim.Adam(params=[Xout], lr=self.lr)
Xin = self.tensor2d_to_temporal(Xin)
Xout = self.tensor2d_to_temporal(Xout)
hatX = self.Xseq(Xin)
optim_out.zero_grad()
loss = torch.mean(torch.pow(Xout - hatX.detach(), 2))
loss.backward()
optim_out.step()
# Xout = torch.clamp(Xout, min=0)
temp = self.temporal_to_tensor2d(Xout.detach())
self.X[:, last_hindex + 1 : last_hindex + 1 + inp.size(2)] = temp
return loss
    def predict_future_batch(self, model, inp, future=10, cpu=True):
        """Autoregressively roll out `future` steps for one batch of series.

        Repeatedly feeds the growing sequence back into `model` and appends
        the model's last time step as the next forecast, so the returned
        array has inp.size(2) + future time steps per series.

        Args:
            model: sequence model mapping (n, c, t) -> (n, c, t) tensors.
            inp: (n, c, t) input tensor; assumes a single channel (c == 1)
                for the final 2-D reshape — TODO confirm with callers.
            future: number of steps to roll out.
            cpu: run on CPU when True, otherwise move input to CUDA.

        Returns:
            A detached 2-D numpy array (n, t + future), not a tensor.
        """
        if cpu:
            model = model.cpu()
            inp = inp.cpu()
        else:
            inp = inp.cuda()
        out = model(inp)
        # Take the model's final time step as the one-step-ahead forecast
        # and append it to the input window.
        output = out[:, :, out.size(2) - 1].view(out.size(0), out.size(1), 1)
        out = torch.cat((inp, output), dim=2)
        torch.cuda.empty_cache()
        for i in range(future - 1):
            # Feed the extended sequence back in; each pass adds one step.
            inp = out
            out = model(inp)
            output = out[:, :, out.size(2) - 1].view(out.size(0), out.size(1), 1)
            out = torch.cat((inp, output), dim=2)
            torch.cuda.empty_cache()
        out = self.temporal_to_tensor2d(out)
        out = np.array(out.cpu().detach())
        return out
    def predict_future(self, model, inp, future=10, cpu=True, bsize=90):
        """Roll out `future` steps for all series, in chunks of `bsize`.

        Splits the n series into vertical chunks, forecasts each chunk via
        predict_future_batch, stacks the results, and returns them as a
        (n, 1, t + future) float tensor.
        """
        n = inp.size(0)
        inp = inp.cpu()
        # Chunk boundaries: [0, bsize, 2*bsize, ..., n].
        ids = np.arange(0, n, bsize)
        ids = list(ids) + [n]
        out = self.predict_future_batch(model, inp[ids[0] : ids[1], :, :], future, cpu)
        torch.cuda.empty_cache()
        for i in range(1, len(ids) - 1):
            temp = self.predict_future_batch(
                model, inp[ids[i] : ids[i + 1], :, :], future, cpu
            )
            torch.cuda.empty_cache()
            # predict_future_batch returns numpy arrays; stack vertically.
            out = np.vstack([out, temp])
        out = torch.from_numpy(out).float()
        return self.tensor2d_to_temporal(out)
def predict_global(
self, ind, last_step=100, future=10, cpu=False, normalize=False, bsize=90
):
if ind is None:
ind = np.arange(self.Ymat.shape[0])
if cpu:
self.Xseq = self.Xseq.cpu()
self.Xseq = self.Xseq.eval()
rg = max(
1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels_X) - 1),
1 + 2 * (self.kernel_size_Y - 1) * 2 ** (len(self.num_channels_Y) - 1),
)
X = self.X[:, last_step - rg : last_step]
n = X.size(0)
T = X.size(1)
X = self.tensor2d_to_temporal(X)
outX = self.predict_future(
model=self.Xseq, inp=X, future=future, cpu=cpu, bsize=bsize
)
outX = self.temporal_to_tensor2d(outX)
F = self.F
Y = torch.matmul(F, outX)
Y = np.array(Y[ind, :].cpu().detach())
self.Xseq = self.Xseq.cuda()
del F
torch.cuda.empty_cache()
for p in self.Xseq.parameters():
p.requires_grad = True
if normalize:
Y = Y - self.mini
Y = Y * self.s[ind, None] + self.m[ind, None]
return Y
else:
return Y
def train_Xseq(self, Ymat, num_epochs=20, early_stop=False, tenacity=3):
seq = self.Xseq
num_channels = self.num_channels_X
kernel_size = self.kernel_size
vbsize = min(self.vbsize, Ymat.shape[0] / 2)
for p in seq.parameters():
p.requires_grad = True
TC = LocalModel(
Ymat=Ymat,
num_inputs=1,
num_channels=num_channels,
kernel_size=kernel_size,
vbsize=vbsize,
hbsize=self.hbsize,
normalize=False,
end_index=self.end_index - self.val_len,
val_len=self.val_len,
lr=self.lr,
num_epochs=num_epochs,
)
TC.train_model(early_stop=early_stop, tenacity=tenacity)
self.Xseq = TC.seq
def train_factors(
self,
reg_X=0.0,
reg_F=0.0,
mod=5,
early_stop=False,
tenacity=3,
ind=None,
seed=False,
):
self.D.epoch = 0
self.D.vindex = 0
self.D.hindex = 0
if use_cuda:
self.Xseq = self.Xseq.cuda()
for p in self.Xseq.parameters():
p.requires_grad = True
l_F = [0.0]
l_X = [0.0]
l_X_temporal = [0.0]
iter_count = 0
vae = float("inf")
scount = 0
Xbest = self.X.clone()
Fbest = self.F.clone()
while self.D.epoch < self.num_epochs:
last_epoch = self.D.epoch
last_vindex = self.D.vindex
last_hindex = self.D.hindex
inp, out, vindex, hindex = self.D.next_batch(option=1)
if use_cuda:
inp = inp.float().cuda()
out = out.float().cuda()
if iter_count % mod >= 0:
l1 = self.step_factF_loss(inp, out, last_vindex, last_hindex, reg=reg_F)
l_F = l_F + [l1.cpu().item()]
if iter_count % mod >= 0:
l1 = self.step_factX_loss(inp, out, last_vindex, last_hindex, reg=reg_X)
l_X = l_X + [l1.cpu().item()]
if seed == False and iter_count % mod == 1:
l2 = self.step_temporal_loss_X(inp, last_vindex, last_hindex)
l_X_temporal = l_X_temporal + [l2.cpu().item()]
iter_count = iter_count + 1
if self.D.epoch > last_epoch:
print("Entering Epoch# ", self.D.epoch)
print("Factorization Loss F: ", np.mean(l_F))
print("Factorization Loss X: ", np.mean(l_X))
print("Temporal Loss X: ", np.mean(l_X_temporal))
if ind is None:
ind = np.arange(self.Ymat.shape[0])
else:
ind = ind
inp = self.predict_global(
ind,
last_step=self.end_index - self.val_len,
future=self.val_len,
cpu=False,
)
R = self.Ymat[ind, self.end_index - self.val_len : self.end_index]
S = inp[:, -self.val_len : :]
ve = np.abs(R - S).mean() / np.abs(R).mean()
print("Validation Loss (Global): ", ve)
if ve <= vae:
vae = ve
scount = 0
Xbest = self.X.clone()
Fbest = self.F.clone()
# Xseqbest = TemporalConvNet(
# num_inputs=1,
# num_channels=self.num_channels_X,
# kernel_size=self.kernel_size,
# dropout=self.dropout,
# )
# Xseqbest.load_state_dict(self.Xseq.state_dict())
Xseqbest = pickle.loads(pickle.dumps(self.Xseq))
else:
scount += 1
if scount > tenacity and early_stop:
print("Early Stopped")
self.X = Xbest
self.F = Fbest
self.Xseq = Xseqbest
if use_cuda:
self.Xseq = self.Xseq.cuda()
break
def create_Ycov(self):
t0 = self.end_index + 1
self.D.epoch = 0
self.D.vindex = 0
self.D.hindex = 0
Ycov = copy.deepcopy(self.Ymat[:, 0:t0])
Ymat_now = self.Ymat[:, 0:t0]
if use_cuda:
self.Xseq = self.Xseq.cuda()
self.Xseq = self.Xseq.eval()
while self.D.epoch < 1:
last_epoch = self.D.epoch
last_vindex = self.D.vindex
last_hindex = self.D.hindex
inp, out, vindex, hindex = self.D.next_batch(option=1)
if use_cuda:
inp = inp.cuda()
Xin = self.tensor2d_to_temporal(self.X[:, last_hindex : last_hindex + inp.size(2)]).cuda()
Xout = self.temporal_to_tensor2d(self.Xseq(Xin)).cpu()
Fout = self.F[self.D.I[last_vindex : last_vindex + out.size(0)], :]
output = np.array(torch.matmul(Fout, Xout).detach())
Ycov[
last_vindex : last_vindex + output.shape[0],
last_hindex + 1 : last_hindex + 1 + output.shape[1],
] = output
for p in self.Xseq.parameters():
p.requires_grad = True
if self.period is None:
Ycov_wc = np.zeros(shape=[Ycov.shape[0], 1, Ycov.shape[1]])
if self.forward_cov:
Ycov_wc[:, 0, 0:-1] = Ycov[:, 1::]
else:
Ycov_wc[:, 0, :] = Ycov
else:
Ycov_wc = np.zeros(shape=[Ycov.shape[0], 2, Ycov.shape[1]])
if self.forward_cov:
Ycov_wc[:, 0, 0:-1] = Ycov[:, 1::]
else:
Ycov_wc[:, 0, :] = Ycov
Ycov_wc[:, 1, self.period - 1 : :] = Ymat_now[:, 0 : -(self.period - 1)]
return Ycov_wc
def train_Yseq(self, num_epochs=20, early_stop=False, tenacity=7):
Ycov = self.create_Ycov()
self.Yseq = LocalModel(
self.Ymat,
num_inputs=1,
num_channels=self.num_channels_Y,
kernel_size=self.kernel_size_Y,
dropout=self.dropout,
vbsize=self.vbsize,
hbsize=self.hbsize,
num_epochs=num_epochs,
lr=self.lr,
val_len=self.val_len,
test=True,
end_index=self.end_index - self.val_len,
normalize=False,
start_date=self.start_date,
freq=self.freq,
covariates=self.covariates,
use_time=self.use_time,
dti=self.dti,
Ycov=Ycov,
)
self.Yseq.train_model(early_stop=early_stop, tenacity=tenacity)
def train_all_models(
self, init_epochs=100, alt_iters=10, y_iters=200, tenacity=7, mod=5
):
print("Initializing Factors.....")
self.num_epochs = init_epochs
self.train_factors()
if alt_iters % 2 == 1:
alt_iters += 1
print("Starting Alternate Training.....")
for i in range(1, alt_iters):
if i % 2 == 0:
print(
"--------------------------------------------Training Factors. Iter#: "
+ str(i)
+ "-------------------------------------------------------"
)
self.num_epochs = 300
self.train_factors(
seed=False, early_stop=True, tenacity=tenacity, mod=mod
)
else:
print(
"--------------------------------------------Training Local Model. Iter#: "
+ str(i)
+ "-------------------------------------------------------"
)
self.num_epochs = 300
T = np.array(self.X.cpu().detach())
self.train_Xseq(
Ymat=T,
num_epochs=self.num_epochs,
early_stop=True,
tenacity=tenacity,
)
self.num_epochs = y_iters
self.train_Yseq(num_epochs=y_iters, early_stop=True, tenacity=tenacity)
def predict(
self, ind=None, last_step=100, future=10, cpu=False, normalize=False, bsize=90
):
if ind is None:
ind = np.arange(self.Ymat.shape[0])
if cpu:
self.Xseq = self.Xseq.cpu()
self.Yseq.seq = self.Yseq.seq.eval()
self.Xseq = self.Xseq.eval()
rg = max(
1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels_X) - 1),
1 + 2 * (self.kernel_size_Y - 1) * 2 ** (len(self.num_channels_Y) - 1),
)
covs = self.Yseq.covariates[:, last_step - rg : last_step + future]
# print(covs.shape)
yc = self.predict_global(
ind=ind,
last_step=last_step,
future=future,
cpu=cpu,
normalize=False,
bsize=bsize,
)
if self.period is None:
ycovs = np.zeros(shape=[yc.shape[0], 1, yc.shape[1]])
if self.forward_cov:
ycovs[:, 0, 0:-1] = yc[:, 1::]
else:
ycovs[:, 0, :] = yc
else:
ycovs = np.zeros(shape=[yc.shape[0], 2, yc.shape[1]])
if self.forward_cov:
ycovs[:, 0, 0:-1] = yc[:, 1::]
else:
ycovs[:, 0, :] = yc
period = self.period
while last_step + future - (period - 1) > last_step + 1:
period += self.period
ycovs[:, 1, period - 1 : :] = self.Ymat[
:, last_step - rg : last_step + future - (period - 1)
] ### this seems like we are looking ahead, but it will not use the last coordinate, which is the only new point added
# print(ycovs.shape)
Y = self.Yseq.predict_future(
data_in=self.Ymat[ind, last_step - rg : last_step],
covariates=covs,
ycovs=ycovs,
future=future,
cpu=cpu,
bsize=bsize,
normalize=False,
)
if normalize:
Y = Y - self.mini
Y = Y * self.s[ind, None] + self.m[ind, None]
return Y
else:
return Y
def rolling_validation(self, Ymat, tau=24, n=7, bsize=90, cpu=False, alpha=0.3):
prevX = self.X.clone()
prev_index = self.end_index
out = self.predict(
last_step=self.end_index,
future=tau,
bsize=bsize,
cpu=cpu,
normalize=self.normalize,
)
out_global = self.predict_global(
np.arange(self.Ymat.shape[0]),
last_step=self.end_index,
future=tau,
cpu=cpu,
normalize=self.normalize,
bsize=bsize,
)
predicted_values = []
actual_values = []
predicted_values_global = []
S = out[:, -tau::]
S_g = out_global[:, -tau::]
predicted_values += [S]
predicted_values_global += [S_g]
R = Ymat[:, self.end_index : self.end_index + tau]
actual_values += [R]
print("Current window wape: " + str(wape(S, R)))
self.Xseq = self.Xseq.eval()
self.Yseq.seq = self.Yseq.seq.eval()
for i in range(n - 1):
Xn = self.recover_future_X(
last_step=self.end_index + 1,
future=tau,
num_epochs=100000,
alpha=alpha,
vanilla=True,
cpu=True,
)
self.X = torch.cat([self.X, Xn], dim=1)
self.end_index += tau
out = self.predict(
last_step=self.end_index,
future=tau,
bsize=bsize,
cpu=cpu,
normalize=self.normalize,
)
out_global = self.predict_global(
np.arange(self.Ymat.shape[0]),
last_step=self.end_index,
future=tau,
cpu=cpu,
normalize=self.normalize,
bsize=bsize,
)
S = out[:, -tau::]
S_g = out_global[:, -tau::]
predicted_values += [S]
predicted_values_global += [S_g]
R = Ymat[:, self.end_index : self.end_index + tau]
actual_values += [R]
print("Current window wape: " + str(wape(S, R)))
predicted = np.hstack(predicted_values)
predicted_global = np.hstack(predicted_values_global)
actual = np.hstack(actual_values)
dic = {}
dic["wape"] = wape(predicted, actual)
dic["mape"] = mape(predicted, actual)
dic["smape"] = smape(predicted, actual)
dic["mae"] = np.abs(predicted - actual).mean()
dic["rmse"] = np.sqrt(((predicted - actual) ** 2).mean())
dic["nrmse"] = dic["rmse"] / np.sqrt(((actual) ** 2).mean())
dic["wape_global"] = wape(predicted_global, actual)
dic["mape_global"] = mape(predicted_global, actual)
dic["smape_global"] = smape(predicted_global, actual)
dic["mae_global"] = np.abs(predicted_global - actual).mean()
dic["rmse_global"] = np.sqrt(((predicted_global - actual) ** 2).mean())
dic["nrmse_global"] = dic["rmse"] / np.sqrt(((actual) ** 2).mean())
baseline = Ymat[:, Ymat.shape[1] - n * tau - tau : Ymat.shape[1] - tau]
dic["baseline_wape"] = wape(baseline, actual)
dic["baseline_mape"] = mape(baseline, actual)
dic["baseline_smape"] = smape(baseline, actual)
self.X = prevX
self.end_index = prev_index
return dic
| 25,258 | 32.235526 | 131 | py |
deepglo | deepglo-master/DeepGLO/data_loader.py | import torch, h5py
import numpy as np
from scipy.io import loadmat
import torch.nn as nn
import torch.optim as optim
import numpy as np
# import matplotlib
from torch.autograd import Variable
import itertools
from sklearn.preprocessing import normalize
import datetime
import json
import os, sys
import pandas as pd
import pyarrow.parquet as pq
from DeepGLO.Ftree import *
class data_loader(object):
"""
Data Loader Class for DeepGLO
"""
def __init__(
self,
Ymat,
covariates=None,
Ycov=None,
vbsize=200,
hbsize=100,
end_index=20000,
val_len=30,
shuffle=False,
):
"""
Argeuments:
Ymat: time-series matrix n*T
covariates: global covariates common for all time series r*T, where r is the number of covariates
Ycov: per time-series covariates n*l*T, l such covariates per time-series
All of the above arguments are numpy arrays
vbsize: vertical batch size
hbsize: horizontal batch size
end_index: training and validation set is only from 0:end_index
val_len: validation length. The last 'val_len' time-points for every time-series is the validation set
shuffle: data is shuffles if True (this is deprecated and set to False)
"""
n, T = Ymat.shape
self.vindex = 0
self.hindex = 0
self.epoch = 0
self.vbsize = vbsize
self.hbsize = hbsize
self.Ymat = Ymat
self.val_len = val_len
self.end_index = end_index
self.val_index = np.random.randint(0, n - self.vbsize - 5)
self.shuffle = shuffle
self.I = np.array(range(n))
self.covariates = covariates
self.Ycov = Ycov
def next_batch(self, option=1):
"""
Arguments:
option = 1 means data is returned as pytorch tensor of shape nd*cd*td where nd is vbsize, hb is hsize and cd is the number os channels (depends on covariates)
option = 0 is deprecated
Returns:
inp: input batch
out: one shifted output batch
vindex: strating vertical index of input batch
hindex: starting horizontal index of input batch
"""
n, T = self.Ymat.shape
if self.hindex + self.hbsize + 1 >= self.end_index:
pr_hindex = self.hindex
self.hindex = 0
if self.vindex + self.vbsize >= n:
pr_vindex = self.vindex
self.vindex = 0
self.epoch = self.epoch + 1
if self.shuffle:
I = np.random.choice(n, n, replace=False)
self.I = I
self.Ymat = self.Ymat[self.I, :]
else:
pr_vindex = self.vindex
self.vindex = self.vindex + self.vbsize
else:
pr_hindex = self.hindex
self.hindex = self.hindex + self.hbsize
pr_vindex = self.vindex
data = self.Ymat[
int(pr_vindex) : int(pr_vindex + self.vbsize),
int(pr_hindex) : int(min(self.end_index, pr_hindex + self.hbsize)),
]
out_data = self.Ymat[
int(pr_vindex) : int(pr_vindex + self.vbsize),
int(pr_hindex + 1) : int(min(self.end_index, pr_hindex + self.hbsize) + 1),
]
nd, Td = data.shape
if self.covariates is not None:
covs = self.covariates[
:, int(pr_hindex) : int(min(self.end_index, pr_hindex + self.hbsize))
]
rcovs = np.repeat(
covs.reshape(1, covs.shape[0], covs.shape[1]), repeats=nd, axis=0
)
if self.Ycov is not None:
ycovs = self.Ycov[
int(pr_vindex) : int(pr_vindex + self.vbsize),
:,
int(pr_hindex) : int(min(self.end_index, pr_hindex + self.hbsize)),
]
if option == 1:
inp = torch.from_numpy(data).view(1, nd, Td)
out = torch.from_numpy(out_data).view(1, nd, Td)
if self.covariates is not None:
rcovs = torch.from_numpy(rcovs).float()
if self.Ycov is not None:
ycovs = torch.from_numpy(ycovs).float()
inp = inp.transpose(0, 1).float()
if self.covariates is not None:
inp = torch.cat((inp, rcovs), 1)
if self.Ycov is not None:
inp = torch.cat((inp, ycovs), 1)
out = out.transpose(0, 1).float()
else:
inp = torch.from_numpy(data).float()
out = torch.from_numpy(out_data).float()
inp[torch.isnan(inp)] = 0
out[torch.isnan(out)] = 0
return inp, out, self.vindex, self.hindex
def supply_test(self, option=1):
"""
Supplies validation set in the same format as above
"""
n, T = self.Ymat.shape
index = self.val_index
in_data = self.Ymat[
int(index) : int(index + self.vbsize),
int(self.end_index) : int(self.end_index + self.val_len),
]
out_data = self.Ymat[
int(index) : int(index + self.vbsize),
int(self.end_index + 1) : int(self.end_index + self.val_len + 1),
]
nd, Td = in_data.shape
if self.covariates is not None:
covs = self.covariates[
:, int(self.end_index) : int(self.end_index + self.val_len)
]
rcovs = np.repeat(
covs.reshape(1, covs.shape[0], covs.shape[1]), repeats=nd, axis=0
)
if self.Ycov is not None:
ycovs = self.Ycov[
int(index) : int(index + self.vbsize),
:,
int(self.end_index) : int(self.end_index + self.val_len),
]
if option == 1:
inp = torch.from_numpy(in_data).view(1, nd, Td)
inp = inp.transpose(0, 1).float()
if self.covariates is not None:
rcovs = torch.from_numpy(rcovs).float()
if self.Ycov is not None:
ycovs = torch.from_numpy(ycovs).float()
out = torch.from_numpy(out_data).view(1, nd, Td)
if self.covariates is not None:
inp = torch.cat((inp, rcovs), 1)
if self.Ycov is not None:
inp = torch.cat((inp, ycovs), 1)
out = out.transpose(0, 1).float()
else:
inp = torch.from_numpy(in_data).float()
out = torch.from_numpy(out_data).float()
return inp, out, self.vindex, self.hindex
| 6,610 | 34.735135 | 167 | py |
deepglo | deepglo-master/DeepGLO/Ftree.py | import numpy as np
import pandas as pd
class FplusTreeSampling(object):
    """
    F+ tree for weighted sampling from a large population.

    The last layer of ``F`` holds one weight per item; each internal node
    holds the sum of its (at most two) children. Construction is O(N);
    sampling and weight updates are O(log N).
    """

    def __init__(self, dimension, weights=None):
        self.dimension = dimension
        self.layers = int(np.ceil(np.log2(dimension)))
        self.F = [np.array([])] * self.layers
        self.initialize(weights)

    def initialize(self, weights=None):
        """
        Build the tree bottom-up; uniform weights when none are given.
        """
        if weights is None:
            self.F[-1] = np.ones((self.dimension,)) * (1.0 / self.dimension)
        else:
            self.F[-1] = weights
        # Each parent is the sum of its children; when a layer has an odd
        # number of nodes the trailing parent has a single child.
        for l in range(self.layers - 2, -1, -1):
            length = int(np.ceil(self.F[l + 1].shape[0] / 2.0))
            self.F[l] = np.ones((length,))
            if len(self.F[l + 1]) % 2 != 0:
                self.F[l][:-1] = self.F[l + 1][:-1].reshape((-1, 2)).sum(axis=1)
                self.F[l][-1] = self.F[l + 1][-1]
            else:
                self.F[l] = self.F[l + 1].reshape((-1, 2)).sum(axis=1)

    def print_graph(self):
        """Debug helper: dump every layer's weights (small trees only)."""
        if self.dimension > 1000:
            print("Are you crazy?")
            return
        for fl in self.F:
            for prob in fl:
                print(prob, " ")
            print("||")

    def total_weight(self):
        """
        return the total weight sum
        """
        # The root is virtual: its two children are F[0][0] and F[0][1].
        return self.F[0][0] + self.F[0][1]

    def get_weight(self, indices):
        """
        return the weight of given indices
        """
        return self.F[-1][indices]

    def sample_batch(self, batch_size):
        """
        sample a batch without replacement
        """
        # NOTE: np.int / np.float aliases were removed in NumPy >= 1.24;
        # the builtin types are the documented replacements.
        indices = np.zeros((batch_size,), dtype=int)
        weights = np.zeros((batch_size,), dtype=float)
        for i in range(batch_size):
            indices[i] = self.__sample()
            weights[i] = self.F[-1][indices[i]]
            self.__update(indices[i], 0)  # without replacement
        self.update_batch(indices, weights)  # resume their original weights
        return indices

    def update_batch(self, indices, probs):
        """
        update weights of a given batch
        """
        for i, p in zip(indices, probs):
            self.__update(i, p)

    def __sample(self):
        """
        sample a single node, in log(N) time
        """
        # Draw u over the FULL weight range and make the first left/right
        # decision at layer 0 too. The previous version drew u only from
        # the left subtree (self.F[0][0]) and iterated self.F[1:], so the
        # right half of the population could never be sampled.
        u = np.random.sample() * self.total_weight()
        i = 0
        for fl in self.F:
            if u > fl[2 * i] and fl.shape[0] >= 2 * (i + 1):  # choose right child
                u -= fl[2 * i]
                i = 2 * i + 1
            else:
                i = 2 * i
        return i

    def __update(self, idx, prob):
        """
        update weight of a single node, in log(N) time
        """
        delta = prob - self.F[-1][idx]
        # Propagate the change from the leaf up to the top layer.
        for l in range(self.layers - 1, -1, -1):
            self.F[l][idx] += delta
            idx = idx // 2
| 3,508 | 29.513043 | 82 | py |
deepglo | deepglo-master/DeepGLO/utilities.py | import pandas as pd
import numpy as np
import datetime
def last_days(num=60, date=datetime.datetime(2018, 6, 20)):
    """Return 'YYYYMMDD' strings for `date` and the `num - 1` days before it.

    The list is ordered from `date` backwards (most recent first).
    Unlike the previous hand-rolled version, ``num <= 0`` now yields an
    empty list instead of still including `date`.
    """
    return [
        (date - datetime.timedelta(days=i)).strftime("%Y%m%d") for i in range(num)
    ]
def date_range(d1=datetime.datetime(2018, 3, 19), d2=datetime.datetime(2018, 6, 20)):
    """Return 'YYYYMMDD' strings for every day from `d1` to `d2`, inclusive.

    Unlike the previous hand-rolled version, a reversed range
    (``d2 < d1``) now yields an empty list instead of ``[d1]``.
    """
    ndays = (d2 - d1).days
    return [
        (d1 + datetime.timedelta(days=i)).strftime("%Y%m%d") for i in range(ndays + 1)
    ]
| 680 | 29.954545 | 85 | py |
deepglo | deepglo-master/DeepGLO/time.py | import pandas as pd
import numpy as np
class TimeCovariates(object):
    """Calendar covariates (minute, hour, day-of-week, ...) for a regularly
    spaced datetime index.

    Arguments:
        start_date: first timestamp of the series
        num_ts: number of time points
        freq: pandas frequency string ("H" hourly, "D" daily, ...)
        normalized: if True, each covariate is scaled to roughly [-0.5, 0.5]
    """

    def __init__(self, start_date, num_ts=100, freq="H", normalized=True):
        self.start_date = start_date
        self.num_ts = num_ts
        self.freq = freq
        self.normalized = normalized
        self.dti = pd.date_range(self.start_date, periods=self.num_ts, freq=self.freq)

    def _minute_of_hour(self):
        # NOTE: ``np.float`` was a deprecated alias of the builtin ``float``
        # and was removed in NumPy 1.24; the builtin is used throughout.
        minutes = np.array(self.dti.minute, dtype=float)
        if self.normalized:
            minutes = minutes / 59.0 - 0.5
        return minutes

    def _hour_of_day(self):
        hours = np.array(self.dti.hour, dtype=float)
        if self.normalized:
            hours = hours / 23.0 - 0.5
        return hours

    def _day_of_week(self):
        dayWeek = np.array(self.dti.dayofweek, dtype=float)
        if self.normalized:
            dayWeek = dayWeek / 6.0 - 0.5
        return dayWeek

    def _day_of_month(self):
        dayMonth = np.array(self.dti.day, dtype=float)
        if self.normalized:
            dayMonth = dayMonth / 30.0 - 0.5
        return dayMonth

    def _day_of_year(self):
        dayYear = np.array(self.dti.dayofyear, dtype=float)
        if self.normalized:
            dayYear = dayYear / 364.0 - 0.5
        return dayYear

    def _month_of_year(self):
        monthYear = np.array(self.dti.month, dtype=float)
        if self.normalized:
            monthYear = monthYear / 11.0 - 0.5
        return monthYear

    def _week_of_year(self):
        # ``DatetimeIndex.weekofyear`` was removed in pandas 2.0; the ISO
        # calendar week is the documented replacement and yields the same
        # week numbers.
        weekYear = np.array(self.dti.isocalendar().week, dtype=float)
        if self.normalized:
            weekYear = weekYear / 51.0 - 0.5
        return weekYear

    def get_covariates(self):
        """Stack all seven covariates into a (7, num_ts) array, in the order
        minute, hour, day-of-month, day-of-week, day-of-year, month, week."""
        MOH = self._minute_of_hour().reshape(1, -1)
        HOD = self._hour_of_day().reshape(1, -1)
        DOM = self._day_of_month().reshape(1, -1)
        DOW = self._day_of_week().reshape(1, -1)
        DOY = self._day_of_year().reshape(1, -1)
        MOY = self._month_of_year().reshape(1, -1)
        WOY = self._week_of_year().reshape(1, -1)
        all_covs = [MOH, HOD, DOM, DOW, DOY, MOY, WOY]
        return np.vstack(all_covs)
| 2,134 | 30.865672 | 86 | py |
deepglo | deepglo-master/DeepGLO/metrics.py | import numpy as np
def smape(P, A):
    """Symmetric MAPE of predictions ``P`` against actuals ``A``, computed
    only over entries where the actual value is positive."""
    mask = np.where(A > 0)
    pred, act = P[mask], A[mask]
    return np.mean(2 * np.abs(act - pred) / (np.abs(act) + np.abs(pred)))
def mape(P, A):
    """Mean absolute percentage error over entries with positive actuals."""
    mask = np.where(A > 0)
    pred, act = P[mask], A[mask]
    return np.mean(np.abs(act - pred) / np.abs(act))
def wape(P, A):
    """Weighted absolute percentage error: mean|A - P| / mean|A|."""
    abs_err = np.abs(A - P)
    return np.mean(abs_err) / np.mean(np.abs(A))
def confidence_score(func, P1, A1, num):
    """Bootstrap estimate of a metric: mean and std of ``func`` over ``num``
    random 63% subsamples of the flattened predictions/actuals."""
    preds = P1.flatten()
    acts = A1.flatten()
    scores = []
    for _ in range(num):
        pick = np.random.choice(len(preds), int(0.63 * len(preds)), replace=False)
        scores.append(func(preds[pick], acts[pick]))
    return np.mean(scores), np.std(scores)
def confidence_score_dim(func, P1, A1):
    """Per-row mean/std of metric ``func``: row i contributes
    ``func(P1[i], A1[i])``.

    Bug fix: the metric must be applied along ``axis=1`` (rows of the stacked
    ``[P1 | A1]`` matrix).  The previous ``axis=0`` sliced *columns* of length
    n and split them at m (the column count of P1), comparing unrelated
    values and raising a broadcasting error whenever n != 2*m.
    """
    composite = np.hstack([P1, A1])
    n, m = P1.shape
    values = np.apply_along_axis(lambda x: func(x[0:m], x[m::]), axis=1, arr=composite)
    return np.mean(values), np.std(values)
| 898 | 18.543478 | 87 | py |
deepglo | deepglo-master/DeepGLO/__init__.py | # Implement your code here.
| 28 | 13.5 | 27 | py |
deepglo | deepglo-master/DeepGLO/LocalModel.py | import torch, h5py
import numpy as np
from scipy.io import loadmat
from torch.nn.utils import weight_norm
import torch.nn as nn
import torch.optim as optim
import numpy as np
# import matplotlib
from torch.autograd import Variable
import itertools
import torch.nn.functional as F
from DeepGLO.data_loader import *
use_cuda = True #### Assuming you have a GPU ######
from DeepGLO.utilities import *
from DeepGLO.time import *
from DeepGLO.metrics import *
import random
import pickle
np.random.seed(111)
torch.cuda.manual_seed(111)
torch.manual_seed(111)
random.seed(111)
class Chomp1d(nn.Module):
    """Trim the trailing ``chomp_size`` time steps from a (N, C, T) tensor.

    Placed after a padded causal convolution so the output sequence length
    matches the input length.
    """

    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        # Drop the right-edge padding added by the convolution.
        trimmed = x[:, :, : -self.chomp_size]
        return trimmed.contiguous()
class TemporalBlock(nn.Module):
    """Residual TCN block: two weight-normalized dilated causal convolutions
    (each followed by a chomp, ReLU and dropout), plus a 1x1 skip connection
    when the channel count changes.  The block output is ReLU(conv-path +
    residual)."""

    def __init__(
        self,
        n_inputs,
        n_outputs,
        kernel_size,
        stride,
        dilation,
        padding,
        dropout=0.1,
        init=True,
    ):
        super(TemporalBlock, self).__init__()
        self.kernel_size = kernel_size
        self.conv1 = weight_norm(
            nn.Conv1d(
                n_inputs,
                n_outputs,
                kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
            )
        )
        # Chomp removes the right-side padding so the convolution stays causal.
        self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)
        self.conv2 = weight_norm(
            nn.Conv1d(
                n_outputs,
                n_outputs,
                kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
            )
        )
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)
        self.net = nn.Sequential(
            self.conv1,
            self.chomp1,
            self.relu1,
            self.dropout1,
            self.conv2,
            self.chomp2,
            self.relu2,
            self.dropout2,
        )
        # 1x1 conv to match channel counts on the residual path when needed.
        self.downsample = (
            nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        )
        self.init = init
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        # When ``init`` is set, weights start near an identity-like mapping:
        # small noise plus 1/kernel_size offsets so the block initially
        # approximates a moving average of its input.
        # NOTE(review): these are in-place updates on the ``.weight`` of
        # weight-normalized convs made before any forward pass — presumably
        # intentional, but confirm they interact correctly with weight_norm's
        # g/v re-parameterization on the torch version in use.
        if self.init:
            nn.init.normal_(self.conv1.weight, std=1e-3)
            nn.init.normal_(self.conv2.weight, std=1e-3)
            self.conv1.weight[:, 0, :] += (
                1.0 / self.kernel_size
            )  ###new initialization scheme
            self.conv2.weight += 1.0 / self.kernel_size  ###new initialization scheme
            nn.init.normal_(self.conv1.bias, std=1e-6)
            nn.init.normal_(self.conv2.bias, std=1e-6)
        else:
            nn.init.xavier_uniform_(self.conv1.weight)
            nn.init.xavier_uniform_(self.conv2.weight)
        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.1)

    def forward(self, x):
        out = self.net(x)
        res = x if self.downsample is None else self.downsample(x)
        return self.relu(out + res)
class TemporalBlock_last(nn.Module):
    """Final TCN block: same structure as TemporalBlock but *linear* — the
    Sequential path omits the ReLUs and the residual sum is returned without
    a final activation, so the network output is unbounded.  Note the
    different default dropout (0.2 vs 0.1)."""

    def __init__(
        self,
        n_inputs,
        n_outputs,
        kernel_size,
        stride,
        dilation,
        padding,
        dropout=0.2,
        init=True,
    ):
        super(TemporalBlock_last, self).__init__()
        self.kernel_size = kernel_size
        self.conv1 = weight_norm(
            nn.Conv1d(
                n_inputs,
                n_outputs,
                kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
            )
        )
        self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)
        self.conv2 = weight_norm(
            nn.Conv1d(
                n_outputs,
                n_outputs,
                kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
            )
        )
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)
        # No ReLUs in the sequential path: this block is purely linear
        # (conv -> chomp -> dropout, twice).  relu1/relu2 above are created
        # but intentionally unused here.
        self.net = nn.Sequential(
            self.conv1,
            self.chomp1,
            self.dropout1,
            self.conv2,
            self.chomp2,
            self.dropout2,
        )
        self.downsample = (
            nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        )
        self.init = init
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        # Same near-identity initialization scheme as TemporalBlock.
        # NOTE(review): in-place edits of weight-normalized ``.weight``
        # tensors — confirm compatibility with the installed torch version.
        if self.init:
            nn.init.normal_(self.conv1.weight, std=1e-3)
            nn.init.normal_(self.conv2.weight, std=1e-3)
            self.conv1.weight[:, 0, :] += (
                1.0 / self.kernel_size
            )  ###new initialization scheme
            self.conv2.weight += 1.0 / self.kernel_size  ###new initialization scheme
            nn.init.normal_(self.conv1.bias, std=1e-6)
            nn.init.normal_(self.conv2.bias, std=1e-6)
        else:
            nn.init.xavier_uniform_(self.conv1.weight)
            nn.init.xavier_uniform_(self.conv2.weight)
        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.1)

    def forward(self, x):
        # Residual sum with no final activation (linear output head).
        out = self.net(x)
        res = x if self.downsample is None else self.downsample(x)
        return out + res
class TemporalConvNet(nn.Module):
    """Stack of dilated causal temporal blocks.

    Dilation doubles at every level, so the receptive field grows
    exponentially with depth.  The final level uses the linear
    ``TemporalBlock_last`` head; all earlier levels use ``TemporalBlock``.
    """

    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.1, init=True):
        super(TemporalConvNet, self).__init__()
        self.num_channels = num_channels
        self.num_inputs = num_inputs
        self.kernel_size = kernel_size
        self.dropout = dropout
        blocks = []
        last_level = len(num_channels) - 1
        for level, out_channels in enumerate(num_channels):
            dilation = 2 ** level
            in_channels = num_inputs if level == 0 else num_channels[level - 1]
            block_cls = TemporalBlock_last if level == last_level else TemporalBlock
            blocks.append(
                block_cls(
                    in_channels,
                    out_channels,
                    kernel_size,
                    stride=1,
                    dilation=dilation,
                    padding=(kernel_size - 1) * dilation,
                    dropout=dropout,
                    init=init,
                )
            )
        self.network = nn.Sequential(*blocks)

    def forward(self, x):
        return self.network(x)
class LocalModel(object):
    """Per-series temporal-convolution forecaster (the "local" model in
    DeepGLO).  Wraps a TemporalConvNet and handles optional normalization,
    global and per-series covariates, mini-batch training with a validation
    set, and multi-step rolling prediction/validation."""

    def __init__(
        self,
        Ymat,
        num_inputs=1,
        num_channels=[32, 32, 32, 32, 32, 1],
        kernel_size=7,
        dropout=0.2,
        vbsize=300,
        hbsize=128,
        num_epochs=100,
        lr=0.0005,
        val_len=10,
        test=True,
        end_index=120,
        normalize=False,
        start_date="2016-1-1",
        freq="H",
        covariates=None,
        use_time=False,
        dti=None,
        Ycov=None,
    ):
        """
        Arguments:
        Ymat: input time-series n*T
        num_inputs: always set to 1
        num_channels: list containing channel progression of temporal convolution network
        kernel_size: kernel size of temporal convolution filters
        dropout: dropout rate for each layer
        vbsize: vertical batch size
        hbsize: horizontal batch size
        num_epochs: max. number of epochs
        lr: learning rate
        val_len: validation length
        test: always set to True
        end_index: no data is touched for training or validation beyond end_index
        normalize: normalize dataset before training or not
        start_date: start date in YYYY-MM-DD format (give a random date if unknown)
        freq: "H" hourly, "D": daily and for rest see here: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases
        covariates: global covariates common for all time series r*T, where r is the number of covariates
        Ycov: per time-series covariates n*l*T, l such covariates per time-series
        use_time: if false, default time-covariates are not used
        dti: date time object can be explicitly supplied here, leave None if default options are to be used
        """
        # NOTE(review): ``num_channels`` has a mutable default list; it is
        # never mutated here, so this is safe, but worth confirming.
        self.start_date = start_date
        if use_time:
            # Build calendar covariates spanning the full series length.
            self.time = TimeCovariates(
                start_date=start_date, freq=freq, normalized=True, num_ts=Ymat.shape[1]
            )
            if dti is not None:
                self.time.dti = dti
            time_covariates = self.time.get_covariates()
            if covariates is None:
                self.covariates = time_covariates
            else:
                self.covariates = np.vstack([time_covariates, covariates])
        else:
            self.covariates = covariates
        self.Ycov = Ycov
        self.freq = freq
        self.vbsize = vbsize
        self.hbsize = hbsize
        self.num_inputs = num_inputs
        self.num_channels = num_channels
        self.num_epochs = num_epochs
        self.lr = lr
        self.val_len = val_len
        self.Ymat = Ymat
        self.test = test
        self.end_index = end_index
        self.normalize = normalize
        self.kernel_size = kernel_size
        if normalize:
            # Standardize per series using stats of the training region only,
            # then shift so the whole matrix is non-negative.
            Y = Ymat
            m = np.mean(Y[:, 0 : self.end_index], axis=1)
            s = np.std(Y[:, 0 : self.end_index], axis=1)
            # s[s == 0] = 1.0
            s += 1.0
            Y = (Y - m[:, None]) / s[:, None]
            mini = np.abs(np.min(Y))
            self.Ymat = Y + mini
            self.m = m
            self.s = s
            self.mini = mini
        # Each per-series / global covariate becomes an extra input channel.
        if self.Ycov is not None:
            self.num_inputs += self.Ycov.shape[1]
        if self.covariates is not None:
            self.num_inputs += self.covariates.shape[0]
        self.seq = TemporalConvNet(
            num_inputs=self.num_inputs,
            num_channels=num_channels,
            kernel_size=kernel_size,
            dropout=dropout,
            init=True,
        )
        self.seq = self.seq.float()
        self.D = data_loader(
            Ymat=self.Ymat,
            vbsize=vbsize,
            hbsize=hbsize,
            end_index=end_index,
            val_len=val_len,
            covariates=self.covariates,
            Ycov=self.Ycov,
        )
        self.val_len = val_len
        if use_cuda:
            self.seq = self.seq.cuda()

    def __loss__(self, out, target, dic=None):
        # Normalized L1: mean absolute error scaled by the mean magnitude of
        # the target batch (``dic`` is unused, kept for interface symmetry).
        criterion = nn.L1Loss()
        return criterion(out, target) / torch.abs(target.data).mean()

    def __prediction__(self, data):
        # Forward pass through the TCN; ``dic`` is a placeholder for
        # auxiliary outputs (always None here).
        dic = None
        out = self.seq(data)
        return out, dic

    def train_model(self, early_stop=False, tenacity=10):
        """
        early_stop: set true for using early stop
        tenacity: patience for early_stop
        """
        print("Training Local Model(Tconv)")
        if use_cuda:
            self.seq = self.seq.cuda()
        optimizer = optim.Adam(params=self.seq.parameters(), lr=self.lr)
        iter_count = 0
        loss_all = []
        loss_test_all = []
        vae = float("inf")  # best validation loss seen so far
        scount = 0  # epochs since the last validation improvement
        while self.D.epoch < self.num_epochs:
            last_epoch = self.D.epoch
            inp, out_target, _, _ = self.D.next_batch()
            if self.test:
                inp_test, out_target_test, _, _ = self.D.supply_test()
            current_epoch = self.D.epoch
            if use_cuda:
                inp = inp.cuda()
                out_target = out_target.cuda()
            inp = Variable(inp)
            out_target = Variable(out_target)
            optimizer.zero_grad()
            out, dic = self.__prediction__(inp)
            loss = self.__loss__(out, out_target, dic)
            iter_count = iter_count + 1
            for p in self.seq.parameters():
                p.requires_grad = True
            loss.backward()
            # Clip gradients element-wise to keep updates bounded.
            for p in self.seq.parameters():
                p.grad.data.clamp_(max=1e5, min=-1e5)
            optimizer.step()
            loss_all = loss_all + [loss.cpu().item()]
            if self.test:
                if use_cuda:
                    inp_test = inp_test.cuda()
                    out_target_test = out_target_test.cuda()
                inp_test = Variable(inp_test)
                out_target_test = Variable(out_target_test)
                out_test, dic = self.__prediction__(inp_test)
                losst = self.__loss__(out_test, out_target_test, dic)
                loss_test_all = loss_test_all + [losst.cpu().item()]
            if current_epoch > last_epoch:
                # Epoch boundary crossed: report and run early-stop logic.
                ve = loss_test_all[-1]
                print("Entering Epoch# ", current_epoch)
                print("Train Loss:", np.mean(loss_all))
                print("Validation Loss:", ve)
                if ve <= vae:
                    vae = ve
                    scount = 0
                    # self.saved_seq = TemporalConvNet(
                    #     num_inputs=self.seq.num_inputs,
                    #     num_channels=self.seq.num_channels,
                    #     kernel_size=self.seq.kernel_size,
                    #     dropout=self.seq.dropout,
                    # )
                    # self.saved_seq.load_state_dict(self.seq.state_dict())
                    # Snapshot the best model by deep-copying via pickle.
                    self.saved_seq = pickle.loads(pickle.dumps(self.seq))
                else:
                    scount += 1
                    if scount > tenacity and early_stop:
                        # Restore the best snapshot and stop training.
                        self.seq = self.saved_seq
                        if use_cuda:
                            self.seq = self.seq.cuda()
                        break

    def convert_to_input(self, data, cuda=True):
        # (n, m) numpy array -> (n, 1, m) float tensor (one channel/series).
        n, m = data.shape
        inp = torch.from_numpy(data).view(1, n, m)
        inp = inp.transpose(0, 1).float()
        if cuda:
            inp = inp.cuda()
        return inp

    def convert_covariates(self, data, covs, cuda=True):
        # Tile the global covariates (r, T) across all n series -> (n, r, T).
        nd, td = data.shape
        rcovs = np.repeat(
            covs.reshape(1, covs.shape[0], covs.shape[1]), repeats=nd, axis=0
        )
        rcovs = torch.from_numpy(rcovs).float()
        if cuda:
            rcovs = rcovs.cuda()
        return rcovs

    def convert_ycovs(self, data, ycovs, cuda=True):
        # Per-series covariates (n, l, T) -> float tensor.
        nd, td = data.shape
        ycovs = torch.from_numpy(ycovs).float()
        if cuda:
            ycovs = ycovs.cuda()
        return ycovs

    def convert_from_output(self, T):
        # (n, 1, T) prediction tensor -> (n, T) numpy array.
        out = T.view(T.size(0), T.size(2))
        return np.array(out.cpu().detach())

    def predict_future_batch(
        self, data, covariates=None, ycovs=None, future=10, cpu=False
    ):
        """Autoregressively roll the TCN forward ``future`` steps for one
        batch of series, feeding each one-step prediction (plus the next
        covariate column) back in as input."""
        if cpu:
            self.seq = self.seq.cpu()
        else:
            self.seq = self.seq.cuda()
        inp = self.convert_to_input(data)
        if covariates is not None:
            cov = self.convert_covariates(data, covariates)
            inp = torch.cat((inp, cov[:, :, 0 : inp.size(2)]), 1)
        if ycovs is not None:
            ycovs = self.convert_ycovs(data, ycovs)
            inp = torch.cat((inp, ycovs[:, :, 0 : inp.size(2)]), 1)
        if cpu:
            inp = inp.cpu()
            cov = cov.cpu()
            ycovs = ycovs.cpu()
        out, dic = self.__prediction__(inp)
        ci = inp.size(2)  # index of the next covariate column to append
        output = out[:, :, out.size(2) - 1].view(out.size(0), out.size(1), 1)
        if covariates is not None:
            output = torch.cat(
                (output, cov[:, :, ci].view(cov.size(0), cov.size(1), 1)), 1
            )
        if ycovs is not None:
            output = torch.cat(
                (output, ycovs[:, :, ci].view(ycovs.size(0), ycovs.size(1), 1)), 1
            )
        out = torch.cat((inp, output), dim=2)
        for i in range(future - 1):
            inp = out
            out, dic = self.__prediction__(inp)
            output = out[:, :, out.size(2) - 1].view(out.size(0), out.size(1), 1)
            ci += 1
            if covariates is not None:
                output = torch.cat(
                    (output, cov[:, :, ci].view(cov.size(0), cov.size(1), 1)), 1
                )
            if ycovs is not None:
                output = torch.cat(
                    (output, ycovs[:, :, ci].view(ycovs.size(0), ycovs.size(1), 1)), 1
                )
            out = torch.cat((inp, output), dim=2)
        # Keep only the value channel (channel 0); the rest are covariates.
        out = out[:, 0, :].view(out.size(0), 1, out.size(2))
        # NOTE(review): unconditional .cuda() here (and below) runs even when
        # cpu=True — this will fail on CPU-only machines; confirm intent.
        out = out.cuda()
        y = self.convert_from_output(out)
        self.seq = self.seq.cuda()
        return y

    def predict_future(
        self,
        data_in,
        covariates=None,
        ycovs=None,
        future=10,
        cpu=False,
        bsize=40,
        normalize=False,
    ):
        """
        data_in: input past data in same format of Ymat
        covariates: input past covariates
        ycovs: input past individual covariates
        future: number of time-points to predict
        cpu: if true then gpu is not used
        bsize: batch size for processing (determine according to gpu memory limits)
        normalize: should be set according to the normalization used in the class initialization
        """
        if normalize:
            # Apply the same per-series standardization used at training time.
            data = (data_in - self.m[:, None]) / self.s[:, None]
            data += self.mini
        else:
            data = data_in
        n, T = data.shape
        # Process the n series in vertical batches of size ``bsize``.
        I = list(np.arange(0, n, bsize))
        I.append(n)
        bdata = data[range(I[0], I[1]), :]
        if ycovs is not None:
            out = self.predict_future_batch(
                bdata, covariates, ycovs[range(I[0], I[1]), :, :], future, cpu
            )
        else:
            out = self.predict_future_batch(bdata, covariates, None, future, cpu)
        for i in range(1, len(I) - 1):
            bdata = data[range(I[i], I[i + 1]), :]
            # NOTE(review): .cuda() even when cpu=True — see predict_future_batch.
            self.seq = self.seq.cuda()
            if ycovs is not None:
                temp = self.predict_future_batch(
                    bdata, covariates, ycovs[range(I[i], I[i + 1]), :, :], future, cpu
                )
            else:
                temp = self.predict_future_batch(bdata, covariates, None, future, cpu)
            out = np.vstack([out, temp])
        if normalize:
            # Undo the normalization before returning predictions.
            temp = (out - self.mini) * self.s[:, None] + self.m[:, None]
            out = temp
        return out

    def rolling_validation(self, Ymat, tau=24, n=7, bsize=90, cpu=False, alpha=0.3):
        """Rolling-window evaluation: predict the last ``n`` windows of
        length ``tau`` one window at a time and aggregate error metrics.
        NOTE(review): the ``alpha`` parameter is unused in this method."""
        last_step = Ymat.shape[1] - tau * n
        # Receptive field of the TCN: the model needs this much history.
        rg = 1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels) - 1)
        self.seq = self.seq.eval()
        if self.covariates is not None:
            covs = self.covariates[:, last_step - rg : last_step + tau]
        else:
            covs = None
        if self.Ycov is not None:
            ycovs = self.Ycov[:, :, last_step - rg : last_step + tau]
        else:
            ycovs = None
        data_in = Ymat[:, last_step - rg : last_step]
        out = self.predict_future(
            data_in,
            covariates=covs,
            ycovs=ycovs,
            future=tau,
            cpu=cpu,
            bsize=bsize,
            normalize=self.normalize,
        )
        predicted_values = []
        actual_values = []
        S = out[:, -tau::]
        predicted_values += [S]
        R = Ymat[:, last_step : last_step + tau]
        actual_values += [R]
        print("Current window wape: " + str(wape(S, R)))
        for i in range(n - 1):
            last_step += tau
            rg = 1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels) - 1)
            if self.covariates is not None:
                covs = self.covariates[:, last_step - rg : last_step + tau]
            else:
                covs = None
            if self.Ycov is not None:
                ycovs = self.Ycov[:, :, last_step - rg : last_step + tau]
            else:
                ycovs = None
            data_in = Ymat[:, last_step - rg : last_step]
            out = self.predict_future(
                data_in,
                covariates=covs,
                ycovs=ycovs,
                future=tau,
                cpu=cpu,
                bsize=bsize,
                normalize=self.normalize,
            )
            S = out[:, -tau::]
            predicted_values += [S]
            R = Ymat[:, last_step : last_step + tau]
            actual_values += [R]
            print("Current window wape: " + str(wape(S, R)))
        predicted = np.hstack(predicted_values)
        actual = np.hstack(actual_values)
        dic = {}
        dic["wape"] = wape(predicted, actual)
        dic["mape"] = mape(predicted, actual)
        dic["smape"] = smape(predicted, actual)
        dic["mae"] = np.abs(predicted - actual).mean()
        dic["rmse"] = np.sqrt(((predicted - actual) ** 2).mean())
        dic["nrmse"] = dic["rmse"] / np.sqrt(((actual) ** 2).mean())
        # Baseline: the previous window of the same length, shifted by tau.
        baseline = Ymat[:, Ymat.shape[1] - n * tau - tau : Ymat.shape[1] - tau]
        dic["baseline_wape"] = wape(baseline, actual)
        dic["baseline_mape"] = mape(baseline, actual)
        dic["baseline_smape"] = smape(baseline, actual)
        return dic
| 21,683 | 31.804841 | 157 | py |
deepglo | deepglo-master/run_scripts/run_traffic.py | #### OS and commanline arguments
import sys
import multiprocessing as mp
import gzip
import subprocess
from pathlib import Path
import argparse
import logging
import os
sys.path.append('./')
#### DeepGLO model imports
from DeepGLO.metrics import *
from DeepGLO.DeepGLO import *
from DeepGLO.LocalModel import *
import pandas as pd
import numpy as np
import pickle
import random
np.random.seed(111)
torch.cuda.manual_seed(111)
torch.manual_seed(111)
random.seed(111)
import json
def str2bool(v):
    """Parse a commandline flag value into a bool (accepts yes/no style words)."""
    if isinstance(v, bool):
        return v
    word = v.lower()
    if word in ("yes", "true", "t", "y", "1"):
        return True
    if word in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")
def bool2str(b):
    """Render a truthy/falsy value as the lowercase string "true"/"false"."""
    return "true" if b else "false"
# Experiment configuration for the traffic dataset (hourly data).
Ymat = np.load("./datasets/traffic.npy")
vbsize = 128  ## vertical batch size
hbsize = 256  ## horizontal batch size
num_channels_X = [32, 32, 32, 32, 32, 1]  ## number of channels for local model
num_channels_Y = [32, 32, 32, 32, 32, 1]  ## number of channels for hybrid model
kernel_size = 7  ## kernel size for local models
dropout = 0.2  ## dropout during training
rank = 64  ## rank of global model
kernel_size_Y = 7  ## kernel size of hybrid model
lr = 0.0005  ## learning rate
val_len = 24  ## validation length
end_index = Ymat.shape[1] - 24 * 7  ## models will not look beyond this during training
start_date = "2012-1-1"  ## start date time for the time-series
freq = "H"  ## frequency of data
covariates = None  ## no covariates specified
use_time = True  ## use time covariates
dti = None  ## no specified time covariates (using default)
svd = False  ## factor matrices are not initialized by NMF
period = None  ## periodicity of 24 is expected, leave it out if not known
y_iters = 300  ## max. number of iterations while training Tconv models
init_epochs = 100  ## max number of iterations while initializing factors
forward_cov = False
logging.basicConfig(
    stream=sys.stdout,
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
def main(args):
    """Train DeepGLO on the traffic data, run a 7-day rolling validation
    (24-step windows), and pickle the resulting metrics dictionary.

    Reads the module-level configuration globals (Ymat, vbsize, ...) and the
    ``normalize`` global set by the commandline parser.
    """
    DG = DeepGLO(
        Ymat,
        vbsize=vbsize,
        hbsize=hbsize,
        num_channels_X=num_channels_X,
        num_channels_Y=num_channels_Y,
        kernel_size=kernel_size,
        dropout=dropout,
        rank=rank,
        kernel_size_Y=kernel_size_Y,
        lr=lr,
        val_len=val_len,
        end_index=end_index,
        normalize=normalize,
        start_date=start_date,
        freq=freq,
        covariates=covariates,
        use_time=use_time,
        dti=dti,
        svd=svd,
        period=period,
        forward_cov=forward_cov,
    )
    DG.train_all_models(y_iters=y_iters, init_epochs=init_epochs)
    result_dic = DG.rolling_validation(
        Ymat=Ymat, tau=24, n=7, bsize=100, cpu=False, alpha=0.3
    )
    print(result_dic)
    out_path = Path(
        ".",
        "results",
        "result_dictionary_traffic_" + bool2str(normalize) + ".pkl",
    )
    # Fix: the previous code passed an unclosed ``open(...)`` handle to
    # pickle.dump, leaking the file descriptor; use a context manager.
    with open(out_path, "wb") as fh:
        pickle.dump(result_dic, fh)
if __name__ == "__main__":
    # Parse the single --normalize flag and hand off to main().
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--normalize",
        type=str2bool,
        required=True,
        help="normalize for training or not",
    )
    args = parser.parse_args()
    # NOTE(review): ``global`` at module level is a no-op; the assignment
    # below already creates/overwrites the module-level ``normalize``.
    global normalize
    normalize = args.normalize
    main(args)
| 3,498 | 24.727941 | 87 | py |
deepglo | deepglo-master/run_scripts/run_wiki.py | #### OS and commanline arguments
import sys
import multiprocessing as mp
import gzip
import subprocess
from pathlib import Path
import argparse
import logging
import os
sys.path.append('./')
#### DeepGLO model imports
from DeepGLO.metrics import *
from DeepGLO.DeepGLO import *
from DeepGLO.LocalModel import *
import pandas as pd
import numpy as np
import pickle
import random
np.random.seed(111)
torch.cuda.manual_seed(111)
torch.manual_seed(111)
random.seed(111)
import json
def str2bool(v):
    """Interpret a commandline string (or an actual bool) as a boolean flag."""
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in {"yes", "true", "t", "y", "1"}:
        return True
    elif lowered in {"no", "false", "f", "n", "0"}:
        return False
    else:
        raise argparse.ArgumentTypeError("Boolean value expected.")
def bool2str(b):
    """Map a truthy value to "true" and a falsy one to "false"."""
    if b:
        result = "true"
    else:
        result = "false"
    return result
# Experiment configuration for the wiki page-views dataset (daily data).
Ymat = np.load("./datasets/wiki.npy")
vbsize = 2048  ## vertical batch size
hbsize = 256  ## horizontal batch size
num_channels_X = [32, 32, 32, 32, 1]  ## number of channels for local model
num_channels_Y = [32, 32, 32, 32, 1]  ## number of channels for hybrid model
kernel_size = 7  ## kernel size for local models
dropout = 0.2  ## dropout during training
rank = 128  ## rank of global model
kernel_size_Y = 7  ## kernel size of hybrid model
lr = 0.0005  ## learning rate
val_len = 14  ## validation length
end_index = Ymat.shape[1] - 14 * 4  ## models will not look beyond this during training
start_date = "2012-1-1"  ## start date time for the time-series
freq = "D"  ## frequency of data
covariates = None  ## no covariates specified
use_time = True  ## use time covariates
dti = None  ## no specified time covariates (using default)
svd = True  ## factor matrices are initialized by NMF
period = 7  ## periodicity of 7 is expected, leave it out if not known
y_iters = 300  ## max. number of iterations while training Tconv models
init_epochs = 100  ## max number of iterations while initializing factors
forward_cov = False
logging.basicConfig(
    stream=sys.stdout,
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
def main(args):
    """Train DeepGLO on the wiki data, run a 4-window rolling validation
    (14-step windows), and pickle the resulting metrics dictionary.

    Reads the module-level configuration globals (Ymat, vbsize, ...) and the
    ``normalize`` global set by the commandline parser.
    """
    DG = DeepGLO(
        Ymat,
        vbsize=vbsize,
        hbsize=hbsize,
        num_channels_X=num_channels_X,
        num_channels_Y=num_channels_Y,
        kernel_size=kernel_size,
        dropout=dropout,
        rank=rank,
        kernel_size_Y=kernel_size_Y,
        lr=lr,
        val_len=val_len,
        end_index=end_index,
        normalize=normalize,
        start_date=start_date,
        freq=freq,
        covariates=covariates,
        use_time=use_time,
        dti=dti,
        svd=svd,
        period=period,
        forward_cov=forward_cov,
    )
    DG.train_all_models(y_iters=y_iters, init_epochs=init_epochs)
    result_dic = DG.rolling_validation(
        Ymat=Ymat, tau=14, n=4, bsize=100, cpu=False, alpha=0.5
    )
    print(result_dic)
    out_path = Path(
        ".",
        "results",
        "result_dictionary_wiki_" + bool2str(normalize) + ".pkl",
    )
    # Fix: the previous code passed an unclosed ``open(...)`` handle to
    # pickle.dump, leaking the file descriptor; use a context manager.
    with open(out_path, "wb") as fh:
        pickle.dump(result_dic, fh)
if __name__ == "__main__":
    # Parse the single --normalize flag and hand off to main().
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--normalize",
        type=str2bool,
        required=True,
        help="normalize for training or not",
    )
    args = parser.parse_args()
    # NOTE(review): ``global`` at module level is a no-op; the assignment
    # below already creates/overwrites the module-level ``normalize``.
    global normalize
    normalize = args.normalize
    main(args)
| 3,475 | 24.940299 | 87 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.