repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter4/SobelOperator.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples<gh_stars>10-100
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 4
SobelOperator: Detect edges by the sobel operator
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageF, showImageF
from ConvolutionUtilities import createSobelKernel
from ImageOperatorsUtilities import thresholdImage
from PlotUtilities import plotQuiver
# Math and iteration
from math import sqrt, atan2
import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    kernelSize = Size of the kernel
    threshold = Threshold value
    quiverSample = Distance between arrows in the quiver plot. Increase to have less arrows
    quiverScale = Scale of arrows in the quiver plot. Increase to make arrows smaller
'''
pathToDir = "../../Images/Chapter4/Input/"
imageName = "Zebra.png"
kernelSize = 5
threshold = 4000
quiverSample = 5
quiverScale = 500
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Create images to store the result
outputMagnitude = createImageF(width, height)
outputDirection = createImageF(width, height)
# Create Kernel
sobelX, sobelY = createSobelKernel(kernelSize)
# The center of the kernel
kernelCentre = int((kernelSize - 1) / 2)
# Convolution with the two kernels; wX/wY normalise by the sum of the kernel
# entries that fall inside the image
for x,y in itertools.product(range(0, width), range(0, height)):
    mX, wX, mY, wY = 0.0, 0.0, 0.0, 0.0
    for wx,wy in itertools.product(range(0, kernelSize), range(0, kernelSize)):
        posY = y + wy - kernelCentre
        posX = x + wx - kernelCentre
        if posY > -1 and posY <  height and posX > -1 and posX <  width:
            mX += float(inputImage[posY,posX]) * sobelX[wy, wx]
            wX += sobelX[wy, wx]
            mY += float(inputImage[posY,posX]) * sobelY[wy, wx]
            wY += sobelY[wy, wx]
    if wX > 0:
        mX = mX / wX
    if wY > 0:
        mY = mY / wY
    outputMagnitude[y,x] = sqrt(mX * mX + mY * mY)
    outputDirection[y,x] = atan2(mY, mX)
# Threshold the gradient magnitude to obtain an edge image
outputthresholdMagnitude = thresholdImage(outputMagnitude, threshold, True)
# Show output image
showImageF(outputMagnitude)
showImageL(outputthresholdMagnitude)
# Plot scaled vectors
plotQuiver(outputMagnitude, outputDirection, quiverScale, quiverSample )
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter3/Thresholding.py | <filename>ExamplesPython_3.6/Chapter3/Thresholding.py<gh_stars>10-100
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 3
Thresholding: Create binary image by thresholding
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageL
# Iteration
import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    threshold = Threshold value
'''
# Data directory
pathToDir = "../../Images/Chapter3/Input/"
imageName = "horse.png"
threshold = 130
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Create images to store the result
outputImage = createImageL(width, height)
# Set the pixels in the output image: white above the threshold, black otherwise
for x,y in itertools.product(range(0, width), range(0, height)):
    if inputImage[y,x] > threshold:
        outputImage[y,x] = 255
    else:
        outputImage[y,x] = 0
# Show output image
showImageL(outputImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter7/EllipticFourierDescriptors.py | <gh_stars>10-100
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 7
EllipticFourierDescriptors: Compute the elliptic Fourier descriptors of a shape in an image
'''
# Set module functions
from ImageUtilities import createImageF, createVectorF
from ImageRegionsUtilities import findLongestCentredSegmentinImage, showShapeinImage, shapeMaxMin
from PlotUtilities import plotHistogram, plotCurve
# Math
from math import pi, sqrt, sin, cos
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    gaussianKernelSize = Gaussian kernel size. Filter noise
    sobelKernelSize = Sobel kernel size. Edge detection
    upperT = Upper threshold
    lowerT = Lower threshold
    numDescriptors = Number of descriptors
'''
pathToDir = "../../Images/Chapter7/Input/"
imageName = "f14r.png"
gaussianKernelSize = 5
sobelKernelSize = 3
upperT = 0.3
lowerT = 0.05
numDescriptors = 20
# Obtain a shape from the input image and show
centre, shape, width, height = findLongestCentredSegmentinImage(pathToDir + imageName, \
            gaussianKernelSize, sobelKernelSize, upperT, lowerT)
showShapeinImage(shape, centre, width, height)
# Add one coefficient to include the shape position
numEdges = len(shape[0])
if numDescriptors == 0: numDescriptors = 1 + int(numEdges /2)
else: numDescriptors += 1
# Display functions for x and y with domain values from 0 to 2pi
x, y = shape[1,:], shape[0,:]
maxVal, minVal = shapeMaxMin(shape)
plotCurve(x, rangeY = [minVal, maxVal], rangeX = [0, 2.0*pi])
plotCurve(y, rangeY = [minVal, maxVal], rangeX = [0, 2.0*pi])
# Compute coefficients. The vector a contains ax,ay and b bx,by
t = 2.0 * pi / numEdges
a = createImageF(numDescriptors, 2)
b = createImageF(numDescriptors, 2)
for k in range(1, numDescriptors):
    for p in range(0, numEdges):
        a[0, k] += x[p] * cos(k*t*p)
        a[1, k] += y[p] * cos(k*t*p)
        b[0, k] += x[p] * sin(k*t*p)
        b[1, k] += y[p] * sin(k*t*p)
for k in range(1, numDescriptors):
    a[0, k] *= (2.0/numEdges)
    a[1, k] *= (2.0/numEdges)
    b[0, k] *= (2.0/numEdges)
    b[1, k] *= (2.0/numEdges)
# Compute rotation-invariant descriptors, normalised by the first harmonic
normA = a[0, 1]*a[0, 1] + a[1, 1]*a[1, 1]
normB = b[0, 1]*b[0, 1] + b[1, 1]*b[1, 1]
descriptors = createVectorF(numDescriptors)
for k in range(0, numDescriptors):
    descriptors[k] = sqrt( (a[0, k]*a[0, k] + a[1, k]*a[1, k])/normA) + \
                     sqrt( (b[0, k]*b[0, k] + b[1, k]*b[1, k])/normB)
plotHistogram(descriptors, [0, 1], .95)
# Draw shape from coefficients
shapeReconst = createImageF(numEdges, 2)
for k in range(1, numDescriptors):
    for p in range(0, numEdges):
        shapeReconst[0, p] += a[1,k] * cos(k*t*p) + b[1,k] * sin(k*t*p)
        shapeReconst[1, p] += a[0,k] * cos(k*t*p) + b[0,k] * sin(k*t*p)
showShapeinImage(shapeReconst, centre, width, height)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter7/KrawtchoukMoments.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples<gh_stars>10-100
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 7
KrawtchoukMoments: Compute Krawtchouk moments for a region in an image.
Compute moments, moments by using geometric moments and invariant moments
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL,createImageF, scaleImageL
from ImageRegionsUtilities import pixlesList, weightedKrawtchoukPolynomials, \
                                  geometricMoments
from PrintUtilities import printImageRangeF
from PlotUtilities import plotSurface, plotCurve
# Math and iteration
from math import pi, atan, sin, cos, sqrt
import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    p = Polynomial parameter. 0.5 for centralized polynomials. Power
    numMoments = Number of moments
    background = The gray level range of the background pixels
'''
pathToDir = "../../Images/Chapter7/Input/"
imageName = "f14.png"
numMoments = 4
p = 0.5
background = [200, 255] # white background image
reducedSize = 80 # reduce the image size to avoid overflowing or use recurrence relations
# Read image into array and show
inputImage, inputWidth, inputHeight = imageReadL(pathToDir+imageName)
# Reduce the image size to avoid large exponents in the computation
scale = max(max(inputWidth, inputHeight) / float(reducedSize), 1.0)
width, height = int(inputWidth/scale), int(inputHeight/scale)
scaledImage = scaleImageL(inputImage, width, height)
showImageL(scaledImage)
# Get a list that contains the pixels of the shape in the form (y,x,v)
shapeImage = pixlesList(scaledImage, background)
numPoints = len(shapeImage)
# Polynomials, coefficients and weights for the Krawtchouk polynomials
# Considering that A*C = k. For a the coefficients and C the powers x, x^2, x^3,..
N = max(width, height)
kW, aW, sigma, ro, w = weightedKrawtchoukPolynomials(p, N)
# Krawtchouk moments of the shape by standard definition
Q = createImageF(numMoments, numMoments)
for m,n in itertools.product(range(0, numMoments), range(0, numMoments)):
    for indexPixel in range(0, numPoints):
        y, x = (shapeImage[indexPixel])[0], (shapeImage[indexPixel])[1]
        v = (shapeImage[indexPixel])[2]
        Q[n,m] += w[x,m] * kW[x,m] * w[y,n] * kW[y,n] * v
printImageRangeF(Q, [0,numMoments-1],[0,numMoments-1], " 8.2f")
# Krawtchouk moments from the geometric moments Gij = x**i , y**j.
G = createImageF(N, N)
for i,j in itertools.product(range(0, N), range(0, N)):
    for indexPixel in range(0, numPoints):
        y, x= (shapeImage[indexPixel])[0], (shapeImage[indexPixel])[1]
        v = (shapeImage[indexPixel])[2]
        G[j,i] += sqrt(sigma[x] * sigma[y]) * y**j * x**i * v
Qs = createImageF(numMoments, numMoments)
for m,n in itertools.product(range(0, numMoments), range(0, numMoments)):
    for i,j in itertools.product(range(0, N), range(0, N)):
        Qs[n,m] += aW[m,i] * aW[n,j] * G[j,i]
    Qs[n,m] *= (1.0 / sqrt(ro[n]*ro[m]))
printImageRangeF(Qs, [0,numMoments-1],[0,numMoments-1], " 8.2f")
# Invariant Krawtchouk moments by using weighted invariant Geometric moments G(j,i)
Qi = createImageF(numMoments, numMoments)
M = geometricMoments(shapeImage, 3)
xc,yc = M[1,0]/M[0,0], M[0,1]/M[0,0]
m11 = M[1,1]/M[0,0] - xc*yc
m20 = M[2,0]/M[0,0] - xc**2
m02 = M[0,2]/M[0,0] - yc**2
if m20 < m02: t = -(0.5 * atan(2.0*m11/(m20-m02)) + pi/2.0)
else: t = -(0.5 * atan(2.0*m11/(m20-m02)))
# Scale
q, n2 = (N*N/2.0)/M[0,0], N / 2.0
Nu = createImageF(N, N)
for j,i in itertools.product(range(0, N), range(0, N)):
    for indexPixel in range(0, numPoints):
        y, x = (shapeImage[indexPixel])[0], (shapeImage[indexPixel])[1]
        val = (shapeImage[indexPixel])[2]
        # Invariant moments
        a = ((x-xc)*sin(t) + (y-yc) * cos(t))
        b = ((x-xc)*cos(t) - (y-yc) * sin(t))
        # To NxN image
        u = sqrt(q) * a + n2
        v = sqrt(q) * b + n2
        if int(v) < N and int(u) < N:
            Nu[i,j] += a**i * b**j * val * sqrt(sigma[int(v)] * sigma[int(u)])
    c = 1.0 + ((i + j) / 2.0)
    Nu[i,j] = (Nu[i,j] / pow(M[0,0],c))
for m,n in itertools.product(range(0, numMoments), range(0, numMoments)):
    # Descriptors
    for j,i in itertools.product(range(0, N), range(0, N)):
        Qi[n,m] += Nu[i,j] * aW[m,i] * aW[n,j]
    Qi[n,m] *= (1.0 / sqrt(ro[n]*ro[m]))
printImageRangeF(Qi, [0,numMoments-1],[0,numMoments-1], " 8.2f")
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter4/SobelKernel.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 4
SobelKernel: Generate a Sobel kernel of arbitrary size
'''
# Set module functions
from ImageUtilities import createImageUV
from PrintUtilities import printImageRangeF
# Math and iteration
from math import factorial
import itertools
'''
Parameters:
    kernelSize = Size of the kernel
'''
kernelSize = 5
# Pascal kernels. pascal2 is a shifted version of pascal1
pascal1 = createImageUV(kernelSize, kernelSize)
pascal2 = createImageUV(kernelSize, kernelSize)
smooth = createImageUV(kernelSize, kernelSize)
sobel = createImageUV(kernelSize, kernelSize)
# Create kernel
for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)):
    # Smoothing: binomial coefficients of order kernelSize-1
    smooth[y,x,0] = factorial(kernelSize - 1) / \
                    (factorial(kernelSize - 1 - x) * factorial(x))
    smooth[y,x,1] = factorial(kernelSize - 1) / \
                    (factorial(kernelSize - 1 - y) * factorial(y))
    # Pascal
    if (kernelSize - 2 - x >= 0):
        pascal1[y,x,0] = factorial(kernelSize - 2) / \
                         (factorial(kernelSize - 2 - x) * factorial(x))
    if (kernelSize - 2 - y >= 0):
        pascal1[y,x,1] = factorial(kernelSize - 2) / \
                         (factorial(kernelSize - 2 - y) * factorial(y))
    # Pascal shift to the right
    xp = x - 1
    if (kernelSize - 2 - xp >= 0 and xp >= 0):
        pascal2[y,x,0] = factorial(kernelSize - 2) / \
                         (factorial(kernelSize - 2 - xp) * factorial(xp))
    yp = y - 1
    if (kernelSize - 2 - yp >= 0 and yp >= 0):
        pascal2[y,x,1] = factorial(kernelSize - 2) / \
                         (factorial(kernelSize - 2 - yp) * factorial(yp))
    # Sobel: smoothing in one axis times the difference of Pascal rows in the other
    sobel[y,x,0] = smooth[y,x,1] * (pascal1[y,x,0] - pascal2[y,x,0])
    sobel[y,x,1] = smooth[y,x,0] * (pascal1[y,x,1] - pascal2[y,x,1])
# Print pixel's values of the kernel
printImageRangeF(smooth[:,:,0], [0, kernelSize-1], [0, kernelSize-1], '2.0f')
printImageRangeF(smooth[:,:,1], [0, kernelSize-1], [0, kernelSize-1], '2.0f')
printImageRangeF(pascal1[:,:,0], [0, kernelSize-1], [0, kernelSize-1], '2.0f')
printImageRangeF(pascal1[:,:,1], [0, kernelSize-1], [0, kernelSize-1], '2.0f')
printImageRangeF(pascal2[:,:,0], [0, kernelSize-1], [0, kernelSize-1], '2.0f')
printImageRangeF(pascal2[:,:,1], [0, kernelSize-1], [0, kernelSize-1], '2.0f')
printImageRangeF(sobel[:,:,0], [0, kernelSize-1], [0, kernelSize-1], '4.0f')
printImageRangeF(sobel[:,:,1], [0, kernelSize-1], [0, kernelSize-1], '4.0f')
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter4/LaplacianOperator.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 4
LaplacianOperator: Detect edges by the Laplacian operator
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageL, showImageF
from ConvolutionUtilities import createLaplacianKernel, applyKernelF
# Iteration
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
kernelSize = Size of the kernel
sigma = Standard deviation of the kernel
'''
pathToDir = "../../Images/Chapter4/Input/"
imageName = "Lizard.png"
kernelSize = 12
sigma = 2
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Create Kernel
kernelLaplacian = createLaplacianKernel(kernelSize, sigma)
# Apply kernel
gaussianImage = applyKernelF(inputImage, kernelLaplacian)
# Zero-crossing detector
edges = createImageL(width, height)
kernelCentre = int((kernelSize - 1) / 2)
for x,y in itertools.product(range(1, width-1), range(1, height-1)):
quadrantValue = [0.0, 0.0, 0.0, 0.0]
for wx,wy in itertools.product(range(-1, 1), range(-1, 1)):
quadrantValue[0] += gaussianImage[y+wy, x+wx]
for wx,wy in itertools.product(range(-1, 1), range(0, 2)):
quadrantValue[1] += gaussianImage[y+wy, x+wx]
for wx,wy in itertools.product(range(0, 2), range(-1, 1)):
quadrantValue[2] += gaussianImage[y+wy, x+wx]
for wx,wy in itertools.product(range(0, 2), range(0, 2)):
quadrantValue[3] += gaussianImage[y+wy, x+wx]
maxVal,minVal = max(quadrantValue), min(quadrantValue)
if maxVal > 0.0 and minVal < 0:
edges[y,x] = 255
showImageF(gaussianImage)
showImageL(edges)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter5/HoughTransformEllipses.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 5
HoughTransformEllipses: Ellipse detection by the Hough transform
'''
# Set module functions
from ImageUtilities import imageReadL, showImageF, showImageL, createScaleImageL, createImageNF
from ImageOperatorsUtilities import applyCannyEdgeDetector
from ImagePropertiesUtilities import imageArgMax, peakDetectorImageL
from PlotUtilities import plot3DHistogram
from PrintUtilities import printText
# Math and iteration
from math import pi, sin, cos
import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    gaussianKernelSize = Gaussian kernel size. Filter noise
    sobelKernelSize = Sobel kernel size. Edge detection
    upperT = Upper threshold
    lowerT = Lower threshold
    majorAxisRange = Integer range of possible mayor axis values
    minorAxisRange = Integer range of possible minor axis values
    angleRange = Possible rotations in degrees
'''
pathToDir = "../../Images/Chapter5/Input/"
imageName = "Cup.png"
gaussianKernelSize = 5
sobelKernelSize = 3
upperT = 0.4
lowerT = 0.2
majorAxisRange = [45,65]
minorAxisRange = [20,30]
angleRange = [0, 4]
# Read image into array and show
inputImage, width, height = imageReadL(pathToDir + imageName)
showImageL(inputImage)
# Compute edges
magnitude, angle = applyCannyEdgeDetector(inputImage, gaussianKernelSize, sobelKernelSize, upperT, lowerT)
showImageF(magnitude)
# Five dimensional accumulator
majorAxisSize = majorAxisRange[1] - majorAxisRange[0]
minorAxisSize = minorAxisRange[1] - minorAxisRange[0]
angleSize = angleRange[1] - angleRange[0]
accumulator = createImageNF(width, height, majorAxisSize, minorAxisSize, angleSize)
# Gather evidence: each edge pixel votes for the centres of all compatible ellipses
for x,y in itertools.product(range(0, width), range(0, height)):
    printText(x)  # progress indicator, one line per image column
    if magnitude[y,x] != 0:
        for majAxis, minAxis in itertools.product(range(0, majorAxisSize), \
                                                  range(0, minorAxisSize)):
            a = majAxis + majorAxisRange[0]
            b = minAxis + minorAxisRange[0]
            for rot in range(0,angleSize):
                rotAngle = ((rot + angleRange[0]) * pi) / 180.0
                for m in range(0,360):
                    angle = (m * pi) / 180.0
                    x0 = x+ a*cos(angle)*cos(rotAngle) - b*sin(angle)*sin(rotAngle)
                    y0 = y+ a*cos(angle)*sin(rotAngle) + b*sin(angle)*cos(rotAngle)
                    bX0 = int(x0)
                    bY0 = int(y0)
                    if bX0>0 and bX0<width-1 and bY0>0 and bY0<height-1:
                        # Spread the vote over the four neighbouring cells
                        # NOTE(review): these weights sum to 4 per vote and pair
                        # wX with the bY0+1 row; products (1-wX)*(1-wY) etc. look
                        # like the intended bilinear split - confirm before changing
                        wX = x0 - bX0
                        wY = y0 - bY0
                        accumulator[bY0,bX0,majAxis,minAxis,rot] += (1.0-wX)+(1.0-wY)
                        accumulator[bY0+1,bX0,majAxis,minAxis,rot] += wX + (1.0-wY)
                        accumulator[bY0,bX0+1,majAxis,minAxis,rot] += (1.0-wX) + wY
                        accumulator[bY0+1,bX0+1,majAxis,minAxis,rot] += wX + wY
# Find maximum
maximumPos = imageArgMax(accumulator)
# Plot a slide of the accumulator
plot3DHistogram(accumulator[:,:,maximumPos[2], maximumPos[3], maximumPos[4]])
# Prepare output image as a dark version of the input
outputImage = createScaleImageL(inputImage, 0.5)
# Draw ellipse on output image
y = maximumPos[0]
x = maximumPos[1]
majAxis = maximumPos[2]
minAxis = maximumPos[3]
rot = maximumPos[4]
rotAngle = ((rot+angleRange[0]) * pi) / 180.0
a = majAxis + majorAxisRange[0]
b = minAxis + minorAxisRange[0]
for m in range(0,360):
    angle = (m * pi) / 180.0
    x0 = int(x+ a*cos(angle)*cos(rotAngle) - b*sin(angle)*sin(rotAngle))
    y0 = int(y+ a*cos(angle)*sin(rotAngle) + b*sin(angle)*cos(rotAngle))
    if x0<width and x0>0 and y0<height and y0>0:
        outputImage[y0,x0] = 255
showImageL(outputImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Modules/ImageRegionsUtilities.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
ImageRegionUtilities: Helper module to process regions and line segments
'''
# Images
from ImageUtilities import imageReadL, createImageF, createImageL, showImageL, createVectorF
from ImageOperatorsUtilities import applyCannyEdgeDetector
from ImagePropertiesUtilities import imageMaxMin
# Math and iteration
from math import pi, sqrt, sin, cos, atan, atan2, factorial, exp
from random import shuffle, sample
from timeit import itertools
# Array to store image data
from numpy import amax, amin
import numpy as np
# Combination function
def nCr(n, r):
    '''Return the binomial coefficient C(n, r), or 0 for out-of-range arguments.

    Uses integer floor division: the original true division returned a float,
    which loses precision and raises OverflowError for large factorials.
    '''
    if n < 0 or r < 0 or n-r < 0: return 0
    return factorial(n) // (factorial(r) * factorial(n-r))
# Rising factorial function
def risingFactorial(x, n):
    '''Return the rising factorial x * (x+1) * ... * (x+n-1); 1 when n == 0.'''
    result = 1
    for offset in range(n):
        result = result * (x + offset)
    return result
# Compute reference point in an edge image
def computeReferencePoint(edgeImage):
    '''Return ([yc, xc], edgePoints): the integer centroid of the non-zero
    pixels of edgeImage and the list of their (y, x) coordinates.

    Returns ([0, 0], []) when the image contains no edge pixels (the
    original raised ZeroDivisionError in that case).
    '''
    height, width = len(edgeImage), len(edgeImage[0])
    refPoint = [0,0]
    edgePoints = []
    for x,y in itertools.product(range(0, width), range(0, height)):
        if edgeImage[y,x] != 0:
            refPoint[0] += y
            refPoint[1] += x
            edgePoints.append((y,x))
    numPts = len(edgePoints)
    if numPts == 0:
        # No edge pixels: avoid division by zero
        return refPoint, edgePoints
    refPoint = [int(refPoint[0]/numPts),int(refPoint[1]/numPts)]
    return refPoint, edgePoints
# Return the longest segment from edges
def findLongestSegment(edges):
    '''Find the longest 8-connected segment of non-zero pixels in an edge
    image, order its points along the curve, and return them as (y, x)
    pairs smoothed by a +/-1 moving average.
    '''
    height, width = len(edges), len(edges[0])
    # Find line segments by flood-filling connected edge pixels
    segmentsList = []
    segmentsImage = createImageF(width, height)
    maxSegmentLenght = 0
    maxSegmentIndex = 0
    for x,y in itertools.product(range(0, width), range(0, height)):
        if edges[y,x] != 0 and segmentsImage[y,x] == 0:
            segment = [ ]
            segmentPoints = [(y,x)]
            segmentsImage[y,x] = 255
            while len(segmentPoints) > 0:
                yc = (segmentPoints[0])[0]
                xc = (segmentPoints[0])[1]
                segment.append((yc,xc))
                segmentPoints = segmentPoints[1:]
                for dx,dy in itertools.product(range(-1,2), range(-1,2)):
                    xn, yn = xc+dx, yc+dy
                    # Bug fix: the original condition was
                    #   dx!=0 or dy!=0 and <bounds>
                    # so the bounds checks were skipped whenever dx != 0,
                    # allowing out-of-range (or wrapped negative) indexing
                    if (dx!=0 or dy!=0) and xn > 0 and yn > 0 and xn < width and yn < height:
                        if edges[yn,xn] != 0 and segmentsImage[yn,xn] == 0:
                            segmentPoints.append((yn,xn))
                            segmentsImage[yn,xn] = 255
            segmentsList.append(segment)
            if len(segment) > maxSegmentLenght:
                maxSegmentLenght = len(segment)
                maxSegmentIndex = len(segmentsList) - 1
    # Order the points of the longest segment by repeatedly moving to the
    # closest remaining point
    mainSegment = []
    segment = segmentsList[maxSegmentIndex]
    curentElement = segment.pop(0)
    sy,sx = curentElement[0], curentElement[1]
    mainSegment.append(curentElement)
    numPoints = len(segment)
    while numPoints > 0:
        closestElement = [0, float("inf")]
        cy,cx = curentElement[0], curentElement[1]
        for p in range(0, numPoints):
            y,x = (segment[p])[0], (segment[p])[1]
            d = sqrt((cx-x) * (cx-x) + (cy-y) * (cy-y) )
            if d < closestElement[1] or (d == closestElement[1] and y > cy):
                closestElement = [p, d]
        # If we are closer to the first point, then end now
        dFirst = sqrt((cx-sx) * (cx-sx) + (cy-sy) * (cy-sy) )
        if (cx!=sx or cy!=sy) and 2*dFirst < closestElement[1]:
            break
        curentElement = segment.pop(closestElement[0])
        numPoints = len(segment)
        mainSegment.append(curentElement)
    numPoints = len(mainSegment)
    # Average to get more accurate direction
    averageSize = 1
    totalPixels = float(1 + 2*averageSize)
    mainSegmentAverage = [ ]
    for p in range(0, numPoints):
        y,x = 0, 0
        for w in range(-averageSize, averageSize+1):
            p1 = p + w
            # Wrap the window indices around the closed segment
            if p1 < 0: p1 = p1 + numPoints
            if p1 >= numPoints: p1 = p1 - numPoints
            x += (mainSegment[p1])[1]
            y += (mainSegment[p1])[0]
        mainSegmentAverage.append((y/totalPixels, x/totalPixels))
    return mainSegmentAverage
# Return the longest segment from an image
def findLongestCentredSegmentinImage(imageName, gaussianKernelSize, sobelKernelSize, upperT, lowerT):
    '''Detect Canny edges in the image, keep the longest segment and return
    (centre, shape, width, height) where shape is a 2 x numPoints array of
    (y, x) offsets from the segment's mean point.
    '''
    # Read image into array
    inputImage, width, height = imageReadL(imageName)
    # Compute edges and extract the longest segment
    magnitude, _ = applyCannyEdgeDetector(inputImage, gaussianKernelSize, sobelKernelSize, upperT, lowerT)
    points = findLongestSegment(magnitude)
    numPoints = len(points)
    # Centre is the mean of the segment points
    sumY = sum(pt[0] for pt in points)
    sumX = sum(pt[1] for pt in points)
    centre = [sumY / numPoints, sumX / numPoints]
    # Store the points relative to the centre in a 2 x numPoints array
    shape = createImageF(numPoints, 2)
    for index in range(0, numPoints):
        shape[0, index] = points[index][0] - centre[0]
        shape[1, index] = points[index][1] - centre[1]
    return centre, shape, width, height
def findLongesSegmentinImage(imageName, gaussianKernelSize, sobelKernelSize, upperT, lowerT):
    '''Detect Canny edges in the image and return the longest segment as a
    2 x numPoints array (row 0 = y, row 1 = x) plus the image width/height.
    '''
    # Read image into array
    inputImage, width, height = imageReadL(imageName)
    # Compute edges and extract the longest segment
    magnitude, _ = applyCannyEdgeDetector(inputImage, gaussianKernelSize, sobelKernelSize, upperT, lowerT)
    points = findLongestSegment(magnitude)
    # Convert the point list to a 2 x numPoints array
    shape = createImageF(len(points), 2)
    for index, (y, x) in enumerate(points):
        shape[0, index] = y
        shape[1, index] = x
    return shape, width, height
# Get a list with the pixels outside a backgroundRange
def pixlesList(image, backgroundRange):
    '''Return (y, x, 1) triplets for every pixel whose value lies outside the
    closed interval backgroundRange = [low, high]; scanned column by column.
    '''
    height, width = len(image), len(image[0])
    low, high = backgroundRange[0], backgroundRange[1]
    return [(y, x, 1)
            for x in range(width) for y in range(height)
            if image[y, x] < low or image[y, x] > high]
def edgesList(image, shapeImage, backgroundRange):
    '''Return the (y, x) points of shapeImage whose 8-neighbourhood contains a
    pixel with value inside backgroundRange (i.e. the border of the shape).
    '''
    height, width = len(image), len(image[0])
    low, high = backgroundRange[0], backgroundRange[1]
    borderPixels = [ ]
    for point in shapeImage:
        y, x = point[0], point[1]
        touchesBackground = False
        for wy in range(-1, 2):
            for wx in range(-1, 2):
                posX, posY = x + wx, y + wy
                if 0 <= posY < height and 0 <= posX < width:
                    if low <= image[posY, posX] <= high:
                        touchesBackground = True
        if touchesBackground:
            borderPixels.append((y, x))
    return borderPixels
def computeAngularFunctions(shape):
    '''Compute arc-length and angular functions of a closed shape.

    shape is a 2 x numPoints array with y in row 0 and x in row 1.
    Returns (sumArcLenghts, normArcLenghts, angularFunc, cumulativeFunc,
    cumulativeNormFunc).
    '''
    # Compute the accumulative arc lengths
    numPoints = len(shape[0])
    sumArcLenghts = []
    # Start from the last point so the curve is treated as closed
    y0, x0 = shape[0, numPoints-1], shape[1, numPoints-1]
    shapeLenght = 0.0
    for p in range(0, numPoints):
        y,x = shape[0,p], shape[1,p]
        shapeLenght += sqrt((y-y0)*(y-y0) + (x-x0)*(x-x0))
        sumArcLenghts.append(shapeLenght)
        y0,x0 = y,x
    # Normalized lengths, mapped to [0, 2*pi]
    normArcLenghts = []
    for p in range(0, numPoints):
        normArcLenghts.append((2.0*pi*sumArcLenghts[p])/shapeLenght);
    # Compute angular function by an average window: the tangent direction is
    # estimated from points windowSize[0]..windowSize[1]-1 behind and ahead
    windowSize = [5,10]
    d = float(windowSize[1] -windowSize[0])
    angularFunc = [ ]
    for p in range(0, numPoints):
        x1,x2,y1,y2 = 0.0, 0.0, 0.0, 0.0
        # Average change (indices wrap around the closed curve)
        for q in range(windowSize[0], windowSize[1]):
            pa,pb = p-q,p+q
            if pa<0: pa += numPoints
            if pb>=numPoints: pb -= numPoints
            ya,xa = shape[0,pa], shape[1,pa]
            yb,xb = shape[0,pb], shape[1,pb]
            x1,y1 = x1+xa, y1+ya
            x2,y2 = x2+xb, y2+yb
        dx, dy = (x2-x1)/d, (y2-y1)/d
        angle = atan2(dy, dx)
        angularFunc.append(angle)
    # Compute cumulative angular function
    cumulativeFunc = [ ]
    angle0 = angularFunc[numPoints-1]
    sumAngle = 0.0
    for p in range(0, numPoints):
        angle = angularFunc[p]
        diff = angle-angle0
        # The two corrections together wrap diff into (-pi, pi]: the first
        # shifts it up by 2*pi, the second brings it back down if needed
        if diff < pi:
            diff += 2.0* pi
        if diff > pi:
            diff -= 2.0 * pi
        sumAngle += diff
        cumulativeFunc.append(sumAngle)
        angle0 = angle
    # Compute cumulative angular accumulated (adds the normalised arc length
    # so a circle maps to a constant function)
    cumulativeNormFunc = [ ]
    for p in range(0, numPoints):
        cumulativeNormFunc.append(cumulativeFunc[p]+normArcLenghts[p])
    return sumArcLenghts, normArcLenghts, angularFunc, cumulativeFunc, cumulativeNormFunc
def weightedKrawtchoukPolynomials(p, width):
    '''Compute Krawtchouk polynomials of order N = width-1 with their weights.

    p     = polynomial parameter (0.5 centres the polynomials)
    width = number of samples and number of polynomials
    Returns (K, A, sigma, ro, w):
      K[n, x] = value of polynomial n at x, rows normalised by K[n, 0]
      A       = coefficient matrix such that A*C = K for the power basis C
      sigma   = binomial weight function over x
      ro      = per-polynomial normalisation terms
      w[n, x] = sqrt(sigma[x] / ro[n]), the weighting used for moments
    '''
    # Data containers
    sigma = createVectorF(width)
    ro = createVectorF(width)
    K = createImageF(width,width)
    # Coefficient size
    N = width-1
    # Weight
    for x in range(0,width):
        sigma[x] = nCr(N, x) * pow(p,x) * pow(1-p,N-x)
    # Scale factor. Commented direct computation and using for to avoid factorial
    #for n in range(0,width):
    #    ro[n] = pow(-1,n) * pow((1-p)/p,n) * (float(factorial(n)) / risingFactorial(-N, n))
    ro[0] = 1
    # Recurrence relation avoids the factorials/rising factorials above
    for n in range(1,N):
        ro[n] = (-1*((1.0-p)/p)*n/(-N+(n-1)))*ro[n-1]
    ro[N]=(((1.0-p)/p)*N)*ro[N-1]
    # Krawtchouk matrix that store result of the polynomial
    # Each row is a polynomial each column is the polynomial value for an x value
    # Alternatively, we could have used the polynomial generating function
    q = 1.0/p
    for n,x in itertools.product(range(0, width), range(0, width)):
        for s in range(0,width):
            K[n,x] += pow(-1,s) * nCr(N-x, n-s) * nCr(x, s) * pow(q-1,n-s)
    # Normalize rows for stability
    for n in range(0,width):
        scale = K[n,0]
        for x in range(0,width):
            K[n,x] /= scale
    # Obtain the coefficients A of the polynomials from K
    # Solve for the coefficients A in A*C = K
    C = createImageF(width,width)
    for n,x in itertools.product(range(0, width), range(0, width)):
        C[n,x] = pow(x,n)
    CT = np.transpose(C)
    KT = np.transpose(K)
    AT = np.linalg.solve(CT, KT) # solves the equation A*x=b A*C = k, C'*A' = K'
    A = np.transpose(AT)
    # Product defining the weighted
    w = createImageF(width,width)
    for n,x in itertools.product(range(0, width), range(0, width)):
        w[n,x] = sqrt(sigma[x]/ro[n])
    return K, A, sigma, ro, w
def geometricMoments(pixelList, numMoments):
    '''Compute the geometric moments M[n, m] = sum of x^n * y^m * v over the
    (y, x, v) pixel triplets in pixelList.
    '''
    M = createImageF(numMoments, numMoments)
    for n in range(numMoments):
        for m in range(numMoments):
            for y, x, val in pixelList:
                M[n, m] += (x ** n) * (y ** m) * val
    return M
def geometricInvariantMoments(pixelList, numMoments):
    '''Geometric moments normalised for translation, rotation and scale.

    pixelList  = list of (y, x, val) pixel triplets
    numMoments = moments per axis (the orientation estimate below reads
                 M[2,0] and M[0,2], so it needs numMoments >= 3)
    Returns the numMoments x numMoments matrix of invariant moments.
    '''
    numPoints = len(pixelList)
    # Compute moments
    M = createImageF(numMoments,numMoments)
    for m,n in itertools.product(range(0, numMoments), range(0, numMoments)):
        for indexPixel in range(0, numPoints):
            y = (pixelList[indexPixel])[0]
            x = (pixelList[indexPixel])[1]
            val = (pixelList[indexPixel])[2]
            M[n,m] += (x**n) * (y**m) * val
    # Geometric central Moments
    xc,yc = M[1,0]/M[0,0], M[0,1]/M[0,0]
    m11 = M[1,1]/M[0,0] - xc*yc
    m20 = M[2,0]/M[0,0] - xc**2
    m02 = M[0,2]/M[0,0] - yc**2
    # Principal-axis angle used to rotate the shape to a canonical orientation
    # NOTE(review): raises ZeroDivisionError when m20 == m02; atan2 of
    # (2*m11, m20 - m02) would be the safe form - confirm before changing
    if m20 < m02:
        t = -(0.5 * atan(2.0*m11/(m20-m02)) + pi/2.0)
    else:
        t = -(0.5 * atan(2.0*m11/(m20-m02)))
    # Geometric invariant moments
    v = createImageF(numMoments,numMoments)
    vn = createImageF(numMoments,numMoments)
    for m,n in itertools.product(range(0, numMoments), range(0, numMoments)):
        for indexPixel in range(0, numPoints):
            y = (pixelList[indexPixel])[0]
            x = (pixelList[indexPixel])[1]
            val = (pixelList[indexPixel])[2]
            # Accumulate over coordinates rotated by t about the centroid
            v[n,m] += ((x-xc)*cos(t) - (y-yc)*sin(t))**n * ((x-xc)*sin(t) + (y-yc)*cos(t))**m * val
        # Scale normalisation by M[0,0]^(1 + (n+m)/2)
        l = (1 + ((n + m) / 2.0))
        vn[n,m] = v[n,m] / pow(M[0,0],l)
    return vn
# WaterShed transform
def watherShed(distanceImage, shapeImage, suppWindow):
    '''Watershed segmentation of a shape driven by a distance image.

    distanceImage = per-pixel distance values used as the flooding height
    shapeImage    = list of (y, x) pixels that belong to the shape
    suppWindow    = half-size of the window used to suppress non-maxima
                    when seeding the initial regions
    Returns an image with a region id per pixel; border pixels get their
    own id (regionIndex + 1) and ids are randomised for display.
    '''
    height, width = len(distanceImage), len(distanceImage[0])
    watershedImage = createImageF(width, height)
    # Initial regions by finding the maximum
    regionIndex = 1 # Start id for a region. Any number different from zero
    numPoints = len(shapeImage)
    for indexPixel in range(0, numPoints):
        y, x = (shapeImage[indexPixel])[0], (shapeImage[indexPixel])[1]
        if watershedImage[y,x] == 0:
            # A seed must be a local maximum of the distance over the window
            # and must not overlap an already-seeded region
            peak = True
            for wx,wy in itertools.product(range(x-suppWindow, x+suppWindow+1), \
                                           range(y-suppWindow, y+suppWindow+1)):
                if wy>=0 and wy<height and wx>=0 and wx<width:
                    if watershedImage[wy, wx] != 0 or \
                       distanceImage[y, x] < distanceImage[wy, wx]:
                        peak = False
            if peak:
                for wx,wy in itertools.product(range(x-suppWindow, x+suppWindow+1), \
                                               range(y-suppWindow, y+suppWindow+1)):
                    if wy>=0 and wy<height and wx>=0 and wx<width:
                        watershedImage[wy, wx] = regionIndex
                regionIndex += 1
    floodRegion = [ ] # The region we need to flood
    for indexPixel in range(0, numPoints):
        y, x = (shapeImage[indexPixel])[0], (shapeImage[indexPixel])[1]
        if watershedImage[y,x] == 0:
            floodRegion.append((y,x))
    # This is not required. We do it to get a better display
    # Create random regions ID. We change the ID for a random value so we get a random gray level when showing the regions
    c = sample(range(regionIndex), regionIndex)
    for indexPixel in range(0, numPoints):
        y, x = (shapeImage[indexPixel])[0], (shapeImage[indexPixel])[1]
        if watershedImage[y, x] != 0:
            watershedImage[y, x] = c[int(watershedImage[y, x])] + 1
    # Flooding: lower the water level one step at a time and grow regions
    maxDistance, _ = imageMaxMin(distanceImage)
    for floodValue in range(int(maxDistance), 0, -1):
        flooded = True
        while flooded:
            flooded = False
            newFloodRegion = [ ]
            growRegion = [ ]
            # Shuffle so the growth does not favour a scan direction
            shuffle(floodRegion)
            for indexPixel in range(0, len(floodRegion)):
                y, x = (floodRegion[indexPixel])[0], (floodRegion[indexPixel])[1]
                # Points not flooded will be considered in following iterations
                if distanceImage[y,x] <= floodValue:
                    newFloodRegion.append((y,x))
                else:
                    # list of neighbours
                    n = [ ]
                    for wx,wy in itertools.product(range(-1, 2), range(-1, 2)):
                        posX, posY = x + wx, y+ wy
                        if posY > -1 and posY <  height and posX > -1 and posX <  width:
                            if watershedImage[posY, posX] != 0:
                                n.append(watershedImage[posY, posX])
                    # No neighbours, so we cannot grow
                    if(len(n) == 0):
                        newFloodRegion.append((y,x))
                    else:
                        # Grow of only one type of region; pixels touching two
                        # regions stay unlabelled and become the watershed
                        if len(set(n)) == 1:
                            growRegion.append((y,x,n[0]))
                            flooded = True
            for pixel in growRegion:
                watershedImage[pixel[0], pixel[1]] = pixel[2]
            floodRegion = newFloodRegion
    # Set the borders
    shedID = regionIndex + 1
    for indexPixel in range(0, numPoints):
        y, x = (shapeImage[indexPixel])[0], (shapeImage[indexPixel])[1]
        if watershedImage[y,x] == 0 and distanceImage[y, x] > 0.5:
            watershedImage[y, x] = shedID
    return watershedImage
# Return the maximum and minimum points in a shape
def shapeMaxMin(shape):
    # Return the largest and smallest coordinate values occurring in a shape.
    # shape is a 2-row array (row 0 = y values, row 1 = x values); the
    # extrema are taken over BOTH rows, so the pair (maximum, minimum)
    # bounds every coordinate of the shape in either axis.
    rowY, rowX = shape[0, :], shape[1, :]
    overallMax = max(amax(rowX), amax(rowY))
    overallMin = min(amin(rowX), amin(rowY))
    return overallMax, overallMin
def showShapeinImage(shape, centre, width, height):
    """Draw a shape into a blank width x height image and display it.

    shape  : 2-row array of (y, x) offsets relative to *centre*.
    centre : (y, x) translation applied to every shape point.
    Points falling outside the image bounds are silently skipped.
    """
    segmentsImage = createImageL(width, height)
    numPoints = len(shape[0])
    for p in range(0, numPoints):
        y, x = int(centre[0] + shape[0, p]), int(centre[1] + shape[1, p])
        # Fix: 0 is a valid pixel index, so the lower bound must be >= 0.
        # The original 'x > 0 and y > 0' silently dropped points on the
        # first row/column; the rest of this file uses the '> -1' form.
        if x >= 0 and y >= 0 and x < width and y < height:
            segmentsImage[y, x] = 255
    showImageL(segmentsImage)
# Compute density function from a region in an image
def densityHistogram(image, position, regionRadius, sigma, histoSize):
    """Compute a normalised colour density histogram for an image region.

    The region is centred on position = (x, y) with half-sizes
    regionRadius = (rx, ry).  Each in-bounds pixel contributes a Gaussian
    spatial weight (scale sigma) into the (Cb, Cr) chroma bin of its RGB
    value, producing a histoSize x histoSize histogram normalised so its
    entries sum to 1 (left all-zero if the region has no in-bounds pixels).
    """
    height = len(image)
    width = len(image[0])
    # Quantization scale: 256 intensity levels mapped onto histoSize bins
    colourScale = 256.0 / histoSize
    histogram = createImageF(histoSize, histoSize)
    sumValue = 0
    for deltaX, deltaY in itertools.product(range(-regionRadius[0], regionRadius[0]), \
                                            range(-regionRadius[1], regionRadius[1])):
        x, y = position[0] + deltaX, position[1] + deltaY
        if x > 0 and y > 0 and x < width and y < height:
            # Gaussian spatial weight: pixels near the centre count more
            w = exp(-(deltaX*deltaX + deltaY*deltaY) / (2*sigma*sigma))
            rgb = image[y, x] / 256.0
            # RGB -> chroma (Cb, Cr) bin indices (BT.601-style coefficients)
            Cb = int((128 - 37.79*rgb[0] - 74.203*rgb[1] + 112*rgb[2]) / colourScale)
            Cr = int((128 + 112*rgb[0] - 93.786*rgb[1] - 18.214*rgb[2]) / colourScale)
            histogram[Cr, Cb] += w
            sumValue += w
    # Normalise to a probability distribution; guard against a region that
    # lies entirely outside the image, which would divide by zero.
    if sumValue > 0:
        for r, b in itertools.product(range(0, histoSize), range(0, histoSize)):
            histogram[r, b] /= sumValue
    return histogram
# Get a 2D colour description from a RGB value
def colourFeature(rgb, colourScale):
    # Map an RGB value to quantised (Cb, Cr) chroma coordinates.
    # Channels are normalised to [0, 1) and converted with BT.601-style
    # chroma coefficients; colourScale shrinks the 0..255 chroma range
    # down to the histogram bin range.
    r, g, b = rgb[0] / 256.0, rgb[1] / 256.0, rgb[2] / 256.0
    cB = int((128 - 37.79 * r - 74.203 * g + 112 * b) / colourScale)
    cR = int((128 + 112 * r - 93.786 * g - 18.214 * b) / colourScale)
    return cB, cR
# Implementation of meanshift
def meanShift(inputImage, q, sizeReg, sigma, histoSize, newPos):
    """Iteratively move a window so its colour histogram matches model q.

    inputImage : RGB image indexed [y, x].
    q          : model colour histogram (histoSize x histoSize Cb/Cr bins).
    sizeReg    : (rx, ry) half-size of the tracked window.
    sigma      : scale of the Gaussian spatial kernel.
    histoSize  : number of bins per chroma axis.
    newPos     : (x, y) starting window centre.
    Returns the converged centre [x, y] (iteration stops when the integer
    centre no longer moves).

    NOTE(review): pixel reads inputImage[y, x] are not bounds-checked, so
    the window is assumed to stay fully inside the image -- confirm that
    callers guarantee this.
    """
    # Weights
    weights = createImageF(2*sizeReg[0], 2*sizeReg[1])
    currPos = [0, 0]
    colourScale = 256.0 / histoSize
    while(currPos != newPos):
        currPos = newPos
        # Histogram of the current window, compared against the model q
        qs = densityHistogram(inputImage, currPos, sizeReg, sigma, histoSize)
        # Weights
        for deltaX, deltaY in itertools.product(range(-sizeReg[0],sizeReg[0]), \
                                                range(-sizeReg[1], sizeReg[1])):
            # Position of the pixel in the image and in the weight array
            x, y = currPos[0] + deltaX, currPos[1] + deltaY
            px,py = deltaX+sizeReg[0], deltaY+sizeReg[1]
            # Features
            Cb,Cr= colourFeature(inputImage[y,x], colourScale)
            # Update: ratio weight sqrt(q/qs); the tiny epsilon in the else
            # branch stands in for an empty bin to avoid division by zero
            if qs[Cr, Cb] > 0:
                weights[py, px] = sqrt(q[Cr, Cb] / qs[Cr, Cb])
            else:
                weights[py, px] = sqrt(q[Cr, Cb] / .000000000001)
        # Compute mean shift sums
        meanSum = [0, 0]
        kernelSum = 0
        for deltaX, deltaY in itertools.product(range(-sizeReg[0],sizeReg[0]), \
                                                range(-sizeReg[1], sizeReg[1])):
            # Position of the pixel in the image
            x, y = currPos[0] + deltaX, currPos[1] + deltaY
            # Kernel parameter
            w = exp(-(deltaX*deltaX + deltaY*deltaY)/(2*sigma*sigma));
            # Weight index
            px, py = deltaX+sizeReg[0], deltaY+sizeReg[1]
            # Mean sum
            meanSum[0] += w * weights[py, px] * x
            meanSum[1] += w * weights[py, px] * y
            # Kernel sum
            kernelSum += w * weights[py, px]
        # Mean shift: new centre is the weight-weighted centroid
        newPos = [int(meanSum[0] / kernelSum), int(meanSum[1] / kernelSum)]
    return newPos
# Back project a source image into the target
def backProjection(sourceImage, targetImage, qSource, pSource, pTarget, sizeReg, histoSize):
    """Locate and size a tracked region in a target image by back-projection.

    Both images are back-projected with the SOURCE colour model qSource
    (each pixel replaced by the model probability of its chroma bin); the
    region centre and size are then estimated from second-order geometric
    moments around pSource / pTarget.

    Returns ([xc, yc], [sx, sy]): the target centroid and the region size
    scaled relative to the source region.

    NOTE(review): in 'sqrt((a+c-sqrt(...)/2))' the /2 divides only the
    inner sqrt; the usual covariance-eigenvalue form is ((a+c) +- sqrt(...))/2.
    The same expression is used for source and target so the sizes are
    taken as a ratio, which partially cancels the effect -- confirm against
    the book's derivation before changing it.
    """
    height, width = len(sourceImage), len(sourceImage[0])
    colourScale = 256.0 / histoSize
    # Projection: probability-of-model image for source and target
    projectionSource = createImageF(width, height)
    projectionTarget = createImageF(width, height)
    for x, y in itertools.product(range(0,width), range(0, height)):
        Cb,Cr = colourFeature(sourceImage[y,x], colourScale)
        projectionSource[y,x] = qSource[Cr,Cb]
        Cb,Cr = colourFeature(targetImage[y,x], colourScale)
        projectionTarget[y,x] = qSource[Cr,Cb]
    # Compute geometric moments m_nm = sum x^n y^m p(x,y) over a search
    # window 1.5x the region size
    momS = createImageF(3, 3)
    momT = createImageF(3, 3)
    sizeSearch = [int(sizeReg[0] *1.5), int(sizeReg[1] *1.5)]
    for deltaX, deltaY in itertools.product(range(-sizeSearch[0], sizeSearch[0]), \
                                            range(-sizeSearch[1], sizeSearch[1])):
        x, y = pSource[0] + deltaX, pSource[1] + deltaY
        for m,n in itertools.product(range(0, 3), range(0, 3)):
            momS[n,m] += (x**n) * (y**m) * projectionSource[y,x]
        x, y = pTarget[0] + deltaX, pTarget[1] + deltaY
        for m,n in itertools.product(range(0, 3), range(0, 3)):
            momT[n,m] += (x**n) * (y**m) * projectionTarget[y,x]
    # Source: centroid and central second moments -> characteristic size
    xc,yc = momS[1,0]/momS[0,0], momS[0,1]/momS[0,0]
    a = momS[2,0]/momS[0,0] - xc*xc;
    b = 2*(momS[1,1]/momS[0,0] - xc * yc);
    c = momS[0,2]/momS[0,0]- yc*yc;
    sxS = int(sqrt((a+c-sqrt(b*b+(a-c)*(a-c))/2)));
    syS = int(sqrt((a+c+sqrt(b*b+(a-c)*(a-c))/2)));
    # Target: same computation; xc,yc now hold the TARGET centroid,
    # which is what gets returned
    xc,yc = momT[1,0]/momT[0,0], momT[0,1]/momT[0,0]
    a = momT[2,0]/momT[0,0] - xc*xc;
    b = 2*(momT[1,1]/momT[0,0] - xc * yc);
    c = momT[0,2]/momT[0,0]- yc*yc;
    sx = int(sqrt((a+c-sqrt(b*b+(a-c)*(a-c))/2)));
    sy = int(sqrt((a+c+sqrt(b*b+(a-c)*(a-c))/2)));
    # Scale the target size by the known source region size
    sy = sy * sizeReg[1] / syS
    sx = sx * sizeReg[0] / sxS
    return [int(xc),int(yc)], [int(sx),int(sy)]
# Back project an image
def backProjectionImage(image, q, histoSize):
    # Back-project histogram q onto an image: each output pixel holds the
    # probability stored in q for the (Cb, Cr) chroma bin of the
    # corresponding input pixel.
    numRows = len(image)
    numCols = len(image[0])
    binScale = 256.0 / histoSize
    projection = createImageF(numCols, numRows)
    for row in range(0, numRows):
        for col in range(0, numCols):
            chromaB, chromaR = colourFeature(image[row, col], binScale)
            projection[row, col] = q[chromaR, chromaB]
    return projection
# Determine the size of a region
def regionSize(backProjImage, newBackProjImage, pos, newPos, sizeReg):
    """Re-estimate a tracked region's centre and size from back-projections.

    Same moment computation as backProjection, but operating on two
    already back-projected images (old and new frame) around pos / newPos.
    Returns ([xc, yc], [sx, sy]): the centroid measured in the NEW image
    and the new size scaled relative to the old region size.

    NOTE(review): duplicates backProjection's moment/size maths, including
    the questionable '/2' placement inside sqrt(...) -- keep the two in
    sync if either is changed.
    """
    # Compute geometric moments m_nm = sum x^n y^m p(x,y) over a search
    # window 1.5x the region size
    momS = createImageF(3, 3)
    momT = createImageF(3, 3)
    sizeSearch = [int(sizeReg[0] *1.5), int(sizeReg[1] *1.5)]
    for deltaX, deltaY in itertools.product(range(-sizeSearch[0], sizeSearch[0]), \
                                            range(-sizeSearch[1], sizeSearch[1])):
        x, y = pos[0] + deltaX, pos[1] + deltaY
        for m,n in itertools.product(range(0, 3), range(0, 3)):
            momS[n,m] += (x**n) * (y**m) * backProjImage[y,x]
        x, y = newPos[0] + deltaX, newPos[1] + deltaY
        for m,n in itertools.product(range(0, 3), range(0, 3)):
            momT[n,m] += (x**n) * (y**m) * newBackProjImage[y,x]
    # Old frame: centroid and central second moments -> reference size
    xc,yc = momS[1,0]/momS[0,0], momS[0,1]/momS[0,0]
    a = momS[2,0]/momS[0,0] - xc*xc;
    b = 2*(momS[1,1]/momS[0,0] - xc * yc);
    c = momS[0,2]/momS[0,0]- yc*yc;
    sxS = int(sqrt((a+c-sqrt(b*b+(a-c)*(a-c))/2)));
    syS = int(sqrt((a+c+sqrt(b*b+(a-c)*(a-c))/2)));
    # New frame: same computation; xc,yc now hold the returned centroid
    xc,yc = momT[1,0]/momT[0,0], momT[0,1]/momT[0,0]
    a = momT[2,0]/momT[0,0] - xc*xc;
    b = 2*(momT[1,1]/momT[0,0] - xc * yc);
    c = momT[0,2]/momT[0,0]- yc*yc;
    sx = int(sqrt((a+c-sqrt(b*b+(a-c)*(a-c))/2)));
    sy = int(sqrt((a+c+sqrt(b*b+(a-c)*(a-c))/2)));
    # Scale by the known old region size
    sy = sy * sizeReg[1] / syS
    sx = sx * sizeReg[0] / sxS
    return [int(xc),int(yc)], [int(sx),int(sy)]
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter2/SeparableFourierTransform.py | <filename>ExamplesPython_3.6/Chapter2/SeparableFourierTransform.py
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 2
SeparableFourierTransform: Compute the Fourier transform of an image using the separable formulation
Display the magnitude and phase and reconstruct image
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageF, showImageF
from ImageOperatorsUtilities import imageLogF
from PrintUtilities import printProgress
# Iteration and Math functions
from math import sin, cos, pi, sqrt, atan2
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
'''
pathToDir = "../../Images/Chapter2/Input/"
imageName = "Square.png"
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Create coefficients Image. Two floats to represent a complex number
# Maximum frequency according to sampling
maxFreqW = int(width /2)
maxFreqH = int(height/2)
numCoeffW = 1 + 2 * maxFreqW
numCoeffH = 1 + 2 * maxFreqH
coeff = createImageF(numCoeffW ,numCoeffH , 2)
# Use an odd period: if the image size is even, extend it by one so the
# frequency range -maxFreq..+maxFreq is symmetric about zero
m = float(width)
n = float(height)
if width % 2 == 0:
    m = width + 1.0
if height % 2 == 0:
    n = height + 1.0
# Fundamental frequency
ww = (2.0 * pi) / m
wh = (2.0 * pi) / n
# Fourier Transform, computed separably: for each frequency (u, v) the
# inner y-loop accumulates one column's 1D transform (sumY = cos/sin
# parts), which is then combined with the x-direction basis.
for u in range(-maxFreqW, maxFreqW + 1):
    printProgress(u + maxFreqW, numCoeffW)
    entryW = u + maxFreqW
    for v in range(-maxFreqH, maxFreqH + 1):
        entryH = v + maxFreqH
        coeff[entryH, entryW] = [0, 0]
        for x in range(0, width):
            sumY = [0, 0]
            for y in range(0, height):
                sumY[0] += inputImage[y,x] * cos(y * wh * v)
                sumY[1] += inputImage[y,x] * sin(y * wh * v)
            # Complex multiply of the column result by the x basis:
            # [0] = real part, [1] = imaginary part
            coeff[entryH, entryW][0] += sumY[0] * cos(x * ww * u) - sumY[1] * sin(x * ww * u)
            coeff[entryH, entryW][1] -= cos(x * ww * u) * sumY[1] + sin(x * ww * u) * sumY[0]
# Scale the coefficients; the reconstruction below divides by m*n again,
# so the two normalisations cancel -- presumably chosen so the displayed
# power spectrum is not vanishingly small (TODO confirm intended scaling)
for kw in range(-maxFreqW, maxFreqW + 1):
    printProgress(kw + maxFreqW, numCoeffW)
    entryW = kw + maxFreqW
    for kh in range(-maxFreqH, maxFreqH + 1):
        entryH = kh + maxFreqH
        coeff[entryH, entryW][0] *= m*n
        coeff[entryH, entryW][1] *= m*n
# Reconstruction: inverse transform by summing, for every pixel, the real
# part of coeff(u,v) * exp(j(x*ww*u + y*wh*v)) / (m*n), expanded into
# cos/sin products below
reconstruction = createImageF(width, height)
for u in range(-maxFreqW, maxFreqW + 1):
    printProgress(u + maxFreqW, numCoeffW)
    entryW = u + maxFreqW
    for v in range(-maxFreqH, maxFreqH + 1):
        entryH = v + maxFreqH
        for x in range(0, width):
            for y in range(0, height):
                reconstruction[y,x] += (coeff[entryH, entryW][0] / (m*n)) * (cos(x * ww * u) * cos(y * wh * v) - sin(x * ww * u) * sin(y * wh * v)) - \
                                       (coeff[entryH, entryW][1] / (m*n)) * (cos(x * ww * u) * sin(y * wh * v) + sin(x * ww * u) * cos(y * wh * v))
showImageF(reconstruction)
# Power spectrum: magnitude of each complex coefficient.
# NOTE(fix): the original additionally applied log(1.0 + power) inside the
# loop, but 'log' is never imported in this script (the math import brings
# only sin, cos, pi, sqrt, atan2), so that line raised NameError at
# runtime; imageLogF below already produces the log-scaled display, so the
# broken line is removed rather than double-logging.
power = createImageF( 1 + 2 * maxFreqW, 1 + 2 * maxFreqH)
for kw,kh in itertools.product(range(-maxFreqW, maxFreqW + 1), range(-maxFreqH, maxFreqH + 1)):
    entryW = kw + maxFreqW
    entryH = kh + maxFreqH
    power[entryH, entryW] = sqrt(coeff[entryH, entryW][0] * coeff[entryH, entryW][0] + \
                                 coeff[entryH, entryW][1] * coeff[entryH, entryW][1])
# Show the log of the power
powerLog = imageLogF(power)
showImageF(powerLog)
# Phase spectrum: angle of each complex coefficient, in (-pi, pi]
phase = createImageF( 1 + 2 * maxFreqW, 1 + 2 * maxFreqH)
for kw,kh in itertools.product(range(-maxFreqW, maxFreqW + 1), range(-maxFreqH, maxFreqH + 1)):
    indexInArrayW = kw + maxFreqW
    indexInArrayH = kh + maxFreqH
    # atan2(imaginary, real)
    phase[indexInArrayH, indexInArrayW] = atan2(coeff[indexInArrayH, indexInArrayW][1], \
                                                coeff[indexInArrayH, indexInArrayW][0])
# Plot phase
showImageF(phase)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter7/AngularFunction.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 7
AngularFunction: Compute the angular functions of a shape
'''
# Set module functions
from ImageUtilities import imageReadL, createImageF, showImageF, showImageL
from ImageRegionsUtilities import findLongestCentredSegmentinImage, showShapeinImage
from PlotUtilities import plotCurveXY
# Math and iteration
from math import pi, sqrt, atan2
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
gaussianKernelSize = Gaussian kernel size. Filter noise
sobelKernelSize = Sobel kernel size. Edge detection
upperT = Upper threshold
lowerT = Lower threshold
'''
pathToDir = "../../Images/Chapter7/Input/"
imageName = "Shape.png"
gaussianKernelSize = 5
sobelKernelSize = 3
upperT = 0.3
lowerT = 0.05
# Obtain a shape from the input image and draw it
centre, shape, width, height = findLongestCentredSegmentinImage(pathToDir + imageName, \
                    gaussianKernelSize, sobelKernelSize, upperT, lowerT)
showShapeinImage(shape, centre, width, height)
# Compute the accumulative arc lengths: distance along the contour from
# the start point, closing the curve by starting from the last point
numPoints = len(shape[0])
sumLenghts = []
y0, x0 = shape[0, numPoints-1], shape[1, numPoints-1]
shapeLenght = 0.0
for p in range(0, numPoints):
    y,x = shape[0,p], shape[1,p]
    shapeLenght += sqrt((y-y0)*(y-y0) + (x-x0)*(x-x0))
    sumLenghts.append(shapeLenght)
    y0,x0 = y,x
# Normalised arc lengths: total perimeter mapped onto 0..2*pi
normLenghts = []
for p in range(0, numPoints):
    normLenghts.append((2.0*pi*sumLenghts[p])/shapeLenght);
# Compute angular function by an average window: tangent direction at p
# estimated from averaged points behind (x1,y1) and ahead (x2,y2) of p
windowSize = [1,10]
d = float(windowSize[1] -windowSize[0])
angularFunc = [ ]
for p in range(0, numPoints):
    x1,x2,y1,y2 = 0.0, 0.0, 0.0, 0.0
    # Average change; indices wrap around the closed contour
    for q in range(windowSize[0], windowSize[1]):
        pa,pb = p-q,p+q
        if pa<0: pa += numPoints
        if pb>=numPoints: pb -= numPoints
        ya,xa = shape[0,pa], shape[1,pa]
        yb,xb = shape[0,pb], shape[1,pb]
        x1,y1 = x1+xa, y1+ya
        x2,y2 = x2+xb, y2+yb
    dx, dy = (x2-x1)/d, (y2-y1)/d
    angle = atan2(dy, dx)
    angularFunc.append(angle)
# Compute cumulative angular function (unwrapped turning angle).
# NOTE(review): only jumps of +2*pi are compensated; presumably the
# contour is traversed in one consistent direction so -2*pi jumps do not
# occur -- confirm for arbitrary shapes.
cumulativeFunc = [ ]
angle0 = angularFunc[numPoints-1]
sumAngle = 0.0
for p in range(0, numPoints):
    angle = angularFunc[p]
    if abs(angle-angle0) < pi:
        sumAngle += angle-angle0
    else:
        sumAngle += angle-(angle0 + 2.0 *pi)
    cumulativeFunc.append(sumAngle)
    angle0 = angle
# Cumulative angular function plus normalised arc length, which makes the
# descriptor of a circle constant
cumNormFunc = [ ]
for p in range(0, numPoints):
    cumNormFunc.append(cumulativeFunc[p]+normLenghts[p])
plotCurveXY(sumLenghts,angularFunc, [-3.2 , 3.2])
plotCurveXY(sumLenghts,cumulativeFunc, [-7, 0])
plotCurveXY(sumLenghts,cumNormFunc, [-3, 3])
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter3/AnisotropicDiffusion.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 3
AnisotropicDiffusion: Remove noise and keep edges by anisotropic diffusion filtering
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageF, showImageF
# Math and iteration
from math import exp, pow
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
kernelSize = Size of the kernel
numIterations = Number of iterations
k = Rate of the conduction coefficient
lamda = Amount of smoothing
'''
pathToDir = "../../Images/Chapter3/Input/"
imageName = "Giraffe.png"
kernelSize = 3
numIterations = 10
k = 10.0
lamda = 0.5
# Read image
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Create images to store the result
outputImage = createImageF(width, height)
# Create images to store the iteration
image = createImageF(width, height)
# Apply filter: each iteration replaces every pixel by a weighted average
# of its neighbourhood, where the weight decays with the intensity
# difference (edges diffuse less than flat regions)
kernelCentre = int((kernelSize - 1) / 2)
for x,y in itertools.product(range(0, width), range(0, height)):
    outputImage[y, x] = inputImage[y, x]
for iteration in range(0, numIterations):
    # Snapshot the previous result so each iteration reads a fixed image
    for x,y in itertools.product(range(0, width), range(0, height)):
        image[y, x] = outputImage[y, x]
    for x,y in itertools.product(range(0, width), range(0, height)):
        sumWeights = 0;
        outputImage[y, x] = 0
        centrePixleValue = image[y, x]
        for wx,wy in itertools.product(range(0, kernelSize), range(0, kernelSize)):
            posY, posX = y + wy - kernelCentre, x + wx - kernelCentre
            if posY > -1 and posY < height and posX > -1 and posX < width:
                # Weight according to gradient
                weight = exp(-pow((image[posY, posX]-centrePixleValue)/k, 2) );
                # Use lambda to weight the pixel value.
                # NOTE(review): 'and' makes this true only for DIAGONAL
                # neighbours (both coordinates differ); if ALL off-centre
                # neighbours were meant to be down-weighted this should be
                # 'or' -- confirm against the intended diffusion scheme.
                if posY != y and posX != x:
                    weight *= lamda
                sumWeights += weight
                outputImage[y, x] += weight * float(image[posY, posX])
        # Normalize
        if sumWeights > 0:
            outputImage[y, x] /= sumWeights
# Show output image
showImageF(outputImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter5/HoughTransform.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 5
HoughTransform: Line detection by the Hough transform
'''
# Set module functions
from ImageUtilities import imageReadL, createImageF, showImageF, showImageL, createScaleImageL
from ImageOperatorsUtilities import applyCannyEdgeDetector
from ImagePropertiesUtilities import imageMaxMin, peakDetectorImageL
from PlotUtilities import plot3DHistogram
# Math and iteration
from math import pi, tan
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
gaussianKernelSize = Gaussian kernel size. Filter noise
sobelKernelSize = Sobel kernel size. Edge detection
upperT = Upper threshold
lowerT = Lower threshold
peakDetection = Percentage of the maximum peak value that is considered for threshold
'''
pathToDir = "../../Images/Chapter5/Input/"
imageName = "Road.png"
gaussianKernelSize = 7
sobelKernelSize = 3
upperT = 0.5
lowerT = 0.3
peakDetection = 0.7
# Read image into array and show
inputImage, width, height = imageReadL(pathToDir + imageName)
showImageL(inputImage)
# Compute edges
magnitude, angle = applyCannyEdgeDetector(inputImage, gaussianKernelSize, sobelKernelSize, upperT, lowerT)
showImageF(magnitude)
# Two accumulators, for horizontal and vertical lines. Each one stores a range of 90 degrees
# The intersection c corresponds to the intersections with the lines x=0 and y=0.
# NOTE(review): intercepts outside [0, 2*height) / [0, 2*width) are
# discarded, so steep lines with negative intercept cannot be detected.
accHorizontal = createImageF(2*height,90)
accVertical = createImageF(2*width,90);
# Gather evidence: every edge pixel votes, for each quantised angle m,
# for the intercept c of the line through it. Votes are split between the
# two nearest integer buckets in proportion to the fractional part
# (the 'angle' scalar below shadows the Canny angle image, which is no
# longer used from here on)
for x,y in itertools.product(range(0, width), range(0, height)):
    if magnitude[y,x] != 0:
        for m in range(0,90):
            # Lines between -45 and 45 degrees
            angle = ((-45 + m) * pi) / 180.0
            c = y - tan(angle) * x
            bucket = int(c)
            if bucket> 0 and bucket < 2*height - 1:
                weight = c - int(c)
                accHorizontal[m, bucket] += (1.0 - weight)
                accHorizontal[m, bucket+1] += weight
            # Lines between 45 and 135 degrees
            angle = ((45.0 + m) * pi) / 180.0
            c = x - y / tan(angle)
            bucket = int(c)
            if bucket> 0 and bucket < 2*width - 1:
                weight = c - int(c)
                accVertical[m, bucket] += (1.0 - weight)
                accVertical[m, bucket+1] += weight
# Find maximum over both accumulators to threshold peaks consistently
maxH, _ = imageMaxMin(accHorizontal)
maxV, _ = imageMaxMin(accVertical)
maximum = max(maxH, maxV)
peakThreshold = peakDetection * maximum
# Plot accumulators
plot3DHistogram(accHorizontal, [0,maximum])
plot3DHistogram(accVertical, [0,maximum])
# Prepare output image as a dark version of the input
outputImage = createScaleImageL(inputImage, 0.5)
# Peak detection
peakHorizontal = peakDetectorImageL(accHorizontal, peakThreshold)
peakVertical = peakDetectorImageL(accVertical, peakThreshold)
# Draw lines on output image; brightness encodes the peak's vote strength
for peakIndex in range(0,len(peakHorizontal)):
    m = (peakHorizontal[peakIndex])[0]
    c = (peakHorizontal[peakIndex])[1]
    strength = int(255.0 * accHorizontal[m, c] / maximum)
    angle = ((-45 + m) * pi) / 180.0
    for x in range(0, width -1):
        y = int(c + tan(angle) * x)
        if y > 0 and y < height -1:
            outputImage[y,x] = strength
            outputImage[y+1,x] = strength
for peakIndex in range(0,len(peakVertical)):
    m = (peakVertical[peakIndex])[0]
    c = (peakVertical[peakIndex])[1]
    strength = int(255.0 * accVertical[m, c] / maximum)
    angle = ((45 + m) * pi) / 180.0
    for y in range(0, height -1):
        x = int(c + y / tan(angle))
        if x > 0 and x < width -1:
            outputImage[y,x] = strength
            outputImage[y,x+1] = strength
showImageL(outputImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter4/AreaMotion.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 4
AreaMotion: Compute optical flow by using correlation
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageF
from PlotUtilities import plotQuiver
# Math and iteration
from math import sqrt, atan2, sin, cos
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
kernelSize = Size of the kernel
maxDisp = Maximum size of displacement
step = Delta that defines the image sample positions used to obtain optical flow
'''
pathToDir = "../../Images/Chapter4/Input/"
image1Name = "Rino0.png"
image2Name = "Rino1.png"
kernelSize = 11
maxDisp = 10
step = 10
# Read image into array. both images must have same size
inputImage1, width, height = imageReadL(pathToDir + image1Name)
inputImage2, _, _ = imageReadL(pathToDir + image2Name)
# Show input image
showImageL(inputImage1)
showImageL(inputImage2)
# The center of the kernel
kernelCentre = int((kernelSize - 1) / 2)
# Compute Motion in sampled points: for each sample, exhaustively search
# displacements (dx, dy) and keep the one whose window has the smallest
# sum of absolute differences between the two frames
motionMagnitude = createImageF(width, height)
motionDirection = createImageF(width, height)
motionWeight = createImageF(width, height)
for x,y in itertools.product(range(2 * step, width-2*step, step), \
                             range(2 * step, height-2*step,step)):
    # Track best and second-best match to judge match distinctiveness
    minDiference, nextDiference = float("inf"), float("inf")
    mDisp = [0,0]
    for dx,dy in itertools.product(range(-maxDisp, maxDisp), \
                                   range(-maxDisp, maxDisp)):
        if dx != 0 or dy != 0:
            differenceMatching = 0
            for wx,wy in itertools.product(range(0, kernelSize), \
                                           range(0, kernelSize)):
                y1, x1 = y + wy - kernelCentre, x + wx - kernelCentre
                y2, x2 = y1 + dy, x1 + dx
                if y1 > -1 and y1 < height and x1 > -1 and x1 < width and \
                   y2 > -1 and y2 < height and x2 > -1 and x2 < width:
                    differenceMatching += abs(float(inputImage1[y1,x1]) - \
                                              float(inputImage2[y2,x2]))
            # Keep the most similar
            if differenceMatching < minDiference:
                nextDiference = minDiference
                minDiference = differenceMatching
                mDisp = [dy,dx]
            else:
                if differenceMatching < nextDiference:
                    nextDiference = differenceMatching
    # Set motion only if we find a good match (best strictly better than
    # second best); the margin becomes the confidence weight
    if minDiference != nextDiference:
        motionMagnitude[y,x] = sqrt(mDisp[0]*mDisp[0]+mDisp[1]*mDisp[1])
        motionDirection[y,x] = atan2(mDisp[0],-mDisp[1])
        motionWeight[y,x] = nextDiference - minDiference
# Weighted average: smooth the flow field by averaging each sample's
# vector with its 8 sampled neighbours, weighted by match confidence
motionMagnitudeW = createImageF(width, height)
motionDirectionW = createImageF(width, height)
for x,y in itertools.product(range(2 * step, width-2*step, step), \
                             range(2 * step, height-2*step,step)):
    weightedX, weightedY = 0.0, 0.0
    totalWeight = 0.0
    for wx,wy in itertools.product(range(-1, 2), range(-1, 2)):
        w = motionWeight[y + wy*step, x + wx*step]
        m = motionMagnitude[y + wy*step, x + wx*step]
        a = motionDirection[y + wy*step, x + wx*step]
        vectorX = m * cos(a)
        vectorY = m * sin(a)
        weightedX += w*vectorX
        weightedY += w*vectorY
        totalWeight += w
    if totalWeight > 0:
        motionMagnitudeW[y,x] = sqrt(weightedX*weightedX + \
                                     weightedY*weightedY) / totalWeight
        motionDirectionW[y,x] = atan2(weightedY,weightedX)
# Plot scaled vectors
plotQuiver(motionMagnitude, motionDirection, 0.5, step)
plotQuiver(motionMagnitudeW, motionDirectionW, 0.5, step)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter7/KrawtchoukPolynomials.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples<filename>ExamplesPython_3.6/Chapter7/KrawtchoukPolynomials.py
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 7
KrawtchoukPolynomials: Compute two dimensional weighted Krawtchouk polynomials
'''
# Set module functions
from ImageUtilities import createVectorF, createImageF
from ImageRegionsUtilities import nCr, risingFactorial
from PlotUtilities import plotSurface, plotCurve
# Math and iteration
from math import sqrt, factorial
from timeit import itertools
'''
Parameters:
p = Polynomial parameter. 0.5 for centralized polynomials. It should be between .1 and .9
width = Width of the data image. Number of polynomials. It must be less that 100 to avoid overflow
numPolynomialsDraw = Number of polynomials to draw
'''
numPolynomialsDraw = 5
p = 0.3
width = 100
# Data containers
sigma = createVectorF(width)
ro = createVectorF(width)
K = createImageF(width,width)
# Coefficient size
N = width-1
# Weight: binomial distribution sigma(x) = C(N,x) p^x (1-p)^(N-x).
# It can be replaced by recurrence relation
for x in range(0,width):
    sigma[x] = nCr(N, x) * pow(p,x) * pow(1-p,N-x)
# Scale factor (squared norm of each polynomial). It can be replaced by
# recurrence relation
for n in range(0,width):
    ro[n] = pow(-1,n) * pow((1-p)/p,n) * (float(factorial(n)) / risingFactorial(-N, n))
# Krawtchouk matrix that store result of the polynomial
# Each row is a polynomial each column is the polynomial value for an x value
# Alternatively, we could have used the polynomial generating function
for n,x in itertools.product(range(0, width), range(0, width)):
    for i in range(0,width):
        K[n,x] += pow(-1,i) * nCr(N-x, n-i) * nCr(x,i) * pow(p,n-i) * pow(1.0-p,i)
# Normalize rows for stability: scale each polynomial so K[n,0] == 1
for n in range(0,width):
    scale = K[n,0]
    for x in range(0,width):
        K[n,x] /= scale
# Weighted polynomials: multiply by sqrt(sigma(x)/ro(n)) to obtain the
# orthonormal (weighted) Krawtchouk basis
Kweighted = createImageF(width,width)
for n,x in itertools.product(range(0, width), range(0, width)):
    Kweighted[n,x] = K[n,x]*sqrt(sigma[x]/ro[n])
# Plot
plotSurface(Kweighted)
for n in range(0,numPolynomialsDraw):
    plotCurve(Kweighted[n,:])
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter8/WatershedGradientTransform.py | <gh_stars>10-100
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 8
WaterShedGradientTransform: Compute Watershed transform by considering the gradient image
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL,createImageF, showImageF
from ImagePropertiesUtilities import imageMaxMin
from ConvolutionUtilities import createGaussianKernel, createSobelKernel, applyKernelMA, applyKernelF
from ImageRegionsUtilities import watherShed
# Iteration
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
suppWindow = Size of the window used to find maximum
'''
pathToDir = "../../Images/Chapter8/Input/"
imageName = "Logs.png"
suppWindow = 5
# Read image into array and show
inputImage, width, height = imageReadL(pathToDir+imageName)
showImageL(inputImage)
# Apply Sobel kernel (the magnitude is NOT normalised in this example:
# normalizeMagnitude is False)
sobelX, sobelY = createSobelKernel(3)
normalizeMagnitude = False
magnitude, _, _, _ = applyKernelMA(inputImage, sobelX, sobelY, normalizeMagnitude)
showImageF(magnitude)
# Apply Gaussian kernel to smooth the gradient before flooding
gaussianKernel = createGaussianKernel(10)
gaussianImage = applyKernelF(magnitude, gaussianKernel)
# Invert the image and add all pixels to the shape, so region interiors
# (low gradient) become high values that flood first
shapeImage = [ ]
distanceImage = createImageF(width, height)
maxGradient, minGradient = imageMaxMin(gaussianImage)
for x,y in itertools.product(range(0, width), range(0, height)):
    distanceImage[y,x] = maxGradient - gaussianImage[y,x]
    shapeImage.append((y,x))
showImageF(distanceImage)
# Watershed of the distance image
watershed = watherShed(distanceImage, shapeImage, suppWindow)
showImageF(watershed)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter7/AngularFourierDescriptors.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 7
AngularFourierDescriptors: Compute the angular Fourier descriptors of a shape in an image
'''
# Set module functions
from ImageUtilities import createImageF, createVectorF
from ImageRegionsUtilities import findLongestCentredSegmentinImage, showShapeinImage, computeAngularFunctions
from PlotUtilities import plotHistogram, plotCurveXY
# Math and iteration
from math import pi, sqrt, sin, cos
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
gaussianKernelSize = Gaussian kernel size. Filter noise
sobelKernelSize = Sobel kernel size. Edge detection
upperT = Upper threshold
lowerT = Lower threshold
numDescriptors = Number of descriptors
'''
pathToDir = "../../Images/Chapter7/Input/"
imageName = "f14.png"
gaussianKernelSize = 5
sobelKernelSize = 3
upperT = 0.3
lowerT = 0.05
numDescriptors = 20
# Obtain a shape from the input image and show shape is a 2 row image with y,x coordinates
centre, shape, width, height = findLongestCentredSegmentinImage(pathToDir + imageName, \
                    gaussianKernelSize, sobelKernelSize, upperT, lowerT)
showShapeinImage(shape, centre, width, height)
# Obtain the angular functions and plot
sumArcLenghts, normArcLenghts, angularFunc, cumulativeFunc, cumulativeNormFunc = \
                    computeAngularFunctions(shape)
plotCurveXY(sumArcLenghts,angularFunc, [-3.2, 3.2])
plotCurveXY(normArcLenghts,cumulativeNormFunc, [-3.2, 3.2])
# Number of coefficients
numEdges = len(sumArcLenghts)
shapeLenght = sumArcLenghts[numEdges - 1]
# If number descriptors is 0 use the maximum according to the lenght
if numDescriptors == 0:
    numDescriptors = 1 + int(numEdges /2)
# Compute coefficients: Fourier series of the cumulative angular function
# evaluated by summing over contour segments, each weighted by its arc
# length (sumArcLenghts[p] - arcLenght). Index 0 is unused (k starts at 1).
coefficients = createImageF(numDescriptors, 2)
lenghtNorm = 2.0 * pi / shapeLenght
for k in range(1, numDescriptors):
    arcLenght = 0
    for p in range(0, numEdges):
        coefficients[0, k] += cumulativeFunc[p] * (sumArcLenghts[p] - arcLenght) \
                              * cos(k * sumArcLenghts[p] * lenghtNorm)
        coefficients[1, k] += cumulativeFunc[p] * (sumArcLenghts[p] - arcLenght) \
                              * sin(k * sumArcLenghts[p] * lenghtNorm)
        arcLenght = sumArcLenghts[p]
    # The -(2.0/k) term removes the contribution of the linear ramp so a
    # circle maps to zero descriptors
    coefficients[0, k] = coefficients[0, k] *(2.0/shapeLenght)
    coefficients[1, k] = coefficients[1, k] *(2.0/shapeLenght) - (2.0/k)
# Rotation invariant descriptors: magnitude of each coefficient pair
descriptors = createVectorF(numDescriptors)
for k in range(0, numDescriptors):
    descriptors[k] = sqrt(coefficients[0, k]*coefficients[0, k] + \
                          coefficients[1, k]*coefficients[1, k])
# Plot coefficients and descriptors
plotHistogram(descriptors, [0, 1], .95)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter3/HistogramEqualization.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 3
HistogramEqualization: Adjust image intensity according to the histogram
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageL, createVectorI
from PlotUtilities import plotHistogram
from ImageOperatorsUtilities import computeHistogram
# Iteration
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
'''
pathToDir = "../../Images/Chapter3/Input/"
imageName = "Horse.png"
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Compute histogram of the input image
inputHistogram = computeHistogram(inputImage)
# Vector of integers values to store the number of times a pixel value is repeated
accumulateHistogram = createVectorI(256)
# Create images to store the result
outputImage = createImageL(width, height)
# Distribute the values of the input histogram into the output histogram.
# NOTE(fix): the divisor must map the full cumulative count to the TOP
# grey level 255 (L-1), not 256: with /256 a level whose cumulative sum
# reaches width*height was mapped to 256, one past the valid 8-bit range.
sumLevels = 0.0
normalization = float(width * height) / 255
for level in range(0, 256):
    sumLevels += inputHistogram[level]
    # Equalised level = 255 * cumulative fraction of pixels at or below it
    accumulateHistogram[level] = sumLevels / normalization
# Set the pixels in the output image according to the accumulate histogram
for x,y in itertools.product(range(0, width), range(0, height)):
    outputImage[y,x] = accumulateHistogram[inputImage[y,x]]
# Compute histogram of the output image
outputHistogram = computeHistogram(outputImage)
# Show output image
showImageL(outputImage)
# Plot histograms
plotHistogram(inputHistogram)
plotHistogram(outputHistogram)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter5/HTCircleDecomposition.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 5
HTCirlceDecomposition: Circle detection by the Hough transform decomposition
'''
# Set module functions
from ImageUtilities import imageReadL, showImageF, showImageL, createScaleImageL, createVectorF, createImageF
from ImageOperatorsUtilities import applyCannyEdgeDetector
from ImagePropertiesUtilities import imageArgMax, peakDetectorVector
from PlotUtilities import plot3DHistogram, plotHistogram
# Math and iteration. Import itertools directly; importing it via timeit relies
# on an implementation detail of the timeit module.
from math import sqrt, pi, sin, cos
import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    gaussianKernelSize = Gaussian kernel size. Filter noise
    sobelKernelSize = Sobel kernel size. Edge detection
    upperT = Upper threshold
    lowerT = Lower threshold
    deltaPointRange = How far is the second point
'''
pathToDir = "../../Images/Chapter5/Input/"
imageName = "EyeClose.png"
gaussianKernelSize = 9
sobelKernelSize = 3
upperT = 0.4
lowerT = 0.1
deltaPointRange = [30,35]
# Read image into array and show
inputImage, width, height = imageReadL(pathToDir + imageName)
showImageL(inputImage)
# Compute edges
magnitude, angle = applyCannyEdgeDetector(inputImage, gaussianKernelSize, sobelKernelSize, upperT, lowerT)
showImageF(magnitude)
# Gather evidence for the circle location by using two points: for each pair of
# edge points the circle centre lies on the perpendicular bisector of the chord
# joining them, so votes are cast along that bisector line
accumulator = createImageF(width, height)
for x1,y1 in itertools.product(range(0, width), range(0, height)):
    if magnitude[y1,x1] != 0:
        # Look for a second edge point at a distance within deltaPointRange
        for dx,dy in itertools.product(range(0,deltaPointRange[1]+1), \
                                       range(0,deltaPointRange[1]+1)):
            if (dx!=0 or dy!=0) and (abs(dx) > deltaPointRange[0] or \
                                     abs(dy) > deltaPointRange[0]):
                x2, y2 = x1+dx, y1+dy
                if x2 > 0 and y2 > 0 and x2 < width and y2 < height and \
                   magnitude[y2, x2] !=0:
                    # Midpoint of the chord; the bisector passes through it
                    xm, ym = (x1 + x2) / 2.0, (y1 + y2) / 2.0
                    # Rasterize along the axis where the bisector slope is
                    # shallowest, splitting each vote between the two nearest
                    # cells (bilinear weighting) to reduce discretization error
                    if abs(dx) < abs(dy):
                        m = float(dx) / float(-dy)
                        for x in range(0, width):
                            y = m *(x - xm) + ym
                            intY = int(y)
                            if intY > 0 and intY < height -1:
                                weight = y - intY
                                accumulator[ intY, x] += (1.0 - weight)
                                accumulator[ intY+1, x] += weight
                    else:
                        m = float(-dy) / float(dx)
                        for y in range(0, height):
                            x = m *(y - ym) + xm
                            intX = int(x)
                            if intX > 0 and intX < width -1:
                                weight = x - intX
                                accumulator[ y, intX] += (1.0 - weight)
                                accumulator[ y, intX+1] += weight
# Find maximum: (row, column) of the most-voted centre
maximumPos = imageArgMax(accumulator)
# Plot a slide of the accumulator
plot3DHistogram(accumulator)
# Prepare output image as a dark version of the input
outputImage = createScaleImageL(inputImage, 0.5)
# Accumulator for the radius: vote by the distance of each edge point to the
# located centre, again with bilinear vote splitting
maxR = int(max(width, height)/2)
accR = createVectorF(maxR)
for x,y in itertools.product(range(0, width), range(0, height)):
    if magnitude[y,x] != 0:
        r = sqrt( (maximumPos[1] - x) * (maximumPos[1] - x) + \
                  (maximumPos[0] - y) * (maximumPos[0] - y) )
        bucket = int(r)
        if bucket> 0 and bucket < maxR - 1:
            weight = r - int(r)
            accR[bucket] += (1.0 - weight)
            accR[bucket+1] += weight
plotHistogram(accR)
maximumR = imageArgMax(accR)[0]
# Draw located circle, one point per degree
for m in range(0,360):
    angle = (m * pi) / 180.0
    x = int(maximumPos[1]- maximumR * cos(angle));
    y = int(maximumPos[0]- maximumR * sin(angle));
    if x<width and x>0 and y<height and y>0:
        outputImage[y,x] = 255
showImageL(outputImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter3/TruncatedMedianFilter.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 3
TruncatedMedianFilter: Noise reduction by truncated median filter
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageL
# Iteration. Import itertools directly; importing it via timeit relies on an
# implementation detail of the timeit module.
import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    kernelSize = Size of the kernel
'''
pathToDir = "../../Images/Chapter3/Input/"
imageName = "artery.png"
kernelSize = 7
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Create image to store the result
outputImage = createImageL(width, height)
# Apply filter. For each pixel, the window median is recomputed after
# discarding values on the far side of the median, which pushes the estimate
# towards the mode of the local distribution
kernelCentre = int((kernelSize - 1) / 2)
for x,y in itertools.product(range(0, width), range(0, height)):
    # Iterate window to collect values to compute mean and median
    region = [ ]
    sumValues = 0
    for wx,wy in itertools.product(range(0, kernelSize), range(0, kernelSize)):
        posY, posX = y + wy - kernelCentre, x + wx - kernelCentre
        if posY > -1 and posY < height and posX > -1 and posX < width:
            sumValues += inputImage[posY,posX]
            region.append(inputImage[posY,posX])
    # Compute mean and median of the window
    numPixels = len(region)
    if numPixels > 0:
        # Mean and median
        mean = sumValues / numPixels
        region.sort()
        median = region[int(numPixels/2)]
        # Truncation limits: reflect the extreme values about the median
        upper, lower = 2.0*median-region[0], 2.0*median-region[numPixels-1]
        # Create a list of values truncated on the side away from the mode
        truncatedRegion = [ ]
        for wx,wy in itertools.product(range(0, kernelSize), range(0, kernelSize)):
            posY, posX = y + wy - kernelCentre, x + wx - kernelCentre
            if posY > -1 and posY < height and posX > -1 and posX < width:
                if (inputImage[posY,posX] < upper and median < mean) or \
                   (inputImage[posY,posX] > lower and median > mean):
                    truncatedRegion.append(inputImage[posY,posX])
        # Compute median of truncated pixels; fall back to the plain median
        # when truncation discards everything (median == mean case included)
        numTruncatedPixels = len(truncatedRegion)
        if numTruncatedPixels > 0:
            truncatedRegion.sort()
            outputImage[y,x] = truncatedRegion[int(numTruncatedPixels/2)]
        else:
            outputImage[y,x] = median
# Show output image
showImageL(outputImage)
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter4/CurvatureDetection.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 4
CurvatureDetection: Obtain curvature by computing angle differences between neighbouring edge directions
'''
# Set module functions
from ImageUtilities import imageReadL, createImageF, showImageF, showImageL
from ImageOperatorsUtilities import applyCannyEdgeDetector
# Math and iteration. Import itertools directly; importing it via timeit relies
# on an implementation detail of the timeit module.
from math import cos, sin
import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    GaussianKernelSize = Gaussian kernel size. Filter noise
    sobelKernelSize = Sobel kernel size. Edge detection
    upperT = Upper threshold
    lowerT = Lower threshold
    windowDelta = Size of the region used to find neighbors
'''
pathToDir = "../../Images/Chapter4/Input/"
imageName = "Shapes.png"
GaussianKernelSize = 7
sobelKernelSize = 3
upperT = 0.4
lowerT = 0.2
windowDelta = 2
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Compute edges
magnitude, angle = applyCannyEdgeDetector(inputImage, GaussianKernelSize, sobelKernelSize, upperT, lowerT)
showImageF(magnitude)
# Compute curvature as the average angular difference between an edge pixel's
# direction and the directions of its neighbouring edge pixels
curvature = createImageF(width, height)
for x,y in itertools.product(range(0, width), range(0, height)):
    # Edge
    if magnitude[y,x] > 0:
        # Consider neighbor edges inside the window
        edgesNeigbor = [ ]
        for wx,wy in itertools.product(range(-windowDelta, windowDelta+1), \
                                       range(-windowDelta, windowDelta+1)):
            posY, posX = y + wy, x + wx
            # Guard the window against the image border: without this check,
            # positions past the bottom/right edge raise IndexError and
            # negative ones silently wrap to the opposite side of the image
            if posY < 0 or posY >= height or posX < 0 or posX >= width:
                continue
            if magnitude[posY, posX] > 0 :
                edgesNeigbor.append((posY, posX))
        # Use dot product to measure angle difference: 1 - cos(a1 - a2)
        np = len(edgesNeigbor)
        for p in range(0, np):
            y1 = (edgesNeigbor[p])[0]
            x1 = (edgesNeigbor[p])[1]
            curvature[y,x] += 1.0-(cos(angle[y1,x1]) * cos(angle[y,x]) \
                              + sin(angle[y1,x1]) * sin(angle[y,x]))
        if np > 0:
            curvature[y,x] /= np
showImageF(curvature, 2)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter4/CurvatureByIntensity.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 4
CurvatureByIntensity: Obtain curvature by changes in intensities
'''
# Set module functions
from ImageUtilities import imageReadL, createImageF, showImageF, showImageL
from ImageOperatorsUtilities import applyCannyEdgeDetector
from ConvolutionUtilities import createSobelKernel, applyKernelF
# Math and iteration. Import itertools directly; importing it via timeit relies
# on an implementation detail of the timeit module.
from math import fabs
import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    GaussianKernelSize = Gaussian kernel size. Filter noise
    sobelKernelSize = Sobel kernel size. Edge detection
    upperT = Upper threshold
    lowerT = Lower threshold
    op = T for tangent direction
         TI for tangent inverse
         N for normal direction
         NI for normal inverse
'''
pathToDir = "../../Images/Chapter4/Input/"
imageName = "Shapes.png"
GaussianKernelSize = 7
sobelKernelSize = 3
upperT = 0.4
lowerT = 0.2
op = "T"
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# We apply Canny to obtain the edges from the image
# but also need the results of the Sobel operator (Gradient)
magnitude, angle, mX, mY = applyCannyEdgeDetector(inputImage, GaussianKernelSize, sobelKernelSize, upperT, lowerT, True)
# Obtain gradient of gradient.
# We apply 4 convolutions, but these can be computed in a single image pass.
# NOTE(review): the second-derivative kernel is created with GaussianKernelSize
# rather than sobelKernelSize - confirm this is intentional
sobelX, sobelY = createSobelKernel(GaussianKernelSize)
mXx = applyKernelF(mX, sobelX)
mXy = applyKernelF(mX, sobelY)
mYx = applyKernelF(mY, sobelX)
mYy = applyKernelF(mY, sobelY)
# Compute curvature from the first and second image derivatives; op selects
# the direction (tangent/normal, forward/inverse) along which the gradient
# direction change is measured
curvature = createImageF(width, height)
for x,y in itertools.product(range(0, width), range(0, height)):
    # If it is an edge
    if magnitude[y,x] > 0:
        Mx2,My2,MxMy = mX[y,x]*mX[y,x], mY[y,x]*mY[y,x], mX[y,x]*mY[y,x]
        if Mx2 + My2 !=0.0:
            # Normalization factor 1/|gradient|^3
            p = 1.0/ pow((Mx2 + My2), 1.5)
            if op == "T":
                curvature[y,x] = p * (My2 * mXx[y,x] - MxMy * mYx[y,x] + \
                                      Mx2 * mYy[y,x] - MxMy * mXy[y,x])
            if op == "TI":
                curvature[y,x] = p * (-My2 * mXx[y,x] + MxMy * mYx[y,x] - \
                                      Mx2 * mYy[y,x] + MxMy * mXy[y,x])
            if op == "N":
                curvature[y,x] = p * (Mx2 * mYx[y,x] - MxMy * mYx[y,x] - \
                                      MxMy * mYy[y,x] + My2 * mXy[y,x])
            if op == "NI":
                curvature[y,x] = p * (-Mx2 * mYx[y,x] + MxMy * mXx[y,x] + \
                                      MxMy * mYy[y,x] - My2 * mXy[y,x])
            curvature[y,x] = fabs(curvature[y,x])
showImageF(curvature, 2)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter1/ImageBrightening.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 1
ImageBrightening: Increase the intensity of an image
'''
# Iteration. Import itertools directly; importing it via timeit relies on an
# implementation detail of the timeit module.
import itertools
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageL
from PrintUtilities import printImageRangeL
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    brightDelta = Increase brightness
    printRange = Image range to print
'''
pathToDir = "../../Images/Chapter1/Input/"
imageName = "Zebra.png"
brightDelta = 80
printRange = [0, 10]
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Output image
outputImage = createImageL(width, height)
# Set the pixels in the output image: add brightDelta and saturate at the
# maximum 8-bit grey level
for x,y in itertools.product(range(0, width), range(0, height)):
    outputImage[y,x] = min(255, int(inputImage[y,x]) + brightDelta)
# Show images
showImageL(inputImage)
showImageL(outputImage)
# Print image range
printImageRangeL(inputImage, printRange, printRange)
printImageRangeL(outputImage, printRange, printRange)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter7/RegionDescriptors.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples<filename>ExamplesPython_3.6/Chapter7/RegionDescriptors.py
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 7
RegionDescriptors: Compute basic region descriptors of a shape in an image
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL
from PrintUtilities import printText
from ImageRegionsUtilities import findLongesSegmentinImage
# Math and iteration. Import itertools directly; importing it via timeit relies
# on an implementation detail of the timeit module.
from math import pi, sqrt
import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    gaussianKernelSize = Gaussian kernel size. Filter noise
    sobelKernelSize = Sobel kernel size. Edge detection
    upperT = Upper threshold
    lowerT = Lower threshold
'''
pathToDir = "../../Images/Chapter7/Input/"
imageName = "ConvShape.png"
gaussianKernelSize = 5
sobelKernelSize = 3
upperT = 0.3
lowerT = 0.05
# Read image into array and show
inputImage, width, height = imageReadL(pathToDir+imageName)
showImageL(inputImage)
# Area. Assume the pixels with value different to zero define the region
area = 0
for x,y in itertools.product(range(0, width), range(0, height)):
    if inputImage[y,x] != 0 :
        area += 1
# Obtain shape contour
shape, width, height = findLongesSegmentinImage(pathToDir + imageName, \
                gaussianKernelSize, sobelKernelSize, upperT, lowerT)
# Perimeter and mean. The mean is the contour centre. The perimeter is the arc
# length, accumulated between consecutive contour points (the contour is
# closed, so the previous point starts at the last contour point)
numPoints = len(shape[0])
mean = [0,0]
perimeter = 0.0
y0, x0 = shape[0, numPoints-1], shape[1, numPoints-1]
for p in range(0, numPoints):
    y,x = shape[0,p], shape[1,p]
    mean[0], mean[1] = mean[0]+x, mean[1]+y
    perimeter += sqrt((y-y0)*(y-y0) + (x-x0)*(x-x0))
    y0,x0 = y,x
mean[0],mean[1] = mean[0]/numPoints, mean[1]/numPoints
# Compactness: 4*pi*area/perimeter^2 (1.0 for a perfect circle)
compactness = 4.0*pi*area/(perimeter*perimeter)
# Dispersion: based on the maximum and minimum contour distances to the centre
maxDist, minDist = 0, float('Inf')
for p in range(0, numPoints):
    y,x = shape[0,p], shape[1,p]
    d = sqrt((x-mean[0])**2 + (y-mean[1])**2)
    if d > maxDist:
        maxDist = d
    if d < minDist:
        minDist = d
dispersion = pi*maxDist*maxDist/area
dispertionRatio = sqrt(maxDist/minDist)
# Print results
printText("Area = " + str(area))
printText("Mean = " + '%.2f' % mean[0] + ", " + '%.2f' % mean[1])
printText("Perimeter = " + '%.2f' % perimeter)
printText("Compactness = " + '%.2f' % compactness)
printText("Dispertion = " + '%.2f' % dispersion)
printText("DispertionRatio = " + '%.2f' % dispertionRatio)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter10/Transformations.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples<filename>ExamplesPython_3.6/Chapter10/Transformations.py
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 10
Transformations: Performs geometric transformations to an image
'''
# Set module functions
from ImageUtilities import imageReadRGB, imageReadL, showImageRGB, createImageRGB
from PlotUtilities import plot3DColorHistogram
# Math and iteration. Import itertools directly; importing it via timeit relies
# on an implementation detail of the timeit module.
from math import sin, cos, sqrt
import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    maskName = Mask image name
    transformationType = Similarity, Affine, Homography
'''
pathToDir = "../../Images/Chapter10/Input/"
imageName = "cube1.png"
maskName = "mask1.png"
transformationType = "Homography"
# Read image
inputImage, width, height = imageReadRGB(pathToDir + imageName)
maskImage, width, height = imageReadL(pathToDir + maskName)
showImageRGB(inputImage)
# Image centre: transformations are applied in centred coordinates
centreX, centreY = width/2, height/2
# Build the 3x3 homogeneous transformation matrix T
if transformationType == "Similarity":
    # Similarity transformation
    s = [.4, 0.8, 0.8, 100.0, 0.0]  # Angle, scaleXY, translationXY
    T = [[ s[1]*cos(s[0]), s[1]*sin(s[0]), s[3]], \
         [ -s[2]*sin(s[0]), s[2]*cos(s[0]), s[4]], \
         [0 ,0, 1]]
if transformationType == "Affine":
    # Affine transformation
    T = [[ .8, .1, 100], \
         [ -.2, 1, 0], \
         [0 ,0, 1]]
if transformationType == "Homography":
    # Homography
    T = [[ .8, 0, 100], \
         [ .2,1, 0], \
         [.0005 ,-0.0005 , 1.2]]
tImage = createImageRGB(width, height)
for y, x in itertools.product(range(0, height-1), range(0, width-1)):
    # Alpha from the mask and average colour of the 2x2 source cell
    alpha = maskImage[y,x]/256.0
    if alpha == 0:
        continue
    rgb = (inputImage[y,x]/4.0 + inputImage[y+1,x+1]/4.0 + \
           inputImage[y+1,x]/4.0 + inputImage[y,x+1]/4.0) * alpha
    # Transform the pixel corner and its two adjacent corners; the homogeneous
    # w component (p*z) divides the result for the perspective mapping
    cx, cy = x - centreX, y - centreY
    p0z = T[2][0] * cx + T[2][1] * cy + T[2][2]
    p1z = T[2][0] * (cx+1) + T[2][1] * cy + T[2][2]
    p2z = T[2][0] * (cx+1) + T[2][1] * (cy+1) + T[2][2]
    if p0z != 0 and p1z != 0 and p2z !=0:
        p0x = int((T[0][0] * cx + T[0][1] * cy + T[0][2]) / p0z + centreX)
        p0y = int((T[1][0] * cx + T[1][1] * cy + T[1][2]) / p0z + centreY)
        p1x = int((T[0][0] * (cx+1) + T[0][1] * cy + T[0][2]) / p1z + centreX)
        p1y = int((T[1][0] * (cx+1) + T[1][1] * cy + T[1][2]) / p1z + centreY)
        p2x = int((T[0][0] * (cx+1) + T[0][1] * (cy+1) + T[0][2]) / p2z + centreX)
        p2y = int((T[1][0] * (cx+1) + T[1][1] * (cy+1) + T[1][2]) / p2z + centreY)
        # Fill output image: sweep the transformed cell along the two edge
        # vectors so the warped pixel leaves no holes
        v1,v2 = [p1x - p0x, p1y - p0y], [p2x - p0x, p2y - p0y]
        lv1 = max(.001,sqrt(v1[0]*v1[0] + v1[1]*v1[1]))
        lv2 = max(.001,sqrt(v2[0]*v2[0] + v2[1]*v2[1]))
        v1N = [v1[0]/lv1, v1[1]/lv1]
        v2N = [v2[0]/lv2, v2[1]/lv2]
        for dV1, dV2 in itertools.product(range(0, int(lv1)+1), range(0, int(lv2)+1)):
            a = int(p0x + dV1 * v1N[0] + dV2 * v2N[0])
            b = int(p0y + dV1 * v1N[1] + dV2 * v2N[1])
            if a>0 and a < width and b > 0 and b < height:
                tImage[b,a] = rgb
showImageRGB(tImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter3/IntensityNormalization.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 3
IntensityNormalization: Normalize an image
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageL
from ImagePropertiesUtilities import imageMaxMin
from PlotUtilities import plotHistogram
from ImageOperatorsUtilities import computeHistogram
# Iteration. Import itertools directly; importing it via timeit relies on an
# implementation detail of the timeit module.
import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
'''
pathToDir = "../../Images/Chapter3/Input/"
imageName = "Horse.png"
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Create image to store the normalization
outputNormalizedImage = createImageL(width, height)
# Maximum and range. Guard against a constant image (range 0), which would
# otherwise cause a division by zero below
maxVal, miniVal = imageMaxMin(inputImage)
brightRange = float(maxVal - miniVal)
if brightRange == 0.0:
    brightRange = 1.0
# Set the pixels in the output image
for x,y in itertools.product(range(0, width), range(0, height)):
    # Normalize the pixel value according to the range
    outputNormalizedImage[y,x] = round((inputImage[y,x] - miniVal) * 255.0 / brightRange)
# Compute histogram
histogramNormalizedImage = computeHistogram(outputNormalizedImage)
# Show output image and plot histogram
showImageL(outputNormalizedImage)
plotHistogram(histogramNormalizedImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter4/GradienMotion.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 4
GradientMotion: Compute optical flow by using the gradient
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageF, createImageUV
from ConvolutionUtilities import createGaussianKernel
from PlotUtilities import plotQuiver
# Math and iteration. Import itertools directly; importing it via timeit relies
# on an implementation detail of the timeit module.
from math import sqrt, atan2
import itertools
'''
Parameters:
    pathToDir = Input image directory
    image1Name = First input image name
    image2Name = Second input image name
    step = Delta that defines the image sample positions used to obtain optical flow
    smoothing = Smoothing parameter
    numIterations = Number of iterations
'''
pathToDir = "../../Images/Chapter4/Input/"
image1Name = "Rino0.png"
image2Name = "Rino1.png"
step = 10
smoothing = .05
numIterations = 40
# Read image into array. Both images must have same size
inputImage1, width, height = imageReadL(pathToDir + image1Name)
inputImage2, _, _ = imageReadL(pathToDir + image2Name)
# Show input image
showImageL(inputImage1)
showImageL(inputImage2)
# Reduce input image by sampling and by using a Gaussian filter
kernelSize = step * 4
kernelCentre = int((kernelSize - 1) / 2)
gaussKernel = createGaussianKernel(kernelSize)
# Create images to store the smaller image
widthSmall = int(width / step)
heightSmall = int(height / step)
image1Small = createImageF(widthSmall, heightSmall)
image2Small = createImageF(widthSmall, heightSmall)
for x,y in itertools.product(range(0, widthSmall), range(0, heightSmall)):
    inputX = x * step
    inputY = y * step
    sumImage1 = 0.0
    sumImage2 = 0.0
    sumWeights = 0.0
    for wx,wy in itertools.product(range(0, kernelSize), range(0, kernelSize)):
        posY = inputY + wy - kernelCentre
        posX = inputX + wx - kernelCentre
        if posY > -1 and posY < height and posX > -1 and posX < width:
            sumImage1 += float(inputImage1[posY,posX]) * float(gaussKernel[wy, wx])
            sumImage2 += float(inputImage2[posY,posX]) * float(gaussKernel[wy, wx])
            sumWeights += float(gaussKernel[wy, wx])
    # Normalize by the weights that actually fell inside the image
    if sumWeights != 0.0:
        image1Small[y,x] = sumImage1 / sumWeights
        image2Small[y,x] = sumImage2 / sumWeights
# Compute motion in the reduced image by iteratively solving the optical-flow
# constraint with a smoothness term (Horn-Schunck style relaxation)
motion = createImageUV(widthSmall, heightSmall)
motionTemp = createImageUV(widthSmall, heightSmall)
for k in range(0,numIterations):
    for x,y in itertools.product(range(1, widthSmall-1), range(1, heightSmall-1)):
        # Spatial (gX, gY) and temporal (gT) derivatives averaged over a
        # 2x2x2 cube of the two frames
        gX = (float(image1Small[y,x+1]) - float(image1Small[y,x])     + \
              float(image2Small[y,x+1]) - float(image2Small[y,x])     + \
              float(image1Small[y+1,x+1]) - float(image1Small[y+1,x]) + \
              float(image2Small[y+1,x+1]) - float(image2Small[y+1,x])) / 8.0
        gY = (float(image1Small[y+1,x]) - float(image1Small[y,x])     + \
              float(image2Small[y+1,x]) - float(image2Small[y,x])     + \
              float(image1Small[y+1,x+1]) - float(image1Small[y,x+1]) + \
              float(image2Small[y+1,x+1]) - float(image2Small[y,x+1])) / 8.0
        gT = (float(image2Small[y,x]) - float(image1Small[y,x])       + \
              float(image2Small[y+1,x]) - float(image1Small[y+1,x])   + \
              float(image2Small[y,x+1]) - float(image1Small[y,x+1])   + \
              float(image2Small[y+1,x+1]) - float(image1Small[y+1,x+1])) / 8.0
        # Average neighbouring flow, excluding borders where motion is undefined
        average =[0.0, 0.0]
        n = 0.0
        for wx,wy in itertools.product(range(-1, 2), range(-1, 2)):
            posY, posX = y + wy, x + wx
            if posY > 0 and posY < heightSmall-1 and posX > 0 and \
               posX < widthSmall-1:
                average[0] += motion[posY,posX,0]
                average[1] += motion[posY,posX,1]
                n += 1.0
        if n != 0:
            average[0], average[1] = average[0]/ n, average[1] / n
        # Solve equation to update estimates
        A = float(gX * average[0] + gY * average[1] + gT)
        B = float(1.0 + smoothing * (gX*gX + gY*gY))
        motionTemp[y,x,0] = average[0] - (gX * smoothing * A / B)
        motionTemp[y,x,1] = average[1] - (gY * smoothing * A / B)
    # Update motion for next iteration
    for x,y in itertools.product(range(1, widthSmall-1), range(1, heightSmall-1)):
        motion[y,x,0] = motionTemp[y,x,0]
        motion[y,x,1] = motionTemp[y,x,1]
# Compute magnitude and angle and set in the samples in the original image.
# Alternatively, this could be shown in low resolution
motionMagnitude = createImageF(width, height)
motionDirection = createImageF(width, height)
for x,y in itertools.product(range(1, widthSmall-1), range(1, heightSmall-1)):
    inputX = x * step
    inputY = y * step
    # The size is multiplied by the sampling step to express motion at the
    # resolution of the original image
    motionMagnitude[inputY,inputX] = sqrt(motion[y,x,0] * motion[y,x,0] + \
                                          motion[y,x,1] * motion[y,x,1]) * step
    motionDirection[inputY,inputX] = atan2( motion[y,x,1], -motion[y,x,0])
# Plot vectors
plotQuiver(motionMagnitude, motionDirection, .5, step)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter1/ImageDisplay.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 1
ImageDisplay: Loads and displays an image. Shows a surface and prints pixel data
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageF
from PrintUtilities import printImageRangeL
from PlotUtilities import plotColorSurface, plot3DColorHistogram
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
'''
pathToDir = "../../Images/Chapter1/Input/"
imageName = "SmoothSquare.png"
# Load the image and display it
inputImage, width, height = imageReadL(pathToDir + imageName)
showImageL(inputImage)
# Dump the grey-level values over the whole image extent
printImageRangeL(inputImage, [0, width-1], [0, height-1])
# Height values (inverted grey level) for the surface plot
outputZ = createImageF(width, height)
# Per-pixel RGB colours for the surface (grey shade of the source pixel)
colorsRGB = createImageF(width, height, 3)
# Fill surface heights and colours, one pixel at a time
for row in range(0, height):
    for col in range(0, width):
        grey = float(inputImage[row, col])
        outputZ[row, col] = 255 - grey
        shade = grey / 255.0
        colorsRGB[row, col] = [shade, shade, shade]
# Render the surface and the 3D histogram of the inverted image
plotColorSurface(outputZ, colorsRGB, [0,400], 1)
plot3DColorHistogram(outputZ, colorsRGB, [0,400])
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter5/GeneralizedHoughTransform.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 5
GeneralizedHoughTransform: Shape detection by the generalized Hough transform
'''
# Set module functions
from ImageUtilities import imageReadL, showImageF, showImageL, createImageF
from ImageOperatorsUtilities import applyCannyEdgeDetector
from PlotUtilities import plot3DHistogram
# Math and iteration. Import itertools directly; importing it via timeit relies
# on an implementation detail of the timeit module.
from math import pi
import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    templateName = Input image of a template
    gaussianKernelSize = Gaussian kernel size. Filter noise
    sobelKernelSize = Sobel kernel size. Edge detection
    upperT = Upper threshold
    lowerT = Lower threshold
    numEntries = Size of the R table
'''
pathToDir = "../../Images/Chapter5/Input/"
imageName = "Crossing.png"
templateName = "CrossingTemplate.png"
gaussianKernelSize = 5
sobelKernelSize = 3
upperT = 0.35
lowerT = 0.05
numEntries = 90
# Read image into array and show
templateImage, widthTemplate, heightTemplate = imageReadL(pathToDir + templateName)
inputImage, width, height = imageReadL(pathToDir + imageName)
showImageL(templateImage)
showImageL(inputImage)
# Compute edges for the template and the input image
magnitudeTemplate, angleTemplate = applyCannyEdgeDetector(templateImage, gaussianKernelSize, sobelKernelSize, upperT, lowerT)
magnitude, angle = applyCannyEdgeDetector(inputImage, gaussianKernelSize, sobelKernelSize, upperT, lowerT)
showImageF(magnitudeTemplate)
showImageF(magnitude)
# Compute reference point in the template as the centroid of its edge points
refPoint = [0,0]
edgePoints = []
for x,y in itertools.product(range(0, widthTemplate), range(0, heightTemplate)):
    if magnitudeTemplate[y,x] != 0:
        refPoint[0] += y
        refPoint[1] += x
        edgePoints.append((y,x))
numPts = len(edgePoints)
refPoint = [int(refPoint[0]/numPts),int(refPoint[1]/numPts)]
# Build R table as a list of lists: entry per quantized edge direction, each
# holding the displacements from edge point to the reference point
rTable = [[] for entryIndex in range(numEntries)]
deltaAngle = 2.0 * pi / (numEntries - 1.0)
for p in range(0, numPts):
    y, x = (edgePoints[p])[0], (edgePoints[p])[1]
    # The angle is in the interval -pi,+pi; shift to [0, 2*pi] for indexing
    ang = angleTemplate[y,x] + pi
    entryIndex = int(ang/deltaAngle)
    entry = rTable[entryIndex]
    entry.append((y-refPoint[0], x-refPoint[1]))
# Gather evidence of the template in the image: every image edge point votes
# for the reference-point positions stored for its edge direction
accumulator = createImageF(width, height)
for x,y in itertools.product(range(0, width), range(0, height)):
    if magnitude[y,x] != 0:
        # The angle is in the interval -pi,+pi
        ang = angle[y,x] + pi
        entryIndex = int(ang/deltaAngle)
        row = rTable[entryIndex]
        numPts = len(row)
        for p in range(0, numPts):
            x0, y0 = x - (row[p])[1], y - (row[p])[0]
            if y0>0 and x0>0 and y0<height and x0<width:
                accumulator[y0][x0] += 1
# Plot accumulator
plot3DHistogram(accumulator)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter2/LowHighFilters.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 2
LowHighFilters: Filter an image in frequency domain
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageF, showImageF
from FourierUtilities import computeCoefficients, computePowerfromCoefficients, reconstruction
from ImageOperatorsUtilities import imageLogF
# Math functions and iteration. Import itertools directly; importing it via
# timeit relies on an implementation detail of the timeit module.
from math import sqrt
import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
'''
pathToDir = "../../Images/Chapter2/Input/"
imageName = "Giraffe.png"
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show images
showImageL(inputImage)
# Compute Fourier coefficients
coeff, maxFreqW, maxFreqH = computeCoefficients(inputImage)
# Create low- and high-pass coefficient sets (real and imaginary parts)
coeffLow = createImageF( 1 + 2 * maxFreqW, 1 + 2 * maxFreqH, 2)
coeffHigh = createImageF( 1 + 2 * maxFreqW, 1 + 2 * maxFreqH, 2)
# Filter: split coefficients by radial frequency around the cut-off
cutFrequency = maxFreqW / 8
for kw,kh in itertools.product(range(-maxFreqW, maxFreqW + 1), \
                               range(-maxFreqH, maxFreqH + 1)):
    indexW, indexH = kw + maxFreqW, kh + maxFreqH
    if sqrt(kw * kw + kh * kh) < cutFrequency:
        coeffLow[indexH, indexW][0] = coeff[indexH, indexW][0]
        coeffLow[indexH, indexW][1] = coeff[indexH, indexW][1]
    else:
        coeffHigh[indexH, indexW][0] = coeff[indexH, indexW][0]
        coeffHigh[indexH, indexW][1] = coeff[indexH, indexW][1]
# Power spectra of both halves
powerLow = computePowerfromCoefficients(coeffLow)
powerHigh = computePowerfromCoefficients(coeffHigh)
# Show power in log scale for visibility
powerLowLog = imageLogF(powerLow)
powerHighLog = imageLogF(powerHigh)
showImageF(powerLowLog)
showImageF(powerHighLog)
# Reconstruct the low-pass and high-pass images
imageLow = reconstruction(coeffLow)
imageHigh = reconstruction(coeffHigh)
showImageF(imageLow)
showImageF(imageHigh)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter3/ImageHistogram.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 3
ImageHistogram: Compute the histogram of an image whose gray level values
are transformed by scale * GrayLevel + Translation
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createVectorI, createImageL
from PlotUtilities import plotHistogram
# Iteration. Import itertools directly; importing it via timeit relies on an
# implementation detail of the timeit module.
import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    scale = Scale the gray levels
    translation = Value added to the gray levels
'''
pathToDir = "../../Images/Chapter3/Input/"
imageName = "Horse.png"
scale = 1.0
translation = 0.0
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Transform the image: scale * GrayLevel + Translation, clamped to [0, 255]
for x,y in itertools.product(range(0, width), range(0, height)):
    b = int(scale*float(inputImage[y,x]) + translation)
    inputImage[y,x] = max(0, min(b, 255))
# Show transformed image
showImageL(inputImage)
# Vector of integer values to store the number of times a pixel value is repeated
outputHistogram = createVectorI(256)
# Get the number of times a pixel value is found in the image
for x,y in itertools.product(range(0, width), range(0, height)):
    pixelValue = inputImage[y,x]
    outputHistogram[pixelValue] += 1
# Plot histogram
plotHistogram(outputHistogram)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter10/Projection.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 10
Projection: Compute a projection from seven corresponding image and 3D points and perform the
projection on the image
'''
# Set module functions
from ImageUtilities import imageReadRGB, imageReadL, showImageRGB, createImageRGB
from GeometricUtilities import solveSystem, projectionPoints, fillImage
from PrintUtilities import printText
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
maskName = Mask image name
'''
pathToDir = "../../Images/Chapter10/Input/"
imageName = "cube1.png"
maskName = "mask1.png"
# Read image data
inputImage, width, height = imageReadRGB(pathToDir + imageName)
maskImage, width, height = imageReadL(pathToDir + maskName)
showImageRGB(inputImage)
# Image coordinates are expressed relative to the image centre throughout
centreX, centreY = width/2, height/2
# Corresponding points in the cube image
pts = [[131-centreX,378-centreY],[110-centreX,188-centreY],
       [200-centreX,73-centreY],[412-centreX,100-centreY],
       [410-centreX,285-centreY],[349-centreX,418-centreY],
       [345-centreX,220-centreY]]
# Matching 3D cube corners (unit cube). Seven correspondences are listed,
# but only the first six are used to build the system below
q = [[0,0,1],[0,1,1], [0,1,0],[1,1,0], [1,0,0],[1,0,1], [1,1,1]]
# Fill matrix: each correspondence contributes two rows of the direct
# linear system for the 12 entries of the 3x4 projection matrix
M = [ ]
for row in range(0,6):
    r1 = [ q[row][0],q[row][1], q[row][2],1,0,0,0,0,-pts[row][0]*q[row][0], \
           -pts[row][0]*q[row][1],-pts[row][0]*q[row][2],-pts[row][0] ]
    r2 = [ 0,0,0,0,q[row][0],q[row][1], q[row][2],1,-pts[row][1]*q[row][0], \
           -pts[row][1]*q[row][1],-pts[row][1]*q[row][2], -pts[row][1] ]
    M.append(r1)
    M.append(r2)
printText(M)
# Solves the equation A*x=b; the final 1 in r fixes the arbitrary overall
# scale of the homogeneous solution (otherwise x = 0 would solve the system)
r = [0,0,0,0,0,0,0,0,0,0,0,1]
p = solveSystem(M, r)
# Reshape the 12-vector p into the 3x4 projection matrix P (rows = x, y, w)
P = [[p[0], p[1], p[2], p[3]], \
     [p[4], p[5], p[6], p[7]], \
     [p[8], p[9], p[10], p[11]] ]
printText(P)
# Output image
tImage = createImageRGB(width, height)
# Project world points in the plane (origin, v1, v2) into the image
npts = 100
origin, v1, v2 = [0,0,1], [1,0,0], [0,1,0] # 3D plane, origin and two axis
xy = [ ] # Result points in the image
for a in range(0, npts):
    rowxy = [ ]
    for b in range(0, npts):
        # Points along each axis
        v1D = [a*v1[0]/float(npts-1), a*v1[1]/float(npts-1), a*v1[2]/float(npts-1)]
        v2D = [b*v2[0]/float(npts-1), b*v2[1]/float(npts-1), b*v2[2]/float(npts-1)]
        # Point in the 3D plane
        s = [origin[0]+v1D[0]+v2D[0], origin[1]+v1D[1]+v2D[1], origin[2]+v1D[2]+v2D[2]]
        # Transformation by the projection (homogeneous image point sx, sy, sz)
        sx = p[0]*s[0] + p[1]*s[1] + p[2]*s[2] + p[3]
        sy = p[4]*s[0] + p[5]*s[1] + p[6]*s[2] + p[7]
        sz = p[8]*s[0] + p[9]*s[1] + p[10]*s[2] + p[11]
        # Image coordinates: perspective division, then shift back to centre
        rowxy.append([int(sx/sz) + centreX, int(sy/sz) + centreY])
    xy.append(rowxy)
# Fill the points as red
fillImage([255,0,0], xy, tImage)
# Yellow face. Project points as before and fill in image
xy = projectionPoints([0,1,0],[1,0,0], [0,0,1], npts, p, centreX, centreY)
fillImage([255,255,0], xy, tImage)
# Green face
xy = projectionPoints([1,0,0],[0,1,0], [0,0,1], npts, p, centreX, centreY)
fillImage([0,128,0], xy, tImage)
showImageRGB(tImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter3/GaussianConvolution.py | <gh_stars>10-100
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 3
GaussianConvolution: Filter an image by the convolution of a Gaussian function
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageF
from ConvolutionUtilities import applyKernel
from PlotUtilities import plotColorSurface, plot3DColorHistogram
from PrintUtilities import printImageRangeF
# Math and iteration
from math import pow, exp
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
kernelSize = Kernel size
sigma = Standard deviation
'''
# Input parameters: image location, kernel size and Gaussian spread
pathToDir = "../../Images/Chapter3/Input/"
imageName = "Giraffe.png"
kernelSize = 9
sigma = 3.0
# Load the gray-level image and display it
inputImage, width, height = imageReadL(pathToDir + imageName)
showImageL(inputImage)
# Float image holding the Gaussian kernel values
kernelImage = createImageF(kernelSize, kernelSize)
# Gray-scale colours used only by the surface/histogram plots below
colorsRGB = createImageF(kernelSize, kernelSize, 3)
# Fill the kernel with unnormalised Gaussian values centred on its middle
centre = (kernelSize - 1) / 2
sumValues = 0
for col in range(kernelSize):
    for row in range(kernelSize):
        value = exp( -0.5 * (pow((col - centre)/sigma, 2.0) + \
                             pow((row - centre)/sigma, 2.0)) )
        kernelImage[row, col] = value
        sumValues += kernelImage[row, col]
        # Plot colour mirrors the kernel height (gray level = Gaussian value)
        colorsRGB[row, col] = [value, value, value]
# Normalise so the kernel entries sum to one
for col in range(kernelSize):
    for row in range(kernelSize):
        kernelImage[row, col] /= sumValues
# Convolve the image with the Gaussian kernel
outputImage = applyKernel(inputImage, kernelImage)
# Plot the kernel as a colour surface and as a 3D histogram
plotColorSurface(kernelImage, colorsRGB)
plot3DColorHistogram(kernelImage, colorsRGB)
# Print the kernel values
printImageRangeF(kernelImage, [0, kernelSize-1], [0, kernelSize-1], '7.3')
# Show the filtered image
showImageL(outputImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Modules/FourierUtilities.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples<gh_stars>10-100
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
FourierUtilties: Helper module for Fourier analysis
'''
# Images
from ImageUtilities import createImageF
from PrintUtilities import printProgress
# Math and iteration functions
from math import pi, sin, cos, atan2, sqrt
from timeit import itertools
# Compute the power and phase from an input image
def computePowerandPhase(inputImage):
    """Compute the power and phase spectra of a gray-level image.

    The 2D discrete transform is evaluated directly with four nested loops
    (no FFT), so the cost is O(width * height) per coefficient and it is
    slow for large images.  Coefficient (kw, kh), for kw in
    [-maxFrequencyW, maxFrequencyW] and kh in [-maxFrequencyH, maxFrequencyH],
    is stored at array position [kh + maxFrequencyH, kw + maxFrequencyW]
    as a [real, imaginary] pair.  No 1/(m*n) normalisation is applied here.

    Returns (powerImage, phaseImage): per-frequency magnitude and atan2 phase.
    """
    height = len(inputImage)
    width = len(inputImage[0])
    # Create coefficients Image. Two floats to represent a complex number
    # Maximum frequency according to sampling
    maxFrequencyW = int(width /2)
    maxFrequencyH = int(height/2)
    numCoefficientsW = 1 + 2 * maxFrequencyW
    numCoefficientsH = 1 + 2 * maxFrequencyH
    coefficients = createImageF(numCoefficientsW ,numCoefficientsH , 2)
    # Adjust the size of the data to be even
    m = float(width)
    n = float(height)
    if width % 2 == 0:
        m = width + 1
    if height % 2 == 0:
        n = height + 1
    # Fundamental frequency
    ww = (2.0 * pi) / float(m)
    wh = (2.0 * pi) / float(n)
    # Compute values
    for kw in range(-maxFrequencyW, maxFrequencyW + 1):
        printProgress(kw + maxFrequencyW, numCoefficientsW)
        for kh in range(-maxFrequencyH, maxFrequencyH + 1):
            indexInArrayW = kw + maxFrequencyW
            indexInArrayH = kh + maxFrequencyH
            # By the angle-sum identities the two accumulators below are
            # cos(x*ww*kw + y*wh*kh) and sin(x*ww*kw + y*wh*kh), each
            # weighted by the pixel value
            for x,y in itertools.product(range(0, width), range(0, height)):
                coefficients[indexInArrayH, indexInArrayW][0] += inputImage[y,x] * (cos(x * ww * kw) * cos(y * wh * kh) - sin(x * ww * kw) * sin(y * wh * kh))
                coefficients[indexInArrayH, indexInArrayW][1] += inputImage[y,x] * (cos(x * ww * kw) * sin(y * wh * kh) + sin(x * ww * kw) * cos(y * wh * kh))
    # Power: magnitude of each complex coefficient
    powerImage = createImageF( 1 + 2 * maxFrequencyW, 1 + 2 * maxFrequencyH)
    for kw,kh in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1), \
                                   range(-maxFrequencyH, maxFrequencyH + 1)):
        indexInArrayW = kw + maxFrequencyW
        indexInArrayH = kh + maxFrequencyH
        powerImage[indexInArrayH, indexInArrayW] = sqrt(coefficients[indexInArrayH, indexInArrayW][0] * coefficients[indexInArrayH, indexInArrayW][0] + \
                                                        coefficients[indexInArrayH, indexInArrayW][1] * coefficients[indexInArrayH, indexInArrayW][1])
    # Phase: angle of each complex coefficient
    phaseImage = createImageF( 1 + 2 * maxFrequencyW, 1 + 2 * maxFrequencyH)
    for kw,kh in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1), \
                                   range(-maxFrequencyH, maxFrequencyH + 1)):
        indexInArrayW = kw + maxFrequencyW
        indexInArrayH = kh + maxFrequencyH
        phaseImage[indexInArrayH, indexInArrayW] = atan2(coefficients[indexInArrayH, indexInArrayW][1], coefficients[indexInArrayH, indexInArrayW][0])
    return powerImage, phaseImage
# Compute Fourier transform coefficients from an image
def computeCoefficients(inputImage):
    """Compute the discrete transform coefficients of a gray-level image.

    Coefficient (kw, kh) is stored at array position
    [kh + maxFrequencyH, kw + maxFrequencyW] as a [real, imaginary] pair.

    Returns (coefficients, maxFrequencyW, maxFrequencyH).
    """
    height = len(inputImage)
    width = len(inputImage[0])
    # Create coefficients Image. Two floats to represent a complex number
    # Maximum frequency according to sampling
    maxFrequencyW = int(width /2)
    maxFrequencyH = int(height/2)
    numCoefficientsW = 1 + 2 * maxFrequencyW
    numCoefficientsH = 1 + 2 * maxFrequencyH
    coefficients = createImageF(numCoefficientsW ,numCoefficientsH , 2)
    # Adjust the size of the data to be even
    m = float(width)
    n = float(height)
    if width % 2 == 0:
        m = width + 1
    if height % 2 == 0:
        n = height + 1
    # Fundamental frequency
    ww = (2.0 * pi) / float(m)
    wh = (2.0 * pi) / float(n)
    # Compute values
    for kw in range(-maxFrequencyW, maxFrequencyW + 1):
        printProgress(kw + maxFrequencyW, numCoefficientsW - 1)
        for kh in range(-maxFrequencyH, maxFrequencyH + 1):
            indexInArrayW = kw + maxFrequencyW
            indexInArrayH = kh + maxFrequencyH
            for x,y in itertools.product(range(0, width), range(0, height)):
                coefficients[indexInArrayH, indexInArrayW][0] += inputImage[y,x] * (cos(x * ww * kw) * cos(y * wh * kh) - sin(x * ww * kw) * sin(y * wh * kh))
                coefficients[indexInArrayH, indexInArrayW][1] += inputImage[y,x] * (cos(x * ww * kw) * sin(y * wh * kh) + sin(x * ww * kw) * cos(y * wh * kh))
    # NOTE(review): this normalisation loop is buggy.  indexInArrayW and
    # indexInArrayH are NOT recomputed from kw/kh here, so every iteration
    # divides the SAME coefficient (the last one written by the loop above)
    # by (m*n) again and again, driving it towards zero, while all other
    # coefficients remain unnormalised.  The obvious fix is to recompute the
    # indices inside the loop — but reconstruction() in this module also
    # divides each coefficient by (m*n), so doing that would scale the
    # forward/inverse round trip by 1/(m*n) twice.  Confirm which function
    # is meant to own the normalisation before changing either.
    for kw in range(-maxFrequencyW, maxFrequencyW + 1):
        for kh in range(-maxFrequencyH, maxFrequencyH + 1):
            coefficients[indexInArrayH, indexInArrayW][0] /= (m*n)
            coefficients[indexInArrayH, indexInArrayW][1] /= (m*n)
    return coefficients, maxFrequencyW, maxFrequencyH
# Return the power image from the coefficients
def computePowerfromCoefficients(coefficients):
    """Return the power spectrum: the magnitude of every [real, imaginary]
    coefficient pair in the given coefficients image."""
    # Recover the maximum frequencies from the array dimensions
    freqH = int((len(coefficients) - 1) / 2)
    freqW = int((len(coefficients[0]) - 1) / 2)
    powerImage = createImageF( 1 + 2 * freqW, 1 + 2 * freqH)
    for kw in range(-freqW, freqW + 1):
        # Progress feedback, one tick per coefficient column
        printProgress(kw + freqW, 2 * freqW)
        col = kw + freqW
        for kh in range(-freqH, freqH + 1):
            row = kh + freqH
            re = coefficients[row, col][0]
            im = coefficients[row, col][1]
            powerImage[row, col] = sqrt(re * re + im * im)
    return powerImage
# Inverse transform
def reconstruction(coefficients):
    """Inverse transform: rebuild the spatial image from its coefficients.

    The output size is recovered from the coefficients array dimensions.
    Each coefficient is scaled by 1/(m*n) as part of the inverse sum.
    This is a direct evaluation (no FFT), so it is slow for large images.
    """
    # Maximum frequency
    maxFrequencyH = int((len(coefficients) - 1) / 2)
    maxFrequencyW = int((len(coefficients[0]) - 1) / 2)
    height = 2 * maxFrequencyH
    width = 2 * maxFrequencyW
    # Adjust the size of the data to be even
    m = float(width)
    n = float(height)
    if width % 2 == 0:
        m = width + 1
    if height % 2 == 0:
        n = height + 1
    # Fundamental frequency
    ww = (2.0 * pi) / float(m)
    wh = (2.0 * pi) / float(n)
    reconstructionImage = createImageF(m, n)
    for x in range(0, width):
        printProgress(x, width - 1)
        for y in range(0, height):
            # Sum the contribution of every frequency at pixel (x, y)
            for kw,kh in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1), \
                                           range(-maxFrequencyH, maxFrequencyH + 1)):
                indexInArrayW = kw + maxFrequencyW
                indexInArrayH = kh + maxFrequencyH
                reconstructionImage[y,x] += \
                    (coefficients[indexInArrayH, indexInArrayW][0] / (m*n)) * (cos(x * ww * kw) * cos(y * wh * kh) - sin(x * ww * kw) * sin(y * wh * kh)) + \
                    (coefficients[indexInArrayH, indexInArrayW][1] / (m*n)) * (cos(x * ww * kw) * sin(y * wh * kh) + sin(x * ww * kw) * cos(y * wh * kh))
    return reconstructionImage
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Modules/GeometricUtilities.py | <gh_stars>10-100
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
GeometricUtilities: Helper module to apply geometric transformations to images
'''
# Images
from ImageUtilities import createImageRGB
# Math
from math import sqrt
# Iteration
from timeit import itertools
# Image data
import numpy as np
# Solves the equation A*x=b
def solveSystem(A, b):
    """Solve the linear system A*x = b and return x (a numpy array)."""
    return np.linalg.solve(A, b)
# Transform an image
def imageTransform(image, maskImage, T):
    """Apply the 3x3 projective transformation T to an RGB image.

    Coordinates are taken relative to the image centre.  Each source pixel
    (where the mask is non-zero) is averaged with three neighbours, weighted
    by the mask alpha, and splatted onto the transformed quadrilateral in
    the output image by stepping along the two transformed edge directions.
    Returns the transformed RGB image.
    """
    height, width = len(image), len(image[0])
    centreX, centreY = width/2, height/2
    sImage = createImageRGB(width, height)
    for y, x in itertools.product(range(0, height-1), range(0, width-1)):
        # Alpha and colour: mask value scaled to [0, 1); skip fully masked pixels
        alpha = maskImage[y,x]/256.0
        if alpha == 0:
            continue
        # 2x2 average of the source colour, attenuated by alpha
        rgb = (image[y,x]/4.0 + image[y+1,x+1]/4.0 + image[y+1,x]/4.0 + \
               image[y,x+1]/4.0) * alpha
        # Transform: homogeneous depth terms for the pixel corner and the
        # two adjacent corners (x+1, y) and (x+1, y+1)
        cx, cy = x - centreX, y - centreY
        p0z = T[2][0] * cx + T[2][1] * cy + T[2][2]
        p1z = T[2][0] * (cx+1) + T[2][1] * cy + T[2][2]
        p2z = T[2][0] * (cx+1) + T[2][1] * (cy+1) + T[2][2]
        if p0z != 0 and p1z != 0 and p2z !=0:
            # Perspective division, then shift back to image coordinates
            p0x = int((T[0][0] * cx + T[0][1] * cy + T[0][2]) / p0z + centreX)
            p0y = int((T[1][0] * cx + T[1][1] * cy + T[1][2]) / p0z + centreY)
            p1x = int((T[0][0] * (cx+1) + T[0][1] * cy + T[0][2]) / p1z + centreX)
            p1y = int((T[1][0] * (cx+1) + T[1][1] * cy + T[1][2]) / p1z + centreY)
            p2x = int((T[0][0] * (cx+1) + T[0][1] * (cy+1) + T[0][2]) / p2z + centreX)
            p2y = int((T[1][0] * (cx+1) + T[1][1] * (cy+1) + T[1][2]) / p2z + centreY)
            # Fill output image: walk along the two (unit) edge directions so
            # the transformed cell is covered without gaps
            v1, v2 = [p1x - p0x, p1y - p0y], [p2x - p0x, p2y - p0y]
            lv1 = max(.001,sqrt(v1[0]*v1[0] + v1[1]*v1[1]))
            lv2 = max(.001,sqrt(v2[0]*v2[0] + v2[1]*v2[1]))
            v1N = [v1[0]/lv1, v1[1]/lv1]
            v2N = [v2[0]/lv2, v2[1]/lv2]
            for dV1, dV2 in itertools.product(range(0, int(lv1)+1), range(0, int(lv2)+1)):
                a,b = int(p0x + dV1 * v1N[0] + dV2 * v2N[0]), int(p0y + dV1 * v1N[1] + dV2 * v2N[1])
                if a>0 and a < width and b > 0 and b < height:
                    sImage[b,a] = rgb
    return sImage
# Get corresponding points in the 3d points plane origin,v1,v2
def projectionPoints(origin, v1, v2, npts, p, centreX, centreY):
    """Project an npts x npts grid of 3D points lying on the plane spanned
    by v1 and v2 at the given origin, using the 3x4 projection stored in
    the 12-vector p.  Returns the grid of [x, y] image coordinates
    (relative to centreX, centreY)."""
    denom = float(npts - 1)
    xy = [ ]
    for stepA in range(npts):
        rowxy = [ ]
        for stepB in range(npts):
            # 3D point: origin + fractional steps along each plane axis
            point = [origin[k] + stepA * v1[k] / denom + stepB * v2[k] / denom
                     for k in range(3)]
            # Homogeneous image coordinates under the projection p
            px = p[0] * point[0] + p[1] * point[1] + p[2] * point[2] + p[3]
            py = p[4] * point[0] + p[5] * point[1] + p[6] * point[2] + p[7]
            pz = p[8] * point[0] + p[9] * point[1] + p[10] * point[2] + p[11]
            # Perspective division and shift back to image coordinates
            rowxy.append([int((px / pz) + centreX), int((py / pz) + centreY)])
        xy.append(rowxy)
    return xy
# Get all the projection points of a unit cube
def projectionCubePoints(npts, p, centreX, centreY):
    """Project three faces of the unit cube (each given as origin and two
    axis vectors) and return their image-point grids."""
    faces = [([0,0,1], [1,0,0], [0,1,0]),
             ([0,1,0], [1,0,0], [0,0,1]),
             ([1,0,0], [0,1,0], [0,0,1])]
    return [projectionPoints(origin, v1, v2, npts, p, centreX, centreY)
            for origin, v1, v2 in faces]
def fillImage(colour, xy, image):
    """Paint every quadrilateral patch of the point grid xy with a single
    colour.  Each cell is covered by sampling at quarter-pixel steps along
    its two edge directions; samples outside the image are skipped."""
    gridSize = len(xy)
    imgHeight = len(image)
    imgWidth = len(image[0])
    for row, col in itertools.product(range(gridSize - 1), range(gridSize - 1)):
        corner = xy[row][col]
        downPt = xy[row + 1][col]
        rightPt = xy[row][col + 1]
        # Edge vectors of the patch and their (clamped) lengths
        e1 = [downPt[0] - corner[0], downPt[1] - corner[1]]
        e2 = [rightPt[0] - corner[0], rightPt[1] - corner[1]]
        len1 = max(.001, sqrt(e1[0] * e1[0] + e1[1] * e1[1]))
        len2 = max(.001, sqrt(e2[0] * e2[0] + e2[1] * e2[1]))
        dir1 = [e1[0] / len1, e1[1] / len1]
        dir2 = [e2[0] / len2, e2[1] / len2]
        # Four samples per unit length in each direction to avoid gaps
        for s1, s2 in itertools.product(range(4 * (1 + int(len1))),
                                        range(4 * (1 + int(len2)))):
            x = int(corner[0] + dir2[0] * s2 * .25 + dir1[0] * s1 * .25)
            y = int(corner[1] + dir2[1] * s2 * .25 + dir1[1] * s1 * .25)
            if 0 < x < imgWidth and 0 < y < imgHeight:
                image[y, x] = colour
def fillImageColours(colours, xy, image):
    """Paint every face in xy (a list of point grids) using the per-point
    colour arrays in colours, one colour array per face.  Each grid cell is
    covered by sampling at quarter-pixel steps along its edge directions."""
    imgHeight = len(image)
    imgWidth = len(image[0])
    for faceNum in range(len(xy)):
        face = xy[faceNum]
        gridSize = len(face)
        faceColours = colours[faceNum]
        for a, b in itertools.product(range(gridSize - 1), range(gridSize - 1)):
            corner = face[a][b]
            downPt = face[a + 1][b]
            rightPt = face[a][b + 1]
            # Colour assigned to this cell's corner point
            pixelColour = faceColours[a, b]
            # Edge vectors of the cell and their (clamped) lengths
            e1 = [downPt[0] - corner[0], downPt[1] - corner[1]]
            e2 = [rightPt[0] - corner[0], rightPt[1] - corner[1]]
            len1 = max(.001, sqrt(e1[0] * e1[0] + e1[1] * e1[1]))
            len2 = max(.001, sqrt(e2[0] * e2[0] + e2[1] * e2[1]))
            dir1 = [e1[0] / len1, e1[1] / len1]
            dir2 = [e2[0] / len2, e2[1] / len2]
            # Four samples per unit length in each direction to avoid gaps
            for s1, s2 in itertools.product(range(4 * (1 + int(len1))),
                                            range(4 * (1 + int(len2)))):
                x = int(corner[0] + dir2[0] * s2 * .25 + dir1[0] * s1 * .25)
                y = int(corner[1] + dir2[1] * s2 * .25 + dir1[1] * s1 * .25)
                if 0 < x < imgWidth and 0 < y < imgHeight:
                    image[y, x] = pixelColour
def computeProjection(pts,q):
    """Estimate the 12 entries of a 3x4 projection matrix from six image
    points (pts) and their corresponding 3D points (q), by solving the
    direct linear system.  Returns the solution vector p."""
    # Each correspondence contributes two homogeneous equations
    M = [ ]
    for row in range(6):
        u, v = pts[row][0], pts[row][1]
        X, Y, Z = q[row][0], q[row][1], q[row][2]
        M.append([X, Y, Z, 1, 0, 0, 0, 0, -u*X, -u*Y, -u*Z, -u])
        M.append([0, 0, 0, 0, X, Y, Z, 1, -v*X, -v*Y, -v*Z, -v])
    print(M)
    # Solves the equation A*x=b; the trailing 1.0 fixes the arbitrary
    # overall scale of the homogeneous solution
    r = [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0]
    p = solveSystem(M, r)
    return p
def getPointColours(xy, mask, image):
    """Sample the image colour at every projected grid point of every face
    in xy, attenuated by the mask alpha at that pixel.  Returns one RGB
    colour grid per face; points outside the image or fully masked keep
    the default colour."""
    imgHeight = len(image)
    imgWidth = len(image[0])
    colourImages = [ ]
    for face in xy:
        gridSize = len(face)
        colours = createImageRGB(gridSize, gridSize)
        for a, b in itertools.product(range(gridSize - 1), range(gridSize - 1)):
            y, x = face[a][b][1], face[a][b][0]
            if 0 < y < imgHeight and 0 < x < imgWidth:
                # Mask value scaled to [0, 1); zero means fully masked out
                alpha = mask[y, x] / 256.0
                if alpha > 0.0:
                    c = image[y, x]
                    colours[a, b] = [alpha * c[0], alpha * c[1], alpha * c[2]]
        colourImages.append(colours)
    return colourImages
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter3/BasicPointOperators.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 3
BasicPointOperators: Applies point operations to an image (sawtooth,logarithmic,exponential)
and show the histogram of the resulting image
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageL
from PlotUtilities import plotHistogram
from ImageOperatorsUtilities import computeHistogram
# Math functions and iteration
from math import log, exp
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
intevalSize = Define the sawtooth fixed interval size
'''
pathToDir = "../../Images/Chapter3/Input/"
imageName = "Horse.png"
intevalSize = 64
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Create 3 images to store the result of 3 operators
outputSawtoothImage = createImageL(width, height)
outputLogarithmicImage = createImageL(width, height)
outputExponentialImage = createImageL(width, height)
# Set the pixels in the output image
for x,y in itertools.product(range(0, width), range(0, height)):
inputValue = int(inputImage[y,x])
# Set the pixels in the sawtooth image
pixelInInterval = inputValue % intevalSize
gain = float(pixelInInterval) / float(intevalSize)
outputSawtoothImage[y,x] = inputValue * gain
# Set the pixels in the Logarithmic
outputLogarithmicImage[y,x] = 20 * log(inputValue * 100.0)
# Set the pixels in the Exponential image
outputExponentialImage[y,x] = 20 * exp(inputValue / 100.0)
# Compute histograms
histogramSawtoothImage = computeHistogram(outputSawtoothImage)
histogramLogarithmicImage = computeHistogram(outputLogarithmicImage)
histogramExponentialImage = computeHistogram(outputExponentialImage)
# Show output images
showImageL(outputSawtoothImage)
showImageL(outputLogarithmicImage)
showImageL(outputExponentialImage)
# Plot histograms
plotHistogram(histogramSawtoothImage)
plotHistogram(histogramLogarithmicImage)
plotHistogram(histogramExponentialImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter5/HTEllipseDecomposition.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 5
HTEllipseDecomposition: Ellipse detection by the Hough transform decomposition
'''
# Set module functions
from ImageUtilities import imageReadL, showImageF, showImageL, createScaleImageL, createImageF
from ImageOperatorsUtilities import applyCannyEdgeDetector
from ImagePropertiesUtilities import imageArgMax
from PlotUtilities import plot3DHistogram
# Math and iteration
from math import sqrt, pi, sin, cos, tan
from random import randint
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
gaussianKernelSize = Gaussian kernel size. Filter noise
sobelKernelSize = Sobel kernel size. Edge detection
upperT = Upper threshold
lowerT = Lower threshold
axisRange = Possible axes size
angleRange = Possible angles
segmentLenghtThreshod = To remove small line segments. Percentage of larger segment
pairsPerPoint = Number of pairs for each edge point
'''
pathToDir = "../../Images/Chapter5/Input/"
imageName = "Cup.png"
gaussianKernelSize = 5
sobelKernelSize = 3
upperT = 0.3
lowerT = 0.2
axisRange = [20,65]
angleRange = [0, 4]
segmentLenghtThreshod = 0.20
pairsPerPoint = 30
# Sample points distance. Higher the eccentricity higher the distance so points give accurate positions
axisRatio = float(axisRange[1]) / (2.0*float(axisRange[0]))
deltaPointRange = [int(axisRatio*axisRange[0]), int(1.2*axisRange[1])]
# Read image #49 20 0.03490658503988659 125 77 2
inputImage, width, height = imageReadL(pathToDir + imageName)
# Compute edges
magnitude, angle = applyCannyEdgeDetector(inputImage, gaussianKernelSize, sobelKernelSize, upperT, lowerT)
showImageF(magnitude)
# Find segments: flood-fill connected runs of edge pixels into separate
# point lists, recording the longest segment length for later filtering
segmentsList = []
segmentsImage = createImageF(width, height)
maxSegmentLenght = 0
for x,y in itertools.product(range(0, width), range(0, height)):
    if magnitude[y,x] != 0 and segmentsImage[y,x] == 0:
        segment = [ ]
        segmentPoints = [(y,x)]
        segmentsImage[y,x] = 255
        # Breadth-first growth over the 8-connected neighbourhood
        while len(segmentPoints) > 0:
            yc = (segmentPoints[0])[0]
            xc = (segmentPoints[0])[1]
            segment.append((yc,xc))
            segmentPoints = segmentPoints[1:]
            for dx,dy in itertools.product(range(-1,2), range(-1,2)):
                xn, yn = xc+dx, yc+dy
                # Fixed operator precedence: the original condition
                # "dx!=0 or dy!=0 and xn > 0 and ..." grouped as
                # dx!=0 or (dy!=0 and bounds...), so the bounds checks were
                # bypassed whenever dx != 0, permitting out-of-range (or
                # negatively wrapped) indices into magnitude/segmentsImage.
                if (dx != 0 or dy != 0) and xn > 0 and yn > 0 and xn < width and yn < height:
                    if magnitude[yn,xn] != 0 and segmentsImage[yn,xn] == 0:
                        segmentPoints.append((yn,xn))
                        segmentsImage[yn,xn] = 255
        segmentsList.append(segment)
        if len(segment) > maxSegmentLenght:
            maxSegmentLenght = len(segment)
# Remove segments based on its size
# Segments can also be removed based on curvature or
# gather evidence for each segment to determine if it is part of an ellipse
segments = []
numSegments = len(segmentsList)
for s in range(0, numSegments):
segment = segmentsList[s]
numPoints = len(segment)
if numPoints >= maxSegmentLenght * segmentLenghtThreshod:
for p in range(0, numPoints):
y = (segment[p])[0]
x = (segment[p])[1]
segments.append((y,x))
# Accumulator to gather evidence for the ellipse location
accumulator = createImageF(width, height)
numPoints = len(segments)
# For a pair p1 = (x1,y1), p2=(x2,y2)
for p1 in range(0, numPoints):
for p in range(0,pairsPerPoint):
p2 = randint(0, numPoints-1)
y1,x1 = (segments[p1])[0], (segments[p1])[1]
y2,x2 = (segments[p2])[0], (segments[p2])[1]
d = sqrt((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2))
if d > deltaPointRange[0]:
angle1, angle2 = -angle[y1,x1], -angle[y2,x2]
# To void parallel edge directions
w = cos(angle1)*cos(angle2) + sin(angle1)*sin(angle2)
if w < 0.9:
xm, ym = (x1 + x2) / 2.0, (y1 + y2) / 2.0
m1, m2 = tan(angle1), tan(angle2)
A,B = y1-y2, x2-x1
C,D = m1+m2, m1*m2
M,N = A*C+2*B*D, 2*A+B*C
norm = sqrt(M*M+N*N)
M,N = M/norm, N/norm
# Draw horizontal or vertical lines
if abs(M) < abs(N):
m = float(M) / float(N)
b1, b2 = y1-m1*x1, y2-m2*x2
xIntersect = (b2-b1)/ (m1-m2)
if xIntersect < xm:
xi,xf = int(xm), min(int(xm + axisRange[1]), width-1)
else:
xi,xf = max(1,int(xm - axisRange[1])), int(xm)
for x in range(xi,xf):
y = m *(x - xm) + ym
d1 = sqrt((x-x1)*(x-x1)+(y-y1)*(y-y1))
if d1 > axisRange[0] and d1 < axisRange[1]:
yInt = int(y)
if yInt > 0 and yInt < height -1:
weight = y - yInt
accumulator[yInt, x] += (1.0 - weight)
accumulator[yInt+1, x] += weight
else:
m = float(N) / float(M)
b1, b2 = x1-m1*y1, x2-m2*y2
yIntersect = (b2-b1) / (m1-m2)
if yIntersect < ym:
yi,yf = int(ym), min(int(ym + axisRange[1]), height-1)
else:
yi,yf = max(1,int(ym - axisRange[1])), int(ym)
for y in range(yi,yf):
x = m *(y - ym) + xm
d1 = sqrt((x-x1)*(x-x1)+(y-y1)*(y-y1))
if d1 > axisRange[0] and d1 < axisRange[1]:
xInt = int(x)
if xInt > 0 and xInt < width -1:
weight = x - xInt
accumulator[y, xInt] += (1.0 - weight)
accumulator[y, xInt+1] += weight
# Find maximum and plot accumulator
maximumPos = imageArgMax(accumulator)
#print(maximumPos[0], maximumPos[1])
plot3DHistogram(accumulator)
# Gather evidence axis parameters
angleSize = angleRange[1] - angleRange[0]
maxAxisSize = axisRange[1] - axisRange[0]
accumulatorAxis = createImageF(maxAxisSize, maxAxisSize, angleSize)
for x,y in itertools.product(range(0, width), range(0, height)):
if magnitude[y,x] != 0:
dx = x - maximumPos[1]
dy = y - maximumPos[0]
for r in range(0, angleSize):
rot = ((r + angleRange[0]) * pi) / 180.0
for angle in range(0,360):
t = (angle * pi) / 180.0
A,B = cos(t)*cos(rot), sin(t)*sin(rot)
C,D = cos(t)*sin(rot), sin(t)*cos(rot)
Det = A*D + C*B
if Det != 0:
Detx, Dety = dx*D+dy*B, A*dy-C*dx
a, b = Detx/Det - axisRange[0], Dety/Det - axisRange[0]
aInt, bInt = int(a), int(b)
if aInt>0 and bInt>0 and aInt<maxAxisSize-2 and bInt<maxAxisSize-2 \
and bInt<aInt:
weightA, weightB = a-aInt, b-bInt
accumulatorAxis[bInt, aInt,r] += (1.0-weightA) + (1.0-weightB)
accumulatorAxis[bInt, aInt+1,r] += weightA + (1.0-weightB)
accumulatorAxis[bInt+1,aInt,r] += (1.0-weightA) + weightB
accumulatorAxis[bInt+1,aInt+1,r] += weightA + weightB
# Find maximum and plot accumulator
maximumAxis = imageArgMax(accumulatorAxis)
plot3DHistogram(accumulatorAxis[:, :, maximumAxis[2]])
# Draw ellipse on an output image
outputImage = createScaleImageL(inputImage, 0.5)
rotAngle = ((maximumAxis[2]+angleRange[0]) * pi) / 180.0
a,b = maximumAxis[1]+axisRange[0], maximumAxis[0]+axisRange[0]
#print(a, b)
for m in range(0,360):
angle = (m * pi) / 180.0
x = int(maximumPos[1]+ a*cos(angle)*cos(rotAngle) - b*sin(angle)*sin(rotAngle))
y = int(maximumPos[0]+ a*cos(angle)*sin(rotAngle) + b*sin(angle)*cos(rotAngle))
if x<width and x>0 and y<height and y>0:
outputImage[y,x] = 255
showImageL(outputImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Modules/ImagePropertiesUtilities.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
ImagePropertiesUtilities: Helper module to obtain information from an image
'''
# Array to store image data
from numpy import amax, amin, unravel_index
# Mean form a float image
def meanStddev(image):
    """Return (mean, sumSquaredDeviations) for a float image.

    NOTE(review): despite the name, the second value is the SUM of squared
    deviations from the mean — no division by the pixel count and no square
    root are applied, so it is not the standard deviation.
    """
    rows = len(image)
    cols = len(image[0])
    # Mean gray level over every pixel
    total = 0.0
    for col in range(cols):
        for row in range(rows):
            total += float(image[row, col])
    mean = total / (float(cols) * rows)
    # Sum of squared deviations from the mean
    sumSq = 0.0
    for col in range(cols):
        for row in range(rows):
            sumSq += (float(image[row, col]) - mean) ** 2.0
    return mean, sumSq
# Return the maximum and minimum
def imageMaxMin(image):
    """Return (maximum, minimum) pixel values of the image."""
    return amax(image), amin(image)
# Return the maximum position
def imageArgMax(image):
    """Return the multi-dimensional index (row, col, ...) of the largest
    value in the image (a numpy array)."""
    return unravel_index(image.argmax(), image.shape)
# Detect a peaks in a 2D image
def peakDetectorImageL(image, peakThreshold, suppWindow = 3):
    """Return (row, col) positions whose value exceeds peakThreshold and
    which have no strictly greater value inside a (2*suppWindow+1)^2
    neighbourhood (clipped at the image borders).  Ties count as peaks."""
    rows = len(image)
    cols = len(image[0])
    found = []
    for r in range(rows):
        for c in range(cols):
            if image[r, c] <= peakThreshold:
                continue
            isPeak = True
            for wr in range(r - suppWindow, r + suppWindow + 1):
                for wc in range(c - suppWindow, c + suppWindow + 1):
                    inBounds = 0 <= wr < rows and 0 <= wc < cols
                    if inBounds and image[r, c] < image[wr, wc]:
                        isPeak = False
            if isPeak:
                found.append((r, c))
    return found
# Detect a peaks in a vector
def peakDetectorVector(image, peakThreshold, suppWindow = 3):
    """Return indices whose value exceeds peakThreshold and which have no
    strictly greater value within +/- suppWindow (clipped at the vector
    ends).  Ties count as peaks."""
    size = len(image)
    found = []
    for x in range(size):
        if image[x] <= peakThreshold:
            continue
        higherNeighbour = False
        for wx in range(x - suppWindow, x + suppWindow + 1):
            if 0 <= wx < size and image[x] < image[wx]:
                higherNeighbour = True
        if not higherNeighbour:
            found.append(x)
    return found
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter9/BackProjection.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 9
BackProjection: Compute the position of a region in an image by backrpojection. It uses the
moments of the projections to determine the position of the region in the
target image
'''
# Set module functions
from ImageUtilities import imageReadRGB, showImageRGB, createImageF, createImageRGB, showImageF
from ImagePropertiesUtilities import imageMaxMin
from PlotUtilities import plot3DHistogram
from ImageRegionsUtilities import densityHistogram, colourFeature
# Math and iteration
from math import sqrt
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageNames = Input image name and image containing the new region
histoSize = Size of the histogram
positions = Positions of the region in the images [column, row]
sizeReg = Size of the region
sigma = Weight control
'''
pathToDir = "../../Images/Chapter9/Input/"
imageNames = ["frame1.bmp", "frame2.bmp"]
histoSize = 64
positions = [[100, 60], [89,54]]
sizeReg = [12, 18]
sigma = 6
# Read image into array
sourceImage, width, height = imageReadRGB(pathToDir + imageNames[0])
targetImage, width, height = imageReadRGB(pathToDir + imageNames[1])
# Show input images
showImageRGB(sourceImage)
showImageRGB(targetImage)
# Density source
q = densityHistogram(sourceImage, positions[0], sizeReg, sigma, histoSize)
#plot3DHistogram(q)
# Projection by setting the pixel's value to the high in the histogram for
# the source and target images
colourScale = 256.0 / histoSize
projectionSource = createImageF(width, height)
projectionTarget = createImageF(width, height)
for x, y in itertools.product(range(0,width), range(0, height)):
Cb,Cr = colourFeature(sourceImage[y,x], colourScale)
projectionSource[y,x] = q[Cr,Cb]
Cb,Cr = colourFeature(targetImage[y,x], colourScale)
projectionTarget[y,x] = q[Cr,Cb]
showImageF(projectionSource)
showImageF(projectionTarget)
# Compute geometric moments of the source and target image regions.
# Each moment term (x^n * y^m) is weighted by the back-projection value at
# the sampled pixel itself.
momS = createImageF(3, 3)
momT = createImageF(3, 3)
ps, pt = positions[0], positions[1]
sizeSearch = [int(sizeReg[0] *1.5), int(sizeReg[1] *1.5)]
for deltaX, deltaY in itertools.product(range(-sizeSearch[0], sizeSearch[0]), \
                                        range(-sizeSearch[1], sizeSearch[1])):
    xs, ys = ps[0] + deltaX, ps[1] + deltaY
    xt, yt = pt[0] + deltaX, pt[1] + deltaY
    for m,n in itertools.product(range(0, 3), range(0, 3)):
        # Fixed: previously indexed projectionSource[y,x] and
        # projectionTarget[y,x] with the stale x,y left over from the
        # projection loop above, so every term was weighted by one constant
        # pixel value instead of the sampled pixel (xs,ys)/(xt,yt).
        momS[n,m] += (xs**n) * (ys**m) * projectionSource[ys,xs]
        momT[n,m] += (xt**n) * (yt**m) * projectionTarget[yt,xt]
# Compute sxS, syS, the size of the projection in previous frame
xc,yc = momS[1,0]/momS[0,0], momS[0,1]/momS[0,0]
a = momS[2,0]/momS[0,0] - xc*xc;
b = 2*(momS[1,1]/momS[0,0] - xc * yc);
c = momS[0,2]/momS[0,0]- yc*yc;
sxS = int(sqrt((a+c-sqrt(b*b+(a-c)*(a-c))/2)));
syS = int(sqrt((a+c+sqrt(b*b+(a-c)*(a-c))/2)));
# Compute sx, sy, the size of the projection in current frame
xc,yc = momT[1,0]/momT[0,0], momT[0,1]/momT[0,0]
a = momT[2,0]/momT[0,0] - xc*xc;
b = 2*(momT[1,1]/momT[0,0] - xc * yc);
c = momT[0,2]/momT[0,0]- yc*yc;
sx = int(sqrt((a+c-sqrt(b*b+(a-c)*(a-c))/2)));
sy = int(sqrt((a+c+sqrt(b*b+(a-c)*(a-c))/2)));
# Determine size of the region in current frame
sy = int(sy * sizeReg[1] / syS)
sx = int(sx * sizeReg[0] / sxS)
# Show results
p = [int(xc), int(yc)]
m, _ = imageMaxMin(projectionTarget)
borderDistance = [sx -2, sy -2]
for x, y in itertools.product(range(p[0] - sx, p[0] + sx), \
range(p[1] - sy, p[1] + sy)):
if abs(x-p[0]) > borderDistance[0] or abs(y-p[1]) > borderDistance[1]:
projectionTarget[y,x] = m
showImageF(projectionTarget)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter3/MedianFilter.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 3
MedianFilter: Noise reduction by median filter
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageL
# Iteration
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
kernelSize = Size of the kernel
'''
pathToDir = "../../Images/Chapter3/Input/"
imageName = "Fence.png"
kernelSize = 5
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Create images to store the result
outputImage = createImageL(width, height)
# Apply filter: replace each pixel by the median of its kernelSize x
# kernelSize neighbourhood (windows are truncated at the image borders)
kernelCentre = int((kernelSize - 1) / 2)
for x,y in itertools.product(range(0, width), range(0, height)):
    region = [ ]
    # Gather the in-bounds neighbourhood values
    for wx,wy in itertools.product(range(0, kernelSize), range(0, kernelSize)):
        posY = y + wy - kernelCentre
        posX = x + wx - kernelCentre
        if posY > -1 and posY < height and posX > -1 and posX < width:
            region.append(inputImage[posY,posX])
    numPixels = len(region)
    if numPixels > 0:
        # Sort and pick the middle element (the upper median when the
        # truncated window holds an even number of pixels)
        region.sort()
        outputImage[y,x] = region[int(numPixels/2)]
# Show output image
showImageL(outputImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter10/Homography.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 10
Homography: Compute an homography from four corresponding image points and perform the
transformation on the image
'''
# Set module functions
from ImageUtilities import imageReadRGB, imageReadL, showImageRGB
from GeometricUtilities import solveSystem, imageTransform
# Math and iteration
from math import sin, cos, sqrt
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
maskName = Mask image name
'''
pathToDir = "../../Images/Chapter10/Input/"
imageName = "cube1.png"
maskName = "mask1.png"
# Read image data
inputImage, width, height = imageReadRGB(pathToDir + imageName)
maskImage, width, height = imageReadL(pathToDir + maskName)
showImageRGB(inputImage)
# Image centre
centreX, centreY = int(width/2), int(height/2)
# Corresponding points
p = [[116-centreX,202-centreY],[352-centreX,234-centreY],[140-centreX,384-centreY],[344-centreX,422-centreY]]
q = [[118-centreX,168-centreY],[312-centreX,238-centreY],[146-centreX,352-centreY],[322-centreX,422-centreY]]
# Find transform
M = [[-p[0][0], -p[0][1], -1, 0, 0, 0, p[0][0]*q[0][0], p[0][1]*q[0][0], q[0][0]], \
[ 0, 0, 0, -p[0][0], -p[0][1], -1, p[0][0]*q[0][1], p[0][1]*q[0][1], q[0][1]], \
[-p[1][0], -p[1][1], -1, 0, 0, 0, p[1][0]*q[1][0], p[1][1]*q[1][0], q[1][0]], \
[ 0, 0, 0, -p[1][0], -p[1][1], -1, p[1][0]*q[1][1], p[1][1]*q[1][1], q[1][1]], \
[-p[2][0], -p[2][1], -1, 0, 0, 0, p[2][0]*q[2][0], p[2][1]*q[2][0], q[2][0]], \
[ 0, 0, 0, -p[2][0], -p[2][1], -1, p[2][0]*q[2][1], p[2][1]*q[2][1], q[2][1]], \
[-p[3][0], -p[3][1], -1, 0, 0, 0, p[3][0]*q[3][0], p[3][1]*q[3][0], q[3][0]], \
[ 0, 0, 0, -p[3][0], -p[3][1], -1, p[3][0]*q[3][1], p[3][1]*q[3][1], q[3][1]], \
[ 1, 1, 1, 1, 1, 1, 1, 1, 1 ]]
# Solves the equation A*x=b
b = [0,0,0,0,0,0,0,0,1]
h = solveSystem(M, b)
H = [[h[0], h[1], h[2]], \
[h[3], h[4], h[5]], \
[h[6], h[7], h[8]] ]
#print(H)
# Transform image and show
tImage = imageTransform(inputImage, maskImage, H)
showImageRGB(tImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter2/FourierTransform.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 2
FourierTransform: Compute the Fourier transform of an image and display the magnitude and phase
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageF, showImageF
from ImageOperatorsUtilities import imageLogF
from PrintUtilities import printProgress
# Math and iteration functions
from math import sin, cos, pi, sqrt, atan2
# NOTE(review): "from timeit import itertools" works only because timeit
# happens to import itertools internally; plain "import itertools" is safer
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
'''
pathToDir = "../../Images/Chapter2/Input/"
imageName = "Square.png"
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Create coefficients Image. Two floats to represent a complex number
# (index [0] = real part, index [1] = imaginary part)
maxFrequencyW = int(width /2)
maxFrequencyH = int(height/2)
numCoefficientsW = 1 + 2 * maxFrequencyW
numCoefficientsH = 1 + 2 * maxFrequencyH
coefficients = createImageF(numCoefficientsW ,numCoefficientsH , 2)
# Adjust the size of the data to be even
m = float(width)
n = float(height)
if width % 2 == 0:
    m = width + 1.0
if height % 2 == 0:
    n = height + 1.0
# Fundamental frequency
ww = (2.0 * pi) / m
wh = (2.0 * pi) / n
# Compute coefficients: direct (no FFT) evaluation of the 2D transform;
# the angle-sum identities below expand the product of the two 1D
# complex exponentials into real and imaginary parts
for kw in range(-maxFrequencyW, maxFrequencyW + 1):
    printProgress(kw + maxFrequencyW, numCoefficientsW)
    indexInArrayW = kw + maxFrequencyW
    for kh in range(-maxFrequencyH, maxFrequencyH + 1):
        indexInArrayH = kh + maxFrequencyH
        for x,y in itertools.product(range(0, width), range(0, height)):
            coefficients[indexInArrayH, indexInArrayW][0] += inputImage[y,x] * \
                (cos(x * ww * kw) * cos(y * wh * kh) - sin(x * ww * kw) * sin(y * wh * kh))
            coefficients[indexInArrayH, indexInArrayW][1] += inputImage[y,x] * \
                (cos(x * ww * kw) * sin(y * wh * kh) + sin(x * ww * kw) * cos(y * wh * kh))
# NOTE(review): this step multiplies the coefficients by m*n, whereas the
# companion HartleyTransform example divides by m*n at the same point;
# confirm the intended DFT normalisation against the book text
for kw in range(-maxFrequencyW, maxFrequencyW + 1):
    printProgress(kw + maxFrequencyW, numCoefficientsW)
    indexInArrayW = kw + maxFrequencyW
    for kh in range(-maxFrequencyH, maxFrequencyH + 1):
        indexInArrayH = kh + maxFrequencyH
        coefficients[indexInArrayH, indexInArrayW][0] *= m*n
        coefficients[indexInArrayH, indexInArrayW][1] *= m*n
# Power: magnitude of each complex coefficient
power = createImageF( 1 + 2 * maxFrequencyW, 1 + 2 * maxFrequencyH)
for kw,kh in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1),  \
                               range(-maxFrequencyH, maxFrequencyH + 1)):
    indexInArrayW = kw + maxFrequencyW
    indexInArrayH = kh + maxFrequencyH
    power[indexInArrayH, indexInArrayW] = \
        sqrt(coefficients[indexInArrayH, indexInArrayW][0] * \
             coefficients[indexInArrayH, indexInArrayW][0] + \
             coefficients[indexInArrayH, indexInArrayW][1] * \
             coefficients[indexInArrayH, indexInArrayW][1])
# Show the log of the power
powerLog = imageLogF(power)
showImageF(powerLog)
# Phase: angle of each complex coefficient
phase = createImageF( 1 + 2 * maxFrequencyW, 1 + 2 * maxFrequencyH)
for kw,kh in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1),  \
                               range(-maxFrequencyH, maxFrequencyH + 1)):
    indexInArrayW = kw + maxFrequencyW
    indexInArrayH = kh + maxFrequencyH
    phase[indexInArrayH, indexInArrayW] = \
        atan2(coefficients[indexInArrayH, indexInArrayW][1], \
              coefficients[indexInArrayH, indexInArrayW][0])
# Show phase
showImageF(phase)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter3/OptimalThresholding.py | <filename>ExamplesPython_3.6/Chapter3/OptimalThresholding.py<gh_stars>10-100
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 3
OptimalThresholding: Create binary image by finding an optimal threshold
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createVectorF
from PlotUtilities import plotHistogram
from ImageOperatorsUtilities import computeHistogram, thresholdImage
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
'''
pathToDir = "../../Images/Chapter3/Input/"
imageName = "Horse.png"
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Compute histogram of the input image
inputHistogram = computeHistogram(inputImage)
# Create histograms to store cumulative moments:
# w = cumulative probability (zeroth moment), m = cumulative mean (first moment)
w = createVectorF(256)
m = createVectorF(256)
# Create histograms to store separation
separability = createVectorF(256)
# Obtain histograms
normalization = 1.0 / float(width * height)
w[0] = normalization * inputHistogram[0]
for level in range(1, 256):
    w[level] = w[level-1] + normalization * inputHistogram[level]
    m[level] = m[level-1] + level * normalization * inputHistogram[level]
# Look for the maximum of the separability measure (between-class variance);
# the guard avoids division by zero for empty foreground/background classes
# NOTE(review): classic Otsu uses w*(1-w) in the denominator, not
# w*(level - w) — confirm this variant against the book text
maximumLevel = 0
for level in range(0, 256):
    if w[level] * (float(level) - w[level]) != 0:
        separability[level] = float(pow( ( m[255] * w[level] - m[level]), 2) \
            / (w[level] * (float(level) - w[level])))
    if separability[level] > separability[maximumLevel]:
        maximumLevel = level
outputImage = thresholdImage(inputImage, maximumLevel)
# Show output image
showImageL(outputImage)
plotHistogram(separability)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter5/PolarHoughTransform.py | <gh_stars>10-100
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 5
PolarHoughTransform: Line detection by the Hough transform with polar parametrisation
'''
# Set module functions
from ImageUtilities import imageReadL, createImageF, showImageF, showImageL, createScaleImageL
from ImageOperatorsUtilities import applyCannyEdgeDetector
from ImagePropertiesUtilities import imageMaxMin, peakDetectorImageL
from PlotUtilities import plot3DHistogram
# Math and iteration
from math import fabs, sqrt, pi, sin, cos
# NOTE(review): "from timeit import itertools" relies on timeit importing
# itertools internally; plain "import itertools" is the robust form
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    gaussianKernelSize = Gaussian kernel size. Filter noise
    sobelKernelSize = Sobel kernel size. Edge detection
    upperT = Upper threshold
    lowerT = Lower threshold
    peakDetection = Percentage of the maximum peak value that is considered for threshold
'''
pathToDir = "../../Images/Chapter5/Input/"
imageName = "Road.png"
gaussianKernelSize = 7
sobelKernelSize = 3
upperT = 0.5
lowerT = 0.3
peakDetection = 0.7
# Read image into array and show
inputImage, width, height = imageReadL(pathToDir + imageName)
showImageL(inputImage)
# Compute edges
magnitude, angle = applyCannyEdgeDetector(inputImage, gaussianKernelSize, sobelKernelSize, upperT, lowerT)
showImageF(magnitude)
# Line parametrisation for normals from 0 to 360 degrees and r positive form the image centre
# Parametrisation r = (x-cx) * cos(t) + (y-cy) * sin(t)
maxLenght = int(sqrt(height*height + width*width) / 2)
accumulator = createImageF(maxLenght,360)
cx = int(width / 2)
cy = int(height / 2)
# Gather evidence: every edge pixel votes for all (angle, radius) pairs it
# could lie on. NOTE(review): "angle" below overwrites the edge-direction
# image returned by the Canny detector; that image is otherwise unused
for x,y in itertools.product(range(0, width), range(0, height)):
    if magnitude[y,x] != 0:
        for m in range(0,360):
            angle = (m * pi) / 180.0
            r = (x-cx) * cos(angle) + (y-cy) * sin(angle)
            bucket = int(r)
            if bucket> 0 and bucket < maxLenght - 1:
                # Split the vote between the two nearest radius cells in
                # proportion to the fractional part of r (anti-aliasing)
                weight = r - int(r)
                accumulator[m, bucket] += (1.0 - weight)
                accumulator[m, bucket+1] += weight
# Find maximum
maximum, _ = imageMaxMin(accumulator)
peakThreshold = peakDetection * maximum
# Plot accumulator
plot3DHistogram(accumulator)
# Prepare output image as a dark version of the input
outputImage = createScaleImageL(inputImage, 0.5)
# Peak detection
peaks = peakDetectorImageL(accumulator, peakThreshold)
# Draw lines on output image, brightness proportional to vote strength
for peakIndex in range(1,len(peaks)):
    m = (peaks[peakIndex])[0]
    r = (peaks[peakIndex])[1]
    strength = int(255.0 * accumulator[m, r] / maximum)
    angle = (m * pi) / 180.0
    # Step along the axis with the larger direction component so the drawn
    # line has no gaps and no division by a near-zero trigonometric term
    if fabs(sin(angle)) > fabs(cos(angle)):
        for x in range(0, width -1):
            y = int( (r - (x-cx) * cos(angle) ) / sin(angle)) + cy
            if y > 0 and y < height -1:
                outputImage[y,x] = strength
                outputImage[y+1,x] = strength
    else:
        for y in range(0, height -1):
            x = int( (r - (y-cy) * sin(angle) ) / cos(angle)) + cx
            if x > 0 and x < width -1:
                outputImage[y,x] = strength
                outputImage[y+1,x] = strength
showImageL(outputImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter9/ColorHistograms.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 9
ColorHistograms: Compute 2D colour histogram of an image
'''
# Set module functions
from ImageUtilities import imageReadRGB, showImageRGB, createImageF, createImageRGB
from PlotUtilities import plot3DColorHistogram
# Math and iteration
from math import exp
# NOTE(review): "from timeit import itertools" relies on timeit importing
# itertools internally; plain "import itertools" is the robust form
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    histSize = Size of the histogram
    position = position of the region
    regionRadius = Size of the kernel
    sigma = weight control
'''
pathToDir = "../../Images/Chapter9/Input/"
imageName = "frame1.bmp"
histSize = 64
position = [100, 60]
regionRadius = [12, 18]
sigma = 4.0
# Read image into array
inputImage, width, height = imageReadRGB(pathToDir + imageName)
# Show input image
showImageRGB(inputImage)
# Three float array to store colors to be used in the surface plot
colorsRGB = createImageF(histSize, histSize, 3)
# Quantization scale
colourScale = 256.0 / histSize
# Create region image and histogram
regionImage = createImageRGB(2*regionRadius[0], 2*regionRadius[1])
histogram = createImageF(histSize, histSize)
sumValue = 0
for deltaX, deltaY in itertools.product(range(-regionRadius[0],regionRadius[0]), range(-regionRadius[1], regionRadius[1])):
    x, y = position[0] + deltaX, position[1] + deltaY
    px,py = deltaX+regionRadius[0], deltaY+regionRadius[1]
    if x>0 and y>0 and x<width and y<height :
        regionImage[py,px] = inputImage[y,x]
        # Gaussian spatial weight: pixels near the region centre count more
        w = exp(-(deltaX*deltaX + deltaY*deltaY)/(2*sigma*sigma))
        rgb = inputImage[y,x] / 256.0
        # Chrominance (Cb, Cr) of the YCbCr colour space, quantised into
        # histSize bins; the histogram is 2D over (Cr, Cb)
        Cb = int((128 - 37.79*rgb[0] - 74.203*rgb[1] + 112*rgb[2])/colourScale)
        Cr = int((128 + 112*rgb[0] - 93.786*rgb[1] - 18.214*rgb[2])/colourScale)
        histogram[Cr,Cb] += w
        sumValue += w
# Normalise so the histogram sums to one; set a single plot colour per bin
for r,b in itertools.product(range(0, histSize), range(0, histSize)):
    histogram[r,b] /= sumValue
    colorsRGB[r,b] = [0.1, .4, .8]
# Show results
showImageRGB(regionImage)
plot3DColorHistogram(histogram, colorsRGB)
#imageSaveRGB(regionImage, "../../Images/Chapter9/Results/Fig_code9_1/region.png")
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter8/SuperpixelsSLIC.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 8
SuperpixelsSLIC: Perform the simple linear interactive clustering
'''
# Set module functions
from ImageUtilities import imageReadL, createImageF, createImageRGB, imageReadRGB, \
showImageRGB, createImage2I, createImageUV
from ConvolutionUtilities import createSobelKernel, applyKernelMA
from PrintUtilities import printProgress
# Math and iteration
from math import sqrt
from _testcapi import FLT_MAX
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
numPixels = Number of super pixels per row
m = Proximity constant
numIter = Number of iterations
'''
pathToDir = "../../Images/Chapter8/Input/"
imageName = "fish.png"
numPixels = 700
m = 10.0
numIter = 10
# Read image into array and show
inputImage, width, height = imageReadRGB(pathToDir+imageName)
inputImageL, _, _ = imageReadL(pathToDir+imageName)
showImageRGB(inputImage)
# Gradient used to determine initial positions
sobelX, sobelY = createSobelKernel(3)
normalizeMagnitude = False
gradient, _, _, _ = applyKernelMA(inputImageL, sobelX, sobelY, normalizeMagnitude)
# Determine the number of regions in horizontal and vertical
regionSide = int(sqrt(width * height / numPixels))
if regionSide % 2 == 0: regionSide -= 1
halfRegionSide = (regionSide - 1.0) / 2.0
regW, regH = 1 + int(width / regionSide), 1 + int(height / regionSide)
# Image to store the region colour and other image for position
regionColour = createImageF(regW, regH, 3)
regionPos = createImageUV(regW, regH)
# Initial regions
regionsID = createImage2I(width, height)
for x,y in itertools.product(range(0, regW), range(0, regH)):
ry, rx = y * regionSide, x * regionSide
# Position
py, px = int(ry + halfRegionSide), int(rx + halfRegionSide)
regionPos[y,x] = [py, px]
minGradient = FLT_MAX
for wx,wy in itertools.product(range(px-1, px+2), range(py-1, py+2)):
if (wy < height and wx < width):
if gradient[wy,wx] < minGradient:
minGradient = gradient[wy,wx]
regionPos[y,x] = [wy, wx]
# Colour
colors = [0, 0, 0]
npts = 0.0
for wx,wy in itertools.product(range(rx, rx + regionSide), \
range(ry, ry + regionSide)):
if wy>=0 and wy<height and wx>=0 and wx<width:
if (wy < height and wx < width):
regionsID[wy, wx] = [y, x]
colors += inputImage[wy,wx]
npts += 1
if npts > 0:
regionColour[y,x] = [int(colors[0] / npts), \
int(colors[1] / npts), int(colors[2] / npts)]
# Modify regions
for itr in range(0, numIter):
printProgress(itr, numIter)
# Values for new regions
newRegionColour = createImageF(regW, regH,3)
newRegionPos = createImageUV(regW, regH)
newRegionSize = createImageF(regW, regH)
# Per pixel
for x,y in itertools.product(range(0, width), range(0, height)):
ry, rx = regionsID[y,x]
colour = [float(inputImage[y,x][0]), float(inputImage[y,x][1]), float(inputImage[y,x][2])]
minD = [FLT_MAX, ry, rx]
for wx,wy in itertools.product(range(rx-2, rx + 3), range(ry-2, ry + 3)):
if wy>=0 and wy<regH and wx>=0 and wx<regW:
ds = sqrt((regionPos[wy,wx][0] - y)**2 + (regionPos[wy,wx][1] - x)**2)
dc = sqrt((float(regionColour[wy,wx][0]) - colour[0])**2 + (float(regionColour[wy,wx][1]) - colour[1])**2 + \
(float(regionColour[wy,wx][2]) - colour[2])**2)
D = dc/255.0 + (m / regionSide) * ds
if D < minD[0]:
minD = [D, wy, wx]
[_, minY, minX] = minD
newRegionColour[minY, minX] += colour
newRegionPos[minY, minX] += [y,x]
newRegionSize[minY, minX] += 1
regionsID[y, x] = [minY, minX]
# Update regions
for x,y in itertools.product(range(0, regW), range(0, regH)):
if newRegionSize[y,x] > 0:
regionPos[y,x] = newRegionPos[y,x] / newRegionSize[y,x]
regionColour[y,x] = newRegionColour[y,x] / newRegionSize[y,x]
# Show regions
resultRegions = createImageRGB(width, height)
for x,y in itertools.product(range(0, width), range(0, height)):
border = False
for wx,wy in itertools.product(range(x, x+2), range(y, y+2)):
if wy>=0 and wy<height and wx>=0 and wx<width:
if regionsID[y, x, 0] != regionsID[wy, wx, 0] or regionsID[y, x, 1] != regionsID[wy, wx, 1]:
border = True
if border:
resultRegions[y,x] = [255, 255, 255]
else:
[h, w] = regionsID[y,x];
resultRegions[y,x] = regionColour[h, w]
showImageRGB(resultRegions)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Modules/ConvolutionUtilities.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
ConvolutionUtilities: Helper module to apply convolutions. Functions to create kernels and perform convolutions
'''
# Images
from ImageUtilities import createImageF, createImageL
from ImagePropertiesUtilities import imageMaxMin
# Math and iteration
from math import exp, pow, factorial, sqrt, atan2
from timeit import itertools
# Generate a Gaussian kernel
def createGaussianKernel(kernelSize):
    """Return an unnormalised square Gaussian kernel of side kernelSize.

    The standard deviation is tied to the size (kernelSize / 3) so the
    bell shape always fits the window; values are not scaled to sum to 1.
    """
    deviation = kernelSize / 3.0
    midPoint = (kernelSize - 1) / 2
    kernel = createImageF(kernelSize, kernelSize)
    for row in range(kernelSize):
        for col in range(kernelSize):
            dx = (col - midPoint) / deviation
            dy = (row - midPoint) / deviation
            kernel[row, col] = exp(-0.5 * (dx * dx + dy * dy))
    return kernel
# Create a Sobel kernel k
def createSobelKernel(kenrelSize):
    """Build Sobel x/y derivative kernels of arbitrary size.

    Each kernel is the outer product of a Pascal-triangle smoothing
    vector along one axis and a Pascal-row difference (derivative)
    vector along the other. Returns (sobelX, sobelY).
    """
    def binomial(n, k):
        # C(n, k) as a float; zero outside the triangle (k < 0 or k > n)
        if 0 <= k <= n:
            return factorial(n) / (factorial(n - k) * factorial(k))
        return 0
    sobelX = createImageF(kenrelSize, kenrelSize)
    sobelY = createImageF(kenrelSize, kenrelSize)
    last = kenrelSize - 1
    for x, y in itertools.product(range(kenrelSize), range(kenrelSize)):
        # Smoothing weights from the full Pascal row
        smoothX = binomial(last, x)
        smoothY = binomial(last, y)
        # Derivative weights: difference of the shorter Pascal row with a
        # copy of itself shifted one position to the right
        diffX = binomial(last - 1, x) - binomial(last - 1, x - 1)
        diffY = binomial(last - 1, y) - binomial(last - 1, y - 1)
        sobelX[y, x] = smoothX * diffY
        sobelY[y, x] = smoothY * diffX
    return sobelX, sobelY
# Create a Laplacian kernel
def createLaplacianKernel(kernelSize, sigma):
    """Create a Laplacian-of-Gaussian kernel normalised by its total sum.

    kernelSize is the side of the square kernel and sigma the standard
    deviation of the underlying Gaussian.
    """
    laplacian = createImageF(kernelSize, kernelSize)
    invSigma2 = 1.0 / (sigma * sigma)
    midPoint = (kernelSize - 1) / 2
    # Evaluate the LoG at every offset and accumulate the total for the
    # normalisation pass below
    total = 0.0
    for col, row in itertools.product(range(kernelSize), range(kernelSize)):
        dx2 = float(col - midPoint) * float(col - midPoint)
        dy2 = float(row - midPoint) * float(row - midPoint)
        radial = 0.5 * (dx2 + dy2) * invSigma2
        value = -invSigma2 * invSigma2 * (1.0 - radial) * exp(-radial)
        laplacian[row, col] = value
        total += value
    # Scale so the kernel values sum to one
    for col, row in itertools.product(range(kernelSize), range(kernelSize)):
        laplacian[row, col] /= total
    return laplacian
# Apply kernel to an image returning a gray level image
def applyKernel(inputImage, kernelImage):
    """Convolve a grey-level image with a kernel, renormalising at borders.

    Kernel weights that fall outside the image are excluded from the
    normalisation sum, so border pixels keep a sensible brightness.
    Returns a grey-level image of the same size as the input.
    """
    height, width = len(inputImage), len(inputImage[0])
    kHeight, kWidth = len(kernelImage), len(kernelImage[0])
    centreY = int((kHeight - 1) / 2)
    centreX = int((kWidth - 1) / 2)
    outputImage = createImageL(width, height)
    for x, y in itertools.product(range(width), range(height)):
        total = 0
        weightSum = 0
        for wx, wy in itertools.product(range(kWidth), range(kHeight)):
            rowPos = y + wy - centreY
            colPos = x + wx - centreX
            if -1 < rowPos < height and -1 < colPos < width:
                total += float(inputImage[rowPos, colPos]) * kernelImage[wy, wx]
                weightSum += kernelImage[wy, wx]
        # Normalise only when there is positive weight inside the image
        if weightSum > 0:
            outputImage[y, x] = total / weightSum
    return outputImage
# Apply kernel returning a float image
def applyKernelF(inputImage, kernelImage):
    """Convolve an image with a kernel and return a float image.

    Weights outside the image are excluded from the normalisation sum.
    When the in-bounds weights cancel to zero (e.g. derivative kernels)
    the raw weighted sum is stored instead of normalising.
    """
    height, width = len(inputImage), len(inputImage[0])
    kHeight, kWidth = len(kernelImage), len(kernelImage[0])
    centreY = int((kHeight - 1) / 2)
    centreX = int((kWidth - 1) / 2)
    outputImage = createImageF(width, height)
    for x, y in itertools.product(range(width), range(height)):
        total, weightSum = 0.0, 0.0
        for wx, wy in itertools.product(range(kWidth), range(kHeight)):
            rowPos = y + wy - centreY
            colPos = x + wx - centreX
            if -1 < rowPos < height and -1 < colPos < width:
                total += float(inputImage[rowPos, colPos]) * float(kernelImage[wy, wx])
                weightSum += float(kernelImage[wy, wx])
        # Normalise unless the weights cancel out exactly
        outputImage[y, x] = total / weightSum if weightSum != 0.0 else total
    return outputImage
# Apply kernels to an image return magnitude and angle
def applyKernelMA(inputImage, kernelX, kernelY, normalizeMagnitude = False):
    """Convolve an image with a horizontal and a vertical kernel.

    Returns (magnitude, direction, imageX, imageY): the gradient magnitude,
    its direction (atan2 of the two responses) and the two individual
    responses. Border pixels the kernel cannot fully cover are left at
    zero because the outer loops only visit the interior. Each response is
    divided by the sum of its kernel weights when that sum is non-zero;
    when normalizeMagnitude is True the magnitude is rescaled to [0, 1].
    """
    height = len(inputImage)
    width = len(inputImage[0])
    # Both kernels are assumed to have the same size as kernelX
    kernelHeight = len(kernelX)
    kerelWidth = len(kernelX[0])
    kernelCentreY = int((kernelHeight - 1) / 2)
    kernelCentreX = int((kerelWidth - 1) / 2)
    # Create images to store the result
    magnitude = createImageF(width, height)
    direction = createImageF(width, height)
    imageX = createImageF(width, height)
    imageY = createImageF(width, height)
    # Convolution with two kernels; only interior pixels are computed
    for x,y in itertools.product(range(kernelCentreX, width - kernelCentreX), \
                                 range(kernelCentreY, height - kernelCentreY)):
        # [0] accumulates the kernelX response, [1] the kernelY response
        sumKernel = [0.0, 0.0]
        sumKernelWeights = [0.0, 0.0]
        for wx,wy in itertools.product(range(0, kerelWidth), range(0, kernelHeight)):
            posY = y + wy - kernelCentreY
            posX = x + wx - kernelCentreX
            if posY > -1 and posY <  height and posX > -1 and posX <  width:
                sumKernel[0] += float(inputImage[posY,posX]) * float(kernelX[wy, wx])
                sumKernelWeights[0] += float(kernelX[wy, wx])
                sumKernel[1] += float(inputImage[posY,posX]) * float(kernelY[wy, wx])
                sumKernelWeights[1] += float(kernelY[wy, wx])
        # If we have to normalize
        if sumKernelWeights[0] != 0.0:
            imageX[y,x] = sumKernel[0] / sumKernelWeights[0]
        else:
            imageX[y,x] = sumKernel[0]
        # If we have to normalize
        if sumKernelWeights[1] != 0.0:
            imageY[y,x] = sumKernel[1] / sumKernelWeights[1]
        else:
            imageY[y,x] = sumKernel[1]
        magnitude[y,x] = sqrt(imageX[y,x] * imageX[y,x] + imageY[y,x] * imageY[y,x])
        direction[y,x] = atan2(imageY[y,x], imageX[y,x])
    # Optional rescaling of the magnitude image to the [0, 1] range
    if normalizeMagnitude == True:
        maximum, minimum = imageMaxMin(magnitude)
        for x,y in itertools.product(range(0, width), range(0, height)):
            magnitude[y,x] = (magnitude[y,x] - minimum) / float(maximum - minimum)
    return magnitude, direction, imageX, imageY
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter5/FourierConvolution.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 5
FourierConvolution: Compute the matching of a template in an image by using Fourier convolutions
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageF, showImageF
from FourierUtilities import computeCoefficients, reconstruction
from ImagePropertiesUtilities import imageMaxMin
from PrintUtilities import printText
from PlotUtilities import plot3DHistogram
# Iteration
# NOTE(review): "from timeit import itertools" relies on timeit importing
# itertools internally; plain "import itertools" is the robust form
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    templateName = Input template image name
    addQuadraticTerm = Set to true to add the square term so that is equivalent to SSD
'''
pathToDir = "../../Images/Chapter5/Input/"
imageName = "Eye.png"
templateName = "EyeTemplate.png"
addQuadraticTerm = True
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
templateImage, widthTemplate, heightTemplate = imageReadL(pathToDir + templateName)
# We pad the input and template to this size (linear, not circular, result)
widthPad = width + widthTemplate - 1
heightPad = height + heightTemplate - 1
# Pad input
inputPad = createImageF(widthPad, heightPad)
for x,y in itertools.product(range(0, width), range(0, height)):
    inputPad[y,x] = inputImage[y,x]
# Pad and invert template. The flipped copy turns the Fourier-domain
# product (a convolution in space) into a correlation with the template
templatePad = createImageF(widthPad, heightPad)
templatePadFlip = createImageF(widthPad, heightPad)
for x,y in itertools.product(range(0, widthTemplate), range(0, heightTemplate)):
    templatePad[y,x] = templateImage[y, x]
    templatePadFlip[y,x] = templateImage[heightTemplate-y-1, widthTemplate-x-1]
# Show input image and template
showImageF(inputPad)
showImageF(templatePad)
# Compute correlation in image domain sum of square differences:
# SSD = sum(I^2) - 2*sum(T*I) + const; squaredTerm accumulates sum(I^2)
# and corrImage accumulates 2*sum(T*I) under the template window
squaredTerm = createImageF(widthPad, heightPad)
corrImage = createImageF(widthPad, heightPad)
for x,y in itertools.product(range(0, widthPad), range(0, heightPad)):
    for w,h in itertools.product(range(-widthTemplate+1,1), \
                                 range(-heightTemplate+1,1)):
        p, q = x+w, y+h
        if p >=0 and q>= 0 and p < width and q < height:
            squaredTerm[y,x] += inputPad[q,p] * inputPad[q,p]
            corrImage[y,x] += 2.0 * templatePad[h+heightTemplate-1,w+widthTemplate-1] * inputPad[q,p]
if addQuadraticTerm:
    for x,y in itertools.product(range(0, widthPad), range(0, heightPad)):
        corrImage[y,x] += -squaredTerm[y,x]
showImageF(corrImage)
maxima, minima = imageMaxMin(corrImage)
plot3DHistogram(corrImage, [2*(minima+maxima)/3, maxima], [15, -47], False)
# Compute Fourier coefficients
imageCoeff, maxFrequencyW, maxFrequencyH = computeCoefficients(inputPad)
templateCoeff, _, _ = computeCoefficients(templatePadFlip)
# Frequency domain multiplication defines convolution is space domain
resultCoeff = createImageF(1 + 2 * maxFrequencyW ,1 + 2 * maxFrequencyH , 2)
for kw, kh in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1), \
                                range(-maxFrequencyH, maxFrequencyH + 1)):
    w = kw + maxFrequencyW
    h = kh + maxFrequencyH
    # Complex multiplication: (a+ib)(c+id) = (ac-bd) + i(ad+bc)
    resultCoeff[h,w][0] = (imageCoeff[h,w][0] * templateCoeff[h,w][0] - \
                           imageCoeff[h,w][1] * templateCoeff[h,w][1])
    resultCoeff[h,w][1] = (imageCoeff[h,w][1] * templateCoeff[h,w][0] + \
                           imageCoeff[h,w][0] * templateCoeff[h,w][1])
# Inverse Fourier transform
reconstructedResult = reconstruction(resultCoeff)
# Show convolution
showImageF(reconstructedResult)
maxima, minima = imageMaxMin(reconstructedResult)
plot3DHistogram(reconstructedResult, [minima, maxima])
# Add square term to define an operator equivalent to SSD
if addQuadraticTerm:
    for x,y in itertools.product(range(0, widthPad), range(0, heightPad)):
        reconstructedResult[y,x] = -squaredTerm[y,x] + 2.0 * reconstructedResult[y,x]
else:
    for x,y in itertools.product(range(0, widthPad), range(0, heightPad)):
        reconstructedResult[y,x] = 2.0 * reconstructedResult[y,x]
# Show convolution added the quadratic image term
showImageF(reconstructedResult)
maxima, minima = imageMaxMin(reconstructedResult)
plot3DHistogram(reconstructedResult, [2*(minima+maxima)/3, maxima], [15, -47], False)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter4/LaplacianKernel.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 4
LaplacianKernel: Creates a Laplacian kernel of arbitrary size
'''
# Set module functions
from ImageUtilities import createImageF
from PrintUtilities import printImageRangeF
from ImagePropertiesUtilities import imageMaxMin
from PlotUtilities import plotSurface
# Math and iteration
from math import exp
from timeit import itertools
'''
Parameters:
kernelSize = Size of the kernel
sigma = Standard deviation of the kernel
'''
kernelSize = 15
sigma = 1.5
# To store kernel
kernelLaplacian = createImageF(kernelSize, kernelSize)
# Create kernel
s2Inv = 1.0 / (sigma * sigma)
kernelCentre = (kernelSize - 1) / 2
# Generate kernel values
sumValues = 0.0
for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)):
nx2 = float(x-kernelCentre) * float(x-kernelCentre)
ny2 = float(y-kernelCentre) * float(y-kernelCentre)
s = 0.5 * (nx2 + ny2) * s2Inv
kernelLaplacian[y,x] = - s2Inv * s2Inv * (1.0 - s) * exp(-s)
sumValues += kernelLaplacian[y,x]
# Normalize
for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)):
kernelLaplacian[y,x] /= sumValues
# Print kernel
printImageRangeF(kernelLaplacian, [0, kernelSize-1], [0, kernelSize-1], ' 8.2f')
# Plot surface
maxValue, minValue = imageMaxMin(kernelLaplacian)
plotSurface(kernelLaplacian, [minValue, maxValue], 1)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter2/HartleyTransform.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 2
HartleyTransform: Compute the Hartley transform of an image
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageF, showImageF
from ImageOperatorsUtilities import imageLogF
from PrintUtilities import printProgress
# Math functions and iteration
from math import sin, cos, pi
# NOTE(review): "from timeit import itertools" relies on timeit importing
# itertools internally; plain "import itertools" is the robust form
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
'''
pathToDir = "../../Images/Chapter2/Input/"
imageName = "Giraffe.png"
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Create coefficients Image. Maximum frequency according to sampling
maxFreqW = int(width / 2)
maxFreqH = int(height / 2)
numCoeffW = 1 + 2 * maxFreqW
numCoeffH = 1 + 2 * maxFreqH
coeff = createImageF(numCoeffW ,numCoeffH)
# Adjust the size of the data to be even
m = float(width)
n = float(height)
if width % 2 == 0:
    m = width + 1.0
if height % 2 == 0:
    n = height + 1.0
# Fundamental frequency
ww = (2.0 * pi) / m
wh = (2.0 * pi) / n
# Compute values. The Hartley kernel cas(t) = cos(t) + sin(t) is real,
# so a single real coefficient array suffices (no real/imaginary pair)
for u in range(-maxFreqW, maxFreqW + 1):
    printProgress(u + maxFreqW, numCoeffW)
    entryW = u + maxFreqW
    for v in range(-maxFreqH, maxFreqH + 1):
        entryH = v + maxFreqH
        for x,y in itertools.product(range(0, width), range(0, height)):
            coeff[entryH, entryW] += inputImage[y,x] * \
                (cos(x * ww * u) + sin(x * ww * u)) * (cos(y * wh * v) + sin(y * wh * v))
# Include scale: divide by the number of samples
for u in range(-maxFreqW, maxFreqW + 1):
    printProgress(u + maxFreqW, numCoeffW)
    entryW = u + maxFreqW
    for v in range(-maxFreqH, maxFreqH + 1):
        entryH = v + maxFreqH
        coeff[entryH, entryW] /= m*n
# Show transform in log form. The function converts negative values to positive,
# so it is similar to the power
coeffLog = imageLogF(coeff)
showImageF(coeffLog)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter8/MaximallyStableExtremalRegions.py | <gh_stars>10-100
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 8
MaximallyStableRegions: Compute maximally stable regions in an image
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL,createImageF, showImageF
from PrintUtilities import printProgress
# Iteration
from timeit import itertools
# Helper: value with the highest number of occurrences in a list
def mostCommon(lst):
    """Return the element of lst that occurs most often (ties broken arbitrarily)."""
    best, bestCount = None, -1
    for value in set(lst):
        occurrences = lst.count(value)
        if occurrences > bestCount:
            best, bestCount = value, occurrences
    return best
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    incThreshold = How many pixels the region may gain per threshold step and
                   still count as "not growing"
    timeThreshold = For how many threshold steps the region must not grow
                    to be declared stable
    startL = Start grey level
    endL = End gray level
    incL = Gray level increment
    minRegionSize = size of the smallest region reported
    maxRegionSize = declared but not used in this script
'''
pathToDir = "../../Images/Chapter8/Input/"
imageName = "castle1.png"
incThreshold = 20
timeThreshold = 30
startL = 10
endL = 140
incL = 2
minRegionSize = 50
maxRegionSize = 1000
# Read image into array and show
inputImage, width, height = imageReadL(pathToDir+imageName)
showImageL(inputImage)
# Per-region bookkeeping, keyed by integer region id:
#   timeRegions    - consecutive threshold steps without significant growth
#   sizeRegions    - accumulated pixel count
#   incSizeRegions - pixels gained during the current threshold step
timeRegions = { }
sizeRegions = { }
incSizeRegions = { }
# Regions: per-pixel region id (0 = unassigned)
regionsImage = createImageF(width, height)
# Stable regions (output mask, 255 where a stable region was found)
resultImage = createImageF(width, height)
# Use a threshold to flood regions: sweep the grey-level threshold upwards,
# growing, merging and creating regions, and report the ones whose size
# stays (nearly) constant for many consecutive steps
nextRegionID = 1
for threshold in range(startL, endL, incL):
    printProgress(threshold - startL, endL - startL)
    # Init the change in size for this threshold step
    for regionID in incSizeRegions:
        incSizeRegions[regionID] = 0
    # Repeatedly flood the image to grow existing regions into newly
    # below-threshold pixels
    flooded = True
    while flooded:
        flooded = False
        growRegion = [ ]
        # For each non-region pixel below the threshold
        for x,y in itertools.product(range(0, width), range(0, height)):
            if inputImage[y,x] <= threshold and regionsImage[y,x] == 0:
                # List of labelled 8-neighbours
                n = [ ]
                for wx,wy in itertools.product(range(x-1, x+2), range(y-1, y+2)):
                    if wy>=0 and wy<height and wx>=0 and wx<width:
                        neighbourID = regionsImage[wy, wx]
                        if neighbourID != 0:
                            n.append(regionsImage[wy, wx])
                # Grow the most common neighbouring region into this pixel
                if(len(n) != 0):
                    mc = mostCommon(n)
                    growRegion.append((y,x,mc))
                    flooded = True
        # Apply the grow list after the scan so one pass is order-independent
        for pixel in growRegion:
            y, x, idRegion = pixel[0] , pixel[1] , pixel[2]
            regionsImage[y, x] = idRegion
            incSizeRegions[idRegion] += 1
    # Repeatedly merge regions that have become adjacent.
    # growRegion is empty here (the flood loop above only exits once it made
    # no additions) and is reused as a work stack for the merge flood fill.
    merged = True
    while merged:
        merged = False
        # For each labelled pixel
        for x,y in itertools.product(range(0, width), range(0, height)):
            if regionsImage[y,x] != 0:
                # List of labelled neighbours and their positions
                n, p = [ ], [ ]
                for wx,wy in itertools.product(range(x-1, x+2), range(y-1, y+2)):
                    if wy>=0 and wy<height and wx>=0 and wx<width:
                        neighbourID = regionsImage[wy, wx]
                        if neighbourID != 0:
                            n.append(regionsImage[wy, wx])
                            p.append((wy, wx))
                # Different neighbours, we need to merge them into this
                # pixel's region (mainRegion)
                if len(n) != 0 and len(set(n)) != 1:
                    merged = True
                    unique = set(n)
                    mainRegion = regionsImage[y,x]
                    if mainRegion in unique:
                        unique.remove(mainRegion)
                    # Merge seeds: relabel the differing neighbours and
                    # stack them for the flood fill below
                    for otherRegion in p:
                        py, px = otherRegion[0], otherRegion[1]
                        if regionsImage[py, px ] != mainRegion:
                            growRegion.append(( py, px ))
                            regionsImage[py, px ] = mainRegion
                    # Flood-fill the absorbed regions with mainRegion.
                    # NOTE(review): incSizeRegions counts every popped seed,
                    # so re-visited pixels may be counted more than once --
                    # confirm the size bookkeeping is as intended.
                    while len(growRegion) > 0:
                        seed = growRegion.pop()
                        py,px = seed[0], seed[1]
                        regionsImage[py,px] = mainRegion
                        incSizeRegions[mainRegion] += 1
                        for wx,wy in itertools.product(range(px-1, px+2), range(py-1, py+2)):
                            if wy>=0 and wy<height and wx>=0 and wx<width:
                                if regionsImage[wy,wx] in unique:
                                    regionsImage[wy,wx] = mainRegion
                                    growRegion.append((wy,wx))
                    # Absorbed regions no longer exist: drop their records
                    for regionID in unique:
                        del incSizeRegions[regionID]
                        del timeRegions[regionID]
                        del sizeRegions[regionID]
    # Find new regions: any remaining unlabelled below-threshold pixel seeds
    # a brand-new region, flood-filled over its connected component
    for x,y in itertools.product(range(0, width), range(0, height)):
        if inputImage[y,x] <= threshold and regionsImage[y,x] == 0:
            timeRegions[nextRegionID] = 0
            sizeRegions[nextRegionID] = 0
            incSizeRegions[nextRegionID] = 0
            growRegion = [(y,x)]
            while len(growRegion) > 0:
                seed = growRegion.pop()
                py,px = seed[0], seed[1]
                regionsImage[py,px] = nextRegionID
                incSizeRegions[nextRegionID] += 1
                for wx,wy in itertools.product(range(px-1, px+2), range(py-1, py+2)):
                    if wy>=0 and wy<height and wx>=0 and wx<width:
                        if inputImage[wy,wx] <= threshold and regionsImage[wy,wx] == 0:
                            growRegion.append((wy,wx))
            nextRegionID += 1
    # Update times for regions and report the stable ones
    for idRegion in incSizeRegions:
        # Update the size
        incSize = incSizeRegions[idRegion]
        sizeRegions[idRegion] += incSize
        # Update stability counter: reset whenever the region grew too much
        if incSize < incThreshold:
            timeRegions[idRegion] += 1
        else:
            timeRegions[idRegion] = 0
        # Stable region condition: unchanged long enough and large enough
        if timeRegions[idRegion] > timeThreshold and sizeRegions[idRegion] > minRegionSize:
            for x,y in itertools.product(range(0, width), range(0, height)):
                if regionsImage[y,x] == idRegion:
                    resultImage[y,x] = 255
            # Reset so the same region is not reported again immediately
            timeRegions[idRegion] = 0
showImageF(resultImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter5/HoughTransformCircles.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 5
HoughTransformCircles: Circle detection by the Hough transform
'''
# Set module functions
from ImageUtilities import imageReadL, createImageF, showImageF, showImageL, createScaleImageL
from ImageOperatorsUtilities import applyCannyEdgeDetector
from ImagePropertiesUtilities import imageArgMax, peakDetectorImageL
from PlotUtilities import plot3DHistogram
# Math and iteration
from math import pi, sin, cos
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    gaussianKernelSize = Gaussian kernel size. Filter noise
    sobelKernelSize = Sobel kernel size. Edge detection
    upperT = Upper threshold
    lowerT = Lower threshold
    peakDetection = Percentage of the maximum peak value that is considered for threshold
    radiousRange = Integer range of possible circle radious
'''
pathToDir = "../../Images/Chapter5/Input/"
imageName = "EyeClose.png"
gaussianKernelSize = 9
sobelKernelSize = 3
upperT = 0.45
lowerT = 0.2
peakDetection = 0.95
radiousRange = [50,70]
# Read image into array and show
inputImage, width, height = imageReadL(pathToDir + imageName)
showImageL(inputImage)
# Compute edges (the 'angle' image is later clobbered by a scalar loop
# variable in the evidence-gathering stage; it is not used as an image)
magnitude, angle = applyCannyEdgeDetector(inputImage, gaussianKernelSize, sobelKernelSize, upperT, lowerT)
showImageF(magnitude)
# Accumulator: one (height x width) vote plane per candidate radius
radiousSize = radiousRange[1] - radiousRange[0]
accumulator = createImageF(width, height, radiousSize)
# Gather evidence: each edge pixel votes for every candidate centre (x0,y0)
# at every candidate radius, spreading the vote over the four integer cells
# around the sub-pixel centre
for x,y in itertools.product(range(0, width), range(0, height)):
    if magnitude[y,x] != 0:
        for r in range(0, radiousSize):
            radious = radiousRange[0] + r
            for m in range(0,360):
                angle = (m * pi) / 180.0
                x0, y0 = x-radious * cos(angle), y-radious * sin(angle);
                x0Int, y0Int = int(x0), int(y0)
                if x0Int>0 and x0Int<width-1 and y0Int>0 and y0Int<height-1:
                    weightX, weightY = (x0 - x0Int), (y0 - y0Int)
                    # NOTE(review): true bilinear weights would be products,
                    # e.g. (1-weightX)*(1-weightY); also weightX is paired
                    # with the +1 ROW and weightY with the +1 COLUMN, which
                    # looks swapped -- confirm against the book's formulation.
                    accumulator[y0Int,x0Int,r] += (1.0-weightX) + (1.0-weightY)
                    accumulator[y0Int+1,x0Int,r] += weightX + (1.0-weightY)
                    accumulator[y0Int,x0Int+1,r] += (1.0-weightX) + weightY
                    accumulator[y0Int+1,x0Int+1,r] += weightX + weightY
# Find the global maximum of the accumulator and derive the peak threshold
maximumPos = imageArgMax(accumulator)
maximum = accumulator[maximumPos[0],maximumPos[1],maximumPos[2]]
peakThreshold = peakDetection * maximum
# Plot the accumulator slice at the winning radius
plot3DHistogram(accumulator[:,:,maximumPos[2]])
# Prepare output image as a dark version of the input
outputImage = createScaleImageL(inputImage, 0.5)
# Draw every circle whose votes exceed the peak threshold, brighter for
# stronger peaks
for x,y in itertools.product(range(0, width), range(0, height)):
    for r in range(0,radiousSize):
        radious = radiousRange[0] + r
        if accumulator[y,x,r] > peakThreshold:
            strength = int(255.0 * accumulator[y, x, r] / maximum)
            for m in range(0,360):
                angle = (m * pi) / 180.0
                x0 = int(x-radious * cos(angle));
                y0 = int(y-radious * sin(angle));
                if x0<width and x0>0 and y0<height and y0>0:
                    outputImage[y0,x0] = strength
showImageL(outputImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Modules/ImageUtilities.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
ImageUtilities: Helper module to support image read, create, show and write
Uses PIL and numpy to work with arrays containing image data
'''
# Image load by using PIL
from PIL import Image
# Array to store image data
from numpy import array, zeros, amax, amin, clip
# Read an image file into an RGB pixel array
def imageReadRGB(fileName):
    """Read an image and return (pixelArray, width, height).

    The array has shape (height, width, 3); any extra (alpha) channel
    is dropped.
    """
    pilImage = Image.open(fileName)
    width, height = pilImage.size
    # Keep only the first three (RGB) channels
    pixels = array(pilImage)[:, :, 0:3]
    return pixels, width, height
# Read an image returning an array with 1 component
def imageReadL(fileName):
    """Read an image and return (greyArray, width, height).

    Each output pixel is the mean of the R, G and B values, stored as
    uint8. Assumes the file decodes to pixels with at least three
    channels -- TODO confirm behaviour for greyscale/palette inputs.
    """
    inputImage = Image.open(fileName)
    width, height = inputImage.size
    inputArray = array(inputImage)
    outputArray = zeros((height, width), dtype='uint8')
    # Combine pixel values, for each row and column
    for y in range(0, height):
        for x in range(0, width):
            rgb = inputArray[y,x]
            # int() casts avoid uint8 overflow when summing the channels
            outputArray[y,x] = (int(rgb[0]) + int(rgb[1]) + int(rgb[2])) / 3
    return outputArray, width, height
# Save an image containing floats as a grey-level image file
def imageSaveF(image, fileName, maxScale = 1):
    """Linearly map the float image to 0..255 grey levels and save it.

    maxScale multiplies pixel values before normalisation.
    NOTE(review): for a constant image (maximum == minimum) the output is
    left all zeros, whereas showImageF copies the raw values in that case
    -- confirm which behaviour is intended.
    """
    height = len(image)
    width = len(image[0])
    maximum = amax(image)
    minimum = amin(image)
    # Create a gray level image for display
    outputArray = createImageL(width, height)
    # Scale the float values into gray scale values
    for y in range(0, height):
        for x in range(0, width):
            if maximum != minimum:
                outputArray[y,x] = clip( 255.0 * (image[y,x]*maxScale - minimum) / (maximum - minimum), 0, 255.0)
    # Create output and save
    outputImage = Image.fromarray(outputArray, 'L')
    outputImage.save(fileName)
# Save a grey-level image array to a file
def imageSaveL(image, fileName):
    """Write a uint8 grey-level array to fileName via PIL."""
    Image.fromarray(image, 'L').save(fileName)
# Save an RGB image array to a file
def imageSaveRGB(image, fileName):
    """Write a uint8 RGB array of shape (height, width, 3) to fileName via PIL."""
    Image.fromarray(image, 'RGB').save(fileName)
# Create a black RGB image
def createImageRGB(width, height):
    """Return a zero-initialised uint8 array of shape (height, width, 3)."""
    return zeros((height, width, 3), dtype='uint8')
# Create a black grey-level image
def createImageL(width, height):
    """Return a zero-initialised uint8 array of shape (height, width)."""
    return zeros((height, width), dtype='uint8')
# Create an image with two float components per pixel (e.g. a UV field)
def createImageUV(width, height):
    """Return a float zero array of shape (height, width, 2)."""
    return zeros((height, width, 2), dtype='float')
# Create an image with two integer components per pixel
def createImage2I(width, height):
    """Return an int zero array of shape (height, width, 2)."""
    return zeros((height, width, 2), dtype='int')
# Create a zero float image, optionally with several components per pixel
def createImageF(width, height, numComponents = 1):
    """Return a float zero array shaped (height, width) or (height, width, numComponents)."""
    shape = (height, width) if numComponents == 1 else (height, width, numComponents)
    return zeros(shape, dtype='float')
# Create a multidimensional float array; the first two sizes are (width, height)
def createImageNF(*arg):
    """Return a float zero array; the first two arguments (width, height)
    are stored as (height, width), matching the other create functions."""
    sizes = list(arg)
    if len(sizes) > 1:
        # Rows (height) come first in the array shape
        sizes[0], sizes[1] = sizes[1], sizes[0]
    return zeros(sizes, dtype='float')
# Create an array of floats from a nested list of values
def createImageFromDataF(dataValues):
    """Convert a list of rows into a float array.

    Returns (array, width, height), where width is taken from the first
    row. Longer rows are truncated to that width, exactly as the original
    per-pixel copy loop did; shorter rows raise as before.
    """
    height = len(dataValues)
    width = len(dataValues[0])
    # Vectorised conversion replaces the O(width*height) Python copy loop
    outputArray = array([row[0:width] for row in dataValues], dtype='float')
    return outputArray, width, height
# Copy a grey-level image, scaling every pixel value
def createScaleImageL(image, grayScale):
    """Return a new uint8 image with each pixel set to int(grayScale * pixel)."""
    height, width = len(image), len(image[0])
    outputImage = createImageL(width, height)
    for y in range(height):
        row = image[y]
        for x in range(width):
            outputImage[y, x] = int(grayScale * row[x])
    return outputImage
# Resize a grey-level image by nearest-neighbour sampling
def scaleImageL(image, newWidth, newHeight):
    """Return a newWidth x newHeight copy of image (nearest-neighbour)."""
    srcHeight, srcWidth = len(image), len(image[0])
    # Source step per output pixel in each direction
    stepX = float(srcWidth) / newWidth
    stepY = float(srcHeight) / newHeight
    outputImage = createImageL(newWidth, newHeight)
    for y in range(newHeight):
        srcY = int(y * stepY)
        for x in range(newWidth):
            outputImage[y, x] = image[srcY, int(x * stepX)]
    return outputImage
# Create a 1D array of floats from a list of values
def createVectorFromDataF(dataValues):
    """Convert a flat list of numbers into a float array; returns (array, width)."""
    width = len(dataValues)
    # Vectorised conversion replaces the element-by-element copy loop
    outputArray = array(dataValues, dtype='float')
    return outputArray, width
# Create a zero-filled 1D array of floats
def createVectorF(width):
    """Return a float zero vector of length width."""
    return zeros(width, dtype='float')
# Create a zero-filled 1D array of integers
def createVectorI(width):
    """Return an int zero vector of length width."""
    return zeros(width, dtype='int')
# Display an RGB image array in a viewer window
def showImageRGB(image):
    """Show the uint8 RGB array of shape (height, width, 3) via PIL."""
    Image.fromarray(image, 'RGB').show()
# Display a grey-level image array in a viewer window
def showImageL(image):
    """Show the uint8 grey-level array via PIL."""
    Image.fromarray(image, 'L').show()
# Show an array containing float image data
def showImageF(image, maxScale = 1):
    """Display a float image by mapping its values linearly to 0..255.

    maxScale multiplies pixel values before normalisation. For a constant
    image the raw values are copied instead. NOTE(review): that branch
    stores floats straight into a uint8 array, so values outside 0..255
    are truncated by the cast -- confirm that is acceptable.
    """
    height = len(image)
    width = len(image[0])
    maximum = amax(image)
    minimum = amin(image)
    # Create a gray level image for display
    outputArray = createImageL(width, height)
    # Scale the float values into gray scale values
    for y in range(0, height):
        for x in range(0, width):
            if maximum != minimum:
                outputArray[y,x] = clip( 255.0 * (image[y,x]*maxScale - minimum) / (maximum - minimum), 0, 255.0)
            else:
                outputArray[y,x] = image[y,x]
    # Create output and show
    outputImage = Image.fromarray(outputArray, 'L')
    outputImage.show()
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter8/WatershedDistanceTransform.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 8
WaterShedDistanceTransform: Compute Watershed transform by topographic distance
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL,createImageF, showImageF
from ImagePropertiesUtilities import imageMaxMin
from ImageRegionsUtilities import pixlesList, edgesList
# Math and iteration
from math import sqrt
from _testcapi import FLT_MAX
from random import shuffle
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    backgroundRange = The gray level range value of the background
'''
pathToDir = "../../Images/Chapter8/Input/"
imageName = "circles.png"
background = [200, 255]
# Read image into array and show
inputImage, width, height = imageReadL(pathToDir+imageName)
showImageL(inputImage)
# Get a list that contains the pixels of the shape in the form (y,x,v)
shapeImage = pixlesList(inputImage, background)
numPoints = len(shapeImage)
# Get a list of edge pixels
edgePixels = edgesList(inputImage, shapeImage, background)
# Distance transform: for every shape pixel, the Euclidean distance to the
# closest edge pixel (brute force, O(numPoints * numEdges))
distanceImage = createImageF(width, height)
numEdges = len(edgePixels)
for indexPixel in range(0, numPoints):
    y, x = (shapeImage[indexPixel])[0], (shapeImage[indexPixel])[1]
    # FLT_MAX is imported from the CPython test module _testcapi;
    # NOTE(review): float('inf') would avoid that non-public dependency
    minEdgeDist = FLT_MAX
    for indexEdge in range(0, numEdges):
        edgeY, edgeX = (edgePixels[indexEdge])[0], (edgePixels[indexEdge])[1]
        minEdgeDist = min(minEdgeDist, sqrt((edgeX-x)**2+(edgeY-y)**2))
    distanceImage[y,x] = minEdgeDist
# Show distance
showImageF(distanceImage)
# Watershed image: per-pixel region label (0 = unassigned)
watershed = createImageF(width, height)
# Seed regions at local maxima of the distance transform, found with a
# (2*suppWindow+1)^2 non-maximum suppression window
suppWindow = 5 # Window used to find a maximum
regionIndex = 1 # Start id for a region
for indexPixel in range(0, numPoints):
    y, x = (shapeImage[indexPixel])[0], (shapeImage[indexPixel])[1]
    if watershed[y,x] == 0:
        peak = True
        for wx,wy in itertools.product(range(x-suppWindow, x+suppWindow+1), \
                                       range(y-suppWindow, y+suppWindow+1)):
            if wy>=0 and wy<height and wx>=0 and wx<width:
                if watershed[wy, wx] != 0 or \
                   distanceImage[y, x] < distanceImage[wy, wx]:
                    peak = False
        if peak:
            # Label the whole suppression window with the new region id
            for wx,wy in itertools.product(range(x-suppWindow, x+suppWindow+1), \
                                           range(y-suppWindow, y+suppWindow+1)):
                if wy>=0 and wy<height and wx>=0 and wx<width:
                    watershed[wy, wx] = regionIndex
            regionIndex += 1
floodRegion = [ ] # The region we need to flood
for indexPixel in range(0, numPoints):
    y, x = (shapeImage[indexPixel])[0], (shapeImage[indexPixel])[1]
    if watershed[y,x] == 0:
        floodRegion.append((y,x))
# Flooding: lower the water level from the maximal distance; at each level,
# grow region labels into unlabelled pixels that touch exactly one region
maxDistance, _ = imageMaxMin(distanceImage)
for floodValue in range(int(maxDistance), 0, -1):
    flooded = True
    while flooded:
        flooded = False
        newFloodRegion = [ ]
        growRegion = [ ]
        # Shuffle so growth does not favour one scan direction
        shuffle(floodRegion)
        for indexPixel in range(0, len(floodRegion)):
            y, x = (floodRegion[indexPixel])[0], (floodRegion[indexPixel])[1]
            # Points not flooded will be considered in following iterations
            if distanceImage[y,x] <= floodValue:
                newFloodRegion.append((y,x))
            else:
                n = [ ] # List of labelled 8-neighbours
                for wx,wy in itertools.product(range(-1, 2), range(-1, 2)):
                    posX, posY = x + wx, y+ wy
                    if posY > -1 and posY < height and posX > -1 and posX < width:
                        if watershed[posY, posX] != 0:
                            n.append(watershed[posY, posX])
                # No neighbours, so we cannot grow
                if(len(n) == 0):
                    newFloodRegion.append((y,x))
                else:
                    # Grow only when a single region type is adjacent;
                    # pixels touching several regions are dropped from the
                    # flood list and become the watershed lines below
                    if len(set(n)) == 1:
                        growRegion.append((y,x,n[0]))
                        flooded = True
        for pixel in growRegion:
            y, x, idRegion = pixel[0] , pixel[1] , pixel[2]
            watershed[y, x] = idRegion
        floodRegion = newFloodRegion
# Set the borders: remaining unlabelled interior pixels are the watershed
shedID = regionIndex + 1
for indexPixel in range(0, numPoints):
    y, x = (shapeImage[indexPixel])[0], (shapeImage[indexPixel])[1]
    if watershed[y,x] == 0 and distanceImage[y, x] > 0.5:
        watershed[y, x] = shedID
# Show result
showImageF(watershed)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Modules/PlotUtilities.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
PlotUtilities: Helper module to show plots of image data
Uses numpy and matplotlib to work with arrays and plots
'''
# Math functions
import math
# Array to store data
import numpy as np
# Array to store image data
from numpy import zeros
# Plot functions
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# Two alternative ways to import Axes3D.
import importlib
importlib.import_module('mpl_toolkits.mplot3d').Axes3D
#from mpl_toolkits.mplot3d import Axes3D
# Plot a bar chart of a 1D data sequence
def plotHistogram(data, plotRange = [0, 0], barSepartion = 1):
    """Show a bar plot of data; plotRange, if not [0, 0], fixes the y limits."""
    numBars = len(data)
    positions = np.linspace(0, numBars - 1, numBars)
    axes = plt.figure().gca()
    axes.bar(positions, data, barSepartion)
    # Hide the top/right box edges for a cleaner look
    for side in ('top', 'right'):
        axes.spines[side].set_visible(False)
    if plotRange[0] != 0 or plotRange[1] != 0:
        axes.set_ylim([plotRange[0], plotRange[1]])
    plt.show()
# Plot a 1D data sequence as a curve
def plotCurve(data, rangeY = [0, 0], rangeX = [0, 0]):
    """Plot data against its index (or rangeX if given); rangeY fixes y limits."""
    numSamples = len(data)
    if rangeX[0] != 0 or rangeX[1] != 0:
        xValues = np.linspace(rangeX[0], rangeX[1], numSamples)
    else:
        xValues = np.linspace(0, numSamples - 1, numSamples)
    axes = plt.figure().gca()
    axes.plot(xValues, data)
    # Hide the top/right box edges for a cleaner look
    for side in ('top', 'right'):
        axes.spines[side].set_visible(False)
    if rangeY[0] != 0 or rangeY[1] != 0:
        axes.set_ylim([rangeY[0], rangeY[1]])
    plt.show()
# Plot two curves with point markers on shared axes
def plot2Curves(data1, data2, rangeY = [0, 0]):
    """Plot data1 and data2 against the index of data1; rangeY fixes y limits."""
    numSamples = len(data1)
    xValues = np.linspace(0, numSamples - 1, numSamples)
    axes = plt.figure().gca()
    for series in (data1, data2):
        axes.plot(xValues, series, marker='o' )
    # Hide the top/right box edges for a cleaner look
    for side in ('top', 'right'):
        axes.spines[side].set_visible(False)
    if rangeY[0] != 0 or rangeY[1] != 0:
        axes.set_ylim([rangeY[0], rangeY[1]])
    plt.show()
def plotCurves(data, rangeY = [0, 0]):
    """Plot rows 1..height-1 of a 2D array as curves; rangeY fixes the y limits.

    NOTE(review): the loop starts at row 1, so row 0 is never drawn --
    confirm whether that row is deliberately reserved.
    """
    width = data.shape[1]
    height = data.shape[0]
    x = np.linspace(0, width-1, width)
    # Create figure
    fig = plt.figure()
    axes = fig.gca()
    for curveNum in range(1, height):
        axes.plot(x, data[curveNum,:])
    axes.spines['top'].set_visible(False)
    axes.spines['right'].set_visible(False)
    if rangeY[0] != 0 or rangeY[1] != 0:
        axes.set_ylim([rangeY[0], rangeY[1]])
    plt.show()
def plotCurveXY(dataX, dataY, rangeY = [0, 0]):
    """Plot dataY against dataX; default y limits span the data with a margin."""
    if rangeY[0] == 0 and rangeY[1] == 0:
        rangeY = [min(dataY), max(dataY)]
    # Create figure
    fig = plt.figure()
    axes = fig.gca()
    axes.plot(dataX, dataY)
    axes.spines['top'].set_visible(False)
    axes.spines['right'].set_visible(False)
    # Pad the limits by 1/20 of the y range
    deltaRange = (rangeY[1] - rangeY[0])/20.0
    if rangeY[0] != 0 or rangeY[1] != 0:
        axes.set_ylim([rangeY[0] -deltaRange, rangeY[1]+deltaRange])
    plt.show()
# Plot a 3D bar chart of 2D data, with one colour per bar
def plot3DColorHistogram(dataZ, colorsRGB, zRange = [0, 0]):
    """Show dataZ (height x width) as 3D bars coloured by colorsRGB.

    colorsRGB must be reshapeable to (width * height, 3); zRange, if not
    [0, 0], fixes the z limits.
    """
    width = dataZ.shape[1]
    height = dataZ.shape[0]
    # Convert data to array
    zData = np.array(dataZ)
    # Create an X-Y mesh of the same dimension as the 2D data
    xData, yData = np.meshgrid( np.arange(width), np.arange(height))
    # Flatten the arrays so that they may be passed to "axes.bar3d".
    xData = xData.flatten()
    yData = yData.flatten()
    zData = zData.flatten()
    colorsArray = colorsRGB.reshape((width * height, 3))
    # Create figure
    fig = plt.figure()
    axes = fig.gca(projection='3d')
    axes.bar3d( xData, yData, np.zeros(len(zData)), .98, .98, zData, color=colorsArray, alpha=1.0, zsort='max')
    axes.set_xlim3d(0, width)
    axes.set_ylim3d(0, height)
    if zRange[0] != 0 or zRange[1] != 0:
        axes.set_zlim3d([zRange[0], zRange[1]])
    plt.show()
# Plot a 3D bar chart of 2D data
def plot3DHistogram(dataZ, zRange=[0, 0], axesView=[0,0], zTicks = True):
    """Show dataZ (height x width) as 3D bars.

    zRange, if not [0, 0], sets the displayed z interval; axesView gives
    (elevation, azimuth) for view_init; zTicks toggles z tick labels.
    WARNING: when zRange[0] != 0 the input dataZ is modified in place
    (shifted down and clamped at zero) to emulate z clipping.
    """
    # Function formatter to simulate zlim_3d
    def major_formatter(x, pos):
        return "{:.1f}".format(x+zRange[0])
    width = dataZ.shape[1]
    height = dataZ.shape[0]
    # Z clipping fails to clip bar charts, so shift the data down by the
    # lower bound and clamp at zero instead
    if zRange[0] != 0:
        for x in range(0, width):
            for y in range(0, height):
                dataZ[y,x] -= zRange[0]
                if dataZ[y,x] < 0:
                    dataZ[y,x] = 0
    # Convert data to array
    zData = np.array(dataZ)
    # Create an X-Y mesh of the same dimension as the 2D data
    xData, yData = np.meshgrid( np.arange(width), np.arange(height))
    # Flatten the arrays so that they may be passed to "axes.bar3d".
    xData = xData.flatten()
    yData = yData.flatten()
    zData = zData.flatten()
    # Create figure
    fig = plt.figure()
    axes = fig.gca(projection='3d')
    axes.bar3d( xData, yData, np.zeros(len(zData)), .98, .98, zData, alpha=1.0, zsort='min')
    # Axes
    axes.set_xlim3d(0, width)
    axes.set_ylim3d(0, height)
    # View
    if axesView[0] != 0 or axesView[1] != 0:
        axes.view_init(axesView[0], axesView[1])
    # Shift the tick labels up by minimum, set_zlim3d does not work
    if zRange[0] != 0 or zRange[1] != 0:
        if zTicks:
            axes.zaxis.set_major_formatter(ticker.FuncFormatter(major_formatter))
        else:
            axes.zaxis.set_major_formatter(ticker.NullFormatter())
    # Set the shifted range
    if zRange[0] != 0 or zRange[1] != 0:
        axes.set_zlim3d([0, zRange[1] - zRange[0]])
    plt.show()
# Plot 2D data as a coloured 3D surface
def plotColorSurface(dataZ, colorsRGB, zRange = [0,0], stride = 1):
    """Show dataZ (height x width) as a surface with per-patch colours.

    colorsRGB is passed to plot_surface as facecolors; zRange, if not
    [0, 0], fixes the z limits; stride subsamples rows/columns.
    """
    # Size of data edges. Edges is one more than matches
    width = dataZ.shape[1]
    height = dataZ.shape[0]
    # The edges of the surface. One more than the patches
    surfaceEdges = zeros((height + 1, width + 1), dtype='float')
    # Create edge data from the height data: corners copy the corner
    # values, border edges average 2 neighbours, interior edges average 4
    surfaceEdges[0, 0] = dataZ[0, 0]
    surfaceEdges[0, width] = dataZ[0, width-1]
    surfaceEdges[height, 0] = dataZ[height-1, 0]
    surfaceEdges[height, width] = dataZ[height-1, width-1]
    for x in range(1, width):
        surfaceEdges[0, x] = (dataZ[0, x-1] + dataZ[0, x]) / 2.0
        surfaceEdges[height, x] = (dataZ[height-1, x-1] + dataZ[height-1, x]) / 2.0
    for y in range(1, height):
        surfaceEdges[y, 0] = (dataZ[y-1, 0] + dataZ[y, 0]) / 2.0
        surfaceEdges[y, width] = (dataZ[y-1, width-1] + dataZ[y, width-1]) / 2.0
    for x in range(1, width):
        for y in range(1, height):
            surfaceEdges[y, x] = (dataZ[y-1,x-1] + dataZ[y-1,x] + dataZ[y,x-1] + dataZ[y,x]) / 4.0
    # Create the x and y pixel indices arrays
    x = np.linspace(0, width, width+1)
    y = np.linspace(0, height, height+1)
    xv, yv = np.meshgrid(x, y)
    # Create figure
    fig = plt.figure()
    axes = fig.gca(projection='3d')
    axes.plot_surface(xv, yv, surfaceEdges, rstride = stride, cstride = stride, linewidth=1, facecolors=colorsRGB, alpha=1.0, antialiased=False)
    axes.set_xlim3d(0, width)
    axes.set_ylim3d(0, height)
    if zRange[0] != 0 or zRange[1] != 0:
        axes.set_zlim3d([zRange[0], zRange[1]])
    plt.show()
# Plot 2D data as a 3D surface
def plotSurface(dataZ, zRange = [0,0], stride = 1):
    """Show dataZ (height x width) as a surface.

    Same edge construction as plotColorSurface, without per-patch colours.
    """
    # Size of data edges. Edges is one more than matches
    width = dataZ.shape[1]
    height = dataZ.shape[0]
    # The edges of the surface. One more than the patches
    surfaceEdges = zeros((height + 1, width + 1), dtype='float')
    # Create edge data from the height data: corners copy the corner
    # values, border edges average 2 neighbours, interior edges average 4
    surfaceEdges[0, 0] = dataZ[0, 0]
    surfaceEdges[0, width] = dataZ[0, width-1]
    surfaceEdges[height, 0] = dataZ[height-1, 0]
    surfaceEdges[height, width] = dataZ[height-1, width-1]
    for x in range(1, width):
        surfaceEdges[0, x] = (dataZ[0, x-1] + dataZ[0, x]) / 2.0
        surfaceEdges[height, x] = (dataZ[height-1, x-1] + dataZ[height-1, x]) / 2.0
    for y in range(1, height):
        surfaceEdges[y, 0] = (dataZ[y-1, 0] + dataZ[y, 0]) / 2.0
        surfaceEdges[y, width] = (dataZ[y-1, width-1] + dataZ[y, width-1]) / 2.0
    for x in range(1, width):
        for y in range(1, height):
            surfaceEdges[y, x] = (dataZ[y-1,x-1] + dataZ[y-1,x] + dataZ[y,x-1] + dataZ[y,x]) / 4.0
    # Create the x and y pixel indices arrays
    x = np.linspace(0, width, width+1)
    y = np.linspace(0, height, height+1)
    xv, yv = np.meshgrid(x, y)
    # Create figure
    fig = plt.figure()
    axes = fig.gca(projection='3d')
    axes.plot_surface(xv, yv, surfaceEdges, rstride = stride, cstride = stride, linewidth=1, alpha=1.0, antialiased=False)
    axes.set_xlim3d(0, width)
    axes.set_ylim3d(0, height)
    if zRange[0] != 0 or zRange[1] != 0:
        axes.set_zlim3d([zRange[0], zRange[1]])
    plt.show()
# Plot 2D data as a 3D wireframe
def plotWireframe(dataZ, zRange = [0, 0], stride = 1):
    """Show dataZ (height x width) as a wireframe; zRange fixes the z limits."""
    numCols = dataZ.shape[1]
    numRows = dataZ.shape[0]
    # Pixel-index mesh matching the data
    xData, yData = np.meshgrid( np.arange(numCols), np.arange(numRows))
    axes = plt.figure().gca(projection='3d')
    axes.plot_wireframe(xData, yData, dataZ, rstride=stride, cstride=stride)
    axes.set_xlim3d(0, numCols)
    axes.set_ylim3d(0, numRows)
    if zRange[0] != 0 or zRange[1] != 0:
        axes.set_zlim3d([zRange[0], zRange[1]])
    plt.show()
# Plot a vector field given magnitude and direction (radians) images
def plotQuiver(magnitude, direction, scaleVectors = 0, sampleSpace = 1):
    """Draw arrows for a vector field defined by magnitude and direction.

    scaleVectors is passed to quiver's 'scale' (larger = smaller arrows);
    sampleSpace draws every n-th arrow to reduce clutter.
    """
    width = magnitude.shape[1]
    height = magnitude.shape[0]
    # Create an X-Y mesh of the same dimension as the 2D data
    xPos, yPos = np.meshgrid( np.arange(width), np.arange(height))
    u = zeros((height, width), dtype='float')
    v = zeros((height, width), dtype='float')
    for y in range(0, height):
        for x in range(0, width):
            # Image origin is top left and graph origin is bottom left, so invert and width units
            u[height - y - 1,x] = magnitude[y,x] * math.cos(direction[y,x]) /width
            v[height - y - 1,x] = magnitude[y,x] * math.sin(direction[y,x]) /width
    fig = plt.figure()
    axes = fig.gca()
    # Offset sampling so it stays aligned with the flipped image rows
    initY = (height-1) % sampleSpace
    plt.quiver(xPos[initY::sampleSpace, ::sampleSpace], yPos[initY::sampleSpace, ::sampleSpace], \
               u[initY::sampleSpace, ::sampleSpace], v[initY::sampleSpace, ::sampleSpace], \
               pivot='mid', units='width', scale = scaleVectors)
    axes.set_xlim(0, width-1)
    axes.set_ylim(0, height-1)
    plt.show()
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter4/CannyEdgeDetector.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 4
CannyEdgeDetector: Detect edges by the Canny multi-stage algorithm
'''
# Set module functions
from ImageUtilities import imageReadL, createImageF, showImageF, showImageL
from ConvolutionUtilities import createGaussianKernel, createSobelKernel, applyKernelF, applyKernelMA
# Math and iteration
from math import pi, cos, sin
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    GaussianKernelSize = Gaussian kernel size. Filter noise
    sobelKernelSize = Sobel kernel size. Edge detection
    normalizeMagnitude = Normalize the convolution output
    upperT = upper threshold
    lowerT = lower threshold
    windowDelta = Size of window used in hysteresis
'''
pathToDir = "../../Images/Chapter4/Input/"
imageName = "Lizard.png"
GaussianKernelSize = 4
sobelKernelSize = 3
normalizeMagnitude = True
upperT = 0.25
lowerT = 0.1
windowDelta = 2
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Stage 1: smooth with a Gaussian kernel to suppress noise
gaussianKernel = createGaussianKernel(GaussianKernelSize)
gaussianImage = applyKernelF(inputImage, gaussianKernel)
# Stage 2: Sobel gradient; we use normalized magnitude in this example
sobelX, sobelY = createSobelKernel(sobelKernelSize)
magnitude, angle, _, _ = applyKernelMA(gaussianImage, sobelX, sobelY, normalizeMagnitude)
# To store maximum suppression image
maxImage = createImageF(width, height)
# Stage 3: non-maximum suppression -- keep a pixel only if its magnitude
# exceeds the (interpolated) magnitudes on both sides along the edge normal
border = GaussianKernelSize
for x,y in itertools.product(range(border, width-border), \
                             range(border, height-border)):
    # Only potential edges can be maximum
    if magnitude[y,x] > lowerT:
        # The normal angle is perpendicular to the edge angle
        normalAngle = angle[y,x] - pi / 2.0
        # Make sure the angle is between 0 and pi
        while normalAngle < 0:
            normalAngle += pi
        while normalAngle > pi:
            normalAngle -= pi
        # Angle defining the first point
        baseAngle = int( 4 * normalAngle / pi ) * (pi / 4.0)
        # Integer delta positions for interpolation
        # We use -y since the image origin is in top corner
        x1, y1 = int(round(cos(baseAngle))), -int(round(sin(baseAngle)))
        x2, y2 = int(round(cos(baseAngle + pi / 4.0))), \
                 -int(round(sin(baseAngle + pi / 4.0)))
        # How far we are from (x1,y1).
        # Maximum difference is pi / 4.0, so we multiply by 2
        w = cos(2.0*(normalAngle - baseAngle))
        # Point to interpolate
        M1 = w * magnitude[y+y1,x+x1] + (1.0 - w) * magnitude[y+y2,x+x2]
        # Point to interpolate for pixels in the other side of the edge
        M2 = w * magnitude[y-y1,x-x1] + (1.0 - w) * magnitude[y-y2,x-x2]
        # Determine if it is a maximum. If so make sure it will be preserved
        if magnitude[y,x] > M1 and magnitude[y,x] > M2:
            maxImage[y,x] = magnitude[y,x]
showImageF(maxImage)
# Stage 4: hysteresis thresholding with two thresholds
edges = createImageF(width, height)
potentialEdges = [ ]
# Divide pixels into edges (255), non-edges (0) and unassigned (128)
for x,y in itertools.product(range(1, width-1), range(1, height-1)):
    # These are edges
    if maxImage[y,x] > upperT: edges[y,x] = 255
    # These are pixels that we do not want as edges
    if maxImage[y,x] < lowerT: edges[y,x] = 0
    # These may be edges
    if maxImage[y,x] > lowerT and maxImage[y,x] <= upperT:
        edges[y,x] = 128
# Show double threshold image
showImageF(edges)
# Resolve the potential edges: a 128 pixel becomes an edge when it lies
# within windowDelta of a confirmed edge, growing transitively from there.
# NOTE(review): x,y only range over 1..size-2 but the window reaches
# +/-windowDelta (2), so edges[y+wy, x+wx] can index one past the array
# (or wrap at -1) near the border -- confirm input sizes make this safe.
for x,y in itertools.product(range(1, width-1), range(1, height-1)):
    # For each edge
    if edges[y,x] == 255:
        # Examine neighbors
        potentialEdges = [ ]
        for wx,wy in itertools.product(range(-windowDelta, windowDelta+1), \
                                       range(-windowDelta, windowDelta+1)):
            # It becomes an edge
            if edges[y+wy,x+wx] == 128:
                edges[y+wy,x+wx] = 255
                potentialEdges.append((y+wy,x+wx))
        # Look into new edges
        while len(potentialEdges) > 0:
            # Take element from potential edges
            y, x = (potentialEdges[0])[0], (potentialEdges[0])[1]
            potentialEdges = potentialEdges[1:]
            # Examine neighbor
            for wx,wy in itertools.product(range(-windowDelta, windowDelta+1), \
                                           range(-windowDelta, windowDelta+1)):
                # It becomes an edge
                if edges[y+wy,x+wx] == 128:
                    edges[y+wy,x+wx] = 255
                    potentialEdges.append((y+wy,x+wx))
# Clean up remaining potential edges
for x,y in itertools.product(range(1, width-1), range(1, height-1)):
    if edges[y,x] == 128:
        edges[y,x] = 0
# Final edges
showImageF(edges)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter9/MeanShift.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 9
MeanShift: Tracks a region in an image by considering the colour histogram
'''
# Mean-shift tracking: follow a rectangular region across frames by matching
# its colour histogram against the histogram of the region in frame 0.
# Set module functions
from ImageUtilities import imageReadRGB, showImageRGB, createImageF, createImageRGB
from PlotUtilities import plot3DHistogram
from ImageRegionsUtilities import densityHistogram, colourFeature
# Math and iteration
from math import exp, sqrt
# NOTE(review): this pulls in itertools through timeit's internal import;
# a plain "import itertools" would be the robust form (kept as-is for
# consistency with the other book examples).
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageNames = Input image names
    histoSize = Size of the histogram
    initialPos = position of the region [column, row]
    sizeReg = Size of the region [column, row]
    sigma = weight control
'''
pathToDir = "../../Images/Chapter9/Input/"
imageNames = ["frame1.bmp", "frame2.bmp", "frame3.bmp", "frame4.bmp", "frame5.bmp", "frame6.bmp"]
histoSize = 64
initialPos = [100, 60]
sizeReg = [12, 18]
sigma = 6.0
positions = [ ]
positions.append(initialPos)
# Read first frame and compute the target (reference) colour density q
inputImage, width, height = imageReadRGB(pathToDir + imageNames[0])
q = densityHistogram(inputImage, positions[0], sizeReg, sigma, histoSize)
plot3DHistogram(q)
# To store weights
weights = createImageF(2*sizeReg[0], 2*sizeReg[1])
# Avoid division by zero. Minimum value in the histogram
epsilon = 0.0000000001
# Quantization scale
colourScale = 256.0 / histoSize
# For each frame
numImages = len(imageNames)
for frameNum in range(1, numImages):
    inputImage, _, _ = imageReadRGB(pathToDir + imageNames[frameNum])
    currPos = [0, 0]
    newPos = positions[frameNum-1]
    # Iterate until the estimated position stops moving.
    # NOTE(review): mean shift can oscillate between two integer positions,
    # in which case this loop never terminates — a maximum-iteration guard
    # would make it robust; confirm against the tracked sequences used.
    while(currPos != newPos):
        # Histogram in current position
        currPos = newPos
        qs = densityHistogram(inputImage, currPos, sizeReg, sigma, histoSize)
        # Compute weights
        for deltaX, deltaY in itertools.product(range(-sizeReg[0],sizeReg[0]), \
                                                range(-sizeReg[1], sizeReg[1])):
            # Position of the pixel in the image and in the weight array
            x, y = currPos[0] + deltaX, currPos[1] + deltaY
            px,py = deltaX+sizeReg[0], deltaY+sizeReg[1]
            # The 2D colour description at this point. Scaled to fit the histogram values
            Cb,Cr= colourFeature(inputImage[y,x], colourScale)
            # Update weight considering original and current histogram values for the colour
            if qs[Cr, Cb] == 0:
                qs[Cr, Cb] = epsilon
            weights[py, px] = sqrt(q[Cr, Cb] / qs[Cr, Cb])
        # Compute mean shift sums
        meanSum = [0, 0]
        kernelSum = 0
        for deltaX, deltaY in itertools.product(range(-sizeReg[0],sizeReg[0]), \
                                                range(-sizeReg[1], sizeReg[1])):
            # Position of the pixel in the image
            x, y = currPos[0] + deltaX, currPos[1] + deltaY
            # Kernel parameter: Gaussian falloff with distance from the centre
            w = exp(-(deltaX*deltaX + deltaY*deltaY)/(2*sigma*sigma));
            # Weight index
            px, py = deltaX+sizeReg[0], deltaY+sizeReg[1]
            # Mean sum
            meanSum[0] += w * weights[py, px] * x
            meanSum[1] += w * weights[py, px] * y
            # Kernel sum
            kernelSum += w * weights[py, px]
        # Mean shift: weighted centroid becomes the next position estimate
        newPos = [int(meanSum[0] / kernelSum), int(meanSum[1] / kernelSum)]
    positions.append(newPos);
#print(positions)
# Show results: draw a dark-blue frame around the tracked region in each image
for frameNum in range(0, numImages):
    image, _, _ = imageReadRGB(pathToDir + imageNames[frameNum])
    p = positions[frameNum]
    borderDistance = [sizeReg[0] -5, sizeReg[1] -5]
    for x, y in itertools.product(range(p[0]-sizeReg[0], p[0]+sizeReg[0]), \
                                  range(p[1]-sizeReg[1], p[1]+sizeReg[1])):
        if abs(x-p[0]) > borderDistance[0] or abs(y-p[1]) > borderDistance[1]:
            image[y,x] = [20, 20, 80]
    showImageRGB(image)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter3/TemplateConvolution.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 3
TemplateConvolution: Filter an image by convolution of a template
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageL
# Iteration
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    kernelSize = Size of the kernel
'''
pathToDir = "../../Images/Chapter3/Input/"
imageName = "Giraffe.png"
kernelSize = 5

# Load the source image and display it
inputImage, width, height = imageReadL(pathToDir + imageName)
showImageL(inputImage)

# Build a flat template: every coefficient is 1.0, so the convolution acts
# as a mean (averaging) filter
kernelImage = createImageL(kernelSize, kernelSize)
for col, row in itertools.product(range(kernelSize), range(kernelSize)):
    kernelImage[row, col] = 1.0

# Buffer for the filtered result
outputImage = createImageL(width, height)

# Convolve: accumulate the weighted neighbourhood of each pixel, normalising
# by the total weight that actually fell inside the image (handles borders)
halfKernel = int((kernelSize - 1) / 2)
for x, y in itertools.product(range(width), range(height)):
    weightedSum, weightTotal = 0, 0
    for kx, ky in itertools.product(range(kernelSize), range(kernelSize)):
        srcY = y + ky - halfKernel
        srcX = x + kx - halfKernel
        if -1 < srcY < height and -1 < srcX < width:
            weightedSum += inputImage[srcY, srcX] * kernelImage[ky, kx]
            weightTotal += kernelImage[ky, kx]
    if weightTotal > 0:
        outputImage[y, x] = weightedSum / weightTotal

# Display the filtered image
showImageL(outputImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter2/FourierShiftedImage.py | <filename>ExamplesPython_3.6/Chapter2/FourierShiftedImage.py
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 2
FourierTransform: Compute the Fourier transform of a shifted image
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageL, showImageF
from ImageOperatorsUtilities import imageLogF
from FourierUtilities import computePowerandPhase
# Iteration
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
'''
pathToDir = "../../Images/Chapter2/Input/"
imageName = "Dandelion.png"

# Load the image
inputImage, width, height = imageReadL(pathToDir + imageName)

# Build a copy translated horizontally by a third of the width, wrapping
# around the border (circular shift)
shiftDistance = int(width / 3)
shiftImage = createImageL(width, height)
for x, y in itertools.product(range(width), range(height)):
    shiftImage[y][x] = inputImage[y][(x - shiftDistance) % width]

# Display both versions
showImageL(inputImage)
showImageL(shiftImage)

# Fourier power and phase of original and shifted image. A translation
# leaves the power spectrum unchanged and only alters the phase.
powerImage, phaseImage = computePowerandPhase(inputImage)
powerShiftImage, phaseShiftImage = computePowerandPhase(shiftImage)

# Power spectra, log-scaled for display
powerImageLog = imageLogF(powerImage)
powerShiftImageLog = imageLogF(powerShiftImage)
showImageF(powerImageLog)
showImageF(powerShiftImageLog)

# Phase images carry the shift
showImageF(phaseImage)
showImageF(phaseShiftImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter5/HTLinesDecomposition.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 5
HoughTransformEllipses: Line detection by the Hough transform decomposition
'''
# Line detection by Hough transform decomposition: first accumulate evidence
# for the line ORIENTATION only (1D accumulator over 360 degrees), using
# pairs of nearby edge points; the distance parameter r is found later.
# Set module functions
from ImageUtilities import imageReadL, showImageF, showImageL, createScaleImageL, createVectorF
from ImageOperatorsUtilities import applyCannyEdgeDetector
from ImagePropertiesUtilities import imageMaxMin, peakDetectorVector
from PlotUtilities import plot3DHistogram, plotHistogram
# Math and iteration
from math import fabs, sqrt, pi, sin, cos, atan2, atan
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    gaussianKernelSize = Gaussian kernel size. Filter noise
    sobelKernelSize = Sobel kernel size. Edge detection
    upperT = Upper threshold
    lowerT = Lower threshold
    peakDetection = Percentage of the maximum peak value that is considered for threshold
    deltaPtRange = How far is the second point
'''
pathToDir = "../../Images/Chapter5/Input/"
imageName = "Road.png"
gaussianKernelSize = 7
sobelKernelSize = 3
upperT = 0.5
lowerT = 0.3
peakDetection = 0.6
peakDetectionR = 0.7
deltaPtRange = [10, 15]
# Read image into array and show
inputImage, width, height = imageReadL(pathToDir + imageName)
showImageL(inputImage)
# Compute edges
magnitude, angle = applyCannyEdgeDetector(inputImage, gaussianKernelSize, sobelKernelSize, upperT, lowerT)
showImageF(magnitude)
# Accumulator for the slope in degrees.
# maxLenght (sic) is the largest possible distance from the image centre.
maxLenght = int(sqrt(height*height + width*width) / 2)
accM = createVectorF(360)
cx, cy = int(width/2), int(height/2)
# Gather evidence for a point x,y: every pair of edge points within
# deltaPtRange defines a candidate line orientation
for x,y in itertools.product(range(0, width), range(0, height)):
    if magnitude[y,x] != 0:
        # Look for points at this distance
        for dx,dy in itertools.product(range(-deltaPtRange[1],deltaPtRange[1]+1), \
                                       range(-deltaPtRange[1],deltaPtRange[1]+1)):
            if abs(dx) > deltaPtRange[0] or abs(dy) > deltaPtRange[0]:
                wx,wy = x+dx, y+dy
                if wx > 0 and wy > 0 and wx < width and wy < height \
                   and magnitude[wy, wx] !=0:
                    # Normal direction of the line through the two points
                    pointAngle = atan2(-float(wx-x), float(wy-y)) + pi
                    # If r is negative, the line is in the other side of the centre
                    r = (x-cx) * cos(pointAngle) + (y-cy) * sin(pointAngle)
                    if r < 0:
                        if pointAngle > pi: pointAngle -= pi
                        else: pointAngle += pi
                    # Accumulator entries depend on the distance to the second point:
                    # close pairs have a larger angular uncertainty
                    deltaDistance = sqrt(dx*dx + dy*dy)
                    incAngle = int(atan(1.0/deltaDistance) * 180.0 / pi)
                    buketAngleBase = int((pointAngle * 180.0) / pi)
                    # More buckets if the points are close; the vote is spread
                    # over neighbouring angle buckets with a triangular weight
                    for deltaBucket in range(-incAngle, +incAngle+1):
                        bucket = buketAngleBase + deltaBucket
                        if bucket < 0:
                            bucket = 360 + bucket
                        if bucket >= 360:
                            bucket = bucket-360
                        w = (incAngle - fabs(deltaBucket)) / float(incAngle)
                        accM[bucket] += w
# Find maximum and plot histogram
maximum, _ = imageMaxMin(accM)
peakThreshold = peakDetection * maximum
plotHistogram(accM)
# Prepare output image as a dark version of the input
outputImage = createScaleImageL(inputImage, 0.5)
# Gather evidence for the r parameter in a second accumulator, once per
# detected orientation peak.
# FIX(review): the original computed accR inside the peak loop but ran the
# peak detection and line drawing only once, AFTER the loop, so the lines of
# every orientation except the last were silently discarded (and the drawing
# re-derived `angle` from the stale loop variable `m`). The whole per-peak
# processing now lives inside the loop, so each detected orientation
# contributes its own set of lines.
peaks = peakDetectorVector(accM, peakThreshold)
for peakIndex in range(0, len(peaks)):
    m = peaks[peakIndex]
    accR = createVectorF(maxLenght)
    angle = (m * pi) / 180.0
    # Each edge point votes for its distance r along this orientation,
    # split between the two neighbouring buckets (linear interpolation)
    for x,y in itertools.product(range(0, width), range(0, height)):
        if magnitude[y,x] != 0:
            r = fabs((x-cx) * cos(angle) + (y-cy) * sin(angle))
            bucket = int(r)
            if bucket > 0 and bucket < maxLenght - 1:
                weight = r - int(r)
                accR[bucket] += (1.0 - weight)
                accR[bucket+1] += weight
    plotHistogram(accR)
    # Find the distance peaks for this orientation
    maximumR, _ = imageMaxMin(accR)
    peakThresholdR = peakDetectionR * maximumR
    peaksR = peakDetectorVector(accR, peakThresholdR)
    # Draw a line for each detected (angle, r) pair, brightness proportional
    # to its accumulator strength. Rasterise along the axis that gives one
    # pixel per step (avoids gaps for steep lines).
    for peakRIndex in range(0, len(peaksR)):
        r = peaksR[peakRIndex]
        strength = int(255.0 * accR[r] / maximumR)
        if fabs(cos(angle)) < fabs(sin(angle)):
            for x in range(0, width -1):
                y = int( (r - (x-cx) * cos(angle) ) / sin(angle)) + cy
                if y > 0 and y < height -1:
                    outputImage[y,x] = strength
                    outputImage[y+1,x] = strength
        else:
            for y in range(0, height -1):
                x = int( (r - (y-cy) * sin(angle) ) / cos(angle)) + cx
                if x > 0 and x < width -1:
                    outputImage[y,x] = strength
                    outputImage[y+1,x] = strength
showImageL(outputImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter8/WatershedEdgeTransform.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples<gh_stars>10-100
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 8
WaterShedEdgeTransform: Compute Watershed transform by considering the edge image
'''
# Watershed transform seeded from Canny edges: build a distance-to-edge map
# and flood it with the watershed algorithm.
# Set module functions
from ImageUtilities import imageReadL, showImageL,createImageF, showImageF
from ImageOperatorsUtilities import applyCannyEdgeDetector
from ImageRegionsUtilities import watherShed
from PrintUtilities import printProgress
# Math and iteration
from math import sqrt
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    kernelSize = Gaussian and Sobel kernel size
    normalizeMagnitude = Normalise the convolution output
    upperT = upper threshold
    lowerT = lower threshold
    windowDelta = Size of window used in hysteresis
    suppWindow = Size of the window used to find maxima
'''
pathToDir = "../../Images/Chapter8/Input/"
imageName = "Logs.png"
cannyKernelSize = 7
upperT = 0.5
lowerT = 0.1
windowDelta = 3
suppWindow = 5
# Read image into array and show
inputImage, width, height = imageReadL(pathToDir+imageName)
showImageL(inputImage)
# Compute edges
magnitude, angle = applyCannyEdgeDetector(inputImage, cannyKernelSize, cannyKernelSize, upperT, lowerT)
showImageF(magnitude)
# Divide pixels into edge and region pixels
edgePixels = [ ]
shapeImage = [ ]
for x,y in itertools.product(range(0, width), range(0, height)):
    if magnitude[y,x] > 0:
        edgePixels.append((y,x))
        shapeImage.append((y,x))
# Distance transform: every pixel gets the Euclidean distance to its
# closest edge pixel (brute force over all edges).
# FIX(review): the original seeded the running minimum with FLT_MAX imported
# from _testcapi, which is a CPython-internal test helper and not a public,
# portable API; float("inf") is the standard sentinel and yields the same
# minimum for any finite distance.
distanceImage = createImageF(width, height)
numEdges = len(edgePixels)
for x in range(0, width):
    printProgress(x, width)
    for y in range(0, height):
        minEdgeDist = float("inf")
        for indexEdge in range(0, numEdges):
            edgeY, edgeX = (edgePixels[indexEdge])[0], (edgePixels[indexEdge])[1]
            minEdgeDist = min(minEdgeDist, sqrt((edgeX-x)**2+(edgeY-y)**2) )
        # We define an edge in a distance image as 1. In this case we do not have edges so all flood
        distanceImage[y,x] = minEdgeDist + 2.0
showImageF(distanceImage)
# Watershed of the distance image
watershed = watherShed(distanceImage, shapeImage, suppWindow)
showImageF(watershed)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter5/TemplateMatching.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples<gh_stars>10-100
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 5
TemplateMatching: Compute the matching of a template in an image
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageF, showImageF
from PrintUtilities import printProgress
from ImagePropertiesUtilities import imageMaxMin
from PlotUtilities import plot3DHistogram
# Iteration
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    templateName = Input template image name
    thresholdVal = Only pixels in the template with value greater that this are used
                   -1 to use all pixels or 0 to use edges with value >0
'''
pathToDir = "../../Images/Chapter5/Input/"
imageName = "Eye.png"
templateName = "EyeTemplate.png"
thresholdVal = -1

# Load the search image and the template, then display both
inputImage, width, height = imageReadL(pathToDir + imageName)
templateImage, widthTemplate, heightTemplate = imageReadL(pathToDir + templateName)
showImageL(inputImage)
showImageL(templateImage)

# Accumulator holding a match score for every candidate template centre
accumulator = createImageF(width, height)

# Slide the template over every image position. Grey-level differences are
# inverted so that good matches accumulate HIGH scores: the best position
# shows up as a peak in the accumulator.
halfW = int((widthTemplate - 1) / 2)
halfH = int((heightTemplate - 1) / 2)
for x in range(width):
    printProgress(x, width)
    for y in range(height):
        for tx, ty in itertools.product(range(widthTemplate), range(heightTemplate)):
            imgY = y + ty - halfH
            imgX = x + tx - halfW
            # Only overlap pixels inside the image vote; the threshold limits
            # the vote to edge pixels when an edge template is used
            if imgY > -1 and imgY < height and imgX > -1 and imgX < width and \
               templateImage[ty,tx] > thresholdVal:
                similarity = 1.0 - abs(float(inputImage[imgY,imgX]) - \
                                       float(templateImage[ty, tx])) / 255.0
                accumulator[y,x] += similarity * similarity
# Display the accumulator, plotted between its extrema
maxima, minima = imageMaxMin(accumulator)
showImageF(accumulator)
plot3DHistogram(accumulator, [minima, maxima])
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Modules/PrintUtilities.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
PrintUtilities: Helper module to print image data and messages on the standard device
'''
# Print a rectangular region of an RGB image as one bracketed row per line,
# each pixel rendered as an "( r g b )" triplet of 3-wide decimal fields.
# rangeWidth and rangeHeight are inclusive [first, last] index pairs.
def printImageRangeRGB(image, rangeWidth, rangeHeight):
    print("\n")
    rows = range(rangeHeight[0], rangeHeight[1] + 1)
    cols = range(rangeWidth[0], rangeWidth[1] + 1)
    for y in rows:
        print("[", end=' ')
        for x in cols:
            pixel = image[y, x]
            red, green, blue = pixel[0], pixel[1], pixel[2]
            print("(", format(red, '3d'), format(green, '3d'), format(blue, '3d'), ")", end=' ')
        print("]")
# Print a rectangular region of a grey-level image as one bracketed row per
# line, with each value in a 3-wide decimal field.
# rangeWidth and rangeHeight are inclusive [first, last] index pairs.
def printImageRangeL(image, rangeWidth, rangeHeight):
    print("\n")
    for row in range(rangeHeight[0], rangeHeight[1] + 1):
        cells = [format(image[row, col], '3d')
                 for col in range(rangeWidth[0], rangeWidth[1] + 1)]
        print("[", end=' ')
        for cell in cells:
            print(cell, end=' ')
        print("]")
# Print a rectangular region of a float image as one bracketed row per line,
# formatting each value with the given format spec (default: signed, two
# decimal places). rangeWidth/rangeHeight are inclusive [first, last] pairs.
def printImageRangeF(image, rangeWidth, rangeHeight, formatValue = ' 3.2f'):
    print("\n")
    for row in range(rangeHeight[0], rangeHeight[1] + 1):
        print("[", end=' ')
        for col in range(rangeWidth[0], rangeWidth[1] + 1):
            print(format(image[row, col], formatValue), end=' ')
        print("]")
# Print text followed by a newline on the standard output
def printText(text):
    print (text)
# Print text followed by a single space, staying on the same output line
def printTextSameLine(text):
    print (text, end=' ')
# Some operations can be slow. Print "step / totalSteps ..." to show progress
def printProgress(step, totalSteps):
    print (step,"/",totalSteps,"...")
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter7/InvariantMoments.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples<filename>ExamplesPython_3.6/Chapter7/InvariantMoments.py
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 7
InvariantMoments: Compute invariant moments of a shape in an image
'''
# Compute geometric, central, scale-normalised and rotation-invariant moments
# of the (single) shape found against the background, plus the classic Hu
# invariants M1..M3.
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageF
from PrintUtilities import printImageRangeF, printText
from ImageRegionsUtilities import pixlesList
# Math and iteration
from math import pi, atan, sin, cos, log10
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    numMoments = Number of moments
    background = The gray level range of the background pixels
'''
pathToDir = "../../Images/Chapter7/Input/"
imageName = "f14rs.png"
numMoments = 4
background = [200, 255] # white background image
# Read image into array and show
inputImage, width, height = imageReadL(pathToDir+imageName)
showImageL(inputImage)
# Get a list that contains the pixels of the shape in the form (y,x,val)
# We could have used the border pixels
imageRegion = pixlesList(inputImage, background)
# Compute geometric moments M[n,m] = sum x^n y^m v over the shape pixels
numPoints = len(imageRegion)
M = createImageF(numMoments,numMoments)
for m,n in itertools.product(range(0, numMoments), range(0, numMoments)):
    for indexPixel in range(0, numPoints):
        y = (imageRegion[indexPixel])[0]
        x = (imageRegion[indexPixel])[1]
        v = (imageRegion[indexPixel])[2]
        M[n,m] += (x**n) * (y**m) * v
# Geometric central Moments: moments about the centroid (xc, yc),
# which makes them translation invariant
xc,yc = M[1,0]/M[0,0], M[0,1]/M[0,0]
centMom = createImageF(numMoments,numMoments)
for m,n in itertools.product(range(0, numMoments), range(0, numMoments)):
    for indexPixel in range(0, numPoints):
        y = (imageRegion[indexPixel])[0]
        x = (imageRegion[indexPixel])[1]
        v = (imageRegion[indexPixel])[2]
        centMom[n,m] += ((x-xc)**n) * ((y-yc)**m) * v
# Scale normalized geometric central Moments
centMomNorm = createImageF(numMoments,numMoments)
for m,n in itertools.product(range(0, numMoments), range(0, numMoments)):
    c = 1 + ((n + m) / 2.0)
    centMomNorm[n,m] = centMom[n,m] / pow(centMom[0,0], c)
# Angle from central moments (principal-axis orientation).
# NOTE(review): divides by centMom[2,0]-centMom[0,2]; a shape with equal
# second-order moments would raise ZeroDivisionError — verify inputs.
if centMom[2,0] < centMom[0,2]:
    t = 0.5 * atan(2.0*centMom[1,1]/(centMom[2,0]-centMom[0,2])) + pi/2.0
else:
    t = 0.5 * atan(2.0*centMom[1,1]/(centMom[2,0]-centMom[0,2]))
# Opposite direction for rotation invariant
t = -t;
# Geometric invariant moments from image region: coordinates rotated by t
# about the centroid, then log-scale-normalised
vn = createImageF(numMoments,numMoments)
for m,n in itertools.product(range(0, numMoments), range(0, numMoments)):
    for indexPixel in range(0, numPoints):
        y = (imageRegion[indexPixel])[0]
        x = (imageRegion[indexPixel])[1]
        val = (imageRegion[indexPixel])[2]
        vn[n,m] += ((x-xc)*cos(t) - (y-yc)*sin(t))**n * \
                   ((x-xc)*sin(t) + (y-yc)*cos(t))**m * val
    c = 1 + ((n + m) / 2.0)
    # Non-positive moments cannot be log-scaled and are reported as 0
    if vn[n,m] > 0: vn[n,m] = log10(vn[n,m] / pow(M[0,0],c))
    else: vn[n,m] = 0
printImageRangeF(vn, [0,numMoments-1],[0,numMoments-1], "6.2f")
# Compute invariant moments from normailsed central moments
m1 = centMomNorm[2,0] + centMomNorm[0,2]
m2 = (centMomNorm[2,0] - centMomNorm[0,2])**2 + 4* (centMomNorm[1,1]**2)
m3 = (centMomNorm[3,0] - 3.0*centMomNorm[1,2])**2 + (3.0*centMomNorm[2,1] \
    - centMomNorm[0,3])**2
printText("M1 = " + '%.4f' % m1)
printText("M2 = " + '%.4f' % m2)
printText("M3 = " + '%.4f' % m3)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter4/FirstOrderEdgeDetector.py | <filename>ExamplesPython_3.6/Chapter4/FirstOrderEdgeDetector.py<gh_stars>10-100
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 4
FirstOrderEdgeDetector: Compute gradient by first order derivative
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageF, showImageF
# Iteration
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
'''
pathToDir = "../../Images/Chapter4/Input/"
imageName = "Squares.png"

# Load and display the source image
inputImage, width, height = imageReadL(pathToDir + imageName)
showImageL(inputImage)

# Buffers for the horizontal, vertical and combined edge responses
horizEdges = createImageF(width, height)
vertEdges = createImageF(width, height)
outputEdges = createImageF(width, height)

# First-order differences: horizontal edges from the pixel below, vertical
# edges from the pixel to the right, and both directions combined
for col, row in itertools.product(range(width - 1), range(height - 1)):
    here = float(inputImage[row, col])
    below = float(inputImage[row + 1, col])
    right = float(inputImage[row, col + 1])
    horizEdges[row, col] = abs(here - below)
    vertEdges[row, col] = abs(here - right)
    outputEdges[row, col] = abs(2.0 * here - below - right)

# Display the three edge images
showImageF(horizEdges)
showImageF(vertEdges)
showImageF(outputEdges)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Modules/ImageOperatorsUtilities.py | <reponame>Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples<filename>ExamplesPython_3.6/Modules/ImageOperatorsUtilities.py
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
ImageOperatorsUtilities: Helper module to process an image
'''
# Images
from ImageUtilities import createImageF, createImageL, createVectorI
from ConvolutionUtilities import createGaussianKernel, createSobelKernel, applyKernelF, applyKernelMA
# Array to store image data
from numpy import amax
# Math
from math import log, pi, sin, cos
# Iteration
from timeit import itertools
# Return a log-scaled copy of a float image, useful for displaying data with
# a large dynamic range (e.g. Fourier power spectra). Each magnitude maps to
# log(1 + |v| / max * scale), so every output value is strictly positive.
def imageLogF(image, scale = 100):
    height, width = len(image), len(image[0])
    maximum = amax(image)
    # Grey-level image for display
    logImage = createImageF(width ,height)
    for y, x in itertools.product(range(height), range(width)):
        logImage[y, x] = log(1.0 + (abs(image[y, x]) / maximum) * scale)
    return logImage
# Compute the grey-level histogram of an image: entry v counts how many
# pixels have value v (256 bins).
def computeHistogram(inputImage):
    height, width = len(inputImage), len(inputImage[0])
    # Integer vector: one counter per possible pixel value
    outputHistogram = createVectorI(256)
    for x, y in itertools.product(range(width), range(height)):
        outputHistogram[inputImage[y, x]] += 1
    return outputHistogram
# Threshold an image: pixels strictly above the threshold keep their value
# (or become 255 when binary is True); all other pixels become 0.
def thresholdImage(inputImage, threshold, binary = True):
    height, width = len(inputImage), len(inputImage[0])
    outputImage = createImageL(width, height)
    for x, y in itertools.product(range(width), range(height)):
        value = inputImage[y, x]
        if value > threshold:
            outputImage[y, x] = 255 if binary else value
        else:
            outputImage[y, x] = 0
    return outputImage
# Apply Canny operator to an image
def applyCannyEdgeDetector(inputImage, GaussianKernelSize, sobelKernelSize, upperT, lowerT, returnGradient = False):
    """Detect edges with the Canny operator.

    Pipeline: Gaussian smoothing, Sobel gradient, local-variance weighting of
    the magnitude, non-maximum suppression along the gradient normal, and
    hysteresis thresholding between lowerT and upperT.

    Returns (edges, angle), or (edges, angle, mX, mY) when returnGradient is
    True, where edges holds 255 for edge pixels and 0 elsewhere.
    """
    height = len(inputImage)
    width = len(inputImage[0])
    normalizeMagnitude = True
    windowDelta = 1
    # Apply Gaussian kernel
    gaussianKernel = createGaussianKernel(GaussianKernelSize)
    gaussianImage = applyKernelF(inputImage, gaussianKernel)
    # Apply Sobel kernel. We use normalized magnitude in this example
    sobelX, sobelY = createSobelKernel(sobelKernelSize)
    magnitude, angle, mX, mY = applyKernelMA(gaussianImage, sobelX, sobelY, normalizeMagnitude)
    # Weight magnitude by the variance. This is useful for corner extraction since suppress the internal corner
    weightedMagnitude = createImageF(width, height)
    for x,y in itertools.product(range(0, width), range(0, height)):
        sumKernel = 1.0/8.0
        for wx,wy in itertools.product(range(-1,2), range(-1, 2)):
            posY = y + wy
            posX = x + wx
            if posY > -1 and posY < height and posX > -1 and posX < width:
                sumKernel += abs(float(inputImage[posY,posX]) - float(inputImage[y,x]))
        sumKernel /= 8.0
        weightedMagnitude[y,x] = magnitude[y,x] * sumKernel
    # To store maximum suppression image
    maxImage = createImageF(width, height)
    # Non-maximum suppression: keep a pixel only if its weighted magnitude
    # exceeds the interpolated magnitudes on both sides along the normal
    border = GaussianKernelSize
    for x,y in itertools.product(range(border, width - border),
                                 range(border, height - border)):
        # Only potential edges can be maximum
        if magnitude[y,x] > lowerT:
            # The normal angle is perpendicular to the edge angle
            normalAngle = angle[y,x] - pi / 2.0
            # Make sure the angle is between 0 and pi
            while normalAngle < 0:
                normalAngle += pi
            while normalAngle > pi:
                normalAngle -= pi
            # Angle defining the first point
            baseAngle = int( 4 * normalAngle / pi ) * (pi / 4.0)
            # Integer delta positions for interpolation
            # We use -y since the image origin is in top corner
            x1, y1 = int(round(cos(baseAngle))), -int(round(sin(baseAngle)))
            x2, y2 = int(round(cos(baseAngle + pi / 4.0))), \
                     -int(round(sin(baseAngle + pi / 4.0)))
            # How far we are from (x1,y1). Maximum difference is math.pi / 4.0, so we multiply by 2
            w = cos(2.0*(normalAngle - baseAngle))
            # Point to interpolate
            M1 = w * weightedMagnitude[y+y1,x+x1] + (1.0 - w) * weightedMagnitude[y+y2,x+x2]
            # Point to interpolate for pixels in the other side of the edge
            M2 = w * weightedMagnitude[y-y1,x-x1] + (1.0 - w) * weightedMagnitude[y-y2,x-x2]
            # Determine if it is a maximum. If so make sure it will be preserved
            if weightedMagnitude[y,x] > M1 and weightedMagnitude[y,x] > M2:
                maxImage[y,x] = magnitude[y,x]
    # To compute hysteresis thresholded images we require two thresholds
    edges = createImageF(width, height)
    potentialEdges = [ ]
    # Divide pixels as edges (255), no edges (0) and we are not sure (128)
    for x,y in itertools.product(range(1, width-1), range(1, height-1)):
        # These are edges
        if maxImage[y,x] > upperT:
            edges[y,x] = 255
        # These are pixels that we do not want as edges
        if maxImage[y,x] < lowerT:
            edges[y,x] = 0
        # These may be edges
        if maxImage[y,x] > lowerT and maxImage[y,x] <= upperT:
            edges[y,x] = 128
    # Resolve the potential edges: flood from each strong edge, promoting
    # connected candidate (128) pixels to edges (255)
    for x,y in itertools.product(range(1, width-1), range(1, height-1)):
        # For each edge
        if edges[y,x] == 255:
            # Examine neighbour
            potentialEdges = [ ]
            for wx,wy in itertools.product(range(-windowDelta, windowDelta+1), range(-windowDelta, windowDelta+1)):
                # It becomes an edge
                if edges[y+wy,x+wx] == 128:
                    edges[y+wy,x+wx] = 255
                    potentialEdges.append((y+wy,x+wx))
            # Look into new edges. Reusing x,y here is safe: itertools.product
            # yields pre-computed tuples, so the outer loop is unaffected.
            while len(potentialEdges) > 0:
                # Take element from potential edges
                y = (potentialEdges[0])[0]
                x = (potentialEdges[0])[1]
                potentialEdges = potentialEdges[1:]
                # Examine neighbour
                for wx,wy in itertools.product(range(-windowDelta, windowDelta+1), range(-windowDelta, windowDelta+1)):
                    # It becomes an edge
                    if edges[y+wy,x+wx] == 128:
                        edges[y+wy,x+wx] = 255
                        potentialEdges.append((y+wy,x+wx))
    # Clean up remaining potential edges
    for x,y in itertools.product(range(1, width-1), range(1, height-1)):
        if edges[y,x] == 128:
            edges[y,x] = 0
    if returnGradient == False:
        return edges, angle
    return edges, angle , mX, mY
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter4/PrewittOperator.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 4
PrewittOperator: Compute gradient by using the Prewitt operator
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageF, showImageF
from PrintUtilities import printImageRangeF
from PlotUtilities import plotQuiver
# Math and iteration
from math import sqrt, atan2
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
'''
pathToDir = "../../Images/Chapter4/Input/"
imageName = "Squares.png"
# Read image into array and display it
inputImage, width, height = imageReadL(pathToDir + imageName)
showImageL(inputImage)
# Gradient magnitude and direction images
outputMagnitude = createImageF(width, height)
outputDirection = createImageF(width, height)
# Prewitt operator: mX averages the difference between the row above and the
# row below over three columns; mY averages the difference between the left
# and right columns over three rows.
# FIX(review): the original iterated from 0, so at x == 0 or y == 0 the
# indices x-1 / y-1 became -1 and wrapped to the opposite image border,
# producing spurious gradient values along the first row and column. Border
# pixels are now skipped and keep their initial value.
for x,y in itertools.product(range(1, width-1), range(1, height-1)):
    mX,mY = 0.0, 0.0
    for c in range(-1, 2):
        mX += float(inputImage[y - 1, x + c]) - float(inputImage[y + 1, x + c])
        mY += float(inputImage[y + c, x - 1]) - float(inputImage[y + c, x + 1])
    outputMagnitude[y,x] = sqrt(mX * mX + mY * mY)
    outputDirection[y,x] = atan2(mY, mX)
# Show output image
showImageF(outputMagnitude)
showImageF(outputDirection)
# Print pixel's values in an image range
printImageRangeF(outputDirection, [0, width-1], [0, height-1])
# Plot vectors
plotQuiver(outputMagnitude, outputDirection, 1300)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | ExamplesPython_3.6/Chapter3/FourierConvolution.py | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 3
FourierConvolution: Filter an image by using the Fourier transform
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF
from FourierUtilities import computeCoefficients, reconstruction, computePowerfromCoefficients
from ImageOperatorsUtilities import imageLogF
# Iteration
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageName = Input image name
    kernelSize = Size of the kernel
'''
pathToDir = "../../Images/Chapter3/Input/"
imageName = "Eye.png"
kernelSize = 9

# Load and display the input image
inputImage, width, height = imageReadL(pathToDir + imageName)
showImageL(inputImage)

# Flat averaging template in the top-left corner of a full-size image
kernelImage = createImageF(width, height)
for col, row in itertools.product(range(kernelSize), range(kernelSize)):
    kernelImage[row, col] = 255.0

# Spatial convolution becomes multiplication in the frequency domain when
# both operands are zero-padded to (image + kernel - 1) and the template is
# flipped in both axes
widthPad, heightPad = width + kernelSize - 1, height + kernelSize - 1
inputPad = createImageF(widthPad, heightPad)
for col, row in itertools.product(range(width), range(height)):
    inputPad[row, col] = inputImage[row, col]
templatePadFlip = createImageF(widthPad, heightPad)
for col, row in itertools.product(range(kernelSize), range(kernelSize)):
    templatePadFlip[row, col] = kernelImage[kernelSize-col-1, kernelSize-row-1]
showImageF(templatePadFlip)

# Fourier coefficients of padded image and flipped template
imageCoeff, maxFrequencyW, maxFrequencyH = computeCoefficients(inputPad)
templateCoeff, _, _ = computeCoefficients(templatePadFlip)

# Display the log power of both spectra
powerImage = computePowerfromCoefficients(imageCoeff)
showImageF(imageLogF(powerImage))
powerTemplate = computePowerfromCoefficients(templateCoeff)
showImageF(imageLogF(powerTemplate))

# Frequency-domain multiplication: complex product of each (real, imaginary)
# coefficient pair
resultCoeff = createImageF(1 + 2 * maxFrequencyW, 1 + 2 * maxFrequencyH , 2)
for kw, kh in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1), \
                                range(-maxFrequencyH, maxFrequencyH + 1)):
    w, h = kw + maxFrequencyW, kh + maxFrequencyH
    realA, imagA = imageCoeff[h,w][0], imageCoeff[h,w][1]
    realB, imagB = templateCoeff[h,w][0], templateCoeff[h,w][1]
    resultCoeff[h,w][0] = realA * realB - imagA * imagB
    resultCoeff[h,w][1] = imagA * realB + realA * imagB

# Log power of the product
powerResult = computePowerfromCoefficients(resultCoeff)
showImageF(imageLogF(powerResult))

# Back to the spatial domain; crop the padding so the result aligns with the
# original image
outputImage = reconstruction(resultCoeff)
outPad = createImageF(width, height)
halfKernel = int(kernelSize/2)
for col, row in itertools.product(range(width), range(height)):
    outPad[row, col] = outputImage[row + halfKernel, col + halfKernel]
# Show filter image
showImageF(outPad)
|
Haotianz94/stylegan2-ada | run_projector.py | <reponame>Haotianz94/stylegan2-ada<gh_stars>0
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
import sys
from projector import *
def main():
    """Project a directory of images into StyleGAN2 latent (dlatent) space.

    Usage: run_projector.py <split_idx> <num_split>
    Each of ``num_split`` parallel workers processes every ``num_split``-th
    minibatch, starting at offset ``split_idx``, and writes its accumulated
    dlatents to its own pickle file under ``output_dir``.
    """
    seed = 303                 # TF RNG seed for reproducible projection
    minibatch_size = 10        # images projected per batch
    network_pkl = 'training-runs/00002-djokovic_fg_black_v2_tf-mirror-paper256/network-snapshot-022937.pkl'
    images_dir = 'datasets/djokovic_fg_black_v2'
    output_dir = 'result/embed_djokovic_black_v2_22937'
    os.makedirs(output_dir, exist_ok=True)
    split_idx = int(sys.argv[1])   # index of this worker
    num_split = int(sys.argv[2])   # total number of parallel workers
    # Load networks. (TF session must be initialized before unpickling.)
    tflib.init_tf({'rnd.np_random_seed': seed})
    print('Loading networks from "%s"...' % network_pkl)
    with dnnlib.util.open_url(network_pkl) as fp:
        _G, _D, Gs = pickle.load(fp)
    # Initialize projector.
    proj = Projector()
    proj.set_network(Gs, minibatch_size)
    proj.num_steps = 1000
    images_name = sorted(os.listdir(images_dir))
    result_dict = {}
    # Stride by num_split * minibatch_size so workers cover disjoint batches.
    for batch_idx in range(split_idx * minibatch_size, len(images_name), num_split * minibatch_size):
        print("Split {} running batch {} of {}".format(split_idx, batch_idx, len(images_name) // minibatch_size // num_split))
        images_name_batch = images_name[batch_idx: batch_idx + minibatch_size]
        # Load images, then convert HWC uint8 -> CHW float32 in [-1, 1].
        # Assumes 3-channel images at the network resolution -- TODO confirm
        # against Projector.start().
        images_gt = [np.array(PIL.Image.open(os.path.join(images_dir, f))) for f in images_name_batch]
        images_float = [image.astype(np.float32).transpose([2, 0, 1]) * (2 / 255) - 1 for image in images_gt]
        proj.start(images_float)
        # Run projector.
        with tqdm.trange(proj.num_steps) as t:
            for step in t:
                assert step == proj.cur_step
                dist, loss = proj.step()
                t.set_postfix(dist=f'{dist[0]:.4f}', loss=f'{loss:.2f}')
        # Save results.
        # imgs_embed = proj.images_uint8
        for i, f in enumerate(images_name_batch):
            result_dict[f] = proj.dlatents[i]
        # Re-dump the whole dict after every batch so partial progress
        # survives interruption (the file is overwritten, not appended).
        pickle.dump(result_dict, open(os.path.join(output_dir, 'dlatents-{}-{}.pkl'.format(split_idx, num_split)), 'wb'))
#----------------------------------------------------------------------------
# Script entry point: run the projector when executed directly.
if __name__ == "__main__":
    main()
#----------------------------------------------------------------------------
|
sahiljajodia01/queueingApp-Web-Server | queues/models.py | from django.db.models import CharField, BooleanField, OneToOneField
from django.db.models import ManyToManyField, IntegerField, DateTimeField
from django.db.models import TimeField, FileField, Model, CASCADE, ForeignKey
from django_mysql.models import ListCharField
from django.contrib.auth.models import User
# from queueingApp import settings
class Location(Model):
    """Physical campus location (floor / department / room) of a queue or teacher."""
    floor = IntegerField(null=True)
    department = CharField(max_length=10, null=True)
    room = CharField(max_length=20, null=True)
    updated_at = DateTimeField(auto_now=True)  # bumped on every save

    def __str__(self):
        return "{} Floor - {} Dept. - {}".format(self.floor, self.department, self.room)
class Queue(Model):
    """A submission queue for one subject at one location.

    ``queueItems`` holds student sapIDs (max 11 chars each) in FIFO order.
    """
    maxLength = IntegerField(null=True, blank=True, default=200)
    isEmpty = BooleanField(default=True)
    isFull = BooleanField(default=False)
    size = IntegerField(null=True, blank=True)
    # startTime = TimeField(auto_now_add=True)
    startTime = TimeField(null=True, blank=True)
    avgTime = TimeField(null=True, blank=True)
    endTime = TimeField(null=True, blank=True)
    subject = CharField(max_length=100, null=True)
    lock = BooleanField(default=False)
    flag = IntegerField(default=0)  # 1 once the queue has been announced (see views)
    created_at = DateTimeField(auto_now_add=True)
    updated_at = DateTimeField(auto_now=True)
    queueItems = ListCharField(
        base_field=CharField(max_length=11),
        size=100,
        max_length=100 * 12,
        null=True,
        blank=True
    )
    location = OneToOneField(Location, on_delete=CASCADE, related_name="queue_location", null=True)

    def __str__(self):
        # queueItems is nullable: len(None) raised TypeError before this guard.
        items = self.queueItems or []
        return "{} - {} items".format(self.subject, len(items))
# class Teacher(Model):
# name = CharField(max_length=100)
# isFree = BooleanField(default=False)
# sapId = IntegerField(unique=True)
# photo = FileField(null=True, blank=True)
# subject = CharField(max_length=100)
# created_at = DateTimeField(auto_now_add=True)
# updated_at = DateTimeField(auto_now=True)
# loation = OneToOneField(Location, on_delete=CASCADE)
# queue = ManyToManyField(Queue, blank=True)
# def __str__(self):
# return "{}".format(self.name)
# class Student(Model):
# name = CharField(max_length=100)
# sapID = IntegerField(unique=True)
# department = CharField(max_length=10)
# year = CharField(max_length=2)
# div = CharField(max_length=1)
# batch = CharField(max_length=2)
# subscription = ManyToManyField(Teacher, blank=True)
# inQueue = BooleanField(default=False)
# created_at = DateTimeField(auto_now_add=True)
# updated_at = DateTimeField(auto_now=True)
# photo = FileField(null=True, blank=True)
# def __str__(self):
# return "{}".format(self.name)
# class UserProfile(Model):
# USER_TYPES = (
# (0, 'Students'),
# (1, 'Teachers')
# )
# user = OneToOneField(User, on_delete=CASCADE)
# user_type = IntegerField(null=True, choices=USER_TYPES)
# name = CharField(max_length=100, blank=True, null=True)
# sapId = IntegerField(unique=True, blank=True, null=True)
# photo = FileField(null=True, blank=True, upload_to="images/")
# created_at = DateTimeField(auto_now_add=True)
# updated_at = DateTimeField(auto_now=True)
# def __str__(self):
# return "{}".format(self.name)
# class Meta:
# abstract = True
# class TeacherProfile(Model):
# loation = OneToOneField(Location, on_delete=CASCADE)
# queue = ManyToManyField(Queue, blank=True)
# subject = CharField(max_length=100, blank=True, null=True)
# isFree = BooleanField(default=False)
# class Meta:
# abstract = True
# class StudentProfile(Model):
# department = CharField(max_length=10, blank=True, null=True)
# year = CharField(max_length=2, blank=True, null=True)
# batch = CharField(max_length=2, blank=True, null=True)
# # subscription = ManyToManyField(TeacherProfile, blank=True)
# inQueue = BooleanField(default=False)
# class Meta:
# abstract = True
# class Profile(TeacherProfile, StudentProfile, UserProfile):
# USERNAME_FIELD = 'sapId'
class Teacher(Model):
    """Teacher profile linked 1:1 to a Django auth User."""
    user = OneToOneField(User, on_delete=CASCADE, related_name='teacher')
    # FCM device registration id used for push notifications.
    register_id = CharField(max_length=250, null=True, blank=True)
    name = CharField(max_length=100, null=True)
    isFree = BooleanField(default=False)
    sapId = CharField(unique=True, null=True, max_length=11)
    photo = FileField(null=True, blank=True)
    # subject = CharField(max_length=100, null=True)
    # Nullable list of subject names taught (django_mysql list field).
    subject = ListCharField(
        base_field=CharField(max_length=50),
        size=15,
        max_length=15 * 51,
        null=True,
        blank=True
    )
    created_at = DateTimeField(auto_now_add=True)
    updated_at = DateTimeField(auto_now=True)
    location = OneToOneField(Location, on_delete=CASCADE, related_name='location', null=True)
    queue = ManyToManyField(Queue, blank=True, related_name='queue')

    def __str__(self):
        return "{}".format(self.name)
class Student(Model):
    """Student profile linked 1:1 to a Django auth User."""
    user = OneToOneField(User, on_delete=CASCADE, related_name='student')
    # FCM device registration id used for push notifications.
    register_id = CharField(max_length=250, null=True, blank=True)
    name = CharField(max_length=100, null=True)
    sapID = CharField(unique=True, null=True, max_length=11)
    department = CharField(max_length=10, null=True)
    year = CharField(max_length=2, null=True)
    div = CharField(max_length=1, null=True)
    batch = CharField(max_length=2, null=True)
    # Teachers this student follows; reverse accessor is teacher.subscribers.
    subscription = ManyToManyField(Teacher, blank=True, related_name='subscribers')
    inQueue = BooleanField(default=False)
    created_at = DateTimeField(auto_now_add=True)
    updated_at = DateTimeField(auto_now=True)
    photo = FileField(null=True, blank=True)

    def __str__(self):
        return "{}".format(self.name)
# mysqltestserver
# @receiver(post_save, sender=User)
# def create_user_teacher_profile(sender, instance, created, **kwargs):
# if created:
# Teacher.objects.create(user=instance)
#
#
# @receiver(post_save, sender=User)
# def save_user_teacher_profile(sender, instance, **kwargs):
# instance.teacher.save()
#
#
# @receiver(post_save, sender=User)
# def create_user_student_profile(sender, instance, created, **kwargs):
# if created:
# Student.objects.create(user=instance)
#
#
# @receiver(post_save, sender=User)
# def save_user_student_profile(sender, instance, **kwargs):
# instance.sudent.save()
class Token(Model):
    """Email-verification token issued to a user at registration."""
    user = OneToOneField(User, on_delete=CASCADE, related_name='token')
    token = CharField(max_length=200, null=True, blank=True)
    valid = BooleanField(default=False)  # flips true once the token is confirmed

    def __str__(self):
        # NOTE(review): token is nullable; str() on an instance with token=None
        # raises TypeError -- confirm all Tokens are created with a value.
        return self.token
|
sahiljajodia01/queueingApp-Web-Server | queues/serializers.py | from rest_framework import serializers
from .models import Location, Teacher, Student, Queue, Token
from django.contrib.auth.models import User
from .tokens import account_activation_token
from django.core.mail import send_mail
from queueing_app import settings
from rest_framework.response import Response
from django.contrib.auth.hashers import check_password
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.exceptions import ValidationError
class LocationSerializer(serializers.ModelSerializer):
    """Serialize Location rows; updated_at is server-maintained and read-only."""

    class Meta:
        model = Location
        fields = ('id', 'room', 'department', 'floor', 'updated_at')
        read_only_fields = ('updated_at',)

    def create(self, validated_data):
        new_location = Location(
            room=validated_data['room'],
            department=validated_data['department'],
            floor=validated_data['floor'],
        )
        new_location.save()
        return new_location
class UserSerializer(serializers.ModelSerializer):
    """Create/update auth users; create() also emails a verification token."""

    class Meta:
        model = User
        fields = ('id', 'username', 'password', 'email')

    def create(self, validated_data):
        user = User(username=validated_data['username'], email=validated_data['email'])
        user.set_password(validated_data['password'])  # store the hash, never the raw value
        user.save()
        subject = "Email verification for django"
        message = account_activation_token.make_token(user)
        token = Token.objects.create(user=user, token=message)
        token.save()
        send_mail(subject, message, settings.EMAIL_HOST_USER, [user.email])
        return user

    def update(self, instance, validated_data):
        instance.username = validated_data.get('username', instance.username)
        # Only re-hash when a new password was actually supplied. The old code
        # fell back to the stored value and re-hashed the existing hash,
        # silently locking the account out.
        if 'password' in validated_data:
            instance.set_password(validated_data['password'])
        instance.save()
        return instance
class TeacherSerializer(serializers.ModelSerializer):
    """Serialize Teacher profiles; ``user`` arrives as a primary key."""
    user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), allow_null=True)

    class Meta:
        model = Teacher
        fields = ('id', 'name', 'isFree', 'sapId', 'subject', 'user', 'created_at', 'updated_at', 'location', 'register_id', 'queue')
        read_only_fields = ('created_at', 'updated_at')

    def create(self, validated_data):
        teacher = Teacher(
            user=validated_data['user'],
            name=validated_data['name'],
            isFree=validated_data['isFree'],
            subject=validated_data['subject'],
            sapId=validated_data['sapId'],
            location=validated_data['location'],
            register_id=validated_data['register_id']
        )
        teacher.save()
        return teacher

    def update(self, instance, validated_data):
        # .get(...) with the current value as default avoids KeyError when a
        # partial update (PATCH) omits one of these fields.
        instance.user = validated_data.get('user', instance.user)
        instance.location = validated_data.get('location', instance.location)
        instance.register_id = validated_data.get('register_id', instance.register_id)
        instance.save()
        return instance
class StudentSerializer(serializers.ModelSerializer):
    """Serialize Student profiles; ``user`` arrives as a primary key."""
    user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())

    class Meta:
        model = Student
        fields = ('id', 'name', 'department', 'sapID', 'year', 'user', 'created_at', 'updated_at', 'batch', 'subscription', 'register_id')
        read_only_fields = ('created_at', 'updated_at')

    def create(self, validated_data):
        # All listed keys must be present; missing keys raise KeyError.
        student = Student(
            user=validated_data['user'],
            name=validated_data['name'],
            department=validated_data['department'],
            year=validated_data['year'],
            sapID=validated_data['sapID'],
            # div=validated_data['div'],
            batch=validated_data['batch'],
            register_id=validated_data['register_id'],
        )
        student.save()
        return student
class QueueSerializer(serializers.ModelSerializer):
    """Serialize Queue rows including the FIFO ``queueItems`` sapID list."""

    class Meta:
        model = Queue
        fields = ('id', 'isEmpty', 'isFull', 'size', 'maxLength', 'startTime', 'endTime', 'avgTime',
                  'subject', 'lock', 'created_at', 'updated_at', 'queueItems', 'location', 'flag')
        read_only_fields = ('created_at', 'updated_at')

    def create(self, validated_data):
        queue = Queue(
            # size=validated_data['size'],
            maxLength=validated_data['maxLength'],
            startTime=validated_data['startTime'],
            endTime=validated_data['endTime'],
            subject=validated_data['subject'],
            queueItems=validated_data['queueItems'],
            location=validated_data['location'],
        )
        queue.save()
        return queue

    def update(self, instance, validated_data):
        # .get(...) with the current value as default: the old direct indexing
        # raised KeyError whenever a partial update omitted any field.
        instance.isEmpty = validated_data.get('isEmpty', instance.isEmpty)
        instance.isFull = validated_data.get('isFull', instance.isFull)
        instance.size = validated_data.get('size', instance.size)
        instance.maxLength = validated_data.get('maxLength', instance.maxLength)
        instance.startTime = validated_data.get('startTime', instance.startTime)
        instance.endTime = validated_data.get('endTime', instance.endTime)
        instance.avgTime = validated_data.get('avgTime', instance.avgTime)
        instance.subject = validated_data.get('subject', instance.subject)
        instance.lock = validated_data.get('lock', instance.lock)
        instance.location = validated_data.get('location', instance.location)
        instance.save()
        return instance
class TokenSerializer(serializers.ModelSerializer):
    """Expose a user's verification token and its validity flag."""

    class Meta:
        model = Token
        fields = ('id', 'token', 'valid')
#Janice code
class UserLoginSerializer(serializers.ModelSerializer):
    """Validate username/password credentials against the auth user table.

    validate() returns the matching User on success and raises
    ValidationError (400) otherwise.
    """
    username = serializers.CharField(required=True)
    password = serializers.CharField(required=True, min_length=8)
    email = serializers.EmailField(required=True)

    def validate(self, validated_data):
        hashed_pass = validated_data['password']
        # objects.get raises DoesNotExist rather than returning None, so the
        # old `if not user` check was unreachable and unknown users caused
        # an unhandled exception (HTTP 500) instead of a validation error.
        try:
            user = User.objects.get(username=validated_data['username'])
        except User.DoesNotExist:
            raise serializers.ValidationError("User Does not exist")
        if check_password(hashed_pass, user.password):
            return user
        raise serializers.ValidationError("Incorrect Password")

    class Meta:
        model = User
        fields = ['id', 'username', 'password', 'email']
class StudentLoginSerializer(serializers.ModelSerializer):
    """Validate a student's sapID/password pair; returns the Student on success."""
    sapID = serializers.CharField(min_length=11, required=True)
    password = serializers.CharField(write_only=True)

    class Meta:
        model = Student
        fields = ('id', 'sapID', 'password')

    def validate(self, data):
        raw_password = data['password']
        try:
            student = Student.objects.get(sapID=data['sapID'])
        except ObjectDoesNotExist:
            raise ValidationError("User does not exist")
        # Compare the submitted password against the linked auth user's hash.
        if not check_password(raw_password, student.user.password):
            raise ValidationError("Incorrect password")
        return student
class TeacherLoginSerializer(serializers.ModelSerializer):
    """Validate a teacher's sapId/password pair; returns the Teacher on success."""
    sapId = serializers.CharField(min_length=11, required=True)
    password = serializers.CharField(write_only=True)

    class Meta:
        model = Teacher
        fields = ('id', 'sapId', 'password')

    def validate(self, data):
        raw_password = data['password']
        try:
            teacher = Teacher.objects.get(sapId=data['sapId'])
        except ObjectDoesNotExist:
            raise ValidationError("User does not exist")
        # Compare the submitted password against the linked auth user's hash.
        if not check_password(raw_password, teacher.user.password):
            raise ValidationError("Incorrect sapid/password")
        return teacher
|
sahiljajodia01/queueingApp-Web-Server | queues/urls.py | <reponame>sahiljajodia01/queueingApp-Web-Server
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from .views import LocationList, LocationDetails, TeacherList, TeacherDetail, UserList, UserDetail
from .views import StudentList, QueueList, QueueAddItems, QueueDeleteItems, QueueDeleteSpecificItems
from .views import TeacherNameGet, StudentSubscription, GetItemIndex, DeleteStudentSubscription, YouAreNextNotification
from .views import GetSubjectTeachers, TokenMatch, UserLogin, StudentLogin, TeacherLogin, AddSubjects, SendNotificationToSubscribers
from .views import GetTeacherLocatonFromName, QueueDetails, GetTeacherQueues, TeacherAddingQueues, TeacherDeletingQueues
# URL routes for the queues app.
# BUG FIX: this was declared with `{ ... }` -- a SET literal. URL resolution
# tries patterns in order, and set iteration order is not guaranteed, so
# overlapping patterns could resolve non-deterministically. Use a list.
urlpatterns = [
    url(r'^queues/$', LocationList.as_view(), name='create'),
    url(r'^queues/(?P<pk>[0-9]+)/$', LocationDetails.as_view(), name='details'),
    url(r'^queues/teacher/$', TeacherList.as_view()),
    url(r'^queues/teacher/(?P<pk>[0-9]+)/$', TeacherDetail.as_view()),
    url(r'^queues/users/$', UserList.as_view()),
    url(r'^queues/users/(?P<pk>[0-9]+)/$', UserDetail.as_view()),
    url(r'^queues/student/$', StudentList.as_view()),
    url(r'^queues/student/(?P<pk>[0-9]+)/$', StudentSubscription.as_view()),
    url(r'^queues/queue/$', QueueList.as_view()),
    url(r'^queues/queue/(?P<pk>[0-9]+)/$', QueueDetails.as_view()),
    url(r'^queues/queue/(?P<pk>[0-9]+)/delete/$', QueueDeleteItems.as_view()),
    url(r'^queues/queue/(?P<pk>[0-9]+)/deletespecific/$', QueueDeleteSpecificItems.as_view()),
    url(r'^queues/teacher/name/(?P<name>[\w ]+)/$', TeacherNameGet.as_view()),
    url(r'^queues/queue/(?P<pk>[0-9]+)/index/$', GetItemIndex.as_view()),
    # url(r'^queues/student/subs$', StudentList.as_view()),
    url(r'^queues/queue/(?P<pk>[0-9]+)/next/$', YouAreNextNotification.as_view()),
    url(r'^queues/student/(?P<pk>[0-9]+)/deletesub/$', DeleteStudentSubscription.as_view()),
    url(r'^queues/subject/$', GetSubjectTeachers.as_view()),
    url(r'^queues/users/(?P<pk>[0-9]+)/token/$', TokenMatch.as_view()),
    url(r'^queues/users/login/$', UserLogin.as_view()),
    url(r'^queues/student/login/$', StudentLogin.as_view()),
    url(r'^queues/teacher/login/$', TeacherLogin.as_view()),
    url(r'^queues/teacher/(?P<pk>[0-9]+)/subject/$', AddSubjects.as_view()),
    url(r'^queues/queue/notification/$', SendNotificationToSubscribers.as_view()),
    url(r'^queues/teacher/name/$', GetTeacherLocatonFromName.as_view()),
    url(r'^queues/queue/(?P<pk>[0-9]+)/add/$', QueueAddItems.as_view()),
    url(r'^queues/teacher/getqueues/$', GetTeacherQueues.as_view()),
    url(r'^queues/teacher/(?P<pk>[0-9]+)/addqueue/$', TeacherAddingQueues.as_view()),
    url(r'^queues/teacher/(?P<pk>[0-9]+)/deletequeue/$', TeacherDeletingQueues.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
sahiljajodia01/queueingApp-Web-Server | queues/views.py | <gh_stars>0
from rest_framework import generics
from .serializers import LocationSerializer, UserSerializer, TeacherSerializer, StudentSerializer
from .serializers import QueueSerializer, UserLoginSerializer, TeacherLoginSerializer, StudentLoginSerializer
from .models import Location, Teacher, Student, Queue, Token
from django.contrib.auth.models import User
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django.http import Http404
from pyfcm import FCMNotification
import json
class LocationList(generics.ListCreateAPIView):
    """List all locations or create a new one."""
    queryset = Location.objects.all()
    serializer_class = LocationSerializer

    def perform_create(self, serializer):
        # Was misspelled `platfrom_create`, so DRF never invoked it. The body
        # matches the default hook exactly, so fixing the name is safe.
        serializer.save()
class LocationDetails(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single Location by pk."""
    queryset = Location.objects.all()
    serializer_class = LocationSerializer
# class UserList(generics.ListCreateAPIView):
# queryset = User.objects.all()
# serializer_class = UserSerializer
#
# def platform_create(self, serializer):
# serializer.save()
#
#
# class UserDetail(generics.RetrieveUpdateDestroyAPIView):
# queryset = User.objects.all()
# serializer_class = UserSerializer
class UserList(APIView):
    """List every auth user or register a new one."""

    def get(self, request):
        serialized = UserSerializer(User.objects.all(), many=True)
        return Response(serialized.data)

    def post(self, request):
        serialized = UserSerializer(data=request.data)
        if not serialized.is_valid():
            return Response(serialized.errors, status=status.HTTP_400_BAD_REQUEST)
        serialized.save()
        return Response(serialized.data, status=status.HTTP_201_CREATED)
class UserDetail(APIView):
    """Retrieve, update, or delete a single auth user by pk."""

    def get_object(self, pk):
        # Map a missing pk to HTTP 404 instead of an unhandled DoesNotExist.
        try:
            return User.objects.get(pk=pk)
        except User.DoesNotExist:
            raise Http404

    def get(self, request, pk):
        user = self.get_object(pk)
        serializer = UserSerializer(user)
        return Response(serializer.data)

    def put(self, request, pk):
        user = self.get_object(pk)
        serializer = UserSerializer(user, data=request.data)
        # token = Token.objects.get(user=user)
        # tokenSerializer = TokenSerializer(token)
        # print(request.data['token'] == token.token)
        # valid_data = serializer.data
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        # if tokenSerializer.data['token'] == token.token:
        #     token.valid = True
        #     # token.valid = True
        #     return Response(token.valid)
        # else:
        #     return Response(token.valid)
        # serializer.save()
        # if serializer.is_valid():
        #     serializer.save()
        #     return Response(serializer.data)
        # return Response(tokenSerializer.data)
        # else:
        #     return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk):
        user = self.get_object(pk)
        user.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class TeacherList(generics.ListCreateAPIView):
    """List all teachers or create one."""
    queryset = Teacher.objects.all()
    serializer_class = TeacherSerializer

    # NOTE(review): misspelling of DRF's `perform_create`, so this hook is
    # never invoked. Renaming it would pass `self.request.location`, which
    # is not an attribute of the request -- fix the data source before
    # enabling this override.
    def platfrom_create(self, serializer):
        serializer.save(user=self.request.user, location=self.request.location)
class TeacherDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single Teacher by pk."""
    queryset = Teacher.objects.all()
    serializer_class = TeacherSerializer
class StudentList(generics.ListCreateAPIView):
    """List all students or create one."""
    queryset = Student.objects.all()
    serializer_class = StudentSerializer

    # NOTE(review): misspelling of DRF's `perform_create`; never invoked, so
    # creates currently use the serializer's default save(). Confirm intent
    # before renaming (renaming would start passing user=self.request.user).
    def platfrom_create(self, serializer):
        serializer.save(user=self.request.user)
# class StudentDetail(generics.RetrieveUpdateDestroyAPIView):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
class StudentSubscription(APIView):
    """Retrieve a student, replace their teacher subscriptions, or delete them."""

    def get_object(self, pk):
        try:
            return Student.objects.get(pk=pk)
        except Student.DoesNotExist:
            raise Http404

    def get(self, request, pk):
        student = self.get_object(pk)
        serializer = StudentSerializer(student)
        return Response(serializer.data)

    def put(self, request, pk):
        # Body carries a JSON-encoded list of teacher names under 'teacherNames'.
        student = self.get_object(pk)
        serializer = StudentSerializer(student)
        teacherIds = []
        teacherName = json.loads(request.data['teacherNames'])
        for x in teacherName:
            teacher = Teacher.objects.get(name=x)
            teacherIds.append(teacher.id)
        # Direct assignment to a ManyToMany field is prohibited (Django >= 2.0
        # raises TypeError); .set() replaces the relation in one call.
        student.subscription.set(teacherIds)
        student.save()
        return Response(serializer.data)

    def delete(self, request, pk):
        student = self.get_object(pk)
        student.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class DeleteStudentSubscription(APIView):
    """Remove the named teachers from a student's subscriptions."""

    def get_object(self, pk):
        try:
            return Student.objects.get(pk=pk)
        except Student.DoesNotExist:
            raise Http404

    def put(self, request, pk):
        # Body carries a JSON-encoded list of teacher names under 'teacherNames'.
        student = self.get_object(pk)
        serializer = StudentSerializer(student)
        teacherName = json.loads(request.data['teacherNames'])
        for x in teacherName:
            teacher = Teacher.objects.get(name=x)
            # A related manager has no pop()/index() -- the old code raised
            # AttributeError. remove() is the M2M API for unsubscribing and
            # is a no-op if the relation is absent.
            student.subscription.remove(teacher)
        student.save()
        return Response(serializer.data)
# class QueueList(generics.ListCreateAPIView):
# queryset = Queue.objects.all()
# serializer_class = QueueSerializer
#
# def platfrom_create(self, serializer):
# serializer.save()
class QueueList(APIView):
    """List every queue or create a new one."""
    # push_service = FCMNotification(api_key="<KEY>")

    def get(self, request):
        queue = Queue.objects.all()
        serializer = QueueSerializer(queue, many=True)
        return Response(serializer.data)

    def post(self, request):
        serializer = QueueSerializer(data=request.data)
        # teacherName = request.data['teacherName']
        # teacher = Teacher.objects.get(name=teacherName)
        # subscribers = teacher.subscribers.all()
        # registration_ids = [x.register_id for x in subscribers]
        # message_title = "First Notification"
        # message_body = "Getting all the list of queues"
        # data_message = {
        #     "click_action": "StudentScreenActivity"
        # }
        # self.push_service.notify_multiple_devices(registration_ids=registration_ids, message_title=message_title, message_body=message_body, data_message=data_message)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# class QueueDetail(generics.RetrieveUpdateDestroyAPIView):
# queryset = Queue.objects.all()
# serializer_class = QueueSerializer
class QueueDetails(APIView):
    """Retrieve, update, or delete one queue by pk.

    NOTE(review): GET also fires an FCM push to two hard-coded device
    registration ids -- this looks like leftover debug code (a GET should
    not have side effects); confirm and remove.
    """
    push_service = FCMNotification(api_key="<KEY>")

    def get_object(self, pk):
        try:
            return Queue.objects.get(pk=pk)
        except Queue.DoesNotExist:
            raise Http404

    def get(self, request, pk):
        # Hard-coded FCM device tokens (debug leftover -- see class note).
        registration_ids = ["d-Di8WDILuw:APA91bE02qGEkVUWelt_frw67UeaaD9L5rAEKDHW79zQ_p7J10jTusa0dHIEJ8Op3IWHLX37jQioNAMHSEro0RpyUwsg8035kV4IondFC_rHU1ObbX7eBhwNqwGIq94Bv2ZPvm92uiBG",
                            "eEq2eaw8sGE:APA91bFDG6Mt9X2t5fx-HCLmVCcqlUn6qMXVblolXgcBBfu0gvhJo0SKLeU37SgamSLri-5SfNOLWm_BuoXzlLKduK05FF_VlHaYjq4awz9Z3QcsZmpz8hhhCrILQtX7Ydh6fhg_G9gw"]
        message_title = "First Notification"
        message_body = "Getting all the list of queues"
        data_message = {
            "click_action": "StudentScreenActivity"
        }
        self.push_service.notify_multiple_devices(registration_ids=registration_ids, message_title=message_title, message_body=message_body, data_message=data_message)
        queue = self.get_object(pk)
        serializer = QueueSerializer(queue)
        return Response({"data": serializer.data, "items": queue.queueItems})

    def put(self, request, pk):
        queue = self.get_object(pk)
        serializer = QueueSerializer(queue, data=request.data)
        # if serializer.is_valid():
        #     queue.queueItems.append(request.data['queueItems'])
        #     for x in queue.queueItems:
        #         print(x)
        #     # print(queue.queueItems)
        #     queue.save()
        #     return Response({"data": serializer.data, "items": queue.queueItems})
        # else:
        #     return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk):
        queue = self.get_object(pk)
        queue.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class QueueDeleteItems(APIView):
    """Pop the head of a queue (FIFO dequeue) and return the remaining items."""

    def get_object(self, pk):
        try:
            return Queue.objects.get(pk=pk)
        except Queue.DoesNotExist:
            raise Http404

    def put(self, request, pk):
        queue = self.get_object(pk)
        # Guard the empty/null case: pop(0) on an empty list raised an
        # unhandled IndexError (HTTP 500); report a clean 400 instead.
        if not queue.queueItems:
            return Response({"detail": "queue is empty"}, status=status.HTTP_400_BAD_REQUEST)
        deleted = queue.queueItems.pop(0)
        queue.save()
        return Response({"items": queue.queueItems, "deleted": deleted})
class QueueDeleteSpecificItems(APIView):
    """Remove one specific item (request.data['element']) from a queue."""

    def get_object(self, pk):
        try:
            return Queue.objects.get(pk=pk)
        except Queue.DoesNotExist:
            raise Http404

    def put(self, request, pk):
        queue = self.get_object(pk)
        element = request.data['element']
        # Guard: list.index() raised an unhandled ValueError (HTTP 500) when
        # the element was absent; also covers a null queueItems field.
        if not queue.queueItems or element not in queue.queueItems:
            return Response({"detail": "element not in queue"}, status=status.HTTP_400_BAD_REQUEST)
        deleted = queue.queueItems.pop(queue.queueItems.index(element))
        queue.save()
        return Response({"items": queue.queueItems, "deleted": deleted})
class TeacherNameGet(APIView):
    """Fetch a single teacher by display name (404 when unknown)."""

    def get_object(self, name):
        try:
            return Teacher.objects.get(name=name)
        except Teacher.DoesNotExist:
            raise Http404

    def get(self, request, name):
        return Response(TeacherSerializer(self.get_object(name)).data)
class GetItemIndex(APIView):
    """Report a student's 1-based position in a queue (0 when not queued)."""

    def get_object(self, pk):
        try:
            return Queue.objects.get(pk=pk)
        except Queue.DoesNotExist:
            raise Http404

    def put(self, request, pk):
        queue = self.get_object(pk)
        serializer = QueueSerializer(queue)
        sap = request.data['sapID']
        position = queue.queueItems.index(sap) if sap in queue.queueItems else -1
        return Response({
            "data": serializer.data,
            "index": position + 1
        })
class YouAreNextNotification(APIView):
    """Push an FCM notification to the single student named in the body."""
    push_service = FCMNotification(api_key="<KEY>")

    def get_object(self, pk):
        try:
            return Queue.objects.get(pk=pk)
        except Queue.DoesNotExist:
            raise Http404

    def put(self, request, pk):
        # Look the student up in the DB: the old code constructed an unsaved
        # Student(sapID=...) whose register_id was always None, so the
        # notification silently went nowhere.
        try:
            student = Student.objects.get(sapID=request.data['sapID'])
        except Student.DoesNotExist:
            raise Http404
        message_title = "First Notification"
        message_body = "Getting all the list of queues"
        data_message = {
            "click_action": "StudentScreenActivity"
        }
        registration_id = student.register_id
        self.push_service.notify_single_device(registration_id=registration_id, message_title=message_title, message_body=message_body, data_message=data_message)
        return Response({"response": "valid"})
class GetSubjectTeachers(APIView):
    """Return the names of all teachers who teach request.data['name']."""

    def put(self, request):
        teacher = Teacher.objects.all()
        teacherNames = []
        for x in teacher:
            # `subject` is a nullable list field; `in None` raised TypeError,
            # so one teacher with no subjects broke the whole endpoint.
            if x.subject and request.data["name"] in x.subject:
                teacherNames.append(x.name)
        print(teacherNames)
        return Response({"teachers": teacherNames})
class TokenMatch(APIView):
    """Check whether a submitted token matches the user's stored one."""

    def get_object(self, pk):
        try:
            return User.objects.get(pk=pk)
        except User.DoesNotExist:
            raise Http404

    def put(self, request, pk):
        user = self.get_object(pk)
        stored = Token.objects.get(user=user)
        matches = request.data['token'] == stored.token
        print(matches)
        # String booleans kept for client compatibility.
        return Response({"valid": "true"} if matches else {"valid": "false"})
#Janice code
class UserLogin(generics.ListCreateAPIView):
    """Authenticate a user: 200 with user data on success, 400 otherwise."""
    serializer_class = UserLoginSerializer
    queryset = User.objects.all()

    def post(self, request):
        serializer = UserLoginSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        return Response(serializer.data, status=status.HTTP_200_OK)
class StudentLogin(APIView):
    """Authenticate a student by sapID/password."""
    serializer_class = StudentLoginSerializer

    def post(self, request):
        serializer = StudentLoginSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
        return Response(serializer.data, status.HTTP_200_OK)
class TeacherLogin(generics.ListCreateAPIView):
    """Authenticate a teacher by sapId/password."""
    serializer_class = TeacherLoginSerializer
    queryset = Teacher.objects.all()

    def post(self, request):
        serializer = TeacherLoginSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
        return Response(serializer.data, status.HTTP_200_OK)
class AddSubjects(APIView):
    """Append request.data['subject'] to a teacher's subject list."""

    def get_object(self, pk):
        try:
            return Teacher.objects.get(pk=pk)
        except Teacher.DoesNotExist:
            raise Http404

    def put(self, request, pk):
        teacher = self.get_object(pk)
        # `subject` is nullable: appending to None raised AttributeError for
        # any teacher who had no subjects yet.
        if teacher.subject is None:
            teacher.subject = []
        teacher.subject.append(request.data['subject'])
        teacher.save()
        return Response({"subject": teacher.subject})
class SendNotificationToSubscribers(APIView):
    """Notify every subscriber of a teacher that a queue has started."""
    push_service = FCMNotification(
        api_key="<KEY>")

    def get(self, request):
        queue = Queue.objects.all()
        serializer = QueueSerializer(queue, many=True)
        return Response(serializer.data)

    def post(self, request):
        # Expects {'id': <queue pk>, 'teacherName': <teacher display name>}.
        queue = Queue.objects.get(id=request.data['id'])
        teacherName = request.data['teacherName']
        teacher = Teacher.objects.get(name=teacherName)
        subscribers = teacher.subscribers.all()
        # FCM device tokens of every subscribed student.
        registration_ids = [x.register_id for x in subscribers]
        message_title = "Queue Started"
        message_body = "Submission of " + queue.subject + " is started by Prof. " + teacher.name
        data_message = {
            "sound": "default"
        }
        queue.flag = 1  # mark the queue as announced
        queue.save()
        self.push_service.notify_multiple_devices(registration_ids=registration_ids, message_title=message_title, message_body=message_body, data_message=data_message)
        return Response({"response": "Notification sent"})
class GetTeacherLocatonFromName(APIView):
    """Return the location (string form) of the teacher with the given name."""

    def put(self, request):
        # Map an unknown name to 404 instead of an unhandled DoesNotExist.
        try:
            teacher = Teacher.objects.get(name=request.data['name'])
        except Teacher.DoesNotExist:
            raise Http404
        # A Location model instance is not JSON-serializable by DRF's
        # Response; render it through the model's __str__ instead.
        return Response({"location": str(teacher.location)})
class QueueAddItems(APIView):
    """Append request.data['queueItems'] (one sapID) to the tail of a queue."""

    def get_object(self, pk):
        try:
            return Queue.objects.get(pk=pk)
        except Queue.DoesNotExist:
            raise Http404

    def put(self, request, pk):
        queue = self.get_object(pk)
        serializer = QueueSerializer(queue, data=request.data)
        if serializer.is_valid():
            # queueItems is nullable; appending to None raised AttributeError.
            if queue.queueItems is None:
                queue.queueItems = []
            queue.queueItems.append(request.data['queueItems'])
            queue.save()
            return Response({"data": serializer.data, "items": queue.queueItems})
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# class AddTeacherSubject(APIView):
# def get_object(self, pk):
# try:
# return Teacher.objects.get(pk=pk)
# except Teacher.DoesNotExist:
# raise Http404
#
# def put(self, request, pk):
# teacher = self.get_object(pk)
class GetTeacherQueues(APIView):
    """Return serialized data for every queue attached to a teacher."""

    def post(self, request):
        teacher = Teacher.objects.get(name=request.data['teacherName'])
        payload = []
        for q in teacher.queue.all():
            print(q)
            payload.append(QueueSerializer(q).data)
        return Response(payload)
class TeacherAddingQueues(APIView):
    """Attach an existing Queue (id taken from the request body) to a teacher."""

    def get_object(self, pk):
        """Fetch the Teacher with primary key *pk*, raising 404 if absent."""
        try:
            return Teacher.objects.get(pk=pk)
        except Teacher.DoesNotExist:
            raise Http404

    def put(self, request, pk):
        teacher = self.get_object(pk)
        teacher.queue.add(Queue.objects.get(pk=request.data['id']))
        teacher.save()
        return Response({"added": "true"})
# teacher.queue = []
# if len(teacher.queue) == 0:
# teacher.queue[0] = request.data['id']
# else:
# length = len(teacher.queue)
# teacher.queue[length] = request.data['id']
class TeacherDeletingQueues(APIView):
    """Detach a Queue (id taken from the request body) from a teacher."""

    def get_object(self, pk):
        """Fetch the Teacher with primary key *pk*, raising 404 if absent."""
        try:
            return Teacher.objects.get(pk=pk)
        except Teacher.DoesNotExist:
            raise Http404

    def put(self, request, pk):
        teacher = self.get_object(pk)
        teacher.queue.remove(Queue.objects.get(pk=request.data['id']))
        teacher.save()
        return Response({"deleted": "true"})
|
sahiljajodia01/queueingApp-Web-Server | queues/migrations/0001_initial.py | <reponame>sahiljajodia01/queueingApp-Web-Server
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-05-29 14:49
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_mysql.models
class Migration(migrations.Migration):
    # Auto-generated initial schema (Django 1.11.8, 2018-05-29).
    # Applied migrations are schema history — do not hand-edit.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Physical location (floor/department/room) shared by Teacher and Queue.
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('floor', models.IntegerField(null=True)),
                ('department', models.CharField(max_length=10, null=True)),
                ('room', models.CharField(max_length=20, null=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        # A submission queue; queueItems is a MySQL list column of char(11)
        # entries (max 100 items — see django_mysql.ListCharField).
        migrations.CreateModel(
            name='Queue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('maxLength', models.IntegerField(blank=True, default=200, null=True)),
                ('isEmpty', models.BooleanField(default=True)),
                ('isFull', models.BooleanField(default=False)),
                ('size', models.IntegerField(blank=True, null=True)),
                ('startTime', models.TimeField(blank=True, null=True)),
                ('avgTime', models.TimeField(blank=True, null=True)),
                ('endTime', models.TimeField(blank=True, null=True)),
                ('subject', models.CharField(max_length=100, null=True)),
                ('lock', models.BooleanField(default=False)),
                ('flag', models.IntegerField(default=0)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('queueItems', django_mysql.models.ListCharField(models.CharField(max_length=11), blank=True, max_length=1200, null=True, size=100)),
                ('location', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='queue_location', to='queues.Location')),
            ],
        ),
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('register_id', models.CharField(blank=True, max_length=250, null=True)),
                ('name', models.CharField(max_length=100, null=True)),
                ('sapID', models.CharField(max_length=11, null=True, unique=True)),
                ('department', models.CharField(max_length=10, null=True)),
                ('year', models.CharField(max_length=2, null=True)),
                ('div', models.CharField(max_length=1, null=True)),
                ('batch', models.CharField(max_length=2, null=True)),
                ('inQueue', models.BooleanField(default=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('photo', models.FileField(blank=True, null=True, upload_to='')),
            ],
        ),
        migrations.CreateModel(
            name='Teacher',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('register_id', models.CharField(blank=True, max_length=250, null=True)),
                ('name', models.CharField(max_length=100, null=True)),
                ('isFree', models.BooleanField(default=False)),
                ('sapId', models.CharField(max_length=11, null=True, unique=True)),
                ('photo', models.FileField(blank=True, null=True, upload_to='')),
                ('subject', django_mysql.models.ListCharField(models.CharField(max_length=50), blank=True, max_length=765, null=True, size=15)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('location', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='location', to='queues.Location')),
                ('queue', models.ManyToManyField(blank=True, related_name='queue', to='queues.Queue')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='teacher', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Auth token bound 1:1 to a Django user.
        migrations.CreateModel(
            name='Token',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('token', models.CharField(blank=True, max_length=200, null=True)),
                ('valid', models.BooleanField(default=False)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='token', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Student <-> Teacher subscription (reverse accessor: teacher.subscribers).
        migrations.AddField(
            model_name='student',
            name='subscription',
            field=models.ManyToManyField(blank=True, related_name='subscribers', to='queues.Teacher'),
        ),
        migrations.AddField(
            model_name='student',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='student', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
sahiljajodia01/queueingApp-Web-Server | queues/admin.py | from django.contrib import admin
from .models import Location, Student, Queue, Teacher, Token
# Expose every queueing model in the Django admin site.
for model in (Location, Student, Queue, Teacher, Token):
    admin.site.register(model)
|
SimBoex/HW5 | Utils_2_4.py | import pandas as pd
import networkx as nx
import pickle
import random
import matplotlib.pyplot as plt
import numpy as np
from heapq import heapify, heappush, heappop
from utils_2_3 import new_graph,dijkstra,traceback,create_path
#given a path and a graph it finds the edge with the smallest weight
def find_min(l, dG):
    """Return the edge in *l* whose 'weight' attribute in graph *dG* is smallest.

    Replaces a full O(n log n) sort with a single O(n) min() scan; ties are
    broken by first occurrence, exactly as the original stable sort did.
    Raises ValueError (instead of the original IndexError) if *l* is empty.
    """
    return min(l, key=lambda edge: dG.get_edge_data(*edge)["weight"])
#it creates a new directed weighted graph where a edge weight corrisponds to the sum of all the edges' weights of interactions happened between those two nodes
def new_graph_SumWeights(H, startingTime, endTime):
    """Build a directed graph where each edge's weight is the SUM of the
    weights of all interactions between the two nodes whose timestamps fall
    inside [startingTime, endTime].

    Fix: the original sorted every edge's interaction dict by weight before
    summing — a pointless O(k log k) per edge, since summation does not
    depend on order. (Weights appear to be numeric durations; if they are
    floats, the summation order change could alter the last bits — TODO
    confirm weights are integers.)
    """
    G = nx.DiGraph()
    for start_node, end_node in H.edges():
        diz = H.get_edge_data(start_node, end_node)
        # diz maps timestamp -> {"weight": w} for each interaction.
        t_weight = sum(attrs["weight"]
                       for t, attrs in diz.items()
                       if startingTime <= t <= endTime)
        if t_weight != 0:
            G.add_edge(start_node, end_node, weight=t_weight)
    return G
#it computes the list of edges we need to remove( considering their weights)
def Min_cut(G, user1, user2, startingTime, endTime, mapping):
    """Greedily compute a set of edges to remove so that user1 can no longer
    reach user2, within the time window [startingTime, endTime].

    Repeatedly finds a shortest path (Dijkstra), removes the lightest edge on
    it, and accumulates its weight into *cost*. Returns (cost, edges).

    NOTE(review): this greedy lightest-edge-per-path strategy is a heuristic,
    not necessarily a true minimum cut — confirm against requirements.
    """
    # Collapse multi-interactions into one summed-weight edge per node pair.
    G = new_graph_SumWeights(G, startingTime, endTime)
    cost = 0
    edges = []
    path = True
    while path:
        print("looking for a new path...")
        pred = dijkstra(user1, user2, G, mapping)
        # if user1 and user2 are not connected then pred[user2] == -1
        if pred[user2] == -1:
            print("the minimum number of links (considering their weight) is ", len(edges))
            return cost, edges
        seq = traceback(pred, user2, user1)
        l = create_path(seq)
        print("The path is :", l)
        # Remove the cheapest edge on this path and charge its weight.
        edge = find_min(l, G)
        diz = G.get_edge_data(*edge)
        cost += diz["weight"]
        edges.append(edge)
        # Visualize BEFORE removal so the doomed edge is still drawable.
        visualize_minCut(G, edges, seq)
        G.remove_edge(*edge)
    # Unreachable in practice: the loop only exits via the return above.
    return cost, edges
#it finds two users that are not in the same interval of time
def findUser(t1, t2, s1, s2, G):
    """Pick one user active only in window [t1, t2] and one active only in
    window [s1, s2] (symmetric difference of the two activity graphs)."""
    window_a = set(new_graph(G, t1, t2).nodes())
    window_b = set(new_graph(G, s1, s2).nodes())
    only_in_a = window_a - window_b
    only_in_b = window_b - window_a
    return only_in_a.pop(), only_in_b.pop()
#it visualize the path that links the nodes
def visualize_minCut(H, l, nodes):
    """Draw the subgraph induced by *nodes*, highlighting the path endpoints
    in red and the edges marked for removal (*l*) with red edge color.

    Side effect only: renders a matplotlib figure via plt.show().
    """
    print("Here, the path that links {} and {}".format(nodes[0], nodes[-1]))
    print()
    print("The starting and the end node are filled with red")
    # NOTE(review): this .format() call has no {} placeholders in the string,
    # so its arguments are silently ignored — confirm intended message.
    print("The link that will be removed,it is evidenced by a red edge".format(nodes[0], nodes[-1]))
    plt.figure(num=None, figsize=(15, 15), dpi=60)
    H = H.subgraph(nodes)
    pos = nx.spring_layout(H)
    # Edges slated for removal are drawn red, the rest black.
    edge_colors = ['red' if e in l else "black" for e in H.edges()]
    path = []  # per-node fill colors, aligned with H.nodes iteration order
    cont = 0  # NOTE(review): unused counter — candidate for removal
    for u in H.nodes:
        if u == nodes[0] or u == nodes[-1]:
            path.append("red")
        else:
            path.append("blue")
    nx.draw(H, pos, with_labels=True, edge_color=edge_colors, connectionstyle='arc3, rad = 0.1', arrowsize=30, node_size=2100)
    nx.draw_networkx_nodes(H, pos, node_color=path)
    edge_labels = nx.get_edge_attributes(H, 'weight')
    # Copy labels into a plain dict keyed by (u, v).
    diz = {}
    for u, v in edge_labels:
        diz[(u, v)] = edge_labels[(u, v)]
    nx.draw_networkx_edges(H, pos, width=1, style="dotted", edge_color="white")
    nx.draw_networkx_edge_labels(H, pos, edge_labels=diz, label_pos=0.2, font_size=20)
    plt.show()
|
SimBoex/HW5 | utils_2_3.py | import pandas as pd
import networkx as nx
import pickle
import random
import matplotlib.pyplot as plt
import numpy as np
from heapq import heapify, heappush, heappop
def path(startingNode, endNode, index, H, startingTime, endTime, mapping):
    """Compute a path from startingNode to endNode that passes through the
    intermediate nodes in *index* (mutated in place!), stitching together
    per-segment Dijkstra shortest paths, then visualize it.

    Returns the concatenated node sequence (possibly partial when a segment's
    endpoints are inactive or disconnected in the chosen time window).
    """
    # *index* is mutated: endpoints are spliced around the intermediates.
    index.insert(0, startingNode)
    index.append(endNode)
    # it creates the new graph restricted to [startingTime, endTime]
    dG = new_graph(H, startingTime, endTime)
    c = create_path(index)
    final_path = []
    print("the sequnce of nodes : ")
    print(index)
    for start, end in c:
        if start not in dG.nodes():
            print("the node {} doesn't interact in the chosen interval of time".format(start))
            return final_path
        if end not in dG.nodes():
            print("the node {} doesn't interact in the chosen interval of time".format(end))
            return final_path
        pred = dijkstra(start, end, dG, mapping)
        # check if the 2 nodes are not connected
        if pred[end] == -1:
            break
        # NOTE: local *path* shadows this function's own name.
        path = traceback(pred, end, start)
        # Skip the first node of each segment after the first to avoid duplicates.
        if final_path:
            final_path.extend(path[1:])
        else :
            final_path.extend(path)
    # it creates the path as a list of (u, v) edge tuples
    # (assumes these three statements sit AFTER the loop — original
    # indentation was mangled; TODO confirm against upstream)
    p = create_path(final_path)
    # it invokes the visualization method
    visualizePath(dG, final_path, p)
    return final_path
#it adds the all the tuples that we need to create the list of edges componing the path
def links(index):
    """Given a list of (u, v) edge tuples, return the same list with a
    connecting edge (v, u') inserted wherever one tuple's end does not
    match the next tuple's start."""
    full_links = []
    for current, following in zip(index, index[1:]):
        full_links.append(current)
        if current[1] != following[0]:
            full_links.append((current[1], following[0]))
    full_links.append(index[-1])
    return full_links
#it renames the nodes and return the mapping between nodes and integers and the random path
def first(G_loaded):
    """Relabel the graph's nodes to consecutive integers and draw a seeded
    random 8-node path; returns (relabelled_graph, path_labels, mapping)."""
    relabelled, mapping = maps(G_loaded)
    rand_labels = random_path(8, mapping)
    return relabelled, rand_labels, mapping
#it returns the random path
def random_path(n, mapping):
    """Return the integer labels of n-2 distinct random nodes (seeded for
    reproducibility).

    Fix: random.sample() on a set is deprecated since Python 3.9 and raises
    TypeError from 3.11 — sample from a sequence instead. Node names in
    *mapping* are already unique, so the old set() wrap added nothing.
    """
    random.seed(12)
    chosen = random.sample(list(mapping.keys()), n - 2)
    return labels(chosen, mapping)
#it maps the nodes with integers from 0
def maps(G):
    """Map every node to a consecutive integer (starting at 0) and return
    the relabelled graph together with the name->int mapping."""
    mapping = {}
    for position, node in enumerate(G.nodes()):
        mapping[node] = position
    relabelled = nx.relabel_nodes(G, mapping)
    return relabelled, mapping
#it renames the random path
def labels(l, mapping):
    """Translate each node name in *l* to its integer label via *mapping*."""
    return list(map(mapping.__getitem__, l))
#the dijkstra algorithm
def dijkstra(Starting_vertex, End_vertex, dG, mapping):
    """Dijkstra shortest path from Starting_vertex, stopping early once
    End_vertex is settled. Returns the predecessor array *pred* (entry -1
    means "unreached"); the caller reconstructs the path via traceback().

    Nodes must be integers in [0, len(mapping)) — see maps().
    NOTE(review): decrease-key is implemented via list.remove() + heapify,
    which is O(n) per update — correct but slow on large graphs.
    """
    # it creates the min_heap of (distance, node) pairs
    min_heap = []
    heapify(min_heap)
    N = len(mapping)
    Final_distance = 0
    dist = np.ones(N) * np.inf          # tentative distances
    visited = np.zeros(N, dtype=bool)   # settled nodes
    pred = np.ones(N, dtype="int64") * (-1)  # predecessor of each node, -1 = none
    # it loads the starting vertex
    heappush(min_heap, (0, Starting_vertex))
    end = False
    dist[Starting_vertex] = 0
    # for each node look for the best path from the starting node
    while min_heap and not end:
        element = heappop(min_heap)[1]
        # relax every outgoing edge (u, v) of the popped node
        for start_node, end_node in list(dG.edges(element)):
            if not visited[end_node]:
                diz = dG.get_edge_data(start_node, end_node)
                weight = diz["weight"]
                distance = dist[end_node]
                # check if the stored distance can be improved
                if distance > dist[element] + weight:
                    # if we have already queued this node, remove the stale entry
                    if distance != np.inf:
                        min_heap.remove((distance, end_node))
                        dist[end_node] = dist[element] + weight
                        heappush(min_heap, (dist[end_node], end_node))
                        heapify(min_heap)
                    else:
                        dist[end_node] = dist[element] + weight
                        heappush(min_heap, (dist[end_node], end_node))
                    # remember the predecessor for traceback
                    pred[end_node] = element
        visited[element] = True
        # break the loop on reaching the target node (greedy early exit)
        if element == End_vertex:
            Final_distance = dist[element]
            end = True
            print("the smallest path distance between {} and {} is ".format(Starting_vertex, End_vertex), Final_distance)
    # if the target was never reached, the two nodes are not connected
    if not end:
        print("there isn't a path between this two values {} and {}".format(Starting_vertex, End_vertex))
    return pred
#it computes the path from the starting node to the endNode
def traceback(pred, target, start):
    """Walk the predecessor array backwards from *target* until *start*,
    returning the node sequence in forward (start -> target) order."""
    chain = [target]
    while pred[target] != start:
        target = pred[target]
        chain.append(target)
    chain.append(start)
    chain.reverse()
    return chain
#it computes the new graph with only the links in a time interval that have the smaller weight
def new_graph(H, startingTime, endTime):
    """Build a directed graph keeping, per node pair, only the single
    lightest-weight interaction whose timestamp falls inside
    [startingTime, endTime]."""
    G = nx.DiGraph()
    for u, v in H.edges():
        attrs = H.get_edge_data(u, v)
        # attrs maps timestamp -> {"weight": w}; keep only the window.
        in_window = [(t, a) for t, a in attrs.items()
                     if startingTime <= t <= endTime]
        if in_window:
            best = min(in_window, key=lambda item: item[1]["weight"])
            G.add_edge(u, v, weight=best[1]["weight"])
    return G
def create_path(l):
    """Turn a flat node sequence into a list of consecutive (u, v) edges,
    filling any gap between pairs via links()."""
    pairs = list(zip(l[::2], l[1::2]))
    if len(l) % 2 != 0:
        # Odd-length sequence: the last node pairs with its predecessor.
        pairs.append((l[-2], l[-1]))
    return links(pairs)
#it creates the graph to visualize the path
def visualizePath(dg, nodes, p):
    """Draw the subgraph induced by *nodes*, coloring the path edges *p* red
    and the start/end nodes red (intermediates blue).

    Side effect only: renders a matplotlib figure via plt.show().
    """
    print("The path that we need to follow is ", p)
    print()
    print("The starting and the end node are filled with red")
    print("The path from {} to {} is evidenced by red edges".format(nodes[0], nodes[-1]))
    plt.figure(num=None, figsize=(15, 15), dpi=60)
    H = dg.subgraph(nodes)
    path = []  # per-node fill colors, aligned with H.nodes iteration order
    cont = 0  # NOTE(review): unused counter — candidate for removal
    for u in H.nodes:
        if u == nodes[0] or u == nodes[-1]:
            path.append("red")
        else:
            path.append("blue")
    pos = nx.spring_layout(H)
    # Path edges red, everything else black.
    edge_colors = ['red' if e in p else "black" for e in H.edges()]
    #pos = nx.circular_layout(H)
    nx.draw(H, pos, with_labels=True, edge_color=edge_colors, connectionstyle='arc3, rad = 0.1', arrowsize=30, node_size=2100)
    nx.draw_networkx_nodes(H, pos, node_color=path)
    edge_labels = nx.get_edge_attributes(H, 'weight')
    # Copy labels into a plain dict keyed by (u, v).
    diz = {}
    for u, v in edge_labels:
        diz[(u, v)] = edge_labels[(u, v)]
    nx.draw_networkx_edges(H, pos, width=1, style="dotted", edge_color="white")
    nx.draw_networkx_edge_labels(H, pos, edge_labels=diz, label_pos=0.2, font_size=20)
    plt.show()
|
vibhatha/cylon_applications | mpi/allgather_example.py | <filename>mpi/allgather_example.py<gh_stars>1-10
from mpi4py import MPI
import numpy as np
# Minimal MPI allgather demo: each rank contributes a small list, every rank
# receives all lists, and the unique values are printed.
comm = MPI.COMM_WORLD
size = comm.Get_size()  # total number of ranks (not used below)
rank = comm.Get_rank()
# Each rank builds [0, rank+1, 2*(rank+1)].
data = [i * (rank + 1) for i in range(3)]
# Collect every rank's list on ALL ranks (list of lists, one per rank).
all_data = comm.allgather(data)
np_all_data = np.array(all_data).flatten()
np_all_unique_data = np.unique(np_all_data)
print(all_data, np_all_unique_data)
vibhatha/cylon_applications | bench/simple_dask_cluster_example.py | from dask_cluster import DaskCluster
from dask.distributed import Client
import time
# Example driver: spin up a Dask cluster over SSH, connect a client, then
# tear the cluster down. Host names, paths, and the virtualenv below are
# site-specific and must be adapted per deployment.
scheduler_host = "v-001"
ips = ['v-001', 'v-002', 'v-003', 'v-004']
memory_limit = '15GB'
network_interface = 'enp175s0f0'
nprocs = 16   # worker processes per node
nthreads = 1  # threads per worker process
local_directory = '/scratch/vlabeyko/dask'
scheduler_file = '/N/u2/v/vlabeyko/dask-sched.json'
python_env = '~/sandbox/UNOMT/cylon_source/cylon/ENVCYLON'
num_nodes = 4
wait = 20  # seconds to sleep after each ssh spawn/kill
dask_cluster = DaskCluster(scheduler_host=scheduler_host, ips=ips, memory_limit=memory_limit,
                           network_interface=network_interface, nprocs=nprocs, nthreads=nthreads,
                           local_directory=local_directory,
                           scheduler_file=scheduler_file, python_env=python_env, num_nodes=num_nodes, wait=wait)
dask_cluster.start_cluster()
# Give scheduler and workers time to come up before connecting.
time.sleep(100)
client = Client(scheduler_host + ":8786")
print(client)
dask_cluster.stop_cluster()
|
vibhatha/cylon_applications | bench/dask_cluster.py | <reponame>vibhatha/cylon_applications<gh_stars>1-10
import os
import dask
import dask.dataframe as dd
from dask.distributed import Client, SSHCluster
import pandas as pd
import time
import argparse
import math
import subprocess
import numpy as np
class DaskCluster(object):
    """Start/stop a Dask scheduler and workers on remote hosts over SSH.

    All process management is done by spawning `ssh <host> dask-scheduler/
    dask-worker` subprocesses and later killing them with `pkill`. Each
    start/stop step sleeps *wait* seconds to let processes settle.
    Assumes passwordless SSH to every host and the same Python env path on
    all nodes — TODO confirm for the target cluster.
    """

    def __init__(self, scheduler_host, ips, memory_limit, network_interface, nprocs, nthreads, local_directory,
                 scheduler_file,
                 python_env, num_nodes, wait):
        self.scheduler_host = scheduler_host        # host running dask-scheduler
        self.ips = ips                              # candidate worker hosts
        self.memory_limit = memory_limit            # per-worker memory cap (e.g. '15GB')
        self.network_interface = network_interface  # NIC for scheduler/worker comms
        self.nprocs = nprocs                        # worker processes per node
        self.nthreads = nthreads                    # threads per worker process
        self.local_directory = local_directory      # worker spill/scratch dir
        self.scheduler_file = scheduler_file        # shared scheduler JSON file
        self.python_env = python_env                # remote virtualenv root
        self.num_nodes = num_nodes                  # how many of *ips* to use
        self.wait = wait                            # seconds to sleep after each step

    def start_scheduler(self):
        """Launch dask-scheduler on scheduler_host via ssh (non-blocking)."""
        print("Starting Scheduler")
        subprocess.Popen(
            ["ssh", self.scheduler_host, self.python_env + "/bin/dask-scheduler", "--scheduler-file",
             self.scheduler_file, "--interface", str(self.network_interface)], stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT)
        time.sleep(self.wait)

    def start_workers(self):
        """Launch dask-worker on the first num_nodes hosts via ssh."""
        for ip in self.ips[0:self.num_nodes]:
            print("Starting Worker : {}".format(ip))
            # NOTE(review): the Popen handle is kept only locally; workers are
            # stopped later via pkill, not via this handle.
            val = subprocess.Popen(
                ["ssh", ip, self.python_env + "/bin/dask-worker", self.scheduler_host + ":8786", "--nthreads",
                 str(self.nthreads), "--nprocs",
                 str(self.nprocs), "--memory-limit", self.memory_limit, "--interface", str(self.network_interface),
                 "--local-directory", self.local_directory,
                 "--scheduler-file",
                 self.scheduler_file],
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
            time.sleep(self.wait)

    def stop_scheduler(self):
        """Kill any dask-scheduler process on the LOCAL machine.

        NOTE(review): this pkill runs locally, not over ssh — it only works
        if this driver runs on scheduler_host itself; confirm.
        """
        print("Stopping scheduler")
        subprocess.run(["pkill", "-f", "dask-scheduler"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        time.sleep(self.wait)

    def stop_workers(self):
        """Kill dask-worker processes on ALL configured hosts via ssh."""
        print("Stopping workers")
        for ip in self.ips:
            print("stopping worker", ip, flush=True)
            subprocess.run(["ssh", ip, "pkill", "-f", "dask-worker"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            time.sleep(self.wait)

    def start_cluster(self):
        """Start the scheduler first, then the workers."""
        print("Start Cluster")
        self.start_scheduler()
        print("Scheduler Started")
        self.start_workers()
        print("Workers Started")

    def stop_cluster(self):
        """Stop workers first, then the scheduler."""
        print("Stop Cluster")
        self.stop_workers()
        print("Stopped Workers")
        self.stop_scheduler()
        print("Stopped Scheduler")
|
vibhatha/cylon_applications | torch/pycylon_torch_example.py | """
Install: PyCylon (Follow: https://cylondata.org/docs/)
Run Program: mpirun -n 4 python3 pycylon_torch_example.py --backend nccl --epochs 20
"""
import argparse
import os
import socket
import numpy as np
import pandas as pd
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from pycylon import CylonEnv
from pycylon import read_csv
from pycylon import DataFrame
from pycylon.net import MPIConfig
from pycylon.util.logging import log_level, disable_logging
from sklearn.preprocessing import StandardScaler
from torch.nn.parallel import DistributedDataParallel as DDP
# Module-level setup: silence PyCylon logging and capture this machine's
# hostname for the rank-tagged log messages below.
log_level(0) # set an arbitrary log level
disable_logging() # disable logging completely
hostname = socket.gethostname()
def setup(rank, world_size, backend, master_address, port):
    """Initialize the torch.distributed process group (env:// rendezvous)
    and a distributed Cylon environment for this rank; returns the CylonEnv.

    The MASTER_* / RANK / WORLD_SIZE env vars must be set BEFORE
    init_process_group(), which reads them via the env:// init method.
    """
    os.environ['MASTER_ADDR'] = master_address
    os.environ['MASTER_PORT'] = port
    os.environ["LOCAL_RANK"] = str(rank)
    os.environ["RANK"] = str(rank)
    os.environ["WORLD_SIZE"] = str(world_size)
    # initialize the process group
    dist.init_process_group(backend=backend, init_method="env://")
    mpi_config = MPIConfig()
    env = CylonEnv(config=mpi_config, distributed=True)
    print(f"Init Process Groups : => [{hostname}]Demo DDP Rank {rank}")
    return env
def cleanup():
    """Tear down the torch.distributed process group created in setup()."""
    dist.destroy_process_group()
class Network(nn.Module):
    """Minimal regression model: one 3 -> 1 linear layer followed by a
    LeakyReLU activation."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(3, 1)

    def forward(self, x):
        return F.leaky_relu(self.linear(x))
def demo_basic(rank, world_size, backend, epochs, master_address, port):
    """End-to-end demo per rank: fetch per-rank CSVs over HTTP, join them
    with PyCylon, build features, and train a tiny DDP model for *epochs*.

    (Indentation of the rank-0 print sections was reconstructed from a
    whitespace-mangled source — TODO confirm against upstream tutorial.)
    """
    print(f"Simple Batch Train => [{hostname}]Demo DDP Rank {rank}")
    env = setup(rank=rank, world_size=world_size, backend=backend, master_address=master_address, port=port)
    cuda_available = torch.cuda.is_available()
    device = 'cuda:' + str(rank) if cuda_available else 'cpu'
    # Each rank reads its own shard (files are suffixed by rank+1).
    base_path = "https://raw.githubusercontent.com/cylondata/cylon/main/cpp/src/tutorial/data/"
    user_devices_file = os.path.join(base_path, f'user_device_tm_{rank + 1}.csv')
    user_usage_file = os.path.join(base_path, f'user_usage_tm_{rank + 1}.csv')
    print("Rank[{}] User Device File : {}".format(rank, user_devices_file))
    print("Rank[{}] User Usage File : {}".format(rank, user_usage_file))
    user_devices_data = DataFrame(pd.read_csv(user_devices_file)) #read_csv(user_devices_file, sep=',')
    user_usage_data = DataFrame(pd.read_csv(user_usage_file)) #read_csv(user_usage_file, sep=',')
    print(f"Rank [{rank}] User Devices Data Rows:{len(user_devices_data)}, Columns: {len(user_devices_data.columns)}")
    print(f"Rank [{rank}] User Usage Data Rows:{len(user_usage_data)}, Columns: {len(user_usage_data.columns)}")
    print("--------------------------------")
    print("Before Join")
    print("--------------------------------")
    print(user_devices_data[0:5])
    print("-------------------------------------")
    print(user_usage_data[0:5])
    # Hash-join the two tables on their first/fourth columns (by index).
    join_df = user_devices_data.merge(right=user_usage_data, left_on=[0], right_on=[3], algorithm='hash')
    print("----------------------")
    print("Rank [{}] New Table After Join (5 Records)".format(rank))
    print(join_df[0:5])
    print("----------------------")
    feature_df = join_df[
        ['_xplatform_version', '_youtgoing_mins_per_month', '_youtgoing_sms_per_month',
         '_ymonthly_mb']]
    feature_df.rename(
        ['platform_version', 'outgoing_mins_per_month', 'outgoing_sms_per_month', 'monthly_mb'])
    if rank == 0:
        print("Data Engineering Complete!!!")
    print("=" * 80)
    print("Rank [{}] Feature DataFrame ".format(rank))
    print(feature_df[0:5])
    print("=" * 80)
    # First 3 columns are features, 4th is the regression target.
    data_ar: np.ndarray = feature_df.to_numpy()
    data_features: np.ndarray = data_ar[:, 0:3]
    data_learner: np.ndarray = data_ar[:, 3:4]
    # Fixed split: first 100 rows train, rest test.
    x_train, y_train = data_features[0:100], data_learner[0:100]
    x_test, y_test = data_features[100:], data_learner[100:]
    x_train = np.asarray(x_train, dtype=np.float32)
    y_train = np.asarray(y_train, dtype=np.float32)
    x_test = np.asarray(x_test, dtype=np.float32)
    y_test = np.asarray(y_test, dtype=np.float32)
    # NOTE(review): fit_transform is re-fit on the test set — normally the
    # train-fitted scaler should only transform() the test data; confirm.
    sc = StandardScaler()
    sct = StandardScaler()
    x_train = sc.fit_transform(x_train)
    y_train = sct.fit_transform(y_train)
    x_test = sc.fit_transform(x_test)
    y_test = sct.fit_transform(y_test)
    x_train = torch.from_numpy(x_train).to(device)
    y_train = torch.from_numpy(y_train).to(device)
    x_test = torch.from_numpy(x_test).to(device)
    y_test = torch.from_numpy(y_test).to(device)
    # create model and move it to GPU with id rank
    model = Network().to(device)
    ddp_model = DDP(model, device_ids=[device]) if cuda_available else DDP(model)
    loss_fn = nn.MSELoss()
    optimizer = optim.SGD(ddp_model.parameters(), lr=0.01)
    optimizer.zero_grad()
    if rank == 0:
        print("Training A Dummy Model")
    # Per-sample (batch size 1) training loop.
    for t in range(epochs):
        for x_batch, y_batch in zip(x_train, y_train):
            print(f"Epoch {t}", end='\r')
            prediction = ddp_model(x_batch)
            loss = loss_fn(prediction, y_batch)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
    if rank == 0:
        print("Data Analysis Complete!!!")
if __name__ == '__main__':
    # Parse runtime options; rank/world_size come from Open MPI's env vars
    # (the script is expected to be launched under `mpirun`).
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", "--backend",
                        help="example : 'mpi', 'nccl'",
                        default='mpi',
                        type=str)
    parser.add_argument("-e", "--epochs",
                        help="training epochs",
                        default=10,
                        type=int)
    parser.add_argument("-m", "--master_address",
                        help="master address for torch distributed runtime",
                        default='localhost',
                        type=str)
    parser.add_argument("-p", "--port",
                        help="torch port for distributed runtime",
                        default='12335',
                        type=str)
    args = parser.parse_args()
    os.environ['MASTER_ADDR'] = args.master_address
    os.environ['MASTER_PORT'] = args.port
    # Open MPI injects these when launched via mpirun.
    world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
    rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
    backend = args.backend
    demo_basic(rank=rank, world_size=world_size, backend=backend, epochs=args.epochs,
               master_address=args.master_address, port=args.port)
    cleanup()
|
vibhatha/cylon_applications | uno/download_util.py | <reponame>vibhatha/cylon_applications
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
'''
Adopted From: https://github.com/tqdm/tqdm/blob/master/examples/tqdm_wget.py
'''
import urllib
from tqdm.auto import tqdm
from urllib.request import urlretrieve
from urllib.request import urlopen
class TqdmUpTo(tqdm):
    """tqdm subclass exposing `update_to(...)`, a callback compatible with
    urllib's `urlretrieve(reporthook=...)` signature.

    Provides `update_to(n)` which uses `tqdm.update(delta_n)`.
    Inspired by [twine#242](https://github.com/pypa/twine/pull/242),
    [here](https://github.com/pypa/twine/commit/42e55e06).
    """

    def update_to(self, b=1, bsize=1, tsize=None):
        """
        b : int, optional
            Number of blocks transferred so far [default: 1].
        bsize : int, optional
            Size of each block (in tqdm units) [default: 1].
        tsize : int, optional
            Total size (in tqdm units). If [default: None] remains unchanged.
        """
        if tsize is not None:
            self.total = tsize
        # Convert absolute progress (b * bsize) into a delta for tqdm.
        return self.update(b * bsize - self.n)  # also sets self.n = b * bsize
def download(url, output_file):
    """Download *url* to *output_file*, showing a tqdm progress bar.

    Bug fix: the original performed the download TWICE — once via
    urlretrieve() and again via urlopen()/chunk-write over the same output
    file (both variants of the upstream tqdm example were pasted in).
    Only the urlretrieve() variant is kept.
    """
    # Progress-bar label: last path component of the URL.
    eg_file = url.replace('/', ' ').split()[-1]
    with TqdmUpTo(unit='B', unit_scale=True, unit_divisor=1024, miniters=1,
                  desc=eg_file) as t:  # all optional kwargs
        urlretrieve(url, filename=output_file, reporthook=t.update_to,
                    data=None)
        t.total = t.n
|
vibhatha/cylon_applications | bench/bench_util.py | <reponame>vibhatha/cylon_applications<filename>bench/bench_util.py
import os
import numpy as np
import pandas as pd
def add_record_to_stats_file(file_path: str, record: str):
    """Append *record* (newline-terminated) to *file_path*, creating the
    file if it does not exist.

    Fix: mode "a" already creates a missing file, so the original
    os.path.exists() check with a separate "w" branch was redundant and
    race-prone (TOCTOU between the check and the open).
    """
    with open(file_path, "a") as fp:
        fp.write(record + "\n")
def get_random_data_column(num_rows: int, duplication_factor: float, with_null: bool = False,
                           null_per: float = 0.9, stringify: bool = False):
    """Generate one benchmark column of random ints.

    Without nulls: returns an ndarray of num_rows ints drawn from
    [0, num_rows * duplication_factor). With nulls: returns a Python list
    whose first num_rows * null_per entries are None, followed by random
    ints. (*stringify* is accepted for signature symmetry but unused here —
    stringification happens at the DataFrame level.)
    """
    pool_size = int(num_rows * duplication_factor)
    if not with_null:
        return np.random.randint(pool_size, size=num_rows)
    n_nulls = int(num_rows * null_per)
    values = np.random.randint(pool_size, size=num_rows - n_nulls).tolist()
    return [None] * n_nulls + values
def get_dataframe(num_rows: int, num_cols: int, unique_factor: float, with_null: bool = False, null_per: float = 0.9,
                  stringify: bool = False):
    """Build a benchmark DataFrame of num_cols random columns named
    'data0'..'dataN'.

    Fix: the with_null / no-null branches duplicated the whole construction;
    they are consolidated (passing with_null=False is identical to the old
    argument-less call, so random-number consumption is unchanged). Rows are
    shuffled only in the with_null case, matching the original behavior.
    """
    pdf = pd.DataFrame({'data{}'.format(i): get_random_data_column(num_rows=num_rows,
                                                                   duplication_factor=unique_factor,
                                                                   with_null=with_null,
                                                                   null_per=null_per,
                                                                   stringify=stringify)
                        for i in range(num_cols)})
    if with_null:
        # Shuffle so the leading None block is spread across the frame.
        pdf = pdf.sample(frac=1)
    if stringify:
        return pdf.astype('str')
    return pdf
def line_separator():
    """Print an 80-character '=' divider line to stdout."""
    divider = "=" * 80
    print(divider)
|
vibhatha/cylon_applications | bench/modin_join.py | ##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import time
import os
# Configure Modin BEFORE importing it (engine/cpus are read at import time).
# NOTE(review): if MODIN_CPUS is unset, os.environ.get returns None and
# str(None) writes the literal string "None" back — likely a bug; confirm
# the intended default.
cpus = os.environ.get("MODIN_CPUS")
os.environ["MODIN_CPUS"] = str(cpus)
os.environ['MODIN_ENGINE'] = 'ray'
import modin.pandas as pd
import numpy as np
from bench_util import get_dataframe
import pyarrow as pa
import argparse
"""
Run benchmark:
>>> python modin_join.py --start_size 10_000_000 \
--step_size 10_000_000 \
--end_size 50_000_000 \
--num_cols 2 \
--stats_file /tmp/modin_join_bench.csv \
--repetitions 1 \
--unique_factor 0.1 \
--algorithm hash
"""
def join_op(num_rows: int, num_cols: int, algorithm: str, unique_factor: float):
    """Time a single Modin inner join of two random DataFrames of num_rows x
    num_cols; returns the elapsed wall-clock seconds (data generation and
    Modin conversion are excluded from the timing).

    NOTE(review): *algorithm* is accepted but never used — the recorded
    "algorithm" in the stats is whatever the caller passed, not what ran.
    """
    pdf_left = get_dataframe(num_rows=num_rows, num_cols=num_cols, unique_factor=unique_factor, stringify=False)
    pdf_right = get_dataframe(num_rows=num_rows, num_cols=num_cols, unique_factor=unique_factor, stringify=False)
    # NOTE: sort join breaks when loaded data in-memory via Pandas dataframe
    # (here `pd` is modin.pandas, so this converts pandas -> Modin frames).
    pdf_left = pd.DataFrame(pdf_left)
    pdf_right = pd.DataFrame(pdf_right)
    join_col = pdf_left.columns[0]
    modin_time = time.time()
    pdf2 = pdf_left.join(pdf_right, how="inner", on=join_col, lsuffix="_l", rsuffix="_r")
    modin_time = time.time() - modin_time
    return modin_time
def bench_join_op(start: int, end: int, step: int, num_cols: int, algorithm: str, repetitions: int,
                  stats_file: str,
                  unique_factor: float):
    """Sweep record counts from *start* to *end* (inclusive) in *step*
    increments, timing join_op *repetitions* times per size, and write the
    averaged results to *stats_file* as CSV.
    """
    all_data = []
    schema = ["num_records", "num_cols", "algorithm", "modin"]
    # NOTE(review): assert-based arg validation is stripped under `python -O`.
    assert repetitions >= 1
    assert start > 0
    assert step > 0
    assert num_cols > 0
    for records in range(start, end + step, step):
        times = []
        for idx in range(repetitions):
            modin_time = join_op(num_rows=records, num_cols=num_cols,
                                 algorithm=algorithm,
                                 unique_factor=unique_factor)
            times.append([modin_time])
        # Average per-metric across repetitions (times is repetitions x 1).
        times = np.array(times).sum(axis=0) / repetitions
        print(f"Join Op : Records={records}, Columns={num_cols}, "
              f"Modin Time : {times[0]}")
        all_data.append(
            [records, num_cols, algorithm, times[0]])
    pdf = pd.DataFrame(all_data, columns=schema)
    print(pdf)
    pdf.to_csv(stats_file)
if __name__ == '__main__':
    # CLI entry point — see the module docstring for a full invocation example.
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--start_size",
                        help="initial data size",
                        type=int)
    parser.add_argument("-e", "--end_size",
                        help="end data size",
                        type=int)
    parser.add_argument("-d", "--unique_factor",
                        help="random data unique factor",
                        type=float)
    parser.add_argument("-s", "--step_size",
                        help="Step size",
                        type=int)
    parser.add_argument("-c", "--num_cols",
                        help="number of columns",
                        type=int)
    parser.add_argument("-a", "--algorithm",
                        help="join algorithm [hash or sort]",
                        type=str)
    parser.add_argument("-r", "--repetitions",
                        help="number of experiments to be repeated",
                        type=int)
    parser.add_argument("-f", "--stats_file",
                        help="stats file to be saved",
                        type=str)
    args = parser.parse_args()
    # Echo the configuration before running the sweep.
    print(f"Start Data Size : {args.start_size}")
    print(f"End Data Size : {args.end_size}")
    print(f"Step Data Size : {args.step_size}")
    print(f"Data Unique Factor : {args.unique_factor}")
    print(f"Number of Columns : {args.num_cols}")
    print(f"Number of Repetitions : {args.repetitions}")
    print(f"Join Algorithm : {args.algorithm}")
    print(f"Stats File : {args.stats_file}")
    bench_join_op(start=args.start_size,
                  end=args.end_size,
                  step=args.step_size,
                  num_cols=args.num_cols,
                  algorithm=args.algorithm,
                  repetitions=args.repetitions,
                  stats_file=args.stats_file,
                  unique_factor=args.unique_factor)
|
vibhatha/cylon_applications | bench/dask_distributed_unique.py | import os
import dask
import dask.dataframe as dd
from dask.distributed import Client, SSHCluster
from dask_cluster import DaskCluster
import pandas as pd
import time
import argparse
import math
import subprocess
import numpy as np
"""
>>> python dask_distributed_unique.py --start_size 100_000_000 \
--step_size 100_000_000 \
--end_size 500_000_000 \
--num_cols 2 \
--stats_file /tmp/dask_dist_join_bench.csv \
--repetitions 3 \
--base_file_path ~/data/cylon_bench \
--parallelism 64 \
--nodes_file /hostfiles/hostfile_victor_8x16 \
--total_nodes 8 \
--scheduler_host v-001 \
--python_env /home/vibhatha/venv/ENVCYLON
"""
def get_ips(nodes_file):
    """Read a hostfile and return the host/IP token from each non-empty line.

    Lines are expected to look like ``<host> [slots=...]``; only the first
    whitespace-separated token is kept.

    :param nodes_file: path to the hostfile.
    :return: list of host names / IPs, in file order.
    """
    ips = []
    with open(nodes_file, 'r') as fp:
        for line in fp:
            # Bug fix: the original l.split(' ')[0] kept the trailing newline
            # when a line held only a host name, and emitted junk entries for
            # blank lines. split() with no argument strips both.
            tokens = line.split()
            if tokens:
                ips.append(tokens[0])
    return ips
def dask_drop_duplicates(scheduler_host, num_rows, base_file_path, num_nodes, parallelism):
    """Time a distributed drop_duplicates over pre-generated CSV shards.

    Connects to a running Dask scheduler at ``<scheduler_host>:8786``, reads
    every ``distributed_data_file_rank_*.csv`` shard for the given size /
    parallelism, and returns the wall-clock seconds spent in
    drop_duplicates + compute. ``num_nodes`` is currently unused.
    """
    print("Drop Duplicates Function")
    client = Client(scheduler_host + ':8786')
    print(client)
    # Data layout convention: <base>/records_<N>/parallelism_<P>/distributed_data_file_rank_*.csv
    sub_path = "records_{}/parallelism_{}".format(num_rows, parallelism)
    distributed_file_prefix = "distributed_data_file_rank_*.csv"
    file_path = os.path.join(base_file_path, sub_path, distributed_file_prefix)
    # if not os.path.exists(file_path):
    #     print("File Path invalid: {}".format(file_path))
    #     return 0
    # One partition per worker process so the op runs at the requested parallelism.
    df_l = dd.read_csv(file_path).repartition(npartitions=parallelism)
    # Materialize on the cluster first so the timed section excludes the CSV read.
    client.persist([df_l])
    print("rows", len(df_l), flush=True)
    join_time = time.time()
    out = df_l.drop_duplicates(split_out=parallelism)
    res = out.compute()
    join_time = time.time() - join_time
    return join_time
def bench_drop_duplicates_op(start, end, step, num_cols, repetitions, stats_file, base_file_path, num_nodes,
                             parallelism, scheduler_host=None):
    """Benchmark distributed drop_duplicates over a range of record counts.

    Runs ``dask_drop_duplicates`` for every size in ``range(start, end + step,
    step)``, averages the wall time over ``repetitions`` runs, prints a
    summary DataFrame and writes it to ``stats_file``.

    :param scheduler_host: Dask scheduler host. Bug fix: the original read
        this from an implicit module-level global set in ``__main__``; it is
        now an explicit (defaulted) parameter. ``None`` falls back to the
        global for backward compatibility with existing callers.
    """
    if scheduler_host is None:
        scheduler_host = globals().get("scheduler_host")
    all_data = []
    schema = ["num_records", "num_cols", "time(s)"]
    # Validate the sweep parameters before touching the cluster.
    assert repetitions >= 1
    assert start > 0
    assert step > 0
    assert num_cols > 0
    for records in range(start, end + step, step):
        times = []
        for idx in range(repetitions):
            dask_time = dask_drop_duplicates(scheduler_host=scheduler_host, num_rows=records,
                                             base_file_path=base_file_path,
                                             num_nodes=num_nodes, parallelism=parallelism)
            times.append([dask_time])
        # Column-wise mean over the repetitions.
        times = np.array(times).sum(axis=0) / repetitions
        print("Join Op : Records={}, Columns={}, Dask Time : {}".format(records, num_cols, times[0]))
        all_data.append([records, num_cols, times[0]])
    pdf = pd.DataFrame(all_data, columns=schema)
    print(pdf)
    pdf.to_csv(stats_file)
# CLI entry point: parse sweep + cluster options, start an SSH-based Dask
# cluster, run the drop_duplicates benchmark, and always tear the cluster down.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--start_size",
                        help="initial data size",
                        type=int)
    parser.add_argument("-e", "--end_size",
                        help="end data size",
                        type=int)
    parser.add_argument("-s", "--step_size",
                        help="Step size",
                        type=int)
    parser.add_argument("-c", "--num_cols",
                        help="number of columns",
                        type=int)
    parser.add_argument("-r", "--repetitions",
                        help="number of experiments to be repeated",
                        type=int)
    parser.add_argument("-f", "--stats_file",
                        help="stats file to be saved",
                        type=str)
    parser.add_argument("-bf", "--base_file_path",
                        help="base file path",
                        type=str)
    parser.add_argument("-p", "--parallelism",
                        help="parallelism",
                        type=int)
    parser.add_argument("-n", "--total_nodes",
                        help="total nodes",
                        type=int)
    parser.add_argument("-nf", "--nodes_file",
                        help="nodes file",
                        type=str)
    parser.add_argument("-ml", "--memory_limit_per_worker",
                        help="memory limit per worker",
                        type=str)
    parser.add_argument("-ni", "--network_interface",
                        help="network interface",
                        type=str)
    parser.add_argument("-sh", "--scheduler_host",
                        help="scheduler host",
                        type=str)
    parser.add_argument("-pe", "--python_env",
                        help="python env",
                        type=str)
    args = parser.parse_args()
    # Echo the configuration so each benchmark log records its own settings.
    print("Start Data Size : {}".format(args.start_size))
    print("End Data Size : {}".format(args.end_size))
    print("Step Data Size : {}".format(args.step_size))
    print("Number of Columns : {}".format(args.num_cols))
    print("Number of Repetitions : {}".format(args.repetitions))
    print("Stats File : {}".format(args.stats_file))
    print("Base File Path : {}".format(args.base_file_path))
    print("Total Nodes : {}".format(args.total_nodes))
    print("Memory limit per worker : {}".format(args.memory_limit_per_worker))
    print("Network Interface : {}".format(args.network_interface))
    print("Parallelism : {}".format(args.parallelism))
    print("Nodes File : {}".format(args.nodes_file))
    print("Scheduler Host : {}".format(args.scheduler_host))
    print("Python ENV : {}".format(args.python_env))
    parallelism = args.parallelism
    TOTAL_NODES = args.total_nodes
    # Spread worker processes evenly; never use more nodes than workers.
    procs = int(math.ceil(parallelism / TOTAL_NODES))
    nodes = min(parallelism, TOTAL_NODES)
    ips = get_ips(args.nodes_file)
    python_env = args.python_env
    scheduler_host = args.scheduler_host
    # NOTE(review): hard-coded, user-specific scratch / scheduler-file paths.
    local_directory = "/scratch/vlabeyko/dask"
    scheduler_file = "/N/u2/v/vlabeyko/dask-sched.json"
    wait = 15
    print("NODES : ", ips)
    print("Processes Per Node: ", procs)
    dask_cluster = DaskCluster(scheduler_host=scheduler_host, ips=ips, memory_limit=args.memory_limit_per_worker,
                               network_interface=args.network_interface, nprocs=procs, nthreads=1,
                               local_directory=local_directory,
                               scheduler_file=scheduler_file, python_env=python_env, num_nodes=nodes, wait=wait)
    dask_cluster.start_cluster()
    try:
        bench_drop_duplicates_op(start=args.start_size,
                                 end=args.end_size,
                                 step=args.step_size,
                                 num_cols=args.num_cols,
                                 repetitions=args.repetitions,
                                 stats_file=args.stats_file,
                                 base_file_path=args.base_file_path,
                                 num_nodes=args.total_nodes,
                                 parallelism=parallelism)
    except Exception as e:
        # Best-effort reporting; teardown happens once, in the finally block.
        print("Exception Occurred : {}".format(str(e)))
    finally:
        # Bug fix: stop_cluster() was previously called in BOTH the except
        # handler and the finally block, tearing the cluster down twice on
        # failure. The finally block alone covers both paths.
        dask_cluster.stop_cluster()
|
vibhatha/cylon_applications | horovod/pycylon_tensorflow_example.py | <reponame>vibhatha/cylon_applications
"""
Install: PyCylon (Follow: https://cylondata.org/docs/)
Run Program: horovodrun -np 4 python3 pycylon_tensorflow_example.py --epochs 20
References:
1. https://github.com/horovod/horovod/blob/master/examples/tensorflow2/tensorflow2_mnist.py
2. https://horovod.readthedocs.io/en/stable/tensorflow.html
"""
import argparse
import os
import socket
import numpy as np
import pandas as pd
from pycylon import CylonEnv
from pycylon import DataFrame
from pycylon.net import MPIConfig
from pycylon.util.logging import log_level, disable_logging
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
import horovod.tensorflow as hvd
log_level(0) # set an arbitrary log level
disable_logging() # disable logging completely
hostname = socket.gethostname()
def setup():
    """Initialize Horovod + a distributed Cylon environment and pin GPUs.

    Returns the :class:`CylonEnv` built on an MPI config; Horovod must have
    been launched with MPI thread support.
    """
    hvd.init()
    # Cylon and Horovod share the MPI runtime, so threads must be supported.
    assert hvd.mpi_threads_supported()
    mpi_config = MPIConfig()
    env = CylonEnv(config=mpi_config, distributed=True)
    rank = env.rank
    world_size = env.world_size
    print(f"Init Process Groups : => [{hostname}]Demo DDP Rank: {rank} , World Size: {world_size}")
    gpus = tf.config.experimental.list_physical_devices('GPU')
    # Grow GPU memory on demand instead of reserving it all up front.
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
    if gpus:
        # One GPU per process, selected by Horovod local rank.
        tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
    return env
def demo_basic(epochs):
    """End-to-end demo: PyCylon data engineering + Horovod/TF training.

    Each rank joins its own shard of the user-device and user-usage CSVs with
    a distributed hash join, builds a feature matrix, and trains a small
    Keras regression model with a Horovod DistributedGradientTape for the
    given number of ``epochs``.
    """
    env = setup()
    rank = env.rank
    print(f"Simple Batch Train => [{hostname}]Demo DDP Rank {rank}")
    # device = 'cuda:' + str(rank) if cuda_available else 'cpu'
    base_path = "https://raw.githubusercontent.com/cylondata/cylon/main/cpp/src/tutorial/data/"
    # Per-rank input shards: files are suffixed 1..world_size.
    user_devices_file = os.path.join(base_path, f'user_device_tm_{rank + 1}.csv')
    user_usage_file = os.path.join(base_path, f'user_usage_tm_{rank + 1}.csv')
    print("Rank[{}] User Device File : {}".format(rank, user_devices_file))
    print("Rank[{}] User Usage File : {}".format(rank, user_usage_file))
    user_devices_data = DataFrame(pd.read_csv(user_devices_file))  # read_csv(user_devices_file, sep=',')
    user_usage_data = DataFrame(pd.read_csv(user_usage_file))  # read_csv(user_usage_file, sep=',')
    print(f"Rank [{rank}] User Devices Data Rows:{len(user_devices_data)}, Columns: {len(user_devices_data.columns)}")
    print(f"Rank [{rank}] User Usage Data Rows:{len(user_usage_data)}, Columns: {len(user_usage_data.columns)}")
    print("--------------------------------")
    print("Before Join")
    print("--------------------------------")
    print(user_devices_data[0:5])
    print("-------------------------------------")
    print(user_usage_data[0:5])
    # Distributed hash join on column index 0 (left) vs 3 (right).
    join_df = user_devices_data.merge(right=user_usage_data, left_on=[0], right_on=[3], algorithm='hash')
    print("----------------------")
    print("Rank [{}] New Table After Join (5 Records)".format(rank))
    print(join_df[0:5])
    print("----------------------")
    # The '_x'/'_y' prefixes are the join's disambiguated column names —
    # presumably PyCylon's merge suffixing convention; verify against the join output.
    feature_df = join_df[
        ['_xplatform_version', '_youtgoing_mins_per_month', '_youtgoing_sms_per_month',
         '_ymonthly_mb']]
    feature_df.rename(
        ['platform_version', 'outgoing_mins_per_month', 'outgoing_sms_per_month', 'monthly_mb'])
    if rank == 0:
        print("Data Engineering Complete!!!")
    print("=" * 80)
    print("Rank [{}] Feature DataFrame ".format(rank))
    print(feature_df[0:5])
    print("=" * 80)
    # First three columns are features; the fourth (monthly_mb) is the target.
    data_ar: np.ndarray = feature_df.to_numpy()
    data_features: np.ndarray = data_ar[:, 0:3]
    data_learner: np.ndarray = data_ar[:, 3:4]
    # Fixed 100-row train split; remainder is the test split.
    x_train, y_train = data_features[0:100], data_learner[0:100]
    x_test, y_test = data_features[100:], data_learner[100:]
    x_train = np.asarray(x_train, dtype=np.float32)
    y_train = np.asarray(y_train, dtype=np.float32)
    x_test = np.asarray(x_test, dtype=np.float32)
    y_test = np.asarray(y_test, dtype=np.float32)
    sc = StandardScaler()
    sct = StandardScaler()
    x_train = sc.fit_transform(x_train)
    y_train = sct.fit_transform(y_train)
    # NOTE(review): fit_transform (not transform) on the test split re-fits
    # the scalers on test data — acceptable for a demo, leaky for real eval.
    x_test = sc.fit_transform(x_test)
    y_test = sct.fit_transform(y_test)
    train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    print("=" * 80)
    print("Tensorflow DataSets")
    print("=" * 80)
    BATCH_SIZE = 64
    SHUFFLE_BUFFER_SIZE = 100
    train_dataset = train_dataset.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE)
    test_dataset = test_dataset.batch(BATCH_SIZE)
    # define network
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(3), tf.keras.layers.Dense(1)])
    # define loss function
    loss = tf.losses.MeanSquaredError()
    # define optimizer
    # Horovod convention: scale the learning rate by the number of workers.
    opt = tf.optimizers.Adam(0.001 * hvd.size())

    @tf.function
    def training_step(images, labels, first_batch):
        # define a step function for training
        with tf.GradientTape() as tape:
            probs = model(images, training=True)
            loss_value = loss(labels, probs)
        # Wrap the tape so gradients are allreduced across workers.
        tape = hvd.DistributedGradientTape(tape)
        grads = tape.gradient(loss_value, model.trainable_variables)
        opt.apply_gradients(zip(grads, model.trainable_variables))
        if first_batch:
            # Sync initial weights/optimizer state from rank 0 after the
            # first step, once variables exist.
            hvd.broadcast_variables(model.variables, root_rank=0)
            hvd.broadcast_variables(opt.variables(), root_rank=0)
        return loss_value

    if rank == 0:
        print("Training A Dummy Model")
    take_count = x_train.shape[0] // hvd.size()
    for t in range(epochs):
        for batch, (images, labels) in enumerate(train_dataset.take(take_count)):
            loss_value = training_step(images, labels, batch == 0)
            if batch % 10 == 0 and hvd.local_rank() == 0:
                print("Epoch : {}, Batch : {}, Loss : {}".format(t, batch, loss_value))
    if rank == 0:
        print("Data Analysis Complete!!!")
if __name__ == '__main__':
    # Parse the single CLI option and launch the distributed demo.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-e", "--epochs", type=int, default=10, help="training epochs")
    cli_args = arg_parser.parse_args()
    demo_basic(epochs=cli_args.epochs)
|
vibhatha/cylon_applications | uno/uno_drug_response_load_pandas.py | <reponame>vibhatha/cylon_applications
import os
import time
import pandas as pd
from sklearn.preprocessing import StandardScaler, MinMaxScaler
def scale_dataframe(dataframe: pd.DataFrame,
                    scaling_method: str):
    """Scale the features of *dataframe* with the requested strategy.

    ``'std'`` applies a StandardScaler and ``'minmax'`` a MinMaxScaler
    (case-insensitive); ``'none'`` or any unrecognised method returns the
    input untouched. 1-D input is reshaped to a single column before fitting.

    Args:
        dataframe (pandas.Dataframe): dataframe to be scaled.
        scaling_method (str): 'std', 'minmax', etc.

    Returns:
        pandas.Dataframe: scaled dataframe (an ndarray for 1-D input).
    """
    print("=" * 80)
    print("scale_dataframe")
    method = scaling_method.lower()
    # Early outs: explicit no-op request, or an unknown strategy.
    if method == 'none':
        return dataframe
    if method == 'std':
        scaler = StandardScaler()
    elif method == 'minmax':
        scaler = MinMaxScaler()
    else:
        return dataframe
    if len(dataframe.shape) == 1:
        # sklearn expects 2-D input, so a Series becomes one column.
        dataframe = scaler.fit_transform(dataframe.values.reshape(-1, 1))
    else:
        # Scale in place, keeping the original index/columns.
        dataframe[:] = scaler.fit_transform(dataframe[:])
    print("=" * 80)
    return dataframe
# Pandas baseline for the Uno drug-response preprocessing pipeline.
# Each stage is timed individually and the per-stage durations are printed
# at the end for comparison against other frameworks.
t1 = time.time()
# NOTE(review): hard-coded absolute path — assumes the Pilot1 raw data was
# pre-staged on this machine.
df = pd.read_csv(
    os.path.join(
        "/home/vibhatha/github/forks/Benchmarks/Data/Pilot1/raw/1/rescaled_combined_single_drug_growth_shuffle0"),
    sep=',',
    header=0,
    index_col=None,)
t2 = time.time()
# Keep only the columns needed downstream.
df = df[['SOURCE', 'DRUG_ID', 'CELLNAME', 'LOG_CONCENTRATION', 'GROWTH']]
t3 = time.time()
# Delete '-', which could be inconsistent between seq and meta
df['CELLNAME'] = df['CELLNAME'].str.replace('-', '')
t4 = time.time()
# Encode data sources into numeric
# Scaling the growth with given scaling method
df['GROWTH'] = scale_dataframe(df['GROWTH'], "std")
t5 = time.time()
# Convert data type into generic python types
df[['LOG_CONCENTRATION', 'GROWTH']] = df[['LOG_CONCENTRATION', 'GROWTH']].astype(float)
t6 = time.time()
# Per-stage timing report.
print("Time Taken For Data Loading : {} s".format(t2 - t1))
print("Time Taken For Column Select : {} s".format(t3 - t2))
print("Time Taken For Map Operation : {} s".format(t4 - t3))
print("Time Taken For Scale Operation : {} s".format(t5 - t4))
print("Time Taken For Casting : {} s".format(t6 - t5))
"""
Pandas
Time Taken For Data Loading : 28.78172779083252 s
Time Taken For Column Select : 1.180802345275879 s
Time Taken For Map Operation : 11.115014553070068 s
Time Taken For Scale Operation : 0.3975682258605957 s
Time Taken For Casting : 0.22675848007202148 s
"""
"""
Modin
Time Taken For Data Loading : 51.13017392158508 s
Time Taken For Column Select : 0.0009305477142333984 s
Time Taken For Map Operation : 0.013023853302001953 s
Time Taken For Scale Operation : 205.78279185295105 s
Time Taken For Casting : 39.518057346343994 s
""" |
vibhatha/cylon_applications | uno/data_processing_pandas.py | import os
import time
import download_util
from pycylon import CylonContext
from pycylon import Table
from pycylon.io import CSVReadOptions
from pycylon.io import read_csv
import numpy as np
import pandas as pd
ctx: CylonContext = CylonContext(config=None, distributed=False)
def load_aggregated_single_response_pandas(target='AUC', min_r2_fit=0.3, max_ec50_se=3.0,
                                           combo_format=True,
                                           rename=False):
    """Load + filter the aggregated single-drug-response table with pandas.

    Downloads the CANDLE Pilot1 aggregate file on first use, filters rows by
    fit quality (``R2fit``/``EC50se``), drops rows with a null ``target``
    value, and optionally reshapes to the combo format / renames columns.
    Prints timings and shapes only; returns ``None`` (benchmark diagnostic).
    """
    url = "https://ftp.mcs.anl.gov/pub/candle/public/benchmarks/Pilot1/combo/combined_single_response_agg"
    # NOTE(review): hard-coded user-specific cache location.
    output_combined_single_response = \
        "/home/vibhatha/data/uno/Pilot1/workload_1/combined_single_response_agg"
    if not os.path.exists(output_combined_single_response):
        download_util.download(url=url, output_file=output_combined_single_response)
    if os.path.exists(output_combined_single_response):
        print(f"Pandas Data file : {output_combined_single_response}")
        t1 = time.time()
        # Explicit dtypes keep the load lean and comparable across frameworks.
        df = pd.read_csv(output_combined_single_response, engine='c', sep='\t',
                         dtype={'SOURCE': str, 'CELL': str, 'DRUG': str, 'STUDY': str,
                                'AUC': np.float32, 'IC50': np.float32,
                                'EC50': np.float32, 'EC50se': np.float32,
                                'R2fit': np.float32, 'Einf': np.float32,
                                'HS': np.float32, 'AAC1': np.float32,
                                'AUC1': np.float32, 'DSS1': np.float32})
        t2 = time.time()
        # Keep only well-fitted dose-response curves.
        df = df[(df['R2fit'] >= min_r2_fit) & (df['EC50se'] <= max_ec50_se)]
        filter_time = time.time() - t2
        print("Pandas Data Loading Time ", df.shape, t2 - t1)
        print("Pandas Filter Time 1", df.shape, filter_time)
        df = df[['SOURCE', 'CELL', 'DRUG', target, 'STUDY']]
        df = df[~df[target].isnull()]
        print("After not and null check ", df.shape)
        if combo_format:
            # Combo layout: a DRUG1/DRUG2 pair, with DRUG2 left empty here.
            df = df.rename(columns={'DRUG': 'DRUG1'})
            df['DRUG2'] = np.nan
            df['DRUG2'] = df['DRUG2'].astype(object)
            df = df[['SOURCE', 'CELL', 'DRUG1', 'DRUG2', target, 'STUDY']]
            if rename:
                df = df.rename(columns={'SOURCE': 'Source', 'CELL': 'Sample',
                                        'DRUG1': 'Drug1', 'DRUG2': 'Drug2', 'STUDY': 'Study'})
        else:
            if rename:
                df = df.rename(columns={'SOURCE': 'Source', 'CELL': 'Sample',
                                        'DRUG': 'Drug', 'STUDY': 'Study'})
        print("DF New", df.shape, df.columns)
def load_single_dose_response_pandas(combo_format=False, fraction=True):
    """Load the rescaled single-drug-growth table with pandas and time it.

    Downloads the CANDLE Pilot1 file on first use, derives a ``DOSE`` column
    as the negated log concentration, renames columns to the canonical
    schema, and prints shapes/timings. ``combo_format`` and ``fraction`` are
    currently unused. Returns ``None`` (benchmark diagnostic).
    """
    url = "https://ftp.mcs.anl.gov/pub/candle/public/benchmarks/Pilot1/combo/rescaled_combined_single_drug_growth"
    # NOTE(review): hard-coded user-specific cache location.
    rescaled_combined_single_drug_growth = \
        "/home/vibhatha/data/uno/Pilot1/workload_1/rescaled_combined_single_drug_growth"
    if not os.path.exists(rescaled_combined_single_drug_growth):
        download_util.download(url=url, output_file=rescaled_combined_single_drug_growth)
    if os.path.exists(rescaled_combined_single_drug_growth):
        print(f"Data file : {rescaled_combined_single_drug_growth}")
        print("------------------Pandas--------------------")
        t1 = time.time()
        df = pd.read_csv(rescaled_combined_single_drug_growth, sep='\t', engine='c',
                         na_values=['na', '-', ''],
                         # nrows=10,
                         dtype={'SOURCE': str, 'DRUG_ID': str,
                                'CELLNAME': str, 'CONCUNIT': str,
                                'LOG_CONCENTRATION': np.float32,
                                'EXPID': str, 'GROWTH': np.float32})
        t2 = time.time()
        print(df.shape, t2 - t1)
        print("Schema : ", df.dtypes, df.shape)
        # DOSE is the negated log concentration.
        df['DOSE'] = -df['LOG_CONCENTRATION']
        print("New Schema : ", df.dtypes, df.shape)
        df = df.rename(columns={'CELLNAME': 'CELL', 'DRUG_ID': 'DRUG', 'EXPID': 'STUDY'})
        df = df[['SOURCE', 'CELL', 'DRUG', 'DOSE', 'GROWTH', 'STUDY']]
        print("Rename and Update : ", df.dtypes, df.shape)
        print("----------------------------------------------")
# Script entry: run the pandas loading benchmark when the module is executed.
load_single_dose_response_pandas()
|
vibhatha/cylon_applications | bench/dask_setup.py | import os
import dask
import dask.dataframe as dd
from dask.distributed import Client, SSHCluster
import time
import argparse
import math
import subprocess
"""
>>> python dask_setup.py --start_size 100_000_000 \
--step_size 100_000_000 \
--end_size 500_000_000 \
--num_cols 2 \
--stats_file /tmp/dask_dist_join_bench.csv \
--repetitions 3 \
--base_file_path ~/data/cylon_bench \
--parallelism 4 \
--nodes_file /tmp/hostfile \
--total_nodes 1 \
--scheduler_host v-001 \
--python_env /home/vibhatha/venv/ENVCYLON
"""
def get_ips(nodes_file):
    """Read a hostfile and return the host/IP token from each non-empty line.

    Lines are expected to look like ``<host> [slots=...]``; only the first
    whitespace-separated token is kept.

    :param nodes_file: path to the hostfile.
    :return: list of host names / IPs, in file order.
    """
    ips = []
    with open(nodes_file, 'r') as fp:
        for line in fp:
            # Bug fix: the original l.split(' ')[0] kept the trailing newline
            # when a line held only a host name, and emitted junk entries for
            # blank lines. split() with no argument strips both.
            tokens = line.split()
            if tokens:
                ips.append(tokens[0])
    return ips
def start_cluster(ips, scheduler_host, python_env, procs, nodes):
    """Start a Dask scheduler + workers over SSH.

    Launches ``dask-scheduler`` on ``scheduler_host`` and ``procs`` single-
    threaded ``dask-worker`` processes on each of the first ``nodes`` hosts,
    using the given Python virtualenv. Sleeps 15s after each launch stage to
    let the processes come up; no health check is performed.
    """
    print("starting scheduler", flush=True)
    # subprocess.Popen(
    #     ["ssh", "v-001", "/N/u2/d/dnperera/victor/git/cylon/ENV/bin/dask-scheduler", "--interface", "enp175s0f0",
    #      "--scheduler-file", "/N/u2/d/dnperera/dask-sched.json"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Fire-and-forget: the Popen handle is not kept; teardown is via pkill
    # in stop_cluster(). NOTE(review): hard-coded user-specific paths below.
    subprocess.Popen(
        ["ssh", scheduler_host, python_env + "/bin/dask-scheduler", "--scheduler-file",
         "/N/u2/v/vlabeyko/dask-sched.json"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    time.sleep(15)
    for ip in ips[0:nodes]:
        print("starting worker", ip, flush=True)
        # subprocess.Popen(
        #     ["ssh", ip, "/N/u2/d/dnperera/victor/git/cylon/ENV/bin/dask-worker", "v-001:8786", "--interface",
        #      "enp175s0f0", "--nthreads", "1", "--nprocs", str(procs), "--memory-limit", "20GB", "--local-directory",
        #      "/scratch/dnperera/dask/", "--scheduler-file", "/N/u2/d/dnperera/dask-sched.json"], stdout=subprocess.PIPE,
        #     stderr=subprocess.STDOUT)
        subprocess.Popen(
            ["ssh", ip, python_env + "/bin/dask-worker", scheduler_host + ":8786", "--nthreads", "1", "--nprocs",
             str(procs), "--memory-limit", "20GB", "--local-directory", "/scratch/vlabeyko/dask/", "--scheduler-file", "/N/u2/v/vlabeyko/dask-sched.json"],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT)
    time.sleep(15)
def stop_cluster(ips):
    """Kill every dask-worker on the given hosts, then the local scheduler.

    Best effort: pkill exit codes are ignored, and short sleeps give the
    processes time to die.
    """
    for host in ips:
        print("stopping worker", host, flush=True)
        subprocess.run(["ssh", host, "pkill", "-f", "dask-worker"],
                       stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    time.sleep(5)
    print("stopping scheduler", flush=True)
    subprocess.run(["pkill", "-f", "dask-scheduler"],
                   stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    time.sleep(5)
def dask_test_app(scheduler_host):
    """Smoke-test the freshly started cluster with a tiny dask workload.

    Connects to ``<scheduler_host>:8786``, submits a function that builds the
    demo timeseries, filters and aggregates it, and prints the result.
    """
    def func():
        df = dask.datasets.timeseries()
        df2 = df[df.y > 0]
        df3 = df2.groupby('name').x.std()
        computed_df = df3.compute()
        return computed_df
    client = Client(scheduler_host + ':8786')
    print(client)
    # Run the whole pipeline on a worker and block for the result.
    future = client.submit(func)
    result = future.result()
    print(result)
    client.close()
# CLI entry point: parse cluster options, (re)start an SSH-based Dask
# cluster, run a smoke-test app, keep the cluster alive briefly, then stop.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Several of these options (sizes, repetitions, stats file) are accepted
    # for interface parity with the benchmark scripts but unused here.
    parser.add_argument("-i", "--start_size",
                        help="initial data size",
                        type=int)
    parser.add_argument("-e", "--end_size",
                        help="end data size",
                        type=int)
    parser.add_argument("-s", "--step_size",
                        help="Step size",
                        type=int)
    parser.add_argument("-c", "--num_cols",
                        help="number of columns",
                        type=int)
    parser.add_argument("-r", "--repetitions",
                        help="number of experiments to be repeated",
                        type=int)
    parser.add_argument("-f", "--stats_file",
                        help="stats file to be saved",
                        type=str)
    parser.add_argument("-bf", "--base_file_path",
                        help="base file path",
                        type=str)
    parser.add_argument("-p", "--parallelism",
                        help="parallelism",
                        type=int)
    parser.add_argument("-n", "--total_nodes",
                        help="total nodes",
                        type=int)
    parser.add_argument("-nf", "--nodes_file",
                        help="nodes file",
                        type=str)
    parser.add_argument("-sh", "--scheduler_host",
                        help="scheduler host",
                        type=str)
    parser.add_argument("-pe", "--python_env",
                        help="python env",
                        type=str)
    args = parser.parse_args()
    # Echo the configuration so logs are self-describing.
    print("Start Data Size : {}".format(args.start_size))
    print("End Data Size : {}".format(args.end_size))
    print("Step Data Size : {}".format(args.step_size))
    print("Number of Columns : {}".format(args.num_cols))
    print("Number of Repetitions : {}".format(args.repetitions))
    print("Stats File : {}".format(args.stats_file))
    print("Base File Path : {}".format(args.base_file_path))
    print("Total Nodes : {}".format(args.total_nodes))
    print("Parallelism : {}".format(args.parallelism))
    print("Nodes File : {}".format(args.nodes_file))
    print("Scheduler Host : {}".format(args.scheduler_host))
    print("Python ENV : {}".format(args.python_env))
    parallelism = args.parallelism
    TOTAL_NODES = args.total_nodes
    # Spread worker processes evenly; never use more nodes than workers.
    procs = int(math.ceil(parallelism / TOTAL_NODES))
    nodes = min(parallelism, TOTAL_NODES)
    ips = get_ips(args.nodes_file)
    python_env = args.python_env
    scheduler_host = args.scheduler_host
    print(ips)
    # Stop any stale cluster first, start fresh, verify it works, then keep
    # it alive for 4 minutes of manual use before tearing down.
    stop_cluster(ips)
    start_cluster(ips=ips, scheduler_host=scheduler_host, python_env=python_env, procs=procs, nodes=nodes)
    dask_test_app(scheduler_host=scheduler_host)
    time.sleep(240)
    stop_cluster(ips)
|
vibhatha/cylon_applications | torch/mpi_torch_example.py | import os
import sys
import tempfile
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
import socket
from torch.multiprocessing import Process
import mpi4py.rc
mpi4py.rc.initialize = False
from mpi4py import MPI
from torch.nn.parallel import DistributedDataParallel as DDP
class ToyModel(nn.Module):
    """Tiny two-layer MLP (10 -> 10 -> 5) used by the DDP demo."""

    def __init__(self):
        super(ToyModel, self).__init__()
        # Attribute names are part of the state_dict, so they are kept as-is.
        self.net1 = nn.Linear(10, 10)
        self.relu = nn.ReLU()
        self.net2 = nn.Linear(10, 5)

    def forward(self, x):
        hidden = self.net1(x)
        activated = self.relu(hidden)
        return self.net2(activated)
def demo_basic(rank, world_size):
    """Run one DDP forward/backward/step on random data (smoke test).

    Requires the torch process group to be initialized first (see
    ``init_processes``). ``world_size`` is unused inside the body.
    """
    print("Running basic DDP example on rank ", rank)
    # create model and move it to GPU with id rank
    model = ToyModel()
    # CPU DDP here: no device ids are passed.
    ddp_model = DDP(model)
    loss_fn = nn.MSELoss()
    optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)
    optimizer.zero_grad()
    # Single synthetic batch: 20 samples of 10 features -> 5 targets.
    outputs = ddp_model(torch.randn(20, 10))
    labels = torch.randn(20, 5)
    loss_fn(outputs, labels).backward()
    optimizer.step()
def run(rank, size, hostname):
    """Point-to-point demo: rank 0 sends a tensor of ones to rank 1.

    Assumes the process group is already initialized. With more than two
    ranks, every non-zero rank blocks waiting to receive from rank 0.
    """
    print("I am ", rank, "of ",size, "in", hostname)
    tensor = torch.zeros(1)
    if rank == 0:
        tensor += 1
        # Send the tensor to process 1
        dist.send(tensor=tensor, dst=1)
    else:
        # Receive tensor from process 0
        dist.recv(tensor=tensor, src=0)
    print('Rank ', rank, ' has data ', tensor[0])
def init_processes(rank, size, hostname, fn, backend='tcp'):
    """ Initialize the distributed environment. """
    # NOTE(review): the 'tcp' default backend was removed from modern
    # PyTorch; the only caller in this file passes backend='mpi'. The
    # ``hostname`` parameter is accepted but unused.
    dist.init_process_group(backend, rank=rank, world_size=size)
    # mpi4py auto-init is disabled at module level, so initialize explicitly
    # (the MPI backend of torch may already have done so).
    if not MPI.Is_initialized():
        MPI.Init()
    fn(rank, size)
def cleanup():
    """Tear down the torch process group and finalize MPI if still active."""
    dist.destroy_process_group()
    # Bug fix: the original guard was inverted (`if MPI.Is_finalized():
    # MPI.Finalize()`), so a live MPI environment was never finalized and an
    # already-finalized one would be finalized twice.
    if not MPI.Is_finalized():
        MPI.Finalize()
# Entry point for `mpirun python mpi_torch_example.py`: rank/size come from
# the Open MPI environment variables set by the launcher.
if __name__ == "__main__":
    world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
    world_rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
    hostname = socket.gethostname()
    init_processes(world_rank, world_size, hostname, demo_basic, backend='mpi')
    cleanup()
|
vibhatha/cylon_applications | uno/data_processing_cylon.py | <filename>uno/data_processing_cylon.py
import os
import time
import download_util
from pycylon import CylonContext
from pycylon import Table
from pycylon.io import CSVReadOptions
from pycylon.io import read_csv
import numpy as np
import pandas as pd
ctx: CylonContext = CylonContext(config=None, distributed=False)
def load_aggregated_single_response_cylon(target='AUC', min_r2_fit=0.3, max_ec50_se=3.0,
                                          combo_format=False,
                                          rename=True):
    """Cylon twin of the pandas aggregate-response loader (timed).

    Downloads the CANDLE Pilot1 aggregate file on first use, filters rows by
    fit quality, drops null ``target`` rows, and optionally reshapes/renames.
    Prints timings and shapes only; returns ``None`` (benchmark diagnostic).
    """
    url = "https://ftp.mcs.anl.gov/pub/candle/public/benchmarks/Pilot1/combo/combined_single_response_agg"
    # NOTE(review): hard-coded user-specific cache location.
    output_combined_single_response = \
        "/home/vibhatha/data/uno/Pilot1/workload_1/combined_single_response_agg"
    if not os.path.exists(output_combined_single_response):
        download_util.download(url=url, output_file=output_combined_single_response)
    if os.path.exists(output_combined_single_response):
        print(f"Data file : {output_combined_single_response}")
        # 1 GiB blocks, tab-delimited, multi-threaded read.
        csv_read_options = CSVReadOptions().use_threads(True).block_size(1 << 30).with_delimiter(
            "\t")
        t1 = time.time()
        tb: Table = read_csv(ctx, output_combined_single_response, csv_read_options)
        t2 = time.time()
        # Keep only well-fitted dose-response curves.
        tb = tb[(tb['R2fit'] >= min_r2_fit) & (tb['EC50se'] <= max_ec50_se)]
        t3 = time.time()
        table_read_time = t2 - t1
        filter_time = t3 - t2
        tb = tb[['SOURCE', 'CELL', 'DRUG', target, 'STUDY']]
        tb = tb[~tb[target].isnull()]
        print("Cylon ", tb.row_count, tb.column_count, tb.column_names)
        print("Cylon Data Loading Time: ", table_read_time)
        print("Cylon Data Filter Time: ", filter_time)
        if combo_format:
            # Combo layout: a DRUG1/DRUG2 pair, with DRUG2 left empty here.
            tb = tb.rename(columns={'DRUG': 'DRUG1'})
            tb['DRUG2'] = np.nan
            # tb['DRUG2'] = tb['DRUG2'].astype(object)
            tb = tb[['SOURCE', 'CELL', 'DRUG1', 'DRUG2', target, 'STUDY']]
            if rename:
                tb = tb.rename(columns={'SOURCE': 'Source', 'CELL': 'Sample',
                                        'DRUG1': 'Drug1', 'DRUG2': 'Drug2', 'STUDY': 'Study'})
        else:
            if rename:
                tb = tb.rename(columns={'SOURCE': 'Source', 'CELL': 'Sample',
                                        'DRUG': 'Drug', 'STUDY': 'Study'})
def load_single_dose_response_cylon(combo_format=False, fraction=True):
    """Cylon twin of the pandas single-dose-response loader (timed).

    Downloads the CANDLE Pilot1 file on first use, derives ``DOSE`` as the
    negated log concentration, renames columns, and prints shapes/timings.
    ``combo_format`` and ``fraction`` are currently unused. Returns ``None``.
    """
    url = "https://ftp.mcs.anl.gov/pub/candle/public/benchmarks/Pilot1/combo/rescaled_combined_single_drug_growth"
    # NOTE(review): hard-coded user-specific cache location.
    rescaled_combined_single_drug_growth = \
        "/home/vibhatha/data/uno/Pilot1/workload_1/rescaled_combined_single_drug_growth"
    if not os.path.exists(rescaled_combined_single_drug_growth):
        download_util.download(url=url, output_file=rescaled_combined_single_drug_growth)
    if os.path.exists(rescaled_combined_single_drug_growth):
        print("------------------Cylon--------------------")
        print(f"Data file : {rescaled_combined_single_drug_growth}")
        csv_read_options = CSVReadOptions().use_threads(True).block_size(1 << 30).with_delimiter(
            "\t")
        t1 = time.time()
        tb: Table = read_csv(ctx, rescaled_combined_single_drug_growth, csv_read_options)
        t2 = time.time()
        print(tb.shape, t2 - t1)
        print("Schema: ", tb.to_arrow().schema)
        # DOSE is the negated log concentration.
        tb['DOSE'] = -tb['LOG_CONCENTRATION']
        print("New Schema : ", tb.to_arrow().schema, tb.shape)
        columns = {'CELLNAME': 'CELL', 'DRUG_ID': 'DRUG', 'EXPID': 'STUDY'}
        # NOTE(review): unlike the pandas twin, the result is not reassigned —
        # this relies on Table.rename mutating in place (the renamed-column
        # selection below would otherwise fail); confirm against pycylon docs.
        tb.rename(columns)
        tb = tb[['SOURCE', 'CELL', 'DRUG', 'DOSE', 'GROWTH', 'STUDY']]
        print("Rename and Filter : ", tb.to_arrow().schema, tb.shape)
        print("----------------------------------------------")
# Script entry: run the Cylon loading benchmark when the module is executed.
load_single_dose_response_cylon()
|
vibhatha/cylon_applications | bench/cudf_join.py | <reponame>vibhatha/cylon_applications
import time
import pandas as pd
import numpy as np
from bench_util import get_dataframe
import pyarrow as pa
import argparse
import cupy as cp
import pandas as pd
import cudf
import dask_cudf
cp.random.seed(12)
"""
Run benchmark:
>>> python cudf_join.py --start_size 1_000_000 \
--step_size 1_000_000 \
--end_size 10_000_000 \
--num_cols 2 \
--stats_file cudf_join_bench.csv \
--repetitions 1 \
--unique_factor 0.1
"""
def join_op(num_rows: int, num_cols: int, unique_factor: float):
    """Time one cuDF inner join on freshly generated random data.

    Builds two pandas frames with ``get_dataframe``, copies them to the GPU,
    and returns the wall-clock seconds of the merge on the first column only
    (data generation and host->device transfer are excluded from the timing).
    """
    pdf_left = get_dataframe(num_rows=num_rows, num_cols=num_cols, unique_factor=unique_factor,
                             stringify=False)
    pdf_right = get_dataframe(num_rows=num_rows, num_cols=num_cols, unique_factor=unique_factor,
                              stringify=False)
    # NOTE: sort join breaks when loaded data in-memory via Pandas dataframe
    gdf_left = cudf.DataFrame.from_pandas(pdf_left)
    gdf_right = cudf.DataFrame.from_pandas(pdf_right)
    join_col = pdf_left.columns[0]
    cudf_time = time.time()
    merged = gdf_left.merge(gdf_right, on=[join_col], how='inner')
    cudf_time = time.time() - cudf_time
    return cudf_time
def bench_join_op(start: int, end: int, step: int, num_cols: int, repetitions: int,
                  stats_file: str,
                  unique_factor: float):
    """Sweep the cuDF join benchmark over record counts and save the stats.

    For every size in ``range(start, end + step, step)`` the join is timed
    ``repetitions`` times, the mean is printed, and the full table is written
    to ``stats_file`` as CSV.
    """
    results = []
    schema = ["num_records", "num_cols", "cudf"]
    # Validate the sweep parameters up front.
    assert repetitions >= 1
    assert start > 0
    assert step > 0
    assert num_cols > 0
    for records in range(start, end + step, step):
        samples = []
        for _ in range(repetitions):
            elapsed = join_op(num_rows=records, num_cols=num_cols,
                              unique_factor=unique_factor)
            samples.append([elapsed])
        # Column-wise mean over the repetitions.
        times = np.array(samples).sum(axis=0) / repetitions
        print(f"Join Op : Records={records}, Columns={num_cols}, "
              f"Cudf Time : {times[0]}")
        results.append([records, num_cols, times[0]])
    stats_frame = pd.DataFrame(results, columns=schema)
    print(stats_frame)
    stats_frame.to_csv(stats_file)
# CLI entry point for the cuDF join benchmark: parse the sweep parameters,
# echo them for self-describing logs, then run the sweep.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--start_size",
                        help="initial data size",
                        type=int)
    parser.add_argument("-e", "--end_size",
                        help="end data size",
                        type=int)
    parser.add_argument("-d", "--unique_factor",
                        default=0.1,
                        help="random data duplication factor",
                        type=float)
    parser.add_argument("-s", "--step_size",
                        help="Step size",
                        type=int)
    parser.add_argument("-c", "--num_cols",
                        default=2,
                        help="number of columns",
                        type=int)
    parser.add_argument("-r", "--repetitions",
                        default=1,
                        help="number of experiments to be repeated",
                        type=int)
    parser.add_argument("-f", "--stats_file",
                        default="cudf_join_stats.csv",
                        help="stats file to be saved",
                        type=str)
    args = parser.parse_args()
    print(f"Start Data Size : {args.start_size}")
    print(f"End Data Size : {args.end_size}")
    print(f"Step Data Size : {args.step_size}")
    print(f"Data Unique Factor : {args.unique_factor}")
    print(f"Number of Columns : {args.num_cols}")
    print(f"Number of Repetitions : {args.repetitions}")
    print(f"Stats File : {args.stats_file}")
    bench_join_op(start=args.start_size,
                  end=args.end_size,
                  step=args.step_size,
                  num_cols=args.num_cols,
                  repetitions=args.repetitions,
                  stats_file=args.stats_file,
                  unique_factor=args.unique_factor)
|
vibhatha/cylon_applications | horovod/pycylon_horovod_pytorch_example.py | """
Run Program: horovodrun -np 4 python3 pycylon_horovod_pytorch_example.py
"""
import argparse
import os
import socket
import horovod.torch as hvd
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from pycylon import CylonEnv
from pycylon import DataFrame
from pycylon.net import MPIConfig
from pycylon.util.logging import log_level, disable_logging
from sklearn.preprocessing import StandardScaler
log_level(0) # set an arbitrary log level
disable_logging() # disable logging completely
hostname = socket.gethostname()
def setup():
    """Initialize Horovod + a distributed Cylon environment for PyTorch.

    Returns ``(env, device, cuda_available)`` where ``device`` is
    ``'cuda:<rank>'`` when CUDA is present, otherwise ``'cpu'``.
    """
    hvd.init()
    mpi_config = MPIConfig()
    env = CylonEnv(config=mpi_config, distributed=True)
    rank = env.rank
    print(f"Init Process Groups : => [{hostname}]Demo DDP Rank {rank}")
    cuda_available = torch.cuda.is_available()
    # NOTE(review): device string uses the global rank while the pin below
    # uses the Horovod local rank — these differ on multi-node runs.
    device = 'cuda:' + str(rank) if cuda_available else 'cpu'
    if cuda_available:
        # Horovod: pin GPU to local rank.
        torch.cuda.set_device(hvd.local_rank())
        torch.cuda.manual_seed(42)
    return env, device, cuda_available
class Network(nn.Module):
    """Minimal regression head: a 3 -> 1 linear layer under a leaky ReLU."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(3, 1)

    def forward(self, x):
        # leaky_relu keeps a small gradient for negative pre-activations.
        pre_activation = self.linear(x)
        return F.leaky_relu(pre_activation)
def train(epoch, x_train, y_train, ddp_model, loss_fn, optimizer):
    """Run one pass over the paired (x, y) batches, updating the model in place."""
    for features, targets in zip(x_train, y_train):
        print(f"Epoch {epoch}", end='\r')
        outputs = ddp_model(features)
        batch_loss = loss_fn(outputs, targets)
        batch_loss.backward()
        # Step first, then clear gradients ready for the next batch.
        optimizer.step()
        optimizer.zero_grad()
def demo_basic(epochs: int):
    """End-to-end PyCylon + Horovod demo.

    Each rank downloads its own partition of the tutorial CSV data, joins
    the device and usage tables with PyCylon, standardises the joined
    features, and trains a tiny linear model with a Horovod-wrapped SGD
    optimizer.

    :param epochs: number of passes over the (batched) training data.
    """
    env, device, cuda_available = setup()
    rank = env.rank
    print(f"Simple Batch Train => [{hostname}]Demo DDP Rank {rank}")
    # Each rank reads its own pre-partitioned files (1-based rank suffix).
    base_path = "https://raw.githubusercontent.com/cylondata/cylon/main/cpp/src/tutorial/data/"
    user_devices_file = os.path.join(base_path, f'user_device_tm_{rank + 1}.csv')
    user_usage_file = os.path.join(base_path, f'user_usage_tm_{rank + 1}.csv')
    print("Rank[{}] User Device File : {}".format(rank, user_devices_file))
    print("Rank[{}] User Usage File : {}".format(rank, user_usage_file))
    # Load with pandas, then wrap in PyCylon DataFrames for the join.
    user_devices_data = DataFrame(pd.read_csv(user_devices_file))  # read_csv(user_devices_file, sep=',')
    user_usage_data = DataFrame(pd.read_csv(user_usage_file))  # read_csv(user_usage_file, sep=',')
    print(f"Rank [{rank}] User Devices Data Rows:{len(user_devices_data)}, Columns: {len(user_devices_data.columns)}")
    print(f"Rank [{rank}] User Usage Data Rows:{len(user_usage_data)}, Columns: {len(user_usage_data.columns)}")
    print("--------------------------------")
    print("Before Join")
    print("--------------------------------")
    print(user_devices_data[0:5])
    print("-------------------------------------")
    print(user_usage_data[0:5])
    # Hash join: column 0 of the devices table against column 3 of usage.
    join_df = user_devices_data.merge(right=user_usage_data, left_on=[0], right_on=[3], algorithm='hash')
    print("----------------------")
    print("Rank [{}] New Table After Join (5 Records)".format(rank))
    print(join_df[0:5])
    print("----------------------")
    # Select joined feature columns (PyCylon prefixes left/right columns
    # with _x/_y) and restore readable names.
    feature_df = join_df[
        ['_xplatform_version', '_youtgoing_mins_per_month', '_youtgoing_sms_per_month',
         '_ymonthly_mb']]
    feature_df.rename(
        ['platform_version', 'outgoing_mins_per_month', 'outgoing_sms_per_month', 'monthly_mb'])
    if rank == 0:
        print("Data Engineering Complete!!!")
    print("=" * 80)
    print("Rank [{}] Feature DataFrame ".format(rank))
    print(feature_df[0:5])
    print("=" * 80)
    # First three columns are features; the fourth (monthly_mb) is the target.
    data_ar: np.ndarray = feature_df.to_numpy()
    data_features: np.ndarray = data_ar[:, 0:3]
    data_learner: np.ndarray = data_ar[:, 3:4]
    # Fixed 100-row training split; the remainder is held out for testing.
    x_train, y_train = data_features[0:100], data_learner[0:100]
    x_test, y_test = data_features[100:], data_learner[100:]
    x_train = np.asarray(x_train, dtype=np.float32)
    y_train = np.asarray(y_train, dtype=np.float32)
    x_test = np.asarray(x_test, dtype=np.float32)
    y_test = np.asarray(y_test, dtype=np.float32)
    # Standardise features and targets with separate scalers.
    # NOTE(review): the test split is fit_transform'ed with its own statistics
    # instead of being transformed with the training scalers — confirm intended.
    sc = StandardScaler()
    sct = StandardScaler()
    x_train = sc.fit_transform(x_train)
    y_train = sct.fit_transform(y_train)
    x_test = sc.fit_transform(x_test)
    y_test = sct.fit_transform(y_test)
    x_train = torch.from_numpy(x_train).to(device)
    y_train = torch.from_numpy(y_train).to(device)
    x_test = torch.from_numpy(x_test).to(device)
    y_test = torch.from_numpy(y_test).to(device)
    # create model and move it to GPU with id rank
    lr = 0.01  # learning rate
    model = Network()
    loss_fn = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=lr)
    optimizer.zero_grad()
    # By default, Adasum doesn't need scaling up learning rate.
    lr_scaler = 1
    if cuda_available:
        # Move model to GPU.
        model.cuda()
        # If using GPU Adasum allreduce, scale learning rate by local_size.
        if hvd.nccl_built():
            lr_scaler = hvd.local_size()
    # Horovod: scale learning rate by lr_scaler.
    optimizer = optim.SGD(model.parameters(), lr=lr * lr_scaler, momentum=0.01)
    # Horovod: broadcast parameters & optimizer state.
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    hvd.broadcast_optimizer_state(optimizer, root_rank=0)
    # Horovod: (optional) compression algorithm.
    compression = hvd.Compression.fp16
    # Horovod: wrap optimizer with DistributedOptimizer.
    optimizer = hvd.DistributedOptimizer(optimizer,
                                         named_parameters=model.named_parameters(),
                                         compression=compression,
                                         op=hvd.Adasum,
                                         gradient_predivide_factor=1.0)
    if rank == 0:
        print("Training A Dummy Model")
    for epoch in range(epochs):
        train(epoch, x_train, y_train, model, loss_fn, optimizer)
    if rank == 0:
        print("Data Analysis Complete!!!")
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # (short flag, long flag, help text, default, type) for each option.
    for short_opt, long_opt, help_text, default, opt_type in [
        ("-b", "--backend", "example : 'mpi', 'nccl'", 'mpi', str),
        ("-e", "--epochs", "training epochs", 10, int),
        ("-m", "--master_address", "master address for torch distributed runtime", 'localhost', str),
        ("-p", "--port", "torch port for distributed runtime", '12335', str),
    ]:
        parser.add_argument(short_opt, long_opt, help=help_text, default=default, type=opt_type)
    args = parser.parse_args()
    # Only the epoch count is consumed below; backend/master/port are accepted
    # for compatibility with other launchers.
    backend = args.backend
    demo_basic(epochs=args.epochs)
|
vibhatha/cylon_applications | bench/cylon_distributed_join.py | <filename>bench/cylon_distributed_join.py
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import argparse
import os
import time
import numpy as np
import pandas as pd
from pycylon import CylonContext
from pycylon.io import CSVReadOptions
from pycylon.io import read_csv
from pycylon.net import MPIConfig
"""
Run benchmark:
>>> mpirun -n 4 python cylon_distributed_join.py --start_size 1_000_000 \
--step_size 1_000_000 \
--end_size 2_000_000 \
--num_cols 2 \
--stats_file /tmp/dist_sort_join_bench.csv \
--repetitions 1 \
--base_file_path ~/data/cylon_bench \
--algorithm sort
"""
def join_op(ctx, num_rows, base_file_path, algorithm):
    """Time one distributed inner self-join on this rank's data partition.

    Reads the rank-local CSV partition for *num_rows* total records twice
    (left and right sides are the same file) and returns the wall-clock
    seconds spent in ``distributed_join`` only (file reading excluded).
    """
    read_options = CSVReadOptions() \
        .use_threads(True) \
        .block_size(1 << 30)
    partition_dir = "records_{}/parallelism_{}".format(num_rows, ctx.get_world_size())
    partition_file = "distributed_data_file_rank_{}.csv".format(ctx.get_rank())
    data_path = os.path.join(base_file_path, partition_dir, partition_file)
    # Self-join benchmark: both sides read the same partition file.
    left_table = read_csv(ctx, data_path, read_options)
    right_table = read_csv(ctx, data_path, read_options)
    join_key = left_table.column_names[0]
    started = time.time()
    left_table.distributed_join(right_table, join_type='inner', algorithm=algorithm, on=[join_key])
    return time.time() - started
def bench_join_op(ctx, start, end, step, num_cols, algorithm, repetitions, stats_file, base_file_path):
    """Sweep record counts from *start* to *end* (inclusive, by *step*),
    averaging join_op timings over *repetitions* runs; rank 0 prints the
    results and persists them to *stats_file* as CSV."""
    assert repetitions >= 1
    assert start > 0
    assert step > 0
    assert num_cols > 0
    schema = ["num_records", "num_cols", "algorithm", "time(s)"]
    collected = []
    for num_records in range(start, end + step, step):
        samples = [join_op(ctx=ctx, num_rows=num_records,
                           base_file_path=base_file_path, algorithm=algorithm)
                   for _ in range(repetitions)]
        mean_time = np.array(samples).sum() / repetitions
        if ctx.get_rank() == 0:
            print("Join Op : Records={}, Columns={}, Cylon Time : {}".format(num_records, num_cols, mean_time))
            collected.append([num_records, num_cols, algorithm, mean_time])
    if ctx.get_rank() == 0:
        stats = pd.DataFrame(collected, columns=schema)
        print(stats)
        stats.to_csv(stats_file)
if __name__ == '__main__':
    # (short flag, long flag, help text, type) for every CLI option.
    _specs = [
        ("-i", "--start_size", "initial data size", int),
        ("-e", "--end_size", "end data size", int),
        ("-d", "--duplication_factor", "random data duplication factor", float),
        ("-s", "--step_size", "Step size", int),
        ("-c", "--num_cols", "number of columns", int),
        ("-a", "--algorithm", "join algorithm [hash or sort]", str),
        ("-r", "--repetitions", "number of experiments to be repeated", int),
        ("-f", "--stats_file", "stats file to be saved", str),
        ("-bf", "--base_file_path", "base file path", str),
    ]
    parser = argparse.ArgumentParser()
    for short_opt, long_opt, help_text, opt_type in _specs:
        parser.add_argument(short_opt, long_opt, help=help_text, type=opt_type)
    args = parser.parse_args()
    # One MPI-backed Cylon context shared by all benchmark runs.
    mpi_config = MPIConfig()
    ctx = CylonContext(config=mpi_config, distributed=True)
    if ctx.get_rank() == 0:
        # Only rank 0 echoes the configuration.
        for label, value in [
            ("Start Data Size", args.start_size),
            ("End Data Size", args.end_size),
            ("Step Data Size", args.step_size),
            ("Number of Columns", args.num_cols),
            ("Number of Repetitions", args.repetitions),
            ("Join Algorithm", args.algorithm),
            ("Stats File", args.stats_file),
            ("Base File Path", args.base_file_path),
        ]:
            print("{} : {}".format(label, value))
    bench_join_op(ctx=ctx,
                  start=args.start_size,
                  end=args.end_size,
                  step=args.step_size,
                  num_cols=args.num_cols,
                  algorithm=args.algorithm,
                  repetitions=args.repetitions,
                  stats_file=args.stats_file,
                  base_file_path=args.base_file_path)
    ctx.finalize()
|
vibhatha/cylon_applications | bench/generate_distributed_files.py | <reponame>vibhatha/cylon_applications<filename>bench/generate_distributed_files.py<gh_stars>1-10
import argparse
import os
import numpy as np
from bench_util import get_dataframe
from bench_util import line_separator
"""
python3 generate_distributed_files.py --start_size 1_000_000 \
--step_size 1_000_000 \
--end_size 4_000_000 \
--unique_factor 0.1 \
--num_cols 2 \
--file_path ~/data/cylon_bench \
--parallelism 4
"""
def generation_op(start: int, end: int, step: int, num_cols: int, file_path: str,
                  unique_factor: float, parallelism: int):
    """Generate benchmark CSV files for each record count in [start, end].

    For every size, writes one ``single_data_file.csv`` plus *parallelism*
    row-wise splits (``distributed_data_file_rank_<r>.csv``) under
    ``<file_path>/records_<n>/parallelism_<p>/``.

    :param start: first (smallest) record count; must be > 0.
    :param end: last record count (inclusive, stepping by *step*).
    :param step: increment between record counts; must be > 0.
    :param num_cols: number of columns per generated dataframe; must be > 0.
    :param file_path: base output directory (created if missing).
    :param unique_factor: uniqueness factor forwarded to ``get_dataframe``.
    :param parallelism: number of row-wise splits to emit.
    """
    assert start > 0
    assert step > 0
    assert num_cols > 0
    for records in range(start, end + step, step):
        line_separator()
        print("Generating Records : {}".format(records))
        line_separator()
        sequential_file = "single_data_file.csv"
        distributed_file_prefix = "distributed_data_file"
        # FIX: os.makedirs(exist_ok=True) creates the whole nested path in one
        # call, replacing three racy os.path.exists + os.mkdir pairs
        # (check-then-create is a TOCTOU race, and os.mkdir cannot create
        # intermediate directories).
        new_sub_data_dir_path = os.path.join(file_path,
                                             "records_{}".format(str(records)),
                                             "parallelism_{}".format(parallelism))
        os.makedirs(new_sub_data_dir_path, exist_ok=True)
        pdf = get_dataframe(num_rows=records, num_cols=num_cols, unique_factor=unique_factor)
        pdf_splits = np.array_split(pdf, parallelism)
        seq_file_save_path = os.path.join(new_sub_data_dir_path, sequential_file)
        pdf.to_csv(seq_file_save_path, sep=",", index=False)
        for rank in range(parallelism):
            distributed_file_name = distributed_file_prefix + "_rank_{}.csv".format(rank)
            dist_file_save_path = os.path.join(new_sub_data_dir_path, distributed_file_name)
            print(pdf.shape, pdf_splits[rank].shape)
            pdf_splits[rank].to_csv(dist_file_save_path, sep=",", index=False)
if __name__ == '__main__':
    # (short flag, long flag, help text, type) for every CLI option.
    _specs = [
        ("-i", "--start_size", "initial data size", int),
        ("-e", "--end_size", "end data size", int),
        ("-d", "--unique_factor", "random data uniqueness factor", float),
        ("-s", "--step_size", "Step size", int),
        ("-c", "--num_cols", "number of columns", int),
        ("-fp", "--file_path", "file path", str),
        ("-p", "--parallelism", "number of processes", int),
    ]
    parser = argparse.ArgumentParser()
    for short_opt, long_opt, help_text, opt_type in _specs:
        parser.add_argument(short_opt, long_opt, help=help_text, type=opt_type)
    args = parser.parse_args()
    # Echo the configuration before generating anything.
    for label, value in [
        ("Start Data Size", args.start_size),
        ("End Data Size", args.end_size),
        ("Step Data Size", args.step_size),
        ("Data Duplication Factor", args.unique_factor),
        ("Number of Columns", args.num_cols),
        ("File Path", args.file_path),
        ("Parallelism", args.parallelism),
    ]:
        print("{} : {}".format(label, value))
    generation_op(start=args.start_size,
                  end=args.end_size,
                  step=args.step_size,
                  num_cols=args.num_cols,
                  file_path=args.file_path,
                  parallelism=args.parallelism,
                  unique_factor=args.unique_factor)
|
vibhatha/cylon_applications | bench/dask_ssh_cluster_setup.py | <gh_stars>1-10
import dask
import dask.dataframe as dd
from dask.distributed import Client, SSHCluster
import time
# Spin up an SSH-based Dask cluster on two hosts with 16 single-threaded
# worker processes per host, and attach a client to it.
cluster = SSHCluster(["v-001", "v-002"], worker_options={"nthreads": 1, "nprocs": 16})
client = Client(cluster)
# Keep the cluster alive for 100 seconds (e.g. to inspect the dashboard or
# attach external clients), then tear everything down.
time.sleep(100)
client.close()
cluster.shutdown()
|
vibhatha/cylon_applications | bench/dask_distributed_join.py | <filename>bench/dask_distributed_join.py
import os
import dask
import dask.dataframe as dd
from dask.distributed import Client, SSHCluster
import pandas as pd
import time
import argparse
import math
import subprocess
import numpy as np
"""
>>> python dask_setup.py --start_size 100_000_000 \
--step_size 100_000_000 \
--end_size 500_000_000 \
--num_cols 2 \
--stats_file /tmp/dask_dist_join_bench.csv \
--repetitions 3 \
--base_file_path ~/data/cylon_bench \
--parallelism 64 \
--nodes_file /hostfiles/hostfile_victor_8x16 \
--total_nodes 8 \
--scheduler_host v-001 \
--memory_limit_per_worker 4G \
--python_env /home/vibhatha/venv/ENVCYLON
"""
def get_ips(nodes_file):
    """Parse a hostfile and return the host/IP (first token) of each line.

    FIX: the previous ``l.split(' ')[0]`` broke on tab-separated hostfiles and
    emitted ``''`` entries for blank lines; ``str.split()`` with no argument
    splits on any whitespace, and empty lines are now skipped.

    :param nodes_file: path to a hostfile whose lines look like
        ``v-001 slots=16``.
    :return: list of host names / IPs in file order.
    """
    hosts = []
    with open(nodes_file, 'r') as fp:
        for line in fp:
            tokens = line.split()
            if tokens:
                hosts.append(tokens[0])
    return hosts
def start_cluster(ips, scheduler_host, python_env, procs, nodes, memory_limit_per_worker, network_interface):
    """Start a dask scheduler on *scheduler_host* and workers on the first
    *nodes* hosts of *ips*, launched over ssh as background processes.

    :param ips: candidate worker hosts; only the first *nodes* are used.
    :param scheduler_host: host that runs ``dask-scheduler``.
    :param python_env: virtualenv root containing ``bin/dask-scheduler`` and
        ``bin/dask-worker``.
    :param procs: worker processes to start per node.
    :param nodes: number of worker hosts to use.
    :param memory_limit_per_worker: value for ``--memory-limit``, e.g. ``"4G"``.
    :param network_interface: NIC name to bind, or the string ``"none"`` to
        let dask choose. (A falsy value starts nothing, preserving the
        original guard behaviour.)
    """
    scheduler_file = "/N/u2/v/vlabeyko/dask-sched.json"
    print("starting scheduler", flush=True)
    if network_interface == "none":
        subprocess.Popen(
            ["ssh", scheduler_host, python_env + "/bin/dask-scheduler",
             "--scheduler-file", scheduler_file],
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    elif network_interface:
        print("With Network interface : {}".format(network_interface))
        # BUG FIX: the options were previously emitted as
        # ``--scheduler-file --interface <nic> <file>``, so ``--scheduler-file``
        # consumed the literal string ``--interface`` as its value. Each flag
        # now precedes its own value.
        subprocess.Popen(
            ["ssh", scheduler_host, python_env + "/bin/dask-scheduler",
             "--interface", network_interface,
             "--scheduler-file", scheduler_file],
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Give the scheduler time to come up before workers try to connect.
    time.sleep(30)
    for ip in ips[0:nodes]:
        print("starting worker {}, With Processes : {}".format(ip, procs), flush=True)
        worker_cmd = ["ssh", ip, python_env + "/bin/dask-worker", scheduler_host + ":8786",
                      "--nthreads", "1", "--nprocs", str(procs),
                      "--memory-limit", memory_limit_per_worker,
                      "--local-directory", "/scratch/vlabeyko/dask/",
                      "--scheduler-file", scheduler_file]
        if network_interface == "none":
            subprocess.Popen(worker_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        elif network_interface:
            print("With Network interface : {}".format(network_interface))
            subprocess.Popen(worker_cmd + ["--interface", network_interface],
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Allow the workers to register with the scheduler.
    time.sleep(30)
def stop_cluster(ips):
    """Kill the dask-worker processes on every host in *ips*, then the local
    dask-scheduler, pausing after each round so the processes can exit."""
    for host in ips:
        print("stopping worker", host, flush=True)
        subprocess.run(["ssh", host, "pkill", "-f", "dask-worker"],
                       stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        time.sleep(5)
    print("stopping scheduler", flush=True)
    subprocess.run(["pkill", "-f", "dask-scheduler"],
                   stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    time.sleep(5)
def dask_join(scheduler_host, num_rows, base_file_path, num_nodes, parallelism):
    """Time a distributed inner self-join of the partitioned benchmark CSVs.

    Connects to the scheduler at ``<scheduler_host>:8786``, loads the
    rank-partitioned files for *num_rows* records twice (left and right sides
    use the same glob), and returns the seconds spent in merge + compute
    (reads and persist are excluded from the timing).

    :param num_nodes: accepted for call-site compatibility; unused here.
    """
    print("Join Function")
    client = Client(scheduler_host + ':8786')
    print(client)
    sub_path = "records_{}/parallelism_{}".format(num_rows, parallelism)
    distributed_file_prefix = "distributed_data_file_rank_*.csv"
    left_file_path = os.path.join(base_file_path, sub_path, distributed_file_prefix)
    right_file_path = os.path.join(base_file_path, sub_path, distributed_file_prefix)
    df_l = dd.read_csv(left_file_path).repartition(npartitions=parallelism)
    df_r = dd.read_csv(right_file_path).repartition(npartitions=parallelism)
    # BUG FIX: Client.persist returns NEW persisted collections; the previous
    # code discarded the return value, so the timed merge below operated on
    # the un-persisted graphs and re-read the CSVs inside the timing window.
    df_l, df_r = client.persist([df_l, df_r])
    join_column = df_l.columns[0]
    print("left rows", len(df_l), flush=True)
    print("right rows", len(df_r), flush=True)
    join_time = time.time()
    out = df_l.merge(df_r, on=join_column, how='inner', suffixes=('_left', '_right')).compute()
    join_time = time.time() - join_time
    return join_time
def bench_join_op(start, end, step, num_cols, repetitions, stats_file, base_file_path, num_nodes, parallelism):
    """Sweep record counts, averaging dask_join timings over *repetitions*,
    then print and persist the stats as CSV.

    NOTE(review): this relies on the module-level global ``scheduler_host``
    being assigned in the ``__main__`` block rather than taking it as a
    parameter.
    """
    assert repetitions >= 1
    assert start > 0
    assert step > 0
    assert num_cols > 0
    schema = ["num_records", "num_cols", "time(s)"]
    collected = []
    for num_records in range(start, end + step, step):
        samples = [dask_join(scheduler_host=scheduler_host, num_rows=num_records,
                             base_file_path=base_file_path, num_nodes=num_nodes,
                             parallelism=parallelism)
                   for _ in range(repetitions)]
        avg_time = np.array(samples).sum() / repetitions
        print("Join Op : Records={}, Columns={}, Dask Time : {}".format(num_records, num_cols, avg_time))
        collected.append([num_records, num_cols, avg_time])
    stats = pd.DataFrame(collected, columns=schema)
    print(stats)
    stats.to_csv(stats_file)
if __name__ == '__main__':
    # (short flag, long flag, help text, type) for every CLI option.
    _specs = [
        ("-i", "--start_size", "initial data size", int),
        ("-e", "--end_size", "end data size", int),
        ("-s", "--step_size", "Step size", int),
        ("-c", "--num_cols", "number of columns", int),
        ("-r", "--repetitions", "number of experiments to be repeated", int),
        ("-f", "--stats_file", "stats file to be saved", str),
        ("-bf", "--base_file_path", "base file path", str),
        ("-p", "--parallelism", "parallelism", int),
        ("-n", "--total_nodes", "total nodes", int),
        ("-ml", "--memory_limit_per_worker", "memory limit per worker", str),
        ("-ni", "--network_interface", "network interface", str),
        ("-nf", "--nodes_file", "nodes file", str),
        ("-sh", "--scheduler_host", "scheduler host", str),
        ("-pe", "--python_env", "python env", str),
    ]
    parser = argparse.ArgumentParser()
    for short_opt, long_opt, help_text, opt_type in _specs:
        parser.add_argument(short_opt, long_opt, help=help_text, type=opt_type)
    args = parser.parse_args()
    # Echo the full configuration.
    for label, value in [
        ("Start Data Size", args.start_size),
        ("End Data Size", args.end_size),
        ("Step Data Size", args.step_size),
        ("Number of Columns", args.num_cols),
        ("Number of Repetitions", args.repetitions),
        ("Stats File", args.stats_file),
        ("Base File Path", args.base_file_path),
        ("Total Nodes", args.total_nodes),
        ("Memory limit per worker", args.memory_limit_per_worker),
        ("Network Interface", args.network_interface),
        ("Parallelism", args.parallelism),
        ("Nodes File", args.nodes_file),
        ("Scheduler Host", args.scheduler_host),
        ("Python ENV", args.python_env),
    ]:
        print("{} : {}".format(label, value))
    parallelism = args.parallelism
    TOTAL_NODES = args.total_nodes
    # Workers per node (ceiling) and the number of hosts actually needed.
    procs = int(math.ceil(parallelism / TOTAL_NODES))
    nodes = min(parallelism, TOTAL_NODES)
    ips = get_ips(args.nodes_file)
    python_env = args.python_env
    # NOTE: bench_join_op reads this module-level name as a global.
    scheduler_host = args.scheduler_host
    print("NODES : ", ips)
    print("Processes Per Node: ", procs)
    # Cluster lifecycle (start_cluster/stop_cluster) is managed externally.
    bench_join_op(start=args.start_size,
                  end=args.end_size,
                  step=args.step_size,
                  num_cols=args.num_cols,
                  repetitions=args.repetitions,
                  stats_file=args.stats_file,
                  base_file_path=args.base_file_path,
                  num_nodes=args.total_nodes,
                  parallelism=parallelism)
|
vibhatha/cylon_applications | torch/pycylon_torch_setup.py | <filename>torch/pycylon_torch_setup.py
import os
from pycylon.net import MPIConfig
from pycylon import CylonEnv
import torch.distributed as dist
import socket
from pycylon.util.logging import log_level, disable_logging
# PyCylon logging configuration.
log_level(0)  # set an arbitrary log level
# disable_logging() # disable logging completely
# Rendezvous settings for torch.distributed's env:// init method.
master_address = 'localhost'
port = '12355'
# World size and rank come from Open MPI's environment (set by mpirun).
world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
backend = 'mpi'  # torch.distributed backend
hostname = socket.gethostname()
#
# Export the variables torch.distributed expects for env:// initialization.
os.environ['MASTER_ADDR'] = master_address
os.environ['MASTER_PORT'] = port
os.environ["LOCAL_RANK"] = str(rank)
os.environ["RANK"] = str(rank)
os.environ["WORLD_SIZE"] = str(world_size)
# initialize the process group
dist.init_process_group(backend=backend, init_method="env://")
# Create a Cylon environment on the same MPI world as torch.distributed.
mpi_config = MPIConfig()
env = CylonEnv(config=mpi_config, distributed=True)
print(f"Init Process Groups : => [{hostname}]Demo DDP Rank {env.rank}")
dist.destroy_process_group()
# NOTE: calling env.finalize() with or without destroy_process_group raises an
# exception (see the captured log output below).
"""
Init Process Groups : => [vibhatha]Demo DDP Rank 0
Init Process Groups : => [vibhatha]Demo DDP Rank 2
Init Process Groups : => [vibhatha]Demo DDP Rank 3
WARNING: Logging before InitGoogleLogging() is written to STDERR
I0422 12:19:49.765537 2686345 mpi_communicator.cpp:62] calling mpi finalize...
I0422 12:19:49.765576 2686345 mpi_communicator.cpp:66] Is not Finalized
WARNING: Logging before InitGoogleLogging() is written to STDERR
I0422 12:19:49.765537 2686347 mpi_communicator.cpp:62] calling mpi finalize...
I0422 12:19:49.765576 2686347 mpi_communicator.cpp:66] Is not Finalized
WARNING: Logging before InitGoogleLogging() is written to STDERR
I0422 12:19:49.765537 2686348 mpi_communicator.cpp:62] calling mpi finalize...
I0422 12:19:49.765576 2686348 mpi_communicator.cpp:66] Is not Finalized
Init Process Groups : => [vibhatha]Demo DDP Rank 1
WARNING: Logging before InitGoogleLogging() is written to STDERR
I0422 12:19:49.765707 2686346 mpi_communicator.cpp:62] calling mpi finalize...
I0422 12:19:49.765741 2686346 mpi_communicator.cpp:66] Is not Finalized
*** The MPI_Finalize() function was called after MPI_FINALIZE was invoked.
*** This is disallowed by the MPI standard.
*** Your MPI job will now abort.
[vibhatha:2686347] Local abort after MPI_FINALIZE started completed successfully, but am not able to aggregate error messages, and not able to guarantee that all other processes were killed!
--------------------------------------------------------------------------
Primary job terminated normally, but 1 process returned
a non-zero exit code. Per user-direction, the job has been aborted.
--------------------------------------------------------------------------
*** The MPI_Finalize() function was called after MPI_FINALIZE was invoked.
*** This is disallowed by the MPI standard.
*** Your MPI job will now abort.
[vibhatha:2686348] Local abort after MPI_FINALIZE started completed successfully, but am not able to aggregate error messages, and not able to guarantee that all other processes were killed!
*** The MPI_Finalize() function was called after MPI_FINALIZE was invoked.
*** This is disallowed by the MPI standard.
*** Your MPI job will now abort.
[vibhatha:2686345] Local abort after MPI_FINALIZE started completed successfully, but am not able to aggregate error messages, and not able to guarantee that all other processes were killed!
*** The MPI_Finalize() function was called after MPI_FINALIZE was invoked.
*** This is disallowed by the MPI standard.
*** Your MPI job will now abort.
[vibhatha:2686346] Local abort after MPI_FINALIZE started completed successfully, but am not able to aggregate error messages, and not able to guarantee that all other processes were killed!
--------------------------------------------------------------------------
mpirun detected that one or more processes exited with non-zero status, thus causing
the job to be terminated. The first process to do so was:
Process name: [[22319,1],2]
Exit code: 1
"""
|
vibhatha/cylon_applications | bench/modin_distributed_join.py | import os
import modin.pandas as pd
import time
import argparse
import math
import subprocess
import numpy as np
import json
"""
Ray Head: ray start --head --port 12345
Ray Worker: ray start --address='v-015:12345' --redis-password='<PASSWORD>' --num-cpus 32
"""
"""
>>> python modin_distributed_join.py --start_size 1_000_000 \
--step_size 1_000_000 \
--end_size 2_000_000 \
--num_cols 2 \
--stats_file /tmp/dask_dist_join_bench.csv \
--repetitions 1 \
--base_file_path ~/data/cylon_bench \
--parallelism 4 \
--nodes_file /hostfiles/hostfile_victor_8x16 \
--total_nodes 1 \
--scheduler_host v-001 \
--memory_limit_per_worker 4G \
--python_env /home/vibhatha/venv/ENVCYLON
"""
def get_ips(nodes_file):
    """Parse a hostfile and return the host/IP (first token) of each line.

    FIX (kept consistent with the dask benchmark's ``get_ips``): the previous
    ``l.split(' ')[0]`` broke on tab-separated hostfiles and emitted ``''``
    entries for blank lines; ``str.split()`` with no argument splits on any
    whitespace, and empty lines are now skipped.

    :param nodes_file: path to a hostfile whose lines look like
        ``v-001 slots=16``.
    :return: list of host names / IPs in file order.
    """
    hosts = []
    with open(nodes_file, 'r') as fp:
        for line in fp:
            tokens = line.split()
            if tokens:
                hosts.append(tokens[0])
    return hosts
def modin_join(num_rows, base_file_path, parallelism):
    """Time an inner self-join of the single-file benchmark dataset in Modin.

    Loads ``single_data_file.csv`` for *num_rows* records twice and returns
    the wall-clock seconds spent in the merge only (reads excluded).
    """
    print("Join Function")
    data_path = os.path.join(base_file_path,
                             "records_{}/parallelism_{}".format(num_rows, parallelism),
                             "single_data_file.csv")
    print("Reading files...")
    left = pd.read_csv(data_path)
    right = pd.read_csv(data_path)
    join_key = left.columns[0]
    print("left rows", len(left), flush=True)
    print("right rows", len(right), flush=True)
    started = time.time()
    left.merge(right, on=join_key, how='inner', suffixes=('_left', '_right'))
    return time.time() - started
def bench_join_op(start, end, step, num_cols, repetitions, stats_file, base_file_path, parallelism):
    """Sweep record counts, averaging modin_join timings over *repetitions*,
    then print and persist the stats as CSV."""
    assert repetitions >= 1
    assert start > 0
    assert step > 0
    assert num_cols > 0
    schema = ["num_records", "num_cols", "time(s)"]
    collected = []
    for num_records in range(start, end + step, step):
        samples = [modin_join(num_rows=num_records, base_file_path=base_file_path,
                              parallelism=parallelism)
                   for _ in range(repetitions)]
        avg_time = np.array(samples).sum() / repetitions
        print("Join Op : Records={}, Columns={}, Modin Time : {}".format(num_records, num_cols, avg_time))
        collected.append([num_records, num_cols, avg_time])
    stats = pd.DataFrame(collected, columns=schema)
    print(stats)
    stats.to_csv(stats_file)
if __name__ == '__main__':
    # (short flag, long flag, help text, type) for every CLI option; several
    # cluster-related options are accepted for compatibility but unused while
    # cluster attachment stays disabled below.
    _specs = [
        ("-i", "--start_size", "initial data size", int),
        ("-e", "--end_size", "end data size", int),
        ("-s", "--step_size", "Step size", int),
        ("-c", "--num_cols", "number of columns", int),
        ("-r", "--repetitions", "number of experiments to be repeated", int),
        ("-f", "--stats_file", "stats file to be saved", str),
        ("-bf", "--base_file_path", "base file path", str),
        ("-p", "--parallelism", "parallelism", int),
        ("-n", "--total_nodes", "total nodes", int),
        ("-ml", "--memory_limit_per_worker", "memory limit per worker", str),
        ("-ni", "--network_interface", "network interface", str),
        ("-nf", "--nodes_file", "nodes file", str),
        ("-sh", "--scheduler_host", "scheduler host", str),
        ("-pe", "--python_env", "python env", str),
        ("-cl", "--cluster", "cluster mode", str),
    ]
    parser = argparse.ArgumentParser()
    for short_opt, long_opt, help_text, opt_type in _specs:
        parser.add_argument(short_opt, long_opt, help=help_text, type=opt_type)
    args = parser.parse_args()
    # Echo the full configuration.
    for label, value in [
        ("Start Data Size", args.start_size),
        ("End Data Size", args.end_size),
        ("Step Data Size", args.step_size),
        ("Number of Columns", args.num_cols),
        ("Number of Repetitions", args.repetitions),
        ("Stats File", args.stats_file),
        ("Base File Path", args.base_file_path),
        ("Total Nodes", args.total_nodes),
        ("Memory limit per worker", args.memory_limit_per_worker),
        ("Network Interface", args.network_interface),
        ("Parallelism", args.parallelism),
        ("Nodes File", args.nodes_file),
        ("Scheduler Host", args.scheduler_host),
        ("Python ENV", args.python_env),
        ("Cluster", args.cluster),
    ]:
        print("{} : {}".format(label, value))
    parallelism = args.parallelism
    # Tell Modin how many CPUs to use before any dataframe work happens.
    os.environ["MODIN_CPUS"] = str(parallelism)
    TOTAL_NODES = args.total_nodes
    procs = int(math.ceil(parallelism / TOTAL_NODES))
    nodes = min(parallelism, TOTAL_NODES)
    python_env = args.python_env
    scheduler_host = args.scheduler_host
    print("Processes Per Node: ", procs)
    # Ray/Dask cluster attachment is left disabled; Modin runs on its default
    # local engine here.
    bench_join_op(start=args.start_size,
                  end=args.end_size,
                  step=args.step_size,
                  num_cols=args.num_cols,
                  repetitions=args.repetitions,
                  stats_file=args.stats_file,
                  base_file_path=args.base_file_path,
                  parallelism=parallelism)
|
sonali-uttekar01/receipe-app-api | app/core/models.py | <filename>app/core/models.py<gh_stars>0
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager,PermissionsMixin
from django.db.models.fields import CharField
class UserManager(BaseUserManager):
    """Manager for the custom ``User`` model that uses email as the login
    identifier."""

    def create_user(self, email, password=None, **extra_fields):
        """Create and save a user with the given email and password.

        BUG FIX: the parameter was named ``username`` while the body used an
        undefined name ``email`` (guaranteed NameError), and the error path
        called the non-existent ``self.valueError(...)`` instead of raising
        ``ValueError``. No working caller could have relied on the old
        signature, so the parameter is renamed to match the body and Django
        convention.

        :param email: email address of the new user; required.
        :param password: raw password, hashed via ``set_password``.
        :param extra_fields: additional model fields forwarded to the model.
        :raises ValueError: if no email address is provided.
        """
        if not email:
            raise ValueError('Users must have an email address')
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password):
        """Create and save a new superuser with staff privileges."""
        user = self.create_user(email, password)
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        return user
class User(AbstractBaseUser,PermissionsMixin):
    """Custom user model that logs in with an email address instead of a
    username (USERNAME_FIELD = 'email')."""
    # Unique login identifier.
    # NOTE(review): max_length=25 is short for real-world addresses — confirm.
    email= models.EmailField(max_length=25,unique=True)
    # Display name of the user.
    name= CharField(max_length=255)
    # Deactivation flag (soft delete).
    is_active=models.BooleanField(default=True)
    # Grants access to the Django admin site.
    is_staff=models.BooleanField(default=False)
    objects=UserManager()
    USERNAME_FIELD='email'
# Create your models here.
|
shawaj/Adafruit_Python_PlatformDetect | adafruit_platformdetect/constants/__init__.py | # SPDX-FileCopyrightText: 2021 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""Common constants used all over the module."""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.