repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
JuliusHen/gimli | pygimli/viewer/pv/utils.py | import tempfile
import numpy as np
import pygimli as pg
pv = pg.optImport('pyvista', requiredFor="properly visualize 3D data")
def pgMesh2pvMesh(mesh, data=None, label=None):
    """Convert a pyGIMLi mesh (plus optional data) into a pyvista grid.

    pyGIMLi's mesh format is different from pyvista's needs,
    some preparation is necessary.

    Parameters
    ----------
    mesh: pg.Mesh
        Structure generated by pyGIMLi to display.
    data: iterable
        Parameter to distribute to cells/nodes.
    label: str [None]
        Name for the given data array and the active scalar.
        If None, the array added last is set active.
    """
    import os

    # export given mesh temporarily is the easiest and fastest option ATM
    fd, tmp = tempfile.mkstemp(suffix=".vtk")
    os.close(fd)  # mkstemp returns an OPEN file descriptor; close to avoid a leak
    try:
        mesh.exportVTK(tmp)
        grid = pv.read(tmp)
    finally:
        os.remove(tmp)  # don't litter the temp directory

    # check for parameters inside the pg.Mesh
    for key, values in mesh.dataMap():
        if len(values) == mesh.cellCount():
            grid.cell_arrays[key] = np.asarray(values)
        elif len(values) == mesh.nodeCount():
            grid.point_arrays[key] = np.asarray(values)

    # check the given data as well
    try:
        if data is not None:
            if len(data) == mesh.cellCount():
                grid.cell_arrays[label] = np.asarray(data)
            elif len(data) == mesh.nodeCount():
                grid.point_arrays[label] = np.asarray(data)
            else:
                pg.warn("Given data fits neither cell count nor node count:")
                pg.warn("{} vs. {} vs. {}".format(len(data), mesh.cellCount(),
                                                 mesh.nodeCount()))
    except Exception as e:
        print(e)
        pg.error("fix pyvista bindings")

    if label is None:
        # use the data array that was added last
        label = grid.array_names[-1]
    elif label not in grid.array_names:
        pg.warn("Given label '{}' was not found.".format(label))
        label = grid.array_names[-1]

    grid.set_active_scalars(label)
    return grid
|
JuliusHen/gimli | pygimli/testing/test_RegionManager.py | <filename>pygimli/testing/test_RegionManager.py<gh_stars>100-1000
#!/usr/bin/env python
"""
"""
import unittest
import time
import numpy as np
import pygimli as pg
import pygimli.meshtools as mt
from pygimli.frameworks import MeshModelling
class TestMod(MeshModelling):
    """Forward operator holding two copies of the same mesh as regions 1 and 2."""

    def __init__(self, mesh, verbose=True):
        super().__init__()
        self.meshlist = []
        for idx in range(2):
            marker = idx + 1
            # tag every cell with the current region marker before copying
            for cell in mesh.cells():
                cell.setMarker(marker)
            self.meshlist.append(pg.Mesh(mesh))
            self.regionManager().setMesh(self.meshlist[idx])
            self.regionManager().addRegion(marker, self.meshlist[idx])
            self.regionManager().region(marker).setConstraintType(1)

        # self.setMesh(self.meshlist[0], ignoreRegionManager=True)
class TestRM(unittest.TestCase):
    """Tests for the region manager: constraints and z-weight distribution."""

    def test_Constraints(self):
        """Build constraints with type 1, then rebuild after switching to type 2."""
        grid = pg.createGrid(x=[0., 1., 2.], y=[0., 1., 2.])
        fop = TestMod(grid)
        fop.createConstraints()
        # pg.solver.showSparseMatrix(fop.constraints(), full=True)
        fop.regionManager().setConstraintType(2)
        fop.createConstraints()
        # pg.solver.showSparseMatrix(fop.constraints(), full=True)

    def test_zweight(self):
        """Single-mesh z-weight distribution (checks currently disabled)."""
        mesh = pg.meshtools.createGrid(x=np.arange(0, 4), y=np.arange(0, 3))
        # fop = pg.Modelling()
        # fop.setMesh(mesh)
        # fop.createConstraints()
        # rm = fop.regionManager()
        # rm.addRegion(1, mesh, 0)
        # rm.addRegion(2, mesh, 0)
        # rm.setZWeight(0.1)
        # # zWeight should be forwarded to every region
        # self.assertTrue(np.isclose(rm.region(0).zWeight(), 0.1))
        # self.assertTrue(np.isclose(rm.region(1).zWeight(), 0.1))
        # self.assertTrue(np.isclose(rm.region(2).zWeight(), 0.1))
        # w0 = rm.region(0).constraintWeights()
        # w1 = rm.region(1).constraintWeights()
        # w2 = rm.region(2).constraintWeights()
        # # actual constraint weight values should reflect the zWeight
        # self.assertTrue(np.isclose(np.min(w0), 0.1))
        # self.assertTrue(np.allclose(w0, w1))
        # self.assertTrue(np.allclose(w0, w2))

    def test_zweight_2meshes(self):
        """z-weight distribution over two merged meshes with markers 0 and 1."""
        lower = pg.meshtools.createGrid(x=np.arange(0, 4), y=np.arange(0, 3))
        lower.setCellMarkers(np.zeros(lower.cellCount(), dtype=int))

        upper = pg.Mesh(lower)
        upper.setCellMarkers(np.ones(lower.cellCount(), dtype=int))

        together = pg.meshtools.merge2Meshes(
            lower,
            upper.translate(pg.Vector([0., 0., 1.])))

        fop = pg.Modelling()
        fop.setMesh(together)
        fop.createConstraints()

        rm = fop.regionManager()
        rm.setZWeight(0.1)
        # zWeight must be distributed to both regions
        self.assertTrue(np.isclose(rm.region(0).zWeight(), 0.1))
        self.assertTrue(np.isclose(rm.region(1).zWeight(), 0.1))

        w0 = rm.region(0).constraintWeights()
        w1 = rm.region(1).constraintWeights()
        # actual constraint weight values reflect the zWeight ...
        self.assertTrue(np.isclose(np.min(w0), 0.1))
        # ... identically in both regions
        self.assertTrue(np.allclose(w0, w1))
if __name__ == '__main__':
    # run the test suite when executed directly
    unittest.main()
|
JuliusHen/gimli | pygimli/physics/ert/importData.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import numpy as np
import pygimli as pg
def load(fileName, verbose=False, **kwargs):
    """Shortcut to load ERT data.

    Import Data and try to assume the file format.
    Additionally to unified data format we support the wide-spread res2dinv
    format as well as ASCII column files generated by the processing software
    of various instruments (ABEM LS, Syscal Pro, Resecs, ?)

    If this fails, install pybert and use its auto importer pybert.importData.

    Parameters
    ----------
    fileName: str
    verbose: bool [False]
        Give more information on what is tried.

    Returns
    -------
    data: pg.DataContainer
    """
    data = pg.load(fileName)
    if isinstance(data, pg.DataContainerERT):
        return data

    # narrow except: don't swallow KeyboardInterrupt/SystemExit
    try:
        pg.info("could not read unified data format for ERT ... try res2dinv")
        return importRes2dInv(fileName)
    except Exception:
        pg.info("could not read res2dinv ... try Ascii columns")

    try:
        return importAsciiColumns(fileName)
    except Exception as e:
        pg.info("Failed importing Ascii column file. Consider using pybert.")
        pg.info(e)

    # last resort: the optional pybert auto importer
    if verbose:
        pg.info("Try to import using pybert .. if available")
    pb = pg.optImport('pybert')
    data = pb.loadData(fileName)

    if isinstance(data, pg.DataContainerERT):
        return data

    pg.critical("Can't import ERT data file.", fileName)
def importRes2dInv(filename, verbose=False, return_header=False):
    """Read res2dinv format

    Parameters
    ----------
    filename : str
    verbose : bool [False]
    return_header : bool [False]

    Returns
    -------
    pg.DataContainerERT and (in case of return_header=True)
    header dictionary

    Format
    ------
        str - title
        float - unit spacing [m]
        int - Array Number (1-Wenner, 3-Dipole-dipole atm only)
        int - Number of Datapoints
        float - x-location given in terms of first electrode
            use 1 if mid-point location is given
        int - 0 for no IP, use 1 if IP present
        str - Phase Angle if IP present
        str - mrad if IP present
        0,90.0 - if IP present
        dataBody
    """
    def getNonEmptyRow(i, comment='#'):
        """Return the next line from iterator *i* that is no comment line."""
        s = next(i)
        # '==' instead of 'is': string identity is an interning artifact
        while s[0] == comment:
            s = next(i)
        return s.split('\r\n')[0]
    # def getNonEmptyRow(...)

    with open(filename, 'r') as fi:
        content = fi.readlines()

    it = iter(content)
    header = {}
    header['name'] = getNonEmptyRow(it, comment=';')
    header['spacing'] = float(getNonEmptyRow(it, comment=';'))
    typrow = getNonEmptyRow(it, comment=';')
    typ = int(typrow.rstrip('\n').rstrip('R').rstrip('L'))

    if typ == 11:
        # independent electrode positions
        header['subtype'] = int(getNonEmptyRow(it, comment=';'))
        header['dummy'] = getNonEmptyRow(it, comment=';')
        # NOTE(review): isR is only read for typ == 11 although it is used
        # below for typ 12/13 as well -- confirm against format spec
        isR = int(getNonEmptyRow(it, comment=';'))

    nData = int(getNonEmptyRow(it, comment=';'))
    xLoc = float(getNonEmptyRow(it, comment=';'))
    hasIP = int(getNonEmptyRow(it, comment=';'))

    if hasIP:
        header['ipQuantity'] = getNonEmptyRow(it, comment=';')
        header['ipUnit'] = getNonEmptyRow(it, comment=';')
        header['ipData'] = getNonEmptyRow(it, comment=';')
        ipline = header['ipData'].rstrip('\n').rstrip('\r').split(' ')
        if len(ipline) > 2:  # obviously spectral data?
            header['ipNumGates'] = int(ipline[0])
            header['ipDelay'] = float(ipline[1])
            header['onTime'] = float(ipline[-2])
            header['offTime'] = float(ipline[-1])
            header['ipDT'] = np.array(ipline[2:-2], dtype=float)
            header['ipGateT'] = np.cumsum(np.hstack((header['ipDelay'],
                                                     header['ipDT'])))

    data = pg.DataContainerERT()
    data.resize(nData)

    if typ == 9 or typ == 10:
        raise Exception("Don't know how to read:" + str(typ))

    if typ == 11 or typ == 12 or typ == 13:  # mixed array
        res = pg.Vector(nData, 0.0)
        ip = pg.Vector(nData, 0.0)
        specIP = []

        for i in range(nData):
            vals = getNonEmptyRow(it, comment=';').replace(',', ' ').split()

            # first value is the number of electrodes in this row
            if int(vals[0]) == 4:
                eaID = data.createSensor(pg.Pos(float(vals[1]),
                                                float(vals[2])))
                ebID = data.createSensor(pg.Pos(float(vals[3]),
                                                float(vals[4])))
                emID = data.createSensor(pg.Pos(float(vals[5]),
                                                float(vals[6])))
                enID = data.createSensor(pg.Pos(float(vals[7]),
                                                float(vals[8])))
            elif int(vals[0]) == 3:
                eaID = data.createSensor(pg.Pos(float(vals[1]),
                                                float(vals[2])))
                ebID = -1
                emID = data.createSensor(pg.Pos(float(vals[3]),
                                                float(vals[4])))
                enID = data.createSensor(pg.Pos(float(vals[5]),
                                                float(vals[6])))
            elif int(vals[0]) == 2:
                eaID = data.createSensor(pg.Pos(float(vals[1]),
                                                float(vals[2])))
                ebID = -1
                emID = data.createSensor(pg.Pos(float(vals[3]),
                                                float(vals[4])))
                enID = -1
            else:
                raise Exception('dont know how to handle row', vals[0])

            res[i] = float(vals[int(vals[0])*2+1])

            if hasIP:
                ipCol = int(vals[0])*2+2
                ip[i] = float(vals[ipCol])
                if 'ipNumGates' in header:
                    specIP.append(vals[ipCol:])

            data.createFourPointData(i, eaID, ebID, emID, enID)

        if isR:
            data.set('r', res)
        else:
            data.set('rhoa', res)

        if hasIP:
            data.set('ip', ip)
            if 'ipNumGates' in header:
                A = np.array(specIP, dtype=float)
                # clip obviously broken gate values
                A[A > 1000] = -999
                A[A < -1000] = -999
                for i in range(header['ipNumGates']):
                    data.set('ip'+str(i+1), A[:, i])

        data.sortSensorsX()
        data.sortSensorsIndex()
        if return_header:
            return data, header
        else:
            return data

    # amount of values per column per typ
    nntyp = [0, 3, 3, 4, 3, 3, 4, 4, 3, 0, 0, 8, 10]
    nn = nntyp[typ] + hasIP

    dataBody = np.zeros((nn, nData))
    for i in range(nData):
        vals = getNonEmptyRow(it, comment=';').replace(',', ' ').split()
        dataBody[:, i] = np.array(vals, dtype=float)

    XX = dataBody[0]  # x-location
    EL = dataBody[1]  # electrode spacing
    SP = pg.Vector(nData, 1.0)  # separation factor (column only for some types)

    if nn - hasIP == 4:
        SP = dataBody[2]

    AA = None
    BB = None
    NN = None
    MM = None

    if typ == 1:  # Wenner
        AA = XX - xLoc * EL * 1.5
        MM = AA + EL
        NN = MM + EL
        BB = NN + EL
    elif typ == 2:  # Pole-Pole
        AA = XX - xLoc * EL * 0.5
        MM = AA + EL
    elif typ == 3:  # Dipole-Dipole
        # NOTE: the original contained this branch twice; the duplicate,
        # unreachable copy was removed.
        AA = XX - xLoc * EL * (SP / 2. + 1.)
        BB = AA + EL
        MM = BB + SP * EL
        NN = MM + EL
    elif typ == 4:  # WENNER-BETA
        AA = XX - xLoc * EL * 1.5
        BB = AA + EL
        MM = BB + EL
        NN = MM + EL
    elif typ == 5:  # WENNER-GAMMA
        AA = XX - xLoc * EL * 1.5
        MM = AA + EL
        BB = MM + EL
        NN = BB + EL
    elif typ == 6:  # POLE-DIPOLE
        AA = XX - xLoc * SP * EL - (SP - 1.) * (SP < 0.) * EL
        MM = AA + SP * EL
        NN = MM + pg.sign(SP) * EL
    elif typ == 7:  # SCHLUMBERGER
        AA = XX - xLoc * EL * (SP + 0.5)
        MM = AA + SP * EL
        NN = MM + EL
        BB = NN + SP * EL
    else:
        raise Exception('Datatype ' + str(typ) + ' not yet supported')

    for i in range(len(AA)):
        # missing electrodes (pole arrays) are encoded as index -1
        if AA is not None:
            eaID = data.createSensor(pg.Pos(AA[i], 0.0))
        else:
            eaID = -1

        if BB is not None:
            ebID = data.createSensor(pg.Pos(BB[i], 0.0))
        else:
            ebID = -1

        if MM is not None:
            emID = data.createSensor(pg.Pos(MM[i], 0.0))
        else:
            emID = -1

        if NN is not None:
            enID = data.createSensor(pg.Pos(NN[i], 0.0))
        else:
            enID = -1

        data.createFourPointData(i, eaID, ebID, emID, enID)

    data.set('rhoa', dataBody[nn - hasIP - 1])
    if hasIP:
        data.set('ip', dataBody[nn - 1])

    data.sortSensorsX()
    if return_header:
        return data, header
    else:
        return data
# def importRes2dInv(...)
def importAsciiColumns(filename, verbose=False, return_header=False):
    """Import any ERT data file organized in columns with column header

    Input can be:
    * Terrameter LS or SAS Ascii Export format, e.g.
    Time MeasID DPID Channel A(x) A(y) A(z) B(x) B(y) B(z) M(x) M(y) M(z) \
    N(x) N(y) N(z) F(x) F(y) F(z) Note I(mA) Uout(V) U(V) SP(V) R(O) \
    Var(%) Rhoa Cycles Pint Pext(V) T(°C) Lat Long
    2016-09-14 07:01:56 73 7 1 8 1 1 20 1 1 12 1 1 \
    16 1 1 14 1 2.076 99.8757 107.892 0.0920761 0 0.921907 \
    0.196302 23.17 1 12.1679 12.425 42.1962 0 0

    * Resecs Output format
    """
    data = pg.DataContainerERT()
    header = {}
    with open(filename, 'r', encoding='iso-8859-15') as fi:
        content = fi.readlines()
        d = readAsDictionary(content, sep='\t')
        if len(d) < 2:
            # not tab-separated, retry with whitespace separation
            d = readAsDictionary(content)

        nData = len(next(iter(d.values())))
        data.resize(nData)

        # detect the instrument-specific electrode-column naming
        if 'Spa.1' in d:  # Syscal Pro
            abmn = ['Spa.1', 'Spa.2', 'Spa.3', 'Spa.4']
            if verbose:
                pg.debug("detected Syscalfile format")
        elif 'A(x)' in d:  # ABEM Terrameter
            abmn = ['A', 'B', 'M', 'N']
            if verbose:
                pg.debug("detected ABEM file format")
        elif 'xA' in d:  # Workbench TX2 processed data
            abmn = ['xA', 'xB', 'xM', 'xN']
            if verbose:
                pg.debug("detected Workbench file format")
        elif 'C1(x)' in d or 'C1(xm)' in d:  # Resecs
            abmn = ['C1', 'C2', 'P1', 'P2']
            if verbose:
                pg.debug("detected RESECS file format")
        else:
            pg.debug("no electrode positions found!")
            pg.debug("Keys are:", d.keys())
            raise Exception("No electrode positions found!")

        # create sensors from whatever coordinate columns exist (3D/2D/1D)
        for i in range(nData):
            if abmn[0]+'(z)' in d:
                eID = [data.createSensor([d[se+'(x)'][i], d[se+'(y)'][i],
                                          d[se+'(z)'][i]]) for se in abmn]
            elif abmn[0]+'(zm)' in d:
                eID = [data.createSensor([d[se+'(xm)'][i], d[se+'(ym)'][i],
                                          d[se+'(zm)'][i]]) for se in abmn]
            elif abmn[0]+'(y)' in d:
                eID = [data.createSensor([d[se+'(x)'][i], d[se+'(y)'][i],
                                          0.]) for se in abmn]
            elif abmn[0]+'(ym)' in d:
                eID = [data.createSensor([d[se+'(xm)'][i], d[se+'(ym)'][i],
                                          0.]) for se in abmn]
            elif abmn[0]+'(x)' in d:
                eID = [data.createSensor([d[se+'(x)'][i], 0.,
                                          0.]) for se in abmn]
            elif abmn[0]+'(xm)' in d:
                eID = [data.createSensor([d[se+'(xm)'][i], 0.,
                                          0.]) for se in abmn]
            else:
                eID = [data.createSensor([d[se][i], 0., 0.]) for se in abmn]

            data.createFourPointData(i, *eID)

        # map instrument-specific column names to unified token names
        tokenmap = {'I(mA)': 'i', 'I': 'i', 'In': 'i', 'Vp': 'u',
                    'VoltageV': 'u', 'U': 'u', 'U(V)': 'u', 'UV': 'u',
                    'R(Ohm)': 'r', 'RO': 'r', 'R(O)': 'r', 'Res': 'r',
                    'Rho': 'rhoa', 'AppROhmm': 'rhoa', 'Rho-a(Ohm-m)': 'rhoa',
                    'Rho-a(Om)': 'rhoa',
                    'Var(%)': 'err', 'D': 'err', 'Dev.': 'err', 'Dev': 'err',
                    'M': 'ma', 'P': 'ip', 'IP sum window': 'ip',
                    'Time': 't'}
        # Unit conversions (mA,mV,%), partly automatically assumed
        unitmap = {'I(mA)': 1e-3, 'Var(%)': 0.01,  # ABEM
                   'U': 1e-3, 'I': 1e-3, 'D': 0.01,  # Resecs
                   'Dev.': 0.01, 'In': 1e-3, 'Vp': 1e-3}  # Syscal

        abmn = ['a', 'b', 'm', 'n']
        if 'Cycles' in d:
            d['stacks'] = d['Cycles']

        for key in d.keys():
            vals = np.asarray(d[key])
            if key.startswith('IP sum window'):  # there is a trailing number
                key = 'IP sum window'  # apparently not working
            if (np.issubdtype(vals.dtype, np.floating) or
                    np.issubdtype(vals.dtype, np.signedinteger)):
                if key in tokenmap:  # use the standard (i, u, rhoa) key
                    if key not in abmn:
                        if verbose:
                            pg.debug("Setting", tokenmap[key], "from", key)
                        data.set(tokenmap[key],
                                 vals * unitmap.get(key, 1.0))
                else:  # use the original key if not XX(x) etc.
                    if not re.search('([x-z])', key) and key not in abmn:
                        data.set(key.replace(' ', '_'), d[key])

        r = data('u') / data('i')
        # BUGFIX: hasattr(d, 'R(0)') on a dict was always False (and a typo),
        # so this consistency check never ran; check for the key instead.
        if 'R(O)' in d:
            if np.linalg.norm(r-d['R(O)']) < 1e4:  # no idea what's that for
                data.set('r', r)
            else:
                pg.debug("Warning! File inconsistent")

    data.sortSensorsX()
    if return_header:
        return data, header
    else:
        return data
# def importAsciiColumns(...)
def readAsDictionary(content, token=None, sep=None):  # obsolete due to numpy?
    """Read list of strings from a file as column separated dictionary.

    e.g.
    token1 token2 token3 token4
    va1    va2    val3   val4
    va1    va2    val3   val4
    va1    va2    val3   val4

    Parameters
    ----------
    content: [string]
        List of strings read from file:
        e.g.
        with open(filename, 'r') as fi:
            content = fi.readlines()
    token: [string]
        If given the tokens will be the keys of the resulting dictionary.
        When token is None, tokens will be the first row values.
        When token is a empty list, the tokens will be autonamed to
        'col' + str(ColNumber)
    sep: string
        Column separator; None (default) splits on any whitespace.

    Returns
    -------
    data: dict
        Dictionary of all data. Values are floats where parseable, a lone
        '-' becomes 0.0, anything else is kept as string.
    """
    data = dict()

    if token is None:
        # first row carries the column names
        header = content[0].splitlines()[0].split(sep)
        token = [tok.lstrip() for tok in header]

    # note: the first row is always treated as header and skipped
    for i, row in enumerate(content[1:]):
        vals = row.splitlines()[0].split(sep)
        for j, v in enumerate(vals):
            v = v.replace(',', '.')  # tolerate decimal comma
            if len(token) < j+1:
                # more columns than header entries: autoname the rest
                token.append('col' + str(j))
            if token[j] not in data:
                data[token[j]] = [None] * (len(content)-1)
            try:
                data[token[j]][i] = float(v)
            except ValueError:
                if v == '-':
                    # a lone dash marks a missing/zero value
                    v = 0.0
                data[token[j]][i] = v
    return data
|
JuliusHen/gimli | doc/examples/1_meshing/plot_hybrid-mesh-2d.py | #!/usr/bin/env python
# encoding: utf-8
r"""
Building a hybrid mesh in 2D
----------------------------
In some cases, the modelling domain may require flexibility in one region and
equidistant structure in another. In this short example, we demonstrate how to
accomplish this for a two-dimensional mesh consisting of a region with regularly
spaced quadrilaterals and a region with unstructured triangles."""
###############################################################################
# We start by importing numpy, matplotlib and pygimli with its required components.
import numpy as np
import pygimli as pg
import pygimli.meshtools as mt
###############################################################################
# We continue by building a regular grid and assign the marker 2 to all cells.
# regular grid spanning the lower part of the domain
xmin, xmax = 0., 50.
zmin, zmax = -50., -25.
xreg = np.linspace(xmin, xmax, 13)
zreg = np.linspace(zmin, zmax, 13)
mesh1 = mt.createGrid(xreg, zreg, marker=2)
mesh1.setCellMarkers([2]*mesh1.cellCount())
print(mesh1)
###############################################################################
# Next, we build an unstructured region on top by creating the polygon and
# calling triangle via pygimli's TriangleWrapper.
poly = pg.Mesh(2)  # empty 2d mesh
nStart = poly.createNode(xmin, zmax, 0.0)
nA = nStart
# bottom edge of the polygon follows the grid nodes so both meshes match
for x in xreg[1:]:
    nB = poly.createNode(x, zmax, 0.0)
    poly.createEdge(nA, nB)
    nA = nB
z2 = 0.
# close the polygon via the top (z = 0) corners back to the start node
nA = poly.createNode(xmax, z2, 0.0)
poly.createEdge(nB, nA)
nB = poly.createNode(xmin, z2, 0.0)
poly.createEdge(nA, nB)
poly.createEdge(nB, nStart)
mesh2 = mt.createMesh(poly, quality=31)
mesh2.setCellMarkers([1]*mesh2.cellCount())
print(mesh2)
###############################################################################
# Finally, the grid and the unstructured mesh can be merged to single mesh for
# further modelling.
mesh3 = mt.mergeMeshes([mesh1, mesh2])
###############################################################################
# Of course, you can treat the hybrid mesh like any other mesh and append a
# triangle boundary for example with the function
# :py:func:`pygimli.meshtools.grid.appendTriangleBoundary`.
mesh = mt.appendTriangleBoundary(mesh3, xbound=100., ybound=100., quality=31, smooth=True,
                                 marker=3, isSubSurface=True, addNodes=5)
pg.show(mesh, markers=True, showMesh=True)
pg.wait()
|
JuliusHen/gimli | pygimli/testing/test_SparseMatrix.py | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# write a correct test!
import unittest
import pygimli as pg
import numpy as np
class TestSparseMatrix(unittest.TestCase):
    """Tests for pg sparse matrix types and their conversions."""

    def test_Convert(self):
        """Round-trip SparseMapMatrix <-> SparseMatrix <-> scipy csr/coo."""
        colIds = range(10)
        rowIds = range(10)
        vals = np.ones(10)
        # Construct SparseMap Matrix from python arrays
        A = pg.matrix.SparseMapMatrix(colIds, rowIds, vals)
        # Construct SparseMap -> CRS (compressed row storage)
        S = pg.matrix.SparseMatrix(A)
        # Construct CRS -> SparseMap
        A2 = pg.matrix.SparseMapMatrix(S)
        # all should by identity matrix
        np.testing.assert_equal(A2.getVal(1, 1), 1.0)
        np.testing.assert_equal(sum(S * np.ones(S.cols())), S.rows())
        np.testing.assert_equal(sum(A2 * np.ones(A2.cols())), A2.rows())
        # non-square shapes must survive the conversions as well
        MAP1 = pg.matrix.SparseMapMatrix(r=3, c=15)
        CSR = pg.matrix.SparseMatrix(MAP1)
        MAP2 = pg.matrix.SparseMapMatrix(CSR)
        v3 = pg.Vector(3)
        v15 = pg.Vector(15)
        np.testing.assert_equal((MAP1*v15).size(), 3)
        np.testing.assert_equal((MAP1.transMult(v3)).size(), 15)
        np.testing.assert_equal((CSR*v15).size(), 3)
        np.testing.assert_equal((CSR.transMult(v3)).size(), 15)
        np.testing.assert_equal(MAP1.cols(), MAP2.cols())
        np.testing.assert_equal(CSR.cols(), MAP1.cols())
        np.testing.assert_equal(CSR.rows(), MAP1.rows())
        np.testing.assert_equal(MAP1.rows(), MAP2.rows())
        # testing SparseMatrix to Numpy
        mm = pg.matrix.SparseMapMatrix(r=4, c=5)
        check_rows = [0, 0, 1, 2, 3]
        check_cols = [0, 1, 2, 3, 4]
        check_vals = np.array([1.0, 3, np.pi, 1e-12, -1.12345e13])
        for i in range(len(check_rows)):
            mm.addVal(check_rows[i], check_cols[i], check_vals[i])
        #pg.solver.showSparseMatrix(mm, full=True)
        # expected CRS/CSC index structures of mm
        check_csr_rows = [0, 1, 2, 3, 4]
        check_csr_colPtr = [0, 2, 3, 4, 5]
        check_csc_cols = [0, 0, 1, 2, 3]
        check_csc_rowptr = [0, 1, 2, 3, 4, 5]
        r1, c1, v1 = pg.utils.sparseMatrix2Array(mm)
        np.testing.assert_allclose(r1, check_csr_rows)
        np.testing.assert_allclose(c1, check_csr_colPtr)
        np.testing.assert_allclose(v1, check_vals)
        sciA1 = pg.utils.sparseMatrix2csr(pg.matrix.SparseMatrix(mm))
        np.testing.assert_equal(sciA1.indices, check_csr_rows)
        np.testing.assert_equal(sciA1.indptr, check_csr_colPtr)
        sciA1 = pg.utils.sparseMatrix2csr(mm)
        np.testing.assert_equal(sciA1.indices, check_csr_rows)
        np.testing.assert_equal(sciA1.indptr, check_csr_colPtr)
        r2, c2, v2 = pg.utils.sparseMatrix2Array(pg.matrix.SparseMatrix(mm),
                                                 getInCRS=False)
        np.testing.assert_allclose(r2, check_rows)
        np.testing.assert_allclose(c2, check_cols)
        np.testing.assert_allclose(v2, check_vals)
        A1 = pg.matrix.SparseMapMatrix(colIds, rowIds, vals)
        A2 = pg.matrix.SparseMapMatrix(colIds, rowIds, vals)
        A1 += A2
        # csr conversion must agree for CRS and map representation
        sciA1 = pg.utils.sparseMatrix2csr(pg.matrix.SparseMatrix(mm))
        sciA2 = pg.utils.sparseMatrix2csr(mm)
        np.testing.assert_equal(len(sciA1.data), mm.size())
        np.testing.assert_equal(sciA1.data, sciA2.data)
        np.testing.assert_equal(sciA1.indices, sciA2.indices)
        np.testing.assert_equal(sciA1.indptr, sciA2.indptr)
        # coo conversion must agree for CRS and map representation
        sciA1 = pg.utils.sparseMatrix2coo(pg.matrix.SparseMatrix(mm))
        sciA2 = pg.utils.sparseMatrix2coo(mm)
        np.testing.assert_equal(len(sciA1.data), mm.size())
        np.testing.assert_equal(sciA1.data, sciA2.data)
        np.testing.assert_equal(sciA1.row, sciA2.row)
        np.testing.assert_equal(sciA1.col, sciA2.col)
        ### toSparseMatrix
        sciCSR = pg.utils.sparseMatrix2csr(pg.matrix.SparseMatrix(mm))
        np.testing.assert_equal(pg.utils.toSparseMatrix(sciCSR) == mm, True)

    def test_Access(self):
        # addVal(0, 1, 1.2) should come back as [0], [1], [1.2] after the
        # conversion (translated from German original comment)
        pass

    def test_Operators(self):
        """Scaled addition of CRS matrices."""
        colIds = range(10)
        rowIds = range(10)
        vals = np.ones(10)
        A = pg.matrix.SparseMapMatrix(colIds, rowIds, vals)
        S = pg.matrix.SparseMatrix(A)
        S2 = S + S * 0.1 * 0.3

    def test_ComplexMatrix(self):
        """Solve a complex-valued FE system via pg and scipy backends."""
        verbose = False
        grid = pg.createGrid(3, 3)
        # print(grid)
        alpha = pg.math.toComplex(np.ones(grid.cellCount()),
                                  np.ones(grid.cellCount())*1.0)
        A = pg.solver.createStiffnessMatrix(grid, a=alpha)
        pg.solver.solver.applyDirichlet(A, None, [0], [0.0])
        #pg.solver.showSparseMatrix(A)
        #pg.solver.assembleDirichletBC(A, [[grid.boundary(0), 0.0]])
        b = pg.math.toComplex(np.ones(A.rows()), np.ones(A.rows())*0.0)
        x = pg.solver.linSolve(A, b, verbose=verbose, solver='pg')
        np.testing.assert_allclose(A.mult(x), b, rtol=1e-10)
        x2 = pg.solver.linSolve(A, b, verbose=verbose, solver='scipy')
        np.testing.assert_allclose(x2, x, rtol=1e-10)
        # the squeezed (real-valued block) system must give the same solution
        x3 = pg.solver.linSolve(pg.utils.squeezeComplex(A),
                                pg.utils.squeezeComplex(b),
                                verbose=verbose, solver='pg')
        np.testing.assert_allclose(pg.utils.toComplex(x3), x, rtol=1e-10)

    def test_BlockMatrix(self):
        """Adding the same submatrix twice accumulates its entries."""
        A = pg.SparseMapMatrix(2, 2)
        A.setVal(0, 0, 1.0)
        B = pg.BlockMatrix()
        B.add(A, 0, 0)
        np.testing.assert_allclose(B.row(0), [1.0, 0.0], rtol=1e-10)
        B.add(A, 0, 0)
        np.testing.assert_allclose(B.row(0), [2.0, 0.0], rtol=1e-10)
        C = B.sparseMapMatrix()
        np.testing.assert_allclose(C.row(0), [2.0, 0.0], rtol=1e-10)
        B.add(A, 10, 10)
        print(B)

    def test_Misc(self):
        """Row/column access and cleanRow/cleanCol."""
        D = pg.SparseMapMatrix(3, 4)
        for i in range(D.rows()):
            for j in range(D.cols()):
                D.setVal(i, j, 1.0)
        np.testing.assert_allclose(D.col(2), pg.Vector(D.rows(), 1.0))
        np.testing.assert_allclose(D.row(2), pg.Vector(D.cols(), 1.0))
        D.cleanRow(1)
        np.testing.assert_allclose(D.col(2), [1.0, 0.0, 1.0])
        D.cleanCol(1)
        np.testing.assert_allclose(D.row(2), [1.0, 0.0, 1.0, 1.0])
if __name__ == '__main__':
    # run the test suite when executed directly
    unittest.main()
|
JuliusHen/gimli | pygimli/core/datacontainer.py | # -*- coding: utf-8 -*-
"""
Extensions to the core DataContainer class[es].
"""
import numpy as np
from . logger import deprecated, info, warn, critical, verbose
from .core import (RVector3, RVector, IndexArray,
DataContainer, DataContainerERT)
def __DataContainer_str(self):
    """One-line summary: sensor count, data size and non-empty entry keys."""
    entries = [d for d in self.dataMap().keys()
               if self.isSensorIndex(d) or self.haveData(d)]
    return ("Data: Sensors: " + str(self.sensorCount())
            + " data: " + str(self.size())
            + ", nonzero entries: " + str(entries))
DataContainer.__repr__ = __DataContainer_str
def __DataContainer_setSensors(self, sensors):
    """Set Sensor positions.

    Set all sensor positions.
    This is just syntactic sugar for setSensorPositions.

    Parameters
    ----------
    sensors: iterable
        Iterable that can be converted into a pg.Pos.

    Tests
    -----
    >>> import pygimli as pg
    >>> d = pg.DataContainerERT()
    >>> d.setSensors(pg.utils.grange(0.0, 3, n=4))
    >>> assert d.sensorCount() == 4
    """
    for i, s in enumerate(sensors):
        # plain numbers are interpreted as x-coordinates
        pos = RVector3(s, 0.0) if isinstance(s, (float, int)) else s
        # NOTE(review): '>' (not '>=') -- presumably setSensorPosition
        # resizes the container as needed (the doctest above relies on it)
        if i > self.sensorCount():
            self.createSensor(pos)
        else:
            self.setSensorPosition(i, pos)
DataContainer.setSensors = __DataContainer_setSensors
def __DC_setVal(self, key, val):
    """dict-style write access; grows the container if val is longer."""
    n = len(val)
    if n > self.size():
        verbose("DataContainer resized to:", n)
        self.resize(n)
    self.set(key, val)
DataContainer.__setitem__ = __DC_setVal
def __DC_getVal(self, key):
    """dict-style read access; sensor index entries come back as int arrays."""
    if self.isSensorIndex(key):
        return np.array(self(key), dtype=int)
    # deliberately NOT self(key).array(): d['a'][2] = 0.0 would be impossible
    return self(key)
DataContainer.__getitem__ = __DC_getVal
def __DataContainerERT_addFourPointData(self, *args,
                                        indexAsSensors=False, **kwargs):
    """Add a new data point to the end of the dataContainer.

    Add a new 4 point measurement to the end of the dataContainer and increase
    the data size by one. The index of the new data point is returned.

    Parameters
    ----------
    *args: [int]
        At least four index values for A, B, M and N.
    indexAsSensors: bool [False]
        The indices A, B, M and N are additionally interpreted as sensor position in [m, 0, 0].
    **kwargs: dict
        Named values for the data configuration.

    Returns
    -------
    ret: int
        Index of this new data point.

    Examples
    --------
    >>> import pygimli as pg
    >>> d = pg.DataContainerERT()
    >>> d.setSensors(pg.utils.grange(0, 3, n=4))
    >>> d.addFourPointData(0,1,2,3)
    0
    >>> d.addFourPointData([3,2,1,0], rhoa=1.0)
    1
    >>> print(d)
    Data: Sensors: 4 data: 2, nonzero entries: ['a', 'b', 'm', 'n', 'rhoa', 'valid']
    >>> print(d('rhoa'))
    2 [0.0, 1.0]
    """
    try:
        if len(args) == 1:
            # a single iterable holding the four indices [a, b, m, n]
            a, b, m, n = args[0][:]
        else:
            # four separate index arguments
            [a, b, m, n] = args
        if indexAsSensors:
            # interpret indices as x-coordinates and create the sensors
            a = self.createSensor([float(a), 0.0, 0.0])
            b = self.createSensor([float(b), 0.0, 0.0])
            m = self.createSensor([float(m), 0.0, 0.0])
            n = self.createSensor([float(n), 0.0, 0.0])
        idx = self.createFourPointData(self.size(), a, b, m, n)
    except Exception as e:
        print(e)
        print("args:", args, len(args))
        critical("Can't interpret arguments:", *args)
    # attach any named data values, creating missing fields on the fly
    for k, v in kwargs.items():
        if not self.haveData(k):
            self.add(k)
        self.ref(k)[idx] = v
    return idx
DataContainerERT.addFourPointData = __DataContainerERT_addFourPointData
|
JuliusHen/gimli | pygimli/utils/streams.py | import pygimli as pg
def streamline(mesh, field, startCoord, dLengthSteps, dataMesh=None,
               maxSteps=1000, verbose=False, coords=(0, 1)):
    """Create a streamline from start coordinate and following a vector field in up and down direction.
    """
    # trace downstream first
    xd, yd, vd = streamlineDir(mesh, field, startCoord, dLengthSteps,
                               dataMesh=dataMesh, maxSteps=maxSteps,
                               down=True, verbose=verbose, coords=coords)

    # re-enable the start cell so the upstream trace may enter it again
    startCell = mesh.findCell(startCoord)
    if startCell is not None:
        startCell.setValid(True)

    # then upstream from the same start point
    xu, yu, vu = streamlineDir(mesh, field, startCoord, dLengthSteps,
                               dataMesh=dataMesh, maxSteps=maxSteps,
                               down=False, verbose=verbose, coords=coords)

    # join both halves; drop the duplicated start point of the upstream part
    return xd + xu[1:], yd + yu[1:], vd + vu[1:]
def streamlineDir(mesh, field, startCoord, dLengthSteps, dataMesh=None,
                  maxSteps=150, down=True, verbose=False, coords=(0, 1)):
    """Trace a single streamline in one direction.

    down = -1, up = 1, both = 0

    Returns the lists of x-, y-coordinates and the field magnitude along
    the line. Cells that already carry a stream element are marked invalid.
    """
    xd = []
    yd = []
    vd = []

    pot = None
    vx = None
    vy = None
    isVectorData = False

    if isinstance(field, pg.core.R3Vector):
        field = field.array()

    if hasattr(field[0], '__len__'):
        # vector field given directly (per cell or per node)
        if abs(max(field[:, 0])) == 0 and abs(max(field[:, 1])) == 0:
            raise Exception("No data range streamline: min/max == ",
                            min(field[:, 0]))
        vx = pg.Vector(field[:, 0])
        vy = pg.Vector(field[:, 1])
        isVectorData = True
    else:
        # scalar potential; stream follows its gradient
        if min(field) == max(field):
            raise Exception("No scalar data range for any gradients "
                            " to draw a streamline: min/max == ",
                            min(field))
        if dataMesh is not None:
            if len(field) == dataMesh.nodeCount():
                pot = pg.Vector(field)
            elif len(field) == dataMesh.cellCount():
                pot = pg.core.cellDataToPointData(dataMesh, field)
            else:
                print(len(field), dataMesh)
                # BUGFIX: cellCount was reported twice instead of node/cell
                raise Exception("Data length (%i) for streamline is "
                                "neither nodeCount (%i) nor cellCount (%i)" %
                                (len(field), dataMesh.nodeCount(),
                                 dataMesh.cellCount()))
        else:
            if len(field) == mesh.nodeCount():
                pot = pg.Vector(field)
            elif len(field) == mesh.cellCount():
                pot = pg.core.cellDataToPointData(mesh, field)
            else:
                print(len(field), dataMesh)
                raise Exception("Data length (%i) for streamline is "
                                "neither nodeCount (%i) nor cellCount (%i)" %
                                (len(field), mesh.nodeCount(),
                                 mesh.cellCount()))

    direction = 1
    if down:
        direction = -1

    # search downward
    pos = pg.RVector3(startCoord)
    c = mesh.findCell(startCoord)
    if c is None:
        # start point outside the mesh -- nothing to trace
        # (BUGFIX: previously crashed dereferencing None below)
        return xd, yd, vd
    dLength = c.center().dist(c.node(0).pos()) / dLengthSteps

    # stream line starting point
    xd.append(pos[coords[0]])
    yd.append(pos[coords[1]])
    vd.append(-1)

    lastC = c
    lastU = -direction * 1e99

    d = None
    while c is not None and len(xd) < maxSteps:
        # valid() is a temporary flag: invalid cells already hold a stream
        if not c.valid():
            break

        if isVectorData:
            u = 0.
            if len(vx) == mesh.cellCount():
                d = pg.RVector3(vx[c.id()], vy[c.id()])
            elif len(vx) == mesh.nodeCount():
                d = pg.RVector3(c.pot(pos, vx), c.pot(pos, vy))
            elif dataMesh:
                cd = dataMesh.findCell(pos)
                if cd is None:
                    raise Exception("Cannot find " + str(pos) + " dataMesh")
                if len(vx) == dataMesh.cellCount():
                    d = pg.RVector3(vx[cd.id()], vy[cd.id()])
                elif len(vx) == dataMesh.nodeCount() and \
                        len(vy) == dataMesh.nodeCount():
                    d = pg.RVector3(cd.pot(pos, vx), cd.pot(pos, vy))
                else:
                    print(dataMesh, len(vx), len(vy))
                    raise Exception("data size wrong")
            else:
                print("mesh:", mesh, len(vx), len(vy))
                raise Exception("Data length neither node size or cell size.")
        else:
            if dataMesh:
                cd = dataMesh.findCell(pos)
                if not cd:
                    break
                d = cd.grad(pos, pot)
                u = cd.pot(pos, pot)
            else:
                d = c.grad(pos, pot)
                u = c.pot(pos, pot)

        # always go u down
        dAbs = d.length()
        if dAbs == 0.0:
            # zero gradient: stream cannot continue
            break

        # stop when the potential stops decreasing (down) / increasing (up)
        if down:
            if u > lastU:
                break
        else:
            if u < lastU:
                break

        # * min(1.0, ((startCoord - pos).length()))
        pos += direction * d / dAbs * dLength
        c = mesh.findCell(pos, False)

        # Change cell here .. set old cell to be processed
        if c is not None:
            xd.append(pos[coords[0]])
            yd.append(pos[coords[1]])
            # set the starting value here
            if vd[0] == -1:
                vd[0] = dAbs
            vd.append(dAbs)

            # check for degenerating stream
            if len(xd) > 2:
                pos0 = pg.Pos(xd[-3], yd[-3])
                pos1 = pg.Pos(xd[-2], yd[-2])
                pos2 = pg.Pos(xd[-1], yd[-1])
                if (pos0.dist(pos2) < pos0.dist(pos1)):
                    pg.debug('degenerating stream aborted')
                    break

            # If the new cell is different from the current we move into the
            # new cell and make the last to be invalid ..
            # the last active contains a stream element
            if c.id() != lastC.id():
                lastC.setValid(False)
                lastC = c
                dLength = c.center().dist(c.node(0).pos()) / dLengthSteps
        else:
            # There is no new cell .. the last active contains a stream element
            lastC.setValid(False)

        lastU = u
        if verbose:
            print(pos, u)

    # Stream line has stopped and the current cell (if there is one) ..
    # .. contains a stream element
    if c is not None:
        c.setValid(False)

    if down:
        xd.reverse()
        yd.reverse()
        vd.reverse()

    return xd, yd, vd
|
JuliusHen/gimli | pygimli/physics/ert/__init__.py | # -*- coding: utf-8 -*-
"""Direct current electromagnetics
This package contains tools, modelling operators, and managers for:
* Electrical Resistivity Tomography (ERT) / Induced polarization (IP)
* Vertical Electric Sounding (VES)
"""
import pygimli as pg
from .ert import (simulate, estimateError,
createGeometricFactors, createInversionMesh)
from .ertManager import ERTManager
from .ertModelling import ERTModelling, ERTModellingReference
from .ertScheme import createData
from .ves import VESModelling, VESCModelling, VESManager
from .visualization import showERTData
from .importData import load
@pg.renamed(createData, '1.3') # 20210302
def createERTData(*args, **kwargs):
    """Deprecated alias for :func:`createData` (renamed in version 1.3).

    The pg.renamed decorator redirects the call; the body is never executed.
    """
    pass
# Convenience aliases for plotting ERT data containers.
showData = showERTData
show = showERTData # better create a function that can also handle mgr
# Re-export the C++ core geometric-factor computation under both spellings.
geometricFactor = pg.core.geometricFactor
geometricFactors = geometricFactor
|
JuliusHen/gimli | pygimli/utils/__init__.py | # -*- coding: utf-8 -*-
"""
Useful utility functions.
"""
from .base import (rms, rmsWithErr, nanrms, createDateTimeString,
createfolders, # renamed 20200515
createFolders,
createResultFolder, # renamed 20200515
createResultPath,
createPath,
getSavePath, gmat2numpy, interperc, interpExtrap, inthist,
nanrms, num2str, numpy2gmat, rrms, rms, rndig, saveResult,
chi2)
# compatibility for dev #can be removed? (20200515)
from .base import rmsWithErr as rmswitherr
from .complex import (isComplex, toComplex, toPolar, squeezeComplex,
toRealMatrix, KramersKronig)
from .cache import (cache, strHash)
from .geostatistics import (computeInverseRootMatrix, covarianceMatrix,
generateGeostatisticalModel)
from .gps import GKtoUTM, findUTMZone, getProjection, getUTMProjection, readGPX
from .hankel import hankelFC
from .postinversion import iterateBounds, modCovar
from .sparseMat2Numpy import (convertCRSIndex2Map, sparseMatrix2Array,
sparseMatrix2coo, sparseMatrix2csr, sparseMatrix2Dense,
toSparseMatrix, toSparseMapMatrix, toCSR, toCOO,
)
from .units import (unit, cmap)
from .units import cmap as cMap # for compatibilty (will be removed)
from .utils import (ProgressBar, boxprint, cumDist, cut, diff, dist,
filterIndex, filterLinesByCommentStr, findNearest,
getIndex, grange, logDropTol, niceLogspace,
prettify, prettyFloat,
randn, rand, trimDocString, unicodeToAscii, unique,
unique_everseen, unique_rows, uniqueAndSum)
from .streams import streamline, streamlineDir
# Export every name without an underscore anywhere in it. NOTE(review): this
# also drops public snake_case names such as `unique_everseen`/`unique_rows`
# from the star-import API — presumably intentional, TODO confirm.
__all__ = [name for name in dir() if '_' not in name]
|
JuliusHen/gimli | pygimli/viewer/mpl/__init__.py | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""Matplotlib drawing functions used by `pygimli.viewer`."""
import pygimli as pg
import matplotlib
# are the following suitable for a drawing package?
from .utils import (hold,
wait,
updateFig,
updateAxes,
renameDepthTicks,
insertUnitAtNextLastTick,
saveFigure,
saveAxes,
adjustWorldAxes,
createAnimation,
saveAnimation,
setOutputStyle,
setPlotStuff,
plotLines,
twin, createTwinX, createTwinY)
from .boreholes import BoreHole, BoreHoles, create_legend
from .colorbar import (createColorBar,
createColorBarOnly,
updateColorBar,
addCoverageAlpha,
autolevel,
cmapFromName,
findAndMaskBestClim,
setCbarLevels,
setMappableData)
from .meshview import (CellBrowser,
createMeshPatches,
createTriangles,
drawBoundaryMarkers,
drawField,
drawMesh,
drawMeshBoundaries,
drawModel,
drawParameterConstraints,
drawPLC,
drawSelectedMeshBoundaries,
drawSelectedMeshBoundariesShadow,
drawSensors,
drawStreamLines,
drawStreams)
from .overlayimage import (cacheFileName,
deg2MapTile,
getMapTile,
mapTile2deg,
underlayMap,
underlayBKGMap)
from .matrixview import (drawBlockMatrix, drawSparseMatrix)
# TODO: example scripts for the following are needed and refactoring is due
# maybe the plotter should be named show or draw
from .dataview import (drawSensorAsMarker, # dups to meshview??
showVecMatrix,
drawVecMatrix,
showValMapPatches,
drawValMapPatches,
showDataMatrix,
drawDataMatrix,
generateMatrix,
showDataContainerAsMatrix,
patchMatrix, # deprectated (Naming)
patchValMap, # deprectated (Naming)
plotDataContainerAsMatrix, # need renaming
plotMatrix, # deprectated (Naming)
plotVecMatrix,# deprectated (Naming)
)
# which of these do we actually need?
from .modelview import (drawModel1D,
showmymatrix, # needed ?
draw1DColumn, # needed or redundant ?
draw1dmodel, # needed or redundant ?
show1dmodel, # needed or redundant ?
draw1dmodelErr, # needed or redundant ?
draw1dmodelLU, # needed or redundant ?
showStitchedModels,
showfdemsounding)
__all__ = [
"BoreHole", "BoreHoles", "create_legend", "addCoverageAlpha", "autolevel",
"cmapFromName",
"createColorBar", "createColorBarOnly", "updateColorBar",
"findAndMaskBestClim", "setCbarLevels", "saveFigure", "saveAxes",
"setMappableData", "drawSensorAsMarker", "generateMatrix", "patchMatrix",
"patchValMap", "plotDataContainerAsMatrix", "plotMatrix", "plotVecMatrix",
"CellBrowser", "createMeshPatches",
"createTriangles", "draw1DColumn", "drawField", "drawMesh",
"drawMeshBoundaries", "drawModel", "hold", "wait",
"setOutputStyle", "setPlotStuff", "createAnimation", "saveAnimation",
"drawParameterConstraints", "drawPLC", "drawSelectedMeshBoundaries",
"drawSelectedMeshBoundariesShadow", "drawSensors", "drawStreamLines",
"drawStreams", "insertUnitAtNextLastTick", "plotLines", "cacheFileName",
"deg2MapTile", "getMapTile", "mapTile2deg", "underlayMap", "updateAxes"
]
# plt.subplots() resets the locale setting to the system default .. this went
# horribly wrong for the German 'decimal_point': ','
# https://github.com/matplotlib/matplotlib/issues/6706
# Qt5Agg resets it after importing figure;
# TkAgg resets it after importing pyplot.
# Until it is fixed we should maybe silently initialize the qt5agg backend and
# restore the locale afterwards, if someone has a plan how to do that.
#checkAndFixLocaleDecimal_point(verbose=True)
# Set global hold if mpl inline backend is used (as in Jupyter Notebooks)
# Inline backends (as used in Jupyter notebooks) get the global hold enabled
# so successive draw calls end up in the same figure.
if 'inline' in matplotlib.get_backend():
    hold(1)
|
JuliusHen/gimli | pygimli/frameworks/inversion.py | <reponame>JuliusHen/gimli
# -*- coding: utf-8 -*-
"""pyGIMLi - Inversion Frameworks.
Basic inversion frameworks that usually needs a forward operator to run.
"""
import numpy as np
import pygimli as pg
from pygimli.utils import prettyFloat as pf
class Inversion(object):
    """Basic inversion framework.

    Changes to prior Versions (remove me)

    * holds the starting model itself, fop only provide a creator for SM
      fop.createStartModel(dataValues)

    Attributes
    ----------
    verbose : bool
        Give verbose output
    debug : bool
        Give debug output
    startModel : array
        Holds the current starting model
    model : array
        Holds the last active model
    maxIter : int [20]
        Maximal interation number.
    stopAtChi1 : bool [True]
        Stop iteration when chi² is one. If set to false the iteration stops
        after maxIter or convergence reached (self.inv.deltaPhiAbortPercent())
    """

    def __init__(self, fop=None, inv=None, **kwargs):
        """Initialize the framework.

        Parameters
        ----------
        fop : pg.frameworks.Modelling [None]
            Forward operator, set via setForwardOperator().
        inv : Inversion [None]
            If given, this instance only wraps another inversion framework.
        """
        self._debug = kwargs.pop('debug', False)
        self._verbose = kwargs.pop('verbose', False)
        # If this class or its derived is a Framework the _inv holds another
        # Inversion which allows us (remove me)........
        # this will be probably removed in the future
        self.isFrameWork = False  # check if needed
        self._stopAtChi1 = True
        self._preStep = None   # optional callback(iter, self) before a step
        self._postStep = None  # optional callback(iter, self) after a step
        self._inv = None
        self._fop = None
        self.reset()

        if inv is not None:
            self._inv = inv
            self.isFrameWork = True
        else:
            self._inv = pg.core.RInversion(self._verbose, self._debug)

        self._dataTrans = pg.trans.TransLin()
        self.axs = None  # for showProgress only
        self.maxIter = kwargs.pop('maxIter', 20)

        if fop is not None:
            self.setForwardOperator(fop)

    def reset(self):
        """Reset model, starting model and data/error vectors."""
        self._model = None
        self._startModel = None
        self._dataVals = None
        self._errorVals = None

    @property
    def inv(self):
        """Underlying core inversion instance."""
        return self._inv

    @property
    def fop(self):
        """Forward operator."""
        return self._fop

    @fop.setter
    def fop(self, f):
        self.setForwardOperator(f)

    def setForwardOperator(self, fop):
        """Set the forward operator and register it in the core inversion."""
        self._fop = fop
        # we need to initialize the regionmanager by calling it once
        self._fop.regionManager()
        self._inv.setForwardOperator(fop)

    @property
    def verbose(self):
        return self._verbose

    @verbose.setter
    def verbose(self, v):
        self._verbose = v
        self.inv.setVerbose(self._verbose)
        self.fop.setVerbose(self._verbose)

    @property
    def debug(self):
        return self._debug

    @debug.setter
    def debug(self, v):
        self._debug = v
        self.inv.setDoSave(self._debug)

    @property
    def dataTrans(self):
        """Data transformation (default TransLin)."""
        return self._dataTrans

    @dataTrans.setter
    def dataTrans(self, dt):
        self._dataTrans = dt
        self.inv.setTransData(self._dataTrans)

    @property
    def modelTrans(self):
        """Model transformation, delegated to the forward operator."""
        return self.fop.modelTrans

    @modelTrans.setter
    def modelTrans(self, mt):
        # Fix: formerly assigned the non-existing `self._modelTrans`
        # (AttributeError) and silently ignored the given value.
        self.fop.modelTrans = mt

    @property
    def startModel(self):
        """ Gives the current default starting model.

        Returns the current default starting model or
        call fop.createStartmodel() if non is defined.
        """
        if self._startModel is None:
            sm = self.fop.regionManager().createStartModel()
            if len(sm) > 0 and max(abs(np.atleast_1d(sm))) > 0.0:
                self._startModel = sm
                pg.info("Created startmodel from region infos:", sm)
            else:
                pg.verbose("No region infos for startmodel")

        if self._startModel is None:
            sm = self.fop.createStartModel(self.dataVals)
            pg.info("Created startmodel from forward operator:", sm)
            self._startModel = sm
        return self._startModel

    @startModel.setter
    def startModel(self, model):
        """
        model: [float] | float
            Model used as starting model.
            Float value is used as constant model.
        """
        if model is None:
            self._startModel = None
        elif isinstance(model, float) or isinstance(model, int):
            self._startModel = np.ones(self.parameterCount) * float(model)
            pg.info("Startmodel set from given value.", float(model))
        elif hasattr(model, '__iter__'):
            if len(model) == self.parameterCount:
                pg.info("Startmodel set from given array.", model)
                self._startModel = model
            else:
                pg.error("Startmodel size invalid {0} != {1}.".
                         format(len(model), self.parameterCount))

    @property
    def model(self):
        """The last active model."""
        if self._model is None:
            if hasattr(self.inv, 'model'):
                # inv is RInversion()
                if len(self.inv.model()) > 0:
                    return self.inv.model()
                else:
                    raise pg.critical(
                        "There was no inversion run so there is no last model")
            else:
                return self.inv.model
        return self._model

    @model.setter
    def model(self, m):
        self._model = m

    @property
    def response(self):
        """Forward response of the last model; raises before the first run."""
        if len(self.inv.response()) > 0:
            return self.inv.response()
        else:
            raise Exception(
                "There was no inversion run so there is no response yet")

    # backward compatibility
    @property
    def dataErrs(self):
        pg.warn('do not use')
        return self._errorVals

    @dataErrs.setter
    def dataErrs(self, v):
        pg.warn('do not use')
        self._errorVals = v

    @property
    def dataVals(self):
        return self._dataVals

    @dataVals.setter
    def dataVals(self, d):
        """Set mandatory data values.

        Values == 0.0. Will be set to Tolerance
        """
        self._dataVals = d
        if self._dataVals is None:
            pg._y(d)
            pg.critical("Inversion framework needs data values to run")

        # zero can be a valid data value
        #
        # if min(abs(self._dataVals)) < 1e-12:
        #     print(self._dataVals)
        #     pg.warn("Found zero data values. \
        #         Setting them to a TOLERANCE value of 1e-12")
        #     pg.fixZero(self._dataVals, 1e-12)

    @property
    def errorVals(self):
        return self._errorVals

    @errorVals.setter
    def errorVals(self, d):
        """Set mandatory error values.

        Values == 0.0. Will be set to Tolerance
        """
        self._errorVals = d
        if self._errorVals is None:
            pg._y(d)
            pg.critical("Inversion framework needs error values to run")

        if min(abs(self._errorVals)) < 1e-12:
            print(self._errorVals)
            pg.warn(
                "Found zero error values. Setting them to fallback value of 1")
            pg.fixZero(self._errorVals, 1)

    @property
    def parameterCount(self):
        """Number of model parameters from the region manager."""
        pC = self.fop.regionManager().parameterCount()
        if pC == 0:
            pg.warn("Parameter count is 0")
        return pC

    @property
    def robustData(self):
        return self.inv.robustData()

    @robustData.setter
    def robustData(self, v):
        if self.inv is not None:
            self.inv.setRobustData(v)

    @property
    def maxIter(self):
        return self.inv.maxIter()

    @maxIter.setter
    def maxIter(self, v):
        if self.inv is not None:
            self.inv.setMaxIter(v)

    @property
    def stopAtChi1(self):
        return self._stopAtChi1

    @stopAtChi1.setter
    def stopAtChi1(self, b):
        self._stopAtChi1 = b

    @property
    def minDPhi(self):
        """Abort criterion: minimal relative change of phi in percent."""
        return self.inv.deltaPhiAbortPercent()

    @minDPhi.setter
    def minDPhi(self, dPhi):
        return self.setDeltaChiStop(dPhi)

    @property
    def lam(self):
        """Regularization strength lambda."""
        return self.inv.getLambda()

    @lam.setter
    def lam(self, lam):
        self.inv.setLambda(lam)

    def setDeltaChiStop(self, it):
        self.inv.setDeltaPhiAbortPercent(it)

    def echoStatus(self):
        self.inv.echoStatus()

    def setPostStep(self, p):
        """Set callback(iteration, self) called after each iteration."""
        self._postStep = p

    def setPreStep(self, p):
        """Set callback(iteration, self) called before each iteration."""
        self._preStep = p

    def setData(self, data):
        # QUESTION_ISNEEDED
        if isinstance(data, pg.DataContainer):
            raise Exception("should not be here .. its Managers job")
            # NOTE(review): unreachable after the raise above
            self.fop.setData(data)
        else:
            self.dataVals = data

    def chi2(self, response=None):
        """Chi-squared misfit, i.e. phiData normalized by data count."""
        return self.phiData(response) / len(self.dataVals)

    def phiData(self, response=None):
        """Data objective function (squared error-weighted misfit)."""
        if response is None:
            response = self.response

        dT = self.dataTrans
        dData = (dT.trans(self.dataVals) - dT.trans(response)) / \
            dT.error(self.dataVals, self.errorVals)
        return pg.math.dot(dData, dData)

    def phiModel(self, model=None):
        """Model objective function (squared roughness)."""
        if model is None:
            model = self.model
        rough = self.inv.roughness(model)
        return pg.math.dot(rough, rough)

    def phi(self, model=None, response=None):
        """Total objective function phiData + lam * phiModel."""
        phiD = self.phiData(response)
        if self.inv.localRegularization():
            return phiD
        else:
            return phiD + self.phiModel(model) * self.inv.getLambda()

    def relrms(self):
        """Relative root-mean-square misfit of the last run."""
        return self.inv.relrms()

    def absrms(self):
        """Absolute root-mean-square misfit of the last run."""
        return self.inv.absrms()

    def run(self, dataVals, errorVals, **kwargs):
        """Run inversion.

        The inversion will always start from the starting model taken from
        the forward operator.
        If you want to run the inversion from a specified prior model,
        e.g., from a other run, set this model as starting model to the FOP
        (fop.setStartModel).
        Any self.inv.setModel() settings will be overwritten.

        Parameters
        ----------
        dataVals : iterable
            Data values
        errorVals : iterable
            Relative error values. dv / v

        Keyword Arguments
        -----------------
        maxIter : int
            Overwrite class settings for maximal iterations number.
        dPhi : float [1]
            Overwrite class settings for delta data phi aborting criteria.
            Default is 1%
        cType: int[1]
            Set global contraints type for all regions.
        """
        self.reset()
        if self.isFrameWork:
            pg.critical('in use?')
            return self._inv.run(dataVals, errorVals, **kwargs)

        if self.fop is None:
            raise Exception("Need valid forward operator for inversion run.")

        maxIter = kwargs.pop('maxIter', self.maxIter)
        minDPhi = kwargs.pop('dPhi', self.minDPhi)

        self.verbose = kwargs.pop('verbose', self.verbose)
        self.debug = kwargs.pop('debug', self.debug)
        self.robustData = kwargs.pop('robustData', False)

        # pg._g('verbose:',
        #       self.verbose, self.fop.verbose(), self.inv.verbose())
        self.lam = kwargs.pop('lam', 20)

        # progress = kwargs.pop('progress', None)  # NOT USED
        showProgress = kwargs.pop('showProgress', False)

        self.inv.setTransModel(self.fop.modelTrans)

        self.dataVals = dataVals
        self.errorVals = errorVals

        sm = kwargs.pop('startModel', None)
        if sm is not None:
            self.startModel = sm

        self.inv.setData(self._dataVals)
        self.inv.setRelativeError(self._errorVals)

        if 'cType' in kwargs:
            self.fop.setRegionProperties('*', cType=kwargs['cType'])

        # temporary set max iter to one for the initial run call
        maxIterTmp = self.maxIter
        self.maxIter = 1

        if self.verbose:
            pg.info('Starting inversion.')
            print("fop:", self.inv.fop())

            if isinstance(self.dataTrans, pg.trans.TransCumulative):
                print("Data transformation (cumulative):")
                for i in range(self.dataTrans.size()):
                    print("\t", i, self.dataTrans.at(i))
            else:
                print("Data transformation:", self.dataTrans)

            if isinstance(self.modelTrans, pg.trans.TransCumulative):
                print("Model transformation (cumulative):")
                for i in range(self.modelTrans.size()):
                    if i < 10:
                        print("\t", i, self.modelTrans.at(i))
                    else:
                        print(".", end='')
            else:
                print("Model transformation:", self.modelTrans)

            print("min/max (data): {0}/{1}".format(pf(min(self._dataVals)),
                                                   pf(max(self._dataVals))))
            print("min/max (error): {0}%/{1}%".format(
                pf(100*min(self._errorVals)), pf(100*max(self._errorVals))))
            print("min/max (start model): {0}/{1}".format(
                pf(min(self.startModel)), pf(max(self.startModel))))

        # To ensure reproduceability of the run() call, inv.start() will
        # reset self.inv.model() to fop.startModel().
        self.fop.setStartModel(self.startModel)
        self.inv.setReferenceModel(self.startModel)

        if self.verbose:
            print("-" * 80)

        if self._preStep and callable(self._preStep):
            self._preStep(0, self)

        self.inv.start()
        self.maxIter = maxIterTmp

        if self._postStep and callable(self._postStep):
            self._postStep(0, self)

        if showProgress:
            self.showProgress(showProgress)

        lastPhi = self.phi()
        self.chi2History = [self.chi2()]
        self.modelHistory = [self.startModel]

        for i in range(1, maxIter):
            if self._preStep and callable(self._preStep):
                self._preStep(i, self)

            if self.verbose:
                print("-" * 80)
                print("inv.iter", i + 1, "... ", end='')

            try:
                self.inv.oneStep()
            except RuntimeError as e:
                print(e)
                pg.error('One step failed. '
                         'Aborting and going back to last model')

            if np.isnan(self.model).any():
                print(self.model)
                pg.critical('invalid model')

            # resp = self.inv.response()  # NOT USED
            chi2 = self.inv.chi2()

            self.chi2History.append(chi2)
            self.modelHistory.append(self.model)

            if showProgress:
                self.showProgress(showProgress)

            if self._postStep and callable(self._postStep):
                self._postStep(i, self)

            # Do we need to check the following before oder after chi2 calc??
            self.lam = self.lam * self.inv.lambdaFactor()

            if self.robustData:
                self.inv.robustWeighting()

            if self.inv.blockyModel():
                self.inv.constrainBlocky()

            phi = self.phi()
            dPhi = phi/lastPhi

            if self.verbose:
                print("chi² = {0} (dPhi = {1}%) lam: {2}".format(
                    round(chi2, 2), round((1-dPhi)*100, 2), self.lam))

            if chi2 <= 1 and self.stopAtChi1:
                print("\n")
                if self.verbose:
                    pg.boxprint(
                        "Abort criterion reached: chi² <= 1 (%.2f)" % chi2)
                break

            # if dPhi < -minDPhi:
            if (dPhi > (1.0 - minDPhi / 100.0)) and i > 2:
                if self.verbose:
                    pg.boxprint(
                        "Abort criteria reached: dPhi = {0} (< {1}%)".format(
                            round((1-dPhi)*100, 2), minDPhi))
                break

            lastPhi = phi

        # will never work as expected until we unpack kwargs .. any idea for
        # better strategy?
        # if len(kwargs.keys()) > 0:
        #     print("Warning! unused keyword arguments", kwargs)

        self.model = self.inv.model()
        return self.model

    def showProgress(self, style='all'):
        r"""Called if showProgress=True is set for the inversion run.

        TODO
            *Discuss .. its a useful function but breaks a little
            the FrameWork work only concept.
        """
        if self.axs is None:
            axs = None
            if style == 'all' or style is True:
                fig, axs = pg.plt.subplots(1, 2)
            elif style == 'Model':
                fig, axs = pg.plt.subplots(1, 1)
            self.axs = axs

        ax = self.axs

        if style == 'Model':
            for other_ax in ax.figure.axes:
                # pg._y(type(other_ax).mro())
                if type(other_ax).mro()[0] == type(ax):
                    # only clear Axes not Colorbars
                    other_ax.clear()
            self.fop.drawModel(ax, self.inv.model())
        else:
            # for other_ax in ax[0].figure.axes:
            #     other_ax.clear()
            for _ax in self.axs:
                _ax.clear()
                try:
                    pg.viewer.mpl.twin(_ax).clear()
                except Exception:
                    pass

            self.fop.drawModel(ax[0], self.inv.model(),
                               label='Model')

            self.fop.drawData(ax[1], self._dataVals, self._errorVals,
                              label='Data')
            self.fop.drawData(ax[1], self.inv.response(),
                              label='Response')

            ax[1].text(
                0.99, 0.005, r"Iter: {0}, rrms: {1}%, $\chi^2$: {2}".format(
                    self.inv.iter(), pf(self.inv.relrms()),
                    pf(self.inv.chi2())),
                transform=ax[1].transAxes,
                horizontalalignment='right',
                verticalalignment='bottom',
                fontsize=8)
            ax[1].figure.tight_layout()
        pg.plt.pause(0.05)
class MarquardtInversion(Inversion):
    """Marquardt scheme (local damping with decreasing regularization)."""
    def __init__(self, fop=None, **kwargs):
        """Initialize with optional forward operator; kwargs go to Inversion."""
        super(MarquardtInversion, self).__init__(fop, **kwargs)
        # Marquardt: iterate until maxIter/convergence, not until chi² == 1
        self.stopAtChi1 = False
        # local (damping) regularization with lambda decreased by factor 0.8
        self.inv.setLocalRegularization(True)
        self.inv.setLambdaFactor(0.8)
    def run(self, dataVals, errorVals, **kwargs):
        r"""Run the Marquardt-scheme inversion.

        Parameters
        ----------
        dataVals : iterable
            Data values.
        errorVals : iterable
            Relative error values.
        **kwargs:
            Forwarded to the parent class.
            See: :py:mod:`pygimli.modelling.Inversion`
        """
        # switch off smoothness constraints for all regions (pure damping)
        self.fop.regionManager().setConstraintType(0)
        self.fop.setRegionProperties('*', cType=0)
        self.model = super(MarquardtInversion, self).run(dataVals,
                                                         errorVals, **kwargs)
        return self.model
class Block1DInversion(MarquardtInversion):
    """Marquardt-type inversion for 1D block models (thicknesses + values).

    Attributes
    ----------
    nLayers : int
        Number of layers (delegated to the forward operator).
    """
    def __init__(self, fop=None, **kwargs):
        # pg.warn("move this to the manager")
        super(Block1DInversion, self).__init__(fop=fop, **kwargs)

    def setForwardOperator(self, fop):
        """Set forward operator; must be a Block1DModelling instance."""
        if not isinstance(fop, pg.frameworks.Block1DModelling):
            pg.critical('Forward operator needs to be an instance of '
                        'pg.modelling.Block1DModelling but is of type:', fop)
        return super(Block1DInversion, self).setForwardOperator(fop)

    def fixLayers(self, fixLayers):
        """Fix layer thicknesses.

        Parameters
        ----------
        fixLayers : bool | [float]
            Fix all layers to the last value or set the fix layer
            thickness for all layers
        """
        if fixLayers is False:
            self.fop.setRegionProperties(0, modelControl=1.0)
        elif fixLayers is not None:
            # how do we fix values without modelControl?
            # maybe set the region to be fixed here
            self.fop.setRegionProperties(0, modelControl=1e6)
            if hasattr(fixLayers, '__iter__'):
                # Fix: nLayers layers have nLayers-1 thicknesses; the old
                # check compared against nLayers, contradicting the message.
                if len(fixLayers) != self.fop.nLayers - 1:
                    print("fixLayers:", fixLayers)
                    pg.error("fixlayers needs to have a length of nLayers-1=" +
                             str(self.fop.nLayers-1))
                self.fop.setRegionProperties(0, startModel=fixLayers)

    def setLayerLimits(self, limits):
        """Set min and max layer thickness.

        Parameters
        ----------
        limits : False | [min, max]
        """
        if limits is False:
            self.fop.setRegionProperties(0, limits=[0.0, 0.0], trans='log')
        else:
            self.fop.setRegionProperties(0, limits=limits, trans='log')

    def setParaLimits(self, limits):
        """Set the limits for each parameter region."""
        for i in range(1, 1 + self.fop.nPara):
            if self.fop.nPara == 1:
                self.fop.setRegionProperties(i, limits=limits, trans='log')
            else:
                self.fop.setRegionProperties(i, limits=limits[i-1],
                                             trans='log')

    def run(self, dataVals, errorVals,
            nLayers=None, fixLayers=None, layerLimits=None, paraLimits=None,
            **kwargs):
        r"""Run the block-model inversion.

        Parameters
        ----------
        nLayers : int [4]
            Number of layers.
        fixLayers : bool | [thicknesses]
            See: :py:mod:`pygimli.modelling.Block1DInversion.fixLayers`
            For fixLayers=None, preset or defaults are uses.
        layerLimits : [min, max]
            Limits the thickness off all layers.
            For layerLimits=None, preset or defaults are uses.
        paraLimits : [min, max] | [[min, max],...]
            Limits the range of the model parameter. If you have multiple
            parameters you can set them with a list of limits.
        **kwargs:
            Forwarded to the parent class.
            See: :py:mod:`pygimli.modelling.MarquardtInversion`
        """
        if nLayers is not None:
            self.fop.nLayers = nLayers
        if layerLimits is not None:
            self.setLayerLimits(layerLimits)
        if fixLayers is not None:
            self.fixLayers(fixLayers)
        if paraLimits is not None:
            self.setParaLimits(paraLimits)

        self.model = super(Block1DInversion, self).run(dataVals,
                                                       errorVals, **kwargs)
        return self.model
class MeshInversion(Inversion):
    """
    ** UNUSED ** TO BE REMOVED

    Attributes
    ----------
    zWeight
    """
    def __init__(self, fop=None, **kwargs):
        # Obsolete class: the critical call below aborts construction.
        pg.critical('Obsolete .. to be removed.')
        super(MeshInversion, self).__init__(fop=fop, **kwargs)
        # default vertical constraint weight
        self._zWeight = 1.0
    def setForwardOperator(self, fop):
        # only mesh-based forward operators are accepted
        if not isinstance(fop, pg.frameworks.MeshModelling):
            pg.critical('Forward operator needs to be an instance of '
                        'pg.modelling.MeshModelling but is of type:', fop)
        return super(MeshInversion, self).setForwardOperator(fop)
    def run(self, dataVals, errorVals, mesh=None, zWeight=None, **kwargs):
        """Run inversion with given data and error values"""
        if mesh is not None:
            self.fop.setMesh(mesh)
        # maybe move this to the fop
        if zWeight is None:
            zWeight = self._zWeight
        self.fop.setRegionProperties('*', zWeight=zWeight)
        # maybe move this to the fop
        pg.debug('run with: ', self.fop.regionProperties())
        # more mesh-related inversion attributes to set?
        # ensure the mesh is generated
        self.fop.mesh() # not a nice way to ensure something
        self.model = super(MeshInversion, self).run(dataVals,
                                                    errorVals, **kwargs)
        return self.model
class PetroInversion(Inversion):
    """** Obsolete ** Inversion via a petrophysical transformation."""
    def __init__(self, petro, fop=None, **kwargs):
        """
        Parameters
        ----------
        petro :
            Petrophysical transformation, wrapped together with fop into a
            PetroModelling forward operator if needed.
        """
        # Obsolete class: the critical call below aborts construction.
        pg.critical('Obsolete .. to be removed.')
        if fop is not None:
            if not isinstance(fop, pg.frameworks.PetroModelling):
                fop = pg.frameworks.PetroModelling(fop, petro)
        super(PetroInversion, self).__init__(fop=fop, **kwargs)
    def setForwardOperator(self, fop):
        # only petrophysical forward operators are accepted
        if not isinstance(fop, pg.frameworks.PetroModelling):
            pg.critical('Forward operator needs to be an instance of '
                        'pg.modelling.PetroModelling but is of type:', fop)
        return super(PetroInversion, self).setForwardOperator(fop)
    def run(self, dataVals, errorVals, **kwargs):
        """Run the inversion; optional `limits` kwarg sets parameter limits.
        """
        if 'limits' in kwargs:
            limits = kwargs.pop('limits')
            # per-region limits are not implemented for multiple regions
            if len(self.fop.regionManager().regionIdxs()) > 1:
                pg.critical('implement')
            else:
                self.fop.setRegionProperties('*', limits=limits)
        # ensure the mesh is there
        self.fop.mesh()
        return super(PetroInversion, self).run(dataVals, errorVals, **kwargs)
class LCInversion(Inversion):
    """Quasi-2D Laterally constrained inversion (LCI) framework."""

    def __init__(self, fop=None, **kwargs):
        """Initialize, wrapping fop into an LCModelling operator if given."""
        # Fix: `f` was previously undefined when fop is None (NameError in
        # the super().__init__ call below).
        f = None
        if fop is not None:
            f = pg.frameworks.LCModelling(fop, **kwargs)
        super(LCInversion, self).__init__(f, **kwargs)
        self.dataTrans = pg.trans.TransLog()
        # self.setDeltaChiStop(0.1)

    def prepare(self, dataVals, errorVals, nLayers=4, **kwargs):
        """Concatenate per-sounding data/error vectors and set up regions.

        Returns the concatenated data and error vectors.
        """
        dataVec = pg.RVector()
        for d in dataVals:
            dataVec = pg.cat(dataVec, d)
        errVec = pg.RVector()
        for e in errorVals:
            errVec = pg.cat(errVec, e)

        self.fop.initJacobian(dataVals=dataVals, nLayers=nLayers,
                              nPar=kwargs.pop('nPar', None))
        # self.fop.initJacobian resets prior set startmodels
        if self._startModel is not None:
            self.fop.setStartModel(self._startModel)

        rC = self.fop.regionManager().regionCount()
        if kwargs.pop('disableLCI', False):
            # fall back to independent Marquardt inversions per sounding
            self.inv.setMarquardtScheme(0.7)
            # self.inv.setLocalRegularization(True)
            for r in self.fop.regionManager().regionIdxs():
                self.fop.setRegionProperties(r, cType=0)
        else:
            # self.inv.stopAtChi1(False)
            cType = kwargs.pop('cType', None)
            if cType is None:
                cType = [1] * rC
            zWeight = kwargs.pop('zWeight', None)
            if zWeight is None:
                zWeight = [0.0] * rC
            self.fop.setRegionProperties('*',
                                         cType=cType,
                                         zWeight=zWeight,
                                         **kwargs)

        self.inv.setReferenceModel(self.fop.startModel())
        return dataVec, errVec

    def run(self, dataVals, errorVals, nLayers=4, **kwargs):
        """Run the LCI inversion (default lam=20)."""
        lam = kwargs.pop('lam', 20)
        dataVec, errVec = self.prepare(dataVals, errorVals, nLayers, **kwargs)
        # NOTE(review): leftover debug output — candidate for removal
        print('#'*50)
        print(kwargs)
        print('#'*50)
        return super(LCInversion, self).run(dataVec, errVec, lam=lam, **kwargs)
|
JuliusHen/gimli | pygimli/viewer/mpl/modelview.py | <reponame>JuliusHen/gimli
# -*- coding: utf-8 -*-
"""Model viewer functions."""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.collections import PatchCollection
import matplotlib.colors as colors
import pygimli as pg
from .colorbar import setMappableData
# from pygimli.viewer.mpl.modelview import cmapFromName
from pygimli.utils import rndig
from .utils import updateAxes as updateAxes_
def drawModel1D(ax, thickness=None, values=None, model=None, depths=None,
                plot='plot',
                xlabel=r'Resistivity $(\Omega$m$)$', zlabel='Depth (m)',
                z0=0,
                **kwargs):
    """Draw 1d block model into axis ax.
    Draw 1d block model into axis ax defined by values and thickness vectors
    using plot function.
    For log y cases, z0 should be set > 0 so that the default becomes 1.
    Parameters
    ----------
    ax : mpl axes
        Matplotlib Axes object to plot into.
    values : iterable [float]
        [N] Values for each layer plus lower background.
    thickness : iterable [float]
        [N-1] thickness for each layer. Either thickness or depths must be set.
    depths : iterable [float]
        [N-1] Values for layer depths (positive z-coordinates).
        Either thickness or depths must be set.
    model : iterable [float]
        Shortcut to use default model definition.
        thks = model[0:nLay]
        values = model[nLay:]
    plot : string
        Matplotlib plotting function.
        'plot', 'semilogx', 'semilogy', 'loglog'
    xlabel : str
        Label for x axis.
    ylabel : str
        Label for y axis.
    z0 : float
        Starting depth in m
    **kwargs : dict()
        Forwarded to the plot routine
    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> import pygimli as pg
    >>> # plt.style.use('ggplot')
    >>> thk = [1, 4, 4]
    >>> res = np.array([10., 5, 15, 50])
    >>> fig, ax = plt.subplots()
    >>> pg.viewer.mpl.drawModel1D(ax, values=res*5, depths=np.cumsum(thk),
    ...                           plot='semilogx', color='blue')
    >>> pg.viewer.mpl.drawModel1D(ax, values=res, thickness=thk, z0=1,
    ...                           plot='semilogx', color='red')
    >>> pg.wait()
    """
    if model is not None:
        # model vector packs [thicknesses..., values...]
        nLayers = (len(model)-1)//2
        thickness = model[:nLayers]
        values = model[nLayers:]
    if thickness is None and depths is None:
        raise Exception("Either thickness or depths must be given.")
    nLayers = len(values)
    # px/pz hold the staircase polyline (two points per layer)
    px = np.zeros(nLayers * 2)
    pz = np.zeros(nLayers * 2)
    if thickness is not None:
        z1 = np.cumsum(thickness) + z0
    else:
        z1 = depths
    for i in range(nLayers):
        # constant value across the layer
        px[2 * i] = values[i]
        px[2 * i + 1] = values[i]
        if i == nLayers - 1:
            # extend the background layer 20% beyond the last interface
            pz[2 * i + 1] = z1[i - 1] * 1.2
        else:
            pz[2 * i + 1] = z1[i]
            pz[2 * i + 2] = z1[i]
    if plot == 'loglog' or plot == 'semilogy':
        # avoid zero depth on a log axis
        if z0 == 0:
            pz[0] = z1[0] / 2.
        else:
            pz[0] = z0
    try:
        plot = getattr(ax, plot)
        # NOTE(review): when thickness is given, z1 already includes z0, so
        # adding z0 again here looks like a double offset — TODO confirm
        plot(px, pz+z0, **kwargs)
    except BaseException as e:
        print(e)
    ax.set_ylabel(zlabel)
    ax.set_xlabel(xlabel)
    # assume positive depths pointing upward
    ax.set_ylim(pz[-1], pz[0])
    ax.grid(True)
def draw1DColumn(ax, x, val, thk, width=30, ztopo=0, cmin=1, cmax=1000,
                 cmap=None, name=None, textoffset=0.0):
    """Draw a 1D column (e.g., from a 1D inversion) on a given ax.

    The column is centered at horizontal position x, colored by val
    (log-normalized between cmin and cmax) and layered according to thk.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from pygimli.viewer.mpl import draw1DColumn
    >>> thk = [1, 2, 3, 4]
    >>> val = thk
    >>> fig, ax = plt.subplots()
    >>> draw1DColumn(ax, 0.5, val, thk, width=0.1, cmin=1, cmax=4, name="VES")
    <matplotlib.collections.PatchCollection object at ...>
    >>> _ = ax.set_ylim(-np.sum(thk), 0)
    """
    # Layer interfaces: surface, cumulative depths, and an extended bottom
    # (1.5 * total thickness), all shifted by the topography and negated.
    iface = -np.hstack((0., np.cumsum(thk), np.sum(thk) * 1.5)) + ztopo
    left = x - width / 2.
    boxes = [Rectangle((left, iface[k]), width, iface[k + 1] - iface[k])
             for k in range(len(val))]

    patches = PatchCollection(boxes)
    col = ax.add_collection(patches)
    patches.set_edgecolor(None)
    patches.set_linewidths(0.0)

    if cmap is not None:
        # accept either a colormap name or a Colormap instance
        patches.set_cmap(pg.viewer.mpl.cmapFromName(cmap)
                         if isinstance(cmap, str) else cmap)

    patches.set_norm(colors.LogNorm(cmin, cmax))
    patches.set_array(np.array(val))
    patches.set_clim(cmin, cmax)

    if name:
        ax.text(x + textoffset, ztopo, name, ha='center', va='bottom')

    updateAxes_(ax)
    return col
def showmymatrix(mat, x, y, dx=2, dy=1, xlab=None, ylab=None, cbar=None):
    """Show matrix with every dx-th/dy-th tick labeled from x/y.

    NOTE(review): apparently unused (see pg.error below) — candidate for
    removal.
    """
    pg.error('who use this?')
    plt.imshow(mat, interpolation='nearest')
    # Fix: labels must match the number of strided tick positions; the old
    # code built one label per element of x/y regardless of dx/dy.
    plt.xticks(np.arange(0, len(x), dx),
               ["%g" % rndig(xi, 2) for xi in x[::dx]])
    plt.yticks(np.arange(0, len(y), dy),
               ["%g" % rndig(yi, 2) for yi in y[::dy]])
    plt.ylim((len(y) - 0.5, -0.5))
    if xlab is not None:
        plt.xlabel(xlab)
    if ylab is not None:
        plt.ylabel(ylab)
    plt.axis('auto')
    if cbar is not None:
        plt.colorbar(orientation=cbar)
    return
def draw1dmodelErr(x, xL, xU=None, thk=None, xcol='g', ycol='r', **kwargs):
    """Draw a 1D model with error bars for values (xL..xU) and thicknesses.

    If thk is None, x/xL/xU are packed model vectors [thicknesses..., values...]
    and are split accordingly; otherwise thickness errors are not available
    and only value error bars are drawn.
    """
    thkL = thkU = None
    if thk is None:
        # Fix: was float division ("/") which breaks the slices below in Py3.
        nlay = (len(x) + 1) // 2
        thk = np.array(x)[:nlay - 1]
        x = np.asarray(x)[nlay - 1:nlay * 2 - 1]
        thkL = np.array(xL)[:nlay - 1]
        thkU = np.array(xU)[:nlay - 1]
        xL = np.asarray(xL)[nlay - 1:nlay * 2 - 1]
        xU = np.asarray(xU)[nlay - 1:nlay * 2 - 1]

    # thk0 = np.hstack((thk, 0.))
    # thkL0 = np.hstack((thkL, 0.))
    # thkU0 = np.hstack((thkU, 0.))
    zm = np.hstack((np.cumsum(thk) - thk / 2, np.sum(thk) * 1.2))  # midpoint
    zc = np.cumsum(thk)  # cumulative
    draw1dmodel(x, thk, **kwargs)
    plt.xlim(min(xL) * 0.95, max(xU) * 1.05)
    plt.ylim(zm[-1] * 1.1, 0.)
    plt.errorbar(
        x, zm, fmt='.', xerr=np.vstack(
            (x - xL, xU - x)), ecolor=xcol, **kwargs)
    # Fix: thkL/thkU only exist when the packed-model branch ran; previously
    # this line raised NameError whenever thk was passed explicitly.
    if thkL is not None:
        plt.errorbar((x[:-1] + x[1:]) / 2, zc, fmt='.',
                     yerr=np.vstack((thk - thkL, thkU - thk)), ecolor=ycol)
def draw1dmodelLU(x, xL, xU, thk=None, **kwargs):
    """Draw 1d model with lower and upper bounds.

    Not implemented yet — always raises. The commented code below sketches
    the intended behavior (model in red, bound variants in blue).
    """
    raise BaseException("IMPLEMENTME")
    # draw1dmodel(x, thk, color='red', **kwargs)
    # for i in range(len(x)):
    #     x1 = np.array(x)
    #     x1[i] = xL[i]
    #     draw1dmodel(x1, thk, color='blue')
    #     x1[i] = xU[i]
    #     draw1dmodel(x1, thk, color='blue')
    #
    # li = draw1dmodel(x, thk, color='red', **kwargs)
    # plt.xlim((min(xL) * 0.9, max(xU) * 1.1))
    # return li
def showStitchedModels(models, ax=None, x=None, cMin=None, cMax=None, thk=None,
                       logScale=True, title=None, zMin=0, zMax=0, zLog=False,
                       **kwargs):
    """Show several 1d block models as (stitched) section.

    Parameters
    ----------
    models : iterable of iterable (np.ndarray or list of np.array)
        1D models (consisting of thicknesses and values) to plot
    ax : matplotlib axes [None - create new]
        axes object to plot in
    x : iterable
        positions of individual models
    cMin/cMax : float [None - autodetection from range]
        minimum and maximum colorscale range
    logScale : bool [True]
        use logarithmic color scaling
    zMin/zMax : float [0 - automatic]
        range for z (y axis) limits
    zLog : bool
        use logarithmic z (y axis) instead of linear
    topo : iterable
        vector of elevation for shifting
    thk : iterable
        vector of layer thicknesses for all models; if given, the model
        vectors are assumed to hold values only
    Returns
    -------
    ax : matplotlib axes [None - create new]
        axes object to plot in
    """
    if x is None:
        x = np.arange(len(models))
    topo = kwargs.pop('topo', np.zeros_like(x))
    fig = None
    if ax is None:
        fig, ax = plt.subplots()
    # half the median model spacing defines the rectangle (half-)width
    dxmed2 = np.median(np.diff(x)) / 2.
    patches = []
    zMinLimit = 9e99
    zMaxLimit = 0
    if thk is not None:  # fixed thicknesses: model vectors are values only
        nlay = len(models[0])
    else:  # block model: first nlay-1 entries are thicknesses
        nlay = int(np.floor((len(models[0]) + 1) / 2.))
    vals = np.zeros((len(models), nlay))
    for i, imod in enumerate(models):
        if thk is not None:  # take only resistivity from model
            vals[i, :] = imod
            thki = thk
        else:  # extract thickness from model vector
            if isinstance(imod, pg.Vector):
                vals[i, :] = imod[nlay - 1:2 * nlay - 1]
                thki = np.asarray(imod[:nlay - 1])
            else:
                vals[i, :] = imod[nlay - 1:2 * nlay - 1]
                thki = imod[:nlay - 1]
        if zMax > 0:
            # clip/extend each model to the fixed maximum depth
            z = np.hstack((0., np.cumsum(thki), zMax))
        else:
            # extend the lowest layer by 3x its thickness to show a half-space
            thki = np.hstack((thki, thki[-1]*3))
            z = np.hstack((0., np.cumsum(thki)))
        z = topo[i] - z  # shift depths by elevation
        zMinLimit = min(zMinLimit, z[-1])
        zMaxLimit = max(zMaxLimit, z[0])
        for j in range(nlay):
            rect = Rectangle((x[i] - dxmed2, z[j]),
                             dxmed2 * 2, z[j+1]-z[j])
            patches.append(rect)
    p = PatchCollection(patches)  # , cmap=cmap, linewidths=0)
    if cMin is not None:
        p.set_clim(cMin, cMax)
    setMappableData(p, vals.ravel(), logScale=logScale)
    ax.add_collection(p)
    if logScale:
        # NOTE(review): with cMin/cMax None this relies on LogNorm
        # autoscaling from the data set above
        norm = colors.LogNorm(cMin, cMax)
        p.set_norm(norm)
    if 'cMap' in kwargs:
        p.set_cmap(kwargs['cMap'])
    # ax.set_ylim((zMaxLimit, zMin))
    ax.set_ylim((zMinLimit, zMaxLimit))
    if zLog:
        ax.set_yscale("log", nonposy='clip')
    ax.set_xlim((min(x) - dxmed2, max(x) + dxmed2))
    if title is not None:
        ax.set_title(title)
    if kwargs.pop('colorBar', True):
        cb = pg.viewer.mpl.createColorBar(p, cMin=cMin, cMax=cMax, nLevs=5)
        # cb = plt.colorbar(p, orientation='horizontal',aspect=50,pad=0.1)
        if 'cticks' in kwargs:
            # user-given colorbar ticks, clipped to the color range
            xt = np.unique(np.clip(kwargs['cticks'], cMin, cMax))
            cb.set_ticks(xt)
            cb.set_ticklabels([str(xti) for xti in xt])
    plt.draw()
    return ax  # maybe return cb as well?
def showStitchedModels_Redundant(mods, ax=None,
                                 cmin=None, cmax=None, **kwargs):
    """Show several 1d block models as (stitched) section.

    Deprecated/redundant variant of :func:`showStitchedModels`.

    Parameters
    ----------
    mods : iterable of iterable
        1D block models (thicknesses followed by values).
    ax : matplotlib axes [None - create new]
        Axes object to plot in.
    cmin/cmax : float [None - autodetect]
        Logarithmic color scale range.

    Returns
    -------
    (fig, ax) if a new figure was created here, otherwise the colorbar
    (or None if ``colorBar=False``).
    """
    x = kwargs.pop('x', np.arange(len(mods)))
    topo = kwargs.pop('topo', x*0)
    # number of layers in the block model (thicknesses + values)
    nlay = int(np.floor((len(mods[0]) - 1) / 2.)) + 1
    if cmin is None or cmax is None:
        # autodetect color range from all model values
        cmin = 1e9
        cmax = 1e-9
        for model in mods:
            res = np.asarray(model)[nlay - 1:nlay * 2 - 1]
            cmin = min(cmin, min(res))
            cmax = max(cmax, max(res))
    if kwargs.pop('sameSize', True):  # all having the same width
        dx = np.ones_like(x)*np.median(np.diff(x))
    else:
        dx = np.diff(x) * 1.05
        dx = np.hstack((dx, dx[-1]))
    x1 = x - dx / 2
    newFig = ax is None  # remember before rebinding (the old check was dead)
    if newFig:
        fig, ax = plt.subplots()
    else:
        fig = ax.figure
    # ax.plot(x, x * 0., 'k.')
    zm = kwargs.pop('zm', None)  # optional fixed maximum depth
    maxz = 0.
    if zm is not None:
        maxz = zm
    recs = []
    RES = []
    for i, mod in enumerate(mods):
        mod1 = np.asarray(mod)
        res = mod1[nlay - 1:]
        RES.extend(res)
        thk = mod1[:nlay - 1]
        thk = np.hstack((thk, thk[-1]))  # repeat last layer for half-space
        z = np.hstack((0., np.cumsum(thk)))
        if zm is not None:
            # clip the lowest layer to the given maximum depth
            thk[-1] = zm - z[-2]
            z[-1] = zm
        else:
            maxz = max(maxz, z[-1])
        for j, _ in enumerate(thk):
            recs.append(Rectangle((x1[i], topo[i]-z[j]), dx[i], -thk[j]))
    # BUG fix: 'edgecolors' was popped twice, so a user-supplied value never
    # reached set_edgecolor (the second pop always returned the default).
    edgecolors = kwargs.pop('edgecolors', 'none')
    pp = PatchCollection(recs, edgecolors=edgecolors)
    pp.set_edgecolor(edgecolors)
    pp.set_linewidths(0.0)
    ax.add_collection(pp)
    if 'cmap' in kwargs:
        pp.set_cmap(kwargs['cmap'])
    norm = colors.LogNorm(cmin, cmax)
    pp.set_norm(norm)
    pp.set_array(np.array(RES))
    # pp.set_clim(cmin, cmax)
    ax.set_ylim((-maxz, max(topo)))
    ax.set_xlim((x1[0], x1[-1] + dx[-1]))
    cbar = None
    if kwargs.pop('colorBar', True):
        cbar = plt.colorbar(pp, ax=ax, norm=norm, orientation='horizontal',
                            aspect=60)  # , ticks=[1, 3, 10, 30, 100, 300])
        if 'ticks' in kwargs:
            cbar.set_ticks(kwargs['ticks'])
        # cbar.autoscale_None()
    if newFig:  # newly created fig+ax
        return fig, ax
    else:  # already given, better give back color bar
        return cbar
def showStitchedModelsOld(models, x=None, cmin=None, cmax=None,
                          islog=True, title=None):
    """Show several 1d block models as (stitched) section.

    Deprecated predecessor of :func:`showStitchedModels`. NOTE(review):
    the actual rectangle drawing is commented out below, so only axes
    limits and the title are set.
    """
    if x is None:
        x = np.arange(len(models))
    # number of layers from the length of the block model (thk + res)
    nlay = int(np.floor((len(models[0]) - 1) / 2.)) + 1
    if cmin is None or cmax is None:
        # autodetect color range from all resistivity values
        cmin = 1e9
        cmax = 1e-9
        for model in models:
            res = np.asarray(model)[nlay - 1:nlay * 2 - 1]
            cmin = min(cmin, min(res))
            cmax = max(cmax, max(res))
    print("cmin=", cmin, " cmax=", cmax)
    dx = np.diff(x)
    dx = np.hstack((dx, dx[-1]))
    x1 = x - dx / 2  # left edges of the model columns
    ax = plt.gcf().add_subplot(111)
    ax.cla()
    mapsize = 64  # number of discrete colormap entries
    # cmap = jetmap(mapsize)
    plt.plot(x, np.zeros(len(x)), 'k.')
    maxz = 0.
    for mod in models:
        mod1 = np.asarray(mod)
        res = mod1[nlay - 1:]
        if islog:
            res = np.log(res)
            cmi = np.log(cmin)
            cma = np.log(cmax)
        else:
            cmi = cmin
            cma = cmax
        thk = mod1[:nlay - 1]
        thk = np.hstack((thk, thk[-1]))  # repeat last layer for half-space
        z = np.hstack((0., np.cumsum(thk)))
        maxz = max(maxz, z[-1])
        # normalized value -> colormap index, clipped to [0, mapsize-1]
        nres = (res - cmi) / (cma - cmi)
        cind = np.around(nres * mapsize)
        cind[cind >= mapsize] = mapsize - 1
        cind[cind < 0] = 0
        # for j in range(len(thk)):
        #     fc = cmap[cind[j], :]
        #     rect = Rectangle((x1[i], z[j]), dx[i], thk[j], fc=fc)
        #     plt.gca().add_patch(rect)
    ax.set_ylim((maxz, 0.))
    ax.set_xlim((x1[0], x1[-1] + dx[-1]))
    if title is not None:
        plt.title(title)
    plt.draw()
    return
def draw1dmodel(x, thk=None, xlab=None, zlab="z in m", islog=True, z0=0):
    """DEPRECATED -- thin forwarder to :func:`show1dmodel`."""
    print("STYLE_WARNING!!!!!!! don't use this call. "
          "Use show1dmodel or drawModel1D instead.")
    show1dmodel(x, thk=thk, xlab=xlab, zlab=zlab, islog=islog, z0=z0)
def show1dmodel(x, thk=None, xlab=None, zlab="z in m", islog=True, z0=0,
                **kwargs):
    """Show 1d block model defined by value and thickness vectors (legacy)."""
    print("STYLE_WARNING!!!!!!! don't use this call. "
          "WHO use this anymore??.")
    if xlab is None:
        xlab = "$\\rho$ in $\\Omega$m"
    if thk is None:  # gimli blockmodel (thk+x together) given
        nl = int(np.floor((len(x) - 1) / 2.)) + 1
        thk = np.asarray(x)[:nl - 1]
        x = np.asarray(x)[nl - 1:nl * 2 - 1]
    # interface depths, plus one extra point extending the lowest layer
    zTop = np.concatenate(([0], np.cumsum(thk))) + z0
    z = np.concatenate((zTop, [zTop[-1] * 1.2]))
    nl = len(x)
    # build the step profile as (2*nl, 1) column vectors
    px = np.zeros((nl * 2, 1))
    pz = np.zeros((nl * 2, 1))
    for i, xi in enumerate(x):
        px[2 * i:2 * i + 2] = xi  # value is constant within the layer
        pz[2 * i + 1] = z[i + 1]
        if i + 1 < nl:
            pz[2 * i + 2] = z[i + 1]
    plotter = plt.semilogx if islog else plt.plot
    plotter(px, pz, **kwargs)
    plt.ion()
    plt.grid(which='both')
    plt.xlim((np.min(x) * 0.9, np.max(x) * 1.1))
    plt.ylim((max(zTop) * 1.15, 0.))
    plt.xlabel(xlab)
    plt.ylabel(zlab)
    plt.show()
def showfdemsounding(freq, inphase, quadrat, response=None, npl=2):
    """Show FDEM sounding as real (inphase) and imaginary (quadrature) parts.

    Both components are plotted against frequency; the optional response
    vector holds nf inphase values followed by nf quadrature values.
    """
    nf = len(freq)
    fig = plt.figure(1)
    fig.clf()
    resp = None if response is None else np.asarray(response)
    # NOTE: pyplot state plotting -- each semilogy goes to the most recently
    # created subplot, so creation order matters here.
    axReal = fig.add_subplot(1, npl, npl - 1)
    plt.semilogy(inphase, freq, 'x-')
    if resp is not None:
        plt.semilogy(resp[:nf], freq, 'x-')
    plt.grid(which='both')
    axImag = fig.add_subplot(1, npl, npl)
    plt.semilogy(quadrat, freq, 'x-')
    if resp is not None:
        plt.semilogy(resp[nf:], freq, 'x-')
    plt.grid(which='both')
    fig.show()
    axes = [axReal, axImag]
    if npl > 2:
        axes.append(fig.add_subplot(1, npl, 1))
    return axes
|
JuliusHen/gimli | doc/examples/3_dc_and_ip/plot_02_ert_field_data.py | <reponame>JuliusHen/gimli
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
ERT field data with topography
------------------------------

Simple example of data measured over a slagdump demonstrating:

- 2D inversion with topography
- geometric factor generation
- topography effect
"""
# sphinx_gallery_thumbnail_number = 3
import pygimli as pg
from pygimli.physics.ert import ERTManager, createGeometricFactors
###############################################################################
# Get some example data with topography
#
data = pg.getExampleFile('ert/slagdump.ohm', load=True, verbose=True)
print(data)
###############################################################################
# The data file does not contain geometric factors (token field 'k'),
# so we create them based on the given topography.
data['k'] = createGeometricFactors(data, numerical=True)
###############################################################################
# We initialize the ERTManager for further steps and eventually inversion.
ert = ERTManager(sr=False, useBert=True, verbose=False, debug=False)
###############################################################################
# It might be interesting to see the topography effect, i.e. the ratio between
# the numerically computed geometry factor and the analytical formula
k0 = createGeometricFactors(data)
ert.showData(data, vals=k0/data['k'], label='Topography effect')
###############################################################################
# The data container has no apparent resistivities (token field 'rhoa') yet.
# We can let the Manager fix this later for us (as we now have the 'k' field),
# or we do it manually.
ert.checkData(data)
print(data)
###############################################################################
# The data container does not necessarily contain data errors
# (token field 'err'), requiring us to enter them. We can let the
# manager guess some defaults for us automatically or set them manually.
data['err'] = ert.estimateError(data, absoluteError=0.001, relativeError=0.03)
# or manually:
# data['err'] = data_errors  # somehow
###############################################################################
# Now the data have all necessary fields ('rhoa', 'err' and 'k') so we can run
# the inversion. The inversion mesh will be created with some optional values
# for the parametric mesh generation.
#
mod = ert.invert(data, lam=10, verbose=True,
                 paraDX=0.3, paraMaxCellSize=10, paraDepth=20, quality=33.6)
###############################################################################
# We can view the resulting model in the usual way.
ert.showResultAndFit()
# np.testing.assert_approx_equal(ert.inv.chi2(), 1.10883, significant=3)
###############################################################################
# Or just plot the model only.
ert.showModel(mod)
JuliusHen/gimli | doc/tutorials/2_modelling/plot_5-mod-fem-heat-2d.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
Heat equation in 2D
-------------------

This tutorial simulates the stationary heat equation in 2D. The example is
taken from the pyGIMLi paper (https://cg17.pygimli.org).
"""
import pygimli as pg
import pygimli.meshtools as mt
###############################################################################
# Create geometry definition for the modelling domain.
world = mt.createWorld(start=[-20, 0], end=[20, -16], layers=[-2, -8],
                       worldMarker=False)
# Create a heterogeneous block
block = mt.createRectangle(start=[-6, -3.5], end=[6, -6.0],
                           marker=4, boundaryMarker=10, area=0.1)
# Merge geometrical entities
geom = world + block
pg.show(geom, boundaryMarker=True)
###############################################################################
# Create a mesh based on the geometry definition. When calling the
# :func:`pg.meshtools.createMesh` function, a quality parameter
# can be forwarded to Triangle, which prescribes the minimum angle allowed in
# the final mesh. For a tutorial on the quality of the mesh please refer to:
# Mesh quality inspection [1]
# [1]: https://www.pygimli.org/_tutorials_auto/1_basics/plot_6-mesh-quality-inspection.html#sphx-glr-tutorials-auto-1-basics-plot-6-mesh-quality-inspection-py
# Note: Increasing quality increases computing time, take precaution with
# quality values over 33.
mesh = mt.createMesh(geom, quality=33, area=0.2, smooth=[1, 10])
pg.show(mesh)
###############################################################################
# Call :py:func:`pygimli.solver.solveFiniteElements` to solve the heat
# diffusion equation :math:`\nabla\cdot(a\nabla T)=0` with :math:`T(bottom)=1`
# and :math:`T(top)=0`, where :math:`a` is the thermal diffusivity and :math:`T`
# is the temperature distribution. We assign thermal diffusivities to the four
# regions using their marker number in a dictionary (a) and the fixed
# temperatures at the boundaries using Dirichlet boundary conditions with the
# respective markers in another dictionary (bc)
T = pg.solver.solveFiniteElements(mesh,
                                  a={1: 1.0, 2: 2.0, 3: 3.0, 4:0.1},
                                  bc={'Dirichlet': {8: 1.0, 4: 0.0}}, verbose=True)
ax, _ = pg.show(mesh, data=T, label='Temperature $T$', cMap="hot_r")
pg.show(geom, ax=ax, fillRegion=False)
# just hold figure windows open if run outside from spyder, ipython or similar
pg.wait()
JuliusHen/gimli | pygimli/testing/__init__.py | # coding=utf-8
"""
Testing utilities
In Python you can call `pygimli.test()` to run all docstring
examples.
Writing tests for pygimli
-------------------------
Please check: https://docs.pytest.org/en/latest/
"""
import sys
from os.path import join, realpath
import matplotlib.pyplot as plt
import numpy as np
import pygimli as pg
import warnings
# __testingMode__ = False
#
# def setTestingMode(mode):
# """Set pygimli testing mode.
#
# Testing mode ensures a constant seed for the random generator if you use
# pg.randn().
# """
# globals()[__testingMode__] = mode
#
# def testingMode():
# """Determine pygimli testing mode.
#
# Returns True if pygimli is in testing mode.
# """
# return globals()[__testingMode__]
def test(target=None, show=False, onlydoctests=False, coverage=False,
         htmlreport=False, abort=False, verbose=True):
    """Run docstring examples and additional tests.

    Examples
    --------
    >>> import pygimli as pg
    >>> # You can test everything with pg.test() or test a single function:
    >>> pg.test("pg.utils.boxprint", verbose=False)
    >>> # The target argument can also be the function directly
    >>> from pygimli.utils import boxprint
    >>> pg.test(boxprint, verbose=False)

    Parameters
    ----------
    target : function or string, optional
        Function or method to test. By default everything is tested.
    show : boolean, optional
        Show matplotlib windows during test run. They will be closed
        automatically.
    onlydoctests : boolean, optional
        If True, run only docstring examples and skip the dedicated test
        files in pygimli/testing.
    coverage : boolean, optional
        Create a coverage report. Requires the pytest-cov plugin.
    htmlreport : str, optional
        Filename for HTML report such as www.pygimli.org/build_tests.html.
        Requires pytest-html plugin.
    abort : boolean, optional
        Return correct exit code, e.g. abort documentation build when a test
        fails.
    """
    # pg.setTestingMode(True)
    # Fixed seed so doctest outputs using random numbers are reproducible
    np.random.seed(1337)
    # Remove figure warnings
    plt.rcParams["figure.max_open_warning"] = 1000
    warnings.filterwarnings("ignore", category=UserWarning,
                            message='Matplotlib is currently using agg, which is a'
                            ' non-GUI backend, so cannot show the figure.')
    # remember numpy print options; doctests may change them, restored below
    printopt = np.get_printoptions()
    if verbose:
        pg.boxprint("Testing pygimli %s" % pg.__version__, sym="+", width=90)
    # Numpy compatibility (array string representation has changed)
    if np.__version__[:4] == "1.14":
        pg.warn("Some doctests will fail due to old numpy version.",
                "Consider upgrading to numpy >= 1.15")
    if target:
        if isinstance(target, str):
            # If target is a string, such as "pg.solver.solve"
            # the code below will overwrite target with the corresponding
            # imported function, so that doctest works.
            target = target.replace("pg.", "pygimli.")
            import importlib
            mod_name, func_name = target.rsplit('.', 1)
            mod = importlib.import_module(mod_name)
            target = getattr(mod, func_name)
        if show:  # Keep figure opened if single function is tested
            plt.ioff()
        import doctest
        doctest.run_docstring_examples(target, globals(), verbose=verbose,
                                       optionflags=doctest.ELLIPSIS,
                                       name=target.__name__)
        return
    try:
        import pytest
    except ImportError:
        raise ImportError("pytest is required to run test suite. "
                          "Try 'sudo pip install pytest'.")
    # switch to the non-interactive Agg backend unless figures are wanted;
    # the previous backend is restored at the end
    old_backend = plt.get_backend()
    if not show:
        plt.switch_backend("Agg")
    else:
        plt.ion()
    # run pytest over the whole pygimli package directory
    cwd = join(realpath(__path__[0]), '..')
    excluded = [
        "gui", "physics/traveltime/example.py", "physics/em/fdemexample.py"
    ]
    if onlydoctests:
        excluded.append("testing")
    cmd = ([
        "-v", "-rsxX", "--color", "yes", "--doctest-modules", "--durations",
        "5", cwd
    ])
    for directory in excluded:
        cmd.extend(["--ignore", join(cwd, directory)])
    if coverage:
        pc = pg.optImport("pytest_cov", "create a code coverage report")
        if pc:
            cmd.extend(["--cov", "pygimli"])
            cmd.extend(["--cov-report", "term"])
    if htmlreport:
        ph = pg.optImport("pytest_html", "create a html report")
        if ph:
            cmd.extend(["--html", htmlreport])
    plt.close("all")
    exitcode = pytest.main(cmd)
    if abort:
        print("Exiting with exitcode", exitcode)
        sys.exit(exitcode)
    # restore global state changed above
    plt.switch_backend(old_backend)
    np.set_printoptions(**printopt)
|
JuliusHen/gimli | pygimli/testing/test_RValueConverter.py | <filename>pygimli/testing/test_RValueConverter.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import unittest
import numpy as np
import pygimli as pg
class TestRVectorMethods(unittest.TestCase):
    """Conversion tests between Python/numpy containers and pg core vectors.

    Fixed for numpy >= 1.24: the deprecated aliases ``np.int``, ``np.long``
    and ``np.complex`` were removed; the builtin types are equivalent.
    """

    def test_RVector(self):
        """ implemented in custom_rvalue.cpp"""
        a = pg.Vector(10)
        self.assertEqual(a.size(), 10.0)
        self.assertEqual(sum(a), 0.0)

    def test_ListToRVector3(self):
        """ implemented in custom_rvalue.cpp"""
        x = [0.0, 1.0, 0.0]
        p = pg.RVector3(x)
        self.assertEqual(p.dist(x), 0.0)
        self.assertEqual(p.dist([1.0, 1.0]), 1.0)
        p = pg.RVector3((0.0, 1.0, 0.0))
        self.assertEqual(p.dist([0.0, 1.0, 0.0]), 0.0)

    def test_ListToIndexArray(self):
        """ implemented in custom_rvalue.cpp"""
        idx = [0, 1, 1, 0]
        I = pg.core.IndexArray(idx)
        self.assertEqual(pg.sum(I), sum(idx))
        bn = (np.array(idx) > 0)  # numpy bool
        idx = np.nonzero(bn)[0]  # numpy int64
        # numpy int64 -> IndexArray
        I = pg.core.IndexArray(idx)
        self.assertEqual(I.size(), 2)
        self.assertEqual(pg.sum(I), sum(idx))

    def test_ListToRVector(self):
        """ implemented in custom_rvalue.cpp"""
        l = [1.0, 2.0, 3.0, 4.0]
        a = pg.Vector(l)
        self.assertEqual(a.size(), len(l))
        self.assertEqual(pg.sum(a), sum(l))
        l = (0.2, 0.3, 0.4, 0.5, 0.6)
        x = pg.Vector(l)
        self.assertEqual(x.size(), len(l))
        l = [1, 2, 3]
        x = pg.Vector(l)
        self.assertEqual(x.size(), len(l))

    def test_ListToR3Vector(self):
        """ implemented in custom_rvalue.cpp"""
        x = [0.0, 1.0, 0.0]
        p = pg.RVector3(x)
        pl = [p, p, p]
        t = pg.core.R3Vector(pl)
        self.assertEqual(t.size(), len(pl))

    def test_NumpyToIndexArray(self):
        """Implemented in custom_rvalue.cpp."""
        x = np.array(range(10))
        a = pg.core.IndexArray(x)
        self.assertEqual(a.size(), len(x))
        self.assertEqual(pg.sum(a), sum(x))
        x = np.arange(0, 10, dtype=np.int64)
        a = pg.core.IndexArray(x)
        self.assertEqual(a.size(), len(x))
        self.assertEqual(pg.sum(a), sum(x))
        x = np.arange(0, 10, dtype="int")
        a = pg.core.IndexArray(x)
        self.assertEqual(a.size(), len(x))
        self.assertEqual(pg.sum(a), sum(x))
        x = np.array([0, 100], dtype="int")
        a = pg.core.IndexArray(x)
        self.assertEqual(a.size(), len(x))
        self.assertEqual(pg.sum(a), sum(x))

    def test_NumpyToIVector(self):
        """Implemented in custom_rvalue.cpp."""
        x = np.array(range(-10, 10))
        a = pg.IVector(x)
        # pg.core.setDeepDebug(1)
        # print(a)
        # pg.core.setDeepDebug(0)
        # sys.exit()
        self.assertEqual(a.size(), len(x))
        self.assertEqual(pg.sum(a), sum(x))
        x = np.arange(-10, 10, dtype=np.int64)
        a = pg.IVector(x)
        self.assertEqual(a.size(), len(x))
        self.assertEqual(pg.sum(a), sum(x))
        x = np.arange(-10, 10, dtype="int")
        a = pg.IVector(x)
        self.assertEqual(a.size(), len(x))
        self.assertEqual(pg.sum(a), sum(x))
        x = np.array([-10, 100], dtype="int")
        a = pg.IVector(x)
        self.assertEqual(a.size(), len(x))
        self.assertEqual(pg.sum(a), sum(x))
        # np.long was removed in numpy >= 1.24; builtin int is the equivalent
        x = np.arange(10, dtype=int)
        a = pg.IVector(x)
        self.assertEqual(a.size(), len(x))
        self.assertEqual(pg.sum(a), sum(x))
        self.assertEqual(pg.sum(x), sum(x))

    def test_NumpyToBVector(self):
        """Implemented in custom_rvalue.cpp."""
        x = np.array(range(-10, 10), dtype=float)
        b = pg.BVector(x > 0.)
        self.assertEqual(b[10], False)
        self.assertEqual(b[11], True)

    def test_NumpyToRVector(self):
        """Implemented in custom_rvalue.cpp."""
        x = np.arange(0, 1., 0.2)
        a = pg.Vector(x)
        self.assertEqual(a.size(), len(x))
        self.assertEqual(pg.sum(a), sum(x))
        x = np.arange(0, 1., 0.2, dtype=np.float64)
        a = pg.Vector(x)
        self.assertEqual(a.size(), len(x))
        self.assertEqual(pg.sum(a), sum(x))
        # np.int/np.long were removed in numpy >= 1.24; builtin int works
        x = np.arange(10, dtype=int)
        a = pg.Vector(x)
        self.assertEqual(a.size(), len(x))
        self.assertEqual(pg.sum(a), sum(x))
        x = np.arange(10, dtype=int)
        a = pg.Vector(x)
        self.assertEqual(a.size(), len(x))
        self.assertEqual(pg.sum(a), sum(x))
        self.assertEqual(pg.sum(x), sum(x))

    def test_NumpyToCVector(self):
        pass
        # will not work .. until an idea how to choose right api for function with and RVector and CVector, e.g. sum()
        #
        #x = 1. + np.arange(0, 1., 0.1) * 1j
        #a = pg.CVector(x)
        #self.assertEqual(a.size(), len(x))
        #self.assertEqual(pg.math.real(a), x.real)
        #self.assertEqual(pg.math.imag(a), x.imag)
        #self.assertEqual(pg.sum(a), sum(x))
        #self.assertEqual(pg.sum(pg.math.real(a)), len(x))

    def test_NumpyToRMatrix(self):
        """Implemented in custom_rvalue.cpp."""
        M = np.ndarray((5, 4))
        A = pg.Matrix(M)
        self.assertEqual(A.rows(), M.shape[0])
        self.assertEqual(A.cols(), M.shape[1])
        M = np.arange(20.).reshape((5, 4))
        A = pg.Matrix(M)
        self.assertEqual(sum(A[0]), sum(M[0]))
        self.assertEqual(sum(A[1]), sum(M[1]))
        self.assertEqual(sum(A[2]), sum(M[2]))
        self.assertEqual(sum(A[3]), sum(M[3]))
        M = np.zeros((6,2), dtype=float)
        M[0:3,0] = 1
        M[3:,1] = 1
        A = pg.Matrix(M)
        self.assertEqual(A.col(0), M[:,0])
        self.assertEqual(A.col(1), M[:,1])
        A = pg.Matrix(M.T)
        self.assertEqual(A.row(0), M[:,0])
        self.assertEqual(A.row(1), M[:,1])

    def test_NumpyToRVector3(self):
        """Implemented in custom_rvalue.cpp."""
        x = np.array([0.0, 1.0, 0.0])
        p = pg.RVector3(x)
        self.assertEqual(p.dist(x), 0.0)
        self.assertEqual(p.dist([1.0, 1.0]), 1.0)
        x = np.array([0.0, 1.0])
        p = pg.RVector3(x)
        self.assertEqual(p.dist([0.0, 1.0, 0.0]), 0.0)

    def test_RVectorToNumpy(self):
        """Implemented through hand_made_wrapper.py"""
        # check that the conversion really uses the array protocol
        v = pg.Vector(10, 1.1)
        a = np.asarray(v)
        self.assertEqual(type(a), np.ndarray)
        self.assertEqual(len(a), 10)
        self.assertEqual(a[0], 1.1)
        a = np.asarray(v, "int")
        self.assertEqual(a[0], 1)
        a = np.array(v)
        self.assertEqual(type(a), np.ndarray)
        self.assertEqual(len(a), 10)

    def test_CVectorToNumpy(self):
        """Implemented through hand_made_wrapper.py"""
        # check that the conversion really uses the array protocol
        v = pg.CVector(10, 1.1 + 1j*3)
        a = np.array(v)
        self.assertEqual(type(a), np.ndarray)
        # np.complex was removed in numpy >= 1.24; builtin complex is the same
        self.assertEqual(a.dtype, complex)
        self.assertEqual(len(a), 10)
        self.assertEqual(a[0], 1.1 + 1j*3)

    def test_BVectorToNumpy(self):
        """Implemented through hand_made_wrapper.py"""
        # conversion not yet via array protocol,
        # see __init__.py:__BVectorArrayCall__
        v = pg.Vector(10, 1)
        b = (v == 1)
        self.assertEqual(type(b), pg.BVector)
        v = pg.Vector(10, 1.1)
        b = (v == 1.1)
        self.assertEqual(type(b), pg.BVector)
        a = np.asarray(b)
        self.assertEqual(type(a), np.ndarray)
        self.assertEqual(a.dtype, 'bool')
        self.assertEqual(len(a), 10)
        self.assertEqual(sum(a), 10)
        a = np.array(b)
        self.assertEqual(type(a), np.ndarray)
        self.assertEqual(len(a), 10)
        self.assertEqual(sum(a), 10)

    def test_IndexArrayToNumpy(self):
        """Implemented through hand_made_wrapper.py"""
        # check if array is really taken
        # not yet taken: .. see __init__.py:__BVectorArrayCall__
        v = pg.core.IndexArray(10, 2)
        self.assertEqual(type(v), pg.core.IndexArray)
        # print(type(v[0]))
        # print(pg.showSizes())
        a = np.asarray(v)
        self.assertEqual(type(a), np.ndarray)
        # self.assertEqual(a.dtype, 'int64')
        self.assertEqual(len(a), 10)
        self.assertEqual(sum(a), 20)
        a = np.array(v)
        self.assertEqual(type(a), np.ndarray)
        self.assertEqual(len(a), 10)
        self.assertEqual(sum(a), 20)

    def test_RVector3ToNumpy(self):
        """Implemented through hand_made_wrapper.py"""
        v = pg.RVector3()
        a = np.array(v)
        self.assertEqual(type(a), np.ndarray)
        self.assertEqual(len(a), 3)

    def test_R3VectorToNumpy(self):
        """Implemented through hand_made_wrapper.py"""
        mesh = pg.createGrid(x=[0, 1, 2], y=[0, 1, 2], z=[1, 2])
        v = np.asarray(mesh.positions())
        self.assertEqual(type(v), np.ndarray)
        self.assertEqual(len(v), mesh.nodeCount())
        a = np.array(mesh.cellCenter())
        self.assertEqual(type(a), np.ndarray)
        self.assertEqual(len(a), mesh.cellCount())
        self.assertEqual(mesh.positions()[0], v[0])

    def test_RMatrixToNumpy(self):
        """Implemented through automatic iterator """
        M = np.arange(20.).reshape((5, 4))
        A = pg.Matrix(M)
        N = np.array(A)
        self.assertEqual(A.rows(), N.shape[0])
        self.assertEqual(A.cols(), N.shape[1])
        self.assertEqual(sum(A[0]), sum(N[0]))
        self.assertEqual(sum(A[1]), sum(N[1]))
        self.assertEqual(sum(A[2]), sum(N[2]))
        self.assertEqual(sum(A[3]), sum(N[3]))
        M = np.arange(16.).reshape((4,4))
        A = pg.Matrix(M)
        M2 = np.array(A)
        np.testing.assert_equal(M, M2)
        A = np.array(pg.Matrix(4,4))

    def test_NumpyToScalar(self):
        """Implemented through automatic iterator """
        x = pg.Vector(2)
        x3 = pg.core.R3Vector(2)
        w = pg.Vector()
        x += np.float32(1.0)
        np.testing.assert_equal(sum(x + 1.0), 4.0)
        np.testing.assert_equal(sum(x + np.float32(1)), 4.0)
        np.testing.assert_equal(sum(x + np.float64(1)), 4.0)
        np.testing.assert_equal(sum(x - 1.0), 0.0)
        np.testing.assert_equal(sum(x - np.float32(1)), 0.0)
        np.testing.assert_equal(sum(x - np.float64(1)), 0.0)
        # HarmonicModelling(size_t nh, const RVector & tvec);
        pg.core.HarmonicModelling(np.int32(1), x)
        pg.core.HarmonicModelling(np.uint32(1), x)
        pg.core.HarmonicModelling(np.int64(1), x)
        pg.core.HarmonicModelling(np.uint64(1), x)
        # pg.PolynomialModelling(1, np.int32(1), x3, x);
        # pg.PolynomialModelling(1, np.int64(1), x3, x);
        # pg.PolynomialModelling(1, np.uint32(1), x3, x);
        # pg.PolynomialModelling(1, np.uint64(1), x3, x);
        x = pg.Pos(0.0, 0.0, 0.0)
        x += np.float32(1)
        np.testing.assert_equal(x, pg.Pos(1.0, 1.0, 1.0))
        np.testing.assert_equal(x - 1, pg.Pos(0.0, 0.0, 0.0))
        np.testing.assert_equal(x - np.float32(1), pg.Pos(0.0, 0.0, 0.0))
        np.testing.assert_equal(x - np.float64(1), pg.Pos(0.0, 0.0, 0.0))
if __name__ == '__main__':
    # ensure verbose core debugging is switched off before running the suite
    pg.core.setDeepDebug(0)
    unittest.main()
|
JuliusHen/gimli | pygimli/physics/traveltime/utils.py | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tools for traveltime/refraction tomography."""
import numpy as np
import pygimli as pg
def createCrossholeData(sensors):
    """Create crosshole scheme assuming two boreholes with equal sensor numbers.

    Parameters
    ----------
    sensors : array (Nx2)
        Array with position of sensors.

    Returns
    -------
    scheme : DataContainer
        Data container with `sensors` predefined sensor indices 's' and 'g'
        for shot and receiver numbers.
    """
    from itertools import product
    if len(sensors) % 2 > 0:
        pg.error("createCrossholeData is only defined for an equal number of"
                 " sensors in two boreholes.")
    half = len(sensors) // 2
    # every sensor in borehole 1 shoots to every sensor in borehole 2
    shotIds = np.arange(half)
    pairs = np.array(list(product(shotIds, shotIds + half)))
    scheme = pg.DataContainer()
    for pos in sensors:
        scheme.createSensor(pos)
    scheme.resize(len(pairs))
    scheme["s"] = pairs[:, 0]
    scheme["g"] = pairs[:, 1]
    scheme["valid"] = np.ones(len(pairs))
    scheme.registerSensorIndex("s")
    scheme.registerSensorIndex("g")
    return scheme
def shotReceiverDistances(data, full=False):
    """Return vector of all distances (in m) between shot and receiver.

    for each 's' and 'g' in data.

    Parameters
    ----------
    data : pg.DataContainerERT
    full : bool [False]
        Get distances between shot and receiver position when full is True or
        only from the x coordinate if full is False

    Returns
    -------
    dists : array
        Array of distances
    """
    if full:
        # full 3D distance between shot and receiver positions
        pos = data.sensors()
        shots, geophones = data.id("s"), data.id("g")
        offsets = [pos[si].distance(pos[gi])
                   for si, gi in zip(shots, geophones)]
        return np.absolute(offsets)
    # offsets from x coordinates only
    px = pg.x(data)
    gx = np.array([px[g] for g in data.id("g")])
    sx = np.array([px[s] for s in data.id("s")])
    return np.absolute(gx - sx)
def createRAData(sensors, shotDistance=1):
    """Create a refraction data container.

    Default data container for shot and geophone at every sensor position.
    Predefined sensor indices 's' as shot position and 'g' as
    geophone position.

    Parameters
    ----------
    sensors: ndarray | R3Vector
        Geophone and shot positions (same)
    shotDistance: int [1]
        Distance between shot indices.

    Returns
    -------
    data : DataContainer
        Data container with predefined sensor indices 's' and 'g'
        for shot and receiver numbers.
    """
    data = pg.DataContainer()
    data.registerSensorIndex("s")
    data.registerSensorIndex("g")
    if isinstance(sensors, np.ndarray):
        if len(sensors.shape) == 1:
            # 1D array of x positions: place sensors at the surface (z=0)
            for x in sensors:
                data.createSensor([x, 0.0, 0.0])
        else:
            data.setSensorPositions(sensors)
    else:
        data.setSensorPositions(sensors)
    S, G = [], []
    for s in range(0, data.sensorCount(), shotDistance):
        for g in range(data.sensorCount()):
            # BUG fix: was 'if s is not g' -- identity comparison of ints is
            # only reliable for CPython's cached small ints (<= 256) and would
            # include shot==receiver pairs for larger sensor counts.
            if s != g:
                S.append(s)
                G.append(g)
    data.resize(len(S))
    data.set("s", S)
    data.set("g", G)
    data.set("valid", np.abs(np.sign(data("g") - data("s"))))
    return data
def createGradientModel2D(data, mesh, vTop, vBot):
    """Create 2D velocity gradient model.

    Creates a smooth, linear, starting model that takes the slope
    of the topography into account. This is done by fitting a straight line
    and using the distance to that as the depth value.
    Known as "The Marcus method"

    TODO
    ----
    * Cite "The Marcus method"

    Parameters
    ----------
    data: pygimli DataContainer
        The topography list is in here.
    mesh: pygimli.Mesh
        The parametric mesh used for the inversion
    vTop: float
        The velocity at the surface of the mesh
    vBot: float
        The velocity at the bottom of the mesh

    Returns
    -------
    model: pygimli Vector, length M
        A numpy array with slowness values that can be used to start
        the inversion.
    """
    yVals = pg.y(data)
    # fall back to z coordinates if all y values are (numerically) zero
    if abs(min(yVals)) < 1e-8 and abs(max(yVals)) < 1e-8:
        yVals = pg.z(data)
    p = np.polyfit(pg.x(data), yVals, deg=1)  # slope-intercept form
    n = np.asarray([-p[0], 1.0])  # normal vector of the fitted line
    nLen = np.sqrt(np.dot(n, n))
    x = pg.x(mesh.cellCenters())
    z = pg.y(mesh.cellCenters())
    pos = np.column_stack((x, z))
    # perpendicular distance of each cell center to the fitted line;
    # vectorized instead of the former Python loop over all cells
    d = np.abs(pos.dot(n) - p[1]) / nLen
    # linear velocity gradient with depth, returned as slowness
    return 1.0 / np.interp(d, [min(d), max(d)], [vTop, vBot])
|
JuliusHen/gimli | doc/examples/dev/multi/seismics.py | #!/usr/bin/env python
"""
Test multi
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import pygimli as pg
from pygimli.solver import parseArgToArray
from pygimli.physics.seismics import ricker, solvePressureWave
import pygimli.meshtools as mt
def createCacheName(base, mesh, times):
    """Build a cache file name from base name, mesh size and time step count."""
    return "_seis-cache-{}-{}-{}".format(base, mesh.nodeCount(), len(times))
def velocityVp(porosity, vMatrix=5000, vFluid=1442, S=1,
               mesh=None):
    r"""Compute velocity from porosity (time-average style mixing)."""
    # broadcast all arguments to one value per mesh cell
    nCells = mesh.cellCount()
    porosity = parseArgToArray(porosity, nCells, mesh)
    vMatrix = parseArgToArray(vMatrix, nCells, mesh)
    vFluid = parseArgToArray(vFluid, nCells, mesh)
    S = parseArgToArray(S, nCells, mesh)
    vAir = 343.0
    # better use petrophysics module
    # total slowness = volume-weighted sum of matrix/fluid/air slownesses
    slowness = (np.array((1. - porosity) / vMatrix) +
                porosity * S / vFluid +
                porosity * (1. - S) / vAir)
    return 1. / slowness
def calcSeismics(meshIn, vP):
    """Run a pressure-wave simulation and render the wave field as a movie.

    The input mesh is h-refined, surrounded by a triangle boundary and
    smoothed before the velocity model *vP* is interpolated onto it.
    Computed wave fields are cached on disk (filename derived from mesh and
    time discretization) so repeated calls skip the expensive solve.

    Parameters
    ----------
    meshIn: :gimliapi:`GIMLI::Mesh`
        Parameter mesh carrying the model region.
    vP: iterable
        P-wave velocity per cell of *meshIn* in m/s.

    Side effects
    ------------
    Writes cache files (``_seis-cache-*``), a movie ``seis*.mp4`` and,
    best-effort, single frames extracted via ffmpeg.
    """
    meshSeis = meshIn.createH2()
    meshSeis = mt.appendTriangleBoundary(
        meshSeis, xbound=25, ybound=22.0, marker=1, quality=32.0, area=0.3,
        smooth=True, markerBoundary=1, isSubSurface=False, verbose=False)
    print(meshSeis)
    meshSeis = meshSeis.createH2()
    meshSeis = meshSeis.createH2()
    # meshSeis = meshSeis.createP2()
    meshSeis.smooth(1, 1, 1, 4)
    # carry the velocity model over to the refined simulation mesh
    vP = pg.interpolate(meshIn, vP, meshSeis.cellCenters())
    mesh = meshSeis
    vP = pg.solver.fillEmptyToCellArray(mesh, vP)
    print(mesh)
    # ax, cbar = pg.show(mesh, data=vP)
    # pg.show(mesh, axes=ax)
    geophPointsX = np.arange(-19, 19.1, 1)
    geophPoints = np.vstack((geophPointsX, np.zeros(len(geophPointsX)))).T
    sourcePos = geophPoints[4]
    # estimate a characteristic edge length h at the source cell to pick dt
    c = mesh.findCell(sourcePos)
    h1 = pg.findBoundary(c.boundaryNodes(0)).size()
    h2 = pg.findBoundary(c.boundaryNodes(1)).size()
    h3 = pg.findBoundary(c.boundaryNodes(2)).size()
    print([h1, h2, h3])
    h = pg.math.median([h1, h2, h3])
    # h = pg.math.median(mesh.boundarySizes())
    f0scale = 0.25
    cfl = 0.5  # CFL stability criterion for the explicit time stepping
    dt = cfl * h / max(vP)
    print("Courant-Friedrich-Lewy number:", cfl)
    tmax = 40./min(vP)
    times = np.arange(0.0, tmax, dt)
    solutionName = createCacheName('seis', mesh, times) + "cfl-" + str(cfl)
    try:
        # u = pg.load(solutionName + '.bmat')
        uI = pg.load(solutionName + 'I.bmat')
    except Exception as e:
        # cache miss: compute the wave field and store it for reuse
        print(e)
        f0 = f0scale * 1./dt
        print("h:", round(h, 2),
              "dt:", round(dt, 5),
              "1/dt:", round(1/dt, 1),
              "f0", round(f0, 2),
              "Wavelength: ", round(max(vP)/f0, 2), " m")
        # Ricker wavelet as source-time function, delayed by one period
        uSource = ricker(times, f0, t0=1./f0)
        plt.figure()
        plt.plot(times, uSource, '-*')
        plt.show(block=0)
        plt.pause(0.01)
        u = solvePressureWave(mesh, vP, times, sourcePos=sourcePos,
                              uSource=uSource, verbose=10)
        u.save(solutionName)
        uI = pg.Matrix()
        print("interpolate node to cell data ... ")
        pg.interpolate(mesh, u, mesh.cellCenters(), uI)
        print("... done")
        uI.save(solutionName+'I')
    # nodes = [mesh.findNearestNode(p) for p in geophPoints]
    # fig = plt.figure()
    # axs = fig.add_subplot(1,1,1)
    # drawSeismogram(axs, mesh, u, nodes, dt, i=None)
    # plt.show()
    dpi = 92
    scale = 1
    fig = plt.figure(facecolor='white',
                     figsize=(scale*800/dpi, scale*490/dpi), dpi=dpi)
    ax = fig.add_subplot(1, 1, 1)
    gci = pg.viewer.mpl.drawModel(ax, mesh, data=uI[0],
                                  cMin=-1, cMax=1, cmap='bwr')
    pg.viewer.mpl.drawMeshBoundaries(ax, meshIn, hideMesh=1)
    ax.set_xlim((-20, 20))
    ax.set_ylim((-15, 0))
    ax.set_ylabel('Depth [m]')
    ax.set_xlabel('$x$ [m]')
    # show positive depth labels on the (negative-valued) y axis
    ticks = ax.yaxis.get_majorticklocs()
    tickLabels = []
    for t in ticks:
        tickLabels.append(str(int(abs(t))))
    ax.set_yticklabels(tickLabels)
    plt.tight_layout()
    # ax, cbar = pg.show(mesh, data=vP)
    # pg.showNow()
    # ax = fig.add_subplot(1,1,1)

    def animate(i):
        # draw every 5th stored time step; normalize and log-damp for display
        i = i*5
        if i > len(uI)-1:
            return
        print("Frame:", i, "/", len(uI))
        ui = uI[i]
        ui = ui / max(pg.abs(ui))
        ui = pg.logDropTol(ui, 1e-2)
        cMax = max(pg.abs(ui))
        pg.viewer.mpl.setMappableData(gci, ui,
                                      cMin=-cMax, cMax=cMax,
                                      logScale=False)
        # plt.pause(0.001)

    anim = animation.FuncAnimation(fig, animate,
                                   frames=int(len(uI)/5),
                                   interval=0.001, repeat=0)  # , blit=True)
    out = 'seis' + str(f0scale) + "cfl-" + str(cfl)
    anim.save(out + ".mp4", writer=None, fps=20, dpi=dpi, codec=None,
              bitrate=24*1024, extra_args=None, metadata=None,
              extra_anim=None, savefig_kwargs=None)
    try:
        print("create frames ... ")
        os.system('mkdir -p anim-' + out)
        os.system('ffmpeg -i ' + out + '.mp4 anim-' + out + '/movie%d.jpg')
    except Exception:
        # best-effort frame extraction; a bare except here would also
        # swallow KeyboardInterrupt/SystemExit
        pass
|
JuliusHen/gimli | pygimli/physics/traveltime/TravelTimeManager.py | <filename>pygimli/physics/traveltime/TravelTimeManager.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Class for managing first arrival travel time inversions"""
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
import pygimli as pg
from pygimli.frameworks import MeshMethodManager
from pygimli.utils import getSavePath
from . modelling import TravelTimeDijkstraModelling
class TravelTimeManager(MeshMethodManager):
    """Manager for refraction seismics (traveltime tomography).

    TODO Document main members and use default MethodManager interface
    e.g., self.inv, self.fop, self.paraDomain, self.mesh, self.data
    """

    def __init__(self, data=None, **kwargs):
        """Create an instance of the Traveltime manager.

        Parameters
        ----------
        data: :gimliapi:`GIMLI::DataContainer` | str
            You can initialize the Manager with data or give them a dataset
            when calling the inversion.
        """
        self._useFMM = False
        self.secNodes = 2  # default number of secondary nodes for inversion
        super(TravelTimeManager, self).__init__(data=data, **kwargs)
        # traveltimes are inverted without (log) transformation
        self.inv.dataTrans = pg.trans.Trans()

    @property
    def velocity(self):
        """Return velocity vector (the inversion model)."""
        # we can check here if there was an inversion run
        return self.fw.model  # shouldn't it be the inverse?

    def createForwardOperator(self, **kwargs):
        """Create default forward operator for traveltime modelling.

        If your Manager should use a special forward operator, replace it
        here; by default Dijkstra (shortest path) modelling is used.
        """
        fop = TravelTimeDijkstraModelling(**kwargs)
        return fop

    def load(self, fileName):
        """Load any supported data file."""
        self.data = pg.physics.traveltime.load(fileName)
        return self.data

    def createMesh(self, data=None, **kwargs):
        """Create default inversion mesh.

        Inversion mesh for traveltime inversion does not need boundary region.
        """
        d = data or self.data
        if d is None:
            pg.critical('Please provide a data file for mesh generation')
        return pg.meshtools.createParaMesh(d.sensors(),
                                           boundary=0, **kwargs)

    def checkData(self, data):
        """Return traveltime values ('t') from a container or data as-is."""
        if isinstance(data, pg.DataContainer):
            if not data.haveData('t'):
                pg.critical('DataContainer has no "t" values.')
            return data['t']
        return data

    def checkError(self, err, dataVals):
        """Return relative error (fallback: 3 % if no 'err' entry exists)."""
        if isinstance(err, pg.DataContainer):
            if not err.haveData('err'):
                pg.error('DataContainer has no "err" values. Fallback to 3%')
                return np.ones(err.size()) * 0.03
            # container stores absolute errors; convert to relative
            return err['err'] / dataVals
        return err

    def applyMesh(self, mesh, secNodes=None, ignoreRegionManager=False):
        """Apply mesh, i.e. set mesh in the forward operator class."""
        if secNodes is None:
            secNodes = self.secNodes
        self.fop._refineSecNodes = secNodes
        if secNodes > 0:
            if ignoreRegionManager:
                # refine here since the region manager will not do it for us
                mesh = self.fop.createRefinedFwdMesh(mesh)
        self.fop.setMesh(mesh, ignoreRegionManager=ignoreRegionManager)

    def simulate(self, mesh, scheme, slowness=None, vel=None, seed=None,
                 secNodes=2, noiseLevel=0.0, noiseAbs=0.0, **kwargs):
        """Simulate traveltime measurements.

        Perform the forward task for a given mesh, a slowness distribution
        (per cell) and return data (traveltime) for a measurement scheme.

        Parameters
        ----------
        mesh : :gimliapi:`GIMLI::Mesh`
            Mesh to calculate for or use the last known mesh.
        scheme: :gimliapi:`GIMLI::DataContainer`
            Data measurement scheme needs 's' for shot and 'g' for geophone
            data token.
        slowness : array(mesh.cellCount()) | array(N, mesh.cellCount())
            Slowness distribution for the given mesh cells can be:
            * a single array of len mesh.cellCount()
            * a matrix of N slowness distributions of len mesh.cellCount()
            * a res map as [[marker0, res0], [marker1, res1], ...]
        vel : array(mesh.cellCount()) | array(N, mesh.cellCount())
            Velocity distribution for the given mesh cells.
            Will overwrite given slowness.
        secNodes: int [2]
            Number of refinement nodes to increase accuracy of the forward
            calculation.
        noiseLevel: float [0.0]
            Add relative noise to the simulated data. noiseLevel*100 in %
        noiseAbs: float [0.0]
            Add absolute noise to the simulated data in ms.
        seed: int [None]
            Seed the random generator for the noise.

        Keyword Arguments
        -----------------
        returnArray: [False]
            Return only the calculated times.
        verbose: [self.verbose]
            Overwrite verbose level.
        **kwargs
            Additional kwargs ...

        Returns
        -------
        t : array(N, data.size()) | DataContainer
            The resulting simulated travel time values.
            Either one column array or matrix in case of slowness matrix.
        """
        verbose = kwargs.pop('verbose', self.verbose)
        fop = self.fop
        fop.data = scheme
        fop.verbose = verbose
        if mesh is not None:
            self.applyMesh(mesh, secNodes=secNodes, ignoreRegionManager=True)
        if vel is not None:
            slowness = 1/vel
        if slowness is None:
            pg.critical("Need some slowness or velocity distribution for"
                        " simulation.")
        if len(slowness) == self.fop.mesh().cellCount():
            t = fop.response(slowness)
        else:
            print(self.fop.mesh())
            print("slowness: ", slowness)
            pg.critical("Simulate called with wrong slowness array.")
        ret = pg.DataContainer(scheme)
        ret.set('t', t)
        if noiseLevel > 0 or noiseAbs > 0:
            if not ret.allNonZero('err'):
                # derive absolute error estimates from the noise settings
                ret.set('t', t)
                err = noiseAbs + t * noiseLevel
                ret.set('err', err)
            pg.verbose("Absolute error estimates (min:max) {0}:{1}".format(
                min(ret('err')), max(ret('err'))))
            t += pg.randn(ret.size(), seed=seed) * ret('err')
            ret.set('t', t)
        if kwargs.pop('returnArray', False) is True:
            return t
        return ret

    def invert(self, data=None, useGradient=True, vTop=500, vBottom=5000,
               secNodes=2, **kwargs):
        """Invert data.

        Parameters
        ----------
        data : pg.DataContainer()
            Data container with at least sensor indices 's' and 'g' and
            data values 't' (traveltime in ms) and 'err' (absolute error in ms)
        useGradient: bool [True]
            Use a gradient like starting model suited for standard flat
            earth cases. [Default]
            For cross tomography geometry you should set this to False for a
            non-gradient starting model.
        vTop: float
            Top velocity for gradient starting model.
        vBottom: float
            Bottom velocity for gradient starting model.
        secNodes: int [2]
            Amount of secondary nodes used to ensure accuracy of the forward
            operator.

        Keyword Arguments
        -----------------
        ** kwargs:
            Inversion related arguments:
            See :py:mod:`pygimli.frameworks.MeshMethodManager.invert`
        """
        mesh = kwargs.pop('mesh', None)
        self.secNodes = secNodes
        if 'limits' in kwargs:
            # inversion works on slowness, so velocity limits must be
            # converted (and swapped) into slowness limits
            if kwargs['limits'][0] > 1:
                tmp = kwargs['limits'][0]
                kwargs['limits'][0] = 1.0 / kwargs['limits'][1]
                kwargs['limits'][1] = 1.0 / tmp
                pg.verbose('Switching velocity limits to slowness limits.',
                           kwargs['limits'])
        if useGradient:
            self.fop._useGradient = [vTop, vBottom]
        else:
            self.fop._useGradient = None
        slowness = super().invert(data, mesh, **kwargs)
        velocity = 1.0 / slowness
        self.fw.model = velocity
        return velocity

    def drawRayPaths(self, ax, model=None, **kwargs):
        """Draw the ray paths for a model or the last model.

        If model is not specified, the last calculated Jacobian is used.

        Parameters
        ----------
        model : array
            Velocity model for which to calculate and visualize ray paths (the
            default is model for last Jacobian calculation in self.velocity).
        ax : matplotlib.axes object
            To draw the model and the path into.
        **kwargs : type
            Additional arguments passed to LineCollection (alpha, linewidths,
            color, linestyles).

        Returns
        -------
        lc : matplotlib.LineCollection
        """
        if model is not None:
            if self.fop.jacobian().size() == 0 or model != self.model:
                self.fop.createJacobian(1/model)
        else:
            model = self.model
        _ = kwargs.setdefault("color", "w")
        _ = kwargs.setdefault("alpha", 0.5)
        _ = kwargs.setdefault("linewidths", 0.8)
        shots = self.fop.data.id("s")
        recei = self.fop.data.id("g")
        segs = []
        for s, g in zip(shots, recei):
            # collect the node indices along the shortest path s -> g
            wi = self.fop.way(s, g)
            points = self.fop._core.mesh().positions(withSecNodes=True)[wi]
            segs.append(np.column_stack((pg.x(points), pg.y(points))))
        lc = LineCollection(segs, **kwargs)
        ax.add_collection(lc)
        return lc

    def showRayPaths(self, model=None, ax=None, **kwargs):
        """Show the model with ray paths for given model.

        If no model is specified, the last calculated Jacobian is taken.

        Parameters
        ----------
        model : array
            Velocity model for which to calculate and visualize ray paths (the
            default is model for last Jacobian calculation in self.velocity).
        ax : matplotlib.axes object
            To draw the model and the path into.
        **kwargs : type
            forward to drawRayPaths

        Returns
        -------
        ax : matplotlib.axes object
        cb : matplotlib.colorbar object (only if model is provided)

        Examples
        --------
        >>> # No reason to import matplotlib
        >>> import pygimli as pg
        >>> from pygimli.physics import TravelTimeManager
        >>> from pygimli.physics.traveltime import createRAData
        >>>
        >>> x, y = 8, 6
        >>> mesh = pg.createGrid(x, y)
        >>> data = createRAData([(0,0)] + [(x, i) for i in range(y)],
        ...                     shotDistance=y+1)
        >>> data.set("t", pg.Vector(data.size(), 1.0))
        >>> tt = TravelTimeManager()
        >>> tt.fop.setData(data)
        >>> tt.applyMesh(mesh, secNodes=10)
        >>> ax, cb = tt.showRayPaths(showMesh=True, diam=0.1)
        """
        if model is None:
            if self.fop.jacobian().size() == 0:
                self.fop.mesh()  # ensure the forward mesh is initialized
                model = pg.Vector(self.fop.regionManager().parameterCount(),
                                  1.0)
            else:
                model = self.model
        ax, cbar = self.showModel(ax=ax, model=model,
                                  showMesh=kwargs.pop('showMesh', None),
                                  diam=kwargs.pop('diam', None))
        self.drawRayPaths(ax, model=model, **kwargs)
        return ax, cbar

    def rayCoverage(self):
        """Ray coverage, i.e. summed raypath lengths."""
        return self.fop.jacobian().transMult(
            np.ones(self.fop.jacobian().rows()))

    def standardizedCoverage(self):
        """Standardized coverage vector (0|1) using neighbor info."""
        coverage = self.rayCoverage()
        C = self.fop.constraintsRef()
        return np.sign(np.absolute(C.transMult(C * coverage)))

    def showCoverage(self, ax=None, name='coverage', **kwargs):
        """Show the ray coverage in log-scale."""
        if ax is None:
            fig, ax = plt.subplots()
        cov = self.rayCoverage()
        # shift zero-coverage cells by half the smallest nonzero value
        # so the log10 below stays finite
        return pg.show(self.fop.paraDomain,
                       pg.log10(cov+min(cov[cov > 0])*.5), ax=ax,
                       coverage=self.standardizedCoverage(), **kwargs)

    def saveResult(self, folder=None, size=(16, 10), verbose=False, **kwargs):
        """Save the results in a specified (or date-time derived) folder.

        Saved items are:
        * Resulting inversion model
        * Velocity vector
        * Coverage vector
        * Standardized coverage vector
        * Mesh (bms and vtk with results)

        Args
        ----
        folder: str [None]
            Path to save into. If not set the name is automatically created
        size: (float, float) (16,10)
            Figure size.

        Keyword Args
        ------------
        Will be forwarded to showResults

        Returns
        -------
        str:
            Name of the result path.
        """
        subfolder = self.__class__.__name__
        path = getSavePath(folder, subfolder)
        if verbose:
            pg.info('Saving refraction data to: {}'.format(path))
        np.savetxt(os.path.join(path, 'velocity.vector'),
                   self.velocity)
        np.savetxt(os.path.join(path, 'velocity-cov.vector'),
                   self.rayCoverage())
        np.savetxt(os.path.join(path, 'velocity-scov.vector'),
                   self.standardizedCoverage())
        m = pg.Mesh(self.paraDomain)
        m['Velocity'] = self.paraModel(self.velocity)
        m['Coverage'] = self.rayCoverage()
        m['S_Coverage'] = self.standardizedCoverage()
        m.exportVTK(os.path.join(path, 'velocity'))
        m.saveBinaryV2(os.path.join(path, 'velocity-pd'))
        self.fop.mesh().save(os.path.join(path, 'velocity-mesh'))
        np.savetxt(os.path.join(path, 'chi.txt'), self.inv.chi2History)
        fig, ax = plt.subplots()
        self.showResult(ax=ax, cov=self.standardizedCoverage(), **kwargs)
        fig.set_size_inches(size)
        fig.savefig(os.path.join(path, 'velocity.pdf'), bbox_inches='tight')
        pg.plt.close(fig)
        return path
|
JuliusHen/gimli | pygimli/meshtools/grid.py | <reponame>JuliusHen/gimli<filename>pygimli/meshtools/grid.py
# -*- coding: utf-8 -*-
"""General grid generation and maintenance."""
import os
import numpy as np
import pygimli as pg
from .polytools import polyCreateWorld, syscallTetgen
def createGrid(x=None, y=None, z=None, **kwargs):
    """Create grid style mesh.

    Generate simple grid with defined node positions for each dimension.
    The resulting grid depends on the amount of given coordinate arguments
    and consists out of edges (1D - x), quads (2D- x and y), or
    hexahedrons(3D- x, y, and z).

    Parameters
    ----------
    kwargs:
        x: array
            x-coordinates for all Nodes (1D, 2D, 3D)
        y: array
            y-coordinates for all Nodes (2D, 3D)
        z: array
            z-coordinates for all Nodes (3D)
        marker: int = 0
            Marker for resulting cells.
        worldBoundaryMarker : bool = False
            Boundaries are enumerated with world marker, i.e., Top = -1
            All remaining = -2.
            Default marker are left=1, right=2, top=3, bottom=4, front=5, back=6

    Returns
    -------
    :gimliapi:`GIMLI::Mesh`
        Either 1D, 2D or 3D mesh depending the input.

    Examples
    --------
    >>> import pygimli as pg
    >>> mesh = pg.meshtools.createGrid(x=[0, 1, 1.5, 2], y=[-1, -0.5, -0.25, 0],
    ...                                marker=2)
    >>> print(mesh)
    Mesh: Nodes: 16 Cells: 9 Boundaries: 24
    >>> fig, axs = pg.plt.subplots(1, 2)
    >>> _ = pg.show(mesh, markers=True, showMesh=True, ax=axs[0])
    >>> mesh = pg.meshtools.createGrid(x=[0, 1, 1.5, 2], y=[-1, -0.5, -0.25, 0],
    ...                                worldBoundaryMarker=True, marker=2)
    >>> print(mesh)
    Mesh: Nodes: 16 Cells: 9 Boundaries: 24
    >>> _ = pg.show(mesh, markers=True, showBoundaries=True,
    ...             showMesh=True, ax=axs[1])
    """
    # a 'degree' keyword delegates to the pie-shaped variant
    if 'degree' in kwargs:
        return createGridPieShaped(x, **kwargs)
    # integer counts are expanded into [0, 1, ..., n-1] coordinates
    for key, coords in (('x', x), ('y', y), ('z', z)):
        if coords is None:
            continue
        if isinstance(coords, int):
            coords = list(range(coords))
        kwargs[key] = coords
    return pg.core.pgcore.createGrid(**kwargs)
def createGridPieShaped(x, degree=10.0, h=2, marker=0):
    """Create a 2D pie shaped grid (segment from annulus or circle).

    TODO:
    ----
    * degree: > 90 .. 360

    Arguments
    ---------
    x: array
        x-coordinates for all Nodes (2D). If you need it 3D, you can apply :py:mod:`pygimli.meshtools.extrudeMesh` on it.
    degree: float [None]
        Create a pie shaped grid for a value between 0 and 90.
        Creates an optional inner boundary (marker=2) for a annulus with x[0] > 0. Outer boundary marker is 1. Optional h refinement. Center node is the first for circle segment.
    h: int [2]
        H-Refinement for degree option.
    marker: int = 0
        Marker for resulting cells.

    Returns
    -------
    mesh: :gimliapi:`GIMLI::Mesh`

    Examples
    --------
    >>> import pygimli as pg
    >>> mesh = pg.meshtools.createGridPieShaped(x=[0, 1, 3], degree=45, h=3)
    >>> print(mesh)
    Mesh: Nodes: 117 Cells: 128 Boundaries: 244
    >>> _ = pg.show(mesh)
    >>> mesh = pg.meshtools.createGridPieShaped(x=[1, 2, 3], degree=45, h=3)
    >>> print(mesh)
    Mesh: Nodes: 153 Cells: 128 Boundaries: 280
    >>> _ = pg.show(mesh)
    """
    mesh = pg.Mesh(dim=2)
    # for every radius create one node on the x-axis and its rotated twin
    # on the ray at the given angle
    for i in range(0, len(x)):
        mesh.createNodeWithCheck([x[i], 0.0])
        mesh.createNodeWithCheck([x[i]*np.cos(degree*np.pi/180),
                                  x[i]*np.sin(degree*np.pi/180)])
    if abs(x[0]) < 1e-6:
        # first radius is zero: start with a triangle at the center node
        mesh.createCell([0, 1, 2])
        for i in range(0, (len(x)-2)*2-1, 2):
            c = mesh.createCell([i+1, i+3, i+4, i+2])
    else:
        # annulus segment: quads only
        for i in range(0, len(x)*2-2, 2):
            c = mesh.createCell([i, i+2, i+3, i+1])
    # inner boundary (marker=1) and outer boundary (marker=2)
    mesh.createBoundary([0, 1], marker=1)
    mesh.createBoundary([mesh.nodeCount()-2, mesh.nodeCount()-1], marker=2)
    for i in range(h):
        mesh = mesh.createH2()
    mesh.createNeighbourInfos()
    for b in mesh.boundaries():
        if b.outside() and b.marker() == 0:
            if b.norm()[1] == 0.0:
                b.setMarker(4)  # bottom
            else:
                b.setMarker(3)
    meshR = pg.Mesh(mesh)
    # move all nodes on the inner boundary outward to their target radius
    # so the refined boundary follows the circular arc
    for b in mesh.boundaries():
        line = pg.Line(b.node(0).pos(), b.node(1).pos())
        rSoll = line.intersect([0.0, 0.0], [1.0, 0.0])[0]
        if rSoll > 1e-4:
            for n in b.nodes():
                scale = rSoll/n.pos().abs()
                if scale > 1:
                    meshR.node(n.id()).setPos(pg.Line([0.0, 0.0], n.pos()).at(scale))
    if marker != 0:
        for c in meshR.cells():
            c.setMarker(marker)
    return meshR
def appendBoundary(mesh, **kwargs):
    """Append a boundary region to a given 2D or 3D mesh.

    Syntactic sugar that dispatches on the mesh dimension to
    :py:mod:`pygimli.meshtools.appendTriangleBoundary` (2D) or
    :py:mod:`pygimli.meshtools.appendTetrahedronBoundary` (3D).

    Parameters
    ----------
    mesh: :gimliapi:`GIMLI::Mesh`
        2D or 3D mesh to which the boundary will be appended.

    Additional Args
    ---------------
    ** kwargs forwarded to :py:mod:`pygimli.meshtools.appendTriangleBoundary`
    or :py:mod:`pygimli.meshtools.appendTetrahedronBoundary`.

    Returns
    -------
    :gimliapi:`GIMLI::Mesh`
        A new 2D or 3D mesh containing the original mesh and a boundary
        around it.
    """
    dim = mesh.dim()
    if dim == 2:
        return appendTriangleBoundary(mesh, **kwargs)
    if dim == 3:
        return appendTetrahedronBoundary(mesh, **kwargs)
    pg.critical("Don't know how to append boundary to: ", mesh)
def appendTriangleBoundary(mesh, xbound=10, ybound=10, marker=1,
                           isSubSurface=True,
                           **kwargs):
    """Add a triangle mesh boundary to a given mesh.

    Returns a new mesh that contains a triangulated box around a given mesh
    suitable for geo-simulation (surface boundary with marker = -1 at top and marker = -2 in the inner subsurface).
    The old boundary marker from mesh will be preserved, except for marker == -2 which will be switched to 2 since we assume
    -2 is the world marker for outer boundaries in the subsurface.
    Note, this all will only work stable if the mesh generator (triangle) preserves all input boundaries.
    This will lead to bad quality meshes for the boundary region so it's a good idea to play with the addNodes keyword argument
    to manually refine the newly created outer boundaries.

    Parameters
    ----------
    mesh: :gimliapi:`GIMLI::Mesh`
        Mesh to which the triangle boundary should be appended.
    xbound: float, optional
        Absolute horizontal prolongation distance.
    ybound: float, optional
        Absolute vertical prolongation distance.
    marker: int, optional
        Marker of new cells.
    isSubSurface: boolean [True]
        Apply boundary conditions suitable for geo-simulation and prolongate
        mesh to the surface if necessary.

    Additional Args
    ---------------
    ** kargs forwarded to pg.createMesh
    quality : float, optional
        Triangle quality.
    area: float, optional
        Triangle max size within the boundary.
    smooth : boolean, optional
        Apply mesh smoothing.
    addNodes : int[5], iterable
        Add additional nodes on the outer boundaries. Or for each boundary if given 5 values (isSubsurface=True) or 4 for isSubsurface=False

    Returns
    -------
    :gimliapi:`GIMLI::Mesh`
        A new 2D mesh containing the original mesh and a boundary around it.

    See Also
    --------
    :py:mod:`pygimli.meshtools.appendBoundary`
    :py:mod:`pygimli.meshtools.appendTetrahedronBoundary`

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> import pygimli as pg
    >>> from pygimli.viewer.mpl import drawMesh, drawModel
    >>> import pygimli.meshtools as mt
    >>> inner = pg.createGrid(range(5), range(5), marker=1)
    >>> fig, axs = plt.subplots(2,3)
    >>> ax, _ = pg.show(inner, markers=True, showBoundaries=True, showMesh=True, ax=axs[0][0])
    >>> m = mt.appendTriangleBoundary(inner, xbound=3, ybound=3, marker=2, addNodes=0, isSubSurface=False)
    >>> ax, _ = pg.show(m, markers=True, showBoundaries=True, showMesh=True, ax=axs[0][1])
    >>> m = mt.appendTriangleBoundary(inner, xbound=4, ybound=1, marker=2, addNodes=5, isSubSurface=False)
    >>> ax, _ = pg.show(m, markers=True, showBoundaries=True, showMesh=True, ax=axs[0][2])
    >>> m = mt.appendTriangleBoundary(inner, xbound=4, ybound=4, marker=2, addNodes=0, isSubSurface=True)
    >>> ax, _ = pg.show(m, markers=True, showBoundaries=True, showMesh=True, ax=axs[1][0])
    >>> m = mt.appendTriangleBoundary(inner, xbound=4, ybound=4, marker=2, addNodes=5, isSubSurface=True)
    >>> ax, _ = pg.show(m, markers=True, showBoundaries=True, showMesh=True, ax=axs[1][1])
    >>> surf = mt.createPolygon([[0, 0],[5, 3], [10, 1]], boundaryMarker=-1, addNodes=5, interpolate='spline')
    >>> m = mt.appendTriangleBoundary(surf, xbound=4, ybound=4, marker=2, addNodes=5, isSubSurface=True)
    >>> ax, _ = pg.show(m, markers=True, showBoundaries=True, showMesh=True, ax=axs[1][2])
    """
    poly = pg.Mesh(isGeometry=True)
    if isSubSurface == True:
        # find the surface boundaries (homogeneous Neumann or upward-facing)
        bs = mesh.findBoundaryByMarker(pg.core.MARKER_BOUND_HOMOGEN_NEUMANN)
        if len(bs) == 0:
            for b in mesh.boundaries():
                if b.outside() and b.norm()[1] == 1.0:
                    bs.append(b)
        paths = mesh.findPaths(bs)
        if len(paths) > 0:
            # left-most and right-most surface point of the found path
            startPoint = mesh.node(paths[0][0]).pos()
            endPoint = mesh.node(paths[0][-1]).pos()
            if startPoint[0] > endPoint[0]:
                startPoint = endPoint
                endPoint = mesh.node(paths[0][0]).pos()
        else:
            pg.critical("Can't identify upper part of the mesh to be moved to the surface.",
                        "Maybe you can define them with Marker==-1")
        addNodes = kwargs.pop('addNodes', 5)
        boundPoly = [pg.Pos(startPoint)]
        if isinstance(addNodes, (float, int)) and addNodes > 0:
            # same refinement count on all 5 outer boundary segments
            addNodes = np.full(5, addNodes)
        if hasattr(addNodes, '__len__') and len(addNodes) == 5:
            # build the outline counter-clockwise: left (log-spaced), down,
            # bottom, up, right back to the surface (log-spaced)
            boundPoly.extend([boundPoly[-1] - pg.Pos(x, 0)
                              for x in pg.utils.grange(1, xbound, n=addNodes[0]+1, log=True)])
            boundPoly.extend([boundPoly[-1] - pg.Pos(0, y)
                              for y in np.linspace(0, mesh.ymax() - mesh.ymin() + ybound,
                                                   addNodes[1]+1)[1:]])
            boundPoly.extend([boundPoly[-1] + pg.Pos(x, 0)
                              for x in np.linspace(0, (endPoint-startPoint)[0] + 2*xbound,
                                                   addNodes[2]+1)[1:]])
            boundPoly.extend([boundPoly[-1] + pg.Pos(0, y)
                              for y in np.linspace(0, endPoint[1]-boundPoly[-1][1],
                                                   addNodes[3]+1)[1:]])
            boundPoly.extend([boundPoly[-1] - pg.Pos(xbound-x, 0)
                              for x in pg.utils.grange(1, xbound, n=addNodes[4]+1,
                                                       log=True)[::-1][1:]])
        else:
            # plain 5-corner outline without extra nodes
            boundPoly.append(boundPoly[-1] - pg.Pos(xbound, 0))
            boundPoly.append(boundPoly[-1] - pg.Pos(0, mesh.ymax() - mesh.ymin() + ybound))
            boundPoly.append(boundPoly[-1] + pg.Pos((endPoint-startPoint)[0] + 2*xbound, 0))
            boundPoly.append(pg.Pos(endPoint) + pg.Pos(xbound, 0))
            boundPoly.append(pg.Pos(endPoint))
        poly = pg.meshtools.createPolygon(boundPoly, isClosed=False)
        poly.addRegionMarker(pg.Pos([poly.xmin(), poly.ymin()]) +
                             [xbound/100, ybound/100],
                             marker=marker)
        if mesh.cellCount() > 0:
            # keep the original mesh region untriangulated
            poly.addHoleMarker(pg.Pos([mesh.xmin(), mesh.ymin()]) +
                               [0.001, 0.001])
    else:  # no isSubSurface
        boundPoly = [
            [mesh.xmin() - xbound, mesh.ymin() - ybound],
            [mesh.xmin() - xbound, mesh.ymax() + ybound],
            [mesh.xmax() + xbound, mesh.ymax() + ybound],
            [mesh.xmax() + xbound, mesh.ymin() - ybound],
        ]
        poly = pg.meshtools.createPolygon(boundPoly, isClosed=True,
                                          marker=marker,
                                          addNodes=kwargs.pop('addNodes', 5))
    # the outer (and marker==-1) boundaries of the input mesh become
    # constraints for the triangulation
    for b in mesh.boundaries():
        if b.outside() or b.marker() == -1:
            poly.copyBoundary(b)
    preserveSwitch = 'Y'
    # pg.show(poly, boundaryMarkers=True, showNodes=True)
    # pg.wait()
    mesh2 = pg.meshtools.createMesh(poly, preserveBoundary=preserveSwitch, **kwargs)
    # pg.show(mesh2, boundaryMarkers=True, showNodes=True)
    # start extracting all cells with marker from mesh2 and all original cells from mesh
    mesh3 = pg.Mesh(2)
    for c in mesh2.cells():
        if c.marker() == marker:
            mesh3.copyCell(c)
    # ! map does copies the cell not the reference, this should not happen **TODO check 20210305
    # map(lambda cell: mesh2.copyCell(cell), mesh2.cells())
    for c in mesh.cells():
        mesh3.copyCell(c)
    # we need to delete the old boundary markers or the new neighbour infos will fail for old outside boundaries
    mesh3.setBoundaryMarkers(np.zeros(mesh3.boundaryCount()))
    mesh3.createNeighborInfos(force=True)
    for b in mesh.boundaries():
        if b.marker() != 0:
            b2 = mesh3.copyBoundary(b)
            # some automagic .. original mesh contains bmarker == -2 which means mixed condition
            # this special marker will be switched to 2
            if b.marker() == -2:
                b2.setMarker(2)
    for b in mesh3.boundaries():
        if b.outside() and b.marker() > -1:
            if b.norm().x() != 0 or b.norm().y() == -1.0:
                b.setMarker(pg.core.MARKER_BOUND_MIXED)
            else:
                b.setMarker(pg.core.MARKER_BOUND_HOMOGEN_NEUMANN)
    return mesh3
def appendBoundaryGrid(grid, xbound=None, ybound=None, zbound=None,
                       marker=1, isSubSurface=True, **kwargs):
    """Return a copy of grid surrounded by a boundary grid.

    Note, that the input grid needs to be a structured 2D or 3D grid with
    only quad or hex cells.

    TODO
    ----
    * preserve inner boundaries
    * add subsurface setting

    Args
    ----
    grid: :gimliapi:`GIMLI::Mesh`
        2D or 3D Mesh that must contain structured quads or hex cells
    xbound: iterable of type float [None]
        Needed for 2D or 3D grid prolongation and will be added on the left side in opposite order and on the right side in normal order.
    ybound: iterable of type float [None]
        Needed for 2D or 3D grid prolongation and will be added (2D bottom, 3D front) in opposite order and (2D top, 3D back) in normal order.
    zbound: iterable of type float [None]
        Needed for 3D grid prolongation and will be added the bottom side in opposite order on the top side in normal order.
    marker: int [1]
        Cell marker for the cells in the boundary region
    isSubSurface : boolean, optional
        Apply boundary conditions suitable for geo-simulation and prolongate
        mesh to the surface if necessary, i.e., no boundary on top of the grid.

    Examples
    --------
    >>> import pygimli as pg
    >>> import pygimli.meshtools as mt
    >>> grid = mt.createGrid(5,5)
    ...
    >>> g1 = mt.appendBoundaryGrid(grid,
    ...                            xbound=[1, 3, 6],
    ...                            ybound=[1, 3, 6],
    ...                            marker=2,
    ...                            isSubSurface=False)
    >>> ax,_ = pg.show(g1, markers=True, showMesh=True)
    >>> grid = mt.createGrid(5,5,5)
    ...
    >>> g2 = mt.appendBoundaryGrid(grid,
    ...                            xbound=[1, 3, 6],
    ...                            ybound=[1, 3, 6],
    ...                            zbound=[1, 3, 6],
    ...                            marker=2,
    ...                            isSubSurface=False)
    >>> ax, _ = pg.show(g2, g2.cellMarkers(), hold=True, opacity=0.5);
    """
    # idiomatic truthiness test instead of '== True' (PEP 8 E712)
    if isSubSurface:
        pg.critical('Implement me')

    def _concat(v, vBound):
        # prepend vBound (mirrored) below v[0] and append it above v[-1]
        if not pg.isArray(vBound):
            pg.critical("please give bound array")
        v = np.append(-np.array(vBound)[::-1] + v[0], v)
        v = np.append(v, v[-1] + np.array(vBound))
        return v

    x = None
    y = None
    z = None
    if grid.dim() > 1:
        if grid.dim() == 2:
            if any([c.nodeCount() != 4 for c in grid.cells()]):
                pg.critical("Grid have other cells than quads. Can't refine it with a grid")
        x = pg.utils.unique(pg.x(grid))
        y = pg.utils.unique(pg.y(grid))
        x = _concat(x, xbound)
        y = _concat(y, ybound)
        if grid.dim() == 3:
            if any([c.nodeCount() != 8 for c in grid.cells()]):
                pg.critical("Grid have other cells than hex's. Can't refine it with a grid")
            z = pg.utils.unique(pg.z(grid))
            z = _concat(z, zbound)
    mesh = pg.meshtools.createGrid(x=x, y=y, z=z, marker=marker)
    # keep the original cell markers inside; everything new gets `marker`
    mesh.setCellMarkers(pg.interpolate(grid,
                                       grid.cellMarkers(),
                                       mesh.cellCenters(),
                                       fallback=marker))
    return mesh
def appendTetrahedronBoundary(mesh, xbound=10, ybound=10, zbound=10,
                              marker=1, isSubSurface=True, **kwargs):
    """Return a copy of mesh surrounded by a tetrahedron mesh as boundary.

    Returns a new mesh that contains a tetrahedron mesh box around a given mesh
    suitable for geo-simulation (surface boundary with marker = -1 at top and marker = -2 in the inner subsurface).
    The old boundary marker from mesh will be preserved, except for marker == -2 which will be switched to 2 since we assume
    -2 is the world marker for outer boundaries in the subsurface.

    Note
    ----
    This method will only work stable if the mesh generator (Tetgen) preserves all input boundaries.
    This will lead to bad quality meshes for the boundary region so it's a good idea to play with the addNodes keyword argument to manually refine the newly created outer boundaries.
    Also, note. If the input mesh consists of hexahedrons a small inconsistency will arise because a quad boundary element will be split by 2 triangle boundaries from the boundary tetrahedrons. The effect of this hanging edges are unclear, also createNeighbourInfos may fail. We need to implement/test pyramid cells to handle this.

    TODO
    ----
    * set correct boundary conditions
    * isSubSurface
    * pyramid cells as connecting cells
    * need for preserve Boundary check
    * preserve Boundary support
    * addNodes support

    Parameters
    ----------
    mesh: :gimliapi:`GIMLI::Mesh`
        3D Mesh to which the tetrahedron boundary should be appended.
    xbound: float [10]
        Horizontal prolongation distance in meter at x-direction.
        Need to be >= 0.
    ybound: float [10]
        Horizonal prolongation distance in meter at y-direction.
        Need to be greater 0.
    zbound: float [10]
        Vertical prolongation distance in meter at z-direction. Need to be greater 0.
    marker: int, optional
        Marker of new cells.
    addNodes: float, optional
        Triangle quality.
    isSubSurface : boolean, optional
        Apply boundary conditions suitable for geo-simulation and prolongate
        mesh to the surface if necessary.
    verbose : boolean, optional
        Be verbose.

    Returns
    -------
    :gimliapi:`GIMLI::Mesh`
        A new 3D mesh containing the original mesh and a boundary around it.

    See Also
    --------
    :py:mod:`pygimli.meshtools.appendBoundary`,
    :py:mod:`pygimli.meshtools.appendTriangleBoundary`

    Examples
    --------
    >>> import pygimli as pg
    >>> import pygimli.meshtools as mt
    >>> grid = mt.createGrid(5,5,5)
    ...
    >>> mesh = mt.appendBoundary(grid, xbound=5, ybound=5, zbound=5,
    ...                          isSubSurface=False)
    >>> ax, _ = pg.show(mesh, mesh.cellMarkers(), hold=True, opacity=0.5)
    """
    if isSubSurface == True:
        pg.critical('Implement me')
    # collect the outer hull (and marker==-1 boundaries) of the input mesh
    meshBoundary = pg.Mesh(3, isGeometry=True)
    for b in mesh.boundaries():
        if b.outside() or b.marker() == -1:
            meshBoundary.copyBoundary(b)
    bb = meshBoundary.bb()
    # hole marker just inside the hull keeps the interior untetrahedralized
    meshBoundary.addHoleMarker(bb[0] + (bb[1]-bb[0])/1000.)
    # NOTE(review): `not any(...)` only triggers when ALL bounds are <= 0,
    # while the message (and docstring) suggest each bound must be > 0 —
    # possibly `not all(...)` was intended; confirm before changing.
    if not any([xbound > 0, ybound > 0, zbound > 0]):
        pg.critical('all boundaries need to be greater 0.')
    startPos = bb[0] - [xbound, ybound, zbound]
    endPos = bb[1] + [xbound, ybound, zbound]
    boundaryBox = pg.meshtools.createCube(start=startPos, end=endPos)
    # tetrahedralize the region between the box and the hull only
    boundMesh = pg.meshtools.createMesh(boundaryBox + meshBoundary)
    boundMesh.setCellMarkers(np.ones(boundMesh.cellCount()) * marker)
    # merge: original cells plus the new boundary cells
    outMesh = pg.Mesh(mesh)
    for c in boundMesh.cells():
        outMesh.copyCell(c)
    return outMesh
if __name__ == "__main__":
pass
|
JuliusHen/gimli | doc/examples/2_seismics/plot_02_crosshole_tomography.py | #!/usr/bin/env python
# coding: utf-8
"""
Crosshole traveltime tomography
-------------------------------
Seismic and ground penetrating radar (GPR) methods are frequently applied to
image the shallow subsurface. While novel developments focus on inverting the
full waveform, ray-based approximations are still widely used in practice and
offer a computationally efficient alternative. Here we demonstrate the modelling
of traveltimes and their inversion for the underlying slowness distribution for
a crosshole scenario.
We start by importing the necessary packages.
"""
# sphinx_gallery_thumbnail_number = 3
import matplotlib.pyplot as plt
import numpy as np
import pygimli as pg
import pygimli.meshtools as mt
from pygimli.physics.traveltime import TravelTimeManager, createCrossholeData
pg.utils.units.quants['vel']['cMap'] = 'inferno_r'
################################################################################
# Next, we build the crosshole acquisition geometry with two shallow boreholes.
# Acquisition parameters
bh_spacing = 20.0
bh_length = 25.0
sensor_spacing = 2.5
world = mt.createRectangle(start=[0, -(bh_length + 3)], end=[bh_spacing, 0.0],
marker=0)
depth = -np.arange(sensor_spacing, bh_length + sensor_spacing, sensor_spacing)
sensors = np.zeros((len(depth) * 2, 2)) # two boreholes
sensors[len(depth):, 0] = bh_spacing # x
sensors[:, 1] = np.hstack([depth] * 2) # y
###############################################################################
# Traveltime calculations work on unstructured meshes and structured grids. We
# demonstrate this here by simulating the synthetic data on an unstructured
# mesh and inverting it on a simple structured grid.
# Create forward model and mesh
c0 = mt.createCircle(pos=(7.0, -10.0), radius=3, nSegments=25, marker=1)
c1 = mt.createCircle(pos=(12.0, -18.0), radius=4, nSegments=25, marker=2)
geom = world + c0 + c1
for sen in sensors:
geom.createNode(sen)
mesh_fwd = mt.createMesh(geom, quality=34, area=0.25)
model = np.array([2000., 2300, 1700])[mesh_fwd.cellMarkers()]
pg.show(mesh_fwd, model,
label=pg.unit('vel'), cMap=pg.cmap('vel'), nLevs=3, logScale=False)
###############################################################################
# Next, we create an empty DataContainer and fill it with sensor positions and
# all possible shot-receiver pairs for the two-borehole scenario.
scheme = createCrossholeData(sensors)
###############################################################################
# The forward simulation is performed with a few lines of code. We initialize
# an instance of the Refraction manager and call its `simulate` function with
# the mesh, the scheme and the slowness model (1 / velocity). We also add 0.1%
# relative and 10 microseconds of absolute noise.
#
# Secondary nodes allow for more accurate forward simulations. Check out the
# paper by `<NAME> (2013)
# <https://doi.org/10.1016/j.cageo.2012.12.005>`_ to learn more about it.
tt = TravelTimeManager()
data = tt.simulate(mesh=mesh_fwd, scheme=scheme, slowness=1./model,
secNodes=4, noiseLevel=0.001, noiseAbs=1e-5, seed=1337)
###############################################################################
# Now we create a structured grid as inversion mesh
refinement = 0.25
x = np.arange(0, bh_spacing + refinement, sensor_spacing * refinement)
y = -np.arange(0.0, bh_length + 3, sensor_spacing * refinement)
mesh = pg.meshtools.createGrid(x, y)
ax, _ = pg.show(mesh, hold=True)
ax.plot(sensors[:, 0], sensors[:, 1], "ro")
###############################################################################
# Note. Setting setRecalcJacobian(False) to simulate linear inversion here.
tt.inv.inv.setRecalcJacobian(True)
invmodel = tt.invert(data, mesh=mesh, secNodes=3, lam=1000, zWeight=1.0,
useGradient=False, verbose=True)
print("chi^2 = %.2f" % tt.inv.chi2()) # Look at the data fit
# np.testing.assert_approx_equal(tt.inv.chi2(), 0.999038, significant=5)
################################################################################
# Finally, we visualize the true model and the inversion result next to each
# other.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 7), sharex=True, sharey=True)
ax1.set_title("True model")
ax2.set_title("Inversion result")
pg.show(mesh_fwd, model, ax=ax1, showMesh=True,
label=pg.unit('vel'), cMap=pg.cmap('vel'), nLevs=3)
for ax in (ax1, ax2):
ax.plot(sensors[:, 0], sensors[:, 1], "wo")
tt.showResult(ax=ax2, logScale=False, nLevs=3)
tt.drawRayPaths(ax=ax2, color="0.8", alpha=0.3)
fig.tight_layout()
################################################################################
# Note how the rays are attracted by the high velocity anomaly while
# circumventing the low velocity region. This is also reflected in the coverage,
# which can be visualized as follows:
fig, ax = plt.subplots()
tt.showCoverage(ax=ax, cMap="Greens")
tt.drawRayPaths(ax=ax, color="k", alpha=0.3)
ax.plot(sensors[:, 0], sensors[:, 1], "ko")
################################################################################
# White regions indicate the model null space, i.e. cells that are not traversed
# by any ray.
|
JuliusHen/gimli | pygimli/meshtools/__init__.py | <gh_stars>100-1000
# encoding: utf-8
"""
Mesh generation and modification.
.. note::
Although we discriminate here between grids (structured meshes) and meshes
(unstructured), both objects are treated the same internally.
"""
from pygimli.core import createMesh1D, createMesh1DBlock, createMesh2D, createMesh3D
from .grid import (createGrid, createGridPieShaped,
appendBoundary,
appendBoundaryGrid,
appendTriangleBoundary,
appendTetrahedronBoundary,
)
from .mapping import (cellDataToBoundaryData, cellDataToNodeData,
fillEmptyToCellArray, interpolate, interpolateAlongCurve,
nodeDataToBoundaryData, nodeDataToCellData,
tapeMeasureToCoordinates)
from .mesh import (convert, convertMeshioMesh, convertHDF5Mesh, createMesh,
createParaMesh, createParaMesh2DGrid, createMeshFromHull, exportFenicsHDF5Mesh, exportHDF5Mesh,
exportSTL, extrudeMesh, merge2Meshes, mergeMeshes,
readFenicsHDF5Mesh, readGmsh, readHDF5Mesh,
readHydrus2dMesh, readHydrus3dMesh, readSTL, readTetgen,
readTriangle, readMeshIO, refineHex2Tet, refineQuad2Tri,
toSubsurface, fromSubsurface)
from .polytools import createParaDomain2D # keep for backward compatibility
from .polytools import (createCircle, createCube, createCylinder, createFacet,
createLine, createParaMeshPLC, createPolygon, merge,
createRectangle, createWorld, exportPLC, mergePLC,
mergePLC3D, readPLC, syscallTetgen, extrude)
from .quality import quality
# This is neither functional nor good practice # why?
# __all__ = [name for name in dir() if '_' not in name]
__all__ = [
'appendTriangleBoundary',
'appendTetrahedronBoundary',
'createMesh',
'createMeshFromHull',
'readGmsh',
'readTriangle',
'readTetgen',
'readHydrus2dMesh',
'readHydrus3dMesh',
'readHDF5Mesh',
'readFenicsHDF5Mesh',
'readSTL',
'readMeshIO',
'refineQuad2Tri',
'mergeMeshes',
'merge2Meshes',
'createParaMesh',
'createParaMesh2DGrid',
'createPolygon',
'createRectangle',
'createWorld',
'createCircle',
'createLine',
'createParaMeshPLC',
'convertHDF5Mesh',
'exportHDF5Mesh',
'exportFenicsHDF5Mesh',
'extrudeMesh',
'mergePLC',
'readPLC',
'writePLC',
'exportPLC',
'createParaDomain2D', # keep for backward compatibility
'quality'
]
|
JuliusHen/gimli | pygimli/physics/SIP/importData.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Import/Export for SIP data."""
import codecs
from datetime import datetime
import numpy as np
import re
import pygimli as pg
def load(fileName, verbose=False, **kwargs):
"""Shortcut to load SIP spectral data.
Import Data and try to assume the file format.
Parameters
----------
fileName: str
Returns
-------
freqs, amp, phi : np.array
Frequencies, amplitudes and phases phi in neg. radiant
"""
firstLine = None
with codecs.open(fileName, 'r', encoding='iso-8859-15',
errors='replace') as fi:
firstLine = fi.readline()
f, amp, phi = None, None, None
fnLow = fileName.lower()
if 'SIP Fuchs III' in firstLine:
if verbose:
pg.info("Reading SIP Fuchs III file")
f, amp, phi, header = readFuchs3File(fileName,
verbose=verbose, **kwargs)
phi *= -np.pi/180.
# print(header) # not used?
elif 'SIP-Quad' in firstLine:
if verbose:
pg.info("Reading SIP Quad file")
f, amp, phi, header = readFuchs3File(fileName,
verbose=verbose, **kwargs)
phi *= -np.pi/180.
elif 'SIP-Fuchs' in firstLine:
if verbose:
pg.info("Reading SIP Fuchs file")
f, amp, phi, drhoa, dphi = readRadicSIPFuchs(fileName,
verbose=verbose, **kwargs)
phi *= -np.pi/180.
elif fnLow.endswith('.txt') or fnLow.endswith('.csv'):
f, amp, phi = readTXTSpectrum(filename)
amp *= 1.0 # scale it with k if available
else:
raise Exception("Don't know how to read data.")
return f, amp, phi
def fstring(fri):
"""Format frequency to human-readable (mHz or kHz)."""
if fri > 1e3:
fstr = '{:d} kHz'.format(int(np.round(fri/1e3)))
elif fri < 1.:
fstr = '{:d} mHz'.format(int(np.round(fri*1e3)))
elif fri < 10.:
fstr = '{:3.1f} Hz'.format(fri)
elif fri < 100.:
fstr = '{:4.1f} Hz'.format(fri)
else:
fstr = '{:d} Hz'.format(int(np.round(fri)))
return fstr
def readTXTSpectrum(filename):
"""Read spectrum from ZEL device output (txt) data file."""
fid = open(filename)
lines = fid.readlines()
fid.close()
f, amp, phi = [], [], []
for line in lines[1:]:
snums = line.replace(';', ' ').split()
if len(snums) > 3:
f.append(float(snums[0]))
amp.append(float(snums[1]))
phi.append(-float(snums[3]))
else:
break
return np.asarray(f), np.asarray(amp), np.asarray(phi)
def readFuchs3File(resfile, k=1.0, verbose=False):
"""Read Fuchs III (SIP spectrum) data file.
Parameters
----------
k : float
Overwrite internal geometric factor from device.
"""
activeBlock = ''
header = {}
LINE = []
dataAct = False
with codecs.open(resfile, 'r', encoding='iso-8859-15', errors='replace') as f:
for line in f:
line = line.replace('\r\n', '\n') # correct for carriage return
if dataAct:
LINE.append(line)
if len(line) < 2:
f, amp, phi, kIn = [], [], [], []
for li in LINE:
sline = li.split()
if len(sline) > 12:
fi = float(sline[11])
if np.isfinite(fi):
f.append(fi)
amp.append(float(sline[12]))
phi.append(float(sline[13]))
kIn.append(float(sline[9]))
if k != 1.0 and verbose is True:
pg.info("Geometric value changed to:", k)
return np.array(f), np.array(amp)/np.array(kIn) * k, \
np.array(phi), header
elif len(line):
if line.rfind('Current') >= 0:
if dataAct:
break
else:
dataAct = True
if line[0] == '[':
token = line[1:line.rfind(']')].replace(' ', '_')
if token[:3] == 'End':
header[activeBlock] = np.array(header[activeBlock])
activeBlock = ''
elif token[:5] == 'Begin':
activeBlock = token[6:]
header[activeBlock] = []
else:
value = line[line.rfind(']') + 1:]
try: # direct line information
if '.' in value:
num = float(value)
else:
num = int(value)
header[token] = num
except BaseException as e:
# maybe beginning or end of a block
#print(e)
pass
else:
if activeBlock:
nums = np.array(line.split(), dtype=float)
header[activeBlock].append(nums)
def readRadicSIPFuchs(filename, readSecond=False, delLast=True):
"""Read SIP-Fuchs Software rev.: 070903
Read Radic instrument res file containing a single spectrum.
Please note the apparent resistivity value might be scaled with the
real geometric factor. Default is 1.0.
Parameters
----------
filename : string
readSecond: bool [False]
Read the first data block[default] or read the second that
consists in the file.
delLast : bool [True]
??
Returns
-------
fr : array [float]
Measured frequencies
rhoa : array [float]
Measured apparent resistivties
phi : array [float]
Measured phases
drhoa : array [float]
Measured apparent resistivties error
phi : array [float]
Measured phase error
"""
with codecs.open(resfile, 'r', encoding='iso-8859-15', errors='replace') as f:
line = f.readline()
fr = []
rhoa = []
phi = []
drhoa = []
dphi = []
while True:
line = f.readline()
if line.rfind('Freq') > -1:
break
return
if readSecond:
while True:
if f.readline().rfind('Freq') > -1:
break
while True:
line = f.readline()
b = line.split('\t')
if len(b) < 5:
break
fr.append(float(b[0]))
rhoa.append(float(b[1]))
phi.append(-float(b[2]) * np.pi / 180.)
drhoa.append(float(b[3]))
dphi.append(float(b[4]) * np.pi / 180.)
f.close()
if delLast:
fr.pop(0)
rhoa.pop(0)
phi.pop(0)
drhoa.pop(0)
dphi.pop(0)
return np.array(fr), np.array(rhoa), np.array(phi), np.array(drhoa), np.array(dphi)
def toTime(t, d):
""" convert time format into timestamp
11:08:02, 21/02/2019
"""
tim = [int(_t) for _t in t.split(':')]
if '/' in d: # 03/02/1975
day = [int(_t) for _t in d.split('/')]
dt = datetime(year=day[2], month=day[1], day=day[0],
hour=tim[0], minute=tim[1], second=tim[2])
elif '.' in d: # 03.02.1975
day = [int(_t) for _t in d.split('.')]
dt = datetime(year=day[2], month=day[1], day=day[0],
hour=tim[0], minute=tim[1], second=tim[2])
else: # 1975-02-03
day = [int(_t) for _t in d.split('-')]
dt = datetime(year=day[0], month=day[1], day=day[2],
hour=tim[0], minute=tim[1], second=tim[2])
return dt.timestamp()
def readSIP256file(resfile, verbose=False):
"""Read SIP256 file (RES format) - mostly used for 2d SIP by pybert.sip.
Read SIP256 file (RES format) - mostly used for 2d SIP by pybert.sip.
Parameters
----------
filename: str
*.RES file (SIP256 raw output file)
verbose: bool
do some output [False]
Returns
-------
header - dictionary of measuring setup
DATA - data AB-list of MN-list of matrices with f, amp, phi, dAmp, dPhi
AB - list of current injection
RU - list of remote units
Examples
--------
header, DATA, AB, RU = readSIP256file('myfile.res', True)
"""
activeBlock = ''
header = {}
LINE = []
dataAct = False
with codecs.open(resfile, 'r', encoding='iso-8859-15',
errors='replace') as fi:
content = fi.readlines()
for line in content:
if dataAct:
LINE.append(line)
elif len(line):
if line[0] == '[':
token = line[1:line.rfind(']')].replace(' ', '_')
# handle early 256D software bug
if 'FrequencyParameterBegin' in token:
token = token.replace('FrequencyParameterBegin',
'Begin_FrequencyParameter')
if 'FrequencyParameterEnd' in token:
token = token.replace('FrequencyParameterEnd',
'End_FrequencyParameter')
if token.replace(' ', '_') == 'Messdaten_SIP256':
dataAct = True
elif 'Messdaten' in token:
# res format changed into SIP256D .. so we are a
# little bit more flexible with this.
dataAct = True
elif token[:3] == 'End':
header[activeBlock] = np.array(header[activeBlock])
activeBlock = ''
elif token[:5] == 'Begin':
activeBlock = token[6:]
header[activeBlock] = []
else:
value = line[line.rfind(']') + 1:]
try: # direct line information
if '.' in value:
num = float(value)
else:
try:
num = int(value)
except:
num = 0
pass
header[token] = num
except BaseException as e:
# maybe beginning or end of a block
print(e)
else:
if activeBlock:
nums = np.array(line.split(), dtype=float)
header[activeBlock].append(nums)
DATA, dReading, dFreq, AB, RU, ru = [], [], [], [], [], []
tMeas = []
for i, line in enumerate(LINE):
# print(i, line)
line = line.replace(' nc ', ' 0 ') # no calibration should 0
line = line.replace(' c ', ' 1 ') # calibration should 1
# sline = line.split()
sline = line.rstrip('\r\n').split()
if line.find('Reading') == 0:
rdno = int(sline[1])
if rdno > 0:
AB.append((int(sline[4]), int(sline[6])))
if ru:
RU.append(ru)
ru = []
if rdno > 1 and dReading:
dReading.append(np.array(dFreq))
DATA.append(dReading)
pg.verbose('Reading {0}:{1} RUs'.format(rdno-1, len(dReading)))
dReading, dFreq = [], []
elif line.find('Remote Unit') == 0:
ru.append(int(sline[2]))
if dFreq:
dReading.append(np.array(dFreq))
dFreq = []
elif line.find('Freq') >= 0:
pass
elif len(sline) > 1 and rdno > 0: # some data present
# search for two numbers (with .) without a space inbetween
# variant 1: do it for every part
for i, ss in enumerate(sline):
if re.search('\.20[01][0-9]', ss) is None: # no date
fd = re.search('\.[0-9-]*\.', ss)
if fd:
if '-' in ss[1:]:
bpos = ss[1:].find('-') + 1
else:
bpos = fd.start() + 4
# print(ss[:bpos], ss[bpos:])
sline.insert(i, ss[:bpos])
sline[i+1] = ss[bpos:]
# print(sline)
fd = re.search('NaN[0-9-]*\.', ss)
if fd:
if '-' in ss[1:]:
bpos = ss.find('-')
else:
bpos = fd.start() + 3
# print(ss[:bpos], ss[bpos:])
sline.insert(i, ss[:bpos])
sline[i+1] = ss[bpos:]
# print(sline)
# variant 2: do it on whole line
# cdate = re.search('\.20[01][0-9]', line)
# if cdate:
# n2000 = cdate.start()
# else:
# n2000 = len(line)
# print(sline)
# concnums = re.search('\.[0-9-]*\.', line[:n2000])
# while concnums:
# bpos = concnums.span()[0] + 4
# line = line[:bpos] + ' ' + line[bpos:]
# n2000 += 1
# concnums = re.search('\.[0-9-]*\.', line[:n2000])
# sline = line.rstrip('\r\n').split()
# print(sline)
# if re.search('[0-9]-', line[:85]): # missing whitespace before -
# sline = re.sub('[0-9]-', '5 -', line).split()
# not a good idea for dates
for c in range(7): # this is expensive .. do we really need this?
if len(sline[c]) > 15: # too long line / missing space
if c == 0:
part1 = sline[c][:-15]
part2 = sline[c][-15:] # [10:]
else:
part1 = sline[c][:-10]
part2 = sline[c][-10:] # [11:]
sline = sline[:c] + [part1] + [part2] + sline[c + 1:]
if sline[c].find('c') >= 0:
sline[c] = '1.0'
#Frequency /Hz RA/Ohmm PA/� ERA/% EPA/� Cal? IA/mA K.-F./m Gains Time/h:m:s Date/d.m.y
#20000.00000000 0.4609 -6.72598 0.02234 0.01280 1 20.067 1.00 0 11:08:02 21/02/2019
try:
dFreq.append(
np.array(sline[:8] + [toTime(sline[-2], sline[-1])],
dtype=float))
except:
# dFreq.append(np.array(sline[:8], dtype=float))
print(i, line, sline)
raise ImportError()
dReading.append(np.array(dFreq))
DATA.append(dReading)
pg.verbose('Reading {0}:{1} RUs'.format(rdno, len(dReading)))
return header, DATA, AB, RU
if __name__ == "__main__":
pass
|
JuliusHen/gimli | pygimli/physics/ert/ertScheme.py | # -*- coding: utf-8 -*-
# import matplotlib.pyplot as plt
import numpy as np
# from numpy import ma
import pygimli as pg
from pygimli.physics import ert
from pygimli.viewer.mpl.colorbar import createColorBarOnly
def createData(elecs, schemeName='none', **kwargs):
""" Utility one-liner to create a BERT datafile
Parameters
----------
elecs : int | list[pos] | array(x)
Number of electrodes or electrode positions or x-positions
schemeName : str ['none']
Name of the configuration. If you provide an unknown scheme name, all
known schemes ['wa', 'wb', 'pp', 'pd', 'dd', 'slm', 'hw', 'gr'] listed.
**kwargs :
Arguments that will be forwarded to the scheme generator.
* inverse : bool
interchange AB MN with MN AB
* reciprocity : bool
interchange AB MN with BA NM
* addInverse : bool
add additional inverse measurements
* spacing : float [1]
electrode spacing in meters
* closed : bool
Close the chain. Measure from the end of the array to the first
electrode.
Returns
-------
data : DataContainerERT
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from pygimli.physics import ert
>>>
>>> schemes = ['wa', 'wb', 'pp', 'pd', 'dd', 'slm', 'hw', 'gr']
>>> fig, ax = plt.subplots(3,3)
>>>
>>> for i, schemeName in enumerate(schemes):
... s = ert.createData(elecs=41, schemeName=schemeName)
... k = ert.geometricFactors(s)
... _ = ert.show(s, vals=k, ax=ax.flat[i], label='k - ' + schemeName)
>>>
>>> plt.show()
"""
if kwargs.pop('sounding', False):
data = pg.DataContainerERT()
data.setSensors(pg.cat(-elecs[::-1], elecs))
nElecs = len(elecs)
for i in range(nElecs-1):
data.createFourPointData(i, i, 2*nElecs-i-1, nElecs-1, nElecs)
return data
mg = DataSchemeManager()
if schemeName == "none":
pg.error('argument "schemeName" not set. Valid schemeNames are:')
for i in mg.schemes():
print(i, "scheme: " + mg.scheme(i).prefix)
scheme = mg.scheme(schemeName)
scheme.setInverse(kwargs.pop('inverse', False))
scheme.addInverse(kwargs.pop('addInverse', False))
scheme._closed = kwargs.pop('closed', False)
if isinstance(elecs, int):
data = scheme.create(nElectrodes=elecs,
electrodeSpacing=kwargs.pop('spacing', 1),
**kwargs)
elif hasattr(elecs, '__iter__'):
if isinstance(elecs[0], float) or isinstance(elecs[0], int):
data = scheme.create(nElectrodes=len(elecs), **kwargs)
data.setSensors(elecs)
else:
data = scheme.create(sensorList=elecs, **kwargs)
else:
print(elecs)
pg.critical("Can't interpret elecs")
return data
def createDataVES(ab2, mn2):
""" Utility one-liner to create a BERT datafile for Schlumberger 1D VES
Parameters
----------
ab2: array
Half distance between current electrodes
mn2: float
Half distance between measurement electrodes
Returns
-------
data : DataContainerERT
"""
data = pg.DataContainerERT()
if type(mn2) is float or type(mn2) is int:
mn2 = [mn2]
count = 0
for mn in mn2:
emID = data.createSensor([-mn, 0.0, 0.0])
enID = data.createSensor([mn, 0.0, 0.0])
for x in ab2:
eaID = data.createSensor([-x, 0.0, 0.0])
ebID = data.createSensor([x, 0.0, 0.0])
data.createFourPointData(count, eaID, ebID, emID, enID)
count += 1
data.fitFillSize()
return data
class Pseudotype:
unknown = 0
A_M = 1
AB_MN = 2
AB_M = 3
AB_N = 4
DipoleDipole = 5
Schlumberger = 6
WennerAlpha = 7
WennerBeta = 8
Gradient = 9
PoleDipole = 10
HalfWenner = 11
PolePole = 12
Test = 99
class DataSchemeManager(object):
""" """
def __init__(self):
""" """
self.schemes_ = dict()
self.addScheme(DataSchemeBase())
self.addScheme(DataSchemeWennerAlpha())
self.addScheme(DataSchemeWennerBeta())
self.addScheme(DataSchemeDipoleDipole())
self.addScheme(DataSchemeSchlumberger())
self.addScheme(DataSchemePolePole())
self.addScheme(DataSchemePoleDipole())
self.addScheme(DataSchemeHalfWenner())
self.addScheme(DataSchemeMultipleGradient())
self.addScheme(DataSchemeBase(type=Pseudotype.A_M, name='A_M'))
self.addScheme(DataSchemeBase(type=Pseudotype.AB_MN, name='AB_MN'))
self.addScheme(DataSchemeBase(type=Pseudotype.AB_M, name='AB_M'))
self.addScheme(DataSchemeBase(type=Pseudotype.AB_N, name='AB_N'))
def addScheme(self, scheme):
""" """
self.schemes_[scheme.name] = scheme
def scheme(self, name):
"""
Return DataScheme for a given name if registered.
Parameters
----------
name : str | int
Name or prefix name of a known data scheme. If the name is unknown
all known data schemes are listed.
Name can be a integer number that
represents the internal Pseudotype.
Return
------
scheme : DataScheme
"""
if type(name) == int:
s = self.schemeFromTyp(name)
if s:
return s
elif type(name) == str: # or type(name) == unicode: (always in Py3)
s = self.schemeFromPrefix(name)
if s:
return s
if name in self.schemes_.keys():
return self.schemes_[name]
print('Unknown scheme name:', name)
print('-----------------------')
print('Valid names or prefixes')
print('-----------------------')
for s in self.schemes_.values():
print(s.name, ': ', s.prefix)
raise Exception("No scheme known for name: ", name)
return DataSchemeBase()
def schemeFromPrefix(self, prefix):
""" Return DataScheme for a given prefix name.
"""
for s in list(self.schemes_.values()):
if s.prefix == prefix:
return s
return None
def schemeFromTyp(self, type):
for s in list(self.schemes_.values()):
if s.type == type:
return s
return None
def schemes(self):
'''
'''
return list(self.schemes_.keys())
class DataSchemeBase(object):
"""Base class for ERT data schemes
Attributes
----------
closed : bool
Close the chain. Measure from the end of the array to the first
electrode.
"""
def __init__(self, type=Pseudotype.unknown, name="unknown", prefix='uk'):
self.name = name
self.prefix = prefix
self.type = type
self.data_ = None
self.inverse_ = False
self.addInverse_ = False
self.reciprocity = False
self.nElectrodes_ = 0
self.maxSeparation = 1e99
self._closed = False
@property
def closed(self):
return self._closed
def create(self, nElectrodes=24, electrodeSpacing=1, sensorList=None,
**kwargs):
"""
"""
self.createElectrodes(nElectrodes, electrodeSpacing, sensorList)
self.createData(**kwargs)
if self.addInverse_:
out = pg.DataContainerERT(self.data_)
self.setInverse(not self.inverse_)
self.createData(**kwargs)
self.data_.add(out)
self.data_.removeInvalid()
self.data_.sortSensorsIndex()
if kwargs.values():
print("Warning! DataSchemeBase::create has unhandled arguments")
print(kwargs)
return self.data_
def createElectrodes(self, nElectrodes=24, electrodeSpacing=1,
sensorList=None):
self.data_ = pg.DataContainerERT()
if sensorList is not None:
for p in sensorList:
if isinstance(p, float):
self.data_.createSensor((p, 0.))
else:
self.data_.createSensor(p)
else:
for i in range(nElectrodes):
self.data_.createSensor(pg.Pos(float(i) *
electrodeSpacing, 0.0))
self.nElectrodes_ = self.data_.sensorCount()
def createData(self, **kwargs):
print('*'*100)
def setInverse(self, inverse=False):
self.inverse_ = inverse
def addInverse(self, addInverse=False):
"""
Add inverse value to create a full dataset.
"""
self.addInverse_ = addInverse
def setMaxSeparation(self, maxSep):
if maxSep > 0.0:
self.maxSeparation = maxSep
else:
self.maxSeparation = 1e99
def createDatum_(self, a, b, m, n, count):
if a < self.nElectrodes_ and b < self.nElectrodes_ and \
m < self.nElectrodes_ and n < self.nElectrodes_:
if self.inverse_:
self.data_.createFourPointData(count, m, n, a, b)
else:
self.data_.createFourPointData(count, a, b, m, n)
count += 1
return count
class DataSchemePolePole(DataSchemeBase):
"""Pole-Pole data scheme."""
def __init__(self):
DataSchemeBase.__init__(self)
self.name = "Pole Pole (C-P)"
self.prefix = "pp"
self.type = Pseudotype.PolePole
def createData(self, **kwargs):
"""
Create a Pole-Pole dataset.
Don't use directly .. call create from DataSchemeManager or
ert.createData(elecs, schemeName='pp', **kwargs) instead.
"""
nElectrodes = self.nElectrodes_
# reserve a couple more than nesseccary ###
self.data_.resize((nElectrodes) * (nElectrodes))
count = 0
# enlargeEverySep = 0 # not used
b = -1
n = -1
for a in range(0, nElectrodes):
for m in range(a + 1, nElectrodes):
if m - a > self.maxSeparation:
break
count = self.createDatum_(a, b, m, n, count)
self.data_.removeInvalid()
return self.data_
class DataSchemeDipoleDipole(DataSchemeBase):
"""Dipole-dipole data scheme. """
def __init__(self):
DataSchemeBase.__init__(self)
self.name = "Dipole Dipole (CC-PP)"
self.prefix = "dd"
self.type = Pseudotype.DipoleDipole
self.enlargeEverySep = 0
self.spacings = [1]
def createData(self, **kwargs):
"""
Create a Dipole-Dipole dataset.
Don't use directly .. call create from DataSchemeManager or
ert.createData(elecs, schemeName='dd', **kwargs) instead.
Parameters
----------
**kwargs:
* complete : bool
Add reciprocity measurements.
* enlarge : int
Enlarge dipole length every n dipole separations.
* spacings : array[int]
vector of spacings (dipole lengths) to use
"""
nElectrodes = self.nElectrodes_
complete = kwargs.pop('complete', False)
if complete:
self._closed = True
self.enlargeEverySep = kwargs.pop('enlarge', 0)
self.spacings = kwargs.pop('spacings', self.spacings)
# self.createElectrodes(nElectrodes, electrodeSpacing)
# reserve a couple more than necessary ###
nElectrodes = self.nElectrodes_
self.data_.resize(nElectrodes * nElectrodes)
count = 0
if self.closed:
space = 1
for i in range(nElectrodes):
a = i
b = (a + space) % nElectrodes
for j in range(nElectrodes):
m = (j) % nElectrodes
n = (m + space) % nElectrodes
if not complete:
if j <= i:
continue
if a != m and a != n and b != m and b != n:
count = self.createDatum_(a, b, m, n, count)
else:
for space in self.spacings:
maxSep = nElectrodes - space
maxInj = nElectrodes - space
if self.maxSeparation < maxSep:
maxSep = self.maxSeparation
for sep in range(1, maxSep + 1):
if self.enlargeEverySep > 0:
if (sep-1) % self.enlargeEverySep == 0:
space += 1
for i in range(maxInj - sep):
a = i
b = (a + space) % nElectrodes
m = (b + sep) % nElectrodes
n = (m + space) % nElectrodes
if m + space < nElectrodes:
count = self.createDatum_(a, b, m, n, count)
self.data_.removeInvalid()
return self.data_
# class DataSchemeDipoleDipole
class DataSchemePoleDipole(DataSchemeBase):
"""Pole-dipole data scheme"""
def __init__(self):
DataSchemeBase.__init__(self)
self.name = "Pole Dipole (C-PP)"
self.prefix = "pd"
self.type = Pseudotype.PoleDipole
self.spacings = [1]
def createData(self, **kwargs):
"""
Create a Pole-Dipole dataset.
Don't use directly .. call create from DataSchemeManager or
ert.createData(elecs, schemeName='pd', **kwargs) instead.
Parameters
----------
**kwargs:
* enlarge : int
Enlarge dipole length every n dipole separations.
* spacings : array[int]
vector of spacings (dipole lengths) to use
"""
nElectrodes = self.nElectrodes_
# self.createElectrodes(nElectrodes, electrodeSpacing)
# reserve a couple more than nesseccary !!!
self.data_.resize((nElectrodes) * (nElectrodes))
count = 0
self.enlargeEverySep = kwargs.pop('enlarge', 0)
self.spacings = kwargs.pop('spacings', self.spacings)
b = -1
for a in range(0, nElectrodes):
for m in range(a + 1, nElectrodes - 1):
n = m + 1
if m - a > self.maxSeparation:
break
count = self.createDatum_(a, b, m, n, count)
self.data_.removeInvalid()
return self.data_
# class DataSchemePoleDipole
class DataSchemeHalfWenner(DataSchemeBase):
"""Pole-Dipole like Wenner Beta with increasing dipole distance"""
def __init__(self):
DataSchemeBase.__init__(self)
self.name = "Half Wenner (C-P-P)"
self.prefix = "hw"
self.type = Pseudotype.HalfWenner
def createData(self, **kwargs):
"""
Create a Half-Wenner dataset.
Don't use directly .. call create from DataSchemeManager or
ert.createData(elecs, schemeName='hw', **kwargs) instead.
"""
nElectrodes = self.nElectrodes_
# reserve a couple more than nesseccary !!!
self.data_.resize((nElectrodes) * (nElectrodes))
# print("create", self.maxSeparation)
count = 0
# space = 0 # not yet used
# enlargeEverySep = 0 # not yet used
b = -1
for a in range(0, nElectrodes):
inc = 1
while True:
m = a - inc
n = m - inc
if m < 0 or n < 0 or inc > self.maxSeparation:
break
count = self.createDatum_(a, b, m, n, count)
inc = inc + 1
inc = 1
while True:
m = a + inc
n = m + inc
if m > nElectrodes or n > nElectrodes or \
inc > self.maxSeparation:
break
count = self.createDatum_(a, b, m, n, count)
inc = inc + 1
self.data_.removeInvalid()
self.data_.sortSensorsIndex()
return self.data_
#class DataSchemeHalfWenner
class DataSchemeWennerAlpha(DataSchemeBase):
"""Wenner alpha (C--P--P--C) data scheme with equal distances. """
def __init__(self):
DataSchemeBase.__init__(self)
self.name = "Wenner Alpha (C-P-P-C)"
self.prefix = "wa"
self.type = Pseudotype.WennerAlpha
def createData(self, **kwargs):
"""Create a Wenner-alpha dataset.
Don't use directly .. call create from DataSchemeManager or
ert.createData(elecs, schemeName='wa', **kwargs) instead.
"""
nElectrodes = self.nElectrodes_
maxSep = nElectrodes - 2
if self.maxSeparation < maxSep:
maxSep = self.maxSeparation
# reserve a couple more than nesseccary !!!
self.data_.resize(nElectrodes * nElectrodes)
count = 0
for sep in range(1, maxSep + 1):
for i in range((nElectrodes - 2) - sep):
a = i
m = a + sep
n = m + sep
b = n + sep
count = self.createDatum_(a, b, m, n, count)
self.data_.removeInvalid()
return self.data_
#class DataSchemeWennerAlpha
class DataSchemeWennerBeta(DataSchemeBase):
"""Wenner-beta (C--C--P--P) data scheme with equal distance."""
def __init__(self):
DataSchemeBase.__init__(self)
self.name = "Wenner Beta(C-C-P-P)"
self.prefix = "wb"
self.type = Pseudotype.WennerBeta
def createData(self, **kwargs):
"""Create a Wenner-beta dataset.
Don't use directly .. call create from DataSchemeManager or
ert.createData(elecs, schemeName='wb', **kwargs) instead.
"""
nElectrodes = self.nElectrodes_
maxSep = nElectrodes - 2
if self.maxSeparation < maxSep:
maxSep = self.maxSeparation
# reserve a couple more than nesseccary ###
self.data_.resize((nElectrodes * nElectrodes))
count = 0
for sep in range(1, maxSep + 1):
for i in range((nElectrodes - 2) - sep):
a = i
b = a + sep
m = b + sep
n = m + sep
count = self.createDatum_(a, b, m, n, count)
self.data_.removeInvalid()
return self.data_
# class DataSchemeWennerBeta(...)
class DataSchemeSchlumberger(DataSchemeBase):
"""Wenner-Schlumberger (C--P-P--C) data scheme. """
def __init__(self):
DataSchemeBase.__init__(self)
self.name = "Schlumberger(C-PP-C)"
self.prefix = "slm"
self.type = Pseudotype.Schlumberger
def createData(self, **kwargs):
"""Create a full (Wenner-)Schlumberger dataset.
Don't use directly .. call create from DataSchemeManager or
ert.createData(elecs, schemeName='sl', **kwargs) instead.
"""
nElectrodes = self.nElectrodes_
maxSep = nElectrodes - 2
if self.maxSeparation < maxSep:
maxSep = self.maxSeparation
self.data_.resize(nElectrodes * nElectrodes)
count = 0
for sep in range(1, maxSep + 1):
for i in range((nElectrodes - 2) - sep):
a = i
m = a + sep
n = m + 1
b = n + sep
count = self.createDatum_(a, b, m, n, count)
self.data_.removeInvalid()
return self.data_
# class DataSchemeSchlumberger(...)
class DataSchemeMultipleGradient(DataSchemeBase):
    """MultipleGradient (C---P-P--C) data scheme. """

    def __init__(self):
        DataSchemeBase.__init__(self)
        self.name = "MultipleGradient(C--P-P--C)"
        self.prefix = "gr"
        self.type = Pseudotype.Gradient

    def createData(self, **kwargs):
        """Create a multi-gradient dataset.

        Don't use directly .. call create from DataSchemeManager or
        ert.createData(elecs, schemeName='gr', **kwargs) instead.
        """
        nElectrodes = self.nElectrodes_
        ab_sep_base = 9  # number of channels + 2
        ev = 2  # use every ev-th multiple of the AB base separation
        takeevery = 2  # use every takeevery-th MN dipole width
        max_fak = int(np.ceil(nElectrodes / ab_sep_base))
        # both lists use the filter `ii % 2 == 1`, so they have equal
        # length and can be indexed in parallel in the loop below
        ab_space = [ii*ab_sep_base for ii in range(max_fak) if ii % ev == 1]
        mn_space = [ii for ii in range(max_fak) if ii % takeevery == 1]
        count = 0
        a, b, m, n = [], [], [], []  # collected as 1-based indices
        for ab in range(len(ab_space)):  # ab spacings
            for aa in np.arange(1, nElectrodes-ab_space[ab]+1, 1):  # a index
                mn = mn_space[ab]
                # sweep the potential dipole M-N through the A-B bracket
                for mm in np.arange(aa+mn, aa+ab_space[ab]-mn, mn):
                    count += 1
                    a.append(int(aa))
                    b.append(int(aa+ab_space[ab]))
                    m.append(int(mm))
                    n.append(int(mm+mn))

        self.data_.resize(count)
        # shift the collected 1-based indices to the container's 0-based
        # electrode convention
        self.data_.set('a', pg.Vector(a) - 1)
        self.data_.set('b', pg.Vector(b) - 1)
        self.data_.set('m', pg.Vector(m) - 1)
        self.data_.set('n', pg.Vector(n) - 1)
        self.data_.set('valid', pg.Vector(count, 1))
        return self.data_
# class DataSchemeMultipleGradient(...)
if __name__ == '__main__':
    # Demo: create every standard scheme for 41 electrodes and plot the
    # magnitude of its geometric factors in a 3x3 panel.
    # NOTE(review): relies on `ert`, `DataSchemeManager` and
    # `createColorBarOnly` being imported earlier in this module -- confirm.
    schemes = ['wa', 'wb', 'pp', 'pd', 'dd', 'slm', 'gr', 'hw']
    fig, ax = pg.plt.subplots(3, 3)
    kw = dict(cMin=10, cMax=1000, logScale=True, colorBar=False, cMap="viridis")
    for i, schemeName in enumerate(schemes):
        shm = ert.createData(elecs=41, schemeName=schemeName)
        print(schemeName, shm)
        k = ert.geometricFactor(shm)
        mg = DataSchemeManager()
        longname = mg.scheme(schemeName).name
        ert.show(shm, vals=np.abs(k), ax=ax.flat[i], colorBar=1, logScale=0,
                 label='k ' + longname + ')-' + schemeName)
    # fill the unused last panel with a standalone color bar
    createColorBarOnly(**kw, ax=ax.flat[-1], aspect=0.1)
    pg.plt.show()
# %%
# import matplotlib.pyplot as plt
# from pygimli.physics import ert
# schemes = ['wa', 'wb', 'pp', 'pd', 'dd', 'slm', 'hw', 'gr']
# fig, ax = plt.subplots(3,3)
# for i, schemeName in enumerate(schemes):
# s = ert.createData(elecs=41, schemeName=schemeName)
# k = ert.geometricFactors(s)
# ert.show(s, vals=k, ax=ax.flat[i], label='k - ' + schemeName)
# plt.show()
|
JuliusHen/gimli | pygimli/testing/test_XVector.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import numpy as np
import pygimli as pg
class TestRVectorMethods(unittest.TestCase):
    """Unit tests for the pygimli vector types (RVector/CVector/BVector/...)."""

    def test_XVectorBasics(self):
        """Construction, iteration, sum and element access for all types."""
        def testVector(v):
            for t in v:
                self.assertEqual(t, 1.0)
            self.assertEqual(sum(v), 5.0)
            self.assertFalse(pg.core.haveInfNaN(v))
            v[1] = 0
            self.assertEqual(v[1], 0)
            v[1] = 1
            self.assertEqual(v[1], 1)
            #print(v/v)

        testVector(pg.Vector(5, 1.0))
        testVector(pg.CVector(5, 1.0))
        testVector(pg.BVector(5, True))
        testVector(pg.IVector(5, 1))

    def test_XVectorSetVal(self):
        """setVal overloads: single index, index range, mask, whole vector."""
        vec = pg.Vector(5, 1.0)
        self.assertEqual(vec[0], 1.0)
        vec[0] += 1.0
        self.assertEqual(vec[0], 2.0)
        vec[0] = vec[0] + 1.0
        self.assertEqual(vec[0], 3.0)
        vec.setVal(4.0, 0)
        self.assertEqual(vec[0], 4.0)
        vec.setVal(5.0, 0, 1)
        self.assertEqual(vec[0], 5.0)
        vec.setVal(6.0)
        self.assertEqual(vec[0], 6.0)
        vec.setVal(7.0, vec > 0.0)
        self.assertEqual(vec[0], 7.0)

    def test_IVectorOP(self):
        """Arithmetic operators on integer vectors (scalar and elementwise)."""
        v = pg.IVector(5, 1)
        # print(v + 2)
        # print(v - 2)
        # print(2 * v)
        #print(v * 2)
        #self.assertEqual(sum(v * 2), 10)
        self.assertEqual(sum(v + 1), 10)
        self.assertEqual(sum(v - 2), -5)
        self.assertEqual(sum(v / 1), 5)
        self.assertEqual(sum(1 + v), 10)
        self.assertEqual(sum(-1 - v), -10)
        self.assertEqual(sum(1 / v), 5)

        # no clue why this doesnt work .. we might could hack them if someone need it
        #self.assertEqual(sum(v * 2), 10)

        self.assertEqual(sum(v + v), 10)
        self.assertEqual(sum(v * v), 5)
        self.assertEqual(sum(v - v), 0)
        self.assertEqual(sum(v / v), 5)
        self.assertEqual(sum(2 * v), 10)

    def test_IndexArray(self):
        """IndexArray construction, sum and scalar addition."""
        v = pg.core.IndexArray([0,1,2,3])
        self.assertEqual(sum(v), 6)
        np.testing.assert_array_equal(v + 1, [1, 2, 3, 4])

    def test_RVectorOP(self):
        """Scalar (int and float) operators on RVector and mixing with numpy."""
        v = pg.Vector(5, 1.0)

        self.assertEqual(sum(v + 1), 10)
        self.assertEqual(sum(v - 2), -5)
        self.assertEqual(sum(v * 2), 10)
        self.assertEqual(sum(v / 1), 5)
        self.assertEqual(sum(1 + v), 10)
        self.assertEqual(sum(-1 - v), -10)
        self.assertEqual(sum(2 * v), 10)
        self.assertEqual(sum(1 / v), 5)

        self.assertEqual(sum(v + 1.0), 10)
        self.assertEqual(sum(v - 2.0), -5)
        self.assertEqual(sum(v * 2.0), 10)
        self.assertEqual(sum(v / 1.0), 5)
        self.assertEqual(sum(1.0 + v), 10)
        self.assertEqual(sum(-1.0 - v), -10)
        self.assertEqual(sum(2.0 * v), 10)
        self.assertEqual(sum(1.0 / v), 5)

        v2 = np.ones(len(v))* 0.01
        # check pg * np
        self.assertEqual(sum(v * v2), 5*0.01)
        # check np * pg
        self.assertEqual(sum(v2 * v), 5*0.01)

        #v = pg.CVector(5, 1.0)
        #self.assertEqual(sum(v + 1), 10)
        #self.assertEqual(sum(v - 1), 0)
        #self.assertEqual(sum(v * 1), 5)
        #self.assertEqual(sum(v / 1), 5)
        #self.assertEqual(sum(1 + v), 10)
        #self.assertEqual(sum(1 - v), 0)
        #self.assertEqual(sum(1 * v), 5)
        #self.assertEqual(sum(1 / v), 5)
        #self.assertEqual(sum(v + 1.0), 10)
        #self.assertEqual(sum(v - 1.0), 0)
        #self.assertEqual(sum(v * 1.0), 5)
        #self.assertEqual(sum(v / 1.0), 5)
        #self.assertEqual(sum(1.0 + v), 10)
        #self.assertEqual(sum(1.0 - v), 0)
        #self.assertEqual(sum(1.0 * v), 5)
        #self.assertEqual(sum(1.0 / v), 5)

    def test_RVectorIndexRW(self):
        """Read/write access by scalar, list, IVector, IndexArray and mask."""
        v = pg.Vector(5, 2.0)
        np.testing.assert_array_equal(v, [2, 2, 2, 2, 2])
        v += 1.0
        np.testing.assert_array_equal(v, [3, 3, 3, 3, 3])
        v += 1
        np.testing.assert_array_equal(v, [4, 4, 4, 4, 4])
        v[1] = 1.0
        np.testing.assert_array_equal(v, [4, 1, 4, 4, 4])
        v[1] += 1.0
        np.testing.assert_array_equal(v, [4, 2, 4, 4, 4])
        v[[1,2]] = 2.0
        np.testing.assert_array_equal(v, [4, 2, 2, 4, 4])
        # pg.IVector(n, val) builds a size-n vector filled with val, so the
        # next two lines index positions 3 and 2 respectively
        v[pg.IVector(1,3)] = 3.0
        np.testing.assert_array_equal(v, [4, 2, 2, 3, 4])
        v[pg.IVector(5,2)] = 1.0
        np.testing.assert_array_equal(v, [4, 2, 1, 3, 4])
        v[pg.find(v==4.0)] = 5.0
        np.testing.assert_array_equal(v, [5, 2, 1, 3, 5])
        v[v==5.0] = 4.0
        np.testing.assert_array_equal(v, [4, 2, 1, 3, 4])
        v[v==4.0] = 5.0
        np.testing.assert_array_equal(v, [5, 2, 1, 3, 5])
        #this will work only if we overwrite __iadd__
        #v[v==4.0] += 1.0
        #np.testing.assert_array_equal(v, [6, 2, 1, 3, 6])
        v.setVal(1.0, 1)
        np.testing.assert_array_equal(v, [5, 1, 1, 3, 5])

    def test_RVectorFuncts(self):
        """pg.math.pow with int, float and vector exponent."""
        v = pg.Vector(5, 2.0)
        self.assertEqual(sum(pg.math.pow(v, 2)), 20)
        self.assertEqual(sum(pg.math.pow(v, 2.0)), 20)
        self.assertEqual(sum(pg.math.pow(v, v)), 20)

    def test_R3VectorIndex(self):
        """R3Vector element assignment from RVector3 and tuple; pg.utils.dist."""
        r3 = pg.core.R3Vector(10)
        self.assertEqual(r3[0], pg.RVector3(0, 0, 0))
        np.testing.assert_array_equal(r3[0], pg.RVector3(0, 0, 0))
        r3[1] = pg.RVector3(0.0, 1.0, 0.0)
        np.testing.assert_array_equal(r3[1], pg.RVector3(0.0, 1.0, 0.0))
        r3[2] = (0.0, 2.0, 0.0)
        np.testing.assert_array_equal(r3[2], pg.RVector3(0.0, 2.0, 0.0))
        r3[3] = (0.0, 3.0, 0.0)
        np.testing.assert_array_equal(r3[3], pg.RVector3(0.0, 3.0, 0.0))
        d = pg.utils.dist(r3)
        self.assertEqual(sum(d), 1+2+3)

    def test_Slices(self):
        """Slicing semantics match numpy, including negative steps."""
        a = pg.Vector(np.arange(10.))
        np.testing.assert_array_equal(a[:], np.arange(10.)[:])
        np.testing.assert_array_equal(a[::], np.arange(10.)[::])
        np.testing.assert_array_equal(a[::1], np.arange(10.)[::1])
        np.testing.assert_array_equal(a[::-1], np.arange(10.)[::-1])
        np.testing.assert_array_equal(a[0:3:1], np.arange(10.)[0:3:1])
        np.testing.assert_array_equal(a[0:3:2], np.arange(10.)[0:3:2])
        np.testing.assert_array_equal(a[3:0:-1], np.arange(10.)[3:0:-1])
        np.testing.assert_array_equal(a[3:0:-2], np.arange(10.)[3:0:-2])
        np.testing.assert_array_equal(a[0:3:-1], np.arange(10.)[0:3:-1])
        np.testing.assert_array_equal(a[0:3:-2], np.arange(10.)[0:3:-2])

    def test_IndexAccess(self):
        """Interoperability of boolean/integer indexing between pg and numpy."""
        # (double) array/vector
        an = np.arange(10.)
        ag = pg.Vector(an)

        # bn = nd.array(bool)
        bn = (an > 4.)
        self.assertEqual(type(bn), np.ndarray)
        self.assertEqual(bn.dtype, 'bool')
        self.assertEqual(sum(bn), 5)

        # bg = BVector
        bg = (ag > 4.)
        self.assertEqual(type(bg), pg.BVector)
        self.assertEqual(sum(bg), 5)

        # BVector(nd.array(bool))
        self.assertEqual(len(bg), len(pg.BVector(bn)))
        self.assertEqual(sum(bg), sum(pg.BVector(bn)))
        self.assertEqual(bg[0], pg.BVector(bn)[0])
        np.testing.assert_array_equal(bg, pg.BVector(bn))

        # In = nd.array(int)
        In = np.nonzero(bn)[0]
        self.assertEqual(type(In), np.ndarray)
        # NOTE(review): 'int64' is platform-dependent (int32 on Windows)
        self.assertEqual(In.dtype, 'int64')
        self.assertEqual(len(In), 5)
        self.assertEqual(In[0], 5)

        # np.nonzero(bg)
        np.testing.assert_array_equal(In, np.nonzero(bg)[0])

        # Ig = IndexArray
        Ig = pg.find(bg)
        self.assertEqual(type(Ig), pg.core.IndexArray)
        self.assertEqual(len(Ig), 5)
        self.assertEqual(Ig[0], 5)

        # pg.find(nd.array(bool))
        np.testing.assert_array_equal(Ig, pg.find(bn))

        ## Indexoperators ##
        # ndarray [nd.array(bool)] == ndarray [nd.array(int)]
        np.testing.assert_equal(an[bn], an[In])
        self.assertEqual(len(an[bn]), 5)
        self.assertEqual(an[bn][0], 5)

        # ndarray[IndexArray] == ndarray [nd.array(int)]
        np.testing.assert_equal(an[Ig], an[In])

        # ndarray[BVector] == ndarray [nd.array(bool)]
        np.testing.assert_array_equal(an[np.array(bg, dtype='bool')], an[bn])
        np.testing.assert_array_equal(an[np.array(bg)], an[bn])
        np.testing.assert_array_equal(an[bg.array()], an[bn])
        np.testing.assert_array_equal(an[an>5], [6, 7, 8, 9])

        np.testing.assert_array_equal(ag[bg], ag[Ig])
        self.assertEqual(len(ag[bg]), 5)
        self.assertEqual(ag[bg][0], 5)

        # RVector [BVector] == RVector [nd.array(bool)]
        np.testing.assert_array_equal(ag[bg], ag[bn])
        np.testing.assert_equal(sum(ag[bg]), sum(ag[bn]))

        # RVector [IndexArray] == RVector [nd.array(int)]
        np.testing.assert_array_equal(ag[Ig], ag[In])

        # RVector(BVector) == RVector(nd.array(bool))
        # RVector(IndexArray) == RVector(nd.array(int))

        I = pg.core.IndexArray([0,1,1,0])
        np.testing.assert_array_equal(pg.sum(I), 2)
        np.testing.assert_array_equal(sum(I), 2)
        np.testing.assert_array_equal(np.sum(I), 2)

    def testComparison(self):
        """Comparison operators return vectors of the operand length."""
        a = pg.Vector(10, 1)
        b = pg.Vector(10, 2)
        np.testing.assert_equal(len(a < 1), 10)
        np.testing.assert_equal(len(a > 2), 10)
        np.testing.assert_equal(len(a < b), 10)
        np.testing.assert_equal(len(a > b), 10)

    def testRMatrixIndex(self):
        """Row/column/tuple indexing of pg.Matrix."""
        A = pg.Matrix(3,4)
        A[0] = pg.Vector(4,1)
        np.testing.assert_equal(sum(A[0]), 4)
        A[1,2] = 2.0
        # np.testing.assert_equal(sum(A[1]), 2)
        np.testing.assert_equal(A[1,2], 2)
        np.testing.assert_equal(A[:,2], A.col(2))
        np.testing.assert_equal(A[2], A.row(2))
        ## will not work because A[2] refer to A[2]__getItem__ which only can
        # return a const reference. use the tuple idx above
        # A[2][2] = 2.0
        # np.testing.assert_equal(sum(A[2]), 2)
if __name__ == '__main__':
    # Run the full suite; the commented lines are kept as a convenience for
    # single-test debugging sessions.
    # pg.setDeepDebug(1)
    # t = TestRVectorMethods()
    # # t.test_IVectorOP()
    # t.test_Slices()
    # t.testRMatrixIndex()
    unittest.main()
|
JuliusHen/gimli | pygimli/physics/traveltime/modelling.py | <filename>pygimli/physics/traveltime/modelling.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Modelling classes for managing first arrival travel-time problems"""
import numpy as np
import pygimli as pg
from pygimli.frameworks import MeshModelling
from pygimli.viewer.mpl import createColorBar # , updateColorBar
from .utils import createGradientModel2D, shotReceiverDistances
from .plotting import drawVA
class TravelTimeDijkstraModelling(MeshModelling):
    """Forward modelling class for traveltime using Dijsktras method.

    Thin Python wrapper around the core pg.core.TravelTimeDijkstraModelling;
    most heavy lifting (Jacobian, response, way) is delegated to the core.
    """

    def __init__(self, **kwargs):
        self._core = pg.core.TravelTimeDijkstraModelling()
        super(TravelTimeDijkstraModelling, self).__init__(**kwargs)

        self._useGradient = None  # assumed to be [vTop, vBot] if set
        self._refineSecNodes = 3  # secondary nodes per boundary for refinement
        # bind selected core methods directly to this instance
        self.jacobian = self._core.jacobian
        self.setThreadCount = self._core.setThreadCount
        # self.createJacobian = self.dijkstra.createJacobian
        self.setJacobian(self._core.jacobian())

    @property
    def dijkstra(self):
        """Return current Dijkstra graph associated to mesh and model."""
        return self._core.dijkstra()

    def regionManagerRef(self):
        """Region manager reference (core Dijkstra has an own!)."""
        return self._core.regionManagerRef()

    def createRefinedFwdMesh(self, mesh):
        """Refine the current mesh for higher accuracy.

        This is called automatic when accesing self.mesh() so it ensures any
        effect of changing region properties (background, single).
        """
        pg.info("Creating refined mesh (secnodes: {0}) to "
                "solve forward task.".format(self._refineSecNodes))
        m = mesh.createMeshWithSecondaryNodes(self._refineSecNodes)
        pg.verbose(m)
        return m

    def setMeshPost(self, mesh):
        """Set mesh after forward operator has been initalized."""
        self._core.setMesh(mesh)

    def setDataPost(self, data):
        """Set data after forward operator has been initalized."""
        self._core.setData(data)

    def createStartModel(self, dataVals):
        """Create a starting model (slowness) from the data values.

        Uses a vertical gradient model if self._useGradient = [vTop, vBot]
        is set, otherwise a constant model from the median apparent slowness.
        """
        sm = None

        if self._useGradient is not None:
            [vTop, vBot] = self._useGradient  # something strange here!!!
            pg.info('Create gradient starting model. {0}: {1}'.format(vTop,
                                                                      vBot))
            sm = createGradientModel2D(self.data,
                                       self.paraDomain,
                                       vTop, vBot)
        else:
            # apparent slowness = traveltime / offset for each datum
            dists = shotReceiverDistances(self.data, full=True)
            aSlow = 1. / (dists / dataVals)

            # pg._r(self.regionManager().parameterCount())
            sm = pg.Vector(self.regionManager().parameterCount(),
                           pg.math.median(aSlow))
            pg.info('Create constant starting model:', sm[0])

        return sm

    def createJacobian(self, par):
        """Create Jacobian (way matrix) for slowness vector `par`."""
        if not self.mesh():
            pg.critical("no mesh")
        return self._core.createJacobian(par)

    def response(self, par):
        """Return forward response (simulated traveltimes) for `par`."""
        if not self.mesh():
            pg.critical("no mesh")
        return self._core.response(par)

    def way(self, s, g):
        """Return node indices for the way from the shot to the receiver.

        The index is based on the given data, mesh and last known model.
        """
        return self._core.way(s, g)

    def drawModel(self, ax, model, **kwargs):
        """Draw the model as a velocity section (log-scaled by default)."""
        kwargs.setdefault('label', pg.unit('vel'))
        kwargs.setdefault('cMap', pg.utils.cMap('vel'))
        return super().drawModel(ax=ax, model=model,
                                 logScale=kwargs.pop('logScale', True),
                                 **kwargs)

    def drawData(self, ax, data=None, **kwargs):
        """Draw the data (as apparent velocity crossplot by default).

        Parameters
        ----------
        data: pg.DataContainer
            Data container; an iterable of values is taken as `vals` on
            top of self.data instead.
        """
        if hasattr(data, '__iter__'):
            kwargs['vals'] = data
            data = self.data
        elif data is None:
            data = self.data

        if kwargs.pop('firstPicks', False):
            pg.physics.traveltime.drawFirstPicks(ax, data, **kwargs)
            return ax
        else:
            kwargs.setdefault('label', pg.unit('va'))
            kwargs.setdefault('cMap', pg.utils.cMap('va'))
            gci = drawVA(ax, data, usePos=False, **kwargs)
            cBar = createColorBar(gci, **kwargs)
            return gci, cBar
class FatrayDijkstraModellingInterpolate(TravelTimeDijkstraModelling):
    """Shortest-path (Dijkstra) based travel time with fat ray jacobian.

    Sensitivities are spread over a fat ray (Jordi et al. 2016) by
    interpolating node travel times onto the cell centers.
    """

    def __init__(self, frequency=100., verbose=False):
        # bugfix: the parent __init__ only accepts keyword arguments
        # (def __init__(self, **kwargs)), so passing `verbose` positionally
        # raised a TypeError
        super().__init__(verbose=verbose)
        self.frequency = frequency  # controls fat-ray width (1/f ~ period)
        self.iMat = pg.matrix.SparseMapMatrix()  # node->cell interpolation

    def createJacobian(self, slowness):
        """Generate Jacobian matrix using fat-ray after Jordi et al. (2016).

        Parameters
        ----------
        slowness : iterable
            Cell-based slowness model of length mesh().cellCount().
        """
        # NOTE(review): self.data() / createMappedModel / createGraph are
        # expected from the framework resp. core binding -- confirm.
        self.J = pg.Matrix(self.data().size(), self.mesh().cellCount())
        self.sensorNodes = [self.mesh().findNearestNode(pos)
                            for pos in self.data().sensorPositions()]
        # (re)build the node-to-cell-center interpolation matrix if the
        # mesh dimensions changed since the last call
        if (self.iMat.cols() != self.mesh().nodeCount() or
                self.iMat.rows() != self.mesh().cellCount()):
            self.iMat = self.mesh().interpolationMatrix(
                self.mesh().cellCenters())

        # bugfix: `dijkstra` is a property on the parent class; calling the
        # returned core object (`self.dijkstra()`) raised a TypeError
        Di = self.dijkstra
        slowPerCell = self.createMappedModel(slowness, 1e16)
        Di.setGraph(self.createGraph(slowPerCell))

        numN = self.mesh().nodeCount()
        data = self.data()
        numS = data.sensorCount()
        # Tmat: travel times from every sensor to every node
        # Dmat: sensor-to-sensor travel times
        Tmat = pg.Matrix(numS, numN)
        Dmat = pg.Matrix(numS, numS)
        for i, node in enumerate(self.sensorNodes):
            Di.setStartNode(node)
            Tmat[i] = Di.distances()  # (0, numN)
            Dmat[i] = Tmat[i][self.sensorNodes]

        for i in range(data.size()):
            iS = int(data("s")[i])
            iG = int(data("g")[i])
            tsr = Dmat[iS][iG]  # shot-receiver travel time
            # detour time of every cell center relative to the direct path
            dt = self.iMat * (Tmat[iS] + Tmat[iG]) - tsr
            weight = np.maximum(1 - 2 * self.frequency * dt, 0.0)  # 1 on ray
            wa = weight  # * np.sqrt(self.mesh().cellSizes())
            if np.sum(wa) > 0:  # not if all values are zero
                wa /= np.sum(wa)

            self.J[i] = wa * tsr / slowness

        self.setJacobian(self.J)
class FatrayDijkstraModellingMidpoint(TravelTimeDijkstraModelling):
"""Shortest-path (Dijkstra) based travel time with fat ray jacobian."""
def __init__(self, frequency=100., verbose=False):
super().__init__(verbose)
self.frequency = frequency
def setMesh(self, mesh, **kwargs): # secondaryNodes=3):
"""Set mesh and create additional secondary Nodes in cell centers."""
super().setMesh(mesh, **kwargs) # ignoreRegionManager=True)
print(self.mesh(), self.mesh().secondaryNodeCount())
self.mids = pg.IVector()
self.nnodes = self.mesh().nodeCount()
for c in self.mesh().cells():
n = self.mesh().createSecondaryNode(c.center())
c.addSecondaryNode(n)
self.mids.push_back(n.id())
print(self.mesh())
def createJacobian(self, slowness):
"""Generate Jacobian matrix using fat-ray after Jordi et al. (2016)."""
self.J = pg.Matrix(self.data().size(), self.mesh().cellCount())
self.sensorNodes = [self.mesh().findNearestNode(pos)
for pos in self.data().sensorPositions()]
Di = self.dijkstra()
slowPerCell = self.createMappedModel(slowness, 1e16)
Di.setGraph(self.createGraph(slowPerCell))
numN = self.mesh().nodeCount()
data = self.data()
numS = data.sensorCount()
Tmat = pg.Matrix(numS, numN)
Dmat = pg.Matrix(numS, numS)
print(self.mesh())
print(self.nnodes, max(self.mids))
for i, node in enumerate(self.sensorNodes):
Di.setStartNode(node)
dist0 = Di.distances()
dist = Di.distances(withSecNodes=True)
print("dist len ", len(dist0), len(dist))
Tmat[i] = dist[self.mids]
Dmat[i] = Tmat[i][self.sensorNodes]
for i in range(data.size()):
iS = int(data("s")[i])
iG = int(data("g")[i])
tsr = Dmat[iS][iG] # shot-receiver travel time
dt = Tmat[iS] + Tmat[iG] - tsr
weight = np.maximum(1 - 2 * self.frequency * dt, 0.0) # 1 on ray
wa = weight # * np.sqrt(self.mesh().cellSizes())
if np.sum(wa) > 0: # not if all values are zero
wa /= np.sum(wa)
self.J[i] = wa * tsr / slowness
self.setJacobian(self.J)
# The interpolation variant is the default fat-ray implementation; the
# midpoint variant (secondary nodes in cell centers) is kept for experiments.
FatrayDijkstraModelling = FatrayDijkstraModellingInterpolate
# FatrayDijkstraModelling = FatrayDijkstraModellingMidpoint
|
JuliusHen/gimli | pygimli/physics/ert/ertManager.py | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Method Manager for Electrical Resistivity Tomography (ERT)"""
import os.path
import numpy as np
import matplotlib.pyplot as plt
import pygimli as pg
from pygimli.frameworks import MeshMethodManager
from .ertModelling import ERTModelling, ERTModellingReference
from .ert import createInversionMesh, createGeometricFactors, estimateError
from pygimli.utils import getSavePath
class ERTManager(MeshMethodManager):
"""ERT Manager.
Method Manager for Electrical Resistivity Tomography (ERT)
Todo
----
* 3d
* 3dtopo
* complex on/off
* closed geometry
* transdim
* singularity removal
* ERT specific inversion options:
* ...
"""
    def __init__(self, data=None, **kwargs):
        """Create ERT Manager instance.

        Parameters
        ----------
        data: :gimliapi:`GIMLI::DataContainerERT` | str
            You can initialize the Manager with data or give them a dataset
            when calling the inversion.

        Other Parameters
        ----------------
        * useBert: bool [True]
            Use Bert forward operator instead of the reference implementation.
        * sr: bool [True]
            Calculate with singularity removal technique.
            Recommended but needs the primary potential.
            For flat earth cases the primary potential will be calculated
            analytical. For domains with topography the primary potential
            will be calculated numerical using a p2 refined mesh or
            you provide primary potentials with setPrimPot.
        """
        # pop FOP-related options before the base class builds the forward
        # operator via createForwardOperator()
        self.useBert = kwargs.pop('useBert', True)
        self.sr = kwargs.pop('sr', True)

        super().__init__(data=data, **kwargs)
        # log-scaled data transformation with lower/upper bound support
        self.inv.dataTrans = pg.trans.TransLogLU()
def setSingularityRemoval(self, sr=True):
"""Turn singularity removal on or off."""
self.reinitForwardOperator(sr=True)
    def createForwardOperator(self, **kwargs):
        """Create and choose forward operator.

        Returns the BERT-based ERTModelling (default) or the pure python
        reference implementation if useBert=False.
        """
        verbose = kwargs.pop('verbose', False)
        # allow overriding the instance settings per call
        self.useBert = kwargs.pop('useBert', self.useBert)
        self.sr = kwargs.pop('sr', self.sr)

        if self.useBert:
            pg.verbose('Create ERTModelling FOP')
            fop = ERTModelling(sr=self.sr, verbose=verbose)
        else:
            pg.verbose('Create ERTModellingReference FOP')
            fop = ERTModellingReference(**kwargs)

        return fop
    def load(self, fileName):
        """Load ERT data.

        Forwarded to :py:mod:`pygimli.physics.ert.load`; the loaded data
        also becomes the manager's current data set.

        Parameters
        ----------
        fileName: str
            Filename for the data.

        Returns
        -------
        data: :gimliapi:`GIMLI::DataContainerERT`
        """
        self.data = pg.physics.ert.load(fileName)
        return self.data
    def createMesh(self, data=None, **kwargs):
        """Create default inversion mesh.

        Forwarded to :py:mod:`pygimli.physics.ert.createInversionMesh`
        """
        # NOTE(review): `data or self.data` falls back to self.data for ANY
        # falsy argument, not only None -- confirm DataContainer truthiness
        # is intended here.
        d = data or self.data
        if d is None:
            pg.critical('Please provide a data file for mesh generation')

        return createInversionMesh(d, **kwargs)
    def setPrimPot(self, pot):
        """Set primary potential from external is not supported anymore.

        Always aborts via pg.critical.
        """
        pg.critical("Not implemented.")
def simulate(self, mesh, scheme, res, **kwargs):
"""Simulate an ERT measurement.
Perform the forward task for a given mesh, resistivity distribution &
measuring scheme and return data (apparent resistivity) or potentials.
For complex resistivity, the apparent resistivities is complex as well.
The forward operator itself only calculates potential values for the
electrodes in the given data scheme.
To calculate apparent resistivities, geometric factors (k) are needed.
If there are no values k in the DataContainerERT scheme, the function
tries to calculate them, either analytically or numerically by using a
p2-refined version of the given mesh.
TODO
----
* 2D + Complex + SR
Args
----
mesh : :gimliapi:`GIMLI::Mesh`
2D or 3D Mesh to calculate for.
res : float, array(mesh.cellCount()) | array(N, mesh.cellCount()) |
list
Resistivity distribution for the given mesh cells can be:
. float for homogeneous resistivity (e.g. 1.0)
. single array of length mesh.cellCount()
. matrix of N resistivity distributions of length mesh.cellCount()
. resistivity map as [[regionMarker0, res0],
[regionMarker0, res1], ...]
scheme : :gimliapi:`GIMLI::DataContainerERT`
Data measurement scheme.
Keyword Args
------------
verbose: bool[False]
Be verbose. Will override class settings.
calcOnly: bool [False]
Use fop.calculate instead of fop.response. Useful if you want
to force the calculation of impedances for homogeneous models.
No noise handling. Solution is put as token 'u' in the returned
DataContainerERT.
noiseLevel: float [0.0]
add normally distributed noise based on
scheme['err'] or on noiseLevel if error>0 is not contained
noiseAbs: float [0.0]
Absolute voltage error in V
returnArray: bool [False]
Returns an array of apparent resistivities instead of
a DataContainerERT
returnFields: bool [False]
Returns a matrix of all potential values (per mesh nodes)
for each injection electrodes.
Returns
-------
DataContainerERT | array(data.size()) | array(N, data.size()) |
array(N, mesh.nodeCount()):
Data container with resulting apparent resistivity data and
errors (if noiseLevel or noiseAbs is set).
Optional returns a Matrix of rhoa values
(for returnArray==True forces noiseLevel=0).
In case of a complex valued resistivity model, phase values are
returned in the DataContainerERT (see example below), or as an
additionally returned array.
Examples
--------
# >>> from pygimli.physics import ert
# >>> import pygimli as pg
# >>> import pygimli.meshtools as mt
# >>> world = mt.createWorld(start=[-50, 0], end=[50, -50],
# ... layers=[-1, -5], worldMarker=True)
# >>> scheme = ert.createData(
# ... elecs=pg.utils.grange(start=-10, end=10, n=21),
# ... schemeName='dd')
# >>> for pos in scheme.sensorPositions():
# ... _= world.createNode(pos)
# ... _= world.createNode(pos + [0.0, -0.1])
# >>> mesh = mt.createMesh(world, quality=34)
# >>> rhomap = [
# ... [1, 100. + 0j],
# ... [2, 50. + 0j],
# ... [3, 10.+ 0j],
# ... ]
# >>> data = ert.simulate(mesh, res=rhomap, scheme=scheme, verbose=1)
# >>> rhoa = data.get('rhoa').array()
# >>> phia = data.get('phia').array()
"""
verbose = kwargs.pop('verbose', self.verbose)
calcOnly = kwargs.pop('calcOnly', False)
returnFields = kwargs.pop("returnFields", False)
returnArray = kwargs.pop('returnArray', False)
noiseLevel = kwargs.pop('noiseLevel', 0.0)
noiseAbs = kwargs.pop('noiseAbs', 1e-4)
seed = kwargs.pop('seed', None)
sr = kwargs.pop('sr', self.sr)
# segfaults with self.fop (test & fix)
fop = self.createForwardOperator(useBert=self.useBert,
sr=sr, verbose=verbose)
fop.data = scheme
fop.setMesh(mesh, ignoreRegionManager=True)
rhoa = None
phia = None
isArrayData = False
# parse the given res into mesh-cell-sized array
if isinstance(res, int) or isinstance(res, float):
res = np.ones(mesh.cellCount()) * float(res)
elif isinstance(res, complex):
res = np.ones(mesh.cellCount()) * res
elif hasattr(res[0], '__iter__'): # ndim == 2
if len(res[0]) == 2: # res seems to be a res map
# check if there are markers in the mesh that are not defined
# the rhomap. better signal here before it results in errors
meshMarkers = list(set(mesh.cellMarkers()))
mapMarkers = [m[0] for m in res]
if any([mark not in mapMarkers for mark in meshMarkers]):
left = [m for m in meshMarkers if m not in mapMarkers]
pg.critical("Mesh contains markers without assigned "
"resistivities {}. Please fix given "
"rhomap.".format(left))
res = pg.solver.parseArgToArray(res, mesh.cellCount(), mesh)
else: # probably nData x nCells array
# better check for array data here
isArrayData = True
if isinstance(res[0], np.complex) or isinstance(res, pg.CVector):
pg.info("Complex resistivity values found.")
fop.setComplex(True)
else:
fop.setComplex(False)
if not scheme.allNonZero('k') and not calcOnly:
if verbose:
pg.info('Calculate geometric factors.')
scheme.set('k', fop.calcGeometricFactor(scheme))
ret = pg.DataContainerERT(scheme)
# just to be sure that we don't work with artifacts
ret['u'] *= 0.0
ret['i'] *= 0.0
ret['r'] *= 0.0
if isArrayData:
rhoa = np.zeros((len(res), scheme.size()))
for i, r in enumerate(res):
rhoa[i] = fop.response(r)
if verbose:
print(i, "/", len(res), " : ", pg.dur(), "s",
"min r:", min(r), "max r:", max(r),
"min r_a:", min(rhoa[i]), "max r_a:", max(rhoa[i]))
else: # res is single resistivity array
if len(res) == mesh.cellCount():
if calcOnly:
fop.mapERTModel(res, 0)
dMap = pg.core.DataMap()
fop.calculate(dMap)
if fop.complex():
pg.critical('Implement me')
else:
ret["u"] = dMap.data(scheme)
ret["i"] = np.ones(ret.size())
if returnFields:
return pg.Matrix(fop.solution())
return ret
else:
if fop.complex():
res = pg.utils.squeezeComplex(res)
resp = fop.response(res)
if fop.complex():
rhoa, phia = pg.utils.toPolar(resp)
else:
rhoa = resp
else:
print(mesh)
print("res: ", res)
raise BaseException(
"Simulate called with wrong resistivity array.")
if not isArrayData:
ret['rhoa'] = rhoa
if phia is not None:
ret.set('phia', phia)
else:
ret.set('rhoa', rhoa[0])
if phia is not None:
ret.set('phia', phia[0])
if returnFields:
return pg.Matrix(fop.solution())
if noiseLevel > 0: # if errors in data noiseLevel=1 just triggers
if not ret.allNonZero('err'):
# 1A and #100µV
ret.set('err', self.estimateError(ret,
relativeError=noiseLevel,
absoluteUError=noiseAbs,
absoluteCurrent=1))
print("Data error estimate (min:max) ",
min(ret('err')), ":", max(ret('err')))
rhoa *= 1. + pg.randn(ret.size(), seed=seed) * ret('err')
ret.set('rhoa', rhoa)
ipError = None
if phia is not None:
if scheme.allNonZero('iperr'):
ipError = scheme('iperr')
else:
# np.abs(self.data("phia") +TOLERANCE) * 1e-4absoluteError
if noiseLevel > 0.5:
noiseLevel /= 100.
if 'phiErr' in kwargs:
ipError = np.ones(ret.size()) * kwargs.pop('phiErr') \
/ 1000
else:
ipError = abs(ret["phia"]) * noiseLevel
if verbose:
print("Data IP abs error estimate (min:max) ",
min(ipError), ":", max(ipError))
phia += pg.randn(ret.size(), seed=seed) * ipError
ret['iperr'] = ipError
ret['phia'] = phia
# check what needs to be setup and returned
if returnArray:
if phia is not None:
return rhoa, phia
else:
return rhoa
return ret
    def checkData(self, data=None):
        """Return data values from container, creating 'rhoa' if needed.

        THINKABOUT: Data will be changed, or should the manager keep a copy?
        """
        # NOTE(review): `data or ...` falls back for ANY falsy argument, not
        # only None -- confirm DataContainer truthiness is intended here.
        data = data or pg.DataContainerERT(self.data)

        if isinstance(data, pg.DataContainer):
            if not data.allNonZero('k'):
                pg.warn("Data file contains no geometric factors (token='k').")
                data['k'] = createGeometricFactors(data, verbose=True)

            if self.fop.complex():
                if not data.haveData('rhoa'):
                    pg.critical('Datacontainer have no "rhoa" values.')
                if not data.haveData('ip'):
                    pg.critical('Datacontainer have no "ip" values.')

                # pg.warn('check sign of phases')
                rhoa = data['rhoa']
                phia = -data['ip']/1000  # 'ip' is defined for neg mrad.
                # we should think about some 'phia' in rad

                # pack amplitude and phase into the interleaved real format
                # the complex forward operator expects
                return pg.utils.squeezeComplex(pg.utils.toComplex(rhoa, phia))

            else:
                if not data.haveData('rhoa'):
                    # derive apparent resistivities from whatever is there
                    if data.allNonZero('r'):
                        pg.info("Creating apparent resistivies from "
                                "impedences rhoa = r * k")
                        data['rhoa'] = data['r'] * data['k']
                    elif data.allNonZero('u') and data.allNonZero('i'):
                        pg.info("Creating apparent resistivies from "
                                "voltage and currrent rhoa = u/i * k")
                        data['rhoa'] = data['u']/data['i'] * data['k']
                    else:
                        pg.critical("Datacontainer have neither: "
                                    "apparent resistivies 'rhoa', "
                                    "or impedances 'r', "
                                    "or voltage 'u' along with current 'i'.")

                # negative rhoa cannot go through a log transformation
                if any(data['rhoa'] < 0) and \
                        isinstance(self.inv.dataTrans, pg.core.TransLog):
                    print(pg.find(data['rhoa'] < 0))
                    print(data['rhoa'][data['rhoa'] < 0])
                    pg.critical("Found negative apparent resistivities. "
                                "These can't be processed with logarithmic "
                                "data transformation. You should consider to "
                                "filter them out using "
                                "data.remove(data['rhoa'] < 0).")

                return data['rhoa']

        return data
def checkErrors(self, err, dataVals):
"""Return relative error.
Default we assume 'err' are relative vales.
"""
if isinstance(err, pg.DataContainer):
rae = None
if not err.allNonZero('err'):
pg.warn("Datacontainer have no 'err' values. "
"Fallback of 1mV + 3% using "
"ERTManager.estimateError(...) ")
rae = self.estimateError(err, absoluteError=0.001,
relativeError=0.03)
else:
rae = err['err']
if self.fop.complex():
ipe = None
if err.haveData('iperr'):
amp, phi = pg.utils.toPolar(dataVals)
# assuming ipErr are absolute dPhi in mrad
ipe = err['iperr'] / abs((phi*1000))
else:
pg.warn("Datacontainer have no 'iperr' values. "
"Fallback set to 0.01")
ipe = np.ones(err.size()) * 0.01
return pg.cat(rae, ipe)
return rae # not set if err is no DataContainer (else missing)
    def estimateError(self, data=None, **kwargs):
        """Estimate error composed of an absolute and a relative part.

        Parameters
        ----------
        absoluteError : float [0.001]
            Absolute data error in Ohm m. Need 'rhoa' values in data.
        relativeError : float [0.03]
            relative error level in %/100
        absoluteUError : float [0.001]
            Absolute potential error in V. Need 'u' values in data. Or
            calculate them from 'rhoa', 'k' and absoluteCurrent if no 'i'
            is given
        absoluteCurrent : float [0.1]
            Current level in A for reconstruction for absolute potential V

        Returns
        -------
        error : Array
        """
        # `estimateError` below is the module-level helper imported from
        # .ert, not a recursive call
        if data is None:
            error = estimateError(self.data, **kwargs)
            self.data["err"] = error
        else:  # the old way: better use ert.estimateError directly
            error = estimateError(data, **kwargs)

        return error
    def coverage(self):
        """Coverage vector considering the logarithmic transformation."""
        covTrans = pg.core.coverageDCtrans(self.fop.jacobian(),
                                           1.0 / self.inv.response,
                                           1.0 / self.inv.model)
        # accumulate the total cell size per model parameter
        # (cell marker == parameter index in the parameter domain)
        paramSizes = np.zeros(len(self.inv.model))
        for c in self.fop.paraDomain.cells():
            paramSizes[c.marker()] += c.size()

        return np.log10(covTrans / paramSizes)
def standardizedCoverage(self, threshhold=0.01):
"""Return standardized coverage vector (0|1) using thresholding."""
return 1.0*(abs(self.coverage()) > threshhold)
    def saveResult(self, folder=None, size=(16, 10), **kwargs):
        """Save all results in the specified folder.

        Saved items are:
            Inverted profile
            Resistivity vector
            Coverage vector
            Standardized coverage vector
            Mesh (bms and vtk with results)

        Parameters
        ----------
        folder: str [None]
            Target folder; a class-named subfolder is created inside it.
        size: tuple [(16, 10)]
            Figure size for the exported 2D result plot.

        Returns
        -------
        path (and, for 2D domains, the created figure and axis).
        """
        subfolder = self.__class__.__name__
        path = getSavePath(folder, subfolder)

        pg.info('Saving resistivity data to: {}'.format(path))

        np.savetxt(path + '/resistivity.vector',
                   self.model)
        np.savetxt(path + '/resistivity-cov.vector',
                   self.coverage())
        np.savetxt(path + '/resistivity-scov.vector',
                   self.standardizedCoverage())

        # attach results to a copy of the parameter domain and export it
        m = pg.Mesh(self.paraDomain)
        m['Resistivity'] = self.paraModel(self.model)
        m['Resistivity (log10)'] = np.log10(m['Resistivity'])
        m['Coverage'] = self.coverage()
        m['S_Coverage'] = self.standardizedCoverage()
        m.exportVTK(os.path.join(path, 'resistivity'))
        m.saveBinaryV2(os.path.join(path, 'resistivity-pd'))
        self.fop.mesh().save(os.path.join(path, 'resistivity-mesh'))

        if self.paraDomain.dim() == 2:
            fig, ax = plt.subplots(figsize=size)
            self.showResult(ax=ax, coverage=self.coverage(), **kwargs)
            fig.savefig(path + '/resistivity.pdf', bbox_inches="tight")
            return path, fig, ax

        return path
if __name__ == "__main__":
pass
|
JuliusHen/gimli | pygimli/viewer/__init__.py | # -*- coding: utf-8 -*-
"""Interface for 2D and 3D visualizations."""
from .mpl import hold, wait
from .showmesh import plt, show, showBoundaryNorm, showMesh
from .showmatrix import showMatrix
|
JuliusHen/gimli | pygimli/frameworks/methodManager.py | <filename>pygimli/frameworks/methodManager.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Method Manager
Provide the end user interface for method (geophysical) dependent
modelling and inversion as well as data and model visualization.
"""
import numpy as np
import pygimli as pg
from pygimli.utils import prettyFloat as pf
def fit(funct, data, err=None, **kwargs):
    """Generic function fitter.

    Fit data to a given function.

    TODO
    ----
    * Dictionary support for funct to submit user data..

    Parameters
    ----------
    funct: callable
        Function with the first argmument as data space, e.g., x, t, f, Nr. ..
        Any following arguments are the parameters to be fit.
        Except if a verbose flag if used.
    data: iterable (float)
        Data values
    err: iterable (float) [None]
        Data error values in %/100. Default is 1% if None are given.

    Other Parameters
    ----------------
    *dataSpace*: iterable
        Keyword argument of the data space of len(data).
        The name need to fit the first argument of funct.

    Returns
    -------
    model: array
        Fitted model parameter.
    response: array
        Model response.

    Example
    -------
    >>> import pygimli as pg
    >>>
    >>> func = lambda t, a, b: a*np.exp(b*t)
    >>> t = np.linspace(1, 2, 20)
    >>> data = func(t, 1.1, 2.2)
    >>> model, response = pg.frameworks.fit(func, data, t=t)
    >>> print(pg.core.round(model, 1e-5))
    2 [1.1, 2.2]
    >>> _ = pg.plt.plot(t, data, 'o', label='data')
    >>> _ = pg.plt.plot(t, response, label='response')
    >>> _ = pg.plt.legend()
    """
    # a Marquardt-scheme manager around the wrapped function
    manager = ParameterInversionManager(funct, **kwargs)
    fittedModel = manager.invert(data, err, **kwargs)
    return fittedModel, manager.fw.response
# TG: harmonicFit does not really belong here as it is no curve fit
# We should rather use a class Decomposition
# Discuss .. rename to Framework or InversionFramework since he only manages
# the union of Inversion/Modelling and RegionManager(later)
class MethodManager(object):
    """General manager to maintain a measurement method.

    Method Manager are the interface to end-user interaction and can be seen as
    simple but complete application classes which manage all tasks of
    geophysical data processing.

    The method manager holds one instance of a forward operator and an
    appropriate inversion framework to handle modelling and data inversion.

    Method Manager also helps with data import and export,
    handle measurement data error estimation as well as model and data
    visualization.

    Attributes
    ----------
    verbose : bool
        Give verbose output.
    debug : bool
        Give debug output.
    fop : :py:mod:`pygimli.frameworks.Modelling`
        Forward Operator instance .. knows the physics.
        fop is initialized by
        :py:mod:`pygimli.manager.MethodManager.initForwardOperator`
        and calls a valid
        :py:mod:`pygimli.manager.MethodManager.createForwardOperator` method
        in any derived classes.
    inv : :py:mod:`pygimli.frameworks.Inversion`.
        Inversion framework instance .. knows the reconstruction approach.
        The attribute inv is initialized by default but can be changed
        overwriting
        :py:mod:`pygimli.manager.MethodManager.initInversionFramework`
    """

    def __init__(self, fop=None, fw=None, data=None, **kwargs):
        """Constructor."""
        self._fop = fop
        self._fw = fw
        # we hold our own copy of the data
        self._verbose = kwargs.pop('verbose', False)
        self._debug = kwargs.pop('debug', False)
        self.data = None

        if data is not None:
            if isinstance(data, str):
                self.load(data)
            else:
                self.data = data

        # The inversion framework
        self._initInversionFramework(verbose=self._verbose,
                                     debug=self._debug)

        # The forward operator is stored in self._fw
        self._initForwardOperator(verbose=self._verbose, **kwargs)

        # maybe obsolete
        self.figs = {}
        self.errIsAbsolute = False

    def __hash__(self):
        """Create a hash for Method Manager."""
        return pg.utils.strHash(str(type(self))) ^ hash(self.fop)

    @property
    def verbose(self):
        """Verbosity flag, also forwarded to the inversion framework."""
        return self._verbose

    @verbose.setter
    def verbose(self, v):
        self._verbose = v
        self.fw.verbose = self._verbose

    @property
    def debug(self):
        """Debug flag, also forwarded to the inversion framework."""
        return self._debug

    @debug.setter
    def debug(self, v):
        self._debug = v
        self.fw.debug = self._debug

    @property
    def fw(self):
        """Inversion framework instance."""
        return self._fw

    @property
    def fop(self):
        """Forward operator held by the inversion framework."""
        return self.fw.fop

    @property
    def inv(self):
        """Alias for the inversion framework."""
        return self.fw

    @property
    def model(self):
        """Model of the last inversion run."""
        return self.fw.model

    def reinitForwardOperator(self, **kwargs):
        """Reinitialize the forward operator.

        Sometimes it can be useful to reinitialize the forward operator.
        Keyword arguments will be forwarded to 'self.createForwardOperator'.
        """
        self._initForwardOperator(**kwargs)

    def _initForwardOperator(self, **kwargs):
        """Initialize or re-initialize the forward operator.

        Called once in the constructor to force the manager to create the
        necessary forward operator member. Can be recalled if you need to
        changed the mangers own forward operator object. If you want an own
        instance of a valid FOP call createForwardOperator.
        """
        if self._fop is not None:
            fop = self._fop
        else:
            fop = self.createForwardOperator(**kwargs)

        if fop is None:
            pg.critical("It seems that createForwardOperator method "
                        "does not return a valid forward operator.")
        if self.fw is not None:
            # reset clears any state bound to the previous fop
            self.fw.reset()
            self.fw.setForwardOperator(fop)
        else:
            pg.critical("No inversion framework defined.")

    def createForwardOperator(self, **kwargs):
        """Mandatory interface for derived classes.

        Here you need to specify which kind of forward operator FOP
        you want to use.
        This is called by any initForwardOperator() call.

        Parameters
        ----------
        **kwargs
            Any arguments that are necessary for your FOP creation.

        Returns
        -------
        Modelling
            Instance of any kind of :py:mod:`pygimli.framework.Modelling`.
        """
        pg.critical("No forward operator defined, either give one or "
                    "overwrite in derived class")

    def _initInversionFramework(self, **kwargs):
        """Initialize or re-initialize the inversion framework.

        Called once in the constructor to force the manager to create the
        necessary Framework instance.
        """
        self._fw = self.createInversionFramework(**kwargs)

        if self.fw is None:
            pg.critical("createInversionFramework does not return "
                        "valid inversion framework.")

    def createInversionFramework(self, **kwargs):
        """Create default Inversion framework.

        Derived classes may overwrite this method.

        Parameters
        ----------
        **kwargs
            Any arguments that are necessary for your creation.

        Returns
        -------
        Inversion
            Instance of any kind of :py:mod:`pygimli.framework.Inversion`.
        """
        if self._fw is None:
            return pg.frameworks.Inversion(**kwargs)
        else:
            return self._fw

    def load(self, fileName):
        """API, overwrite in derived classes."""
        pg.critical('API, overwrite in derived classes', fileName)

    def estimateError(self, data, errLevel=0.01, absError=None):
        # TODO check, rel or abs in return.
        """Estimate data error.

        Create an error of estimated measurement error.
        On default it returns an array of constant relative errors.
        More sophisticated error estimation should be done
        in specialized derived classes.

        Parameters
        ----------
        data : iterable
            Data values for which the errors should be estimated.
        errLevel : float (0.01)
            Error level in percent/100 (i.e., 3% = 0.03).
        absError : float (None)
            Absolute error in the unit of the data.

        Returns
        -------
        err : array
            Returning array of size len(data)
        """
        if absError is not None:
            return absError + data * errLevel

        return np.ones(len(data)) * errLevel

    def simulate(self, model, **kwargs):
        """Run a simulation aka the forward task.

        If a `noiseLevel` > 0 is given, relative Gaussian noise is added
        and the tuple (response, error) is returned instead of the
        response alone.
        """
        ra = self.fop.response(par=model)

        noiseLevel = kwargs.pop('noiseLevel', 0.0)
        if noiseLevel > 0:
            err = self.estimateError(ra, errLevel=noiseLevel)
            ra *= 1. + pg.randn(ra.size(), seed=kwargs.pop('seed', None)) * err
            return ra, err

        return ra

    def setData(self, data):
        """Set a data and distribute it to the forward operator"""
        self.data = data
        self.applyData(data)

    def applyData(self, data):
        """Pass the data on to the forward operator."""
        self.fop.data = data

    def checkData(self, data):
        """Overwrite for special checks to return data values"""
        # if self._dataToken == 'nan':
        #     pg.critical('self._dataToken nan, should be set in class', self)
        #     return data(self._dataToken)
        return data

    def _ensureData(self, data):
        """Check data validity"""
        if data is None:
            data = self.fw.dataVals

        vals = self.checkData(data)

        if vals is None:
            pg.critical("There are no data values.")

        # NOTE(review): abs(min(vals)) only catches a smallest value near
        # zero; a zero between negative and positive data would slip
        # through — confirm whether min(abs(vals)) was intended.
        if abs(min(vals)) < 1e-12:
            print(min(vals), max(vals))
            pg.critical("There are zero data values.")

        return vals

    def checkError(self, err, dataVals=None):
        """Return relative error. Default we assume 'err' are relative values.
        Overwrite is derived class if needed. """
        if isinstance(err, pg.DataContainer):
            if not err.haveData('err'):
                pg.error('Datacontainer have no "err" values. '
                         'Fallback set to 0.01')
                # Bugfix: actually apply the announced 1% fallback instead
                # of returning the missing (zero-filled) 'err' entry, which
                # made _ensureError abort on min(err) <= 0.
                return np.ones(err.size()) * 0.01
            return err['err']

        return err

    def _ensureError(self, err, dataVals=None):
        """Check error validity"""
        if err is None:
            err = self.fw.errorVals

        vals = self.checkError(err, dataVals)

        if vals is None:
            pg.warn('No data error given, set Fallback set to 1%')
            vals = np.ones(len(dataVals)) * 0.01

        try:
            if min(vals) <= 0:
                pg.critical("All error values need to be larger then 0. Either"
                            " give and err argument or fill dataContainer "
                            " with a valid 'err' ", min(vals), max(vals))
        except ValueError:
            pg.critical("Can't estimate data error")

        return vals

    def preRun(self, *args, **kwargs):
        """Called just before the inversion run starts."""
        pass

    def postRun(self, *args, **kwargs):
        """Called just after the inversion run."""
        pass

    def invert(self, data=None, err=None, **kwargs):
        """Invert the data.

        Invert the data by calling self.inv.run() with mandatory data and
        error values.

        TODO
            *need dataVals mandatory? what about already loaded data

        Parameters
        ----------
        data : iterable
            Data values to be inverted.
        err : iterable | float
            Error value for the given data.
            If err is float we assume this means to be a global relative
            error and force self.estimateError to be called.
        """
        if data is not None:
            self.data = data
        else:
            data = self.data

        dataVals = self._ensureData(data)
        errVals = self._ensureError(err, dataVals)

        self.preRun(**kwargs)
        self.fw.run(dataVals, errVals, **kwargs)
        self.postRun(**kwargs)

        return self.fw.model

    def showModel(self, model, ax=None, **kwargs):
        """Show a model.

        Draw model into a given axes or show inversion result from last run.
        Forwards on default to the self.fop.drawModel function
        of the modelling operator.
        If there is no function given, you have to override this method.

        Parameters
        ----------
        ax : mpl axes
            Axes object to draw into. Create a new if its not given.
        model : iterable
            Model data to be draw.

        Returns
        -------
        ax, cbar
        """
        if ax is None:
            fig, ax = pg.plt.subplots()

        ax, cBar = self.fop.drawModel(ax, model, **kwargs)
        return ax, cBar

    def showData(self, data=None, ax=None, **kwargs):
        """Show the data.

        Draw data values into a given axes or show the data values from
        the last run.
        Forwards on default to the self.fop.drawData function
        of the modelling operator.
        If there is no given function given, you have to override this method.

        Parameters
        ----------
        ax : mpl axes
            Axes object to draw into. Create a new if its not given.
        data : iterable | pg.DataContainer
            Data values to be draw.

        Returns
        -------
        ax, cbar
        """
        if ax is None:
            fig, ax = pg.plt.subplots()

        if data is None:
            data = self.data

        return self.fop.drawData(ax, data, **kwargs), None

    def showResult(self, model=None, ax=None, **kwargs):
        """Show the last inversion result.

        TODO
        ----
         DRY: decide showModel or showResult

        Parameters
        ----------
        ax : mpl axes
            Axes object to draw into. Create a new if its not given.
        model : iterable [None]
            Model values to be draw. Default is self.model from the last run

        Returns
        -------
        ax, cbar
        """
        if model is None:
            model = self.model

        return self.showModel(model, ax=ax, **kwargs)

    def showFit(self, ax=None, **kwargs):
        """Show the last inversion data and response."""
        ax, cBar = self.showData(data=self.inv.dataVals,
                                 error=self.inv.errorVals,
                                 label='Data',
                                 ax=ax, **kwargs)
        ax, cBar = self.showData(data=self.inv.response,
                                 label='Response',
                                 ax=ax, **kwargs)

        if not kwargs.pop('hideFittingAnnotation', False):
            fittext = r"rrms: {0}, $\chi^2$: {1}".format(
                pf(self.fw.inv.relrms()), pf(self.fw.inv.chi2()))
            ax.text(0.99, 0.005, fittext,
                    transform=ax.transAxes,
                    horizontalalignment='right',
                    verticalalignment='bottom',
                    fontsize=8)

        if not kwargs.pop('hideLegend', False):
            ax.legend()

        return ax, cBar

    def showResultAndFit(self, **kwargs):
        """Calls showResults and showFit."""
        fig = pg.plt.figure()
        ax = fig.add_subplot(1, 2, 1)
        self.showResult(ax=ax, model=self.model, **kwargs)

        ax1 = fig.add_subplot(2, 2, 2)
        ax2 = fig.add_subplot(2, 2, 4)
        self.showFit(axs=[ax1, ax2], **kwargs)

        fig.tight_layout()
        return fig

    @staticmethod
    def createArgParser(dataSuffix='dat'):
        """Create default argument parser.

        TODO move this to some kind of app class

        Create default argument parser for the following options:

        -Q, --quiet
        -R, --robustData: options.robustData
        -B, --blockyModel: options.blockyModel
        -l, --lambda: options.lam
        -i, --maxIter: options.maxIter
        --depth: options.depth
        """
        import argparse

        parser = argparse.ArgumentParser(
            description="usage: %prog [options] *." + dataSuffix)

        parser.add_argument("-Q", "--quiet", dest="quiet",
                            action="store_true", default=False,
                            help="Be verbose.")
        # parser.add_argument("-R", "--robustData", dest="robustData",
        #                     action="store_true", default=False,
        #                     help="Robust data (L1 norm) minimization.")
        # parser.add_argument("-B", "--blockyModel", dest="blockyModel",
        #                     action="store_true", default=False,
        #                     help="Blocky model (L1 norm) regularization.")
        parser.add_argument('-l', "--lambda", dest="lam", type=float,
                            default=100,
                            help="Regularization strength.")
        parser.add_argument('-i', "--maxIter", dest="maxIter", type=int,
                            default=20,
                            help="Maximum iteration count.")
        # parser.add_argument("--depth", dest="depth", type=float,
        #                     default=None,
        #                     help="Depth of inversion domain. [None=auto].")
        parser.add_argument('dataFileName')
        return parser
class ParameterInversionManager(MethodManager):
    """Framework to invert unconstrained parameters."""

    def __init__(self, funct=None, fop=None, **kwargs):
        """Initialize from a ready-made fop or a plain callable."""
        if fop is not None:
            if not isinstance(fop, pg.frameworks.ParameterModelling):
                pg.critical("We need a fop if type ",
                            pg.frameworks.ParameterModelling)
        elif funct is not None:
            # wrap the bare function into a parameter modelling operator
            fop = pg.frameworks.ParameterModelling(funct)
        else:
            pg.critical("you should either give a valid fop or a function so "
                        "I can create the fop for you")

        super(ParameterInversionManager, self).__init__(fop, **kwargs)

    def createInversionFramework(self, **kwargs):
        """Create a Marquardt-scheme inversion framework."""
        return pg.frameworks.MarquardtInversion(**kwargs)

    def invert(self, data=None, err=None, **kwargs):
        """Invert data for the function parameters.

        Parameters
        ----------
        limits: {str: [min, max]}
            Set limits for parameter by parameter name.
        startModel: {str: startModel}
            Set the start value for parameter by parameter name.
        """
        dataSpace = kwargs.pop(self.fop.dataSpaceName, None)
        if dataSpace is not None:
            self.fop.dataSpace = dataSpace

        # per-parameter limits, addressed by region (parameter) name
        for name, rng in kwargs.pop('limits', {}).items():
            self.fop.setRegionProperties(name, limits=rng)

        startModel = kwargs.pop('startModel', {})
        if isinstance(startModel, dict):
            for name, val in startModel.items():
                self.fop.setRegionProperties(name, startModel=val)
        else:
            # scalar/iterable start model is handled by the framework itself
            kwargs['startModel'] = startModel

        return super(ParameterInversionManager, self).invert(data=data,
                                                             err=err,
                                                             **kwargs)
class MethodManager1d(MethodManager):
    """Method Manager base class for managers on a 1d discretization."""

    def __init__(self, fop=None, **kwargs):
        """Initialize with an optional forward operator."""
        super(MethodManager1d, self).__init__(fop, **kwargs)

    def createInversionFramework(self, **kwargs):
        """Create a block-wise 1D inversion framework."""
        return pg.frameworks.Block1DInversion(**kwargs)

    def invert(self, data=None, err=None, **kwargs):
        """Invert data values with their associated errors."""
        return super(MethodManager1d, self).invert(data=data, err=err,
                                                   **kwargs)
class MeshMethodManager(MethodManager):
    """Method manager for mesh-based forward modelling and inversion."""

    def __init__(self, **kwargs):
        """Constructor.

        Attribute
        ---------
        mesh: pg.Mesh
            Copy of the main mesh to be distributed to inversion and the fop.
            You can overwrite it with invert(mesh=mesh).
        """
        super(MeshMethodManager, self).__init__(**kwargs)
        self.mesh = None

    @property
    def paraDomain(self):
        # parameter domain of the forward operator
        return self.fop.paraDomain

    def paraModel(self, model=None):
        """Give the model parameter regarding the parameter mesh."""
        if model is None:
            model = self.fw.model
        return self.fop.paraModel(model)

    def createMesh(self, data=None, **kwargs):
        """API, implement in derived classes."""
        pg.critical('no default mesh generation defined .. implement in '
                    'derived class')

    def setMesh(self, mesh, **kwargs):
        """Set a mesh and distribute it to the forward operator"""
        self.mesh = mesh
        self.applyMesh(mesh, **kwargs)

    def applyMesh(self, mesh, ignoreRegionManager=False, **kwargs):
        """Pass the mesh to the fop, optionally bypassing region handling."""
        if ignoreRegionManager:
            # refine before use so secondary nodes/P2 cells are present
            mesh = self.fop.createRefinedFwdMesh(mesh, **kwargs)

        self.fop.setMesh(mesh, ignoreRegionManager=ignoreRegionManager)

    def invert(self, data=None, mesh=None, zWeight=1.0, startModel=None,
               **kwargs):
        """Run the full inversion.

        Parameters
        ----------
        data : pg.DataContainer
        mesh : pg.Mesh [None]
        zWeight : float [1.0]
        startModel : float | iterable [None]

            If set to None fop.createDefaultStartModel(dataValues) is called.

        Keyword Arguments
        -----------------
        forwarded to Inversion.run

        Returns
        -------
        model : array
            Model mapped for match the paraDomain Cell markers.
            The calculated model is in self.fw.model.
        """
        if data is None:
            data = self.data

        if data is None:
            pg.critical('No data given for inversion')

        self.applyData(data)

        # no mesh given and there is no mesh known .. we create them
        if mesh is None and self.mesh is None:
            mesh = self.createMesh(data, **kwargs)

        # a mesh was given or created so we forward it to the fop
        if mesh is not None:
            self.setMesh(mesh)

        # remove unused keyword argument .. need better kwargs handling
        self.fop._refineP2 = kwargs.pop('refineP2', False)

        dataVals = self._ensureData(self.fop.data)
        # err is taken from the data container ('err' entry) here
        errorVals = self._ensureError(self.fop.data, dataVals)

        if self.fop.mesh() is None:
            pg.critical('Please provide a mesh')

        # inversion will call this itself as default behaviour
        # if startModel is None:
        #     startModel = self.fop.createStartModel(dataVals)

        # pg._g('invert-dats', dataVals)
        # pg._g('invert-err', errVals)
        # pg._g('invert-sm', startModel)

        kwargs['startModel'] = startModel

        self.fop.setRegionProperties('*', zWeight=zWeight)

        # Limits is no mesh related argument here or base??
        limits = kwargs.pop('limits', None)
        if limits is not None:
            self.fop.setRegionProperties('*', limits=limits)

        self.preRun(**kwargs)
        self.fw.run(dataVals, errorVals, **kwargs)
        self.postRun(**kwargs)

        return self.paraModel(self.fw.model)

    def showFit(self, axs=None, **kwargs):
        """Show data and the inversion result model response."""
        orientation = 'vertical'
        if axs is None:
            fig, axs = pg.plt.subplots(nrows=1, ncols=2)
            orientation = 'horizontal'

        self.showData(data=self.inv.dataVals,
                      orientation=orientation,
                      ax=axs[0], **kwargs)
        axs[0].text(0.0, 1.03, "Data",
                    transform=axs[0].transAxes,
                    horizontalalignment='left',
                    verticalalignment='center')

        resp = None
        data = None
        if 'model' in kwargs:
            # explicit model given: recompute its response for comparison
            resp = self.fop.response(kwargs['model'])
            data = self._ensureData(self.fop.data)
        else:
            resp = self.inv.response
            data = self.fw.dataVals

        self.showData(data=resp,
                      orientation=orientation,
                      ax=axs[1], **kwargs)
        axs[1].text(0.0, 1.03, "Response",
                    transform=axs[1].transAxes,
                    horizontalalignment='left',
                    verticalalignment='center')

        fittext = r"rrms: {0}%, $\chi^2$: {1}".format(
            pg.pf(pg.utils.rrms(data, resp)*100),
            pg.pf(self.fw.chi2History[-1]))
        axs[1].text(1.0, 1.03, fittext,
                    transform=axs[1].transAxes,
                    horizontalalignment='right',
                    verticalalignment='center')

        # if not kwargs.pop('hideFittingAnnotation', False):
        #     axs[0].text(0.01, 1.0025, "rrms: {0}, $\chi^2$: {1}"
        #             .format(pg.utils.prettyFloat(self.fw.inv.relrms()),
        #                     pg.utils.prettyFloat(self.fw.inv.chi2())),
        #             transform=axs[0].transAxes,
        #             horizontalalignment='left',
        #             verticalalignment='bottom')

        return axs

    def coverage(self):
        """Return coverage vector considering the logarithmic transformation.
        """
        covTrans = pg.core.coverageDCtrans(self.fop.jacobian(),
                                           1.0 / self.inv.response,
                                           1.0 / self.inv.model)
        nCells = self.fop.paraDomain.cellCount()
        return np.log10(covTrans[:nCells] / self.fop.paraDomain.cellSizes())

    def standardizedCoverage(self, threshhold=0.01):
        """Return standardized coverage vector (0|1) using thresholding.
        """
        return 1.0*(abs(self.coverage()) > threshhold)
class PetroInversionManager(MeshMethodManager):
    """Class for petrophysical inversion (s. Rücker et al. 2017)."""

    def __init__(self, petro, mgr=None, **kwargs):
        """Initialize instance with manager and petrophysical relation."""
        petrofop = kwargs.pop('petrofop', None)

        if petrofop is None:
            fop = kwargs.pop('fop', None)

            if fop is None and mgr is not None:
                # Check! why I can't use mgr.fop
                fop = mgr.createForwardOperator()
                self.checkData = mgr.checkData
                self.checkError = mgr.checkError

            # wrap the plain fop into a petrophysical modelling operator
            if fop is not None and not isinstance(
                    fop, pg.frameworks.PetroModelling):
                petrofop = pg.frameworks.PetroModelling(fop, petro)

        if petrofop is None:
            print(mgr)
            print(fop)
            pg.critical('implement me')

        super().__init__(fop=petrofop, **kwargs)
# Really necessary? Should a combination of petro and joint do the same
class JointPetroInversionManager(MeshMethodManager):
    """Joint inversion targeting at the same parameter through petrophysics."""

    def __init__(self, petros, mgrs):
        """Initialize with lists of managers and transformations"""
        self.mgrs = mgrs

        # one petrophysical operator per (relation, manager) pair
        self.fops = []
        for petro, mgr in zip(petros, mgrs):
            self.fops.append(pg.frameworks.PetroModelling(mgr.fop, petro))

        super().__init__(fop=pg.frameworks.JointModelling(self.fops))

        # just hold a local copy
        self.dataTrans = pg.trans.TransCumulative()

    def checkError(self, err, data=None):
        """Collect error values."""
        if len(err) != len(self.mgrs):
            pg.critical("Please provide data for all managers")

        vals = pg.Vector(0)
        for i, mgr in enumerate(self.mgrs):
            # re-fetch the data values so each error matches its data part
            dataVals = mgr.checkData(self.fop._data[i])
            vals = pg.cat(vals, mgr.checkError(err[i], dataVals))
        return vals

    def checkData(self, data):
        """Collect data values."""
        if len(data) != len(self.mgrs):
            pg.critical("Please provide data for all managers")

        self.dataTrans.clear()
        vals = pg.Vector(0)
        for i, mgr in enumerate(self.mgrs):
            self.dataTrans.add(mgr.inv.dataTrans, data[i].size())
            vals = pg.cat(vals, mgr.checkData(data[i]))

        self.inv.dataTrans = self.dataTrans
        return vals

    def invert(self, data, **kwargs):
        """Run inversion"""
        lower, upper = kwargs.pop('limits', [0., 1.])
        self.fop.modelTrans.setLowerBound(lower)
        self.fop.modelTrans.setUpperBound(upper)

        # default start model: midpoint of the allowed range
        kwargs.setdefault('startModel', (upper + lower) / 2.)

        return super().invert(data, **kwargs)
|
PedroAmorim/Photobooth | config-dist.py | <gh_stars>0
# GPIO Config (BCM pin numbering, see photobooth.py GPIO.setmode)
led_pin = 27  # LED
btn_pin = 17  # pin for the start button
shutdown_btn_pin = 18  # pin for the shutdown button
print_btn_pin = 12  # pin for the print button
print_led_pin = 13  # pin for the print LED
# Config settings to change behavior of photo booth
# width of the display monitor
monitor_w = 800
# height of the display monitor
monitor_h = 480
# path to save images
# Should end with a "/"
file_path = '/home/pi/photobooth/pics/'
# True will clear previously stored photos as the program launches. False
# will leave all previous photos.
clear_on_startup = False
# how long to debounce the button. Add more time if the button triggers
# too many times.
# Time in milliseconds
debounce = 1000
# True to make an animated gif. False to post 4 jpgs into one post.
# Need GraphicsMagick -- http://www.graphicsmagick.org/
make_gifs = False
# adjust for lighting issues. Normal is 100 or 200. Sort of dark is 400.
# Dark is 800 max.
# available options: 100, 200, 320, 400, 500, 640, 800
camera_iso = 800
# full frame of v1 camera is 2592x1944. Wide screen max is 2592,1555
# if you run into resource issues, try smaller, like 1920x1152.
# or increase memory
# Should preserve the aspect ratio 4/3
# http://picamera.readthedocs.io/en/release-1.12/fov.html#hardware-limits
camera_high_res_w = 1296  # width (max 2592)
camera_high_res_h = 972  # height (max 1944)
# enable color on camera preview
camera_color_preview = True
# camera orientation: True = landscape, False = portrait
camera_landscape = False
# Configure sudoers on your system, to can execute shutdown without password
# Add this line in file /etc/sudoers
# myUser ALL = (root) NOPASSWD: /sbin/halt
enable_shutdown_btn = False
# Printing configuration
enable_print_btn = False
# Maximum number of prints for the same image
max_print = 3
# Debug mode (shows mouse cursor, disables fullscreen, prints sizing info)
debug_mode = False
|
PedroAmorim/Photobooth | tests/playSound.py | #!/usr/bin/env python
# -*-coding:utf-8 -*
import os
import pygame
import time
# initialize pygame (Python 2 script: note the print statements below)
pygame.mixer.pre_init(44100, -16, 2, 2048)  # setup mixer to avoid sound lag
pygame.init()
# load the shutter sound relative to this script's location
capture = pygame.mixer.Sound(os.path.dirname(os.path.realpath(__file__)) + "/../camera-shutter-sound.wav")  # load sound
# Main
print "Test before play sound"
capture.play()
time.sleep(0.5)  # Wait 500 ms for the sound to coincide with the capture of the picture.
print "Test after play sound"
time.sleep(1)
print "End"
|
PedroAmorim/Photobooth | photobooth.py | <filename>photobooth.py
#!/usr/bin/env python
# -*-coding:utf-8 -*
#
# Initial project created by <EMAIL>
# Current fork made by <NAME> (<EMAIL>) and <NAME>
import os
import glob
import time
from time import sleep
import traceback
import RPi.GPIO as GPIO
import picamera # http://picamera.readthedocs.org/en/release-1.4/install2.html
import atexit
import sys
import pygame
from pygame.locals import QUIT, KEYDOWN, K_ESCAPE, K_SPACE, K_p
import config # this is the config python file config.py
import cups
####################
# Variables Config #
####################
# Mirror the GPIO pin assignments from config.py as module-level names.
led_pin = config.led_pin
btn_pin = config.btn_pin
shutdown_btn_pin = config.shutdown_btn_pin
print_btn_pin = config.print_btn_pin
print_led_pin = config.print_led_pin
total_pics = 4  # number of pics to be taken
capture_delay = 2  # delay between pics
prep_delay = 3  # number of seconds at step 1 as users prep to have photo taken
gif_delay = 100  # How much time between frames in the animated gif
restart_delay = 3  # how long to display finished message before beginning a new session
# how much to wait in-between showing pics on-screen after taking
replay_delay = 1
replay_cycles = 1  # how many times to show each photo on-screen after taking
# full frame of v1 camera is 2592x1944. Wide screen max is 2592,1555
# if you run into resource issues, try smaller, like 1920x1152.
# or increase memory
# http://picamera.readthedocs.io/en/release-1.12/fov.html#hardware-limits
high_res_w = config.camera_high_res_w  # width of high res image, if taken
high_res_h = config.camera_high_res_h  # height of high res image, if taken
# Preview window size (Python 2: / below is integer division)
if config.camera_landscape:
    preview_w = config.monitor_w
    preview_h = config.monitor_h
else:
    # NOTE(review): numerator multiplies monitor_h by itself; for a portrait
    # preview scaled from the monitor aspect one would expect monitor_w in
    # the product — confirm the intended formula.
    preview_w = (config.monitor_h * config.monitor_h) / config.monitor_w
    preview_h = config.monitor_h
#######################
# Photobooth image #
#######################
# Image ratio 4/3
image_h = 525
image_w = 700
margin = 50
# Printed image ratio 3/2
output_h = 1200
output_w = 1800
#############
# Variables #
#############
# Do not change these variables, as the code will change it anyway
transform_x = config.monitor_w  # how wide to scale the jpg when replaying
# NOTE(review): this name is misspelled ('transfrom'); the set_demensions()
# helpers assign a correctly spelled global 'transform_y', so this initial
# value is never updated by them — keep spelling consistent when editing.
transfrom_y = config.monitor_h  # how high to scale the jpg when replaying
offset_x = 0  # how far off to left corner to display photos
offset_y = 0  # how far off to left corner to display photos
print_counter = 0
print_error = 'OK'
last_image_save = 'no_file'
# Portrait mode: swap width/height of all image-size constants.
if not config.camera_landscape:
    tmp = image_h
    image_h = image_w
    image_w = tmp
    tmp = output_h
    output_h = output_w
    output_w = tmp
    tmp = high_res_h
    high_res_h = high_res_w
    high_res_w = tmp
################
# Other Config #
################
# Absolute directory of this script, used for bundled assets (images, sounds).
real_path = os.path.dirname(os.path.realpath(__file__))


def log(text):
    """Print a timestamped log line: 'YYYY/MM/DD HH:MM:SS | text'."""
    print time.strftime('%Y/%m/%d %H:%M:%S') + " | " + text
###########################
# Init output directories #
###########################
# Check directory is writable
now = str(time.time()).split('.')[0]  # get the current timestamp, and remove milliseconds
# NOTE(review): os.path.exists only checks existence, not writability,
# although the log message says "not writeable" — confirm intent.
if (not os.path.exists(config.file_path)):
    log("ERROR config.file_path not writeable fallback to SD : " + config.file_path)
    output_path = real_path + "/" + now + "/"
else:
    output_path = config.file_path + now + "/"
output_path_photobooth = output_path + "photobooth/"
# Create directories (0777 is a Python 2 octal literal)
os.makedirs(output_path_photobooth, 0777)
if (not os.access(output_path_photobooth, os.W_OK)):
    log("ERROR output_path_photobooth not writeable: " + output_path_photobooth)
    sys.exit()
##############
# Initialize #
##############
# GPIO setup
# GPIO setup: BCM numbering; buttons are pulled up (pressed == LOW).
GPIO.setmode(GPIO.BCM)
GPIO.setup(led_pin, GPIO.OUT)  # LED
GPIO.setup(btn_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(shutdown_btn_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(print_btn_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(print_led_pin, GPIO.OUT)  # LED
# for some reason the pin turns on at the beginning of the program. Why?
GPIO.output(led_pin, False)
GPIO.output(print_led_pin, False)
# initialize pygame and the display surface
pygame.init()
pygame.display.set_mode((config.monitor_w, config.monitor_h))
screen = pygame.display.get_surface()
pygame.display.set_caption('Photo Booth Pics')
if not config.debug_mode:
    pygame.mouse.set_visible(False)  # hide the mouse cursor
    pygame.display.toggle_fullscreen()
# shutter sound played when a picture is captured
capture = pygame.mixer.Sound(real_path + "/camera-shutter-sound.wav")
#############
# Functions #
#############
@atexit.register
def cleanup():
    """
    @brief clean up running programs as needed when main program exits

    Registered with atexit so pygame and the GPIO pins are always released.
    """
    log('Ended abruptly!')
    pygame.quit()
    GPIO.cleanup()
def clear_pics(channel):
    """
    @brief delete files in pics folder and blink both LEDs as confirmation

    @param channel The GPIO channel that triggered the callback (unused)
    """
    files = glob.glob(output_path + '*')
    for f in files:
        os.remove(f)
    # light the lights in series to show completed
    log("Deleted previous pics")
    for x in range(0, 3):  # blink light
        GPIO.output(led_pin, True)
        GPIO.output(print_led_pin, True)
        sleep(0.25)
        GPIO.output(led_pin, False)
        GPIO.output(print_led_pin, False)
        sleep(0.25)
def set_demensions(img_w, img_h):
    """
    @brief Set variables to properly display the image on full screen at right ratio

    Letterboxes/pillarboxes the image inside the monitor while keeping its
    aspect ratio, storing the results in module globals.

    Note this only works when in booting in desktop mode.
    When running in terminal, the size is not correct (it displays small).
    Why?

    @param img_w The image w
    @param img_h The image h
    """
    # connect to global vars
    # NOTE(review): the module initializes a global spelled 'transfrom_y';
    # this assigns 'transform_y' (correct spelling), i.e. a different global.
    global transform_y, transform_x, offset_y, offset_x

    # based on output screen resolution, calculate how to display
    # (Python 2: / is integer division here)
    ratio_h = (config.monitor_w * img_h) / img_w

    if (ratio_h < config.monitor_h):
        # Use horizontal black bars
        transform_y = ratio_h
        transform_x = config.monitor_w
        offset_y = (config.monitor_h - ratio_h) / 2
        offset_x = 0
    elif (ratio_h > config.monitor_h):
        # Use vertical black bars
        transform_x = (config.monitor_h * img_w) / img_h
        transform_y = config.monitor_h
        offset_x = (config.monitor_w - transform_x) / 2
        offset_y = 0
    else:
        # No need for black bars as photo ratio equals screen ratio
        transform_x = config.monitor_w
        transform_y = config.monitor_h
        offset_y = offset_x = 0

    if config.debug_mode:
        log("Screen resolution debug:")
        print str(img_w) + " x " + str(img_h)
        print "ratio_h: " + str(ratio_h)
        print "transform_x: " + str(transform_x)
        print "transform_y: " + str(transform_y)
        print "offset_y: " + str(offset_y)
        print "offset_x: " + str(offset_x)
def set_demensions_preview(img_w, img_h):
    """
    @brief Set variables to properly display the image on screen at right ratio

    Same as set_demensions() but the offsets center a preview scaled to 3/4
    of the computed size (see show_image_print).

    @param img_w The image w
    @param img_h The image h
    """
    # connect to global vars
    global transform_y, transform_x, offset_y, offset_x

    # based on output screen resolution, calculate how to display
    # (Python 2: / is integer division here)
    ratio_h = (config.monitor_w * img_h) / img_w

    if (ratio_h < config.monitor_h):
        # Use horizontal black bars
        transform_y = ratio_h
        transform_x = config.monitor_w
        offset_y = (config.monitor_h - ratio_h * 3 / 4) / 2
        offset_x = 0
    elif (ratio_h > config.monitor_h):
        # Use vertical black bars
        transform_x = (config.monitor_h * img_w) / img_h
        transform_y = config.monitor_h
        offset_x = (config.monitor_w - transform_x * 3 / 4) / 2
        offset_y = 0
    else:
        # No need for black bars as photo ratio equals screen ratio
        transform_x = config.monitor_w
        transform_y = config.monitor_h
        offset_y = offset_x = 0

    if config.debug_mode:
        log("Screen resolution debug:")
        print str(img_w) + " x " + str(img_h)
        print "ratio_h: " + str(ratio_h)
        print "transform_x: " + str(transform_x)
        print "transform_y: " + str(transform_y)
        print "offset_y: " + str(offset_y)
        print "offset_x: " + str(offset_x)
def show_image(image_path):
    """
    @brief Display one image on screen, letterboxed to the monitor ratio
    @param image_path The image path
    """
    # clear the screen
    screen.fill((0, 0, 0))
    # load the image
    img = pygame.image.load(image_path)
    img = img.convert()
    # set pixel dimensions based on image
    set_demensions(img.get_width(), img.get_height())
    # rescale the image to fit the current display
    # BUGFIX: was "transfrom_y" (typo), which raised NameError at runtime.
    img = pygame.transform.scale(img, (transform_x, transform_y))
    screen.blit(img, (offset_x, offset_y))
    pygame.display.flip()
def show_image_print(image_path):
    """
    @brief Display the "printing" screen overlaid with a 3/4-scale preview
           of the image being printed, then return to the intro screen.
    @param image_path The image path
    """
    show_image(real_path + "/printing.png")
    # Load image
    img = pygame.image.load(image_path)
    # set pixel dimensions based on image (preview variant centres a 3/4 image)
    set_demensions_preview(img.get_width(), img.get_height())
    # rescale the image to fit the current display
    # BUGFIX: was "transfrom_y" (typo), which raised NameError at runtime.
    img = pygame.transform.scale(img, (transform_x * 3 / 4, transform_y * 3 / 4))
    screen.blit(img, (offset_x, offset_y))
    pygame.display.flip()
    sleep(restart_delay)
    show_intro()
def clear_screen():
    """
    @brief display a blank screen
    """
    # Fill the framebuffer with black and push it to the display.
    screen.fill((0, 0, 0))
    pygame.display.flip()
def display_pics(jpg_group):
    """
    @brief Display a group of images, replaying the whole set several times
    @param jpg_group The jpg group (timestamp prefix shared by the session's shots)
    """
    # BUGFIX: both loops previously used the same variable "i", shadowing
    # the replay-cycle counter with the picture index.
    for cycle in range(0, replay_cycles):  # show pics a few times
        for pic in range(1, total_pics + 1):  # show each pic
            # NOTE(review): "-0" + str(pic) assumes total_pics < 10.
            show_image(output_path + jpg_group + "-0" + str(pic) + ".jpg")
            sleep(replay_delay)  # pause
def make_led_blinking(pin, counter=5, duration=0.25):
    """
    @brief Make blinking a led with oneline code
    @param pin Led pin (or tuple of pins, as accepted by GPIO.output)
    @param counter Number of time the led blink
    @param duration Delay in seconds between the on and off states
    """
    # The LED always ends in the OFF state after the last cycle.
    for x in range(0, counter):
        GPIO.output(pin, True)
        sleep(duration)
        GPIO.output(pin, False)
        sleep(duration)
def start_photobooth():
    """
    @brief Define the photo taking function for when the big button is pressed

    Full session flow: show instructions, take `total_pics` photos with the
    Pi camera, build the composite (and optionally an animated gif), replay
    the shots on screen, then return to the intro screen.
    """
    # connect to global vars
    global print_counter, print_error
    #
    # Begin Step 1: instructions screen, LEDs off while busy.
    #
    log("Get Ready from " + real_path)
    GPIO.output(led_pin, False)
    GPIO.output(print_led_pin, False)
    show_image(real_path + "/instructions.png")
    sleep(prep_delay)
    # clear the screen
    clear_screen()
    camera = picamera.PiCamera()
    # Desaturate the preview for black & white setups.
    if not config.camera_color_preview:
        camera.saturation = -100
    camera.iso = config.camera_iso
    # set camera resolution to high res
    camera.resolution = (high_res_w, high_res_h)
    #
    # Begin Step 2: capture loop.
    #
    log("Taking pics")
    # get the current timestamp, and remove milliseconds
    now = str(time.time()).split('.')[0]
    try: # take the photos
        for i in range(1, total_pics + 1):
            filename = output_path + now + '-0' + str(i) + '.jpg'
            show_image(real_path + "/pose" + str(i) + ".png")
            sleep(capture_delay) # pause in-between shots
            clear_screen()
            # preview a mirror image
            camera.hflip = True
            camera.start_preview(resolution=(preview_w, preview_h))
            sleep(2) # warm up camera
            GPIO.output(led_pin, True) # turn on the LED
            camera.hflip = False # flip back when taking photo
            # Play sound
            capture.play()
            sleep(0.5) # Wait 500 ms for the sound to coincide with the capture of the picture.
            # Capture!
            camera.capture(filename)
            log("Capture : " + filename)
            camera.stop_preview()
            GPIO.output(led_pin, False) # turn off the LED
    except Exception, e:
        # Python 2 except syntax -- this script targets Python 2.
        tb = sys.exc_info()[2]
        traceback.print_exception(e.__class__, e, tb)
        pygame.quit()
    finally:
        # Always release the camera, even after a failed capture.
        camera.close()
    #
    # Begin Step 3: post-processing (gif + composite picture).
    #
    show_image(real_path + "/processing.png")
    if config.make_gifs: # make the gifs
        log("Creating an animated gif")
        # make an animated gif via GraphicsMagick (shelled out).
        graphicsmagick = "gm convert -delay " + \
            str(gif_delay) + " " + output_path + now + \
            "*.jpg " + output_path + now + ".gif"
        os.system(graphicsmagick) # make the .gif
    log("Creating a photo booth picture")
    photobooth_image(now)
    # reset print counter (each session may print up to config.max_print)
    print_counter = 0
    #
    # Begin Step 4: replay the shots, then re-arm the buttons.
    #
    try:
        display_pics(now)
    except Exception, e:
        tb = sys.exc_info()[2]
        traceback.print_exception(e.__class__, e, tb)
        pygame.quit()
    log("Done")
    show_image(real_path + "/finished.png")
    sleep(restart_delay)
    show_intro()
    # turn on the LED
    GPIO.output(led_pin, True)
    # Only re-light the print LED when the printer is not in an error state.
    if print_error == 'OK':
        GPIO.output(print_led_pin, True)
def shutdown(channel):
    """
    @brief Shutdown the RaspberryPi
    @param channel GPIO channel that fired (passed in by GPIO.add_event_detect)

    config sudoers to be available to execute shutdown without password
    Add this line in file /etc/sudoers
    myUser ALL = (root) NOPASSWD: /sbin/halt
    """
    print("Your RaspberryPi will be shut down in few seconds...")
    # Release display and GPIO resources before powering off.
    pygame.quit()
    GPIO.cleanup()
    os.system("sudo halt -p")
def photobooth_image(now):
    """Assemble the four shots of a session onto the background template
    and save the composite as <now>.jpg in the photobooth output folder,
    remembering its path in `last_image_save` for the print button."""
    # connect to global vars
    global last_image_save
    # Background template, rotated for portrait camera setups, then
    # stretched to the output size.
    bgimage = pygame.image.load(real_path + "/bgimage.png")
    if not config.camera_landscape:
        bgimage = pygame.transform.rotate(bgimage, 270)
    bgimage = pygame.transform.scale(bgimage, (output_w, output_h))
    # Paste the four shots onto a 2x2 grid separated by `margin` pixels.
    grid = (
        (margin, margin),
        (margin * 2 + image_w, margin),
        (margin, margin * 2 + image_h),
        (margin * 2 + image_w, margin * 2 + image_h),
    )
    for idx, position in enumerate(grid, start=1):
        shot = pygame.image.load(output_path + now + "-0" + str(idx) + ".jpg")
        shot = pygame.transform.scale(shot, (image_w, image_h))
        bgimage.blit(shot, position)
    # Persist the composite only when the target directory is writable.
    if (os.access(output_path_photobooth, os.W_OK)):
        last_image_save = output_path_photobooth + now + ".jpg"
        pygame.image.save(bgimage, last_image_save)
        if config.debug_mode:
            log("INFO last image save: " + last_image_save)
    else:
        log("ERROR path not writeable: " + output_path_photobooth)
def print_image():
    """
    @brief Send the last composite image to the first CUPS printer,
    recovering from a previous printer error state and enforcing the
    per-session print quota (config.max_print).
    """
    # connect to global vars
    global print_counter, print_error
    # Connect to cups and select printer 0
    conn = cups.Connection()
    printers = conn.getPrinters()
    # NOTE(review): keys()[0] relies on Python 2 (dict.keys() returns a
    # list); Python 3 would need list(printers.keys())[0].
    printer_name = printers.keys()[0]
    if print_error != 'OK':
        log("Printer restart after error")
        # restart printer so pending jobs in the queue resume
        conn.disablePrinter(printer_name)
        sleep(2)
        conn.enablePrinter(printer_name)
        print_error = 'OK'
        GPIO.output(print_led_pin, True) # Turn LED on
        show_intro() # Reset screen
        return # End here, printer should restart jobs pendings on the queue
    # Check if printer status is available
    # 3 => Printer is ready!
    # 4 => is printing, but OK, push to printing queue
    # 5 => Failure, no paper tray, no paper, ribbon depleted
    printerAtt = conn.getPrinterAttributes(printer_name)
    log("Printer status : (" + str(printerAtt['printer-state']) + ") " + printerAtt['printer-state-message'])
    if (printerAtt['printer-state'] == 5):
        log("Printer error : (" + str(printerAtt['printer-state']) + ") " + printerAtt['printer-state-message'])
        make_led_blinking(print_led_pin, 6, 0.15) # LED blinking
        # Remember the failure message; show_intro() maps it to a screen.
        print_error = printerAtt['printer-state-message']
        show_intro()
        return # End here, led is Off, wait for human action
    if not os.path.isfile(last_image_save):
        log("No image " + " : " + last_image_save)
    elif print_counter < config.max_print:
        print_counter += 1 # increase counter
        GPIO.output(print_led_pin, False)
        # Launch printing (skipped in debug mode to save paper/ink)
        if not config.debug_mode:
            conn.printFile(printer_name, last_image_save, "PhotoBooth", {})
        show_image_print(last_image_save)
        log("Launch printing request on " + printer_name + " : " + last_image_save)
        sleep(1)
        # Turn LED on
        GPIO.output(print_led_pin, True)
    else:
        make_led_blinking(print_led_pin, 3, 0.15) # LED blinking, at the end LED is off
        log("You have reach print quota for image " + " : " + last_image_save)
def show_intro():
    """Display the idle screen, or the error screen matching the last
    recorded printer failure message in `print_error`."""
    global print_error
    # Resolve the screen file from the printer state, then display it once.
    if print_error == 'OK':
        screen_name = "/intro.png"
    elif print_error == 'Ribbon depleted!':
        screen_name = "/error_ink.png"
    elif print_error in ('Paper feed problem!', 'No paper tray loaded, aborting!'):
        screen_name = "/error_paper.png"
    elif print_error == 'Printer open failure (No suitable printers found!)':
        screen_name = "/error_printer_off.png"
    else:
        # Any unrecognized message falls back to the generic printer error.
        screen_name = "/error_printer.png"
    show_image(real_path + screen_name)
##################
# Main Program #
##################
# clear the previously stored pics based on config settings
if config.clear_on_startup:
    clear_pics(1)
# Add event listener to catch shutdown request
if config.enable_shutdown_btn:
    GPIO.add_event_detect(shutdown_btn_pin, GPIO.FALLING, callback=shutdown, bouncetime=config.debounce)
# If printing enable, add event listener on print button
if config.enable_print_btn:
    GPIO.add_event_detect(print_btn_pin, GPIO.FALLING, bouncetime=config.debounce)
# Setup button start_photobooth
# DON'T USE THREADED CALLBACKS
GPIO.add_event_detect(btn_pin, GPIO.FALLING, bouncetime=config.debounce)
log("Photo booth app running...")
# blink light to show the app is running (both LEDs at once)
make_led_blinking((print_led_pin, led_pin)) # LED blinking
show_image(real_path + "/intro.png")
# turn on the light showing users they can push the button
GPIO.output(led_pin, True)
GPIO.output(print_led_pin, True)
# Main event loop: polls keyboard and GPIO buttons once a second.
while True:
    sleep(1)
    # Keyboard shortcuts
    for event in pygame.event.get():
        # pygame.QUIT is sent when the user clicks the window's "X" button
        if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
            sys.exit()
        # Start photobooth with key "space"
        elif event.type == KEYDOWN and event.key == K_SPACE:
            start_photobooth()
        # Print last image with key "P"
        elif event.type == KEYDOWN and event.key == K_p:
            print_image()
    # Detect event on start button
    if GPIO.event_detected(btn_pin):
        start_photobooth()
    # Detect event on print button (only if printing is enabled)
    if config.enable_print_btn and GPIO.event_detected(print_btn_pin):
        print_image()
|
RtKelleher/ServiceNow_Reporting | snow_reporting.py | <reponame>RtKelleher/ServiceNow_Reporting
#!/usr/bin/env python3
# encoding: utf-8
""" Python script to create servicenow tickets from hive cases. Reducing the need for duplication in CMS.
"""
from cortexutils.responder import Responder
import requests
import yaml
import re
import json
# Load the responder configuration (ServiceNow + Hive sections) once at
# import time; `cfg` is consumed by CreateCase.payload()/run().
with open("config.yml", "r") as f:
    cfg = yaml.load(f, Loader=yaml.FullLoader)
class CreateCase(Responder):
    """Cortex responder that mirrors a TheHive case into a ServiceNow
    incident and tags the Hive case with the created incident number."""

    def __init__(self):
        Responder.__init__(self)
        # Connection settings come from the Cortex responder config.
        self.url = self.get_param("config.url", None, "SN url is missing")
        self.user = self.get_param("config.user", None, "SN user is missing")
        self.pwd = self.get_param("config.password", None, "SN password missing")

    def operations(self, raw):
        """Return the tag operations Cortex applies to the Hive case after
        a successful run (marker tag + the ServiceNow incident number)."""
        return [
            (self.build_operation("AddTagToCase", tag="Responder:ServiceNow Case Created")),
            (self.build_operation("AddTagToCase", tag=str(self.response.json()["result"]["number"]))),
        ]

    def payload(self, cfg):
        """Build the ServiceNow incident payload from config plus case data.

        BUGFIX: the original assigned ``payload.add = ...`` which raises
        AttributeError on a dict; the intent was clearly to merge the
        config sections and the case fields into one payload dict.
        """
        payload = dict(cfg["servicenow"])
        payload.update(cfg["hive"])
        payload.update({
            "short_description": self.get_param("data.title", None),
            "comments": self.url + "/index.html#!/case/" + self.get_param("data._routing") + "/details",
            "assigned_to": self.get_param("data.owner") + "@" + cfg["servicenow"]["company"],
        })
        return payload

    def run(self):
        Responder.run(self)
        tags = self.get_param("data.tags", None)
        for tag in tags:
            # Incident numbers are added back as tags; a matching tag means
            # this case was already mirrored to ServiceNow.
            check_for_SN = re.match(r"^[\w]{9}:[\w]{10}\W[\w]{4}\W[\w]{7}", tag)
            if not check_for_SN:
                headers = {"Content-Type": "application/json", "Accept": "application/json"}
                # BUGFIX: was `payload = payload(cfg)`, which raised
                # UnboundLocalError -- the method must be called via self.
                body = self.payload(cfg)
                self.response = requests.post(
                    (self.url + "/api/now/table/incident"),
                    auth=(self.user, self.pwd),
                    headers=headers,
                    data=json.dumps(body),
                )
                if self.response.status_code == 200 or self.response.status_code == 201:
                    self.report({"message": json.loads(self.response.text)})
                else:
                    self.error(self.response.status_code)
            else:
                self.error("Case already exists")
# Cortex executes responders as standalone scripts, feeding input on stdin.
if __name__ == "__main__":
    CreateCase().run()
|
lenforiee/LenHTTP | lenhttp/lenhttp.py | import socket
import asyncio
import os
import re
import http
import gzip
import select
import signal
import json
import traceback
from .timer import Timer
from urllib.parse import unquote
from .logger import info, error, warning
from typing import Any, Union, Tuple, Dict, Callable, Coroutine, List, Iterable, Optional
# Map of HTTP status codes to their reason phrases, e.g. 200 -> "OK".
STATUS_CODE = {c.value: c.phrase for c in http.HTTPStatus}
# Sadly no windows support.
if os.name == "nt":
    raise OSError("You can't use this package on windows machine!")
class Glob:
    """Process-wide settings shared by the server and request objects."""
    # Optional JSON serializer override (set via the "json_serialize" kwarg).
    json = None
    # Global logging toggle (set via the "logging" kwarg).
    logging = False
glob = Glob()
class CaseInsensitiveDict:
    """A Python dictionary equivalent with case insensitive keys.

    String keys are lowercased on every read and write; non-string keys
    are stored unchanged.
    """

    __slots__ = ("_dict",)

    def __init__(self, d: dict = None) -> None:
        """Creates an instance of `CaseInsensitiveDict`. If `d` is set, the
        data in it will be converted into case insensitive."""
        self._dict: dict = {}
        # Dict convertion.
        if d:
            if not isinstance(d, dict):
                raise ValueError("Only conversion of dict is supported.")
            self.__conv_dict(d)

    def __repr__(self) -> str:
        """String representation of the CaseInsensitiveDict."""
        return f"<CaseInsensitiveDict {self._dict!r}>"

    # Dictionary Functionality.
    def __setitem__(self, key, val) -> None:
        """Sets an item to the CaseInsensitiveDict."""
        if key.__class__ is str: key = key.lower()
        self._dict[key] = val

    def __getitem__(self, key):
        """Retrieves an item from the dictionary, raising a `KeyError` if not
        found."""
        if key.__class__ is str: key = key.lower()
        return self._dict[key]

    def __delitem__(self, key):
        """Deletes an item from the dictionary, raising a `KeyError` if not found."""
        if key.__class__ is str: key = key.lower()
        del self._dict[key]

    def __iter__(self):
        """Simple iteration support, iterating over the keys."""
        for k in self._dict: yield k

    def __bool__(self) -> bool:
        """Returns True when the dict holds at least one item.

        BUGFIX: this was previously named ``__not__``, which is not a real
        dunder method, so ``bool(d)`` / ``not d`` silently ignored it.
        """
        return bool(self._dict)

    def __concat__(self, d: Union[dict, 'CaseInsensitiveDict']) -> None:
        """Expands the current dict.

        NOTE(review): ``__concat__`` is not invoked by any operator; callers
        must call it explicitly. Kept for backward compatibility.
        """
        self.__conv_dict(d)

    def __contains__(self, key) -> bool:
        """Checks if the dict contains the key `key`."""
        if key.__class__ is str: key = key.lower()
        return key in self._dict

    def __conv_dict(self, d: Union[dict, 'CaseInsensitiveDict']) -> None:
        """Converts data from the dictionary `d` to our storage format.
        Note:
            This does NOT clear the data of the CaseInsensitiveDict.
        """
        for k, v in d.items():
            if k.__class__ is str: k = k.lower()
            self._dict[k] = v

    def items(self):
        """Iterates over all items and keys of the dict."""
        return self._dict.items()

    def keys(self) -> tuple:
        """Returns all keys of the dictionary as a `tuple`."""
        return tuple(self._dict.keys())

    def get(self, key, default = None):
        """Returns the value of a `key` in the dict, returning `default` if
        key does not exist."""
        if key.__class__ is str: key = key.lower()
        return self._dict.get(key, default)
class Request:
    """A class for parsing incomming web request.

    Reads the raw HTTP request off a non-blocking socket, parses the
    request line, headers, query args and (for POST) the body, and offers
    helpers to send the response back on the same socket.
    """
    def __init__(
        self,
        client: socket.socket,
        loop: asyncio.AbstractEventLoop
    ) -> None:
        self.__client: socket.socket = client
        self.__loop: asyncio.AbstractEventLoop = loop
        # Request-line defaults, overwritten by _parse_headers().
        self.type: str = "GET"
        self.http_ver: str = "1.1"
        self.path: str = "/"
        self.body: bytearray = bytearray()
        self.elapsed: str = "0ms" # Logging purposes.
        self.conns_served: int = 0
        self.headers: CaseInsensitiveDict = CaseInsensitiveDict()
        self.get_args: Dict[str, Any] = {}
        self.post_args: Dict[str, Any] = {}
        self.files: Dict[str, Any] = {}
        # Positional args handed to every endpoint handler (request first).
        self.handle_args: list = [self]
        # Response state filled in by the server / handlers.
        self.resp_code: int = 200
        self.resp_headers: Dict[str, Any] = {}
    def add_header(self, key: str, value: Any) -> None:
        """Adds header to response back headers."""
        self.resp_headers[key] = value
    def _parse_headers(self, data: str) -> None:
        """Parse the request line, query string and headers.

        Params:
            - data: str = the decoded header section (everything before
              the first \r\n\r\n of the client data).

        Returns:
            None; fills self.type/path/version, self.get_args and
            self.headers in place.
        """
        self.type, self.path, version = data.splitlines()[0].split(" ")
        # NOTE(review): this sets self.version while __init__ declares
        # self.http_ver -- confirm which attribute consumers rely on.
        self.version = version.split("/")[1]
        # Parsing get args.
        if "?" in self.path:
            self.path, args = self.path.split("?")
            for arg in args.split("&"):
                key, value = arg.split("=", 1)
                self.get_args[unquote(key)] = unquote(value).strip()
        # Now headers.
        for key, value in [header.split(":", 1) for header in data.splitlines()[1:]]:
            self.headers[key] = value.strip()
    def _www_form_parser(self) -> None:
        """Optional parser for parsing urlencoded form data.

        Returns:
            None; updates self.post_args with the decoded form fields.
        """
        body_str = self.body.decode()
        for args in body_str.split("&"):
            k, v = args.split("=", 1)
            self.post_args[unquote(k).strip()] = unquote(v).strip()
    def return_json(self, code: int, content: Union[dict, str, Any]):
        """Returns an response but in json.

        Serializes `content` with the configured serializer (glob.json if
        set, else json.dumps), sets the status code and Content-Type, and
        returns the serialized payload for the handler to return.
        """
        self.resp_code = code
        json_parser = glob.json or json.dumps
        resp_back = json_parser(content)
        self.resp_headers["Content-Type"] = "application/json"
        return resp_back
    async def send(self, code: int, data: bytes) -> None:
        """Sends data back to the client.
        Params:
            - code: int = Status code to send back.
            - data: bytes = Bytes to send back.
        Returns:
            Sends all data to client.
        """
        resp = bytearray()
        temp = [f"HTTP/1.1 {code} {STATUS_CODE.get(code)}"]
        # Add content len
        if data:
            temp.append(f"Content-Length: {len(data)}")
        # Join headers.
        temp.extend(map(': '.join, self.resp_headers.items()))
        resp += ('\r\n'.join(temp) + '\r\n\r\n').encode()
        # Add body.
        if data:
            resp += data
        try: # Send all data to client.
            await self.__loop.sock_sendall(self.__client, resp)
        except Exception:
            # Client may have disconnected mid-response; nothing to do.
            pass
    def _parse_multipart(self) -> None:
        """Simplest instance funtion to parse
        multipart I found so far.
        Returns:
            Parsed files & post args from request.
        """
        # Create an boundary.
        boundary = "--" + self.headers['Content-Type'].split('boundary=', 1)[1]
        parts = self.body.split(boundary.encode())[1:]
        # The last split element is the closing "--\r\n" epilogue; skip it.
        for part in parts[:-1]:
            # We get headers & body.
            headers, body = part.split(b"\r\n\r\n", 1)
            temp_headers = CaseInsensitiveDict()
            for key, val in [p.split(":", 1) for p in [h for h in headers.decode().split("\r\n")[1:]]]:
                temp_headers[key] = val.strip()
            content = temp_headers.get("Content-Disposition")
            if not content:
                # Main header don't exist, we can't continue.
                continue
            temp_args = {}
            # Disposition params look like `name="field"`; strip the quotes.
            for key, val in [args.split("=", 1) for args in content.split(";")[1:]]:
                temp_args[key.strip()] = val[1:-1]
            # body[:-2] drops the trailing \r\n before the next boundary.
            if "filename" in temp_args: self.files[temp_args['filename']] = body[:-2] # It is a file.
            else: self.post_args[temp_args['name']] = body[:-2].decode() # It's a post arg.
    async def perform_parse(self) -> None:
        """Performs full parsing on headers and body bytes."""
        buffer = bytearray() # Bytearray is faster than bytes.
        # Read until the end of the header section is seen.
        while (offset := buffer.find(b"\r\n\r\n")) == -1:
            buffer += await self.__loop.sock_recv(self.__client, 1024)
        self._parse_headers(buffer[:offset].decode())
        # Headers are parsed so now we put rest to body.
        self.body += buffer[offset + 4:]
        try: content_len = int(self.headers["Content-Length"])
        except KeyError: return # Get args request only.
        if (to_read := ((offset + 4) + content_len) - len(buffer)): # Find how much to read.
            buffer += b"\x00" * to_read # Allocate space.
            # Read directly into the pre-allocated tail of the buffer.
            with memoryview(buffer)[-to_read:] as view:
                while to_read:
                    read_bytes = await self.__loop.sock_recv_into(self.__client, view)
                    view = view[read_bytes:]
                    to_read -= read_bytes
            # Add to body.
            self.body += memoryview(buffer)[offset + 4 + len(self.body):].tobytes()
        if self.type == "POST":
            if (ctx_type := self.headers.get("Content-Type")):
                if ctx_type.startswith("multipart/form-data") or \
                    "form-data" in ctx_type or "multipart/form-data" in ctx_type:
                    self._parse_multipart()
                elif ctx_type in ("x-www-form", "application/x-www-form-urlencoded"):
                    self._www_form_parser()
class Endpoint:
    """A route description: a path matcher plus its handler coroutine.

    `path` may be a plain string, a compiled regex, an iterable mixing
    both, or a string template with <name> placeholders (converted to a
    named-group regex on construction).
    """
    def __init__(
        self,
        path: Union[str, re.Pattern, Iterable],
        handler: Coroutine,
        methods: List[str] = ["GET"]
    ) -> None:
        # NOTE(review): the mutable default for `methods` is kept for
        # interface compatibility; the class never mutates it.
        self.path: Union[str, re.Pattern, Iterable] = path
        self.methods: List[str] = methods
        self.handler: Coroutine = handler
        self.condition: object = None
        # "/u/<id>" style templates become regexes with named groups.
        if not isinstance(self.path, re.Pattern) and all(char in self.path for char in ("<", ">")):
            self.path = re.compile(rf"{self.path.replace('<', '(?P<').replace('>', '>.+)')}")

    def parse_regex(self, path: str, regex_path: re.Pattern):
        """Match `path` against `regex_path`.

        Returns False on no match, True on a match without capture groups,
        or the list of URL-unquoted captured values otherwise.
        """
        if not (args := regex_path.match(path)):
            return False
        if not (adict := args.groupdict()):
            return True
        return [unquote(adict[key]) for key in adict]

    def match(self, path: str) -> Union[bool, List[Any]]:
        """Compares the path with current endpoint path."""
        if isinstance(self.path, re.Pattern):
            # Single regex path.
            return self.parse_regex(path, self.path)
        elif isinstance(self.path, str):
            # Plain string path: exact comparison.
            return self.path == path
        elif isinstance(self.path, Iterable):
            if path in self.path: return True
            # BUGFIX: previously only the FIRST regex in the iterable was
            # consulted; now every pattern gets a chance to match.
            for p in self.path:
                if isinstance(p, re.Pattern) and (res := self.parse_regex(path, p)):
                    return res
        return False
class Router:
    """A class for a single app router: groups endpoints under a domain."""
    def __init__(self, domain: Union[str, set, re.Pattern]) -> None:
        self.domain: Union[str, set, re.Pattern] = domain
        self.endpoints: set = set()
        self.before_serve: set = set()
        self.after_serve: set = set()

    def match(self, host: str) -> bool:
        """Performs some checks to match domain with host."""
        # str must be tested before Iterable (strings are iterable too).
        if isinstance(self.domain, str):
            return host == self.domain
        elif isinstance(self.domain, re.Pattern):
            return self.domain.match(host) is not None
        elif isinstance(self.domain, Iterable):
            if host in self.domain: return True
            # BUGFIX: previously only the FIRST regex in the iterable was
            # consulted; now every pattern is tried before giving up.
            for domain in self.domain:
                if isinstance(domain, re.Pattern) and domain.match(host) is not None:
                    return True
        return False

    def before_request(self) -> Callable:
        """Registers a coroutine run before every request on this router."""
        def wrapper(handler: Coroutine) -> Coroutine:
            self.before_serve.add(handler)
            return handler
        return wrapper

    def after_request(self) -> Callable:
        """Registers a coroutine run after every request on this router."""
        def wrapper(handler: Coroutine) -> Coroutine:
            self.after_serve.add(handler)
            return handler
        return wrapper

    def add_endpoint(self, path: Union[str, re.Pattern, Iterable], methods: List[str] = ["GET"]) -> Callable:
        """Decorator registering the wrapped coroutine as an Endpoint."""
        def wrapper(handler: Coroutine) -> Coroutine:
            self.endpoints.add(Endpoint(path, handler, methods))
            return handler
        return wrapper
class LenHTTP:
    """An http server class.

    Owns the listening socket (INET or UNIX), dispatches incoming
    connections to Request/Router handling, and manages lifecycle hooks
    (before/after serving) plus long-running background tasks.
    """
    def __init__(
        self,
        address: Union[Tuple[str, int], str],
        loop = asyncio.get_event_loop(),
        **kwargs
    ) -> None:
        # NOTE(review): the `loop` default is evaluated once at class
        # definition time -- all instances share it unless one is passed in.
        self.address: Union[Tuple[str, int], str] = address
        self.loop: asyncio.AbstractEventLoop = loop
        self.socket_fam: Optional[socket.AddressFamily] = None
        # gzip compression level (0 disables compression).
        self.gzip = kwargs.get("gzip", 0)
        # Listen backlog size for socket.listen().
        self.max_conns = kwargs.get("max_conns", 5)
        self.routers: set = set()
        # Maps status codes -> custom middleware handler coroutines.
        self.middleware_request: dict = {}
        self._conns_served: int = 0
        self.before_serving_coros: set = set()
        self.after_serving_coros: set = set()
        self.coro_tasks: set = set()
        self.tasks: set = set()
        # True when wrapped by the Application convenience class.
        self.app: bool = kwargs.get("app", False)
        if "logging" in kwargs: glob.logging = kwargs.pop("logging")
        if "json_serialize" in kwargs: glob.json = kwargs.pop("json_serialize")
    def add_router(self, router: Router) -> None:
        """Adds router to server."""
        self.routers.add(router)
    def add_routers(self, routers: set[Router]) -> None:
        """Adds routers to server."""
        self.routers |= routers
    def add_task(self, task: Coroutine, *args) -> None:
        """Adds task to server; extra args are passed to the coroutine."""
        if args:
            self.coro_tasks.add((task, args))
        else:
            self.coro_tasks.add(task)
    def add_tasks(self, tasks: set[Coroutine]) -> None:
        """Adds tasks to server."""
        self.coro_tasks |= tasks
    def add_middleware(self, code: int) -> Callable:
        """Adds an custom middleware for handling codes."""
        def wrapper(handler: Coroutine) -> Coroutine:
            self.middleware_request[code] = handler
            return handler
        return wrapper
    def find_router(self, host: str) -> Optional[Router]:
        """Finds the right router for the given Host header value."""
        for router in self.routers:
            if router.match(host):
                return router
    def find_endpoint(self, router: Router, path: str) -> Optional[Tuple[Union[List[Any], bool], Endpoint]]:
        """Match an endpoint with given path.

        Returns (match_result, endpoint) where match_result is either True
        or the list of captured path args, or None when nothing matched.
        """
        for endpoint in router.endpoints:
            if (check := endpoint.match(path)):
                return (check, endpoint)
    def before_serving(self) -> Callable:
        """Adds the coroutines to be started before server permanently starts."""
        def wrapper(handler: Coroutine) -> Coroutine:
            self.before_serving_coros.add(handler)
            return handler
        return wrapper
    def after_serving(self) -> Callable:
        """Adds the coroutines to be started after server close."""
        def wrapper(handler: Coroutine) -> Coroutine:
            self.after_serving_coros.add(handler)
            return handler
        return wrapper
    async def handle_route(self, request: Request) -> None:
        """Handle a request route: find router + endpoint, run hooks and
        middleware, optionally gzip, and send the response."""
        host = request.headers['Host']
        path = request.path
        # Default response until an endpoint matches.
        request.resp_code = 404
        resp = b"Request not found!"
        try:
            # Check if there is custom middleware handler.
            if (handler := self.middleware_request.get(request.resp_code)):
                resp = await handler(request)
                if isinstance(resp, str): resp = resp.encode()
            if not (router := self.find_router(host)):
                # No router matched the Host header: send the 404 now.
                request.elapsed = request.elapsed.time_str()
                if glob.logging:
                    info(f"{request.resp_code} | Handled {request.type} {host}{path} in {request.elapsed}")
                return await request.send(request.resp_code, resp)
            for coro in router.before_serve: await coro(request)
            if (found := self.find_endpoint(router, path)):
                check, endpoint = found
                if isinstance(check, list):
                    # Regex path: pass captured args after the request obj.
                    resp = await endpoint.handler(*request.handle_args, *check)
                    request.resp_code = 200
                else:
                    resp = await endpoint.handler(*request.handle_args)
                    request.resp_code = 200
                # NOTE(review): the method check runs AFTER the handler has
                # already been awaited -- confirm this ordering is intended.
                if request.type not in endpoint.methods:
                    request.resp_code = 405
                    resp = b"Method not allowed!"
                    if (handler := self.middleware_request.get(request.resp_code)):
                        resp = await handler(request)
            if isinstance(resp, tuple): request.resp_code, resp = resp # Convert it to variables.
            if isinstance(resp, str): resp = resp.encode()
            # Only gzip large, non-image responses when the client allows it.
            if self.gzip > 0 and "gzip" in \
                request.headers.get("Accept-Encoding", "") and \
                len(resp) > 1500 and request.resp_headers.get("Content-Type", "") \
                not in ('image/png', 'image/jpeg'):
                resp = gzip.compress(resp, self.gzip)
                request.add_header("Content-Encoding", "gzip")
            await request.send(request.resp_code, resp)
        except Exception:
            # Handler raised: report a 500, optionally via custom middleware.
            tb = traceback.format_exc()
            request.resp_code = 500
            resp = f"There was an exception\n{tb}".encode()
            if (handler := self.middleware_request.get(request.resp_code)):
                resp = await handler(request, tb)
                if isinstance(resp, str): resp = resp.encode()
            if glob.logging:
                error(f"There was an exception when handling path {request.path}\n{tb}")
            await request.send(request.resp_code, resp)
        # Time logging.
        # This is not accurate but its fine for someone who dont want to use my logger.
        request.elapsed = request.elapsed.time_str()
        # Serve coroutines after request with request class.
        for coro in router.after_serve: await coro(request)
    async def handle_request(self, client: socket.socket) -> None:
        """Handles a connection from socket: parse, route, close, log."""
        timer = Timer()
        timer.start()
        # Parse request.
        await (req := Request(client, self.loop)).perform_parse()
        req.elapsed = timer
        if "Host" not in req.headers:
            # Cannot route without a Host header; drop the connection.
            client.shutdown(socket.SHUT_RDWR)
            client.close()
            return
        # For statistics.
        req.conns_served = self._conns_served = self._conns_served + 1
        # Handle the route.
        await self.handle_route(req)
        # lastly close client.
        try:
            client.shutdown(socket.SHUT_RDWR)
            client.close()
        except Exception: pass
        if glob.logging:
            path = f"{req.headers['Host']}{req.path}"
            info(f"{req.resp_code} | Handled {req.type} {path} in {req.elapsed}")
    def start(self) -> None:
        """Starts an http server in perma loop.

        Blocks until a termination signal arrives; signals are routed
        through a wakeup pipe so the select() loop can notice them.
        """
        async def runner() -> None:
            # Resolve socket family and a printable address from `address`.
            if isinstance(self.address, tuple):
                addr_log = f"http://{self.address[0]}:{self.address[1]}/"
                self.socket_fam = socket.AF_INET
            elif isinstance(self.address, str):
                addr_log = f"{self.address} socket file."
                self.socket_fam = socket.AF_UNIX
            else: raise ValueError('Invalid address.')
            if self.socket_fam is socket.AF_UNIX:
                if os.path.exists(self.address):
                    # Unlink the unix socket
                    os.remove(self.address)
            # Starts before serving coros and tasks.
            for coro in self.before_serving_coros: await coro()
            for coroutine in self.coro_tasks:
                if isinstance(coroutine, tuple):
                    coro, args = coroutine
                    task = self.loop.create_task(coro(*args))
                else:
                    task = self.loop.create_task(coroutine())
                self.tasks.add(task)
            # Wakeup pipe: signal delivery writes a byte here so the
            # select() below can observe it without a handler.
            sig_rsock, sig_wsock = os.pipe()
            os.set_blocking(sig_wsock, False)
            signal.set_wakeup_fd(sig_wsock)
            # connection listening sock
            sock = socket.socket(self.socket_fam)
            sock.setblocking(False)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(self.address)
            if self.socket_fam is socket.AF_UNIX:
                os.chmod(self.address, 0o777)
            sock.listen(self.max_conns)
            if glob.logging:
                info(f"===== LenHTTP ({'ASGI' if not self.app else 'Application'}) running on {addr_log} =====")
            close = False
            while not close:
                await asyncio.sleep(0.01) # Python what the fuck.
                # Non-blocking poll over the listener and the signal pipe.
                rlist, _, _ = select.select([sock, sig_rsock], [], [], 0)
                for rd in rlist:
                    if rd is sock:
                        client, _ = await self.loop.sock_accept(sock)
                        self.loop.create_task(self.handle_request(client))
                    elif rd is sig_rsock:
                        print('\x1b[2K', end='\r') # Clears ^C.
                        if glob.logging:
                            error(f"Received an interuption all apps will be closed..")
                        close = True
                    else: raise ValueError(f"Invalid reader: {rd}") # Just don't read dat.
            # server closed, clean things up.
            for sock_fd in (sock.fileno(), sig_rsock, sig_wsock):
                os.close(sock_fd)
            signal.set_wakeup_fd(-1)
            for coro in self.after_serving_coros: await coro()
            if self.tasks:
                if glob.logging:
                    _plural = lambda a: f"{a}s" if len(self.tasks) > 1 else a
                    warning(f"Canceling {len(self.tasks)} active {_plural('task')}..")
                for task in self.tasks:
                    task.cancel()
                await asyncio.gather(*self.tasks, return_exceptions=False)
            # Give any remaining tasks a grace period before forcing them.
            if still_running := [t for t in asyncio.all_tasks()
                    if t is not asyncio.current_task()]:
                try:
                    if glob.logging:
                        warning("Awaiting all tasks timeout in 5 seconds!")
                    # NOTE(review): the `loop=` kwarg of asyncio.wait was
                    # removed in Python 3.10 -- confirm the supported range.
                    await asyncio.wait(still_running, loop=self.loop, timeout=5.0)
                except asyncio.TimeoutError:
                    if glob.logging:
                        warning("Timeout, force closing all running tasks!")
                    to_await = []
                    for task in still_running:
                        if not task.cancelled():
                            task.cancel()
                            to_await.append(task)
                    await asyncio.gather(*to_await, return_exceptions=False)
        def _callback(fut) -> None:
            """Calls after future is finished."""
            self.loop.stop()
        def _empty_func(sg, f) -> None:
            """Function to block other calls."""
            pass
        # Neutralize default handlers; the wakeup fd inside runner() is the
        # only signal delivery mechanism used.
        for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGHUP):
            signal.signal(sig, _empty_func)
        future = asyncio.ensure_future(runner(), loop=self.loop)
        future.add_done_callback(_callback)
        try:
            self.loop.run_forever()
        finally:
            future.remove_done_callback(_callback)
            if glob.logging:
                info(f"===== LenHTTP {'server' if not self.app else 'application'} is stopping =====")
            self.loop.close()
class Application(LenHTTP):
    """A standalone http app class.

    Note: This is a wrapper around LenHTTP that lets users register
    Endpoint objects directly, without managing a Router themselves.
    """
    def __init__(
        self,
        routes: List[Endpoint],
        **kwargs
    ) -> None:
        self.routes: List[Endpoint] = routes
        # Placeholder router every route is attached to; find_router is
        # overridden below so every host resolves to it.
        self.router: Router = Router("")
        # BUGFIX: the annotation was `eval` (the builtin function), which
        # is not a type; Callable describes the override correctly.
        self.find_router: Callable = lambda _: self.router
        kwargs["app"] = True
        # Exactly one of "port" (TCP) or "unix" (socket file) must be given.
        if "port" in kwargs and "unix" in kwargs:
            raise RuntimeError("Please choose either 'port' or 'unix', cannot use both!")
        elif "port" in kwargs:
            addr = (kwargs.pop("loopback", "0.0.0.0"), kwargs.pop('port'))
        elif "unix" in kwargs:
            addr = kwargs.pop('unix')
        else:
            raise ValueError("Invalid connection type supplied! Use either 'port' or 'unix'")
        super().__init__(addr, **kwargs)
        for route in self.routes:
            self.router.endpoints.add(route)
|
lenforiee/LenHTTP | example_app.py | <reponame>lenforiee/LenHTTP
import asyncio
from lenhttp import Endpoint, Request, Application, logger
async def home_page(request: Request):
    """Main page of app."""
    return "Hello on main page!"


async def users(request: Request, user_id: int):
    """Test function for regex routing; user_id arrives as a string
    captured from the <user_id> path placeholder."""
    return f"Hello user with ID: {user_id}"


app = Application(
    port=6969,
    logging=True,
    routes=[Endpoint("/", home_page), Endpoint("/u/<user_id>", users)]
)


# BUGFIX: both middleware handlers were previously named `error`, so the
# second definition shadowed the first at module level; distinct names
# keep both reachable and readable.
@app.add_middleware(404)
async def not_found_handler(request: Request):
    return "404 Not found!"


@app.add_middleware(500)
async def internal_error_handler(request: Request, traceback: str):
    return f"500 There was problem with handling request\n{traceback}".encode()


async def task():
    """Background task demonstrating app.add_task: logs every 5 seconds."""
    while True:
        await asyncio.sleep(5)
        logger.info("This will show every 5 secs.")


app.add_task(task)
app.start()
|
lenforiee/LenHTTP | setup.py | import setuptools
import os
# Read the long description from the README so PyPI renders the project page.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# NOTE(review): installation is refused on Windows up front — presumably the
# framework relies on POSIX-only APIs; confirm against the package sources.
if os.name == "nt":
    raise RuntimeError("You can't install this package on windows machine!")

setuptools.setup(
    name="LenHTTP",
    version="2.2.7",
    author="lenforiee",
    author_email="<EMAIL>",
    # Grammar fix in user-facing metadata: "An powerful" -> "A powerful".
    description="A powerful web framework written from scratch!",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/lenforiee/LenHTTP",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.8',
)
|
guiambros/libratbag | tools/ratbagc.py | # vim: set expandtab shiftwidth=4 tabstop=4:
#
# Copyright 2017 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import libratbag
import os
import sys
from evdev import ecodes
# Deferred translations, see https://docs.python.org/3/library/gettext.html#deferred-translations
def N_(x):
    """Mark *x* for deferred (lazy) gettext translation.

    Returns its argument unchanged; the surrounding tooling extracts the
    wrapped strings for translation catalogs.
    """
    return x
# we use a metaclass to automatically load symbols from libratbag in the classes
# define _PREFIX in subclasses to take advantage of this.
class MetaRatbag(type):
    """Metaclass that auto-loads libratbag constants into its classes.

    A class defining ``_PREFIX`` gets every upper-case libratbag symbol
    starting with that prefix copied in as a class attribute with the
    prefix stripped (e.g. ``RATBAG_LED_...`` -> ``...`` on RatbagdLed).
    """

    def __new__(cls, name, bases, dct):
        try:
            prefix = dct["_PREFIX"]
        except KeyError:
            # Classes without _PREFIX are created unchanged.
            pass
        else:
            for k in libratbag.__dict__.keys():
                if k.startswith(prefix) and k.isupper():
                    key = k[len(prefix):]
                    dct[key] = getattr(libratbag, k)
        c = type.__new__(cls, name, bases, dct)
        # Give the finished class a chance to run setup that needs the
        # constants copied above (see RatbagdButton.__late_init__).
        if "__late_init__" in dct:
            c.__late_init__()
        return c
class RatbagErrorCode(metaclass=MetaRatbag):
    """Mirror of libratbag's result codes.

    NOTE: each bare string below is positioned *before* the constant it
    describes (they are plain expression statements, not real docstrings).
    """
    RATBAG_SUCCESS = libratbag.RATBAG_SUCCESS
    """An error occured on the device. Either the device is not a libratbag
    device or communication with the device failed."""
    RATBAG_ERROR_DEVICE = libratbag.RATBAG_ERROR_DEVICE
    """Insufficient capabilities. This error occurs when a requested change is
    beyond the device's capabilities."""
    RATBAG_ERROR_CAPABILITY = libratbag.RATBAG_ERROR_CAPABILITY
    """Invalid value or value range. The provided value or value range is
    outside of the legal or supported range."""
    RATBAG_ERROR_VALUE = libratbag.RATBAG_ERROR_VALUE
    """A low-level system error has occured, e.g. a failure to access files
    that should be there. This error is usually unrecoverable and libratbag will
    print a log message with details about the error."""
    RATBAG_ERROR_SYSTEM = libratbag.RATBAG_ERROR_SYSTEM
    """Implementation bug, either in libratbag or in the caller. This error is
    usually unrecoverable and libratbag will print a log message with details
    about the error."""
    RATBAG_ERROR_IMPLEMENTATION = libratbag.RATBAG_ERROR_IMPLEMENTATION
class RatbagError(Exception):
    """A common base exception to catch any ratbag exception."""
    pass


class RatbagErrorDevice(RatbagError):
    """An exception corresponding to RatbagErrorCode.RATBAG_ERROR_DEVICE."""
    pass


class RatbagErrorCapability(RatbagError):
    """An exception corresponding to RatbagErrorCode.RATBAG_ERROR_CAPABILITY."""
    pass


class RatbagErrorValue(RatbagError):
    """An exception corresponding to RatbagErrorCode.RATBAG_ERROR_VALUE."""
    pass


class RatbagErrorSystem(RatbagError):
    """An exception corresponding to RatbagErrorCode.RATBAG_ERROR_SYSTEM."""
    pass


class RatbagErrorImplementation(RatbagError):
    """An exception corresponding to RatbagErrorCode.RATBAG_ERROR_IMPLEMENTATION."""
    pass
"""A table mapping RatbagErrorCode values to RatbagError* exceptions."""
EXCEPTION_TABLE = {
RatbagErrorCode.RATBAG_ERROR_DEVICE: RatbagErrorDevice,
RatbagErrorCode.RATBAG_ERROR_CAPABILITY: RatbagErrorCapability,
RatbagErrorCode.RATBAG_ERROR_VALUE: RatbagErrorValue,
RatbagErrorCode.RATBAG_ERROR_SYSTEM: RatbagErrorSystem,
RatbagErrorCode.RATBAG_ERROR_IMPLEMENTATION: RatbagErrorImplementation
}
class Ratbagd(object):
    """The ratbagd top-level object. Provides a list of devices available
    through libratbag; actual interaction with the devices is via the
    RatbagdDevice, RatbagdProfile, RatbagdResolution and RatbagdButton objects.
    """

    def __init__(self):
        # Create the libratbag context; devices are enumerated lazily on
        # first access of the `devices` property (see _init_devices()).
        self._ratbag = libratbag.ratbag_create_context(libratbag.interface, None)
        self._devices = {}
        self._devices_initialized = False

    def _init_devices(self):
        # Probe every /dev/input/event* node; nodes that are not
        # libratbag-compatible raise RatbagErrorDevice and are skipped.
        for event in os.listdir("/dev/input"):
            if not event.startswith("event"):
                continue
            name = os.path.join("/dev/input/", event)
            try:
                dev = RatbagdDevice(self._ratbag, name)
            except RatbagErrorDevice:
                pass
            else:
                self._devices[name] = dev
        self._devices_initialized = True

    @property
    def verbose(self):
        # Map libratbag's log priority back onto the 0..3 verbosity scale.
        # Falls through (returning None implicitly) for any other priority.
        v = libratbag.ratbag_log_get_priority(self._ratbag)
        if v == libratbag.RATBAG_LOG_PRIORITY_RAW:
            return 3
        elif v == libratbag.RATBAG_LOG_PRIORITY_DEBUG:
            # to match with setter action, we return 1 instead of 2
            return 1
        elif v == libratbag.RATBAG_LOG_PRIORITY_INFO:
            return 1
        elif v == libratbag.RATBAG_LOG_PRIORITY_ERROR:
            return 0

    @verbose.setter
    def verbose(self, verbosity):
        # NOTE(review): a verbosity <= 0 leaves the current priority
        # unchanged rather than lowering it — confirm this is intended.
        if verbosity > 2:
            libratbag.ratbag_log_set_priority(self._ratbag, libratbag.RATBAG_LOG_PRIORITY_RAW)
        elif verbosity >= 1:
            libratbag.ratbag_log_set_priority(self._ratbag, libratbag.RATBAG_LOG_PRIORITY_DEBUG)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Release all cached devices first, then the libratbag context.
        for d in self._devices.values():
            d.__exit__()
        libratbag.ratbag_unref(self._ratbag)

    @property
    def devices(self):
        """A list of RatbagdDevice objects supported by ratbagd."""
        if not self._devices_initialized:
            self._init_devices()
        return self._devices.values()

    def __getitem__(self, id):
        """Returns the requested device, or None."""
        # `id` is the /dev/input/eventX path; open and cache it on demand.
        if id not in self._devices.keys():
            try:
                dev = RatbagdDevice(self._ratbag, id)
            except RatbagErrorDevice:
                pass
            else:
                self._devices[id] = dev
        return self._devices[id] if id in self._devices.keys() else None

    @property
    def themes(self):
        """A list of theme names. The theme 'default' is guaranteed to be
        available."""
        return ["default", "gnome"]

    def set_verbose(self, verbose):
        # NOTE(review): duplicates the `verbose` property setter above;
        # consider consolidating on one of the two.
        if verbose > 2:
            libratbag.ratbag_log_set_priority(self._ratbag, libratbag.RATBAG_LOG_PRIORITY_RAW)
        elif verbose >= 1:
            libratbag.ratbag_log_set_priority(self._ratbag, libratbag.RATBAG_LOG_PRIORITY_DEBUG)
def get_capabilities(type, object):
    """Collect the RATBAG_<TYPE>_CAP_* values advertised by *object*.

    *type* is the libratbag object kind ("device", "profile", ...);
    *object* is the corresponding libratbag handle. CAP_NONE sentinels
    are excluded from the result.
    """
    prefix = "RATBAG_{}_CAP_".format(type.upper())
    found = []
    for symbol in libratbag.__dict__.keys():
        if not symbol.startswith(prefix) or "CAP_NONE" in symbol:
            continue
        candidate = getattr(libratbag, symbol)
        has_capability = getattr(libratbag, "ratbag_{}_has_capability".format(type.lower()))
        if has_capability(object, candidate):
            found.append(candidate)
    return found
class RatbagdDevice(metaclass=MetaRatbag):
    """Represents a ratbagd device."""

    # MetaRatbag copies every libratbag RATBAG_DEVICE_* constant onto this
    # class with the prefix stripped (e.g. RatbagdDevice.CAP_*).
    _PREFIX = "RATBAG_DEVICE_"

    def __init__(self, ratbag, path):
        # Opens the device at `path` within the libratbag context `ratbag`;
        # raises RatbagErrorDevice if the node is not libratbag-compatible.
        self._path = path
        self._ratbag = ratbag
        self._device = libratbag.ratbag_cmd_open_device(ratbag, path)
        if self._device is None:
            raise RatbagErrorDevice("device not compatible")
        self._capabilities = get_capabilities("device", self._device)
        self._profiles = [RatbagdProfile(self._device, i) for i in range(libratbag.ratbag_device_get_num_profiles(self._device))]

    def __exit__(self):
        # Explicit cleanup hook (not a context-manager __exit__: it takes no
        # exception arguments); called from Ratbagd.__exit__.
        for p in self._profiles:
            p.__exit__()
        libratbag.ratbag_device_unref(self._device)

    @property
    def id(self):
        """The unique identifier of this device."""
        return self._path

    @property
    def capabilities(self):
        """The capabilities of this device as an array. Capabilities not
        present on the device are not in the list. Thus use e.g.
        if RatbagdDevice.CAP_SWITCHABLE_RESOLUTION is in device.capabilities:
            do something
        """
        return self._capabilities

    @property
    def name(self):
        """The device name, usually provided by the kernel."""
        return libratbag.ratbag_device_get_name(self._device)

    @property
    def profiles(self):
        """A list of RatbagdProfile objects provided by this device."""
        return self._profiles

    @property
    def active_profile(self):
        """The currently active profile. This is a non-DBus property computed
        over the cached list of profiles. In the unlikely case that your device
        driver is misconfigured and there is no active profile, this returns
        the first profile."""
        for profile in self._profiles:
            if profile.is_active:
                return profile
        print("No active profile. Please report this bug to the libratbag developers", file=sys.stderr)
        return self._profiles[0]

    def get_svg(self, theme):
        """Gets the full path to the SVG for the given theme, or the empty
        string if none is available.
        The theme must be one of org.freedesktop.ratbag1.Manager.Themes. The
        theme 'default' is guaranteed to be available.
        @param theme The theme from which to retrieve the SVG, as str
        """
        return os.path.join(theme, libratbag.ratbag_device_get_svg_name(self._device))

    def commit(self, callback=None):
        """Commits all changes made to the device.

        NOTE(review): despite the legacy wording below, this implementation
        is synchronous — ratbag_device_commit() runs to completion and its
        result code is then passed straight to `callback`.
        This is an async call to DBus and this method does not return
        anything. Any success or failure code is reported to the callback
        provided when ratbagd finishes writing to the device. Note that upon
        failure, the device is automatically resynchronized by ratbagd and no
        further interaction is required by the client; clients can thus treat a
        commit as being always successful.
        @param callback The function to call with the result of the commit, as
                        a function that takes the return value of the Commit
                        method.
        """
        r = libratbag.ratbag_device_commit(self._device)
        if callback is not None:
            callback(r)
class RatbagdProfile(metaclass=MetaRatbag):
    """Represents a ratbagd profile."""

    # MetaRatbag copies every libratbag RATBAG_PROFILE_* constant onto this
    # class with the prefix stripped (e.g. RatbagdProfile.CAP_*).
    _PREFIX = "RATBAG_PROFILE_"

    def __init__(self, device, id):
        self._id = id
        self._profile = libratbag.ratbag_device_get_profile(device, id)
        # NOTE(review): _dirty is initialized here but never updated anywhere
        # in this class — the `dirty` property always reports False; confirm.
        self._dirty = False
        self._capabilities = get_capabilities("profile", self._profile)
        # Resolution count is per-profile; button and led counts come from
        # the device, so they are identical across profiles.
        self._resolutions = [RatbagdResolution(self._profile, i) for i in range(libratbag.ratbag_profile_get_num_resolutions(self._profile))]
        self._buttons = [RatbagdButton(self._profile, i) for i in range(libratbag.ratbag_device_get_num_buttons(device))]
        self._leds = [RatbagdLed(self._profile, i) for i in range(libratbag.ratbag_device_get_num_leds(device))]

    def __exit__(self):
        # Explicit cleanup hook (not a context-manager __exit__); releases
        # all child objects before dropping the profile reference.
        for r in self._resolutions:
            r.__exit__()
        for b in self._buttons:
            b.__exit__()
        for l in self._leds:
            l.__exit__()
        libratbag.ratbag_profile_unref(self._profile)

    @property
    def capabilities(self):
        """The capabilities of this profile as an array. Capabilities not
        present on the profile are not in the list. Thus use e.g.
        if RatbagdProfile.CAP_WRITABLE_NAME is in profile.capabilities:
            do something
        """
        return self._capabilities

    @property
    def name(self):
        """The name of the profile"""
        return libratbag.ratbag_profile_get_name(self._profile)

    @name.setter
    def name(self, name):
        """Set the name of this profile.
        @param name The new name, as str"""
        return libratbag.ratbag_profile_set_name(self._profile, name)

    @property
    def index(self):
        """The index of this profile."""
        return self._id

    @property
    def dirty(self):
        """Whether this profile is dirty."""
        return self._dirty

    @property
    def enabled(self):
        """tells if the profile is enabled."""
        return libratbag.ratbag_profile_is_enabled(self._profile)

    @enabled.setter
    def enabled(self, enabled):
        """Enable/Disable this profile.
        @param enabled The new state, as boolean"""
        libratbag.ratbag_profile_set_enabled(self._profile, enabled)

    @property
    def resolutions(self):
        """A list of RatbagdResolution objects with this profile's resolutions.
        Note that the list of resolutions differs between profiles but the number
        of resolutions is identical across profiles."""
        return self._resolutions

    @property
    def active_resolution(self):
        """The currently active resolution of this profile. This is a non-DBus
        property computed over the cached list of resolutions. In the unlikely
        case that your device driver is misconfigured and there is no active
        resolution, this returns the first resolution."""
        for resolution in self._resolutions:
            if resolution.is_active:
                return resolution
        print("No active resolution. Please report this bug to the libratbag developers", file=sys.stderr)
        return self._resolutions[0]

    @property
    def buttons(self):
        """A list of RatbagdButton objects with this profile's button mappings.
        Note that the list of buttons differs between profiles but the number
        of buttons is identical across profiles."""
        return self._buttons

    @property
    def leds(self):
        """A list of RatbagdLed objects with this profile's leds. Note that the
        list of leds differs between profiles but the number of leds is
        identical across profiles."""
        return self._leds

    @property
    def is_active(self):
        """Returns True if the profile is currenly active, false otherwise."""
        return libratbag.ratbag_profile_is_active(self._profile)

    def set_active(self):
        """Set this profile to be the active profile."""
        libratbag.ratbag_profile_set_active(self._profile)
class RatbagdResolution(metaclass=MetaRatbag):
    """Represents a ratbagd resolution."""

    # MetaRatbag copies every libratbag RATBAG_RESOLUTION_* constant onto
    # this class with the prefix stripped (e.g. CAP_SEPARATE_XY_RESOLUTION).
    _PREFIX = "RATBAG_RESOLUTION_"

    def __init__(self, profile, id):
        self._id = id
        self._res = libratbag.ratbag_profile_get_resolution(profile, id)
        self._capabilities = get_capabilities("resolution", self._res)

    def __exit__(self):
        # Explicit cleanup hook (not a context-manager __exit__); drops the
        # libratbag reference taken in __init__.
        libratbag.ratbag_resolution_unref(self._res)

    @property
    def index(self):
        """The index of this resolution."""
        return self._id

    @property
    def capabilities(self):
        """The capabilities of this resolution as a list. Capabilities not
        present on the resolution are not in the list. Thus use e.g.
        if RatbagdResolution.CAP_SEPARATE_XY_RESOLUTION is in resolution.capabilities:
            do something
        """
        return self._capabilities

    @property
    def resolution(self):
        """The tuple (xres, yres) with each resolution in DPI."""
        # Without separate-XY support the device reports one DPI for both axes.
        dpi_y = dpi_x = libratbag.ratbag_resolution_get_dpi_x(self._res)
        if libratbag.RATBAG_RESOLUTION_CAP_SEPARATE_XY_RESOLUTION in self._capabilities:
            dpi_y = libratbag.ratbag_resolution_get_dpi_y(self._res)
        return (dpi_x, dpi_y)

    @resolution.setter
    def resolution(self, res):
        """Set the x- and y-resolution using the given (xres, yres) tuple.
        @param res The new resolution, as (int, int)
        """
        # Devices without separate-XY support only take the x component.
        if libratbag.RATBAG_RESOLUTION_CAP_SEPARATE_XY_RESOLUTION in self._capabilities:
            libratbag.ratbag_resolution_set_dpi_xy(self._res, *res)
        else:
            libratbag.ratbag_resolution_set_dpi(self._res, res[0])

    @property
    def report_rate(self):
        """The report rate in Hz."""
        return libratbag.ratbag_resolution_get_report_rate(self._res)

    @report_rate.setter
    def report_rate(self, rate):
        """Set the report rate in Hz.
        @param rate The new report rate, as int
        """
        libratbag.ratbag_resolution_set_report_rate(self._res, rate)

    @property
    def resolutions(self):
        """The list of supported DPI values"""
        # Scratch buffer filled in by the C API; 300 is assumed to exceed
        # the maximum list length — TODO confirm against libratbag.
        dpis = [0 for i in range(300)]
        n = libratbag.ratbag_resolution_get_dpi_list(self._res, dpis)
        return dpis[:n]

    @property
    def report_rates(self):
        """The list of supported report rates"""
        # Same preallocated-buffer pattern as `resolutions` above.
        rates = [0 for i in range(300)]
        n = libratbag.ratbag_resolution_get_report_rate_list(self._res, rates)
        return rates[:n]

    @property
    def is_active(self):
        """True if this is the currently active resolution, False
        otherwise"""
        return libratbag.ratbag_resolution_is_active(self._res)

    @property
    def is_default(self):
        """True if this is the currently default resolution, False
        otherwise"""
        return libratbag.ratbag_resolution_is_default(self._res)

    def set_default(self):
        """Set this resolution to be the default."""
        return libratbag.ratbag_resolution_set_default(self._res)

    def set_active(self):
        """Set this resolution to be the active one."""
        return libratbag.ratbag_resolution_set_active(self._res)
class RatbagdButton(metaclass=MetaRatbag):
    """Represents a ratbagd button."""

    # MetaRatbag copies every libratbag RATBAG_BUTTON_* constant onto this
    # class (e.g. the ACTION_SPECIAL_* / ACTION_TYPE_* values used below).
    _PREFIX = "RATBAG_BUTTON_"

    # Friendly aliases for libratbag's macro event types.
    MACRO_KEY_PRESS = libratbag.RATBAG_MACRO_EVENT_KEY_PRESSED
    MACRO_KEY_RELEASE = libratbag.RATBAG_MACRO_EVENT_KEY_RELEASED
    MACRO_WAIT = libratbag.RATBAG_MACRO_EVENT_WAIT

    """A table mapping a button's index to its usual function as defined by X
    and the common desktop environments."""
    BUTTON_DESCRIPTION = {
        0: N_("Left mouse button click"),
        1: N_("Right mouse button click"),
        2: N_("Middle mouse button click"),
        3: N_("Backward"),
        4: N_("Forward"),
    }

    """A table mapping a special function to its human-readable description."""
    SPECIAL_DESCRIPTION = {}

    @classmethod
    def __late_init__(cls):
        # Invoked by MetaRatbag once the class exists; by then the
        # ACTION_SPECIAL_* constants have been copied from libratbag.
        cls.SPECIAL_DESCRIPTION = {
            cls.ACTION_SPECIAL_UNKNOWN: N_("Unknown"),
            cls.ACTION_SPECIAL_DOUBLECLICK: N_("Doubleclick"),
            cls.ACTION_SPECIAL_WHEEL_LEFT: N_("Wheel Left"),
            cls.ACTION_SPECIAL_WHEEL_RIGHT: N_("Wheel Right"),
            cls.ACTION_SPECIAL_WHEEL_UP: N_("Wheel Up"),
            cls.ACTION_SPECIAL_WHEEL_DOWN: N_("Wheel Down"),
            cls.ACTION_SPECIAL_RATCHET_MODE_SWITCH: N_("Ratchet Mode"),
            cls.ACTION_SPECIAL_RESOLUTION_CYCLE_UP: N_("Cycle Resolution Up"),
            cls.ACTION_SPECIAL_RESOLUTION_CYCLE_DOWN: N_("Cycle Resolution Down"),
            cls.ACTION_SPECIAL_RESOLUTION_UP: N_("Resolution Up"),
            cls.ACTION_SPECIAL_RESOLUTION_DOWN: N_("Resolution Down"),
            cls.ACTION_SPECIAL_RESOLUTION_ALTERNATE: N_("Resolution Switch"),
            cls.ACTION_SPECIAL_RESOLUTION_DEFAULT: N_("Default Resolution"),
            cls.ACTION_SPECIAL_PROFILE_CYCLE_UP: N_("Cycle Profile Up"),
            cls.ACTION_SPECIAL_PROFILE_CYCLE_DOWN: N_("Cycle Profile Down"),
            cls.ACTION_SPECIAL_PROFILE_UP: N_("Profile Up"),
            cls.ACTION_SPECIAL_PROFILE_DOWN: N_("Profile Down"),
            cls.ACTION_SPECIAL_SECOND_MODE: N_("Second Mode"),
            cls.ACTION_SPECIAL_BATTERY_LEVEL: N_("Battery Level"),
        }

    def __init__(self, profile, id):
        self._id = id
        self._button = libratbag.ratbag_profile_get_button(profile, id)
        self._capabilities = get_capabilities("button", self._button)

    def __exit__(self):
        # Explicit cleanup hook (not a context-manager __exit__); drops the
        # libratbag reference taken in __init__.
        libratbag.ratbag_button_unref(self._button)

    @property
    def index(self):
        """The index of this button."""
        return self._id

    @property
    def type(self):
        """An enum describing this button's type."""
        return libratbag.ratbag_button_get_type(self._button)

    @property
    def mapping(self):
        """An integer of the current button mapping, if mapping to a button."""
        return libratbag.ratbag_button_get_button(self._button)

    @mapping.setter
    def mapping(self, button):
        """Set the button mapping to the given button.
        @param button The button to map to, as int
        """
        libratbag.ratbag_button_set_button(self._button, button)

    @property
    def macro(self):
        """A RatbagdMacro object representing the currently set macro."""
        return RatbagdMacro.from_ratbag(libratbag.ratbag_button_get_macro(self._button))

    @macro.setter
    def macro(self, macro):
        """Set the macro to the macro represented by the given RatbagdMacro
        object.
        @param macro A RatbagdMacro object representing the macro to apply to
                     the button, as RatbagdMacro.
        """
        # Build a transient libratbag macro object, copy the events into it,
        # hand it to the button, then drop our reference.
        macro_object = libratbag.ratbag_button_macro_new("macro")
        i = 0
        for type, value in macro.keys:
            libratbag.ratbag_button_macro_set_event(macro_object, i, type, value)
            i += 1
        libratbag.ratbag_button_set_macro(self._button, macro_object)
        libratbag.ratbag_button_macro_unref(macro_object)

    @property
    def special(self):
        """An enum describing the current special mapping, if mapped to special."""
        return libratbag.ratbag_button_get_special(self._button)

    @special.setter
    def special(self, special):
        """Set the button mapping to the given special entry.
        @param special The special entry, as one of RatbagdButton.ACTION_SPECIAL_*
        """
        libratbag.ratbag_button_set_special(self._button, special)

    @property
    def action_type(self):
        """An enum describing the action type of the button. One of
        ACTION_TYPE_NONE, ACTION_TYPE_BUTTON, ACTION_TYPE_SPECIAL,
        ACTION_TYPE_MACRO. This decides which
        *Mapping property has a value.
        """
        return libratbag.ratbag_button_get_action_type(self._button)

    @property
    def action_types(self):
        """An array of possible values for ActionType."""
        return [t for t in (RatbagdButton.ACTION_TYPE_BUTTON, RatbagdButton.ACTION_TYPE_SPECIAL, RatbagdButton.ACTION_TYPE_MACRO)
                if libratbag.ratbag_button_has_action_type(self._button, t)]

    def disable(self):
        """Disables this button."""
        return libratbag.ratbag_button_disable(self._button)
class RatbagdMacro(metaclass=MetaRatbag):
    """Represents a button macro. Note that it uses keycodes as defined by
    linux/input.h and not those used by X.Org or any other higher layer such as
    Gdk."""

    # All keys from ecodes.KEY have a KEY_ prefix. We strip it.
    _PREFIX_LEN = len("KEY_")

    # Both a key press and release.
    _MACRO_KEY = 1000

    # Maps each event type to a lambda rendering one display token for
    # __str__ (arrow prefix + key name, or a wait time in ms).
    _MACRO_DESCRIPTION = {
        RatbagdButton.MACRO_KEY_PRESS: lambda key:
            "↓{}".format(ecodes.KEY[key][RatbagdMacro._PREFIX_LEN:]),
        RatbagdButton.MACRO_KEY_RELEASE: lambda key:
            "↑{}".format(ecodes.KEY[key][RatbagdMacro._PREFIX_LEN:]),
        RatbagdButton.MACRO_WAIT: lambda val:
            "{}ms".format(val),
        _MACRO_KEY: lambda key:
            "↕{}".format(ecodes.KEY[key][RatbagdMacro._PREFIX_LEN:]),
    }

    def __init__(self):
        # List of (event type, value) tuples in macro order.
        self._macro = []

    def __str__(self):
        if not self._macro:
            return "None"
        keys = []
        idx = 0
        while idx < len(self._macro):
            t, v = self._macro[idx]
            try:
                if t == RatbagdButton.MACRO_KEY_PRESS:
                    # Check for a paired press/release event
                    t2, v2 = self._macro[idx + 1]
                    if t2 == RatbagdButton.MACRO_KEY_RELEASE and v == v2:
                        # Collapse the pair into one ↕ token.
                        t = self._MACRO_KEY
                        idx += 1
            except IndexError:
                pass
            keys.append(self._MACRO_DESCRIPTION[t](v))
            idx += 1
        return " ".join(keys)

    @property
    def keys(self):
        """A list of (RatbagdButton.MACRO_*, value) tuples representing the
        current macro."""
        return self._macro

    @staticmethod
    def from_ratbag(macro_object):
        """Instantiates a new RatbagdMacro instance from the given macro in
        libratbag format.
        @param macro The macro in libratbag format, as
                     [(RatbagdButton.MACRO_*, value)].
        """
        ratbagd_macro = RatbagdMacro()
        for i in range(libratbag.ratbag_button_macro_get_num_events(macro_object)):
            type = libratbag.ratbag_button_macro_get_event_type(macro_object, i)
            value = None
            # WAIT events carry a timeout; key events carry a keycode.
            if type == RatbagdButton.MACRO_WAIT:
                value = libratbag.ratbag_button_macro_get_event_timeout(macro_object, i)
            else:
                value = libratbag.ratbag_button_macro_get_event_key(macro_object, i)
            ratbagd_macro.append(type, value)
        return ratbagd_macro

    def accept(self):
        """Applies the currently cached macro."""
        # NOTE(review): RatbagdMacro defines no emit() method — this looks
        # like a leftover from a GObject-based implementation and would
        # raise AttributeError if called; confirm.
        self.emit("macro-set")

    def append(self, type, value):
        """Appends the given event to the current macro.
        @param type The type of event, as one of RatbagdButton.MACRO_*.
        @param value If the type denotes a key event, the X.Org or Gdk keycode
                     of the event, as int. Otherwise, the value of the timeout
                     in milliseconds, as int.
        """
        # Only append if the entry isn't identical to the last one, as we cannot
        # e.g. have two identical key presses in a row.
        if len(self._macro) == 0 or (type, value) != self._macro[-1]:
            self._macro.append((type, value))
            # NOTE(review): no notify() method is defined on this class
            # either — likely another GObject leftover; confirm.
            self.notify("keys")
class RatbagdLed(metaclass=MetaRatbag):
    """Represents a ratbagd led."""

    # MetaRatbag copies every libratbag RATBAG_LED_* constant onto this
    # class with the prefix stripped.
    _PREFIX = "RATBAG_LED_"

    MODE_OFF = libratbag.RATBAG_LED_OFF
    MODE_ON = libratbag.RATBAG_LED_ON
    MODE_CYCLE = libratbag.RATBAG_LED_CYCLE
    MODE_BREATHING = libratbag.RATBAG_LED_BREATHING

    # Human-readable (translatable) descriptions for each LED mode.
    LED_DESCRIPTION = {
        # Translators: the LED is off.
        MODE_OFF: N_("Off"),
        # Translators: the LED has a single, solid color.
        MODE_ON: N_("Solid"),
        # Translators: the LED is cycling between red, green and blue.
        MODE_CYCLE: N_("Cycle"),
        # Translators: the LED's is pulsating a single color on different
        # brightnesses.
        MODE_BREATHING: N_("Breathing"),
    }

    def __init__(self, profile, id):
        self._id = id
        self._led = libratbag.ratbag_profile_get_led(profile, id)
        self._capabilities = get_capabilities("led", self._led)

    def __exit__(self):
        # Explicit cleanup hook (not a context-manager __exit__); drops the
        # libratbag reference taken in __init__.
        libratbag.ratbag_led_unref(self._led)

    @property
    def index(self):
        """The index of this led."""
        return self._id

    @property
    def mode(self):
        """This led's mode, one of MODE_OFF, MODE_ON, MODE_CYCLE and
        MODE_BREATHING."""
        return libratbag.ratbag_led_get_mode(self._led)

    @mode.setter
    def mode(self, mode):
        """Set the led's mode to the given mode.
        @param mode The new mode, as one of MODE_OFF, MODE_ON, MODE_CYCLE and
                    MODE_BREATHING.
        """
        libratbag.ratbag_led_set_mode(self._led, mode)

    @property
    def type(self):
        """An enum describing this led's type, one of RatbagdLed.TYPE_UNKNOWN,
        RatbagdLed.TYPE_LOGO or RatbagdLed.TYPE_SIDE."""
        return libratbag.ratbag_led_get_type(self._led)

    @property
    def color(self):
        """An integer triple of the current LED color."""
        c = libratbag.ratbag_led_get_color(self._led)
        return (c.red, c.green, c.blue)

    @color.setter
    def color(self, color):
        """Set the led color to the given color.
        @param color An RGB color, as an integer triplet with values 0-255.
        """
        libratbag.ratbag_led_set_color(self._led, libratbag.ratbag_color(*color))

    @property
    def colordepth(self):
        """An enum describing this led's colordepth, one of
        RatbagdLed.COLORDEPTH_MONOCHROME, RatbagdLed.COLORDEPTH_RGB"""
        return libratbag.ratbag_led_get_colordepth(self._led)

    @property
    def effect_duration(self):
        """The LED's effect duration in ms, values range from 0 to 10000."""
        return libratbag.ratbag_led_get_effect_duration(self._led)

    @effect_duration.setter
    def effect_duration(self, effect_duration):
        """Set the effect duration in ms. Allowed values range from 0 to 10000.
        @param effect_duration The new effect duration, as int
        """
        # Bug fix: this setter used to call self._set_dbus_property(), a
        # leftover from the DBus-based implementation that does not exist on
        # this class and raised AttributeError. Call libratbag directly,
        # mirroring the brightness setter below.
        libratbag.ratbag_led_set_effect_duration(self._led, effect_duration)

    @property
    def brightness(self):
        """The LED's brightness, values range from 0 to 255."""
        return libratbag.ratbag_led_get_brightness(self._led)

    @brightness.setter
    def brightness(self, brightness):
        """Set the brightness. Allowed values range from 0 to 255.
        @param brightness The new brightness, as int
        """
        libratbag.ratbag_led_set_brightness(self._led, brightness)
|
guiambros/libratbag | tools/merge_ratbagd.py | <filename>tools/merge_ratbagd.py
#!/usr/bin/env python3
#
# vim: set expandtab shiftwidth=4 tabstop=4:
#
# Copyright 2017 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
import os
import stat
import sys
def print_ratbagctl(ratbagctl_path, ratbagd_path, version_string):
    """Print ratbagctl to stdout with ratbagd.py inlined.

    Every ``from ratbagd import ...`` line of ratbagctl is replaced by the
    contents of ratbagd.py minus its leading comment/blank header lines;
    ``@version@`` placeholders on all other lines are substituted with
    *version_string*.
    """
    with open(ratbagctl_path, 'r') as ctl_file, open(ratbagd_path, 'r') as inline_file:
        for line in ctl_file:
            if line.startswith("from ratbagd import "):
                in_header = True
                for inlined in inline_file:
                    # Skip the initial run of comment and blank lines only;
                    # once real code starts, everything is emitted verbatim.
                    if in_header and (inlined.startswith('#') or not inlined.strip()):
                        continue
                    in_header = False
                    print(inlined.rstrip('\n'))
            else:
                # replace() is a no-op when the placeholder is absent.
                print(line.replace('@version@', version_string).rstrip('\n'))
def main(argv):
    """Entry point: merge ratbagd.py into ratbagctl.

    Writes the merged script to --output (made executable) or to stdout.
    @param argv Command-line arguments, without the program name.
    """
    parser = argparse.ArgumentParser(description="merge ratbagd.py into ratbagctl")
    parser.add_argument("ratbagctl", action='store')
    parser.add_argument("ratbagd", action='store')
    parser.add_argument("--output", action="store")
    parser.add_argument("--version", action="store", default="git_master")
    # Bug fix: this used to parse sys.argv[1:], silently ignoring the argv
    # parameter. The sole caller passes sys.argv[1:], so behavior is the
    # same for the script while making main() usable programmatically.
    ns = parser.parse_args(argv)
    output_file = None
    saved_stdout = sys.stdout
    try:
        if ns.output:
            output_file = open(ns.output, 'w')
            st = os.stat(ns.output)
            # Mark the merged script executable for user/group/other.
            os.chmod(ns.output, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
            sys.stdout = output_file
        print_ratbagctl(ns.ratbagctl, ns.ratbagd, ns.version)
    finally:
        # Bug fix: sys.stdout previously stayed pointed at the (closed)
        # output file after main() returned; always restore it.
        sys.stdout = saved_stdout
        if output_file is not None:
            output_file.close()
if __name__ == "__main__":
main(sys.argv[1:])
|
guiambros/libratbag | tools/toolbox.py | <filename>tools/toolbox.py
# vim: set expandtab shiftwidth=4 tabstop=4:
#
# This file is part of libratbag.
#
# Copyright 2017 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import imp
import os
import shutil
import subprocess
import sys
from gi.repository import GLib
# various constants
# NOTE: the '@MESON_BUILD_ROOT@' / filename tokens below look like meson
# build-time template placeholders — this file is presumably configured
# before use; verify against the build definition.
RATBAGCTL_NAME = 'ratbagctl'
RATBAGCTL_PATH = os.path.join('@MESON_BUILD_ROOT@', RATBAGCTL_NAME)
RATBAGCTL_DEVEL_NAME = 'ratbagctl.devel'
RATBAGCTL_DEVEL_PATH = os.path.join('@MESON_BUILD_ROOT@', RATBAGCTL_DEVEL_NAME)
# System DBus policy file location used by start_ratbagd()/terminate_ratbagd().
DBUS_CONF_DIR = '/etc/dbus-1/system.d'
DBUS_CONF_NAME = 'org.freedesktop.ratbag_devel1.conf'
DBUS_CONF_PATH = os.path.join(DBUS_CONF_DIR, DBUS_CONF_NAME)
def import_non_standard_path(name, path):
    """Import the Python module at *path* under the module name *name*.

    Unlike a plain ``import``, *path* does not have to live on sys.path.
    The module is registered in sys.modules, so repeated calls with the
    same *name* return the same module object.
    @param name The module name to register, as str
    @param path Filesystem path of the .py file to load, as str
    """
    # Fast path: see if the module has already been imported.
    try:
        return sys.modules[name]
    except KeyError:
        pass
    # If any of the following calls raises an exception,
    # there's a problem we can't handle -- let the caller handle it.
    # Modernization: the deprecated `imp` module (removed in Python 3.12)
    # was replaced with the importlib equivalent of imp.load_module().
    import importlib.util
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    # Register before executing, matching imp.load_module() semantics and
    # allowing the module to import itself recursively.
    sys.modules[name] = module
    spec.loader.exec_module(module)
    return module
def start_ratbagd(verbosity=0):
    """Start the development ratbagd daemon and wait for it on the bus.

    Installs the development DBus policy, spawns ratbagd.devel and polls
    the system bus for up to 30 seconds until the daemon's well-known name
    is owned.
    @param verbosity 0..3+, mapped onto the daemon's --verbose flags
    @return the daemon's subprocess.Popen handle, or None on failure
    """
    from gi.repository import Gio
    import time
    # first copy the policy for the ratbagd daemon to be allowed to run
    shutil.copy(os.path.join('@MESON_BUILD_ROOT@', DBUS_CONF_NAME),
                DBUS_CONF_PATH)
    # FIXME: kill any running ratbagd.devel
    args = [os.path.join('@MESON_BUILD_ROOT@', "ratbagd.devel")]
    if verbosity >= 3:
        args.append('--verbose=raw')
    elif verbosity >= 2:
        args.append('--verbose')
    ratbagd_process = subprocess.Popen(args, shell=False, stdout=sys.stdout, stderr=sys.stderr)
    dbus = Gio.bus_get_sync(Gio.BusType.SYSTEM, None)
    name_owner = None
    # Bug fix: time.clock() was removed in Python 3.8 (deprecated since
    # 3.3); use the monotonic clock to measure the 30 s startup timeout.
    start_time = time.monotonic()
    while name_owner is None and time.monotonic() - start_time < 30:
        proxy = Gio.DBusProxy.new_sync(dbus,
                                       Gio.DBusProxyFlags.NONE,
                                       None,
                                       "org.freedesktop.ratbag_devel1_@ratbagd_sha@",
                                       "/org/freedesktop/ratbag_devel1_@ratbagd_sha@",
                                       "org.freedesktop.ratbag_devel1_@ratbagd_sha@.Manager",
                                       None)
        name_owner = proxy.get_name_owner()
        if name_owner is None:
            time.sleep(0.2)
    os.environ['RATBAGCTL_DEVEL'] = "org.freedesktop.ratbag_devel1_@ratbagd_sha@"
    if name_owner is None or ratbagd_process.poll() is not None:
        return None
    return ratbagd_process
def terminate_ratbagd(ratbagd):
    """Stop a daemon started by start_ratbagd() and remove its DBus policy.

    @param ratbagd The subprocess.Popen handle, or None (policy cleanup
                   still runs in that case).
    """
    if ratbagd is not None:
        ratbagd.terminate()
        try:
            # Give the daemon five seconds to exit gracefully before
            # escalating to SIGKILL.
            ratbagd.wait(5)
        except subprocess.TimeoutExpired:
            ratbagd.kill()
    try:
        os.unlink(DBUS_CONF_PATH)
    except FileNotFoundError:
        # Policy file already gone (e.g. start_ratbagd never installed it).
        pass
def sync_dbus():
    # Drain all pending events from the default GLib main context so that
    # queued DBus messages are dispatched before the caller continues.
    main_context = GLib.MainContext.default()
    while main_context.pending():
        main_context.iteration(False)
ratbagctl = import_non_standard_path(RATBAGCTL_NAME, RATBAGCTL_PATH)
from ratbagctl import open_ratbagd, get_parser, RatbagError, RatbagErrorCapability
# __all__ entries must be strings naming module attributes. The previous
# list held the objects themselves (functions, classes); a star-import
# (`from <module> import *`) raises TypeError on non-string items.
__all__ = [
    'RATBAGCTL_NAME',
    'RATBAGCTL_PATH',
    'DBUS_CONF_DIR',
    'DBUS_CONF_NAME',
    'DBUS_CONF_PATH',
    'start_ratbagd',
    'terminate_ratbagd',
    'open_ratbagd',
    'get_parser',
    'RatbagError',
    'RatbagErrorCapability',
]
|
janosimas/docker-dev | scripts/clang-tidy-diff.py | <reponame>janosimas/docker-dev<gh_stars>0
#!/usr/bin/env python
#
#===- clang-tidy-diff.py - ClangTidy Diff Checker ------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
r"""
ClangTidy Diff Checker
======================
This script reads input from a unified diff, runs clang-tidy on all changed
files and outputs clang-tidy warnings in changed lines only. This is useful to
detect clang-tidy regressions in the lines touched by a specific patch.
Example usage for git/svn users:
git diff -U0 HEAD^ | clang-tidy-diff.py -p1
svn diff --diff-cmd=diff -x-U0 | \
clang-tidy-diff.py -fix -checks=-*,modernize-use-override
"""
import argparse
import json
import re
import subprocess
import sys
def main():
  """Read a unified diff on stdin and run clang-tidy over the changed lines.

  Exits with clang-tidy's return code, or 0 when no changed file matches
  the path filters.
  """
  parser = argparse.ArgumentParser(description=
                                   'Run clang-tidy against changed files, and '
                                   'output diagnostics only for modified '
                                   'lines.')
  parser.add_argument('-clang-tidy-binary', metavar='PATH',
                      default='clang-tidy',
                      help='path to clang-tidy binary')
  parser.add_argument('-p', metavar='NUM', default=0,
                      help='strip the smallest prefix containing P slashes')
  parser.add_argument('-regex', metavar='PATTERN', default=None,
                      help='custom pattern selecting file paths to check '
                      '(case sensitive, overrides -iregex)')
  parser.add_argument('-iregex', metavar='PATTERN', default=
                      r'.*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc)',
                      help='custom pattern selecting file paths to check '
                      '(case insensitive, overridden by -regex)')
  parser.add_argument('-fix', action='store_true', default=False,
                      help='apply suggested fixes')
  parser.add_argument('-checks',
                      help='checks filter, when not specified, use clang-tidy '
                      'default',
                      default='')
  parser.add_argument('-path', dest='build_path',
                      help='Path used to read a compile command database.')
  parser.add_argument('-extra-arg', dest='extra_arg',
                      action='append', default=[],
                      help='Additional argument to append to the compiler '
                      'command line.')
  parser.add_argument('-extra-arg-before', dest='extra_arg_before',
                      action='append', default=[],
                      help='Additional argument to prepend to the compiler '
                      'command line.')
  parser.add_argument('-quiet', action='store_true', default=False,
                      help='Run clang-tidy in quiet mode')

  # Everything after '--' is forwarded verbatim to clang-tidy.
  clang_tidy_args = []
  argv = sys.argv[1:]
  if '--' in argv:
    clang_tidy_args.extend(argv[argv.index('--')+1:])
    argv = argv[:argv.index('--')]
  args = parser.parse_args(argv)

  # Extract changed lines for each file.
  # Regexes are raw strings: '\+' in a plain literal is an invalid escape
  # sequence (SyntaxWarning on Python >= 3.12).
  filename = None
  lines_by_file = {}
  for line in sys.stdin:
    match = re.search(r'^\+\+\+\ \"?(.*?/){%s}([^ \t\n\"]*)' % args.p, line)
    if match:
      filename = match.group(2)
    if filename is None:
      continue

    if args.regex is not None:
      if not re.match('^%s$' % args.regex, filename):
        continue
    else:
      if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE):
        continue

    # Hunk header '@@ -a,b +c,d @@' -> starting line c and count d.
    match = re.search(r'^@@.*\+(\d+)(,(\d+))?', line)
    if match:
      start_line = int(match.group(1))
      line_count = 1
      if match.group(3):
        line_count = int(match.group(3))
      if line_count == 0:
        continue
      end_line = start_line + line_count - 1
      lines_by_file.setdefault(filename, []).append([start_line, end_line])

  if len(lines_by_file) == 0:
    print("No relevant changes found.")
    sys.exit(0)

  line_filter_json = json.dumps(
      [{"name" : name, "lines" : lines_by_file[name]} for name in lines_by_file],
      separators = (',', ':'))

  # On Windows the JSON is escaped with tripled quotes instead of being
  # wrapped in single quotes for the shell.
  quote = ""
  if sys.platform == 'win32':
    line_filter_json = re.sub(r'"', r'"""', line_filter_json)
  else:
    quote = "'"

  # Run clang-tidy on files containing changes.
  command = [args.clang_tidy_binary]
  command.append('-line-filter=' + quote + line_filter_json + quote)
  if args.fix:
    command.append('-fix')
  if args.checks != '':
    command.append('-checks=' + quote + args.checks + quote)
  if args.quiet:
    command.append('-quiet')
  if args.build_path is not None:
    command.append('-p=%s' % args.build_path)
  command.extend(lines_by_file.keys())
  for arg in args.extra_arg:
    command.append('-extra-arg=%s' % arg)
  for arg in args.extra_arg_before:
    command.append('-extra-arg-before=%s' % arg)
  command.extend(clang_tidy_args)

  sys.exit(subprocess.call(' '.join(command), shell=True))

if __name__ == '__main__':
  main()
|
dagger2/new_thrift_site | app/controller/main.py | from flask import (
Blueprint, render_template, redirect, url_for, abort
)
bp = Blueprint('main', __name__)
@bp.route('/')
def index():
    """Render the site home page."""
    template = 'main/index.html'
    return render_template(template)
@bp.route('/events/ezras_exchange')
def ezras_exchange():
    """Render the Ezra's Exchange event page (shares the happenings template)."""
    template = 'main/happenings.html'
    return render_template(template)
@bp.route('/events/mending-workshops')
def mending_workshops():
    """Render the mending-workshops event page (shares the happenings template)."""
    template = 'main/happenings.html'
    return render_template(template)
@bp.route('/resources')
def resources():
    """Render the community resources page."""
    template = 'main/resources.html'
    return render_template(template)
@bp.route('/members')
def members():
    """Render the members page."""
    template = 'main/members.html'
    return render_template(template)
@bp.route('/connect')
def connect():
    """Render the connect (get involved) page."""
    template = 'main/connect.html'
    return render_template(template)
@bp.route('/contact')
def contact():
    """Legacy alias: redirect /contact to the connect page."""
    target = url_for('main.connect')
    return redirect(target)
@bp.route('/events')
def events():
    """Legacy alias: redirect /events to the happenings page."""
    target = url_for('main.happenings')
    return redirect(target)
@bp.route('/happenings')
def happenings():
    """Render the happenings (events listing) page."""
    template = 'main/happenings.html'
    return render_template(template)
@bp.route('/events/pop-up-shops')
def pop_up_shops():
    """Render the pop-up shops event page.

    NOTE(review): this renders main/events.html while /events itself
    redirects to /happenings -- confirm this template still exists and
    is intended here.
    """
    template = 'main/events.html'
    return render_template(template)
@bp.route('/dino-game')
def dino_game():
    """Render the dino game easter-egg page."""
    template = 'main/dino_game.html'
    return render_template(template)
@bp.route('/credits')
def credits():
    """Render the site credits page.

    Note: the function name shadows the builtin `credits`, but it is
    kept because Flask uses it as the endpoint name ('main.credits').
    """
    template = 'main/credits.html'
    return render_template(template)
@bp.app_errorhandler(403)
def handle_403(err):
    """Render the custom 403 (Forbidden) error page.

    Bug fix: the previous message was the 500 'Internal Server Error'
    text, which is wrong for a 403 -- that status means the client is
    not allowed to access the resource, not that the server failed.
    """
    return render_template('errors/error.html',
                           error='403',
                           message="Forbidden. You don't have permission to access this page."), 403
@bp.app_errorhandler(404)
def handle_404(err):
    """Render the custom 404 (Not Found) error page."""
    context = {
        'error': '404',
        'message': 'Page not Found. Check your URL to make sure you typed it in correctly.',
    }
    return render_template('errors/error.html', **context), 404
@bp.app_errorhandler(500)
def handle_500(err):
    """Render the custom 500 (Internal Server Error) page."""
    context = {
        'error': '500',
        'message': "Internal Server Error. Sorry that's our fault. Please check back again later.",
    }
    return render_template('errors/error.html', **context), 500
dagger2/new_thrift_site | app/controller/pwa.py | <reponame>dagger2/new_thrift_site
from flask import (
Blueprint, make_response, send_from_directory
)
bp = Blueprint('pwa', __name__, url_prefix='')
@bp.route('/manifest.json')
def manifest():
    """Serve the PWA web-app manifest from the static directory."""
    static_dir, manifest_file = 'static', 'manifest.json'
    return send_from_directory(static_dir, manifest_file)
# @bp.route('/sw.js')
# def service_worker():
# response = make_response(send_from_directory('static', 'sw.js'))
# response.headers['Cache-Control'] = 'no-cache'
# return response
|
dagger2/new_thrift_site | wsgi.py | <reponame>dagger2/new_thrift_site
from app import create_app
app = create_app()

# Bug fixes: the guard compared __name__ against "__init__", which is
# never a script's module name (must be "__main__"), and the body called
# the undefined name `application` instead of `app` -- so running this
# file directly either did nothing or raised NameError.
if __name__ == "__main__":
    app.run(host='0.0.0.0')
|
WTL7/Prac_LogisticRegression | ML_LogisticRegression.py |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Bug fix: the original used Python-2-only `print expr` statements,
# which are a SyntaxError on every supported Python version. print()
# calls below are equivalent on both 2 and 3 for single arguments.
ad_data = pd.read_csv('advertising.csv')
print(ad_data.head())
print(ad_data.describe())
print(ad_data.info())  # DataFrame.info() prints its report itself and returns None

# Exploratory plots.
sns.set_style('whitegrid')
sns.distplot(ad_data['Age'], kde=False, bins=30)
#sns.jointplot(x = 'Age', y = 'Area Income', data = ad_data, kind = 'kde')
#sns.jointplot(x = 'Age', y = 'Daily Time Spent on Site', data = ad_data)
sns.jointplot(x='Daily Time Spent on Site', y='Daily Internet Usage',
              data=ad_data, kind='reg', color='g')
sns.pairplot(data=ad_data, hue='Clicked on Ad')
# Drop free-text / datetime columns the model cannot use directly.
ad_data.drop(['Ad Topic Line', 'City', 'Country', 'Timestamp'], axis=1, inplace=True)
plt.show()

# ---------- machine learning ------------
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    ad_data.drop('Clicked on Ad', axis=1), ad_data['Clicked on Ad'])

from sklearn.linear_model import LogisticRegression
logmodel = LogisticRegression()
logmodel.fit(X_train, y_train)
predictions = logmodel.predict(X_test)

from sklearn.metrics import classification_report
print(classification_report(y_test, predictions))
bowlofstew/vitess | test/vtctld_web_test.py | <filename>test/vtctld_web_test.py
#!/usr/bin/env python
"""A vtctl webdriver test."""
import logging
import os
from selenium import webdriver
import unittest
from vtproto import vttest_pb2
from vttest import environment as vttest_environment
from vttest import local_database
from vttest import mysql_flavor
import environment
import utils
from selenium.common.exceptions import NoSuchElementException
def setUpModule():
  """Optionally start a headless Xvfb display before the tests run."""
  # NOTE(review): the bare `except:` also catches KeyboardInterrupt and
  # SystemExit; it re-raises after cleanup, so this is tolerable here.
  try:
    if utils.options.xvfb:
      try:
        # This will be killed automatically by utils.kill_sub_processes()
        utils.run_bg(['Xvfb', ':15', '-ac'])
        os.environ['DISPLAY'] = ':15'
      except OSError as err:
        # Despite running in background, utils.run_bg() will throw immediately
        # if the Xvfb binary is not found.
        logging.error(
            "Can't start Xvfb (will try local DISPLAY instead): %s", err)
  except:
    tearDownModule()
    raise
def tearDownModule():
  """Clean up shared fixtures: temp files and background subprocesses."""
  utils.required_teardown()
  if utils.options.skip_teardown:
    # Debug aid: leave everything running for post-mortem inspection.
    return
  utils.remove_tmp_files()
  utils.kill_sub_processes()
class TestVtctldWeb(unittest.TestCase):
  """Selenium-driven checks of the vtctld web UI against a local vttest DB."""

  @classmethod
  def setUpClass(cls):
    """Set up two keyspaces: one unsharded, one with two shards."""
    topology = vttest_pb2.VTTestTopology()
    topology.cells.append('test')
    keyspace = topology.keyspaces.add(name='test_keyspace')
    keyspace.replica_count = 2
    keyspace.rdonly_count = 2
    keyspace.shards.add(name='-80')
    keyspace.shards.add(name='80-')
    keyspace2 = topology.keyspaces.add(name='test_keyspace2')
    keyspace2.shards.add(name='0')
    keyspace2.replica_count = 2
    keyspace2.rdonly_count = 1
    # On Travis CI, drive a remote Chrome through the Sauce Labs tunnel;
    # otherwise use a local chromedriver.
    if os.environ.get('CI') == 'true' and os.environ.get('TRAVIS') == 'true':
      username = os.environ['SAUCE_USERNAME']
      access_key = os.environ['SAUCE_ACCESS_KEY']
      capabilities = {}
      capabilities['tunnel-identifier'] = os.environ['TRAVIS_JOB_NUMBER']
      capabilities['build'] = os.environ['TRAVIS_BUILD_NUMBER']
      capabilities['platform'] = 'Linux'
      capabilities['browserName'] = 'chrome'
      hub_url = '%s:%s@localhost:4445' % (username, access_key)
      cls.driver = webdriver.Remote(
          desired_capabilities=capabilities,
          command_executor='http://%s/wd/hub' % hub_url)
    else:
      os.environ['webdriver.chrome.driver'] = os.path.join(
          os.environ['VTROOT'], 'dist')
      # Only testing against Chrome for now
      cls.driver = webdriver.Chrome()
    port = environment.reserve_ports(1)
    vttest_environment.base_port = port
    mysql_flavor.set_mysql_flavor(None)
    cls.db = local_database.LocalDatabase(
        topology, '', False, None,
        os.path.join(os.environ['VTTOP'], 'web/vtctld'),
        os.path.join(os.environ['VTTOP'], 'test/vttest_schema/default'))
    cls.db.setup()
    cls.vtctld_addr = 'http://localhost:%d' % cls.db.config()['port']
    utils.pause('Paused test after vtcombo was started.\n'
                'For manual testing, connect to vtctld: %s' % cls.vtctld_addr)

  @classmethod
  def tearDownClass(cls):
    """Tear down the local database and quit the browser."""
    cls.db.teardown()
    cls.driver.quit()

  def _get_keyspaces(self):
    """Get list of all present keyspaces."""
    content = self.driver.find_element_by_id('content')
    # TODO(thompsonja) find better way to get keyspace name
    keyspaces = content.find_elements_by_tag_name('md-card')
    return [ks.find_element_by_tag_name('h2').text for ks in keyspaces]

  def _get_keyspace_element(self, keyspace_name):
    """Get a specific keyspace element given a keyspace name."""
    return self.driver.find_element_by_id('%s-card' % keyspace_name)

  def _get_shards(self, keyspace_name):
    """Return the shard names listed on a keyspace's shards grid."""
    shard_grid = self.driver.find_element_by_id(
        '%s-shards-list' % keyspace_name)
    return shard_grid.text.split('\n')

  def _get_serving_shards(self, keyspace_name):
    """Return the serving shard names shown for a keyspace."""
    serving_shards = self.driver.find_element_by_id(
        '%s-serving-list' % keyspace_name)
    return serving_shards.text.split('\n')

  def _get_inactive_shards(self, keyspace_name):
    """Return inactive shard names; raises NoSuchElementException if none."""
    inactive_shards = self.driver.find_element_by_id(
        '%s-inactive-list' % keyspace_name)
    return inactive_shards.text.split('\n')

  def _get_shard_element(self, keyspace_name, shard_name):
    """Return the link element for a shard on its keyspace card."""
    return self._get_keyspace_element(keyspace_name).find_element_by_link_text(
        shard_name)

  def _get_tablet_names(self):
    """Return a dict mapping tablet alias -> tablet type.

    Card titles are assumed to look like '<alias> (<type>)' -- the slice
    [1:-1] strips the parentheses around the type. TODO confirm format.
    """
    tablet_elements = (
        self.driver.find_element_by_id('tablets').find_elements_by_tag_name(
            'md-card'))
    tablet_titles = [
        x.find_element_by_tag_name('md-toolbar').text.split('\n')[0]
        for x in tablet_elements]
    return dict(
        [(x.split(' ')[0], x.split(' ')[1][1:-1]) for x in tablet_titles])

  def _get_shard_record_keyspace_shard(self):
    """Return the 'keyspace/shard' header text of the shard record page."""
    return self.driver.find_element_by_id('keyspace-shard').text

  def _get_shard_record_master_tablet(self):
    """Return the master tablet alias shown on the shard record page."""
    return self.driver.find_element_by_id('master-tablet').text

  def _check_tablet_types(self, tablet_types, expected_counts):
    """Assert the observed count of each expected tablet type."""
    for expected_type, count in expected_counts.iteritems():
      self.assertEquals(count,
                        len([x for x in tablet_types if x == expected_type]))

  def _check_shard_overview(
      self, keyspace_name, shard_name, expected_tablet_types):
    """Open a shard page, verify master and tablet type counts, navigate back."""
    logging.info('Checking %s/%s', keyspace_name, shard_name)
    self._get_shard_element(keyspace_name, shard_name).click()
    self.assertEquals(self._get_shard_record_keyspace_shard(),
                      '%s/%s' % (keyspace_name, shard_name))
    master = self._get_shard_record_master_tablet()
    logging.info('master tablet is %s', master)
    shard_tablets = self._get_tablet_names()
    self.assertEquals(shard_tablets[master], 'master')
    self._check_tablet_types(shard_tablets.values(), expected_tablet_types)
    self.driver.back()

  def test_keyspace_overview(self):
    """Verify keyspace names and serving/inactive shard lists on the dashboard."""
    logging.info('Testing keyspace overview')
    logging.info('Fetching main vtctld page: %s', self.vtctld_addr)
    self.driver.get(self.vtctld_addr)
    keyspace_names = self._get_keyspaces()
    logging.info('Keyspaces: %s', ', '.join(keyspace_names))
    self.assertListEqual(['test_keyspace', 'test_keyspace2'], keyspace_names)
    test_keyspace_serving_shards = self._get_serving_shards('test_keyspace')
    logging.info(
        'Serving Shards in test_keyspace: %s', ', '.join(
            test_keyspace_serving_shards))
    self.assertListEqual(test_keyspace_serving_shards, ['-80', '80-'])
    test_keyspace2_serving_shards = self._get_serving_shards('test_keyspace2')
    logging.info(
        'Serving Shards in test_keyspace2: %s', ', '.join(
            test_keyspace2_serving_shards))
    self.assertListEqual(test_keyspace2_serving_shards, ['0'])
    # No inactive shards exist, so the inactive list element must be absent.
    with self.assertRaises(NoSuchElementException):
      self._get_inactive_shards('test_keyspace')
    logging.info(
        'Inactive Shards in test_keyspace: %s', ', '.join([]))
    with self.assertRaises(NoSuchElementException):
      self._get_inactive_shards('test_keyspace2')
    logging.info(
        'Inactive Shards in test_keyspace2: %s', ', '.join([]))

  def test_shard_overview(self):
    """Verify tablet composition of each shard's overview page."""
    logging.info('Testing shard overview')
    logging.info('Fetching main vtctld page: %s', self.vtctld_addr)
    self.driver.get(self.vtctld_addr)
    self._check_shard_overview(
        'test_keyspace', '-80', {'master': 1, 'replica': 1, 'rdonly': 2})
    self._check_shard_overview(
        'test_keyspace', '80-', {'master': 1, 'replica': 1, 'rdonly': 2})
    self._check_shard_overview(
        'test_keyspace2', '0', {'master': 1, 'replica': 1, 'rdonly': 1})
def add_test_options(parser):
  """Register this module's extra command-line flag on the option parser."""
  parser.add_option('--no-xvfb',
                    action='store_false',
                    dest='xvfb',
                    default=True,
                    help='Use local DISPLAY instead of headless Xvfb mode.')

if __name__ == '__main__':
  utils.main(test_options=add_test_options)
|
bowlofstew/vitess | py/vtdb/keyspace.py | <gh_stars>1-10
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""A Vitess keyspace represents a sharded MySQL database."""
import struct
from vtdb import keyrange_constants
pack_keyspace_id = struct.Struct('!Q').pack
class Keyspace(object):
  """Represent the SrvKeyspace object from the toposerver.

  Provide functions to extract sharding information from the same.
  """

  def __init__(self, name, data):
    """Load this object from a SrvKeyspace dict generated by vt."""
    get = data.get
    self.name = name
    self.partitions = get('Partitions', {})
    self.sharding_col_name = get('ShardingColumnName', '')
    self.sharding_col_type = get(
        'ShardingColumnType', keyrange_constants.KIT_UNSET)
    self.served_from = get('ServedFrom', None)

  def get_shards(self, db_type):
    """Return the ShardReferences for db_type, or [] when none exist."""
    if not db_type:
      raise ValueError('db_type is not set')
    if db_type not in self.partitions:
      return []
    partition = self.partitions[db_type]
    if 'ShardReferences' not in partition:
      return []
    return partition['ShardReferences']

  def get_shard_count(self, db_type):
    """Return the number of shards serving db_type."""
    if not db_type:
      raise ValueError('db_type is not set')
    return len(self.get_shards(db_type))

  def get_shard_names(self, db_type):
    """Return the names of the shards serving db_type."""
    if not db_type:
      raise ValueError('db_type is not set')
    return [ref['Name'] for ref in self.get_shards(db_type)]

  def keyspace_id_to_shard_name_for_db_type(self, keyspace_id, db_type):
    """Finds the shard for a keyspace_id.

    WARNING: this only works for KIT_UINT64 keyspace ids.

    Args:
      keyspace_id: A uint64 keyspace_id.
      db_type: Str tablet type (master, rdonly, or replica).

    Returns:
      Shard name.

    Raises:
      ValueError: On invalid keyspace_id.
    """
    if not keyspace_id:
      raise ValueError('keyspace_id is not set')
    if not db_type:
      raise ValueError('db_type is not set')
    # Pack into big-endian bytes so ranges compare byte-wise.
    pkid = pack_keyspace_id(keyspace_id)
    shards = self.get_shards(db_type)
    for shard in shards:
      keyrange = shard.get('KeyRange')
      if not keyrange:
        # A missing/empty KeyRange covers the full keyspace-id space.
        return shard['Name']
      if _shard_contain_kid(pkid, keyrange['Start'], keyrange['End']):
        return shard['Name']
    raise ValueError(
        'cannot find shard for keyspace_id %s in %s' % (keyspace_id, shards))
def _shard_contain_kid(pkid, start, end):
return start <= pkid and (end == keyrange_constants.MAX_KEY or pkid < end)
|
bowlofstew/vitess | py/vtproto/throttlerservice_pb2.py | <filename>py/vtproto/throttlerservice_pb2.py
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: throttlerservice.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import throttlerdata_pb2 as throttlerdata__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='throttlerservice.proto',
package='throttlerservice',
syntax='proto3',
serialized_pb=_b('\n\x16throttlerservice.proto\x12\x10throttlerservice\x1a\x13throttlerdata.proto2\xf3\x03\n\tThrottler\x12M\n\x08MaxRates\x12\x1e.throttlerdata.MaxRatesRequest\x1a\x1f.throttlerdata.MaxRatesResponse\"\x00\x12S\n\nSetMaxRate\x12 .throttlerdata.SetMaxRateRequest\x1a!.throttlerdata.SetMaxRateResponse\"\x00\x12\x65\n\x10GetConfiguration\x12&.throttlerdata.GetConfigurationRequest\x1a\'.throttlerdata.GetConfigurationResponse\"\x00\x12n\n\x13UpdateConfiguration\x12).throttlerdata.UpdateConfigurationRequest\x1a*.throttlerdata.UpdateConfigurationResponse\"\x00\x12k\n\x12ResetConfiguration\x12(.throttlerdata.ResetConfigurationRequest\x1a).throttlerdata.ResetConfigurationResponse\"\x00\x62\x06proto3')
,
dependencies=[throttlerdata__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
import abc
from grpc.beta import implementations as beta_implementations
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class BetaThrottlerServicer(object):
  """Abstract service interface for throttlerservice.Throttler (gRPC beta API).

  Generated code (see file header: DO NOT EDIT) -- regenerate from
  throttlerservice.proto instead of editing by hand. Each abstract
  method is one RPC handler that concrete servicers must override.
  """
  __metaclass__ = abc.ABCMeta
  @abc.abstractmethod
  def MaxRates(self, request, context):
    raise NotImplementedError()
  @abc.abstractmethod
  def SetMaxRate(self, request, context):
    raise NotImplementedError()
  @abc.abstractmethod
  def GetConfiguration(self, request, context):
    raise NotImplementedError()
  @abc.abstractmethod
  def UpdateConfiguration(self, request, context):
    raise NotImplementedError()
  @abc.abstractmethod
  def ResetConfiguration(self, request, context):
    raise NotImplementedError()
class BetaThrottlerStub(object):
  """The interface to which stubs will conform.

  Generated code (gRPC beta API); regenerate from the .proto rather than
  editing. The `.future` placeholders are part of the generated
  beta-stub shape for asynchronous invocation.
  """
  __metaclass__ = abc.ABCMeta
  @abc.abstractmethod
  def MaxRates(self, request, timeout):
    raise NotImplementedError()
  MaxRates.future = None
  @abc.abstractmethod
  def SetMaxRate(self, request, timeout):
    raise NotImplementedError()
  SetMaxRate.future = None
  @abc.abstractmethod
  def GetConfiguration(self, request, timeout):
    raise NotImplementedError()
  GetConfiguration.future = None
  @abc.abstractmethod
  def UpdateConfiguration(self, request, timeout):
    raise NotImplementedError()
  UpdateConfiguration.future = None
  @abc.abstractmethod
  def ResetConfiguration(self, request, timeout):
    raise NotImplementedError()
  ResetConfiguration.future = None
def beta_create_Throttler_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
  """Create a beta-API gRPC server wired to the given Throttler servicer.

  Generated code; the repeated `import throttlerdata_pb2` lines below
  are a code-generator artifact and are harmless (module imports are
  cached after the first one).
  """
  import throttlerdata_pb2
  import throttlerdata_pb2
  import throttlerdata_pb2
  import throttlerdata_pb2
  import throttlerdata_pb2
  import throttlerdata_pb2
  import throttlerdata_pb2
  import throttlerdata_pb2
  import throttlerdata_pb2
  import throttlerdata_pb2
  request_deserializers = {
    ('throttlerservice.Throttler', 'GetConfiguration'): throttlerdata_pb2.GetConfigurationRequest.FromString,
    ('throttlerservice.Throttler', 'MaxRates'): throttlerdata_pb2.MaxRatesRequest.FromString,
    ('throttlerservice.Throttler', 'ResetConfiguration'): throttlerdata_pb2.ResetConfigurationRequest.FromString,
    ('throttlerservice.Throttler', 'SetMaxRate'): throttlerdata_pb2.SetMaxRateRequest.FromString,
    ('throttlerservice.Throttler', 'UpdateConfiguration'): throttlerdata_pb2.UpdateConfigurationRequest.FromString,
  }
  response_serializers = {
    ('throttlerservice.Throttler', 'GetConfiguration'): throttlerdata_pb2.GetConfigurationResponse.SerializeToString,
    ('throttlerservice.Throttler', 'MaxRates'): throttlerdata_pb2.MaxRatesResponse.SerializeToString,
    ('throttlerservice.Throttler', 'ResetConfiguration'): throttlerdata_pb2.ResetConfigurationResponse.SerializeToString,
    ('throttlerservice.Throttler', 'SetMaxRate'): throttlerdata_pb2.SetMaxRateResponse.SerializeToString,
    ('throttlerservice.Throttler', 'UpdateConfiguration'): throttlerdata_pb2.UpdateConfigurationResponse.SerializeToString,
  }
  method_implementations = {
    ('throttlerservice.Throttler', 'GetConfiguration'): face_utilities.unary_unary_inline(servicer.GetConfiguration),
    ('throttlerservice.Throttler', 'MaxRates'): face_utilities.unary_unary_inline(servicer.MaxRates),
    ('throttlerservice.Throttler', 'ResetConfiguration'): face_utilities.unary_unary_inline(servicer.ResetConfiguration),
    ('throttlerservice.Throttler', 'SetMaxRate'): face_utilities.unary_unary_inline(servicer.SetMaxRate),
    ('throttlerservice.Throttler', 'UpdateConfiguration'): face_utilities.unary_unary_inline(servicer.UpdateConfiguration),
  }
  server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
  return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Throttler_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
  """Create a beta-API dynamic client stub for the Throttler service.

  Generated code; the repeated `import throttlerdata_pb2` lines below
  are a code-generator artifact and are harmless.
  """
  import throttlerdata_pb2
  import throttlerdata_pb2
  import throttlerdata_pb2
  import throttlerdata_pb2
  import throttlerdata_pb2
  import throttlerdata_pb2
  import throttlerdata_pb2
  import throttlerdata_pb2
  import throttlerdata_pb2
  import throttlerdata_pb2
  request_serializers = {
    ('throttlerservice.Throttler', 'GetConfiguration'): throttlerdata_pb2.GetConfigurationRequest.SerializeToString,
    ('throttlerservice.Throttler', 'MaxRates'): throttlerdata_pb2.MaxRatesRequest.SerializeToString,
    ('throttlerservice.Throttler', 'ResetConfiguration'): throttlerdata_pb2.ResetConfigurationRequest.SerializeToString,
    ('throttlerservice.Throttler', 'SetMaxRate'): throttlerdata_pb2.SetMaxRateRequest.SerializeToString,
    ('throttlerservice.Throttler', 'UpdateConfiguration'): throttlerdata_pb2.UpdateConfigurationRequest.SerializeToString,
  }
  response_deserializers = {
    ('throttlerservice.Throttler', 'GetConfiguration'): throttlerdata_pb2.GetConfigurationResponse.FromString,
    ('throttlerservice.Throttler', 'MaxRates'): throttlerdata_pb2.MaxRatesResponse.FromString,
    ('throttlerservice.Throttler', 'ResetConfiguration'): throttlerdata_pb2.ResetConfigurationResponse.FromString,
    ('throttlerservice.Throttler', 'SetMaxRate'): throttlerdata_pb2.SetMaxRateResponse.FromString,
    ('throttlerservice.Throttler', 'UpdateConfiguration'): throttlerdata_pb2.UpdateConfigurationResponse.FromString,
  }
  cardinalities = {
    'GetConfiguration': cardinality.Cardinality.UNARY_UNARY,
    'MaxRates': cardinality.Cardinality.UNARY_UNARY,
    'ResetConfiguration': cardinality.Cardinality.UNARY_UNARY,
    'SetMaxRate': cardinality.Cardinality.UNARY_UNARY,
    'UpdateConfiguration': cardinality.Cardinality.UNARY_UNARY,
  }
  stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
  return beta_implementations.dynamic_stub(channel, 'throttlerservice.Throttler', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
|
bowlofstew/vitess | test/keyspace_util.py | <filename>test/keyspace_util.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""This module allows you to bring up and tear down keyspaces.
"""
import os
import environment
import tablet
import utils
class TestEnv(object):
  """Main class for this module.

  Brings up (and tears down) keyspaces of vttablets with their mysqld
  instances for integration tests.
  """

  def __init__(self):
    # Maps '<keyspace>.<shard>.<type>[.<index>]' -> tablet.Tablet.
    self.tablet_map = {}

  def launch(
      self, keyspace, shards=None, replica_count=1, rdonly_count=0, ddls=None):
    """Launch test environment.

    Args:
      keyspace: name of the keyspace to create.
      shards: list of shard names; defaults to the single unsharded '0'.
      replica_count: replicas per shard; must be >= 1 (semi-sync).
      rdonly_count: rdonly tablets per shard.
      ddls: optional list of SQL statements to apply to the keyspace.

    Raises:
      Exception: if replica_count < 1.
    """
    if replica_count < 1:
      raise Exception('replica_count=%d < 1; tests now use semi-sync'
                      ' and must have at least one replica' % replica_count)
    self.tablets = []
    self.master_tablets = []
    utils.run_vtctl(['CreateKeyspace', keyspace])
    if not shards or shards[0] == '0':
      shards = ['0']

    # Create tablets and start mysqld.
    procs = []
    for shard in shards:
      procs.append(self._new_tablet(keyspace, shard, 'master', None))
      for i in xrange(replica_count):
        procs.append(self._new_tablet(keyspace, shard, 'replica', i))
      for i in xrange(rdonly_count):
        procs.append(self._new_tablet(keyspace, shard, 'rdonly', i))
    utils.wait_procs(procs)

    # init tablets.
    for shard in shards:
      tablet_index = 0
      self._init_tablet(keyspace, shard, 'master', None, tablet_index)
      tablet_index += 1
      for i in xrange(replica_count):
        self._init_tablet(keyspace, shard, 'replica', i, tablet_index)
        tablet_index += 1
      for i in xrange(rdonly_count):
        self._init_tablet(keyspace, shard, 'rdonly', i, tablet_index)
        tablet_index += 1

    # Start tablets.
    for shard in shards:
      self._start_tablet(keyspace, shard, 'master', None)
      for i in xrange(replica_count):
        self._start_tablet(keyspace, shard, 'replica', i)
      for i in xrange(rdonly_count):
        self._start_tablet(keyspace, shard, 'rdonly', i)

    for t in self.master_tablets:
      t.wait_for_vttablet_state('SERVING')
    for t in self.tablets:
      if t not in self.master_tablets:
        t.wait_for_vttablet_state('NOT_SERVING')

    # Promote the designated masters; they started as replicas.
    for t in self.master_tablets:
      utils.run_vtctl(['InitShardMaster', '-force', keyspace+'/'+t.shard,
                       t.tablet_alias], auto_log=True)
      t.tablet_type = 'master'

    for t in self.tablets:
      t.wait_for_vttablet_state('SERVING')

    # BUG FIX: ddls defaults to None; the original iterated it
    # unconditionally, raising TypeError whenever no DDLs were passed.
    for ddl in ddls or []:
      fname = os.path.join(environment.tmproot, 'ddl.sql')
      with open(fname, 'w') as f:
        f.write(ddl)
      utils.run_vtctl(['ApplySchema', '-sql-file', fname, keyspace])

  def teardown(self):
    """Kill all tablets, stop their mysqld, and remove their data trees."""
    all_tablets = self.tablet_map.values()
    tablet.kill_tablets(all_tablets)
    teardown_procs = [t.teardown_mysql() for t in all_tablets]
    utils.wait_procs(teardown_procs, raise_on_error=False)
    for t in all_tablets:
      t.remove_tree()

  def _new_tablet(self, keyspace, shard, tablet_type, index):
    """Create a tablet, record it in tablet_map, and start mysqld."""
    t = tablet.Tablet()
    self.tablets.append(t)
    if tablet_type == 'master':
      self.master_tablets.append(t)
      key = '%s.%s.%s' % (keyspace, shard, tablet_type)
    else:
      key = '%s.%s.%s.%s' % (keyspace, shard, tablet_type, index)
    self.tablet_map[key] = t
    return t.init_mysql()

  def _init_tablet(self, keyspace, shard, tablet_type, index, tablet_index):
    """Run init_tablet for the tablet identified by its map key."""
    if tablet_type == 'master':
      key = '%s.%s.%s' % (keyspace, shard, tablet_type)
    else:
      key = '%s.%s.%s.%s' % (keyspace, shard, tablet_type, index)
    t = self.tablet_map[key]
    t.init_tablet(tablet_type, keyspace, shard, tablet_index=tablet_index)

  def _start_tablet(self, keyspace, shard, tablet_type, index):
    """Start a tablet.

    Masters are started as replicas; InitShardMaster promotes them in
    launch() afterwards.
    """
    init_tablet_type = tablet_type
    if tablet_type == 'master':
      init_tablet_type = 'replica'
      key = '%s.%s.%s' % (keyspace, shard, tablet_type)
    else:
      key = '%s.%s.%s.%s' % (keyspace, shard, tablet_type, index)
    t = self.tablet_map[key]
    t.create_db('vt_' + keyspace)
    return t.start_vttablet(
        wait_for_state=None, init_tablet_type=init_tablet_type,
        init_keyspace=keyspace, init_shard=shard,
        extra_args=['-queryserver-config-schema-reload-time', '1'])
|
bowlofstew/vitess | py/vtdb/topology.py | <filename>py/vtdb/topology.py
"""Deprecated module that holds keyspace / sharding methods."""
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
# DEPRECATED module, just one hardcoded function left, so vtrouting.py
# is not changed yet. Will be cleaned up soon.
from vtdb import keyrange_constants
def get_sharding_col(keyspace_name):
  """Return the hardcoded sharding column (name, type) for any keyspace.

  Deprecated shim: always returns ('keyspace_id', KIT_UINT64) regardless
  of keyspace_name (kept for vtrouting.py compatibility).
  """
  _ = keyspace_name
  return 'keyspace_id', keyrange_constants.KIT_UINT64
|
bowlofstew/vitess | test/end2end/base_environment.py | <reponame>bowlofstew/vitess
"""Base environment for full end2end tests.
Contains functions that all environments should implement along with functions
common to all environments.
"""
import json
from vttest import sharding_utils
class VitessEnvironmentError(Exception):
  """Raised when an operation is unsupported in a given Vitess environment."""
  pass
class BaseEnvironment(object):
"""Base Environment."""
  def __init__(self):
    # Helper for issuing vtctl commands; populated by concrete
    # environments (base class has none).
    self.vtctl_helper = None
  def create(self, **kwargs):
    """Create the environment.

    Args:
      **kwargs: kwargs parameterizing the environment.

    Raises:
      VitessEnvironmentError: Raised if unsupported
    """
    # Base class stub: concrete environments override this.
    raise VitessEnvironmentError(
        'Create unsupported in this environment')
  def use_named(self, instance_name):
    """Populate this instance based on a pre-existing environment.

    Args:
      instance_name: Name of the existing environment instance (string)
    """
    # NOTE(review): instance_name is currently unused here; subclasses
    # presumably consume it before/after calling super(). Also relies on
    # self.keyspaces, self.num_shards, self.primary_cells and
    # self.vtctl_helper having been populated already -- confirm in
    # concrete environments.
    self.master_capable_tablets = {}
    for keyspace, num_shards in zip(self.keyspaces, self.num_shards):
      self.master_capable_tablets[keyspace] = {}
      for shard_name in sharding_utils.get_shard_names(num_shards):
        raw_shard_tablets = self.vtctl_helper.execute_vtctl_command(
            ['ListShardTablets', '%s/%s' % (keyspace, shard_name)])
        # Each non-empty output line is split on spaces; field 0 is the
        # tablet alias and field 3 its type (assumed ListShardTablets
        # output format -- TODO confirm against vtctl).
        split_shard_tablets = [
            t.split(' ') for t in raw_shard_tablets.split('\n') if t]
        self.master_capable_tablets[keyspace][shard_name] = [
            t[0] for t in split_shard_tablets
            if (self.get_tablet_cell(t[0]) in self.primary_cells
                and (t[3] == 'master' or t[3] == 'replica'))]
  def destroy(self):
    """Teardown the environment.

    Raises:
      VitessEnvironmentError: Raised if unsupported
    """
    # Base class stub: concrete environments override this.
    raise VitessEnvironmentError(
        'Destroy unsupported in this environment')
  def get_vtgate_conn(self, cell):
    """Gets a connection to a vtgate in a particular cell.

    Args:
      cell: cell to obtain a vtgate connection from (string)

    Returns:
      A vtgate connection.

    Raises:
      VitessEnvironmentError: Raised if unsupported
    """
    # Base class stub: concrete environments override this.
    raise VitessEnvironmentError(
        'Get VTGate Conn unsupported in this environment')
  def restart_mysql_task(
      self, cell, keyspace, shard, task_num, dbtype, task_name, is_alloc=False):
    """Restart a job within the mysql alloc or the whole alloc itself.

    Args:
      cell: cell value containing the vttablet alloc to restart (string).
      keyspace: keyspace (string).
      shard: shard number (int).
      task_num: which vttablet alloc task to restart (int).
      dbtype: which dbtype to restart (replica | rdonly) (string).
      task_name: Name of specific task (droid, vttablet, mysql, etc.)
      is_alloc: True to restart entire alloc

    Returns:
      return restart return val

    Raises:
      VitessEnvironmentError: Raised if unsupported
    """
    # Base class stub: concrete environments override this.
    raise VitessEnvironmentError(
        'Restart MySQL task unsupported in this environment')
def wait_for_good_failover_status(
    self, keyspace, shard_name, failover_completion_timeout_s=60):
  """Wait until failover status shows complete.

  Repeatedly queries the master tablet for failover status until it is 'OFF'.
  Most of the time the failover status check will immediately pass. When a
  failover is in progress, it tends to take a good 5 to 10 attempts before
  status is 'OFF'.

  Base implementation: subclasses that support this check must override.

  Args:
    keyspace: Name of the keyspace to reparent (string)
    shard_name: name of the shard to verify (e.g. '-80') (string)
    failover_completion_timeout_s: Failover completion timeout (int)

  Raises:
    VitessEnvironmentError: Raised if unsupported
  """
  raise VitessEnvironmentError(
      'Wait for good failover status unsupported in this environment')
def wait_for_healthy_tablets(self):
  """Wait until all tablets report healthy status.

  Base implementation: subclasses that support health polling must override.

  Raises:
    VitessEnvironmentError: Raised if unsupported
  """
  raise VitessEnvironmentError(
      'Wait for healthy tablets unsupported in this environment')
def get_next_master(self, keyspace, shard_name, cross_cell=False):
  """Determine what instance to select as the next master.

  If the next master is cross-cell, rotate the master cell and use instance 0
  as the master. Otherwise, rotate the instance number.

  Args:
    keyspace: the name of the keyspace to reparent (string).
    shard_name: name of the shard to reparent (string).
    cross_cell: Whether the desired reparent is to another cell (bool).

  Returns:
    Tuple of cell, task num, tablet uid (string, int, string)
  """
  num_tasks = self.keyspace_alias_to_num_instances_dict[keyspace]['replica']
  current_master = self.get_current_master_name(keyspace, shard_name)
  current_master_cell = self.get_tablet_cell(current_master)
  next_master_cell = current_master_cell
  next_master_task = 0
  if cross_cell:
    # Rotate to the next primary cell; task 0 becomes the new master there.
    next_master_cell = self.primary_cells[(
        self.primary_cells.index(current_master_cell) + 1) % len(
            self.primary_cells)]
  else:
    # Same cell: rotate the task number among the replica tasks.
    next_master_task = (
        (self.get_tablet_task_number(current_master) + 1) % num_tasks)
  # NOTE(review): indexing tablets_in_cell by task number assumes the
  # master_capable_tablets list order matches task numbering — confirm.
  tablets_in_cell = [tablet for tablet in
                     self.master_capable_tablets[keyspace][shard_name]
                     if self.get_tablet_cell(tablet) == next_master_cell]
  return (next_master_cell, next_master_task,
          tablets_in_cell[next_master_task])
def get_tablet_task_number(self, tablet_name):
  """Gets a tablet's 0 based task number.

  Base implementation: subclasses that map tablets to tasks must override.

  Args:
    tablet_name: Name of the tablet (string)

  Returns:
    0 based task number (int).

  Raises:
    VitessEnvironmentError: Raised if unsupported
  """
  raise VitessEnvironmentError(
      'Get tablet task number unsupported in this environment')
def external_reparent(self, keyspace, new_cell, shard, new_task_num):
  """Perform a reparent through external means (Orchestrator, etc.).

  Base implementation: subclasses that support this must override.

  Args:
    keyspace: name of the keyspace to reparent (string)
    new_cell: new master cell (string)
    shard: 0 based shard index to reparent (int)
    new_task_num: 0 based task num to become next master (int)

  Raises:
    VitessEnvironmentError: Raised if unsupported
  """
  raise VitessEnvironmentError(
      'External reparent unsupported in this environment')
def internal_reparent(self, keyspace, new_master_uid, emergency=False):
  """Perform a reparent through Vitess itself (counterpart of external_reparent).

  Base implementation: subclasses that support this must override.

  Args:
    keyspace: name of the keyspace to reparent (string)
    new_master_uid: uid of the tablet to become the new master
    emergency: True to perform an emergency reparent (bool)

  Raises:
    VitessEnvironmentError: Raised if unsupported
  """
  raise VitessEnvironmentError(
      'Internal reparent unsupported in this environment')
def get_current_master_name(self, keyspace, shard_name):
  """Obtains current master's tablet name (cell-uid).

  Args:
    keyspace: name of the keyspace to get information on the master
    shard_name: string representation of the shard in question (e.g. '-80')

  Returns:
    master tablet name (cell-uid) (string)
  """
  raw_shard_info = self.vtctl_helper.execute_vtctl_command(
      ['GetShard', '{0}/{1}'.format(keyspace, shard_name)])
  alias = json.loads(raw_shard_info)['master_alias']
  return '%s-%s' % (alias['cell'], alias['uid'])
def get_tablet_cell(self, tablet_name):
  """Get the cell of a tablet.

  Args:
    tablet_name: Name of the tablet, including cell prefix. (string)

  Returns:
    Tablet's cell. (string)
  """
  # Everything before the first '-' is the cell name.
  cell, _, _ = tablet_name.partition('-')
  return cell
def get_tablet_uid(self, tablet_name):
  """Get the uid of a tablet.

  Args:
    tablet_name: Name of the tablet, including cell prefix. (string)

  Returns:
    Tablet's uid. (int)
  """
  # Everything after the last '-' is the numeric uid.
  return int(tablet_name.rsplit('-', 1)[-1])
def get_tablet_shard(self, tablet_name):
  """Get the shard of a tablet.

  Args:
    tablet_name: Name of the tablet, including cell prefix. (string)

  Returns:
    Tablet's shard. (string)
  """
  tablet_info = json.loads(self.vtctl_helper.execute_vtctl_command(
      ['GetTablet', tablet_name]))
  return tablet_info['shard']
def get_tablet_type(self, tablet_name):
  """Get the current type of the tablet as reported via vtctl.

  Args:
    tablet_name: Name of the tablet, including cell prefix. (string)

  Returns:
    Current tablet type (e.g. spare, replica, rdonly). (string)
  """
  tablet_info = json.loads(self.vtctl_helper.execute_vtctl_command(
      ['GetTablet', tablet_name]))
  return tablet_info['type']
def get_tablet_ip_port(self, tablet_name):
  """Get the ip and port of the tablet as reported via vtctl.

  Args:
    tablet_name: Name of the tablet, including cell prefix. (string)

  Returns:
    ip:port (string)
  """
  raw_info = self.vtctl_helper.execute_vtctl_command(['GetTablet', tablet_name])
  tablet_info = json.loads(raw_info)
  ip = tablet_info['ip']
  vt_port = tablet_info['port_map']['vt']
  return '%s:%s' % (ip, vt_port)
def get_tablet_types_for_shard(self, keyspace, shard_name):
  """Get the types for all tablets in a shard.

  Args:
    keyspace: Name of keyspace to get tablet information on. (string)
    shard_name: single shard to obtain tablet types from (string)

  Returns:
    List of pairs of tablet's name and type
  """
  tablet_info = []
  raw_tablets = self.vtctl_helper.execute_vtctl_command(
      ['ListShardTablets', '{0}/{1}'.format(keyspace, shard_name)])
  for line in raw_tablets.split('\n'):
    if not line:
      continue
    # Column 0 is the tablet alias, column 3 is the tablet type.
    words = line.split()
    tablet_info.append((words[0], words[3]))
  return tablet_info
def get_all_tablet_types(self, keyspace, num_shards):
  """Get the types for all tablets in a keyspace.

  Args:
    keyspace: Name of keyspace to get tablet information on. (string)
    num_shards: number of shards in the keyspace. (int)

  Returns:
    List of pairs of tablet's name and type
  """
  all_info = []
  for shard_name in sharding_utils.get_shard_names(num_shards):
    all_info.extend(self.get_tablet_types_for_shard(keyspace, shard_name))
  return all_info
|
bowlofstew/vitess | test/topo_flavor/__init__.py | <filename>test/topo_flavor/__init__.py
#!/usr/bin/env python
# Copyright 2014, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
|
bowlofstew/vitess | replace_doc_link.py | <reponame>bowlofstew/vitess<gh_stars>1-10
# Copyright 2015, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
# ./replace_doc_link doc_link_dir doc_to_be_replaced
#
# This tool replaces all links in specified doc by the new links given in the doc link dir.
#
import os
import re
import sys
# NOTE: this is a Python 2 script (print statements).
if __name__ == '__main__':
  if len(sys.argv) < 3:
    print "Usage: ./replace_doc_link.py doc_link_dir doc"
    exit(1)
  doc_link_dir = sys.argv[1]
  doc = sys.argv[2]
  # Map each link target's basename (the part after the last '-' in the
  # file name, extension included) to its extensionless path in the link dir.
  files = next(os.walk(doc_link_dir))[2]
  filename_dict = {f.split('-')[-1]: os.path.join(doc_link_dir, os.path.splitext(f)[0]) for f in files}
  # print filename_dict
  for line in open(doc).readlines():
    line = line.rstrip()  # remove newline
    # Rebuild the line piecewise: copy text between matches, and rewrite
    # every markdown link "[title](target.md)" whose target is known.
    ans = []
    pos = 0
    for m in re.finditer(r'\[(.*)\]\((.*.md)\)', line):
      title, link = m.group(1), m.group(2)
      filename = link.split('/')[-1]
      if filename in filename_dict:
        ans.append(line[pos:m.start()])
        ans.append('[' + title + ']')
        ans.append('( {{ site.url }}/' + filename_dict[filename] + ')')
        pos = m.end()
    ans.append(line[pos:])
    print ''.join(ans)
|
bowlofstew/vitess | test/end2end/vtctl_helper.py | """Helper module for running vtctl commands.
This module allows for retry logic to ensure that vtctl commands are properly
executed. This should help reduce flakiness in the sandbox.
"""
import logging
import os
import re
import subprocess
import tempfile
import time
from vtctl import vtctl_client
class VtctlClientError(Exception):
  """Raised when a vtctl command cannot be executed successfully."""
class VtctlHelper(object):
  """Various functions for running vtctl commands with retry logic."""

  def __init__(self, protocol, vtctl_addr):
    self.protocol = protocol
    self.client = None
    self.vtctl_addr = vtctl_addr
    # gRPC commands shell out to the vtctlclient binary; every other
    # protocol keeps a persistent python client connection.
    if vtctl_addr and protocol != 'grpc':
      self.client = vtctl_client.connect(protocol, vtctl_addr, 30)

  def execute_vtctl_command(self, args, action_timeout=60.0, expect_fail=False,
                            max_wait_s=180.0):
    """Executes a vtctl command on a running vtctl job, retrying on error.

    Returns as soon as one attempt succeeds; otherwise retries every 5s
    until max_wait_s has elapsed.

    Args:
      args: args to pass to vtctl_client's execute_vtctl_command function
      action_timeout: total timeout for the action (float, in seconds)
      expect_fail: whether or not the vtctl command should fail (bool)
      max_wait_s: maximum amount of time to wait for success (float, in seconds)

    Returns:
      Result of executing vtctl command

    Raises:
      VtctlClientError: Could not successfully call execute_vtctl_command,
        or the command failed as expected (when expect_fail is set).
    """
    deadline = time.time() + max_wait_s
    while time.time() < deadline:
      try:
        if self.protocol != 'grpc':
          return vtctl_client.execute_vtctl_command(
              self.client, args, action_timeout=action_timeout)
        return subprocess.check_output(
            ['vtctlclient', '-vtctl_client_protocol', self.protocol,
             '-server', self.vtctl_addr] + args, stderr=subprocess.STDOUT)
      except Exception as e:
        if expect_fail:
          logging.info('Expected vtctl error, got: %s', e.message or e.output)
          raise VtctlClientError('Caught an expected vtctl error')
        logging.info('Vtctl error: %s', e.message or e.output)
        time.sleep(5)
    raise VtctlClientError('Timed out on vtctl_client execute_vtctl_command')

  def execute_vtctl_command_until_success(
      self, args, max_wait_s=180.0, retry_wait_s=5.0):
    """Executes a vtctl command repeatedly until it returns nothing.

    Do not use this if you expect execute_vtctl_command to return data.

    Args:
      args: args to pass to vtctl_client's execute_vtctl_command function
      max_wait_s: maximum amount of time to wait for success (float, in seconds)
      retry_wait_s: time between vtctl calls to wait (float, in seconds)

    Raises:
      VtctlClientError: execute_vtctl_command never returned empty data
    """
    deadline = time.time() + max_wait_s
    while time.time() < deadline:
      try:
        # An empty (falsy) result means success for commands with no output.
        if not self.execute_vtctl_command(args):
          return
      except VtctlClientError:
        pass
      time.sleep(retry_wait_s)
    raise VtctlClientError(
        'Timed out on vtctl_client execute_vtctl_command_until_success')
|
bowlofstew/vitess | tools/proto3to2.py | #!/usr/bin/env python
import sys
import re
import string
# NOTE: this is a Python 2 filter script (print statements): it reads a
# proto3 file on stdin and writes a proto2-compatible version to stdout.
syntax = re.compile(r'^\s*syntax\s*=\s*"proto3";')
package = re.compile(r'^\s*package\s*(\S+);')
missing_optional = re.compile(r'^(\s+)(\S+)(\s+\S+\s*=\s*\S+;)')
map_type = re.compile(r'^(\s*)map\s*<(\S+),\s*(\S+)>\s+(\S+)\s*=\s*(\S+);')
for line in sys.stdin:
  # syntax = "proto3";
  if syntax.match(line):
    print 'syntax = "proto2";'
    continue
  m = package.match(line)
  if m:
    pkg = m.group(1)
    print line
    # Add PHP-specific options.
    print 'import "php.proto";'
    print 'option (php.namespace) = "Vitess.Proto.%s";' % pkg.capitalize()
    print 'option (php.multifile) = true;'
    continue
  # map<key, value> field = index;
  # proto2 has no map type: emit an explicit Entry message plus a
  # repeated field of that message type.
  m = map_type.match(line)
  if m:
    (indent, key, value, field, index) = m.groups()
    entry = string.capwords(field, '_').replace('_', '') + 'Entry'
    print indent + 'message %s { optional %s key = 1; optional %s value = 2; }' % (entry, key, value)
    print indent + 'repeated %s %s = %s;' % (entry, field, index)
    continue
  # type field = index;
  # proto2 requires an explicit label on fields; prepend 'optional'.
  # NOTE(review): 'type' shadows the builtin here — harmless but worth renaming.
  m = missing_optional.match(line)
  if m:
    (indent, type, rest) = m.groups()
    if type != 'option':
      print indent + 'optional %s%s' % (type, rest)
      continue
  print line,
|
bowlofstew/vitess | test/tabletmanager.py | <gh_stars>0
#!/usr/bin/env python
import json
import logging
import time
import unittest
import urllib
import urllib2
import MySQLdb
from vtproto import topodata_pb2
import environment
import utils
import tablet
from mysql_flavor import mysql_flavor
from protocols_flavor import protocols_flavor
# The two tablets used throughout this module; 62344 generally serves as
# the master and 62044 as the replica/rdonly under test.
tablet_62344 = tablet.Tablet(62344)
tablet_62044 = tablet.Tablet(62044)
def setUpModule():
  """Start the topo server, both mysqld instances and vtctld."""
  try:
    if environment.topo_server().flavor() == 'zookeeper':
      # this is a one-off test to make sure our zookeeper implementation
      # behaves with a server that is not DNS-resolveable
      environment.topo_server().setup(add_bad_host=True)
    else:
      environment.topo_server().setup()
    # start mysql instance external to the test
    setup_procs = [
        tablet_62344.init_mysql(),
        tablet_62044.init_mysql(),
    ]
    utils.Vtctld().start()
    utils.wait_procs(setup_procs)
  except:
    # Partial setup must be torn down before re-raising, or later
    # modules would collide with the leftover processes.
    tearDownModule()
    raise
def tearDownModule():
  """Stop mysqld instances, the topo server and clean up temp state."""
  utils.required_teardown()
  if utils.options.skip_teardown:
    return
  teardown_procs = [
      tablet_62344.teardown_mysql(),
      tablet_62044.teardown_mysql(),
  ]
  # raise_on_error=False: teardown is best-effort, mysqld may already be down.
  utils.wait_procs(teardown_procs, raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  tablet_62344.remove_tree()
  tablet_62044.remove_tree()
class TestTabletManager(unittest.TestCase):
def tearDown(self):
  """Reset topo and both tablets to a clean state after each test."""
  tablet.Tablet.check_vttablet_count()
  environment.topo_server().wipe()
  for t in [tablet_62344, tablet_62044]:
    t.reset_replication()
    t.set_semi_sync_enabled(master=False)
    t.clean_dbs()
# run twice to check behavior with existing znode data
def test_sanity(self):
  """Run the sanity scenario twice; the second pass exercises behavior
  with pre-existing znode data."""
  for _ in range(2):
    self._test_sanity()
def _test_sanity(self):
  """Bring up a master tablet from scratch and exercise basic vtctl actions."""
  # Start up a master mysql and vttablet
  utils.run_vtctl(['CreateKeyspace', '-force', 'test_keyspace'])
  utils.run_vtctl(['createshard', '-force', 'test_keyspace/0'])
  tablet_62344.init_tablet('master', 'test_keyspace', '0', parent=False)
  utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'])
  utils.validate_topology()
  # if these statements don't run before the tablet it will wedge
  # waiting for the db to become accessible. this is more a bug than
  # a feature.
  tablet_62344.populate('vt_test_keyspace', self._create_vt_select_test,
                        self._populate_vt_select_test)
  tablet_62344.start_vttablet()
  # make sure the query service is started right away.
  qr = tablet_62344.execute('select id, msg from vt_select_test')
  self.assertEqual(len(qr['rows']), 4,
                   'expected 4 rows in vt_select_test: %s' % str(qr))
  self.assertEqual(qr['fields'][0]['name'], 'id')
  self.assertEqual(qr['fields'][1]['name'], 'msg')
  # test exclude_field_names to vttablet works as expected.
  qr = tablet_62344.execute('select id, msg from vt_select_test',
                            execute_options='exclude_field_names:true ')
  self.assertEqual(len(qr['rows']), 4,
                   'expected 4 rows in vt_select_test: %s' % str(qr))
  self.assertNotIn('name', qr['fields'][0])
  self.assertNotIn('name', qr['fields'][1])
  # make sure direct dba queries work
  query_result = utils.run_vtctl_json(
      ['ExecuteFetchAsDba', '-json', tablet_62344.tablet_alias,
       'select * from vt_test_keyspace.vt_select_test'])
  self.assertEqual(
      len(query_result['rows']), 4,
      'expected 4 rows in vt_select_test: %s' % str(query_result))
  self.assertEqual(
      len(query_result['fields']), 2,
      'expected 2 fields in vt_select_test: %s' % str(query_result))
  # check Ping / RefreshState
  utils.run_vtctl(['Ping', tablet_62344.tablet_alias])
  utils.run_vtctl(['RefreshState', tablet_62344.tablet_alias])
  # Quickly check basic actions.
  utils.run_vtctl(['SetReadOnly', tablet_62344.tablet_alias])
  utils.wait_db_read_only(62344)
  utils.run_vtctl(['SetReadWrite', tablet_62344.tablet_alias])
  utils.check_db_read_write(62344)
  utils.run_vtctl(['DemoteMaster', tablet_62344.tablet_alias])
  utils.wait_db_read_only(62344)
  utils.validate_topology()
  utils.run_vtctl(['ValidateKeyspace', 'test_keyspace'])
  # not pinging tablets, as it enables replication checks, and they
  # break because we only have a single master, no slaves
  utils.run_vtctl(['ValidateShard', '-ping-tablets=false',
                   'test_keyspace/0'])
  tablet_62344.kill_vttablet()
# DDL used by the sanity tests to create the fixture table.
_create_vt_select_test = '''create table vt_select_test (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB'''
# Four fixture rows ('test 0' .. 'test 3'); the tests assert on this count.
_populate_vt_select_test = [
    "insert into vt_select_test (msg) values ('test %s')" % x
    for x in xrange(4)]
def test_actions_and_timeouts(self):
  """Check that a busy tablet makes a concurrent vtctl action time out."""
  # Start up a master mysql and vttablet
  utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
  tablet_62344.init_tablet('master', 'test_keyspace', '0')
  utils.validate_topology()
  tablet_62344.create_db('vt_test_keyspace')
  tablet_62344.start_vttablet()
  utils.run_vtctl(['Ping', tablet_62344.tablet_alias])
  # schedule long action in the background, sleep a little bit to make sure
  # it started to run
  args = (environment.binary_args('vtctl') +
          environment.topo_server().flags() +
          ['-tablet_manager_protocol',
           protocols_flavor().tablet_manager_protocol(),
           '-tablet_protocol', protocols_flavor().tabletconn_protocol(),
           '-log_dir', environment.vtlogroot,
           'Sleep', tablet_62344.tablet_alias, '10s'])
  bg = utils.run_bg(args)
  time.sleep(3)
  # try a frontend RefreshState that should timeout as the tablet is busy
  # running the other one
  _, stderr = utils.run_vtctl(
      ['-wait-time', '3s', 'RefreshState', tablet_62344.tablet_alias],
      expect_fail=True)
  self.assertIn(protocols_flavor().rpc_timeout_message(), stderr)
  # wait for the background vtctl
  bg.wait()
  if environment.topo_server().flavor() == 'zookeeper':
    # extra small test: we ran for a while, get the states we were in,
    # make sure they're accounted for properly
    # first the query engine States
    v = utils.get_vars(tablet_62344.port)
    logging.debug('vars: %s', v)
    # then the Zookeeper connections
    if v['ZkCachedConn']['test_nj'] != 'Connected':
      self.fail('invalid zk test_nj state: %s' %
                v['ZkCachedConn']['test_nj'])
    if v['ZkCachedConn']['global'] != 'Connected':
      self.fail('invalid zk global state: %s' %
                v['ZkCachedConn']['global'])
    if v['TabletType'] != 'master':
      self.fail('TabletType not exported correctly')
  tablet_62344.kill_vttablet()
def _run_hook(self, params, expected_status, expected_stdout,
              expected_stderr):
  """Run an ExecuteHook vtctl command and verify its result.

  Args:
    params: hook name plus its arguments, passed to ExecuteHook (list).
    expected_status: expected hook exit status (int).
    expected_stdout: expected stdout. Either a single string — where a
      trailing '%' means "prefix match up to the '%'" — or a list of
      acceptable exact values (any match passes).
    expected_stderr: expected stderr (exact string).
  """
  hr = utils.run_vtctl_json(['ExecuteHook', tablet_62344.tablet_alias] +
                            params)
  self.assertEqual(hr['ExitStatus'], expected_status)
  if isinstance(expected_stdout, basestring):
    if expected_stdout[-1:] == '%':
      # Trailing '%' marks a prefix match: compare everything before it.
      self.assertEqual(
          hr['Stdout'][:len(expected_stdout)-1],
          expected_stdout[:len(expected_stdout)-1])
    else:
      self.assertEqual(hr['Stdout'], expected_stdout)
  else:
    # A list of alternatives: any exact match is acceptable.
    if hr['Stdout'] not in expected_stdout:
      # BUG FIX: this used self.assertFail(), which does not exist on
      # unittest.TestCase and raised AttributeError instead of reporting
      # the mismatch; self.fail() is the correct API.
      self.fail(
          'cannot find expected %s in %s' %
          (str(expected_stdout), hr['Stdout']))
  self.assertEqual(hr['Stderr'], expected_stderr)
def test_hook(self):
  """Exercise ExecuteHook: success, stderr, failure, missing and bad names."""
  utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
  # create the database so vttablets start, as it is serving
  tablet_62344.create_db('vt_test_keyspace')
  tablet_62344.init_tablet('master', 'test_keyspace', '0', start=True)
  # test a regular program works
  # (both parameter orders are accepted, hence the list of alternatives)
  self._run_hook(['test.sh', '--flag1', '--param1=hello'], 0,
                 ['TABLET_ALIAS: test_nj-0000062344\n'
                  'PARAM: --flag1\n'
                  'PARAM: --param1=hello\n',
                  'TABLET_ALIAS: test_nj-0000062344\n'
                  'PARAM: --param1=hello\n'
                  'PARAM: --flag1\n'],
                 '')
  # test stderr output
  self._run_hook(['test.sh', '--to-stderr'], 0,
                 'TABLET_ALIAS: test_nj-0000062344\n'
                 'PARAM: --to-stderr\n',
                 'ERR: --to-stderr\n')
  # test commands that fail
  self._run_hook(['test.sh', '--exit-error'], 1,
                 'TABLET_ALIAS: test_nj-0000062344\n'
                 'PARAM: --exit-error\n',
                 'ERROR: exit status 1\n')
  # test hook that is not present
  self._run_hook(['not_here.sh'], -1,
                 'Skipping missing hook: /%',  # cannot go further, local path
                 '')
  # test hook with invalid name
  _, err = utils.run_vtctl(['--alsologtostderr', 'ExecuteHook',
                            tablet_62344.tablet_alias,
                            '/bin/ls'],
                           mode=utils.VTCTL_VTCTL, trap_output=True,
                           raise_on_error=False)
  expected = "action failed: ExecuteHook hook name cannot have a '/' in it"
  self.assertIn(expected, err)
  tablet_62344.kill_vttablet()
def test_restart(self):
  """Test restart behavior of vttablet.

  Tests that when starting a second vttablet with the same configuration as
  another one, it will kill the previous process and take over listening on
  the socket.

  If vttablet listens to other ports (like gRPC), this feature will
  break. We believe it is not widely used, so we're OK with this for now.
  (container based installations usually handle tablet restarts
  by using a different set of servers, and do not rely on this feature
  at all).
  """
  if environment.topo_server().flavor() != 'zookeeper':
    logging.info('Skipping this test in non-github tree')
    return
  if tablet_62344.grpc_enabled():
    logging.info('Skipping this test as second gRPC port interferes')
    return
  utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
  # create the database so vttablets start, as it is serving
  tablet_62344.create_db('vt_test_keyspace')
  tablet_62344.init_tablet('master', 'test_keyspace', '0')
  proc1 = tablet_62344.start_vttablet()
  tablet_62344.start_vttablet()
  # poll up to ~20s for the first process to be killed by the second
  for _ in xrange(20):
    logging.debug('Sleeping waiting for first process to die')
    time.sleep(1.0)
    proc1.poll()
    if proc1.returncode is not None:
      break
  if proc1.returncode is None:
    self.fail('proc1 still running')
  tablet_62344.kill_vttablet()
def test_shard_replication_fix(self):
  """Check ShardReplicationFix removes a bogus replication-graph entry."""
  utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
  tablet_62344.create_db('vt_test_keyspace')
  tablet_62044.create_db('vt_test_keyspace')
  # one master one replica
  tablet_62344.init_tablet('master', 'test_keyspace', '0')
  tablet_62044.init_tablet('replica', 'test_keyspace', '0')
  # make sure the replica is in the replication graph
  before_bogus = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
                                       'test_keyspace/0'])
  self.assertEqual(2, len(before_bogus['nodes']),
                   'wrong shard replication nodes before: %s' %
                   str(before_bogus))
  # manually add a bogus entry to the replication graph, and check
  # it is removed by ShardReplicationFix
  utils.run_vtctl(['ShardReplicationAdd', 'test_keyspace/0',
                   'test_nj-0000066666'], auto_log=True)
  with_bogus = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
                                     'test_keyspace/0'])
  self.assertEqual(3, len(with_bogus['nodes']),
                   'wrong shard replication nodes with bogus: %s' %
                   str(with_bogus))
  utils.run_vtctl(['ShardReplicationFix', 'test_nj', 'test_keyspace/0'],
                  auto_log=True)
  after_fix = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
                                    'test_keyspace/0'])
  self.assertEqual(2, len(after_fix['nodes']),
                   'wrong shard replication nodes after fix: %s' %
                   str(after_fix))
def check_healthz(self, t, expected):
  """Assert the tablet's /healthz endpoint matches the expected health.

  A healthy tablet answers 'ok\\n'; an unhealthy one returns an HTTP error.
  """
  if not expected:
    with self.assertRaises(urllib2.HTTPError):
      t.get_healthz()
  else:
    self.assertEqual('ok\n', t.get_healthz())
def test_health_check(self):
  """End-to-end health check: replica goes healthy, replication repair,
  VtTabletStreamHealth contents, and QPS reporting."""
  # one master, one replica that starts not initialized
  # (for the replica, we let vttablet do the InitTablet)
  tablet_62344.init_tablet('master', 'test_keyspace', '0')
  for t in tablet_62344, tablet_62044:
    t.create_db('vt_test_keyspace')
  tablet_62344.start_vttablet(wait_for_state=None)
  tablet_62044.start_vttablet(wait_for_state=None,
                              lameduck_period='5s',
                              init_tablet_type='replica',
                              init_keyspace='test_keyspace',
                              init_shard='0')
  tablet_62344.wait_for_vttablet_state('SERVING')
  tablet_62044.wait_for_vttablet_state('NOT_SERVING')
  self.check_healthz(tablet_62044, False)
  utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
                   tablet_62344.tablet_alias])
  # make sure the unhealthy slave goes to healthy
  tablet_62044.wait_for_vttablet_state('SERVING')
  utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias])
  self.check_healthz(tablet_62044, True)
  # make sure the master is still master
  ti = utils.run_vtctl_json(['GetTablet', tablet_62344.tablet_alias])
  self.assertEqual(ti['type'], topodata_pb2.MASTER,
                   'unexpected master type: %s' % ti['type'])
  # stop replication at the mysql level.
  tablet_62044.mquery('', 'stop slave')
  # vttablet replication_reporter should restart it.
  utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias])
  # insert something on the master and wait for it on the slave.
  tablet_62344.mquery('vt_test_keyspace', [
      'create table repl_test_table (id int)',
      'insert into repl_test_table values (123)'], write=True)
  timeout = 10.0
  while True:
    try:
      result = tablet_62044.mquery('vt_test_keyspace',
                                   'select * from repl_test_table')
      if result:
        self.assertEqual(result[0][0], 123L)
        break
    except MySQLdb.ProgrammingError:
      # Maybe the create table hasn't gone trough yet, we wait more
      logging.exception('got this exception waiting for data, ignoring it')
    timeout = utils.wait_step(
        'slave replication repaired by replication_reporter', timeout)
  # stop replication, make sure we don't go unhealthy.
  # (we have a baseline as well, so the time should be good).
  utils.run_vtctl(['StopSlave', tablet_62044.tablet_alias])
  utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias])
  self.check_healthz(tablet_62044, True)
  # make sure status web page is healthy
  self.assertIn('>healthy</span></div>', tablet_62044.get_status())
  # make sure the health stream is updated
  health = utils.run_vtctl_json(['VtTabletStreamHealth',
                                 '-count', '1',
                                 tablet_62044.tablet_alias])
  self.assertTrue(('seconds_behind_master' not in health['realtime_stats']) or
                  (health['realtime_stats']['seconds_behind_master'] < 30),
                  'got unexpected health: %s' % str(health))
  self.assertIn('serving', health)
  # then restart replication, make sure we stay healthy
  utils.run_vtctl(['StartSlave', tablet_62044.tablet_alias])
  utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias])
  # make sure status web page is healthy
  self.assertIn('>healthy</span></div>', tablet_62044.get_status())
  # now test VtTabletStreamHealth returns the right thing
  stdout, _ = utils.run_vtctl(['VtTabletStreamHealth',
                               '-count', '2',
                               tablet_62044.tablet_alias],
                              trap_output=True, auto_log=True)
  lines = stdout.splitlines()
  self.assertEqual(len(lines), 2)
  for line in lines:
    logging.debug('Got health: %s', line)
    data = json.loads(line)
    self.assertIn('realtime_stats', data)
    self.assertIn('serving', data)
    self.assertTrue(data['serving'])
    self.assertNotIn('health_error', data['realtime_stats'])
    self.assertNotIn('tablet_externally_reparented_timestamp', data)
    self.assertEqual('test_keyspace', data['target']['keyspace'])
    self.assertEqual('0', data['target']['shard'])
    self.assertEqual(topodata_pb2.REPLICA, data['target']['tablet_type'])
  # Test that VtTabletStreamHealth reports a QPS >0.0.
  # Therefore, issue several reads first.
  # NOTE: This may be potentially flaky because we'll observe a QPS >0.0
  # exactly "once" for the duration of one sampling interval (5s) and
  # after that we'll see 0.0 QPS rates again. If this becomes actually
  # flaky, we need to read continuously in a separate thread.
  for _ in range(10):
    tablet_62044.execute('select 1 from dual')
  # This may take up to 5 seconds to become true because we sample the query
  # counts for the rates only every 5 seconds (see query_service_stats.go).
  timeout = 10
  while True:
    health = utils.run_vtctl_json(['VtTabletStreamHealth', '-count', '1',
                                   tablet_62044.tablet_alias])
    if health['realtime_stats'].get('qps', 0.0) > 0.0:
      break
    timeout = utils.wait_step('QPS >0.0 seen', timeout)
  # kill the tablets
  tablet.kill_tablets([tablet_62344, tablet_62044])
def test_health_check_worker_state_does_not_shutdown_query_service(self):
  """Check that 'worker' tablet type keeps the query service running."""
  # This test is similar to test_health_check, but has the following
  # differences:
  # - the second tablet is an 'rdonly' and not a 'replica'
  # - the second tablet will be set to 'worker' and we expect that
  #   the query service won't be shutdown
  # Setup master and rdonly tablets.
  tablet_62344.init_tablet('master', 'test_keyspace', '0')
  for t in tablet_62344, tablet_62044:
    t.create_db('vt_test_keyspace')
  tablet_62344.start_vttablet(wait_for_state=None)
  tablet_62044.start_vttablet(wait_for_state=None,
                              init_tablet_type='rdonly',
                              init_keyspace='test_keyspace',
                              init_shard='0')
  tablet_62344.wait_for_vttablet_state('SERVING')
  tablet_62044.wait_for_vttablet_state('NOT_SERVING')
  self.check_healthz(tablet_62044, False)
  # Enable replication.
  utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
                   tablet_62344.tablet_alias])
  # Trigger healthcheck to save time waiting for the next interval.
  utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias])
  tablet_62044.wait_for_vttablet_state('SERVING')
  self.check_healthz(tablet_62044, True)
  # Change from rdonly to worker and stop replication. (These
  # actions are similar to the SplitClone vtworker command
  # implementation.)  The tablet will stay healthy, and the
  # query service is still running.
  utils.run_vtctl(['ChangeSlaveType', tablet_62044.tablet_alias, 'worker'])
  utils.run_vtctl(['StopSlave', tablet_62044.tablet_alias])
  # Trigger healthcheck explicitly to avoid waiting for the next interval.
  utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias])
  utils.wait_for_tablet_type(tablet_62044.tablet_alias, 'worker')
  self.check_healthz(tablet_62044, True)
  # Query service is still running.
  tablet_62044.wait_for_vttablet_state('SERVING')
  # Restart replication. Tablet will become healthy again.
  utils.run_vtctl(['ChangeSlaveType', tablet_62044.tablet_alias, 'rdonly'])
  utils.run_vtctl(['StartSlave', tablet_62044.tablet_alias])
  utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias])
  self.check_healthz(tablet_62044, True)
  # kill the tablets
  tablet.kill_tablets([tablet_62344, tablet_62044])
def test_no_mysql_healthcheck(self):
  """This test starts a vttablet with no mysql port, while mysql is down.

  It makes sure vttablet will start properly and be unhealthy.
  Then we start mysql, and make sure vttablet becomes healthy.
  """
  # we need replication to be enabled, so the slave tablet can be healthy.
  for t in tablet_62344, tablet_62044:
    t.create_db('vt_test_keyspace')
  pos = mysql_flavor().master_position(tablet_62344)
  # Use 'localhost' as hostname because Travis CI worker hostnames
  # are too long for MySQL replication.
  change_master_cmds = mysql_flavor().change_master_commands(
      'localhost',
      tablet_62344.mysql_port,
      pos)
  tablet_62044.mquery('', ['RESET MASTER', 'RESET SLAVE'] +
                      change_master_cmds + ['START SLAVE'])
  # now shutdown all mysqld
  shutdown_procs = [
      tablet_62344.shutdown_mysql(),
      tablet_62044.shutdown_mysql(),
  ]
  utils.wait_procs(shutdown_procs)
  # start the tablets, wait for them to be NOT_SERVING (mysqld not there)
  tablet_62344.init_tablet('master', 'test_keyspace', '0')
  tablet_62044.init_tablet('replica', 'test_keyspace', '0',
                           include_mysql_port=False)
  for t in tablet_62344, tablet_62044:
    # Since MySQL is down at this point and we want the tablet to start up
    # successfully, we have to use supports_backups=False.
    t.start_vttablet(wait_for_state=None, supports_backups=False,
                     full_mycnf_args=True, include_mysql_port=False)
  for t in tablet_62344, tablet_62044:
    t.wait_for_vttablet_state('NOT_SERVING')
    self.check_healthz(t, False)
  # Tell slave to not try to repair replication in healthcheck.
  # The StopSlave will ultimately fail because mysqld is not running,
  # But vttablet should remember that it's not supposed to fix replication.
  utils.run_vtctl(['StopSlave', tablet_62044.tablet_alias], expect_fail=True)
  # The above notice to not fix replication should survive tablet restart.
  tablet_62044.kill_vttablet()
  tablet_62044.start_vttablet(wait_for_state='NOT_SERVING',
                              full_mycnf_args=True, include_mysql_port=False,
                              supports_backups=False)
  # restart mysqld
  start_procs = [
      tablet_62344.start_mysql(),
      tablet_62044.start_mysql(),
  ]
  utils.wait_procs(start_procs)
  # the master should still be healthy
  utils.run_vtctl(['RunHealthCheck', tablet_62344.tablet_alias],
                  auto_log=True)
  self.check_healthz(tablet_62344, True)
  # the slave will now be healthy, but report a very high replication
  # lag, because it can't figure out what it exactly is.
  utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias],
                  auto_log=True)
  tablet_62044.wait_for_vttablet_state('SERVING')
  self.check_healthz(tablet_62044, True)
  health = utils.run_vtctl_json(['VtTabletStreamHealth',
                                 '-count', '1',
                                 tablet_62044.tablet_alias])
  self.assertTrue('seconds_behind_master' in health['realtime_stats'])
  self.assertEqual(health['realtime_stats']['seconds_behind_master'], 7200)
  self.assertIn('serving', health)
  # restart replication, wait until health check goes small
  # (a value of zero is default and won't be in structure)
  utils.run_vtctl(['StartSlave', tablet_62044.tablet_alias])
  timeout = 10
  while True:
    utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias],
                    auto_log=True)
    health = utils.run_vtctl_json(['VtTabletStreamHealth',
                                   '-count', '1',
                                   tablet_62044.tablet_alias])
    if 'serving' in health and (
        ('seconds_behind_master' not in health['realtime_stats']) or
        (health['realtime_stats']['seconds_behind_master'] < 30)):
      break
    timeout = utils.wait_step('health delay goes back down', timeout)
  # wait for the tablet to fix its mysql port
  for t in tablet_62344, tablet_62044:
    # wait for mysql port to show up
    timeout = 10
    while True:
      ti = utils.run_vtctl_json(['GetTablet', t.tablet_alias])
      if 'mysql' in ti['port_map']:
        break
      timeout = utils.wait_step('mysql port in tablet record', timeout)
    self.assertEqual(ti['port_map']['mysql'], t.mysql_port)
  # all done
  tablet.kill_tablets([tablet_62344, tablet_62044])
def test_repeated_init_shard_master(self):
  """InitShardMaster can be run repeatedly, swapping masters back and forth.

  Starts two tablets as replicas (unhealthy, since they are not
  replicating), then cycles the master role between them with
  InitShardMaster -force and verifies both tablets report healthy
  after every reparent.
  """
  for t in tablet_62344, tablet_62044:
    t.create_db('vt_test_keyspace')
    t.start_vttablet(wait_for_state=None,
                     lameduck_period='5s',
                     init_tablet_type='replica',
                     init_keyspace='test_keyspace',
                     init_shard='0')

  # tablets are not replicating, so they won't be healthy
  for t in tablet_62344, tablet_62044:
    t.wait_for_vttablet_state('NOT_SERVING')
    self.check_healthz(t, False)

  # Cycle the master role: first tablet, then the other, then back to
  # the original.  The original code repeated this sequence three times
  # verbatim; a loop over the master candidates is equivalent.
  for master in (tablet_62344, tablet_62044, tablet_62344):
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
                     master.tablet_alias])
    # run health check on both, make sure they are both healthy
    for t in tablet_62344, tablet_62044:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias], auto_log=True)
      self.check_healthz(t, True)

  # and done
  tablet.kill_tablets([tablet_62344, tablet_62044])
def test_fallback_policy(self):
  """An unknown security_policy falls back to denying /queryz access."""
  tablet_62344.create_db('vt_test_keyspace')
  tablet_62344.init_tablet('master', 'test_keyspace', '0')
  tablet_62344.start_vttablet(security_policy='bogus')

  # The fallback policy must reject the debug page.
  url = 'http://localhost:%d/queryz' % int(tablet_62344.port)
  stream = urllib.urlopen(url)
  body = stream.read()
  stream.close()
  self.assertIn('not allowed', body)

  tablet_62344.kill_vttablet()
def test_ignore_health_error(self):
  """IgnoreHealthError can force a tablet healthy, then revert it."""
  tablet_62344.create_db('vt_test_keyspace')
  # Starts unhealthy because of "no slave status" (not replicating).
  tablet_62344.start_vttablet(wait_for_state='NOT_SERVING',
                              init_tablet_type='replica',
                              init_keyspace='test_keyspace',
                              init_shard='0')

  # Phase 1: mask the error pattern -> tablet becomes SERVING/healthy.
  # Phase 2: clear the pattern -> tablet returns to NOT_SERVING/unhealthy.
  for pattern, state, healthy in (
      ('.*no slave status.*', 'SERVING', True),
      ('', 'NOT_SERVING', False)):
    utils.run_vtctl(['IgnoreHealthError', tablet_62344.tablet_alias,
                     pattern])
    utils.run_vtctl(['RunHealthCheck', tablet_62344.tablet_alias],
                    auto_log=True)
    tablet_62344.wait_for_vttablet_state(state)
    self.check_healthz(tablet_62344, healthy)

  tablet_62344.kill_vttablet()
# Standard entry point: the vitess test utils parse the common test flags
# and then run the unittest suite defined above.
if __name__ == '__main__':
  utils.main()
|
bowlofstew/vitess | test/end2end/k8s_environment.py | <reponame>bowlofstew/vitess<filename>test/end2end/k8s_environment.py
"""Kubernetes environment."""
import json
import getpass
import logging
import os
import subprocess
import time
from vtdb import vtgate_client
import base_environment
import protocols_flavor
import vtctl_helper
class K8sEnvironment(base_environment.BaseEnvironment):
  """Environment for kubernetes clusters on Google Compute Engine."""

  def __init__(self):
    super(K8sEnvironment, self).__init__()

  def use_named(self, instance_name):
    """Attach to an existing Vitess cluster in namespace instance_name.

    Discovers the vtctld and per-cell vtgate load-balancer addresses via
    kubectl, then queries the topology for keyspaces, shards, cells and
    tablet counts.

    Args:
      instance_name: kubernetes namespace (VITESS_NAME) of the cluster.

    Raises:
      VitessEnvironmentError: if kubectl is not installed or the cluster
        has no keyspaces.
    """
    # Check to make sure kubectl exists
    try:
      subprocess.check_output(['kubectl'])
    except OSError:
      raise base_environment.VitessEnvironmentError(
          'kubectl not found, please install by visiting kubernetes.io or '
          'running gcloud components update kubectl if using compute engine.')

    # Go template extracting a LoadBalancer service's external IP; it
    # renders as the empty string until the ingress IP is assigned.
    get_address_template = (
        '{{if ge (len .status.loadBalancer) 1}}'
        '{{index (index .status.loadBalancer.ingress 0) "ip"}}'
        '{{end}}')

    get_address_params = ['kubectl', 'get', '-o', 'template', '--template',
                          get_address_template, 'service', '--namespace',
                          instance_name]

    # Poll up to 60s for the vtctld address to be published.
    start_time = time.time()
    vtctld_addr = ''
    while time.time() - start_time < 60 and not vtctld_addr:
      vtctld_addr = subprocess.check_output(
          get_address_params + ['vtctld'], stderr=subprocess.STDOUT)
    self.vtctl_addr = '%s:15999' % vtctld_addr

    self.vtctl_helper = vtctl_helper.VtctlHelper('grpc', self.vtctl_addr)
    self.cluster_name = instance_name

    keyspaces = self.vtctl_helper.execute_vtctl_command(['GetKeyspaces'])
    self.mobs = filter(None, keyspaces.split('\n'))
    self.keyspaces = self.mobs

    if not self.keyspaces:
      raise base_environment.VitessEnvironmentError(
          'Invalid environment, no keyspaces found')

    self.num_shards = []
    self.shards = []
    for keyspace in self.keyspaces:
      shards = json.loads(self.vtctl_helper.execute_vtctl_command(
          ['FindAllShardsInKeyspace', keyspace]))
      self.shards.append(shards)
      self.num_shards.append(len(shards))

    # This assumes that all keyspaces use the same set of cells
    self.cells = json.loads(self.vtctl_helper.execute_vtctl_command(
        ['GetShard', '%s/%s' % (self.keyspaces[0], self.shards[0].keys()[0])]
        ))['cells']
    self.primary_cells = self.cells

    self.replica_instances = []
    self.rdonly_instances = []
    # This assumes that all cells are equivalent for k8s environments.
    all_tablets_in_a_cell = self.vtctl_helper.execute_vtctl_command(
        ['ListAllTablets', self.cells[0]])
    all_tablets_in_a_cell = [x.split(' ') for x in
                             filter(None, all_tablets_in_a_cell.split('\n'))]

    for index, keyspace in enumerate(self.keyspaces):
      keyspace_tablets_in_cell = [
          tablet for tablet in all_tablets_in_a_cell if tablet[1] == keyspace]
      replica_tablets_in_cell = [
          tablet for tablet in keyspace_tablets_in_cell
          if tablet[3] == 'master' or tablet[3] == 'replica']
      replica_instances = len(replica_tablets_in_cell) / self.num_shards[index]
      self.replica_instances.append(replica_instances)
      self.rdonly_instances.append(
          (len(keyspace_tablets_in_cell) / self.num_shards[index]) -
          replica_instances)

    # Converts keyspace name and alias to number of instances
    self.keyspace_alias_to_num_instances_dict = {}
    for index, keyspace in enumerate(self.keyspaces):
      self.keyspace_alias_to_num_instances_dict[keyspace] = {
          'replica': int(self.replica_instances[index]),
          'rdonly': int(self.rdonly_instances[index])
      }

    # Poll for each cell's vtgate address and open a client connection.
    # BUG FIX: the original initialized self.vtgate_addr but the loop body
    # assigned a *local* vtgate_addr, so "not self.vtgate_addr" never
    # became false: the first cell busy-polled the full 60s and later
    # cells skipped the loop, reusing the previous cell's address.  Poll
    # a per-cell local with a per-cell deadline instead.
    self.vtgate_addrs = {}
    self.vtgate_conns = {}
    for cell in self.cells:
      start_time = time.time()
      vtgate_addr = ''
      while time.time() - start_time < 60 and not vtgate_addr:
        vtgate_addr = subprocess.check_output(
            get_address_params + ['vtgate-%s' % cell],
            stderr=subprocess.STDOUT)
      self.vtgate_addrs[cell] = '%s:15001' % vtgate_addr
      self.vtgate_conns[cell] = vtgate_client.connect(
          protocols_flavor.protocols_flavor().vtgate_python_protocol(),
          self.vtgate_addrs[cell], 60)

    super(K8sEnvironment, self).use_named(instance_name)

  def create(self, **kwargs):
    """Create a Vitess cluster (and optionally a new GKE cluster).

    Runs the examples/kubernetes helper scripts; environment variables
    are passed through kwargs (GKE_CLUSTER_NAME, VITESS_NAME, ...).

    Raises:
      VitessEnvironmentError: if gcloud is missing or required kwargs
        are absent.
    """
    self.create_gke_cluster = (
        kwargs.get('create_gke_cluster', 'false').lower() != 'false')
    if self.create_gke_cluster and 'GKE_NUM_NODES' not in kwargs:
      raise base_environment.VitessEnvironmentError(
          'Must specify GKE_NUM_NODES')
    if 'GKE_CLUSTER_NAME' not in kwargs:
      kwargs['GKE_CLUSTER_NAME'] = getpass.getuser()
    if 'VITESS_NAME' not in kwargs:
      kwargs['VITESS_NAME'] = getpass.getuser()
    kwargs['TEST_MODE'] = '1'
    self.script_dir = os.path.join(os.environ['VTTOP'], 'examples/kubernetes')
    try:
      subprocess.check_output(['gcloud', 'config', 'list'])
    except OSError:
      raise base_environment.VitessEnvironmentError(
          'gcloud not found, please install by visiting cloud.google.com')
    if 'project' in kwargs:
      logging.info('Setting project to %s', kwargs['project'])
      subprocess.check_output(
          ['gcloud', 'config', 'set', 'project', kwargs['project']])
    project_name_json = json.loads(subprocess.check_output(
        ['gcloud', 'config', 'list', 'project', '--format', 'json']))
    project_name = project_name_json['core']['project']
    logging.info('Current project name: %s', project_name)
    # Export every option so the shell scripts below can read them.
    for k, v in kwargs.iteritems():
      os.environ[k] = v
    if self.create_gke_cluster:
      cluster_up_txt = subprocess.check_output(
          [os.path.join(self.script_dir, 'cluster-up.sh')],
          cwd=self.script_dir, stderr=subprocess.STDOUT)
      logging.info(cluster_up_txt)
    vitess_up_output = subprocess.check_output(
        [os.path.join(self.script_dir, 'vitess-up.sh')],
        cwd=self.script_dir, stderr=subprocess.STDOUT)
    logging.info(vitess_up_output)
    self.use_named(kwargs['VITESS_NAME'])

  def destroy(self):
    """Tear down the Vitess cluster (and GKE cluster if we created it)."""
    vitess_down_output = subprocess.check_output(
        [os.path.join(self.script_dir, 'vitess-down.sh')],
        cwd=self.script_dir, stderr=subprocess.STDOUT)
    logging.info(vitess_down_output)
    if self.create_gke_cluster:
      cluster_down_output = subprocess.check_output(
          [os.path.join(self.script_dir, 'cluster-down.sh')],
          cwd=self.script_dir, stderr=subprocess.STDOUT)
      logging.info(cluster_down_output)

  def get_vtgate_conn(self, cell):
    """Return the cached vtgate client connection for a cell."""
    return self.vtgate_conns[cell]

  def wait_for_good_failover_status(
      self, keyspace, shard_name, failover_completion_timeout_s=60):
    # k8s environments report status through the topology; nothing to wait on.
    return 0

  def wait_for_healthy_tablets(self):
    # No-op for k8s environments.
    return 0

  def get_tablet_task_number(self, tablet_name):
    """Map a tablet alias to its task number (uid modulo 100)."""
    tablet_info = json.loads(self.vtctl_helper.execute_vtctl_command(
        ['GetTablet', tablet_name]))
    return tablet_info['alias']['uid'] % 100

  def internal_reparent(self, keyspace, shard_name, new_master_uid,
                        emergency=False):
    """Reparent a shard to new_master_uid and rebuild the keyspace graph."""
    reparent_command = (
        'EmergencyReparentShard' if emergency else 'PlannedReparentShard')
    self.vtctl_helper.execute_vtctl_command(
        [reparent_command, '-keyspace_shard', '%s/%s' % (keyspace, shard_name),
         '-new_master', new_master_uid])
    self.vtctl_helper.execute_vtctl_command(['RebuildKeyspaceGraph', keyspace])
    return 0, 'No output'
|
bowlofstew/vitess | test/update_stream.py | #!/usr/bin/env python
import logging
import time
import unittest
import environment
import tablet
import utils
from vtdb import dbexceptions
from vtdb import proto3_encoding
from vtdb import vtgate_client
from vtproto import query_pb2
from vtproto import topodata_pb2
from mysql_flavor import mysql_flavor
from protocols_flavor import protocols_flavor
from vtgate_gateway_flavor.gateway import vtgate_gateway_flavor
# The single master/replica pair of tablets shared by all tests in this
# module; created here, started in setUpModule().
master_tablet = tablet.Tablet()
replica_tablet = tablet.Tablet()
master_host = 'localhost:%d' % master_tablet.port

# master_start_position has the replication position before we start
# doing anything to the master database. It is used by test_ddl to
# make sure we see DDLs.
master_start_position = None

# DDL for the test tables.  NOTE: test_ddl compares the streamed DDL
# statement against _create_vt_insert_test verbatim, so these literals
# must not be reformatted.
_create_vt_insert_test = '''create table if not exists vt_insert_test (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB'''

_create_vt_a = '''create table if not exists vt_a (
eid bigint,
id int,
primary key(eid, id)
) Engine=InnoDB'''

_create_vt_b = '''create table if not exists vt_b (
eid bigint,
name varchar(128),
foo varbinary(128),
primary key(eid, name)
) Engine=InnoDB'''
def _get_master_current_position():
  """Return the master tablet's current replication position string."""
  flavor = mysql_flavor()
  return flavor.master_position(master_tablet)
def _get_repl_current_position():
  """Return the replica tablet's current replication position string."""
  flavor = mysql_flavor()
  return flavor.master_position(replica_tablet)
def setUpModule():
  """Start topo server, mysqld + vttablet pairs, vtctld and vtgate.

  Creates the test tables on the master, records the pre-DDL replication
  position for test_ddl, and waits until both tablets and vtgate agree
  on the schema.  On any failure the partial setup is torn down.
  """
  global master_start_position

  try:
    environment.topo_server().setup()

    # start mysql instance external to the test
    setup_procs = [master_tablet.init_mysql(),
                   replica_tablet.init_mysql()]
    utils.wait_procs(setup_procs)

    # start a vtctld so the vtctl insert commands are just RPCs, not forks
    utils.Vtctld().start()

    # Start up a master mysql and vttablet
    logging.debug('Setting up tablets')
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    master_tablet.init_tablet('master', 'test_keyspace', '0', tablet_index=0)
    replica_tablet.init_tablet('replica', 'test_keyspace', '0', tablet_index=1)
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    utils.validate_topology()

    # 'other_database' exists so test_database_filter can check that
    # events from databases outside vt_test_keyspace are filtered out.
    master_tablet.create_db('vt_test_keyspace')
    master_tablet.create_db('other_database')
    replica_tablet.create_db('vt_test_keyspace')
    replica_tablet.create_db('other_database')

    master_tablet.start_vttablet(wait_for_state=None)
    replica_tablet.start_vttablet(wait_for_state=None)
    master_tablet.wait_for_vttablet_state('SERVING')
    replica_tablet.wait_for_vttablet_state('NOT_SERVING')

    for t in [master_tablet, replica_tablet]:
      t.reset_replication()
    utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
                     master_tablet.tablet_alias], auto_log=True)

    utils.wait_for_tablet_type(replica_tablet.tablet_alias, 'replica')
    master_tablet.wait_for_vttablet_state('SERVING')
    replica_tablet.wait_for_vttablet_state('SERVING')

    # reset counter so tests don't assert
    tablet.Tablet.tablets_running = 0

    # Snapshot the replication position before any DDL, for test_ddl.
    master_start_position = _get_master_current_position()
    master_tablet.mquery('vt_test_keyspace', _create_vt_insert_test)
    master_tablet.mquery('vt_test_keyspace', _create_vt_a)
    master_tablet.mquery('vt_test_keyspace', _create_vt_b)

    utils.run_vtctl(['ReloadSchema', master_tablet.tablet_alias])
    utils.run_vtctl(['ReloadSchema', replica_tablet.tablet_alias])
    utils.run_vtctl(['RebuildVSchemaGraph'])

    utils.VtGate().start(tablets=[master_tablet, replica_tablet])
    utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1)
    utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1)

    # Wait for the master and slave tablet's ReloadSchema to have worked.
    # Note we don't specify a keyspace name, there is only one, vschema
    # will just use that single keyspace.
    timeout = 10
    while True:
      try:
        utils.vtgate.execute('select count(1) from vt_insert_test',
                             tablet_type='master')
        utils.vtgate.execute('select count(1) from vt_insert_test',
                             tablet_type='replica')
        break
      except protocols_flavor().client_error_exception_type():
        logging.exception('query failed')
        timeout = utils.wait_step('slave tablet having correct schema', timeout)
    # also re-run ReloadSchema on slave, in case the first one
    # didn't get the replicated table.
    utils.run_vtctl(['ReloadSchema', replica_tablet.tablet_alias])

  except:
    # Tear down whatever was started before re-raising.
    tearDownModule()
    raise
def tearDownModule():
  """Shut down tablets, mysqld and the topo server; remove temp files."""
  utils.required_teardown()
  if utils.options.skip_teardown:
    return
  logging.debug('Tearing down the servers and setup')
  # Restore the expected running-tablet count for kill_tablets' checks.
  tablet.Tablet.tablets_running = 2
  tablet.kill_tablets([master_tablet, replica_tablet])
  teardown_procs = [master_tablet.teardown_mysql(),
                    replica_tablet.teardown_mysql()]
  utils.wait_procs(teardown_procs, raise_on_error=False)

  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()

  master_tablet.remove_tree()
  replica_tablet.remove_tree()
class TestUpdateStream(unittest.TestCase):
  """End-to-end tests for the update stream (binlog streaming) service."""

  _populate_vt_insert_test = [
      "insert into vt_insert_test (msg) values ('test %s')" % x
      for x in xrange(4)]

  def _populate_vt_a(self, count):
    """Return insert statements for rows 1..count of vt_a."""
    return ['insert into vt_a (eid, id) values (%d, %d)' % (x, x)
            for x in xrange(count + 1) if x > 0]

  def _populate_vt_b(self, count):
    """Return insert statements for rows 0..count-1 of vt_b."""
    return [
        "insert into vt_b (eid, name, foo) values (%d, 'name %s', 'foo %s')" %
        (x, x, x) for x in xrange(count)]

  def _get_vtgate_stream_conn(self):
    """Open a fresh vtgate client connection for streaming."""
    protocol, addr = utils.vtgate.rpc_endpoint(python=True)
    return vtgate_client.connect(protocol, addr, 30.0)

  def _exec_vt_txn(self, query_list):
    """Run query_list against the master shard in one transaction."""
    protocol, addr = utils.vtgate.rpc_endpoint(python=True)
    vtgate_conn = vtgate_client.connect(protocol, addr, 30.0)
    cursor = vtgate_conn.cursor(
        tablet_type='master', keyspace='test_keyspace',
        shards=['0'], writable=True)
    cursor.begin()
    for query in query_list:
      cursor.execute(query, {})
    cursor.commit()
    return

  def test_stream_parity(self):
    """Tests parity of streams between master and replica for the same writes.

    Also tests transactions are retrieved properly.
    """
    timeout = 30
    while True:
      master_position = _get_master_current_position()
      replica_position = _get_repl_current_position()
      if master_position == replica_position:
        break
      timeout = utils.wait_step(
          '%s == %s' % (master_position, replica_position),
          timeout
      )
    logging.debug('run_test_stream_parity starting @ %s',
                  master_position)
    self._exec_vt_txn(self._populate_vt_a(15))
    self._exec_vt_txn(self._populate_vt_b(14))
    self._exec_vt_txn(['delete from vt_a'])
    self._exec_vt_txn(['delete from vt_b'])

    # get master events
    master_conn = self._get_vtgate_stream_conn()
    master_events = []
    for event, resume_timestamp in master_conn.update_stream(
        'test_keyspace', topodata_pb2.MASTER,
        event=query_pb2.EventToken(shard='0', position=master_position),
        shard='0'):
      logging.debug('Got master event(%d): %s', resume_timestamp, event)
      master_events.append(event)
      if len(master_events) == 4:
        break
    master_conn.close()

    # get replica events
    replica_conn = self._get_vtgate_stream_conn()
    replica_events = []
    for event, resume_timestamp in replica_conn.update_stream(
        'test_keyspace', topodata_pb2.REPLICA,
        event=query_pb2.EventToken(shard='0', position=replica_position),
        shard='0'):
      logging.debug('Got slave event(%d): %s', resume_timestamp, event)
      replica_events.append(event)
      if len(replica_events) == 4:
        break
    replica_conn.close()

    # and compare
    if len(master_events) != len(replica_events):
      logging.debug(
          'Test Failed - # of records mismatch, master %s replica %s',
          master_events, replica_events)
    for master_event, replica_event in zip(master_events, replica_events):
      # The timestamp is from when the event was written to the binlogs.
      # the master uses the timestamp of when it wrote it originally,
      # the slave of when it applied the logs. These can differ and make this
      # test flaky. So we just blank them out, easier. We really want to
      # compare the replication positions.
      master_event.event_token.timestamp = 123
      replica_event.event_token.timestamp = 123
      self.assertEqual(
          master_event, replica_event,
          "Test failed, data mismatch - master '%s' and replica '%s'" %
          (master_event, replica_event))
    logging.debug('Test Writes: PASS')

  def test_ddl(self):
    """Asks for all statements since we started, find the DDL."""
    start_position = master_start_position
    logging.debug('test_ddl: starting @ %s', start_position)
    master_conn = self._get_vtgate_stream_conn()
    found = False
    for event, _ in master_conn.update_stream(
        'test_keyspace', topodata_pb2.MASTER,
        event=query_pb2.EventToken(shard='0', position=start_position),
        shard='0'):
      for statement in event.statements:
        if statement.sql == _create_vt_insert_test:
          found = True
          break
      break
    master_conn.close()
    self.assertTrue(found, "didn't get right sql")

  def test_set_insert_id(self):
    """SET INSERT_ID before inserts is reflected in streamed row ids."""
    start_position = _get_master_current_position()
    self._exec_vt_txn(
        ['SET INSERT_ID=1000000'] + self._populate_vt_insert_test)
    logging.debug('test_set_insert_id: starting @ %s', start_position)
    master_conn = self._get_vtgate_stream_conn()
    expected_id = 1000000
    for event, _ in master_conn.update_stream(
        'test_keyspace', topodata_pb2.MASTER,
        event=query_pb2.EventToken(shard='0', position=start_position),
        shard='0'):
      for statement in event.statements:
        fields, rows = proto3_encoding.convert_stream_event_statement(statement)
        self.assertEqual(fields[0], 'id')
        self.assertEqual(rows[0][0], expected_id)
        expected_id += 1
      break
    if expected_id != 1000004:
      self.fail('did not get my four values!')
    master_conn.close()

  def test_database_filter(self):
    """Writes to another database must not show up in the stream."""
    start_position = _get_master_current_position()
    master_tablet.mquery('other_database', _create_vt_insert_test)
    self._exec_vt_txn(self._populate_vt_insert_test)
    logging.debug('test_database_filter: starting @ %s', start_position)
    master_conn = self._get_vtgate_stream_conn()
    for event, _ in master_conn.update_stream(
        'test_keyspace', topodata_pb2.MASTER,
        event=query_pb2.EventToken(shard='0', position=start_position),
        shard='0'):
      for statement in event.statements:
        self.assertNotEqual(statement.category, 2,  # query_pb2.StreamEvent.DDL
                            "query using other_database wasn't filtered out")
      break
    master_conn.close()

  def test_service_switch(self):
    """tests the service switch from disable -> enable -> disable."""
    # make the replica spare
    utils.run_vtctl(['ChangeSlaveType', replica_tablet.tablet_alias, 'spare'])
    utils.wait_for_tablet_type(replica_tablet.tablet_alias, 'spare')

    # Check UpdateStreamState is disabled.
    v = utils.get_vars(replica_tablet.port)
    if v['UpdateStreamState'] != 'Disabled':
      self.fail("Update stream service should be 'Disabled' but is '%s'" %
                v['UpdateStreamState'])
    start_position = _get_repl_current_position()

    # Make sure we can't start a new request to vttablet directly.
    _, stderr = utils.run_vtctl(['VtTabletUpdateStream',
                                 '-position', start_position,
                                 replica_tablet.tablet_alias],
                                expect_fail=True)
    self.assertIn('operation not allowed in state NOT_SERVING', stderr)

    # Make sure we can't start a new request through vtgate.
    replica_conn = self._get_vtgate_stream_conn()
    try:
      for event, resume_timestamp in replica_conn.update_stream(
          'test_keyspace', topodata_pb2.REPLICA,
          event=query_pb2.EventToken(shard='0', position=start_position),
          shard='0'):
        # FIX: self.fail, not the nonexistent assertFail, so a stray event
        # fails cleanly instead of raising an uncaught AttributeError.
        self.fail('got event(%d): %s' % (resume_timestamp, str(event)))
      self.fail('update_stream terminated with no exception')
    except dbexceptions.DatabaseError as e:
      self.assertIn(vtgate_gateway_flavor().no_tablet_found_message(), str(e))

    # Go back to replica.
    utils.run_vtctl(
        ['ChangeSlaveType', replica_tablet.tablet_alias, 'replica'])
    utils.wait_for_tablet_type(replica_tablet.tablet_alias, 'replica')

    # Check UpdateStreamState is enabled.
    v = utils.get_vars(replica_tablet.port)
    if v['UpdateStreamState'] != 'Enabled':
      self.fail("Update stream service should be 'Enabled' but is '%s'" %
                v['UpdateStreamState'])

  def test_event_token(self):
    """Checks the background binlog monitor thread works."""
    replica_position = _get_repl_current_position()
    timeout = 10
    while True:
      value = None
      v = utils.get_vars(replica_tablet.port)
      if 'EventTokenPosition' in v:
        value = v['EventTokenPosition']
      if value == replica_position:
        logging.debug('got expected EventTokenPosition vars: %s', value)
        ts = v['EventTokenTimestamp']
        now = long(time.time())
        self.assertTrue(ts >= now - 120,
                        'EventTokenTimestamp is too old: %d < %d' %
                        (ts, now-120))
        self.assertTrue(ts <= now,
                        'EventTokenTimestamp is too recent: %d > %d' % (ts, now))
        break
      timeout = utils.wait_step(
          'EventTokenPosition must be up to date but got %s' % value, timeout)

    # With vttablet up to date, test a vttablet query returns the EventToken.
    qr = replica_tablet.execute('select * from vt_insert_test',
                                execute_options='include_event_token:true ')
    logging.debug('Got result: %s', qr)
    self.assertIn('extras', qr)
    self.assertIn('event_token', qr['extras'])
    self.assertEqual(qr['extras']['event_token']['position'], replica_position)

    # Same thing through vtgate
    qr = utils.vtgate.execute('select * from vt_insert_test',
                              tablet_type='replica',
                              execute_options='include_event_token:true ')
    logging.debug('Got result: %s', qr)
    self.assertIn('extras', qr)
    self.assertIn('event_token', qr['extras'])
    self.assertEqual(qr['extras']['event_token']['position'], replica_position)

    # Make sure the compare_event_token flag works, by sending a very
    # old timestamp, or a timestamp in the future.
    qr = replica_tablet.execute(
        'select * from vt_insert_test',
        execute_options='compare_event_token: <timestamp:123 > ')
    self.assertIn('extras', qr)
    self.assertIn('fresher', qr['extras'])
    self.assertTrue(qr['extras']['fresher'])

    future_timestamp = long(time.time()) + 100
    qr = replica_tablet.execute(
        'select * from vt_insert_test',
        execute_options='compare_event_token: <timestamp:%d > ' %
        future_timestamp)
    self.assertTrue(qr['extras'] is None)

    # Same thing through vtgate
    qr = utils.vtgate.execute(
        'select * from vt_insert_test', tablet_type='replica',
        execute_options='compare_event_token: <timestamp:123 > ')
    self.assertIn('extras', qr)
    self.assertIn('fresher', qr['extras'])
    self.assertTrue(qr['extras']['fresher'])

    future_timestamp = long(time.time()) + 100
    qr = utils.vtgate.execute(
        'select * from vt_insert_test', tablet_type='replica',
        execute_options='compare_event_token: <timestamp:%d > ' %
        future_timestamp)
    self.assertTrue(qr['extras'] is None)

    # Make sure the compare_event_token flag works, by sending a very
    # old timestamp, or a timestamp in the future, when combined with
    # include_event_token flag.
    qr = replica_tablet.execute('select * from vt_insert_test',
                                execute_options='include_event_token:true '
                                'compare_event_token: <timestamp:123 > ')
    self.assertIn('extras', qr)
    self.assertIn('fresher', qr['extras'])
    self.assertTrue(qr['extras']['fresher'])
    self.assertIn('event_token', qr['extras'])
    self.assertEqual(qr['extras']['event_token']['position'], replica_position)

    future_timestamp = long(time.time()) + 100
    qr = replica_tablet.execute('select * from vt_insert_test',
                                execute_options='include_event_token:true '
                                'compare_event_token: <timestamp:%d > ' %
                                future_timestamp)
    self.assertNotIn('fresher', qr['extras'])
    self.assertIn('event_token', qr['extras'])
    self.assertEqual(qr['extras']['event_token']['position'], replica_position)

    # Same thing through vtgate
    qr = utils.vtgate.execute('select * from vt_insert_test',
                              tablet_type='replica',
                              execute_options='include_event_token:true '
                              'compare_event_token: <timestamp:123 > ')
    self.assertIn('extras', qr)
    self.assertIn('fresher', qr['extras'])
    self.assertTrue(qr['extras']['fresher'])
    self.assertIn('event_token', qr['extras'])
    self.assertEqual(qr['extras']['event_token']['position'], replica_position)

    future_timestamp = long(time.time()) + 100
    qr = utils.vtgate.execute('select * from vt_insert_test',
                              tablet_type='replica',
                              execute_options='include_event_token:true '
                              'compare_event_token: <timestamp:%d > ' %
                              future_timestamp)
    self.assertNotIn('fresher', qr['extras'])
    self.assertIn('event_token', qr['extras'])
    self.assertEqual(qr['extras']['event_token']['position'], replica_position)

  def test_update_stream_interrupt(self):
    """Checks that a running query is terminated on going non-serving."""
    # Make sure the replica is replica type.
    utils.run_vtctl(
        ['ChangeSlaveType', replica_tablet.tablet_alias, 'replica'])
    logging.debug('sleeping a bit for the replica action to complete')
    utils.wait_for_tablet_type(replica_tablet.tablet_alias, 'replica', 30)

    # Save current position, insert some data.
    start_position = _get_repl_current_position()
    logging.debug('test_update_stream_interrupt starting @ %s', start_position)
    self._exec_vt_txn(self._populate_vt_a(1))
    self._exec_vt_txn(['delete from vt_a'])

    # Start an Update Stream from the slave. When we get the data, go to spare.
    # That should interrupt the streaming RPC.
    replica_conn = self._get_vtgate_stream_conn()
    first = True
    txn_count = 0
    try:
      for event, resume_timestamp in replica_conn.update_stream(
          'test_keyspace', topodata_pb2.REPLICA,
          event=query_pb2.EventToken(shard='0', position=start_position),
          shard='0'):
        logging.debug('test_update_stream_interrupt got event(%d): %s',
                      resume_timestamp, event)
        if first:
          utils.run_vtctl(
              ['ChangeSlaveType', replica_tablet.tablet_alias, 'spare'])
          utils.wait_for_tablet_type(replica_tablet.tablet_alias, 'spare', 30)
          first = False
        else:
          if event.event_token.position:
            txn_count += 1
      # FIX: self.fail (assertFail does not exist on TestCase).
      self.fail('update_stream terminated with no exception')
    except dbexceptions.DatabaseError as e:
      self.assertIn('context canceled', str(e))
    self.assertFalse(first)

    logging.debug('Streamed %d transactions before exiting', txn_count)
    replica_conn.close()

  def test_log_rotation(self):
    """Streaming keeps working across a binlog rotation (flush logs)."""
    start_position = _get_master_current_position()
    logging.debug('test_log_rotation: starting @ %s', start_position)
    position = start_position
    master_tablet.mquery('vt_test_keyspace', 'flush logs')
    self._exec_vt_txn(self._populate_vt_a(15))
    self._exec_vt_txn(['delete from vt_a'])

    master_conn = self._get_vtgate_stream_conn()
    master_txn_count = 0
    logs_correct = False
    for event, _ in master_conn.update_stream(
        'test_keyspace', topodata_pb2.MASTER,
        event=query_pb2.EventToken(shard='0', position=start_position),
        shard='0'):
      if event.event_token.position:
        master_txn_count += 1
        position = event.event_token.position
        if mysql_flavor().position_after(position, start_position):
          logs_correct = True
          logging.debug('Log rotation correctly interpreted')
          break
        if master_txn_count == 2:
          self.fail('ran out of logs')
    if not logs_correct:
      self.fail("Flush logs didn't get properly interpreted")
    master_conn.close()

  def test_timestamp_start_current_log(self):
    """Test we can start binlog streaming from the current binlog.

    Order of operation:
    - Insert something in the binlogs for tablet vt_a then delete it.
    - Get the current timestamp.
    - Wait for 4 seconds for the timestamp to change for sure.
    - Insert something else in vt_b and delete it.
    - Then we stream events starting at the original timestamp + 2, we
      should get only the vt_b events.
    """
    self._test_timestamp_start(rotate_before_sleep=False,
                               rotate_after_sleep=False)

  def test_timestamp_start_rotated_log_before_sleep(self):
    """Test we can start binlog streaming from the current rotated binlog.

    Order of operation:
    - Insert something in the binlogs for tablet vt_a then delete it.
    - Rotate the logs.
    - Get the current timestamp.
    - Wait for 4 seconds for the timestamp to change for sure.
    - Insert something else in vt_b and delete it.
    - Then we stream events starting at the original timestamp + 2, we
      should get only the vt_b events.

    In this test case, the current binlogs have a starting time stamp
    that is smaller than what we ask for, so it should just stay on it.
    """
    self._test_timestamp_start(rotate_before_sleep=True,
                               rotate_after_sleep=False)

  def test_timestamp_start_rotated_log_after_sleep(self):
    """Test we can start binlog streaming from the previous binlog.

    Order of operation:
    - Insert something in the binlogs for tablet vt_a then delete it.
    - Get the current timestamp.
    - Wait for 4 seconds for the timestamp to change for sure.
    - Rotate the logs.
    - Insert something else in vt_b and delete it.
    - Then we stream events starting at the original timestamp + 2, we
      should get only the vt_b events.

    In this test case, the current binlogs have a starting time stamp
    that is 2s higher than what we ask for, so it should go back to
    the previous binlog.
    """
    self._test_timestamp_start(rotate_before_sleep=False,
                               rotate_after_sleep=True)

  def _test_timestamp_start(self,
                            rotate_before_sleep=False,
                            rotate_after_sleep=False):
    """Common function for timestamp tests."""
    # Insert something in the binlogs for tablet vt_a then delete it.
    self._exec_vt_txn(self._populate_vt_a(1))
    self._exec_vt_txn(['delete from vt_a'])

    # (optional) Rotate the logs
    if rotate_before_sleep:
      master_tablet.mquery('vt_test_keyspace', 'flush logs')

    # Get the current timestamp.
    starting_timestamp = long(time.time())
    logging.debug('test_timestamp_start_current_log: starting @ %d',
                  starting_timestamp)

    # Wait for 4 seconds for the timestamp to change for sure.
    time.sleep(4)

    # (optional) Rotate the logs
    if rotate_after_sleep:
      master_tablet.mquery('vt_test_keyspace', 'flush logs')

    # Insert something else in vt_b and delete it.
    self._exec_vt_txn(self._populate_vt_b(1))
    self._exec_vt_txn(['delete from vt_b'])

    # make sure we only get events related to vt_b.
    master_conn = self._get_vtgate_stream_conn()
    count = 0
    for (event, resume_timestamp) in master_conn.update_stream(
        'test_keyspace', topodata_pb2.MASTER,
        timestamp=starting_timestamp+2,
        shard='0'):
      logging.debug('_test_timestamp_start: got event: %s @ %d',
                    str(event), resume_timestamp)
      # we might get a couple extra events from the rotation, ignore these.
      if event.statements[0].category == 0:  # Statement.Category.Error
        continue
      self.assertEqual(event.statements[0].table_name, 'vt_b',
                       'got wrong event: %s' % str(event))
      count += 1
      if count == 2:
        break
    master_conn.close()

  def test_timestamp_start_too_old(self):
    """Ask the server to start streaming from a timestamp 4h ago."""
    starting_timestamp = long(time.time()) - 4*60*60
    master_conn = self._get_vtgate_stream_conn()
    try:
      for (event, resume_timestamp) in master_conn.update_stream(
          'test_keyspace', topodata_pb2.MASTER,
          timestamp=starting_timestamp,
          shard='0'):
        # FIX: self.fail (assertFail does not exist on TestCase).
        self.fail('got an event: %s %d' % (str(event), resume_timestamp))
    except dbexceptions.QueryNotServed as e:
      self.assertIn('retry: cannot find relevant binlogs on this server',
                    str(e))
# Standard entry point: the vitess test utils parse the common test flags
# and then run the unittest suite defined above.
if __name__ == '__main__':
  utils.main()
|
bowlofstew/vitess | test/vtgate_utils_test.py | <gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
"""Tests for vtgate_utils."""
import exceptions
import time
import unittest
import utils
from vtdb import vtgate_utils
def setUpModule():
  # No global fixtures needed: these tests run entirely in-process.
  pass
def tearDownModule():
  # Nothing to clean up.
  pass
class SomeException(exceptions.Exception):
  """Retryable marker exception used to exercise the retry decorator."""
  pass
class AnotherException(exceptions.Exception):
  """Second retryable marker exception, distinct from SomeException."""
  pass
class FakeVtGateConnection(object):
  """Minimal stand-in for a vtgate connection.

  Records the wall-clock time (in ms) of every call to method() so the
  tests can verify the retry decorator's exponential backoff schedule.
  """

  def __init__(self):
    # Timestamps (ms) of each invocation of method().
    self.invoked_intervals = []
    # session is used by exponential_backoff_retry
    self.session = None

  @vtgate_utils.exponential_backoff_retry(
      retry_exceptions=(SomeException, AnotherException))
  def method(self, exc_to_raise):
    # Record the call time, then optionally raise to trigger a retry.
    self.invoked_intervals.append(int(time.time() * 1000))
    if exc_to_raise:
      raise exc_to_raise
class TestVtgateUtils(unittest.TestCase):
  """Tests for the exponential_backoff_retry decorator in vtgate_utils.

  Uses the deprecated assertEquals/assertTrue forms replaced with
  assertEqual/assertGreaterEqual so failures report the compared values.
  """

  def test_retry_exception(self):
    """A retryable exception is retried with exponentially growing delays."""
    fake_conn = FakeVtGateConnection()
    with self.assertRaises(SomeException):
      fake_conn.method(SomeException('an exception'))
    # One initial attempt plus NUM_RETRIES retries.
    self.assertEqual(
        len(fake_conn.invoked_intervals), vtgate_utils.NUM_RETRIES + 1)
    previous = fake_conn.invoked_intervals[0]
    delay = vtgate_utils.INITIAL_DELAY_MS
    for interval in fake_conn.invoked_intervals[1:]:
      # Each retry must wait at least the current backoff delay.
      self.assertGreaterEqual(interval - previous, delay)
      previous = interval
      delay *= vtgate_utils.BACKOFF_MULTIPLIER

  def test_retry_another_exception(self):
    """Any exception listed in retry_exceptions triggers the retry loop."""
    fake_conn = FakeVtGateConnection()
    with self.assertRaises(AnotherException):
      fake_conn.method(AnotherException('an exception'))
    self.assertEqual(
        len(fake_conn.invoked_intervals), vtgate_utils.NUM_RETRIES + 1)

  def test_no_retries_inside_txn(self):
    """No retry happens while a session (transaction) is open."""
    fake_conn = FakeVtGateConnection()
    fake_conn.session = object()
    with self.assertRaises(SomeException):
      fake_conn.method(SomeException('an exception'))
    self.assertEqual(len(fake_conn.invoked_intervals), 1)

  def test_no_retries_for_non_retryable_exception(self):
    """Exceptions outside retry_exceptions propagate immediately."""
    fake_conn = FakeVtGateConnection()
    with self.assertRaises(exceptions.Exception):
      fake_conn.method(exceptions.Exception('an exception'))
    self.assertEqual(len(fake_conn.invoked_intervals), 1)

  def test_no_retries_for_no_exception(self):
    """A successful call is made exactly once."""
    fake_conn = FakeVtGateConnection()
    fake_conn.method(None)
    self.assertEqual(len(fake_conn.invoked_intervals), 1)
# Standard test entry point: utils.main() handles flag parsing and runs
# the unittest suite.
if __name__ == '__main__':
  utils.main()
|
bowlofstew/vitess | py/vttest/init_data_options.py | <reponame>bowlofstew/vitess
"""Stores options used for initializing the database with randomized data.
The options stored correspond to command line flags. See run_local_database.py
for more details on each option.
"""
class InitDataOptions(object):
  """Bag of options controlling randomized test-data generation.

  Each attribute corresponds to a run_local_database.py command line flag.
  Only the names listed in valid_attrs may ever be assigned; any other
  name is almost certainly a typo and is rejected at assignment time.
  """

  # Immutable whitelist of supported option names.
  valid_attrs = frozenset([
      'rng_seed',
      'min_table_shard_size',
      'max_table_shard_size',
      'null_probability',
  ])

  def __setattr__(self, name, value):
    """Sets an option, rejecting unknown option names.

    Raises:
      AttributeError: if name is not a supported option. AttributeError is
        the conventional error for an invalid attribute and is a subclass
        of Exception, so existing broad handlers still catch it.
    """
    if name not in self.valid_attrs:
      raise AttributeError(
          'InitDataOptions: unsupported attribute: %s' % name)
    self.__dict__[name] = value
|
bowlofstew/vitess | py/setup.py | <gh_stars>1-10
# Copyright 2012, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""This is the setup script for the submodules in the Vitess python client.
"""
from distutils.core import setup

# Pure package declaration: ships the four client/proto/test packages.
# No extension modules or console scripts are built.
setup(name="vitess",
      packages=["vtctl", "vtdb", "vtproto", "vttest"],
      platforms="Any",
      )
|
bowlofstew/vitess | py/vtctl/vtctl_client.py | # Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""This module defines the vtctl client interface.
"""
import logging
# Registry mapping a protocol name (the string passed to vtctlclient as the
# -vtctl_client_protocol parameter) to the implementing connection class.
vtctl_client_conn_classes = {}


def register_conn_class(protocol, c):
  """Registers a connection implementation under a protocol name.

  Called by each implementation module, typically at import time.

  Args:
    protocol: short string to document the protocol.
    c: class to register.
  """
  vtctl_client_conn_classes[protocol] = c
def connect(protocol, *pargs, **kargs):
  """Returns a dialed VtctlClient connection to a vtctl server.

  Args:
    protocol: the registered protocol to use.
    *pargs: forwarded to the registered class constructor.
    **kargs: forwarded to the registered class constructor.

  Returns:
    A dialed VtctlClient.

  Raises:
    ValueError: if the protocol is unknown.
  """
  try:
    conn_class = vtctl_client_conn_classes[protocol]
  except KeyError:
    raise ValueError('Unknown vtctl protocol', protocol)
  connection = conn_class(*pargs, **kargs)
  connection.dial()
  return connection
class Event(object):
  """One streamed event produced by a VtctlClient.

  This is a plain value holder; eventually it will be replaced by the
  proto3 logutil.proto Event message.
  """

  # Severity levels, mirroring logutil's Event levels.
  INFO = 0
  WARNING = 1
  ERROR = 2
  CONSOLE = 3

  def __init__(self, time, level, file, line, value):
    # All fields are stored verbatim. 'file' intentionally mirrors the
    # proto field name even though it shadows the builtin.
    self.time = time
    self.level = level
    self.file = file
    self.line = line
    self.value = value
class VtctlClient(object):
  """Abstract interface that every vtctl client implementation must satisfy.

  Implementations override all of these methods. If anything goes wrong
  with the underlying connection, the object is discarded and a new one
  is dialed.
  """

  def __init__(self, addr, timeout):
    """Initializes a vtctl connection.

    Args:
      addr: server address, interpreted by the concrete protocol.
      timeout: connection timeout (float, in seconds).
    """
    pass

  def dial(self):
    """Dials the server; on success, close() must be called eventually."""
    pass

  def close(self):
    """Closes the connection; the object may be re-dialed afterwards."""
    pass

  def is_closed(self):
    """Reports the connection status.

    Returns:
      True if this connection is closed.
    """
    pass

  def execute_vtctl_command(self, args, action_timeout=30.0):
    """Executes a remote command on the vtctl server.

    Args:
      args: Command line to run.
      action_timeout: total timeout for the action (float, in seconds).

    Returns:
      This is a generator method that yields Event objects.
    """
    pass
def execute_vtctl_command(client, args, action_timeout=30.0,
                          info_to_debug=False):
  """Runs a remote vtctl command and logs its event stream.

  INFO, WARNING and ERROR events are forwarded to the logging module;
  CONSOLE events are accumulated and returned as the command's output.

  Args:
    client: VtctlClient object to use.
    args: Command line to run.
    action_timeout: total timeout for the action (float, in seconds).
    info_to_debug: if set, downgrades info messages to debug level.

  Returns:
    The console output of the action.
  """
  console_result = ''
  for event in client.execute_vtctl_command(args, action_timeout=action_timeout):
    if event.level == Event.CONSOLE:
      console_result += event.value
    elif event.level == Event.INFO:
      # Some callers (e.g. polling loops) prefer quieter output.
      log = logging.debug if info_to_debug else logging.info
      log('%s', event.value)
    elif event.level == Event.WARNING:
      logging.warning('%s', event.value)
    elif event.level == Event.ERROR:
      logging.error('%s', event.value)
  return console_result
|
bowlofstew/vitess | test/vtctld2_web_test.py | #!/usr/bin/env python
"""A vtctld2 webdriver test."""
import logging
import os
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
import unittest
from vtproto import vttest_pb2
from vttest import environment as vttest_environment
from vttest import local_database
from vttest import mysql_flavor
import environment
import utils
def setUpModule():
  """Starts an Xvfb display for headless webdriver runs, if requested."""
  try:
    if utils.options.xvfb:
      try:
        # This will be killed automatically by utils.kill_sub_processes()
        utils.run_bg(['Xvfb', ':15', '-ac'])
        os.environ['DISPLAY'] = ':15'
      except OSError as err:
        # Despite running in background, utils.run_bg() will throw immediately
        # if the Xvfb binary is not found.
        logging.error(
            "Can't start Xvfb (will try local DISPLAY instead): %s", err)
  except:
    # Deliberate bare except: clean up any partially started processes,
    # then re-raise the original error for unittest to report.
    tearDownModule()
    raise
def tearDownModule():
  """Removes temp files and kills child processes unless teardown is skipped."""
  utils.required_teardown()
  if utils.options.skip_teardown:
    # --skip-teardown leaves the cluster running for manual inspection.
    return
  utils.remove_tmp_files()
  utils.kill_sub_processes()
class TestVtctld2WebStatus(unittest.TestCase):
  """Webdriver test of the vtctld2 UI (dashboard and realtime status views).

  Drives a real browser (Chrome locally, Sauce Labs on Travis CI) against a
  vtcombo-backed local database with two keyspaces.
  """

  @classmethod
  def setUpClass(cls):
    """Set up two keyspaces: one unsharded, one with two shards."""
    if os.environ.get('CI') == 'true' and os.environ.get('TRAVIS') == 'true':
      # On Travis, run the browser remotely through the Sauce Labs tunnel.
      username = os.environ['SAUCE_USERNAME']
      access_key = os.environ['SAUCE_ACCESS_KEY']
      capabilities = {}
      capabilities['tunnel-identifier'] = os.environ['TRAVIS_JOB_NUMBER']
      capabilities['build'] = os.environ['TRAVIS_BUILD_NUMBER']
      capabilities['platform'] = 'Linux'
      capabilities['browserName'] = 'chrome'
      hub_url = '%s:%s@localhost:4445' % (username, access_key)
      cls.driver = webdriver.Remote(
          desired_capabilities=capabilities,
          command_executor='http://%s/wd/hub' % hub_url)
    else:
      os.environ['webdriver.chrome.driver'] = os.path.join(
          os.environ['VTROOT'], 'dist')
      # Only testing against Chrome for now
      cls.driver = webdriver.Chrome()

    # Two cells; test_keyspace is sharded in two, test_keyspace2 is not.
    topology = vttest_pb2.VTTestTopology()
    topology.cells.append('test')
    topology.cells.append('test2')
    keyspace = topology.keyspaces.add(name='test_keyspace')
    keyspace.replica_count = 2
    keyspace.rdonly_count = 2
    keyspace.shards.add(name='-80')
    keyspace.shards.add(name='80-')
    keyspace2 = topology.keyspaces.add(name='test_keyspace2')
    keyspace2.shards.add(name='0')
    keyspace2.replica_count = 2
    keyspace2.rdonly_count = 1

    port = environment.reserve_ports(1)
    vttest_environment.base_port = port
    mysql_flavor.set_mysql_flavor(None)

    cls.db = local_database.LocalDatabase(
        topology,
        os.path.join(os.environ['VTTOP'], 'test/vttest_schema'),
        False, None,
        web_dir=os.path.join(os.environ['VTTOP'], 'web/vtctld'),
        default_schema_dir=os.path.join(
            os.environ['VTTOP'], 'test/vttest_schema/default'),
        web_dir2=os.path.join(os.environ['VTTOP'], 'web/vtctld2/app'))
    cls.db.setup()
    cls.vtctld_addr = 'http://localhost:%d' % cls.db.config()['port']
    utils.pause('Paused test after vtcombo was started.\n'
                'For manual testing, connect to vtctld: %s' % cls.vtctld_addr)

  @classmethod
  def tearDownClass(cls):
    """Tears down the local database and quits the browser."""
    cls.db.teardown()
    cls.driver.quit()

  def _get_keyspaces(self):
    """Get list of all present keyspaces."""
    wait = WebDriverWait(self.driver, 10)
    wait.until(expected_conditions.visibility_of_element_located(
        (By.TAG_NAME, 'vt-dashboard')))
    dashboard_content = self.driver.find_element_by_tag_name('vt-dashboard')
    toolbars = dashboard_content.find_elements_by_class_name('vt-card-toolbar')
    return [t.find_element_by_class_name('vt-title').text for t in toolbars]

  def _get_dropdown_options(self, group):
    """Returns the option labels of the dropdown with the given id."""
    status_content = self.driver.find_element_by_tag_name('vt-status')
    dropdown = status_content.find_element_by_id(group)
    return [op.text for op in
            dropdown.find_elements_by_tag_name('option')]

  def _get_dropdown_selection(self, group):
    """Returns the currently selected label of the given dropdown."""
    status_content = self.driver.find_element_by_tag_name('vt-status')
    dropdown = status_content.find_element_by_id(group)
    return dropdown.find_element_by_tag_name('label').text

  def _change_dropdown_option(self, dropdown_id, dropdown_value):
    """Clicks the entry with the given label in the given dropdown."""
    status_content = self.driver.find_element_by_tag_name('vt-status')
    dropdown = status_content.find_element_by_id(dropdown_id)
    dropdown.click()
    options = dropdown.find_elements_by_tag_name('li')
    for op in options:
      if op.text == dropdown_value:
        logging.info('dropdown %s: option %s clicked', dropdown_id, op.text)
        op.click()
        break

  def _check_dropdowns(self, keyspaces, selected_keyspace, cells, selected_cell,
                       types, selected_type, metrics, selected_metric):
    """Checking that all dropdowns have the correct options and selection."""
    keyspace_options = self._get_dropdown_options('keyspace')
    keyspace_selected = self._get_dropdown_selection('keyspace')
    logging.info('Keyspace options: %s Keyspace selected: %s',
                 ', '.join(keyspace_options), keyspace_selected)
    self.assertListEqual(keyspaces, keyspace_options)
    self.assertEqual(selected_keyspace, keyspace_selected)

    cell_options = self._get_dropdown_options('cell')
    cell_selected = self._get_dropdown_selection('cell')
    logging.info('Cell options: %s Cell Selected: %s',
                 ', '.join(cell_options), cell_selected)
    self.assertListEqual(cells, cell_options)
    self.assertEqual(selected_cell, cell_selected)

    type_options = self._get_dropdown_options('type')
    type_selected = self._get_dropdown_selection('type')
    logging.info('Type options: %s Type Selected: %s',
                 ', '.join(cell_options), cell_selected)
    self.assertListEqual(types, type_options)
    self.assertEqual(selected_type, type_selected)

    metric_options = self._get_dropdown_options('metric')
    metric_selected = self._get_dropdown_selection('metric')
    logging.info('metric options: %s metric Selected: %s',
                 ', '.join(metric_options), metric_selected)
    self.assertListEqual(metrics, metric_options)
    self.assertEqual(selected_metric, metric_selected)

  def _check_heatmaps(self, selected_keyspace):
    """Checking that the view has the correct number of heatmaps drawn."""
    status_content = self.driver.find_element_by_tag_name('vt-status')
    keyspaces = status_content.find_elements_by_tag_name('vt-heatmap')
    logging.info('Number of keyspaces found: %d', len(keyspaces))
    if selected_keyspace == 'all':
      # 'all' shows one heatmap per real keyspace (minus the 'all' entry).
      available_keyspaces = self._get_dropdown_options('keyspace')
      self.assertEqual(len(keyspaces), len(available_keyspaces)-1)
      for ks in keyspaces:
        heading = ks.find_element_by_id('keyspaceName')
        logging.info('Keyspace name: %s', heading.text)
        try:
          ks.find_element_by_id(heading.text)
        except NoSuchElementException:
          self.fail('Cannot get keyspace')
        self.assertIn(heading.text, available_keyspaces)
    else:
      self.assertEquals(len(keyspaces), 1)
      heading = keyspaces[0].find_element_by_id('keyspaceName')
      logging.info('Keyspace name: %s', heading.text)
      try:
        keyspaces[0].find_element_by_id(heading.text)
      except NoSuchElementException:
        self.fail('Cannot get keyspace')
      self.assertEquals(heading.text, selected_keyspace)

  def _check_new_view(
      self, keyspaces, selected_keyspace, cells, selected_cell, types,
      selected_type, metrics, selected_metric):
    """Checking the dropdowns and heatmaps for each newly routed view."""
    logging.info('Testing realtime stats view')
    self._check_dropdowns(keyspaces, selected_keyspace, cells, selected_cell,
                          types, selected_type, metrics, selected_metric)
    self._check_heatmaps(selected_keyspace)

  def test_dashboard(self):
    """The dashboard lists exactly the two configured keyspaces."""
    logging.info('Testing dashboard view')
    logging.info('Fetching main vtctld page: %s/app2', self.vtctld_addr)
    self.driver.get('%s/app2' % self.vtctld_addr)
    keyspace_names = self._get_keyspaces()
    logging.info('Keyspaces: %s', ', '.join(keyspace_names))
    self.assertListEqual(['test_keyspace', 'test_keyspace2'], keyspace_names)

  def test_realtime_stats(self):
    """Cycles dropdown selections and verifies the resulting views."""
    logging.info('Testing realtime stats view')

    # Navigate to the status page from initial app.
    # TODO(thompsonja): Fix this once direct navigation works (going to status
    # page directly should display correctly)
    self.driver.get('%s/app2' % self.vtctld_addr)
    status_button = self.driver.find_element_by_partial_link_text('Status')
    status_button.click()
    wait = WebDriverWait(self.driver, 10)
    wait.until(expected_conditions.visibility_of_element_located(
        (By.TAG_NAME, 'vt-status')))

    # Each entry: (dropdown to change, value to pick, then the expected
    # keyspace/cell/type selection after the change).
    test_cases = [
        (None, None, 'all', 'all', 'all'),
        ('type', 'REPLICA', 'all', 'all', 'REPLICA'),
        ('cell', 'test2', 'all', 'test2', 'REPLICA'),
        ('keyspace', 'test_keyspace', 'test_keyspace', 'test2', 'REPLICA'),
        ('cell', 'all', 'test_keyspace', 'all', 'REPLICA'),
        ('type', 'all', 'test_keyspace', 'all', 'all'),
        ('cell', 'test2', 'test_keyspace', 'test2', 'all'),
        ('keyspace', 'all', 'all', 'test2', 'all'),
    ]

    for (dropdown_id, dropdown_val, keyspace, cell, tablet_type) in test_cases:
      logging.info('Routing to new %s-%s-%s view', keyspace, cell, tablet_type)
      if dropdown_id and dropdown_val:
        self._change_dropdown_option(dropdown_id, dropdown_val)
      # Cell test2 has no master tablets, so MASTER is not offered there.
      tablet_type_options = ['all', 'MASTER', 'REPLICA', 'RDONLY']
      if cell == 'test2':
        tablet_type_options = ['all', 'REPLICA', 'RDONLY']
      self._check_new_view(keyspaces=['all', 'test_keyspace', 'test_keyspace2'],
                           selected_keyspace=keyspace,
                           cells=['all', 'test', 'test2'],
                           selected_cell=cell,
                           types=tablet_type_options,
                           selected_type=tablet_type,
                           metrics=['lag', 'qps', 'health'],
                           selected_metric='health'
                          )
def add_test_options(parser):
  """Registers this test's extra command line flags on the option parser.

  Args:
    parser: an optparse.OptionParser to add the flags to.
  """
  # Xvfb (headless) mode is the default; --no-xvfb opts out of it.
  parser.add_option(
      '--no-xvfb', dest='xvfb', action='store_false', default=True,
      help='Use local DISPLAY instead of headless Xvfb mode.')
# Entry point: utils.main() also registers this test's extra flags.
if __name__ == '__main__':
  utils.main(test_options=add_test_options)
|
bowlofstew/vitess | examples/demo/run.py | <reponame>bowlofstew/vitess<filename>examples/demo/run.py
#!/usr/bin/env python
# Copyright 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""This is a demo for V3 features.
The script will launch all the processes necessary to bring up
the demo. It will bring up an HTTP server on port 8000 by default,
which you can override. Once done, hitting <Enter> will terminate
all processes. Vitess will always be started on port 12345.
"""
import json
import optparse
import os
import subprocess
import thread
from CGIHTTPServer import CGIHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from google.protobuf import text_format
from vtproto import vttest_pb2
def start_http_server(port):
  """Serves the demo's CGI pages on the given port from a daemon thread.

  Args:
    port: TCP port to listen on (all interfaces).
  """
  server = HTTPServer(('', port), CGIHTTPRequestHandler)
  # Serve in the background; the process exits when main() returns.
  thread.start_new_thread(server.serve_forever, ())
def start_vitess():
  """Launches the demo's local Vitess cluster and waits for it to come up.

  Returns:
    The run_local_database.py subprocess; pass it to stop_vitess() to
    shut the cluster down.
  """
  # Two keyspaces: 'user' split into two shards, 'lookup' unsharded.
  topology = vttest_pb2.VTTestTopology()
  user_keyspace = topology.keyspaces.add(name='user')
  user_keyspace.shards.add(name='-80')
  user_keyspace.shards.add(name='80-')
  lookup_keyspace = topology.keyspaces.add(name='lookup')
  lookup_keyspace.shards.add(name='0')

  vttop = os.environ['VTTOP']
  args = [os.path.join(vttop, 'py/vttest/run_local_database.py'),
          '--port', '12345',
          '--proto_topo', text_format.MessageToString(topology,
                                                      as_one_line=True),
          '--web_dir', os.path.join(vttop, 'web/vtctld'),
          '--schema_dir', os.path.join(vttop, 'examples/demo/schema')]
  sp = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)

  # The first stdout line is the cluster's JSON config; parsing it both
  # validates the output and blocks until vitess is actually serving.
  json.loads(sp.stdout.readline())
  return sp
def stop_vitess(sp):
  """Signals the cluster process to shut down and waits for it to exit.

  Args:
    sp: the subprocess returned by start_vitess().
  """
  # run_local_database.py tears everything down when it reads a line
  # on its stdin.
  sp.stdin.write('\n')
  sp.wait()
def main():
  """Entry point: brings up Vitess plus the demo HTTP server, waits for exit."""
  parser = optparse.OptionParser()
  parser.add_option('-p', '--port', default=8000, help='http server port')
  (options, unused_args) = parser.parse_args()

  vitess_proc = start_vitess()
  try:
    start_http_server(options.port)
    raw_input('\n'
              'Demo is running at: http://localhost:%d/\n'
              '\n'
              'Press enter to exit.\n' % options.port)
  finally:
    # Always tear the cluster down, even on Ctrl-C or an HTTP server error.
    stop_vitess(vitess_proc)
# Script entry point.
if __name__ == '__main__':
  main()
|
bowlofstew/vitess | test/grpc_protocols_flavor.py | <gh_stars>0
#!/usr/bin/env python
"""Defines which protocols to use for the gRPC flavor."""
from grpc.framework.interfaces.face import face
import protocols_flavor
# Now imports all the implementations we need.
# We will change this to explicit registration soon.
from vtctl import grpc_vtctl_client # pylint: disable=unused-import
from vtdb import grpc_vtgate_client # pylint: disable=unused-import
class GRpcProtocolsFlavor(protocols_flavor.ProtocolsFlavor):
  """Protocol flavor that selects gRPC for every Vitess RPC interface."""

  def binlog_player_protocol(self):
    return 'grpc'

  def vtctl_client_protocol(self):
    return 'grpc'

  def vtctl_python_client_protocol(self):
    return 'grpc'

  def vtworker_client_protocol(self):
    return 'grpc'

  def tablet_manager_protocol(self):
    return 'grpc'

  def tabletconn_protocol(self):
    return 'grpc'

  def throttler_client_protocol(self):
    return 'grpc'

  def vtgate_protocol(self):
    return 'grpc'

  def vtgate_python_protocol(self):
    return 'grpc'

  def vtgate_python_types(self):
    # The gRPC python client speaks native proto3 types.
    return 'proto3'

  def client_error_exception_type(self):
    # gRPC surfaces client-side failures as AbortionError.
    return face.AbortionError

  def rpc_timeout_message(self):
    return 'context deadline exceeded'

  def service_map(self):
    """Lists every gRPC service that test servers must expose."""
    return [
        'grpc-tabletmanager',
        'grpc-throttler',
        'grpc-queryservice',
        'grpc-updatestream',
        'grpc-vtctl',
        'grpc-vtworker',
        'grpc-vtgateservice',
    ]

  def vttest_protocol(self):
    return 'grpc'
|
bowlofstew/vitess | py/vttest/__init__.py | <reponame>bowlofstew/vitess
# Copyright 2015, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
from vttest import environment
from vttest import mysql_db_mysqlctl

# Default to the mysqlctl-managed MySQL implementation for vttest;
# callers may override environment.mysql_db_class before use.
environment.mysql_db_class = mysql_db_mysqlctl.MySqlDBMysqlctl
|
bowlofstew/vitess | py/vtproto/vtworkerservice_pb2.py | <filename>py/vtproto/vtworkerservice_pb2.py
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: vtworkerservice.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import vtworkerdata_pb2 as vtworkerdata__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='vtworkerservice.proto',
package='vtworkerservice',
syntax='proto3',
serialized_pb=_b('\n\x15vtworkerservice.proto\x12\x0fvtworkerservice\x1a\x12vtworkerdata.proto2\x83\x01\n\x08Vtworker\x12w\n\x16\x45xecuteVtworkerCommand\x12+.vtworkerdata.ExecuteVtworkerCommandRequest\x1a,.vtworkerdata.ExecuteVtworkerCommandResponse\"\x00\x30\x01\x62\x06proto3')
,
dependencies=[vtworkerdata__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
import abc
from grpc.beta import implementations as beta_implementations
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
# NOTE(review): generated by protoc (gRPC beta API); comments below will be
# lost on regeneration.
class BetaVtworkerServicer(object):
  """Server-side abstract interface for the Vtworker gRPC service."""
  __metaclass__ = abc.ABCMeta
  @abc.abstractmethod
  def ExecuteVtworkerCommand(self, request, context):
    raise NotImplementedError()
# NOTE(review): generated by protoc (gRPC beta API); comments below will be
# lost on regeneration.
class BetaVtworkerStub(object):
  """The interface to which stubs will conform."""
  __metaclass__ = abc.ABCMeta
  @abc.abstractmethod
  def ExecuteVtworkerCommand(self, request, timeout):
    raise NotImplementedError()
# NOTE(review): generated by protoc (gRPC beta API); do not hand-edit the
# logic — the duplicated import below is a known generator quirk.
def beta_create_Vtworker_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
  """Builds a gRPC beta server hosting the given Vtworker servicer."""
  import vtworkerdata_pb2
  import vtworkerdata_pb2
  request_deserializers = {
    ('vtworkerservice.Vtworker', 'ExecuteVtworkerCommand'): vtworkerdata_pb2.ExecuteVtworkerCommandRequest.FromString,
  }
  response_serializers = {
    ('vtworkerservice.Vtworker', 'ExecuteVtworkerCommand'): vtworkerdata_pb2.ExecuteVtworkerCommandResponse.SerializeToString,
  }
  method_implementations = {
    ('vtworkerservice.Vtworker', 'ExecuteVtworkerCommand'): face_utilities.unary_stream_inline(servicer.ExecuteVtworkerCommand),
  }
  server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
  return beta_implementations.server(method_implementations, options=server_options)
# NOTE(review): generated by protoc (gRPC beta API); do not hand-edit the
# logic — the duplicated import below is a known generator quirk.
def beta_create_Vtworker_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
  """Builds a gRPC beta client stub for the Vtworker service."""
  import vtworkerdata_pb2
  import vtworkerdata_pb2
  request_serializers = {
    ('vtworkerservice.Vtworker', 'ExecuteVtworkerCommand'): vtworkerdata_pb2.ExecuteVtworkerCommandRequest.SerializeToString,
  }
  response_deserializers = {
    ('vtworkerservice.Vtworker', 'ExecuteVtworkerCommand'): vtworkerdata_pb2.ExecuteVtworkerCommandResponse.FromString,
  }
  cardinalities = {
    'ExecuteVtworkerCommand': cardinality.Cardinality.UNARY_STREAM,
  }
  stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
  return beta_implementations.dynamic_stub(channel, 'vtworkerservice.Vtworker', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
|
bowlofstew/vitess | test/automation_vertical_split.py | <reponame>bowlofstew/vitess
#!/usr/bin/env python
#
# Copyright 2016, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
# "unittest" is used indirectly by importing "vertical_split", but pylint does
# not grasp this.
# Import it explicitly to make pylint happy and stop it complaining about
# setUpModule, tearDownModule and the missing module docstring.
import unittest # pylint: disable=unused-import
import environment
import utils
import vertical_split
def setUpModule():
  """Delegates module setup to the wrapped vertical_split test module."""
  vertical_split.setUpModule()
def tearDownModule():
  """Delegates module teardown to the wrapped vertical_split test module."""
  vertical_split.tearDownModule()
class TestAutomationVerticalSplit(vertical_split.TestVerticalSplit):
  """End-to-end test for running a vertical split via the automation framework.

  This test is a subset of vertical_split.py. The "VerticalSplitTask" automation
  operation runs the major commands for a vertical split instead of calling them
  "manually" from the test.
  """

  def test_vertical_split(self):
    """Runs VerticalSplitTask through automation and verifies the end state."""
    # Use a dedicated worker to run all vtworker commands.
    worker_proc, _, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj'],
        auto_log=True)
    vtworker_endpoint = 'localhost:' + str(worker_rpc_port)

    automation_server_proc, automation_server_port = (
        utils.run_automation_server())

    # Hand the automation task everything it needs to drive the split.
    _, vtctld_endpoint = utils.vtctld.rpc_endpoint()
    params = {'source_keyspace': 'source_keyspace',
              'dest_keyspace': 'destination_keyspace',
              'shard_list': '0',
              'tables': 'moving.*,view1',
              'vtctld_endpoint': vtctld_endpoint,
              'vtworker_endpoint': vtworker_endpoint,
             }
    args = ['--server', 'localhost:' + str(automation_server_port),
            '--task', 'VerticalSplitTask']
    args.extend(['--param=' + k + '=' + v for k, v in params.items()])
    # Blocks until the whole split task has finished.
    utils.run(environment.binary_args('automation_client') + args)

    # One of the two source rdonly tablets went spare after the diff.
    # Force a healthcheck on both to get them back to "rdonly".
    for t in [vertical_split.source_rdonly1, vertical_split.source_rdonly2]:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias])

    # After the split the moved tables must be blacklisted on every
    # source tablet so queries are routed to the destination keyspace.
    self._check_srv_keyspace('')
    self._check_blacklisted_tables(vertical_split.source_master,
                                   ['moving.*', 'view1'])
    self._check_blacklisted_tables(vertical_split.source_replica,
                                   ['moving.*', 'view1'])
    self._check_blacklisted_tables(vertical_split.source_rdonly1,
                                   ['moving.*', 'view1'])
    self._check_blacklisted_tables(vertical_split.source_rdonly2,
                                   ['moving.*', 'view1'])

    # check the binlog player is gone now
    vertical_split.destination_master.wait_for_binlog_player_count(0)

    utils.kill_sub_process(automation_server_proc, soft=True)
    utils.kill_sub_process(worker_proc, soft=True)
# Standard test entry point: utils.main() handles flag parsing and runs
# the unittest suite.
if __name__ == '__main__':
  utils.main()
|
bowlofstew/vitess | py/vtdb/proto3_encoding.py | # Copyright 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""Utility module for proto3-python conversions.
This module defines the conversion functions from proto3 to python,
and utility methods / classes to convert requests / responses for any
python connector using the proto3 requests / responses.
"""
import datetime
from decimal import Decimal
from vtproto import query_pb2
from vtproto import topodata_pb2
from vtproto import vtgate_pb2
from vtdb import field_types
from vtdb import keyrange_constants
from vtdb import keyspace
from vtdb import times
from vtdb import vtgate_utils
# conversions is a map of type to the conversion function that needs
# to be used to convert the incoming array of bytes to the
# corresponding native python type.
# If a type doesn't need conversion, it's not in the map.
conversions = {
    query_pb2.INT8: int,
    query_pb2.UINT8: int,
    query_pb2.INT16: int,
    query_pb2.UINT16: int,
    query_pb2.INT24: int,
    query_pb2.UINT24: int,
    query_pb2.INT32: int,
    query_pb2.UINT32: int,
    query_pb2.INT64: int,
    # UINT64 is the only integer type that can exceed a signed 64-bit
    # value, hence the python2 long.
    query_pb2.UINT64: long,
    query_pb2.FLOAT32: float,
    query_pb2.FLOAT64: float,
    query_pb2.TIMESTAMP: times.DateTimeOrNone,
    query_pb2.DATE: times.DateOrNone,
    query_pb2.TIME: times.TimeDeltaOrNone,
    query_pb2.DATETIME: times.DateTimeOrNone,
    query_pb2.YEAR: int,
    query_pb2.DECIMAL: Decimal,
    # query_pb2.TEXT: no conversion
    # query_pb2.BLOB: no conversion
    # query_pb2.VARCHAR: no conversion
    # query_pb2.VARBINARY: no conversion
    # query_pb2.CHAR: no conversion
    # query_pb2.BINARY: no conversion
    # query_pb2.BIT: no conversion
    # query_pb2.ENUM: no conversion
    # query_pb2.SET: no conversion
    # query_pb2.TUPLE: no conversion
}

# 2**63: the first value that no longer fits in a signed 64-bit integer.
# Used by convert_value() to pick INT64 vs UINT64 for python longs.
INT_UPPERBOUND_PLUS_ONE = 1<<63
def make_row(row, convs):
  """Builds a python native row from a proto3 row and conversion array.

  The proto3 row packs all column values into one byte string; the
  per-column lengths slice it apart. A length of -1 encodes SQL NULL and
  consumes no value bytes.

  Args:
    row: proto3 query.Row object.
    convs: per-column conversion functions; a falsy entry means the raw
      bytes are kept as-is.

  Returns:
    an array of converted column values (None for NULL columns).
  """
  converted_row = []
  offset = 0
  for i, length in enumerate(row.lengths):
    if length == -1:
      # NULL column: nothing to slice, offset is unchanged.
      converted_row.append(None)
      continue
    value = row.values[offset:offset + length]
    # Hoisted the duplicated offset bookkeeping out of the two branches.
    converted_row.append(convs[i](value) if convs[i] else value)
    offset += length
  return converted_row
def convert_value(value, proto_value, allow_lists=False):
  """Convert a variable from python type to proto type+value.

  The isinstance chain below is order-sensitive: bool is tested before int
  (bool is an int subclass) and datetime.datetime before datetime.date
  (datetime is a date subclass).

  Args:
    value: the python value.
    proto_value: the proto3 object, needs a type and value field.
    allow_lists: allows the use of python lists.
  """
  if isinstance(value, bool):
    # Booleans are sent as 0/1 integers.
    proto_value.type = query_pb2.INT64
    proto_value.value = str(int(value))
  elif isinstance(value, int):
    proto_value.type = query_pb2.INT64
    proto_value.value = str(value)
  elif isinstance(value, long):
    # Longs that don't fit a signed 64-bit int are tagged UINT64.
    if value < INT_UPPERBOUND_PLUS_ONE:
      proto_value.type = query_pb2.INT64
    else:
      proto_value.type = query_pb2.UINT64
    proto_value.value = str(value)
  elif isinstance(value, float):
    proto_value.type = query_pb2.FLOAT64
    proto_value.value = str(value)
  elif hasattr(value, '__sql_literal__'):
    # Objects that know their own SQL representation.
    proto_value.type = query_pb2.VARBINARY
    proto_value.value = str(value.__sql_literal__())
  elif isinstance(value, datetime.datetime):
    proto_value.type = query_pb2.VARBINARY
    proto_value.value = times.DateTimeToString(value)
  elif isinstance(value, datetime.date):
    proto_value.type = query_pb2.VARBINARY
    proto_value.value = times.DateToString(value)
  elif isinstance(value, str):
    proto_value.type = query_pb2.VARBINARY
    proto_value.value = value
  elif isinstance(value, field_types.NoneType):
    proto_value.type = query_pb2.NULL_TYPE
  elif allow_lists and isinstance(value, (set, tuple, list)):
    # this only works for bind variables, not for entities.
    proto_value.type = query_pb2.TUPLE
    for v in list(value):
      proto_v = proto_value.values.add()
      convert_value(v, proto_v)
  else:
    # Fallback: anything else is stringified and sent as raw bytes.
    proto_value.type = query_pb2.VARBINARY
    proto_value.value = str(value)
def convert_bind_vars(bind_variables, request_bind_variables):
  """Converts a map of python bind variables to proto3.

  Args:
    bind_variables: a map of strings to python native types (may be empty
      or None, in which case nothing is added).
    request_bind_variables: the proto3 map to fill in.
  """
  if not bind_variables:
    return
  for name, value in bind_variables.iteritems():
    # Lists are legal as bind variables (IN clauses), hence allow_lists.
    convert_value(value, request_bind_variables[name], allow_lists=True)
def convert_stream_event_statement(statement):
  """Converts encoded rows inside a StreamEvent.Statement to native types.

  Args:
    statement: the StreamEvent.Statement object.

  Returns:
    fields: array of names for the primary key columns (empty if the
      statement carries no primary key fields).
    rows: array of tuples, one per primary key value.
  """
  if not statement.primary_key_fields:
    return [], []
  fields = [f.name for f in statement.primary_key_fields]
  convs = [conversions.get(f.type) for f in statement.primary_key_fields]
  rows = [tuple(make_row(r, convs)) for r in statement.primary_key_values]
  return fields, rows
class Proto3Connection(object):
"""A base class for proto3-based python connectors.
It assumes the derived object will contain a proto3 self.session object.
"""
  def __init__(self):
    """Initializes the connection helper with no effective caller id."""
    # Remembered by begin_request() so the matching commit/rollback
    # request can reuse the same caller id.
    self._effective_caller_id = None
def _add_caller_id(self, request, caller_id):
"""Adds the vtgate_client.CallerID to the proto3 request, if any.
Args:
request: proto3 request (any of the {,stream,batch} execute queries).
caller_id: vtgate_client.CallerID object.
"""
if caller_id:
if caller_id.principal:
request.caller_id.principal = caller_id.principal
if caller_id.component:
request.caller_id.component = caller_id.component
if caller_id.subcomponent:
request.caller_id.subcomponent = caller_id.subcomponent
def _add_session(self, request):
"""Adds self.session to the request, if any.
Args:
request: the proto3 request to add session to.
"""
if self.session:
request.session.CopyFrom(self.session)
def update_session(self, response):
"""Updates the current session from the response, if it has one.
Args:
response: a proto3 response that may contain a session object.
"""
if response.HasField('session') and response.session:
self.session = response.session
def _convert_entity_ids(self, entity_keyspace_ids, request_eki):
"""Convert external entity id map to ProtoBuffer.
Args:
entity_keyspace_ids: map of entity_keyspace_id.
request_eki: destination proto3 list.
"""
for xid, kid in entity_keyspace_ids.iteritems():
eid = request_eki.add()
eid.keyspace_id = kid
convert_value(xid, eid, allow_lists=False)
def _add_key_ranges(self, request, key_ranges):
"""Adds the provided keyrange.KeyRange objects to the proto3 request.
Args:
request: proto3 request.
key_ranges: list of keyrange.KeyRange objects.
"""
for kr in key_ranges:
encoded_kr = request.key_ranges.add()
encoded_kr.start = kr.Start
encoded_kr.end = kr.End
def _extract_rpc_error(self, exec_method, error):
"""Raises a VitessError for a proto3 vtrpc.RPCError structure, if set.
Args:
exec_method: name of the method to use in VitessError.
error: vtrpc.RPCError structure.
Raises:
vtgate_utils.VitessError: if an error was set.
"""
if error.code:
raise vtgate_utils.VitessError(exec_method, error.code, error.message)
def build_conversions(self, qr_fields):
"""Builds an array of fields and conversions from a result fields.
Args:
qr_fields: query result fields
Returns:
fields: array of fields
convs: conversions to use.
"""
fields = []
convs = []
for field in qr_fields:
fields.append((field.name, field.type))
convs.append(conversions.get(field.type))
return fields, convs
def _get_rowset_from_query_result(self, query_result):
"""Builds a python rowset from proto3 response.
Args:
query_result: proto3 query.QueryResult object.
Returns:
Array of rows
Number of modified rows
Last insert ID
Fields array of (name, type) tuples.
"""
if not query_result:
return [], 0, 0, []
fields, convs = self.build_conversions(query_result.fields)
results = []
for row in query_result.rows:
results.append(tuple(make_row(row, convs)))
rowcount = query_result.rows_affected
lastrowid = query_result.insert_id
return results, rowcount, lastrowid, fields
def begin_request(self, effective_caller_id):
"""Builds a vtgate_pb2.BeginRequest object.
Also remembers the effective caller id for next call to
commit_request or rollback_request.
Args:
effective_caller_id: optional vtgate_client.CallerID.
Returns:
A vtgate_pb2.BeginRequest object.
"""
request = vtgate_pb2.BeginRequest()
self._add_caller_id(request, effective_caller_id)
self._effective_caller_id = effective_caller_id
return request
def commit_request(self):
"""Builds a vtgate_pb2.CommitRequest object.
Uses the effective_caller_id saved from begin_request().
It will also clear the saved effective_caller_id.
Returns:
A vtgate_pb2.CommitRequest object.
"""
request = vtgate_pb2.CommitRequest()
self._add_caller_id(request, self._effective_caller_id)
self._add_session(request)
self._effective_caller_id = None
return request
def rollback_request(self):
"""Builds a vtgate_pb2.RollbackRequest object.
Uses the effective_caller_id saved from begin_request().
It will also clear the saved effective_caller_id.
Returns:
A vtgate_pb2.RollbackRequest object.
"""
request = vtgate_pb2.RollbackRequest()
self._add_caller_id(request, self._effective_caller_id)
self._add_session(request)
self._effective_caller_id = None
return request
def execute_request_and_name(self, sql, bind_variables, tablet_type,
keyspace_name,
shards,
keyspace_ids,
key_ranges,
entity_column_name, entity_keyspace_id_map,
not_in_transaction, effective_caller_id):
"""Builds the right vtgate_pb2 Request and method for an _execute call.
Args:
sql: the query to run. Bind Variables in there should be in python format.
bind_variables: python map of bind variables.
tablet_type: string tablet type.
keyspace_name: keyspace to apply the query to.
shards: array of strings representing the shards.
keyspace_ids: array of keyspace ids.
key_ranges: array of keyrange.KeyRange objects.
entity_column_name: the column name to vary.
entity_keyspace_id_map: map of external id to keyspace id.
not_in_transaction: do not create a transaction to a new shard.
effective_caller_id: optional vtgate_client.CallerID.
Returns:
A vtgate_pb2.XXXRequest object.
A dict that contains the routing parameters.
The name of the remote method called.
"""
if shards is not None:
request = vtgate_pb2.ExecuteShardsRequest(keyspace=keyspace_name)
request.shards.extend(shards)
routing_kwargs = {'shards': shards}
method_name = 'ExecuteShards'
elif keyspace_ids is not None:
request = vtgate_pb2.ExecuteKeyspaceIdsRequest(keyspace=keyspace_name)
request.keyspace_ids.extend(keyspace_ids)
routing_kwargs = {'keyspace_ids': keyspace_ids}
method_name = 'ExecuteKeyspaceIds'
elif key_ranges is not None:
request = vtgate_pb2.ExecuteKeyRangesRequest(keyspace=keyspace_name)
self._add_key_ranges(request, key_ranges)
routing_kwargs = {'keyranges': key_ranges}
method_name = 'ExecuteKeyRanges'
elif entity_keyspace_id_map is not None:
request = vtgate_pb2.ExecuteEntityIdsRequest(
keyspace=keyspace_name,
entity_column_name=entity_column_name)
self._convert_entity_ids(entity_keyspace_id_map,
request.entity_keyspace_ids)
routing_kwargs = {'entity_keyspace_id_map': entity_keyspace_id_map,
'entity_column_name': entity_column_name}
method_name = 'ExecuteEntityIds'
else:
request = vtgate_pb2.ExecuteRequest()
if keyspace_name:
request.keyspace = keyspace_name
routing_kwargs = {}
method_name = 'Execute'
request.query.sql = sql
convert_bind_vars(bind_variables, request.query.bind_variables)
request.tablet_type = topodata_pb2.TabletType.Value(tablet_type.upper())
request.not_in_transaction = not_in_transaction
self._add_caller_id(request, effective_caller_id)
self._add_session(request)
return request, routing_kwargs, method_name
def process_execute_response(self, exec_method, response):
"""Processes an Execute* response, and returns the rowset.
Args:
exec_method: name of the method called.
response: proto3 response returned.
Returns:
results: list of rows.
rowcount: how many rows were affected.
lastrowid: auto-increment value for the last row inserted.
fields: describes the field names and types.
"""
self.update_session(response)
self._extract_rpc_error(exec_method, response.error)
return self._get_rowset_from_query_result(response.result)
def execute_batch_request_and_name(self, sql_list, bind_variables_list,
keyspace_list,
keyspace_ids_list, shards_list,
tablet_type, as_transaction,
effective_caller_id):
"""Builds the right vtgate_pb2 ExecuteBatch query.
Args:
sql_list: list os SQL statements.
bind_variables_list: list of bind variables.
keyspace_list: list of keyspaces.
keyspace_ids_list: list of list of keyspace_ids.
shards_list: list of shards.
tablet_type: target tablet type.
as_transaction: execute all statements in a single transaction.
effective_caller_id: optional vtgate_client.CallerID.
Returns:
A proper vtgate_pb2.ExecuteBatchXXX object.
The name of the remote method to call.
"""
if keyspace_ids_list and keyspace_ids_list[0]:
request = vtgate_pb2.ExecuteBatchKeyspaceIdsRequest()
for sql, bind_variables, keyspace_name, keyspace_ids in zip(
sql_list, bind_variables_list, keyspace_list, keyspace_ids_list):
query = request.queries.add(keyspace=keyspace_name)
query.query.sql = sql
convert_bind_vars(bind_variables, query.query.bind_variables)
query.keyspace_ids.extend(keyspace_ids)
method_name = 'ExecuteBatchKeyspaceIds'
else:
request = vtgate_pb2.ExecuteBatchShardsRequest()
for sql, bind_variables, keyspace_name, shards in zip(
sql_list, bind_variables_list, keyspace_list, shards_list):
query = request.queries.add(keyspace=keyspace_name)
query.query.sql = sql
convert_bind_vars(bind_variables, query.query.bind_variables)
query.shards.extend(shards)
method_name = 'ExecuteBatchShards'
request.tablet_type = topodata_pb2.TabletType.Value(tablet_type.upper())
request.as_transaction = as_transaction
self._add_caller_id(request, effective_caller_id)
self._add_session(request)
return request, method_name
def process_execute_batch_response(self, exec_method, response):
"""Processes an ExecuteBatch* response, and returns the rowsets.
Args:
exec_method: name of the method called.
response: proto3 response returned.
Returns:
rowsets: array of tuples as would be returned by an execute method.
"""
self.update_session(response)
self._extract_rpc_error(exec_method, response.error)
rowsets = []
for result in response.results:
rowset = self._get_rowset_from_query_result(result)
rowsets.append(rowset)
return rowsets
def update_stream_request(self,
keyspace_name,
shard,
key_range,
tablet_type,
timestamp,
event,
effective_caller_id):
"""Builds the right vtgate_pb2 UpdateStreamRequest.
Args:
keyspace_name: keyspace to apply the query to.
shard: shard to ask for.
key_range: keyrange.KeyRange object.
tablet_type: string tablet type.
timestamp: when to start the stream from.
event: alternate way to describe where to start the stream from.
effective_caller_id: optional vtgate_client.CallerID.
Returns:
A vtgate_pb2.UpdateStreamRequest object.
"""
request = vtgate_pb2.UpdateStreamRequest(keyspace=keyspace_name,
tablet_type=tablet_type,
shard=shard)
if timestamp:
request.timestamp = timestamp
if event:
if event.timestamp:
request.event.timestamp = event.timestamp
if event.shard:
request.event.shard = event.shard
if event.position:
request.event.position = event.position
if key_range:
request.key_range.start = key_range.Start
request.key_range.end = key_range.End
self._add_caller_id(request, effective_caller_id)
return request
def stream_execute_request_and_name(self, sql, bind_variables, tablet_type,
keyspace_name,
shards,
keyspace_ids,
key_ranges,
effective_caller_id):
"""Builds the right vtgate_pb2 Request and method for a _stream_execute.
Args:
sql: the query to run. Bind Variables in there should be in python format.
bind_variables: python map of bind variables.
tablet_type: string tablet type.
keyspace_name: keyspace to apply the query to.
shards: array of strings representing the shards.
keyspace_ids: array of keyspace ids.
key_ranges: array of keyrange.KeyRange objects.
effective_caller_id: optional vtgate_client.CallerID.
Returns:
A vtgate_pb2.StreamExecuteXXXXRequest object.
A dict that contains the routing parameters.
The name of the remote method called.
"""
if shards is not None:
request = vtgate_pb2.StreamExecuteShardsRequest(keyspace=keyspace_name)
request.shards.extend(shards)
routing_kwargs = {'shards': shards}
method_name = 'StreamExecuteShards'
elif keyspace_ids is not None:
request = vtgate_pb2.StreamExecuteKeyspaceIdsRequest(
keyspace=keyspace_name)
request.keyspace_ids.extend(keyspace_ids)
routing_kwargs = {'keyspace_ids': keyspace_ids}
method_name = 'StreamExecuteKeyspaceIds'
elif key_ranges is not None:
request = vtgate_pb2.StreamExecuteKeyRangesRequest(keyspace=keyspace_name)
self._add_key_ranges(request, key_ranges)
routing_kwargs = {'keyranges': key_ranges}
method_name = 'StreamExecuteKeyRanges'
else:
request = vtgate_pb2.StreamExecuteRequest()
if keyspace_name:
request.keyspace = keyspace_name
routing_kwargs = {}
method_name = 'StreamExecute'
request.query.sql = sql
convert_bind_vars(bind_variables, request.query.bind_variables)
request.tablet_type = topodata_pb2.TabletType.Value(tablet_type.upper())
self._add_caller_id(request, effective_caller_id)
return request, routing_kwargs, method_name
def srv_keyspace_proto3_to_old(self, sk):
"""Converts a proto3 SrvKeyspace.
Args:
sk: proto3 SrvKeyspace.
Returns:
dict with converted values.
"""
result = {}
if sk.sharding_column_name:
result['ShardingColumnName'] = sk.sharding_column_name
if sk.sharding_column_type == 1:
result['ShardingColumnType'] = keyrange_constants.KIT_UINT64
elif sk.sharding_column_type == 2:
result['ShardingColumnType'] = keyrange_constants.KIT_BYTES
sfmap = {}
for sf in sk.served_from:
tt = keyrange_constants.PROTO3_TABLET_TYPE_TO_STRING[sf.tablet_type]
sfmap[tt] = sf.keyspace
result['ServedFrom'] = sfmap
if sk.partitions:
pmap = {}
for p in sk.partitions:
tt = keyrange_constants.PROTO3_TABLET_TYPE_TO_STRING[p.served_type]
srs = []
for sr in p.shard_references:
result_sr = {
'Name': sr.name,
}
if sr.key_range:
result_sr['KeyRange'] = {
'Start': sr.key_range.start,
'End': sr.key_range.end,
}
srs.append(result_sr)
pmap[tt] = {
'ShardReferences': srs,
}
result['Partitions'] = pmap
return result
def keyspace_from_response(self, name, response):
"""Builds a Keyspace object from the response of a GetSrvKeyspace call.
Args:
name: keyspace name.
response: a GetSrvKeyspaceResponse object.
Returns:
A keyspace.Keyspace object.
"""
return keyspace.Keyspace(
name,
self.srv_keyspace_proto3_to_old(response.srv_keyspace))
|
bowlofstew/vitess | py/vtdb/times.py | # times module
#
# This module provides some Date and Time interface for vtdb
#
# Use Python datetime module to handle date and time columns.
from datetime import date
from datetime import datetime
from datetime import time
from datetime import timedelta
from math import modf
from time import localtime
# DB-API 2.0 (PEP 249) type-object aliases expected by client code.
Date = date
Time = time
TimeDelta = timedelta
Timestamp = datetime
# Legacy mx.DateTime-style names, presumably kept for backward
# compatibility with older callers — confirm before removing.
DateTimeDeltaType = timedelta
DateTimeType = datetime
def DateFromTicks(ticks):
  """Convert a UNIX timestamp into a datetime.date, in local time."""
  year, month, day = localtime(ticks)[:3]
  return date(year, month, day)
def TimeFromTicks(ticks):
  """Convert a UNIX timestamp into a datetime.time, in local time."""
  hour, minute, second = localtime(ticks)[3:6]
  return time(hour, minute, second)
def TimestampFromTicks(ticks):
  """Convert a UNIX timestamp into a datetime.datetime, in local time."""
  fields = localtime(ticks)[:6]
  return datetime(*fields)
def DateTimeOrNone(s):
  """Parse 'YYYY-MM-DD[ T]HH:MM:SS' into a datetime.

  Falls back to DateOrNone when there is no time separator or the
  value cannot be parsed as a datetime.
  """
  # Space takes precedence over 'T' as the date/time separator.
  for sep in (' ', 'T'):
    if sep in s:
      break
  else:
    return DateOrNone(s)
  try:
    date_part, time_part = s.split(sep, 1)
    parts = [int(p) for p in date_part.split('-') + time_part.split(':')]
    return datetime(*parts)
  except Exception:
    return DateOrNone(s)
def TimeDeltaOrNone(s):
  """Convert a MySQL TIME string '[-]H:MM:SS[.ffffff]' to a timedelta.

  In the MySQL TIME format the leading sign applies to the whole value,
  not just to the hours component.

  Args:
    s: the string to parse.

  Returns:
    A datetime.timedelta, or None if the value cannot be parsed.
  """
  try:
    h, m, sec = s.split(':')
    # Bug fix: the old code compared the *string* h against 0, which is
    # always False in Python 2, so negative values were never negated
    # (and the minutes/seconds carried the wrong sign anyway).
    negative = h.startswith('-')
    td = timedelta(
        hours=abs(int(h)), minutes=int(m), seconds=int(float(sec)),
        microseconds=int(modf(float(sec))[0] * 1000000))
    return -td if negative else td
  except Exception:
    # "value or None" contract: any malformed input yields None.
    return None
def TimeOrNone(s):
  """Parse 'HH:MM:SS[.ffffff]' into a datetime.time, or None on failure."""
  try:
    hh, mm, ss = s.split(':')
    whole_seconds = float(ss)
    return time(hour=int(hh), minute=int(mm), second=int(whole_seconds),
                microsecond=int(modf(whole_seconds)[0] * 1000000))
  except Exception:
    return None
def DateOrNone(s):
  """Parse 'YYYY-MM-DD' into a datetime.date, or None on failure."""
  try:
    year, month, day = s.split('-', 2)
    return date(int(year), int(month), int(day))
  except Exception:
    return None
def DateToString(d):
  """Serialize a datetime.date to ISO 'YYYY-MM-DD' text."""
  iso_text = d.isoformat()
  return iso_text
def DateTimeToString(dt):
  """Serialize a datetime to 'YYYY-MM-DD HH:MM:SS' (space-separated ISO)."""
  iso_text = dt.isoformat(' ')
  return iso_text
|
bowlofstew/vitess | py/vttest/mysql_db.py | # Copyright 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""This module defines the interface for the MySQL database.
"""
class MySqlDB(object):
  """A MySqlDB contains basic info about a MySQL instance.

  Concrete subclasses implement the lifecycle methods and the
  connection-parameter accessors; every method here simply signals
  that it must be overridden.
  """

  def __init__(self, directory, port, extra_my_cnf=None):
    # Stash the construction parameters for subclasses.
    self._directory = directory
    self._port = port
    self._extra_my_cnf = extra_my_cnf

  def _must_override(self):
    """Raise to signal a subclass forgot to override a required method."""
    raise NotImplementedError('MySqlDB is the base class.')

  def setup(self, port):
    """Starts the MySQL database."""
    self._must_override()

  def teardown(self):
    """Stops the MySQL database."""
    self._must_override()

  def username(self):
    self._must_override()

  def password(self):
    self._must_override()

  def hostname(self):
    self._must_override()

  def port(self):
    self._must_override()

  def unix_socket(self):
    self._must_override()

  def config(self):
    """Returns the json config to output."""
    self._must_override()
|
bowlofstew/vitess | py/vtdb/vtgate_cursor.py | <reponame>bowlofstew/vitess
# Copyright 2012, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""VTGateCursor, and StreamVTGateCursor."""
import itertools
import operator
import re
from vtdb import base_cursor
from vtdb import dbexceptions
write_sql_pattern = re.compile(r'\s*(insert|update|delete)', re.IGNORECASE)
def ascii_lower(string):
  """Lower-case, but only in the ASCII range."""
  # bytes.lower() only touches A-Z, leaving non-ASCII UTF-8 bytes intact.
  encoded = string.encode('utf8')
  return encoded.lower().decode('utf8')
class VTGateCursorMixin(object):
  """Shared accessors for VTGate cursor classes."""

  def connection_list(self):
    """Return the single underlying connection, wrapped in a list."""
    conn = self._conn
    return [conn]

  def is_writable(self):
    """Return whether this cursor may run DML."""
    return self._writable
class VTGateCursor(base_cursor.BaseListCursor, VTGateCursorMixin):
  """A cursor for execute statements to VTGate.

  Results are stored as a list.
  """

  def __init__(
      self, connection, tablet_type, keyspace=None,
      shards=None, keyspace_ids=None, keyranges=None,
      writable=False, as_transaction=False):
    """Init VTGateCursor.

    Args:
      connection: A PEP0249 connection object.
      tablet_type: Str tablet_type.
      keyspace: Str keyspace or None if batch API will be used.
      shards: List of strings.
      keyspace_ids: Struct('!Q').packed keyspace IDs.
      keyranges: Str keyranges.
      writable: True if writable.
      as_transaction: True if an executemany call is its own transaction.
    """
    super(VTGateCursor, self).__init__()
    self._conn = connection
    self._writable = writable
    self.description = None
    self.index = None
    self.keyspace = keyspace
    self.shards = shards
    self.keyspace_ids = keyspace_ids
    self.keyranges = keyranges
    self.lastrowid = None
    self.results = None
    self.routing = None
    self.rowcount = 0
    self.tablet_type = tablet_type
    self.as_transaction = as_transaction
    self._clear_batch_state()

  # pass kwargs here in case higher level APIs need to push more data through
  # for instance, a key value for shard mapping
  def execute(self, sql, bind_variables, **kwargs):
    """Perform a query, return the number of rows affected."""
    # NOTE(review): _clear_list_state, _handle_transaction_sql, and the
    # self.connection / self.effective_caller_id attributes come from
    # base_cursor.BaseListCursor — confirm there.
    self._clear_list_state()
    self._clear_batch_state()
    if self._handle_transaction_sql(sql):
      return
    entity_keyspace_id_map = kwargs.pop('entity_keyspace_id_map', None)
    entity_column_name = kwargs.pop('entity_column_name', None)
    write_query = bool(write_sql_pattern.match(sql))
    # NOTE: This check may also be done at higher layers but adding it
    # here for completion.
    if write_query:
      if not self.is_writable():
        raise dbexceptions.DatabaseError('DML on a non-writable cursor', sql)
      if entity_keyspace_id_map:
        raise dbexceptions.DatabaseError(
            'entity_keyspace_id_map is not allowed for write queries')
    # FIXME(alainjobart): the entity_keyspace_id_map should be in the
    # cursor, same as keyspace_ids, shards, keyranges, to avoid this hack.
    if entity_keyspace_id_map:
      shards = None
      keyspace_ids = None
      keyranges = None
    else:
      shards = self.shards
      keyspace_ids = self.keyspace_ids
      keyranges = self.keyranges
    self.results, self.rowcount, self.lastrowid, self.description = (
        self.connection._execute(  # pylint: disable=protected-access
            sql,
            bind_variables,
            tablet_type=self.tablet_type,
            keyspace_name=self.keyspace,
            shards=shards,
            keyspace_ids=keyspace_ids,
            keyranges=keyranges,
            entity_keyspace_id_map=entity_keyspace_id_map,
            entity_column_name=entity_column_name,
            not_in_transaction=not self.is_writable(),
            effective_caller_id=self.effective_caller_id,
            **kwargs))
    return self.rowcount

  def fetch_aggregate_function(self, func):
    # Apply an aggregate (min/max/sum/...) over the first column of all rows.
    return func(row[0] for row in self.fetchall())

  def fetch_aggregate(self, order_by_columns, limit):
    """Fetch from many shards, sort, then remove sort columns.

    A scatter query may return up to limit rows. Sort all results
    manually order them, and return the first rows.

    This is a special-use function.

    Args:
      order_by_columns: The ORDER BY clause. Each element is either a
        column, [column, 'ASC'], or [column, 'DESC'].
      limit: Int limit.

    Returns:
      Smallest rows, with up to limit items. First len(order_by_columns)
      columns are stripped.
    """
    sort_columns = []
    desc_columns = []
    for order_clause in order_by_columns:
      if isinstance(order_clause, (tuple, list)):
        sort_columns.append(order_clause[0])
        if ascii_lower(order_clause[1]) == 'desc':
          desc_columns.append(order_clause[0])
      else:
        sort_columns.append(order_clause)
    # sort the rows and then trim off the prepended sort columns
    if sort_columns:
      sorted_rows = list(sort_row_list_by_columns(
          self.fetchall(), sort_columns, desc_columns))[:limit]
    else:
      sorted_rows = itertools.islice(self.fetchall(), limit)
    neutered_rows = [row[len(order_by_columns):] for row in sorted_rows]
    return neutered_rows

  def _clear_batch_state(self):
    """Clear state that allows traversal to next query's results."""
    self.result_sets = []
    self.result_set_index = None

  def close(self):
    super(VTGateCursor, self).close()
    self._clear_batch_state()

  def executemany(self, sql, params_list, **kwargs):
    """Execute multiple statements in one batch.

    This adds len(params_list) result_sets to self.result_sets. Each
    result_set is a (results, rowcount, lastrowid, fields) tuple.

    Each call overwrites the old result_sets. After execution, nextset()
    is called to move the fetch state to the start of the first
    result set.

    Args:
      sql: The sql text, with %(format)s-style tokens. May be None.
      params_list: A list of the keyword params that are normally sent
        to execute. Either the sql arg or params['sql'] must be defined.
      **kwargs: passed as is to connection._execute_batch.
    """
    if sql:
      # One shared statement, per-call bind variables.
      sql_list = [sql] * len(params_list)
    else:
      sql_list = [params.get('sql') for params in params_list]
    bind_variables_list = [params['bind_variables'] for params in params_list]
    keyspace_list = [params['keyspace'] for params in params_list]
    keyspace_ids_list = [params.get('keyspace_ids') for params in params_list]
    shards_list = [params.get('shards') for params in params_list]
    self._clear_batch_state()
    # Find other _execute_batch calls in test code.
    self.result_sets = self.connection._execute_batch(  # pylint: disable=protected-access
        sql_list, bind_variables_list, keyspace_list, keyspace_ids_list,
        shards_list,
        self.tablet_type, self.as_transaction, self.effective_caller_id,
        **kwargs)
    self.nextset()

  def nextset(self):
    """Move the fetch state to the start of the next result set.

    self.(results, rowcount, lastrowid, description) will be set to
    the next result_set, and the fetch-commands will work on this
    result set.

    Returns:
      True if another result set exists, False if not.
    """
    if self.result_set_index is None:
      self.result_set_index = 0
    else:
      self.result_set_index += 1
    self._clear_list_state()
    if self.result_set_index < len(self.result_sets):
      self.results, self.rowcount, self.lastrowid, self.description = (
          self.result_sets[self.result_set_index])
      return True
    else:
      # Exhausted: reset batch state. Returns None (falsy), which callers
      # treat the same as False.
      self._clear_batch_state()
      return None
class StreamVTGateCursor(base_cursor.BaseStreamCursor, VTGateCursorMixin):
  """A cursor for streaming statements to VTGate.

  Results are returned as a generator.
  """

  def __init__(
      self, connection, tablet_type, keyspace=None,
      shards=None, keyspace_ids=None,
      keyranges=None, writable=False):
    super(StreamVTGateCursor, self).__init__()
    self._conn = connection
    self._writable = writable
    self.keyspace = keyspace
    self.shards = shards
    self.keyspace_ids = keyspace_ids
    self.keyranges = keyranges
    self.routing = None
    self.tablet_type = tablet_type

  def is_writable(self):
    return self._writable

  # pass kwargs here in case higher level APIs need to push more data through
  # for instance, a key value for shard mapping
  def execute(self, sql, bind_variables, **kwargs):
    """Start a streaming query."""
    # Streaming queries are read-only by design.
    if self._writable:
      raise dbexceptions.ProgrammingError('Streaming query cannot be writable')
    # NOTE(review): _clear_stream_state, self.generator, self.connection and
    # self.effective_caller_id come from base_cursor.BaseStreamCursor —
    # confirm there.
    self._clear_stream_state()
    self.generator, self.description = self.connection._stream_execute(  # pylint: disable=protected-access
        sql,
        bind_variables,
        tablet_type=self.tablet_type,
        keyspace_name=self.keyspace,
        shards=self.shards,
        keyspace_ids=self.keyspace_ids,
        keyranges=self.keyranges,
        not_in_transaction=not self.is_writable(),
        effective_caller_id=self.effective_caller_id,
        **kwargs)
    return 0
def sort_row_list_by_columns(row_list, sort_columns=(), desc_columns=()):
  """Sort by leading sort columns by stable-sorting in reverse-index order.

  Column i of each row is assumed to hold the value for sort_columns[i].
  Stable-sorting once per key, least-significant key first, yields a
  multi-key sort overall.

  Args:
    row_list: list (or other iterable) of row tuples.
    sort_columns: names of the leading sort columns, in significance order.
    desc_columns: subset of sort_columns to sort descending.

  Returns:
    The sorted rows; a list is sorted in place and returned, any other
    iterable is materialized into a new sorted list.
  """
  # reversed() needs a sequence, not a generator: list() instead of the
  # old needless [x for x in enumerate(...)] copy.
  for column_index, column_name in reversed(list(enumerate(sort_columns))):
    key = operator.itemgetter(column_index)
    descending = column_name in desc_columns
    if isinstance(row_list, list):
      row_list.sort(key=key, reverse=descending)
    else:
      # First pass over a non-list iterable materializes it.
      row_list = sorted(row_list, key=key, reverse=descending)
  return row_list
|
bowlofstew/vitess | test/resharding_bytes.py | #!/usr/bin/env python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""Re-runs resharding.py with a varbinary keyspace_id."""
from vtdb import keyrange_constants
import base_sharding
import resharding
import utils
# this test is just re-running an entire resharding.py with a
# varbinary keyspace_id
if __name__ == '__main__':
  # Override the default uint64 keyspace_id type with varbinary before
  # delegating to the standard resharding test driver.
  base_sharding.keyspace_id_type = keyrange_constants.KIT_BYTES
  utils.main(resharding)
|
bowlofstew/vitess | py/vtdb/keyrange_constants.py | <filename>py/vtdb/keyrange_constants.py
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""Constants related to keyspaces and shard names."""
# Keyrange that spans the entire space, used
# for unsharded database.
NON_PARTIAL_KEYRANGE = ''
MIN_KEY = ''
MAX_KEY = ''

# Keyspace-id column types, as lower-case strings.
KIT_UNSET = ''
KIT_UINT64 = 'uint64'
KIT_BYTES = 'bytes'

# Map from proto3 integer keyspace id type to lower case string version
PROTO3_KIT_TO_STRING = {
    0: KIT_UNSET,
    1: KIT_UINT64,
    2: KIT_BYTES,
}

# Map from proto3 integer tablet type value to the lower case string
# (Eventually we will use the proto3 version of this)
PROTO3_TABLET_TYPE_TO_STRING = {
    0: 'unknown',
    1: 'master',
    2: 'replica',
    3: 'rdonly',
    4: 'spare',
    5: 'experimental',
    6: 'backup',
    7: 'restore',
    8: 'worker',
    9: 'scrap',
}
|
bowlofstew/vitess | test/base_sharding.py | #!/usr/bin/env python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""This module contains a base class and utility functions for sharding tests.
"""
import struct
import logging
from vtdb import keyrange_constants
import utils
# Default keyspace_id column type for the sharding tests; individual test
# modules may override this (e.g. resharding_bytes.py sets KIT_BYTES).
keyspace_id_type = keyrange_constants.KIT_UINT64
# Packs an unsigned 64-bit keyspace id into its big-endian binary form.
pack_keyspace_id = struct.Struct('!Q').pack

# fixed_parent_id is used as fixed value for the "parent_id" column in all rows.
# All tests assume a multi-column primary key (parent_id, id) but only adjust
# the "id" column and use this fixed value for "parent_id".
# Since parent_id is fixed, not all test code has to include parent_id in a
# WHERE clause (at the price of a full table scan).
fixed_parent_id = 86
class BaseShardingTest(object):
"""This base class uses unittest.TestCase methods to check various things.
All sharding tests should inherit from this base class, and use the
methods as needed.
"""
# _insert_value inserts a value in the MySQL database along with the comments
# required for routing.
# NOTE: We assume that the column name for the keyspace_id is called
# 'custom_ksid_col'. This is a regression test which tests for
# places which previously hardcoded the column name to 'keyspace_id'.
  def _insert_value(self, tablet_obj, table, mid, msg, keyspace_id):
    """Inserts a row directly through MySQL, with vtgate routing comments.

    Args:
      tablet_obj: the tablet to write to.
      table: the table to insert into.
      mid: value for the id column.
      msg: value for the msg column.
      keyspace_id: value for the custom_ksid_col column.
    """
    k = utils.uint64_to_hex(keyspace_id)
    # The trailing /* vtgate:: */ and /* id */ comments are parsed by
    # filtered replication for routing; keep their format intact.
    tablet_obj.mquery(
        'vt_test_keyspace',
        ['begin',
         'insert into %s(parent_id, id, msg, custom_ksid_col) '
         'values(%d, %d, "%s", 0x%x) /* vtgate:: keyspace_id:%s */ '
         '/* id:%d */' %
         (table, fixed_parent_id, mid, msg, keyspace_id, k, mid),
         'commit'],
        write=True)
def _get_value(self, tablet_obj, table, mid):
"""Returns the row(s) from the table for the provided id, using MySQL.
Args:
tablet_obj: the tablet to get data from.
table: the table to query.
mid: id field of the table.
Returns:
A tuple of results.
"""
return tablet_obj.mquery(
'vt_test_keyspace',
'select parent_id, id, msg, custom_ksid_col from %s '
'where parent_id=%d and id=%d' %
(table, fixed_parent_id, mid))
  def _check_value(self, tablet_obj, table, mid, msg, keyspace_id,
                   should_be_here=True):
    """Asserts a row is present (or absent) on a tablet, via MySQL.

    Args:
      tablet_obj: the tablet to read from.
      table: the table to query.
      mid: id field of the row.
      msg: expected value of the msg column.
      keyspace_id: expected value of the custom_ksid_col column.
      should_be_here: when False, assert the row is absent instead.
    """
    result = self._get_value(tablet_obj, table, mid)
    if keyspace_id_type == keyrange_constants.KIT_BYTES:
      # Binary keyspace ids are stored packed; compare the packed form.
      fmt = '%s'
      keyspace_id = pack_keyspace_id(keyspace_id)
    else:
      fmt = '%x'
    if should_be_here:
      self.assertEqual(result, ((fixed_parent_id, mid, msg, keyspace_id),),
                       ('Bad row in tablet %s for id=%d, custom_ksid_col=' +
                        fmt + ', row=%s') % (tablet_obj.tablet_alias, mid,
                                             keyspace_id, str(result)))
    else:
      self.assertEqual(
          len(result), 0,
          ('Extra row in tablet %s for id=%d, custom_ksid_col=' +
           fmt + ': %s') % (tablet_obj.tablet_alias, mid, keyspace_id,
                            str(result)))
  def _is_value_present_and_correct(
      self, tablet_obj, table, mid, msg, keyspace_id):
    """_is_value_present_and_correct tries to read a value.

    Args:
      tablet_obj: the tablet to get data from.
      table: the table to query.
      mid: the id of the row to query.
      msg: expected value of the msg column in the row.
      keyspace_id: expected value of the keyspace_id column in the row.

    Returns:
      True if the value (row) is there and correct.
      False if the value is not there.
      If the value is not correct, the method will call self.fail.
    """
    result = self._get_value(tablet_obj, table, mid)
    if not result:
      # Absent row: not an error for this probe-style check.
      return False
    if keyspace_id_type == keyrange_constants.KIT_BYTES:
      fmt = '%s'
      keyspace_id = pack_keyspace_id(keyspace_id)
    else:
      fmt = '%x'
    # Present but wrong fails the test via assertEqual.
    self.assertEqual(result, ((fixed_parent_id, mid, msg, keyspace_id),),
                     ('Bad row in tablet %s for id=%d, '
                      'custom_ksid_col=' + fmt) % (
                          tablet_obj.tablet_alias, mid, keyspace_id))
    return True
  def check_binlog_player_vars(self, tablet_obj, source_shards,
                               seconds_behind_master_max=0):
    """Checks the binlog player variables are correctly exported.

    Args:
      tablet_obj: the tablet to check.
      source_shards: the shards to check we are replicating from.
      seconds_behind_master_max: if non-zero, the lag should be smaller than
        this value.
    """
    v = utils.get_vars(tablet_obj.port)
    self.assertIn('BinlogPlayerMapSize', v)
    self.assertEquals(v['BinlogPlayerMapSize'], len(source_shards))
    self.assertIn('BinlogPlayerSecondsBehindMaster', v)
    self.assertIn('BinlogPlayerSecondsBehindMasterMap', v)
    self.assertIn('BinlogPlayerSourceShardNameMap', v)
    # Map order is unspecified; compare the shard sets, sorted.
    shards = v['BinlogPlayerSourceShardNameMap'].values()
    self.assertEquals(sorted(shards), sorted(source_shards))
    self.assertIn('BinlogPlayerSourceTabletAliasMap', v)
    # xrange: this file targets Python 2.
    for i in xrange(len(source_shards)):
      self.assertIn('%d' % i, v['BinlogPlayerSourceTabletAliasMap'])
    if seconds_behind_master_max != 0:
      self.assertTrue(
          v['BinlogPlayerSecondsBehindMaster'] <
          seconds_behind_master_max,
          'BinlogPlayerSecondsBehindMaster is too high: %d > %d' % (
              v['BinlogPlayerSecondsBehindMaster'],
              seconds_behind_master_max))
      for i in xrange(len(source_shards)):
        self.assertTrue(
            v['BinlogPlayerSecondsBehindMasterMap']['%d' % i] <
            seconds_behind_master_max,
            'BinlogPlayerSecondsBehindMasterMap is too high: %d > %d' % (
                v['BinlogPlayerSecondsBehindMasterMap']['%d' % i],
                seconds_behind_master_max))
  def check_binlog_server_vars(self, tablet_obj, horizontal=True,
                               min_statements=0, min_transactions=0):
    """Checks the binlog server variables are correctly exported.

    Args:
      tablet_obj: the tablet to check.
      horizontal: true if horizontal split, false for vertical split.
      min_statements: check the statement count is greater or equal to this.
      min_transactions: check the transaction count is greater or equal to this.
    """
    v = utils.get_vars(tablet_obj.port)
    # Horizontal resharding streams by key range; vertical by tables.
    if horizontal:
      skey = 'UpdateStreamKeyRangeStatements'
      tkey = 'UpdateStreamKeyRangeTransactions'
    else:
      skey = 'UpdateStreamTablesStatements'
      tkey = 'UpdateStreamTablesTransactions'
    self.assertIn(skey, v)
    self.assertIn(tkey, v)
    if min_statements > 0:
      self.assertTrue(v[skey] >= min_statements,
                      'only got %d < %d statements' % (v[skey], min_statements))
    if min_transactions > 0:
      self.assertTrue(v[tkey] >= min_transactions,
                      'only got %d < %d transactions' % (v[tkey],
                                                         min_transactions))
  def check_stream_health_equals_binlog_player_vars(self, tablet_obj, count):
    """Checks the variables exported by streaming health check match vars.

    Args:
      tablet_obj: the tablet to check.
      count: number of binlog players to expect.
    """
    blp_stats = utils.get_vars(tablet_obj.port)
    self.assertEqual(blp_stats['BinlogPlayerMapSize'], count)
    # Enforce health check because it's not running by default as
    # tablets may not be started with it, or may not run it in time.
    utils.run_vtctl(['RunHealthCheck', tablet_obj.tablet_alias])
    stream_health = utils.run_vtctl_json(['VtTabletStreamHealth',
                                          '-count', '1',
                                          tablet_obj.tablet_alias])
    logging.debug('Got health: %s', str(stream_health))
    # A filtered-replication target must not report itself as serving.
    self.assertNotIn('serving', stream_health)
    self.assertIn('realtime_stats', stream_health)
    self.assertNotIn('health_error', stream_health['realtime_stats'])
    self.assertIn('binlog_players_count', stream_health['realtime_stats'])
    self.assertEqual(blp_stats['BinlogPlayerMapSize'],
                     stream_health['realtime_stats']['binlog_players_count'])
    # Lag may be omitted from the health message when it is zero.
    self.assertEqual(blp_stats['BinlogPlayerSecondsBehindMaster'],
                     stream_health['realtime_stats'].get(
                         'seconds_behind_master_filtered_replication', 0))
def check_destination_master(self, tablet_obj, source_shards):
  """Performs multiple checks on a destination master.

  Combines the following:
  - wait_for_binlog_player_count
  - check_binlog_player_vars
  - check_stream_health_equals_binlog_player_vars

  Args:
    tablet_obj: the tablet to check.
    source_shards: the shards to check we are replicating from.
  """
  expected_players = len(source_shards)
  tablet_obj.wait_for_binlog_player_count(expected_players)
  self.check_binlog_player_vars(tablet_obj, source_shards)
  self.check_stream_health_equals_binlog_player_vars(tablet_obj,
                                                     expected_players)
def check_running_binlog_player(self, tablet_obj, query, transaction,
                                extra_text=None):
  """Checks binlog player is running and showing in status.

  Args:
    tablet_obj: the tablet to check.
    query: number of expected queries.
    transaction: number of expected transactions.
    extra_text: if present, look for it in status too.
  """
  status = tablet_obj.get_status()
  # The status page reports the total as well as the per-category counts.
  expected_counts = (
      '<td><b>All</b>: %d<br><b>Query</b>: %d<br>'
      '<b>Transaction</b>: %d<br></td>' % (query + transaction, query,
                                           transaction))
  self.assertIn('Binlog player state: Running', status)
  self.assertIn(expected_counts, status)
  self.assertIn('</html>', status)
  if extra_text:
    self.assertIn(extra_text, status)
def check_no_binlog_player(self, tablet_obj):
  """Checks no binlog player is running.

  Also checks the tablet is not showing any binlog player in its status page.

  Args:
    tablet_obj: the tablet to check.
  """
  tablet_obj.wait_for_binlog_player_count(0)

  page = tablet_obj.get_status()
  for fragment in ('No binlog player is running', '</html>'):
    self.assertIn(fragment, page)
def check_throttler_service(self, throttler_server, names, rate):
  """Checks that the throttler responds to RPC requests.

  We assume it was enabled by SplitClone with the flag --max_tps 9999.

  Args:
    throttler_server: vtworker or vttablet RPC endpoint. Format: host:port
    names: Names of the throttlers e.g. BinlogPlayer/0 or <keyspace>/<shard>.
    rate: Expected initial rate the throttler was started with.
  """
  # Delegate to the two specialised checks: max rates first, then the
  # (Get|Update|Reset)ThrottlerConfiguration round-trip.
  server = throttler_server
  self.check_throttler_service_maxrates(server, names, rate)
  self.check_throttler_service_configuration(server, names)
def check_throttler_service_maxrates(self, throttler_server, names, rate):
  """Checks the vtctl ThrottlerMaxRates and ThrottlerSetRate commands.

  Args:
    throttler_server: vtworker or vttablet RPC endpoint. Format: host:port
    names: names of the throttlers expected to be registered.
    rate: expected initial max rate for every throttler.
  """
  # Avoid flakes by waiting for all throttlers. (Necessary because filtered
  # replication on vttablet will register the throttler asynchronously.)
  timeout_s = 10
  while True:
    stdout, _ = utils.run_vtctl(['ThrottlerMaxRates', '--server',
                                 throttler_server], auto_log=True,
                                trap_output=True)
    if '%d active throttler(s)' % len(names) in stdout:
      break
    # NOTE(review): presumably wait_step sleeps and raises once the timeout
    # is exhausted -- confirm against utils.wait_step.
    timeout_s = utils.wait_step('all throttlers registered', timeout_s)

  # Every registered throttler must report the initial rate in the table
  # output ('| <name> | <rate> |').
  for name in names:
    self.assertIn('| %s | %d |' % (name, rate), stdout)
  self.assertIn('%d active throttler(s)' % len(names), stdout)

  # Check that it's possible to change the max rate on the throttler.
  new_rate = 'unlimited'
  stdout, _ = utils.run_vtctl(['ThrottlerSetMaxRate', '--server',
                               throttler_server, new_rate],
                              auto_log=True, trap_output=True)
  self.assertIn('%d active throttler(s)' % len(names), stdout)

  # Re-read the rates and verify the new value took effect everywhere.
  stdout, _ = utils.run_vtctl(['ThrottlerMaxRates', '--server',
                               throttler_server], auto_log=True,
                              trap_output=True)
  for name in names:
    self.assertIn('| %s | %s |' % (name, new_rate), stdout)
  self.assertIn('%d active throttler(s)' % len(names), stdout)
def check_throttler_service_configuration(self, throttler_server, names):
  """Checks the vtctl (Get|Update|Reset)ThrottlerConfiguration commands.

  Args:
    throttler_server: vtworker or vttablet RPC endpoint. Format: host:port
    names: names of the throttlers expected to be registered.
  """
  # Verify updating the throttler configuration.
  # NOTE(review): --copy_zero_values presumably makes the explicit zero
  # fields below overwrite the existing config -- confirm in vtctl docs.
  stdout, _ = utils.run_vtctl(['UpdateThrottlerConfiguration',
                               '--server', throttler_server,
                               '--copy_zero_values',
                               'target_replication_lag_sec:12345 '
                               'max_replication_lag_sec:65789 '
                               'initial_rate:3 '
                               'max_increase:0.4 '
                               'emergency_decrease:0.5 '
                               'min_duration_between_increases_sec:6 '
                               'max_duration_between_increases_sec:7 '
                               'min_duration_between_decreases_sec:8 '
                               'spread_backlog_across_sec:9 '
                               'ignore_n_slowest_replicas:0 '
                               'ignore_n_slowest_rdonlys:0 '
                               'age_bad_rate_after_sec:12 '
                               'bad_rate_increase:0.13 '],
                              auto_log=True, trap_output=True)
  self.assertIn('%d active throttler(s)' % len(names), stdout)

  # Check the updated configuration.
  stdout, _ = utils.run_vtctl(['GetThrottlerConfiguration',
                               '--server', throttler_server],
                              auto_log=True, trap_output=True)
  for name in names:
    # The max should be set and have a non-zero value.
    # We test only the first field 'target_replication_lag_sec'.
    self.assertIn('| %s | target_replication_lag_sec:12345 ' % (name), stdout)
  # protobuf omits fields with a zero value in the text output.
  self.assertNotIn('ignore_n_slowest_replicas', stdout)
  self.assertIn('%d active throttler(s)' % len(names), stdout)

  # Reset clears our configuration values.
  stdout, _ = utils.run_vtctl(['ResetThrottlerConfiguration',
                               '--server', throttler_server],
                              auto_log=True, trap_output=True)
  self.assertIn('%d active throttler(s)' % len(names), stdout)

  # Check that the reset configuration no longer has our values.
  stdout, _ = utils.run_vtctl(['GetThrottlerConfiguration',
                               '--server', throttler_server],
                              auto_log=True, trap_output=True)
  for name in names:
    # Target lag value should no longer be 12345 and be back to the default.
    self.assertNotIn('target_replication_lag_sec:12345', stdout)
  self.assertIn('%d active throttler(s)' % len(names), stdout)
def verify_reconciliation_counters(self, worker_port, online_or_offline,
                                   table, inserts, updates, deletes, equal):
  """Checks that the reconciliation Counters have the expected values.

  Args:
    worker_port: port of the vtworker whose /debug/vars are read.
    online_or_offline: 'Online' or 'Offline'; selects the counter family.
    table: name of the table whose counters are checked.
    inserts: expected insert count for the table.
    updates: expected update count for the table.
    deletes: expected delete count for the table.
    equal: expected equal-rows count for the table.

  A table with an expected count of 0 must be entirely absent from the
  corresponding counter map; otherwise the recorded count must match.
  """
  worker_vars = utils.get_vars(worker_port)
  # The four counter families all follow the same pattern, so check them
  # data-driven instead of repeating the logic four times.
  expectations = (('InsertsCounters', inserts),
                  ('UpdatesCounters', updates),
                  ('DeletesCounters', deletes),
                  ('EqualRowsCounters', equal))
  for suffix, expected in expectations:
    counters = worker_vars['Worker' + online_or_offline + suffix]
    if expected == 0:
      self.assertNotIn(table, counters)
    else:
      self.assertEqual(counters[table], expected)
|
bowlofstew/vitess | py/vtdb/__init__.py | """This file provides the PEP0249 compliant variables for this module.
See https://www.python.org/dev/peps/pep-0249 for more information on these.
"""
# Copyright 2012, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.

# Follows the Python Database API 2.0.
apilevel = '2.0'
# Threads may share the module, but not connections.
# (we store session information in the connection now, that should be in the
# cursor but is not for historical reasons).
threadsafety = 2
# Named style, e.g. ...WHERE name=:name.
#
# Note we also provide a function in dbapi to convert from 'pyformat'
# to 'named', and prune unused bind variables in the SQL query.
#
# Also, we use an extension to bind variables to handle lists:
# Using the '::name' syntax (instead of ':name') will indicate a list bind
# variable. The type then has to be a list, set or tuple.
paramstyle = 'named'
|
bowlofstew/vitess | test/end2end/local_environment.py | """A local test environment."""
import base_environment
class LocalEnvironment(base_environment.BaseEnvironment):
  """Environment for locally run instances, CURRENTLY UNSUPPORTED."""

  def __init__(self):
    # Nothing local-specific yet; defer entirely to the base environment.
    super(LocalEnvironment, self).__init__()

  def create(self, **kwargs):
    # Intentionally a no-op: local cluster creation is not implemented.
    pass
|
bowlofstew/vitess | misc/parse_cover.py | <reponame>bowlofstew/vitess<filename>misc/parse_cover.py
#!/usr/bin/python
# this is a small helper script to parse test coverage and display stats.
import re
import sys
coverage_pattern = re.compile(r"coverage: (\d+).(\d+)% of statements")
no_test_file_count = 0
coverage_count = 0
coverage_sum = 0.0
for line in sys.stdin:
print line,
sys.stdout.flush
if line.find('[no test files]') != -1:
no_test_file_count += 1
continue
m = coverage_pattern.search(line)
if m != None:
coverage_count += 1
coverage_sum += float(m.group(1) + "." + m.group(2))
continue
directories_covered = coverage_count * 100 / (no_test_file_count + coverage_count)
average_coverage = coverage_sum / coverage_count
print "Directory test coverage: %u%%" % directories_covered
print "Average test coverage: %u%%" % int(average_coverage)
|
bowlofstew/vitess | test/merge_sharding_bytes.py | <filename>test/merge_sharding_bytes.py
#!/usr/bin/env python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""Re-runs merge_sharding.py with a varbinary keyspace_id."""
from vtdb import keyrange_constants
import base_sharding
import merge_sharding
import utils
if __name__ == '__main__':
  # Run the merge_sharding test suite, but with keyspace ids stored as
  # bytes (varbinary) instead of the default keyspace_id type.
  base_sharding.keyspace_id_type = keyrange_constants.KIT_BYTES
  utils.main(merge_sharding)
|
bowlofstew/vitess | test/initial_sharding_l2vtgate.py | #!/usr/bin/env python
#
# Copyright 2016, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""Re-runs initial_sharding.py with a l2vtgate process."""
import initial_sharding
import utils
if __name__ == '__main__':
  # Run the initial_sharding test suite with an l2vtgate process enabled.
  initial_sharding.use_l2vtgate = True
  utils.main(initial_sharding)
|
bowlofstew/vitess | py/vtproto/automationservice_pb2.py | <reponame>bowlofstew/vitess
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: automationservice.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import automation_pb2 as automation__pb2
# File descriptor for automationservice.proto. Generated by protoc --
# do not edit by hand; regenerate from the .proto instead.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='automationservice.proto',
  package='automationservice',
  syntax='proto3',
  serialized_pb=_b('\n\x17\x61utomationservice.proto\x12\x11\x61utomationservice\x1a\x10\x61utomation.proto2\x81\x02\n\nAutomation\x12t\n\x17\x45nqueueClusterOperation\x12*.automation.EnqueueClusterOperationRequest\x1a+.automation.EnqueueClusterOperationResponse\"\x00\x12}\n\x1aGetClusterOperationDetails\x12-.automation.GetClusterOperationDetailsRequest\x1a..automation.GetClusterOperationDetailsResponse\"\x00\x62\x06proto3')
  ,
  dependencies=[automation__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
import abc
from grpc.beta import implementations as beta_implementations
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class BetaAutomationServicer(object):
  """Abstract server-side servicer for the Automation service (generated).

  Subclass and implement both RPC methods; instances are passed to
  beta_create_Automation_server below.
  """
  __metaclass__ = abc.ABCMeta

  @abc.abstractmethod
  def EnqueueClusterOperation(self, request, context):
    # Handles automation.EnqueueClusterOperationRequest; must be overridden.
    raise NotImplementedError()

  @abc.abstractmethod
  def GetClusterOperationDetails(self, request, context):
    # Handles automation.GetClusterOperationDetailsRequest; must be overridden.
    raise NotImplementedError()
class BetaAutomationStub(object):
  """The interface to which stubs will conform."""
  __metaclass__ = abc.ABCMeta

  @abc.abstractmethod
  def EnqueueClusterOperation(self, request, timeout):
    raise NotImplementedError()
  # Placeholder for the async variant; real stubs replace this attribute.
  EnqueueClusterOperation.future = None

  @abc.abstractmethod
  def GetClusterOperationDetails(self, request, timeout):
    raise NotImplementedError()
  # Placeholder for the async variant; real stubs replace this attribute.
  GetClusterOperationDetails.future = None
def beta_create_Automation_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
  """Creates a beta-API gRPC server wrapping the given Automation servicer.

  Generated code; the repeated imports below are a protoc codegen artifact
  (one per method direction) and are harmless.
  """
  import automation_pb2
  import automation_pb2
  import automation_pb2
  import automation_pb2
  # Maps (service, method) to the parser for incoming request bytes.
  request_deserializers = {
    ('automationservice.Automation', 'EnqueueClusterOperation'): automation_pb2.EnqueueClusterOperationRequest.FromString,
    ('automationservice.Automation', 'GetClusterOperationDetails'): automation_pb2.GetClusterOperationDetailsRequest.FromString,
  }
  # Maps (service, method) to the serializer for outgoing response messages.
  response_serializers = {
    ('automationservice.Automation', 'EnqueueClusterOperation'): automation_pb2.EnqueueClusterOperationResponse.SerializeToString,
    ('automationservice.Automation', 'GetClusterOperationDetails'): automation_pb2.GetClusterOperationDetailsResponse.SerializeToString,
  }
  # Both RPCs are unary-unary; wrap the servicer methods accordingly.
  method_implementations = {
    ('automationservice.Automation', 'EnqueueClusterOperation'): face_utilities.unary_unary_inline(servicer.EnqueueClusterOperation),
    ('automationservice.Automation', 'GetClusterOperationDetails'): face_utilities.unary_unary_inline(servicer.GetClusterOperationDetails),
  }
  server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
  return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Automation_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
  """Creates a beta-API gRPC client stub for the Automation service.

  Generated code; the repeated imports below are a protoc codegen artifact.
  """
  import automation_pb2
  import automation_pb2
  import automation_pb2
  import automation_pb2
  # Serializers for outgoing requests, keyed by (service, method).
  request_serializers = {
    ('automationservice.Automation', 'EnqueueClusterOperation'): automation_pb2.EnqueueClusterOperationRequest.SerializeToString,
    ('automationservice.Automation', 'GetClusterOperationDetails'): automation_pb2.GetClusterOperationDetailsRequest.SerializeToString,
  }
  # Parsers for incoming responses, keyed by (service, method).
  response_deserializers = {
    ('automationservice.Automation', 'EnqueueClusterOperation'): automation_pb2.EnqueueClusterOperationResponse.FromString,
    ('automationservice.Automation', 'GetClusterOperationDetails'): automation_pb2.GetClusterOperationDetailsResponse.FromString,
  }
  # Both RPCs are simple request/response calls.
  cardinalities = {
    'EnqueueClusterOperation': cardinality.Cardinality.UNARY_UNARY,
    'GetClusterOperationDetails': cardinality.Cardinality.UNARY_UNARY,
  }
  stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
  return beta_implementations.dynamic_stub(channel, 'automationservice.Automation', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
|
lukebrez/dfgui | scripts/expt_gui.py | import dfgui
import pandas as pd

# Load the pickled master experiment table and browse it interactively.
# NOTE(review): hard-coded Windows network path -- only works on that machine.
file = 'X:/data/Brezovec/2P_Imaging/20190101_walking_dataset/master_expt.pkl'
df = pd.read_pickle(file)
dfgui.show(df)
lukebrez/dfgui | dfgui/dfgui.py | #!/usr/bin/env python
# -*- encoding: utf-8
from __future__ import absolute_import, division, print_function
try:
import wx
except ImportError:
import sys
sys.path += [
"/usr/lib/python2.7/dist-packages/wx-2.8-gtk2-unicode",
"/usr/lib/python2.7/dist-packages"
]
import wx
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
from bisect import bisect
import numpy as np
import pandas as pd
# unused import required to allow 'eval' of date filters
import datetime
from datetime import date
# try to get nicer plotting styles
try:
import seaborn
seaborn.set()
except ImportError:
try:
from matplotlib import pyplot as plt
plt.style.use('ggplot')
except AttributeError:
pass
class ListCtrlDataFrame(wx.ListCtrl):
    """Virtual wx.ListCtrl that renders the rows of a pandas DataFrame.

    The control keeps the untouched original frame (``df_orig``) plus a
    boolean row mask and a column projection; ``self.df`` is the currently
    visible view. Rows are rendered on demand through ``OnGetItemText``
    (virtual mode), so large frames stay responsive.
    """

    # TODO: we could do something more sophisticated to come
    # TODO: up with a reasonable column width...
    DEFAULT_COLUMN_WIDTH = 100
    # Name of the throwaway column used to remember the selection across sorts.
    TMP_SELECTION_COLUMN = 'tmp_selection_column'

    def __init__(self, parent, df, status_bar_callback):
        """
        Args:
            parent: parent wx window.
            df: pandas DataFrame to display (never modified in place).
            status_bar_callback: callable(field_index, text) used to report
                the row count (field 0) and filter errors (field 1).
        """
        wx.ListCtrl.__init__(
            self, parent, -1,
            style=wx.LC_REPORT | wx.LC_VIRTUAL | wx.LC_HRULES | wx.LC_VRULES | wx.LB_MULTIPLE
        )

        self.status_bar_callback = status_bar_callback

        self.df_orig = df
        self.original_columns = self.df_orig.columns[:]
        self.current_columns = self.df_orig.columns[:]

        # (column_index, ascending) of the last sort, or None.
        self.sort_by_column = None

        self._reset_mask()

        # prepare attribute for alternating colors of rows
        self.attr_light_blue = wx.ListItemAttr()
        self.attr_light_blue.SetBackgroundColour("#D6EBFF")

        self.Bind(wx.EVT_LIST_COL_CLICK, self._on_col_click)
        self.Bind(wx.EVT_RIGHT_DOWN, self._on_right_click)

        self.df = pd.DataFrame({})  # init empty to force initial update
        self._update_rows()
        self._update_columns(self.original_columns)

    def _reset_mask(self):
        # All rows visible; indexed like df_orig so boolean ops align.
        self.mask = pd.Series([True] * self.df_orig.shape[0], index=self.df_orig.index)

    def _update_columns(self, columns):
        self.ClearAll()
        for i, col in enumerate(columns):
            self.InsertColumn(i, col)
            self.SetColumnWidth(i, self.DEFAULT_COLUMN_WIDTH)
        # Note that we have to reset the count as well because ClearAll()
        # not only deletes columns but also the count...
        self.SetItemCount(len(self.df))

    def set_columns(self, columns_to_use):
        """
        External interface to set the column projections.
        """
        self.current_columns = columns_to_use
        self._update_rows()
        self._update_columns(columns_to_use)

    def _update_rows(self):
        old_len = len(self.df)
        self.df = self.df_orig.loc[self.mask.values, self.current_columns]
        new_len = len(self.df)
        if old_len != new_len:
            self.SetItemCount(new_len)
            self.status_bar_callback(0, "Number of rows: {}".format(new_len))

    def apply_filter(self, conditions):
        """
        External interface to set a filter.

        Args:
            conditions: list of (column, expression) pairs; '_' in an
                expression stands for the named column. All conditions are
                combined with logical AND.

        Returns:
            (number of matching rows, whether the visible rows changed).
        """
        old_mask = self.mask.copy()

        if len(conditions) == 0:
            self._reset_mask()
        else:
            self._reset_mask()  # set all to True for destructive conjunction
            no_error = True
            for column, condition in conditions:
                if condition.strip() == '':
                    continue
                condition = condition.replace("_", "self.df_orig['{}']".format(column))
                print("Evaluating condition:", condition)
                try:
                    # SECURITY NOTE: eval of user-typed filter text is the
                    # intended GUI feature here, but never feed it untrusted
                    # input from outside the local user.
                    tmp_mask = eval(condition)
                    # FIX: the `np.bool` alias was removed in NumPy 1.24;
                    # comparing the dtype against the builtin `bool` is
                    # equivalent on all NumPy versions.
                    if isinstance(tmp_mask, pd.Series) and tmp_mask.dtype == bool:
                        self.mask &= tmp_mask
                except Exception as e:
                    print("Failed with:", e)
                    no_error = False
                    self.status_bar_callback(
                        1,
                        "Evaluating '{}' failed with: {}".format(condition, e)
                    )
            if no_error:
                self.status_bar_callback(1, "")

        has_changed = any(old_mask != self.mask)
        if has_changed:
            self._update_rows()

        return len(self.df), has_changed

    def get_selected_items(self):
        """
        Gets the selected items for the list control.
        Selection is returned as a list of selected indices,
        low to high.
        """
        selection = []
        current = -1  # start at -1 to get the first selected item
        while True:
            # FIX: renamed loop variable so it no longer shadows builtin next().
            item = self.GetNextItem(current, wx.LIST_NEXT_ALL, wx.LIST_STATE_SELECTED)
            if item == -1:
                return selection
            else:
                selection.append(item)
                current = item

    def get_filtered_df(self):
        """Return the filtered (but un-projected) view of the original frame."""
        return self.df_orig.loc[self.mask, :]

    def _on_col_click(self, event):
        """
        Sort data frame by selected column.

        The current selection is stashed in a temporary column so it can be
        restored after the rows move.
        """
        # get currently selected items
        selected = self.get_selected_items()
        # append a temporary column to store the currently selected items
        self.df[self.TMP_SELECTION_COLUMN] = False
        self.df.iloc[selected, -1] = True

        # get column name to use for sorting
        col = event.GetColumn()

        # determine if ascending or descending
        if self.sort_by_column is None or self.sort_by_column[0] != col:
            ascending = True
        else:
            ascending = not self.sort_by_column[1]

        # store sort column and sort direction
        self.sort_by_column = (col, ascending)

        try:
            # pandas 0.17
            self.df.sort_values(self.df.columns[col], inplace=True, ascending=ascending)
        except AttributeError:
            # pandas 0.16 compatibility
            self.df.sort(self.df.columns[col], inplace=True, ascending=ascending)

        # deselect all previously selected
        for i in selected:
            self.Select(i, on=False)

        # determine indices of selection after sorting
        selected_bool = self.df.iloc[:, -1] == True
        selected = self.df.reset_index().index[selected_bool]

        # select corresponding rows
        for i in selected:
            self.Select(i, on=True)

        # delete temporary column
        del self.df[self.TMP_SELECTION_COLUMN]

    def _on_right_click(self, event):
        """
        Copies a cell into clipboard on right click. Unfortunately,
        determining the clicked column is not straightforward. This
        approach is inspired by the TextEditMixin in:
        /usr/lib/python2.7/dist-packages/wx-2.8-gtk2-unicode/wx/lib/mixins/listctrl.py
        More references:
        - http://wxpython-users.1045709.n5.nabble.com/Getting-row-col-of-selected-cell-in-ListCtrl-td2360831.html
        - https://groups.google.com/forum/#!topic/wxpython-users/7BNl9TA5Y5U
        - https://groups.google.com/forum/#!topic/wxpython-users/wyayJIARG8c
        """
        if self.HitTest(event.GetPosition()) != wx.NOT_FOUND:
            x, y = event.GetPosition()
            row, flags = self.HitTest((x, y))

            # Accumulate column boundaries so the clicked x-position can be
            # mapped to a column via binary search.
            col_locs = [0]
            loc = 0
            for n in range(self.GetColumnCount()):
                loc = loc + self.GetColumnWidth(n)
                col_locs.append(loc)

            scroll_pos = self.GetScrollPos(wx.HORIZONTAL)
            # this is crucial step to get the scroll pixel units
            unit_x, unit_y = self.GetMainWindow().GetScrollPixelsPerUnit()

            col = bisect(col_locs, x + scroll_pos * unit_x) - 1
            value = self.df.iloc[row, col]

            clipdata = wx.TextDataObject()
            clipdata.SetText(str(value))
            wx.TheClipboard.Open()
            wx.TheClipboard.SetData(clipdata)
            wx.TheClipboard.Close()

    def OnGetItemText(self, item, col):
        """
        Implements the item getter for a "virtual" ListCtrl.
        """
        value = self.df.iloc[item, col]
        return str(value)

    def OnGetItemAttr(self, item):
        """
        Implements the attribute getter for a "virtual" ListCtrl.

        Every other row gets a light blue background.
        """
        if item % 2 == 0:
            return self.attr_light_blue
        else:
            return None
class DataframePanel(wx.Panel):
    """
    Panel providing the main data frame table view.

    Owns the ListCtrlDataFrame instance (exposed as ``df_list_ctrl``) that
    the other notebook pages act upon.
    """
    def __init__(self, parent, df, status_bar_callback):
        wx.Panel.__init__(self, parent)

        self.df_list_ctrl = ListCtrlDataFrame(self, df, status_bar_callback)

        layout = wx.BoxSizer(wx.VERTICAL)
        layout.Add(self.df_list_ctrl, 1, wx.ALL | wx.EXPAND | wx.GROW, 5)
        self.SetSizer(layout)

        self.Show()
class ListBoxDraggable(wx.ListBox):
    """
    Helper class to provide ListBox with extended behavior.

    Adds two behaviors on top of a plain wx.ListBox:
    - left click toggles an item's selection, tracked in ``selected_items``
      (the native multi-select focus handling is unreliable, see below);
    - right-button dragging reorders items, with the permutation tracked in
      ``index_mapping`` so display positions map back to the original data.
    """
    def __init__(self, parent, size, data, *args, **kwargs):
        wx.ListBox.__init__(self, parent, size, **kwargs)

        self.data = data

        self.InsertItems(data, 0)

        self.Bind(wx.EVT_LISTBOX, self.on_selection_changed)

        self.Bind(wx.EVT_LEFT_DOWN, self.on_left_down)
        self.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down)
        self.Bind(wx.EVT_RIGHT_UP, self.on_right_up)
        self.Bind(wx.EVT_MOTION, self.on_move)

        self.index_iter = range(len(self.data))

        # selected_items[i]: desired selection state of display row i.
        self.selected_items = [True] * len(self.data)

        # index_mapping[display_position] == index into self.data.
        self.index_mapping = list(range(len(self.data)))

        # Display index where the current right-button drag started
        # (None when no drag is in progress).
        self.drag_start_index = None

        self.update_selection()
        self.SetFocus()

    def on_left_down(self, event):
        # Toggle the clicked item's desired selection state.
        if self.HitTest(event.GetPosition()) != wx.NOT_FOUND:
            index = self.HitTest(event.GetPosition())
            self.selected_items[index] = not self.selected_items[index]
            # doesn't really work to update selection directly (focus issues)
            # instead we wait for the EVT_LISTBOX event and fix the selection
            # there...
            # self.update_selection()
            # TODO: we could probably use wx.CallAfter
        event.Skip()

    def update_selection(self):
        # Reconcile the widget's actual selection with selected_items.
        for i in self.index_iter:
            if self.IsSelected(i) and not self.selected_items[i]:
                self.Deselect(i)
            elif not self.IsSelected(i) and self.selected_items[i]:
                self.Select(i)

    def on_selection_changed(self, evt):
        self.update_selection()
        evt.Skip()

    def on_right_down(self, event):
        # Right-button press starts a potential drag-reorder.
        if self.HitTest(event.GetPosition()) != wx.NOT_FOUND:
            index = self.HitTest(event.GetPosition())
            self.drag_start_index = index

    def on_right_up(self, event):
        # Releasing the right button ends the drag.
        self.drag_start_index = None
        event.Skip()

    def on_move(self, event):
        # While dragging, swap items every time the pointer crosses a row.
        if self.drag_start_index is not None:
            if self.HitTest(event.GetPosition()) != wx.NOT_FOUND:
                index = self.HitTest(event.GetPosition())
                if self.drag_start_index != index:
                    self.swap(self.drag_start_index, index)
                    self.drag_start_index = index

    def swap(self, i, j):
        """Swap display rows i and j, keeping mapping and selection in sync."""
        self.index_mapping[i], self.index_mapping[j] = self.index_mapping[j], self.index_mapping[i]
        self.SetString(i, self.data[self.index_mapping[i]])
        self.SetString(j, self.data[self.index_mapping[j]])
        self.selected_items[i], self.selected_items[j] = self.selected_items[j], self.selected_items[i]
        # Emit a synthetic EVT_LISTBOX so listeners (e.g. the column panel)
        # refresh after a reorder.
        new_event = wx.PyCommandEvent(wx.EVT_LISTBOX.typeId, self.GetId())
        self.GetEventHandler().ProcessEvent(new_event)

    def get_selected_data(self):
        """Return the selected values in their current display order."""
        selected = []
        for i, col in enumerate(self.data):
            if self.IsSelected(i):
                index = self.index_mapping[i]
                value = self.data[index]
                selected.append(value)
        return selected
class ColumnSelectionPanel(wx.Panel):
    """
    Panel for selecting and re-arranging columns.

    Wraps a ListBoxDraggable; whenever its selection or order changes the
    table control is re-projected onto the chosen columns.
    """
    def __init__(self, parent, columns, df_list_ctrl):
        wx.Panel.__init__(self, parent)

        self.columns = columns
        self.df_list_ctrl = df_list_ctrl

        self.list_box = ListBoxDraggable(self, -1, columns, style=wx.LB_EXTENDED)
        self.Bind(wx.EVT_LISTBOX, self.update_selected_columns)

        layout = wx.BoxSizer(wx.VERTICAL)
        layout.Add(self.list_box, 1, wx.ALL | wx.EXPAND | wx.GROW, 5)
        self.SetSizer(layout)

        self.list_box.SetFocus()

    def update_selected_columns(self, evt):
        # Project the table onto whatever is currently selected, in order.
        self.df_list_ctrl.set_columns(self.list_box.get_selected_data())
class FilterPanel(wx.Panel):
    """
    Panel for defining filter expressions.

    Shows a fixed number of (column selector, expression) rows. Every
    non-empty row contributes one condition; all conditions are handed to
    the table control, and the change callback fires when the visible rows
    actually changed.
    """
    def __init__(self, parent, columns, df_list_ctrl, change_callback):
        wx.Panel.__init__(self, parent)

        columns_with_neutral_selection = [''] + list(columns)
        self.columns = columns
        self.df_list_ctrl = df_list_ctrl
        self.change_callback = change_callback

        self.num_filters = 10

        self.main_sizer = wx.BoxSizer(wx.VERTICAL)
        self.combo_boxes = []
        self.text_controls = []
        for _ in range(self.num_filters):
            selector = wx.ComboBox(self, choices=columns_with_neutral_selection, style=wx.CB_READONLY)
            expression = wx.TextCtrl(self, wx.ID_ANY, '')

            self.Bind(wx.EVT_COMBOBOX, self.on_combo_box_select)
            self.Bind(wx.EVT_TEXT, self.on_text_change)

            row = wx.BoxSizer(wx.HORIZONTAL)
            row.Add(selector, 0, wx.ALL, 5)
            row.Add(expression, 1, wx.ALL | wx.EXPAND | wx.ALIGN_RIGHT, 5)

            self.combo_boxes.append(selector)
            self.text_controls.append(expression)
            self.main_sizer.Add(row, 0, wx.EXPAND)

        self.SetSizer(self.main_sizer)

    def on_combo_box_select(self, event):
        self.update_conditions()

    def on_text_change(self, event):
        self.update_conditions()

    def update_conditions(self):
        """Collect all active (column, expression) pairs and apply them."""
        conditions = []
        for selector, expression in zip(self.combo_boxes, self.text_controls):
            chosen = selector.GetSelection()
            if chosen == wx.NOT_FOUND or chosen == 0:
                continue
            # since we have added a dummy column for "deselect", we have to subtract one
            conditions.append((self.columns[chosen - 1], expression.GetValue()))
        _, has_changed = self.df_list_ctrl.apply_filter(conditions)
        if has_changed:
            self.change_callback()
class HistogramPlot(wx.Panel):
    """
    Panel providing a histogram plot.

    One combo box selects the column; string columns are drawn as a
    value-count bar chart, numeric columns as a 100-bin histogram of the
    currently filtered rows.
    """
    def __init__(self, parent, columns, df_list_ctrl):
        wx.Panel.__init__(self, parent)

        columns_with_neutral_selection = [''] + list(columns)
        self.columns = columns
        self.df_list_ctrl = df_list_ctrl

        self.figure = Figure(facecolor="white", figsize=(1, 1))
        self.axes = self.figure.add_subplot(111)
        self.canvas = FigureCanvas(self, -1, self.figure)

        chart_toolbar = NavigationToolbar2Wx(self.canvas)

        self.combo_box1 = wx.ComboBox(self, choices=columns_with_neutral_selection, style=wx.CB_READONLY)
        self.Bind(wx.EVT_COMBOBOX, self.on_combo_box_select)

        row_sizer = wx.BoxSizer(wx.HORIZONTAL)
        row_sizer.Add(self.combo_box1, 0, wx.ALL | wx.ALIGN_CENTER, 5)
        row_sizer.Add(chart_toolbar, 0, wx.ALL, 5)

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.canvas, 1, flag=wx.EXPAND, border=5)
        sizer.Add(row_sizer)
        self.SetSizer(sizer)

    def on_combo_box_select(self, event):
        self.redraw()

    def redraw(self):
        """Redraw the plot for the currently selected column, if any."""
        column_index1 = self.combo_box1.GetSelection()
        if column_index1 != wx.NOT_FOUND and column_index1 != 0:
            # subtract one to remove the neutral selection index
            column_index1 -= 1
            df = self.df_list_ctrl.get_filtered_df()

            if len(df) > 0:
                self.axes.clear()

                column = df.iloc[:, column_index1]
                # FIX: the `np.object` alias was removed in NumPy 1.24; the
                # builtin `object` is the equivalent dtype spelling on all
                # NumPy versions.
                is_string_col = column.dtype == object and isinstance(column.values[0], str)
                if is_string_col:
                    # Categorical/string column: plot sorted value counts.
                    value_counts = column.value_counts().sort_index()
                    value_counts.plot(kind='bar', ax=self.axes)
                else:
                    self.axes.hist(column.values, bins=100)

                self.canvas.draw()
class ScatterPlot(wx.Panel):
    """
    Panel providing a scatter plot.

    Two combo boxes pick the x and y columns; the plot is redrawn whenever
    either selection changes or the filtered data changes.
    """
    def __init__(self, parent, columns, df_list_ctrl):
        wx.Panel.__init__(self, parent)

        choices = [''] + list(columns)
        self.columns = columns
        self.df_list_ctrl = df_list_ctrl

        self.figure = Figure(facecolor="white", figsize=(1, 1))
        self.axes = self.figure.add_subplot(111)
        self.canvas = FigureCanvas(self, -1, self.figure)

        chart_toolbar = NavigationToolbar2Wx(self.canvas)

        self.combo_box1 = wx.ComboBox(self, choices=choices, style=wx.CB_READONLY)
        self.combo_box2 = wx.ComboBox(self, choices=choices, style=wx.CB_READONLY)
        self.Bind(wx.EVT_COMBOBOX, self.on_combo_box_select)

        controls_row = wx.BoxSizer(wx.HORIZONTAL)
        controls_row.Add(self.combo_box1, 0, wx.ALL | wx.ALIGN_CENTER, 5)
        controls_row.Add(self.combo_box2, 0, wx.ALL | wx.ALIGN_CENTER, 5)
        controls_row.Add(chart_toolbar, 0, wx.ALL, 5)

        outer = wx.BoxSizer(wx.VERTICAL)
        outer.Add(self.canvas, 1, flag=wx.EXPAND, border=5)
        outer.Add(controls_row)
        self.SetSizer(outer)

    def on_combo_box_select(self, event):
        self.redraw()

    def redraw(self):
        """Redraw the scatter plot if both axes have a real column selected."""
        x_choice = self.combo_box1.GetSelection()
        y_choice = self.combo_box2.GetSelection()
        # Index 0 is the blank "deselect" entry prepended in __init__.
        if x_choice == wx.NOT_FOUND or x_choice == 0 or \
           y_choice == wx.NOT_FOUND or y_choice == 0:
            return
        x_col = x_choice - 1
        y_col = y_choice - 1
        df = self.df_list_ctrl.get_filtered_df()

        # It looks like using pandas dataframe.plot causes something weird to
        # crash in wx internally. Therefore we use plain axes.plot functionality.
        if len(df) == 0:
            return
        self.axes.clear()
        self.axes.plot(df.iloc[:, x_col].values, df.iloc[:, y_col].values,
                       'o', clip_on=False)
        self.canvas.draw()
class MainFrame(wx.Frame):
    """
    The main GUI window.

    Hosts a notebook with one page per feature (table view, column
    selection, filters, histogram, scatter plot). Page 1 owns the
    ListCtrlDataFrame that all other pages read from and act on.
    """
    def __init__(self, df):
        wx.Frame.__init__(self, None, -1, "Pandas DataFrame GUI")

        # Here we create a panel and a notebook on the panel
        p = wx.Panel(self)
        nb = wx.Notebook(p)
        self.nb = nb

        columns = df.columns[:]

        # Two status fields: row count (fixed width) and messages/errors.
        self.CreateStatusBar(2, style=0)
        self.SetStatusWidths([200, -1])

        # create the page windows as children of the notebook
        self.page1 = DataframePanel(nb, df, self.status_bar_callback)
        self.page2 = ColumnSelectionPanel(nb, columns, self.page1.df_list_ctrl)
        self.page3 = FilterPanel(nb, columns, self.page1.df_list_ctrl, self.selection_change_callback)
        self.page4 = HistogramPlot(nb, columns, self.page1.df_list_ctrl)
        self.page5 = ScatterPlot(nb, columns, self.page1.df_list_ctrl)

        # add the pages to the notebook with the label to show on the tab
        nb.AddPage(self.page1, "Data Frame")
        nb.AddPage(self.page2, "Columns")
        nb.AddPage(self.page3, "Filters")
        nb.AddPage(self.page4, "Histogram")
        nb.AddPage(self.page5, "Scatter Plot")

        nb.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.on_tab_change)

        # finally, put the notebook in a sizer for the panel to manage
        # the layout
        sizer = wx.BoxSizer()
        sizer.Add(nb, 1, wx.EXPAND)
        p.SetSizer(sizer)

        self.SetSize((800, 600))
        self.Center()

    def on_tab_change(self, event):
        # Focus fixing must run after the page-change event completes,
        # hence the wx.CallAfter indirection.
        self.page2.list_box.SetFocus()
        page_to_select = event.GetSelection()
        wx.CallAfter(self.fix_focus, page_to_select)
        event.Skip(True)

    def fix_focus(self, page_to_select):
        # Give keyboard focus to the central widget of the selected page.
        page = self.nb.GetPage(page_to_select)
        page.SetFocus()
        if isinstance(page, DataframePanel):
            self.page1.df_list_ctrl.SetFocus()
        elif isinstance(page, ColumnSelectionPanel):
            self.page2.list_box.SetFocus()

    def status_bar_callback(self, i, new_text):
        # Field 0 carries the row count, field 1 filter errors.
        self.SetStatusText(new_text, i)

    def selection_change_callback(self):
        # A filter changed the visible rows -- refresh both plots.
        self.page4.redraw()
        self.page5.redraw()
def show(df):
    """
    The main function to start the data frame GUI.

    Opens the main window for *df* and blocks until it is closed.
    """
    application = wx.App(False)
    window = MainFrame(df)
    window.Show()
    application.MainLoop()
|
efrain2010/matchms | matchms/similarity/ModifiedCosine.py | from typing import Tuple
from matchms.typing import SpectrumType
from .spectrum_similarity_functions import collect_peak_pairs
from .spectrum_similarity_functions import get_peaks_array
from .spectrum_similarity_functions import score_best_matches
class ModifiedCosine:
    """Calculate 'modified cosine score' between mass spectra.

    The modified cosine score aims at quantifying the similarity between two
    mass spectra. The score is calculated by finding best possible matches between
    peaks of two spectra. Two peaks are considered a potential match if their
    m/z ratios lie within the given 'tolerance', or if their m/z ratios
    lie within the tolerance once a mass-shift is applied. The mass shift is
    simply the difference in precursor-m/z between the two spectra.
    See Watrous et al. [PNAS, 2012, https://www.pnas.org/content/109/26/E1743]
    for further details.

    For example

    .. testcode::

        import numpy as np
        from matchms import Spectrum
        from matchms.similarity import ModifiedCosine

        spectrum_1 = Spectrum(mz=np.array([100, 150, 200.]),
                              intensities=np.array([0.7, 0.2, 0.1]),
                              metadata={"precursor_mz": 100.0})
        spectrum_2 = Spectrum(mz=np.array([104.9, 140, 190.]),
                              intensities=np.array([0.4, 0.2, 0.1]),
                              metadata={"precursor_mz": 105.0})

        # Use factory to construct a similarity function
        modified_cosine = ModifiedCosine(tolerance=0.2)
        score, n_matches = modified_cosine(spectrum_1, spectrum_2)

        print(f"Modified cosine score is {score:.2f} with {n_matches} matched peaks")

    Should output

    .. testoutput::

        Modified cosine score is 0.83 with 1 matched peaks
    """
    def __init__(self, tolerance: float = 0.1, mz_power: float = 0.0,
                 intensity_power: float = 1.0):
        """
        Parameters
        ----------
        tolerance:
            Peaks will be considered a match when <= tolerance apart. Default is 0.1.
        mz_power:
            The power to raise mz to in the cosine function. The default is 0, in which
            case the peak intensity products will not depend on the m/z ratios.
        intensity_power:
            The power to raise intensity to in the cosine function. The default is 1.
        """
        self.tolerance = tolerance
        self.mz_power = mz_power
        self.intensity_power = intensity_power

    def __call__(self, spectrum1: SpectrumType, spectrum2: SpectrumType) -> Tuple[float, int]:
        """Calculate modified cosine score between two spectra.

        Args:
        -----
        spectrum1: SpectrumType
            Input spectrum 1.
        spectrum2: SpectrumType
            Input spectrum 2.

        Returns:
        --------
        Tuple with cosine score and number of matched peaks.
        """
        # Fail fast: validate required metadata BEFORE doing any peak-matching
        # work (previously the zero-shift pairs were computed first, wasting
        # effort and failing late when precursor_mz was missing).
        message = "Precursor_mz missing. Apply 'add_precursor_mz' filter first."
        assert spectrum1.get("precursor_mz") and spectrum2.get("precursor_mz"), message
        mass_shift = spectrum1.get("precursor_mz") - spectrum2.get("precursor_mz")

        spec1 = get_peaks_array(spectrum1)
        spec2 = get_peaks_array(spectrum2)

        # Candidate pairs both without shift and with the precursor mass shift.
        zero_pairs = collect_peak_pairs(spec1, spec2, self.tolerance, shift=0.0,
                                        mz_power=self.mz_power,
                                        intensity_power=self.intensity_power)
        nonzero_pairs = collect_peak_pairs(spec1, spec2, self.tolerance, shift=mass_shift,
                                           mz_power=self.mz_power,
                                           intensity_power=self.intensity_power)

        # Greedy assignment expects pairs sorted by descending match score (x[2]).
        matching_pairs = sorted(zero_pairs + nonzero_pairs,
                                key=lambda x: x[2], reverse=True)
        return score_best_matches(matching_pairs, spec1, spec2,
                                  self.mz_power, self.intensity_power)
|
efrain2010/matchms | matchms/exporting/save_as_mgf.py | <reponame>efrain2010/matchms
from typing import List
import pyteomics.mgf as py_mgf
from ..Spectrum import Spectrum
def save_as_mgf(spectrums: List[Spectrum], filename: str):
    """Save spectrum(s) as mgf file.

    :py:attr:`~matchms.Spectrum.losses` of spectrum will not be saved.

    Arguments:
    ----------
    spectrums:
        Expected input are match.Spectrum.Spectrum() objects.
    filename:
        Provide filename to save spectrum(s).
    """
    # Accept a single Spectrum as well as a list of them.
    if not isinstance(spectrums, list):
        spectrums = [spectrums]

    for spec in spectrums:
        # Translate the matchms.Spectrum() into the dict layout pyteomics
        # expects, then append it to the target file.
        entry = {"m/z array": spec.peaks.mz,
                 "intensity array": spec.peaks.intensities,
                 "params": spec.metadata}
        py_mgf.write([entry], filename)
|
efrain2010/matchms | tests/test_add_losses.py | <gh_stars>10-100
import numpy
import pytest
from matchms import Spectrum
from matchms.filtering import add_losses
def test_add_losses():
    """Test if all losses are correctly generated from mz values and precursor-m/z."""
    spectrum_in = Spectrum(mz=numpy.array([100, 150, 200, 300], dtype="float"),
                           intensities=numpy.array([700, 200, 100, 1000], dtype="float"),
                           metadata={"precursor_mz": 445.0})

    spectrum = add_losses(spectrum_in)

    # Losses are precursor_mz - peak_mz, reported in ascending m/z order,
    # which reverses the intensity order relative to the input peaks.
    expected_mz = numpy.array([145, 245, 295, 345], "float")
    expected_intensities = numpy.array([1000, 100, 200, 700], "float")
    assert numpy.allclose(spectrum.losses.mz, expected_mz), "Expected different loss m/z."
    assert numpy.allclose(spectrum.losses.intensities, expected_intensities), "Expected different intensities."


def test_add_losses_without_precursor_mz():
    """Test if no changes are done without having a precursor-m/z."""
    spectrum_in = Spectrum(mz=numpy.array([100, 150, 200, 300], dtype="float"),
                           intensities=numpy.array([700, 200, 100, 1000], dtype="float"))

    spectrum = add_losses(spectrum_in)

    # Equal content, but the filter must still return a new instance.
    assert spectrum == spectrum_in and spectrum is not spectrum_in


def test_add_losses_with_precursor_mz_wrong_type():
    """Test if correct assert error is raised for precursor-mz as string."""
    spectrum_in = Spectrum(mz=numpy.array([100, 150, 200, 300], dtype="float"),
                           intensities=numpy.array([700, 200, 100, 1000], dtype="float"),
                           metadata={"precursor_mz": "445.0"})

    with pytest.raises(AssertionError) as msg:
        _ = add_losses(spectrum_in)

    assert "Expected 'precursor_mz' to be a scalar number." in str(msg.value)


def test_add_losses_returns_new_spectrum_instance():
    """Test if no change is done to empty spectrum."""
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"))

    spectrum = add_losses(spectrum_in)

    assert spectrum == spectrum_in and spectrum is not spectrum_in


def test_add_losses_with_input_none():
    """Test if input spectrum is None."""
    spectrum_in = None
    spectrum = add_losses(spectrum_in)
    assert spectrum is None


def test_add_losses_with_peakmz_larger_precursormz():
    """Test if losses are correctly generated and loss < 0 is discarded."""
    # The 450 peak lies above precursor_mz 445, so its (negative) loss is dropped.
    spectrum_in = Spectrum(mz=numpy.array([100, 150, 200, 450], dtype="float"),
                           intensities=numpy.array([700, 200, 100, 1000], dtype="float"),
                           metadata={"precursor_mz": 445.0})

    spectrum = add_losses(spectrum_in)

    expected_mz = numpy.array([245, 295, 345], "float")
    expected_intensities = numpy.array([100, 200, 700], "float")
    assert numpy.allclose(spectrum.losses.mz, expected_mz), "Expected different loss m/z."
    assert numpy.allclose(spectrum.losses.intensities, expected_intensities), "Expected different intensities."


def test_add_losses_with_max_loss_mz_250():
    """Test if losses are correctly generated and losses with mz > 250 are discarded."""
    spectrum_in = Spectrum(mz=numpy.array([100, 150, 200, 300], dtype="float"),
                           intensities=numpy.array([700, 200, 100, 1000], dtype="float"),
                           metadata={"precursor_mz": 445.0})

    spectrum = add_losses(spectrum_in, loss_mz_to=250)

    expected_mz = numpy.array([145, 245], "float")
    expected_intensities = numpy.array([1000, 100], "float")
    assert numpy.allclose(spectrum.losses.mz, expected_mz), "Expected different loss m/z."
    assert numpy.allclose(spectrum.losses.intensities, expected_intensities), "Expected different intensities."
|
efrain2010/matchms | tests/test_ParentmassMatchParallel.py | import numpy
from matchms import Spectrum
from matchms.similarity import ParentmassMatchParallel
from matchms.similarity.ParentmassMatchParallel import \
calculate_parentmass_scores
def test_parentmass_match():
    """Test with default tolerance (0.1): all pairs differ by >= 1, so no matches."""
    spectrum_1 = Spectrum(mz=numpy.array([], dtype="float"),
                          intensities=numpy.array([], dtype="float"),
                          metadata={"parent_mass": 100.0})
    spectrum_2 = Spectrum(mz=numpy.array([], dtype="float"),
                          intensities=numpy.array([], dtype="float"),
                          metadata={"parent_mass": 101.0})
    spectrum_a = Spectrum(mz=numpy.array([], dtype="float"),
                          intensities=numpy.array([], dtype="float"),
                          metadata={"parent_mass": 99.0})
    spectrum_b = Spectrum(mz=numpy.array([], dtype="float"),
                          intensities=numpy.array([], dtype="float"),
                          metadata={"parent_mass": 98.0})

    similarity_score = ParentmassMatchParallel()
    scores = similarity_score([spectrum_1, spectrum_2], [spectrum_a, spectrum_b])

    assert numpy.all(scores == numpy.array([[False, False],
                                            [False, False]])), "Expected different scores."


def test_parentmass_match_tolerance2():
    """Test with tolerance=2."""
    spectrum_1 = Spectrum(mz=numpy.array([], dtype="float"),
                          intensities=numpy.array([], dtype="float"),
                          metadata={"parent_mass": 100.0})
    spectrum_2 = Spectrum(mz=numpy.array([], dtype="float"),
                          intensities=numpy.array([], dtype="float"),
                          metadata={"parent_mass": 101.0})
    spectrum_a = Spectrum(mz=numpy.array([], dtype="float"),
                          intensities=numpy.array([], dtype="float"),
                          metadata={"parent_mass": 99.0})
    spectrum_b = Spectrum(mz=numpy.array([], dtype="float"),
                          intensities=numpy.array([], dtype="float"),
                          metadata={"parent_mass": 98.0})

    similarity_score = ParentmassMatchParallel(tolerance=2.0)
    scores = similarity_score([spectrum_1, spectrum_2], [spectrum_a, spectrum_b])

    # Only |101 - 98| = 3 exceeds the tolerance of 2.
    assert numpy.all(scores == numpy.array([[True, True],
                                            [True, False]])), "Expected different scores."


def test_calculate_parentmass_scores_compiled():
    """Test the underlying score function (numba compiled)."""
    parentmasses_ref = numpy.asarray([101, 200, 300])
    parentmasses_query = numpy.asarray([100, 301])

    scores = calculate_parentmass_scores(parentmasses_ref, parentmasses_query, tolerance=2.0)

    assert numpy.all(scores == numpy.array([[1., 0.],
                                            [0., 0.],
                                            [0., 1.]])), "Expected different scores."


def test_calculate_parentmass_scores():
    """Test the underlying score function (non-compiled, via numba's .py_func)."""
    parentmasses_ref = numpy.asarray([101, 200, 300])
    parentmasses_query = numpy.asarray([100, 301])

    scores = calculate_parentmass_scores.py_func(parentmasses_ref, parentmasses_query, tolerance=2.0)

    assert numpy.all(scores == numpy.array([[True, False],
                                            [False, False],
                                            [False, True]])), "Expected different scores."
|
efrain2010/matchms | tests/test_load_from_msp.py | import os
import numpy
from matchms.importing import load_from_msp
def test_load_from_msp():
    """Test parse of msp file to spectrum objects."""
    module_root = os.path.join(os.path.dirname(__file__), "..")
    spectrums_file = os.path.join(module_root, "tests", "MoNA-export-GC-MS-first10.msp")
    # NOTE: 'spectrum' holds the iterable of parsed Spectrum objects returned
    # by load_from_msp, one per MSP record in the fixture file.
    spectrum = load_from_msp(spectrums_file)

    # InChIKeys of the first ten MoNA GC-MS records, in file order.
    expected_inchikey = numpy.array([
        "ALRLPDGCPYIVHP-UHFFFAOYSA-N", "UFBJCMHMOXMLKC-UHFFFAOYSA-N", "WDNBURPWRNALGP-UHFFFAOYSA-N",
        "RANCECPPZPIPNO-UHFFFAOYSA-N", "HOLHYSJJBXSLMV-UHFFFAOYSA-N", "UMPSXRYVXUPCOS-UHFFFAOYSA-N",
        "HFZWRUODUSTPEG-UHFFFAOYSA-N", "VPOMSPZBQMDLTM-UHFFFAOYSA-N", "LHJGJYXLEPZJPM-UHFFFAOYSA-N",
        "LINPIYWFGCPVIE-UHFFFAOYSA-N"
    ])
    for k, n in enumerate(spectrum):
        assert n.get("inchikey").lower() == expected_inchikey[k].lower(), "Expected different InChIKey."
|
efrain2010/matchms | matchms/Spectrum.py | <reponame>efrain2010/matchms<filename>matchms/Spectrum.py
from typing import Optional
import numpy
from matplotlib import pyplot
from .Spikes import Spikes
class Spectrum:
    """Container for a collection of peaks, losses and metadata

    For example

    .. testcode::

        import numpy as np
        from matchms import Scores, Spectrum
        from matchms.similarity import CosineGreedy

        spectrum = Spectrum(mz=np.array([100, 150, 200.]),
                            intensities=np.array([0.7, 0.2, 0.1]),
                            metadata={'id': 'spectrum1'})

        print(spectrum.peaks.mz[0])
        print(spectrum.peaks.intensities[0])
        print(spectrum.get('id'))

    Should output

    .. testoutput::

        100.0
        0.7
        spectrum1

    Attributes
    ----------
    peaks: ~matchms.Spikes.Spikes
        Peaks of spectrum
    losses: ~matchms.Spikes.Spikes or None
        Losses of spectrum, the difference between the precursor and all peaks.

        Can be filled with

        .. code-block ::

            from matchms import Spikes
            spectrum.losses = Spikes(mz=np.array([50.]), intensities=np.array([0.1]))
    metadata: dict
        Dict of metadata with for example the scan number of precursor m/z.
    """
    def __init__(self, mz: numpy.array, intensities: numpy.array, metadata: Optional[dict] = None):
        """
        Parameters
        ----------
        mz
            Array of m/z for the peaks
        intensities
            Array of intensities for the peaks
        metadata
            Dictionary with for example the scan number of precursor m/z.
        """
        self.peaks = Spikes(mz=mz, intensities=intensities)
        self.losses = None
        if metadata is None:
            self.metadata = dict()
        else:
            self.metadata = metadata

    def __eq__(self, other):
        # Two spectra are equal when peaks, losses and metadata all match.
        # NOTE: defining __eq__ without __hash__ makes instances unhashable
        # (Python sets __hash__ to None), so they cannot be set members/dict keys.
        return \
            self.peaks == other.peaks and \
            self.losses == other.losses and \
            self.metadata == other.metadata

    def clone(self):
        """Return a deepcopy of the spectrum instance."""
        # The peaks/metadata properties already return copies, so passing them
        # to the constructor is sufficient to decouple the clone.
        clone = Spectrum(mz=self.peaks.mz,
                         intensities=self.peaks.intensities,
                         metadata=self.metadata)
        clone.losses = self.losses
        return clone

    def plot(self, intensity_from=0.0, intensity_to=None, with_histogram=False):
        """To visually inspect a spectrum run ``spectrum.plot()``

        .. figure:: ../_static/spectrum-plot-example.png
            :width: 400
            :alt: spectrum plotting function

            Example of a spectrum plotted using ``spectrum.plot()`` and ``spectrum.plot(intensity_to=0.02)``.."""

        def plot_histogram():
            """Plot the histogram of intensity values as horizontal bars, aligned with the spectrum axes"""

            def calc_bin_edges_intensity():
                """Calculate various properties of the histogram bins, given a range in intensity defined by
                'intensity_from' and 'intensity_to', assuming a number of bins equal to 100."""
                edges = numpy.linspace(intensity_from, intensity_to, n_bins + 1)
                lefts = edges[:-1]
                rights = edges[1:]
                middles = (lefts + rights) / 2
                widths = rights - lefts
                return edges, middles, widths

            bin_edges, bin_middles, bin_widths = calc_bin_edges_intensity()
            counts, _ = numpy.histogram(self.peaks.intensities, bins=bin_edges)
            # histogram_ax / n_bins are closed-over locals assigned in the outer
            # plot() body before this helper is called.
            histogram_ax.set_ylim(bottom=intensity_from, top=intensity_to)
            pyplot.barh(bin_middles, counts, height=bin_widths, color="#047495")
            pyplot.title("histogram (n_bins={0})".format(n_bins))
            pyplot.xlabel("count")

        def plot_spectrum():
            """plot mz v. intensity"""

            def make_stems():
                """calculate where the stems of the spectrum peaks are going to be"""
                x = numpy.empty([2, self.peaks.mz.size], dtype="float")
                y = numpy.empty(x.shape)
                for i, mz in enumerate(self.peaks.mz):
                    x[0:2, i] = [mz, mz]
                    y[0:2, i] = [0, self.peaks.intensities[i]]
                return x, y

            spectrum_ax.set_ylim(bottom=intensity_from, top=intensity_to)
            x, y = make_stems()
            pyplot.plot(x, y, color="#0f0f0f", linewidth=1.0, marker="")
            pyplot.title("Spectrum")
            pyplot.xlabel("M/z")
            pyplot.ylabel("intensity")

        # Default upper bound: 5% headroom above the tallest peak.
        if intensity_to is None:
            intensity_to = self.peaks.intensities.max() * 1.05

        n_bins = 100
        fig = pyplot.figure()

        if with_histogram:
            # Two side-by-side axes sharing the intensity range.
            spectrum_ax = fig.add_axes([0.2, 0.1, 0.5, 0.8])
            plot_spectrum()
            histogram_ax = fig.add_axes([0.72, 0.1, 0.2, 0.8])
            plot_histogram()
            histogram_ax.set_yticklabels([])
        else:
            spectrum_ax = fig.add_axes([0.2, 0.1, 0.7, 0.8])
            plot_spectrum()
            histogram_ax = None

        return fig

    def get(self, key: str, default=None):
        """Retrieve value from :attr:`metadata` dict. Shorthand for

        .. code-block:: python

            val = self.metadata[key]
        """
        # Reads the private dict directly, bypassing the copying property.
        return self._metadata.get(key, default)

    def set(self, key: str, value):
        """Set value in :attr:`metadata` dict. Shorthand for

        .. code-block:: python

            self.metadata[key] = val
        """
        # Writes the private dict directly; the metadata property only
        # hands out copies, so this is the supported mutation path.
        self._metadata[key] = value
        return self

    @property
    def metadata(self):
        # Returns a shallow copy so callers cannot mutate internal state.
        return self._metadata.copy()

    @metadata.setter
    def metadata(self, value):
        self._metadata = value

    @property
    def losses(self) -> Optional[Spikes]:
        # Returns a clone (or None) so callers cannot mutate internal state.
        return self._losses.clone() if self._losses is not None else None

    @losses.setter
    def losses(self, value: Spikes):
        self._losses = value

    @property
    def peaks(self) -> Spikes:
        # Returns a clone so callers cannot mutate internal state.
        return self._peaks.clone()

    @peaks.setter
    def peaks(self, value: Spikes):
        self._peaks = value
|
efrain2010/matchms | matchms/importing/__init__.py | from .load_adducts import load_adducts
from .load_from_json import load_from_json
from .load_from_mgf import load_from_mgf
from .load_from_msp import load_from_msp
from .load_from_usi import load_from_usi
__all__ = [
"load_from_json",
"load_from_mgf",
"load_from_msp",
"load_from_usi",
"load_adducts"
]
|
efrain2010/matchms | matchms/constants.py | PROTON_MASS = 1.00727645199076
|
efrain2010/matchms | matchms/filtering/derive_inchikey_from_inchi.py | from ..typing import SpectrumType
from ..utils import convert_inchi_to_inchikey
from ..utils import is_valid_inchi
from ..utils import is_valid_inchikey
def derive_inchikey_from_inchi(spectrum_in: SpectrumType) -> SpectrumType:
    """Find missing InchiKey and derive from Inchi where possible."""
    if spectrum_in is None:
        return None

    spectrum = spectrum_in.clone()
    inchi = spectrum.get("inchi")

    # Only derive when a valid InChI exists and no valid InChIKey is present yet.
    if is_valid_inchi(inchi) and not is_valid_inchikey(spectrum.get("inchikey")):
        derived_key = convert_inchi_to_inchikey(inchi)
        if derived_key:
            spectrum.set("inchikey", derived_key)
        else:
            print("Could not convert InChI", inchi, "to inchikey.")
    return spectrum
|
efrain2010/matchms | matchms/similarity/IntersectMz.py | <filename>matchms/similarity/IntersectMz.py<gh_stars>0
from matchms.typing import SpectrumType
class IntersectMz:
    """Example score for illustrating how to build custom spectra similarity score.

    IntersectMz will count all exact matches of peaks and divide it by all unique
    peaks found in both spectrums.

    Example of how matchms similarity functions can be used:

    .. testcode::

        import numpy as np
        from matchms import Spectrum
        from matchms.similarity import IntersectMz

        spectrum_1 = Spectrum(mz=np.array([100, 150, 200.]),
                              intensities=np.array([0.7, 0.2, 0.1]))
        spectrum_2 = Spectrum(mz=np.array([100, 140, 190.]),
                              intensities=np.array([0.4, 0.2, 0.1]))

        # Construct a similarity function
        similarity_measure = IntersectMz(scaling=1.0)
        score = similarity_measure(spectrum_1, spectrum_2)

        print(f"IntersectMz score is {score:.2f}")

    Should output

    .. testoutput::

        IntersectMz score is 0.20
    """
    def __init__(self, scaling: float = 1.0):
        """Constructor. Here, function parameters are defined.

        Parameters
        ----------
        scaling
            Scale scores to maximum possible score being 'scaling'.
        """
        self.scaling = scaling

    def __call__(self, spectrum: SpectrumType, reference_spectrum: SpectrumType) -> float:
        """Call method. This will calculate the similarity score between two spectra."""
        # Jaccard-style ratio over the two spectra's distinct m/z values.
        mz_set = set(spectrum.peaks.mz)
        mz_set_ref = set(reference_spectrum.peaks.mz)
        union_size = len(mz_set | mz_set_ref)
        if union_size == 0:
            return 0
        return self.scaling * len(mz_set & mz_set_ref) / union_size
|
efrain2010/matchms | tests/test_save_as_mgf.py | <filename>tests/test_save_as_mgf.py<gh_stars>10-100
import os
import tempfile
import numpy
from matchms import Spectrum
from matchms.exporting import save_as_mgf
def test_save_as_mgf_single_spectrum():
    """Test saving spectrum to .mgf file"""
    spectrum = Spectrum(mz=numpy.array([100, 200, 300], dtype="float"),
                        intensities=numpy.array([10, 10, 500], dtype="float"),
                        metadata={"charge": -1,
                                  "inchi": '"InChI=1S/C6H12"',
                                  "pepmass": (100, 10.0),
                                  "test_field": "test"})

    # Write to test file
    with tempfile.TemporaryDirectory() as d:
        filename = os.path.join(d, "test.mgf")
        save_as_mgf(spectrum, filename)

        # test if file exists
        assert os.path.isfile(filename)

        # Test if content of mgf file is correct.
        # The line indices below assume the exact field order pyteomics writes.
        with open(filename, "r") as f:
            mgf_content = f.readlines()
        assert mgf_content[0] == "BEGIN IONS\n"
        assert mgf_content[2] == "CHARGE=1-\n"
        assert mgf_content[4] == "TEST_FIELD=test\n"
        assert mgf_content[7].split(" ")[0] == "300.0"


def test_save_as_mgf_spectrum_list():
    """Test saving spectrum list to .mgf file"""
    spectrum1 = Spectrum(mz=numpy.array([100, 200, 300], dtype="float"),
                         intensities=numpy.array([10, 10, 500], dtype="float"),
                         metadata={"test_field": "test1"})
    spectrum2 = Spectrum(mz=numpy.array([100, 200, 300], dtype="float"),
                         intensities=numpy.array([10, 10, 500], dtype="float"),
                         metadata={"test_field": "test2"})

    # Write to test file
    with tempfile.TemporaryDirectory() as d:
        filename = os.path.join(d, "test.mgf")
        save_as_mgf([spectrum1, spectrum2], filename)

        # test if file exists
        assert os.path.isfile(filename)

        # Test if content of mgf file is correct: two appended records of
        # seven lines each, so the END IONS lines sit at indices 5 and 12.
        with open(filename, "r") as f:
            mgf_content = f.readlines()
        assert mgf_content[5] == mgf_content[12] == "END IONS\n"
        assert mgf_content[1].split("=")[1] == "test1\n"
        assert mgf_content[8].split("=")[1] == "test2\n"
|
efrain2010/matchms | tests/test_cosine_greedy_vectorial.py | import numpy
import pytest
from matchms import Spectrum
from matchms.filtering import normalize_intensities
from matchms.similarity import CosineGreedyVectorial
def test_cosine_greedy_without_parameters():
    """Default tolerance: only exactly matching m/z values count as pairs."""
    spectrum_1 = Spectrum(mz=numpy.array([100, 150, 200, 300, 500, 510, 1100], dtype="float"),
                          intensities=numpy.array([700, 200, 100, 1000, 200, 5, 500], dtype="float"))
    spectrum_2 = Spectrum(mz=numpy.array([100, 140, 190, 300, 490, 510, 1090], dtype="float"),
                          intensities=numpy.array([700, 200, 100, 1000, 200, 5, 500], dtype="float"))

    norm_spectrum_1 = normalize_intensities(spectrum_1)
    norm_spectrum_2 = normalize_intensities(spectrum_2)
    cosine_greedy = CosineGreedyVectorial()
    score, n_matches = cosine_greedy(norm_spectrum_1, norm_spectrum_2)

    assert score == pytest.approx(0.81421, 0.0001), "Expected different cosine score."
    assert n_matches == 3


def test_cosine_score_greedy_with_tolerance_0_2():
    """Small tolerance of 0.2 matches only near-identical peak positions."""
    spectrum_1 = Spectrum(mz=numpy.array([100, 150, 200, 300, 500, 510, 1100], dtype="float"),
                          intensities=numpy.array([700, 200, 100, 1000, 200, 5, 500], dtype="float"),
                          metadata=dict())
    spectrum_2 = Spectrum(mz=numpy.array([50, 100, 200, 299.5, 489.5, 510.5, 1040], dtype="float"),
                          intensities=numpy.array([700, 200, 100, 1000, 200, 5, 500], dtype="float"),
                          metadata=dict())

    norm_spectrum_1 = normalize_intensities(spectrum_1)
    norm_spectrum_2 = normalize_intensities(spectrum_2)
    cosine_greedy = CosineGreedyVectorial(tolerance=0.2)
    score, n_matches = cosine_greedy(norm_spectrum_1, norm_spectrum_2)

    assert score == pytest.approx(0.081966, 0.0001), "Expected different cosine score."
    assert n_matches == 2


def test_cosine_score_greedy_with_tolerance_2_0():
    """Large tolerance of 2.0 pairs up most peaks of the two spectra."""
    spectrum_1 = Spectrum(mz=numpy.array([100, 200, 299, 300, 301, 500, 510], dtype="float"),
                          intensities=numpy.array([10, 10, 500, 100, 200, 20, 100], dtype="float"),
                          metadata=dict())
    spectrum_2 = Spectrum(mz=numpy.array([100, 200, 300, 301, 500, 512], dtype="float"),
                          intensities=numpy.array([10, 10, 500, 100, 20, 100], dtype="float"),
                          metadata=dict())

    norm_spectrum_1 = normalize_intensities(spectrum_1)
    norm_spectrum_2 = normalize_intensities(spectrum_2)
    cosine_greedy = CosineGreedyVectorial(tolerance=2.0)
    score, n_matches = cosine_greedy(norm_spectrum_1, norm_spectrum_2)

    assert score == pytest.approx(0.903412, 0.0001), "Expected different cosine score."
    assert n_matches == 6


def test_cosine_score_greedy_order_of_arguments():
    """The score must be symmetric in its two spectrum arguments."""
    spectrum_1 = Spectrum(mz=numpy.array([100, 200, 299, 300, 301, 500, 510], dtype="float"),
                          intensities=numpy.array([10, 10, 500, 100, 200, 20, 100], dtype="float"),
                          metadata=dict())
    spectrum_2 = Spectrum(mz=numpy.array([100, 200, 300, 301, 500, 512], dtype="float"),
                          intensities=numpy.array([10, 10, 500, 100, 20, 100], dtype="float"),
                          metadata=dict())

    norm_spectrum_1 = normalize_intensities(spectrum_1)
    norm_spectrum_2 = normalize_intensities(spectrum_2)
    cosine_greedy = CosineGreedyVectorial(tolerance=2.0)

    score_1_2, n_matches_1_2 = cosine_greedy(norm_spectrum_1, norm_spectrum_2)
    score_2_1, n_matches_2_1 = cosine_greedy(norm_spectrum_2, norm_spectrum_1)

    assert score_1_2 == score_2_1, "Expected that the order of the arguments would not matter."
    assert n_matches_1_2 == n_matches_2_1, "Expected that the order of the arguments would not matter."
|
efrain2010/matchms | tests/test_correct_charge.py | import numpy
from matchms import Spectrum
from matchms.filtering import correct_charge
def test_correct_charge_no_ionmode():
    """Test if no charge is added for empty ionmode."""
    spectrum_in = Spectrum(mz=numpy.array([], dtype='float'),
                           intensities=numpy.array([], dtype='float'),
                           metadata={})

    spectrum = correct_charge(spectrum_in)

    assert spectrum.get("charge") == 0, "Expected zero charge value."


def test_correct_charge_add_charge():
    """Test if charge is corrected as expected."""
    spectrum_in = Spectrum(mz=numpy.array([], dtype='float'),
                           intensities=numpy.array([], dtype='float'),
                           metadata={"ionmode": "positive"})

    spectrum = correct_charge(spectrum_in)

    # Positive ionmode with no charge set defaults to +1.
    assert spectrum.get("charge") == 1, "Expected different charge value."


def test_correct_charge_sign_plus_to_min():
    """Test if charge is corrected as expected."""
    spectrum_in = Spectrum(mz=numpy.array([], dtype='float'),
                           intensities=numpy.array([], dtype='float'),
                           metadata={"ionmode": "negative",
                                     "charge": 2})

    spectrum = correct_charge(spectrum_in)

    # Negative ionmode flips a positive charge's sign.
    assert spectrum.get("charge") == -2, "Expected different charge value."


def test_correct_charge_sign_min_to_plus():
    """Test if charge is corrected as expected."""
    spectrum_in = Spectrum(mz=numpy.array([], dtype='float'),
                           intensities=numpy.array([], dtype='float'),
                           metadata={"ionmode": "positive",
                                     "charge": -2})

    spectrum = correct_charge(spectrum_in)

    # Positive ionmode flips a negative charge's sign.
    assert spectrum.get("charge") == 2, "Expected different charge value."


def test_correct_charge_empty_spectrum():
    """Test that a None input is passed through unchanged."""
    spectrum_in = None
    spectrum = correct_charge(spectrum_in)

    assert spectrum is None, "Expected different handling of None spectrum."
|
efrain2010/matchms | matchms/filtering/repair_inchi_inchikey_smiles.py | <reponame>efrain2010/matchms
from ..typing import SpectrumType
from .SpeciesString import SpeciesString
def repair_inchi_inchikey_smiles(spectrum_in: SpectrumType):
    """Sort misplaced InChI/InChIKey/SMILES entries into the right metadata fields."""
    if spectrum_in is None:
        return None

    spectrum = spectrum_in.clone()

    # Interpret every available structure annotation and clean each one.
    raw_entries = [spectrum.get("inchi", ""),
                   spectrum.get("inchiaux", ""),
                   spectrum.get("inchikey", ""),
                   spectrum.get("smiles", "")]
    cleaneds = [SpeciesString(entry) for entry in raw_entries]

    def first_of(target):
        """Return the first non-empty cleaned value classified as *target*, else ''."""
        found = [c.cleaned for c in cleaneds if c.target == target and c.cleaned != ""]
        return found[0] if found else ""

    spectrum.set("inchi", first_of("inchi"))
    spectrum.set("inchikey", first_of("inchikey"))
    spectrum.set("smiles", first_of("smiles"))
    return spectrum
|
efrain2010/matchms | matchms/similarity/ParentmassMatchParallel.py | from typing import List
import numba
import numpy
from matchms.typing import SpectrumType
class ParentmassMatchParallel:
    """Return True if spectrums match in parent mass (within tolerance), and False otherwise."""
    def __init__(self, tolerance: float = 0.1):
        """
        Parameters:
        ----------
        tolerance
            Specify tolerance below which two masses are counted as match.
        """
        self.tolerance = tolerance

    def __call__(self, reference_spectrums: List[SpectrumType],
                 spectrums: List[SpectrumType]) -> numpy.ndarray:
        """Compare parent masses between all reference_spectrums and spectrums."""
        def collect_parentmasses(spectrum_list):
            """Gather every parent mass, insisting each one is present."""
            masses = []
            for spec in spectrum_list:
                mass = spec.get("parent_mass")
                assert mass is not None, "Missing parent mass."
                masses.append(mass)
            return numpy.asarray(masses)

        return calculate_parentmass_scores(collect_parentmasses(reference_spectrums),
                                           collect_parentmasses(spectrums),
                                           self.tolerance)
@numba.njit
def calculate_parentmass_scores(parentmasses_ref, parentmasses_query, tolerance):
    # Pairwise comparison matrix: entry (i, j) tells whether reference mass i
    # and query mass j lie within `tolerance` of each other. The boolean is
    # stored into a float array, so the result holds 0.0/1.0 values.
    scores = numpy.zeros((len(parentmasses_ref), len(parentmasses_query)))
    for i, parentmass_ref in enumerate(parentmasses_ref):
        for j, parentmass_query in enumerate(parentmasses_query):
            scores[i, j] = (abs(parentmass_ref-parentmass_query) <= tolerance)
    return scores
|
efrain2010/matchms | tests/test_derive_ionmode.py | import numpy
from matchms import Spectrum
from matchms.filtering import derive_ionmode
def test_derive_ionmode_positive_adduct():
    """A positive adduct such as '[M+H]' should yield ionmode 'positive'."""
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={"adduct": "[M+H]"})

    spectrum = derive_ionmode(spectrum_in)

    assert spectrum.get("ionmode") == "positive", "Expected different ionmode."


def test_derive_ionmode_negative_adduct():
    """A negative adduct such as 'M-H-' should yield ionmode 'negative'."""
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={"adduct": "M-H-"})

    spectrum = derive_ionmode(spectrum_in)

    assert spectrum.get("ionmode") == "negative", "Expected different ionmode."


def test_derive_ionmode_empty_spectrum():
    """A None input should be passed through as None."""
    spectrum_in = None
    spectrum = derive_ionmode(spectrum_in)

    # Fixed typos in the assertion message ("differnt handling" -> "different handling").
    assert spectrum is None, "Expected different handling of None spectrum."
|
efrain2010/matchms | matchms/filtering/clean_compound_name.py | import re
from ..typing import SpectrumType
def clean_compound_name(spectrum_in: SpectrumType) -> SpectrumType:
    """Clean compound name.

    A list of frequently seen name additions that do not belong to the compound
    name will be removed."""
    def remove_parts_by_regular_expression(name):
        """Clean name string by removing known parts that don't belong there.

        Each pattern below matches a known database/export artefact; re.split
        keeps the text AFTER a prefix artefact ([-1]) or BEFORE a suffix
        artefact ([0])."""
        name = name.strip()

        # remove type NCGC00180417-03_C31H40O16_
        name = re.split(r"[A-Z]{3,}[0-9]{8,}-[0-9]{2,}_[A-Z,0-9]{4,}_", name)[-1]

        # remove type NCGC00160232-01! or MLS001142816-01!
        name = re.split(r"[A-Z]{3,}[0-9]{8,}-[0-9]{2,3}\!", name)[-1]

        # remove type Massbank:EA008813 option1|option2|option3
        name = re.split(r"((Massbank:)|(MassbankEU:))[A-Z]{2}[0-9]{5,6}.*\|", name)[-1]

        # remove type Massbank:EA008813 or MassbankEU:EA008813
        name = re.split(r"((Massbank:)|(MassbankEU:))[A-Z]{2}[0-9]{5,6}", name)[-1]

        # remove type HMDB:HMDB00943-1336
        name = re.split(r"HMDB:HMDB[0-9]{4,}-[0-9]{1,}", name)[-1]

        # remove type MoNA:662599
        name = re.split(r"MoNA:[0-9]{5,}", name)[-1]

        # ReSpect:PS013405 option1|option2|option3...
        name = re.split(r"ReSpect:[A-Z]{2,}[0-9]{6}.*\|", name)[-1]

        # ReSpect:PS013405 option1
        name = re.split(r"[A-Z]{2,}[0-9]{6}( )", name)[-1]

        # remove type 0072_2-Mercaptobenzothiaz
        name = re.split(r"^[0-9]{4}_", name)[-1]

        # remove type nameofcompound_CID20_170920 or Spiraeoside_HCD30_170919
        name = re.split(r"_((HCD)|(CID))[0-9]{2}_[0-9]{5,6}$", name)[0]

        return name

    def remove_known_non_compound_parts(name):
        """Remove known non compound-name strings from name."""
        parts_remove = ["Spectral Match to",
                        "from NIST14",
                        "Massbank:"]
        for part in parts_remove:
            name = name.replace(part, "")
        return name.strip("; ")

    if spectrum_in is None:
        return None

    spectrum = spectrum_in.clone()

    if spectrum.get("compound_name", None) is not None:
        name = spectrum.get("compound_name")
    else:
        # NOTE: the assert message here is a 2-tuple, which is always truthy
        # but is what gets displayed when the assertion fires.
        assert spectrum.get("name", None) in [None, ""], ("Found 'name' but not 'compound_name' in metadata",
                                                          "Apply 'add_compound_name' filter first.")
        return spectrum

    # Clean compound name and only write it back when something changed.
    name_cleaned = remove_parts_by_regular_expression(name)
    name_cleaned = remove_known_non_compound_parts(name_cleaned)
    if name_cleaned != name:
        spectrum.set("compound_name", name_cleaned)
        print("Added cleaned compound name:", name_cleaned)
    return spectrum
|
efrain2010/matchms | tests/test_derive_formula_from_name.py | <reponame>efrain2010/matchms<filename>tests/test_derive_formula_from_name.py<gh_stars>0
import numpy
from matchms import Spectrum
from matchms.filtering import derive_formula_from_name
def test_derive_formula_from_name():
    """A trailing formula in the name is extracted and stripped from the name."""
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={"compound_name": "peptideXYZ [M+H+K] C5H12NO2"})

    spectrum = derive_formula_from_name(spectrum_in)

    assert spectrum.get("formula") == "C5H12NO2", "Expected different formula."
    assert spectrum.get("compound_name") == "peptideXYZ [M+H+K]", "Expected different cleaned name."


def test_derive_formula_from_name_dont_overwrite_present_adduct():
    """An already-present formula field must not be overwritten."""
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={"compound_name": "peptideXYZ C5H12NO2",
                                     "formula": "totallycorrectformula"})

    spectrum = derive_formula_from_name(spectrum_in)

    assert spectrum.get("formula") == "totallycorrectformula", "Expected different adduct."
    assert spectrum.get("compound_name") == "peptideXYZ", "Expected different cleaned name."


def test_derive_formula_from_name_remove_formula_false():
    """With remove_formula_from_name=False the name must stay untouched."""
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={"compound_name": "peptideXYZ [M+H+K] C5H12NO2"})

    spectrum = derive_formula_from_name(spectrum_in, remove_formula_from_name=False)

    assert spectrum.get("formula") == "C5H12NO2", "Expected different formula."
    assert spectrum.get("compound_name") == spectrum_in.get("compound_name"), "Expected no name change."


def test_derive_formula_from_name_no_name_given():
    """Without a compound name, neither formula nor name should be set."""
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={})

    spectrum = derive_formula_from_name(spectrum_in)

    assert spectrum.get("formula", None) is None, "Expected None for adduct."
    assert spectrum.get("compound_name", None) is None, "Expected None for name."
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.