# sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
# |---|---|---|
def parse_args(arglist=None):
    """Parse cmd line arguments.

    Update :attr:`stagpy.conf` accordingly.

    Args:
        arglist (list of str): the list of cmd line arguments. If set to
            None, the arguments are taken from :attr:`sys.argv`.

    Returns:
        function: the function implementing the sub command to be executed.
    """
    climan = CLIManager(conf, **SUB_CMDS)
    # keep shell completion scripts in sync with the CLI definition
    create_complete_files(climan, CONFIG_DIR, 'stagpy', 'stagpy-git',
                          zsh_sourceable=True)
    cmd_args, all_subs = climan.parse_args(arglist)
    sub_cmd = cmd_args.loam_sub_name
    if sub_cmd is None:
        return cmd_args.func
    if sub_cmd != 'config':
        commands.report_parsing_problems(PARSING_OUT)
    if conf.common.set:
        set_conf_str(conf, conf.common.set)
    if conf.common.config:
        commands.config_pp(all_subs)
    load_mplstyle()
    try:
        _steps_to_slices()
    except AttributeError:
        # the sub command did not request a step/snapshot range
        pass
    return cmd_args.func
def detect_plates_vzcheck(step, seuil_memz):
    """Detect plates and check with vz and plate size.

    Args:
        step: a StagPy step (snapshot) object holding fields and geometry.
        seuil_memz: vertical velocity threshold memorized from the previous
            snapshot; falsy values disable the memory term.

    Returns:
        tuple: (limits, nphi, dvphi, vz_thres, surface horizontal velocity).

    Raises:
        error.StagnantLidError: if the convective flux near the surface is
            negligible (stagnant lid regime).
    """
    v_z = step.fields['v3'][0, :, :, 0]
    v_x = step.fields['v2'][0, :, :, 0]
    tcell = step.fields['T'][0, :, :, 0]
    n_z = step.geom.nztot
    nphi = step.geom.nptot  # -1? should be OK, ghost not included
    rcmb = max(0, step.geom.rcmb)
    radius = step.geom.r_coord + rcmb
    radiusgrid = step.geom.rgeom[:, 0] + rcmb
    dphi = 1 / nphi
    # interpolate temperature onto the grid and accumulate mean |vz|
    vz_mean = 0
    tgrid = np.zeros((nphi, n_z + 1))
    tgrid[:, 0] = 1
    for i_z in range(1, n_z):
        for phi in range(nphi):
            tgrid[phi, i_z] = (
                tcell[phi, i_z - 1] *
                (radiusgrid[i_z] - radius[i_z - 1]) + tcell[phi, i_z] *
                (-radiusgrid[i_z] + radius[i_z])) / (radius[i_z] -
                                                     radius[i_z - 1])
            vz_mean += abs(v_z[phi, i_z]) / (nphi * n_z)
    # convective flux profile
    flux_c = n_z * [0]
    for i_z in range(1, n_z - 1):
        for phi in range(nphi):
            flux_c[i_z] += (tgrid[phi, i_z] - step.timeinfo.loc['Tmean']) * \
                v_z[phi, i_z] * radiusgrid[i_z] * dphi
    # checking stagnant lid
    if all(abs(flux_c[i_z]) <= np.max(flux_c) / 50
           for i_z in range(n_z - n_z // 20, n_z)):
        raise error.StagnantLidError(step.sdat)
    # verifying horizontal plate speed and closeness of plates
    dvphi = nphi * [0]
    dvx_thres = 16 * step.timeinfo.loc['vrms']
    for phi in range(0, nphi):
        dvphi[phi] = (v_x[phi, n_z - 1] -
                      v_x[phi - 1, n_z - 1]) / ((1 + rcmb) * dphi)
    limits = []
    for phi in range(0, nphi - nphi // 33):
        mark = all(abs(dvphi[i]) <= abs(dvphi[phi])
                   for i in range(phi - nphi // 33, phi + nphi // 33))
        if mark and abs(dvphi[phi]) >= dvx_thres:
            limits.append(phi)
    # same detection for the band wrapping around phi = 0
    for phi in range(nphi - nphi // 33 + 1, nphi):
        mark = all(abs(dvphi[i]) <= abs(dvphi[phi])
                   for i in range(phi - nphi // 33 - nphi,
                                  phi + nphi // 33 - nphi))
        if mark and abs(dvphi[phi]) >= dvx_thres:
            limits.append(phi)
    # verifying vertical speed
    # BUGFIX: the threshold is loop-invariant and is needed in the return
    # value, but it used to be computed inside the loop below, raising
    # NameError whenever no limit was detected.  Hoisted out of the loop.
    vz_thres = vz_mean * 0.1 + seuil_memz / 2 if seuil_memz else 0
    k = 0
    for i in range(len(limits)):
        vzm = 0
        phi = limits[i - k]
        for i_z in range(1 if phi == nphi - 1 else 0, n_z):
            vzm += (abs(v_z[phi, i_z]) +
                    abs(v_z[phi - 1, i_z]) +
                    abs(v_z[(phi + 1) % nphi, i_z])) / (n_z * 3)
        if vzm < vz_thres:
            # remove limits without enough vertical flow underneath
            limits.remove(phi)
            k += 1
    return limits, nphi, dvphi, vz_thres, v_x[:, n_z - 1]
def detect_plates(step, vrms_surface, fids, time):
    """Detect plates using derivative of horizontal velocity.

    Trenches are strong local minima of the surface velocity derivative,
    ridges are local maxima; ridges too close to a trench are discarded.

    Args:
        step: a StagPy step (snapshot) object.
        vrms_surface: time-averaged rms surface velocity (used for scaling).
        fids: sequence of open output files; indices 0, 6 and 7 are written.
        time: dimensional time of the snapshot.

    Returns:
        tuple: (trench, ridge, agetrench, dv_trench, dv_ridge).
    """
    vphi = step.fields['v2'][0, :, :, 0]
    ph_coord = step.geom.p_coord
    if step.sdat.par['boundaries']['air_layer']:
        dsa = step.sdat.par['boundaries']['air_thickness']
        # we are a bit below the surface; should check if you are in the
        # thermal boundary layer
        indsurf = np.argmin(abs((1 - dsa) - step.geom.r_coord)) - 4
    else:
        indsurf = -1
    vph2 = 0.5 * (vphi + np.roll(vphi, 1, 0))  # interpolate to the same phi
    # velocity derivation
    dvph2 = (np.diff(vph2[:, indsurf]) / (ph_coord[0] * 2.))
    io_surface(step.isnap, time, fids[6], dvph2)
    io_surface(step.isnap, time, fids[7], vph2[:-1, indsurf])
    # prepare stuff to find trenches and ridges
    myorder_trench = 15 if step.sdat.par['boundaries']['air_layer'] else 10
    myorder_ridge = 20  # threshold
    # finding trenches
    pom2 = np.copy(dvph2)
    if step.sdat.par['boundaries']['air_layer']:
        maskbigdvel = -30 * vrms_surface
    else:
        maskbigdvel = -10 * vrms_surface
    pom2[pom2 > maskbigdvel] = maskbigdvel
    argless_dv = argrelextrema(
        pom2, np.less, order=myorder_trench, mode='wrap')[0]
    trench = ph_coord[argless_dv]
    velocity_trench = vph2[argless_dv, indsurf]
    dv_trench = dvph2[argless_dv]
    # finding ridges
    pom2 = np.copy(dvph2)
    masksmalldvel = np.amax(dvph2) * 0.2
    pom2[pom2 < masksmalldvel] = masksmalldvel
    arggreat_dv = argrelextrema(
        pom2, np.greater, order=myorder_ridge, mode='wrap')[0]
    ridge = ph_coord[arggreat_dv]
    # elimination of ridges that are too close to trench
    argdel = []
    if trench.any() and ridge.any():
        for i, ridge_i in enumerate(ridge):
            mdistance = np.amin(abs(trench - ridge_i))
            if mdistance < 0.016:
                argdel.append(i)
        if argdel:
            print('deleting from ridge', trench, ridge[argdel])
            ridge = np.delete(ridge, np.array(argdel))
            arggreat_dv = np.delete(arggreat_dv, np.array(argdel))
    dv_ridge = dvph2[arggreat_dv]
    # BUGFIX: was `'age' in conf.plates`; every other occurrence in this
    # module tests `conf.plates.plot`, so the age branch was unreachable.
    if 'age' in conf.plates.plot:
        agefld = step.fields['age'][0, :, :, 0]
        age_surface = np.ma.masked_where(agefld[:, indsurf] < 0.00001,
                                         agefld[:, indsurf])
        age_surface_dim = age_surface * vrms_surface *\
            conf.scaling.ttransit / conf.scaling.yearins / 1.e6
        agetrench = age_surface_dim[argless_dv]  # age at the trench
    else:
        agetrench = np.zeros(len(argless_dv))
    # writing the output into a file, all time steps are in one file
    for itrench in np.arange(len(trench)):
        fids[0].write("%7.0f %11.7f %10.6f %9.2f %9.2f \n" % (
            step.isnap,
            step.geom.ti_ad,
            trench[itrench],
            velocity_trench[itrench],
            agetrench[itrench]
        ))
    return trench, ridge, agetrench, dv_trench, dv_ridge
def plot_plate_limits(axis, ridges, trenches, ymin, ymax):
    """Plot vertical lines designating ridges and trenches.

    Args:
        axis: the matplotlib axis to draw on.
        ridges: phi positions of ridges (green dashed lines).
        trenches: phi positions of trenches (red dashed lines).
        ymin, ymax: vertical extent passed to the axis limits.
    """
    for trench in trenches:
        axis.axvline(
            x=trench, ymin=ymin, ymax=ymax,
            color='red', ls='dashed', alpha=0.4)
    for ridge in ridges:
        axis.axvline(
            x=ridge, ymin=ymin, ymax=ymax,
            color='green', ls='dashed', alpha=0.4)
    axis.set_xlim(0, 2 * np.pi)
    axis.set_ylim(ymin, ymax)
def plot_plate_limits_field(axis, rcmb, ridges, trenches):
    """Plot arrows designating ridges and trenches in 2D field plots.

    Arrows point inward from radius rcmb+1.35 to rcmb+1.02, red for
    trenches and green for ridges.

    Args:
        axis: the matplotlib axis to annotate.
        rcmb: radius of the core-mantle boundary.
        ridges: phi positions of ridges.
        trenches: phi positions of trenches.
    """
    for trench in trenches:
        xxd = (rcmb + 1.02) * np.cos(trench)  # arrow begin
        yyd = (rcmb + 1.02) * np.sin(trench)  # arrow begin
        xxt = (rcmb + 1.35) * np.cos(trench)  # arrow end
        yyt = (rcmb + 1.35) * np.sin(trench)  # arrow end
        axis.annotate('', xy=(xxd, yyd), xytext=(xxt, yyt),
                      arrowprops=dict(facecolor='red', shrink=0.05))
    for ridge in ridges:
        xxd = (rcmb + 1.02) * np.cos(ridge)
        yyd = (rcmb + 1.02) * np.sin(ridge)
        xxt = (rcmb + 1.35) * np.cos(ridge)
        yyt = (rcmb + 1.35) * np.sin(ridge)
        axis.annotate('', xy=(xxd, yyd), xytext=(xxt, yyt),
                      arrowprops=dict(facecolor='green', shrink=0.05))
def plot_plates(step, time, vrms_surface, trench, ridge, agetrench,
                topo, fids):
    """Plot surface profiles and subduction diagnostics for one snapshot.

    Produces figures of concentration/temperature/velocity, velocity
    derivative, optionally stress and seafloor age, and topography; also
    appends subduction distance data to ``fids[1]``.

    Args:
        step: a StagPy step (snapshot) object.
        time: dimensional time of the snapshot.
        vrms_surface: time-averaged rms surface velocity (for scaling).
        trench: phi positions of detected trenches.
        ridge: phi positions of detected ridges.
        agetrench: seafloor age at each trench.
        topo: topography profile, columns (phi, topo).
        fids: sequence of open output files.
    """
    vphi = step.fields['v2'][0, :, :, 0]
    tempfld = step.fields['T'][0, :, :, 0]
    concfld = step.fields['c'][0, :, :, 0]
    timestep = step.isnap
    if step.sdat.par['boundaries']['air_layer']:
        dsa = step.sdat.par['boundaries']['air_thickness']
        # we are a bit below the surface; delete "-some number"
        # to be just below
        # the surface (that is considered plane here); should check if you are
        # in the thermal boundary layer
        indsurf = np.argmin(abs((1 - dsa) - step.geom.r_coord)) - 4
        # depth to detect the continents
        indcont = np.argmin(abs((1 - dsa) - np.array(step.geom.r_coord))) - 10
    else:
        indsurf = -1
        indcont = -1  # depth to detect continents
    # mask everything but the continental material, depending on the setup
    if step.sdat.par['boundaries']['air_layer'] and\
       not step.sdat.par['continents']['proterozoic_belts']:
        continents = np.ma.masked_where(
            np.logical_or(concfld[:-1, indcont] < 3,
                          concfld[:-1, indcont] > 4),
            concfld[:-1, indcont])
    elif (step.sdat.par['boundaries']['air_layer'] and
          step.sdat.par['continents']['proterozoic_belts']):
        continents = np.ma.masked_where(
            np.logical_or(concfld[:-1, indcont] < 3,
                          concfld[:-1, indcont] > 5),
            concfld[:-1, indcont])
    elif step.sdat.par['tracersin']['tracers_weakcrust']:
        continents = np.ma.masked_where(
            concfld[:-1, indcont] < 3, concfld[:-1, indcont])
    else:
        continents = np.ma.masked_where(
            concfld[:-1, indcont] < 2, concfld[:-1, indcont])
    # masked array, only continents are true
    continentsall = continents / continents
    ph_coord = step.geom.p_coord
    # velocity
    vph2 = 0.5 * (vphi + np.roll(vphi, 1, 0))  # interpolate to the same phi
    dvph2 = (np.diff(vph2[:, indsurf]) / (ph_coord[0] * 2.))
    # plotting
    fig0, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, figsize=(12, 8))
    ax1.plot(ph_coord[:-1], concfld[:-1, indsurf], color='g', label='Conc')
    ax2.plot(ph_coord[:-1], tempfld[:-1, indsurf], color='k', label='Temp')
    ax3.plot(ph_coord[:-1], vph2[:-1, indsurf], label='Vel')
    ax1.fill_between(
        ph_coord[:-1], continents, 1., facecolor='#8B6914', alpha=0.2)
    ax2.fill_between(
        ph_coord[:-1], continentsall, 0., facecolor='#8B6914', alpha=0.2)
    tempmin = step.sdat.par['boundaries']['topT_val'] * 0.9\
        if step.sdat.par['boundaries']['topT_mode'] == 'iso' else 0.0
    tempmax = step.sdat.par['boundaries']['botT_val'] * 0.35\
        if step.sdat.par['boundaries']['botT_mode'] == 'iso' else 0.8
    ax2.set_ylim(tempmin, tempmax)
    ax3.fill_between(
        ph_coord[:-1], continentsall * round(1.5 * np.amax(dvph2), 1),
        round(np.amin(dvph2) * 1.1, 1), facecolor='#8B6914', alpha=0.2)
    ax3.set_ylim(conf.plates.vmin, conf.plates.vmax)
    ax1.set_ylabel("Concentration")
    ax2.set_ylabel("Temperature")
    ax3.set_ylabel("Velocity")
    ax1.set_title(timestep)
    ax1.text(0.95, 1.07, str(round(time, 0)) + ' My',
             transform=ax1.transAxes)
    ax1.text(0.01, 1.07, str(round(step.geom.ti_ad, 8)),
             transform=ax1.transAxes)
    plot_plate_limits(ax3, ridge, trench, conf.plates.vmin,
                      conf.plates.vmax)
    misc.saveplot(fig0, 'sveltempconc', timestep)
    # plotting velocity and velocity derivative
    fig0, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
    ax1.plot(ph_coord[:-1], vph2[:-1, indsurf], label='Vel')
    ax1.axhline(y=0, xmin=0, xmax=2 * np.pi,
                color='black', ls='solid', alpha=0.2)
    ax1.set_ylabel("Velocity")
    ax1.text(0.95, 1.07, str(round(time, 0)) + ' My',
             transform=ax1.transAxes)
    ax1.text(0.01, 1.07, str(round(step.geom.ti_ad, 8)),
             transform=ax1.transAxes)
    ax2.plot(ph_coord[:-1] + ph_coord[0], dvph2, color='k', label='dv')
    ax2.set_ylabel("dv")
    plot_plate_limits(ax1, ridge, trench, conf.plates.vmin,
                      conf.plates.vmax)
    plot_plate_limits(ax2, ridge, trench, conf.plates.dvmin,
                      conf.plates.dvmax)
    ax1.set_xlim(0, 2 * np.pi)
    ax1.set_title(timestep)
    ax1.fill_between(
        ph_coord[:-1], continentsall * conf.plates.vmin, conf.plates.vmax,
        facecolor='#8b6914', alpha=0.2)
    ax1.set_ylim(conf.plates.vmin, conf.plates.vmax)
    ax2.fill_between(
        ph_coord[:-1], continentsall * conf.plates.dvmin,
        conf.plates.dvmax, facecolor='#8b6914', alpha=0.2)
    ax2.set_ylim(conf.plates.dvmin, conf.plates.dvmax)
    misc.saveplot(fig0, 'sveldvel', timestep)
    # plotting velocity and second invariant of stress
    if 'str' in conf.plates.plot:
        stressfld = step.fields['sII'][0, :, :, 0]
        fig0, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
        ax1.plot(ph_coord[:-1], vph2[:-1, indsurf], label='Vel')
        ax1.axhline(y=0, xmin=0, xmax=2 * np.pi,
                    color='black', ls='solid', alpha=0.2)
        ax1.set_ylabel("Velocity")
        ax1.text(0.95, 1.07, str(round(time, 0)) + ' My',
                 transform=ax1.transAxes)
        ax1.text(0.01, 1.07, str(round(step.geom.ti_ad, 8)),
                 transform=ax1.transAxes)
        ax2.plot(ph_coord[:-1],
                 stressfld[:-1, indsurf] * step.sdat.scales.stress / 1.e6,
                 color='k', label='Stress')
        ax2.set_ylim(conf.plates.stressmin, conf.plates.stressmax)
        ax2.set_ylabel("Stress [MPa]")
        plot_plate_limits(ax1, ridge, trench,
                          conf.plates.vmin, conf.plates.vmax)
        plot_plate_limits(ax2, ridge, trench,
                          conf.plates.stressmin, conf.plates.stressmax)
        ax1.set_xlim(0, 2 * np.pi)
        ax1.set_title(timestep)
        ax1.fill_between(
            ph_coord[:-1], continentsall * conf.plates.vmin,
            conf.plates.vmax, facecolor='#8B6914', alpha=0.2)
        ax1.set_ylim(conf.plates.vmin, conf.plates.vmax)
        ax2.fill_between(
            ph_coord[:-1], continentsall * conf.plates.dvmin,
            conf.plates.dvmax,
            facecolor='#8B6914', alpha=0.2)
        misc.saveplot(fig0, 'svelstress', timestep)
    # plotting velocity
    fig1, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
    ax1.plot(ph_coord[:-1], vph2[:-1, indsurf], label='Vel')
    ax1.axhline(y=0, xmin=0, xmax=2 * np.pi,
                color='black', ls='solid', alpha=0.2)
    ax1.set_ylim(conf.plates.vmin, conf.plates.vmax)
    ax1.set_ylabel("Velocity")
    ax1.text(0.95, 1.07, str(round(time, 0)) + ' My',
             transform=ax1.transAxes)
    plot_plate_limits(ax1, ridge, trench, conf.plates.vmin,
                      conf.plates.vmax)
    # plotting velocity and age at surface
    if 'age' in conf.plates.plot:
        agefld = step.fields['age'][0, :, :, 0]
        age_surface = np.ma.masked_where(
            agefld[:, indsurf] < 0.00001, agefld[:, indsurf])
        age_surface_dim = (age_surface * vrms_surface * conf.scaling.ttransit /
                           conf.scaling.yearins / 1.e6)
        fig2, (ax3, ax4) = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
        ax3.plot(ph_coord[:-1], vph2[:-1, indsurf], label='Vel')
        ax3.axhline(
            y=0, xmin=0, xmax=2 * np.pi,
            color='black', ls='solid', alpha=0.2)
        ax3.set_ylim(conf.plates.vmin, conf.plates.vmax)
        ax3.set_ylabel("Velocity")
        ax3.text(0.95, 1.07, str(round(time, 0)) + ' My',
                 transform=ax3.transAxes)
        ax3.fill_between(
            ph_coord[:-1], continentsall * conf.plates.vmax,
            conf.plates.vmin, facecolor='#8B6914', alpha=0.2)
        plot_plate_limits(ax3, ridge, trench,
                          conf.plates.vmin, conf.plates.vmax)
    times_subd = []
    age_subd = []
    distance_subd = []
    ph_trench_subd = []
    ph_cont_subd = []
    if step.sdat.par['switches']['cont_tracers']:
        for i, trench_i in enumerate(trench):
            # detection of the distance in between subduction and continent
            ph_coord_noend = ph_coord[:-1]
            angdistance1 = abs(ph_coord_noend[continentsall == 1] - trench_i)
            angdistance2 = 2. * np.pi - angdistance1
            angdistance = np.minimum(angdistance1, angdistance2)
            distancecont = min(angdistance)
            argdistancecont = np.argmin(angdistance)
            continentpos = ph_coord_noend[continentsall == 1][argdistancecont]
            ph_trench_subd.append(trench_i)
            age_subd.append(agetrench[i])
            ph_cont_subd.append(continentpos)
            distance_subd.append(distancecont)
            times_subd.append(step.geom.ti_ad)
            if angdistance1[argdistancecont] < angdistance2[argdistancecont]:
                if continentpos - trench_i < 0:  # continent is on the left
                    distancecont = - distancecont
                ax1.annotate('', xy=(trench_i + distancecont, 2000),
                             xycoords='data', xytext=(trench_i, 2000),
                             textcoords='data',
                             arrowprops=dict(arrowstyle="->", lw="2",
                                             shrinkA=0, shrinkB=0))
            else:  # distance over boundary
                xy_anot, xy_text = 0, 2 * np.pi
                if continentpos - trench_i < 0:
                    xy_anot, xy_text = xy_text, xy_anot
                ax1.annotate('', xy=(xy_anot, 2000),
                             xycoords='data', xytext=(trench_i, 2000),
                             textcoords='data',
                             arrowprops=dict(arrowstyle="-", lw="2",
                                             shrinkA=0, shrinkB=0))
                ax1.annotate('', xy=(continentpos, 2000),
                             xycoords='data', xytext=(xy_text, 2000),
                             textcoords='data',
                             arrowprops=dict(arrowstyle="->", lw="2",
                                             shrinkA=0, shrinkB=0))
    ax1.fill_between(
        ph_coord[:-1], continentsall * conf.plates.vmin,
        conf.plates.vmax, facecolor='#8B6914', alpha=0.2)
    ax2.set_ylabel("Topography [km]")
    ax2.axhline(y=0, xmin=0, xmax=2 * np.pi,
                color='black', ls='solid', alpha=0.2)
    ax2.plot(topo[:, 0],
             topo[:, 1] * step.sdat.scales.length / 1.e3,
             color='black')
    ax2.set_xlim(0, 2 * np.pi)
    ax2.set_ylim(conf.plates.topomin, conf.plates.topomax)
    ax2.fill_between(
        ph_coord[:-1], continentsall * conf.plates.topomax,
        conf.plates.topomin, facecolor='#8B6914', alpha=0.2)
    plot_plate_limits(ax2, ridge, trench, conf.plates.topomin,
                      conf.plates.topomax)
    ax1.set_title(timestep)
    misc.saveplot(fig1, 'sveltopo', timestep)
    if 'age' in conf.plates.plot:
        ax4.set_ylabel("Seafloor age [My]")
        # in dimensions
        ax4.plot(ph_coord[:-1], age_surface_dim[:-1], color='black')
        ax4.set_xlim(0, 2 * np.pi)
        ax4.fill_between(
            ph_coord[:-1], continentsall * conf.plates.agemax,
            conf.plates.agemin, facecolor='#8B6914', alpha=0.2)
        ax4.set_ylim(conf.plates.agemin, conf.plates.agemax)
        plot_plate_limits(ax4, ridge, trench, conf.plates.agemin,
                          conf.plates.agemax)
        ax3.set_title(timestep)
        misc.saveplot(fig2, 'svelage', timestep)
    # writing the output into a file, all time steps are in one file
    for isubd in np.arange(len(distance_subd)):
        fids[1].write("%6.0f %11.7f %11.3f %10.6f %10.6f %10.6f %11.3f\n" % (
            timestep,
            times_subd[isubd],
            time,
            distance_subd[isubd],
            ph_trench_subd[isubd],
            ph_cont_subd[isubd],
            age_subd[isubd],
        ))
def io_surface(timestep, time, fid, fld):
    """Write one surface record to an output file.

    The record is the timestep and time followed by every field value in
    ``%10.2e`` format, terminated by a newline.

    Args:
        timestep: snapshot index.
        time: time of the snapshot.
        fid: open writable file object.
        fld: iterable of field values along the surface.
    """
    fid.write("{} {}".format(timestep, time))
    fid.writelines(["%10.2e" % item for item in fld[:]])
    fid.writelines(["\n"])
def lithospheric_stress(step, trench, ridge, time):
    """Calculate and plot stress in the lithosphere.

    The lithosphere is taken as everything above rcmb + 1 - 0.105; the
    stress field is masked below that and integrated radially, then the
    masked field and the integrated profile are plotted.

    Args:
        step: a StagPy step (snapshot) object.
        trench: phi positions of detected trenches.
        ridge: phi positions of detected ridges.
        time: dimensional time of the snapshot.
    """
    timestep = step.isnap
    base_lith = step.geom.rcmb + 1 - 0.105
    stressfld = step.fields['sII'][0, :, :, 0]
    stressfld = np.ma.masked_where(step.geom.r_mesh[0] < base_lith, stressfld)
    # stress integration in the lithosphere
    dzm = (step.geom.r_coord[1:] - step.geom.r_coord[:-1])
    stress_lith = np.sum((stressfld[:, 1:] * dzm.T), axis=1)
    ph_coord = step.geom.p_coord  # probably doesn't need alias
    # plot stress in the lithosphere
    fig, axis, _, _ = field.plot_scalar(step, 'sII', stressfld,
                                        cmap='plasma_r', vmin=0, vmax=300)
    # Annotation with time and step
    axis.text(1., 0.9, str(round(time, 0)) + ' My', transform=axis.transAxes)
    axis.text(1., 0.1, str(timestep), transform=axis.transAxes)
    misc.saveplot(fig, 'lith', timestep)
    # velocity
    vphi = step.fields['v2'][0, :, :, 0]
    vph2 = 0.5 * (vphi + np.roll(vphi, 1, 0))  # interpolate to the same phi
    # position of continents
    concfld = step.fields['c'][0, :, :, 0]
    if step.sdat.par['boundaries']['air_layer']:
        # we are a bit below the surface; delete "-some number"
        # to be just below
        dsa = step.sdat.par['boundaries']['air_thickness']
        # depth to detect the continents
        indcont = np.argmin(abs((1 - dsa) - step.geom.r_coord)) - 10
    else:
        # depth to detect continents
        indcont = -1
    if step.sdat.par['boundaries']['air_layer'] and\
       not step.sdat.par['continents']['proterozoic_belts']:
        continents = np.ma.masked_where(
            np.logical_or(concfld[:-1, indcont] < 3,
                          concfld[:-1, indcont] > 4),
            concfld[:-1, indcont])
    elif step.sdat.par['boundaries']['air_layer'] and\
            step.sdat.par['continents']['proterozoic_belts']:
        continents = np.ma.masked_where(
            np.logical_or(concfld[:-1, indcont] < 3,
                          concfld[:-1, indcont] > 5),
            concfld[:-1, indcont])
    elif step.sdat.par['tracersin']['tracers_weakcrust']:
        continents = np.ma.masked_where(
            concfld[:-1, indcont] < 3, concfld[:-1, indcont])
    else:
        continents = np.ma.masked_where(
            concfld[:-1, indcont] < 2, concfld[:-1, indcont])
    # masked array, only continents are true
    continentsall = continents / continents
    # plot integrated stress in the lithosphere
    fig0, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
    ax1.plot(ph_coord[:-1], vph2[:-1, -1], label='Vel')
    ax1.axhline(y=0, xmin=0, xmax=2 * np.pi,
                color='black', ls='solid', alpha=0.2)
    ax1.set_ylabel("Velocity")
    ax1.text(0.95, 1.07, str(round(time, 0)) + ' My',
             transform=ax1.transAxes)
    ax1.text(0.01, 1.07, str(round(step.geom.ti_ad, 8)),
             transform=ax1.transAxes)
    intstr_scale = step.sdat.scales.stress * step.sdat.scales.length / 1.e12
    ax2.plot(ph_coord, stress_lith * intstr_scale, color='k', label='Stress')
    ax2.set_ylabel(r"Integrated stress [$TN\,m^{-1}$]")
    plot_plate_limits(ax1, ridge, trench, conf.plates.vmin,
                      conf.plates.vmax)
    plot_plate_limits(ax2, ridge, trench, conf.plates.stressmin,
                      conf.plates.lstressmax)
    ax1.set_xlim(0, 2 * np.pi)
    ax1.set_title(timestep)
    ax1.fill_between(
        ph_coord[:-1], continentsall * conf.plates.vmin,
        conf.plates.vmax, facecolor='#8b6914', alpha=0.2)
    ax1.set_ylim(conf.plates.vmin, conf.plates.vmax)
    ax2.fill_between(
        ph_coord[:-1], continentsall * conf.plates.stressmin,
        conf.plates.lstressmax, facecolor='#8b6914', alpha=0.2)
    ax2.set_ylim(conf.plates.stressmin, conf.plates.lstressmax)
    misc.saveplot(fig0, 'svelslith', timestep)
def set_of_vars(arg_plot):
    """Build set of needed variables.

    Only names that are valid plate variables (keys of
    :data:`phyvars.PLATES`) are kept.

    Args:
        arg_plot (str): string with variable names separated with ``,``.

    Returns:
        set of str: set of variables.
    """
    return set(var for var in arg_plot.split(',') if var in phyvars.PLATES)
def main_plates(sdat):
    """Plot several plates information.

    Walks over the requested snapshots, detects trenches/ridges, plots
    surface profiles and fields, and writes the surface time series files.

    Args:
        sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
    """
    # calculating averaged horizontal surface velocity
    # needed for redimensionalisation
    ilast = sdat.rprof.index.levels[0][-1]
    rlast = sdat.rprof.loc[ilast]
    nprof = 0
    uprof_averaged = rlast.loc[:, 'vhrms'] * 0
    for step in sdat.walk.filter(rprof=True):
        uprof_averaged += step.rprof['vhrms']
        nprof += 1
    uprof_averaged /= nprof
    radius = rlast['r'].values
    if sdat.par['boundaries']['air_layer']:
        dsa = sdat.par['boundaries']['air_thickness']
        isurf = np.argmin(abs(radius - radius[-1] + dsa))
        vrms_surface = uprof_averaged.iloc[isurf]
        isurf = np.argmin(abs((1 - dsa) - radius))
        isurf -= 4  # why different isurf for the rest?
    else:
        isurf = -1
        vrms_surface = uprof_averaged.iloc[isurf]
    with misc.InchoateFiles(8, 'plates') as fids:
        fids.fnames = ['plate_velocity', 'distance_subd', 'continents',
                       'flux', 'topography', 'age', 'velderiv', 'velocity']
        fids[0].write('# it time ph_trench vel_trench age_trench\n')
        fids[1].write('# it time time [My] distance '
                      'ph_trench ph_cont age_trench [My]\n')
        istart, iend = None, None
        for step in sdat.walk.filter(fields=['T']):
            # could check other fields too
            timestep = step.isnap
            istart = timestep if istart is None else istart
            iend = timestep
            print('Treating snapshot', timestep)
            rcmb = step.geom.rcmb
            # topography
            fname = sdat.filename('sc', timestep=timestep, suffix='.dat')
            topo = np.genfromtxt(str(fname))
            # rescaling topography!
            if sdat.par['boundaries']['air_layer']:
                topo[:, 1] = topo[:, 1] / (1. - dsa)
            time = step.geom.ti_ad * vrms_surface *\
                conf.scaling.ttransit / conf.scaling.yearins / 1.e6
            trenches, ridges, agetrenches, _, _ =\
                detect_plates(step, vrms_surface, fids, time)
            plot_plates(step, time, vrms_surface, trenches, ridges,
                        agetrenches, topo, fids)
            # prepare for continent plotting
            concfld = step.fields['c'][0, :, :, 0]
            continentsfld = np.ma.masked_where(
                concfld < 3, concfld)  # plotting continents, to-do
            continentsfld = continentsfld / continentsfld
            temp = step.fields['T'][0, :, :, 0]
            tgrad = (temp[:, isurf - 1] - temp[:, isurf]) /\
                (step.geom.r_coord[isurf] - step.geom.r_coord[isurf - 1])
            io_surface(timestep, time, fids[2], concfld[:-1, isurf])
            io_surface(timestep, time, fids[3], tgrad)
            io_surface(timestep, time, fids[4], topo[:, 1])
            if 'age' in conf.plates.plot:
                io_surface(timestep, time, fids[5],
                           step.fields['age'][0, :, isurf, 0])
            # plot viscosity field with position of trenches and ridges
            etamin, _ = sdat.scale(1e-2, 'Pa')
            etamax, _ = sdat.scale(sdat.par['viscosity']['eta_max'], 'Pa')
            fig, axis, _, _ = field.plot_scalar(step, 'eta',
                                                vmin=etamin, vmax=etamax)
            # plotting continents
            field.plot_scalar(step, 'c', continentsfld, axis, False,
                              cmap='cool_r', vmin=0, vmax=0)
            cmap2 = plt.cm.ocean
            cmap2.set_over('m')
            # plotting velocity vectors
            field.plot_vec(axis, step, 'v')
            # Annotation with time and step
            axis.text(1., 0.9, str(round(time, 0)) + ' My',
                      transform=axis.transAxes)
            axis.text(1., 0.1, str(timestep),
                      transform=axis.transAxes)
            # Put arrow where ridges and trenches are
            plot_plate_limits_field(axis, rcmb, ridges, trenches)
            misc.saveplot(fig, 'eta', timestep, close=False)
            # Zoom
            if conf.plates.zoom is not None:
                if not 0 <= conf.plates.zoom <= 360:
                    raise error.InvalidZoomError(conf.plates.zoom)
                # choose the margins depending on the zoomed quadrant
                if 45 < conf.plates.zoom <= 135:
                    ladd, radd, uadd, dadd = 0.8, 0.8, 0.05, 0.1
                elif 135 < conf.plates.zoom <= 225:
                    ladd, radd, uadd, dadd = 0.05, 0.1, 0.8, 0.8
                elif 225 < conf.plates.zoom <= 315:
                    ladd, radd, uadd, dadd = 0.8, 0.8, 0.1, 0.05
                else:  # >315 or <=45
                    ladd, radd, uadd, dadd = 0.1, 0.05, 0.8, 0.8
                xzoom = (rcmb + 1) * np.cos(np.radians(conf.plates.zoom))
                yzoom = (rcmb + 1) * np.sin(np.radians(conf.plates.zoom))
                axis.set_xlim(xzoom - ladd, xzoom + radd)
                axis.set_ylim(yzoom - dadd, yzoom + uadd)
                misc.saveplot(fig, 'etazoom', timestep, close=False)
            plt.close(fig)
            # plot stress field with position of trenches and ridges
            if 'str' in conf.plates.plot:
                fig, axis, _, _ = field.plot_scalar(step, 'sII',
                                                    vmin=0, vmax=300)
                # Annotation with time and step
                axis.text(1., 0.9, str(round(time, 0)) + ' My',
                          transform=axis.transAxes)
                axis.text(1., 0.1, str(timestep),
                          transform=axis.transAxes)
                # Put arrow where ridges and trenches are
                plot_plate_limits_field(axis, rcmb, ridges, trenches)
                misc.saveplot(fig, 's', timestep, close=False)
                # Zoom
                if conf.plates.zoom is not None:
                    axis.set_xlim(xzoom - ladd, xzoom + radd)
                    axis.set_ylim(yzoom - dadd, yzoom + uadd)
                    misc.saveplot(fig, 'szoom', timestep, close=False)
                plt.close(fig)
                # calculate stresses in the lithosphere
                lithospheric_stress(step, trenches, ridges, time)
            # plotting the principal deviatoric stress field
            if 'sx' in conf.plates.plot:
                fig, axis, _, _ = field.plot_scalar(step, 'sII',
                                                    alpha=0.1)
                # plotting continents
                field.plot_scalar(step, 'c', continentsfld, axis, False,
                                  cmap='cool_r', vmin=0, vmax=0)
                cmap2 = plt.cm.ocean
                cmap2.set_over('m')
                # plotting principal deviatoric stress
                field.plot_vec(axis, step, 'sx')
                # Annotation with time and step
                axis.text(1., 0.9, str(round(time, 0)) + ' My',
                          transform=axis.transAxes)
                axis.text(1., 0.1, str(timestep),
                          transform=axis.transAxes)
                # Put arrow where ridges and trenches are
                plot_plate_limits_field(axis, rcmb, ridges, trenches)
                misc.saveplot(fig, 'sx', timestep)
        # determine names of files
        ptn = misc.out_name('{}_{}_{}')
        stem = ptn.format(fids.fnames[0], istart, iend)
        idx = 0
        fmt = '{}.dat'
        # find a suffix that does not clash with existing files
        while pathlib.Path(fmt.format(stem, idx)).is_file():
            fmt = '{}_{}.dat'
            idx += 1
        fids.fnames = [fmt.format(ptn.format(fname, istart, iend), idx)
                       for fname in fids.fnames]
def cmd():
    """Implementation of plates subcommand.

    Other Parameters:
        conf.plates
        conf.scaling
        conf.plot
        conf.core
    """
    sdat = StagyyData(conf.core.path)
    conf.plates.plot = set_of_vars(conf.plates.plot)
    if not conf.plates.vzcheck:
        conf.scaling.dimensional = True
        conf.scaling.factors['Pa'] = 'M'
        main_plates(sdat)
    else:
        seuil_memz = 0
        nb_plates = []
        time = []
        ch2o = []
        istart, iend = None, None
        for step in sdat.walk.filter(fields=['T']):
            # could check other fields too
            if conf.plates.timeprofile:
                time.append(step.timeinfo.loc['t'])
                # NOTE(review): positional loc[0] — presumably the water
                # content column of timeinfo; confirm against the data layout
                ch2o.append(step.timeinfo.loc[0])
            istart = step.isnap if istart is None else istart
            iend = step.isnap
            limits, nphi, dvphi, seuil_memz, vphi_surf =\
                detect_plates_vzcheck(step, seuil_memz)
            water_profile = np.mean(step.fields['wtr'][0, :, :, 0], axis=0)
            limits.sort()
            # plate sizes, first one wraps around phi = 0
            sizeplates = [limits[0] + nphi - limits[-1]]
            for lim in range(1, len(limits)):
                sizeplates.append(limits[lim] - limits[lim - 1])
            lim = len(limits) * [max(dvphi)]
            fig = plt.figure()
            plt.subplot(221)
            plt.axis([0, nphi,
                      np.min(vphi_surf) * 1.2, np.max(vphi_surf) * 1.2])
            plt.plot(vphi_surf)
            plt.subplot(223)
            plt.axis(
                [0, nphi,
                 np.min(dvphi) * 1.2, np.max(dvphi) * 1.2])
            plt.plot(dvphi)
            plt.scatter(limits, lim, color='red')
            plt.subplot(222)
            plt.hist(sizeplates, 10, (0, nphi / 2))
            plt.subplot(224)
            plt.plot(water_profile)
            misc.saveplot(fig, 'plates', step.isnap)
            nb_plates.append(len(limits))
        if conf.plates.timeprofile:
            # 5-point moving average of the number of plates
            for i in range(2, len(nb_plates) - 3):
                nb_plates[i] = (nb_plates[i - 2] + nb_plates[i - 1] +
                                nb_plates[i] + nb_plates[i + 1] +
                                nb_plates[i + 2]) / 5
            figt = plt.figure()
            plt.subplot(121)
            plt.axis([time[0], time[-1], 0, np.max(nb_plates)])
            plt.plot(time, nb_plates)
            plt.subplot(122)
            plt.plot(time, ch2o)
            misc.saveplot(figt, 'plates_{}_{}'.format(istart, iend))
def _check_config():
    """Create config files as necessary.

    Regenerates the config file and the bundled matplotlib style sheets
    whenever the recorded version differs from the current one.
    """
    config.CONFIG_DIR.mkdir(parents=True, exist_ok=True)
    verfile = config.CONFIG_DIR / '.version'
    uptodate = verfile.is_file() and verfile.read_text() == __version__
    if not uptodate:
        verfile.write_text(__version__)
    if not (uptodate and config.CONFIG_FILE.is_file()):
        conf.create_config_(update=True)
    for stfile in ('stagpy-paper.mplstyle',
                   'stagpy-slides.mplstyle'):
        stfile_conf = config.CONFIG_DIR / stfile
        if not (uptodate and stfile_conf.is_file()):
            # copy the style sheet shipped with the package
            stfile_local = pathlib.Path(__file__).parent / stfile
            shutil.copy(str(stfile_local), str(stfile_conf))
def load_mplstyle():
    """Try to load conf.plot.mplstyle matplotlib style.

    Each requested style is resolved against the config directory first;
    styles that cannot be loaded are reported on stderr and the option is
    reset.
    """
    plt = importlib.import_module('matplotlib.pyplot')
    if conf.plot.mplstyle:
        for style in conf.plot.mplstyle.split():
            stfile = config.CONFIG_DIR / (style + '.mplstyle')
            if stfile.is_file():
                style = str(stfile)
            try:
                plt.style.use(style)
            except OSError:
                print('Cannot import style {}.'.format(style),
                      file=sys.stderr)
                conf.plot.mplstyle = ''
    if conf.plot.xkcd:
        plt.xkcd()
def dtime(sdat, tstart=None, tend=None):
    """Time increment dt.

    Compute dt as a function of time.

    Args:
        sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
        tstart (float): time at which the computation should start. Use the
            beginning of the time series data if set to None.
        tend (float): time at which the computation should end. Use the
            end of the time series data if set to None.

    Returns:
        tuple of :class:`numpy.array`: dt and time arrays.
    """
    tseries = sdat.tseries_between(tstart, tend)
    time = tseries['t'].values
    # forward difference: dt[i] = t[i+1] - t[i], associated with t[i]
    return time[1:] - time[:-1], time[:-1]
def dt_dt(sdat, tstart=None, tend=None):
    """Derivative of temperature.

    Compute dT/dt as a function of time using an explicit Euler scheme.

    Args:
        sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
        tstart (float): time at which the computation should start. Use the
            beginning of the time series data if set to None.
        tend (float): time at which the computation should end. Use the
            end of the time series data if set to None.

    Returns:
        tuple of :class:`numpy.array`: derivative of temperature and time
        arrays.
    """
    tseries = sdat.tseries_between(tstart, tend)
    time = tseries['t'].values
    temp = tseries['Tmean'].values
    # forward finite difference, associated with the left time point
    dtdt = (temp[1:] - temp[:-1]) / (time[1:] - time[:-1])
    return dtdt, time[:-1]
def ebalance(sdat, tstart=None, tend=None):
"""Energy balance.
Compute Nu_t - Nu_b + V*dT/dt as a function of time using an explicit
Euler scheme. This should be zero if energy is conserved.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): time at which the computation should start. Use the
beginning of the time series data if set to None.
tend (float): time at which the computation should end. Use the
end of the time series data if set to None.
Returns:
tuple of :class:`numpy.array`: energy balance and time arrays.
"""
tseries = sdat.tseries_between(tstart, tend)
rbot, rtop = misc.get_rbounds(sdat.steps.last)
if rbot != 0: # spherical
coefsurf = (rtop / rbot)**2
volume = rbot * ((rtop / rbot)**3 - 1) / 3
else:
coefsurf = 1.
volume = 1.
dtdt, time = dt_dt(sdat, tstart, tend)
ftop = tseries['ftop'].values * coefsurf
fbot = tseries['fbot'].values
radio = tseries['H_int'].values
ebal = ftop[1:] - fbot[1:] + volume * (dtdt - radio[1:])
return ebal, time | Energy balance.
Compute Nu_t - Nu_b + V*dT/dt as a function of time using an explicit
Euler scheme. This should be zero if energy is conserved.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): time at which the computation should start. Use the
beginning of the time series data if set to None.
tend (float): time at which the computation should end. Use the
end of the time series data if set to None.
Returns:
tuple of :class:`numpy.array`: energy balance and time arrays. | entailment |
def mobility(sdat, tstart=None, tend=None):
"""Plates mobility.
Compute the ratio vsurf / vrms.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): time at which the computation should start. Use the
beginning of the time series data if set to None.
tend (float): time at which the computation should end. Use the
end of the time series data if set to None.
Returns:
tuple of :class:`numpy.array`: mobility and time arrays.
"""
tseries = sdat.tseries_between(tstart, tend)
steps = sdat.steps[tseries.index[0]:tseries.index[-1]]
time = []
mob = []
for step in steps.filter(rprof=True):
time.append(step.timeinfo['t'])
mob.append(step.rprof.iloc[-1].loc['vrms'] / step.timeinfo['vrms'])
return np.array(mob), np.array(time) | Plates mobility.
Compute the ratio vsurf / vrms.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): time at which the computation should start. Use the
beginning of the time series data if set to None.
tend (float): time at which the computation should end. Use the
end of the time series data if set to None.
Returns:
tuple of :class:`numpy.array`: mobility and time arrays. | entailment |
def r_edges(step):
"""Cell border.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the position of the bottom and top walls
of the cells. The two elements of the tuple are identical.
"""
rbot, rtop = misc.get_rbounds(step)
centers = step.rprof.loc[:, 'r'].values + rbot
# assume walls are mid-way between T-nodes
# could be T-nodes at center between walls
edges = (centers[:-1] + centers[1:]) / 2
edges = np.insert(edges, 0, rbot)
edges = np.append(edges, rtop)
return edges, edges | Cell border.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the position of the bottom and top walls
of the cells. The two elements of the tuple are identical. | entailment |
def _scale_prof(step, rprof, rad=None):
"""Scale profile to take sphericity into account"""
rbot, rtop = misc.get_rbounds(step)
if rbot == 0: # not spherical
return rprof
if rad is None:
rad = step.rprof['r'].values + rbot
return rprof * (2 * rad / (rtop + rbot))**2 | Scale profile to take sphericity into account | entailment |
def diff_prof(step):
"""Diffusion.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the diffusion and the radial position
at which it is evaluated.
"""
rbot, rtop = misc.get_rbounds(step)
rad = step.rprof['r'].values + rbot
tprof = step.rprof['Tmean'].values
diff = (tprof[:-1] - tprof[1:]) / (rad[1:] - rad[:-1])
# assume tbot = 1
diff = np.insert(diff, 0, (1 - tprof[0]) / (rad[0] - rbot))
# assume ttop = 0
diff = np.append(diff, tprof[-1] / (rtop - rad[-1]))
# actually computed at r_edges...
return diff, np.append(rad, rtop) | Diffusion.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the diffusion and the radial position
at which it is evaluated. | entailment |
def diffs_prof(step):
"""Scaled diffusion.
This computation takes sphericity into account if necessary.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the diffusion and the radial position
at which it is evaluated.
"""
diff, rad = diff_prof(step)
return _scale_prof(step, diff, rad), rad | Scaled diffusion.
This computation takes sphericity into account if necessary.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the diffusion and the radial position
at which it is evaluated. | entailment |
def energy_prof(step):
"""Energy flux.
This computation takes sphericity into account if necessary.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the energy flux and the radial position
at which it is evaluated.
"""
diff, rad = diffs_prof(step)
adv, _ = advts_prof(step)
return (diff + np.append(adv, 0)), rad | Energy flux.
This computation takes sphericity into account if necessary.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the energy flux and the radial position
at which it is evaluated. | entailment |
def advth(step):
"""Theoretical advection.
This compute the theoretical profile of total advection as function of
radius.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array` and None: the theoretical advection.
The second element of the tuple is None.
"""
rbot, rtop = misc.get_rbounds(step)
rmean = 0.5 * (rbot + rtop)
rad = step.rprof['r'].values + rbot
radio = step.timeinfo['H_int']
if rbot != 0: # spherical
th_adv = -(rtop**3 - rad**3) / rmean**2 / 3
else:
th_adv = rad - rtop
th_adv *= radio
th_adv += step.timeinfo['Nutop']
return th_adv, None | Theoretical advection.
This compute the theoretical profile of total advection as function of
radius.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array` and None: the theoretical advection.
The second element of the tuple is None. | entailment |
def init_c_overturn(step):
"""Initial concentration.
This compute the resulting composition profile if fractional
crystallization of a SMO is assumed.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the composition and the radial position
at which it is evaluated.
"""
rbot, rtop = misc.get_rbounds(step)
xieut = step.sdat.par['tracersin']['fe_eut']
k_fe = step.sdat.par['tracersin']['k_fe']
xi0l = step.sdat.par['tracersin']['fe_cont']
xi0s = k_fe * xi0l
xired = xi0l / xieut
rsup = (rtop**3 - xired**(1 / (1 - k_fe)) *
(rtop**3 - rbot**3))**(1 / 3)
def initprof(rpos):
"""Theoretical initial profile."""
if rpos < rsup:
return xi0s * ((rtop**3 - rbot**3) /
(rtop**3 - rpos**3))**(1 - k_fe)
return xieut
rad = np.linspace(rbot, rtop, 500)
initprof = np.vectorize(initprof)
return initprof(rad), rad | Initial concentration.
This compute the resulting composition profile if fractional
crystallization of a SMO is assumed.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the composition and the radial position
at which it is evaluated. | entailment |
def c_overturned(step):
"""Theoretical overturned concentration.
This compute the resulting composition profile if fractional
crystallization of a SMO is assumed and then a purely radial
overturn happens.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the composition and the radial position
at which it is evaluated.
"""
rbot, rtop = misc.get_rbounds(step)
cinit, rad = init_c_overturn(step)
radf = (rtop**3 + rbot**3 - rad**3)**(1 / 3)
return cinit, radf | Theoretical overturned concentration.
This compute the resulting composition profile if fractional
crystallization of a SMO is assumed and then a purely radial
overturn happens.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the composition and the radial position
at which it is evaluated. | entailment |
def stream_function(step):
"""Stream function.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
:class:`numpy.array`: the stream function field, with four dimensions:
x-direction, y-direction, z-direction and block.
"""
if step.geom.twod_yz:
x_coord = step.geom.y_coord
v_x = step.fields['v2'][0, :, :, 0]
v_z = step.fields['v3'][0, :, :, 0]
shape = (1, v_x.shape[0], v_x.shape[1], 1)
elif step.geom.twod_xz and step.geom.cartesian:
x_coord = step.geom.x_coord
v_x = step.fields['v1'][:, 0, :, 0]
v_z = step.fields['v3'][:, 0, :, 0]
shape = (v_x.shape[0], 1, v_x.shape[1], 1)
else:
raise NotAvailableError('Stream function only implemented in '
'2D cartesian and spherical annulus')
psi = np.zeros_like(v_x)
if step.geom.spherical: # YZ annulus
# positions
r_nc = step.geom.r_coord + step.geom.rcmb # numerical centers
r_pc = step.geom.r_mesh[0, 0, :] # physical centers
r_nw = r_edges(step)[0][:2] # numerical walls of first cell
# vz at center of bottom cells
vz0 = ((r_nw[1] - r_nc[0]) * v_z[:, 0] +
(r_nc[0] - r_nw[0]) * v_z[:, 1]) / (r_nw[1] - r_nw[0])
psi[1:, 0] = -integrate.cumtrapz(r_pc[0]**2 * vz0, x=x_coord)
# vx at center
vxc = (v_x + np.roll(v_x, -1, axis=0)) / 2
for i_x in range(len(x_coord)):
psi[i_x, 1:] = psi[i_x, 0] + \
integrate.cumtrapz(r_pc * vxc[i_x], x=r_nc)
else: # assume cartesian geometry
z_nc = step.geom.z_coord
z_nw = r_edges(step)[0][:2]
vz0 = ((z_nw[1] - z_nc[0]) * v_z[:, 0] +
(z_nc[0] - z_nw[0]) * v_z[:, 1]) / (z_nw[1] - z_nw[0])
psi[1:, 0] = -integrate.cumtrapz(vz0, x=x_coord)
# vx at center
vxc = (v_x + np.roll(v_x, -1, axis=0)) / 2
for i_x in range(len(x_coord)):
psi[i_x, 1:] = psi[i_x, 0] + \
integrate.cumtrapz(vxc[i_x], x=z_nc)
if step.geom.twod_xz:
psi = - psi
psi = np.reshape(psi, shape)
return psi | Stream function.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
:class:`numpy.array`: the stream function field, with four dimensions:
x-direction, y-direction, z-direction and block. | entailment |
def last(self):
"""Last time step available.
Example:
>>> sdat = StagyyData('path/to/run')
>>> assert(sdat.steps.last is sdat.steps[-1])
"""
if self._last is UNDETERMINED:
# not necessarily the last one...
self._last = self.sdat.tseries.index[-1]
return self[self._last] | Last time step available.
Example:
>>> sdat = StagyyData('path/to/run')
>>> assert(sdat.steps.last is sdat.steps[-1]) | entailment |
def bind(self, isnap, istep):
"""Register the isnap / istep correspondence.
Users of :class:`StagyyData` should not use this method.
Args:
isnap (int): snapshot index.
istep (int): time step index.
"""
self._isteps[isnap] = istep
self.sdat.steps[istep].isnap = isnap | Register the isnap / istep correspondence.
Users of :class:`StagyyData` should not use this method.
Args:
isnap (int): snapshot index.
istep (int): time step index. | entailment |
def _pass(self, step):
"""Check whether a :class:`~stagpy._step.Step` passes the filters."""
okf = True
okf = okf and (not self._flt['snap'] or step.isnap is not None)
okf = okf and (not self._flt['rprof'] or step.rprof is not None)
okf = okf and all(f in step.fields for f in self._flt['fields'])
okf = okf and bool(self._flt['func'](step))
return okf | Check whether a :class:`~stagpy._step.Step` passes the filters. | entailment |
def filter(self, **filters):
"""Update filters with provided arguments.
Note that filters are only resolved when the view is iterated, and
hence they do not compose. Each call to filter merely updates the
relevant filters. For example, with this code::
view = sdat.steps[500:].filter(rprof=True, fields=['T'])
view.filter(fields=[])
the produced ``view``, when iterated, will generate the steps after the
500-th that have radial profiles. The ``fields`` filter set in the
first line is emptied in the second line.
Args:
snap (bool): the step must be a snapshot to pass.
rprof (bool): the step must have rprof data to pass.
fields (list): list of fields that must be present to pass.
func (function): arbitrary function taking a
:class:`~stagpy._step.Step` as argument and returning a True
value if the step should pass the filter.
Returns:
self.
"""
for flt, val in self._flt.items():
self._flt[flt] = filters.pop(flt, val)
if filters:
raise error.UnknownFiltersError(filters.keys())
return self | Update filters with provided arguments.
Note that filters are only resolved when the view is iterated, and
hence they do not compose. Each call to filter merely updates the
relevant filters. For example, with this code::
view = sdat.steps[500:].filter(rprof=True, fields=['T'])
view.filter(fields=[])
the produced ``view``, when iterated, will generate the steps after the
500-th that have radial profiles. The ``fields`` filter set in the
first line is emptied in the second line.
Args:
snap (bool): the step must be a snapshot to pass.
rprof (bool): the step must have rprof data to pass.
fields (list): list of fields that must be present to pass.
func (function): arbitrary function taking a
:class:`~stagpy._step.Step` as argument and returning a True
value if the step should pass the filter.
Returns:
self. | entailment |
def hdf5(self):
"""Path of output hdf5 folder if relevant, None otherwise."""
if self._rundir['hdf5'] is UNDETERMINED:
h5_folder = self.path / self.par['ioin']['hdf5_output_folder']
if (h5_folder / 'Data.xmf').is_file():
self._rundir['hdf5'] = h5_folder
else:
self._rundir['hdf5'] = None
return self._rundir['hdf5'] | Path of output hdf5 folder if relevant, None otherwise. | entailment |
def tseries(self):
"""Time series data.
This is a :class:`pandas.DataFrame` with istep as index and variable
names as columns.
"""
if self._stagdat['tseries'] is UNDETERMINED:
timefile = self.filename('TimeSeries.h5')
self._stagdat['tseries'] = stagyyparsers.time_series_h5(
timefile, list(phyvars.TIME.keys()))
if self._stagdat['tseries'] is not None:
return self._stagdat['tseries']
timefile = self.filename('time.dat')
if self.hdf5 and not timefile.is_file():
# check legacy folder as well
timefile = self.filename('time.dat', force_legacy=True)
self._stagdat['tseries'] = stagyyparsers.time_series(
timefile, list(phyvars.TIME.keys()))
return self._stagdat['tseries'] | Time series data.
This is a :class:`pandas.DataFrame` with istep as index and variable
names as columns. | entailment |
def files(self):
"""Set of found binary files output by StagYY."""
if self._rundir['ls'] is UNDETERMINED:
out_stem = pathlib.Path(self.par['ioin']['output_file_stem'] + '_')
out_dir = self.path / out_stem.parent
if out_dir.is_dir():
self._rundir['ls'] = set(out_dir.iterdir())
else:
self._rundir['ls'] = set()
return self._rundir['ls'] | Set of found binary files output by StagYY. | entailment |
def walk(self):
"""Return view on configured steps slice.
Other Parameters:
conf.core.snapshots: the slice of snapshots.
conf.core.timesteps: the slice of timesteps.
"""
if conf.core.snapshots is not None:
return self.snaps[conf.core.snapshots]
elif conf.core.timesteps is not None:
return self.steps[conf.core.timesteps]
return self.snaps[-1:] | Return view on configured steps slice.
Other Parameters:
conf.core.snapshots: the slice of snapshots.
conf.core.timesteps: the slice of timesteps. | entailment |
def scale(self, data, unit):
"""Scales quantity to obtain dimensionful quantity.
Args:
data (numpy.array): the quantity that should be scaled.
dim (str): the dimension of data as defined in phyvars.
Return:
(float, str): scaling factor and unit string.
Other Parameters:
conf.scaling.dimensional: if set to False (default), the factor is
always 1.
"""
if self.par['switches']['dimensional_units'] or \
not conf.scaling.dimensional or \
unit == '1':
return data, ''
scaling = phyvars.SCALES[unit](self.scales)
factor = conf.scaling.factors.get(unit, ' ')
if conf.scaling.time_in_y and unit == 's':
scaling /= conf.scaling.yearins
unit = 'yr'
elif conf.scaling.vel_in_cmpy and unit == 'm/s':
scaling *= 100 * conf.scaling.yearins
unit = 'cm/y'
if factor in phyvars.PREFIXES:
scaling *= 10**(-3 * (phyvars.PREFIXES.index(factor) + 1))
unit = factor + unit
return data * scaling, unit | Scales quantity to obtain dimensionful quantity.
Args:
data (numpy.array): the quantity that should be scaled.
dim (str): the dimension of data as defined in phyvars.
Return:
(float, str): scaling factor and unit string.
Other Parameters:
conf.scaling.dimensional: if set to False (default), the factor is
always 1. | entailment |
def tseries_between(self, tstart=None, tend=None):
"""Return time series data between requested times.
Args:
tstart (float): starting time. Set to None to start at the
beginning of available data.
tend (float): ending time. Set to None to stop at the end of
available data.
Returns:
:class:`pandas.DataFrame`: slice of :attr:`tseries`.
"""
if self.tseries is None:
return None
ndat = self.tseries.shape[0]
if tstart is None:
istart = 0
else:
igm = 0
igp = ndat - 1
while igp - igm > 1:
istart = igm + (igp - igm) // 2
if self.tseries.iloc[istart]['t'] >= tstart:
igp = istart
else:
igm = istart
istart = igp
if tend is None:
iend = None
else:
igm = 0
igp = ndat - 1
while igp - igm > 1:
iend = igm + (igp - igm) // 2
if self.tseries.iloc[iend]['t'] > tend:
igp = iend
else:
igm = iend
iend = igm + 1
return self.tseries.iloc[istart:iend] | Return time series data between requested times.
Args:
tstart (float): starting time. Set to None to start at the
beginning of available data.
tend (float): ending time. Set to None to stop at the end of
available data.
Returns:
:class:`pandas.DataFrame`: slice of :attr:`tseries`. | entailment |
def filename(self, fname, timestep=None, suffix='', force_legacy=False):
"""Return name of StagYY output file.
Args:
fname (str): name stem.
timestep (int): snapshot number, set to None if this is not
relevant.
suffix (str): optional suffix of file name.
force_legacy (bool): force returning the legacy output path.
Returns:
:class:`pathlib.Path`: the path of the output file constructed
with the provided segments.
"""
if timestep is not None:
fname += '{:05d}'.format(timestep)
fname += suffix
if not force_legacy and self.hdf5:
fpath = self.hdf5 / fname
else:
fpath = self.par['ioin']['output_file_stem'] + '_' + fname
fpath = self.path / fpath
return fpath | Return name of StagYY output file.
Args:
fname (str): name stem.
timestep (int): snapshot number, set to None if this is not
relevant.
suffix (str): optional suffix of file name.
force_legacy (bool): force returning the legacy output path.
Returns:
:class:`pathlib.Path`: the path of the output file constructed
with the provided segments. | entailment |
def binfiles_set(self, isnap):
"""Set of existing binary files at a given snap.
Args:
isnap (int): snapshot index.
Returns:
set of pathlib.Path: the set of output files available for this
snapshot number.
"""
possible_files = set(self.filename(fstem, isnap, force_legacy=True)
for fstem in phyvars.FIELD_FILES)
return possible_files & self.files | Set of existing binary files at a given snap.
Args:
isnap (int): snapshot index.
Returns:
set of pathlib.Path: the set of output files available for this
snapshot number. | entailment |
def _tidy_names(names, nnames, extra_names=None):
"""Truncate or extend names so that its len is nnames.
The list is modified, this function returns nothing.
Args:
names (list): list of names.
nnames (int): desired number of names.
extra_names (list of str): list of names to be used to extend the list
if needed. If this list isn't provided, a range is used instead.
"""
if len(names) < nnames and extra_names is not None:
names.extend(extra_names)
names.extend(range(nnames - len(names)))
del names[nnames:] | Truncate or extend names so that its len is nnames.
The list is modified, this function returns nothing.
Args:
names (list): list of names.
nnames (int): desired number of names.
extra_names (list of str): list of names to be used to extend the list
if needed. If this list isn't provided, a range is used instead. | entailment |
def time_series(timefile, colnames):
"""Read temporal series text file.
If :data:`colnames` is too long, it will be truncated. If it is too short,
additional numeric column names from 0 to N-1 will be attributed to the N
extra columns present in :data:`timefile`.
Args:
timefile (:class:`pathlib.Path`): path of the time.dat file.
colnames (list of names): names of the variables expected in
:data:`timefile` (may be modified).
Returns:
:class:`pandas.DataFrame`:
Time series, with the variables in columns and the time steps in
rows.
"""
if not timefile.is_file():
return None
data = pd.read_csv(timefile, delim_whitespace=True, dtype=str,
header=None, skiprows=1, index_col=0,
engine='c', memory_map=True,
error_bad_lines=False, warn_bad_lines=False)
data = data.apply(pd.to_numeric, raw=True, errors='coerce')
# detect useless lines produced when run is restarted
rows_to_del = []
irow = len(data) - 1
while irow > 0:
iprev = irow - 1
while iprev >= 0 and data.index[irow] <= data.index[iprev]:
rows_to_del.append(iprev)
iprev -= 1
irow = iprev
if rows_to_del:
rows_to_keep = set(range(len(data))) - set(rows_to_del)
data = data.take(list(rows_to_keep), convert=False)
ncols = data.shape[1]
_tidy_names(colnames, ncols)
data.columns = colnames
return data | Read temporal series text file.
If :data:`colnames` is too long, it will be truncated. If it is too short,
additional numeric column names from 0 to N-1 will be attributed to the N
extra columns present in :data:`timefile`.
Args:
timefile (:class:`pathlib.Path`): path of the time.dat file.
colnames (list of names): names of the variables expected in
:data:`timefile` (may be modified).
Returns:
:class:`pandas.DataFrame`:
Time series, with the variables in columns and the time steps in
rows. | entailment |
def time_series_h5(timefile, colnames):
"""Read temporal series HDF5 file.
If :data:`colnames` is too long, it will be truncated. If it is too short,
additional column names will be deduced from the content of the file.
Args:
timefile (:class:`pathlib.Path`): path of the TimeSeries.h5 file.
colnames (list of names): names of the variables expected in
:data:`timefile` (may be modified).
Returns:
:class:`pandas.DataFrame`:
Time series, with the variables in columns and the time steps in
rows.
"""
if not timefile.is_file():
return None
with h5py.File(timefile, 'r') as h5f:
dset = h5f['tseries']
_, ncols = dset.shape
ncols -= 1 # first is istep
h5names = map(bytes.decode, h5f['names'][len(colnames) + 1:])
_tidy_names(colnames, ncols, h5names)
data = dset[()]
return pd.DataFrame(data[:, 1:],
index=np.int_(data[:, 0]), columns=colnames) | Read temporal series HDF5 file.
If :data:`colnames` is too long, it will be truncated. If it is too short,
additional column names will be deduced from the content of the file.
Args:
timefile (:class:`pathlib.Path`): path of the TimeSeries.h5 file.
colnames (list of names): names of the variables expected in
:data:`timefile` (may be modified).
Returns:
:class:`pandas.DataFrame`:
Time series, with the variables in columns and the time steps in
rows. | entailment |
def _extract_rsnap_isteps(rproffile):
"""Extract istep and compute list of rows to delete"""
step_regex = re.compile(r'^\*+step:\s*(\d+) ; time =\s*(\S+)')
isteps = [] # list of (istep, time, nz)
rows_to_del = set()
line = ' '
with rproffile.open() as stream:
while line[0] != '*':
line = stream.readline()
match = step_regex.match(line)
istep = int(match.group(1))
time = float(match.group(2))
nlines = 0
iline = 0
for line in stream:
if line[0] == '*':
isteps.append((istep, time, nlines))
match = step_regex.match(line)
istep = int(match.group(1))
time = float(match.group(2))
nlines = 0
# remove useless lines produced when run is restarted
nrows_to_del = 0
while isteps and istep <= isteps[-1][0]:
nrows_to_del += isteps.pop()[-1]
rows_to_del = rows_to_del.union(
range(iline - nrows_to_del, iline))
else:
nlines += 1
iline += 1
isteps.append((istep, time, nlines))
return isteps, rows_to_del | Extract istep and compute list of rows to delete | entailment |
def rprof(rproffile, colnames):
"""Extract radial profiles data
If :data:`colnames` is too long, it will be truncated. If it is too short,
additional numeric column names from 0 to N-1 will be attributed to the N
extra columns present in :data:`timefile`.
Args:
rproffile (:class:`pathlib.Path`): path of the rprof.dat file.
colnames (list of names): names of the variables expected in
:data:`rproffile` (may be modified).
Returns:
tuple of :class:`pandas.DataFrame`: (profs, times)
:data:`profs` are the radial profiles, with the variables in
columns and rows double-indexed with the time step and the radial
index of numerical cells.
:data:`times` is the dimensionless time indexed by time steps.
"""
if not rproffile.is_file():
return None, None
data = pd.read_csv(rproffile, delim_whitespace=True, dtype=str,
header=None, comment='*', skiprows=1,
engine='c', memory_map=True,
error_bad_lines=False, warn_bad_lines=False)
data = data.apply(pd.to_numeric, raw=True, errors='coerce')
isteps, rows_to_del = _extract_rsnap_isteps(rproffile)
if rows_to_del:
rows_to_keep = set(range(len(data))) - rows_to_del
data = data.take(list(rows_to_keep), convert=False)
id_arr = [[], []]
for istep, _, n_z in isteps:
id_arr[0].extend(repeat(istep, n_z))
id_arr[1].extend(range(n_z))
data.index = id_arr
ncols = data.shape[1]
_tidy_names(colnames, ncols)
data.columns = colnames
df_times = pd.DataFrame(list(map(itemgetter(1), isteps)),
index=map(itemgetter(0), isteps))
return data, df_times | Extract radial profiles data
If :data:`colnames` is too long, it will be truncated. If it is too short,
additional numeric column names from 0 to N-1 will be attributed to the N
extra columns present in :data:`timefile`.
Args:
rproffile (:class:`pathlib.Path`): path of the rprof.dat file.
colnames (list of names): names of the variables expected in
:data:`rproffile` (may be modified).
Returns:
tuple of :class:`pandas.DataFrame`: (profs, times)
:data:`profs` are the radial profiles, with the variables in
columns and rows double-indexed with the time step and the radial
index of numerical cells.
:data:`times` is the dimensionless time indexed by time steps. | entailment |
def rprof_h5(rproffile, colnames):
"""Extract radial profiles data
If :data:`colnames` is too long, it will be truncated. If it is too short,
additional column names will be deduced from the content of the file.
Args:
rproffile (:class:`pathlib.Path`): path of the rprof.dat file.
colnames (list of names): names of the variables expected in
:data:`rproffile`.
Returns:
tuple of :class:`pandas.DataFrame`: (profs, times)
:data:`profs` are the radial profiles, with the variables in
columns and rows double-indexed with the time step and the radial
index of numerical cells.
:data:`times` is the dimensionless time indexed by time steps.
"""
if not rproffile.is_file():
return None, None
isteps = []
with h5py.File(rproffile) as h5f:
dnames = sorted(dname for dname in h5f.keys()
if dname.startswith('rprof_'))
ncols = h5f['names'].shape[0]
h5names = map(bytes.decode, h5f['names'][len(colnames):])
_tidy_names(colnames, ncols, h5names)
data = np.zeros((0, ncols))
for dname in dnames:
dset = h5f[dname]
data = np.concatenate((data, dset[()]))
isteps.append((dset.attrs['istep'], dset.attrs['time'],
dset.shape[0]))
id_arr = [[], []]
for istep, _, n_z in isteps:
id_arr[0].extend(repeat(istep, n_z))
id_arr[1].extend(range(n_z))
df_data = pd.DataFrame(data, index=id_arr, columns=colnames)
df_times = pd.DataFrame(list(map(itemgetter(1), isteps)),
index=map(itemgetter(0), isteps))
return df_data, df_times | Extract radial profiles data
If :data:`colnames` is too long, it will be truncated. If it is too short,
additional column names will be deduced from the content of the file.
Args:
rproffile (:class:`pathlib.Path`): path of the rprof.dat file.
colnames (list of names): names of the variables expected in
:data:`rproffile`.
Returns:
tuple of :class:`pandas.DataFrame`: (profs, times)
:data:`profs` are the radial profiles, with the variables in
columns and rows double-indexed with the time step and the radial
index of numerical cells.
:data:`times` is the dimensionless time indexed by time steps. | entailment |
def _readbin(fid, fmt='i', nwords=1, file64=False, unpack=True):
"""Read n words of 4 or 8 bytes with fmt format.
fmt: 'i' or 'f' or 'b' (integer or float or bytes)
4 or 8 bytes: depends on header
Return an array of elements if more than one element.
Default: read 1 word formatted as an integer.
"""
if fmt in 'if':
fmt += '8' if file64 else '4'
elts = np.fromfile(fid, fmt, nwords)
if unpack and len(elts) == 1:
elts = elts[0]
return elts | Read n words of 4 or 8 bytes with fmt format.
fmt: 'i' or 'f' or 'b' (integer or float or bytes)
4 or 8 bytes: depends on header
Return an array of elements if more than one element.
Default: read 1 word formatted as an integer. | entailment |
def fields(fieldfile, only_header=False, only_istep=False):
    """Extract fields data.

    Args:
        fieldfile (:class:`pathlib.Path`): path of the binary field file.
        only_header (bool): when True (and :data:`only_istep` is False), only
            :data:`header` is returned.
        only_istep (bool): when True, only :data:`istep` is returned.
    Returns:
        depends on flags.: :obj:`int`: istep
            If :data:`only_istep` is True, this function returns the time step
            at which the binary file was written.
        :obj:`dict`: header
            Else, if :data:`only_header` is True, this function returns a dict
            containing the header informations of the binary file.
        :class:`numpy.array`: fields
            Else, this function returns the tuple :data:`(header, fields)`.
            :data:`fields` is an array of scalar fields indexed by variable,
            x-direction, y-direction, z-direction, block.
            None is returned when the file does not exist.
    """
    # something to skip header?
    if not fieldfile.is_file():
        return None
    header = {}
    with fieldfile.open('rb') as fid:
        readbin = partial(_readbin, fid)
        # the magic number encodes word width, number of vector
        # components and file format version
        magic = readbin()
        if magic > 8000:  # 64 bits
            magic -= 8000
            readbin()  # need to read 4 more bytes
            readbin = partial(readbin, file64=True)
        # check nb components
        nval = 1
        if magic > 400:
            nval = 4
        elif magic > 300:
            nval = 3
        # remaining magic is the format version number
        magic %= 100
        # extra ghost point in horizontal direction
        header['xyp'] = int(magic >= 9 and nval == 4)
        # total number of values in relevant space basis
        # (e1, e2, e3) = (theta, phi, radius) in spherical geometry
        #              = (x, y, z) in cartesian geometry
        header['nts'] = readbin(nwords=3)
        # number of blocks, 2 for yinyang or cubed sphere
        header['ntb'] = readbin() if magic >= 7 else 1
        # aspect ratio
        header['aspect'] = readbin('f', 2)
        # number of parallel subdomains
        header['ncs'] = readbin(nwords=3)  # (e1, e2, e3) space
        header['ncb'] = readbin() if magic >= 8 else 1  # blocks
        # r - coordinates
        # rgeom[0:self.nrtot+1, 0] are edge radial position
        # rgeom[0:self.nrtot, 1] are cell-center radial position
        if magic >= 2:
            header['rgeom'] = readbin('f', header['nts'][2] * 2 + 1)
        else:
            # old formats: rebuild a regular radial grid
            header['rgeom'] = np.array(range(0, header['nts'][2] * 2 + 1))\
                * 0.5 / header['nts'][2]
        header['rgeom'] = np.resize(header['rgeom'], (header['nts'][2] + 1, 2))
        header['rcmb'] = readbin('f') if magic >= 7 else None
        header['ti_step'] = readbin() if magic >= 3 else 0
        if only_istep:
            return header['ti_step']
        header['ti_ad'] = readbin('f') if magic >= 3 else 0
        header['erupta_total'] = readbin('f') if magic >= 5 else 0
        header['bot_temp'] = readbin('f') if magic >= 6 else 1
        if magic >= 4:
            header['e1_coord'] = readbin('f', header['nts'][0])
            header['e2_coord'] = readbin('f', header['nts'][1])
            header['e3_coord'] = readbin('f', header['nts'][2])
        else:
            # could construct them from other info
            raise ParsingError(fieldfile,
                               'magic >= 4 expected to get grid geometry')
        if only_header:
            return header
        # READ FIELDS
        # number of points in (e1, e2, e3) directions PER CPU
        npc = header['nts'] // header['ncs']
        # number of blocks per cpu
        nbk = header['ntb'] // header['ncb']
        # number of values per 'read' block
        npi = (npc[0] + header['xyp']) * (npc[1] + header['xyp']) * npc[2] * \
            nbk * nval
        header['scalefac'] = readbin('f') if nval > 1 else 1
        flds = np.zeros((nval,
                         header['nts'][0] + header['xyp'],
                         header['nts'][1] + header['xyp'],
                         header['nts'][2],
                         header['ntb']))
        # loop over parallel subdomains
        for icpu in product(range(header['ncb']),
                            range(header['ncs'][2]),
                            range(header['ncs'][1]),
                            range(header['ncs'][0])):
            # read the data for one CPU
            data_cpu = readbin('f', npi) * header['scalefac']
            # icpu is (icpu block, icpu z, icpu y, icpu x)
            # data from file is transposed to obtained a field
            # array indexed with (x, y, z, block), as in StagYY
            flds[:,
                 icpu[3] * npc[0]:(icpu[3] + 1) * npc[0] + header['xyp'],  # x
                 icpu[2] * npc[1]:(icpu[2] + 1) * npc[1] + header['xyp'],  # y
                 icpu[1] * npc[2]:(icpu[1] + 1) * npc[2],  # z
                 icpu[0] * nbk:(icpu[0] + 1) * nbk  # block
                 ] = np.transpose(data_cpu.reshape(
                     (nbk, npc[2], npc[1] + header['xyp'],
                      npc[0] + header['xyp'], nval)))
    return header, flds
Args:
fieldfile (:class:`pathlib.Path`): path of the binary field file.
only_header (bool): when True (and :data:`only_istep` is False), only
:data:`header` is returned.
only_istep (bool): when True, only :data:`istep` is returned.
Returns:
depends on flags.: :obj:`int`: istep
If :data:`only_istep` is True, this function returns the time step
at which the binary file was written.
:obj:`dict`: header
Else, if :data:`only_header` is True, this function returns a dict
containing the header informations of the binary file.
:class:`numpy.array`: fields
Else, this function returns the tuple :data:`(header, fields)`.
:data:`fields` is an array of scalar fields indexed by variable,
x-direction, y-direction, z-direction, block. | entailment |
def tracers(tracersfile):
    """Extract tracers data.

    Args:
        tracersfile (:class:`pathlib.Path`): path of the binary tracers file.
    Returns:
        dict of list of numpy.array:
            Tracers data organized by attribute and block. None is
            returned when the file does not exist.
    """
    if not tracersfile.is_file():
        return None
    tra = {}
    with tracersfile.open('rb') as fid:
        readbin = partial(_readbin, fid)
        magic = readbin()
        if magic > 8000:  # 64 bits
            magic -= 8000
            readbin()  # extra 4 bytes to skip for 64 bits files
            readbin = partial(readbin, file64=True)
        if magic < 100:
            raise ParsingError(tracersfile,
                               'magic > 100 expected to get tracervar info')
        nblk = magic % 100  # number of blocks (2 for yinyang)
        readbin('f', 2)  # aspect ratio
        readbin()  # istep
        readbin('f')  # time
        ninfo = readbin()  # number of recorded tracer attributes
        ntra = readbin(nwords=nblk, unpack=False)  # tracer count per block
        readbin('f')  # tracer ideal mass
        curv = readbin()  # flag: curvilinear geometry
        if curv:
            readbin('f')  # r_cmb
        infos = []  # list of info names
        for _ in range(ninfo):
            # attribute names are stored as 16 space-padded bytes
            # NOTE(review): b''.join over the elements of a 'b'-typed numpy
            # array assumes they behave as bytes -- confirm with the numpy
            # version in use.
            infos.append(b''.join(readbin('b', 16)).strip().decode())
            tra[infos[-1]] = []
        if magic > 200:
            ntrace_elt = readbin()
            if ntrace_elt > 0:
                readbin('f', ntrace_elt)  # outgassed
        for ntrab in ntra:  # blocks
            # attribute values are interleaved, cycling every ninfo words
            data = readbin('f', ntrab * ninfo)
            for idx, info in enumerate(infos):
                tra[info].append(data[idx::ninfo])
    return tra
Args:
tracersfile (:class:`pathlib.Path`): path of the binary tracers file.
Returns:
dict of list of numpy.array:
Tracers data organized by attribute and block. | entailment |
def _read_group_h5(filename, groupname):
    """Read the full content of a group from an hdf5 file.

    Args:
        filename (:class:`pathlib.Path`): path of hdf5 file.
        groupname (str): name of group to read.
    Returns:
        :class:`numpy.array`: content of group.
    """
    with h5py.File(filename, 'r') as h5f:
        return h5f[groupname][()]
Args:
filename (:class:`pathlib.Path`): path of hdf5 file.
groupname (str): name of group to read.
Returns:
:class:`numpy.array`: content of group. | entailment |
def _make_3d(field, twod):
"""Add a dimension to field if necessary.
Args:
field (numpy.array): the field that need to be 3d.
twod (str): 'XZ', 'YZ' or None depending on what is relevant.
Returns:
numpy.array: reshaped field.
"""
shp = list(field.shape)
if twod and 'X' in twod:
shp.insert(1, 1)
elif twod:
shp.insert(0, 1)
return field.reshape(shp) | Add a dimension to field if necessary.
Args:
field (numpy.array): the field that need to be 3d.
twod (str): 'XZ', 'YZ' or None depending on what is relevant.
Returns:
numpy.array: reshaped field. | entailment |
def _ncores(meshes, twod):
    """Compute number of nodes in each direction.

    The parallel decomposition is detected by comparing mesh edges:
    a subdomain is the neighbor of the previous one along a direction
    when its first coordinate equals the last coordinate of that
    previous subdomain.

    Args:
        meshes (list of dict): per-core coordinate meshes keyed by
            'X', 'Y' and 'Z'.
        twod (str): 'XZ', 'YZ' or None depending on what is relevant.
    Returns:
        numpy.array: number of cores along each of the three directions.
    """
    nnpb = len(meshes)  # number of nodes per block
    nns = [1, 1, 1]  # number of nodes in x, y, z directions
    if twod is None or 'X' in twod:
        # count cores along x by walking consecutive subdomains
        while (nnpb > 1 and
               meshes[nns[0]]['X'][0, 0, 0] ==
               meshes[nns[0] - 1]['X'][-1, 0, 0]):
            nns[0] += 1
            nnpb -= 1
    # map a core index along y to its flat index in meshes
    cpu = lambda icy: icy * nns[0]
    if twod is None or 'Y' in twod:
        while (nnpb > 1 and
               meshes[cpu(nns[1])]['Y'][0, 0, 0] ==
               meshes[cpu(nns[1] - 1)]['Y'][0, -1, 0]):
            nns[1] += 1
            nnpb -= nns[0]
    # map a core index along z to its flat index in meshes
    cpu = lambda icz: icz * nns[0] * nns[1]
    while (nnpb > 1 and
           meshes[cpu(nns[2])]['Z'][0, 0, 0] ==
           meshes[cpu(nns[2] - 1)]['Z'][0, 0, -1]):
        nns[2] += 1
        nnpb -= nns[0] * nns[1]
    return np.array(nns)
def _conglomerate_meshes(meshin, header):
"""Conglomerate meshes from several cores into one."""
meshout = {}
npc = header['nts'] // header['ncs']
shp = [val + 1 if val != 1 else 1 for val in header['nts']]
x_p = int(shp[0] != 1)
y_p = int(shp[1] != 1)
for coord in meshin[0]:
meshout[coord] = np.zeros(shp)
for icore in range(np.prod(header['ncs'])):
ifs = [icore // np.prod(header['ncs'][:i]) % header['ncs'][i] * npc[i]
for i in range(3)]
for coord, mesh in meshin[icore].items():
meshout[coord][ifs[0]:ifs[0] + npc[0] + x_p,
ifs[1]:ifs[1] + npc[1] + y_p,
ifs[2]:ifs[2] + npc[2] + 1] = mesh
return meshout | Conglomerate meshes from several cores into one. | entailment |
def _read_coord_h5(files, shapes, header, twod):
    """Read all coord hdf5 files of a snapshot.

    Fills header in place with the grid geometry: 'ncs', 'nts',
    coordinate arrays, meshes, 'aspect' and 'rcmb'.

    Args:
        files (list of pathlib.Path): list of NodeCoordinates files of
            a snapshot.
        shapes (list of (int,int)): shape of mesh grids.
        header (dict): geometry info.
        twod (str): 'XZ', 'YZ' or None depending on what is relevant.
    """
    meshes = []
    for h5file, shape in zip(files, shapes):
        meshes.append({})
        with h5py.File(h5file, 'r') as h5f:
            for coord, mesh in h5f.items():
                # for some reason, the array is transposed!
                meshes[-1][coord] = mesh[()].reshape(shape).T
                meshes[-1][coord] = _make_3d(meshes[-1][coord], twod)
    header['ncs'] = _ncores(meshes, twod)
    header['nts'] = list((meshes[0]['X'].shape[i] - 1) * header['ncs'][i]
                         for i in range(3))
    header['nts'] = np.array([max(1, val) for val in header['nts']])
    # meshes could also be defined in legacy parser, so that these can be used
    # in geometry setup
    meshes = _conglomerate_meshes(meshes, header)
    if np.any(meshes['Z'][:, :, 0] != 0):
        # spherical
        header['x_mesh'] = np.copy(meshes['Y'])  # annulus geometry...
        header['y_mesh'] = np.copy(meshes['Z'])
        header['z_mesh'] = np.copy(meshes['X'])
        header['r_mesh'] = np.sqrt(header['x_mesh']**2 + header['y_mesh']**2 +
                                   header['z_mesh']**2)
        header['t_mesh'] = np.arccos(header['z_mesh'] / header['r_mesh'])
        header['p_mesh'] = np.roll(
            np.arctan2(header['y_mesh'], -header['x_mesh']) + np.pi, -1, 1)
        header['e1_coord'] = header['t_mesh'][:, 0, 0]
        header['e2_coord'] = header['p_mesh'][0, :, 0]
        header['e3_coord'] = header['r_mesh'][0, 0, :]
    else:
        header['e1_coord'] = meshes['X'][:, 0, 0]
        header['e2_coord'] = meshes['Y'][0, :, 0]
        header['e3_coord'] = meshes['Z'][0, 0, :]
    # NOTE(review): both aspect components are e1_coord[-1] - e2_coord[0];
    # check whether (e1[-1] - e1[0], e2[-1] - e2[0]) was intended here.
    header['aspect'] = (header['e1_coord'][-1] - header['e2_coord'][0],
                        header['e1_coord'][-1] - header['e2_coord'][0])
    header['rcmb'] = header['e3_coord'][0]
    if header['rcmb'] == 0:
        # cartesian geometry is flagged with a negative rcmb
        header['rcmb'] = -1
    else:
        # could make the difference between r_coord and z_coord
        header['e3_coord'] = header['e3_coord'] - header['rcmb']
    # drop the extra node along each non-degenerate horizontal direction
    if twod is None or 'X' in twod:
        header['e1_coord'] = header['e1_coord'][:-1]
    if twod is None or 'Y' in twod:
        header['e2_coord'] = header['e2_coord'][:-1]
    header['e3_coord'] = header['e3_coord'][:-1]
Args:
files (list of pathlib.Path): list of NodeCoordinates files of
a snapshot.
shapes (list of (int,int)): shape of mesh grids.
header (dict): geometry info.
twod (str): 'XZ', 'YZ' or None depending on what is relevant. | entailment |
def _get_field(xdmf_file, data_item):
    """Extract a field array from an xdmf DataItem element.

    Returns:
        (int, numpy.array): zero-based core index and field content.
    """
    shape = _get_dim(data_item)
    h5file, group = data_item.text.strip().split(':/', 1)
    # group names end with ..._<core>_<something>; cores are 1-based
    core = int(group.split('_')[-2]) - 1
    field = _read_group_h5(xdmf_file.parent / h5file, group)
    return core, field.reshape(shape)
def _maybe_get(elt, item, info, conversion=None):
"""Extract and convert info if item is present."""
maybe_item = elt.find(item)
if maybe_item is not None:
maybe_item = maybe_item.get(info)
if conversion is not None:
maybe_item = conversion(maybe_item)
return maybe_item | Extract and convert info if item is present. | entailment |
def read_geom_h5(xdmf_file, snapshot):
    """Extract geometry information from hdf5 files.

    Args:
        xdmf_file (:class:`pathlib.Path`): path of the xdmf file.
        snapshot (int): snapshot number.
    Returns:
        (dict, root): geometry information and root of xdmf document.
            The dict is None when snapshot is None.
    """
    header = {}
    xdmf_root = xmlET.parse(str(xdmf_file)).getroot()
    if snapshot is None:
        return None, xdmf_root
    # Domain, Temporal Collection, Snapshot
    # should check that this is indeed the required snapshot
    elt_snap = xdmf_root[0][0][snapshot]
    header['ti_ad'] = float(elt_snap.find('Time').get('Value'))
    header['mo_lambda'] = _maybe_get(elt_snap, 'mo_lambda', 'Value', float)
    header['mo_thick_sol'] = _maybe_get(elt_snap, 'mo_thick_sol', 'Value',
                                        float)
    header['ntb'] = 1
    coord_h5 = []  # all the coordinate files
    coord_shape = []  # shape of meshes
    twod = None
    for elt_subdomain in elt_snap.findall('Grid'):
        if elt_subdomain.get('Name').startswith('meshYang'):
            header['ntb'] = 2
            break  # iterate only through meshYin
        elt_geom = elt_subdomain.find('Geometry')
        if elt_geom.get('Type') == 'X_Y' and twod is None:
            # 2d geometry: deduce the relevant directions from the
            # trailing letter of the coordinate array names
            twod = ''
            for data_item in elt_geom.findall('DataItem'):
                coord = data_item.text.strip()[-1]
                if coord in 'XYZ':
                    twod += coord
        data_item = elt_geom.find('DataItem')
        coord_shape.append(_get_dim(data_item))
        coord_h5.append(
            xdmf_file.parent / data_item.text.strip().split(':/', 1)[0])
    _read_coord_h5(coord_h5, coord_shape, header, twod)
    return header, xdmf_root
Args:
xdmf_file (:class:`pathlib.Path`): path of the xdmf file.
snapshot (int): snapshot number.
Returns:
(dict, root): geometry information and root of xdmf document. | entailment |
def _to_spherical(flds, header):
"""Convert vector field to spherical."""
cth = np.cos(header['t_mesh'][:, :, :-1])
sth = np.sin(header['t_mesh'][:, :, :-1])
cph = np.cos(header['p_mesh'][:, :, :-1])
sph = np.sin(header['p_mesh'][:, :, :-1])
fout = np.copy(flds)
fout[0] = cth * cph * flds[0] + cth * sph * flds[1] - sth * flds[2]
fout[1] = sph * flds[0] - cph * flds[1] # need to take the opposite here
fout[2] = sth * cph * flds[0] + sth * sph * flds[1] + cth * flds[2]
return fout | Convert vector field to spherical. | entailment |
def _flds_shape(fieldname, header):
"""Compute shape of flds variable."""
shp = list(header['nts'])
shp.append(header['ntb'])
# probably a better way to handle this
if fieldname == 'Velocity':
shp.insert(0, 3)
# extra points
header['xp'] = int(header['nts'][0] != 1)
shp[1] += header['xp']
header['yp'] = int(header['nts'][1] != 1)
shp[2] += header['yp']
header['zp'] = 1
header['xyp'] = 1
else:
shp.insert(0, 1)
header['xp'] = 0
header['yp'] = 0
header['zp'] = 0
header['xyp'] = 0
return shp | Compute shape of flds variable. | entailment |
def _post_read_flds(flds, header):
"""Process flds to handle sphericity."""
if flds.shape[0] >= 3 and header['rcmb'] > 0:
# spherical vector
header['p_mesh'] = np.roll(
np.arctan2(header['y_mesh'], header['x_mesh']), -1, 1)
for ibk in range(header['ntb']):
flds[..., ibk] = _to_spherical(flds[..., ibk], header)
header['p_mesh'] = np.roll(
np.arctan2(header['y_mesh'], -header['x_mesh']) + np.pi, -1, 1)
return flds | Process flds to handle sphericity. | entailment |
def read_field_h5(xdmf_file, fieldname, snapshot, header=None):
    """Extract field data from hdf5 files.

    Args:
        xdmf_file (:class:`pathlib.Path`): path of the xdmf file.
        fieldname (str): name of field to extract.
        snapshot (int): snapshot number.
        header (dict): geometry information.
    Returns:
        (dict, numpy.array): geometry information and field data. None
        is returned if data is unavailable.
    """
    if header is None:
        header, xdmf_root = read_geom_h5(xdmf_file, snapshot)
    else:
        xdmf_root = xmlET.parse(str(xdmf_file)).getroot()
    npc = header['nts'] // header['ncs']  # number of grid point per node
    flds = np.zeros(_flds_shape(fieldname, header))
    data_found = False
    for elt_subdomain in xdmf_root[0][0][snapshot].findall('Grid'):
        ibk = int(elt_subdomain.get('Name').startswith('meshYang'))
        for data_attr in elt_subdomain.findall('Attribute'):
            if data_attr.get('Name') != fieldname:
                continue
            icore, fld = _get_field(xdmf_file, data_attr.find('DataItem'))
            # for some reason, the field is transposed
            fld = fld.T
            shp = fld.shape
            if shp[-1] == 1 and header['nts'][0] == 1:  # YZ
                fld = fld.reshape((shp[0], 1, shp[1], shp[2]))
                if header['rcmb'] < 0:
                    fld = fld[(2, 0, 1), ...]
            elif shp[-1] == 1:  # XZ
                fld = fld.reshape((shp[0], shp[1], 1, shp[2]))
                if header['rcmb'] < 0:
                    fld = fld[(0, 2, 1), ...]
            elif header['nts'][1] == 1:  # cart XZ
                fld = fld.reshape((1, shp[0], 1, shp[1]))
            # first-cell index of this core in each direction; the int
            # cast is needed since np.prod over an empty slice returns
            # a float, which modern numpy rejects as a slice bound
            ifs = [int(icore // np.prod(header['ncs'][:i])
                       % header['ncs'][i]) * npc[i]
                   for i in range(3)]
            if header['zp']:  # remove top row
                fld = fld[:, :, :, :-1]
            flds[:,
                 ifs[0]:ifs[0] + npc[0] + header['xp'],
                 ifs[1]:ifs[1] + npc[1] + header['yp'],
                 ifs[2]:ifs[2] + npc[2],
                 ibk] = fld
            data_found = True
    flds = _post_read_flds(flds, header)
    return (header, flds) if data_found else None
Args:
xdmf_file (:class:`pathlib.Path`): path of the xdmf file.
fieldname (str): name of field to extract.
snapshot (int): snapshot number.
header (dict): geometry information.
Returns:
(dict, numpy.array): geometry information and field data. None
is returned if data is unavailable. | entailment |
def read_tracers_h5(xdmf_file, infoname, snapshot, position):
    """Extract tracers data from hdf5 files.

    Args:
        xdmf_file (:class:`pathlib.Path`): path of the xdmf file.
        infoname (str): name of information to extract.
        snapshot (int): snapshot number.
        position (bool): whether to extract position of tracers.
    Returns:
        dict of list of numpy.array:
            Tracers data organized by attribute and block.
    """
    xdmf_root = xmlET.parse(str(xdmf_file)).getroot()
    tra = {}
    tra[infoname] = [{}, {}]  # two blocks, ordered by cores
    if position:
        for axis in 'xyz':
            tra[axis] = [{}, {}]
    for elt_subdomain in xdmf_root[0][0][snapshot].findall('Grid'):
        # block 1 is the Yang grid in yinyang geometry
        ibk = int(elt_subdomain.get('Name').startswith('meshYang'))
        if position:
            for data_attr in elt_subdomain.findall('Geometry'):
                for data_item, axis in zip(data_attr.findall('DataItem'),
                                           'xyz'):
                    icore, data = _get_field(xdmf_file, data_item)
                    tra[axis][ibk][icore] = data
        for data_attr in elt_subdomain.findall('Attribute'):
            if data_attr.get('Name') != infoname:
                continue
            icore, data = _get_field(xdmf_file, data_attr.find('DataItem'))
            tra[infoname][ibk][icore] = data
    for info in tra:
        tra[info] = [trab for trab in tra[info] if trab]  # remove empty blocks
        # concatenate the per-core arrays in core order within each block
        for iblk, trab in enumerate(tra[info]):
            tra[info][iblk] = np.concatenate([trab[icore]
                                              for icore in range(len(trab))])
    return tra
Args:
xdmf_file (:class:`pathlib.Path`): path of the xdmf file.
infoname (str): name of information to extract.
snapshot (int): snapshot number.
position (bool): whether to extract position of tracers.
Returns:
dict of list of numpy.array:
Tracers data organized by attribute and block. | entailment |
def read_time_h5(h5folder):
    """Iterate through (isnap, istep) recorded in h5folder/'time_botT.h5'.

    Args:
        h5folder (:class:`pathlib.Path`): directory of HDF5 output files.
    Yields:
        tuple of int: (isnap, istep).
    """
    with h5py.File(h5folder / 'time_botT.h5', 'r') as h5f:
        for name, dset in h5f.items():
            # dataset names end with the 5-digit snapshot number;
            # the time step is the third recorded value
            isnap = int(name[-5:])
            istep = int(dset[2])
            yield isnap, istep
Args:
h5folder (:class:`pathlib.Path`): directory of HDF5 output files.
Yields:
tuple of int: (isnap, istep). | entailment |
def _init_shape(self):
    """Determine shape of geometry.

    Sets the 'cyl', 'sph' and 'axi' flags of self._shape from the
    geometry section of the par file and the rcmb value of the header.
    """
    shape = self._par['geometry']['shape'].lower()
    aspect = self._header['aspect']
    if self.rcmb is not None and self.rcmb >= 0:
        # curvilinear
        # a 2D annulus wider than pi is treated as cylindrical
        self._shape['cyl'] = self.twod_xz and (shape == 'cylindrical' or
                                               aspect[0] >= np.pi)
        self._shape['sph'] = not self._shape['cyl']
    elif self.rcmb is None:
        # rcmb missing from the header: fall back on the par file value
        self._header['rcmb'] = self._par['geometry']['r_cmb']
        if self.rcmb >= 0:
            if self.twod_xz and shape == 'cylindrical':
                self._shape['cyl'] = True
            elif shape == 'spherical':
                self._shape['sph'] = True
    self._shape['axi'] = self.cartesian and self.twod_xz and \
        shape == 'axisymmetric'
def _get_raw_data(self, name):
    """Find file holding data and return its content.

    Args:
        name (str): name of the field to look for.
    Returns:
        tuple: the list of field variables held by the file, and the
        parsed data (None when no file holds the requested field).
    """
    # try legacy first, then hdf5
    filestem = ''
    for filestem, list_fvar in self._files.items():
        if name in list_fvar:
            break
    # NOTE(review): if name is in none of the lists, the last
    # (filestem, list_fvar) pair from the loop is used; presumably
    # callers only request known names -- confirm.
    fieldfile = self.step.sdat.filename(filestem, self.step.isnap,
                                        force_legacy=True)
    if not fieldfile.is_file():
        fieldfile = self.step.sdat.filename(filestem, self.step.isnap)
    parsed_data = None
    if fieldfile.is_file():
        parsed_data = stagyyparsers.fields(fieldfile)
    elif self.step.sdat.hdf5 and self._filesh5:
        for filestem, list_fvar in self._filesh5.items():
            if name in list_fvar:
                break
        parsed_data = stagyyparsers.read_field_h5(
            self.step.sdat.hdf5 / 'Data.xmf', filestem, self.step.isnap)
    return list_fvar, parsed_data
def geom(self):
    """Geometry information.

    :class:`_Geometry` instance holding geometry information. It is
    issued from binary files holding field information. It is set to
    None if not available for this time step.
    """
    # _header and _geom use the UNDETERMINED sentinel to distinguish
    # "not yet computed" from "known to be unavailable" (None)
    if self._header is UNDETERMINED:
        binfiles = self.step.sdat.binfiles_set(self.step.isnap)
        if binfiles:
            self._header = stagyyparsers.fields(binfiles.pop(),
                                                only_header=True)
        elif self.step.sdat.hdf5:
            xmf = self.step.sdat.hdf5 / 'Data.xmf'
            self._header, _ = stagyyparsers.read_geom_h5(xmf,
                                                         self.step.isnap)
        else:
            self._header = None
    if self._geom is UNDETERMINED:
        if self._header is None:
            self._geom = None
        else:
            self._geom = _Geometry(self._header, self.step.sdat.par)
    return self._geom
:class:`_Geometry` instance holding geometry information. It is
issued from binary files holding field information. It is set to
None if not available for this time step. | entailment |
def timeinfo(self):
    """Time series data of the time step.

    Set to None if no time series data is available for this time step.
    """
    series = self.sdat.tseries
    return series.loc[self.istep] if self.istep in series.index else None
Set to None if no time series data is available for this time step. | entailment |
def rprof(self):
    """Radial profiles data of the time step.

    Set to None if no radial profiles data is available for this time step.
    """
    profs = self.sdat.rprof
    # rows are double-indexed (istep, radial index)
    if self.istep in profs.index.levels[0]:
        return profs.loc[self.istep]
    return None
Set to None if no radial profiles data is available for this time step. | entailment |
def isnap(self):
    """Snapshot index corresponding to time step.

    It is set to None if no snapshot exists for the time step.
    """
    if self._isnap is UNDETERMINED:
        istep = None
        isnap = -1
        # could be more efficient if do 0 and -1 then bisection
        # (but loose intermediate <- would probably use too much
        # memory for what it's worth if search algo is efficient)
        while (istep is None or istep < self.istep) and isnap < 99999:
            isnap += 1
            istep = self.sdat.snaps[isnap].istep
            # NOTE(review): bind presumably records the isnap/istep
            # mapping, setting _isnap on the matching step -- confirm.
            self.sdat.snaps.bind(isnap, istep)
            # all intermediate istep could have their ._isnap to None
        if istep != self.istep:
            self._isnap = None
    return self._isnap
It is set to None if no snapshot exists for the time step. | entailment |
def convert_v1_to_v2(
        dict  # type: Dict[str, Any]
    ):
    # type: (...) -> Dict[str, Any]
    """
    Convert v1 schema dict to v2 schema dict.

    In v2, per-feature weights are folded into a per-feature ``k``
    and the global hash configuration is copied into every feature.

    :param dict: v1 schema dict
    :return: v2 schema dict
    """
    version = dict['version']
    if version != 1:
        raise ValueError('Version {} not 1'.format(version))

    clk_config = dict['clkConfig']
    k = clk_config['k']
    clk_hash = clk_config['hash']

    def convert_feature(feature):
        # already-ignored features are passed through unchanged
        if 'ignored' in feature:
            return feature
        weight = feature['hashing'].get('weight', 1.0)
        # a zero weight means the feature is effectively ignored
        if weight == 0:
            return {
                'identifier': feature['identifier'],
                'ignored': True
            }
        converted = deepcopy(feature)
        hashing = converted['hashing']
        if 'weight' in hashing:
            del hashing['weight']
        hashing['k'] = int(round(weight * k))
        hashing['hash'] = clk_hash
        return converted

    return {
        'version': 2,
        'clkConfig': {
            'l': clk_config['l'],
            'xor_folds': clk_config.get('xor_folds', 0),
            'kdf': clk_config['kdf']
        },
        'features': [convert_feature(f) for f in dict['features']]
    }
:param dict: v1 schema dict
:return: v2 schema dict | entailment |
def from_json_dict(dct, validate=True):
    # type: (Dict[str, Any], bool) -> Schema
    """ Create a Schema for v1 or v2 according to dct

    :param dct: This dictionary must have a `'features'`
        key specifying the columns of the dataset. It must have
        a `'version'` key containing the master schema version
        that this schema conforms to. It must have a `'hash'`
        key with all the globals.
    :param validate: (default True) Raise an exception if the
        schema does not conform to the master schema.
    :raises SchemaError: When the version is unsupported or (with
        validate) the schema is invalid.
    :return: the Schema
    """
    if validate:
        # This raises iff the schema is invalid.
        validate_schema_dict(dct)
    version = dct['version']
    if version == 1:
        # v1 documents are upgraded to v2 and re-validated
        dct = convert_v1_to_v2(dct)
        if validate:
            validate_schema_dict(dct)
    elif version != 2:
        msg = ('Schema version {} is not supported. '
               'Consider updating clkhash.').format(version)
        raise SchemaError(msg)
    clk_config = dct['clkConfig']
    l = clk_config['l']
    xor_folds = clk_config.get('xor_folds', 0)
    kdf = clk_config['kdf']
    kdf_type = kdf['type']
    kdf_hash = kdf.get('hash', 'SHA256')
    # optional info and salt are base64-encoded in the document
    kdf_info_string = kdf.get('info')
    kdf_info = (base64.b64decode(kdf_info_string)
                if kdf_info_string is not None
                else None)
    kdf_salt_string = kdf.get('salt')
    kdf_salt = (base64.b64decode(kdf_salt_string)
                if kdf_salt_string is not None
                else None)
    kdf_key_size = kdf.get('keySize', DEFAULT_KDF_KEY_SIZE)
    fields = list(map(spec_from_json_dict, dct['features']))
    return Schema(fields, l, xor_folds,
                  kdf_type, kdf_hash, kdf_info, kdf_salt, kdf_key_size)
:param dct: This dictionary must have a `'features'`
key specifying the columns of the dataset. It must have
a `'version'` key containing the master schema version
that this schema conforms to. It must have a `'hash'`
key with all the globals.
:param validate: (default True) Raise an exception if the
schema does not conform to the master schema.
:return: the Schema | entailment |
def from_json_file(schema_file, validate=True):
    # type: (TextIO, bool) -> Schema
    """ Load a Schema object from a json file.

    :param schema_file: A JSON file containing the schema.
    :param validate: (default True) Raise an exception if the
        schema does not conform to the master schema.
    :raises SchemaError: When the schema is invalid.
    :return: the Schema
    """
    try:
        schema_dict = json.load(schema_file)
    except ValueError as e:
        # json.decoder.JSONDecodeError subclasses ValueError but does
        # not exist in Python 2, so ValueError is caught instead.
        msg = 'The schema is not a valid JSON file.'
        raise_from(SchemaError(msg), e)
    return from_json_dict(schema_dict, validate=validate)
:param schema_file: A JSON file containing the schema.
:param validate: (default True) Raise an exception if the
schema does not conform to the master schema.
:raises SchemaError: When the schema is invalid.
:return: the Schema | entailment |
def _get_master_schema(version):
    # type: (Hashable) -> bytes
    """ Loads the master schema of given version as bytes.

    :param version: The version of the master schema whose path we
        wish to retrieve.
    :raises SchemaError: When the schema version is unknown. This
        usually means that either (a) clkhash is out of date, or (b)
        the schema version listed is incorrect.
    :raises MasterSchemaError: When the master schema file cannot be
        found or loaded from the package data.
    :return: Bytes of the schema.
    """
    try:
        file_name = MASTER_SCHEMA_FILE_NAMES[version]
    except (TypeError, KeyError) as e:
        # TypeError covers unhashable version values
        msg = ('Schema version {} is not supported. '
               'Consider updating clkhash.').format(version)
        raise_from(SchemaError(msg), e)
    try:
        schema_bytes = pkgutil.get_data('clkhash', 'schemas/{}'.format(file_name))
    except IOError as e:  # In Python 3 we can be more specific with
                          # FileNotFoundError, but that doesn't exist in
                          # Python 2.
        msg = ('The master schema could not be found. The schema cannot be '
               'validated. Please file a bug report.')
        raise_from(MasterSchemaError(msg), e)
    if schema_bytes is None:
        # pkgutil.get_data returns None when the package loader cannot
        # provide data access
        msg = ('The master schema could not be loaded. The schema cannot be '
               'validated. Please file a bug report.')
        raise MasterSchemaError(msg)
    return schema_bytes
:param version: The version of the master schema whose path we
wish to retrieve.
:raises SchemaError: When the schema version is unknown. This
usually means that either (a) clkhash is out of date, or (b)
the schema version listed is incorrect.
:return: Bytes of the schema. | entailment |
def validate_schema_dict(schema):
    # type: (Dict[str, Any]) -> None
    """ Validate the schema.

    This raises iff either the schema or the master schema are
    invalid. If it's successful, it returns nothing.

    :param schema: The schema to validate, as parsed by `json`.
    :raises SchemaError: When the schema is invalid.
    :raises MasterSchemaError: When the master schema is invalid.
    """
    if not isinstance(schema, dict):
        msg = ('The top level of the schema file is a {}, whereas a dict is '
               'expected.'.format(type(schema).__name__))
        raise SchemaError(msg)
    if 'version' in schema:
        version = schema['version']
    else:
        raise SchemaError('A format version is expected in the schema.')
    # the master schema matching the declared version is used as the
    # jsonschema document to validate against
    master_schema_bytes = _get_master_schema(version)
    try:
        master_schema = json.loads(master_schema_bytes.decode('utf-8'))
    except ValueError as e:  # In Python 3 we can be more specific with
                             # json.decoder.JSONDecodeError, but that
                             # doesn't exist in Python 2.
        msg = ('The master schema is not a valid JSON file. The schema cannot '
               'be validated. Please file a bug report.')
        raise_from(MasterSchemaError(msg), e)
    try:
        jsonschema.validate(schema, master_schema)
    except jsonschema.exceptions.ValidationError as e:
        raise_from(SchemaError('The schema is not valid.'), e)
    except jsonschema.exceptions.SchemaError as e:
        msg = ('The master schema is not valid. The schema cannot be '
               'validated. Please file a bug report.')
        raise_from(MasterSchemaError(msg), e)
This raises iff either the schema or the master schema are
invalid. If it's successful, it returns nothing.
:param schema: The schema to validate, as parsed by `json`.
:raises SchemaError: When the schema is invalid.
:raises MasterSchemaError: When the master schema is invalid. | entailment |
def deserialize_bitarray(ser):
    # type: (str) -> bitarray
    """Deserialize a base 64 encoded string to a bitarray (bloomfilter)."""
    raw = base64.b64decode(ser.encode(encoding='UTF-8', errors='strict'))
    result = bitarray()
    result.frombytes(raw)
    return result
def analyze_image(image, apis=DEFAULT_APIS, **kwargs):
    """
    Given input image, returns the results of specified image apis. Possible apis
    include: ['fer', 'facial_features', 'image_features']

    Example usage:

    .. code-block:: python

       >>> import indicoio
       >>> import numpy as np
       >>> face = np.zeros((48,48)).tolist()
       >>> results = indicoio.analyze_image(image = face, apis = ["fer", "facial_features"])
       >>> fer = results["fer"]
       >>> facial_features = results["facial_features"]

    :param image: The image to be analyzed.
    :param apis: List of apis to use.
    :type apis: list of str
    :rtype: Dictionary of api responses
    """
    # request options are popped so they are not forwarded twice
    cloud = kwargs.pop('cloud', None)
    batch = kwargs.pop('batch', False)
    api_key = kwargs.pop('api_key', None)
    processed = data_preprocess(image, batch=batch)
    return multi(
        data=processed,
        datatype="image",
        cloud=cloud,
        batch=batch,
        api_key=api_key,
        apis=apis,
        accepted_apis=IMAGE_APIS,
        **kwargs
    )
include: ['fer', 'facial_features', 'image_features']
Example usage:
.. code-block:: python
>>> import indicoio
>>> import numpy as np
>>> face = np.zeros((48,48)).tolist()
>>> results = indicoio.analyze_image(image = face, apis = ["fer", "facial_features"])
>>> fer = results["fer"]
>>> facial_features = results["facial_features"]
:param text: The text to be analyzed.
:param apis: List of apis to use.
:type text: str or unicode
:type apis: list of str
:rtype: Dictionary of api responses | entailment |
def compute_hash_speed(num, quiet=False):
    # type: (int, bool) -> float
    """ Hash time.
    Generates `num` fake PII records, writes them to a temporary CSV,
    hashes them with the default schema and returns the throughput in
    hashes per second.

    :param num: number of records to generate and hash.
    :param quiet: suppress the progress bar and the summary line.
    :return: hashes per second.
    """
    namelist = NameList(num)
    os_fd, tmpfile_name = tempfile.mkstemp(text=True)
    schema = NameList.SCHEMA
    with open(tmpfile_name, 'wt') as out:
        out.write(','.join(field.identifier for field in schema.fields))
        out.write('\n')
        for person in namelist.names:
            out.write(','.join(str(attr) for attr in person))
            out.write('\n')
    with open(tmpfile_name, 'rt') as source:
        start = timer()
        generate_clk_from_csv(source, ('key1', 'key2'), schema, progress_bar=not quiet)
        end = timer()
    os.close(os_fd)
    os.remove(tmpfile_name)
    elapsed_time = end - start
    if not quiet:
        print("{:6d} hashes in {:.6f} seconds. {:.2f} KH/s".format(num, elapsed_time, num / (1000 * elapsed_time)))
    return num / elapsed_time
def hash(pii_csv, keys, schema, clk_json, quiet, no_header, check_header, validate):
    """Process data to create CLKs
    Given a file containing CSV data as PII_CSV, and a JSON
    document defining the expected schema, verify the schema, then
    hash the data to create CLKs writing them as JSON to CLK_JSON. Note the CSV
    file should contain a header row - however this row is not used
    by this tool.
    It is important that the keys are only known by the two data providers. Two words should be provided. For example:
    $clkutil hash pii.csv horse staple pii-schema.json clk.json
    Use "-" for CLK_JSON to write JSON to stdout.
    """
    schema_object = clkhash.schema.from_json_file(schema_file=schema)
    # Translate the two CLI flags into the tri-state header argument.
    if no_header:
        header = False
    elif not check_header:
        header = 'ignore'
    else:
        header = True
    try:
        clk_data = clk.generate_clk_from_csv(
            pii_csv, keys, schema_object,
            validate=validate,
            header=header,
            progress_bar=not quiet)
    except (validate_data.EntryError, validate_data.FormatError) as e:
        msg, = e.args
        log(msg)
        log('Hashing failed.')
    else:
        json.dump({'clks': clk_data}, clk_json)
        if hasattr(clk_json, 'name'):
            log("CLK data written to {}".format(clk_json.name))
Given a file containing CSV data as PII_CSV, and a JSON
document defining the expected schema, verify the schema, then
hash the data to create CLKs writing them as JSON to CLK_JSON. Note the CSV
file should contain a header row - however this row is not used
by this tool.
It is important that the keys are only known by the two data providers. Two words should be provided. For example:
$clkutil hash pii.csv horse staple pii-schema.json clk.json
Use "-" for CLK_JSON to write JSON to stdout. | entailment |
def status(server, output, verbose):
    """Connect to an entity matching server and check the service status.
    Use "-" to output status to stdout.
    """
    if verbose:
        log("Connecting to Entity Matching Server: {}".format(server))
    service_status = server_get_status(server)
    if verbose:
        log("Status: {}".format(service_status['status']))
    # The full status document is always written, regardless of verbosity.
    print(json.dumps(service_status), file=output)
Use "-" to output status to stdout. | entailment |
def create_project(type, schema, server, name, output, verbose):
    """Create a new project on an entity matching server.
    See entity matching service documentation for details on mapping type and schema
    Returns authentication details for the created project.
    """
    if verbose:
        log("Entity Matching Server: {}".format(server))
    if schema is None:
        raise ValueError("Schema must be provided when creating new linkage project")
    schema_json = json.load(schema)
    # Validate the schema locally before contacting the server.
    clkhash.schema.validate_schema_dict(schema_json)
    project_name = name if name is not None else ''
    # Creating new project
    try:
        project_creation_reply = project_create(server, schema_json, type, project_name)
    except ServiceError as e:
        log("Unexpected response - {}".format(e.status_code))
        log(e.text)
        raise SystemExit
    else:
        log("Project created")
    json.dump(project_creation_reply, output)
See entity matching service documentation for details on mapping type and schema
Returns authentication details for the created project. | entailment |
def create(server, name, project, apikey, output, threshold, verbose):
    """Create a new run on an entity matching server.
    See entity matching service documentation for details on threshold.
    Returns details for the created run.
    """
    if verbose:
        log("Entity Matching Server: {}".format(server))
    if threshold is None:
        raise ValueError("Please provide a threshold")
    # Create a new run; on failure, log the server's response and give up.
    try:
        run_info = run_create(server, project, apikey, threshold, name)
    except ServiceError as e:
        log("Unexpected response with status {}".format(e.status_code))
        log(e.text)
    else:
        json.dump(run_info, output)
See entity matching service documentation for details on threshold.
Returns details for the created run. | entailment |
def upload(clk_json, project, apikey, server, output, verbose):
    """Upload CLK data to entity matching server.
    Given a json file containing hashed clk data as CLK_JSON, upload to
    the entity resolution service.
    Use "-" to read from stdin.
    """
    if verbose:
        log("Uploading CLK data from {}".format(clk_json.name))
        log("To Entity Matching Server: {}".format(server))
        log("Project ID: {}".format(project))
        log("Uploading CLK data to the server")
    response = project_upload_clks(server, project, apikey, clk_json)
    if verbose:
        log(response)
    json.dump(response, output)
Given a json file containing hashed clk data as CLK_JSON, upload to
the entity resolution service.
Use "-" to read from stdin. | entailment |
def results(project, apikey, run, watch, server, output):
    """
    Check to see if results are available for a particular mapping
    and if so download.
    Authentication is carried out using the --apikey option which
    must be provided. Depending on the server operating mode this
    may return a mask, a linkage table, or a permutation. Consult
    the entity service documentation for details.
    """
    status = run_get_status(server, project, run, apikey)
    log(format_run_status(status))
    if watch:
        # Poll for up to a day, logging every status change.
        for status in watch_run_status(server, project, run, apikey, 24 * 60 * 60):
            log(format_run_status(status))
    state = status['state']
    if state == 'completed':
        log("Downloading result")
        response = run_get_result_text(server, project, run, apikey)
        log("Received result")
        print(response, file=output)
    elif state == 'error':
        log("There was an error")
        error_result = run_get_result_text(server, project, run, apikey)
        print(error_result, file=output)
    else:
        log("No result yet")
and if so download.
Authentication is carried out using the --apikey option which
must be provided. Depending on the server operating mode this
may return a mask, a linkage table, or a permutation. Consult
the entity service documentation for details. | entailment |
def generate(size, output, schema):
    """Generate fake PII data for testing"""
    fake_names = randomnames.NameList(size)
    if schema is not None:
        # Custom schemas for fake data are not supported yet.
        raise NotImplementedError
    randomnames.save_csv(
        fake_names.names,
        [field.identifier for field in fake_names.SCHEMA.fields],
        output)
def generate_default_schema(output):
    """Get default schema for fake PII"""
    # The bundled schema lives next to this module under data/.
    source_path = os.path.join(
        os.path.dirname(__file__), 'data', 'randomnames-schema.json')
    shutil.copyfile(source_path, output)
def docx_extraction(docx, cloud=None, batch=False, api_key=None, version=None, **kwargs):
    """
    Given a .docx file, returns the raw text associated with the given .docx file.
    The .docx file may be provided as base64 encoded data or as a filepath.
    Example usage:
    .. code-block:: python
       >>> from indicoio import docx_extraction
       >>> results = docx_extraction(docx_file)
    :param docx: The docx to be analyzed.
    :type docx: str or list of strs
    :rtype: dict or list of dicts
    """
    url_params = {"batch": batch, "api_key": api_key, "version": version}
    payload = docx_preprocess(docx, batch=batch)
    return api_handler(payload, cloud=cloud, api="docxextraction", url_params=url_params, **kwargs)
The .docx file may be provided as base64 encoded data or as a filepath.
Example usage:
.. code-block:: python
>>> from indicoio import docx_extraction
>>> results = docx_extraction(docx_file)
:param docx: The docx to be analyzed.
:type docx: str or list of strs
:rtype: dict or list of dicts | entailment |
def facial_features(image, cloud=None, batch=False, api_key=None, version=None, **kwargs):
    """
    Given a grayscale input image of a face, returns a 48 dimensional feature vector explaining that face.
    Useful as a form of feature engineering for face oriented tasks.
    Input should be in a list of list format, resizing will be attempted internally but for best
    performance, images should be already sized at 48x48 pixels.
    Example usage:
    .. code-block:: python
       >>> from indicoio import facial_features
       >>> import numpy as np
       >>> face = np.zeros((48,48))
       >>> features = facial_features(face)
       >>> len(features)
       48
    :param image: The image to be analyzed.
    :type image: list of lists
    :rtype: List containing feature responses
    """
    # When face detection is requested, skip the fixed 48x48 resize.
    target_size = None if kwargs.get("detect") else (48, 48)
    image = data_preprocess(image, batch=batch, size=target_size)
    url_params = {"batch": batch, "api_key": api_key, "version": version}
    return api_handler(image, cloud=cloud, api="facialfeatures", url_params=url_params, **kwargs)
Useful as a form of feature engineering for face oriented tasks.
Input should be in a list of list format, resizing will be attempted internally but for best
performance, images should be already sized at 48x48 pixels.
Example usage:
.. code-block:: python
>>> from indicoio import facial_features
>>> import numpy as np
>>> face = np.zeros((48,48))
>>> features = facial_features(face)
>>> len(features)
48
:param image: The image to be analyzed.
:type image: list of lists
:rtype: List containing feature responses | entailment |
def wait_for_run(server, project, run, apikey, timeout=None, update_period=1):
    """
    Monitor a linkage run and return the final status updates. If a timeout is provided and the
    run hasn't entered a terminal state (error or completed) when the timeout is reached a
    TimeoutError will be raised.
    :param server: Base url of the upstream server.
    :param project: Project identifier.
    :param run: Run identifier.
    :param apikey: Authentication token for the project.
    :param timeout: Stop waiting after this many seconds. The default (None) is to never give you up.
    :param update_period: Time in seconds between queries to the run's status.
    :raises TimeoutError
    """
    # Drain the status generator; the last yielded status is the final one.
    for final_status in watch_run_status(server, project, run, apikey,
                                         timeout, update_period):
        pass
    return final_status
run hasn't entered a terminal state (error or completed) when the timeout is reached a
TimeoutError will be raised.
:param server: Base url of the upstream server.
:param project:
:param run:
:param apikey:
:param timeout: Stop waiting after this many seconds. The default (None) is to never give you up.
:param update_period: Time in seconds between queries to the run's status.
:raises TimeoutError | entailment |
def watch_run_status(server, project, run, apikey, timeout=None, update_period=1):
    """
    Monitor a linkage run and yield status updates. Will immediately yield an update and then
    only yield further updates when the status object changes. If a timeout is provided and the
    run hasn't entered a terminal state (error or completed) when the timeout is reached,
    updates will cease and a TimeoutError will be raised.
    :param server: Base url of the upstream server.
    :param project: Project identifier.
    :param run: Run identifier.
    :param apikey: Authentication token for the project.
    :param timeout: Stop waiting after this many seconds. The default (None) is to never give you up.
    :param update_period: Time in seconds between queries to the run's status.
    :raises TimeoutError
    """
    start_time = time.time()
    # The very first status is always yielded, even if unchanged later.
    status = old_status = run_get_status(server, project, run, apikey)
    yield status
    def time_not_up():
        # True while no timeout is set, or the deadline has not passed.
        return (
            (timeout is None) or
            (time.time() - start_time < timeout)
        )
    while time_not_up():
        if status['state'] in {'error', 'completed'}:
            # No point continuing as run has entered a terminal state
            yield status
            return
        if old_status != status:
            # Only yield when the status object actually changed.
            yield status
        time.sleep(update_period)
        old_status = status
        try:
            status = run_get_status(server, project, run, apikey)
        except RateLimitedClient:
            # Server asked us to back off; pause and retry on next loop.
            time.sleep(1)
    raise TimeoutError("Timeout exceeded before run {} terminated".format(run))
only yield further updates when the status object changes. If a timeout is provided and the
run hasn't entered a terminal state (error or completed) when the timeout is reached,
updates will cease and a TimeoutError will be raised.
:param server: Base url of the upstream server.
:param project:
:param run:
:param apikey:
:param timeout: Stop waiting after this many seconds. The default (None) is to never give you up.
:param update_period: Time in seconds between queries to the run's status.
:raises TimeoutError | entailment |
def docx_preprocess(docx, batch=False):
    """
    Load docx files from local filepath if not already b64 encoded.

    :param docx: a filepath, an already base64-encoded document, or (when
        `batch` is True) a list of either.
    :param batch: when True, treat `docx` as a list and preprocess each item.
    :return: base64-encoded document data (or a list thereof).
    """
    if batch:
        return [docx_preprocess(doc, batch=False) for doc in docx]
    if os.path.isfile(docx):
        # A filepath is provided: read and encode. Use a context manager so
        # the file handle is closed deterministically (the previous version
        # leaked the handle until garbage collection).
        with open(docx, 'rb') as f:
            return b64encode(f.read())
    else:
        # assume doc is already b64 encoded
        return docx
def relevance(data, queries, cloud=None, batch=False, api_key=None, version=None, **kwargs):
    """
    Given input text and a list of query terms / phrases, returns how relevant the query is
    to the input text.
    Example usage:
    .. code-block:: python
       >>> import indicoio
       >>> text = 'On Monday, president Barack Obama will be giving his keynote address at...'
       >>> relevance = indicoio.relevance(text, queries=['president'])
       >>> print "Relevance: " + str(relevance[0])
       u'Relevance: [0.44755361996336784]'
    :param data: The text to be analyzed.
    :param queries: a list of terms or phrases to measure similarity against
    :type data: str or unicode
    :rtype: Dictionary of feature score pairs
    """
    # Queries travel inside kwargs; synonym expansion is always disabled.
    kwargs['queries'] = queries
    kwargs['synonyms'] = False
    url_params = {"batch": batch, "api_key": api_key, "version": version}
    return api_handler(data, cloud=cloud, api="relevance", url_params=url_params, **kwargs)
to the input text.
Example usage:
.. code-block:: python
>>> import indicoio
>>> text = 'On Monday, president Barack Obama will be giving his keynote address at...'
>>> relevance = indicoio.relevance(text, queries=['president'])
>>> print "Relevance: " + str(relevance[0])
u'Relevance: [0.44755361996336784]'
:param text: The text to be analyzed.
:param queries: a list of terms or phrases to measure similarity against
:type text: str or unicode
:rtype: Dictionary of feature score pairs | entailment |
def ASSIGN(self, node):
    """This is a custom implementation of ASSIGN derived from
    handleChildren() in pyflakes 1.3.0.
    The point here is that on module level, there's type aliases that we
    want to bind eagerly, but defer computation of the values of the
    assignments (the type aliases might have forward references).
    """
    if not isinstance(self.scope, ModuleScope):
        # Inside function/class scope, pyflakes' default handling is fine.
        return super().ASSIGN(node)
    # Module level: bind the targets eagerly so the alias names exist...
    for target in node.targets:
        self.handleNode(target, node)
    # ...but defer the value, which may contain forward references.
    self.deferHandleNode(node.value, node)
handleChildren() in pyflakes 1.3.0.
The point here is that on module level, there's type aliases that we
want to bind eagerly, but defer computation of the values of the
assignments (the type aliases might have forward references). | entailment |
def ANNASSIGN(self, node):
    """
    Annotated assignments don't have annotations evaluated on function
    scope, hence the custom implementation. Compared to the pyflakes
    version, we defer evaluation of the annotations (and values on
    module level).
    """
    if node.value:
        # Only bind the *target* if the assignment has value.
        # Otherwise it's not really ast.Store and shouldn't silence
        # UndefinedLocal warnings.
        self.handleNode(node.target, node)
    if not isinstance(self.scope, FunctionScope):
        # Annotations may contain forward references; evaluate them later.
        self.deferHandleNode(node.annotation, node)
    if node.value:
        # If the assignment has value, handle the *value*...
        if isinstance(self.scope, ModuleScope):
            # ...later (if module scope).
            self.deferHandleNode(node.value, node)
        else:
            # ...now.
            self.handleNode(node.value, node)
scope, hence the custom implementation. Compared to the pyflakes
version, we defer evaluation of the annotations (and values on
module level). | entailment |
def LAMBDA(self, node):
    """This is likely very brittle, currently works for pyflakes 1.3.0.
    Deferring annotation handling depends on the fact that during calls
    to LAMBDA visiting the function's body is already deferred and the
    only eager calls to `handleNode` are for annotations.
    """
    # Temporarily swap the two handlers so that the annotations (the only
    # eager handleNode calls inside pyflakes' LAMBDA) get deferred instead.
    self.handleNode, self.deferHandleNode = self.deferHandleNode, self.handleNode
    super().LAMBDA(node)
    # Swap back to restore normal handling for everything else.
    self.handleNode, self.deferHandleNode = self.deferHandleNode, self.handleNode
Deferring annotation handling depends on the fact that during calls
to LAMBDA visiting the function's body is already deferred and the
only eager calls to `handleNode` are for annotations. | entailment |
def double_hash_encode_ngrams(ngrams,  # type: Iterable[str]
                              keys,  # type: Sequence[bytes]
                              ks,  # type: Sequence[int]
                              l,  # type: int
                              encoding  # type: str
                              ):
    # type: (...) -> bitarray
    """ Computes the double hash encoding of the ngrams with the given keys.
    Using the method from:
    Schnell, R., Bachteler, T., & Reiher, J. (2011).
    A Novel Error-Tolerant Anonymous Linking Code.
    http://grlc.german-microsimulation.de/wp-content/uploads/2017/05/downloadwp-grlc-2011-02.pdf
    :param ngrams: list of n-grams to be encoded
    :param keys: hmac secret keys for md5 and sha1 as bytes
    :param ks: ks[i] is k value to use for ngram[i]
    :param l: length of the output bitarray
    :param encoding: the encoding to use when turning the ngrams to bytes
    :return: bitarray of length l with the bits set which correspond to
        the encoding of the ngrams
    """
    key_sha1, key_md5 = keys
    bf = bitarray(l)
    bf.setall(False)
    for ngram, k in zip(ngrams, ks):
        ngram_bytes = ngram.encode(encoding=encoding)
        # The two base hashes of the double-hashing scheme.
        h1 = int(hmac.new(key_sha1, ngram_bytes, sha1).hexdigest(), 16) % l
        h2 = int(hmac.new(key_md5, ngram_bytes, md5).hexdigest(), 16) % l
        for i in range(k):
            bf[(h1 + i * h2) % l] = 1
    return bf
Using the method from:
Schnell, R., Bachteler, T., & Reiher, J. (2011).
A Novel Error-Tolerant Anonymous Linking Code.
http://grlc.german-microsimulation.de/wp-content/uploads/2017/05/downloadwp-grlc-2011-02.pdf
:param ngrams: list of n-grams to be encoded
:param keys: hmac secret keys for md5 and sha1 as bytes
:param ks: ks[i] is k value to use for ngram[i]
:param l: length of the output bitarray
:param encoding: the encoding to use when turning the ngrams to bytes
:return: bitarray of length l with the bits set which correspond to
the encoding of the ngrams | entailment |
def double_hash_encode_ngrams_non_singular(ngrams,  # type: Iterable[str]
                                           keys,  # type: Sequence[bytes]
                                           ks,  # type: Sequence[int]
                                           l,  # type: int
                                           encoding  # type: str
                                           ):
    # type: (...) -> bitarray.bitarray
    """ Computes the double hash encoding of the n-grams, avoiding the
    singularity of the original scheme.
    The construction of [Schnell2011]_ defines the :math:`k` Bloom filter
    indices of an n-gram :math:`x` as

    .. math:: g_{i}(x) = (h_1(x) + i h_2(x)) \\mod l

    with :math:`0 \\leq i < k`. If :math:`h_2(x) = 0 \\mod l`, every index
    collapses to :math:`h_1(x) \\mod l`, so the n-gram is encoded into a
    single bit irrespective of :math:`k`. This variant re-derives
    :math:`h_2` with a salt until it is non-zero modulo :math:`l`. See
    https://github.com/data61/clkhash/issues/33 for a discussion.
    :param ngrams: list of n-grams to be encoded
    :param keys: tuple with (key_sha1, key_md5).
        That is, (hmac secret keys for sha1 as bytes, hmac secret keys for
        md5 as bytes)
    :param ks: ks[i] is k value to use for ngram[i]
    :param l: length of the output bitarray
    :param encoding: the encoding to use when turning the ngrams to bytes
    :return: bitarray of length l with the bits set which correspond to the
        encoding of the ngrams
    """
    key_sha1, key_md5 = keys
    bf = bitarray(l)
    bf.setall(False)
    for ngram, k in zip(ngrams, ks):
        ngram_bytes = ngram.encode(encoding=encoding)
        h1 = int_from_bytes(
            hmac.new(key_sha1, ngram_bytes, sha1).digest(), 'big') % l
        h2 = int_from_bytes(
            hmac.new(key_md5, ngram_bytes, md5).digest(), 'big') % l
        # Re-salt h2 until it is non-zero mod l, so the k indices differ.
        salt = 0
        while h2 == 0:
            salted = ngram_bytes + chr(salt).encode()
            h2 = int_from_bytes(
                hmac.new(key_md5, salted, md5).digest(), 'big') % l
            salt += 1
        for j in range(k):
            bf[(h1 + j * h2) % l] = True
    return bf
The original construction of [Schnell2011]_ displays an abnormality for
certain inputs:
An n-gram can be encoded into just one bit irrespective of the number
of k.
Their construction goes as follows: the :math:`k` different indices
:math:`g_i` of the Bloom filter for an n-gram
:math:`x` are defined as:
.. math:: g_{i}(x) = (h_1(x) + i h_2(x)) \\mod l
with :math:`0 \\leq i < k` and :math:`l` is the length of the Bloom
filter. If the value of the hash of :math:`x` of
the second hash function is a multiple of :math:`l`, then
.. math:: h_2(x) = 0 \\mod l
and thus
.. math:: g_i(x) = h_1(x) \\mod l,
irrespective of the value :math:`i`. A discussion of this potential flaw
can be found
`here <https://github.com/data61/clkhash/issues/33>`_.
:param ngrams: list of n-grams to be encoded
:param keys: tuple with (key_sha1, key_md5).
That is, (hmac secret keys for sha1 as bytes, hmac secret keys for
md5 as bytes)
:param ks: ks[i] is k value to use for ngram[i]
:param l: length of the output bitarray
:param encoding: the encoding to use when turning the ngrams to bytes
:return: bitarray of length l with the bits set which correspond to the
encoding of the ngrams | entailment |
def blake_encode_ngrams(ngrams,  # type: Iterable[str]
                        keys,  # type: Sequence[bytes]
                        ks,  # type: Sequence[int]
                        l,  # type: int
                        encoding  # type: str
                        ):
    # type: (...) -> bitarray.bitarray
    """ Computes the encoding of the ngrams using the BLAKE2 hash function.
    Unlike the double-hashing scheme of [Schnell2011]_, every one of the
    :math:`k` indices per n-gram is taken from an independent slice of
    keyed BLAKE2b output. Double hashing admits only :math:`l^2` distinct
    encodings per n-gram and introduces exploitable structure (see
    [Kroll2015]_), whereas slicing the output of a cryptographic hash
    yields :math:`k` (approximately) independent, uniformly distributed
    random variables ([Kaminsky2011]_ analyses how close). BLAKE2 is used
    because it is fast and its keyed mode provides a MAC with a single
    hash call instead of the two calls of the HMAC construction.
    .. warning::
        Although this construction makes the attack of [Kroll2015]_
        infeasible, it is most likely not enough by itself to ensure
        security.
    :param ngrams: list of n-grams to be encoded
    :param keys: secret key for blake2 as bytes
    :param ks: ks[i] is k value to use for ngram[i]
    :param l: length of the output bitarray (has to be a power of 2)
    :param encoding: the encoding to use when turning the ngrams to bytes
    :return: bitarray of length l with the bits set which correspond to the
        encoding of the ngrams
    """
    key, = keys  # Unpack: exactly one key is expected.
    log_l = int(math.log(l, 2))
    if 2 ** log_l != l:
        raise ValueError(
            'parameter "l" has to be a power of two for the BLAKE2 encoding, '
            'but was: {}'.format(
                l))
    bf = bitarray(l)
    bf.setall(False)
    for ngram, k in zip(ngrams, ks):
        # Each 64-byte BLAKE2b digest supplies 32 unsigned 16-bit values;
        # generate as many salted digests as needed to cover k indices.
        num_macs = (k + 31) // 32
        random_shorts = []  # type: List[int]
        for mac_idx in range(num_macs):
            digest = blake2b(ngram.encode(encoding=encoding), key=key,
                             salt=str(mac_idx).encode()).digest()
            random_shorts.extend(struct.unpack('32H', digest))
        for idx in range(k):
            bf[random_shorts[idx] % l] = 1
    return bf
We deliberately do not use the double hashing scheme as proposed in [
Schnell2011]_, because this
would introduce an exploitable structure into the Bloom filter. For more
details on the
weakness, see [Kroll2015]_.
In short, the double hashing scheme only allows for :math:`l^2`
different encodings for any possible n-gram,
whereas the use of :math:`k` different independent hash functions gives
you :math:`\\sum_{j=1}^{k}{\\binom{l}{j}}`
combinations.
**Our construction**
It is advantageous to construct Bloom filters using a family of hash
functions with the property of
`k-independence <https://en.wikipedia.org/wiki/K-independent_hashing>`_
to compute the indices for an entry.
This approach minimises the change of collisions.
An informal definition of *k-independence* of a family of hash functions
is, that if selecting a function at random
from the family, it guarantees that the hash codes of any designated k
keys are independent random variables.
Our construction utilises the fact that the output bits of a
cryptographic hash function are uniformly distributed,
independent, binary random variables (well, at least as close to as
possible. See [Kaminsky2011]_ for an analysis).
Thus, slicing the output of a cryptographic hash function into k
different slices gives you k independent random
variables.
We chose Blake2 as the cryptographic hash function mainly for two reasons:
* it is fast.
* in keyed hashing mode, Blake2 provides MACs with just one hash
function call instead of the two calls in the HMAC construction used
in the double hashing scheme.
.. warning::
Please be aware that, although this construction makes the attack of
[Kroll2015]_ infeasible, it is most likely
not enough to ensure security. Or in their own words:
| However, we think that using independent hash functions alone
will not be sufficient to ensure security,
since in this case other approaches (maybe related to or at least
inspired through work from the
area of Frequent Itemset Mining) are promising to detect at least
the most frequent atoms automatically.
:param ngrams: list of n-grams to be encoded
:param keys: secret key for blake2 as bytes
:param ks: ks[i] is k value to use for ngram[i]
:param l: length of the output bitarray (has to be a power of 2)
:param encoding: the encoding to use when turning the ngrams to bytes
:return: bitarray of length l with the bits set which correspond to the
encoding of the ngrams | entailment |
def hashing_function_from_properties(
        fhp  # type: FieldHashingProperties
):
    # type: (...) -> Callable[[Iterable[str], Sequence[bytes], Sequence[int], int, str], bitarray]
    """ Get the hashing function for this field
    :param fhp: hashing properties for this field
    :return: the hashing function
    :raises ValueError: if the hash type is not recognised
    """
    if fhp.hash_type == 'doubleHash':
        # The non-singular variant guards against h2 == 0 mod l.
        return (double_hash_encode_ngrams_non_singular
                if fhp.prevent_singularity
                else double_hash_encode_ngrams)
    if fhp.hash_type == 'blakeHash':
        return blake_encode_ngrams
    raise ValueError("Unsupported hash type '{}'".format(fhp.hash_type))
:param fhp: hashing properties for this field
:return: the hashing function | entailment |
def fold_xor(bloomfilter,  # type: bitarray
             folds  # type: int
             ):
    # type: (...) -> bitarray
    """ Performs XOR folding on a Bloom filter.
    If the length of the original Bloom filter is n and we perform
    r folds, then the length of the resulting filter is n / 2 ** r.
    :param bloomfilter: Bloom filter to fold
    :param folds: number of folds
    :return: folded bloom filter
    :raises ValueError: if the filter length is not divisible by 2 ** folds
    """
    if len(bloomfilter) % 2 ** folds != 0:
        msg = ('The length of the bloom filter is {length}. It is not '
               'divisible by 2 ** {folds}, so it cannot be folded {folds} '
               'times.'
               .format(length=len(bloomfilter), folds=folds))
        raise ValueError(msg)
    for _ in range(folds):
        # XOR the first half of the filter with the second half.
        half = len(bloomfilter) // 2
        bloomfilter = bloomfilter[:half] ^ bloomfilter[half:]
    return bloomfilter
If the length of the original Bloom filter is n and we perform
r folds, then the length of the resulting filter is n / 2 ** r.
:param bloomfilter: Bloom filter to fold
:param folds: number of folds
:return: folded bloom filter | entailment |
def crypto_bloom_filter(record,  # type: Sequence[Text]
                        tokenizers,  # type: List[Callable[[Text, Optional[Text]], Iterable[Text]]]
                        schema,  # type: Schema
                        keys  # type: Sequence[Sequence[bytes]]
                        ):
    # type: (...) -> Tuple[bitarray, Text, int]
    """ Computes the composite Bloom filter encoding of a record.
    Using the method from
    http://www.record-linkage.de/-download=wp-grlc-2011-02.pdf
    :param record: plaintext record tuple. E.g. (index, name, dob, gender)
    :param tokenizers: A list of tokenizers. A tokenizer is a function that
        returns tokens from a string.
    :param schema: Schema
    :param keys: Keys for the hash functions as a tuple of lists of bytes.
    :return: 3-tuple:
        - bloom filter for record as a bitarray
        - first element of record (usually an index)
        - number of bits set in the bloomfilter
    """
    # Hash into the unfolded length; XOR folding reduces it back to schema.l.
    hash_l = schema.l * 2 ** schema.xor_folds
    bloomfilter = bitarray(hash_l)
    bloomfilter.setall(False)
    for (entry, tokenize, field, key) \
            in zip(record, tokenizers, schema.fields, keys):
        fhp = field.hashing_properties
        if fhp:
            ngrams = list(tokenize(field.format_value(entry)))
            hash_function = hashing_function_from_properties(fhp)
            bloomfilter |= hash_function(ngrams, key,
                                         fhp.ks(len(ngrams)),
                                         hash_l, fhp.encoding)
    bloomfilter = fold_xor(bloomfilter, schema.xor_folds)
    # Count the set bits exactly once (the previous version stored two
    # unused intermediate counts and then counted a third time).
    return bloomfilter, record[0], bloomfilter.count()
Using the method from
http://www.record-linkage.de/-download=wp-grlc-2011-02.pdf
:param record: plaintext record tuple. E.g. (index, name, dob, gender)
:param tokenizers: A list of tokenizers. A tokenizer is a function that
returns tokens from a string.
:param schema: Schema
:param keys: Keys for the hash functions as a tuple of lists of bytes.
:return: 3-tuple:
- bloom filter for record as a bitarray
- first element of record (usually an index)
- number of bits set in the bloomfilter | entailment |
def stream_bloom_filters(dataset,  # type: Iterable[Sequence[Text]]
                         keys,  # type: Sequence[Sequence[bytes]]
                         schema  # type: Schema
                         ):
    # type: (...) -> Iterable[Tuple[bitarray, Text, int]]
    """ Compute composite Bloom filters (CLKs) for every record in an
        iterable dataset.

        :param dataset: An iterable of indexable records.
        :param schema: An instantiated Schema instance
        :param keys: A tuple of two lists of secret keys used in the HMAC.
        :return: Generator yielding bloom filters as 3-tuples
    """
    # Build one tokenizer per schema field eagerly, up front; only the
    # per-record hashing below is deferred until iteration.
    tokenizers = [tokenizer.get_tokenizer(field.hashing_properties)
                  for field in schema.fields]
    return map(
        lambda record: crypto_bloom_filter(record, tokenizers, schema, keys),
        dataset)
iterable dataset.
:param dataset: An iterable of indexable records.
:param schema: An instantiated Schema instance
:param keys: A tuple of two lists of secret keys used in the HMAC.
:return: Generator yielding bloom filters as 3-tuples | entailment |
def re_compile_full(pattern, flags=0):
    # type: (AnyStr, int) -> Pattern
    """ Create compiled regular expression such that it matches the
        entire string. Calling re.match on the output of this function
        is equivalent to calling re.fullmatch on its input.

        This is needed to support Python 2. (On Python 3, we would just
        call re.fullmatch.)
        Kudos: https://stackoverflow.com/a/30212799

        :param pattern: The pattern to compile.
        :param flags: Regular expression flags. Refer to Python
            documentation.

        :returns: A compiled regular expression.
    """
    # Don't worry, this short-circuits.
    assert type(pattern) is str or type(pattern) is unicode  # type: ignore
    # Wrap in a non-capturing group and anchor at end-of-string so that
    # re.match behaves like re.fullmatch.
    anchored = '(?:' + pattern + ')\\Z'
    return re.compile(anchored, flags=flags)
entire string. Calling re.match on the output of this function
is equivalent to calling re.fullmatch on its input.
This is needed to support Python 2. (On Python 3, we would just
call re.fullmatch.)
Kudos: https://stackoverflow.com/a/30212799
:param pattern: The pattern to compile.
:param flags: Regular expression flags. Refer to Python
documentation.
:returns: A compiled regular expression. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.