code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
|---|---|---|---|---|---|
A1, A2, A3 = [], [], [] # set up lists for rotation vector
# put dec inc in direction list and set length to unity
Dir = [dec, inc, 1.]
X = dir2cart(Dir) # get cartesian coordinates
#
# set up rotation matrix
#
A1 = dir2cart([az, pl, 1.])
A2 = dir2cart([az + 90., 0, 1.])
A3 = dir2cart([az - 180., 90. - pl, 1.])
#
# do rotation
#
xp = A1[0] * X[0] + A2[0] * X[1] + A3[0] * X[2]
yp = A1[1] * X[0] + A2[1] * X[1] + A3[1] * X[2]
zp = A1[2] * X[0] + A2[2] * X[1] + A3[2] * X[2]
#
# transform back to dec,inc
#
Dir_geo = cart2dir([xp, yp, zp])
return Dir_geo[0], Dir_geo[1]
|
def dogeo(dec, inc, az, pl)
|
Rotates declination and inclination into geographic coordinates using the
azimuth and plunge of the X direction (lab arrow) of a specimen.
Parameters
----------
dec : declination in specimen coordinates
inc : inclination in specimen coordinates
Returns
-------
rotated_direction : tuple of declination, inclination in geographic coordinates
Examples
--------
>>> pmag.dogeo(0.0,90.0,0.0,45.5)
(180.0, 44.5)
| 2.999594
| 3.0034
| 0.998733
|
indat = indat.transpose()
# unpack input array into separate arrays
dec, inc, az, pl = indat[0], indat[1], indat[2], indat[3]
Dir = np.array([dec, inc]).transpose()
X = dir2cart(Dir).transpose() # get cartesian coordinates
N = np.size(dec)
A1 = dir2cart(np.array([az, pl, np.ones(N)]).transpose()).transpose()
A2 = dir2cart(
np.array([az + 90., np.zeros(N), np.ones(N)]).transpose()).transpose()
A3 = dir2cart(
np.array([az - 180., 90. - pl, np.ones(N)]).transpose()).transpose()
# do rotation
#
xp = A1[0] * X[0] + A2[0] * X[1] + A3[0] * X[2]
yp = A1[1] * X[0] + A2[1] * X[1] + A3[1] * X[2]
zp = A1[2] * X[0] + A2[2] * X[1] + A3[2] * X[2]
cart = np.array([xp, yp, zp]).transpose()
#
# transform back to dec,inc
#
Dir_geo = cart2dir(cart).transpose()
# send back declination and inclination arrays
return Dir_geo[0], Dir_geo[1]
|
def dogeo_V(indat)
|
Rotates declination and inclination into geographic coordinates using the
azimuth and plunge of the X direction (lab arrow) of a specimen.
Parameters
----------
indat: nested list of [dec, inc, az, pl] data
Returns
-------
rotated_directions : arrays of Declinations and Inclinations
| 2.568809
| 2.412502
| 1.06479
|
d, irot = dogeo(D, I, Dbar, 90. - Ibar)
drot = d - 180.
if drot < 360.:
drot = drot + 360.
if drot > 360.:
drot = drot - 360.
return drot, irot
|
def dodirot(D, I, Dbar, Ibar)
|
Rotate a direction (declination, inclination) by the difference between
dec=0 and inc = 90 and the provided desired mean direction
Parameters
----------
D : declination to be rotated
I : inclination to be rotated
Dbar : declination of desired mean
Ibar : inclination of desired mean
Returns
----------
drot, irot : rotated declination and inclination
| 3.193925
| 3.591453
| 0.889313
|
N = di_block.shape[0]
DipDir, Dip = np.ones(N, dtype=np.float).transpose(
)*(Dbar-180.), np.ones(N, dtype=np.float).transpose()*(90.-Ibar)
di_block = di_block.transpose()
data = np.array([di_block[0], di_block[1], DipDir, Dip]).transpose()
drot, irot = dotilt_V(data)
drot = (drot-180.) % 360. #
return np.column_stack((drot, irot))
|
def dodirot_V(di_block, Dbar, Ibar)
|
Rotate an array of dec/inc pairs to coordinate system with Dec,Inc as 0,90
Parameters
___________________
di_block : array of [[Dec1,Inc1],[Dec2,Inc2],....]
Dbar : declination of desired center
Ibar : inclination of desired center
Returns
__________
array of rotated decs and incs: [[rot_Dec1,rot_Inc1],[rot_Dec2,rot_Inc2],....]
| 4.275125
| 4.198705
| 1.018201
|
datablock, or_error, bed_error = [], 0, 0
orient = {}
orient["sample_dip"] = ""
orient["sample_azimuth"] = ""
orient['sample_description'] = ""
for rec in data:
if rec["er_sample_name"].lower() == s.lower():
if 'sample_orientation_flag' in list(rec.keys()) and rec['sample_orientation_flag'] == 'b':
orient['sample_orientation_flag'] = 'b'
return orient
if "magic_method_codes" in list(rec.keys()) and az_type != "0":
methods = rec["magic_method_codes"].replace(" ", "").split(":")
if az_type in methods and "sample_azimuth" in list(rec.keys()) and rec["sample_azimuth"] != "":
orient["sample_azimuth"] = float(rec["sample_azimuth"])
if "sample_dip" in list(rec.keys()) and rec["sample_dip"] != "":
orient["sample_dip"] = float(rec["sample_dip"])
if "sample_bed_dip_direction" in list(rec.keys()) and rec["sample_bed_dip_direction"] != "":
orient["sample_bed_dip_direction"] = float(
rec["sample_bed_dip_direction"])
if "sample_bed_dip" in list(rec.keys()) and rec["sample_bed_dip"] != "":
orient["sample_bed_dip"] = float(rec["sample_bed_dip"])
else:
if "sample_azimuth" in list(rec.keys()):
orient["sample_azimuth"] = float(rec["sample_azimuth"])
if "sample_dip" in list(rec.keys()):
orient["sample_dip"] = float(rec["sample_dip"])
if "sample_bed_dip_direction" in list(rec.keys()):
orient["sample_bed_dip_direction"] = float(
rec["sample_bed_dip_direction"])
if "sample_bed_dip" in list(rec.keys()):
orient["sample_bed_dip"] = float(rec["sample_bed_dip"])
if 'sample_description' in list(rec.keys()):
orient['sample_description'] = rec['sample_description']
if orient["sample_azimuth"] != "":
break
return orient
|
def find_samp_rec(s, data, az_type)
|
find the orientation info for samp s
| 1.826866
| 1.79985
| 1.01501
|
vdata, Dirdata, step_meth = [], [], []
tr0 = data[0][0] # set beginning treatment
data.append("Stop")
k, R = 1, 0
for i in range(k, len(data)):
Dirdata = []
if data[i][0] != tr0:
if i == k: # sample is unique
vdata.append(data[i - 1])
step_meth.append(" ")
else: # sample is not unique
for l in range(k - 1, i):
Dirdata.append([data[l][1], data[l][2], data[l][3]])
dir, R = vector_mean(Dirdata)
vdata.append([data[i - 1][0], dir[0], dir[1],
old_div(R, (i - k + 1)), '1', 'g'])
step_meth.append("DE-VM")
tr0 = data[i][0]
k = i + 1
if tr0 == "stop":
break
del data[-1]
return step_meth, vdata
|
def vspec(data)
|
Takes the vector mean of replicate measurements at a given step
| 5.04625
| 4.711691
| 1.071006
|
A = dir2cart([D1[0], D1[1], 1.])
B = dir2cart([D2[0], D2[1], 1.])
C = []
for i in range(3):
C.append(A[i] - B[i])
return cart2dir(C)
|
def Vdiff(D1, D2)
|
finds the vector difference between two directions D1,D2
| 2.426866
| 2.187819
| 1.109262
|
cart = np.array(cart)
rad = old_div(np.pi, 180.) # constant to convert degrees to radians
if len(cart.shape) > 1:
Xs, Ys, Zs = cart[:, 0], cart[:, 1], cart[:, 2]
else: # single vector
Xs, Ys, Zs = cart[0], cart[1], cart[2]
if np.iscomplexobj(Xs):
Xs = Xs.real
if np.iscomplexobj(Ys):
Ys = Ys.real
if np.iscomplexobj(Zs):
Zs = Zs.real
Rs = np.sqrt(Xs**2 + Ys**2 + Zs**2) # calculate resultant vector length
# calculate declination taking care of correct quadrants (arctan2) and
# making modulo 360.
Decs = (old_div(np.arctan2(Ys, Xs), rad)) % 360.
try:
# calculate inclination (converting to degrees) #
Incs = old_div(np.arcsin(old_div(Zs, Rs)), rad)
except:
print('trouble in cart2dir') # most likely division by zero somewhere
return np.zeros(3)
return np.array([Decs, Incs, Rs]).transpose()
|
def cart2dir(cart)
|
Converts a direction in cartesian coordinates into declination, inclinations
Parameters
----------
cart : input list of [x,y,z] or list of lists [[x1,y1,z1],[x2,y2,z2]...]
Returns
-------
direction_array : returns an array of [declination, inclination, intensity]
Examples
--------
>>> pmag.cart2dir([0,1,0])
array([ 90., 0., 1.])
| 2.956664
| 3.040677
| 0.97237
|
T = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
for row in X:
for k in range(3):
for l in range(3):
T[k][l] += row[k] * row[l]
return T
|
def Tmatrix(X)
|
gets the orientation matrix (T) from data in X
| 1.93683
| 1.962065
| 0.987139
|
ints = np.ones(len(d)).transpose(
) # get an array of ones to plug into dec,inc pairs
d = np.array(d)
rad = np.pi/180.
if len(d.shape) > 1: # array of vectors
decs, incs = d[:, 0] * rad, d[:, 1] * rad
if d.shape[1] == 3:
ints = d[:, 2] # take the given lengths
else: # single vector
decs, incs = np.array(float(d[0])) * rad, np.array(float(d[1])) * rad
if len(d) == 3:
ints = np.array(d[2])
else:
ints = np.array([1.])
cart = np.array([ints * np.cos(decs) * np.cos(incs), ints *
np.sin(decs) * np.cos(incs), ints * np.sin(incs)]).transpose()
return cart
|
def dir2cart(d)
|
Converts a list or array of vector directions in degrees (declination,
inclination) to an array of the direction in cartesian coordinates (x,y,z)
Parameters
----------
d : list or array of [dec,inc] or [dec,inc,intensity]
Returns
-------
cart : array of [x,y,z]
Examples
--------
>>> pmag.dir2cart([200,40,1])
array([-0.71984631, -0.26200263, 0.64278761])
| 2.813031
| 2.851522
| 0.986502
|
datablock = []
for rec in data:
if s == rec[0]:
datablock.append([rec[1], rec[2], rec[3], rec[4]])
return datablock
|
def findrec(s, data)
|
finds all the records belonging to s in data
| 3.099131
| 2.744668
| 1.129146
|
rad = old_div(np.pi, 180.)
D_out, I_out = [], []
dec, dip, alpha = dec * rad, dip * rad, alpha * rad
dec1 = dec + old_div(np.pi, 2.)
isign = 1
if dip != 0:
isign = (old_div(abs(dip), dip))
dip1 = (dip - isign * (old_div(np.pi, 2.)))
t = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
v = [0, 0, 0]
t[0][2] = np.cos(dec) * np.cos(dip)
t[1][2] = np.sin(dec) * np.cos(dip)
t[2][2] = np.sin(dip)
t[0][1] = np.cos(dec) * np.cos(dip1)
t[1][1] = np.sin(dec) * np.cos(dip1)
t[2][1] = np.sin(dip1)
t[0][0] = np.cos(dec1)
t[1][0] = np.sin(dec1)
t[2][0] = 0
for i in range(101):
psi = float(i) * np.pi / 50.
v[0] = np.sin(alpha) * np.cos(psi)
v[1] = np.sin(alpha) * np.sin(psi)
v[2] = np.sqrt(abs(1. - v[0]**2 - v[1]**2))
elli = [0, 0, 0]
for j in range(3):
for k in range(3):
elli[j] = elli[j] + t[j][k] * v[k]
Dir = cart2dir(elli)
D_out.append(Dir[0])
I_out.append(Dir[1])
return D_out, I_out
|
def circ(dec, dip, alpha)
|
function to calculate points on an circle about dec,dip with angle alpha
| 2.030206
| 2.023068
| 1.003528
|
namestring = ""
addmore = 1
while addmore:
scientist = input("Enter name - <Return> when done ")
if scientist != "":
namestring = namestring + ":" + scientist
else:
namestring = namestring[1:]
addmore = 0
return namestring
|
def getnames()
|
get mail names
| 5.207733
| 5.37372
| 0.969111
|
iday = 0
timedate = sundata["date"]
timedate = timedate.split(":")
year = int(timedate[0])
mon = int(timedate[1])
day = int(timedate[2])
hours = float(timedate[3])
min = float(timedate[4])
du = int(sundata["delta_u"])
hrs = hours - du
if hrs > 24:
day += 1
hrs = hrs - 24
if hrs < 0:
day = day - 1
hrs = hrs + 24
julian_day = julian(mon, day, year)
utd = old_div((hrs + old_div(min, 60.)), 24.)
greenwich_hour_angle, delta = gha(julian_day, utd)
H = greenwich_hour_angle + float(sundata["lon"])
if H > 360:
H = H - 360
lat = float(sundata["lat"])
if H > 90 and H < 270:
lat = -lat
# now do spherical trig to get azimuth to sun
lat = np.radians(lat)
delta = np.radians(delta)
H = np.radians(H)
ctheta = np.sin(lat) * np.sin(delta) + np.cos(lat) * \
np.cos(delta) * np.cos(H)
theta = np.arccos(ctheta)
beta = np.cos(delta) * np.sin(H) / np.sin(theta)
#
# check which beta
#
beta = np.degrees(np.arcsin(beta))
if delta < lat:
beta = 180 - beta
sunaz = 180 - beta
sunaz = (sunaz + float(sundata["shadow_angle"])) % 360. # mod 360
return sunaz
|
def dosundec(sundata)
|
returns the declination for a given set of suncompass data
Parameters
__________
sundata : dictionary with these keys:
date: time string with the format 'yyyy:mm:dd:hr:min'
delta_u: time to SUBTRACT from local time for Universal time
lat: latitude of location (negative for south)
lon: longitude of location (negative for west)
shadow_angle: shadow angle of the desired direction with respect to the sun.
Returns
________
sunaz : the declination of the desired direction wrt true north.
| 2.947732
| 2.718802
| 1.084203
|
rad = old_div(np.pi, 180.)
d = julian_day - 2451545.0 + f
L = 280.460 + 0.9856474 * d
g = 357.528 + 0.9856003 * d
L = L % 360.
g = g % 360.
# ecliptic longitude
lamb = L + 1.915 * np.sin(g * rad) + .02 * np.sin(2 * g * rad)
# obliquity of ecliptic
epsilon = 23.439 - 0.0000004 * d
# right ascension (in same quadrant as lambda)
t = (np.tan(old_div((epsilon * rad), 2)))**2
r = old_div(1, rad)
rl = lamb * rad
alpha = lamb - r * t * np.sin(2 * rl) + \
(old_div(r, 2)) * t * t * np.sin(4 * rl)
# alpha=mod(alpha,360.0)
# declination
delta = np.sin(epsilon * rad) * np.sin(lamb * rad)
delta = old_div(np.arcsin(delta), rad)
# equation of time
eqt = (L - alpha)
#
utm = f * 24 * 60
H = old_div(utm, 4) + eqt + 180
H = H % 360.0
return H, delta
|
def gha(julian_day, f)
|
returns greenwich hour angle
| 3.257622
| 3.219088
| 1.011971
|
ig = 15 + 31 * (10 + 12 * 1582)
if year == 0:
print("Julian no can do")
return
if year < 0:
year = year + 1
if mon > 2:
julian_year = year
julian_month = mon + 1
else:
julian_year = year - 1
julian_month = mon + 13
j1 = int(365.25 * julian_year)
j2 = int(30.6001 * julian_month)
j3 = day + 1720995
julian_day = j1 + j2 + j3
if day + 31 * (mon + 12 * year) >= ig:
jadj = int(0.01 * julian_year)
julian_day = julian_day + 2 - jadj + int(0.25 * jadj)
return julian_day
|
def julian(mon, day, year)
|
returns julian day
| 2.871113
| 2.796533
| 1.026669
|
keylist, OutRecs = [], []
for rec in Recs:
for key in list(rec.keys()):
if key not in keylist:
keylist.append(key)
for rec in Recs:
for key in keylist:
if key not in list(rec.keys()):
rec[key] = ""
OutRecs.append(rec)
return OutRecs, keylist
|
def fillkeys(Recs)
|
reconciles keys of dictionaries within Recs.
| 2.032717
| 1.935994
| 1.049961
|
R, Xbar, X, fpars = 0, [0, 0, 0], [], {}
N = len(data)
if N < 2:
return fpars
X = dir2cart(data)
for i in range(len(X)):
for c in range(3):
Xbar[c] += X[i][c]
for c in range(3):
R += Xbar[c]**2
R = np.sqrt(R)
for c in range(3):
Xbar[c] = Xbar[c]/R
dir = cart2dir(Xbar)
fpars["dec"] = dir[0]
fpars["inc"] = dir[1]
fpars["n"] = N
fpars["r"] = R
if N != R:
k = (N - 1.) / (N - R)
fpars["k"] = k
csd = 81./np.sqrt(k)
else:
fpars['k'] = 'inf'
csd = 0.
b = 20.**(1./(N - 1.)) - 1
a = 1 - b * (N - R) / R
if a < -1:
a = -1
a95 = np.degrees(np.arccos(a))
fpars["alpha95"] = a95
fpars["csd"] = csd
if a < 0:
fpars["alpha95"] = 180.0
return fpars
|
def fisher_mean(data)
|
Calculates the Fisher mean and associated parameter from a di_block
Parameters
----------
di_block : a nested list of [dec,inc] or [dec,inc,intensity]
Returns
-------
fpars : dictionary containing the Fisher mean and statistics
dec : mean declination
inc : mean inclination
r : resultant vector length
n : number of data points
k : Fisher k value
csd : Fisher circular standard deviation
alpha95 : Fisher circle of 95% confidence
| 3.430279
| 3.005524
| 1.141324
|
N, mean, d = len(data), 0., 0.
if N < 1:
return "", ""
if N == 1:
return data[0], 0
for j in range(N):
mean += old_div(data[j], float(N))
for j in range(N):
d += (data[j] - mean)**2
stdev = np.sqrt(d * (1./(float(N - 1))))
return mean, stdev
|
def gausspars(data)
|
calculates gaussian statistics for data
| 3.260012
| 3.123548
| 1.043689
|
W, N, mean, d = 0, len(data), 0, 0
if N < 1:
return "", ""
if N == 1:
return data[0][0], 0
for x in data:
W += x[1] # sum of the weights
for x in data:
mean += old_div((float(x[1]) * float(x[0])), float(W))
for x in data:
d += (old_div(float(x[1]), float(W))) * (float(x[0]) - mean)**2
stdev = np.sqrt(d * (old_div(1., (float(N - 1)))))
return mean, stdev
|
def weighted_mean(data)
|
calculates weighted mean of data
| 3.154175
| 3.065279
| 1.029001
|
FisherByPoles = {}
DIblock, nameblock, locblock = [], [], []
for rec in data:
if 'dec' in list(rec.keys()) and 'inc' in list(rec.keys()):
# collect data for fisher calculation
DIblock.append([float(rec["dec"]), float(rec["inc"])])
else:
continue
if 'name' in list(rec.keys()):
nameblock.append(rec['name'])
else:
nameblock.append("")
if 'loc' in list(rec.keys()):
locblock.append(rec['loc'])
else:
locblock.append("")
ppars = doprinc(np.array(DIblock)) # get principal directions
# choose the northerly declination principe component ("normal")
reference_DI = [ppars['dec'], ppars['inc']]
# make reference direction in northern hemisphere
if reference_DI[0] > 90 and reference_DI[0] < 270:
reference_DI[0] = (reference_DI[0] + 180.) % 360
reference_DI[1] = reference_DI[1] * -1.
nDIs, rDIs, all_DI, npars, rpars = [], [], [], [], []
nlist, rlist, alllist = "", "", ""
nloclist, rloclist, allloclist = "", "", ""
for k in range(len(DIblock)):
if angle([DIblock[k][0], DIblock[k][1]], reference_DI) > 90.:
rDIs.append(DIblock[k])
rlist = rlist + ":" + nameblock[k]
if locblock[k] not in rloclist:
rloclist = rloclist + ":" + locblock[k]
all_DI.append([(DIblock[k][0] + 180.) % 360., -1. * DIblock[k][1]])
alllist = alllist + ":" + nameblock[k]
if locblock[k] not in allloclist:
allloclist = allloclist + ":" + locblock[k]
else:
nDIs.append(DIblock[k])
nlist = nlist + ":" + nameblock[k]
if locblock[k] not in nloclist:
nloclist = nloclist + ":" + locblock[k]
all_DI.append(DIblock[k])
alllist = alllist + ":" + nameblock[k]
if locblock[k] not in allloclist:
allloclist = allloclist + ":" + locblock[k]
for mode in ['A', 'B', 'All']:
if mode == 'A' and len(nDIs) > 2:
fpars = fisher_mean(nDIs)
fpars['sites'] = nlist.strip(':')
fpars['locs'] = nloclist.strip(':')
FisherByPoles[mode] = fpars
elif mode == 'B' and len(rDIs) > 2:
fpars = fisher_mean(rDIs)
fpars['sites'] = rlist.strip(':')
fpars['locs'] = rloclist.strip(':')
FisherByPoles[mode] = fpars
elif mode == 'All' and len(all_DI) > 2:
fpars = fisher_mean(all_DI)
fpars['sites'] = alllist.strip(':')
fpars['locs'] = allloclist.strip(':')
FisherByPoles[mode] = fpars
return FisherByPoles
|
def fisher_by_pol(data)
|
input: as in dolnp (list of dictionaries with 'dec' and 'inc')
description: do fisher mean after splitting data into two polarity domains.
output: three dictionaries:
'A'= polarity 'A'
'B = polarity 'B'
'ALL'= switching polarity of 'B' directions, and calculate fisher mean of all data
code modified from eqarea_ell.py b rshaar 1/23/2014
| 2.21313
| 2.133712
| 1.03722
|
if len(Data) == 0:
print("This function requires input Data have at least 1 entry")
return {}
if len(Data) == 1:
ReturnData = {}
ReturnData["dec"] = Data[0]['dir_dec']
ReturnData["inc"] = Data[0]['dir_inc']
ReturnData["n_total"] = '1'
if "DE-BFP" in Data[0]['method_codes']:
ReturnData["n_lines"] = '0'
ReturnData["n_planes"] = '1'
else:
ReturnData["n_planes"] = '0'
ReturnData["n_lines"] = '1'
ReturnData["alpha95"] = ""
ReturnData["R"] = ""
ReturnData["K"] = ""
return ReturnData
else:
LnpData = []
for n, d in enumerate(Data):
LnpData.append({})
LnpData[n]['dec'] = d['dir_dec']
LnpData[n]['inc'] = d['dir_inc']
LnpData[n]['tilt_correction'] = d['dir_tilt_correction']
if 'method_codes' in list(d.keys()):
if "DE-BFP" in d['method_codes']:
LnpData[n]['dir_type'] = 'p'
else:
LnpData[n]['dir_type'] = 'l'
# get a sample average from all specimens
ReturnData = dolnp(LnpData, 'dir_type')
return ReturnData
|
def dolnp3_0(Data)
|
DEPRECATED!! USE dolnp()
Desciption: takes a list of dicts with the controlled vocabulary of 3_0 and calls dolnp on them after reformating for compatibility.
Parameters
__________
Data : nested list of dictionarys with keys
dir_dec
dir_inc
dir_tilt_correction
method_codes
Returns
-------
ReturnData : dictionary with keys
dec : fisher mean dec of data in Data
inc : fisher mean inc of data in Data
n_lines : number of directed lines [method_code = DE-BFL or DE-FM]
n_planes : number of best fit planes [method_code = DE-BFP]
alpha95 : fisher confidence circle from Data
R : fisher R value of Data
K : fisher k value of Data
Effects
prints to screen in case of no data
| 3.214136
| 2.334038
| 1.377071
|
lam, X = 0, []
for k in range(3):
lam = lam + V[k] * L[k]
beta = np.sqrt(1. - lam**2)
for k in range(3):
X.append((old_div((V[k] - lam * L[k]), beta)))
return X
|
def vclose(L, V)
|
gets the closest vector
| 3.866004
| 3.647583
| 1.059881
|
U, XV = E[:], [] # make a copy of E to prevent mutation
for pole in L:
XV.append(vclose(pole, V)) # get some points on the great circle
for c in range(3):
U[c] = U[c] + XV[-1][c]
# iterate to find best agreement
angle_tol = 1.
while angle_tol > 0.1:
angles = []
for k in range(n_planes):
for c in range(3):
U[c] = U[c] - XV[k][c]
R = np.sqrt(U[0]**2 + U[1]**2 + U[2]**2)
for c in range(3):
V[c] = old_div(U[c], R)
XX = vclose(L[k], V)
ang = XX[0] * XV[k][0] + XX[1] * XV[k][1] + XX[2] * XV[k][2]
angles.append(np.arccos(ang) * 180. / np.pi)
for c in range(3):
XV[k][c] = XX[c]
U[c] = U[c] + XX[c]
amax = -1
for ang in angles:
if ang > amax:
amax = ang
angle_tol = amax
return XV
|
def calculate_best_fit_vectors(L, E, V, n_planes)
|
Calculates the best fit vectors for a set of plane interpretations used in fisher mean calculations
@param: L - a list of the "EL, EM, EN" array of MM88 or the cartisian form of dec and inc of the plane interpretation
@param: E - the sum of the cartisian coordinates of all the line fits to be used in the mean
@param: V - inital direction to start iterating from to get plane best fits
@returns: nested list of n_plane by 3 dimension where the 3 are the cartisian dimension of the best fit vector
| 3.199331
| 3.290783
| 0.97221
|
dec_key, inc_key, meth_key = 'dec', 'inc', 'magic_method_codes' # data model 2.5
if 'dir_dec' in data[0].keys(): # this is data model 3.0
dec_key, inc_key, meth_key = 'dir_dec', 'dir_inc', 'method_codes'
n_lines, n_planes = 0, 0
L, fdata = [], []
E = [0, 0, 0]
# sort data into lines and planes and collect cartesian coordinates
for rec in data:
cart = dir2cart([float(rec[dec_key]), float(rec[inc_key])])[0]
if direction_type_key in list(rec.keys()):
if rec[direction_type_key] == 'p': # this is a pole to a plane
n_planes += 1
L.append(cart) # this is the "EL, EM, EN" array of MM88
else: # this is a line
n_lines += 1
# collect data for fisher calculation
fdata.append([float(rec[dec_key]), float(rec[inc_key]), 1.])
E[0] += cart[0]
E[1] += cart[1]
E[2] += cart[2]
elif 'method_codes' in list(rec.keys()):
if "DE-BFP" in rec[meth_key]: # this is a pole to a plane
n_planes += 1
L.append(cart) # this is the "EL, EM, EN" array of MM88
else: # this is a line
n_lines += 1
# collect data for fisher calculation
fdata.append([rec[dec_key], rec[inc_key], 1.])
E[0] += cart[0]
E[1] += cart[1]
E[2] += cart[2]
elif meth_key in list(rec.keys()):
if "DE-BFP" in rec[meth_key]: # this is a pole to a plane
n_planes += 1
L.append(cart) # this is the "EL, EM, EN" array of MM88
else: # this is a line
n_lines += 1
# collect data for fisher calculation
fdata.append([rec[dec_key], rec[inc_key], 1.])
E[0] += cart[0]
E[1] += cart[1]
E[2] += cart[2]
else:
# EVERYTHING IS A LINE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
n_lines += 1
# collect data for fisher calculation
fdata.append([rec[dec_key], rec[inc_key], 1.])
E[0] += cart[0]
E[1] += cart[1]
E[2] += cart[2]
return fdata, n_lines, L, n_planes, E
|
def process_data_for_mean(data, direction_type_key)
|
takes list of dicts with dec and inc as well as direction_type if possible or method_codes and sorts the data into lines and planes and process it for fisher means
@param: data - list of dicts with dec inc and some manner of PCA type info
@param: direction_type_key - key that indicates the direction type variable in the dictionaries of data
@return: tuple with values - (
list of lists with [dec, inc, 1.] for all lines
number of line
list of lists with [EL,EM,EN] of all planes
number of planes
list of sum of the cartezian components of all lines
)
| 2.353179
| 2.111053
| 1.114694
|
# changed radius of the earth from 3.367e6 3/12/2010
fact = ((6.371e6)**3) * 1e7
colat = np.radians(90. - lat)
return fact * B / (np.sqrt(1 + 3 * (np.cos(colat)**2)))
|
def b_vdm(B, lat)
|
Converts a magnetic field value (input in units of tesla) to a virtual
dipole moment (VDM) or a virtual axial dipole moment (VADM); output
in units of Am^2)
Parameters
----------
B: local magnetic field strength in tesla
lat: latitude of site in degrees
Returns
----------
V(A)DM in units of Am^2
Examples
--------
>>> pmag.b_vdm(33e-6,22)*1e-21
71.58815974511788
| 8.018074
| 8.976389
| 0.89324
|
rad = old_div(np.pi, 180.)
# changed radius of the earth from 3.367e6 3/12/2010
fact = ((6.371e6)**3) * 1e7
colat = (90. - lat) * rad
return vdm * (np.sqrt(1 + 3 * (np.cos(colat)**2))) / fact
|
def vdm_b(vdm, lat)
|
Converts a virtual dipole moment (VDM) or a virtual axial dipole moment
(VADM; input in units of Am^2) to a local magnetic field value (output in
units of tesla)
Parameters
----------
vdm : V(A)DM in units of Am^2
lat: latitude of site in degrees
Returns
-------
B: local magnetic field strength in tesla
| 7.208934
| 7.402798
| 0.973812
|
f = open(file, "w")
data.sort()
for j in range(len(data)):
y = old_div(float(j), float(len(data)))
out = str(data[j]) + ' ' + str(y) + '\n'
f.write(out)
f.close()
|
def cdfout(data, file)
|
spits out the cdf for data to file
| 2.627252
| 2.609379
| 1.00685
|
control, X, bpars = [], [], {}
N = len(di_block)
if N < 2:
return bpars
#
# get cartesian coordinates
#
for rec in di_block:
X.append(dir2cart([rec[0], rec[1], 1.]))
#
# put in T matrix
#
T = np.array(Tmatrix(X))
t, V = tauV(T)
w1, w2, w3 = t[2], t[1], t[0]
k1, k2 = binglookup(w1, w2)
PDir = cart2dir(V[0])
EDir = cart2dir(V[1])
ZDir = cart2dir(V[2])
if PDir[1] < 0:
PDir[0] += 180.
PDir[1] = -PDir[1]
PDir[0] = PDir[0] % 360.
bpars["dec"] = PDir[0]
bpars["inc"] = PDir[1]
bpars["Edec"] = EDir[0]
bpars["Einc"] = EDir[1]
bpars["Zdec"] = ZDir[0]
bpars["Zinc"] = ZDir[1]
bpars["n"] = N
#
# now for Bingham ellipses.
#
fac1, fac2 = -2 * N * (k1) * (w3 - w1), -2 * N * (k2) * (w3 - w2)
sig31, sig32 = np.sqrt(old_div(1., fac1)), np.sqrt(old_div(1., fac2))
bpars["Zeta"], bpars["Eta"] = 2.45 * sig31 * \
180. / np.pi, 2.45 * sig32 * 180. / np.pi
return bpars
|
def dobingham(di_block)
|
Calculates the Bingham mean and associated statistical parameters from
directions that are input as a di_block
Parameters
----------
di_block : a nested list of [dec,inc] or [dec,inc,intensity]
Returns
-------
bpars : dictionary containing the Bingham mean and associated statistics
dictionary keys
dec : mean declination
inc : mean inclination
n : number of datapoints
Eta : major ellipse
Edec : declination of major ellipse axis
Einc : inclination of major ellipse axis
Zeta : minor ellipse
Zdec : declination of minor ellipse axis
Zinc : inclination of minor ellipse axis
| 3.769891
| 3.217934
| 1.171525
|
if inc < 0:
inc = -inc
dec = (dec + 180.) % 360.
return dec, inc
|
def doflip(dec, inc)
|
flips lower hemisphere data to upper hemisphere
| 3.816916
| 2.982413
| 1.279808
|
rad, SCOi, SSOi = old_div(np.pi, 180.), 0., 0. # some definitions
abinc = []
for i in inc:
abinc.append(abs(i))
MI, std = gausspars(abinc) # get mean inc and standard deviation
fpars = {}
N = len(inc) # number of data
fpars['n'] = N
fpars['ginc'] = MI
if MI < 30:
fpars['inc'] = MI
fpars['k'] = 0
fpars['alpha95'] = 0
fpars['csd'] = 0
fpars['r'] = 0
print('WARNING: mean inc < 30, returning gaussian mean')
return fpars
for i in inc: # sum over all incs (but take only positive inc)
coinc = (90. - abs(i)) * rad
SCOi += np.cos(coinc)
SSOi += np.sin(coinc)
Oo = (90.0 - MI) * rad # first guess at mean
SCFlag = -1 # sign change flag
epsilon = float(N) * np.cos(Oo) # RHS of zero equations
epsilon += (np.sin(Oo)**2 - np.cos(Oo)**2) * SCOi
epsilon -= 2. * np.sin(Oo) * np.cos(Oo) * SSOi
while SCFlag < 0: # loop until cross zero
if MI > 0:
Oo -= (.01 * rad) # get steeper
if MI < 0:
Oo += (.01 * rad) # get shallower
prev = epsilon
epsilon = float(N) * np.cos(Oo) # RHS of zero equations
epsilon += (np.sin(Oo)**2. - np.cos(Oo)**2.) * SCOi
epsilon -= 2. * np.sin(Oo) * np.cos(Oo) * SSOi
if abs(epsilon) > abs(prev):
MI = -1 * MI # reverse direction
if epsilon * prev < 0:
SCFlag = 1 # changed sign
S, C = 0., 0. # initialize for summation
for i in inc:
coinc = (90. - abs(i)) * rad
S += np.sin(Oo - coinc)
C += np.cos(Oo - coinc)
k = old_div((N - 1.), (2. * (N - C)))
Imle = 90. - (old_div(Oo, rad))
fpars["inc"] = Imle
fpars["r"], R = 2. * C - N, 2 * C - N
fpars["k"] = k
f = fcalc(2, N - 1)
a95 = 1. - (0.5) * (old_div(S, C))**2 - (old_div(f, (2. * C * k)))
# b=20.**(1./(N-1.)) -1.
# a=1.-b*(N-R)/R
# a95=np.arccos(a)*180./np.pi
csd = old_div(81., np.sqrt(k))
fpars["alpha95"] = a95
fpars["csd"] = csd
return fpars
|
def doincfish(inc)
|
gets fisher mean inc from inc only data
input: list of inclination values
output: dictionary of
'n' : number of inclination values supplied
'ginc' : gaussian mean of inclinations
'inc' : estimated Fisher mean
'r' : estimated Fisher R value
'k' : estimated Fisher kappa
'alpha95' : estimated fisher alpha_95
'csd' : estimated circular standard deviation
| 4.481412
| 3.964477
| 1.130392
|
ppars = {}
rad = old_div(np.pi, 180.)
X = dir2cart(data)
# for rec in data:
# dir=[]
# for c in rec: dir.append(c)
# cart= (dir2cart(dir))
# X.append(cart)
# put in T matrix
#
T = np.array(Tmatrix(X))
#
# get sorted evals/evects
#
t, V = tauV(T)
Pdir = cart2dir(V[0])
ppars['Edir'] = cart2dir(V[1]) # elongation direction
dec, inc = doflip(Pdir[0], Pdir[1])
ppars['dec'] = dec
ppars['inc'] = inc
ppars['N'] = len(data)
ppars['tau1'] = t[0]
ppars['tau2'] = t[1]
ppars['tau3'] = t[2]
Pdir = cart2dir(V[1])
dec, inc = doflip(Pdir[0], Pdir[1])
ppars['V2dec'] = dec
ppars['V2inc'] = inc
Pdir = cart2dir(V[2])
dec, inc = doflip(Pdir[0], Pdir[1])
ppars['V3dec'] = dec
ppars['V3inc'] = inc
return ppars
|
def doprinc(data)
|
Gets principal components from data in form of a list of [dec,inc] data.
Parameters
----------
data : nested list of dec, inc directions
Returns
-------
ppars : dictionary with the principal components
dec : principal directiion declination
inc : principal direction inclination
V2dec : intermediate eigenvector declination
V2inc : intermediate eigenvector inclination
V3dec : minor eigenvector declination
V3inc : minor eigenvector inclination
tau1 : major eigenvalue
tau2 : intermediate eigenvalue
tau3 : minor eigenvalue
N : number of points
Edir : elongation direction [dec, inc, length]
| 3.681247
| 2.924759
| 1.258649
|
# gets user input of Rotation pole lat,long, omega for plate and converts
# to radians
E = dir2cart([EP[1], EP[0], 1.]) # EP is pole lat,lon omega
omega = EP[2] * np.pi / 180. # convert to radians
RLats, RLons = [], []
for k in range(len(Lats)):
if Lats[k] <= 90.: # peel off delimiters
# converts to rotation pole to cartesian coordinates
A = dir2cart([Lons[k], Lats[k], 1.])
# defines cartesian coordinates of the pole A
R = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
R[0][0] = E[0] * E[0] * (1 - np.cos(omega)) + np.cos(omega)
R[0][1] = E[0] * E[1] * (1 - np.cos(omega)) - E[2] * np.sin(omega)
R[0][2] = E[0] * E[2] * (1 - np.cos(omega)) + E[1] * np.sin(omega)
R[1][0] = E[1] * E[0] * (1 - np.cos(omega)) + E[2] * np.sin(omega)
R[1][1] = E[1] * E[1] * (1 - np.cos(omega)) + np.cos(omega)
R[1][2] = E[1] * E[2] * (1 - np.cos(omega)) - E[0] * np.sin(omega)
R[2][0] = E[2] * E[0] * (1 - np.cos(omega)) - E[1] * np.sin(omega)
R[2][1] = E[2] * E[1] * (1 - np.cos(omega)) + E[0] * np.sin(omega)
R[2][2] = E[2] * E[2] * (1 - np.cos(omega)) + np.cos(omega)
# sets up rotation matrix
Ap = [0, 0, 0]
for i in range(3):
for j in range(3):
Ap[i] += R[i][j] * A[j]
# does the rotation
Prot = cart2dir(Ap)
RLats.append(Prot[1])
RLons.append(Prot[0])
else: # preserve delimiters
RLats.append(Lats[k])
RLons.append(Lons[k])
return RLats, RLons
|
def pt_rot(EP, Lats, Lons)
|
Rotates points on a globe by an Euler pole rotation using method of
Cox and Hart 1986, box 7-3.
Parameters
----------
EP : Euler pole list [lat,lon,angle]
Lats : list of latitudes of points to be rotated
Lons : list of longitudes of points to be rotated
Returns
_________
RLats : rotated latitudes
RLons : rotated longitudes
| 2.037578
| 2.020707
| 1.008349
|
data = []
f = open(infile, "r")
for line in f.readlines():
tmp = line.split()
rec = (tmp[0], float(tmp[cols[0]]), float(tmp[cols[1]]), float(tmp[cols[2]]),
float(tmp[cols[3]]))
data.append(rec)
f.close()
return data
|
def dread(infile, cols)
|
reads in specimen, tr, dec, inc int into data[]. position of
tr, dec, inc, int determined by cols[]
| 2.110231
| 2.034539
| 1.037203
|
k = np.array(k)
if len(k.shape) != 0:
n = k.shape[0]
else:
n = 1
R1 = random.random(size=n)
R2 = random.random(size=n)
L = np.exp(-2 * k)
a = R1 * (1 - L) + L
fac = np.sqrt(-np.log(a)/(2 * k))
inc = 90. - np.degrees(2 * np.arcsin(fac))
dec = np.degrees(2 * np.pi * R2)
if n == 1:
return dec[0], inc[0] # preserve backward compatibility
else:
return dec, inc
|
def fshdev(k)
|
Generate a random draw from a Fisher distribution with mean declination
of 0 and inclination of 90 with a specified kappa.
Parameters
----------
k : kappa (precision parameter) of the distribution
k can be a single number or an array of values
Returns
----------
dec, inc : declination and inclination of random Fisher distribution draw
if k is an array, dec, inc are returned as arrays, otherwise, single values
| 3.529949
| 3.268561
| 1.079971
|
lmax = data[-1][0]
Ls = list(range(1, lmax+1))
Rs = []
recno = 0
for l in Ls:
pow = 0
for m in range(0, l + 1):
pow += (l + 1) * ((1e-3 * data[recno][2])
** 2 + (1e-3 * data[recno][3])**2)
recno += 1
Rs.append(pow)
return Ls, Rs
|
def lowes(data)
|
gets Lowe's power spectrum from gauss coefficients
Parameters
_________
data : nested list of [[l,m,g,h],...] as from pmag.unpack()
Returns
_______
Ls : list of degrees (l)
Rs : power at degree l
| 4.097456
| 3.29888
| 1.242075
|
rad = old_div(np.pi, 180.)
paleo_lat = old_div(np.arctan(0.5 * np.tan(inc * rad)), rad)
return paleo_lat
|
def magnetic_lat(inc)
|
returns magnetic latitude from inclination
| 3.565226
| 3.433955
| 1.038228
|
Dir = np.zeros((3), 'f')
Dir[0] = InDir[0]
Dir[1] = InDir[1]
Dir[2] = 1.
chi, chi_inv = check_F(AniSpec)
if chi[0][0] == 1.:
return Dir # isotropic
X = dir2cart(Dir)
M = np.array(X)
H = np.dot(M, chi_inv)
return cart2dir(H)
|
def Dir_anis_corr(InDir, AniSpec)
|
takes the 6 element 's' vector and the Dec,Inc 'InDir' data,
performs simple anisotropy correction. returns corrected Dec, Inc
| 4.832476
| 5.310118
| 0.910051
|
AniSpecRec = {}
for key in list(PmagSpecRec.keys()):
AniSpecRec[key] = PmagSpecRec[key]
Dir = np.zeros((3), 'f')
Dir[0] = float(PmagSpecRec["specimen_dec"])
Dir[1] = float(PmagSpecRec["specimen_inc"])
Dir[2] = float(PmagSpecRec["specimen_int"])
# check if F test passes! if anisotropy_sigma available
chi, chi_inv = check_F(AniSpec)
if chi[0][0] == 1.: # isotropic
cDir = [Dir[0], Dir[1]] # no change
newint = Dir[2]
else:
X = dir2cart(Dir)
M = np.array(X)
H = np.dot(M, chi_inv)
cDir = cart2dir(H)
Hunit = [old_div(H[0], cDir[2]), old_div(H[1], cDir[2]), old_div(
H[2], cDir[2])] # unit vector parallel to Banc
Zunit = [0, 0, -1.] # unit vector parallel to lab field
Hpar = np.dot(chi, Hunit) # unit vector applied along ancient field
Zpar = np.dot(chi, Zunit) # unit vector applied along lab field
# intensity of resultant vector from ancient field
HparInt = cart2dir(Hpar)[2]
# intensity of resultant vector from lab field
ZparInt = cart2dir(Zpar)[2]
newint = Dir[2] * ZparInt / HparInt
if cDir[0] - Dir[0] > 90:
cDir[1] = -cDir[1]
cDir[0] = (cDir[0] - 180.) % 360.
AniSpecRec["specimen_dec"] = '%7.1f' % (cDir[0])
AniSpecRec["specimen_inc"] = '%7.1f' % (cDir[1])
AniSpecRec["specimen_int"] = '%9.4e' % (newint)
AniSpecRec["specimen_correction"] = 'c'
if 'magic_method_codes' in list(AniSpecRec.keys()):
methcodes = AniSpecRec["magic_method_codes"]
else:
methcodes = ""
if methcodes == "":
methcodes = "DA-AC-" + AniSpec['anisotropy_type']
if methcodes != "":
methcodes = methcodes + ":DA-AC-" + AniSpec['anisotropy_type']
if chi[0][0] == 1.: # isotropic
# indicates anisotropy was checked and no change necessary
methcodes = methcodes + ':DA-AC-ISO'
AniSpecRec["magic_method_codes"] = methcodes.strip(":")
return AniSpecRec
|
def doaniscorr(PmagSpecRec, AniSpec)
|
takes the 6 element 's' vector and the Dec,Inc, Int 'Dir' data,
performs simple anisotropy correction. returns corrected Dec, Inc, Int
| 3.290152
| 3.225772
| 1.019958
|
cart_1 = dir2cart([pars_1["dec"], pars_1["inc"], pars_1["r"]])
cart_2 = dir2cart([pars_2['dec'], pars_2['inc'], pars_2["r"]])
Sw = pars_1['k'] * pars_1['r'] + pars_2['k'] * pars_2['r'] # k1*r1+k2*r2
xhat_1 = pars_1['k'] * cart_1[0] + pars_2['k'] * cart_2[0] # k1*x1+k2*x2
xhat_2 = pars_1['k'] * cart_1[1] + pars_2['k'] * cart_2[1] # k1*y1+k2*y2
xhat_3 = pars_1['k'] * cart_1[2] + pars_2['k'] * cart_2[2] # k1*z1+k2*z2
Rw = np.sqrt(xhat_1**2 + xhat_2**2 + xhat_3**2)
return 2 * (Sw - Rw)
|
def vfunc(pars_1, pars_2)
|
Calculate the Watson Vw test statistic. Calculated as 2*(Sw-Rw)
Parameters
----------
pars_1 : dictionary of Fisher statistics from population 1
pars_2 : dictionary of Fisher statistics from population 2
Returns
-------
Vw : Watson's Vw statistic
| 1.857441
| 1.809016
| 1.026769
|
plong = plong % 360
slong = slong % 360
signdec = 1.
delphi = abs(plong - slong)
if delphi != 0:
signdec = (plong - slong) / delphi
if slat == 90.:
slat = 89.99
thetaS = np.radians(90. - slat)
thetaP = np.radians(90. - plat)
delphi = np.radians(delphi)
cosp = np.cos(thetaS) * np.cos(thetaP) + np.sin(thetaS) * \
np.sin(thetaP) * np.cos(delphi)
thetaM = np.arccos(cosp)
cosd = old_div((np.cos(thetaP) - np.cos(thetaM) *
np.cos(thetaS)), (np.sin(thetaM) * np.sin(thetaS)))
C = abs(1. - cosd**2)
if C != 0:
dec = -np.arctan(cosd/np.sqrt(abs(C))) + (np.pi/2.)
else:
dec = np.arccos(cosd)
if -np.pi < signdec * delphi and signdec < 0:
dec = 2. * np.pi - dec # checking quadrant
if signdec * delphi > np.pi:
dec = 2. * np.pi - dec
dec = np.degrees(dec) % 360.
inc = np.degrees(np.arctan2(2. * np.cos(thetaM), np.sin(thetaM)))
return dec, inc
|
def vgp_di(plat, plong, slat, slong)
|
Converts a pole position (pole latitude, pole longitude) to a direction
(declination, inclination) at a given location (slat, slong) assuming a
dipolar field.
Parameters
----------
plat : latitude of pole (vgp latitude)
plong : longitude of pole (vgp longitude)
slat : latitude of site
slong : longitude of site
Returns
----------
dec,inc : tuple of declination and inclination
| 2.741046
| 2.719748
| 1.007831
|
counter, NumSims = 0, 500
#
# first calculate the fisher means and cartesian coordinates of each set of Directions
#
pars_1 = fisher_mean(Dir1)
pars_2 = fisher_mean(Dir2)
#
# get V statistic for these
#
V = vfunc(pars_1, pars_2)
#
# do monte carlo simulation of datasets with same kappas, but common mean
#
Vp = [] # set of Vs from simulations
print("Doing ", NumSims, " simulations")
for k in range(NumSims):
counter += 1
if counter == 50:
print(k + 1)
counter = 0
Dirp = []
# get a set of N1 fisher distributed vectors with k1, calculate fisher stats
for i in range(pars_1["n"]):
Dirp.append(fshdev(pars_1["k"]))
pars_p1 = fisher_mean(Dirp)
# get a set of N2 fisher distributed vectors with k2, calculate fisher stats
Dirp = []
for i in range(pars_2["n"]):
Dirp.append(fshdev(pars_2["k"]))
pars_p2 = fisher_mean(Dirp)
# get the V for these
Vk = vfunc(pars_p1, pars_p2)
Vp.append(Vk)
#
# sort the Vs, get Vcrit (95th one)
#
Vp.sort()
k = int(.95 * NumSims)
return V, Vp[k]
|
def watsonsV(Dir1, Dir2)
|
calculates Watson's V statistic for two sets of directions
| 4.750677
| 4.539102
| 1.046612
|
try:
D = float(D)
I = float(I)
except TypeError: # is an array
return dimap_V(D, I)
# DEFINE FUNCTION VARIABLES
# initialize equal area projection x,y
XY = [0., 0.]
# GET CARTESIAN COMPONENTS OF INPUT DIRECTION
X = dir2cart([D, I, 1.])
# CHECK IF Z = 1 AND ABORT
if X[2] == 1.0:
return XY # return [0,0]
# TAKE THE ABSOLUTE VALUE OF Z
if X[2] < 0:
# this only works on lower hemisphere projections
X[2] = -X[2]
# CALCULATE THE X,Y COORDINATES FOR THE EQUAL AREA PROJECTION
# from Collinson 1983
R = old_div(np.sqrt(1. - X[2]), (np.sqrt(X[0]**2 + X[1]**2)))
XY[1], XY[0] = X[0] * R, X[1] * R
# RETURN XY[X,Y]
return XY
|
def dimap(D, I)
|
Function to map directions to x,y pairs in equal area projection
Parameters
----------
D : list or array of declinations (as float)
I : list or array or inclinations (as float)
Returns
-------
XY : x, y values of directions for equal area projection [x,y]
| 5.522252
| 5.226068
| 1.056674
|
# GET CARTESIAN COMPONENTS OF INPUT DIRECTION
DI = np.array([D, I]).transpose()
X = dir2cart(DI).transpose()
# CALCULATE THE X,Y COORDINATES FOR THE EQUAL AREA PROJECTION
# from Collinson 1983
R = np.sqrt(1. - abs(X[2]))/(np.sqrt(X[0]**2 + X[1]**2))
XY = np.array([X[1] * R, X[0] * R]).transpose()
# RETURN XY[X,Y]
return XY
|
def dimap_V(D, I)
|
FUNCTION TO MAP DECLINATION, INCLINATIONS INTO EQUAL AREA PROJECTION, X,Y
Usage: dimap_V(D, I)
D and I are both numpy arrays
| 6.337451
| 5.774812
| 1.09743
|
meths = []
if method_type == 'GM':
meths.append('GM-PMAG-APWP')
meths.append('GM-ARAR')
meths.append('GM-ARAR-AP')
meths.append('GM-ARAR-II')
meths.append('GM-ARAR-NI')
meths.append('GM-ARAR-TF')
meths.append('GM-CC-ARCH')
meths.append('GM-CC-ARCHMAG')
meths.append('GM-C14')
meths.append('GM-FOSSIL')
meths.append('GM-FT')
meths.append('GM-INT-L')
meths.append('GM-INT-S')
meths.append('GM-ISO')
meths.append('GM-KAR')
meths.append('GM-PMAG-ANOM')
meths.append('GM-PMAG-POL')
meths.append('GM-PBPB')
meths.append('GM-RATH')
meths.append('GM-RBSR')
meths.append('GM-RBSR-I')
meths.append('GM-RBSR-MA')
meths.append('GM-SMND')
meths.append('GM-SMND-I')
meths.append('GM-SMND-MA')
meths.append('GM-CC-STRAT')
meths.append('GM-LUM-TH')
meths.append('GM-UPA')
meths.append('GM-UPB')
meths.append('GM-UTH')
meths.append('GM-UTHHE')
else:
pass
return meths
|
def getmeths(method_type)
|
returns MagIC method codes available for a given type
| 3.052929
| 2.875016
| 1.061882
|
keylist = []
pmag_out = open(ofile, 'a')
outstring = "tab \t" + file_type + "\n"
pmag_out.write(outstring)
keystring = ""
for key in list(Rec.keys()):
keystring = keystring + '\t' + key
keylist.append(key)
keystring = keystring + '\n'
pmag_out.write(keystring[1:])
pmag_out.close()
return keylist
|
def first_up(ofile, Rec, file_type)
|
writes the header for a MagIC template file
| 2.924918
| 2.884522
| 1.014004
|
site = Rec[sitekey]
gotone = 0
if len(Ages) > 0:
for agerec in Ages:
if agerec["er_site_name"] == site:
if "age" in list(agerec.keys()) and agerec["age"] != "":
Rec[keybase + "age"] = agerec["age"]
gotone = 1
if "age_unit" in list(agerec.keys()):
Rec[keybase + "age_unit"] = agerec["age_unit"]
if "age_sigma" in list(agerec.keys()):
Rec[keybase + "age_sigma"] = agerec["age_sigma"]
if gotone == 0 and len(DefaultAge) > 1:
sigma = 0.5 * (float(DefaultAge[1]) - float(DefaultAge[0]))
age = float(DefaultAge[0]) + sigma
Rec[keybase + "age"] = '%10.4e' % (age)
Rec[keybase + "age_sigma"] = '%10.4e' % (sigma)
Rec[keybase + "age_unit"] = DefaultAge[2]
return Rec
|
def get_age(Rec, sitekey, keybase, Ages, DefaultAge)
|
finds the age record for a given site
| 2.052409
| 2.02967
| 1.011203
|
# get a list of age_units first
age_units, AgesOut, factors, factor, maxunit, age_unit = [], [], [], 1, 1, "Ma"
for agerec in AgesIn:
if agerec[1] not in age_units:
age_units.append(agerec[1])
if agerec[1] == "Ga":
factors.append(1e9)
maxunit, age_unit, factor = 1e9, "Ga", 1e9
if agerec[1] == "Ma":
if maxunit == 1:
maxunit, age_unt, factor = 1e6, "Ma", 1e6
factors.append(1e6)
if agerec[1] == "Ka":
factors.append(1e3)
if maxunit == 1:
maxunit, age_unit, factor = 1e3, "Ka", 1e3
if "Years" in agerec[1].split():
factors.append(1)
if len(age_units) == 1: # all ages are of same type
for agerec in AgesIn:
AgesOut.append(agerec[0])
elif len(age_units) > 1:
for agerec in AgesIn: # normalize all to largest age unit
if agerec[1] == "Ga":
AgesOut.append(agerec[0] * 1e9 / factor)
if agerec[1] == "Ma":
AgesOut.append(agerec[0] * 1e6 / factor)
if agerec[1] == "Ka":
AgesOut.append(agerec[0] * 1e3 / factor)
if "Years" in agerec[1].split():
if agerec[1] == "Years BP":
AgesOut.append(old_div(agerec[0], factor))
if agerec[1] == "Years Cal BP":
AgesOut.append(old_div(agerec[0], factor))
if agerec[1] == "Years AD (+/-)":
# convert to years BP first
AgesOut.append(old_div((1950 - agerec[0]), factor))
if agerec[1] == "Years Cal AD (+/-)":
AgesOut.append(old_div((1950 - agerec[0]), factor))
return AgesOut, age_unit
|
def adjust_ages(AgesIn)
|
Function to adjust ages to a common age_unit
| 2.312335
| 2.274246
| 1.016748
|
#
# get uniform directions [dec,inc]
z = random.uniform(-1., 1., size=N)
t = random.uniform(0., 360., size=N) # decs
i = np.arcsin(z) * 180. / np.pi # incs
return np.array([t, i]).transpose()
# def get_unf(N): #Jeff's way
|
def get_unf(N=100)
|
Generates N uniformly distributed directions
using the way described in Fisher et al. (1987).
Parameters
__________
N : number of directions, default is 100
Returns
______
array of nested dec,inc pairs
| 6.648943
| 5.965042
| 1.114652
|
a = np.zeros((3, 3,), 'f') # make the a matrix
for i in range(3):
a[i][i] = s[i]
a[0][1], a[1][0] = s[3], s[3]
a[1][2], a[2][1] = s[4], s[4]
a[0][2], a[2][0] = s[5], s[5]
return a
|
def s2a(s)
|
convert 6 element "s" list to 3,3 a matrix (see Tauxe 1998)
| 2.263308
| 2.028619
| 1.115689
|
s = np.zeros((6,), 'f') # make the a matrix
for i in range(3):
s[i] = a[i][i]
s[3] = a[0][1]
s[4] = a[1][2]
s[5] = a[0][2]
return s
|
def a2s(a)
|
convert 3,3 a matrix to 6 element "s" list (see Tauxe 1998)
| 3.254892
| 2.649601
| 1.228446
|
#
A = s2a(s) # convert s to a (see Tauxe 1998)
tau, V = tauV(A) # convert to eigenvalues (t), eigenvectors (V)
Vdirs = []
for v in V: # convert from cartesian to direction
Vdir = cart2dir(v)
if Vdir[1] < 0:
Vdir[1] = -Vdir[1]
Vdir[0] = (Vdir[0] + 180.) % 360.
Vdirs.append([Vdir[0], Vdir[1]])
return tau, Vdirs
|
def doseigs(s)
|
convert s format for eigenvalues and eigenvectors
Parameters
__________
s=[x11,x22,x33,x12,x23,x13] : the six tensor elements
Return
__________
tau : [t1,t2,t3]
tau is an list of eigenvalues in decreasing order:
V : [[V1_dec,V1_inc],[V2_dec,V2_inc],[V3_dec,V3_inc]]
is an list of the eigenvector directions
| 4.6252
| 4.123529
| 1.121661
|
t = np.zeros((3, 3,), 'f') # initialize the tau diagonal matrix
V = []
for j in range(3):
t[j][j] = tau[j] # diagonalize tau
for k in range(3):
V.append(dir2cart([Vdirs[k][0], Vdirs[k][1], 1.0]))
V = np.transpose(V)
tmp = np.dot(V, t)
chi = np.dot(tmp, np.transpose(V))
return a2s(chi)
|
def doeigs_s(tau, Vdirs)
|
get elements of s from eigenvaulues - note that this is very unstable
Input:
tau,V:
tau is an list of eigenvalues in decreasing order:
[t1,t2,t3]
V is an list of the eigenvector directions
[[V1_dec,V1_inc],[V2_dec,V2_inc],[V3_dec,V3_inc]]
Output:
The six tensor elements as a list:
s=[x11,x22,x33,x12,x23,x13]
| 4.117332
| 4.061491
| 1.013749
|
if type(Ss) == list:
Ss = np.array(Ss)
npts = Ss.shape[0]
Ss = Ss.transpose()
avd, avs = [], []
# D=np.array([Ss[0],Ss[1],Ss[2],Ss[3]+0.5*(Ss[0]+Ss[1]),Ss[4]+0.5*(Ss[1]+Ss[2]),Ss[5]+0.5*(Ss[0]+Ss[2])]).transpose()
D = np.array([Ss[0], Ss[1], Ss[2], Ss[3] + 0.5 * (Ss[0] + Ss[1]),
Ss[4] + 0.5 * (Ss[1] + Ss[2]), Ss[5] + 0.5 * (Ss[0] + Ss[2])])
for j in range(6):
avd.append(np.average(D[j]))
avs.append(np.average(Ss[j]))
D = D.transpose()
# for s in Ss:
# print 'from sbar: ',s
# D.append(s[:]) # append a copy of s
# D[-1][3]=D[-1][3]+0.5*(s[0]+s[1])
# D[-1][4]=D[-1][4]+0.5*(s[1]+s[2])
# D[-1][5]=D[-1][5]+0.5*(s[0]+s[2])
# for j in range(6):
# avd[j]+=(D[-1][j])/float(npts)
# avs[j]+=(s[j])/float(npts)
# calculate sigma
nf = (npts - 1) * 6 # number of degrees of freedom
s0 = 0
Dels = (D - avd)**2
s0 = np.sum(Dels)
sigma = np.sqrt(s0/float(nf))
return nf, sigma, avs
|
def sbar(Ss)
|
calculate average s,sigma from list of "s"s.
| 2.414395
| 2.342783
| 1.030567
|
if npos == 15:
#
# rotatable design of Jelinek for kappabridge (see Tauxe, 1998)
#
A = np.array([[.5, .5, 0, -1., 0, 0], [.5, .5, 0, 1., 0, 0], [1, .0, 0, 0, 0, 0], [.5, .5, 0, -1., 0, 0], [.5, .5, 0, 1., 0, 0], [0, .5, .5, 0, -1., 0], [0, .5, .5, 0, 1., 0], [0, 1., 0, 0, 0, 0],
[0, .5, .5, 0, -1., 0], [0, .5, .5, 0, 1., 0], [.5, 0, .5, 0, 0, -1.], [.5, 0, .5, 0, 0, 1.], [0, 0, 1., 0, 0, 0], [.5, 0, .5, 0, 0, -1.], [.5, 0, .5, 0, 0, 1.]]) # design matrix for 15 measurment positions
elif npos == 6:
A = np.array([[1., 0, 0, 0, 0, 0], [0, 1., 0, 0, 0, 0], [0, 0, 1., 0, 0, 0], [.5, .5, 0, 1., 0, 0], [
0, .5, .5, 0, 1., 0], [.5, 0, .5, 0, 0, 1.]]) # design matrix for 6 measurment positions
else:
print("measurement protocol not supported yet ")
return
B = np.dot(np.transpose(A), A)
B = linalg.inv(B)
B = np.dot(B, np.transpose(A))
return A, B
|
def design(npos)
|
make a design matrix for an anisotropy experiment
| 2.137051
| 2.111054
| 1.012315
|
#
A, B = design(15) # get design matrix for 15 measurements
sbar = np.dot(B, k15) # get mean s
t = (sbar[0] + sbar[1] + sbar[2]) # trace
bulk = old_div(t, 3.) # bulk susceptibility
Kbar = np.dot(A, sbar) # get best fit values for K
dels = k15 - Kbar # get deltas
dels, sbar = old_div(dels, t), old_div(sbar, t) # normalize by trace
So = sum(dels**2)
sigma = np.sqrt(old_div(So, 9.)) # standard deviation
return sbar, sigma, bulk
|
def dok15_s(k15)
|
calculates least-squares matrix for 15 measurements from Jelinek [1976]
| 5.855499
| 5.225193
| 1.120628
|
x = v[1] * w[2] - v[2] * w[1]
y = v[2] * w[0] - v[0] * w[2]
z = v[0] * w[1] - v[1] * w[0]
return [x, y, z]
|
def cross(v, w)
|
cross product of two vectors
| 1.39067
| 1.322088
| 1.051875
|
#
a = s2a(s) # convert to 3,3 matrix
# first get three orthogonal axes
X1 = dir2cart((az, pl, 1.))
X2 = dir2cart((az + 90, 0., 1.))
X3 = cross(X1, X2)
A = np.transpose([X1, X2, X3])
b = np.zeros((3, 3,), 'f') # initiale the b matrix
for i in range(3):
for j in range(3):
dum = 0
for k in range(3):
for l in range(3):
dum += A[i][k] * A[j][l] * a[k][l]
b[i][j] = dum
s_rot = a2s(b) # afer rotation
return s_rot
|
def dosgeo(s, az, pl)
|
rotates matrix a to az,pl returns s
Parameters
__________
s : [x11,x22,x33,x12,x23,x13] - the six tensor elements
az : the azimuth of the specimen X direction
pl : the plunge (inclination) of the specimen X direction
Return
s_rot : [x11,x22,x33,x12,x23,x13] - after rotation
| 4.225716
| 3.735666
| 1.131181
|
tau, Vdirs = doseigs(s)
Vrot = []
for evec in Vdirs:
d, i = dotilt(evec[0], evec[1], bed_az, bed_dip)
Vrot.append([d, i])
s_rot = doeigs_s(tau, Vrot)
return s_rot
|
def dostilt(s, bed_az, bed_dip)
|
Rotates "s" tensor to stratigraphic coordinates
Parameters
__________
s : [x11,x22,x33,x12,x23,x13] - the six tensor elements
bed_az : bedding dip direction
bed_dip : bedding dip
Return
s_rot : [x11,x22,x33,x12,x23,x13] - after rotation
| 7.284803
| 8.200815
| 0.888302
|
#
Is = random.randint(0, len(Ss) - 1, size=len(Ss)) # draw N random integers
#Ss = np.array(Ss)
if not ipar: # ipar == 0:
BSs = Ss[Is]
else: # need to recreate measurement - then do the parametric stuffr
A, B = design(6) # get the design matrix for 6 measurementsa
K, BSs = [], []
for k in range(len(Ss)):
K.append(np.dot(A, Ss[k][0:6]))
Pars = np.random.normal(K, sigma)
for k in range(len(Ss)):
BSs.append(np.dot(B, Pars[k]))
return np.array(BSs)
|
def apseudo(Ss, ipar, sigma)
|
draw a bootstrap sample of Ss
| 6.151206
| 5.549274
| 1.10847
|
#
Tau1s, Tau2s, Tau3s = [], [], []
V1s, V2s, V3s = [], [], []
nb = len(Taus)
bpars = {}
for k in range(nb):
Tau1s.append(Taus[k][0])
Tau2s.append(Taus[k][1])
Tau3s.append(Taus[k][2])
V1s.append(Vs[k][0])
V2s.append(Vs[k][1])
V3s.append(Vs[k][2])
x, sig = gausspars(Tau1s)
bpars["t1_sigma"] = sig
x, sig = gausspars(Tau2s)
bpars["t2_sigma"] = sig
x, sig = gausspars(Tau3s)
bpars["t3_sigma"] = sig
V1s=flip(V1s,combine=True)
kpars = dokent(V1s, len(V1s))
bpars["v1_dec"] = kpars["dec"]
bpars["v1_inc"] = kpars["inc"]
bpars["v1_zeta"] = (kpars["Zeta"] * np.sqrt(nb)) % 360.
bpars["v1_eta"] = (kpars["Eta"] * np.sqrt(nb)) % 360.
bpars["v1_zeta_dec"] = kpars["Zdec"]
bpars["v1_zeta_inc"] = kpars["Zinc"]
bpars["v1_eta_dec"] = kpars["Edec"]
bpars["v1_eta_inc"] = kpars["Einc"]
V2s=flip(V2s,combine=True)
kpars = dokent(V2s, len(V2s))
bpars["v2_dec"] = kpars["dec"]
bpars["v2_inc"] = kpars["inc"]
bpars["v2_zeta"] = (kpars["Zeta"] * np.sqrt(nb)) % 360.
bpars["v2_eta"] = (kpars["Eta"] * np.sqrt(nb)) % 360.
bpars["v2_zeta_dec"] = kpars["Zdec"]
bpars["v2_zeta_inc"] = kpars["Zinc"]
bpars["v2_eta_dec"] = kpars["Edec"]
bpars["v2_eta_inc"] = kpars["Einc"]
V3s=flip(V3s,combine=True)
kpars = dokent(V3s, len(V3s))
bpars["v3_dec"] = kpars["dec"]
bpars["v3_inc"] = kpars["inc"]
bpars["v3_zeta"] = (kpars["Zeta"] * np.sqrt(nb)) % 360.
bpars["v3_eta"] = (kpars["Eta"] * np.sqrt(nb)) % 360.
bpars["v3_zeta_dec"] = kpars["Zdec"]
bpars["v3_zeta_inc"] = kpars["Zinc"]
bpars["v3_eta_dec"] = kpars["Edec"]
bpars["v3_eta_inc"] = kpars["Einc"]
return bpars
|
def sbootpars(Taus, Vs)
|
get bootstrap parameters for s data
| 1.494519
| 1.490942
| 1.002399
|
#npts = len(Ss)
Ss = np.array(Ss)
npts = Ss.shape[0]
# get average s for whole dataset
nf, Sigma, avs = sbar(Ss)
Tmean, Vmean = doseigs(avs) # get eigenvectors of mean tensor
#
# now do bootstrap to collect Vs and taus of bootstrap means
#
Taus, Vs = [], [] # number of bootstraps, list of bootstrap taus and eigenvectors
#
for k in range(int(float(nb))): # repeat nb times
# if k%50==0:print k,' out of ',nb
# get a pseudosample - if ipar=1, do a parametric bootstrap
BSs = apseudo(Ss, ipar, Sigma)
nf, sigma, avbs = sbar(BSs) # get bootstrap mean s
tau, Vdirs = doseigs(avbs) # get bootstrap eigenparameters
Taus.append(tau)
Vs.append(Vdirs)
return Tmean, Vmean, Taus, Vs
|
def s_boot(Ss, ipar=0, nb=1000)
|
Returns bootstrap parameters for S data
Parameters
__________
Ss : nested array of [[x11 x22 x33 x12 x23 x13],....] data
ipar : if True, do a parametric bootstrap
nb : number of bootstraps
Returns
________
Tmean : average eigenvalues
Vmean : average eigvectors
Taus : bootstrapped eigenvalues
Vs : bootstrapped eigenvectors
| 8.144463
| 6.861835
| 1.186922
|
#
if npos != 9:
print('Sorry - only 9 positions available')
return
Dec = [315., 225., 180., 135., 45., 90., 270.,
270., 270., 90., 0., 0., 0., 180., 180.]
Dip = [0., 0., 0., 0., 0., -45., -45., 0.,
45., 45., 45., -45., -90., -45., 45.]
index9 = [0, 1, 2, 5, 6, 7, 10, 11, 12]
H = []
for ind in range(15):
Dir = [Dec[ind], Dip[ind], 1.]
H.append(dir2cart(Dir)) # 15 field directionss
#
# make design matrix A
#
A = np.zeros((npos * 3, 6), 'f')
tmpH = np.zeros((npos, 3), 'f') # define tmpH
if npos == 9:
for i in range(9):
k = index9[i]
ind = i * 3
A[ind][0] = H[k][0]
A[ind][3] = H[k][1]
A[ind][5] = H[k][2]
ind = i * 3 + 1
A[ind][3] = H[k][0]
A[ind][1] = H[k][1]
A[ind][4] = H[k][2]
ind = i * 3 + 2
A[ind][5] = H[k][0]
A[ind][4] = H[k][1]
A[ind][2] = H[k][2]
for j in range(3):
tmpH[i][j] = H[k][j]
At = np.transpose(A)
ATA = np.dot(At, A)
ATAI = linalg.inv(ATA)
B = np.dot(ATAI, At)
else:
print("B matrix not yet supported")
return
return B, H, tmpH
|
def designAARM(npos)
|
calculates B matrix for AARM calculations.
| 2.683684
| 2.62509
| 1.022321
|
for rec in Recs:
type = ".0"
meths = []
tmp = rec["magic_method_codes"].split(':')
for meth in tmp:
meths.append(meth.strip())
if 'LT-T-I' in meths:
type = ".1"
if 'LT-PTRM-I' in meths:
type = ".2"
if 'LT-PTRM-MD' in meths:
type = ".3"
treatment = float(rec["treatment_temp"]) - 273
tr = '%i' % (treatment) + type
inten = '%8.7e ' % (float(rec["measurement_magn_moment"]) * 1e3)
outstring = rec["er_specimen_name"] + " " + tr + " " + rec["measurement_csd"] + \
" " + inten + " " + rec["measurement_dec"] + \
" " + rec["measurement_inc"] + "\n"
file.write(outstring)
|
def domagicmag(file, Recs)
|
converts a magic record back into the SIO mag format
| 3.800001
| 3.6713
| 1.035056
|
cont = 0
Nmin = len(first_I)
if len(first_Z) < Nmin:
Nmin = len(first_Z)
for kk in range(Nmin):
if first_I[kk][0] != first_Z[kk][0]:
print("\n WARNING: ")
if first_I[kk] < first_Z[kk]:
del first_I[kk]
else:
del first_Z[kk]
print("Unmatched step number: ", kk + 1, ' ignored')
cont = 1
if cont == 1:
return first_I, first_Z, cont
return first_I, first_Z, cont
|
def cleanup(first_I, first_Z)
|
cleans up unbalanced steps
failure can be from unbalanced final step, or from missing steps,
this takes care of missing steps
| 3.269574
| 3.171232
| 1.031011
|
model, date, itype = 0, 0, 1
sv = np.zeros(len(gh))
colat = 90. - lat
x, y, z, f = magsyn(gh, sv, model, date, itype, alt, colat, lon)
return x, y, z, f
|
def docustom(lon, lat, alt, gh)
|
Passes the coefficients to the Malin and Barraclough
routine (function pmag.magsyn) to calculate the field from the coefficients.
Parameters:
-----------
lon = east longitude in degrees (0 to 360 or -180 to 180)
lat = latitude in degrees (-90 to 90)
alt = height above mean sea level in km (itype = 1 assumed)
| 6.389973
| 5.951783
| 1.073623
|
data = []
k, l = 0, 1
while k + 1 < len(gh):
for m in range(l + 1):
if m == 0:
data.append([l, m, gh[k], 0])
k += 1
else:
data.append([l, m, gh[k], gh[k + 1]])
k += 2
l += 1
return data
|
def unpack(gh)
|
unpacks gh list into l m g h type list
Parameters
_________
gh : list of gauss coefficients (as returned by, e.g., doigrf)
Returns
data : nested list of [[l,m,g,h],...]
| 2.6608
| 2.244617
| 1.185414
|
convention = str(convention)
site = sample # default is that site = sample
#
#
# Sample is final letter on site designation eg: TG001a (used by SIO lab
# in San Diego)
if convention == "1":
return sample[:-1] # peel off terminal character
#
# Site-Sample format eg: BG94-1 (used by PGL lab in Beijing)
#
if convention == "2":
parts = sample.strip('-').split('-')
return parts[0]
#
# Sample is XXXX.YY where XXX is site and YY is sample
#
if convention == "3":
parts = sample.split('.')
return parts[0]
#
# Sample is XXXXYYY where XXX is site desgnation and YYY is Z long integer
#
if convention == "4":
k = int(Z) - 1
return sample[0:-k] # peel off Z characters from site
if convention == "5": # sample == site
return sample
if convention == "6": # should be names in orient.txt
print("-W- Finding names in orient.txt is not currently supported")
if convention == "7": # peel off Z characters for site
k = int(Z)
return sample[0:k]
if convention == "8": # peel off Z characters for site
return ""
if convention == "9": # peel off Z characters for site
return sample
print("Error in site parsing routine")
return
|
def parse_site(sample, convention, Z)
|
parse the site name from the sample name using the specified convention
| 6.215631
| 6.151229
| 1.01047
|
#
samp_con, Z = "", ""
while samp_con == "":
samp_con = input()
#
if samp_con == "" or samp_con == "1":
samp_con, Z = "1", 1
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
samp_con = ""
else:
Z = samp_con.split("-")[1]
samp_con = "4"
if "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
samp_con = ""
else:
Z = samp_con.split("-")[1]
samp_con = "7"
if samp_con.isdigit() == False or int(samp_con) > 7:
print("Try again\n ")
samp_con = ""
return samp_con, Z
|
def get_samp_con()
|
get sample naming convention
| 2.493801
| 2.420304
| 1.030367
|
# strike is horizontal line equidistant from two input directions
SCart = [0, 0, 0] # cartesian coordites of Strike
SCart[2] = 0. # by definition
# cartesian coordites of Geographic D
GCart = dir2cart([dec_geo, inc_geo, 1.])
TCart = dir2cart([dec_tilt, inc_tilt, 1.]) # cartesian coordites of Tilt D
X = old_div((TCart[1] - GCart[1]), (GCart[0] - TCart[0]))
SCart[1] = np.sqrt(old_div(1, (X**2 + 1.)))
SCart[0] = SCart[1] * X
SDir = cart2dir(SCart)
DipDir = (SDir[0] - 90.) % 360.
DipDir = (SDir[0] + 90.) % 360.
# D is creat circle distance between geo direction and strike
# theta is GCD between geo and tilt (on unit sphere). use law of cosines
# to get small cirlce between geo and tilt (dip)
cosd = GCart[0] * SCart[0] + GCart[1] * \
SCart[1] # cosine of angle between two
d = np.arccos(cosd)
cosTheta = GCart[0] * TCart[0] + GCart[1] * TCart[1] + GCart[2] * TCart[2]
Dip = (old_div(180., np.pi)) * \
np.arccos(-(old_div((cosd**2 - cosTheta), np.sin(d)**2)))
if Dip > 90:
Dip = -Dip
return DipDir, Dip
|
def get_tilt(dec_geo, inc_geo, dec_tilt, inc_tilt)
|
Function to return the dip direction and dip that would yield the tilt
corrected direction if applied to the uncorrected direction (geographic
coordinates)
Parameters
----------
dec_geo : declination in geographic coordinates
inc_geo : inclination in geographic coordinates
dec_tilt : declination in tilt-corrected coordinates
inc_tilt : inclination in tilt-corrected coordinates
Returns
-------
DipDir, Dip : tuple of dip direction and dip
| 4.627479
| 4.458997
| 1.037785
|
TOL = 1e-4
rad = old_div(np.pi, 180.)
Xp = dir2cart([gdec, ginc, 1.])
X = dir2cart([cdec, cinc, 1.])
# find plunge first
az, pl, zdif, ang = 0., -90., 1., 360.
while zdif > TOL and pl < 180.:
znew = X[0] * np.sin(pl * rad) + X[2] * np.cos(pl * rad)
zdif = abs(Xp[2] - znew)
pl += .01
while ang > 0.1 and az < 360.:
d, i = dogeo(cdec, cinc, az, pl)
ang = angle([gdec, ginc], [d, i])
az += .01
return az - .01, pl - .01
|
def get_azpl(cdec, cinc, gdec, ginc)
|
gets azimuth and pl from specimen dec inc (cdec,cinc) and gdec,ginc (geographic) coordinates
| 4.394217
| 4.383326
| 1.002485
|
# if ask set to 1, then can change priorities
SO_methods = [meth.strip() for meth in SO_methods]
SO_defaults = ['SO-SUN', 'SO-GPS-DIFF', 'SO-SUN-SIGHT', 'SO-SIGHT', 'SO-SIGHT-BS',
'SO-CMD-NORTH', 'SO-MAG', 'SO-SM', 'SO-REC', 'SO-V', 'SO-CORE', 'SO-NO']
SO_priorities, prior_list = [], []
if len(SO_methods) >= 1:
for l in range(len(SO_defaults)):
if SO_defaults[l] in SO_methods:
SO_priorities.append(SO_defaults[l])
pri, change = 0, "1"
if ask == 1:
print()
for m in range(len(SO_defaults)):
if SO_defaults[m] in SO_methods:
SO_priorities[SO_methods.index(SO_defaults[m])] = pri
pri += 1
while change == "1":
prior_list = SO_priorities
for m in range(len(SO_methods)):
print(SO_methods[m], SO_priorities[m])
change = input("Change these? 1/[0] ")
if change != "1":
break
SO_priorities = []
for l in range(len(SO_methods)):
print(SO_methods[l])
print(" Priority? ", prior_list)
pri = int(input())
SO_priorities.append(pri)
del prior_list[prior_list.index(pri)]
return SO_priorities
|
def set_priorities(SO_methods, ask)
|
figure out which sample_azimuth to use, if multiple orientation methods
| 3.776379
| 3.76483
| 1.003067
|
f = open(file, 'r')
firstline = f.read(350)
EOL = ""
for k in range(350):
if firstline[k:k + 2] == "\r\n":
print(file, ' appears to be a dos file')
EOL = '\r\n'
break
if EOL == "":
for k in range(350):
if firstline[k] == "\r":
print(file, ' appears to be a mac file')
EOL = '\r'
if EOL == "":
print(file, " appears to be a unix file")
EOL = '\n'
f.close()
return EOL
|
def get_EOL(file)
|
find EOL of input file (whether mac,PC or unix format)
| 2.517591
| 2.392587
| 1.052246
|
for rec in datablock:
methcodes = rec["magic_method_codes"].split(":")
step = float(rec["treatment_ac_field"])
str = float(rec["measurement_magn_moment"])
if "LT-NO" in methcodes:
NRM.append([0, str])
if "LT-T-I" in methcodes:
TRM.append([0, str])
field = float(rec["treatment_dc_field"])
if "LT-AF-I" in methcodes:
ARM1.append([0, str])
if "LT-AF-I-2" in methcodes:
ARM2.append([0, str])
if "LT-AF-Z" in methcodes:
if "LP-ARM-AFD" in methcodes:
ARM1.append([step, str])
elif "LP-TRM-AFD" in methcodes:
TRM.append([step, str])
elif "LP-ARM2-AFD" in methcodes:
ARM2.append([step, str])
else:
NRM.append([step, str])
cont = 1
while cont == 1:
if len(NRM) != len(TRM):
print("Uneven NRM/TRM steps: ")
NRM, TRM, cont = cleanup(TRM, NRM)
else:
cont = 0
cont = 1
while cont == 1:
if len(ARM1) != len(ARM2):
print("Uneven ARM1/ARM2 steps: ")
ARM1, ARM2, cont = cleanup(ARM2, ARM1)
else:
cont = 0
#
# final check
#
if len(NRM) != len(TRM) or len(ARM1) != len(ARM2):
print(len(NRM), len(TRM), len(ARM1), len(ARM2))
print(" Something wrong with this specimen! Better fix it or delete it ")
input(" press return to acknowledge message")
# now do the ratio to "fix" NRM/TRM data
# a
TRM_ADJ = []
for kk in range(len(TRM)):
step = TRM[kk][0]
for k in range(len(ARM1)):
if ARM1[k][0] == step:
TRM_ADJ.append([step, TRM[kk][1] * ARM1[k][1] / ARM2[k][1]])
break
shawblock = (NRM, TRM, ARM1, ARM2, TRM_ADJ)
return shawblock, field
|
def sortshaw(s, datablock)
|
sorts data block in to ARM1,ARM2 NRM,TRM,ARM1,ARM2=[],[],[],[]
stick first zero field stuff into first_Z
| 3.291854
| 3.134727
| 1.050124
|
sv = []
pad = 120 - len(gh)
for x in range(pad):
gh.append(0.)
for x in range(len(gh)):
sv.append(0.)
#! convert to colatitude for MB routine
itype = 1
colat = 90. - lat
date, alt = 2000., 0. # use a dummy date and altitude
x, y, z, f = magsyn(gh, sv, date, date, itype, alt, colat, lon)
vec = cart2dir([x, y, z])
vec[2] = f
return vec
|
def getvec(gh, lat, lon)
|
Evaluates the vector at a given latitude and longitude for a specified
set of coefficients
Parameters
----------
gh : a list of gauss coefficients
lat : latitude of location
long : longitude of location
Returns
-------
vec : direction in [dec, inc, intensity]
| 7.374694
| 7.235608
| 1.019222
|
a2 = alpha**2
c_a = 0.547
s_l = np.sqrt(old_div(((c_a**(2. * l)) * a2), ((l + 1.) * (2. * l + 1.))))
return s_l
|
def s_l(l, alpha)
|
get sigma as a function of degree l from Constable and Parker (1988)
| 5.501237
| 5.10847
| 1.076885
|
# random.seed(n)
p = 0
n = seed
gh = []
g10, sfact, afact = -18e3, 3.8, 2.4
g20 = G2 * g10
g30 = G3 * g10
alpha = g10/afact
s1 = s_l(1, alpha)
s10 = sfact * s1
gnew = random.normal(g10, s10)
if p == 1:
print(1, 0, gnew, 0)
gh.append(gnew)
gh.append(random.normal(0, s1))
gnew = gh[-1]
gh.append(random.normal(0, s1))
hnew = gh[-1]
if p == 1:
print(1, 1, gnew, hnew)
for l in range(2, terms + 1):
for m in range(l + 1):
OFF = 0.0
if l == 2 and m == 0:
OFF = g20
if l == 3 and m == 0:
OFF = g30
s = s_l(l, alpha)
j = (l - m) % 2
if j == 1:
s = s * sfact
gh.append(random.normal(OFF, s))
gnew = gh[-1]
if m == 0:
hnew = 0
else:
gh.append(random.normal(0, s))
hnew = gh[-1]
if p == 1:
print(l, m, gnew, hnew)
return gh
|
def mktk03(terms, seed, G2, G3)
|
generates a list of gauss coefficients drawn from the TK03 distribution
| 3.152534
| 3.082566
| 1.022698
|
tanl = np.tan(np.radians(lat))
inc = np.arctan(2. * tanl)
return np.degrees(inc)
|
def pinc(lat)
|
calculate paleoinclination from latitude using dipole formula: tan(I) = 2tan(lat)
Parameters
________________
lat : either a single value or an array of latitudes
Returns
-------
array of inclinations
| 4.065423
| 5.431014
| 0.748557
|
tani = np.tan(np.radians(inc))
lat = np.arctan(tani/2.)
return np.degrees(lat)
|
def plat(inc)
|
calculate paleolatitude from inclination using dipole formula: tan(I) = 2tan(lat)
Parameters
________________
inc : either a single value or an array of inclinations
Returns
-------
array of latitudes
| 5.372387
| 6.154035
| 0.872986
|
if random_seed != None:
np.random.seed(random_seed)
Inds = np.random.randint(len(DIs), size=len(DIs))
D = np.array(DIs)
return D[Inds]
|
def pseudo(DIs, random_seed=None)
|
Draw a bootstrap sample of directions returning as many bootstrapped samples
as in the input directions
Parameters
----------
DIs : nested list of dec, inc lists (known as a di_block)
random_seed : set random seed for reproducible number generation (default is None)
Returns
-------
Bootstrap_directions : nested list of dec, inc lists that have been
bootstrapped resampled
| 2.62509
| 3.574944
| 0.734302
|
#
# now do bootstrap to collect BDIs bootstrap means
#
BDIs = [] # number of bootstraps, list of bootstrap directions
#
for k in range(nb): # repeat nb times
# if k%50==0:print k,' out of ',nb
pDIs = pseudo(DIs) # get a pseudosample
bfpars = fisher_mean(pDIs) # get bootstrap mean bootstrap sample
BDIs.append([bfpars['dec'], bfpars['inc']])
return BDIs
|
def di_boot(DIs, nb=5000)
|
returns bootstrap means for Directional data
Parameters
_________________
DIs : nested list of Dec,Inc pairs
nb : number of bootstrap pseudosamples
Returns
-------
BDIs: nested list of bootstrapped mean Dec,Inc pairs
| 11.075277
| 8.28019
| 1.337563
|
N = dir_df.dir_dec.values.shape[0] # number of data points
BDIs = []
for k in range(nb):
pdir_df = dir_df.sample(n=N, replace=True) # bootstrap pseudosample
pdir_df.reset_index(inplace=True) # reset the index
if par: # do a parametric bootstrap
for i in pdir_df.index: # set through the pseudosample
n = pdir_df.loc[i, 'dir_n'] # get number of samples/site
# get ks for each sample
ks = np.ones(shape=n)*pdir_df.loc[i, 'dir_k']
# draw a fisher distributed set of directions
decs, incs = fshdev(ks)
di_block = np.column_stack((decs, incs))
# rotate them to the mean
di_block = dodirot_V(
di_block, pdir_df.loc[i, 'dir_dec'], pdir_df.loc[i, 'dir_inc'])
# get the new mean direction for the pseudosample
fpars = fisher_mean(di_block)
# replace the pseudo sample mean direction
pdir_df.loc[i, 'dir_dec'] = fpars['dec']
pdir_df.loc[i, 'dir_inc'] = fpars['inc']
# get bootstrap mean bootstrap sample
bfpars = dir_df_fisher_mean(pdir_df)
BDIs.append([bfpars['dec'], bfpars['inc']])
return BDIs
|
def dir_df_boot(dir_df, nb=5000, par=False)
|
Performs a bootstrap for direction DataFrame with optional parametric bootstrap
Parameters
_________
dir_df : Pandas DataFrame with columns:
dir_dec : mean declination
dir_inc : mean inclination
Required for parametric bootstrap
dir_n : number of data points in mean
dir_k : Fisher k statistic for mean
nb : number of bootstraps, default is 5000
par : if True, do a parameteric bootstrap
Returns
_______
BDIs: nested list of bootstrapped mean Dec,Inc pairs
| 4.496456
| 3.746019
| 1.200329
|
N = dir_df.dir_dec.values.shape[0] # number of data points
fpars = {}
if N < 2:
return fpars
dirs = dir_df[['dir_dec', 'dir_inc']].values
X = dir2cart(dirs).transpose()
Xbar = np.array([X[0].sum(), X[1].sum(), X[2].sum()])
R = np.sqrt(Xbar[0]**2+Xbar[1]**2+Xbar[2]**2)
Xbar = Xbar/R
dir = cart2dir(Xbar)
fpars["dec"] = dir[0]
fpars["inc"] = dir[1]
fpars["n"] = N
fpars["r"] = R
if N != R:
k = (N - 1.) / (N - R)
fpars["k"] = k
csd = 81./np.sqrt(k)
else:
fpars['k'] = 'inf'
csd = 0.
b = 20.**(1./(N - 1.)) - 1
a = 1 - b * (N - R) / R
if a < -1:
a = -1
a95 = np.degrees(np.arccos(a))
fpars["alpha95"] = a95
fpars["csd"] = csd
if a < 0:
fpars["alpha95"] = 180.0
return fpars
|
def dir_df_fisher_mean(dir_df)
|
calculates fisher mean for Pandas data frame
Parameters
__________
dir_df: pandas data frame with columns:
dir_dec : declination
dir_inc : inclination
Returns
-------
fpars : dictionary containing the Fisher mean and statistics
dec : mean declination
inc : mean inclination
r : resultant vector length
n : number of data points
k : Fisher k value
csd : Fisher circular standard deviation
alpha95 : Fisher circle of 95% confidence
| 3.449702
| 2.909291
| 1.185754
|
#
BXs = []
for k in range(len(x)):
ind = random.randint(0, len(x) - 1)
BXs.append(x[ind])
return BXs
|
def pseudosample(x)
|
draw a bootstrap sample of x
| 3.81487
| 3.338006
| 1.142859
|
plate, site_lat, site_lon, age = data[0], data[1], data[2], data[3]
apwp = get_plate_data(plate)
recs = apwp.split()
#
# put it into usable form in plate_data
#
k, plate_data = 0, []
while k < len(recs) - 3:
rec = [float(recs[k]), float(recs[k + 1]), float(recs[k + 2])]
plate_data.append(rec)
k = k + 3
#
# find the right pole for the age
#
for i in range(len(plate_data)):
if age >= plate_data[i][0] and age <= plate_data[i + 1][0]:
if (age - plate_data[i][0]) < (plate_data[i][0] - age):
rec = i
else:
rec = i + 1
break
pole_lat = plate_data[rec][1]
pole_lon = plate_data[rec][2]
return pole_lat, pole_lon
|
def bc02(data)
|
get APWP from Besse and Courtillot 2002 paper
Parameters
----------
Takes input as [plate, site_lat, site_lon, age]
plate : string (options: AF, ANT, AU, EU, GL, IN, NA, SA)
site_lat : float
site_lon : float
age : float in Myr
Returns
----------
| 3.25116
| 2.918527
| 1.113973
|
if len(x) != len(y):
print('x and y must be same length')
return
xx, yy, xsum, ysum, xy, n, sum = 0, 0, 0, 0, 0, len(x), 0
linpars = {}
for i in range(n):
xx += x[i] * x[i]
yy += y[i] * y[i]
xy += x[i] * y[i]
xsum += x[i]
ysum += y[i]
xsig = np.sqrt(old_div((xx - old_div(xsum**2, n)), (n - 1.)))
ysig = np.sqrt(old_div((yy - old_div(ysum**2, n)), (n - 1.)))
linpars['slope'] = old_div(
(xy - (xsum * ysum / n)), (xx - old_div((xsum**2), n)))
linpars['b'] = old_div((ysum - linpars['slope'] * xsum), n)
linpars['r'] = old_div((linpars['slope'] * xsig), ysig)
for i in range(n):
a = y[i] - linpars['b'] - linpars['slope'] * x[i]
sum += a
linpars['sigma'] = old_div(sum, (n - 2.))
linpars['n'] = n
return linpars
|
def linreg(x, y)
|
does a linear regression
| 2.081151
| 2.090228
| 0.995658
|
incs = np.radians(incs)
I_o = f * np.tan(incs) # multiply tangent by flattening factor
return np.degrees(np.arctan(I_o))
|
def squish(incs, f)
|
returns 'flattened' inclination, assuming factor, f and King (1955) formula:
tan (I_o) = f tan (I_f)
Parameters
__________
incs : array of inclination (I_f) data to flatten
f : flattening factor
Returns
_______
I_o : inclinations after flattening
| 6.50906
| 4.962331
| 1.311694
|
namespace = kwargs
exec("b = {}".format(st), namespace)
return namespace['b']
|
def execute(st, **kwargs)
|
Work around for Python3 exec function which doesn't allow changes to the local namespace because of scope.
This breaks a lot of the old functionality in the code which was origionally in Python2. So this function
runs just like exec except that it returns the output of the input statement to the local namespace. It may
break if you start feeding it multiline monoliths of statements (haven't tested) but you shouldn't do that
anyway (bad programming).
Parameters
-----------
st : the statement you want executed and for which you want the return
kwargs : anything that may need to be in this namespace to execute st
Returns
-------
The return value of executing the input statement
| 11.131316
| 14.362589
| 0.775022
|
if var:
var = flag + " " + str(var)
else:
var = ""
return var
|
def add_flag(var, flag)
|
for use when calling command-line scripts from withing a program.
if a variable is present, add its proper command_line flag.
return a string.
| 5.034616
| 5.003364
| 1.006246
|
if name in sys.argv: # if the command line flag is found in sys.argv
ind = sys.argv.index(name)
return sys.argv[ind + 1]
if reqd: # if arg is required but not present
raise MissingCommandLineArgException(name)
return default_val
|
def get_named_arg(name, default_val=None, reqd=False)
|
Extract the value after a command-line flag such as '-f' and return it.
If the command-line flag is missing, return default_val.
If reqd == True and the command-line flag is missing, throw an error.
Parameters
----------
name : str
command line flag, e.g. "-f"
default_val
value to use if command line flag is missing, e.g. "measurements.txt"
default is None
reqd : bool
throw error if reqd==True and command line flag is missing.
if reqd == True, default_val will be ignored.
default is False.
Returns
---------
Desired value from sys.argv if available, otherwise default_val.
| 3.410543
| 3.467293
| 0.983633
|
'''
take a list of recs [rec1,rec2,rec3....], each rec is a dictionary.
make sure that all recs have the same headers.
'''
headers = []
for rec in recs:
keys = list(rec.keys())
for key in keys:
if key not in headers:
headers.append(key)
for rec in recs:
for header in headers:
if header not in list(rec.keys()):
rec[header] = ""
return recs
|
def merge_recs_headers(recs)
|
take a list of recs [rec1,rec2,rec3....], each rec is a dictionary.
make sure that all recs have the same headers.
| 2.584756
| 1.655707
| 1.561119
|
if not fname:
return ''
file_dir_path, file_name = os.path.split(fname)
if (not file_dir_path) or (file_dir_path == '.'):
full_file = os.path.join(dir_path, fname)
else:
full_file = fname
return os.path.realpath(full_file)
|
def resolve_file_name(fname, dir_path='.')
|
Parse file name information and output full path.
Allows input as:
fname == /path/to/file.txt
or
fname == file.txt, dir_path == /path/to
Either way, returns /path/to/file.txt.
Used in conversion scripts.
Parameters
----------
fname : str
short filename or full path to file
dir_path : str
directory, optional
Returns
----------
full_file : str
full path/to/file.txt
| 2.477023
| 2.506266
| 0.988332
|
CheckDec = ['_dec', '_lon', '_azimuth', 'dip_direction']
adjust = False
for dec_key in CheckDec:
if dec_key in key:
if key.endswith(dec_key) or key.endswith('_'):
adjust = True
if not val:
return ''
elif not adjust:
return val
elif adjust:
new_val = float(val) % 360
if new_val != float(val):
print('-I- adjusted {} {} to 0=>360.: {}'.format(key, val, new_val))
return new_val
|
def adjust_to_360(val, key)
|
Take in a value and a key. If the key is of the type:
declination/longitude/azimuth/direction, adjust it to be within
the range 0-360 as required by the MagIC data model
| 5.39738
| 4.920387
| 1.096942
|
for key in dictionary:
dictionary[key] = adjust_to_360(dictionary[key], key)
return dictionary
|
def adjust_all_to_360(dictionary)
|
Take a dictionary and check each key/value pair.
If this key is of type: declination/longitude/azimuth/direction,
adjust it to be within 0-360 as required by the MagIC data model
| 2.413953
| 3.451761
| 0.699339
|
if resolution=='low':
incr = 10 # we can vary to the resolution of the model
elif resolution=='high':
incr = 2 # we can vary to the resolution of the model
if lon_0 == 180:
lon_0 = 179.99
if lon_0 > 180:
lon_0 = lon_0-360.
# get some parameters for our arrays of lat/lon
lonmax = (lon_0 + 180.) % 360 + incr
lonmin = (lon_0 - 180.)
latmax = 90 + incr
# make a 1D array of longitudes (like elons)
lons = np.arange(lonmin, lonmax, incr)
# make a 1D array of longitudes (like elats)
lats = np.arange(-90, latmax, incr)
# set up some containers for the field elements
B = np.zeros((len(lats), len(lons)))
Binc = np.zeros((len(lats), len(lons)))
Bdec = np.zeros((len(lats), len(lons)))
Brad = np.zeros((len(lats), len(lons)))
if mod == 'custom' and file != '':
gh = []
lmgh = np.loadtxt(file).transpose()
gh.append(lmgh[2][0])
for i in range(1, lmgh.shape[1]):
gh.append(lmgh[2][i])
if lmgh[1][i] != 0:
gh.append(lmgh[3][i])
for j in range(len(lats)): # step through the latitudes
for i in range(len(lons)): # and the longitudes
# get the field elements
if mod == 'custom':
x, y, z, f = docustom(lons[i], lats[j], alt, gh)
else:
x, y, z, f = doigrf(
lons[i], lats[j], alt, date, mod=mod, file=file)
# turn them into polar coordinates
Dec, Inc, Int = cart2dir([x, y, z])
if mod != 'custom':
# convert the string to microtesla (from nT)
B[j][i] = Int * 1e-3
else:
B[j][i] = Int # convert the string to microtesla (from nT)
Binc[j][i] = Inc # store the inclination value
if Dec > 180:
Dec = Dec-360.
Bdec[j][i] = Dec # store the declination value
if mod != 'custom':
Brad[j][i] = z*1e-3
else:
Brad[j][i] = z
return Bdec, Binc, B, Brad, lons, lats
|
def do_mag_map(date, lon_0=0, alt=0, file="", mod="cals10k",resolution='low')
|
returns lists of declination, inclination and intensities for lat/lon grid for
desired model and date.
Parameters:
_________________
date = Required date in decimal years (Common Era, negative for Before Common Era)
Optional Parameters:
______________
mod = model to use ('arch3k','cals3k','pfm9k','hfm10k','cals10k.2','shadif14k','cals10k.1b','custom')
file = l m g h formatted filefor custom model
lon_0 : central longitude for Hammer projection
alt = altitude
resolution = ['low','high'] default is low
Returns:
______________
Bdec=list of declinations
Binc=list of inclinations
B = list of total field intensities in nT
Br = list of radial field intensities
lons = list of longitudes evaluated
lats = list of latitudes evaluated
| 3.055528
| 2.893062
| 1.056157
|
xp, yp = y, x # need to switch into geographic convention
r = np.sqrt(xp**2+yp**2)
z = 1.-r**2
t = np.arcsin(z)
if UP == 1:
t = -t
p = np.arctan2(yp, xp)
dec, inc = np.degrees(p) % 360, np.degrees(t)
return dec, inc
|
def doeqdi(x, y, UP=False)
|
Takes digitized x,y, data and returns the dec,inc, assuming an
equal area projection
Parameters
__________________
x : array of digitized x from point on equal area projection
y : array of igitized y from point on equal area projection
UP : if True, is an upper hemisphere projection
Output :
dec : declination
inc : inclination
| 4.568303
| 4.418977
| 1.033792
|
ppars = doprinc(di_block)
di_df = pd.DataFrame(di_block) # turn into a data frame for easy filtering
di_df.columns = ['dec', 'inc']
di_df['pdec'] = ppars['dec']
di_df['pinc'] = ppars['inc']
di_df['angle'] = angle(di_df[['dec', 'inc']].values,
di_df[['pdec', 'pinc']].values)
mode1_df = di_df[di_df['angle'] <= 90]
mode2_df = di_df[di_df['angle'] > 90]
mode1 = mode1_df[['dec', 'inc']].values.tolist()
mode2 = mode2_df[['dec', 'inc']].values.tolist()
return mode1, mode2
|
def separate_directions(di_block)
|
Separates set of directions into two modes based on principal direction
Parameters
_______________
di_block : block of nested dec,inc pairs
Return
mode_1_block,mode_2_block : two lists of nested dec,inc pairs
| 2.690039
| 2.589183
| 1.038953
|
vgp_df['delta'] = 90.-vgp_df['vgp_lat'].values
ASD = np.sqrt(np.sum(vgp_df.delta**2)/(vgp_df.shape[0]-1))
A = 1.8 * ASD + 5.
delta_max = vgp_df.delta.max()
while delta_max > A:
delta_max = vgp_df.delta.max()
if delta_max < A:
return vgp_df, A, ASD
vgp_df = vgp_df[vgp_df.delta < delta_max]
ASD = np.sqrt(np.sum(vgp_df.delta**2)/(vgp_df.shape[0]-1))
A = 1.8 * ASD + 5.
|
def dovandamme(vgp_df)
|
determine the S_b value for VGPs using the Vandamme (1994) method
for determining cutoff value for "outliers".
Parameters
___________
vgp_df : pandas DataFrame with required column "vgp_lat"
This should be in the desired coordinate system and assumes one polarity
Returns
_________
vgp_df : after applying cutoff
cutoff : colatitude cutoff
S_b : S_b of vgp_df after applying cutoff
| 2.419556
| 2.336512
| 1.035542
|
vgp_df['delta'] = 90.-vgp_df.vgp_lat.values
# filter by cutoff, kappa, and n
vgp_df = vgp_df[vgp_df.delta <= cutoff]
vgp_df = vgp_df[vgp_df.dir_k >= kappa]
vgp_df = vgp_df[vgp_df.dir_n_samples >= n]
if spin: # do transformation to pole
Pvgps = vgp_df[['vgp_lon', 'vgp_lat']].values
ppars = doprinc(Pvgps)
Bdirs = np.full((Pvgps.shape[0]), ppars['dec']-180.)
Bdips = np.full((Pvgps.shape[0]), 90.-ppars['inc'])
Pvgps = np.column_stack((Pvgps, Bdirs, Bdips))
lons, lats = dotilt_V(Pvgps)
vgp_df['vgp_lon'] = lons
vgp_df['vgp_lat'] = lats
vgp_df['delta'] = 90.-vgp_df.vgp_lat
if anti:
print('flipping reverse')
vgp_rev = vgp_df[vgp_df.vgp_lat < 0]
vgp_norm = vgp_df[vgp_df.vgp_lat >= 0]
vgp_anti = vgp_rev
vgp_anti['vgp_lat'] = -vgp_anti['vgp_lat']
vgp_anti['vgp_lon'] = (vgp_anti['vgp_lon']-180) % 360
vgp_df = pd.concat([vgp_norm, vgp_anti], sort=True)
if rev:
vgp_df = vgp_df[vgp_df.vgp_lat < 0] # use only reverse data
if v:
vgp_df, cutoff, S_v = dovandamme(vgp_df) # do vandamme cutoff
S_B = get_sb_df(vgp_df, mm97=mm97) # get
N = vgp_df.shape[0]
SBs, low, high = [], 0, 0
if boot:
for i in range(nb): # now do bootstrap
bs_df = vgp_df.sample(n=N, replace=True)
Sb_bs = get_sb_df(bs_df)
SBs.append(Sb_bs)
SBs.sort()
low = SBs[int(.025 * nb)]
high = SBs[int(.975 * nb)]
return N, S_B, low, high, cutoff
|
def scalc_vgp_df(vgp_df, anti=0, rev=0, cutoff=180., kappa=0, n=0, spin=0, v=0, boot=0, mm97=0, nb=1000)
|
Calculates Sf for a dataframe with VGP Lat., and optional Fisher's k, site latitude and N information can be used to correct for within site scatter (McElhinny & McFadden, 1997)
Parameters
_________
df : Pandas Dataframe with columns
REQUIRED:
vgp_lat : VGP latitude
ONLY REQUIRED for MM97 correction:
dir_k : Fisher kappa estimate
dir_n_samples : number of samples per site
lat : latitude of the site
mm97 : if True, will do the correction for within site scatter
OPTIONAL:
boot : if True. do bootstrap
nb : number of bootstraps, default is 1000
Returns
_____________
N : number of VGPs used in calculation
S : S
low : 95% confidence lower bound [0 if boot=0]
high 95% confidence upper bound [0 if boot=0]
cutoff : cutoff used in calculation of S
| 2.990159
| 2.81668
| 1.06159
|
# first calculate R for the combined data set, then R1 and R2 for each individually.
# create a new array from two smaller ones
DI = np.concatenate((DI1, DI2), axis=0)
fpars = fisher_mean(DI) # re-use our functionfrom problem 1b
fpars1 = fisher_mean(DI1)
fpars2 = fisher_mean(DI2)
N = fpars['n']
R = fpars['r']
R1 = fpars1['r']
R2 = fpars2['r']
F = (N-2.)*((R1+R2-R)/(N-R1-R2))
Fcrit = fcalc(2, 2*(N-2))
return F, Fcrit
|
def watsons_f(DI1, DI2)
|
calculates Watson's F statistic (equation 11.16 in Essentials text book).
Parameters
_________
DI1 : nested array of [Dec,Inc] pairs
DI2 : nested array of [Dec,Inc] pairs
Returns
_______
F : Watson's F
Fcrit : critical value from F table
| 6.125679
| 5.959202
| 1.027936
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.