_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 75
19.8k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q11600
|
fshdev
|
train
|
def fshdev(k):
    """
    Generate a random draw from a Fisher distribution with mean declination
    of 0 and inclination of 90 with a specified kappa.
    Parameters
    ----------
    k : kappa (precision parameter) of the distribution
        k can be a single number or an array of values
    Returns
    ----------
    dec, inc : declination and inclination of random Fisher distribution draw
        if k is an array, dec, inc are returned as arrays, otherwise, single values
    """
    k = np.array(k)
    # scalar kappa -> one draw; array kappa -> one draw per entry
    n = k.shape[0] if len(k.shape) else 1
    # two independent uniform deviates (drawn in this order: R1 then R2)
    R1 = random.random(size=n)
    R2 = random.random(size=n)
    L = np.exp(-2 * k)
    a = R1 * (1 - L) + L
    fac = np.sqrt(-np.log(a) / (2 * k))
    inc = 90. - np.degrees(2 * np.arcsin(fac))
    dec = np.degrees(2 * np.pi * R2)
    # preserve backward compatibility: scalar kappa in -> scalars out
    return (dec[0], inc[0]) if n == 1 else (dec, inc)
|
python
|
{
"resource": ""
}
|
q11601
|
lowes
|
train
|
def lowes(data):
    """
    Gets Lowe's power spectrum from gauss coefficients.
    Parameters
    _________
    data : nested list of [[l,m,g,h],...] as from pmag.unpack()
        (records must be ordered by increasing l, then m)
    Returns
    _______
    Ls : list of degrees (l)
    Rs : power at each degree l
    """
    lmax = data[-1][0]
    Ls = list(range(1, lmax + 1))
    Rs = []
    recno = 0  # running index into data; one record per (l, m)
    for l in Ls:
        # renamed 'pow' -> 'power': don't shadow the builtin
        power = 0
        for m in range(0, l + 1):
            # 1e-3 scales the coefficients (nT -> uT); power per Lowes' formula
            power += (l + 1) * ((1e-3 * data[recno][2]) ** 2 +
                                (1e-3 * data[recno][3]) ** 2)
            recno += 1
        Rs.append(power)
    return Ls, Rs
|
python
|
{
"resource": ""
}
|
q11602
|
magnetic_lat
|
train
|
def magnetic_lat(inc):
    """
    Returns magnetic latitude from inclination via the dipole
    equation tan(inc) = 2 * tan(lat).
    Parameters
    ----------
    inc : inclination in degrees (scalar or array)
    Returns
    -------
    paleo_lat : magnetic latitude in degrees
    """
    # np.radians/np.degrees replace the old_div(pi, 180.) round trips;
    # numerically identical for float input, and clearer
    return np.degrees(np.arctan(0.5 * np.tan(np.radians(inc))))
|
python
|
{
"resource": ""
}
|
q11603
|
Dir_anis_corr
|
train
|
def Dir_anis_corr(InDir, AniSpec):
    """
    Takes the 6 element 's' vector and the Dec, Inc 'InDir' data and
    performs a simple anisotropy correction.  Returns corrected Dec, Inc.
    """
    # build a unit-intensity direction vector [dec, inc, 1.]
    Dir = np.zeros((3), 'f')
    Dir[0], Dir[1], Dir[2] = InDir[0], InDir[1], 1.
    # check_F returns the identity when the F test fails (isotropic case)
    chi, chi_inv = check_F(AniSpec)
    if chi[0][0] == 1.:
        return Dir  # isotropic: nothing to correct
    # rotate to cartesian, undo the anisotropy, convert back to dec/inc
    cart = np.array(dir2cart(Dir))
    return cart2dir(np.dot(cart, chi_inv))
|
python
|
{
"resource": ""
}
|
q11604
|
doaniscorr
|
train
|
def doaniscorr(PmagSpecRec, AniSpec):
    """
    Takes the 6 element 's' vector and the Dec, Inc, Int 'Dir' data,
    performs simple anisotropy correction, and returns a new specimen
    record with corrected Dec, Inc, Int.
    Parameters
    ----------
    PmagSpecRec : dict with 'specimen_dec', 'specimen_inc', 'specimen_int'
        (and optionally 'magic_method_codes')
    AniSpec : dict of anisotropy data including 'anisotropy_type'
    Returns
    -------
    AniSpecRec : copy of PmagSpecRec with corrected direction/intensity
        and updated method codes
    """
    AniSpecRec = {}
    for key in list(PmagSpecRec.keys()):
        AniSpecRec[key] = PmagSpecRec[key]
    Dir = np.zeros((3), 'f')
    Dir[0] = float(PmagSpecRec["specimen_dec"])
    Dir[1] = float(PmagSpecRec["specimen_inc"])
    Dir[2] = float(PmagSpecRec["specimen_int"])
    # check if F test passes! if anisotropy_sigma available
    chi, chi_inv = check_F(AniSpec)
    if chi[0][0] == 1.:  # isotropic
        cDir = [Dir[0], Dir[1]]  # no change
        newint = Dir[2]
    else:
        X = dir2cart(Dir)
        M = np.array(X)
        H = np.dot(M, chi_inv)
        cDir = cart2dir(H)
        # unit vector parallel to Banc (plain '/' == old_div for floats)
        Hunit = [H[0] / cDir[2], H[1] / cDir[2], H[2] / cDir[2]]
        Zunit = [0, 0, -1.]  # unit vector parallel to lab field
        Hpar = np.dot(chi, Hunit)  # unit vector applied along ancient field
        Zpar = np.dot(chi, Zunit)  # unit vector applied along lab field
        # intensity of resultant vector from ancient field
        HparInt = cart2dir(Hpar)[2]
        # intensity of resultant vector from lab field
        ZparInt = cart2dir(Zpar)[2]
        newint = Dir[2] * ZparInt / HparInt
        if cDir[0] - Dir[0] > 90:
            cDir[1] = -cDir[1]
            cDir[0] = (cDir[0] - 180.) % 360.
    AniSpecRec["specimen_dec"] = '%7.1f' % (cDir[0])
    AniSpecRec["specimen_inc"] = '%7.1f' % (cDir[1])
    AniSpecRec["specimen_int"] = '%9.4e' % (newint)
    AniSpecRec["specimen_correction"] = 'c'
    methcodes = AniSpecRec.get("magic_method_codes", "")
    # BUGFIX: the original ran BOTH the `== ""` and the `!= ""` branches
    # when methcodes started out empty, appending the DA-AC code twice
    if methcodes == "":
        methcodes = "DA-AC-" + AniSpec['anisotropy_type']
    else:
        methcodes = methcodes + ":DA-AC-" + AniSpec['anisotropy_type']
    if chi[0][0] == 1.:  # isotropic
        # indicates anisotropy was checked and no change necessary
        methcodes = methcodes + ':DA-AC-ISO'
    AniSpecRec["magic_method_codes"] = methcodes.strip(":")
    return AniSpecRec
|
python
|
{
"resource": ""
}
|
q11605
|
watsonsV
|
train
|
def watsonsV(Dir1, Dir2):
    """
    Calculates Watson's V statistic for two sets of directions, plus the
    Monte-Carlo critical value (95th percentile of 500 simulated pairs
    drawn with the observed kappas but a common mean).
    """
    NumSims = 500
    # Fisher statistics of each observed data set
    pars_1 = fisher_mean(Dir1)
    pars_2 = fisher_mean(Dir2)
    # V statistic for the observed pair
    V = vfunc(pars_1, pars_2)
    # Monte Carlo: simulate data sets with the same kappas and a common mean
    Vp = []
    print("Doing ", NumSims, " simulations")
    for sim in range(NumSims):
        # progress report every 50 simulations
        if (sim + 1) % 50 == 0:
            print(sim + 1)
        # N1 Fisher-distributed vectors with kappa k1
        sample = [fshdev(pars_1["k"]) for _ in range(pars_1["n"])]
        pars_p1 = fisher_mean(sample)
        # N2 Fisher-distributed vectors with kappa k2
        sample = [fshdev(pars_2["k"]) for _ in range(pars_2["n"])]
        pars_p2 = fisher_mean(sample)
        Vp.append(vfunc(pars_p1, pars_p2))
    # critical value is the 95th percentile of the simulated Vs
    Vp.sort()
    return V, Vp[int(.95 * NumSims)]
|
python
|
{
"resource": ""
}
|
q11606
|
dimap
|
train
|
def dimap(D, I):
    """
    Function to map directions to x,y pairs in equal area projection
    Parameters
    ----------
    D : list or array of declinations (as float)
    I : list or array or inclinations (as float)
    Returns
    -------
    XY : x, y values of directions for equal area projection [x,y]
    """
    # arrays can't be cast to float -- hand those to the vectorized version
    try:
        D = float(D)
        I = float(I)
    except TypeError:
        return dimap_V(D, I)
    XY = [0., 0.]
    # cartesian components of the unit vector along (D, I)
    X = dir2cart([D, I, 1.])
    if X[2] == 1.0:
        return XY  # pointing straight down: plots at the origin
    if X[2] < 0:
        # only lower-hemisphere projections work; flip upper-hemisphere points
        X[2] = -X[2]
    # equal area projection, from Collinson 1983
    R = np.sqrt(1. - X[2]) / np.sqrt(X[0] ** 2 + X[1] ** 2)
    XY[1], XY[0] = X[0] * R, X[1] * R
    return XY
|
python
|
{
"resource": ""
}
|
q11607
|
dimap_V
|
train
|
def dimap_V(D, I):
    """
    Maps declination/inclination arrays into equal area projection x, y.
    Usage: dimap_V(D, I)
    D and I are both numpy arrays
    """
    # cartesian components of all directions, one row per x/y/z component
    xyz = dir2cart(np.array([D, I]).transpose()).transpose()
    # equal area projection (Collinson 1983), vectorized over all points
    R = np.sqrt(1. - abs(xyz[2])) / np.sqrt(xyz[0] ** 2 + xyz[1] ** 2)
    return np.array([xyz[1] * R, xyz[0] * R]).transpose()
|
python
|
{
"resource": ""
}
|
q11608
|
getmeths
|
train
|
def getmeths(method_type):
    """
    Returns the MagIC method codes available for a given method type.
    Parameters
    ----------
    method_type : str
        method code prefix, e.g. 'GM' for geochronology methods
    Returns
    -------
    meths : list of str
        known method codes for that type (empty list if type unknown)
    """
    # table literal instead of 31 append() calls -- same codes, same order
    if method_type == 'GM':
        return ['GM-PMAG-APWP', 'GM-ARAR', 'GM-ARAR-AP', 'GM-ARAR-II',
                'GM-ARAR-NI', 'GM-ARAR-TF', 'GM-CC-ARCH', 'GM-CC-ARCHMAG',
                'GM-C14', 'GM-FOSSIL', 'GM-FT', 'GM-INT-L', 'GM-INT-S',
                'GM-ISO', 'GM-KAR', 'GM-PMAG-ANOM', 'GM-PMAG-POL',
                'GM-PBPB', 'GM-RATH', 'GM-RBSR', 'GM-RBSR-I', 'GM-RBSR-MA',
                'GM-SMND', 'GM-SMND-I', 'GM-SMND-MA', 'GM-CC-STRAT',
                'GM-LUM-TH', 'GM-UPA', 'GM-UPB', 'GM-UTH', 'GM-UTHHE']
    return []
|
python
|
{
"resource": ""
}
|
q11609
|
first_up
|
train
|
def first_up(ofile, Rec, file_type):
    """
    Writes the header for a MagIC template file and returns the column keys.
    Parameters
    ----------
    ofile : str
        output file name (opened in append mode)
    Rec : dict
        record whose keys become the tab-separated column headers
    file_type : str
        MagIC table type written on the first line
    Returns
    -------
    keylist : list of the keys in the order written
    """
    keylist = list(Rec.keys())
    # 'with' guarantees the handle is closed even if a write fails
    with open(ofile, 'a') as pmag_out:
        pmag_out.write("tab \t" + file_type + "\n")
        if keylist:  # empty record writes no header row (as before)
            pmag_out.write("\t".join(keylist) + "\n")
    return keylist
|
python
|
{
"resource": ""
}
|
q11610
|
get_age
|
train
|
def get_age(Rec, sitekey, keybase, Ages, DefaultAge):
    """
    Finds the age record for a given site and copies it into Rec; falls
    back on DefaultAge (a [min, max, unit] list) when no age is found.
    """
    site = Rec[sitekey]
    found = False
    for agerec in Ages:
        if agerec["er_site_name"] != site:
            continue
        if agerec.get("age", "") != "":
            Rec[keybase + "age"] = agerec["age"]
            found = True
        if "age_unit" in agerec:
            Rec[keybase + "age_unit"] = agerec["age_unit"]
        if "age_sigma" in agerec:
            Rec[keybase + "age_sigma"] = agerec["age_sigma"]
    if not found and len(DefaultAge) > 1:
        # midpoint of the default range, with half the range as sigma
        half_range = 0.5 * (float(DefaultAge[1]) - float(DefaultAge[0]))
        midpoint = float(DefaultAge[0]) + half_range
        Rec[keybase + "age"] = '%10.4e' % (midpoint)
        Rec[keybase + "age_sigma"] = '%10.4e' % (half_range)
        Rec[keybase + "age_unit"] = DefaultAge[2]
    return Rec
|
python
|
{
"resource": ""
}
|
q11611
|
adjust_ages
|
train
|
def adjust_ages(AgesIn):
    """
    Adjusts ages to a common age unit (the largest unit present).
    Parameters
    ----------
    AgesIn : list of [age, age_unit] pairs; recognized units are "Ga",
        "Ma", "Ka" and the "Years ..." variants (BP / Cal BP /
        AD (+/-) / Cal AD (+/-))
    Returns
    -------
    AgesOut : list of ages normalized to the largest unit present
    age_unit : the unit used for the output ages
    """
    age_units, AgesOut, factors = [], [], []
    factor, maxunit, age_unit = 1, 1, "Ma"
    # first pass: note every distinct unit and track the largest one,
    # which becomes the common output unit (with its scale factor)
    for agerec in AgesIn:
        if agerec[1] not in age_units:
            age_units.append(agerec[1])
        if agerec[1] == "Ga":
            factors.append(1e9)
            maxunit, age_unit, factor = 1e9, "Ga", 1e9
        if agerec[1] == "Ma":
            # BUGFIX: was `if maxunit == 1` (and assigned a misspelled
            # `age_unt`), so Ma could never supersede a Ka seen earlier
            if maxunit < 1e6:
                maxunit, age_unit, factor = 1e6, "Ma", 1e6
            factors.append(1e6)
        if agerec[1] == "Ka":
            factors.append(1e3)
            if maxunit == 1:
                maxunit, age_unit, factor = 1e3, "Ka", 1e3
        if "Years" in agerec[1].split():
            factors.append(1)
    if len(age_units) == 1:  # all ages already share one unit
        for agerec in AgesIn:
            AgesOut.append(agerec[0])
    elif len(age_units) > 1:
        # second pass: scale every age to the largest unit
        for agerec in AgesIn:
            if agerec[1] == "Ga":
                AgesOut.append(agerec[0] * 1e9 / factor)
            if agerec[1] == "Ma":
                AgesOut.append(agerec[0] * 1e6 / factor)
            if agerec[1] == "Ka":
                AgesOut.append(agerec[0] * 1e3 / factor)
            if "Years" in agerec[1].split():
                if agerec[1] == "Years BP":
                    AgesOut.append(agerec[0] / factor)
                if agerec[1] == "Years Cal BP":
                    AgesOut.append(agerec[0] / factor)
                if agerec[1] == "Years AD (+/-)":
                    # convert to years BP first
                    AgesOut.append((1950 - agerec[0]) / factor)
                if agerec[1] == "Years Cal AD (+/-)":
                    AgesOut.append((1950 - agerec[0]) / factor)
    return AgesOut, age_unit
|
python
|
{
"resource": ""
}
|
q11612
|
doseigs
|
train
|
def doseigs(s):
    """
    Convert s format to eigenvalues and eigenvector directions.
    Parameters
    __________
    s=[x11,x22,x33,x12,x23,x13] : the six tensor elements
    Return
    __________
    tau : [t1,t2,t3] eigenvalues in decreasing order
    V : [[V1_dec,V1_inc],[V2_dec,V2_inc],[V3_dec,V3_inc]] eigenvector directions
    """
    # build the tensor matrix, then get its eigenvalues/eigenvectors
    tau, V = tauV(s2a(s))
    Vdirs = []
    for eigvec in V:
        vdir = cart2dir(eigvec)
        # flip to the lower hemisphere where needed
        if vdir[1] < 0:
            vdir[1] = -vdir[1]
            vdir[0] = (vdir[0] + 180.) % 360.
        Vdirs.append([vdir[0], vdir[1]])
    return tau, Vdirs
|
python
|
{
"resource": ""
}
|
q11613
|
sbar
|
train
|
def sbar(Ss):
    """
    Calculates the average s vector and sigma from a list of "s"s.
    Returns
    -------
    nf : number of degrees of freedom
    sigma : standard deviation of the reduced elements
    avs : list of the six mean s elements
    """
    Ss = np.asarray(Ss)
    npts = Ss.shape[0]
    cols = Ss.transpose()  # one row per tensor element
    # fold half the relevant diagonal terms into the off-diagonal elements
    D = np.array([cols[0], cols[1], cols[2],
                  cols[3] + 0.5 * (cols[0] + cols[1]),
                  cols[4] + 0.5 * (cols[1] + cols[2]),
                  cols[5] + 0.5 * (cols[0] + cols[2])])
    avd = [np.average(row) for row in D]
    avs = [np.average(row) for row in cols]
    D = D.transpose()
    nf = (npts - 1) * 6  # number of degrees of freedom
    # sum of squared deviations of the reduced elements from their means
    s0 = np.sum((D - avd) ** 2)
    sigma = np.sqrt(s0 / float(nf))
    return nf, sigma, avs
|
python
|
{
"resource": ""
}
|
q11614
|
design
|
train
|
def design(npos):
    """
    Makes a design matrix for an anisotropy experiment.
    Parameters
    ----------
    npos : number of measurement positions (15 or 6 supported)
    Returns
    -------
    A : design matrix (npos x 6)
    B : (A^T A)^-1 A^T, the least-squares solution matrix (6 x npos)
    (prints a message and returns None for unsupported npos)
    """
    if npos == 15:
        # rotatable design of Jelinek for kappabridge (see Tauxe, 1998)
        A = np.array([
            [.5, .5, 0, -1., 0, 0], [.5, .5, 0, 1., 0, 0],
            [1, .0, 0, 0, 0, 0], [.5, .5, 0, -1., 0, 0],
            [.5, .5, 0, 1., 0, 0], [0, .5, .5, 0, -1., 0],
            [0, .5, .5, 0, 1., 0], [0, 1., 0, 0, 0, 0],
            [0, .5, .5, 0, -1., 0], [0, .5, .5, 0, 1., 0],
            [.5, 0, .5, 0, 0, -1.], [.5, 0, .5, 0, 0, 1.],
            [0, 0, 1., 0, 0, 0], [.5, 0, .5, 0, 0, -1.],
            [.5, 0, .5, 0, 0, 1.]])
    elif npos == 6:
        # design matrix for 6 measurement positions
        A = np.array([
            [1., 0, 0, 0, 0, 0], [0, 1., 0, 0, 0, 0],
            [0, 0, 1., 0, 0, 0], [.5, .5, 0, 1., 0, 0],
            [0, .5, .5, 0, 1., 0], [.5, 0, .5, 0, 0, 1.]])
    else:
        print("measurement protocol not supported yet ")
        return
    # least-squares solution matrix: B = (A^T A)^-1 A^T
    B = np.dot(linalg.inv(np.dot(A.transpose(), A)), A.transpose())
    return A, B
|
python
|
{
"resource": ""
}
|
q11615
|
cross
|
train
|
def cross(v, w):
    """
    Cross product of two 3-vectors, returned as a list [x, y, z].
    """
    return [v[1] * w[2] - v[2] * w[1],
            v[2] * w[0] - v[0] * w[2],
            v[0] * w[1] - v[1] * w[0]]
|
python
|
{
"resource": ""
}
|
q11616
|
dostilt
|
train
|
def dostilt(s, bed_az, bed_dip):
    """
    Rotates "s" tensor to stratigraphic coordinates
    Parameters
    __________
    s : [x11,x22,x33,x12,x23,x13] - the six tensor elements
    bed_az : bedding dip direction
    bed_dip : bedding dip
    Return
    s_rot : [x11,x22,x33,x12,x23,x13] - after rotation
    """
    # untilt every eigenvector, then rebuild the tensor from the
    # rotated eigenparameters
    tau, Vdirs = doseigs(s)
    rotated = []
    for dec, inc in Vdirs:
        d, i = dotilt(dec, inc, bed_az, bed_dip)
        rotated.append([d, i])
    return doeigs_s(tau, rotated)
|
python
|
{
"resource": ""
}
|
q11617
|
apseudo
|
train
|
def apseudo(Ss, ipar, sigma):
    """
    Draws a bootstrap pseudo-sample of the s data in Ss.
    Parameters
    ----------
    Ss : numpy array of s vectors (one row per specimen)
    ipar : if truthy, do a parametric bootstrap (re-create the 6-position
        measurements with gaussian noise, then re-solve for s)
    sigma : standard deviation used for the parametric noise
    Returns
    -------
    numpy array of bootstrapped s vectors (same length as Ss)
    """
    # BUGFIX: was randint(0, len(Ss) - 1, ...); numpy's randint EXCLUDES
    # the upper bound, so the last row could never be drawn (cf. pseudo(),
    # which uses randint(len(DIs), ...) correctly)
    Is = random.randint(0, len(Ss), size=len(Ss))  # draw N random integers
    if not ipar:
        BSs = Ss[Is]
    else:  # need to recreate measurements - then do the parametric stuff
        A, B = design(6)  # design matrix for 6 measurements
        K, BSs = [], []
        for k in range(len(Ss)):
            K.append(np.dot(A, Ss[k][0:6]))
        # perturb the synthetic measurements with gaussian noise
        Pars = np.random.normal(K, sigma)
        for k in range(len(Ss)):
            BSs.append(np.dot(B, Pars[k]))
    return np.array(BSs)
|
python
|
{
"resource": ""
}
|
q11618
|
s_boot
|
train
|
def s_boot(Ss, ipar=0, nb=1000):
    """
    Returns bootstrap parameters for S data
    Parameters
    __________
    Ss : nested array of [[x11 x22 x33 x12 x23 x13],....] data
    ipar : if True, do a parametric bootstrap
    nb : number of bootstraps
    Returns
    ________
    Tmean : average eigenvalues
    Vmean : average eigenvectors
    Taus : bootstrapped eigenvalues
    Vs : bootstrapped eigenvectors
    """
    Ss = np.array(Ss)
    # eigenparameters of the mean tensor for the whole data set
    nf, Sigma, avs = sbar(Ss)
    Tmean, Vmean = doseigs(avs)
    # bootstrap: collect eigenparameters of each pseudo-sample's mean
    Taus, Vs = [], []
    for _ in range(int(float(nb))):
        # pseudo-sample (parametric if ipar is set, using Sigma as noise)
        BSs = apseudo(Ss, ipar, Sigma)
        _, _, av_bs = sbar(BSs)         # mean s of the pseudo-sample
        tau, Vdirs = doseigs(av_bs)     # its eigenparameters
        Taus.append(tau)
        Vs.append(Vdirs)
    return Tmean, Vmean, Taus, Vs
|
python
|
{
"resource": ""
}
|
q11619
|
designAARM
|
train
|
def designAARM(npos):
    #
    """
    Calculates the B matrix for AARM calculations.
    Parameters
    ----------
    npos : int
        number of measurement positions; only 9 is supported
        (prints a message and returns None otherwise)
    Returns
    -------
    B : least-squares solution matrix, (A^T A)^-1 A^T
    H : all 15 candidate field directions in cartesian coordinates
    tmpH : the 9 field directions actually used, as an (npos, 3) array
    """
    if npos != 9:
        print('Sorry - only 9 positions available')
        return
    # declination/dip of the 15 candidate AARM field directions
    Dec = [315., 225., 180., 135., 45., 90., 270.,
           270., 270., 90., 0., 0., 0., 180., 180.]
    Dip = [0., 0., 0., 0., 0., -45., -45., 0.,
           45., 45., 45., -45., -90., -45., 45.]
    # which of the 15 directions the 9-position protocol uses
    index9 = [0, 1, 2, 5, 6, 7, 10, 11, 12]
    H = []
    for ind in range(15):
        Dir = [Dec[ind], Dip[ind], 1.]
        H.append(dir2cart(Dir))  # 15 field directions
    #
    # make design matrix A
    #
    A = np.zeros((npos * 3, 6), 'f')
    tmpH = np.zeros((npos, 3), 'f')  # define tmpH
    if npos == 9:
        for i in range(9):
            k = index9[i]
            # each position contributes three rows to A, one per
            # measured component, mapping the 6 s elements onto the
            # applied field components
            ind = i * 3
            A[ind][0] = H[k][0]
            A[ind][3] = H[k][1]
            A[ind][5] = H[k][2]
            ind = i * 3 + 1
            A[ind][3] = H[k][0]
            A[ind][1] = H[k][1]
            A[ind][4] = H[k][2]
            ind = i * 3 + 2
            A[ind][5] = H[k][0]
            A[ind][4] = H[k][1]
            A[ind][2] = H[k][2]
            # remember the field direction used for this position
            for j in range(3):
                tmpH[i][j] = H[k][j]
        # least squares: B = (A^T A)^-1 A^T
        At = np.transpose(A)
        ATA = np.dot(At, A)
        ATAI = linalg.inv(ATA)
        B = np.dot(ATAI, At)
    else:
        print("B matrix not yet supported")
        return
    return B, H, tmpH
|
python
|
{
"resource": ""
}
|
q11620
|
domagicmag
|
train
|
def domagicmag(file, Recs):
    """
    Converts MagIC measurement records back into the SIO mag format and
    writes one line per record to the given file handle.
    Parameters
    ----------
    file : open, writable file object
    Recs : list of MagIC measurement record dicts (must contain
        magic_method_codes, treatment_temp, measurement_magn_moment,
        er_specimen_name, measurement_csd, measurement_dec,
        measurement_inc)
    """
    for rec in Recs:
        # map the method codes onto the SIO treatment-type suffix
        # (renamed from 'type' -- don't shadow the builtin)
        step_type = ".0"
        meths = [meth.strip() for meth in rec["magic_method_codes"].split(':')]
        if 'LT-T-I' in meths:
            step_type = ".1"
        if 'LT-PTRM-I' in meths:
            step_type = ".2"
        if 'LT-PTRM-MD' in meths:
            step_type = ".3"
        treatment = float(rec["treatment_temp"]) - 273  # Kelvin -> Celsius
        tr = '%i' % (treatment) + step_type
        # moment is scaled by 1e3 (Am^2 -> emu, presumably -- verify units)
        inten = '%8.7e ' % (float(rec["measurement_magn_moment"]) * 1e3)
        outstring = rec["er_specimen_name"] + " " + tr + " " + rec["measurement_csd"] + \
            " " + inten + " " + rec["measurement_dec"] + \
            " " + rec["measurement_inc"] + "\n"
        file.write(outstring)
|
python
|
{
"resource": ""
}
|
q11621
|
cleanup
|
train
|
def cleanup(first_I, first_Z):
    """
    Cleans up unbalanced steps: where the in-field and zero-field step
    lists disagree on a step number, the unmatched step is dropped from
    whichever list has it.  Failure can come from an unbalanced final
    step or from missing steps; this takes care of missing steps.
    Parameters
    ----------
    first_I, first_Z : lists of [step, ...] records, ordered by step
    Returns
    -------
    first_I, first_Z, cont : the (possibly shortened) lists and a flag
        that is 1 if any step was removed
    """
    cont = 0
    kk = 0
    # BUGFIX: the original iterated range(Nmin) with Nmin computed up
    # front while deleting entries as it went, so after a deletion it
    # could index past the shortened lists (IndexError).  Re-check the
    # bound every pass and re-test the same index after a deletion.
    while kk < min(len(first_I), len(first_Z)):
        if first_I[kk][0] != first_Z[kk][0]:
            print("\n WARNING: ")
            if first_I[kk] < first_Z[kk]:
                del first_I[kk]
            else:
                del first_Z[kk]
            print("Unmatched step number: ", kk + 1, ' ignored')
            cont = 1
            continue  # re-examine this index after the deletion
        kk += 1
    return first_I, first_Z, cont
|
python
|
{
"resource": ""
}
|
q11622
|
unpack
|
train
|
def unpack(gh):
    """
    Unpacks a flat gauss coefficient list into nested [l, m, g, h] rows.
    Parameters
    _________
    gh : list of gauss coefficients (as returned by, e.g., doigrf)
    Returns
    data : nested list of [[l,m,g,h],...]
    """
    data = []
    pos, degree = 0, 1
    # each degree contributes one m=0 term (g only; h is 0 by convention)
    # followed by a (g, h) pair for every order m = 1..l
    while pos + 1 < len(gh):
        for order in range(degree + 1):
            if order == 0:
                data.append([degree, order, gh[pos], 0])
                pos += 1
            else:
                data.append([degree, order, gh[pos], gh[pos + 1]])
                pos += 2
        degree += 1
    return data
|
python
|
{
"resource": ""
}
|
q11623
|
parse_site
|
train
|
def parse_site(sample, convention, Z):
    """
    Parses the site name from the sample name using the specified
    convention.  Returns the site string, or None (after printing a
    message) when the convention is not recognized.
    """
    convention = str(convention)
    if convention == "1":
        # sample is the final character of the site designation,
        # e.g. TG001a (SIO lab, San Diego)
        return sample[:-1]
    if convention == "2":
        # Site-Sample format, e.g. BG94-1 (PGL lab, Beijing)
        return sample.strip('-').split('-')[0]
    if convention == "3":
        # XXXX.YY where XXXX is site and YY is sample
        return sample.split('.')[0]
    if convention == "4":
        # peel off (Z - 1) characters from the end of the sample name
        # NOTE(review): Z == 1 yields sample[0:0] == "" -- looks suspect,
        # but preserved as-is
        return sample[0:-(int(Z) - 1)]
    if convention == "5":
        return sample  # site name same as sample
    if convention == "6":
        # falls through to the error message below, returning None
        print("-W- Finding names in orient.txt is not currently supported")
    if convention == "7":
        # site is the first Z characters of the sample name
        return sample[0:int(Z)]
    if convention == "8":
        return ""
    if convention == "9":
        return sample
    print("Error in site parsing routine")
    return
|
python
|
{
"resource": ""
}
|
q11624
|
get_samp_con
|
train
|
def get_samp_con():
    """
    Interactively prompts the user for a sample naming convention.
    Returns
    -------
    samp_con : str
        the chosen convention number, "1" through "7"
    Z : str or int
        character count for conventions 4 and 7 (1 for the default
        convention; "" for conventions that do not use it)
    """
    #
    samp_con, Z = "", ""
    # loop until a valid convention is entered; invalid input resets
    # samp_con to "" which re-triggers the prompt
    while samp_con == "":
        samp_con = input("""
      Sample naming convention:
        [1] XXXXY: where XXXX is an arbitrary length site designation and Y
            is the single character sample designation.  e.g., TG001a is the
            first sample from site TG001.    [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [4-Z] XXXX[YYY]:  YYY is sample designation with Z characters from site XXX
        [5] site name same as sample
        [6] site is entered under a separate column
        [7-Z] [XXXX]YYY:  XXXX is site designation with Z characters with sample name XXXXYYYY
            NB: all others you will have to customize your self
                 or e-mail ltauxe@ucsd.edu for help.
 select one:
""")
        #
        # empty input falls back to the default convention
        if samp_con == "" or samp_con == "1":
            samp_con, Z = "1", 1
        # conventions 4 and 7 must carry a character count, e.g. "4-2"
        # NOTE(review): substring tests ("4" in samp_con) also match
        # inputs like "14" -- verify acceptable for your use
        if "4" in samp_con:
            if "-" not in samp_con:
                print("option [4] must be in form 4-Z where Z is an integer")
                samp_con = ""  # re-prompt
            else:
                Z = samp_con.split("-")[1]
                samp_con = "4"
        if "7" in samp_con:
            if "-" not in samp_con:
                print("option [7] must be in form 7-Z where Z is an integer")
                samp_con = ""  # re-prompt
            else:
                Z = samp_con.split("-")[1]
                samp_con = "7"
        # anything non-numeric or out of range also re-prompts
        if samp_con.isdigit() == False or int(samp_con) > 7:
            print("Try again\n ")
            samp_con = ""
    return samp_con, Z
|
python
|
{
"resource": ""
}
|
q11625
|
set_priorities
|
train
|
def set_priorities(SO_methods, ask):
    """
    Figures out which sample_azimuth to use when a sample has multiple
    orientation methods, by ranking the SO- method codes.
    Parameters
    ----------
    SO_methods : list of SO- method code strings found for a sample
    ask : if 1, show the provisional ranking and let the user re-order it
    Returns
    -------
    SO_priorities : when ask != 1, the found method codes in default
        priority order; when ask == 1, a list of integer ranks
        (NOTE(review): then indexed by position in SO_methods -- verify
        the two orderings line up for your inputs)
    """
    # if ask set to 1, then can change priorities
    SO_methods = [meth.strip() for meth in SO_methods]
    # default priority order (most reliable orientation methods first)
    SO_defaults = ['SO-SUN', 'SO-GPS-DIFF', 'SO-SUN-SIGHT', 'SO-SIGHT', 'SO-SIGHT-BS',
                   'SO-CMD-NORTH', 'SO-MAG', 'SO-SM', 'SO-REC', 'SO-V', 'SO-CORE', 'SO-NO']
    SO_priorities, prior_list = [], []
    if len(SO_methods) >= 1:
        # collect the methods actually present, in default-priority order
        for l in range(len(SO_defaults)):
            if SO_defaults[l] in SO_methods:
                SO_priorities.append(SO_defaults[l])
        pri, change = 0, "1"
        if ask == 1:
            print("""These methods of sample orientation were found:
      They have been assigned a provisional priority (top = zero, last = highest number) """)
            # replace each collected entry with its integer rank
            for m in range(len(SO_defaults)):
                if SO_defaults[m] in SO_methods:
                    SO_priorities[SO_methods.index(SO_defaults[m])] = pri
                    pri += 1
            # let the user re-assign ranks until satisfied
            while change == "1":
                prior_list = SO_priorities
                for m in range(len(SO_methods)):
                    print(SO_methods[m], SO_priorities[m])
                change = input("Change these? 1/[0] ")
                if change != "1":
                    break
                # rebuild the ranking: for each method, pick one of the
                # remaining ranks; chosen ranks are removed from prior_list
                SO_priorities = []
                for l in range(len(SO_methods)):
                    print(SO_methods[l])
                    print(" Priority? ", prior_list)
                    pri = int(input())
                    SO_priorities.append(pri)
                    del prior_list[prior_list.index(pri)]
    return SO_priorities
|
python
|
{
"resource": ""
}
|
q11626
|
getvec
|
train
|
def getvec(gh, lat, lon):
    """
    Evaluates the vector at a given latitude and longitude for a specified
    set of coefficients
    Parameters
    ----------
    gh : a list of gauss coefficients
    lat : latitude of location
    lon : longitude of location
    Returns
    -------
    vec : direction in [dec, inc, intensity]
    """
    # BUGFIX: pad a local copy out to 120 coefficients instead of
    # appending to the caller's list (the original grew the caller's gh
    # on every call)
    gh = list(gh) + [0.] * max(0, 120 - len(gh))
    sv = [0. for _ in gh]  # no secular variation terms
    #! convert to colatitude for MB routine
    itype = 1
    colat = 90. - lat
    date, alt = 2000., 0.  # use a dummy date and altitude
    x, y, z, f = magsyn(gh, sv, date, date, itype, alt, colat, lon)
    vec = cart2dir([x, y, z])
    vec[2] = f  # replace unit intensity with the field strength
    return vec
|
python
|
{
"resource": ""
}
|
q11627
|
mktk03
|
train
|
def mktk03(terms, seed, G2, G3):
    """
    Generates a list of gauss coefficients drawn from the TK03
    statistical field model distribution.
    Parameters
    ----------
    terms : int
        maximum spherical harmonic degree to generate
    seed : NOTE(review): assigned to `n` but never used -- the
        random.seed call below is commented out, so draws come from the
        current global random state
    G2, G3 : ratios of the axial quadrupole/octupole means to g10
    Returns
    -------
    gh : flat list of gauss coefficients (same layout unpack() expects)
    """
    # random.seed(n)
    p = 0  # debug flag: set to 1 to print each coefficient as drawn
    n = seed
    gh = []
    # model constants: mean axial dipole (nT), sigma factor, alpha factor
    g10, sfact, afact = -18e3, 3.8, 2.4
    g20 = G2 * g10
    g30 = G3 * g10
    alpha = g10/afact
    s1 = s_l(1, alpha)
    s10 = sfact * s1
    # degree 1: g10 has a non-zero mean; g11 and h11 are zero-mean
    gnew = random.normal(g10, s10)
    if p == 1:
        print(1, 0, gnew, 0)
    gh.append(gnew)
    gh.append(random.normal(0, s1))
    gnew = gh[-1]
    gh.append(random.normal(0, s1))
    hnew = gh[-1]
    if p == 1:
        print(1, 1, gnew, hnew)
    # degrees 2..terms: zero-mean draws, except the g20/g30 offsets
    for l in range(2, terms + 1):
        for m in range(l + 1):
            OFF = 0.0
            if l == 2 and m == 0:
                OFF = g20
            if l == 3 and m == 0:
                OFF = g30
            s = s_l(l, alpha)
            j = (l - m) % 2
            if j == 1:
                # odd (l - m) terms get boosted variance (presumably the
                # TK03 hemispheric asymmetry factor -- verify vs. paper)
                s = s * sfact
            gh.append(random.normal(OFF, s))
            gnew = gh[-1]
            if m == 0:
                hnew = 0  # h coefficients are zero for m == 0
            else:
                gh.append(random.normal(0, s))
                hnew = gh[-1]
            if p == 1:
                print(l, m, gnew, hnew)
    return gh
|
python
|
{
"resource": ""
}
|
q11628
|
pseudo
|
train
|
def pseudo(DIs, random_seed=None):
    """
    Draw a bootstrap sample of directions returning as many bootstrapped samples
    as in the input directions
    Parameters
    ----------
    DIs : nested list of dec, inc lists (known as a di_block)
    random_seed : set random seed for reproducible number generation (default is None)
    Returns
    -------
    Bootstrap_directions : nested list of dec, inc lists that have been
        bootstrap resampled
    """
    if random_seed is not None:
        np.random.seed(random_seed)
    size = len(DIs)
    # sample row indices with replacement, then gather those rows
    picks = np.random.randint(size, size=size)
    return np.array(DIs)[picks]
|
python
|
{
"resource": ""
}
|
q11629
|
dir_df_boot
|
train
|
def dir_df_boot(dir_df, nb=5000, par=False):
    """
    Performs a bootstrap for a direction DataFrame, with an optional
    parametric bootstrap.
    Parameters
    _________
    dir_df : Pandas DataFrame with columns:
        dir_dec : mean declination
        dir_inc : mean inclination
        and, required for the parametric bootstrap:
        dir_n : number of data points in mean
        dir_k : Fisher k statistic for mean
    nb : number of bootstraps, default is 5000
    par : if True, do a parametric bootstrap
    Returns
    _______
    BDIs : nested list of bootstrapped mean Dec, Inc pairs
    """
    num_rows = dir_df.dir_dec.values.shape[0]
    BDIs = []
    for _ in range(nb):
        # ordinary bootstrap pseudo-sample of the rows
        pseudo_df = dir_df.sample(n=num_rows, replace=True)
        pseudo_df.reset_index(inplace=True)
        if par:
            # parametric step: for each drawn site, simulate dir_n Fisher
            # distributed directions with its dir_k, rotate them to the
            # site mean, and replace the site direction with their mean
            for idx in pseudo_df.index:
                n_samps = pseudo_df.loc[idx, 'dir_n']
                kappas = np.ones(shape=n_samps) * pseudo_df.loc[idx, 'dir_k']
                decs, incs = fshdev(kappas)
                sim_block = np.column_stack((decs, incs))
                sim_block = dodirot_V(sim_block,
                                      pseudo_df.loc[idx, 'dir_dec'],
                                      pseudo_df.loc[idx, 'dir_inc'])
                fpars = fisher_mean(sim_block)
                pseudo_df.loc[idx, 'dir_dec'] = fpars['dec']
                pseudo_df.loc[idx, 'dir_inc'] = fpars['inc']
        # Fisher mean of the (possibly parametric) pseudo-sample
        bfpars = dir_df_fisher_mean(pseudo_df)
        BDIs.append([bfpars['dec'], bfpars['inc']])
    return BDIs
|
python
|
{
"resource": ""
}
|
q11630
|
dir_df_fisher_mean
|
train
|
def dir_df_fisher_mean(dir_df):
    """
    Calculates the Fisher mean for a Pandas DataFrame of directions.
    Parameters
    __________
    dir_df : pandas DataFrame with columns:
        dir_dec : declination
        dir_inc : inclination
    Returns
    -------
    fpars : dictionary containing the Fisher mean and statistics
        dec, inc : mean direction
        r : resultant vector length
        n : number of data points
        k : Fisher k value ('inf' if R == N)
        csd : Fisher circular standard deviation
        alpha95 : Fisher circle of 95% confidence
    """
    fpars = {}
    N = dir_df.dir_dec.values.shape[0]
    if N < 2:
        return fpars  # need at least two directions
    # resultant vector of all the unit direction vectors
    cart = dir2cart(dir_df[['dir_dec', 'dir_inc']].values).transpose()
    resultant = np.array([cart[0].sum(), cart[1].sum(), cart[2].sum()])
    R = np.sqrt(resultant[0] ** 2 + resultant[1] ** 2 + resultant[2] ** 2)
    mean_dir = cart2dir(resultant / R)
    fpars["dec"] = mean_dir[0]
    fpars["inc"] = mean_dir[1]
    fpars["n"] = N
    fpars["r"] = R
    if N != R:
        k = (N - 1.) / (N - R)
        fpars["k"] = k
        csd = 81. / np.sqrt(k)
    else:
        fpars['k'] = 'inf'
        csd = 0.
    # alpha95 from R and N
    b = 20. ** (1. / (N - 1.)) - 1
    a = 1 - b * (N - R) / R
    if a < -1:
        a = -1
    fpars["alpha95"] = np.degrees(np.arccos(a))
    fpars["csd"] = csd
    if a < 0:
        fpars["alpha95"] = 180.0
    return fpars
|
python
|
{
"resource": ""
}
|
q11631
|
pseudosample
|
train
|
def pseudosample(x):
    """
    Draws a bootstrap sample (with replacement) of x.
    Parameters
    ----------
    x : sequence of values
    Returns
    -------
    BXs : list of len(x) values drawn from x with replacement
    """
    # BUGFIX: `random` here is numpy.random, whose randint EXCLUDES the
    # upper bound -- randint(0, len(x) - 1) could never draw the last
    # element (and raised ValueError for len(x) == 1).  Use len(x) so
    # every index 0..len(x)-1 is reachable.
    BXs = []
    for _ in range(len(x)):
        ind = random.randint(0, len(x))
        BXs.append(x[ind])
    return BXs
|
python
|
{
"resource": ""
}
|
q11632
|
bc02
|
train
|
def bc02(data):
    """
    get APWP from Besse and Courtillot 2002 paper
    Parameters
    ----------
    data : [plate, site_lat, site_lon, age]
        plate : string (options: AF, ANT, AU, EU, GL, IN, NA, SA)
        site_lat : float
        site_lon : float
        age : float in Myr
    Returns
    ----------
    pole_lat, pole_lon : position of the pole nearest in age
    """
    plate, site_lat, site_lon, age = data[0], data[1], data[2], data[3]
    apwp = get_plate_data(plate)
    recs = apwp.split()
    #
    # reshape the flat [age, lat, lon, age, lat, lon, ...] token list
    #
    k, plate_data = 0, []
    while k < len(recs) - 3:
        rec = [float(recs[k]), float(recs[k + 1]), float(recs[k + 2])]
        plate_data.append(rec)
        k = k + 3
    #
    # find the two poles bracketing the age and pick the nearer one
    #
    for i in range(len(plate_data)):
        if age >= plate_data[i][0] and age <= plate_data[i + 1][0]:
            # BUGFIX: the right-hand side compared against pole i instead
            # of pole i+1 (`plate_data[i][0] - age`), which is never
            # larger once age >= pole i, so the nearer pole was not chosen
            if (age - plate_data[i][0]) < (plate_data[i + 1][0] - age):
                rec = i
            else:
                rec = i + 1
            break
    pole_lat = plate_data[rec][1]
    pole_lon = plate_data[rec][2]
    return pole_lat, pole_lon
|
python
|
{
"resource": ""
}
|
q11633
|
linreg
|
train
|
def linreg(x, y):
    """
    Performs an ordinary least-squares linear regression of y on x.
    Parameters
    ----------
    x, y : equal-length sequences of numbers
    Returns
    -------
    linpars : dict with keys
        slope : regression slope
        b : intercept
        r : correlation coefficient
        sigma : variance of the residuals
        n : number of points
    (prints a message and returns None if the lengths differ)
    """
    if len(x) != len(y):
        print('x and y must be same length')
        return
    n = len(x)
    sum_xx = sum_yy = sum_xy = sum_x = sum_y = 0
    for xi, yi in zip(x, y):
        sum_xx += xi * xi
        sum_yy += yi * yi
        sum_xy += xi * yi
        sum_x += xi
        sum_y += yi
    linpars = {}
    # true division throughout: the old_div calls floor-divided whenever
    # both operands were ints, silently corrupting the sums-of-squares
    xsig = np.sqrt((sum_xx - sum_x ** 2 / n) / (n - 1.))
    ysig = np.sqrt((sum_yy - sum_y ** 2 / n) / (n - 1.))
    linpars['slope'] = (sum_xy - (sum_x * sum_y / n)) / (sum_xx - sum_x ** 2 / n)
    linpars['b'] = (sum_y - linpars['slope'] * sum_x) / n
    linpars['r'] = (linpars['slope'] * xsig) / ysig
    # BUGFIX: residuals must be squared; summing raw residuals gives ~0
    # by construction, making the original 'sigma' meaningless
    res_sum = 0
    for xi, yi in zip(x, y):
        res = yi - linpars['b'] - linpars['slope'] * xi
        res_sum += res * res
    linpars['sigma'] = res_sum / (n - 2.)
    linpars['n'] = n
    return linpars
|
python
|
{
"resource": ""
}
|
q11634
|
add_flag
|
train
|
def add_flag(var, flag):
    """
    For use when calling command-line scripts from within a program:
    if a variable is truthy, return its command-line flag followed by
    the value as one string; otherwise return an empty string.
    """
    return flag + " " + str(var) if var else ""
|
python
|
{
"resource": ""
}
|
q11635
|
get_named_arg
|
train
|
def get_named_arg(name, default_val=None, reqd=False):
    """
    Extract the value after a command-line flag such as '-f' and return it.
    If the command-line flag is missing, return default_val.
    If reqd == True and the command-line flag is missing, throw an error.
    Parameters
    ----------
    name : str
        command line flag, e.g. "-f"
    default_val
        value to use if command line flag is missing, e.g. "measurements.txt"
        default is None
    reqd : bool
        throw error if reqd==True and command line flag is missing.
        if reqd == True, default_val will be ignored.
        default is False.
    Returns
    ---------
    Desired value from sys.argv if available, otherwise default_val.
    """
    if name not in sys.argv:
        # flag absent: hard error when required, otherwise the fallback
        if reqd:
            raise MissingCommandLineArgException(name)
        return default_val
    # the value is the token immediately following the flag
    return sys.argv[sys.argv.index(name) + 1]
|
python
|
{
"resource": ""
}
|
q11636
|
separate_directions
|
train
|
def separate_directions(di_block):
    """
    Separates a set of directions into two modes based on the principal
    direction: directions within 90 degrees of it form mode 1, the rest
    form mode 2.
    Parameters
    _______________
    di_block : block of nested dec, inc pairs
    Return
    mode_1_block, mode_2_block : two lists of nested dec, inc pairs
    """
    principal = doprinc(di_block)
    frame = pd.DataFrame(di_block)
    frame.columns = ['dec', 'inc']
    frame['pdec'] = principal['dec']
    frame['pinc'] = principal['inc']
    # angular distance of each direction from the principal direction
    frame['angle'] = angle(frame[['dec', 'inc']].values,
                           frame[['pdec', 'pinc']].values)
    near = frame[frame['angle'] <= 90]
    far = frame[frame['angle'] > 90]
    return (near[['dec', 'inc']].values.tolist(),
            far[['dec', 'inc']].values.tolist())
|
python
|
{
"resource": ""
}
|
q11637
|
import_basemap
|
train
|
def import_basemap():
    """
    Try to import Basemap and print out a useful help message
    if Basemap is either not installed or is missing required
    environment variables.
    Returns
    ---------
    has_basemap : bool
    Basemap : Basemap package if possible else None
        (False instead of None when cartopy is available and the
        import failed -- preserved quirk of the early returns)
    """
    Basemap = None
    has_basemap = True
    # cartopy availability decides whether a failed Basemap import
    # deserves a warning at all
    has_cartopy = import_cartopy()[0]
    try:
        from mpl_toolkits.basemap import Basemap
        WARNINGS['has_basemap'] = True
    except ImportError:
        # Basemap is simply not installed
        has_basemap = False
        # if they have installed cartopy, no warning is needed
        if has_cartopy:
            return has_basemap, False
        # if they haven't installed Basemap or cartopy, they need to be warned
        # (WARNINGS['basemap'] suppresses repeat warnings -- set below)
        if not WARNINGS['basemap']:
            print(
                "-W- You haven't installed a module for plotting maps (cartopy or Basemap)")
            print(" Recommended: install cartopy. With conda:")
            print(" conda install cartopy")
            print(
                " For more information, see http://earthref.org/PmagPy/Cookbook#getting_python")
    except (KeyError, FileNotFoundError):
        # Basemap is installed but blew up on import -- presumably a
        # missing environment variable or data file (TODO confirm which
        # failure modes raise these)
        has_basemap = False
        # if cartopy is installed, no warning is needed
        if has_cartopy:
            return has_basemap, False
        if not WARNINGS['basemap']:
            print('-W- Basemap is installed but could not be imported.')
            print(' You are probably missing a required environment variable')
            print(
                ' If you need to use Basemap, you will need to run this program or notebook in a conda env.')
            print(' For more on how to create a conda env, see: https://conda.io/docs/user-guide/tasks/manage-environments.html')
            print(
                ' Recommended alternative: install cartopy for plotting maps. With conda:')
            print(' conda install cartopy')
    # deprecation nudge for Basemap-only installations
    if has_basemap and not has_cartopy:
        print("-W- You have installed Basemap but not cartopy.")
        print(" In the future, Basemap will no longer be supported.")
        print(" To continue to make maps, install using conda:")
        print(' conda install cartopy')
    # remember that the user has been warned (or checked) once
    WARNINGS['basemap'] = True
    return has_basemap, Basemap
|
python
|
{
"resource": ""
}
|
q11638
|
import_cartopy
|
train
|
def import_cartopy():
    """
    Try to import cartopy and print out a help message
    if it is not installed

    Returns
    ---------
    has_cartopy : bool
    cartopy : cartopy package if available else None
    """
    cartopy = None
    has_cartopy = True
    try:
        import cartopy
        # record success in the module-level WARNINGS dict for other callers
        WARNINGS['has_cartopy'] = True
    except ImportError:
        has_cartopy = False
        # warn only once per session
        if not WARNINGS['cartopy']:
            print('-W- cartopy is not installed')
            print('    If you want to make maps, install using conda:')
            print('    conda install cartopy')
            WARNINGS['cartopy'] = True
    return has_cartopy, cartopy
|
python
|
{
"resource": ""
}
|
q11639
|
method_codes_to_geomagia
|
train
|
def method_codes_to_geomagia(magic_method_codes, geomagia_table):
    """
    Map a MagIC method-code string onto the corresponding GEOMAGIA code
    number for the specified GEOMAGIA table.

    Parameters
    ----------
    magic_method_codes : str
        Colon-delimited list of MagIC method codes. Matching is done by
        substring, so more specific codes are tested before generic ones.
    geomagia_table : str
        GEOMAGIA table name (case-insensitive): 'alteration_monit_corr',
        'md_checks', 'anisotropy_correction', 'cooling_rate',
        'dm_methods', 'dm_analysis', or 'specimen_type_id'.

    Returns
    -------
    str
        GEOMAGIA code number; '0' (GEOMAGIA's "Not specified") if no
        match. Multiple matched codes are separated with ':'.
    """
    codes = magic_method_codes
    geomagia = geomagia_table.lower()
    geomagia_code = '0'
    if geomagia == 'alteration_monit_corr':
        # each candidate needs its own 'in' test — a bare
        # `"X" or "Y" in codes` is always truthy (fixed bug)
        if ("DA-ALT-V" in codes) or ("LP-PI-ALT-PTRM" in codes) or ("LP-PI-ALT-PMRM" in codes):
            geomagia_code = '1'
        elif "LP-PI-ALT-SUSC" in codes:
            geomagia_code = '2'
        elif ("DA-ALT-RS" in codes) or ("LP-PI-ALT-AFARM" in codes):
            geomagia_code = '3'
        elif "LP-PI-ALT-WALTON" in codes:
            geomagia_code = '4'
        elif "LP-PI-ALT-TANGUY" in codes:
            geomagia_code = '5'
        elif "DA-ALT" in codes:
            geomagia_code = '6'  # generic code last so specific ones win
        elif "LP-PI-ALT-FABIAN" in codes:
            geomagia_code = '7'
    if geomagia == 'md_checks':
        if ("LT-PTRM-MD" in codes) or ("LT-PMRM-MD" in codes):
            geomagia_code = '1:'
        if ("LP-PI-BT-LT" in codes) or ("LT-LT-Z" in codes):
            if "0" in geomagia_code:
                geomagia_code = "23:"
            else:
                geomagia_code += '2:'
        # strip only a trailing ':' so a plain '0' ("Not specified") is
        # preserved (the old unconditional [:-1] returned '' here)
        geomagia_code = geomagia_code.rstrip(':')
    if geomagia == 'anisotropy_correction':
        if "DA-AC-AMS" in codes:
            geomagia_code = '1'
        elif "DA-AC-AARM" in codes:
            geomagia_code = '2'
        elif "DA-AC-ATRM" in codes:
            geomagia_code = '3'
        elif "LT-NRM-PAR" in codes:
            geomagia_code = '4'
        elif "DA-AC-AIRM" in codes:
            geomagia_code = '6'
        elif "DA-AC" in codes:  # generic code last so specific ones win
            geomagia_code = '5'
    if geomagia == 'cooling_rate':
        # all current CR codes but CR-EG are a 1, but this may change
        if "DA-CR" in codes:
            geomagia_code = '1'
        if "DA-CR-EG" in codes:
            geomagia_code = '2'
    if geomagia == 'dm_methods':
        if "LP-DIR-AF" in codes:
            geomagia_code = '1'
        elif "LT-AF-D" in codes:
            geomagia_code = '1'
        elif "LT-AF-G" in codes:
            geomagia_code = '1'
        elif "LT-AF-Z" in codes:
            geomagia_code = '1'
        elif "LP-DIR-T" in codes:
            geomagia_code = '2'
        # NOTE(review): this "LT-AF-Z" branch is unreachable (already
        # matched above); kept for fidelity with the original table
        elif "LT-AF-Z" in codes:
            geomagia_code = '2'
        elif "LP-DIR-M" in codes:
            geomagia_code = '5'
        elif "LT-M-Z" in codes:
            geomagia_code = '5'
    if geomagia == 'dm_analysis':
        if "DE-BFL" in codes:
            geomagia_code = '1'
        elif "DE-BLANKET" in codes:
            geomagia_code = '2'
        elif "DE-FM" in codes:
            geomagia_code = '3'
        elif "DE-NRM" in codes:
            geomagia_code = '6'
    if geomagia == 'specimen_type_id':
        if "SC-TYPE-CYC" in codes:
            geomagia_code = '1'
        elif "SC-TYPE-CUBE" in codes:
            geomagia_code = '2'
        elif "SC-TYPE-MINI" in codes:
            geomagia_code = '3'
        elif "SC-TYPE-SC" in codes:
            geomagia_code = '4'
        elif "SC-TYPE-UC" in codes:
            geomagia_code = '5'
        elif "SC-TYPE-LARGE" in codes:
            geomagia_code = '6'
    return geomagia_code
|
python
|
{
"resource": ""
}
|
q11640
|
do_walk
|
train
|
def do_walk(data_path):
    """
    Recursively walk data_path and return a dict mapping each directory
    path to the list of "useful" data file names it contains.

    Parameters
    ----------
    data_path : str
        Root directory to walk.

    Returns
    -------
    dict
        {directory_path: [filename, ...]} for every directory under
        data_path.
    """
    data_files = {}

    def cond(fname, prefix):
        """Return True for useful files; False for hidden/backup/compiled files."""
        return (not fname.startswith('!') and
                not fname.endswith('~') and
                not fname.endswith('#') and
                not fname.endswith('.pyc') and
                not fname.startswith('.') and
                path.exists(path.join(prefix, fname)))

    # os.walk already descends into every subdirectory, so a single pass
    # is sufficient (the previous explicit recursion discarded its result)
    for (dir_path, dirs, files) in os.walk(data_path):
        data_files[dir_path] = [f for f in files if cond(f, dir_path)]
    return data_files
|
python
|
{
"resource": ""
}
|
q11641
|
main
|
train
|
def main():
    """
    NAME
        change_case_magic.py
    DESCRIPTION
        picks out key and converts to upper or lower case
    SYNTAX
        change_case_magic.py [command line options]
    OPTIONS
        -h prints help message and quits
        -f FILE: specify input magic format file
        -F FILE: specify output magic format file , default is to overwrite input file
        -keys KEY1:KEY2 specify colon delimited list of keys to convert
        -[U,l] : specify [U]PPER or [l]ower case, default is lower
    """
    dir_path="./"
    change='l'  # default: convert to lower case
    if '-WD' in sys.argv:
        ind=sys.argv.index('-WD')
        dir_path=sys.argv[ind+1]
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-f' in sys.argv:
        ind=sys.argv.index('-f')
        magic_file=dir_path+'/'+sys.argv[ind+1]
    else:
        # input file is required
        print(main.__doc__)
        sys.exit()
    if '-F' in sys.argv:
        ind=sys.argv.index('-F')
        out_file=dir_path+'/'+sys.argv[ind+1]
    else: out_file=magic_file  # default: overwrite the input file
    if '-keys' in sys.argv:
        ind=sys.argv.index('-keys')
        grab_keys=sys.argv[ind+1].split(":")
    else:
        # at least one key is required
        print(main.__doc__)
        sys.exit()
    if '-U' in sys.argv: change='U'
    #
    #
    # get data read in
    Data,file_type=pmag.magic_read(magic_file)
    if len(Data)>0:
        # convert the value of each requested key in every record
        for grab_key in grab_keys:
            for rec in Data:
                if change=='l':
                    rec[grab_key]=rec[grab_key].lower()
                else:
                    rec[grab_key]=rec[grab_key].upper()
    else:
        print('bad file name')
    # NOTE(review): the file is written even when the read failed — confirm
    pmag.magic_write(out_file,Data,file_type)
|
python
|
{
"resource": ""
}
|
q11642
|
main
|
train
|
def main():
    """
    NAME
        download_magic.py
    DESCRIPTION
        unpacks a magic formatted smartbook .txt file from the MagIC database into the
        tab delimited MagIC format txt files for use with the MagIC-Py programs.
    SYNTAX
        download_magic.py command line options]
    INPUT
        takes either the upload.txt file created by upload_magic.py or a file
        downloaded from the MagIC database (http://earthref.org/MagIC)
    OPTIONS
        -h prints help message and quits
        -i allows interactive entry of filename
        -f FILE specifies input file name
        -sep write location data to separate subdirectories (Location_*), (default False)
        -O do not overwrite duplicate Location_* directories while downloading
        -DM data model (2 or 3, default 3)
    """
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    # defaults used by the interactive path; previously these were left
    # undefined there, causing a NameError at the download_magic call below
    overwrite = True
    sep = False
    data_model = 3
    if '-i' in sys.argv:
        # interactive entry
        infile = input("Magic txt file for unpacking? ")
        dir_path = '.'
        input_dir_path = '.'
    # non-interactive
    else:
        infile = pmag.get_named_arg("-f", reqd=True)
        # if -O flag is present, overwrite is False
        overwrite = pmag.get_flag_arg_from_sys("-O", true=False, false=True)
        # if -sep flag is present, sep is True
        sep = pmag.get_flag_arg_from_sys("-sep", true=True, false=False)
        data_model = pmag.get_named_arg("-DM", default_val=3, reqd=False)
        dir_path = pmag.get_named_arg("-WD", default_val=".", reqd=False)
        input_dir_path = pmag.get_named_arg("-ID", default_val=".", reqd=False)
        # with no explicit dirs, read from wherever the input file lives
        if "-WD" not in sys.argv and "-ID" not in sys.argv:
            input_dir_path = os.path.split(infile)[0]
            if not input_dir_path:
                input_dir_path = "."
    ipmag.download_magic(infile, dir_path, input_dir_path,
                         overwrite, True, data_model, sep)
|
python
|
{
"resource": ""
}
|
q11643
|
smooth
|
train
|
def smooth(x, window_len, window='bartlett'):
    """
    Smooth a 1-D signal using a sliding window with requested size.

    The smoothing is the convolution of a scaled window with the signal.
    The signal is first padded at both ends with the average of its first
    (last) ten values, to avoid jumps at the beginning/end.

    Parameters
    ----------
    x : 1-D numpy array
        The input signal, assumed equally spaced.
    window_len : int
        The dimension of the smoothing window.
    window : str
        One of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'.
        - 'flat' produces a moving-average smoothing.
        - 'bartlett' (default) is similar to a triangular window but
          always ends with zeros at points 1 and n.
        - 'hanning', 'hamming', 'blackman' are typically used for
          smoothing a Fourier transform (e.g. Curie temperature work).

    Returns
    -------
    numpy array
        The smoothed signal, same length as x.

    Raises
    ------
    ValueError
        If x is not 1-D, shorter than window_len, or window is unknown.
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        # window too small to smooth anything; return the signal untouched
        return x
    # windows available from numpy
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError(
            "Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # pad both ends with the average of the ten edge samples to avoid
    # edge effects in the convolution
    start = [numpy.average(x[0:10])] * window_len
    end = [numpy.average(x[-10:])] * window_len
    s = start + list(x) + end
    if window == 'flat':  # moving average
        w = numpy.ones(window_len, 'd')
    else:
        # look the window function up by name instead of using eval()
        w = getattr(numpy, window)(window_len)
    y = numpy.convolve(w / w.sum(), s, mode='same')
    # drop the padding so the output lines up with the input
    return numpy.array(y[window_len:-window_len])
|
python
|
{
"resource": ""
}
|
q11644
|
deriv1
|
train
|
def deriv1(x, y, i, n):
    """
    Estimate the derivative (slope) of a noisy signal at position i via a
    least-squares line fit through the n points starting at index i.

    Parameters
    ----------
    x : sequence of x-axis values
    y : sequence of y-axis values
    i : int
        Starting index of the fit window.
    n : int
        Number of points used in the fit (smoothing factor).

    Returns
    -------
    float
        Least-squares slope over x[i:i+n], y[i:i+n].
    """
    x_sum, y_sum, xy_sum, xx_sum = 0., 0., 0., 0.
    for ix in range(i, i + n, 1):
        x_sum += x[ix]
        y_sum += y[ix]
        xy_sum += x[ix] * y[ix]
        xx_sum += x[ix] ** 2
    # standard least-squares slope: (n*Sxy - Sx*Sy) / (n*Sxx - Sx**2)
    return (n * xy_sum - x_sum * y_sum) / (n * xx_sum - x_sum ** 2)
|
python
|
{
"resource": ""
}
|
q11645
|
main
|
train
|
def main():
    """
    NAME
        extract_methods.py
    DESCRIPTION
        reads in a magic table and creates a file with method codes
    SYNTAX
        extract_methods.py [command line options]
    OPTIONS
        -h: prints the help message and quits.
        -f FILE: specify magic format input file, default is magic_measurements.txt
        -F FILE: specify method code output file, default is magic_methods.txt
    """
    citation='This study'  # NOTE(review): unused variable
    args=sys.argv
    outfile='magic_methods.txt'
    infile='magic_measurements.txt'
    #
    # get command line arguments
    #
    dir_path='.'
    if '-WD' in args:
        ind=args.index("-WD")
        dir_path=args[ind+1]
    if "-h" in args:
        print(main.__doc__)
        sys.exit()
    if '-F' in args:
        ind=args.index("-F")
        outfile=args[ind+1]
    if '-f' in args:
        ind=args.index("-f")
        infile=args[ind+1]
    infile=dir_path+'/'+infile
    outfile=dir_path+'/'+outfile
    # read the full MagIC-format table
    data,file_type=pmag.magic_read(infile)
    MethRecs=[]
    methods=[]
    # collect each unique method code (colon-delimited within each record)
    for rec in data:
        meths=rec['magic_method_codes'].split(":")
        for meth in meths:
            if meth not in methods:
                MethRec={}
                methods.append(meth)
                MethRec['magic_method_code']=meth
                MethRecs.append(MethRec)
    # write one record per unique method code
    pmag.magic_write(outfile,MethRecs,'magic_methods')
|
python
|
{
"resource": ""
}
|
q11646
|
main
|
train
|
def main():
    """
    NAME
       gofish.py
    DESCRIPTION
       calculates fisher parameters from dec inc data
    INPUT FORMAT
       takes dec/inc as first two columns in space delimited file
    SYNTAX
       gofish.py [options] [< filename]
    OPTIONS
        -h prints help message and quits
        -i for interactive filename entry
        -f FILE, specify input file
        -F FILE, specifies output file name
        < filename for reading from standard input
    OUTPUT
       mean dec, mean inc, N, R, k, a95, csd
    """
    if '-h' in sys.argv:  # check if help is needed
        print(main.__doc__)
        sys.exit()  # graceful quit
    if '-i' in sys.argv:  # ask for filename
        file = input("Enter file name with dec, inc data: ")
        with open(file, 'r') as f:
            data = f.readlines()
    elif '-f' in sys.argv:
        ind = sys.argv.index('-f')
        file = sys.argv[ind + 1]
        with open(file, 'r') as f:
            data = f.readlines()
    else:
        data = sys.stdin.readlines()  # read from standard input
    ofile = ""
    if '-F' in sys.argv:
        ind = sys.argv.index('-F')
        ofile = sys.argv[ind + 1]
    DIs = []  # set up list for dec inc data
    for line in data:
        # accept either tab- or whitespace-delimited records
        if '\t' in line:
            rec = line.split('\t')
        else:
            rec = line.split()
        DIs.append((float(rec[0]), float(rec[1])))
    fpars = pmag.fisher_mean(DIs)
    outstring = '%7.1f %7.1f %i %10.4f %8.1f %7.1f %7.1f' % (
        fpars['dec'], fpars['inc'], fpars['n'], fpars['r'], fpars['k'],
        fpars['alpha95'], fpars['csd'])
    if ofile == "":
        print(outstring)
    else:
        # 'w + a' is not a valid open() mode in Python 3 (raises
        # ValueError); open for writing and close the handle when done
        with open(ofile, 'w') as out:
            out.write(outstring + '\n')
|
python
|
{
"resource": ""
}
|
q11647
|
main
|
train
|
def main():
    """
    NAME
        huji_sample_magic.py
    DESCRIPTION
        takes tab delimited Hebrew University sample file and converts to MagIC formatted tables
    SYNTAX
        huji_sample_magic.py [command line options]
    OPTIONS
        -f FILE: specify input file
        -Fsa FILE: specify sample output file, default is: samples.txt
        -Fsi FILE: specify site output file, default is: sites.txt
        -Iso:  import sample orientation info - default is to set sample_az/dip to 0,0
        -ncn NCON:  specify naming convention: default is #1 below
        -mcd: specify sampling method codes as a colon delimited string:  [default is: FS-FD:SO-POM:SO-SUN]
             FS-FD field sampling done with a drill
             FS-H field sampling done with hand samples
             FS-LOC-GPS  field location done with GPS
             FS-LOC-MAP  field location done with map
             SO-POM   a Pomeroy orientation device was used
             SO-ASC   an ASC orientation device was used
             SO-MAG   orientation with magnetic compass
        -loc: location name, default="unknown"
        -DM: data model number (MagIC 2 or 3, default 3)
    INPUT FORMAT
        Input files must be tab delimited:
            Samp  Az Dip Dip_dir Dip
        Orientation convention:
             Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
                e.g. field_dip is degrees from horizontal of drill direction
         Magnetic declination convention:
             Az is already corrected in file
       Sample naming convention:
            [1] XXXXY: where XXXX is an arbitrary length site designation and Y
                is the single character sample designation.  e.g., TG001a is the
                first sample from site TG001.    [default]
            [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
            [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
            [4-Z] XXXX[YYY]:  YYY is sample designation with Z characters from site XXX
            [5] site name = sample name
            [6] site name entered in site_name column in the orient.txt format input file  -- NOT CURRENTLY SUPPORTED
            [7-Z] [XXX]YYY:  XXX is site designation with Z characters from samples  XXXYYY
            NB: all others you will have to either customize your
                self or e-mail ltauxe@ucsd.edu for help.
    OUTPUT
            output saved in samples will overwrite any existing files
    """
    args = sys.argv
    if "-h" in args:
        print(main.__doc__)
        sys.exit()
    #
    # initialize variables
    Z = 1  # character count for naming conventions 4-Z / 7-Z
    # get arguments from the command line
    orient_file = pmag.get_named_arg("-f", reqd=True)
    data_model_num = int(float(pmag.get_named_arg("-DM", 3)))
    # output file defaults differ between MagIC data models 2 and 3
    if data_model_num == 2:
        samp_file = pmag.get_named_arg("-Fsa", "er_samples.txt")
        site_file = pmag.get_named_arg("-Fsi", "er_sites.txt")
    else:
        samp_file = pmag.get_named_arg("-Fsa", "samples.txt")
        site_file = pmag.get_named_arg("-Fsi", "sites.txt")
    samp_con = pmag.get_named_arg("-ncn", "1")
    # convention 4 must carry a character count, e.g. "4-2"
    if "4" in samp_con:
        if "-" not in samp_con:
            print("option [4] must be in form 3-Z where Z is an integer")
            sys.exit()
        else:
            Z = samp_con.split("-")[1]
            #samp_con = "4"
    print(samp_con)#, Z)
    meths = pmag.get_named_arg("-mcd", 'FS-FD:SO-POM:SO-SUN')
    location_name = pmag.get_named_arg("-loc", "unknown")
    # -Iso means: use the orientation info from the file (ignore=0)
    if "-Iso" in args:
        ignore = 0
    else:
        ignore = 1
    convert.huji_sample(orient_file, meths, location_name, samp_con, ignore)
|
python
|
{
"resource": ""
}
|
q11648
|
main
|
train
|
def main():
    """
    NAME
        vector_mean.py
    DESCRIPTION
        calculates vector mean of vector data
    INPUT FORMAT
        takes dec, inc, int from an input file
    SYNTAX
        vector_mean.py [command line options] [< filename]
    OPTIONS
        -h prints help message and quits
        -f FILE, specify input file
        -F FILE, specify output file
        < filename for reading from standard input
    OUTPUT
        mean dec, mean inc, R, N
    """
    if '-h' in sys.argv:  # check if help is needed
        print(main.__doc__)
        sys.exit()  # graceful quit
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        file = sys.argv[ind + 1]
    else:
        file = sys.stdin  # read from standard input
    ofile = ""
    if '-F' in sys.argv:
        ind = sys.argv.index('-F')
        ofile = sys.argv[ind + 1]
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent
    DIIs = numpy.loadtxt(file, dtype=float)  # read in the data
    vpars, R = pmag.vector_mean(DIIs)
    outstring = '%7.1f %7.1f %10.3e %i' % (vpars[0], vpars[1], R, len(DIIs))
    if ofile == "":
        print(outstring)
    else:
        # 'w + a' is not a valid open() mode in Python 3 (raises
        # ValueError); open for writing and close the handle when done
        with open(ofile, 'w') as out:
            out.write(outstring + "\n")
|
python
|
{
"resource": ""
}
|
q11649
|
array_map
|
train
|
def array_map(f, ar):
    """
    Apply an ordinary function to every value in an array, preserving
    its shape.

    Parameters
    ----------
    f : callable
        Function applied to each scalar element.
    ar : numpy array
        Input array.

    Returns
    -------
    numpy array of the same shape as ar with f applied elementwise.
    """
    flat_ar = ravel(ar)
    # numpy arrays expose .dtype; the legacy Numeric .typecode() method
    # does not exist on numpy ndarrays
    out = zeros(len(flat_ar), flat_ar.dtype)
    for i in range(len(flat_ar)):
        out[i] = f(flat_ar[i])
    out.shape = ar.shape
    return out
|
python
|
{
"resource": ""
}
|
q11650
|
VGP_Dialog.on_plot_select
|
train
|
def on_plot_select(self,event):
    """
    Select the data point nearest the click location, if the cursor is
    within a few pixels of one.

    Parameters
    ----------
    event : wx mouse event for the click
    """
    # nothing plotted yet -> nothing to select
    if not self.xdata or not self.ydata: return
    pos=event.GetPosition()
    # wx y-coordinates grow downward; flip to matplotlib's convention
    width, height = self.canvas.get_width_height()
    pos[1] = height - pos[1]
    xpick_data,ypick_data = pos
    # project data coordinates into display (pixel) coordinates so the
    # distance test below is in pixels
    xdata_org = self.xdata
    ydata_org = self.ydata
    data_corrected = self.map.transData.transform(vstack([xdata_org,ydata_org]).T)
    xdata,ydata = data_corrected.T
    xdata = list(map(float,xdata))
    ydata = list(map(float,ydata))
    e = 4e0  # pick radius in pixels
    index = None
    # take the first point within the pick radius of the click
    for i,(x,y) in enumerate(zip(xdata,ydata)):
        if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
            index = i
            break
    # NOTE(review): when no point is found, change_selected(None) is still
    # called after the warning — confirm this is intended
    if index==None: print("Couldn't find point %.1f,%.1f"%(xpick_data,ypick_data))
    self.change_selected(index)
|
python
|
{
"resource": ""
}
|
q11651
|
user_input.get_values
|
train
|
def get_values(self):
    """
    Read every input control, run each value through its parsing function
    (when one was supplied at construction), and return a tuple of
    (all_filled, values): a boolean that is True when no entry is the
    empty string, and a dict mapping input names to parsed values.
    Parsing failures fall back to the raw control value.
    """
    parsed = {}
    funcs = self.parse_funcs
    for idx, widget in enumerate(self.list_ctrls):
        raw = widget.GetValue()
        value = raw
        has_parser = (hasattr(funcs, '__getitem__') and
                      len(funcs) > idx and
                      hasattr(funcs[idx], '__call__'))
        if has_parser:
            try:
                value = funcs[idx](raw)
            except:
                value = raw
        parsed[self.inputs[idx]] = value
    return ('' not in list(parsed.values()), parsed)
|
python
|
{
"resource": ""
}
|
q11652
|
main
|
train
|
def main():
    """
    NAME
        upload_magic.py
    DESCRIPTION
        This program will prepare your MagIC text files for uploading to the MagIC database
        it will check for all the MagIC text files and skip the missing ones
    SYNTAX
        upload_magic.py
    INPUT
        MagIC txt files
    OPTIONS
        -h prints help message and quits
        -all include all the measurement data, default is only those used in interpretations
        -DM specify which MagIC data model number to use (2 or 3). Default is 3.
    OUTPUT
        upload file:  file for uploading to MagIC database
    """
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    else:
        data_model_num = pmag.get_named_arg("-DM", 3)
        # parse the remaining flags via the generic extractor helper
        dataframe = extractor.command_line_dataframe([['cat', False, 0], ['F', False, ''], ['f', False, '']])
        checked_args = extractor.extract_and_check_args(sys.argv, dataframe)
        dir_path, concat = extractor.get_vars(['WD', 'cat'], checked_args)
        data_model_num = int(float(data_model_num))
        # dispatch on MagIC data model version
        if data_model_num == 2:
            ipmag.upload_magic2(concat, dir_path)
        else:
            ipmag.upload_magic(concat, dir_path)
|
python
|
{
"resource": ""
}
|
q11653
|
split_lines
|
train
|
def split_lines(lines):
    """
    Split the lines of a MagIC upload-format file into per-table lists.

    Tables are delimited by lines containing '>>>'; each group of lines
    between delimiters becomes one list.

    Parameters
    ----------
    lines : list of str

    Returns
    -------
    list of lists of str (always at least one, possibly empty, group)
    """
    groups = []
    current = []
    for entry in lines:
        if '>>>' not in entry:
            current.append(entry)
        else:
            # delimiter reached: close out the current group
            groups.append(current)
            current = []
    groups.append(current)
    return groups
|
python
|
{
"resource": ""
}
|
q11654
|
fisher_angular_deviation
|
train
|
def fisher_angular_deviation(dec=None, inc=None, di_block=None, confidence=95):
    '''
    The angle from the true mean within which a chosen percentage of directions
    lie can be calculated from the Fisher distribution. This function uses the
    calculated Fisher concentration parameter to estimate this angle from
    directional data. The 63 percent confidence interval is often called the
    angular standard deviation.

    Parameters
    ----------
    dec : list of declinations or longitudes
    inc : list of inclinations or latitudes
    di_block : a nested list of [dec,inc,1.0]
        A di_block can be provided instead of dec, inc lists in which case it
        will be used. Either dec, inc lists or a di_block need to be provided.
    confidence : 50 percent, 63 percent or 95 percent

    Returns
    -------
    theta : critical angle of interest from the mean which contains the
        percentage of directions specified by the confidence parameter

    Raises
    ------
    ValueError
        If confidence is not one of 50, 63 or 95 (previously this case
        fell through to an undefined variable and raised NameError).
    '''
    if di_block is None:
        di_block = make_di_block(dec, inc)
    # a single Fisher mean covers both input styles
    mean = pmag.fisher_mean(di_block)
    if confidence == 50:
        theta = 67.5 / np.sqrt(mean['k'])
    elif confidence == 63:
        theta = 81 / np.sqrt(mean['k'])
    elif confidence == 95:
        theta = 140 / np.sqrt(mean['k'])
    else:
        raise ValueError('confidence must be one of 50, 63 or 95')
    return theta
|
python
|
{
"resource": ""
}
|
q11655
|
print_direction_mean
|
train
|
def print_direction_mean(mean_dictionary):
    """
    Pretty-print a Fisher mean and its associated statistics for
    directional data.

    Parameters
    ----------
    mean_dictionary: output dictionary of pmag.fisher_mean
    """
    dec_rounded = round(mean_dictionary['dec'], 1)
    inc_rounded = round(mean_dictionary['inc'], 1)
    print(f'Dec: {dec_rounded}  Inc: {inc_rounded}')
    print(f"Number of directions in mean (n): {mean_dictionary['n']}")
    a95_rounded = round(mean_dictionary['alpha95'], 1)
    print(f'Angular radius of 95% confidence (a_95): {a95_rounded}')
    k_rounded = round(mean_dictionary['k'], 1)
    print(f'Precision parameter (k) estimate: {k_rounded}')
|
python
|
{
"resource": ""
}
|
q11656
|
print_pole_mean
|
train
|
def print_pole_mean(mean_dictionary):
    """
    Pretty-print a Fisher mean and its associated statistics for mean
    paleomagnetic poles.

    Parameters
    ----------
    mean_dictionary: output dictionary of pmag.fisher_mean
    """
    plon_rounded = round(mean_dictionary['dec'], 1)
    plat_rounded = round(mean_dictionary['inc'], 1)
    print(f'Plon: {plon_rounded}  Plat: {plat_rounded}')
    print(f"Number of directions in mean (n): {mean_dictionary['n']}")
    a95_rounded = round(mean_dictionary['alpha95'], 1)
    print(f'Angular radius of 95% confidence (A_95): {a95_rounded}')
    k_rounded = round(mean_dictionary['k'], 1)
    print(f'Precision parameter (k) estimate: {k_rounded}')
|
python
|
{
"resource": ""
}
|
q11657
|
fishrot
|
train
|
def fishrot(k=20, n=100, dec=0, inc=90, di_block=True):
    """
    Generate Fisher-distributed unit vectors from a specified distribution
    using the pmag.py fshdev and dodirot functions.

    Parameters
    ----------
    k : kappa precision parameter (default is 20)
    n : number of vectors to determine (default is 100)
    dec : mean declination of distribution (default is 0)
    inc : mean inclination of distribution (default is 90)
    di_block : this function returns a nested list of [dec,inc,1.0] as the default
        if di_block = False it will return a list of dec and a list of inc

    Returns
    -------
    di_block : a nested list of [dec,inc,1.0] (default)
    dec, inc : a list of dec and a list of inc (if di_block = False)

    Examples
    --------
    >>> ipmag.fishrot(k=20, n=5, dec=40, inc=60)
    [[44.766285502555775, 37.440866867657235, 1.0],
    [33.866315796883725, 64.732532250463436, 1.0],
    [47.002912770597163, 54.317853800896977, 1.0],
    [36.762165614432547, 56.857240672884252, 1.0],
    [71.43950604474395, 59.825830945715431, 1.0]]
    """
    rotated_decs = []
    rotated_incs = []
    for _ in range(n):
        # draw from a Fisher distribution about vertical, then rotate
        # the draw to the requested mean direction
        rand_dec, rand_inc = pmag.fshdev(k)
        rot_dec, rot_inc = pmag.dodirot(rand_dec, rand_inc, dec, inc)
        rotated_decs.append(rot_dec)
        rotated_incs.append(rot_inc)
    if di_block == True:
        return [[d, i, 1.] for d, i in zip(rotated_decs, rotated_incs)]
    return rotated_decs, rotated_incs
|
python
|
{
"resource": ""
}
|
q11658
|
lat_from_inc
|
train
|
def lat_from_inc(inc, a95=None):
    """
    Calculate paleolatitude from inclination using the dipole equation,
    tan(I) = 2 * tan(lat).

    Parameters
    ----------
    inc : (paleo)magnetic inclination in degrees
    a95 : 95% confidence interval from Fisher mean (optional)

    Returns
    -------
    if a95 is provided paleo_lat, paleo_lat_max, paleo_lat_min are returned
    otherwise, it just returns paleo_lat (degrees)
    """
    rad = np.pi / 180.
    paleo_lat = np.arctan(0.5 * np.tan(inc * rad)) / rad
    if a95 is not None:
        # propagate the confidence interval through the dipole equation
        paleo_lat_max = np.arctan(0.5 * np.tan((inc + a95) * rad)) / rad
        paleo_lat_min = np.arctan(0.5 * np.tan((inc - a95) * rad)) / rad
        return paleo_lat, paleo_lat_max, paleo_lat_min
    return paleo_lat
|
python
|
{
"resource": ""
}
|
q11659
|
lat_from_pole
|
train
|
def lat_from_pole(ref_loc_lon, ref_loc_lat, pole_plon, pole_plat):
    """
    Calculate the paleolatitude of a reference location implied by a
    paleomagnetic pole: 90 degrees minus the angular distance between
    the pole and the location.

    Parameters
    ----------
    ref_loc_lon: longitude of reference location in degrees
    ref_loc_lat: latitude of reference location
    pole_plon: paleopole longitude in degrees
    pole_plat: paleopole latitude in degrees

    Returns
    -------
    float paleolatitude in degrees
    """
    site = (ref_loc_lon, ref_loc_lat)
    paleopole = (pole_plon, pole_plat)
    colatitude = pmag.angle(paleopole, site)
    return float(90 - colatitude)
|
python
|
{
"resource": ""
}
|
q11660
|
inc_from_lat
|
train
|
def inc_from_lat(lat):
    """
    Calculate the inclination predicted from latitude using the dipole
    equation, tan(I) = 2 * tan(lat).

    Parameters
    ----------
    lat : latitude in degrees

    Returns
    -------
    inc : inclination (degrees) calculated using the dipole equation
    """
    rad = np.pi / 180.
    return np.arctan(2 * np.tan(lat * rad)) / rad
|
python
|
{
"resource": ""
}
|
q11661
|
plot_net
|
train
|
def plot_net(fignum):
    """
    Draw the circle and tick marks for an equal-area projection in the
    matplotlib figure numbered fignum.

    Parameters
    ----------
    fignum : matplotlib figure number to draw into (figure is cleared)
    """
    # make the perimeter
    plt.figure(num=fignum,)
    plt.clf()
    plt.axis("off")
    Dcirc = np.arange(0, 361.)
    Icirc = np.zeros(361, 'f')
    Xcirc, Ycirc = [], []
    for k in range(361):
        XY = pmag.dimap(Dcirc[k], Icirc[k])
        Xcirc.append(XY[0])
        Ycirc.append(XY[1])
    plt.plot(Xcirc, Ycirc, 'k')
    # put on the inclination tick marks along the four cardinal
    # declinations (the D=0 spoke historically includes I=90, the
    # center point, while the other spokes stop at I=80)
    for D, stop in [(0., 100), (90., 90), (180., 90), (270., 90)]:
        Xsym, Ysym = [], []
        for I in range(10, stop, 10):
            XY = pmag.dimap(D, I)
            Xsym.append(XY[0])
            Ysym.append(XY[1])
        plt.plot(Xsym, Ysym, 'k+')
    # short declination ticks every 10 degrees at the perimeter
    for D in range(0, 360, 10):
        Xtick, Ytick = [], []
        for I in range(4):
            XY = pmag.dimap(D, I)
            Xtick.append(XY[0])
            Ytick.append(XY[1])
        plt.plot(Xtick, Ytick, 'k')
    plt.axis("equal")
    plt.axis((-1.05, 1.05, -1.05, 1.05))
|
python
|
{
"resource": ""
}
|
q11662
|
plot_di
|
train
|
def plot_di(dec=None, inc=None, di_block=None, color='k', marker='o', markersize=20, legend='no', label='', title='', edge='',alpha=1):
    """
    Plot declination, inclination data on an equal area plot.

    Before this function is called a plot needs to be initialized with code that looks
    something like:
    >fignum = 1
    >plt.figure(num=fignum,figsize=(10,10),dpi=160)
    >ipmag.plot_net(fignum)

    Required Parameters
    -----------
    dec : declination being plotted
    inc : inclination being plotted

    or

    di_block: a nested list of [dec,inc,1.0]
    (di_block can be provided instead of dec, inc in which case it will be used)

    Optional Parameters (defaults are used if not specified)
    -----------
    color : the default color is black. Other colors can be chosen (e.g. 'r')
    marker : the default marker is a circle ('o')
    markersize : default size is 20
    label : the default label is blank ('')
    legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
    edge : marker edge color - if blank, is color of marker
    alpha : opacity
    """
    # upper/lower hemisphere points are collected separately so that
    # negative inclinations can be drawn as open symbols
    X_down = []
    X_up = []
    Y_down = []
    Y_up = []
    color_down = []
    color_up = []
    if di_block is not None:
        # a di_block overrides any dec/inc lists that were passed in
        di_lists = unpack_di_block(di_block)
        if len(di_lists) == 3:
            dec, inc, intensity = di_lists
        if len(di_lists) == 2:
            dec, inc = di_lists
    try:
        # list input: len() succeeds, so project each direction
        length = len(dec)
        for n in range(len(dec)):
            XY = pmag.dimap(dec[n], inc[n])
            if inc[n] >= 0:
                # positive inclination: lower hemisphere (filled symbol)
                X_down.append(XY[0])
                Y_down.append(XY[1])
                if type(color) == list:
                    color_down.append(color[n])
                else:
                    color_down.append(color)
            else:
                # negative inclination: upper hemisphere (open symbol)
                X_up.append(XY[0])
                Y_up.append(XY[1])
                if type(color) == list:
                    color_up.append(color[n])
                else:
                    color_up.append(color)
    except:
        # scalar input: len() raised, so plot the single direction
        XY = pmag.dimap(dec, inc)
        if inc >= 0:
            X_down.append(XY[0])
            Y_down.append(XY[1])
            color_down.append(color)
        else:
            X_up.append(XY[0])
            Y_up.append(XY[1])
            color_up.append(color)
    # upper hemisphere: open symbols with colored edges
    if len(X_up) > 0:
        plt.scatter(X_up, Y_up, facecolors='none', edgecolors=color_up,
                    s=markersize, marker=marker, label=label,alpha=alpha)
    # lower hemisphere: filled symbols
    if len(X_down) > 0:
        plt.scatter(X_down, Y_down, facecolors=color_down, edgecolors=edge,
                    s=markersize, marker=marker, label=label,alpha=alpha)
    if legend == 'yes':
        plt.legend(loc=2)
    plt.tight_layout()
    if title != "":
        plt.title(title)
|
python
|
{
"resource": ""
}
|
q11663
|
make_orthographic_map
|
train
|
def make_orthographic_map(central_longitude=0, central_latitude=0, figsize=(8, 8),
                          add_land=True, land_color='tan', add_ocean=False, ocean_color='lightblue', grid_lines=True,
                          lat_grid=[-80., -60., -30.,
                                    0., 30., 60., 80.],
                          lon_grid=[-180., -150., -120., -90., -60., -30., 0., 30., 60., 90., 120., 150., 180.]):
    '''
    Create and return an orthographic map projection axis using cartopy.

    Example
    -------
    >>> map_axis = make_orthographic_map(central_longitude=200,central_latitude=30)

    Optional Parameters
    -----------
    central_longitude : central longitude of projection (default is 0)
    central_latitude : central latitude of projection (default is 0)
    figsize : size of the figure (default is 8x8)
    add_land : whether land is drawn on the map (default is True)
    land_color : land fill color (default is 'tan')
    add_ocean : whether ocean is drawn on the map (default is False)
    ocean_color : ocean fill color (default is 'lightblue')
    grid_lines : whether grid lines are drawn (default is True)
    lat_grid : latitude grid line positions (default is 30 degree spacing)
    lon_grid : longitude grid line positions (default is 30 degree spacing)
    '''
    if not has_cartopy:
        print('-W- cartopy must be installed to run ipmag.make_orthographic_map')
        return
    plt.figure(figsize=figsize)
    projection = ccrs.Orthographic(central_longitude=central_longitude,
                                   central_latitude=central_latitude)
    map_axis = plt.axes(projection=projection)
    map_axis.set_global()
    # features are added at zorder 0 so data plotted later sits on top
    if add_ocean == True:
        map_axis.add_feature(cartopy.feature.OCEAN, zorder=0, facecolor=ocean_color)
    if add_land == True:
        map_axis.add_feature(cartopy.feature.LAND, zorder=0,
                             facecolor=land_color, edgecolor='black')
    if grid_lines == True:
        map_axis.gridlines(xlocs=lon_grid, ylocs=lat_grid, linewidth=1,
                           color='black', linestyle='dotted')
    return map_axis
|
python
|
{
"resource": ""
}
|
q11664
|
plot_pole
|
train
|
def plot_pole(map_axis, plon, plat, A95, label='', color='k', edgecolor='k', marker='o', markersize=20, legend='no'):
    """
    Plot a single paleomagnetic pole and its A95 error ellipse on a cartopy
    map axis. Initialize the map first (see make_orthographic_map).

    Example
    -------
    >>> plon = 200
    >>> plat = 60
    >>> A95 = 6
    >>> map_axis = ipmag.make_orthographic_map(central_longitude=200,central_latitude=30)
    >>> ipmag.plot_pole(map_axis, plon, plat, A95 ,color='red',markersize=40)

    Required Parameters
    -----------
    map_axis : the current cartopy map axis
    plon : longitude of the pole (degrees E)
    plat : latitude of the pole (degrees)
    A95 : A_95 confidence ellipse of the pole (degrees)

    Optional Parameters (defaults are used if not specified)
    -----------
    color : symbol color (default 'k', i.e. black)
    marker : symbol shape (default 'o', a circle)
    markersize : symbol size (default 20)
    label : label for the pole (default none)
    legend : 'yes' to plot a legend (default 'no')
    """
    if not has_cartopy:
        print('-W- cartopy must be installed to run ipmag.plot_pole')
        return
    # draw the pole symbol above map features (zorder=101)
    map_axis.scatter(plon, plat, marker=marker,
                     color=color, edgecolors=edgecolor, s=markersize,
                     label=label, zorder=101, transform=ccrs.Geodetic())
    # equi() takes a radius in km: ~111.32 km per degree of arc
    equi(map_axis, plon, plat, A95 * 111.32, color)
    if legend == 'yes':
        plt.legend(loc=2)
|
python
|
{
"resource": ""
}
|
q11665
|
plot_poles
|
train
|
def plot_poles(map_axis, plon, plat, A95, label='', color='k', edgecolor='k', marker='o', markersize=20, legend='no'):
    """
    Plot multiple paleomagnetic poles and their A95 error ellipses on a
    cartopy map axis. Initialize the map first (see make_orthographic_map).

    Examples
    -------
    >>> plons = [200, 180, 210]
    >>> plats = [60, 40, 35]
    >>> A95 = [6, 3, 10]
    >>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
    >>> ipmag.plot_poles(map_axis, plons, plats, A95s, color='red', markersize=40)

    >>> colors = ['red','green','blue']
    >>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
    >>> ipmag.plot_poles(map_axis, plons, plats, A95s, color=colors, markersize=40)

    Required Parameters
    -----------
    map_axis : the current cartopy map axis
    plon : longitudes of the poles (degrees E)
    plat : latitudes of the poles (degrees)
    A95 : A_95 confidence ellipses of the poles (degrees)

    Optional Parameters (defaults are used if not specified)
    -----------
    color : a single color or a list giving one color per pole (default 'k')
    edgecolor : symbol outline color (default 'k')
    marker : symbol shape (default 'o', a circle)
    markersize : symbol size (default 20)
    label : label for the poles (default none)
    legend : 'yes' to plot a legend (default 'no')
    """
    map_axis.scatter(plon, plat, marker=marker,
                     color=color, edgecolors=edgecolor, s=markersize,
                     label=label, zorder=101, transform=ccrs.Geodetic())
    # draw each confidence circle; pick per-pole color when a list was given
    uniform_color = isinstance(color, str)
    for idx in range(len(A95)):
        circle_color = color if uniform_color else color[idx]
        equi(map_axis, plon[idx], plat[idx], A95[idx] * 111.32, circle_color)
    if legend == 'yes':
        plt.legend(loc=2)
|
python
|
{
"resource": ""
}
|
q11666
|
plot_poles_colorbar
|
train
|
def plot_poles_colorbar(map_axis, plons, plats, A95s, colorvalues, vmin, vmax,
                        colormap='viridis', edgecolor='k', marker='o', markersize=20,
                        alpha=1.0, colorbar=True, colorbar_label='pole age (Ma)'):
    """
    Plot multiple paleomagnetic poles and A95 error ellipses on a cartopy map
    axis, coloring the poles by `colorvalues` using the given colormap.
    Initialize the map first (see make_orthographic_map).

    Example
    -------
    >>> plons = [200, 180, 210]
    >>> plats = [60, 40, 35]
    >>> A95s = [6, 3, 10]
    >>> ages = [100,200,300]
    >>> vmin = 0
    >>> vmax = 300
    >>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
    >>> ipmag.plot_poles_colorbar(map_axis, plons, plats, A95s, ages, vmin, vmax)

    Required Parameters
    -----------
    map_axis : the current cartopy map axis
    plons : longitudes of the poles (degrees E)
    plats : latitudes of the poles (degrees)
    A95s : A_95 confidence ellipses of the poles (degrees)
    colorvalues : attribute used to determine the colors
    vmin : minimum of the colormap range
    vmax : maximum of the colormap range

    Optional Parameters (defaults are used if not specified)
    -----------
    colormap : colormap name (default 'viridis'; e.g. 'plasma')
    edgecolor : symbol outline color (default 'k')
    marker : symbol shape (default 'o', a circle)
    markersize : symbol size in pt (default 20); now forwarded to plot_poles
        (previously accepted but silently ignored, with a string default)
    alpha : retained for API compatibility; currently not applied because
        plot_poles does not accept an alpha argument
    colorbar : if True (default) include a colorbar
    colorbar_label : label for the colorbar
    """
    if not has_cartopy:
        print('-W- cartopy must be installed to run ipmag.plot_poles_colorbar')
        return
    color_mapping = plt.cm.ScalarMappable(cmap=colormap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
    colors = color_mapping.to_rgba(colorvalues).tolist()
    # forward marker and markersize so the caller's styling is honored
    plot_poles(map_axis, plons, plats, A95s,
               label='', color=colors, edgecolor=edgecolor, marker=marker,
               markersize=markersize)
    if colorbar == True:
        sm = plt.cm.ScalarMappable(
            cmap=colormap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
        sm._A = []
        plt.colorbar(sm, orientation='horizontal', shrink=0.8,
                     pad=0.05, label=colorbar_label)
|
python
|
{
"resource": ""
}
|
q11667
|
plot_vgp
|
train
|
def plot_vgp(map_axis, vgp_lon=None, vgp_lat=None, di_block=None, label='', color='k', marker='o',
             edge='black', markersize=20, legend=False):
    """
    Plot virtual geomagnetic pole positions on a cartopy map axis.
    Initialize the map first (see make_orthographic_map).

    Example
    -------
    >>> vgps = ipmag.fishrot(dec=200,inc=30)
    >>> vgp_lon_list,vgp_lat_list,intensities= ipmag.unpack_di_block(vgps)
    >>> map_axis = ipmag.make_orthographic_map(central_longitude=200,central_latitude=30)
    >>> ipmag.plot_vgp(map_axis,vgp_lon=vgp_lon_list,vgp_lat=vgp_lat_list,color='red',markersize=40)

    Required Parameters
    -----------
    map_axis : the current cartopy map axis
    vgp_lon : longitude(s) of the pole(s) (degrees E)
    vgp_lat : latitude(s) of the pole(s) (degrees)
    di_block : alternatively, a nested [lon, lat(, intensity)] list that is
        unpacked into vgp_lon/vgp_lat

    Optional Parameters (defaults are used if not specified)
    -----------
    color : symbol color (default 'k', i.e. black)
    marker : symbol shape (default 'o', a circle)
    edge : marker edge color (default black)
    markersize : symbol size in pt (default 20)
    label : label for the poles (default none)
    legend : True to plot a legend (default False)
    """
    if not has_cartopy:
        print('-W- cartopy must be installed to run ipmag.plot_vgp')
        return
    if di_block is not None:
        unpacked = unpack_di_block(di_block)
        if len(unpacked) == 3:
            vgp_lon, vgp_lat, intensity = unpacked
        elif len(unpacked) == 2:
            vgp_lon, vgp_lat = unpacked
    map_axis.scatter(vgp_lon, vgp_lat, marker=marker, edgecolors=[edge],
                     s=markersize, color=color, label=label, zorder=100, transform=ccrs.Geodetic())
    map_axis.set_global()
    if legend == True:
        plt.legend(loc=2)
|
python
|
{
"resource": ""
}
|
q11668
|
plot_dmag
|
train
|
def plot_dmag(data="", title="", fignum=1, norm=1,dmag_key='treat_ac_field',intensity='',
             quality=False):
    """
    plots demagnetization data versus step for all specimens in pandas dataframe datablock

    Parameters
    ______________
    data : Pandas dataframe with MagIC data model 3 columns:
    fignum : figure number
    specimen : specimen name
    dmag_key : one of these: ['treat_temp','treat_ac_field','treat_mw_energy']
        selected using method_codes : ['LT_T-Z','LT-AF-Z','LT-M-Z'] respectively
    intensity : if blank will choose one of these: ['magn_moment', 'magn_volume', 'magn_mass']
    quality : if True use the quality column of the DataFrame
    title : title for plot
    norm : if True, normalize data to first step

    Output :
      matplotlib plot
    """
    plt.figure(num=fignum, figsize=(5, 5))
    if intensity:
        int_key = intensity
    else:
        # fall back to the first recognized intensity column in the frame
        intlist = ['magn_moment', 'magn_volume', 'magn_mass']
        IntMeths = [col_name for col_name in data.columns if col_name in intlist]
        int_key = IntMeths[0]
    data = data[data[int_key].notnull()]  # keep only rows with this intensity
    units = "U"  # units default to undefined
    if not dmag_key:
        if 'treat_temp' in data.columns: units = "K"  # kelvin
        elif 'treat_ac_field' in data.columns: units = "T"  # tesla
        elif 'treat_mw_energy' in data.columns: units = "J"  # joules
    if dmag_key == 'treat_temp': units = 'K'
    if dmag_key == 'treat_ac_field': units = 'T'
    if dmag_key == 'treat_mw_energy': units = 'J'
    spcs = data.specimen.unique()  # get a list of all specimens in DataFrame data
    if len(spcs) == 0:
        print('no data for plotting')
        return
    # step through specimens to put on plot
    for spc in spcs:
        spec_data = data[data.specimen.str.contains(spc)]
        INTblock = []
        for ind, rec in spec_data.iterrows():
            # honor the quality flag: only read the 'quality' column when
            # requested, so frames without that column still plot
            # (previously rec['quality'] was always read and the parameter
            # was ignored)
            qual = rec['quality'] if quality else 'g'
            INTblock.append([float(rec[dmag_key]), 0, 0,
                             float(rec[int_key]), 1, qual])
        if len(INTblock) > 2:
            pmagplotlib.plot_mag(fignum, INTblock, title, 0, units, norm)
|
python
|
{
"resource": ""
}
|
q11669
|
eigs_s
|
train
|
def eigs_s(infile="", dir_path='.'):
    """
    Converts eigenparameter format data to s format.

    Parameters
    ___________________
    Input:
        infile : input file name whose rows hold eigenvalues (tau) and
            eigenvectors (V) as:
            tau_1 V1_dec V1_inc tau_2 V2_dec V2_inc tau_3 V3_dec V3_inc
        dir_path : directory containing infile (default '.')
    Output
         the six tensor elements as a nested array
          [[x11,x22,x33,x12,x23,x13],....]
    """
    eigs_data = np.loadtxt(os.path.join(dir_path, infile))
    Ss = []
    for row in eigs_data:
        # columns come in (tau, dec, inc) triples
        taus = [row[0], row[3], row[6]]
        vdirs = [[row[1], row[2]], [row[4], row[5]], [row[7], row[8]]]
        Ss.append(list(pmag.doeigs_s(taus, vdirs)))
    return Ss
|
python
|
{
"resource": ""
}
|
q11670
|
specimens_extract
|
train
|
def specimens_extract(spec_file='specimens.txt', output_file='specimens.xls', landscape=False,
                      longtable=False, output_dir_path='.', input_dir_path='', latex=False):
    """
    Extracts specimen results from a MagIC 3.0 format specimens.txt file.
    Default output format is an Excel file; with latex=True a .tex table is
    written instead (typeset with latex on your own computer).

    Parameters
    ___________
    spec_file : str, default "specimens.txt"
        input file name
    output_file : str, default "specimens.xls"
        output file name
    landscape : boolean, default False
        if True output latex landscape table
    longtable : boolean
        if True output latex longtable
    output_dir_path : str, default "."
        output file directory
    input_dir_path : str, default ""
        path for input file if different from output_dir_path (default is same)
    latex : boolean, default False
        if True, output file should be latex formatted table with a .tex ending

    Return :
        (True, [output file name]) on success, (False, error message) on failure
    Effects :
        writes xls or latex formatted tables for use in publications
    """
    input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, output_dir_path)
    try:
        fname = pmag.resolve_file_name(spec_file, input_dir_path)
    except IOError:
        print("bad specimen file name")
        return False, "bad specimen file name"
    spec_df = pd.read_csv(fname, sep='\t', header=1)
    # keyword arg required here: positional axis was removed in pandas 2.0
    spec_df.dropna(axis='columns', how='all', inplace=True)
    if 'int_abs' in spec_df.columns:
        spec_df.dropna(subset=['int_abs'], inplace=True)
    if not len(spec_df):
        # previously this fell through to `return True, [out_file]` with
        # out_file unbound, raising NameError; report the failure instead
        print("No specimen data for output.")
        return False, "No specimen data for output."
    table_df = map_magic.convert_specimen_dm3_table(spec_df)
    out_file = pmag.resolve_file_name(output_file, output_dir_path)
    if latex:
        if out_file.endswith('.xls'):
            out_file = out_file.rsplit('.')[0] + ".tex"
        info_out = open(out_file, 'w+', errors="backslashreplace")
        # backslashes doubled below: identical strings, no invalid-escape warnings
        info_out.write('\\documentclass{article}\n')
        info_out.write('\\usepackage{booktabs}\n')
        if landscape:
            info_out.write('\\usepackage{lscape}')
        if longtable:
            info_out.write('\\usepackage{longtable}\n')
        info_out.write('\\begin{document}\n')
        if landscape:
            info_out.write('\\begin{landscape}\n')
        info_out.write(table_df.to_latex(index=False, longtable=longtable,
                                         escape=True, multicolumn=False))
        if landscape:
            info_out.write('\\end{landscape}\n')
        info_out.write('\\end{document}\n')
        info_out.close()
    else:
        table_df.to_excel(out_file, index=False)
    return True, [out_file]
|
python
|
{
"resource": ""
}
|
q11671
|
criteria_extract
|
train
|
def criteria_extract(crit_file='criteria.txt', output_file='criteria.xls',
                     output_dir_path='.', input_dir_path='', latex=False):
    """
    Extracts criteria from a MagIC 3.0 format criteria.txt file.
    Default output format is an Excel file; with latex=True a .tex table is
    written instead (typeset with latex on your own computer).

    Parameters
    ___________
    crit_file : str, default "criteria.txt"
        input file name
    output_file : str, default "criteria.xls"
        output file name
    output_dir_path : str, default "."
        output file directory
    input_dir_path : str, default ""
        path for input file if different from output_dir_path (default is same)
    latex : boolean, default False
        if True, output file should be latex formatted table with a .tex ending

    Return :
        (True, [output file name]) on success, (False, error message) on failure
    Effects :
        writes xls or latex formatted tables for use in publications
    """
    input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, output_dir_path)
    try:
        fname = pmag.resolve_file_name(crit_file, input_dir_path)
    except IOError:
        print("bad criteria file name")
        return False, "bad criteria file name"
    crit_df = pd.read_csv(fname, sep='\t', header=1)
    if not len(crit_df):
        # previously fell through to `return True, [out_file]` with out_file
        # unbound, raising NameError; report the failure instead
        print("No criteria for output.")
        return False, "No criteria for output."
    out_file = pmag.resolve_file_name(output_file, output_dir_path)
    s = crit_df['table_column'].str.split(pat='.', expand=True)
    crit_df['table'] = s[0]
    crit_df['column'] = s[1]
    # .copy() so the .loc assignments below modify an independent frame
    crit_df = crit_df[['table', 'column',
                       'criterion_value', 'criterion_operation']].copy()
    crit_df.columns = ['Table', 'Statistic', 'Threshold', 'Operation']
    if latex:
        if out_file.endswith('.xls'):
            out_file = out_file.rsplit('.')[0] + ".tex"
        # spell out comparison symbols in the displayed 'Operation' column
        # (writing to a lowercase 'operation' key used to add a stray fifth
        # column to the latex output instead of replacing the symbols)
        crit_df.loc[crit_df['Operation'].str.contains(
            '<'), 'Operation'] = 'maximum'
        crit_df.loc[crit_df['Operation'].str.contains(
            '>'), 'Operation'] = 'minimum'
        crit_df.loc[crit_df['Operation'] == '=', 'Operation'] = 'equal to'
        info_out = open(out_file, 'w+', errors="backslashreplace")
        info_out.write('\\documentclass{article}\n')
        info_out.write('\\usepackage{booktabs}\n')
        # info_out.write('\\usepackage{longtable}\n')
        # T1 will ensure that symbols like '<' are formatted correctly
        info_out.write("\\usepackage[T1]{fontenc}\n")
        info_out.write('\\begin{document}')
        info_out.write(crit_df.to_latex(index=False, longtable=False,
                                        escape=True, multicolumn=False))
        info_out.write('\\end{document}\n')
        info_out.close()
    else:
        crit_df.to_excel(out_file, index=False)
    return True, [out_file]
|
python
|
{
"resource": ""
}
|
q11672
|
Site.parse_fits
|
train
|
def parse_fits(self, fit_name):
    '''USE PARSE_ALL_FITS unless otherwise necessary
    Isolate fits by the name of the fit; we also set 'specimen_tilt_correction' to zero in order
    to only include data in geographic coordinates - THIS NEEDS TO BE GENERALIZED
    '''
    # select rows for this fit in geographic coordinates (tilt correction 0)
    fit_rows = self.fits[(self.fits.specimen_comp_name == fit_name) &
                         (self.fits.specimen_tilt_correction == 0)]
    fit_rows = fit_rows.reset_index()
    mean_rows = self.means[(self.means.site_comp_name == fit_name) &
                           (self.means.site_tilt_correction == 0)]
    mean_rows = mean_rows.reset_index()
    # stash the results as attributes named after the fit
    setattr(self, fit_name, fit_rows)
    setattr(self, str(fit_name) + "_mean", mean_rows)
|
python
|
{
"resource": ""
}
|
q11673
|
MagICMenu.on_show_mainframe
|
train
|
def on_show_mainframe(self, event):
    """
    Show mainframe window

    Parameters
    ----------
    event : wx event (unused)
    """
    self.parent.Enable()  # re-enable input first, in case the frame was disabled
    self.parent.Show()    # make the window visible
    self.parent.Raise()   # bring it to the front of the window stack
|
python
|
{
"resource": ""
}
|
q11674
|
get_PD_direction
|
train
|
def get_PD_direction(X1_prime, X2_prime, X3_prime, PD):
    """takes arrays of X1_prime, X2_prime, X3_prime, and the PD.
    checks that the PD vector direction is correct: if the angle between PD
    and the reference vector (first minus last data point) exceeds 90
    degrees, PD is flipped. Uses plain float division instead of the
    Python-2 old_div shim (identical result)."""
    last = len(X1_prime) - 1
    # reference vector R: first data point minus last data point
    R = numpy.array([X1_prime[0] - X1_prime[last],
                     X2_prime[0] - X2_prime[last],
                     X3_prime[0] - X3_prime[last]])
    # dot product of reference vector and the principal axis of the V matrix
    dot = numpy.dot(PD, R)
    # clamp into arccos domain (rounding can push |dot| past 1)
    if dot < -1:
        dot = -1
    elif dot > 1:
        dot = 1
    if numpy.arccos(dot) > numpy.pi / 2.:
        PD = -1. * numpy.array(PD)
    return PD
|
python
|
{
"resource": ""
}
|
q11675
|
dir2cart
|
train
|
def dir2cart(d): # from pmag.py
    """converts list or array of vector directions, in degrees, to array of
    cartesian coordinates, in x,y,z form.

    Accepts either a single [dec, inc(, intensity)] direction or a nested
    list/array of such rows; intensity defaults to 1 when absent. Uses plain
    float division instead of the Python-2 old_div shim (identical result).
    """
    d = numpy.array(d)
    rad = numpy.pi / 180.
    if len(d.shape) > 1:  # array of vectors
        decs, incs = d[:, 0] * rad, d[:, 1] * rad
        if d.shape[1] == 3:
            ints = d[:, 2]  # take the given lengths
        else:
            ints = numpy.ones(len(d))  # unit vectors
    else:  # single vector
        decs, incs = numpy.array(d[0]) * rad, numpy.array(d[1]) * rad
        if len(d) == 3:
            ints = numpy.array(d[2])
        else:
            ints = numpy.array([1.])
    cart = numpy.array([ints * numpy.cos(decs) * numpy.cos(incs),
                        ints * numpy.sin(decs) * numpy.cos(incs),
                        ints * numpy.sin(incs)
                        ]).transpose()
    return cart
|
python
|
{
"resource": ""
}
|
q11676
|
pmag_angle
|
train
|
def pmag_angle(D1,D2): # use this
    """
    finds the angle between lists of two directions D1,D2

    Parameters
    ----------
    D1, D2 : nested lists/arrays of paired [dec, inc(, intensity)]
        directions in degrees; any intensity column is stripped off

    Returns
    -------
    numpy array of the angle in degrees between each pair
    """
    D1 = numpy.array(D1)
    if len(D1.shape) > 1:
        D1 = D1[:, 0:2]  # strip off intensity
    else:
        D1 = D1[:2]
    D2 = numpy.array(D2)
    if len(D2.shape) > 1:
        D2 = D2[:, 0:2]  # strip off intensity
    else:
        D2 = D2[:2]
    X1 = dir2cart(D1)  # convert to cartesian from polar
    X2 = dir2cart(D2)
    angles = []  # set up a list for angles
    for k in range(X1.shape[0]):
        dot = numpy.dot(X1[k], X2[k])  # take the dot product
        # clamp into arccos domain: rounding can push |dot| just past 1 for
        # (anti)parallel unit vectors, which previously returned NaN
        if dot < -1.:
            dot = -1.
        elif dot > 1.:
            dot = 1.
        angle = numpy.arccos(dot) * 180. / numpy.pi
        angles.append(angle % 360.)
    return numpy.array(angles)
|
python
|
{
"resource": ""
}
|
q11677
|
new_get_angle_diff
|
train
|
def new_get_angle_diff(v1,v2):
    """returns angular difference in degrees between two vectors. may be more precise in certain cases. see SPD"""
    a = numpy.array(v1)
    b = numpy.array(v2)
    # atan2(|a x b|, a . b) is numerically stable near 0 and 180 degrees
    cross_magnitude = numpy.linalg.norm(numpy.cross(a, b))
    dot_value = numpy.dot(a, b)
    return math.degrees(numpy.arctan2(cross_magnitude, dot_value))
|
python
|
{
"resource": ""
}
|
q11678
|
get_angle_difference
|
train
|
def get_angle_difference(v1, v2):
"""returns angular difference in degrees between two vectors. takes in cartesian coordinates."""
v1 = numpy.array(v1)
v2 = numpy.array(v2)
angle=numpy.arccos(old_div((numpy.dot(v1, v2) ), (numpy.sqrt(math.fsum(v1**2)) * numpy.sqrt(math.fsum(v2**2)))))
return math.degrees(angle)
|
python
|
{
"resource": ""
}
|
q11679
|
get_ptrms_angle
|
train
|
def get_ptrms_angle(ptrms_best_fit_vector, B_lab_vector):
    """
    gives angle between principal direction of the ptrm data and the b_lab
    vector. this is NOT in SPD, but taken from Ron Shaar's old
    thellier_gui.py code. see PmagPy on github

    The cosine is clamped to [-1, 1] so floating-point rounding on
    (anti)parallel vectors cannot make math.acos raise ValueError. Uses
    plain float division instead of the Python-2 old_div shim.
    """
    cos_angle = numpy.dot(ptrms_best_fit_vector, B_lab_vector) / (
        numpy.sqrt(sum(ptrms_best_fit_vector**2)) * numpy.sqrt(sum(B_lab_vector**2)))
    cos_angle = max(-1.0, min(1.0, cos_angle))
    ptrms_angle = math.degrees(math.acos(cos_angle))
    return ptrms_angle
|
python
|
{
"resource": ""
}
|
q11680
|
main
|
train
|
def main():
    """
    Take out dos problem characters from any file
    """
    fname = pmag.get_named_arg('-f')
    if not fname:
        return
    # rewrite the file in place with all carriage returns stripped
    with open(fname, 'rb+') as fobj:
        stripped = fobj.read().replace(b'\r', b'')
        fobj.seek(0)
        fobj.write(stripped)
        fobj.truncate()
|
python
|
{
"resource": ""
}
|
q11681
|
main
|
train
|
def main():
    """
    NAME
        kly4s_magic.py

    DESCRIPTION
        converts files generated by SIO kly4S labview program to MagIC formated
        files for use with PmagPy plotting software

    SYNTAX
        kly4s_magic.py -h [command line options]

    OPTIONS
        -h: prints the help message and quits
        -f FILE: specify .ams input file name
        -fad AZDIP: specify AZDIP file with orientations, will create er_samples.txt file
        -fsa SFILE: specify existing er_samples.txt file with orientation information
        -fsp SPFILE: specify existing er_specimens.txt file for appending
        -F MFILE: specify magic_measurements output file
        -Fa AFILE: specify rmag_anisotropy output file
        -ocn ORCON: specify orientation convention: default is #3 below -only with AZDIP file
        -usr USER: specify who made the measurements
        -loc LOC: specify location name for study
        -ins INST: specify instrument used
        -spc SPEC: specify number of characters to specify specimen from sample
        -ncn NCON: specify naming convention: default is #1 below

    DEFAULTS
        MFILE: magic_measurements.txt
        AFILE: rmag_anisotropy.txt
        SPFILE: create new er_specimens.txt file
        USER: ""
        LOC: "unknown"
        INST: "SIO-KLY4S"
        SPEC: 1 specimen name is same as sample (if SPEC is 1, sample is all but last character)

    NOTES:
        Sample naming convention:
            [1] XXXXY: where XXXX is an arbitrary length site designation and Y
                is the single character sample designation.  e.g., TG001a is the
                first sample from site TG001.    [default]
            [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
            [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
            [4-Z] XXXXYYY:  YYY is sample designation with Z characters from site XXX
            [5] site name = sample name
            [6] site name entered in site_name column in the orient.txt format input file  -- NOT CURRENTLY SUPPORTED
            [7-Z] [XXX]YYY:  XXX is site designation with Z characters from samples  XXXYYY
            NB: all others you will have to either customize your
                self or e-mail ltauxe@ucsd.edu for help.

        Orientation convention:
            [1] Lab arrow azimuth= azimuth; Lab arrow dip=-dip
                i.e., dip is degrees from vertical down - the hade [default]
            [2] Lab arrow azimuth = azimuth-90; Lab arrow dip = -dip
                i.e., azimuth is strike and dip is hade
            [3] Lab arrow azimuth = azimuth; Lab arrow dip = dip-90
                e.g. dip is degrees from horizontal of drill direction
            [4] Lab arrow azimuth = azimuth; Lab arrow dip = dip
            [5] Lab arrow azimuth = azimuth; Lab arrow dip = 90-dip
            [6] all others you will have to either customize your
                self or e-mail ltauxe@ucsd.edu for help.
    """
    args = sys.argv
    if '-h' in args:
        print(main.__doc__)
        sys.exit()
    # expected flags as [flag name, is_required, default value]
    dataframe = extractor.command_line_dataframe([['f', True, ''], ['fad', False, ''],
                                                  ['fsa', False, ''], ['fsp', False, ''],
                                                  ['Fsp', False, 'specimens.txt'], ['F', False, 'measurements.txt'],
                                                  ['Fa', False, 'rmag_anisotropy.txt'], ['ocn', False, '3'],
                                                  ['usr', False, ''], ['loc', False, ''],
                                                  ['ins', False, 'SIO-KLY4S'], ['spc', False, 0],
                                                  ['ncn', False, '1'], ['WD', False, '.'],
                                                  ['ID', False, '.'], ['DM', False, 3 ]])
    checked_args = extractor.extract_and_check_args(args, dataframe)
    # unpack the parsed values in the same order as the flag list below
    infile, azdip_infile, samp_infile, spec_infile, spec_outfile, measfile, aniso_outfile, or_con, user, locname, inst, specnum, samp_con, output_dir_path, input_dir_path, data_model_num = extractor.get_vars(['f', 'fad', 'fsa', 'fsp', 'Fsp', 'F', 'Fa', 'ocn', 'usr', 'loc', 'ins', 'spc', 'ncn', 'WD', 'ID', 'DM'], checked_args)
    # hand everything off to the conversion routine
    convert.kly4s(infile, specnum=specnum, locname=locname, inst=inst,
                  user=user, measfile=measfile,or_con=or_con,
                  samp_con=samp_con, aniso_outfile=aniso_outfile,
                  samp_infile=samp_infile, spec_infile=spec_infile,
                  spec_outfile=spec_outfile, azdip_infile=azdip_infile,
                  dir_path=output_dir_path, input_dir_path=input_dir_path,
                  data_model_num=data_model_num)
|
python
|
{
"resource": ""
}
|
q11682
|
main
|
train
|
def main():
    """
    NAME
        sort_specimens.py

    DESCRIPTION
        Reads in a pmag_specimen formatted file and separates it into different components (A,B...etc.)

    SYNTAX
        sort_specimens.py [-h] [command line options]

    INPUT
        takes pmag_specimens.txt formatted input file

    OPTIONS
        -h: prints help message and quits
        -f FILE: specify input file, default is 'pmag_specimens.txt'

    OUTPUT
        makes pmag_specimen formatted files with input filename plus _X_Y
        where X is the component name and Y is s,g,t for coordinate system
    """
    dir_path='.'
    inspec="pmag_specimens.txt"
    if '-WD' in sys.argv:
        ind=sys.argv.index('-WD')
        dir_path=sys.argv[ind+1]
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-f' in sys.argv:
        ind=sys.argv.index('-f')
        inspec=sys.argv[ind+1]
    basename=inspec.split('.')[:-1]
    inspec=dir_path+"/"+inspec
    ofile_base=dir_path+"/"+basename[0]
    #
    # read in data
    #
    prior_spec_data,file_type=pmag.magic_read(inspec)
    if file_type != 'pmag_specimens':
        print(file_type, " this is not a valid pmag_specimens file")
        sys.exit()
    # get list of specimens in file, components, coordinate systems available
    specs,comps,coords=[],[],[]
    for spec in prior_spec_data:
        if spec['er_specimen_name'] not in specs:specs.append(spec['er_specimen_name'])
        if 'specimen_comp_name' not in list(spec.keys()):spec['specimen_comp_name']='A'
        # bug fix: the default must be stored under 'specimen_tilt_correction'
        # (it was written to a bogus 'tilt_correction' key, so the lookups
        # below raised KeyError for records lacking the column)
        if 'specimen_tilt_correction' not in list(spec.keys()):spec['specimen_tilt_correction']='-1' # assume specimen coordinates
        if spec['specimen_comp_name'] not in comps:comps.append(spec['specimen_comp_name'])
        if spec['specimen_tilt_correction'] not in coords:coords.append(spec['specimen_tilt_correction'])
    # work on separating out components, coordinate systems by specimen
    for coord in coords:
        print(coord)
        for comp in comps:
            print(comp)
            speclist=[]
            for spec in prior_spec_data:
                if spec['specimen_tilt_correction']==coord and spec['specimen_comp_name']==comp:speclist.append(spec)
            ofile=ofile_base+'_'+coord+'_'+comp+'.txt'
            pmag.magic_write(ofile,speclist,'pmag_specimens')
            print('coordinate system: ',coord,' component name: ',comp,' saved in ',ofile)
|
python
|
{
"resource": ""
}
|
q11683
|
ErMagicCheckFrame3.InitLocCheck
|
train
|
def InitLocCheck(self):
    """
    make an interactive grid in which users can edit locations
    """
    # if there is a location without a name, name it 'unknown'
    self.contribution.rename_item('locations', 'nan', 'unknown')
    # propagate lat/lon values from sites table
    self.contribution.get_min_max_lat_lon()
    # propagate lithologies & geologic classes from sites table
    self.contribution.propagate_cols_up(['lithologies',
                                         'geologic_classes'], 'locations', 'sites')
    res = self.contribution.propagate_min_max_up()
    # only propagate age_unit when propagate_min_max_up returned something
    # non-null (presumably ages were rolled up from sites -- TODO confirm)
    if cb.not_null(res):
        self.contribution.propagate_cols_up(['age_unit'], 'locations', 'sites')
    # set up frame
    self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
    self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
                                            'locations', 'locations', self.panel,
                                            main_frame=self.main_frame)
    # redefine default 'save & exit grid' button to go to next dialog instead
    self.grid_frame.exitButton.SetLabel('Save and continue')
    grid = self.grid_frame.grid
    self.grid_frame.Bind(wx.EVT_BUTTON,
                         lambda event: self.onContinue(event, grid, self.InitAgeCheck),
                         self.grid_frame.exitButton)
    # add back button
    self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
                                name='back_btn')
    self.Bind(wx.EVT_BUTTON,
              lambda event: self.onbackButton(event, self.InitSiteCheck),
              self.backButton)
    self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
    # re-do fit
    self.grid_frame.do_fit(None, min_size=self.min_size)
    # center
    self.grid_frame.Centre()
    return
|
python
|
{
"resource": ""
}
|
q11684
|
ErMagicCheckFrame3.validate
|
train
|
def validate(self, grid):
    """
    Using the MagIC data model, generate validation errors on a MagicGrid.

    Parameters
    ----------
    grid : dialogs.magic_grid3.MagicGrid
        The MagicGrid to be validated

    Returns
    ---------
    warnings: dict
        Empty dict if no warnings, otherwise a dict with format {name of problem: [problem_columns]}
    """
    table_name = str(grid.GetName())
    required = set(self.contribution.dmodel.get_reqd_headers(table_name))
    frame = self.contribution.tables[table_name].df
    # python does not view empty strings as null, so normalize them first
    frame = frame.replace('', np.nan)
    if frame.empty:
        return {}
    # required columns absent from the table entirely
    absent = required - set(frame.columns)
    # required columns that exist but hold no data at all
    populated = set(frame.dropna(how='all', axis='columns').columns)
    empty_required = (required - absent) - populated
    if absent or empty_required:
        return {'missing required column(s)': sorted(absent),
                'no data in required column(s)': sorted(empty_required)}
    return {}
|
python
|
{
"resource": ""
}
|
q11685
|
ErMagicCheckFrame3.on_saveButton
|
train
|
def on_saveButton(self, event, grid):
    """saves any editing of the grid but does not continue to the next window"""
    wait = wx.BusyInfo("Please wait, working...")
    wx.SafeYield()  # give wx a chance to paint the busy message before we work
    if self.grid_frame.drop_down_menu:  # unhighlight selected columns, etc.
        self.grid_frame.drop_down_menu.clean_up()

    # remove '**' and '^^' from col labels
    starred_cols, hatted_cols = grid.remove_starred_labels()

    grid.SaveEditControlValue() # locks in value in cell currently edited
    grid.HideCellEditControl() # removes focus from cell that was being edited

    if grid.changes:
        self.onSave(grid)

    # restore the '**'/'^^' markers stripped above
    for col in starred_cols:
        label = grid.GetColLabelValue(col)
        grid.SetColLabelValue(col, label + '**')
    for col in hatted_cols:
        label = grid.GetColLabelValue(col)
        grid.SetColLabelValue(col, label + '^^')
    del wait  # dismisses the BusyInfo window
|
python
|
{
"resource": ""
}
|
q11686
|
ErMagicCheckFrame.onMouseOver
|
train
|
def onMouseOver(self, event, grid):
    """
    Displays a tooltip over any cell in a certain column
    """
    # translate the mouse position into grid (row, col) coordinates
    pos = grid.CalcUnscrolledPosition(event.GetX(), event.GetY())
    coords = grid.XYToCell(pos[0], pos[1])
    row, col = coords[0], coords[1]
    # show the full cell contents as a tooltip only when the value is long
    # note: this works with EPD for windows, and modern wxPython, but not with Canopy Python
    value = grid.GetCellValue(row, col)
    tooltip = value if len(value) > 15 else ''
    event.GetEventObject().SetToolTipString(tooltip)
|
python
|
{
"resource": ""
}
|
q11687
|
ErMagicCheckFrame.on_helpButton
|
train
|
def on_helpButton(self, event, page=None):
    """shows html help page"""
    # for use on the command line:
    base = find_pmag_dir.get_pmag_dir()
    # for use with pyinstaller
    #base = self.main_frame.resource_dir
    help_page = os.path.join(base, 'dialogs', 'help_files', page)
    # py2app flattens the directory structure, so fall back to a
    # flat layout if the nested path does not exist
    if not os.path.exists(help_page):
        help_page = os.path.join(base, 'help_files', page)
    html_frame = pw.HtmlFrame(self, page=help_page)
    html_frame.Show()
|
python
|
{
"resource": ""
}
|
q11688
|
ErMagicCheckFrame.onDeleteRow
|
train
|
def onDeleteRow(self, event, data_type):
    """
    On button click, remove relevant object from both the data model and the grid.

    Parameters
    ----------
    event : wx event (unused)
    data_type : str
        level being edited ('site', 'sample', ...); selects the matching
        delete method and determines the child level below it
    """
    ancestry = self.er_magic_data.ancestry
    # the child type is one step below data_type in the ancestry list
    child_type = ancestry[ancestry.index(data_type) - 1]
    names = [self.grid.GetCellValue(row, 0) for row in self.selected_rows]
    if data_type == 'site':
        how_to_fix = 'Make sure to select a new site for each orphaned sample in the next step'
    else:
        how_to_fix = 'Go back a step and select a new {} for each orphaned {}'.format(data_type, child_type)
    orphans = []
    for name in names:
        # look the row up by name each time: indices shift as rows are removed
        row = self.grid.row_labels.index(name)
        # delete from the data model; returns any children left parentless
        orphan = self.er_magic_data.delete_methods[data_type](name)
        if orphan:
            orphans.extend(orphan)
        self.grid.remove_row(row)
    if orphans:
        orphan_names = self.er_magic_data.make_name_list(orphans)
        pw.simple_warning('You have deleted:\n\n {}\n\nthe parent(s) of {}(s):\n\n {}\n\n{}'.format(', '.join(names), child_type, ', '.join(orphan_names), how_to_fix))
    self.selected_rows = set()
    # update grid and data model
    self.update_grid(self.grid)#, grids[grid_name])
    self.grid.Refresh()
|
python
|
{
"resource": ""
}
|
q11689
|
ErMagicCheckFrame.onSelectRow
|
train
|
def onSelectRow(self, event):
    """
    Highlight or unhighlight a row for possible deletion.
    """
    grid = self.grid
    row = event.Row
    white = (255, 255, 255, 255)
    teal = (191, 216, 216, 255)
    attr = wx.grid.GridCellAttr()
    if grid.GetCellBackgroundColour(row, 0) == white:
        # row was unselected: highlight it and remember it
        attr.SetBackgroundColour(teal)
        self.selected_rows.add(row)
    else:
        # row was already selected: restore default color and forget it
        attr.SetBackgroundColour(white)
        self.selected_rows.discard(row)
    # the delete button is only usable while something is selected
    if self.selected_rows and self.deleteRowButton:
        self.deleteRowButton.Enable()
    else:
        self.deleteRowButton.Disable()
    grid.SetRowAttr(row, attr)
    grid.Refresh()
|
python
|
{
"resource": ""
}
|
q11690
|
ErMagicCheckFrame.update_grid
|
train
|
def update_grid(self, grid):
    """
    Push user edits from a wxPython grid back into the ErMagic data object.

    Parameters
    ----------
    grid : wx grid whose .changes attribute holds the set of edited row
        numbers; -1 marks a row deletion and is skipped here
    """
    # map each grid name to the data-object method that applies a change
    data_methods = {'specimen': self.er_magic_data.change_specimen,
                    'sample': self.er_magic_data.change_sample,
                    'site': self.er_magic_data.change_site,
                    'location': self.er_magic_data.change_location,
                    'age': self.er_magic_data.change_age}
    grid_name = str(grid.GetName())
    cols = list(range(grid.GetNumberCols()))
    col_labels = []
    for col in cols:
        col_labels.append(grid.GetColLabelValue(col))
    for row in grid.changes: # go through changes and update data structures
        if row == -1:
            # -1 flags a deleted row, handled elsewhere
            continue
        else:
            data_dict = {}
            for num, label in enumerate(col_labels):
                if label:
                    data_dict[str(label)] = str(grid.GetCellValue(row, num))
            # column 0 holds the (possibly renamed) object name;
            # self.temp_data remembers the original name at this row index
            new_name = str(grid.GetCellValue(row, 0))
            old_name = self.temp_data[grid_name][row]
            data_methods[grid_name](new_name, old_name, data_dict)
    # reset the sentinel so the grid no longer reports pending edits
    grid.changes = False
|
python
|
{
"resource": ""
}
|
q11691
|
main
|
train
|
def main():
    """
    NAME
        update_measurements.py

    DESCRIPTION
        update the magic_measurements table with new orientation info

    SYNTAX
        update_measurements.py [command line options]

    OPTIONS
        -h prints help message and quits
        -f MFILE, specify magic_measurements file; default is magic_measurements.txt
        -fsa SFILE, specify er_samples table; default is er_samples.txt
        -F OFILE, specify output file, default is same as MFILE
    """
    dir_path = '.'
    meas_file = 'magic_measurements.txt'
    samp_file = "er_samples.txt"
    out_file = 'magic_measurements.txt'
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-WD' in sys.argv:
        ind = sys.argv.index('-WD')
        dir_path = sys.argv[ind+1]
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        meas_file = sys.argv[ind+1]
    if '-fsa' in sys.argv:
        ind = sys.argv.index('-fsa')
        samp_file = sys.argv[ind+1]
    if '-F' in sys.argv:
        ind = sys.argv.index('-F')
        out_file = sys.argv[ind+1]
    # read in measurements and sample tables
    meas_file = dir_path+'/'+meas_file
    out_file = dir_path+'/'+out_file
    samp_file = dir_path+'/'+samp_file
    data, file_type = pmag.magic_read(meas_file)
    samps, file_type = pmag.magic_read(samp_file)
    MeasRecs = []
    # sampnames holds lower-cased names already processed; sflag marks
    # that placeholder sample records were created
    sampnames, sflag = [], 0
    for rec in data:
        for samp in samps:
            if samp['er_sample_name'].lower() == rec['er_sample_name'].lower():
                # matched: copy the site/location info into the measurement.
                # compare lower-cased names consistently (the original
                # checked the raw name but stored the lower-cased one)
                if samp['er_sample_name'].lower() not in sampnames:
                    sampnames.append(samp['er_sample_name'].lower())
                rec['er_site_name'] = samp['er_site_name']
                rec['er_location_name'] = samp['er_location_name']
                MeasRecs.append(rec)
                break
        if rec['er_sample_name'].lower() not in sampnames:
            # no er_samples record found: create a placeholder entry
            sampnames.append(rec['er_sample_name'].lower())
            sflag = 1
            SampRec = {}
            # guard against an empty sample table (samps[0] would raise)
            template_keys = list(samps[0].keys()) if samps else ['er_sample_name']
            for key in template_keys:
                SampRec[key] = ""
            SampRec['er_sample_name'] = rec['er_sample_name']
            SampRec['er_citation_names'] = "This study"
            SampRec['er_site_name'] = 'MISSING'
            SampRec['er_location_name'] = 'MISSING'
            # bug fix: column name was misspelled 'sample_desription'
            SampRec['sample_description'] = 'record added by update_measurements - edit as needed'
            samps.append(SampRec)
            print(rec['er_sample_name'], ' missing from er_samples.txt file - edit orient.txt file and re-import')
            rec['er_site_name'] = 'MISSING'
            rec['er_location_name'] = 'MISSING'
            MeasRecs.append(rec)
    pmag.magic_write(out_file, MeasRecs, 'magic_measurements')
    print("updated measurements file stored in ", out_file)
    if sflag == 1:
        pmag.magic_write(samp_file, samps, 'er_samples')
        print("updated sample file stored in ", samp_file)
|
python
|
{
"resource": ""
}
|
q11692
|
main
|
train
|
def main():
    """
    NAME
        angle.py

    DESCRIPTION
        calculates angle between two input directions D1,D2

    INPUT (COMMAND LINE ENTRY)
        D1_dec D1_inc D1_dec D2_inc

    OUTPUT
        angle

    SYNTAX
        angle.py [-h][-i] [command line options] [< filename]

    OPTIONS
        -h prints help and quits
        -i for interactive data entry
        -f FILE input filename
        -F FILE output filename (required if -F set)
        Standard I/O
    """
    out = ""
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-F' in sys.argv:
        ind = sys.argv.index('-F')
        o = sys.argv[ind + 1]
        out = open(o, 'w')
    if '-i' in sys.argv:
        # interactive entry: loop until the user hits ctrl-D (EOFError)
        cont = 1
        while cont == 1:
            dir1, dir2 = [], []
            try:
                ans = input('Declination 1: [ctrl-D  to quit] ')
                dir1.append(float(ans))
                ans = input('Inclination 1: ')
                dir1.append(float(ans))
                ans = input('Declination 2: ')
                dir2.append(float(ans))
                ans = input('Inclination 2: ')
                dir2.append(float(ans))
            except:
                print("\nGood bye\n")
                sys.exit()
            # send dirs to angle and spit out result
            ang = pmag.angle(dir1, dir2)
            print('%7.1f ' % (ang))
    elif '-f' in sys.argv:
        ind = sys.argv.index('-f')
        file = sys.argv[ind + 1]
        file_input = numpy.loadtxt(file)
    else:
        # read from standard input.
        # bug fix: numpy.float was removed in NumPy 1.20; use builtin float
        file_input = numpy.loadtxt(sys.stdin.readlines(), dtype=float)
    if len(file_input.shape) > 1:  # list of directions
        dir1, dir2 = file_input[:, 0:2], file_input[:, 2:]
    else:  # a single pair of directions
        dir1, dir2 = file_input[0:2], file_input[2:]
    angs = pmag.angle(dir1, dir2)
    for ang in angs:  # print each result, line by line
        print('%7.1f' % (ang))
        if out != "":
            out.write('%7.1f \n' % (ang))
    if out:
        out.close()
|
python
|
{
"resource": ""
}
|
q11693
|
main
|
train
|
def main():
    """
    NAME
        fishrot.py

    DESCRIPTION
        generates set of Fisher distributed data from specified distribution

    SYNTAX
        fishrot.py [-h][-i][command line options]

    OPTIONS
        -h prints help message and quits
        -i for interactive entry
        -k kappa specify kappa, default is 20
        -n N specify N, default is 100
        -D D specify mean Dec, default is 0
        -I I specify mean Inc, default is 90
        where:
            kappa:  fisher distribution concentration parameter
            N:  number of directions desired

    OUTPUT
        dec,  inc
    """
    # defaults: 100 directions, kappa=20, mean direction (0, 90)
    N, kappa, D, I = 100, 20., 0., 90.
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    elif '-i' in sys.argv:
        # interactive entry of all four parameters
        kappa = float(input(' Kappa: '))
        N = int(input(' N: '))
        D = float(input(' Mean Dec: '))
        I = float(input(' Mean Inc: '))
    else:
        args = sys.argv
        if '-k' in args:
            kappa = float(args[args.index('-k') + 1])
        if '-n' in args:
            N = int(args[args.index('-n') + 1])
        if '-D' in args:
            D = float(args[args.index('-D') + 1])
        if '-I' in args:
            I = float(args[args.index('-I') + 1])
    for _ in range(N):
        # draw from a Fisher distribution about vertical, then rotate
        # to the requested mean direction
        dec, inc = pmag.fshdev(kappa)
        drot, irot = pmag.dodirot(dec, inc, D, I)
        print('%7.1f %7.1f ' % (drot, irot))
|
python
|
{
"resource": ""
}
|
q11694
|
Arai_GUI.cart2dir
|
train
|
def cart2dir(self, cart):
    """
    Convert cartesian coordinates to direction(s).

    Parameters
    ----------
    cart : array-like
        a single [x, y, z] vector or an (n, 3) array of vectors

    Returns
    -------
    numpy.ndarray
        [declination, inclination, resultant length] (or an (n, 3) array
        of them); angles are in degrees, declination in [0, 360)
    """
    cart = numpy.array(cart)
    rad = numpy.pi / 180.  # constant to convert degrees to radians
    if len(cart.shape) > 1:
        Xs, Ys, Zs = cart[:, 0], cart[:, 1], cart[:, 2]
    else:  # single vector
        Xs, Ys, Zs = cart[0], cart[1], cart[2]
    Rs = numpy.sqrt(Xs**2 + Ys**2 + Zs**2)  # resultant vector length
    # declination: arctan2 handles the quadrants; modulo 360 keeps it positive.
    # (true division replaces the old past.utils.old_div py2 shim; identical
    # for float operands)
    Decs = (numpy.arctan2(Ys, Xs) / rad) % 360.
    try:
        Incs = numpy.arcsin(Zs / Rs) / rad  # inclination in degrees
    except:
        print('trouble in cart2dir')  # most likely division by zero somewhere
        return numpy.zeros(3)
    return numpy.array([Decs, Incs, Rs]).transpose()
|
python
|
{
"resource": ""
}
|
q11695
|
Arai_GUI.magic_read
|
train
|
def magic_read(self, infile):
    """
    Reads a MagIC template file, puts data in a list of dictionaries.

    Parameters
    ----------
    infile : str
        path to a tab- or space-delimited MagIC format file

    Returns
    -------
    (magic_data, file_type) : (list of dict, str)
        one dict per data row keyed by the column headers; file_type is
        taken from the first line of the file, or 'bad_file' if the file
        cannot be opened
    """
    hold, magic_data, magic_record, magic_keys = [], [], {}, []
    try:
        f = open(infile, "r")
    except:
        return [], 'bad_file'
    # first line names the delimiter and table type, e.g. "tab\tmagic_measurements"
    d = f.readline()[:-1].strip('\n')
    if d[0] == "s" or d[1] == "s":
        delim = 'space'
    elif d[0] == "t" or d[1] == "t":
        delim = 'tab'
    else:
        print('error reading ', infile)
        sys.exit()
    if delim == 'space':
        file_type = d.split()[1]
    if delim == 'tab':
        file_type = d.split('\t')[1]
    if file_type == 'delimited':
        # older header style: "tab delimited <file_type>"
        if delim == 'space':
            file_type = d.split()[2]
        if delim == 'tab':
            file_type = d.split('\t')[2]
    # second line holds the column headers
    if delim == 'space':
        line = f.readline()[:-1].split()
    if delim == 'tab':
        line = f.readline()[:-1].split('\t')
    for key in line:
        magic_keys.append(key)
    lines = f.readlines()
    f.close()  # bug fix: the handle was previously never closed
    for line in lines[:-1]:
        line.replace('\n', '')
        if delim == 'space':
            rec = line[:-1].split()
        if delim == 'tab':
            rec = line[:-1].split('\t')
        hold.append(rec)
    # the last line may lack a trailing newline, so treat it separately;
    # guard against a file with headers but no data rows
    if lines:
        line = lines[-1].replace('\n', '')
        if delim == 'space':
            rec = line[:-1].split()
        if delim == 'tab':
            rec = line.split('\t')
        hold.append(rec)
    for rec in hold:
        magic_record = {}
        if len(magic_keys) != len(rec):
            print("Warning: Uneven record lengths detected: ")
        for k in range(len(rec)):
            magic_record[magic_keys[k]] = rec[k].strip('\n')
        magic_data.append(magic_record)
    # bug fix: the original compared the full token *list* (e.g.
    # ['magic', 'measurements']) against a list of strings, which never
    # matched, so file_type was never lower-cased; compare the first token
    magictype = file_type.lower().split("_")
    Types = ['er', 'magic', 'pmag', 'rmag']
    if magictype[0] in Types:
        file_type = file_type.lower()
    return magic_data, file_type
|
python
|
{
"resource": ""
}
|
q11696
|
Arai_GUI.get_specs
|
train
|
def get_specs(self, data):
    """
    takes a magic format file and returns a list of unique specimen names
    """
    # collect the unique specimen names, then return them sorted
    unique_names = {rec["er_specimen_name"] for rec in data}
    return sorted(unique_names)
|
python
|
{
"resource": ""
}
|
q11697
|
main
|
train
|
def main():
    """
    NAME
        orientation_magic.py

    DESCRIPTION
        takes tab delimited field notebook information and converts to MagIC formatted tables

    SYNTAX
        orientation_magic.py [command line options]

    OPTIONS
        -f FILE: specify input file, default is: orient.txt
        -Fsa FILE: specify output file, default is: er_samples.txt
        -Fsi FILE: specify output site location file, default is: er_sites.txt
        -app append/update these data in existing er_samples.txt, er_sites.txt files
        -ocn OCON:  specify orientation convention, default is #1 below
        -dcn DCON [DEC]: specify declination convention, default is #1 below
           if DCON = 2, you must supply the declination correction
        -BCN don't correct bedding_dip_dir for magnetic declination -already corrected
        -ncn NCON:  specify naming convention: default is #1 below
        -a: averages all bedding poles and uses average for all samples: default is NO
        -gmt HRS:  specify hours to subtract from local time to get GMT: default is 0
        -mcd:  specify sampling method codes as a colon delimited string:  [default is: FS-FD:SO-POM]
             FS-FD field sampling done with a drill
             FS-H field sampling done with hand samples
             FS-LOC-GPS  field location done with GPS
             FS-LOC-MAP  field location done with map
             SO-POM   a Pomeroy orientation device was used
             SO-ASC   an ASC orientation device was used
        -DM: specify data model (2 or 3).  Default: 3.  Will output to the appropriate format.

    Orientation convention:
        Samples are oriented in the field with a "field arrow" and measured in the laboratory with a "lab arrow". The lab arrow is the positive X direction of the right handed coordinate system of the specimen measurements. The lab and field arrows may not be the same. In the MagIC database, we require the orientation (azimuth and plunge) of the X direction of the measurements (lab arrow). Here are some popular conventions that convert the field arrow azimuth (mag_azimuth in the orient.txt file) and dip (field_dip in orient.txt) to the azimuth and plunge of the laboratory arrow (sample_azimuth and sample_dip in er_samples.txt). The two angles, mag_azimuth and field_dip are explained below.

        [1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down)
             of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth;
             lab arrow dip = sample_dip =-field_dip. i.e. the lab arrow dip is minus the hade.
        [2] Field arrow is the strike  of the plane orthogonal to the drill direction,
             Field dip is the hade of the drill direction.  Lab arrow azimuth = mag_azimuth-90
             Lab arrow dip = -field_dip
        [3] Lab arrow is the same as the drill direction;
             hade was measured in the field.
             Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
        [4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too
        [5] Same as AZDIP convention explained below -
             azimuth and inclination of the drill direction are mag_azimuth and field_dip;
             lab arrow is as in [1] above.
             lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90
        [6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip
        [7] all others you will have to either customize your
             self or e-mail ltauxe@ucsd.edu for help.

    Magnetic declination convention:
        [1] Use the IGRF value at the lat/long and date supplied [default]
        [2] Will supply declination correction
        [3] mag_az is already corrected in file
        [4] Correct mag_az but not bedding_dip_dir

    Sample naming convention:
        [1] XXXXY: where XXXX is an arbitrary length site designation and Y
            is the single character sample designation.  e.g., TG001a is the
            first sample from site TG001.    [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [4-Z] XXXX[YYY]:  YYY is sample designation with Z characters from site XXX
        [5] site name = sample name
        [6] site name entered in site_name column in the orient.txt format input file
        [7-Z] [XXX]YYY:  XXX is site designation with Z characters from samples  XXXYYY
        NB: all others you will have to either customize your
            self or e-mail ltauxe@ucsd.edu for help.

    OUTPUT
            output saved in er_samples.txt and er_sites.txt (or samples.txt and sites.txt if using data model 3.0) 
            - this will overwrite any existing files
    """
    args = sys.argv
    if "-h" in args:
        print(main.__doc__)
        sys.exit()
    else:
        # [flag, mandatory?, default] triples consumed by the command-line parser
        info = [['WD', False, '.'], ['ID', False, ''], ['f', False, 'orient.txt'],
                ['app', False, False], ['ocn', False, 1], ['dcn', False, 1],
                ['BCN', False, True], ['ncn', False, '1'], ['gmt', False, 0],
                ['mcd', False, ''], ['a', False, False], ['DM', False, 3]]
        #output_dir_path, input_dir_path, orient_file, append, or_con, dec_correction_con, samp_con, hours_from_gmt, method_codes, average_bedding
        # leave off -Fsa, -Fsi b/c defaults in command_line_extractor
        dataframe = extractor.command_line_dataframe(info)
        checked_args = extractor.extract_and_check_args(args, dataframe)
        # NOTE: the variable names here must stay aligned positionally with
        # the flag list passed to get_vars
        output_dir_path, input_dir_path, orient_file, append, or_con, dec_correction_con, bed_correction, samp_con, hours_from_gmt, method_codes, average_bedding, samp_file, site_file, data_model = extractor.get_vars(['WD', 'ID', 'f', 'app', 'ocn', 'dcn', 'BCN', 'ncn', 'gmt', 'mcd', 'a', 'Fsa', 'Fsi', 'DM'], checked_args)
        if input_dir_path == '.':
            input_dir_path = output_dir_path
        # -dcn may carry a second token: "2 <declination correction value>"
        if not isinstance(dec_correction_con, int):
            if len(dec_correction_con) > 1:
                dec_correction = int(dec_correction_con.split()[1])
                dec_correction_con = int(dec_correction_con.split()[0])
            else:
                dec_correction = 0
        else:
            dec_correction = 0
        ipmag.orientation_magic(or_con, dec_correction_con, dec_correction, bed_correction, samp_con, hours_from_gmt, method_codes, average_bedding, orient_file, samp_file, site_file, output_dir_path, input_dir_path, append, data_model)
|
python
|
{
"resource": ""
}
|
q11698
|
MagicGrid.add_row
|
train
|
def add_row(self, label='', item=''):
    """
    Append a new row to the grid and label its first cell.
    """
    self.AppendRows(1)
    new_row = self.GetNumberRows() - 1
    self.SetCellValue(new_row, 0, str(label))
    # keep the bookkeeping lists in sync with the grid contents
    self.row_labels.append(label)
    self.row_items.append(item)
|
python
|
{
"resource": ""
}
|
q11699
|
MagicGrid.remove_row
|
train
|
def remove_row(self, row_num=None):
    """
    Remove a row from the grid (the last row when row_num is not given).
    """
    # default to the last row; the explicit 0-check keeps row 0 removable
    if not row_num and row_num != 0:
        row_num = self.GetNumberRows() - 1
    label = self.GetCellValue(row_num, 0)
    self.DeleteRows(pos=row_num, numRows=1, updateLabels=True)
    # drop the label from our bookkeeping; if the label name hasn't been
    # saved yet, simply truncate row_labels instead
    if label in self.row_labels:
        self.row_labels.remove(label)
    else:
        self.row_labels = self.row_labels[:-1]
    self.row_items.pop(row_num)
    if not self.changes:
        self.changes = set()
    # -1 is the sentinel marking "a row was deleted"
    self.changes.add(-1)
    # fix #s for rows edited after the deleted one
    self.update_changes_after_row_delete(row_num)
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.