| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
|
# (must use fillna to replace np.nan with False for indexing)
if use_slice:
df = sli.copy()
else:
df = self.df.copy()
# if meth_code not provided, return unchanged dataframe
if not meth_code:
return df
# get regex
if not strict_match:
# grab any record that contains any part of meth_code
cond = df['method_codes'].str.contains(meth_code).fillna(False)
else:
# grab only an exact match
pattern = re.compile(r'{}(?=:|\s|\Z)'.format(meth_code))
cond = df['method_codes'].str.contains(pattern).fillna(False)
if incl:
# return a copy of records with that method code:
return df[cond]
else:
# return a copy of records without that method code
return df[~cond]
|
def get_records_for_code(self, meth_code, incl=True, use_slice=False,
sli=None, strict_match=True)
|
Use regex to see if meth_code is in the method_codes ":" delimited list.
If incl == True, return all records WITH meth_code.
If incl == False, return all records WITHOUT meth_code.
If strict_match == True, return only records with the exact meth_code.
If strict_match == False, return records that contain the meth_code partial string
(e.g., "DE-").
Not inplace.
| 4.296887
| 4.203293
| 1.022267
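A minimal usage sketch for get_records_for_code; meas_df is an illustrative MagicDataFrame with a method_codes column:

# illustrative calls based on the signature and docstring above
lp_recs = meas_df.get_records_for_code('LP-DIR-AF')                  # exact-code matches only
de_recs = meas_df.get_records_for_code('DE-', strict_match=False)    # any code containing 'DE-'
other_recs = meas_df.get_records_for_code('LP-DIR-AF', incl=False)   # records without the code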
|
if self.df.empty:
return df1
elif df1.empty:
return self.df
#copy to prevent mutation
cdf2 = self.df.copy()
#split data into types and decide which to replace
# if replace_dir_or_int == 'dir' and 'method_codes' in cdf2.columns:
# cdf2 = cdf2[cdf2['method_codes'].notnull()]
# acdf2 = cdf2[cdf2['method_codes'].str.contains('LP-PI')]
# mcdf2 = cdf2[cdf2['method_codes'].str.contains('LP-DIR')]
# elif replace_dir_or_int == 'int' and 'method_codes' in cdf2.columns:
# cdf2 = cdf2[cdf2['method_codes'].notnull()]
# mcdf2 = cdf2[cdf2['method_codes'].str.contains('LP-PI')]
# acdf2 = cdf2[cdf2['method_codes'].str.contains('LP-DIR')]
# else:
# mcdf2 = cdf2
# acdf2 = pd.DataFrame(columns=mcdf2.columns)
# get rid of duplicate columns
# [mcdf2.drop(cx,inplace=True,axis=1) for cx in mcdf2.columns if cx in df1.columns]
#join the new calculated data with the old data of same type
if self.dtype.endswith('s'): dtype = self.dtype[:-1]
else: dtype = self.dtype
index_name = dtype + "_name"
for df in [df1, cdf2]:
df.index.name = index_name
mdf = df1.join(cdf2, how='outer', rsuffix='_remove', on=index_name)
def keep_non_null_vals(column):
extra_column = column + "_remove"
if column in mdf.columns and extra_column in mdf.columns:
mdf[column] = np.where(mdf[column].apply(lambda x: not_null(x, False)), mdf[column], mdf[extra_column])
# merge values in the following columns
# e.g., combine info from specimen + specimen_remove into specimen column
for col in ['specimen', 'sample', 'site', 'location', 'lat', 'lon']:
keep_non_null_vals(col)
#drop duplicate columns if they were created
[mdf.drop(col,inplace=True,axis=1) for col in mdf.columns if col.endswith("_remove")]
# the join can create duplicate rows; drop them (ignoring the description column)
mdf.drop_duplicates(inplace=True,subset=[col for col in mdf.columns if col != 'description'])
#merge the data of the other type with the new data
# mdf = mdf.merge(acdf2, how='outer')
if dtype in mdf.columns:
# rebuild the index from the dtype column
mdf = mdf.set_index(dtype)
# set_index consumes the column, so restore it
mdf[dtype] = mdf.index
mdf.index.name = index_name
mdf.sort_index(inplace=True)
return mdf
|
def merge_dfs(self, df1)
|
Description: takes new calculated data and replaces the corresponding data in self.df with the new input data, preserving the most important metadata if it is not otherwise saved. Note: this does not mutate self.df; it simply returns the merged dataframe. If you want to replace self.df, you'll have to do that yourself.
@param: df1 - first DataFrame, whose data will preferentially be used.
| 3.573515
| 3.527367
| 1.013083
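A minimal sketch of merge_dfs in use; spec_container and new_specs are illustrative names:

# merge newly calculated specimen data into the existing table
merged = spec_container.merge_dfs(new_specs)
# merge_dfs is not in-place, so assign the result back if self.df should be replaced
spec_container.df = merged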
|
# don't let custom name start with "./"
if custom_name:
if custom_name.startswith('.'):
custom_name = os.path.split(custom_name)[1]
# put columns in logical order (by group)
self.sort_dataframe_cols()
# if indexing column was put in, remove it
if "num" in self.df.columns:
self.df = self.df.drop("num", axis=1)
#
# make sure name is a string
name = self.get_singular_and_plural_dtype(self.dtype)[0]
if name in self.df.columns:
self.df[name] = self.df[name].astype(str)
#
if df is None:
df = self.df
# get full file path
dir_path = os.path.realpath(dir_path)
if custom_name:
fname = pmag.resolve_file_name(custom_name, dir_path) # os.path.join(dir_path, custom_name)
elif self.magic_file:
fname = pmag.resolve_file_name(self.magic_file, dir_path)
else:
fname = os.path.join(dir_path, self.dtype + ".txt")
# see if there's any data
if not len(df):
print('-W- No data to write to {}'.format(fname))
return False
# add to existing file
if append:
print('-I- appending {} data to {}'.format(self.dtype, fname))
mode = "a"
# overwrite existing file
elif os.path.exists(fname):
print('-I- overwriting {}'.format(fname))
mode = "w"
# or create new file
else:
print('-I- writing {} records to {}'.format(self.dtype, fname))
mode = "w"
f = open(fname, mode)
if append:
header = False
if multi_type:
header = True
f.write('tab\t{}\n'.format(self.dtype))
f.flush()
df.to_csv(f, sep="\t", header=header, index=False, mode='a')
else:
f.write('tab\t{}\n'.format(self.dtype))
f.flush()
df.to_csv(f, sep="\t", header=True, index=False, mode='a')
print('-I- {} records written to {} file'.format(len(df), self.dtype))
f.close()
return fname
|
def write_magic_file(self, custom_name=None, dir_path=".",
append=False, multi_type=False, df=None)
|
Write self.df out to tab-delimited file.
By default will use standard MagIC filenames (specimens.txt, etc.),
or you can provide a custom_name to write to instead.
Writes to custom_name directly if custom_name is a full path,
otherwise writes to dir_path + custom_name.
Parameters
----------
self : MagIC DataFrame
custom_name : str
custom file name
dir_path : str
dir_path (used if custom_name is not a full path), default "."
append : bool
append to existing file, default False
multi_type : bool
for creating upload file
Return
--------
fname : str
output file name
| 2.785546
| 2.749264
| 1.013197
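A minimal sketch of write_magic_file; spec_container is an illustrative MagicDataFrame for a specimens table:

# write with the standard MagIC filename in the current directory
outfile = spec_container.write_magic_file()
# or append to a custom file inside a chosen directory
outfile = spec_container.write_magic_file(custom_name='my_specimens.txt', dir_path='project_dir', append=True)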
|
table_dm = self.data_model.dm[self.dtype]
approved_cols = table_dm.index
unrecognized_cols = (set(self.df.columns) - set(approved_cols))
return unrecognized_cols
|
def get_non_magic_cols(self)
|
Find all columns in self.df that are not real MagIC 3 columns.
Returns
--------
unrecognized_cols : set
| 6.966476
| 6.010682
| 1.159016
|
short_df = self.df.loc[ind_name, col_name]
mask = pd.notnull(short_df)
print(short_df[mask])
try:
val = short_df[mask].unique()[0]
except IndexError:
val = None
return val
|
def get_first_non_null_value(self, ind_name, col_name)
|
For a given index and column, find the first non-null value.
Parameters
----------
self : MagicDataFrame
ind_name : str
index name for indexing
col_name : str
column name for indexing
Returns
---------
single value of str, float, or int
| 3.036253
| 3.307678
| 0.917941
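A usage sketch for get_first_non_null_value; the index and column names are illustrative:

# first non-null latitude recorded for the site named 'MGH1'
lat = sites_container.get_first_non_null_value('MGH1', 'lat')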
|
dtype = dtype.strip()
if dtype.endswith('s'):
return dtype[:-1], dtype
elif dtype == 'criteria':
return 'table_column', 'criteria'
elif dtype == 'contribution':
return 'doi', 'contribution'
|
def get_singular_and_plural_dtype(self, dtype)
|
Parameters
----------
dtype : str
MagIC table type (specimens, samples, contribution, etc.)
Returns
---------
name : str
singular name for MagIC table ('specimen' for specimens table, etc.)
dtype : str
plural dtype for MagIC table ('specimens' for specimens table, etc.)
| 5.738036
| 4.852006
| 1.182611
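A sketch of the mapping this helper performs; contribution is an illustrative object exposing the method:

name, dtype = contribution.get_singular_and_plural_dtype('specimens')     # ('specimen', 'specimens')
name, dtype = contribution.get_singular_and_plural_dtype('criteria')      # ('table_column', 'criteria')
name, dtype = contribution.get_singular_and_plural_dtype('contribution')  # ('doi', 'contribution')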
|
args = sys.argv
fmt = pmag.get_named_arg('-fmt', 'svg')
output_dir_path = pmag.get_named_arg('-WD', '.')
input_dir_path = pmag.get_named_arg('-ID', "")
if "-h" in args:
print(main.__doc__)
sys.exit()
meas_file = pmag.get_named_arg('-f', 'measurements.txt')
spec_file = pmag.get_named_arg('-F', 'specimens.txt')
make_plots = True
save_plots = False
if '-P' in args:
make_plots = False
if '-sav' in args:
save_plots = True
pltspec = pmag.get_named_arg('-spc', 0)
ipmag.hysteresis_magic(output_dir_path, input_dir_path, spec_file, meas_file,
fmt, save_plots, make_plots, pltspec)
|
def main()
|
NAME
hysteresis_magic.py
DESCRIPTION
calculates hysteresis parameters and saves them in 3.0 specimen format file
makes plots if option selected
SYNTAX
hysteresis_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f: specify input file, default is agm_measurements.txt
-F: specify specimens.txt output file
-WD: directory to output files to (default : current directory)
Note: if using Windows, all figures will output to current directory
-ID: directory to read files from (default : same as -WD)
-P: do not make the plots
-spc SPEC: specify specimen name to plot and quit
-sav save all plots and quit
-fmt [png,svg,eps,jpg]
| 2.876342
| 2.295226
| 1.253184
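The same run can be scripted without the command line by calling ipmag directly; the argument values below are illustrative, and the positional order mirrors the call in main() above:

from pmagpy import ipmag
# output dir, input dir, spec file, meas file, fmt, save_plots, make_plots, pltspec
ipmag.hysteresis_magic('.', '.', 'specimens.txt', 'measurements.txt', 'svg', True, True, 0)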
|
if 'data_files' in os.listdir(sys.prefix):
return os.path.join(sys.prefix, 'data_files')
else:
return os.path.join(get_pmag_dir(), 'data_files')
|
def get_data_files_dir()
|
Find directory with data_files (sys.prefix or local PmagPy/data_files)
and return the path.
| 3.139017
| 2.034421
| 1.542954
|
# this is correct for py2exe (DEPRECATED)
#win_frozen = is_frozen()
#if win_frozen:
# path = os.path.abspath(unicode(sys.executable, sys.getfilesystemencoding()))
# path = os.path.split(path)[0]
# return path
# this is correct for py2app
try:
return os.environ['RESOURCEPATH']
# this works for everything else
except KeyError: pass
# new way:
# if we're in the local PmagPy directory:
if os.path.isfile(os.path.join(os.getcwd(), 'pmagpy', 'pmag.py')):
lib_dir = os.path.join(os.getcwd(), 'pmagpy')
# if we're anywhere else:
elif getattr(sys, 'frozen', False): #pyinstaller datafile directory
return sys._MEIPASS
else:
# horrible, hack-y fix
# (prevents namespace issue between
# local github PmagPy and pip-installed PmagPy).
# must reload because we may have
# changed directories since importing
temp = os.getcwd()
os.chdir('..')
reload(locator)
lib_file = resource_filename('locator', 'resource.py')
full_dir = os.path.split(lib_file)[0]
ind = full_dir.rfind(os.sep)
lib_dir = full_dir[:ind+1]
lib_dir = os.path.realpath(os.path.join(lib_dir, 'pmagpy'))
os.chdir(temp)
# end fix
# old way:
#lib_dir = os.path.dirname(os.path.realpath(__file__))
if not os.path.isfile(os.path.join(lib_dir, 'pmag.py')):
lib_dir = os.getcwd()
fname = os.path.join(lib_dir, 'pmag.py')
if not os.path.isfile(fname):
pmag_dir = os.path.split(os.path.split(__file__)[0])[0]
if os.path.isfile(os.path.join(pmag_dir,'pmagpy','pmag.py')):
return pmag_dir
else:
print('-W- Can\'t find the data model! Make sure you have installed pmagpy using pip: "pip install pmagpy --upgrade"')
return '.'
# strip "/" or "\" and "pmagpy" to return proper PmagPy directory
if lib_dir.endswith(os.sep):
lib_dir = lib_dir[:-1]
if lib_dir.endswith('pmagpy'):
pmag_dir = os.path.split(lib_dir)[0]
else:
pmag_dir = lib_dir
return pmag_dir
|
def get_pmag_dir()
|
Returns directory in which PmagPy is installed
| 3.555745
| 3.537633
| 1.00512
|
dir_path="./"
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
magic_file=dir_path+'/'+sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
if '-xkey' in sys.argv:
ind=sys.argv.index('-xkey')
xkey=sys.argv[ind+1]
if '-ykey' in sys.argv:
ind=sys.argv.index('-ykey')
ykey=sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
if '-b' in sys.argv:
ind=sys.argv.index('-b')
xmin=float(sys.argv[ind+1])
xmax=float(sys.argv[ind+2])
ymin=float(sys.argv[ind+3])
ymax=float(sys.argv[ind+4])
#
#
# get data read in
X,Y=[],[]
Data,file_type=pmag.magic_read(magic_file)
if len(Data)>0:
for rec in Data:
if xkey in list(rec.keys()) and rec[xkey]!="" and ykey in list(rec.keys()) and rec[ykey]!="":
try:
X.append(float(rec[xkey]))
Y.append(float(rec[ykey]))
except:
pass
FIG={'fig':1}
pmagplotlib.plot_init(FIG['fig'],5,5)
if '-b' in sys.argv:
pmagplotlib.plot_xy(FIG['fig'],X,Y,sym='ro',xlab=xkey,ylab=ykey,xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax )
else:
pmagplotlib.plot_xy(FIG['fig'],X,Y,sym='ro',xlab=xkey,ylab=ykey)
pmagplotlib.draw_figs(FIG)
ans=input(" S[a]ve to save plot, [q]uit, Return to continue: ")
if ans=="q": sys.exit()
if ans=="a":
files = {}
for key in list(FIG.keys()):
files[key]=str(key) + ".svg"
pmagplotlib.save_plots(FIG,files)
sys.exit()
else:
print('no data to plot')
|
def main()
|
NAME
plot_magic_keys.py
DESCRIPTION
picks out keys and makes an xy plot
SYNTAX
plot_magic_keys.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file
-xkey KEY: specify key for X
-ykey KEY: specify key for Y
-b xmin xmax ymin ymax, sets bounds
| 2.052176
| 1.92815
| 1.064324
|
title = ""
files, fmt = {}, 'svg'
sym = {'lower': ['o', 'r'], 'upper': ['o', 'w']}
plot = 0
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-sav' in sys.argv:
plot = 1
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind + 1]
if '-s' in sys.argv:
ind = sys.argv.index('-s')
sym['size'] = int(sys.argv[ind + 1])
else:
sym['size'] = 20
if '-Lsym' in sys.argv:
ind = sys.argv.index('-Lsym')
sym['lower'][0] = sys.argv[ind + 1]
sym['lower'][1] = sys.argv[ind + 2]
if '-Usym' in sys.argv:
ind = sys.argv.index('-Usym')
sym['upper'][0] = sys.argv[ind + 1]
sym['upper'][1] = sys.argv[ind + 2]
if '-f' in sys.argv: # ask for filename
ind = sys.argv.index('-f')
fname = sys.argv[ind + 1]
else:
print(main.__doc__)
print(' \n -f option required')
sys.exit() # graceful quit
DI = numpy.loadtxt(fname)
EQ = {'eq': 1}
pmagplotlib.plot_init(EQ['eq'], 5, 5)
pmagplotlib.plot_eq_sym(EQ['eq'], DI, 'Equal Area Plot', sym) # make plot
if plot == 0:
pmagplotlib.draw_figs(EQ) # make it visible
for key in list(EQ.keys()):
files[key] = key + '.' + fmt
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles = {}
titles['eq'] = 'Equal Area Plot'
EQ = pmagplotlib.add_borders(EQ, titles, black, purple)
pmagplotlib.save_plots(EQ, files)
elif plot == 1:
fname = os.path.split(fname)[1].split('.')[0]
files['eq'] = fname + '_eq.' + fmt
pmagplotlib.save_plots(EQ, files)
else:
ans = input(" S[a]ve to save plot, [q]uit without saving: ")
if ans == "a":
pmagplotlib.save_plots(EQ, files)
|
def main()
|
NAME
eqarea.py
DESCRIPTION
makes equal area projections from declination/inclination data
INPUT FORMAT
takes dec/inc as first two columns in space delimited file
SYNTAX
eqarea.py [options]
OPTIONS
-f FILE, specify file on command line
-sav save figure and quit
-fmt [svg,jpg,png,pdf] set figure format [default is svg]
-s SIZE specify symbol size - default is 20
-Lsym SHAPE COLOR specify shape and color for lower hemisphere
-Usym SHAPE COLOR specify shape and color for upper hemisphere
shapes: 's': square,'o': circle,'^,>,v,<': [up,right,down,left] triangle, 'd': diamond,
'p': pentagram, 'h': hexagon, '8': octagon, '+': plus, 'x': cross
colors: [b]lue,[g]reen,[r]ed,[c]yan,[m]agenta,[y]ellow,blac[k],[w]hite
| 2.680557
| 2.302145
| 1.164374
|
currentDirectory = self.WD #os.getcwd()
change_dir_dialog = wx.DirDialog(self.panel,
"Choose your working directory to create or edit a MagIC contribution:",
defaultPath=currentDirectory,
style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON | wx.DD_CHANGE_DIR)
result = change_dir_dialog.ShowModal()
if result == wx.ID_CANCEL:
return
if result == wx.ID_OK:
self.WD = change_dir_dialog.GetPath()
self.dir_path.SetValue(self.WD)
change_dir_dialog.Destroy()
wait = wx.BusyInfo('Initializing data object in new directory, please wait...')
wx.SafeYield()
print('-I- Initializing magic data object')
# make new builder object, but reuse old data_model
self.er_magic = builder.ErMagicBuilder(self.WD, self.er_magic.data_model)
print('-I- Read in any available data from working directory')
self.er_magic.get_all_magic_info()
print('-I- Initializing headers')
self.er_magic.init_default_headers()
self.er_magic.init_actual_headers()
del wait
|
def on_change_dir_button(self, event)
|
create change directory frame
| 4.512805
| 4.511103
| 1.000377
|
if self.grid_frame:
print('-I- You already have a grid frame open')
pw.simple_warning("You already have a grid open")
return
try:
grid_type = event.GetButtonObj().Name[:-4] # remove '_btn'
except AttributeError:
grid_type = self.FindWindowById(event.Id).Name[:-4] # remove ('_btn')
wait = wx.BusyInfo('Making {} grid, please wait...'.format(grid_type))
wx.SafeYield()
# hide mainframe
self.on_open_grid_frame()
self.grid_frame = grid_frame.GridFrame(self.er_magic, self.WD, grid_type, grid_type, self.panel)
if self.validation_mode:
if grid_type in self.validation_mode:
self.grid_frame.grid.paint_invalid_cells(self.warn_dict[grid_type])
#self.grid_frame.msg_boxsizer
current_label = self.grid_frame.msg_text.GetLabel()
# the message string is missing from this snippet; fall back to the current label
add_text = current_label
self.grid_frame.msg_text.SetLabel(add_text)
#self.on_finish_change_dir(self.change_dir_dialog)
del wait
|
def make_grid_frame(self, event)
|
Create a GridFrame for data type of the button that was clicked
| 6.619886
| 6.508301
| 1.017145
|
# coherence validations
wait = wx.BusyInfo('Validating data, please wait...')
wx.SafeYield()
spec_warnings, samp_warnings, site_warnings, loc_warnings = self.er_magic.validate_data()
result_warnings = self.er_magic.validate_results(self.er_magic.results)
meas_warnings = self.er_magic.validate_measurements(self.er_magic.measurements)
self.warn_dict = {'specimen': spec_warnings, 'sample': samp_warnings,
'site': site_warnings, 'location': loc_warnings,
'result': result_warnings, 'age': {}, 'measurement': meas_warnings}
# done coherence validations
del wait
# write upload file and perform data validations
wait = wx.BusyInfo('Making upload file, please wait...')
wx.SafeYield()
self.er_magic.write_files()
upfile, error_message, errors = ipmag.upload_magic(dir_path=self.WD,
data_model=self.data_model)
del wait
if upfile:
text = "You are ready to upload.\nYour file:\n{}\nwas generated in directory: \n{}\nDrag and drop this file in the MagIC database.".format(os.path.split(upfile)[1], self.WD)
dlg = wx.MessageDialog(self, caption="Saved", message=text, style=wx.OK)
else:
text = "There were some problems with the creation of your upload file.\nError message: {}\nSee Terminal/Command Prompt for details".format(error_message)
dlg = wx.MessageDialog(self, caption="Error", message=text, style=wx.OK)
result = dlg.ShowModal()
if result == wx.ID_OK:
dlg.Destroy()
self.edited = False
## add together data & coherence errors into one dictionary
if errors:
for item_type in errors:
for item_name in errors[item_type]:
if item_name in self.warn_dict[item_type]:
self.warn_dict[item_type][item_name].update(errors[item_type][item_name])
else:
self.warn_dict[item_type][item_name] = errors[item_type][item_name]
has_problems = []
for item_type, warnings in list(self.warn_dict.items()):
if warnings:
has_problems.append(item_type)
# for any dtypes with validation problems (data or coherence),
# highlight the button to the corresponding grid
# skip this step for Windows
if sys.platform in ['win32', 'win62']:
pass
else:
for dtype in self.warn_dict:
wind = self.FindWindowByName(dtype + '_btn')
if wind:
if dtype in has_problems:
wind.Bind(wx.EVT_PAINT, self.highlight_button)
else:
wind.Unbind(wx.EVT_PAINT, handler=self.highlight_button)
self.Refresh()
if has_problems:
self.validation_mode = set(has_problems)
if sys.platform in ['win32', 'win62']:
self.message.SetLabel('The following grid(s) have incorrect or incomplete data:\n{}'.format(', '.join(self.validation_mode)))
else:
self.message.SetLabel('Highlighted grids have incorrect or incomplete data')
self.bSizer_msg.ShowItems(True)
self.hbox.Fit(self)
if not has_problems:
self.validation_mode = set()
self.message.SetLabel('')
self.bSizer_msg.ShowItems(False)
self.hbox.Fit(self)
|
def on_upload_file(self, event)
|
Write all data to appropriate er_* and pmag_* files.
Then use those files to create a MagIC upload format file.
Validate the upload file.
| 3.611288
| 3.413794
| 1.057852
|
if self.parent.grid_frame:
if self.parent.grid_frame.grid.changes:
dlg = wx.MessageDialog(self,caption="Message:", message="Are you sure you want to exit the program?\nYou have a grid open with unsaved changes.\n ", style=wx.OK|wx.CANCEL)
result = dlg.ShowModal()
if result == wx.ID_OK:
dlg.Destroy()
else:
dlg.Destroy()
return
if self.parent.grid_frame:
self.parent.grid_frame.Destroy()
# if there have been edits, save all data to files
# before quitting
if self.parent.edited:
self.parent.er_magic.write_files()
self.parent.Close()
try:
sys.exit()
except TypeError:
pass
|
def on_quit(self, event)
|
shut down application
| 4.162089
| 4.203822
| 0.990073
|
dia = pmag_menu_dialogs.ClearWD(self.parent, self.parent.WD)
clear = dia.do_clear()
if clear:
print('-I- Clear data object')
self.parent.er_magic = builder.ErMagicBuilder(self.parent.WD, self.parent.data_model)
print('-I- Initializing headers')
self.parent.er_magic.init_default_headers()
self.parent.er_magic.init_actual_headers()
|
def on_clear(self, event)
|
initialize window to allow user to empty the working directory
| 7.880715
| 7.513271
| 1.048906
|
#
# initialize variables
#
version_num=pmag.get_version()
orient_file,samp_file = "orient","er_samples.txt"
args=sys.argv
dir_path,out_path='.','.'
default_outfile = True
#
#
if '-WD' in args:
ind=args.index('-WD')
dir_path=args[ind+1]
if '-OD' in args:
ind=args.index('-OD')
out_path=args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-F" in args:
ind=args.index("-F")
orient_file=sys.argv[ind+1]
default_outfile = False
if "-f" in args:
ind=args.index("-f")
samp_file=sys.argv[ind+1]
orient_file=out_path+'/'+orient_file
samp_file=dir_path+'/'+samp_file
#
# read in file to convert
#
ErSamples=[]
Required=['sample_class','sample_type','sample_lithology','lat','long']
Samps,file_type=pmag.magic_read(samp_file)
Locs=[]
OrKeys=['sample_name','site_name','mag_azimuth','field_dip','sample_class','sample_type','sample_lithology','lat','long','stratigraphic_height','method_codes','site_description']
print("file_type", file_type) # LJ
if file_type.lower()=='er_samples':
SampKeys=['er_sample_name','er_site_name','sample_azimuth','sample_dip','sample_class','sample_type','sample_lithology','sample_lat','sample_lon','sample_height','magic_method_codes','er_sample_description']
elif file_type.lower()=='magic_measurements':
SampKeys=['er_sample_name','er_site_name']
else:
print('wrong file format; must be er_samples or magic_measurements only')
for samp in Samps:
if samp['er_location_name'] not in Locs:Locs.append(samp['er_location_name']) # get all the location names
for location_name in Locs:
loc_samps=pmag.get_dictitem(Samps,'er_location_name',location_name,'T')
OrOut=[]
for samp in loc_samps:
if samp['er_sample_name'] not in ErSamples:
ErSamples.append(samp['er_sample_name'])
OrRec={}
if 'sample_date' in list(samp.keys()) and samp['sample_date'].strip()!="":
date=samp['sample_date'].split(':')
OrRec['date']=date[1]+'/'+date[2]+'/'+date[0][2:4]
for i in range(len(SampKeys)):
if SampKeys[i] in list(samp.keys()):OrRec[OrKeys[i]]=samp[SampKeys[i]]
for key in Required:
if key not in list(OrRec.keys()):OrRec[key]="" # fill in blank required keys
OrOut.append(OrRec)
loc=location_name.replace(" ","_")
if default_outfile:
outfile=orient_file+'_'+loc+'.txt'
else:
outfile=orient_file
pmag.magic_write(outfile,OrOut,location_name)
print("Data saved in: ", outfile)
|
def main()
|
NAME
convert_samples.py
DESCRIPTION
takes an er_samples or magic_measurements format file and creates an orient.txt template
SYNTAX
convert_samples.py [command line options]
OPTIONS
-f FILE: specify input file, default is er_samples.txt
-F FILE: specify output file, default is: orient_LOCATION.txt
INPUT FORMAT
er_samples.txt or magic_measurements format file
OUTPUT
orient.txt format file
| 2.998103
| 2.802967
| 1.069618
|
if len(sys.argv) > 0:
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-f' in sys.argv: # ask for filename
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
else:
data=sys.stdin.readlines() # read in data from standard input
DIs= [] # set up list for dec inc data
ofile = ""
if '-F' in sys.argv: # set up output file
ind = sys.argv.index('-F')
ofile= sys.argv[ind+1]
out = open(ofile, 'w')
for line in data: # read in the data from standard input
if '\t' in line:
rec=line.split('\t') # split each line on space to get records
else:
rec=line.split() # split each line on space to get records
DIs.append((float(rec[0]),float(rec[1])))
#
bpars=pmag.dobingham(DIs)
output = '%7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %i' % (bpars["dec"],bpars["inc"],bpars["Eta"],bpars["Edec"],bpars["Einc"],bpars["Zeta"],bpars["Zdec"],bpars["Zinc"],bpars["n"])
if ofile == "":
print(output)
else:
out.write(output+'\n')
|
def main()
|
NAME
gobing.py
DESCRIPTION
calculates Bingham parameters from dec inc data
INPUT FORMAT
takes dec/inc as first two columns in space delimited file
SYNTAX
gobing.py [options]
OPTIONS
-f FILE to read from FILE
-F, specifies output file name
< filename for reading from standard input
OUTPUT
mean dec, mean inc, Eta, Edec, Einc, Zeta, Zdec, Zinc, N
| 3.188945
| 2.567588
| 1.242
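pmag.dobingham can also be called directly on a list of (dec, inc) pairs, as main() does above; the directions are illustrative:

from pmagpy import pmag
DIs = [(350.0, 45.0), (355.0, 52.0), (5.0, 47.0), (358.0, 44.0)]
bpars = pmag.dobingham(DIs)
print(bpars['dec'], bpars['inc'], bpars['n'])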
|
# initialize some parameters
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
#if "-Fa" in args:
# ind = args.index("-Fa")
# rmag_anis = args[ind + 1]
#if "-Fr" in args:
# ind = args.index("-Fr")
# rmag_res = args[ind + 1]
#meas_file = "atrm_measurements.txt"
#rmag_anis = "trm_anisotropy.txt"
#rmag_res = "atrm_results.txt"
dir_path = pmag.get_named_arg("-WD", ".")
input_dir_path = pmag.get_named_arg("-ID", "")
meas_file = pmag.get_named_arg("-f", "measurements.txt")
data_model_num = int(pmag.get_named_arg("-DM", 3))
spec_outfile = pmag.get_named_arg("-Fsp", "specimens.txt")
spec_infile = pmag.get_named_arg("-fsp", "specimens.txt")
ipmag.atrm_magic(meas_file, dir_path, input_dir_path,
spec_infile, spec_outfile, data_model_num)
|
def main()
|
NAME
atrm_magic.py
DESCRIPTION
Converts ATRM data to best-fit tensor (6 elements plus sigma)
Original program ARMcrunch written to accommodate ARM anisotropy data
collected from 6 axial directions (+X,+Y,+Z,-X,-Y,-Z) using the
off-axis remanence terms to construct the tensor. A better way to
do the anisotropy of ARMs is to use 9,12 or 15 measurements in
the Hext rotational scheme.
SYNTAX
atrm_magic.py [-h][command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input file, default is atrm_measurements.txt
-fsp FILE: specimen input file, default is specimens.txt (optional)
-Fsp FILE: specify output file, default is specimens.txt (MagIC 3 only)
-DM DATA_MODEL: specify MagIC 2 or MagIC 3, default is 3
INPUT
Input for the present program is TRM acquisition data with an optional baseline.
The order of the measurements is:
Decs=[0,90,0,180,270,0,0,90,0]
Incs=[0,0,90,0,0,-90,0,0,90]
The last two measurements are optional
| 3.096891
| 2.512175
| 1.232753
|
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
ofile=sys.argv[ind+1]
out=open(ofile,'w')
else:
out=''
if '-i' in sys.argv: # if one is -i
a95=0
while 1:
try:
ans = input("Input Declination: <cntrl-D to quit> ")
Dec = float(ans) # assign input to Dec, after conversion to floating point
ans = input("Input Inclination: ")
Inc = float(ans)
ans = input("Input Site Latitude: ")
slat = float(ans)
ans = input("Input Site Longitude: ")
slong = float(ans)
output = pmag.dia_vgp(Dec,Inc,a95,slat,slong)
print('%7.1f %7.1f'%(output[0],output[1]))
except:
print("\n Good-bye\n")
sys.exit()
elif '-f' in sys.argv: # input of file name
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
data=numpy.loadtxt(file)
else: #
data = numpy.loadtxt(sys.stdin,dtype=numpy.float) # read from S/I
if len(data.shape)>1: # 2-D array
N=data.shape[0]
if data.shape[1]==4: # only dec,inc,sitelat, site long -no alpha95
data=data.transpose()
inlist=numpy.array([data[0],data[1],numpy.zeros(N),data[2],data[3]]).transpose()
output = pmag.dia_vgp(inlist)
for k in range(N):
if out=='':
print('%7.1f %7.1f'%(output[0][k],output[1][k]))
else:
out.write('%7.1f %7.1f\n'%(output[0][k],output[1][k]))
else: # single line of data
if len(data)==4:
data=[data[0],data[1],0,data[2],data[3]]
output = pmag.dia_vgp(data)
if out=='': # spit to standard output
print('%7.1f %7.1f'%(output[0],output[1]))
else: # write to file
out.write('%7.1f %7.1f\n'%(output[0],output[1]))
|
def main()
|
NAME
di_vgp.py
DESCRIPTION
converts declination/inclination to virtual geomagnetic pole
SYNTAX
di_vgp.py [-h] [options]
OPTIONS
-h prints help message and quits
-i interactive data entry
-f FILE to specify input file
-F FILE to specify output file
<filename to read/write from/to standard input
INPUT
for file entry:
D I SLAT SLON
where:
D: declination
I: inclination
SLAT: site latitude (positive north)
SLON: site longitude (positive east)
OUTPUT
PLON PLAT
where:
PLAT: pole latitude
PLON: pole longitude (positive east)
| 3.124416
| 2.867468
| 1.089608
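pmag.dia_vgp can also be called directly, as in the interactive branch above; the values are illustrative:

from pmagpy import pmag
# dec, inc, alpha95, site latitude, site longitude
output = pmag.dia_vgp(11.0, 45.0, 0, 33.0, -117.0)
plon, plat = output[0], output[1]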
|
os.chdir(self.WD)
options = {}
HUJI_file = self.bSizer0.return_value()
if not HUJI_file:
pw.simple_warning("You must select a HUJI format file")
return False
options['magfile'] = HUJI_file
dat_file = self.bSizer0A.return_value()
if os.path.isfile(dat_file): options['datafile'] = dat_file
else: dat_file=""
magicoutfile=os.path.split(HUJI_file)[1]+".magic"
outfile=os.path.join(self.WD, magicoutfile)
options['meas_file'] = outfile
magicoutfile=os.path.split(HUJI_file)[1]+"_specimens.txt"
spec_outfile=os.path.join(self.WD, magicoutfile)
options['spec_file'] = spec_outfile
magicoutfile=os.path.split(HUJI_file)[1]+"_samples.txt"
samp_outfile=os.path.join(self.WD, magicoutfile)
options['samp_file'] = samp_outfile
magicoutfile=os.path.split(HUJI_file)[1]+"_sites.txt"
site_outfile=os.path.join(self.WD, magicoutfile)
options['site_file'] = site_outfile
magicoutfile=os.path.split(HUJI_file)[1]+"_locations.txt"
loc_outfile=os.path.join(self.WD, magicoutfile)
options['loc_file'] = loc_outfile
user = self.bSizer1.return_value()
options['user'] = user
if user:
user = '-usr ' + user
experiment_type = self.bSizer2.return_value()
options['codelist'] = experiment_type
if not experiment_type:
pw.simple_warning("You must select an experiment type")
return False
cooling_rate = self.cooling_rate.GetValue() or 0
if cooling_rate:
experiment_type = experiment_type + " " + cooling_rate
lab_field = self.bSizer3.return_value()
if not lab_field:
lab_field = "0 0 0"
lab_field_list = lab_field.split()
options['labfield'] = lab_field_list[0]
options['phi'] = lab_field_list[1]
options['theta'] = lab_field_list[2]
lab_field = '-dc ' + lab_field
spc = self.bSizer4.return_value()
options['specnum'] = spc or 0
if not spc:
spc = '-spc 0'
else:
spc = '-spc ' + spc
ncn = self.bSizer5.return_value()
options['samp_con'] = ncn
loc_name = self.bSizer6.return_value()
options['location'] = loc_name
if loc_name:
loc_name = '-loc ' + loc_name
#peak_AF = self.bSizer7.return_value()
#options['peakfield'] = peak_AF
replicate = self.bSizer8.return_value()
if replicate:
options['noave'] = 0
replicate = ''
else:
options['noave'] = 1
replicate = '-A'
COMMAND = "huji_magic_new.py -f {} -fd {} -F {} -Fsp {} -Fsa {} -Fsi {} -Flo {} {} -LP {} {} -ncn {} {} {} {}".format(HUJI_file, dat_file, outfile, spec_outfile, samp_outfile, site_outfile, loc_outfile, user, experiment_type, loc_name, ncn, lab_field, spc, replicate)
program_ran, error_message = convert.huji(**options)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message)
|
def on_okButton(self, event)
|
grab user input values, format them, and run huji_magic.py with the appropriate flags
| 2.654307
| 2.564778
| 1.034907
|
'''
create an editable grid showing demag_orient.txt
'''
#--------------------------------
# orient.txt supports many other headers
# but we will only initialize with
# the essential headers for
# sample orientation and headers present
# in existing demag_orient.txt file
#--------------------------------
#--------------------------------
# create the grid
#--------------------------------
samples_list = list(self.orient_data.keys())
samples_list.sort()
self.samples_list = [sample for sample in samples_list if sample != ""]
#self.headers.extend(self.add_extra_headers(samples_list))
display_headers = [header[1] for header in self.headers]
self.grid = magic_grid.MagicGrid(self.panel, 'orient grid',
self.samples_list, display_headers)
self.grid.InitUI()
#--------------------------------
# color the columns by groups
#--------------------------------
for i in range(len(self.samples_list)):
self.grid.SetCellBackgroundColour(i, 0, "LIGHT GREY")
self.grid.SetCellBackgroundColour(i, 1, "LIGHT STEEL BLUE")
self.grid.SetCellBackgroundColour(i, 2, "YELLOW")
self.grid.SetCellBackgroundColour(i, 3, "YELLOW")
self.grid.SetCellBackgroundColour(i, 4, "PALE GREEN")
self.grid.SetCellBackgroundColour(i, 5, "PALE GREEN")
self.grid.SetCellBackgroundColour(i, 6, "KHAKI")
self.grid.SetCellBackgroundColour(i, 7, "KHAKI")
self.grid.SetCellBackgroundColour(i, 8, "KHAKI")
self.grid.SetCellBackgroundColour(i, 9, "KHAKI")
self.grid.SetCellBackgroundColour(i, 10, "KHAKI")
self.grid.SetCellBackgroundColour(i, 11, "LIGHT MAGENTA")
self.grid.SetCellBackgroundColour(i, 12, "LIGHT MAGENTA")
#--------------------------------
# fill data from self.orient_data
#--------------------------------
headers = [header[0] for header in self.headers]
for sample in self.samples_list:
for key in list(self.orient_data[sample].keys()):
if key in headers:
sample_index = self.samples_list.index(sample)
i = headers.index(key)
val = str(self.orient_data[sample][key])
# if it's a pmag_object, use its name
try:
val = val.name
except AttributeError:
pass
if val and val != "None":
self.grid.SetCellValue(sample_index, i, val)
#--------------------------------
#--------------------------------
# fill in some default values
#--------------------------------
for row in range(self.grid.GetNumberRows()):
col = 1
if not self.grid.GetCellValue(row, col):
self.grid.SetCellValue(row, col, 'g')
#--------------------------------
# temporary trick to get drop-down-menus to work
self.grid.changes = {'a'}
self.grid.AutoSize()
#self.drop_down_menu = drop_down_menus.Menus("orient", self, self.grid, '')
self.drop_down_menu = drop_down_menus3.Menus("orient", self.contribution, self.grid)
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
|
def create_sheet(self)
|
create an editable grid showing demag_orient.txt
| 3.023397
| 2.798974
| 1.080181
|
'''
open orient.txt
read the data
display the data from the file in a new grid
'''
dlg = wx.FileDialog(
self, message="choose orient file",
defaultDir=self.WD,
defaultFile="",
style=wx.FD_OPEN | wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
orient_file = dlg.GetPath()
dlg.Destroy()
new_data, dtype, keys = pmag.magic_read_dict(orient_file,
sort_by_this_name="sample_name",
return_keys=True)
if len(new_data) > 0:
self.orient_data={}
self.orient_data=new_data
#self.create_sheet()
self.update_sheet()
print("-I- If you don't see a change in the spreadsheet, you may need to manually re-size the window")
|
def on_m_open_file(self,event)
|
open orient.txt
read the data
display the data from the file in a new grid
| 5.787887
| 4.446056
| 1.301803
|
if not sample_names:
return []
full_headers = list(self.orient_data[sample_names[0]].keys())
add_ons = []
for head in full_headers:
if head not in self.header_names:
add_ons.append((head, head))
return add_ons
|
def add_extra_headers(self, sample_names)
|
If there are samples, add any additional keys they might use
to supplement the default headers.
Return the headers for adding, with the format:
[(header_name, header_display_name), ....]
| 3.828915
| 3.598292
| 1.064092
|
'''
open orient.txt
read the data
display the data from the file in a new grid
'''
dlg = wx.FileDialog(
self, message="choose orient file",
defaultDir=self.WD,
defaultFile="",
style=wx.FD_OPEN | wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
orient_file = dlg.GetPath()
dlg.Destroy()
new_data = self.er_magic_data.read_magic_file(orient_file, "sample_name")[0]
if len(new_data) > 0:
self.orient_data={}
self.orient_data=new_data
#self.create_sheet()
self.update_sheet()
print("-I- If you don't see a change in the spreadsheet, you may need to manually re-size the window")
|
def on_m_open_file(self,event)
|
open orient.txt
read the data
display the data from the file in a new grid
| 5.675565
| 4.247652
| 1.336165
|
'''
save demag_orient.txt
(only the columns that appear on the grid frame)
'''
fout = open(os.path.join(self.WD, "demag_orient.txt"), 'w')
STR = "tab\tdemag_orient\n"
fout.write(STR)
headers = [header[0] for header in self.headers]
STR = "\t".join(headers) + "\n"
fout.write(STR)
for sample in self.samples_list:
STR = ""
for header in headers:
sample_index = self.samples_list.index(sample)
i = headers.index(header)
value = self.grid.GetCellValue(sample_index, i)
STR = STR + value + "\t"
fout.write(STR[:-1] + "\n")
if event != None:
dlg1 = wx.MessageDialog(None,caption="Message:", message="data saved in file demag_orient.txt" ,style=wx.OK|wx.ICON_INFORMATION)
dlg1.ShowModal()
dlg1.Destroy()
|
def on_m_save_file(self,event)
|
save demag_orient.txt
(only the columns that appear on the grid frame)
| 3.581652
| 2.622743
| 1.365613
|
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
zfile, tfile = 'zeq_redo', 'thellier_redo'
zredo, tredo = "", ""
dir_path = pmag.get_named_arg('-WD', '.')
inspec = pmag.get_named_arg('-f', 'specimens.txt')
if '-F' in sys.argv:
ind = sys.argv.index('-F')
redo = sys.argv[ind + 1]
tfile = redo
zfile = redo
inspec = pmag.resolve_file_name(inspec, dir_path)
zfile = pmag.resolve_file_name(zfile, dir_path)
tfile = pmag.resolve_file_name(tfile, dir_path)
#
# read in data
#
specs = []
prior_spec_data, file_type = pmag.magic_read(inspec)
if file_type != 'specimens':
print(file_type, " this is not a valid pmag_specimens file")
sys.exit()
outstrings = []
for spec in prior_spec_data:
tmp = spec["method_codes"].split(":")
meths = []
for meth in tmp:
methods = meth.strip().split('-')
for m in methods:
if m not in meths:
meths.append(m)
if 'DIR' in meths: # DE-BFL, DE-BFP or DE-FM
specs.append(spec['specimen'])
if 'dir_comp' in list(spec.keys()) and spec['dir_comp'] != "" and spec['dir_comp'] != " ":
comp_name = spec['dir_comp']
else:
comp_name = string.ascii_uppercase[specs.count(
spec['specimen']) - 1]
calculation_type = "DE-BFL" # assume default calculation type is best-fit line
if "BFP" in meths:
calculation_type = 'DE-BFP'
elif "FM" in meths:
calculation_type = 'DE-FM'
if zredo == "":
zredo = open(zfile, "w")
outstring = '%s %s %s %s %s \n' % (
spec["specimen"], calculation_type, spec["meas_step_min"], spec["meas_step_max"], comp_name)
if outstring not in outstrings:
zredo.write(outstring)
outstrings.append(outstring) # only writes unique interpretations
elif "PI" in meths and "TRM" in meths: # thellier record
if tredo == "":
tredo = open(tfile, "w")
outstring = '%s %i %i \n' % (spec["specimen"], float(
spec["meas_step_min"]), float(spec["meas_step_max"]))
if outstring not in outstrings:
tredo.write(outstring)
outstrings.append(outstring) # only writes unique interpretations
print('Redo files saved to: ', zfile, tfile)
|
def main()
|
NAME
mk_redo.py
DESCRIPTION
Makes thellier_redo and zeq_redo files from existing pmag_specimens format file
SYNTAX
mk_redo.py [-h] [command line options]
INPUT
takes specimens.txt formatted input file
OPTIONS
-h: prints help message and quits
-f FILE: specify input file, default is 'specimens.txt'
-F REDO: specify output file suffix, default is redo so that
output filenames are 'thellier_redo' for thellier data and 'zeq_redo' for direction only data
OUTPUT
makes a thellier_redo or a zeq_redo format file
| 3.019249
| 2.746263
| 1.099403
|
minx, miny, maxx, maxy = ls.bounds
points = {'left': [(minx, miny), (minx, maxy)],
'right': [(maxx, miny), (maxx, maxy)],
'bottom': [(minx, miny), (maxx, miny)],
'top': [(minx, maxy), (maxx, maxy)],}
return sgeom.LineString(points[side])
|
def find_side(ls, side)
|
Given a shapely LineString which is assumed to be rectangular, return the
line corresponding to a given side of the rectangle.
| 2.060503
| 1.869859
| 1.101957
|
te = lambda xy: xy[0]
lc = lambda t, n, b: np.vstack((np.zeros(n) + t, np.linspace(b[2], b[3], n))).T
xticks, xticklabels = _lambert_ticks(ax, ticks, 'bottom', lc, te)
ax.xaxis.tick_bottom()
ax.set_xticks(xticks)
ax.set_xticklabels([ax.xaxis.get_major_formatter()(xtick) for xtick in xticklabels])
|
def lambert_xticks(ax, ticks)
|
Draw ticks on the bottom x-axis of a Lambert Conformal projection.
| 3.116162
| 2.901938
| 1.073821
|
te = lambda xy: xy[1]
lc = lambda t, n, b: np.vstack((np.linspace(b[0], b[1], n), np.zeros(n) + t)).T
yticks, yticklabels = _lambert_ticks(ax, ticks, 'left', lc, te)
ax.yaxis.tick_left()
ax.set_yticks(yticks)
ax.set_yticklabels([ax.yaxis.get_major_formatter()(ytick) for ytick in yticklabels])
|
def lambert_yticks(ax, ticks)
|
Draw ticks on the left y-axis of a Lambert Conformal projection.
| 3.048911
| 2.919083
| 1.044476
|
outline_patch = sgeom.LineString(ax.outline_patch.get_path().vertices.tolist())
axis = find_side(outline_patch, tick_location)
n_steps = 30
extent = ax.get_extent(ccrs.PlateCarree())
_ticks = []
for t in ticks:
xy = line_constructor(t, n_steps, extent)
proj_xyz = ax.projection.transform_points(ccrs.Geodetic(), xy[:, 0], xy[:, 1])
xyt = proj_xyz[..., :2]
ls = sgeom.LineString(xyt.tolist())
locs = axis.intersection(ls)
if not locs:
tick = [None]
else:
tick = tick_extractor(locs.xy)
_ticks.append(tick[0])
# Remove ticks that aren't visible:
ticklabels = copy(ticks)
while True:
try:
index = _ticks.index(None)
except ValueError:
break
_ticks.pop(index)
ticklabels.pop(index)
return _ticks, ticklabels
|
def _lambert_ticks(ax, ticks, tick_location, line_constructor, tick_extractor)
|
Get the tick locations and labels for an axis of a Lambert Conformal projection.
| 3.157393
| 3.067827
| 1.029195
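A usage sketch tying the three helpers above to a cartopy Lambert Conformal map; the extent and tick values are illustrative:

import matplotlib.pyplot as plt
import cartopy.crs as ccrs

fig = plt.figure(figsize=(8, 6))
ax = plt.axes(projection=ccrs.LambertConformal())
ax.set_extent([-125, -70, 22, 50], crs=ccrs.PlateCarree())
ax.coastlines()
fig.canvas.draw()  # the axes outline must exist before ticks are computed
lambert_xticks(ax, list(range(-120, -60, 10)))
lambert_yticks(ax, list(range(20, 60, 10)))
plt.show()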
|
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
# initialize variables from command line + defaults
dir_path = pmag.get_named_arg("-WD", default_val=".")
input_dir_path = pmag.get_named_arg('-ID', '')
if not input_dir_path:
input_dir_path = dir_path
in_file = pmag.get_named_arg("-f", default_val="measurements.txt")
in_file = pmag.resolve_file_name(in_file, input_dir_path)
if "-ID" not in sys.argv:
input_dir_path = os.path.split(in_file)[0]
plot_by = pmag.get_named_arg("-obj", default_val="loc")
LT = pmag.get_named_arg("-LT", "AF")
no_norm = pmag.get_flag_arg_from_sys("-N")
norm = False if no_norm else True
save_plots = pmag.get_flag_arg_from_sys("-sav")
fmt = pmag.get_named_arg("-fmt", "svg")
XLP = pmag.get_named_arg("-XLP", "")
spec_file = pmag.get_named_arg("-fsp", default_val="specimens.txt")
samp_file = pmag.get_named_arg("-fsa", default_val="samples.txt")
site_file = pmag.get_named_arg("-fsi", default_val="sites.txt")
loc_file = pmag.get_named_arg("-flo", default_val="locations.txt")
dmag_magic(in_file, dir_path, input_dir_path, spec_file, samp_file,
site_file, loc_file, plot_by, LT, norm, XLP,
save_plots, fmt)
|
def main()
|
NAME
dmag_magic.py
DESCRIPTION
plots intensity decay curves for demagnetization experiments
SYNTAX
dmag_magic -h [command line options]
INPUT
takes magic formatted measurements.txt files
OPTIONS
-h prints help message and quits
-f FILE: specify input file, default is: measurements.txt
-obj OBJ: specify object [loc, sit, sam, spc] for plot,
default is by location
-LT [AF,T,M]: specify lab treatment type, default AF
-XLP [PI]: exclude specific lab protocols,
(for example, method codes like LP-PI)
-N do not normalize by NRM magnetization
-sav save plots silently and quit
-fmt [svg,jpg,png,pdf] set figure format [default is svg]
NOTE
loc: location (study); sit: site; sam: sample; spc: specimen
| 2.647302
| 2.062853
| 1.283321
|
norm=0
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
data=np.loadtxt(file)
dates=[2000]
elif '-d' in sys.argv:
ind=sys.argv.index('-d')
dates=[float(sys.argv[ind+1])]
elif '-r' in sys.argv:
ind=sys.argv.index('-r')
dates=np.loadtxt(sys.argv[ind+1])
if '-n' in sys.argv: norm=1
if len(sys.argv)!=0 and '-h' in sys.argv:
print(main.__doc__)
sys.exit()
plt.semilogy()
plt.xlabel('Degree (l)')
plt.ylabel(r'Power ($\mu$T$^2$)')
labels=[]
for date in dates:
if date!=2000:
gh=pmag.doigrf(0,0,0,date,coeffs=1)
data=pmag.unpack(gh)
Ls,Rs=pmag.lowes(data)
labels.append(str(date))
print(date,Rs[0])
if norm==1:
Rs=old_div(np.array(Rs),Rs[0])
#plt.plot(Ls,Rs,'ro')
plt.plot(Ls,Rs,linewidth=2)
plt.legend(labels, loc='upper right')
plt.draw()
input()
|
def main()
|
NAME
lowes.py
DESCRIPTION
Plots Lowes spectrum for input IGRF-like file
SYNTAX
lowes.py [options]
OPTIONS:
-h prints help message and quits
-f FILE specify file name with input data
-d date specify desired date
-r read desired dates from file
-n normalize to dipole term
INPUT FORMAT:
l m g h
| 3.55561
| 3.1958
| 1.112588
|
import numpy
X=numpy.arange(.1,10.1,.2) #make an array of numbers
Y=myfunc(X) # calls myfunc with argument X
for i in range(len(X)):
print(X[i],Y[i])
|
def main()
|
This program prints doubled values!
| 6.906274
| 6.034245
| 1.144513
|
d,i,file2="","",""
fmt,plot='svg',0
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-sav' in sys.argv: plot=1
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file1=sys.argv[ind+1]
if '-f2' in sys.argv:
ind=sys.argv.index('-f2')
file2=sys.argv[ind+1]
if '-dir' in sys.argv:
ind=sys.argv.index('-dir')
d=float(sys.argv[ind+1])
i=float(sys.argv[ind+2])
D1=numpy.loadtxt(file1,dtype=numpy.float)
if file2!="": D2=numpy.loadtxt(file2,dtype=numpy.float)
#
counter,NumSims=0,1000
#
# get bootstrapped means for first data set
#
print("Doing first set of directions, please be patient..")
BDI1=pmag.di_boot(D1)
#
# convert to cartesian coordinates X1,X2, Y1,Y2 and Z1, Z2
#
if d=="": # repeat for second data set
print("Doing second set of directions, please be patient..")
BDI2=pmag.di_boot(D2)
else:
BDI2=[]
# set up plots
CDF={'X':1,'Y':2,'Z':3}
pmagplotlib.plot_init(CDF['X'],4,4)
pmagplotlib.plot_init(CDF['Y'],4,4)
pmagplotlib.plot_init(CDF['Z'],4,4)
# draw the cdfs
pmagplotlib.plot_com(CDF,BDI1,BDI2,[d,i])
files={}
files['X']='CD_X.'+fmt
files['Y']='CD_Y.'+fmt
files['Z']='CD_Z.'+fmt
if plot==0:
pmagplotlib.draw_figs(CDF)
ans=input("S[a]ve plots, <Return> to quit ")
if ans=="a":
pmagplotlib.save_plots(CDF,files)
else:
sys.exit()
else:
pmagplotlib.save_plots(CDF,files)
sys.exit()
|
def main()
|
NAME
common_mean.py
DESCRIPTION
calculates bootstrap statistics to test for common mean
INPUT FORMAT
takes dec/inc as first two columns in two space delimited files
SYNTAX
common_mean.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE, input file
-f2 FILE, optional second file to compare with first file
-dir D I, optional direction to compare with input file
-fmt [svg,jpg,pnd,pdf] set figure format [default is svg]
NOTES
must have either F2 OR dir but not both
| 3.165464
| 2.985441
| 1.0603
|
# makes sure all values are floats, then norms them by largest value
X = numpy.array(list(map(float, x)))
X = old_div(X, max(X))
Y = numpy.array(list(map(float, y)))
Y = old_div(Y, max(Y))
XY = numpy.array(list(zip(X, Y)))
# Provide the initial estimate
E1 = TaubinSVD(XY)
# Determine the iterative solution
E2 = LMA(XY, E1)
estimates = [E2[2], E2[0], E2[1]]
best_a = E2[0]
best_b = E2[1]
best_r = E2[2]
if best_a <= numpy.mean(X) and best_b <= numpy.mean(Y):
k = old_div(-1.,best_r)
else:
k = old_div(1.,best_r)
SSE = get_SSE(best_a, best_b, best_r, X, Y)
return k, best_a, best_b, SSE
|
def AraiCurvature(x=x,y=y)
|
input: list of x points, list of y points
output: k, a, b, SSE. curvature, circle center, and SSE
Function for calculating the radius of the best fit circle to a set of
x-y coordinates.
Paterson, G. A., (2011), A simple test for the presence of multidomain
behaviour during paleointensity experiments, J. Geophys. Res., in press,
doi: 10.1029/2011JB008369
| 3.988959
| 3.85191
| 1.03558
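A sketch of calling AraiCurvature on illustrative Arai-plot points (pTRM gained vs. NRM remaining):

x_pts = [0.0, 0.2, 0.45, 0.7, 0.9, 1.0]
y_pts = [1.0, 0.82, 0.55, 0.33, 0.12, 0.02]
k, a, b, SSE = AraiCurvature(x_pts, y_pts)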
|
XY = numpy.array(XY)
X = XY[:,0] - numpy.mean(XY[:,0]) # norming points by x avg
Y = XY[:,1] - numpy.mean(XY[:,1]) # norming points by y avg
centroid = [numpy.mean(XY[:,0]), numpy.mean(XY[:,1])]
Z = X * X + Y * Y
Zmean = numpy.mean(Z)
Z0 = old_div((Z - Zmean), (2. * numpy.sqrt(Zmean)))
ZXY = numpy.array([Z0, X, Y]).T
U, S, V = numpy.linalg.svd(ZXY, full_matrices=False) #
V = V.transpose()
A = V[:,2]
A[0] = old_div(A[0], (2. * numpy.sqrt(Zmean)))
A = numpy.concatenate([A, [(-1. * Zmean * A[0])]], axis=0)
a, b = (-1 * A[1:3]) / A[0] / 2 + centroid
r = numpy.sqrt(A[1]*A[1]+A[2]*A[2]-4*A[0]*A[3])/abs(A[0])/2;
return a,b,r
|
def TaubinSVD(XY)
|
algebraic circle fit
input: list [[x_1, y_1], [x_2, y_2], ....]
output: a, b, r. a and b are the center of the fitting circle, and r is the radius
Algebraic circle fit by Taubin
G. Taubin, "Estimation Of Planar Curves, Surfaces And Nonplanar
Space Curves Defined By Implicit Equations, With
Applications To Edge And Range Image Segmentation",
IEEE Trans. PAMI, Vol. 13, pages 1115-1138, (1991)
| 3.291682
| 3.072565
| 1.071314
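A sketch fitting TaubinSVD to points sampled from the unit circle:

import numpy
theta = numpy.linspace(0, numpy.pi, 20)
pts = numpy.column_stack((numpy.cos(theta), numpy.sin(theta)))
a, b, r = TaubinSVD(pts)  # expect a and b near 0 and r near 1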
|
# must have at least 4 sets of xy points or else division by zero occurs
if type(XY) != numpy.ndarray:
    XY = numpy.array(XY)
n = len(XY)
if n < 4:
raise Warning("Circle cannot be calculated with less than 4 data points. Please include more data")
Dx = XY[:,0] - Par[0]
Dy = XY[:,1] - Par[1]
D = numpy.sqrt(Dx * Dx + Dy * Dy) - Par[2]
result = old_div(numpy.dot(D, D),(n-3))
return result
|
def VarCircle(XY, Par)
|
computing the sample variance of distances from data points (XY) to the circle Par = [a b R]
| 3.741473
| 3.3297
| 1.123667
|
SSE = 0
X = numpy.array(x)
Y = numpy.array(y)
for i in range(len(X)):
x = X[i]
y = Y[i]
v = (numpy.sqrt( (x -a)**2 + (y - b)**2 ) - r )**2
SSE += v
return SSE
|
def get_SSE(a,b,r,x,y)
|
input: a, b, r, x, y. circle center, radius, xpts, ypts
output: SSE
| 2.524791
| 2.605513
| 0.969019
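Continuing the TaubinSVD sketch above, get_SSE scores the fit:

sse = get_SSE(a, b, r, pts[:, 0], pts[:, 1])  # near zero for a near-perfect fit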
|
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
data=f.readlines() # read in data from standard input
for line in data: # step through line by line
dec=spitout(line)
sys.exit()
if '-i' in sys.argv:
while 1: # repeat this block until program killed
sundata={} # dictionary with sundata in it
print ("Time difference between Greenwich Mean Time (hrs to subtract from local time to get GMT): ")
try:
sundata["delta_u"]=input("<cntl-D> to quit ")
except:
print("\n Good-bye\n")
sys.exit()
date=""
date=date+input("Year: <cntl-D to quit> ")
date=date+":"+input("Month: ")
date=date+":"+input("Day: ")
date=date+":"+input("hour: ")
date=date+":"+input("minute: ")
sundata["date"]=date
sundata["lat"]=input("Latitude of sampling site (negative in southern hemisphere): ")
sundata["lon"]=input("Longitude of sampling site (negative for western hemisphere): ")
sundata["shadow_angle"]=input("Shadow angle: ")
print('%7.1f'%(pmag.dosundec(sundata))) # call sundec function from pmag module and print
else:
data=sys.stdin.readlines() # read in data from standard input
for line in data: # step through line by line
dec=spitout(line)
|
def main()
|
NAME
sundec.py
DESCRIPTION
calculates declination from sun compass measurements
INPUT FORMAT
GMT_offset, lat,long,year,month,day,hours,minutes,shadow_angle
where GMT_offset is the hours to subtract from local time for GMT.
SYNTAX
sundec.py [-i][-f FILE] [< filename ]
OPTIONS
-i for interactive data entry
-f FILE to set file name on command line
otherwise put data in input format in space delimited file
OUTPUT:
declination
| 4.680617
| 4.097637
| 1.142272
|
# set defaults
site_file="er_sites.txt"
loc_file="er_locations.txt"
Names,user=[],"unknown"
Done=[]
version_num=pmag.get_version()
args=sys.argv
dir_path='.'
# get command line stuff
if '-WD' in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if '-f' in args:
ind=args.index("-f")
site_file=args[ind+1]
if '-F' in args:
ind=args.index("-F")
loc_file=args[ind+1]
#
site_file=dir_path+'/'+site_file
loc_file=dir_path+'/'+loc_file
Sites,file_type=pmag.magic_read(site_file)
if file_type != 'er_sites':
print(file_type)
print(file_type,"This is not a valid er_sites file ")
sys.exit()
# read in site data
#
LocNames,Locations=[],[]
for site in Sites:
if site['er_location_name'] not in LocNames: # new location name
LocNames.append(site['er_location_name'])
sites_locs=pmag.get_dictitem(Sites,'er_location_name',site['er_location_name'],'T') # get all sites for this loc
lats=pmag.get_dictkey(sites_locs,'site_lat','f') # get all the latitudes as floats
lons=pmag.get_dictkey(sites_locs,'site_lon','f') # get all the longitudes as floats
LocRec={'er_citation_names':'This study','er_location_name':site['er_location_name'],'location_type':''}
LocRec['location_begin_lat']=str(min(lats))
LocRec['location_end_lat']=str(max(lats))
LocRec['location_begin_lon']=str(min(lons))
LocRec['location_end_lon']=str(max(lons))
Locations.append(LocRec)
if len(Locations)>0:
pmag.magic_write(loc_file,Locations,"er_locations")
print("Locations written to: ",loc_file)
|
def main()
|
NAME
sites_locations.py
DESCRIPTION
reads in er_sites.txt file and finds all locations and bounds of locations
outputs er_locations.txt file
SYNTAX
sites_locations.py [command line options]
OPTIONS
-h prints help message and quits
-f: specify input er_sites format file, default is "er_sites.txt"
-F: locations table: default is "er_locations.txt"
| 2.684956
| 2.550357
| 1.052777
|
cond = method_list['dtype'] == mtype
codes = method_list[cond]
return codes
|
def get_one_meth_type(self, mtype, method_list)
|
Get all codes of one type (i.e., 'anisotropy_estimation')
| 8.624582
| 4.815665
| 1.790943
|
categories = Series(code_types[code_types[category] == True].index)
cond = all_codes['dtype'].isin(categories)
codes = all_codes[cond]
return codes
|
def get_one_meth_category(self, category, all_codes, code_types)
|
Get all codes in one category (i.e., all pmag codes).
This can include multiple method types (i.e., 'anisotropy_estimation', 'sample_preparation', etc.)
| 5.855303
| 5.682757
| 1.030363
|
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
dataframe = extractor.command_line_dataframe([['f', False, 'orient.txt'], ['Fsa', False, 'samples.txt'], ['ncn', False, "1"], ['mcd', False, 'FS-FD'], ['loc', False, 'unknown'], ['app', False, False], ['WD', False, '.'], ['ID', False, '.'], ['DM', False, 3]])
checked_args = extractor.extract_and_check_args(args, dataframe)
#print('checked_args:', checked_args)
orient_file, samp_file, samp_con, method_codes, location_name, append, output_dir, input_dir, data_model = extractor.get_vars(['f', 'Fsa', 'ncn', 'mcd', 'loc', 'app', 'WD', 'ID', 'DM'], checked_args)
if len(str(samp_con)) > 1:
samp_con, Z = samp_con.split('-')
Z = float(Z)
else:
Z = 1
ipmag.azdip_magic(orient_file, samp_file, samp_con, Z, method_codes, location_name, append, output_dir, input_dir, data_model)
|
def main()
|
NAME
azdip_magic.py
DESCRIPTION
takes space delimited AzDip file and converts to MagIC formatted tables
SYNTAX
azdip_magic.py [command line options]
OPTIONS
-f FILE: specify input file
-Fsa FILE: specify output file, default is: er_samples.txt/samples.txt
-ncn NCON: specify naming convention: default is #1 below
-mcd: specify sampling method codes as a colon delimited string: [default is: FS-FD]
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
SO-MAG orientation with magnetic compass
-loc: location name, default="unknown"
-app appends to existing samples file, default is to overwrite
INPUT FORMAT
Input files must be space delimited:
Samp Az Dip Strike Dip
Orientation convention:
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
e.g. field_dip is degrees from horizontal of drill direction
Magnetic declination convention:
Az is already corrected in file
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
        NB: all others you will have to customize yourself
            or e-mail ltauxe@ucsd.edu for help.
OUTPUT
    output saved in samples file; will overwrite any existing file unless -app is specified
| 5.471479
| 3.132997
| 1.746404
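A minimal sketch of driving the same conversion from Python rather than the command line, mirroring the ipmag.azdip_magic call that main() makes above; the input file name and location are placeholders, and the import path assumes the usual pmagpy package layout.

from pmagpy import ipmag

# placeholder file names; positional arguments follow the ipmag.azdip_magic call in main() above:
# orient_file, samp_file, samp_con, Z, method_codes, location_name, append, output_dir, input_dir, data_model
ipmag.azdip_magic('azdip_example.txt', 'samples.txt', '1', 1, 'FS-FD',
                  'unknown', False, '.', '.', 3)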
|
args = sys.argv
if '-h' in args:
print(do_help())
sys.exit()
# def k15_magic(k15file, specnum=0, sample_naming_con='1', er_location_name="unknown", measfile='magic_measurements.txt', sampfile="er_samples.txt", aniso_outfile='rmag_anisotropy.txt', result_file="rmag_results.txt", input_dir_path='.', output_dir_path='.'):
dataframe = extractor.command_line_dataframe([['f', True, ''], ['F', False, 'measurements.txt'], ['Fsa', False, 'samples.txt'], ['Fa', False, 'specimens.txt'], [
'Fr', False, 'rmag_results.txt'], ['spc', False, 0], ['ncn', False, '1'], ['loc', False, 'unknown'], ['WD', False, '.'], ['ID', False, '.'], ['DM', False, 3]])
checked_args = extractor.extract_and_check_args(args, dataframe)
k15file, measfile, sampfile, aniso_outfile, result_file, specnum, sample_naming_con, location_name, output_dir_path, input_dir_path, data_model_num = extractor.get_vars(
['f', 'F', 'Fsa', 'Fa', 'Fr', 'spc', 'ncn', 'loc', 'WD', 'ID', 'DM'], checked_args)
program_ran, error_message = convert.k15(k15file, specnum=specnum, sample_naming_con=sample_naming_con, location=location_name, meas_file=measfile,
samp_file=sampfile, aniso_outfile=aniso_outfile, result_file=result_file, input_dir_path=input_dir_path, dir_path=output_dir_path, data_model_num=data_model_num)
|
def main()
|
NAME
k15_magic.py
DESCRIPTION
converts .k15 format data to magic_measurements format.
    assumes Jelinek Kappabridge measurement scheme
SYNTAX
k15_magic.py [-h] [command line options]
OPTIONS
-h prints help message and quits
-DM DATA_MODEL: specify data model 2 or 3 (default 3)
-f KFILE: specify .k15 format input file
-F MFILE: specify measurement output file
-Fsa SFILE, specify sample file for output
-Fa AFILE, specify specimen file for output [rmag_anisotropy for data model 2 only]
#-ins INST: specify instrument that measurements were made on # not implemented
-spc NUM: specify number of digits for specimen ID, default is 0
    -ncn NCON: specify naming convention (default is #1)
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXXYYY: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
        NB: all others you will have to either customize yourself
            or e-mail ltauxe@ucsd.edu for help.
DEFAULTS
MFILE: measurements.txt
SFILE: samples.txt
AFILE: specimens.txt
INPUT
name [az,pl,strike,dip], followed by
3 rows of 5 measurements for each specimen
| 4.278393
| 3.310023
| 1.292557
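A minimal sketch of the equivalent library call, mirroring the convert.k15 keyword arguments used in main() above; the .k15 file name is a placeholder and the import path assumes the usual pmagpy layout.

from pmagpy import convert_2_magic as convert

# placeholder input file; keywords mirror the convert.k15 call in main() above
ok, msg = convert.k15('example.k15', specnum=0, sample_naming_con='1',
                      location='unknown', meas_file='measurements.txt',
                      samp_file='samples.txt', aniso_outfile='specimens.txt',
                      result_file='rmag_results.txt', input_dir_path='.',
                      dir_path='.', data_model_num=3)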
|
missing = []
for col in reqd_cols:
if col not in data[0]:
missing.append(col)
return missing
|
def check_for_reqd_cols(data, reqd_cols)
|
Check data (PmagPy list of dicts) for required columns
| 2.774794
| 2.336335
| 1.18767
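A tiny illustration of check_for_reqd_cols on a made-up PmagPy list of dicts; note that only the keys of the first record are inspected.

data = [{'specimen': 'sp01', 'dir_dec': '245.5'}]           # made-up record
check_for_reqd_cols(data, ['specimen', 'dir_dec', 'dir_inc'])
# -> ['dir_inc'], the only required column missing from data[0]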
|
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
else:
data=sys.stdin.readlines() # read in data from standard input
if '-dir' in sys.argv: #
ind=sys.argv.index('-dir')
typ=sys.argv[ind+1]
if typ=='L': calculation_type='DE-BFL'
if typ=='P': calculation_type='DE-BFP'
if typ=='F': calculation_type='DE-FM'
beg_pca = int(sys.argv[ind+2])
end_pca = int(sys.argv[ind+3])
#
#
datablock= [] # set up list for data
s=""
ind=0
for line in data: # read in the data from standard input
rec=line.split() # split each line on space to get records
if s=="":
s=rec[0]
print(s, calculation_type)
print(ind,rec[1],rec[3],rec[4],rec[2])
ind+=1
datablock.append([float(rec[1]),float(rec[3]),float(rec[4]),float(rec[2]),'0']) # treatment,dec,inc,int,dummy
mpars=pmag.domean(datablock,beg_pca,end_pca,calculation_type)
if calculation_type=="DE-FM":
print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s,calculation_type,mpars["specimen_n"],mpars["measurement_step_min"],mpars["measurement_step_max"],mpars["specimen_a95"],mpars["specimen_dec"],mpars["specimen_inc"]))
else:
print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s,calculation_type,mpars["specimen_n"],mpars["measurement_step_min"],mpars["measurement_step_max"],mpars["specimen_mad"],mpars["specimen_dec"],mpars["specimen_inc"]))
|
def main()
|
NAME
pca.py
DESCRIPTION
calculates best-fit line/plane through demagnetization data
INPUT FORMAT
takes specimen_name treatment intensity declination inclination in space delimited file
SYNTAX
pca.py [command line options][< filename]
OPTIONS
-h prints help and quits
-f FILE
-dir [L,P,F][BEG][END] specify direction type, beginning and end
(L:line, P:plane or F:fisher mean of unit vectors)
BEG: first step (NRM = step zero)
END: last step (NRM = step zero)
< filename for reading from standard input
OUTPUT:
specimen_name calculation_type N beg end MAD dec inc
    if calculation_type is 'P' (best-fit plane), dec and inc are the pole to the plane; otherwise, the best-fit direction
EXAMPLE:
pca.py -dir L 1 5 <ex3.3
    will calculate best-fit line through demagnetization steps 1 and 5 from file ex3.3
| 3.110276
| 2.691899
| 1.155421
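A minimal sketch of the underlying call that main() makes once the input is parsed: pmag.domean on a hand-built datablock. The numbers are made up for illustration and the import assumes the usual pmagpy layout.

from pmagpy import pmag

# rows are [treatment, dec, inc, intensity, dummy], as assembled in main() above; values are invented
datablock = [[0.0,  354.1, 62.3, 2.53e-05, '0'],
             [10.0, 352.9, 61.8, 2.11e-05, '0'],
             [20.0, 351.7, 61.0, 1.75e-05, '0'],
             [30.0, 350.2, 60.4, 1.30e-05, '0']]
mpars = pmag.domean(datablock, 0, 3, 'DE-BFL')    # best-fit line through steps 0-3
print(mpars['specimen_mad'], mpars['specimen_dec'], mpars['specimen_inc'])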
|
# if column name is present, no need to check if it is required
if col_name in df.columns:
return None
arg_list = arg.split(",")
arg_list = [argument.strip('"') for argument in arg_list]
msg = ""
for a in arg_list:
# ignore validations that reference a different table
if "." in a:
continue
if a not in df.columns:
msg += "{} column is required unless {} is present. ".format(col_name, a)
if msg:
return msg
else:
return None
return None
|
def requiredUnless(col_name, arg, dm, df, *args)
|
Arg is a string in the format "str1, str2, ..."
Each string will be a column name.
Col_name is required in df unless each column from arg is present.
| 4.19059
| 4.219822
| 0.993073
|
table_name = arg
if col_name in df.columns:
return None
elif not con:
return None
elif table_name in con.tables:
return None
else:
return "{} column is required unless table {} is present".format(col_name, table_name)
|
def requiredUnlessTable(col_name, arg, dm, df, con=None)
|
Col_name must be present in df unless
arg (table_name) is present in contribution
| 3.495086
| 3.479565
| 1.00446
|
group_name = arg
groups = set()
columns = df.columns
for col in columns:
if col not in dm.index:
continue
group = dm.loc[col]['group']
groups.add(group)
if group_name in groups:
if col_name in columns:
return None
else:
return "{} column is required if column group {} is used".format(col_name, group_name)
return None
|
def requiredIfGroup(col_name, arg, dm, df, *args)
|
Col_name is required if other columns of
the group arg are present.
| 3.312187
| 3.340847
| 0.991421
|
if col_name in df.columns:
return None
else:
return '"{}" column is required'.format(col_name)
|
def required(col_name, arg, dm, df, *args)
|
Col_name is required in df.columns.
Return error message if not.
| 3.498728
| 3.174848
| 1.102014
|
#grade = df.apply(func, args=(validation_name, arg, dm), axis=1)
cell_value = row[col_name]
cell_value = str(cell_value)
if not cell_value:
return None
elif cell_value == 'None':
return None
elif cell_value == 'nan':
return None
elif not con:
return None
# if it's in another table
cell_values = [v.strip(" ") for v in cell_value.split(":")]
if "." in arg:
table_name, table_col_name = arg.split(".")
if table_name not in con.tables:
return None
#return "Must contain a value from {} table. Missing {} table.".format(table_name, table_name)
if table_col_name not in con.tables[table_name].df.columns:
return '{} table is missing "{}" column, which is required for validating "{}" column'.format(table_name, table_col_name, col_name)
possible_values = con.tables[table_name].df[table_col_name].unique()
for value in cell_values:
if value not in possible_values:
trunc_possible_values = [val.replace(' ', '') for val in possible_values if val]
trunc_cell_value = cell_value.replace(' ', '')
if trunc_cell_value not in trunc_possible_values:
if trunc_cell_value != value:
return 'This value (long): "{}" is not found in: {} column in {} table. Also (short): {} is not in {}'.format(value, table_col_name, table_name, trunc_cell_value, arg)
else:
return 'This value: "{}" is not found in: {} column in {} table'.format(value, table_col_name, table_name)
break
# if it's in the present table:
else:
possible_values = df[arg].unique()
for value in cell_values:
if value not in possible_values:
return 'This value: "{}" is not found in: {} column'.format(value, arg)
break
return None
|
def isIn(row, col_name, arg, dm, df, con=None)
|
row[col_name] must contain a value from another column.
If not, return error message.
| 3.012755
| 2.895299
| 1.040568
|
cell_value = row[col_name]
if not cell_value:
return None
elif isinstance(cell_value, float):
if np.isnan(cell_value):
return None
try:
arg_val = float(arg)
except ValueError:
if arg in row.index:
arg_val = row[arg]
else:
return None
if cb.is_null(arg_val):
return None
#arg = float(arg)
try:
if float(cell_value) <= float(arg_val):
return None
else:
return "{} ({}) must be <= {} ({})".format(str(cell_value), col_name, str(arg_val), str(arg))
# this happens when the value isn't a float (an error which will be caught elsewhere)
except ValueError:
return None
|
def checkMax(row, col_name, arg, *args)
|
row[col_name] must be less than or equal to arg
(a number, or the name of another column); otherwise, return an error message.
| 3.187425
| 3.155139
| 1.010233
|
vocabulary = con.vocab.vocabularies
cell_value = str(row[col_name])
if not cell_value:
return None
elif cell_value == "None":
return None
cell_values = cell_value.split(":")
cell_values = [c.strip() for c in cell_values]
# get possible values for controlled vocabulary
# exclude weird unicode
possible_values = []
for val in vocabulary[col_name]:
try:
possible_values.append(str(val).lower())
except UnicodeEncodeError as ex:
print(val, ex)
for value in cell_values:
if str(value).lower() == "nan":
continue
elif str(value).lower() in possible_values:
continue
elif value.lower() == "none":
continue
else:
try:
if str(float(value)) in possible_values:
continue
except:
pass
return '"{}" is not in controlled vocabulary for {}'.format(value, arg)
return None
|
def cv(row, col_name, arg, current_data_model, df, con)
|
row[col_name] must contain only values from the appropriate controlled vocabulary
| 3.379461
| 3.174306
| 1.06463
|
if col_name in df.columns:
# if the column name is present, return nothing
return None
else:
# if the column name is missing, return column name
return col_name
|
def requiredOneInGroup(col_name, group, dm, df, *args)
|
If col_name is present in df, the group validation is satisfied.
If not, it still may be satisfied, but not by THIS col_name.
If col_name is missing, return col_name, else return None.
Later, we will validate to see if there is at least one None (non-missing)
value for this group.
| 4.002305
| 3.726749
| 1.07394
|
# check column validity
required_one = {} # keep track of req'd one in group validations here
cols = df.columns
invalid_cols = [col for col in cols if col not in dm.index]
# go through and run all validations for the data type
for validation_name, validation in dm.iterrows():
value_type = validation['type']
if validation_name in df.columns:
output = df[validation_name].apply(test_type, args=(value_type,))
df["type_pass" + "_" + validation_name + "_" + value_type] = output
#
val_list = validation['validations']
if not val_list or isinstance(val_list, float):
continue
for num, val in enumerate(val_list):
func_name, arg = split_func(val)
if arg == "magic_table_column":
continue
# first validate for presence
if func_name in presence_operations:
func = presence_operations[func_name]
#grade = func(validation_name, df, arg, dm)
grade = func(validation_name, arg, dm, df, con)
pass_col_name = "presence_pass_" + validation_name + "_" + func.__name__
df[pass_col_name] = grade
# then validate for correct values
elif func_name in value_operations:
func = value_operations[func_name]
if validation_name in df.columns:
grade = df.apply(func, args=(validation_name, arg, dm, df, con), axis=1)
col_name = "value_pass_" + validation_name + "_" + func.__name__
if col_name in df.columns:
num_range = list(range(1, 10))
for num in num_range:
if (col_name + str(num)) in df.columns:
continue
else:
col_name = col_name + str(num)
break
df[col_name] = grade.astype(object)
# last, validate at the column group level
elif func_name in group_operations:
func = group_operations[func_name]
missing = func(validation_name, arg, dm, df)
if arg not in required_one:
required_one[arg] = [missing]
else:
required_one[arg].append(missing)
# format the group validation columns
for key, value in list(required_one.items()):
if None in value:
# this means at least one value from the required group is present,
# so the validation passes
continue
else:
# otherwise, all of the values from the required group are missing,
# so the validation fails
df["group_pass_{}".format(key)] = "you must have one column from group {}: {}".format(key, ", ".join(value))
return df
|
def validate_df(df, dm, con=None)
|
Take in a DataFrame and corresponding data model.
Run all validations for that DataFrame.
Output is the original DataFrame with some new columns
that contain the validation output.
Validation columns start with:
presence_pass_ (checking that req'd columns are present)
type_pass_ (checking that the data is of the correct type)
value_pass_ (checking that the value is within the appropriate range)
group_pass_ (making sure that group validations pass)
| 3.600406
| 3.397886
| 1.059602
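A minimal sketch of running validate_df on a table that is already in hand and pulling out the columns it adds, assuming df, dm, and con are the table DataFrame, its data model, and a Contribution as used in validate_table further below.

validated = validate_df(df, dm, con)          # df, dm, con assumed already loaded
# every check writes its result into a new column named after its prefix
value_cols = validated.columns[validated.columns.str.startswith('value_pass_')]
presence_cols = validated.columns[validated.columns.str.startswith('presence_pass_')]
group_cols = validated.columns[validated.columns.str.startswith('group_pass_')]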
|
value_cols = df.columns.str.match("^value_pass_")
present_cols = df.columns.str.match("^presence_pass")
type_cols = df.columns.str.match("^type_pass_")
groups_missing = df.columns.str.match("^group_pass_")
#
value_col_names = df.columns[value_cols]
present_col_names = df.columns[present_cols]
type_col_names = df.columns[type_cols]
group_missing_names = df.columns[groups_missing]
#
# all validation columns
validation_cols = np.where(value_cols, value_cols, present_cols)
validation_cols = np.where(validation_cols, validation_cols, type_cols)
validation_col_names = df.columns[validation_cols]
return value_col_names, present_col_names, type_col_names, group_missing_names, validation_col_names
|
def get_validation_col_names(df)
|
Input: validated pandas DataFrame (using validate_df)
Output: names of all value validation columns,
names of all presence validation columns,
names of all type validation columns,
names of all missing group columns,
names of all validation columns (excluding groups).
| 2.284206
| 1.995639
| 1.144599
|
if outfile_name:
outfile = open(outfile_name, "w")
outfile.write("\t".join(["name", "row_number", "problem_type",
"problem_col", "error_message"]))
outfile.write("\n")
else:
outfile = None
for ind, row in failing_items.iterrows():
issues = row['issues']
string = "{:10} | row number: {}".format(ind, str(row["num"]))
first_string = "\t".join([str(ind), str(row["num"])])
if verbose:
print(first_string)
#if outfile:
# ofile.write("{}\n".format(string))
for key, issue in list(issues.items()):
issue_type, issue_col = extract_col_name(key)
string = "{:10} | {:10} | {}".format(issue_type, issue_col, issue)
string = "\t".join([issue_type, issue_col, issue])
if verbose:
print(string)
if outfile:
outfile.write(first_string + "\t" + string + "\n")
if outfile:
outfile.close()
|
def print_row_failures(failing_items, verbose=False, outfile_name=None)
|
Take output from get_row_failures (DataFrame), and output it to
stdout, an outfile, or both.
| 2.932698
| 2.835089
| 1.034429
|
# set temporary numeric index
df["num"] = list(range(len(df)))
# get column names for value & type validations
names = value_cols.union(type_cols)
# drop all non validation columns
value_problems = df[names.union(["num"])]
# drop validation columns that contain no problems
failing_items = value_problems.dropna(how="all", subset=names)
if not len(failing_items):
if verbose:
print("No problems")
return []
failing_items = failing_items.dropna(how="all", axis=1)
# get names of the failing items
bad_items = list(failing_items.index)
# get index numbers of the failing items
bad_indices = list(failing_items["num"])
failing_items['issues'] = failing_items.drop("num", axis=1).apply(make_row_dict, axis=1).values
# take output and print/write to file
print_row_failures(failing_items, verbose, outfile)
return failing_items
|
def get_row_failures(df, value_cols, type_cols, verbose=False, outfile=None)
|
Input: already validated DataFrame, value & type column names,
and output options.
Get details on each detected issue, row by row.
Output: DataFrame with type & value validation columns,
plus an "issues" column with a dictionary of every problem
for that row.
| 4.707396
| 4.388013
| 1.072785
|
df["num"] = list(range(len(df)))
problems = df[validation_names.union(["num"])]
all_problems = problems.dropna(how='all', axis=0, subset=validation_names)
value_problems = problems.dropna(how='all', axis=0, subset=type_col_names.union(value_col_names))
all_problems = all_problems.dropna(how='all', axis=1)
value_problems = value_problems.dropna(how='all', axis=1)
if not len(problems):
return None, None, None
#
bad_cols = all_problems.columns
prefixes = ["value_pass_", "type_pass_"]
missing_prefix = "presence_pass_"
problem_cols = []
missing_cols = []
long_missing_cols = []
problem_rows = []
for col in bad_cols:
pre, stripped_col = extract_col_name(col)
for prefix in prefixes:
if col.startswith(prefix):
problem_cols.append(stripped_col)
continue
if col.startswith(missing_prefix):
missing_cols.append(stripped_col)
long_missing_cols.append(col)
if len(value_problems):
bad_rows = list(zip(list(value_problems["num"]), list(value_problems.index)))
else:
bad_rows = []
if verbose:
if bad_rows:
formatted_rows = ["row: {}, name: {}".format(row[0], row[1]) for row in bad_rows]
if len(bad_rows) > 5:
print("-W- these rows have problems:\n", "\n".join(formatted_rows[:5]), " ...")
print("(for full error output see error file)")
else:
print("-W- these rows have problems:", "\n".join(formatted_rows))
if problem_cols:
print("-W- these columns contain bad values:", ", ".join(set(problem_cols)))
if missing_cols:
print("-W- these required columns are missing:", ", ".join(missing_cols))
return bad_rows, problem_cols, missing_cols
|
def get_bad_rows_and_cols(df, validation_names, type_col_names,
value_col_names, verbose=False)
|
Input: validated DataFrame, all validation names, names of the type columns,
names of the value columns, verbose (True or False).
Output: list of rows with bad values, list of columns with bad values,
list of missing (but required) columns.
| 2.811609
| 2.844595
| 0.988404
|
print("-I- Validating {}".format(dtype))
# grab dataframe
current_df = the_con.tables[dtype].df
# grab data model
current_dm = the_con.tables[dtype].data_model.dm[dtype]
# run all validations (will add columns to current_df)
current_df = validate_df(current_df, current_dm, the_con)
# get names of the added columns
value_col_names, present_col_names, type_col_names, missing_groups, validation_col_names = get_validation_col_names(current_df)
# print out failure messages
ofile = os.path.join(output_dir, "{}_errors.txt".format(dtype))
failing_items = get_row_failures(current_df, value_col_names,
type_col_names, verbose, outfile=ofile)
bad_rows, bad_cols, missing_cols = get_bad_rows_and_cols(current_df, validation_col_names,
value_col_names, type_col_names,
verbose=True)
# delete all validation rows
current_df.drop(validation_col_names, axis=1, inplace=True)
current_df.drop(missing_groups, axis=1, inplace=True)
if len(failing_items):
print("-I- Complete list of row errors can be found in {}".format(ofile))
return dtype, bad_rows, bad_cols, missing_cols, missing_groups, failing_items
elif len(missing_cols) or len(missing_groups):
print("-I- You are missing some required headers")
if len(missing_cols):
print("-I- You are missing these required headers: {}".format(", ".join(missing_cols)))
if len(missing_groups):
formatted_groups = [group[11:] for group in missing_groups]
print('-I- You need at least one header from these groups: {}'.format(", ".join(formatted_groups)))
else:
formatted_groups = []
return dtype, bad_rows, bad_cols, missing_cols, formatted_groups, failing_items
else:
print("-I- No row errors found!")
return False
|
def validate_table(the_con, dtype, verbose=False, output_dir=".")
|
Return name of bad table, or False if no errors found.
Calls validate_df then parses its output.
| 3.407808
| 3.276973
| 1.039926
|
passing = True
for dtype in list(the_con.tables.keys()):
print("validating {}".format(dtype))
fail = validate_table(the_con, dtype)
if fail:
passing = False
print('--')
|
def validate_contribution(the_con)
|
Go through a Contribution and validate each table
| 6.017747
| 5.010869
| 1.200939
|
ind = string.index("(")
return string[:ind], string[ind+1:-1].strip('"')
|
def split_func(string)
|
Take a string like 'requiredIf("arg_name")'
return the function name and the argument:
(requiredIf, arg_name)
| 4.988427
| 4.717177
| 1.057502
|
vals = ['lon_w', 'lon_e', 'lat_lon_precision', 'pole_lon',
'paleolon', 'paleolon_sigma',
'lon', 'lon_sigma', 'vgp_lon', 'paleo_lon', 'paleo_lon_sigma',
'azimuth', 'azimuth_dec_correction', 'dir_dec',
'geographic_precision', 'bed_dip_direction']
relevant_cols = list(set(vals).intersection(df.columns))
return relevant_cols
|
def get_degree_cols(df)
|
Take in a pandas DataFrame, and return a list of columns
that are in that DataFrame AND should be between 0 - 360 degrees.
| 7.383753
| 7.111902
| 1.038225
|
prefixes = ["presence_pass_", "value_pass_", "type_pass_"]
end = string.rfind("_")
for prefix in prefixes:
if string.startswith(prefix):
return prefix[:-6], string[len(prefix):end]
return string, string
|
def extract_col_name(string)
|
Take a string and split it.
String will be a format like "presence_pass_azimuth_required",
where "azimuth" is the MagIC column name, "presence_pass"
is the validation, and "required" is the name of the validating function.
Return "presence", "azimuth".
| 6.548547
| 4.377915
| 1.495814
|
ind = row[row.notnull()].index
values = row[row.notnull()].values
# to transformation with extract_col_name here???
return dict(list(zip(ind, values)))
|
def make_row_dict(row)
|
Takes in a DataFrame row (Series)
and returns a dictionary with the row's index entries as keys
and the row's values as values.
{col1_name: col1_value, col2_name: col2_value}
| 13.64119
| 12.705272
| 1.073664
|
out=""
UP=0
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
dat=[]
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
input=f.readlines()
else:
input = sys.stdin.readlines() # read from standard input
# NEW
ofile = ""
if '-F' in sys.argv:
ind=sys.argv.index('-F')
ofile=sys.argv[ind+1]
    out=open(ofile, 'a')  # 'w + a' is not a valid file mode; append mode creates the file if needed
# end NEW
if '-up' in sys.argv: UP=1
for line in input:
rec=line.split()
x,y=float(rec[1]),float(rec[0]) # swap x,y cartesian for x,y geographic
#d,i=pmag.doeqdi(x,y)
r=math.sqrt(x**2+y**2)
z=1.-r**2
t=math.asin(z)
if UP==1:t=-t
if x==0.:
if y<0:
p=3.*math.pi/2.
else:
p=old_div(math.pi,2.)
else:
p=math.atan2(y,x)
d,i=p*180./math.pi,t*180./math.pi
if d<0:d+=360.
# new
outstring = '%7.1f %7.1f'%(d,i)
if ofile == "":
# print '%7.1f %7.1f'%(d,i)
print(outstring)
else:
out.write(outstring+'\n')
|
def main()
|
NAME
eq_di.py
DESCRIPTION
converts x,y pairs digitized from equal area projection to dec inc data
SYNTAX
eq_di.py [command line options] [< filename]
OPTIONS
-f FILE, input file
-F FILE, specifies output file name
-up if data are upper hemisphere
| 3.528221
| 3.241194
| 1.088556
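A worked version of the same conversion for a single digitized pair, lower hemisphere (UP=0); the x,y values are made up and the results are rounded.

import math

x, y = 0.5, 0.0                    # digitized coordinates (columns already swapped, as in main())
r = math.sqrt(x**2 + y**2)         # 0.5
z = 1. - r**2                      # 0.75
t = math.asin(z)                   # inclination, radians
p = math.atan2(y, x)               # declination, radians
d, i = math.degrees(p), math.degrees(t)
# d, i -> 0.0, 48.6 (approximately)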
|
print(main.__doc__)
if '-h' in sys.argv:sys.exit()
cont,Int,Top=1,[],[]
while cont==1:
try:
Int.append(int(input(" Enter desired treatment step interval: <return> to quit ")))
Top.append(int(input(" Enter upper bound for this interval: ")))
except:
cont=0
pmag.chart_maker(Int,Top)
|
def main()
|
Welcome to the thellier-thellier experiment automatic chart maker.
Please select desired step interval and upper bound for which it is valid.
e.g.,
     50
     500
     10
     600
which would generate steps with 50 degree intervals up to 500, followed by 10 degree intervals up to 600.
A blank entry signals the end of data entry.
chart is stored in: chart.txt
| 10.486938
| 7.362645
| 1.424344
|
inc=[]
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-i' in sys.argv: # ask for filename
file=input("Enter file name with inc data: ")
inc=numpy.loadtxt(file)
elif '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
inc=numpy.loadtxt(file)
else:
    inc = numpy.loadtxt(sys.stdin,dtype=float)  # numpy.float is deprecated/removed in newer numpy
ofile=""
if '-F' in sys.argv:
ind = sys.argv.index('-F')
ofile= sys.argv[ind+1]
    out = open(ofile, 'a')  # 'w + a' is not a valid file mode; append mode creates the file if needed
#
#get doincfish to do the dirty work:
fpars= pmag.doincfish(inc)
outstring='%7.1f %7.1f %i %8.1f %7.1f %7.1f'%(fpars['ginc'],fpars['inc'],fpars['n'],fpars['r'],fpars['k'],fpars['alpha95'])
if ofile == "":
print(outstring)
else:
out.write(outstring+'\n')
|
def main()
|
NAME
incfish.py
DESCRIPTION
calculates fisher parameters from inc only data
INPUT FORMAT
takes inc data
SYNTAX
incfish.py [options] [< filename]
OPTIONS
-h prints help message and quits
-i for interactive filename entry
-f FILE, specify input file name
-F FILE, specify output file name
< filename for reading from standard input
OUTPUT
mean inc,Fisher inc, N, R, k, a95
NOTES
    takes the absolute value of inclinations (to take into account reversals),
    but returns the Gaussian mean if the mean inclination is < 50.0 degrees,
    because of polarity ambiguity and lack of bias.
| 4.308864
| 3.282904
| 1.312516
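The heavy lifting is done by pmag.doincfish, as called in main() above; a minimal sketch with made-up inclination values, assuming the usual pmagpy layout.

import numpy
from pmagpy import pmag

inc = numpy.array([55., 60., 58., 62., 57.])     # made-up inclination-only data
fpars = pmag.doincfish(inc)
print(fpars['ginc'], fpars['inc'], fpars['n'], fpars['r'], fpars['k'], fpars['alpha95'])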
|
decorated = [(len(dict_[sort_on]) if hasattr(dict_[sort_on], '__len__')
              else dict_[sort_on], index)
             for (index, dict_) in enumerate(undecorated)]
decorated.sort()
return [undecorated[index] for (key, index) in decorated]
|
def sort_diclist(undecorated, sort_on)
|
Sort a list of dictionaries by the value in each
dictionary for the sorting key
Parameters
----------
undecorated : list of dicts
sort_on : str, numeric
key that is present in all dicts to sort on
Returns
---------
ordered list of dicts
Examples
---------
>>> lst = [{'key1': 10, 'key2': 2}, {'key1': 1, 'key2': 20}]
>>> sort_diclist(lst, 'key1')
[{'key2': 20, 'key1': 1}, {'key2': 2, 'key1': 10}]
>>> sort_diclist(lst, 'key2')
[{'key2': 2, 'key1': 10}, {'key2': 20, 'key1': 1}]
| 3.801591
| 4.806747
| 0.790886
|
if float_to_int:
try:
v = str(math.trunc(float(v)))
except ValueError: # catches non floatable strings
pass
except TypeError: # catches None
pass
fixed_In = []
for dictionary in In:
if k in dictionary:
val = dictionary[k]
try:
val = str(math.trunc(float(val)))
except ValueError: # catches non floatable strings
pass
except TypeError: # catches None
pass
dictionary[k] = val
fixed_In.append(dictionary)
In = fixed_In
if flag == "T":
# return that which is
return [dictionary for dictionary in In if k in list(dictionary.keys()) and str(dictionary[k]).lower() == str(v).lower()]
if flag == "F":
# return that which is not
return [dictionary for dictionary in In if k in list(dictionary.keys()) and str(dictionary[k]).lower() != str(v).lower()]
if flag == "has":
# return that which is contained
return [dictionary for dictionary in In if k in list(dictionary.keys()) and str(v).lower() in str(dictionary[k]).lower()]
if flag == "not":
# return that which is not contained
return [dictionary for dictionary in In if k in list(dictionary.keys()) and str(v).lower() not in str(dictionary[k]).lower()]
if flag == "eval":
A = [dictionary for dictionary in In if k in list(dictionary.keys(
)) and dictionary[k] != ''] # find records with no blank values for key
# return that which is
return [dictionary for dictionary in A if k in list(dictionary.keys()) and float(dictionary[k]) == float(v)]
if flag == "min":
A = [dictionary for dictionary in In if k in list(dictionary.keys(
)) and dictionary[k] != ''] # find records with no blank values for key
# return that which is greater than
return [dictionary for dictionary in A if k in list(dictionary.keys()) and float(dictionary[k]) >= float(v)]
if flag == "max":
A = [dictionary for dictionary in In if k in list(dictionary.keys(
)) and dictionary[k] != ''] # find records with no blank values for key
# return that which is less than
return [dictionary for dictionary in A if k in list(dictionary.keys()) and float(dictionary[k]) <= float(v)]
if flag == 'not_null':
return [dictionary for dictionary in In if dictionary[k]]
|
def get_dictitem(In, k, v, flag, float_to_int=False)
|
returns a list of dictionaries from list In with key k = value v (CASE INSENSITIVE).
Requires that the value of k in the dictionaries contained in In be castable to string,
that v be castable to a string if flag is T, F, has or not,
and that both be castable to float if flag is eval, min, or max.
float_to_int goes through the relevant values in In and truncates them
(like "0.0" to "0") for evaluation; default is False.
Parameters
__________
In : list of dictionaries to work on
k : key to test
v : key value to test
flag : [T, F, has, not, eval, min, max, not_null]
float_to_int : if True, truncates to integer
Returns
______
list of dictionaries that meet condition
| 1.933703
| 1.780541
| 1.08602
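A few toy examples of the flags, assuming get_dictitem as defined above is in scope; the records are invented.

recs = [{'site': 'A1', 'dir_k': '120', 'method_codes': 'LP-DIR-AF'},
        {'site': 'A2', 'dir_k': '15',  'method_codes': 'LP-PI-TRM'}]

get_dictitem(recs, 'site', 'a1', 'T')                 # exact, case-insensitive match -> first record only
get_dictitem(recs, 'method_codes', 'LP-DIR', 'has')   # substring match -> first record only
get_dictitem(recs, 'dir_k', 50, 'min')                # numeric comparison -> records with dir_k >= 50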
|
Out = []
for d in In:
if dtype == '':
Out.append(d[k])
if dtype == 'f':
if d[k] == "":
Out.append(0)
elif d[k] == None:
Out.append(0)
else:
Out.append(float(d[k]))
if dtype == 'int':
if d[k] == "":
Out.append(0)
elif d[k] == None:
Out.append(0)
else:
Out.append(int(d[k]))
return Out
|
def get_dictkey(In, k, dtype)
|
returns list of given key (k) from input list of dictionaries (In) in data type dtype. uses command:
get_dictkey(In,k,dtype). If dtype =="", data are strings; if "int", data are integers; if "f", data are floats.
| 1.876867
| 1.62434
| 1.155464
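A toy companion example, again assuming the function above is in scope; blank values come back as 0 for the numeric dtypes.

recs = [{'dir_k': '120'}, {'dir_k': '15'}, {'dir_k': ''}]    # invented records
get_dictkey(recs, 'dir_k', 'f')    # -> [120.0, 15.0, 0]
get_dictkey(recs, 'dir_k', '')     # -> ['120', '15', '']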
|
if isinstance(samp_data, pd.DataFrame):
samp_data = (samp_data.T.apply(dict))
# set orientation priorities
EX = ["SO-ASC", "SO-POM"]
samp_key, az_key, dip_key = 'er_sample_name', 'sample_azimuth', 'sample_dip'
disc_key, or_key, meth_key = 'sample_description', 'sample_orientation_flag',\
'magic_method_codes'
if 'data_model' in list(kwargs.keys()) and kwargs['data_model'] == 3:
samp_key, az_key, dip_key = 'sample', 'azimuth', 'dip'
disc_key, or_key, meth_key = 'description', 'orientation_quality',\
'method_codes'
orient = {samp_key: er_sample_name, az_key: "",
dip_key: "", disc_key: ""}
# get all the orientation data for this sample
orients = get_dictitem(samp_data, samp_key, er_sample_name, 'T')
if len(orients) > 0 and or_key in list(orients[0].keys()):
# exclude all samples with bad orientation flag
orients = get_dictitem(orients, or_key, 'b', 'F')
if len(orients) > 0:
orient = orients[0] # re-initialize to first one
methods = get_dictitem(orients, meth_key, 'SO-', 'has')
# get a list of all orientation methods for this sample
methods = get_dictkey(methods, meth_key, '')
SO_methods = []
for methcode in methods:
meths = methcode.split(":")
for meth in meths:
if (meth.strip() not in EX) and meth.startswith('SO-'):
SO_methods.append(meth.strip())
# find top priority orientation method
if len(SO_methods) == 0:
print("no orientation data for sample ", er_sample_name)
# preserve meta-data anyway even though orientation is bad
# get all the orientation data for this sample
orig_data = get_dictitem(samp_data, samp_key, er_sample_name, 'T')
if len(orig_data) > 0:
orig_data = orig_data[0]
else:
orig_data = []
az_type = "SO-NO"
else:
SO_priorities = set_priorities(SO_methods, 0)
az_type = SO_methods[SO_methods.index(SO_priorities[0])]
            orient = get_dictitem(orients, meth_key, az_type, 'has')[0]  # re-initialize to best one
return orient, az_type
|
def get_orient(samp_data, er_sample_name, **kwargs)
|
samp_data : PmagPy list of dicts or pandas DataFrame
er_sample_name : sample name
| 3.7603
| 3.61183
| 1.041107
|
poly_tk03 = [3.15976125e-06, -3.52459817e-04,
             -1.46641090e-02, 2.89538539e+00]
return poly_tk03[0] * inc**3 + poly_tk03[1] * inc**2 + poly_tk03[2] * inc + poly_tk03[3]
|
def EI(inc)
|
Given a mean inclination value of a distribution of directions, this
function calculates the expected elongation of this distribution using a
best-fit polynomial of the TK03 GAD secular variation model (Tauxe and
Kent, 2004).
Parameters
----------
inc : inclination in degrees (int or float)
Returns
---------
elongation : float
Examples
---------
>>> pmag.EI(20)
2.4863973732
>>> pmag.EI(90)
1.0241570135500004
| 4.507572
| 4.183495
| 1.077466
|
rad = np.pi/180.
Es, Is, Fs, V2s = [], [], [], []
ppars = doprinc(data)
D = ppars['dec']
Decs, Incs = data.transpose()[0], data.transpose()[1]
Tan_Incs = np.tan(Incs * rad)
for f in np.arange(1., .2, -.01):
U = old_div(np.arctan((old_div(1., f)) * Tan_Incs), rad)
fdata = np.array([Decs, U]).transpose()
ppars = doprinc(fdata)
Fs.append(f)
Es.append(old_div(ppars["tau2"], ppars["tau3"]))
ang = angle([D, 0], [ppars["V2dec"], 0])
if 180. - ang < ang:
ang = 180. - ang
V2s.append(ang)
Is.append(abs(ppars["inc"]))
if EI(abs(ppars["inc"])) <= Es[-1]:
del Es[-1]
del Is[-1]
del Fs[-1]
del V2s[-1]
if len(Fs) > 0:
for f in np.arange(Fs[-1], .2, -.005):
U = old_div(np.arctan((old_div(1., f)) * Tan_Incs), rad)
fdata = np.array([Decs, U]).transpose()
ppars = doprinc(fdata)
Fs.append(f)
Es.append(old_div(ppars["tau2"], ppars["tau3"]))
Is.append(abs(ppars["inc"]))
ang = angle([D, 0], [ppars["V2dec"], 0])
if 180. - ang < ang:
ang = 180. - ang
V2s.append(ang)
if EI(abs(ppars["inc"])) <= Es[-1]:
return Es, Is, Fs, V2s
return [0], [0], [0], [0]
|
def find_f(data)
|
Given a distribution of directions, this function determines parameters
(elongation, inclination, flattening factor, and elongation direction) that
are consistent with the TK03 secular variation model.
Parameters
----------
data : array of declination, inclination pairs
(e.g. np.array([[140,21],[127,23],[142,19],[136,22]]))
Returns
---------
Es : list of elongation values
Is : list of inclination values
Fs : list of flattening factors
V2s : list of elongation directions (relative to the distribution)
The function will return a zero list ([0]) for each of these parameters if the directions constitute a pathological distribution.
Examples
---------
>>> directions = np.array([[140,21],[127,23],[142,19],[136,22]])
>>> Es, Is, Fs, V2s = pmag.find_f(directions)
| 2.786108
| 2.552096
| 1.091694
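A hedged sketch of how the output is typically read: for a well-behaved distribution the last entries of the returned lists correspond to the flattening factor at which the elongation/inclination pair first reaches the TK03 (EI) curve; for a pathological distribution the lists are just [0].

import numpy as np
from pmagpy import pmag

directions = np.array([[140, 21], [127, 23], [142, 19], [136, 22]])
Es, Is, Fs, V2s = pmag.find_f(directions)
if Fs != [0]:
    f_crossing, inc_crossing = Fs[-1], Is[-1]    # values at (or just past) the crossing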
|
New = []
for rec in Recs:
if 'model_lat' in list(rec.keys()) and rec['model_lat'] != "":
New.append(rec)
elif 'average_age' in list(rec.keys()) and rec['average_age'] != "" and float(rec['average_age']) <= 5.:
if 'site_lat' in list(rec.keys()) and rec['site_lat'] != "":
rec['model_lat'] = rec['site_lat']
New.append(rec)
elif 'average_inc' in list(rec.keys()) and rec['average_inc'] != "":
rec['model_lat'] = '%7.1f' % (plat(float(rec['average_inc'])))
New.append(rec)
return New
|
def convert_lat(Recs)
|
uses model_lat if present; otherwise, for ages < 5 Ma, uses site_lat if present,
else tries to use average_inc to estimate plat.
| 2.540287
| 1.976513
| 1.285237
|
if data_model == 3:
site_key = 'site'
agekey = "age"
keybase = ""
else:
site_key = 'er_site_names'
agekey = find('age', list(rec.keys()))
if agekey != "":
keybase = agekey.split('_')[0] + '_'
New = []
for rec in Recs:
age = ''
if rec[keybase + 'age'] != "":
age = float(rec[keybase + "age"])
elif rec[keybase + 'age_low'] != "" and rec[keybase + 'age_high'] != '':
        age = np.mean([float(rec[keybase + 'age_high']),
                       float(rec[keybase + "age_low"])])
# age = float(rec[keybase + 'age_low']) + old_div(
# (float(rec[keybase + 'age_high']) - float(rec[keybase + 'age_low'])), 2.)
if age != '':
rec[keybase + 'age_unit']
if rec[keybase + 'age_unit'] == 'Ma':
rec[keybase + 'age'] = '%10.4e' % (age)
elif rec[keybase + 'age_unit'] == 'ka' or rec[keybase + 'age_unit'] == 'Ka':
rec[keybase + 'age'] = '%10.4e' % (age * .001)
elif rec[keybase + 'age_unit'] == 'Years AD (+/-)':
rec[keybase + 'age'] = '%10.4e' % ((2011 - age) * 1e-6)
elif rec[keybase + 'age_unit'] == 'Years BP':
rec[keybase + 'age'] = '%10.4e' % ((age) * 1e-6)
rec[keybase + 'age_unit'] = 'Ma'
New.append(rec)
else:
        if site_key in list(rec.keys()):
            print('problem in convert_ages:', rec[site_key])
        elif 'er_site_name' in list(rec.keys()):
            print('problem in convert_ages:', rec['er_site_name'])
else:
print('problem in convert_ages:', rec)
if len(New) == 0:
print('no age key:', rec)
return New
|
def convert_ages(Recs, data_model=3)
|
converts ages to Ma
Parameters
_________
Recs : list of dictionaries in the format given by data_model
data_model : MagIC data model (default is 3)
| 2.608892
| 2.515391
| 1.037172
|
new_recs = []
for rec in data:
new_rec = map_magic.mapping(rec, mapping)
new_recs.append(new_rec)
return new_recs
|
def convert_items(data, mapping)
|
Input: list of dicts (each dict a record for one item),
mapping with column names to swap into the records.
Output: updated list of dicts.
| 3.700096
| 3.805186
| 0.972382
|
convert = {'specimens': map_magic.spec_magic2_2_magic3_map,
'samples': map_magic.samp_magic2_2_magic3_map,
'sites': map_magic.site_magic2_2_magic3_map,
'locations': map_magic.loc_magic2_2_magic3_map,
'ages': map_magic.age_magic2_2_magic3_map}
full_name = os.path.join(input_dir, meas_fname)
if not os.path.exists(full_name):
print("-W- {} is not a file".format(full_name))
return False, False, False
# read in data model 2.5 measurements file
data2, filetype = magic_read(full_name)
# convert list of dicts to 3.0
NewMeas = convert_items(data2, map_magic.meas_magic2_2_magic3_map)
# write 3.0 output to file
ofile = os.path.join(output_dir, 'measurements.txt')
magic_write(ofile, NewMeas, 'measurements')
upgraded = []
if os.path.exists(ofile):
print("-I- 3.0 format measurements file was successfully created: {}".format(ofile))
upgraded.append("measurements.txt")
else:
print("-W- 3.0 format measurements file could not be created")
#
no_upgrade = []
if not meas_only:
# try to convert specimens, samples, sites, & locations
for dtype in ['specimens', 'samples', 'sites', 'locations', 'ages']:
mapping = convert[dtype]
res = convert_and_combine_2_to_3(
dtype, mapping, input_dir, output_dir, data_model)
if res:
upgraded.append(res)
# try to upgrade criteria file
if os.path.exists(os.path.join(input_dir, 'pmag_criteria.txt')):
crit_file = convert_criteria_file_2_to_3(input_dir=input_dir,
output_dir=output_dir,
data_model=data_model)[0]
if crit_file:
upgraded.append(crit_file)
else:
no_upgrade.append("pmag_criteria.txt")
# create list of all un-upgradeable files
for fname in os.listdir(input_dir):
if fname in ['measurements.txt', 'specimens.txt', 'samples.txt',
'sites.txt', 'locations.txt']:
continue
elif 'rmag' in fname:
no_upgrade.append(fname)
elif fname in ['pmag_results.txt', 'er_synthetics.txt', 'er_images.txt',
'er_plots.txt']:
no_upgrade.append(fname)
return NewMeas, upgraded, no_upgrade
|
def convert_directory_2_to_3(meas_fname="magic_measurements.txt", input_dir=".",
output_dir=".", meas_only=False, data_model=None)
|
Convert 2.0 measurements file into 3.0 measurements file.
Merge and convert specimen, sample, site, and location data.
Also translates criteria data.
Parameters
----------
meas_fname : name of measurement file (do not include full path,
default is "magic_measurements.txt")
input_dir : name of input directory (default is ".")
output_dir : name of output directory (default is ".")
meas_only : boolean, convert only measurement data (default is False)
data_model : data_model3.DataModel object (default is None)
Returns
---------
NewMeas : 3.0 measurements data (output of pmag.convert_items)
upgraded : list of files successfully upgraded to 3.0
no_upgrade: list of 2.5 files not upgraded to 3.0
| 2.962224
| 2.523801
| 1.173716
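A minimal usage sketch, assuming the converter lives in pmagpy.pmag (the docstring above refers to pmag.convert_items); the directory names are placeholders.

from pmagpy import pmag

NewMeas, upgraded, no_upgrade = pmag.convert_directory_2_to_3(
    'magic_measurements.txt', input_dir='old_project', output_dir='converted')   # placeholder dirs
print('upgraded:', upgraded)
print('not upgraded:', no_upgrade)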
|
# read in er_ data & make DataFrame
er_file = os.path.join(input_dir, 'er_{}.txt'.format(dtype))
er_data, er_dtype = magic_read(er_file)
if len(er_data):
er_df = pd.DataFrame(er_data)
if dtype == 'ages':
pass
# remove records with blank ages
#er_data = get_dictitem(er_data, 'age', '', "F")
#er_df = pd.DataFrame(er_data)
else:
er_df.index = er_df['er_{}_name'.format(dtype[:-1])]
else:
er_df = pd.DataFrame()
#
if dtype == 'ages':
full_df = er_df
else:
# read in pmag_ data & make DataFrame
pmag_file = os.path.join(input_dir, 'pmag_{}.txt'.format(dtype))
pmag_data, pmag_dtype = magic_read(pmag_file)
if len(pmag_data):
pmag_df = pd.DataFrame(pmag_data)
pmag_df.index = pmag_df['er_{}_name'.format(dtype[:-1])]
else:
pmag_df = pd.DataFrame()
# combine the two Dataframes
full_df = pd.concat([er_df, pmag_df], sort=True)
# sort the DataFrame so that all records from one item are together
full_df.sort_index(inplace=True)
# fix the column names to be 3.0
full_df.rename(columns=map_dict, inplace=True)
# create a MagicDataFrame object, providing the dataframe and the data type
new_df = cb.MagicDataFrame(dtype=dtype, df=full_df, dmodel=data_model)
# write out the data to file
if len(new_df.df):
new_df.write_magic_file(dir_path=output_dir)
return dtype + ".txt"
else:
print("-I- No {} data found.".format(dtype))
return None
|
def convert_and_combine_2_to_3(dtype, map_dict, input_dir=".", output_dir=".", data_model=None)
|
Read in er_*.txt file and pmag_*.txt file in working directory.
Combine the data, then translate headers from 2.5 --> 3.0.
Last, write out the data in 3.0.
Parameters
----------
dtype : string for input type (specimens, samples, sites, etc.)
map_dict : dictionary with format {header2_format: header3_format, ...} (from mapping.map_magic module)
input_dir : input directory, default "."
output_dir : output directory, default "."
data_model : data_model3.DataModel object, default None
Returns
---------
output_file_name with 3.0 format data (or None if translation failed)
| 3.11743
| 2.8061
| 1.110948
|
# get criteria from infile
fname = os.path.join(input_dir, fname)
if not os.path.exists(fname):
return False, None
orig_crit, warnings = read_criteria_from_file(fname, initialize_acceptance_criteria(),
data_model=2, return_warnings=True)
converted_crit = {}
# get data model including criteria map
if not data_model:
from . import data_model3 as dm3
DM = dm3.DataModel()
else:
DM = data_model
crit_map = DM.crit_map
# drop all empty mappings
stripped_crit_map = crit_map.dropna(axis='rows')
# go through criteria and get 3.0 name and criterion_operation
for crit in orig_crit:
if orig_crit[crit]['value'] in [-999, '-999', -999.]:
continue
if crit in stripped_crit_map.index:
criterion_operation = stripped_crit_map.loc[crit]['criteria_map']['criterion_operation']
table_col = stripped_crit_map.loc[crit]['criteria_map']['table_column']
orig_crit[crit]['criterion_operation'] = criterion_operation
converted_crit[table_col] = orig_crit[crit]
else:
print('-W- Could not convert {} to 3.0, skipping'.format(crit))
# switch axes
converted_df = pd.DataFrame(converted_crit).transpose()
# name the index
converted_df.index.name = "table_column"
# rename columns to 3.0 values
# 'category' --> criterion (uses defaults from initialize_default_criteria)
# 'pmag_criteria_code' --> criterion (uses what's actually in the translated file)
converted_df.rename(columns={'pmag_criteria_code': 'criterion', 'er_citation_names': 'citations',
'criteria_definition': 'description', 'value': 'criterion_value'},
inplace=True)
# drop unused columns
valid_cols = DM.dm['criteria'].index
drop_cols = set(converted_df.columns) - set(valid_cols)
converted_df.drop(drop_cols, axis='columns', inplace=True)
# move 'table_column' from being the index to being a column
converted_df['table_column'] = converted_df.index
crit_container = cb.MagicDataFrame(dtype='criteria', df=converted_df)
crit_container.write_magic_file(dir_path=output_dir)
return "criteria.txt", crit_container
|
def convert_criteria_file_2_to_3(fname="pmag_criteria.txt", input_dir=".",
output_dir=".", data_model=None)
|
Convert a criteria file from 2.5 to 3.0 format and write it out to file
Parameters
----------
fname : string of filename (default "pmag_criteria.txt")
input_dir : string of input directory (default ".")
output_dir : string of output directory (default ".")
data_model : data_model.DataModel object (default None)
Returns
---------
outfile : string output criteria filename, or False
crit_container : cb.MagicDataFrame with 3.0 criteria table
| 4.459457
| 4.181808
| 1.066395
|
or_con = str(or_con)
if mag_azimuth == -999:
return "", ""
if or_con == "1": # lab_mag_az=mag_az; sample_dip = -dip
return mag_azimuth, -field_dip
if or_con == "2":
return mag_azimuth - 90., -field_dip
if or_con == "3": # lab_mag_az=mag_az; sample_dip = 90.-dip
return mag_azimuth, 90. - field_dip
if or_con == "4": # lab_mag_az=mag_az; sample_dip = dip
return mag_azimuth, field_dip
if or_con == "5": # lab_mag_az=mag_az; sample_dip = dip-90.
return mag_azimuth, field_dip - 90.
if or_con == "6": # lab_mag_az=mag_az-90.; sample_dip = 90.-dip
return mag_azimuth - 90., 90. - field_dip
if or_con == "7": # lab_mag_az=mag_az; sample_dip = 90.-dip
return mag_azimuth - 90., 90. - field_dip
print("Error in orientation convention")
|
def orient(mag_azimuth, field_dip, or_con)
|
uses specified orientation convention to convert user supplied orientations
to laboratory azimuth and plunge
| 1.913188
| 1.862993
| 1.026943
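A few worked examples taken straight from the branches above, assuming orient as defined here is in scope.

orient(175., 20., "3")     # -> (175.0, 70.0): azimuth unchanged, dip becomes 90 - 20
orient(175., 20., "2")     # -> (85.0, -20.0): azimuth rotated by -90, dip negated
orient(-999, 20., "1")     # -> ("", ""): a missing azimuth is passed through as blanks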
|
Sb, N = 0., 0.
for rec in data:
delta = 90. - abs(rec['vgp_lat'])
if rec['average_k'] != 0:
k = rec['average_k']
L = rec['average_lat'] * np.pi / 180. # latitude in radians
Nsi = rec['average_nn']
K = old_div(k, (2. * (1. + 3. * np.sin(L)**2) /
(5. - 3. * np.sin(L)**2)))
Sw = old_div(81., np.sqrt(K))
else:
Sw, Nsi = 0, 1.
Sb += delta**2. - old_div((Sw**2), Nsi)
N += 1.
return np.sqrt(old_div(Sb, float(N - 1.)))
|
def get_Sb(data)
|
returns vgp scatter for data set
| 4.709038
| 4.441119
| 1.060327
|
df['delta'] = 90.-df.vgp_lat
Sp2 = np.sum(df.delta**2)/(df.shape[0]-1)
if 'dir_k' in df.columns and mm97:
ks = df.dir_k
Ns = df.dir_n
Ls = np.radians(df.lat)
A95s = 140./np.sqrt(ks*Ns)
Sw2_n = 0.335*(A95s**2)*(2.*(1.+3.*np.sin(Ls)**2) /
(5.-3.*np.sin(Ls)**2))
return np.sqrt(Sp2-Sw2_n.mean())
else:
return np.sqrt(Sp2)
|
def get_sb_df(df, mm97=False)
|
Calculates Sf for a dataframe with VGP latitude; optionally, Fisher's k, site latitude,
and N information can be used to correct for within-site scatter (McElhinny & McFadden, 1997)
Parameters
_________
df : Pandas Dataframe with columns
REQUIRED:
vgp_lat : VGP latitude
ONLY REQUIRED for MM97 correction:
dir_k : Fisher kappa estimate
dir_n : number of specimens (samples) per site
lat : latitude of the site
mm97 : if True, will do the correction for within site scatter
Returns:
_______
Sf : Sf
| 6.288589
| 4.855157
| 1.295239
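A worked example of the uncorrected case (mm97=False) on made-up VGP latitudes, assuming get_sb_df as defined above is in scope.

import pandas as pd

df = pd.DataFrame({'vgp_lat': [80., 70., 85.]})   # invented VGP latitudes
# deltas are 10, 20 and 5 degrees, so Sp^2 = (100 + 400 + 25) / (3 - 1) = 262.5
get_sb_df(df, mm97=False)                         # -> ~16.2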
|
ppars = doprinc(di_block)  # get principal direction
if combine:
D3 = []
D1, D2 = [], []
for rec in di_block:
ang = angle([rec[0], rec[1]], [ppars['dec'], ppars['inc']])
if ang > 90.:
d, i = (rec[0] - 180.) % 360., -rec[1]
D2.append([d, i])
if combine:
D3.append([d, i])
else:
D1.append([rec[0], rec[1]])
if combine:
D3.append([rec[0], rec[1]])
if combine:
return D3
else:
return D1, D2
|
def flip(di_block, combine=False)
|
determines the 'normal' direction along the principal eigenvector, then flips the
reverse-mode directions to their antipodes
Parameters
___________
di_block : nested list of directions
combine : if True, return a single combined list of D,I pairs with the reverse mode flipped
Return
D1 : normal mode
D2 : flipped reverse mode as two DI blocks (a single combined list is returned instead if combine=True)
| 3.608239
| 3.444214
| 1.047624
|
vds, X = 0, []
for rec in data:
X.append(dir2cart(rec))
for k in range(len(X) - 1):
xdif = X[k + 1][0] - X[k][0]
ydif = X[k + 1][1] - X[k][1]
zdif = X[k + 1][2] - X[k][2]
vds += np.sqrt(xdif**2 + ydif**2 + zdif**2)
vds += np.sqrt(X[-1][0]**2 + X[-1][1]**2 + X[-1][2]**2)
return vds
|
def dovds(data)
|
calculates vector difference sum for demagnetization data
| 2.105141
| 1.944698
| 1.082502
|
vdata, Dirdata, step_meth = [], [], ""
if len(data) == 0:
return vdata
treat_init = ["treatment_temp", "treatment_temp_decay_rate", "treatment_temp_dc_on", "treatment_temp_dc_off", "treatment_ac_field", "treatment_ac_field_decay_rate", "treatment_ac_field_dc_on",
"treatment_ac_field_dc_off", "treatment_dc_field", "treatment_dc_field_decay_rate", "treatment_dc_field_ac_on", "treatment_dc_field_ac_off", "treatment_dc_field_phi", "treatment_dc_field_theta"]
treats = []
#
# find keys that are used
#
for key in treat_init:
if key in list(data[0].keys()):
treats.append(key) # get a list of keys
stop = {}
stop["er_specimen_name"] = "stop"
for key in treats:
stop[key] = "" # tells program when to quit and go home
data.append(stop)
#
# set initial states
#
DataState0, newstate = {}, 0
for key in treats:
DataState0[key] = data[0][key] # set beginning treatment
k, R = 1, 0
for i in range(k, len(data)):
FDirdata, Dirdata, DataStateCurr, newstate = [], [], {}, 0
for key in treats: # check if anything changed
DataStateCurr[key] = data[i][key]
if DataStateCurr[key].strip() != DataState0[key].strip():
newstate = 1 # something changed
if newstate == 1:
if i == k: # sample is unique
vdata.append(data[i - 1])
else: # measurement is not unique
# print "averaging: records " ,k,i
for l in range(k - 1, i):
if 'orientation' in data[l]['measurement_description']:
data[l]['measurement_description'] = ""
Dirdata.append([float(data[l]['measurement_dec']), float(
data[l]['measurement_inc']), float(data[l]['measurement_magn_moment'])])
FDirdata.append(
[float(data[l]['measurement_dec']), float(data[l]['measurement_inc'])])
dir, R = vector_mean(Dirdata)
Fpars = fisher_mean(FDirdata)
vrec = data[i - 1]
vrec['measurement_dec'] = '%7.1f' % (dir[0])
vrec['measurement_inc'] = '%7.1f' % (dir[1])
vrec['measurement_magn_moment'] = '%8.3e' % (
old_div(R, (i - k + 1)))
vrec['measurement_csd'] = '%7.1f' % (Fpars['csd'])
vrec['measurement_positions'] = '%7.1f' % (Fpars['n'])
vrec['measurement_description'] = 'average of multiple measurements'
if "magic_method_codes" in list(vrec.keys()):
meths = vrec["magic_method_codes"].strip().split(":")
if "DE-VM" not in meths:
meths.append("DE-VM")
methods = ""
for meth in meths:
methods = methods + meth + ":"
vrec["magic_method_codes"] = methods[:-1]
else:
vrec["magic_method_codes"] = "DE-VM"
vdata.append(vrec)
# reset state to new one
for key in treats:
DataState0[key] = data[i][key] # set beginning treatment
k = i + 1
if data[i]["er_specimen_name"] == "stop":
del data[-1] # get rid of dummy stop sign
return vdata, treats
|
def vspec_magic(data)
|
Takes average vector of replicate measurements
| 3.487965
| 3.409069
| 1.023143
|
# sort the specimen names
speclist = []
for rec in data:
try:
spec = rec["er_specimen_name"]
except KeyError as e:
spec = rec["specimen"]
if spec not in speclist:
speclist.append(spec)
speclist.sort()
return speclist
|
def get_specs(data)
|
Takes a list of magic-format records and returns a sorted list of unique specimen names
| 3.537832
| 3.001507
| 1.178685
|
Xbar = np.zeros((3))
X = dir2cart(data).transpose()
for i in range(3):
Xbar[i] = X[i].sum()
R = np.sqrt(Xbar[0]**2+Xbar[1]**2+Xbar[2]**2)
Xbar = Xbar/R
dir = cart2dir(Xbar)
return dir, R
|
def vector_mean(data)
|
calculates the vector mean of a given set of vectors
Parameters
__________
data : nested array of [dec,inc,intensity]
Returns
_______
dir : array of [dec, inc, 1]
R : resultant vector length
| 3.290087
| 2.944184
| 1.117487
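A worked example with two orthogonal unit vectors, assuming vector_mean (and its dir2cart/cart2dir helpers) are in scope.

data = [[0., 0., 1.], [90., 0., 1.]]    # dec, inc, intensity for two unit vectors
mean_dir, R = vector_mean(data)
# mean_dir -> [45., 0., 1.], R -> sqrt(2) ~ 1.414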
|
datablock = []
for rec in data:
if rec['er_specimen_name'] == s:
meths = rec['magic_method_codes'].split(':')
if 'LT-NO' in meths or 'LT-AF-Z' in meths or 'LT-T-Z' in meths:
datablock.append(rec)
dmagrec = datablock[ind]
for k in range(len(data)):
meths = data[k]['magic_method_codes'].split(':')
if 'LT-NO' in meths or 'LT-AF-Z' in meths or 'LT-T-Z' in meths:
if data[k]['er_specimen_name'] == s:
if data[k]['treatment_temp'] == dmagrec['treatment_temp'] and data[k]['treatment_ac_field'] == dmagrec['treatment_ac_field']:
if data[k]['measurement_dec'] == dmagrec['measurement_dec'] and data[k]['measurement_inc'] == dmagrec['measurement_inc'] and data[k]['measurement_magn_moment'] == dmagrec['measurement_magn_moment']:
if data[k]['measurement_flag'] == 'g':
flag = 'b'
else:
flag = 'g'
data[k]['measurement_flag'] = flag
break
return data
|
def mark_dmag_rec(s, ind, data)
|
Edits demagnetization data to mark "bad" points with measurement_flag
| 2.18193
| 2.20996
| 0.987317
|
try:
with codecs.open(infile, "r", "utf-8") as f:
lines = list(f.readlines())
# file might not exist
except FileNotFoundError:
if verbose:
print(
'-W- You are trying to open a file: {} that does not exist'.format(infile))
return []
# encoding might be wrong
except UnicodeDecodeError:
try:
with codecs.open(infile, "r", "Latin-1") as f:
print(
'-I- Using less strict decoding for {}, output may have formatting errors'.format(infile))
lines = list(f.readlines())
# if file exists, and encoding is correct, who knows what the problem is
except Exception as ex:
print("-W- ", type(ex), ex)
return []
except Exception as ex:
print("-W- ", type(ex), ex)
return []
# don't leave a blank line at the end
i = 0
while i < 10:
if not len(lines[-1].strip("\n").strip("\t")):
lines = lines[:-1]
i += 1
else:
i = 10
return lines
|
def open_file(infile, verbose=True)
|
Open file and return a list of the file's lines.
Try to use utf-8 encoding, and if that fails use Latin-1.
Parameters
----------
infile : str
full path to file
Returns
----------
data: list
all lines in the file
| 3.845847
| 3.837408
| 1.002199
|
DATA = {}
#fin = open(path, 'r')
#first_line = fin.readline()
lines = open_file(path)
if not lines:
if return_keys:
return {}, 'empty_file', None
else:
return {}, 'empty_file'
first_line = lines.pop(0)
if first_line[0] == "s" or first_line[1] == "s":
delim = ' '
elif first_line[0] == "t" or first_line[1] == "t":
delim = '\t'
else:
print('-W- error reading ', path)
if return_keys:
return {}, 'bad_file', None
else:
return {}, 'bad_file'
file_type = first_line.strip('\n').strip('\r').split(delim)[1]
item_type = file_type
#item_type = file_type.split('_')[1][:-1]
if sort_by_this_name:
pass
elif item_type == 'age':
sort_by_this_name = "by_line_number"
else:
sort_by_this_name = item_type
line = lines.pop(0)
header = line.strip('\n').strip('\r').split(delim)
counter = 0
for line in lines:
tmp_data = {}
tmp_line = line.strip('\n').strip('\r').split(delim)
for i in range(len(header)):
if i < len(tmp_line):
tmp_data[header[i]] = tmp_line[i].strip()
else:
tmp_data[header[i]] = ""
if sort_by_this_name == "by_line_number":
DATA[counter] = tmp_data
counter += 1
else:
if tmp_data[sort_by_this_name] != "":
DATA[tmp_data[sort_by_this_name]] = tmp_data
if return_keys:
return DATA, file_type, header
else:
return DATA, file_type
|
def magic_read_dict(path, data=None, sort_by_this_name=None, return_keys=False)
|
Read a magic-formatted tab-delimited file and return a dictionary of
dictionaries, with this format:
{'Z35.5a': {'specimen_weight': '1.000e-03', 'er_citation_names': 'This study', 'specimen_volume': '', 'er_location_name': '', 'er_site_name': 'Z35.', 'er_sample_name': 'Z35.5', 'specimen_class': '', 'er_specimen_name': 'Z35.5a', 'specimen_lithology': '', 'specimen_type': ''}, ....}
return data, file_type, and keys (if return_keys is true)
| 2.210306
| 2.158959
| 1.023783
|
'''
Sort magic_data by header (like er_specimen_name for example)
'''
magic_data_sorted = {}
for rec in magic_data:
name = rec[sort_name]
if name not in list(magic_data_sorted.keys()):
magic_data_sorted[name] = []
magic_data_sorted[name].append(rec)
return magic_data_sorted
|
def sort_magic_data(magic_data, sort_name)
|
Sort magic_data by header (like er_specimen_name for example)
| 3.401698
| 2.016388
| 1.687025
|
delim = 'tab'
hold, magic_data, magic_record, magic_keys = [], [], {}, []
f = open(infile, "r")
#
# look for right table
#
line = f.readline()[:-1]
file_type = line.split('\t')[1]
if file_type == 'delimited':
file_type = line.split('\t')[2]
if delim == 'tab':
line = f.readline()[:-1].split('\t')
else:
f.close()
print("only tab delimitted files are supported now")
return
while file_type != table:
while line[0][0:5] in f.readlines() != ">>>>>":
pass
line = f.readline()[:-1]
file_type = line.split('\t')[1]
if file_type == 'delimited':
file_type = line.split('\t')[2]
    line = f.readline()[:-1].split('\t')
while line[0][0:5] in f.readlines() != ">>>>>":
for key in line:
magic_keys.append(key)
for line in f.readlines():
rec = line[:-1].split('\t')
hold.append(rec)
for rec in hold:
magic_record = {}
if len(magic_keys) != len(rec):
print("Uneven record lengths detected: ", rec)
input("Return to continue.... ")
for k in range(len(magic_keys)):
magic_record[magic_keys[k]] = rec[k]
magic_data.append(magic_record)
f.close()
return magic_data
|
def upload_read(infile, table)
|
Reads a table from a MagIC upload (or download) txt file and returns
the data as a list of dictionaries.
| 3.517844
| 3.467792
| 1.014433
|
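A hypothetical call, assuming a downloaded contribution file named magic_contribution.txt that contains a specimens table (both names are illustrative):

specimen_recs = upload_read('magic_contribution.txt', 'specimens')
# specimen_recs is a list of dicts, one per row of the requested table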
pmag_out = open(ofile, 'a')
outstring = ""
for key in keylist:
try:
outstring = outstring + '\t' + str(Rec[key]).strip()
except:
print(key, Rec[key])
# raw_input()
outstring = outstring + '\n'
pmag_out.write(outstring[1:])
pmag_out.close()
|
def putout(ofile, keylist, Rec)
|
Appends a single MagIC-format record to ofile, writing the fields listed in keylist.
| 2.924839
| 2.791511
| 1.047762
|
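A hypothetical example appending one record (file and column names are illustrative):

rec = {'specimen': 'Z35.5a', 'treat_temp': '373', 'magn_moment': '2.3e-09'}
keys = ['specimen', 'treat_temp', 'magn_moment']
putout('measurements.txt', keys, rec)   # appends one tab-delimited line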
keylist = []
opened = False
# sometimes Windows needs a little extra time to open a file
# or else it throws an error
while not opened:
try:
pmag_out = open(ofile, 'w')
opened = True
except IOError:
time.sleep(1)
outstring = "tab \t" + file_type + "\n"
pmag_out.write(outstring)
keystring = ""
for key in list(Rec.keys()):
keystring = keystring + '\t' + key.strip()
keylist.append(key)
keystring = keystring + '\n'
pmag_out.write(keystring[1:])
pmag_out.close()
return keylist
|
def first_rec(ofile, Rec, file_type)
|
Opens ofile as a new MagIC template file, writes the file-type line and a header line built from the keys of Rec, and returns the key list.
| 3.533392
| 3.555578
| 0.99376
|
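first_rec writes only the two header lines and returns the key order; records are then appended with putout. A hypothetical sequence (file and column names are illustrative):

rec1 = {'specimen': 'Z35.5a', 'treat_temp': '273', 'magn_moment': '2.5e-09'}
rec2 = {'specimen': 'Z35.5a', 'treat_temp': '373', 'magn_moment': '2.3e-09'}
keys = first_rec('measurements.txt', rec1, 'measurements')
putout('measurements.txt', keys, rec1)
putout('measurements.txt', keys, rec2)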
if len(Recs) < 1:
print ('nothing to write')
return
pmag_out = open(ofile, 'w')
outstring = "tab \t" + file_type + "\n"
pmag_out.write(outstring)
keystring = ""
keylist = []
for key in list(Recs[0].keys()):
keylist.append(key)
keylist.sort()
for key in keylist:
keystring = keystring + '\t' + key.strip()
keystring = keystring + '\n'
pmag_out.write(keystring[1:])
for Rec in Recs:
outstring = ""
for key in keylist:
try:
outstring = outstring + '\t' + str(Rec[key].strip())
except:
if 'er_specimen_name' in list(Rec.keys()):
print(Rec['er_specimen_name'])
elif 'er_specimen_names' in list(Rec.keys()):
print(Rec['er_specimen_names'])
print(key, Rec[key])
# raw_input()
outstring = outstring + '\n'
pmag_out.write(outstring[1:])
pmag_out.close()
|
def magic_write_old(ofile, Recs, file_type)
|
writes out a magic format list of dictionaries to ofile
Parameters
_________
ofile : path to output file
Recs : list of dictionaries in MagIC format
file_type : MagIC table type (e.g., specimens)
Effects :
writes a MagIC formatted file from Recs
| 2.227152
| 2.228367
| 0.999455
|
if len(Recs) < 1:
print('No records to write to file {}'.format(ofile))
return False, ""
if os.path.split(ofile)[0] != "" and not os.path.isdir(os.path.split(ofile)[0]):
os.mkdir(os.path.split(ofile)[0])
pmag_out = open(ofile, 'w+', errors="backslashreplace")
outstring = "tab \t" + file_type
outstring = outstring.strip("\n").strip(
"\r") + "\n" # make sure it's clean for Windows
pmag_out.write(outstring)
keystring = ""
keylist = []
for key in list(Recs[0].keys()):
keylist.append(key)
keylist.sort()
for key in keylist:
keystring = keystring + '\t' + key.strip()
keystring = keystring + '\n'
pmag_out.write(keystring[1:])
for Rec in Recs:
outstring = ""
for key in keylist:
try:
outstring = outstring + '\t' + str(Rec[key]).strip()
except KeyError:
if 'er_specimen_name' in list(Rec.keys()):
print(Rec['er_specimen_name'])
elif 'specimen' in list(Rec.keys()):
print(Rec['specimen'])
elif 'er_specimen_names' in list(Rec.keys()):
print('specimen names:', Rec['er_specimen_names'])
print("No data for %s" % key)
# just skip it:
outstring = outstring + "\t"
# raw_input()
outstring = outstring + '\n'
pmag_out.write(outstring[1:])
pmag_out.close()
print(len(Recs), ' records written to file ', ofile)
return True, ofile
|
def magic_write(ofile, Recs, file_type)
|
Parameters
_________
ofile : path to output file
Recs : list of dictionaries in MagIC format
file_type : MagIC table type (e.g., specimens)
Return :
[True,False] : True if successful
ofile : same as input
Effects :
writes a MagIC formatted file from Recs
| 2.420057
| 2.406595
| 1.005594
|
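A hypothetical call writing a small specimens table in one step (values are illustrative):

specimens = [{'specimen': 'Z35.5a', 'sample': 'Z35.5', 'volume': '1e-05'},
             {'specimen': 'Z36.1b', 'sample': 'Z36.1', 'volume': '1.2e-05'}]
ok, outfile = magic_write('specimens.txt', specimens, 'specimens')
# ok is True on success; outfile echoes the path that was written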
rad = old_div(np.pi, 180.) # converts from degrees to radians
X = dir2cart([dec, inc, 1.]) # get cartesian coordinates of dec,inc
# get some sines and cosines of new coordinate system
sa, ca = -np.sin(bed_az * rad), np.cos(bed_az * rad)
cdp, sdp = np.cos(bed_dip * rad), np.sin(bed_dip * rad)
# do the rotation
xc = X[0] * (sa * sa + ca * ca * cdp) + X[1] * \
(ca * sa * (1. - cdp)) + X[2] * sdp * ca
yc = X[0] * ca * sa * (1. - cdp) + X[1] * \
(ca * ca + sa * sa * cdp) - X[2] * sa * sdp
zc = X[0] * ca * sdp - X[1] * sdp * sa - X[2] * cdp
# convert back to direction:
Dir = cart2dir([xc, yc, -zc])
# return declination, inclination of rotated direction
return Dir[0], Dir[1]
|
def dotilt(dec, inc, bed_az, bed_dip)
|
Does a tilt correction on a direction (dec,inc) using bedding dip direction
and bedding dip.
Parameters
----------
dec : declination in degrees
inc : inclination in degrees
bed_az : bedding dip direction
bed_dip : bedding dip
Returns
-------
dec,inc : a tuple of rotated dec, inc values
Examples
-------
>>> pmag.dotilt(91.2,43.1,90.0,20.0)
(90.952568837153436, 23.103411670066617)
| 3.46855
| 3.559769
| 0.974375
|
indat = indat.transpose()
# unpack input array into separate arrays
dec, inc, bed_az, bed_dip = indat[0], indat[1], indat[2], indat[3]
rad = old_div(np.pi, 180.) # convert to radians
Dir = np.array([dec, inc]).transpose()
X = dir2cart(Dir).transpose() # get cartesian coordinates
N = np.size(dec)
# get some sines and cosines of new coordinate system
sa, ca = -np.sin(bed_az * rad), np.cos(bed_az * rad)
cdp, sdp = np.cos(bed_dip * rad), np.sin(bed_dip * rad)
# do the rotation
xc = X[0] * (sa * sa + ca * ca * cdp) + X[1] * \
(ca * sa * (1. - cdp)) + X[2] * sdp * ca
yc = X[0] * ca * sa * (1. - cdp) + X[1] * \
(ca * ca + sa * sa * cdp) - X[2] * sa * sdp
zc = X[0] * ca * sdp - X[1] * sdp * sa - X[2] * cdp
# convert back to direction:
cart = np.array([xc, yc, -zc]).transpose()
Dir = cart2dir(cart).transpose()
# return declination, inclination arrays of rotated direction
return Dir[0], Dir[1]
|
def dotilt_V(indat)
|
Does a tilt correction on an array with rows of dec,inc bedding dip direction and dip.
Parameters
----------
indat : nested array of [[dec1, inc1, bed_az1, bed_dip1], [dec2, inc2, bed_az2, bed_dip2], ...]
each row holds declination, inclination, bedding dip direction, and bedding dip
Returns
-------
dec,inc : arrays of rotated declination, inclination
| 3.499204
| 3.229907
| 1.083376
|
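A hypothetical vectorized call; each row is [dec, inc, bedding dip direction, bedding dip], and row 0 reproduces the single-direction dotilt example above:

import numpy as np
indat = np.array([[91.2, 43.1, 90.0, 20.0],
                  [10.0, 45.0, 120.0, 10.0]])
decs, incs = dotilt_V(indat)
# decs[0], incs[0] should be ~ (90.95, 23.10),
# matching dotilt(91.2, 43.1, 90.0, 20.0)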