| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q11500
|
MagICMenu.on_import1
|
train
|
def on_import1(self, event):
"""
initialize window to import an arbitrary file into the working directory
"""
pmag_menu_dialogs.MoveFileIntoWD(self.parent, self.parent.WD)
|
python
|
{
"resource": ""
}
|
q11501
|
MagICMenu.orient_import2
|
train
|
def orient_import2(self, event):
"""
initialize window to import an AzDip format file into the working directory
"""
pmag_menu_dialogs.ImportAzDipFile(self.parent, self.parent.WD)
|
python
|
{
"resource": ""
}
|
q11502
|
_UTMLetterDesignator
|
train
|
def _UTMLetterDesignator(Lat):
"""
This routine determines the correct UTM letter designator for the given latitude
returns 'Z' if latitude is outside the UTM limits of 84N to 80S
Written by Chuck Gantz- chuck.gantz@globalstar.com
"""
if 84 >= Lat >= 72: return 'X'
elif 72 > Lat >= 64: return 'W'
elif 64 > Lat >= 56: return 'V'
elif 56 > Lat >= 48: return 'U'
elif 48 > Lat >= 40: return 'T'
elif 40 > Lat >= 32: return 'S'
elif 32 > Lat >= 24: return 'R'
elif 24 > Lat >= 16: return 'Q'
elif 16 > Lat >= 8: return 'P'
elif 8 > Lat >= 0: return 'N'
elif 0 > Lat >=-8: return 'M'
elif -8 > Lat >=-16: return 'L'
elif -16 > Lat >=-24: return 'K'
elif -24 > Lat >=-32: return 'J'
elif -32 > Lat >=-40: return 'H'
elif -40 > Lat >=-48: return 'G'
elif -48 > Lat >=-56: return 'F'
elif -56 > Lat >=-64: return 'E'
elif -64 > Lat >=-72: return 'D'
elif -72 > Lat >=-80: return 'C'
else: return 'Z'
|
python
|
{
"resource": ""
}
|
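A quick sanity check of the latitude-band mapping above (a usage sketch, assuming _UTMLetterDesignator is importable as defined):

for lat, expected in [(45.0, 'T'), (-33.0, 'H'), (85.0, 'Z')]:
    # each latitude falls in the band documented in the function above
    assert _UTMLetterDesignator(lat) == expected
print("UTM letter bands behave as documented")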
q11503
|
MainFrame.highlight_problems
|
train
|
def highlight_problems(self, has_problems):
"""
Outline grid buttons in red if they have validation errors
"""
if has_problems:
self.validation_mode = set(has_problems)
# highlighting doesn't work with Windows
            if sys.platform in ['win32', 'win64']:
self.message.SetLabel('The following grid(s) have incorrect or incomplete data:\n{}'.format(', '.join(self.validation_mode)))
# highlighting does work with OSX
else:
for dtype in ["specimens", "samples", "sites", "locations", "ages", "measurements"]:
wind = self.FindWindowByName(dtype + '_btn')
if dtype not in has_problems:
wind.Unbind(wx.EVT_PAINT, handler=self.highlight_button)
else:
wind.Bind(wx.EVT_PAINT, self.highlight_button)
self.Refresh()
self.message.SetLabel('Highlighted grids have incorrect or incomplete data')
self.bSizer_msg.ShowItems(True)
# manually fire a paint event to make sure all buttons
# are highlighted/unhighlighted appropriately
paintEvent = wx.CommandEvent(wx.wxEVT_PAINT,
self.GetId())
self.GetEventHandler().ProcessEvent(paintEvent)
else:
self.message.SetLabel("Validated!")
self.bSizer_msg.ShowItems(True)
self.hbox.Fit(self)
|
python
|
{
"resource": ""
}
|
q11504
|
MainFrame.reset_highlights
|
train
|
def reset_highlights(self):
"""
Remove red outlines from all buttons
"""
for dtype in ["specimens", "samples", "sites", "locations", "ages"]:
wind = self.FindWindowByName(dtype + '_btn')
wind.Unbind(wx.EVT_PAINT, handler=self.highlight_button)
self.Refresh()
#self.message.SetLabel('Highlighted grids have incorrect or incomplete data')
self.bSizer_msg.ShowItems(False)
self.hbox.Fit(self)
|
python
|
{
"resource": ""
}
|
q11505
|
MainFrame.highlight_button
|
train
|
def highlight_button(self, event):
"""
Draw a red highlight line around the event object
"""
wind = event.GetEventObject()
pos = wind.GetPosition()
size = wind.GetSize()
try:
dc = wx.PaintDC(self)
except wx._core.PyAssertionError:
            # if it's not a native paint event, we can't use wx.PaintDC
dc = wx.ClientDC(self)
dc.SetPen(wx.Pen('red', 5, wx.SOLID))
dc.DrawRectangle(pos[0], pos[1], size[0], size[1])
event.Skip()
|
python
|
{
"resource": ""
}
|
q11506
|
MagICMenu.on_close_grid
|
train
|
def on_close_grid(self, event):
"""
If there is an open grid, save its data and close it.
"""
if self.parent.grid_frame:
self.parent.grid_frame.onSave(None)
self.parent.grid_frame.Destroy()
|
python
|
{
"resource": ""
}
|
q11507
|
main
|
train
|
def main():
"""
NAME
dipole_plat.py
DESCRIPTION
gives paleolatitude from given inclination, assuming GAD field
SYNTAX
        dipole_plat.py [command line options] < filename
OPTIONS
-h prints help message and quits
        -i allows interactive entry of inclination
-f file, specifies file name on command line
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
elif '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
elif '-i' not in sys.argv:
data=sys.stdin.readlines()
if '-i' not in sys.argv:
for line in data:
rec=line.split()
print('%7.1f'%(pmag.plat(float(rec[0]))))
else:
while 1:
try:
inc=input("Inclination for converting to paleolatitude: <cntl-D> to quit ")
print('%7.1f'%(pmag.plat(float(inc))))
except:
print('\n Good-bye \n')
sys.exit()
|
python
|
{
"resource": ""
}
|
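dipole_plat.py delegates the conversion to pmag.plat. Since the docstring states a GAD field is assumed, presumably pmag.plat applies the standard dipole relation tan(I) = 2 tan(latitude); a minimal stand-in sketch (not the PmagPy implementation itself):

import math

def gad_paleolat(inc_degrees):
    # geocentric axial dipole (GAD): tan(I) = 2 * tan(latitude)
    return math.degrees(math.atan(math.tan(math.radians(inc_degrees)) / 2.0))

print('%7.1f' % gad_paleolat(60.0))  # about 40.9 degrees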
q11508
|
main
|
train
|
def main():
"""
NAME
pmag_results_extract.py
DESCRIPTION
make a tab delimited output file from pmag_results table
SYNTAX
pmag_results_extract.py [command line options]
OPTIONS
-h prints help message and quits
-f RFILE, specify pmag_results table; default is pmag_results.txt
-fa AFILE, specify er_ages table; default is NONE
-fsp SFILE, specify pmag_specimens table, default is NONE
-fcr CFILE, specify pmag_criteria table, default is NONE
-g include specimen_grade in table - only works for PmagPy generated pmag_specimen formatted files.
-tex, output in LaTeX format
"""
do_help = pmag.get_flag_arg_from_sys('-h')
if do_help:
print(main.__doc__)
return False
res_file = pmag.get_named_arg('-f', 'pmag_results.txt')
crit_file = pmag.get_named_arg('-fcr', '')
spec_file = pmag.get_named_arg('-fsp', '')
age_file = pmag.get_named_arg('-fa', '')
grade = pmag.get_flag_arg_from_sys('-g')
latex = pmag.get_flag_arg_from_sys('-tex')
WD = pmag.get_named_arg('-WD', os.getcwd())
ipmag.pmag_results_extract(res_file, crit_file, spec_file, age_file, latex, grade, WD)
|
python
|
{
"resource": ""
}
|
q11509
|
main
|
train
|
def main():
"""
NAME
grab_magic_key.py
DESCRIPTION
        picks out key and prints it to standard output
SYNTAX
        grab_magic_key.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file
-key KEY: specify key to print to standard output
"""
dir_path = "./"
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind = sys.argv.index('-f')
magic_file = dir_path+'/'+sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
if '-key' in sys.argv:
ind = sys.argv.index('-key')
grab_key = sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
#
#
# get data read in
Data, file_type = pmag.magic_read(magic_file)
if len(Data) > 0:
for rec in Data:
print(rec[grab_key])
else:
print('bad file name')
|
python
|
{
"resource": ""
}
|
q11510
|
main
|
train
|
def main():
"""
NAME
plot_2cdfs.py
DESCRIPTION
makes plots of cdfs of data in input file
SYNTAX
plot_2cdfs.py [-h][command line options]
OPTIONS
-h prints help message and quits
-f FILE1 FILE2
-t TITLE
-fmt [svg,eps,png,pdf,jpg..] specify format of output figure, default is svg
"""
fmt='svg'
title=""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
X=numpy.loadtxt(file)
file=sys.argv[ind+2]
X2=numpy.loadtxt(file)
# else:
# X=numpy.loadtxt(sys.stdin,dtype=numpy.float)
else:
print('-f option required')
print(main.__doc__)
sys.exit()
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-t' in sys.argv:
ind=sys.argv.index('-t')
title=sys.argv[ind+1]
CDF={'X':1}
pmagplotlib.plot_init(CDF['X'],5,5)
pmagplotlib.plot_cdf(CDF['X'],X,'','r','')
pmagplotlib.plot_cdf(CDF['X'],X2,title,'b','')
D,p=scipy.stats.ks_2samp(X,X2)
if p>=.05:
print(D,p,' not rejected at 95%')
else:
print(D,p,' rejected at 95%')
pmagplotlib.draw_figs(CDF)
ans= input('S[a]ve plot, <Return> to quit ')
if ans=='a':
files={'X':'CDF_.'+fmt}
pmagplotlib.save_plots(CDF,files)
|
python
|
{
"resource": ""
}
|
q11511
|
main
|
train
|
def main():
"""
NAME
reorder_samples.py
DESCRIPTION
takes specimen file and reorders sample file with selected orientation methods placed first
SYNTAX
reorder_samples.py [command line options]
OPTIONS
-h prints help message and quits
-fsp: specimen input pmag_specimens format file, default is "pmag_specimens.txt"
-fsm: sample input er_samples format file, default is "er_samples.txt"
-F: output er_samples format file, default is "er_samples.txt"
    OUTPUT
writes re-ordered er_samples.txt file
"""
infile='pmag_specimens.txt'
sampfile="er_samples.txt"
outfile="er_samples.txt"
# get command line stuff
if "-h" in sys.argv:
print(main.__doc__)
sys.exit()
if '-fsp' in sys.argv:
ind=sys.argv.index("-fsp")
infile=sys.argv[ind+1]
if '-fsm' in sys.argv:
ind=sys.argv.index("-fsm")
sampfile=sys.argv[ind+1]
if '-F' in sys.argv:
ind=sys.argv.index("-F")
outfile=sys.argv[ind+1]
if '-WD' in sys.argv:
ind=sys.argv.index("-WD")
dir_path=sys.argv[ind+1]
infile=dir_path+'/'+infile
sampfile=dir_path+'/'+sampfile
outfile=dir_path+'/'+outfile
# now do re-ordering
pmag.ReorderSamples(infile,sampfile,outfile)
|
python
|
{
"resource": ""
}
|
q11512
|
get_intensity_col
|
train
|
def get_intensity_col(data):
"""
Check measurement dataframe for intensity columns 'magn_moment', 'magn_volume', 'magn_mass','magn_uncal'.
Return the first intensity column that is in the dataframe AND has data.
Parameters
----------
data : pandas DataFrame
Returns
---------
str
intensity method column or ""
"""
# possible intensity columns
intlist = ['magn_moment', 'magn_volume', 'magn_mass','magn_uncal']
# intensity columns that are in the data
int_meths = [col_name for col_name in data.columns if col_name in intlist]
    # drop fully null columns (dropna returns a new frame, so reassign)
    data = data.dropna(axis='columns', how='all')
    # ignore columns that were dropped or contain only blank values (including "")
    for col_name in int_meths[:]:
        if col_name not in data.columns or not data[col_name].any():
            int_meths.remove(col_name)
if len(int_meths):
if 'magn_moment' in int_meths:
return 'magn_moment'
return int_meths[0]
return ""
|
python
|
{
"resource": ""
}
|
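A small usage sketch of the selection rule above, with invented column values, assuming get_intensity_col and pandas are importable:

import pandas as pd

df = pd.DataFrame({'magn_volume': [1.0e-3, 2.0e-3],
                   'magn_moment': [None, None]})
# 'magn_moment' is present but empty, so the first populated column wins
print(get_intensity_col(df))  # -> 'magn_volume'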
q11513
|
prep_for_intensity_plot
|
train
|
def prep_for_intensity_plot(data, meth_code, dropna=(), reqd_cols=()):
"""
Strip down measurement data to what is needed for an intensity plot.
Find the column with intensity data.
Drop empty columns, and make sure required columns are present.
Keep only records with the specified method code.
Parameters
----------
data : pandas DataFrame
measurement dataframe
meth_code : str
MagIC method code to include, i.e. 'LT-AF-Z'
dropna : list
columns that must not be empty
reqd_cols : list
columns that must be present
Returns
----------
status : bool
True if successful, else False
data : pandas DataFrame
measurement data with required columns
"""
# initialize
dropna = list(dropna)
reqd_cols = list(reqd_cols)
# get intensity column
try:
magn_col = get_intensity_col(data)
except AttributeError:
return False, "Could not get intensity method from data"
# drop empty columns
if magn_col not in dropna:
dropna.append(magn_col)
data = data.dropna(axis=0, subset=dropna)
# add to reqd_cols list
if 'method_codes' not in reqd_cols:
reqd_cols.append('method_codes')
if magn_col not in reqd_cols:
reqd_cols.append(magn_col)
# drop non reqd cols, make sure all reqd cols are present
try:
data = data[reqd_cols]
except KeyError as ex:
print(ex)
missing = set(reqd_cols).difference(data.columns)
return False, "missing these required columns: {}".format(", ".join(missing))
# filter out records without the correct method code
data = data[data['method_codes'].str.contains(meth_code).astype(bool)]
return True, data
|
python
|
{
"resource": ""
}
|
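A hedged usage sketch: the measurement values and column choices below are invented for illustration, and it assumes prep_for_intensity_plot and get_intensity_col (above) are importable together with pandas:

import pandas as pd

meas = pd.DataFrame([
    {'specimen': 'spc01', 'method_codes': 'LT-AF-Z', 'treat_ac_field': 0.01, 'magn_moment': 2.1e-9},
    {'specimen': 'spc01', 'method_codes': 'LT-AF-Z', 'treat_ac_field': 0.02, 'magn_moment': 1.6e-9},
    {'specimen': 'spc01', 'method_codes': 'LT-T-Z',  'treat_temp': 373.0,    'magn_moment': 1.2e-9},
])
ok, af_data = prep_for_intensity_plot(meas, 'LT-AF-Z',
                                      dropna=['treat_ac_field'],
                                      reqd_cols=['specimen', 'treat_ac_field'])
if ok:
    print(af_data)  # only the two LT-AF-Z rows, plus method_codes and magn_moment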
q11514
|
Contribution.add_empty_magic_table
|
train
|
def add_empty_magic_table(self, dtype, col_names=None, groups=None):
"""
Add a blank MagicDataFrame to the contribution.
You can provide either a list of column names,
or a list of column group names.
If provided, col_names takes precedence.
"""
if dtype not in self.table_names:
print("-W- {} is not a valid MagIC table name".format(dtype))
print("-I- Valid table names are: {}".format(", ".join(self.table_names)))
return
data_container = MagicDataFrame(dtype=dtype, columns=col_names, groups=groups)
self.tables[dtype] = data_container
|
python
|
{
"resource": ""
}
|
q11515
|
Contribution.add_magic_table_from_data
|
train
|
def add_magic_table_from_data(self, dtype, data):
"""
Add a MagIC table to the contribution from a data list
Parameters
----------
dtype : str
MagIC table type, i.e. 'specimens'
data : list of dicts
            data list with format [{'key1': 'val1', ...}, {'key1': 'val2', ...}, ...]
"""
self.tables[dtype] = MagicDataFrame(dtype=dtype, data=data)
if dtype == 'measurements':
self.tables['measurements'].add_sequence()
return dtype, self.tables[dtype]
|
python
|
{
"resource": ""
}
|
q11516
|
Contribution.add_magic_table
|
train
|
def add_magic_table(self, dtype, fname=None, df=None):
"""
Read in a new file to add a table to self.tables.
Requires dtype argument and EITHER filename or df.
Parameters
----------
dtype : str
MagIC table name (plural, i.e. 'specimens')
fname : str
filename of MagIC format file
(short path, directory is self.directory)
default: None
df : pandas DataFrame
data to create the new table with
default: None
"""
if df is None:
# if providing a filename but no data type
if dtype == "unknown":
filename = os.path.join(self.directory, fname)
if not os.path.exists(filename):
return False, False
data_container = MagicDataFrame(filename, dmodel=self.data_model)
dtype = data_container.dtype
if dtype == 'empty':
return False, False
else:
self.tables[dtype] = data_container
return dtype, data_container
# if providing a data type, use the canonical filename
elif dtype not in self.filenames:
print('-W- "{}" is not a valid MagIC table type'.format(dtype))
print("-I- Available table types are: {}".format(", ".join(self.table_names)))
return False, False
#filename = os.path.join(self.directory, self.filenames[dtype])
filename = pmag.resolve_file_name(self.filenames[dtype], self.directory)
if os.path.exists(filename):
data_container = MagicDataFrame(filename, dtype=dtype,
dmodel=self.data_model)
if data_container.dtype != "empty":
self.tables[dtype] = data_container
return dtype, data_container
else:
return False, False
else:
#print("-W- No such file: {}".format(filename))
return False, False
# df is not None
else:
if not dtype:
print("-W- Must provide dtype")
return False, False
data_container = MagicDataFrame(dtype=dtype, df=df)
self.tables[dtype] = data_container
self.tables[dtype].sort_dataframe_cols()
return dtype, self.tables[dtype]
|
python
|
{
"resource": ""
}
|
q11517
|
Contribution.propagate_measurement_info
|
train
|
def propagate_measurement_info(self):
"""
Take a contribution with a measurement table.
Create specimen, sample, site, and location tables
using the unique names in the measurement table to fill in
the index.
"""
meas_df = self.tables['measurements'].df
names_list = ['specimen', 'sample', 'site', 'location']
# add in any tables that you can
for num, name in enumerate(names_list):
# don't replace tables that already exist
if (name + "s") in self.tables:
continue
elif name in meas_df.columns:
items = meas_df[name].unique()
df = pd.DataFrame(columns=[name], index=items)
df[name] = df.index
# add in parent name if possible
# (i.e., sample name to specimens table)
if num < (len(names_list) - 1):
parent = names_list[num+1]
if parent in meas_df.columns:
meas_df = meas_df.where(meas_df.notnull(), "")
df[parent] = meas_df.drop_duplicates(subset=[name])[parent].values.astype(str)
df = df.where(df != "", np.nan)
df = df.dropna(how='all', axis='rows')
if len(df):
self.tables[name + "s"] = MagicDataFrame(dtype=name + "s", df=df)
self.write_table_to_file(name + "s")
|
python
|
{
"resource": ""
}
|
q11518
|
Contribution.get_parent_and_child
|
train
|
def get_parent_and_child(self, table_name):
"""
Get the name of the parent table and the child table
for a given MagIC table name.
Parameters
----------
table_name : string of MagIC table name ['specimens', 'samples', 'sites', 'locations']
Returns
-------
parent_name : string of parent table name
child_name : string of child table name
"""
if table_name not in self.ancestry:
return None, None
parent_ind = self.ancestry.index(table_name) + 1
if parent_ind + 1 > len(self.ancestry):
parent_name = None
else:
parent_name = self.ancestry[parent_ind]
child_ind = self.ancestry.index(table_name) - 1
if child_ind < 0:
child_name = None
else:
child_name = self.ancestry[child_ind]
return parent_name, child_name
|
python
|
{
"resource": ""
}
|
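The index arithmetic above implies self.ancestry runs child-to-parent; a stand-alone mirror of the lookup under that assumption (the exact ancestry list here is a guess, not taken from the source):

ANCESTRY = ['measurements', 'specimens', 'samples', 'sites', 'locations']  # assumed child-to-parent order

def parent_and_child(table_name, ancestry=ANCESTRY):
    # same logic as the method above, operating on a plain list
    if table_name not in ancestry:
        return None, None
    idx = ancestry.index(table_name)
    parent = ancestry[idx + 1] if idx + 1 < len(ancestry) else None
    child = ancestry[idx - 1] if idx - 1 >= 0 else None
    return parent, child

print(parent_and_child('sites'))      # ('locations', 'samples')
print(parent_and_child('locations'))  # (None, 'sites')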
q11519
|
Contribution.propagate_cols_up
|
train
|
def propagate_cols_up(self, cols, target_df_name, source_df_name):
"""
Take values from source table, compile them into a colon-delimited list,
and apply them to the target table.
This method won't overwrite values in the target table, it will only
supply values where they are missing.
Parameters
----------
cols : list-like
list of columns to propagate
target_df_name : str
name of table to propagate values into
source_df_name:
name of table to propagate values from
Returns
---------
target_df : MagicDataFrame
updated MagicDataFrame with propagated values
"""
print("-I- Trying to propagate {} columns from {} table into {} table".format(cols,
source_df_name,
target_df_name))
# make sure target table is read in
if target_df_name not in self.tables:
self.add_magic_table(target_df_name)
if target_df_name not in self.tables:
print("-W- Couldn't read in {} table".format(target_df_name))
return
# make sure source table is read in
        if source_df_name not in self.tables:
            self.add_magic_table(source_df_name)
            if source_df_name not in self.tables:
                print("-W- Couldn't read in {} table".format(source_df_name))
                return
target_df = self.tables[target_df_name]
source_df = self.tables[source_df_name]
target_name = target_df_name[:-1]
# make sure source_df has relevant columns
for col in cols:
if col not in source_df.df.columns:
source_df.df[col] = None
# if target_df has info, propagate that into all rows
target_df.front_and_backfill(cols)
# make sure target_name is in source_df for merging
if target_name not in source_df.df.columns:
print("-W- You can't merge data from {} table into {} table".format(source_df_name, target_df_name))
print(" Your {} table is missing {} column".format(source_df_name, target_name))
self.tables[target_df_name] = target_df
return target_df
source_df.front_and_backfill([target_name])
# group source df by target_name
grouped = source_df.df.groupby(source_df.df[target_name])
if not len(grouped):
print("-W- Couldn't propagate from {} to {}".format(source_df_name, target_df_name))
return target_df
# function to generate capitalized, sorted, colon-delimited list
# of unique, non-null values from a column
def func(group, col_name):
lst = group[col_name][group[col_name].notnull()].unique()
split_lst = [col.split(':') for col in lst if col]
sorted_lst = sorted(np.unique([item.capitalize() for sublist in split_lst for item in sublist]))
group_col = ":".join(sorted_lst)
return group_col
# apply func to each column
for col in cols:
res = grouped.apply(func, col)
target_df.df['new_' + col] = res
target_df.df[col] = np.where(target_df.df[col], target_df.df[col], target_df.df['new_' + col])
target_df.df.drop(['new_' + col], axis='columns', inplace=True)
# set table
self.tables[target_df_name] = target_df
return target_df
|
python
|
{
"resource": ""
}
|
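The inner func above does the real work: per group, unique non-null values are split on ':', capitalized, sorted, and re-joined. A stand-alone pandas sketch of that aggregation with invented sample/site values:

import numpy as np
import pandas as pd

samples = pd.DataFrame({'site': ['s1', 's1', 's2'],
                        'lithologies': ['basalt', 'basalt:dike', None]})

def join_unique(group, col_name):
    # colon-delimited list of unique, capitalized, non-null values in the group
    vals = group[col_name][group[col_name].notnull()].unique()
    split = [v.split(':') for v in vals if v]
    flat = sorted(np.unique([item.capitalize() for sub in split for item in sub]))
    return ':'.join(flat)

print(samples.groupby('site').apply(join_unique, 'lithologies'))
# s1 -> 'Basalt:Dike', s2 -> ''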
q11520
|
Contribution.propagate_ages
|
train
|
def propagate_ages(self):
"""
Mine ages table for any age data, and write it into
specimens, samples, sites, locations tables.
Do not overwrite existing age data.
"""
# if there is no age table, skip
if 'ages' not in self.tables:
return
# if age table has no data, skip
if not len(self.tables['ages'].df):
return
# get levels in age table
self.get_age_levels()
# if age levels could not be determined, skip
if not "level" in self.tables["ages"].df.columns:
return
if not any(self.tables["ages"].df["level"]):
return
# go through each level of age data
for level in self.tables['ages'].df['level'].unique():
table_name = level + 's'
age_headers = self.data_model.get_group_headers(table_name, 'Age')
# find age headers that are actually in table
actual_age_headers = list(set(self.tables[table_name].df.columns).intersection(age_headers))
# find site age headers that are available in ages table
available_age_headers = list(set(self.tables['ages'].df.columns).intersection(age_headers))
# fill in all available age info to all rows
self.tables[table_name].front_and_backfill(actual_age_headers)
# add any available headers to table
add_headers = set(available_age_headers).difference(actual_age_headers)
for header in add_headers:
self.tables[table_name].df[header] = None
# propagate values from ages into table
def move_values(ser, level, available_headers):
name = ser.name
cond1 = self.tables['ages'].df[level] == name
cond2 = self.tables['ages'].df['level'] == level
mask = cond1 & cond2
sli = self.tables['ages'].df[mask]
if len(sli):
return list(sli[available_headers].values[0])
return [None] * len(available_headers)
res = self.tables[table_name].df.apply(move_values, axis=1,
args=[level, available_age_headers])
# fill in table with values gleaned from ages
new_df = pd.DataFrame(data=list(res.values), index=res.index,
columns=available_age_headers)
age_values = np.where(self.tables[table_name].df[available_age_headers],
self.tables[table_name].df[available_age_headers],
new_df)
self.tables[table_name].df[available_age_headers] = age_values
#
# put age_high, age_low into locations table
print("-I- Adding age_high and age_low to locations table based on minimum/maximum ages found in sites table")
self.propagate_min_max_up(cols=['age'], target_df_name='locations',
source_df_name='sites')
|
python
|
{
"resource": ""
}
|
q11521
|
Contribution.remove_non_magic_cols
|
train
|
def remove_non_magic_cols(self):
"""
Remove all non-MagIC columns from all tables.
"""
for table_name in self.tables:
table = self.tables[table_name]
table.remove_non_magic_cols_from_table()
|
python
|
{
"resource": ""
}
|
q11522
|
Contribution.write_table_to_file
|
train
|
def write_table_to_file(self, dtype, custom_name=None, append=False, dir_path=None):
"""
Write out a MagIC table to file, using custom filename
as specified in self.filenames.
Parameters
----------
dtype : str
magic table name
"""
if custom_name:
fname = custom_name
else:
fname = self.filenames[dtype]
if not dir_path:
dir_path=self.directory
if dtype in self.tables:
write_df = self.remove_names(dtype)
outfile = self.tables[dtype].write_magic_file(custom_name=fname,
dir_path=dir_path,
append=append, df=write_df)
return outfile
|
python
|
{
"resource": ""
}
|
q11523
|
Contribution.find_missing_items
|
train
|
def find_missing_items(self, dtype):
"""
Find any items that are referenced in a child table
but are missing in their own table.
For example, a site that is listed in the samples table,
but has no entry in the sites table.
Parameters
----------
dtype : str
table name, e.g. 'specimens'
Returns
---------
set of missing values
"""
parent_dtype, child_dtype = self.get_parent_and_child(dtype)
if not child_dtype in self.tables:
return set()
items = set(self.tables[dtype].df.index.unique())
items_in_child_table = set(self.tables[child_dtype].df[dtype[:-1]].unique())
return {i for i in (items_in_child_table - items) if not_null(i)}
|
python
|
{
"resource": ""
}
|
q11524
|
Contribution.get_con_id
|
train
|
def get_con_id(self):
"""
Return contribution id if available
"""
con_id = ""
if "contribution" in self.tables:
if "id" in self.tables["contribution"].df.columns:
con_id = str(self.tables["contribution"].df["id"].values[0])
return con_id
|
python
|
{
"resource": ""
}
|
q11525
|
MagicDataFrame.remove_non_magic_cols_from_table
|
train
|
def remove_non_magic_cols_from_table(self, ignore_cols=()):
"""
Remove all non-magic columns from self.df.
Changes in place.
Parameters
----------
ignore_cols : list-like
columns not to remove, whether they are proper
MagIC columns or not
Returns
---------
unrecognized_cols : list
any columns that were removed
"""
unrecognized_cols = self.get_non_magic_cols()
for col in ignore_cols:
if col in unrecognized_cols:
unrecognized_cols.remove(col)
if unrecognized_cols:
print('-I- Removing non-MagIC column names from {}:'.format(self.dtype), end=' ')
for col in unrecognized_cols:
self.df.drop(col, axis='columns', inplace=True)
print(col, end=' ')
print("\n")
return unrecognized_cols
|
python
|
{
"resource": ""
}
|
q11526
|
MagicDataFrame.add_row
|
train
|
def add_row(self, label, row_data, columns=""):
"""
Add a row with data.
If any new keys are present in row_data dictionary,
that column will be added to the dataframe.
This is done inplace
"""
# use provided column order, making sure you don't lose any values
# from self.df.columns
if len(columns):
if sorted(self.df.columns) == sorted(columns):
self.df.columns = columns
else:
new_columns = []
new_columns.extend(columns)
for col in self.df.columns:
if col not in new_columns:
new_columns.append(col)
# makes sure all columns have data or None
if sorted(row_data.keys()) != sorted(self.df.columns):
# add any new column names
for key in row_data:
if key not in self.df.columns:
self.df[key] = None
# add missing column names into row_data
for col_label in self.df.columns:
if col_label not in list(row_data.keys()):
row_data[col_label] = None
# (make sure you are working with strings)
self.df.index = self.df.index.astype(str)
label = str(label)
# create a new row with suffix "new"
# (this ensures that you get a unique, new row,
# instead of adding on to an existing row with the same label)
self.df.loc[label + "new"] = pd.Series(row_data)
# rename it to be correct
self.df.rename(index={label + "new": label}, inplace=True)
# use next line to sort index inplace
#self.df.sort_index(inplace=True)
return self.df
|
python
|
{
"resource": ""
}
|
q11527
|
MagicDataFrame.add_data
|
train
|
def add_data(self, data): # add append option later
"""
Add df to a MagicDataFrame using a data list.
Parameters
----------
data : list of dicts
            data list with format [{'key1': 'val1', ...}, {'key1': 'val2', ...}, ...]
"""
df = pd.DataFrame(data)
name, dtype = self.get_singular_and_plural_dtype(self.dtype)
if name in df.columns:
df.index = df[name]
df.index.name = name + " name"
self.df = df
|
python
|
{
"resource": ""
}
|
q11528
|
MagicDataFrame.add_blank_row
|
train
|
def add_blank_row(self, label):
"""
Add a blank row with only an index value to self.df.
This is done inplace.
"""
col_labels = self.df.columns
blank_item = pd.Series({}, index=col_labels, name=label)
# use .loc to add in place (append won't do that)
self.df.loc[blank_item.name] = blank_item
return self.df
|
python
|
{
"resource": ""
}
|
q11529
|
MagicDataFrame.delete_row
|
train
|
def delete_row(self, ind):
"""
remove self.df row at ind
inplace
"""
self.df = pd.concat([self.df[:ind], self.df[ind+1:]], sort=True)
return self.df
|
python
|
{
"resource": ""
}
|
q11530
|
MagicDataFrame.delete_rows
|
train
|
def delete_rows(self, condition, info_str=None):
"""
delete all rows with condition==True
inplace
Parameters
----------
condition : pandas DataFrame indexer
all self.df rows that meet this condition will be deleted
info_str : str
description of the kind of rows to be deleted,
e.g "specimen rows with blank method codes"
Returns
--------
df_data : pandas DataFrame
updated self.df
"""
self.df['num'] = list(range(len(self.df)))
df_data = self.df
# delete all records that meet condition
if len(df_data[condition]) > 0: #we have one or more records to delete
inds = df_data[condition]['num'] # list of all rows where condition is TRUE
for ind in inds[::-1]:
df_data = self.delete_row(ind)
if info_str:
print("-I- Deleting {}. ".format(info_str), end=' ')
print('deleting row {}'.format(str(ind)))
# sort so that all rows for an item are together
df_data.sort_index(inplace=True)
# redo temporary index
df_data['num'] = list(range(len(df_data)))
self.df = df_data
return df_data
|
python
|
{
"resource": ""
}
|
q11531
|
MagicDataFrame.drop_stub_rows
|
train
|
def drop_stub_rows(self, ignore_cols=('specimen',
'sample',
'software_packages',
'num')):
"""
Drop self.df rows that have only null values,
ignoring certain columns.
Parameters
----------
ignore_cols : list-like
            list of column names to ignore when deciding whether a row is empty
Returns
---------
self.df : pandas DataFrame
"""
# ignore citations if they just say 'This study'
if 'citations' in self.df.columns:
if list(self.df['citations'].unique()) == ['This study']:
ignore_cols = ignore_cols + ('citations',)
drop_cols = self.df.columns.difference(ignore_cols)
self.df.dropna(axis='index', subset=drop_cols, how='all', inplace=True)
return self.df
|
python
|
{
"resource": ""
}
|
q11532
|
MagicDataFrame.drop_duplicate_rows
|
train
|
def drop_duplicate_rows(self, ignore_cols=['specimen', 'sample']):
"""
Drop self.df rows that have only null values,
ignoring certain columns BUT only if those rows
do not have a unique index.
Different from drop_stub_rows because it only drops
empty rows if there is another row with that index.
Parameters
----------
        ignore_cols : list-like
            list of column names to ignore
Returns
----------
self.df : pandas DataFrame
"""
# keep any row with a unique index
unique_index = self.df.index.unique()
cond1 = ~self.df.index.duplicated(keep=False)
# or with actual data
ignore_cols = [col for col in ignore_cols if col in self.df.columns]
relevant_df = self.df.drop(ignore_cols, axis=1)
cond2 = relevant_df.notnull().any(axis=1)
orig_len = len(self.df)
new_df = self.df[cond1 | cond2]
# make sure we haven't lost anything important
if any(unique_index.difference(new_df.index.unique())):
cond1 = ~self.df.index.duplicated(keep="first")
self.df = self.df[cond1 | cond2]
end_len = len(self.df)
removed = orig_len - end_len
if removed:
print('-I- Removed {} redundant records from {} table'.format(removed, self.dtype))
return self.df
|
python
|
{
"resource": ""
}
|
q11533
|
MagicDataFrame.update_record
|
train
|
def update_record(self, name, new_data, condition, update_only=False,
debug=False):
"""
Find the first row in self.df with index == name
and condition == True.
Update that record with new_data, then delete any
additional records where index == name and condition == True.
Change is inplace
"""
# add numeric index column temporarily
self.df['num'] = list(range(len(self.df)))
df_data = self.df
condition2 = (df_data.index == name)
# edit first of existing data that meets condition
if len(df_data[condition & condition2]) > 0: #we have one or more records to update or delete
# list of all rows where condition is true and index == name
inds = df_data[condition & condition2]['num']
#inds = df_data[condition]['num'] # list of all rows where condition is true
existing_data = dict(df_data.iloc[inds.iloc[0]]) # get first record of existing_data from dataframe
existing_data.update(new_data) # update existing data with new interpretations
# update row
self.update_row(inds.iloc[0], existing_data)
# now remove all the remaining records of same condition
if len(inds) > 1:
for ind in inds[1:]:
print("deleting redundant records for:", name)
df_data = self.delete_row(ind)
else:
if update_only:
print("no record found for that condition, not updating ", name)
else:
print('no record found - creating new one for ', name)
# add new row
df_data = self.add_row(name, new_data)
# sort so that all rows for an item are together
df_data.sort_index(inplace=True)
# redo temporary index
df_data['num'] = list(range(len(df_data)))
self.df = df_data
return df_data
|
python
|
{
"resource": ""
}
|
q11534
|
MagicDataFrame.sort_dataframe_cols
|
train
|
def sort_dataframe_cols(self):
"""
Sort self.df so that self.name is the first column,
and the rest of the columns are sorted by group.
"""
# get the group for each column
cols = self.df.columns
groups = list(map(lambda x: self.data_model.get_group_for_col(self.dtype, x), cols))
sorted_cols = cols.groupby(groups)
ordered_cols = []
# put names first
try:
names = sorted_cols.pop('Names')
except KeyError:
names = []
ordered_cols.extend(list(names))
no_group = []
# remove ungrouped columns
if '' in sorted_cols:
no_group = sorted_cols.pop('')
# flatten list of columns
for k in sorted(sorted_cols):
ordered_cols.extend(sorted(sorted_cols[k]))
# add back in ungrouped columns
ordered_cols.extend(no_group)
# put name first
try:
if self.name in ordered_cols:
ordered_cols.remove(self.name)
ordered_cols[:0] = [self.name]
except AttributeError:
pass
#
self.df = self.df[ordered_cols]
return self.df
|
python
|
{
"resource": ""
}
|
q11535
|
MagicDataFrame.get_non_magic_cols
|
train
|
def get_non_magic_cols(self):
"""
Find all columns in self.df that are not real MagIC 3 columns.
Returns
--------
unrecognized_cols : list
"""
table_dm = self.data_model.dm[self.dtype]
approved_cols = table_dm.index
unrecognized_cols = (set(self.df.columns) - set(approved_cols))
return unrecognized_cols
|
python
|
{
"resource": ""
}
|
q11536
|
MagicDataFrame.get_first_non_null_value
|
train
|
def get_first_non_null_value(self, ind_name, col_name):
"""
For a given index and column, find the first non-null value.
Parameters
----------
self : MagicDataFrame
ind_name : str
index name for indexing
col_name : str
column name for indexing
Returns
---------
single value of str, float, or int
"""
short_df = self.df.loc[ind_name, col_name]
mask = pd.notnull(short_df)
print(short_df[mask])
try:
val = short_df[mask].unique()[0]
except IndexError:
val = None
return val
|
python
|
{
"resource": ""
}
|
q11537
|
get_pmag_dir
|
train
|
def get_pmag_dir():
"""
Returns directory in which PmagPy is installed
"""
# this is correct for py2exe (DEPRECATED)
#win_frozen = is_frozen()
#if win_frozen:
# path = os.path.abspath(unicode(sys.executable, sys.getfilesystemencoding()))
# path = os.path.split(path)[0]
# return path
# this is correct for py2app
try:
return os.environ['RESOURCEPATH']
# this works for everything else
except KeyError: pass
# new way:
# if we're in the local PmagPy directory:
if os.path.isfile(os.path.join(os.getcwd(), 'pmagpy', 'pmag.py')):
lib_dir = os.path.join(os.getcwd(), 'pmagpy')
# if we're anywhere else:
elif getattr(sys, 'frozen', False): #pyinstaller datafile directory
return sys._MEIPASS
else:
# horrible, hack-y fix
# (prevents namespace issue between
# local github PmagPy and pip-installed PmagPy).
# must reload because we may have
# changed directories since importing
temp = os.getcwd()
os.chdir('..')
reload(locator)
lib_file = resource_filename('locator', 'resource.py')
full_dir = os.path.split(lib_file)[0]
ind = full_dir.rfind(os.sep)
lib_dir = full_dir[:ind+1]
lib_dir = os.path.realpath(os.path.join(lib_dir, 'pmagpy'))
os.chdir(temp)
# end fix
# old way:
#lib_dir = os.path.dirname(os.path.realpath(__file__))
if not os.path.isfile(os.path.join(lib_dir, 'pmag.py')):
lib_dir = os.getcwd()
fname = os.path.join(lib_dir, 'pmag.py')
if not os.path.isfile(fname):
pmag_dir = os.path.split(os.path.split(__file__)[0])[0]
if os.path.isfile(os.path.join(pmag_dir,'pmagpy','pmag.py')):
return pmag_dir
else:
print('-W- Can\'t find the data model! Make sure you have installed pmagpy using pip: "pip install pmagpy --upgrade"')
return '.'
# strip "/" or "\" and "pmagpy" to return proper PmagPy directory
if lib_dir.endswith(os.sep):
lib_dir = lib_dir[:-1]
if lib_dir.endswith('pmagpy'):
pmag_dir = os.path.split(lib_dir)[0]
else:
pmag_dir = lib_dir
return pmag_dir
|
python
|
{
"resource": ""
}
|
q11538
|
main
|
train
|
def main():
"""
NAME
plot_magic_keys.py
DESCRIPTION
        picks out keys and makes an xy plot
SYNTAX
plot_magic_keys.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file
-xkey KEY: specify key for X
-ykey KEY: specify key for Y
-b xmin xmax ymin ymax, sets bounds
"""
dir_path="./"
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
magic_file=dir_path+'/'+sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
    if '-xkey' in sys.argv:
        ind=sys.argv.index('-xkey')
        xkey=sys.argv[ind+1]
    else:
        print(main.__doc__)
        sys.exit()
if '-ykey' in sys.argv:
ind=sys.argv.index('-ykey')
ykey=sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
if '-b' in sys.argv:
ind=sys.argv.index('-b')
xmin=float(sys.argv[ind+1])
xmax=float(sys.argv[ind+2])
ymin=float(sys.argv[ind+3])
ymax=float(sys.argv[ind+4])
#
#
# get data read in
X,Y=[],[]
Data,file_type=pmag.magic_read(magic_file)
if len(Data)>0:
for rec in Data:
if xkey in list(rec.keys()) and rec[xkey]!="" and ykey in list(rec.keys()) and rec[ykey]!="":
try:
X.append(float(rec[xkey]))
Y.append(float(rec[ykey]))
except:
pass
FIG={'fig':1}
pmagplotlib.plot_init(FIG['fig'],5,5)
if '-b' in sys.argv:
pmagplotlib.plot_xy(FIG['fig'],X,Y,sym='ro',xlab=xkey,ylab=ykey,xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax )
else:
pmagplotlib.plot_xy(FIG['fig'],X,Y,sym='ro',xlab=xkey,ylab=ykey)
pmagplotlib.draw_figs(FIG)
ans=input(" S[a]ve to save plot, [q]uit, Return to continue: ")
if ans=="q": sys.exit()
if ans=="a":
files = {}
for key in list(FIG.keys()):
files[key]=str(key) + ".svg"
pmagplotlib.save_plots(FIG,files)
sys.exit()
else:
print('no data to plot')
|
python
|
{
"resource": ""
}
|
q11539
|
main
|
train
|
def main():
"""
NAME
eqarea.py
DESCRIPTION
makes equal area projections from declination/inclination data
INPUT FORMAT
takes dec/inc as first two columns in space delimited file
SYNTAX
eqarea.py [options]
OPTIONS
-f FILE, specify file on command line
-sav save figure and quit
-fmt [svg,jpg,png,pdf] set figure format [default is svg]
-s SIZE specify symbol size - default is 20
-Lsym SHAPE COLOR specify shape and color for lower hemisphere
-Usym SHAPE COLOR specify shape and color for upper hemisphere
shapes: 's': square,'o': circle,'^,>,v,<': [up,right,down,left] triangle, 'd': diamond,
'p': pentagram, 'h': hexagon, '8': octagon, '+': plus, 'x': cross
colors: [b]lue,[g]reen,[r]ed,[c]yan,[m]agenta,[y]ellow,blac[k],[w]hite
"""
title = ""
files, fmt = {}, 'svg'
sym = {'lower': ['o', 'r'], 'upper': ['o', 'w']}
plot = 0
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-sav' in sys.argv:
plot = 1
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind + 1]
if '-s' in sys.argv:
ind = sys.argv.index('-s')
sym['size'] = int(sys.argv[ind + 1])
else:
sym['size'] = 20
if '-Lsym' in sys.argv:
ind = sys.argv.index('-Lsym')
sym['lower'][0] = sys.argv[ind + 1]
sym['lower'][1] = sys.argv[ind + 2]
if '-Usym' in sys.argv:
ind = sys.argv.index('-Usym')
sym['upper'][0] = sys.argv[ind + 1]
sym['upper'][1] = sys.argv[ind + 2]
if '-f' in sys.argv: # ask for filename
ind = sys.argv.index('-f')
fname = sys.argv[ind + 1]
else:
print(main.__doc__)
print(' \n -f option required')
sys.exit() # graceful quit
DI = numpy.loadtxt(fname)
EQ = {'eq': 1}
pmagplotlib.plot_init(EQ['eq'], 5, 5)
pmagplotlib.plot_eq_sym(EQ['eq'], DI, 'Equal Area Plot', sym) # make plot
if plot == 0:
pmagplotlib.draw_figs(EQ) # make it visible
for key in list(EQ.keys()):
files[key] = key + '.' + fmt
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles = {}
titles['eq'] = 'Equal Area Plot'
EQ = pmagplotlib.add_borders(EQ, titles, black, purple)
pmagplotlib.save_plots(EQ, files)
elif plot == 1:
fname = os.path.split(fname)[1].split('.')[0]
files['eq'] = fname + '_eq.' + fmt
pmagplotlib.save_plots(EQ, files)
else:
ans = input(" S[a]ve to save plot, [q]uit without saving: ")
if ans == "a":
pmagplotlib.save_plots(EQ, files)
|
python
|
{
"resource": ""
}
|
q11540
|
MagICMenu.on_quit
|
train
|
def on_quit(self, event):
"""
shut down application
"""
if self.parent.grid_frame:
if self.parent.grid_frame.grid.changes:
dlg = wx.MessageDialog(self,caption="Message:", message="Are you sure you want to exit the program?\nYou have a grid open with unsaved changes.\n ", style=wx.OK|wx.CANCEL)
result = dlg.ShowModal()
if result == wx.ID_OK:
dlg.Destroy()
else:
dlg.Destroy()
return
if self.parent.grid_frame:
self.parent.grid_frame.Destroy()
# if there have been edits, save all data to files
# before quitting
if self.parent.edited:
self.parent.er_magic.write_files()
self.parent.Close()
try:
sys.exit()
except TypeError:
pass
|
python
|
{
"resource": ""
}
|
q11541
|
main
|
train
|
def main():
"""
NAME
gobing.py
DESCRIPTION
calculates Bingham parameters from dec inc data
INPUT FORMAT
takes dec/inc as first two columns in space delimited file
SYNTAX
gobing.py [options]
OPTIONS
-f FILE to read from FILE
-F, specifies output file name
< filename for reading from standard input
OUTPUT
mean dec, mean inc, Eta, Deta, Ieta, Zeta, Zdec, Zinc, N
"""
if len(sys.argv) > 0:
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-f' in sys.argv: # ask for filename
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
else:
data=sys.stdin.readlines() # read in data from standard input
DIs= [] # set up list for dec inc data
ofile = ""
if '-F' in sys.argv: # set up output file
ind = sys.argv.index('-F')
ofile= sys.argv[ind+1]
            out = open(ofile, 'w')  # open output file for writing
for line in data: # read in the data from standard input
if '\t' in line:
rec=line.split('\t') # split each line on space to get records
else:
rec=line.split() # split each line on space to get records
DIs.append((float(rec[0]),float(rec[1])))
#
bpars=pmag.dobingham(DIs)
output = '%7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %i' % (bpars["dec"],bpars["inc"],bpars["Eta"],bpars["Edec"],bpars["Einc"],bpars["Zeta"],bpars["Zdec"],bpars["Zinc"],bpars["n"])
if ofile == "":
print(output)
else:
out.write(output+'\n')
|
python
|
{
"resource": ""
}
|
q11542
|
main
|
train
|
def main():
"""
NAME
atrm_magic.py
DESCRIPTION
Converts ATRM data to best-fit tensor (6 elements plus sigma)
        Original program ARMcrunch written to accommodate ARM anisotropy data
collected from 6 axial directions (+X,+Y,+Z,-X,-Y,-Z) using the
off-axis remanence terms to construct the tensor. A better way to
do the anisotropy of ARMs is to use 9,12 or 15 measurements in
the Hext rotational scheme.
SYNTAX
atrm_magic.py [-h][command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input file, default is atrm_measurements.txt
-fsp FILE: specimen input file, default is specimens.txt (optional)
-Fsp FILE: specify output file, default is specimens.txt (MagIC 3 only)
-DM DATA_MODEL: specify MagIC 2 or MagIC 3, default is 3
INPUT
Input for the present program is a TRM acquisition data with an optional baseline.
The order of the measurements is:
Decs=[0,90,0,180,270,0,0,90,0]
Incs=[0,0,90,0,0,-90,0,0,90]
The last two measurements are optional
"""
# initialize some parameters
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
#if "-Fa" in args:
# ind = args.index("-Fa")
# rmag_anis = args[ind + 1]
#if "-Fr" in args:
# ind = args.index("-Fr")
# rmag_res = args[ind + 1]
#meas_file = "atrm_measurements.txt"
#rmag_anis = "trm_anisotropy.txt"
#rmag_res = "atrm_results.txt"
dir_path = pmag.get_named_arg("-WD", ".")
input_dir_path = pmag.get_named_arg("-ID", "")
meas_file = pmag.get_named_arg("-f", "measurements.txt")
data_model_num = int(pmag.get_named_arg("-DM", 3))
spec_outfile = pmag.get_named_arg("-Fsp", "specimens.txt")
spec_infile = pmag.get_named_arg("-fsp", "specimens.txt")
ipmag.atrm_magic(meas_file, dir_path, input_dir_path,
spec_infile, spec_outfile, data_model_num)
|
python
|
{
"resource": ""
}
|
q11543
|
OrientFrameGrid3.create_sheet
|
train
|
def create_sheet(self):
'''
create an editable grid showing demag_orient.txt
'''
#--------------------------------
# orient.txt supports many other headers
# but we will only initialize with
# the essential headers for
# sample orientation and headers present
# in existing demag_orient.txt file
#--------------------------------
#--------------------------------
# create the grid
#--------------------------------
samples_list = list(self.orient_data.keys())
samples_list.sort()
        self.samples_list = [sample for sample in samples_list if sample != ""]
#self.headers.extend(self.add_extra_headers(samples_list))
display_headers = [header[1] for header in self.headers]
self.grid = magic_grid.MagicGrid(self.panel, 'orient grid',
self.samples_list, display_headers)
self.grid.InitUI()
#--------------------------------
# color the columns by groups
#--------------------------------
for i in range(len(self.samples_list)):
self.grid.SetCellBackgroundColour(i, 0, "LIGHT GREY")
self.grid.SetCellBackgroundColour(i, 1, "LIGHT STEEL BLUE")
self.grid.SetCellBackgroundColour(i, 2, "YELLOW")
self.grid.SetCellBackgroundColour(i, 3, "YELLOW")
self.grid.SetCellBackgroundColour(i, 4, "PALE GREEN")
self.grid.SetCellBackgroundColour(i, 5, "PALE GREEN")
self.grid.SetCellBackgroundColour(i, 6, "KHAKI")
self.grid.SetCellBackgroundColour(i, 7, "KHAKI")
self.grid.SetCellBackgroundColour(i, 8, "KHAKI")
self.grid.SetCellBackgroundColour(i, 9, "KHAKI")
self.grid.SetCellBackgroundColour(i, 10, "KHAKI")
self.grid.SetCellBackgroundColour(i, 11, "LIGHT MAGENTA")
self.grid.SetCellBackgroundColour(i, 12, "LIGHT MAGENTA")
#--------------------------------
# fill data from self.orient_data
#--------------------------------
headers = [header[0] for header in self.headers]
for sample in self.samples_list:
for key in list(self.orient_data[sample].keys()):
if key in headers:
sample_index = self.samples_list.index(sample)
i = headers.index(key)
val = str(self.orient_data[sample][key])
# if it's a pmag_object, use its name
try:
val = val.name
except AttributeError:
pass
if val and val != "None":
self.grid.SetCellValue(sample_index, i, val)
#--------------------------------
#--------------------------------
# fill in some default values
#--------------------------------
for row in range(self.grid.GetNumberRows()):
col = 1
if not self.grid.GetCellValue(row, col):
self.grid.SetCellValue(row, col, 'g')
#--------------------------------
# temporary trick to get drop-down-menus to work
self.grid.changes = {'a'}
self.grid.AutoSize()
#self.drop_down_menu = drop_down_menus.Menus("orient", self, self.grid, '')
self.drop_down_menu = drop_down_menus3.Menus("orient", self.contribution, self.grid)
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
|
python
|
{
"resource": ""
}
|
q11544
|
main
|
train
|
def main():
"""
NAME
mk_redo.py
DESCRIPTION
Makes thellier_redo and zeq_redo files from existing pmag_specimens format file
SYNTAX
mk_redo.py [-h] [command line options]
INPUT
takes specimens.txt formatted input file
OPTIONS
-h: prints help message and quits
-f FILE: specify input file, default is 'specimens.txt'
-F REDO: specify output file suffix, default is redo so that
output filenames are 'thellier_redo' for thellier data and 'zeq_redo' for direction only data
OUTPUT
makes a thellier_redo or a zeq_redo format file
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
zfile, tfile = 'zeq_redo', 'thellier_redo'
zredo, tredo = "", ""
dir_path = pmag.get_named_arg('-WD', '.')
inspec = pmag.get_named_arg('-f', 'specimens.txt')
if '-F' in sys.argv:
ind = sys.argv.index('-F')
redo = sys.argv[ind + 1]
tfile = redo
zfile = redo
inspec = pmag.resolve_file_name(inspec, dir_path)
zfile = pmag.resolve_file_name(zfile, dir_path)
tfile = pmag.resolve_file_name(tfile, dir_path)
#
# read in data
#
specs = []
prior_spec_data, file_type = pmag.magic_read(inspec)
if file_type != 'specimens':
print(file_type, " this is not a valid pmag_specimens file")
sys.exit()
outstrings = []
for spec in prior_spec_data:
tmp = spec["method_codes"].split(":")
meths = []
for meth in tmp:
methods = meth.strip().split('-')
for m in methods:
if m not in meths:
meths.append(m)
if 'DIR' in meths: # DE-BFL, DE-BFP or DE-FM
specs.append(spec['specimen'])
if 'dir_comp' in list(spec.keys()) and spec['dir_comp'] != "" and spec['dir_comp'] != " ":
comp_name = spec['dir_comp']
else:
comp_name = string.ascii_uppercase[specs.count(
spec['specimen']) - 1]
calculation_type = "DE-BFL" # assume default calculation type is best-fit line
if "BFP" in meths:
calculation_type = 'DE-BFP'
elif "FM" in meths:
calculation_type = 'DE-FM'
if zredo == "":
zredo = open(zfile, "w")
outstring = '%s %s %s %s %s \n' % (
spec["specimen"], calculation_type, spec["meas_step_min"], spec["meas_step_max"], comp_name)
if outstring not in outstrings:
zredo.write(outstring)
                outstrings.append(outstring) # only writes unique interpretations
elif "PI" in meths and "TRM" in meths: # thellier record
if tredo == "":
tredo = open(tfile, "w")
outstring = '%s %i %i \n' % (spec["specimen"], float(
spec["meas_step_min"]), float(spec["meas_step_max"]))
if outstring not in outstrings:
tredo.write(outstring)
                outstrings.append(outstring) # only writes unique interpretations
print('Redo files saved to: ', zfile, tfile)
|
python
|
{
"resource": ""
}
|
q11545
|
find_side
|
train
|
def find_side(ls, side):
"""
Given a shapely LineString which is assumed to be rectangular, return the
line corresponding to a given side of the rectangle.
"""
minx, miny, maxx, maxy = ls.bounds
points = {'left': [(minx, miny), (minx, maxy)],
'right': [(maxx, miny), (maxx, maxy)],
'bottom': [(minx, miny), (maxx, miny)],
'top': [(minx, maxy), (maxx, maxy)],}
return sgeom.LineString(points[side])
|
python
|
{
"resource": ""
}
|
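A quick check of find_side, assuming sgeom is shapely.geometry as in the module this function comes from; the rectangle coordinates are invented:

import shapely.geometry as sgeom

rect = sgeom.LineString([(0, 0), (10, 0), (10, 5), (0, 5), (0, 0)])
print(find_side(rect, 'bottom'))  # LINESTRING (0 0, 10 0)
print(find_side(rect, 'left'))    # LINESTRING (0 0, 0 5)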
q11546
|
lambert_xticks
|
train
|
def lambert_xticks(ax, ticks):
"""Draw ticks on the bottom x-axis of a Lambert Conformal projection."""
te = lambda xy: xy[0]
lc = lambda t, n, b: np.vstack((np.zeros(n) + t, np.linspace(b[2], b[3], n))).T
xticks, xticklabels = _lambert_ticks(ax, ticks, 'bottom', lc, te)
ax.xaxis.tick_bottom()
ax.set_xticks(xticks)
ax.set_xticklabels([ax.xaxis.get_major_formatter()(xtick) for xtick in xticklabels])
|
python
|
{
"resource": ""
}
|
q11547
|
lambert_yticks
|
train
|
def lambert_yticks(ax, ticks):
"""Draw ricks on the left y-axis of a Lamber Conformal projection."""
te = lambda xy: xy[1]
lc = lambda t, n, b: np.vstack((np.linspace(b[0], b[1], n), np.zeros(n) + t)).T
yticks, yticklabels = _lambert_ticks(ax, ticks, 'left', lc, te)
ax.yaxis.tick_left()
ax.set_yticks(yticks)
ax.set_yticklabels([ax.yaxis.get_major_formatter()(ytick) for ytick in yticklabels])
|
python
|
{
"resource": ""
}
|
q11548
|
_lambert_ticks
|
train
|
def _lambert_ticks(ax, ticks, tick_location, line_constructor, tick_extractor):
"""Get the tick locations and labels for an axis of a Lambert Conformal projection."""
outline_patch = sgeom.LineString(ax.outline_patch.get_path().vertices.tolist())
axis = find_side(outline_patch, tick_location)
n_steps = 30
extent = ax.get_extent(ccrs.PlateCarree())
_ticks = []
for t in ticks:
xy = line_constructor(t, n_steps, extent)
proj_xyz = ax.projection.transform_points(ccrs.Geodetic(), xy[:, 0], xy[:, 1])
xyt = proj_xyz[..., :2]
ls = sgeom.LineString(xyt.tolist())
locs = axis.intersection(ls)
if not locs:
tick = [None]
else:
tick = tick_extractor(locs.xy)
_ticks.append(tick[0])
# Remove ticks that aren't visible:
ticklabels = copy(ticks)
while True:
try:
index = _ticks.index(None)
except ValueError:
break
_ticks.pop(index)
ticklabels.pop(index)
return _ticks, ticklabels
|
python
|
{
"resource": ""
}
|
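A hedged end-to-end sketch of how these tick helpers might be used on a Lambert Conformal map; it assumes cartopy and matplotlib are installed and a cartopy version in which GeoAxes still exposes outline_patch (relied on by _lambert_ticks above). The extent and tick values are arbitrary:

import matplotlib.pyplot as plt
import cartopy.crs as ccrs

fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
ax.set_extent([-125, -66, 24, 50], crs=ccrs.PlateCarree())
ax.coastlines()
fig.canvas.draw()                      # outline patch must be realized before measuring ticks
lambert_xticks(ax, [-120, -100, -80])  # longitude ticks along the bottom edge
lambert_yticks(ax, [30, 40, 50])       # latitude ticks along the left edge
plt.show()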
q11549
|
main
|
train
|
def main():
"""
NAME
dmag_magic.py
DESCRIPTION
plots intensity decay curves for demagnetization experiments
SYNTAX
dmag_magic -h [command line options]
INPUT
takes magic formatted measurements.txt files
OPTIONS
-h prints help message and quits
-f FILE: specify input file, default is: measurements.txt
-obj OBJ: specify object [loc, sit, sam, spc] for plot,
default is by location
-LT [AF,T,M]: specify lab treatment type, default AF
-XLP [PI]: exclude specific lab protocols,
(for example, method codes like LP-PI)
-N do not normalize by NRM magnetization
-sav save plots silently and quit
-fmt [svg,jpg,png,pdf] set figure format [default is svg]
NOTE
loc: location (study); sit: site; sam: sample; spc: specimen
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
# initialize variables from command line + defaults
dir_path = pmag.get_named_arg("-WD", default_val=".")
input_dir_path = pmag.get_named_arg('-ID', '')
if not input_dir_path:
input_dir_path = dir_path
in_file = pmag.get_named_arg("-f", default_val="measurements.txt")
in_file = pmag.resolve_file_name(in_file, input_dir_path)
if "-ID" not in sys.argv:
input_dir_path = os.path.split(in_file)[0]
plot_by = pmag.get_named_arg("-obj", default_val="loc")
LT = pmag.get_named_arg("-LT", "AF")
no_norm = pmag.get_flag_arg_from_sys("-N")
norm = False if no_norm else True
save_plots = pmag.get_flag_arg_from_sys("-sav")
fmt = pmag.get_named_arg("-fmt", "svg")
XLP = pmag.get_named_arg("-XLP", "")
spec_file = pmag.get_named_arg("-fsp", default_val="specimens.txt")
samp_file = pmag.get_named_arg("-fsa", default_val="samples.txt")
site_file = pmag.get_named_arg("-fsi", default_val="sites.txt")
loc_file = pmag.get_named_arg("-flo", default_val="locations.txt")
dmag_magic(in_file, dir_path, input_dir_path, spec_file, samp_file,
site_file, loc_file, plot_by, LT, norm, XLP,
save_plots, fmt)
|
python
|
{
"resource": ""
}
|
q11550
|
main
|
train
|
def main():
"""This program prints doubled values!"""
import numpy
    X=numpy.arange(.1,10.1,.2) #make a list of numbers
Y=myfunc(X) # calls myfunc with argument X
for i in range(len(X)):
print(X[i],Y[i])
|
python
|
{
"resource": ""
}
|
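myfunc is not shown in this record; given the docstring ('prints doubled values'), a minimal stand-in consistent with the loop above could be:

import numpy

def myfunc(x):
    # element-wise doubling of a numpy array
    return 2 * x

X = numpy.arange(.1, 10.1, .2)
print(list(zip(X, myfunc(X)))[:3])  # first few (x, 2x) pairs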
q11551
|
main
|
train
|
def main():
"""
NAME
common_mean.py
DESCRIPTION
calculates bootstrap statistics to test for common mean
INPUT FORMAT
takes dec/inc as first two columns in two space delimited files
SYNTAX
common_mean.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE, input file
-f2 FILE, optional second file to compare with first file
-dir D I, optional direction to compare with input file
        -fmt [svg,jpg,png,pdf] set figure format [default is svg]
        -sav save figures and quit
NOTES
must have either F2 OR dir but not both
"""
d,i,file2="","",""
fmt,plot='svg',0
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-sav' in sys.argv: plot=1
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file1=sys.argv[ind+1]
if '-f2' in sys.argv:
ind=sys.argv.index('-f2')
file2=sys.argv[ind+1]
if '-dir' in sys.argv:
ind=sys.argv.index('-dir')
d=float(sys.argv[ind+1])
i=float(sys.argv[ind+2])
    D1=numpy.loadtxt(file1,dtype=float)
    if file2!="": D2=numpy.loadtxt(file2,dtype=float)
#
counter,NumSims=0,1000
#
# get bootstrapped means for first data set
#
print("Doing first set of directions, please be patient..")
BDI1=pmag.di_boot(D1)
#
# convert to cartesian coordinates X1,X2, Y1,Y2 and Z1, Z2
#
if d=="": # repeat for second data set
print("Doing second set of directions, please be patient..")
BDI2=pmag.di_boot(D2)
else:
BDI2=[]
# set up plots
CDF={'X':1,'Y':2,'Z':3}
pmagplotlib.plot_init(CDF['X'],4,4)
pmagplotlib.plot_init(CDF['Y'],4,4)
pmagplotlib.plot_init(CDF['Z'],4,4)
# draw the cdfs
pmagplotlib.plot_com(CDF,BDI1,BDI2,[d,i])
files={}
files['X']='CD_X.'+fmt
files['Y']='CD_Y.'+fmt
files['Z']='CD_Z.'+fmt
if plot==0:
pmagplotlib.draw_figs(CDF)
ans=input("S[a]ve plots, <Return> to quit ")
if ans=="a":
pmagplotlib.save_plots(CDF,files)
else:
sys.exit()
else:
pmagplotlib.save_plots(CDF,files)
sys.exit()
|
python
|
{
"resource": ""
}
|
q11552
|
main
|
train
|
def main():
"""
NAME
sundec.py
DESCRIPTION
    calculates declination from sun compass measurements
INPUT FORMAT
GMT_offset, lat,long,year,month,day,hours,minutes,shadow_angle
where GMT_offset is the hours to subtract from local time for GMT.
SYNTAX
sundec.py [-i][-f FILE] [< filename ]
OPTIONS
-i for interactive data entry
-f FILE to set file name on command line
otherwise put data in input format in space delimited file
OUTPUT:
declination
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
data=f.readlines() # read in data from standard input
for line in data: # step through line by line
dec=spitout(line)
sys.exit()
if '-i' in sys.argv:
while 1: # repeat this block until program killed
sundata={} # dictionary with sundata in it
print ("Time difference between Greenwich Mean Time (hrs to subtract from local time to get GMT): ")
try:
sundata["delta_u"]=input("<cntl-D> to quit ")
except:
print("\n Good-bye\n")
sys.exit()
date=""
date=date+input("Year: <cntl-D to quit> ")
date=date+":"+input("Month: ")
date=date+":"+input("Day: ")
date=date+":"+input("hour: ")
date=date+":"+input("minute: ")
sundata["date"]=date
sundata["lat"]=input("Latitude of sampling site (negative in southern hemisphere): ")
sundata["lon"]=input("Longitude of sampling site (negative for western hemisphere): ")
sundata["shadow_angle"]=input("Shadow angle: ")
print('%7.1f'%(pmag.dosundec(sundata))) # call sundec function from pmag module and print
else:
data=sys.stdin.readlines() # read in data from standard input
for line in data: # step through line by line
dec=spitout(line)
|
python
|
{
"resource": ""
}
|
q11553
|
main
|
train
|
def main():
"""
NAME
sites_locations.py
DESCRIPTION
reads in er_sites.txt file and finds all locations and bounds of locations
outputs er_locations.txt file
SYNTAX
sites_locations.py [command line options]
OPTIONS
-h prints help message and quits
        -f: input er_sites format file, default is "er_sites.txt"
-F: locations table: default is "er_locations.txt"
"""
# set defaults
site_file="er_sites.txt"
loc_file="er_locations.txt"
Names,user=[],"unknown"
Done=[]
version_num=pmag.get_version()
args=sys.argv
dir_path='.'
# get command line stuff
if '-WD' in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if '-f' in args:
ind=args.index("-f")
site_file=args[ind+1]
if '-F' in args:
ind=args.index("-F")
loc_file=args[ind+1]
#
site_file=dir_path+'/'+site_file
loc_file=dir_path+'/'+loc_file
Sites,file_type=pmag.magic_read(site_file)
if file_type != 'er_sites':
print(file_type)
print(file_type,"This is not a valid er_sites file ")
sys.exit()
# read in site data
#
LocNames,Locations=[],[]
for site in Sites:
if site['er_location_name'] not in LocNames: # new location name
LocNames.append(site['er_location_name'])
sites_locs=pmag.get_dictitem(Sites,'er_location_name',site['er_location_name'],'T') # get all sites for this loc
lats=pmag.get_dictkey(sites_locs,'site_lat','f') # get all the latitudes as floats
lons=pmag.get_dictkey(sites_locs,'site_lon','f') # get all the longitudes as floats
LocRec={'er_citation_names':'This study','er_location_name':site['er_location_name'],'location_type':''}
LocRec['location_begin_lat']=str(min(lats))
LocRec['location_end_lat']=str(max(lats))
LocRec['location_begin_lon']=str(min(lons))
LocRec['location_end_lon']=str(max(lons))
Locations.append(LocRec)
if len(Locations)>0:
pmag.magic_write(loc_file,Locations,"er_locations")
print("Locations written to: ",loc_file)
|
python
|
{
"resource": ""
}
|
q11554
|
main
|
train
|
def main():
"""
NAME
azdip_magic.py
DESCRIPTION
takes space delimited AzDip file and converts to MagIC formatted tables
SYNTAX
azdip_magic.py [command line options]
OPTIONS
-f FILE: specify input file
-Fsa FILE: specify output file, default is: er_samples.txt/samples.txt
-ncn NCON: specify naming convention: default is #1 below
-mcd: specify sampling method codes as a colon delimited string: [default is: FS-FD]
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
SO-MAG orientation with magnetic compass
-loc: location name, default="unknown"
-app appends to existing samples file, default is to overwrite
INPUT FORMAT
Input files must be space delimited:
Samp Az Dip Strike Dip
Orientation convention:
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
e.g. field_dip is degrees from horizontal of drill direction
Magnetic declination convention:
Az is already corrected in file
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
            [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
            [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
            NB: all others you will have to customize yourself
                or e-mail ltauxe@ucsd.edu for help.
OUTPUT
output saved in samples file will overwrite any existing files
"""
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
dataframe = extractor.command_line_dataframe([['f', False, 'orient.txt'], ['Fsa', False, 'samples.txt'], ['ncn', False, "1"], ['mcd', False, 'FS-FD'], ['loc', False, 'unknown'], ['app', False, False], ['WD', False, '.'], ['ID', False, '.'], ['DM', False, 3]])
checked_args = extractor.extract_and_check_args(args, dataframe)
#print('checked_args:', checked_args)
orient_file, samp_file, samp_con, method_codes, location_name, append, output_dir, input_dir, data_model = extractor.get_vars(['f', 'Fsa', 'ncn', 'mcd', 'loc', 'app', 'WD', 'ID', 'DM'], checked_args)
if len(str(samp_con)) > 1:
samp_con, Z = samp_con.split('-')
Z = float(Z)
else:
Z = 1
ipmag.azdip_magic(orient_file, samp_file, samp_con, Z, method_codes, location_name, append, output_dir, input_dir, data_model)
|
python
|
{
"resource": ""
}
|
q11555
|
main
|
train
|
def main():
"""
NAME
k15_magic.py
DESCRIPTION
converts .k15 format data to magic_measurements format.
        assumes the Jelinek Kappabridge measurement scheme
SYNTAX
k15_magic.py [-h] [command line options]
OPTIONS
-h prints help message and quits
-DM DATA_MODEL: specify data model 2 or 3 (default 3)
-f KFILE: specify .k15 format input file
-F MFILE: specify measurement output file
-Fsa SFILE, specify sample file for output
-Fa AFILE, specify specimen file for output [rmag_anisotropy for data model 2 only]
#-ins INST: specify instrument that measurements were made on # not implemented
-spc NUM: specify number of digits for specimen ID, default is 0
        -ncn NCON: specify naming convention (default is #1)
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
            [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
            [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
[4-Z] XXXXYYY: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
            NB: all others you will have to either customize yourself
                or e-mail ltauxe@ucsd.edu for help.
DEFAULTS
MFILE: measurements.txt
SFILE: samples.txt
AFILE: specimens.txt
INPUT
name [az,pl,strike,dip], followed by
3 rows of 5 measurements for each specimen
"""
args = sys.argv
if '-h' in args:
print(do_help())
sys.exit()
# def k15_magic(k15file, specnum=0, sample_naming_con='1', er_location_name="unknown", measfile='magic_measurements.txt', sampfile="er_samples.txt", aniso_outfile='rmag_anisotropy.txt', result_file="rmag_results.txt", input_dir_path='.', output_dir_path='.'):
dataframe = extractor.command_line_dataframe([['f', True, ''], ['F', False, 'measurements.txt'], ['Fsa', False, 'samples.txt'], ['Fa', False, 'specimens.txt'], [
'Fr', False, 'rmag_results.txt'], ['spc', False, 0], ['ncn', False, '1'], ['loc', False, 'unknown'], ['WD', False, '.'], ['ID', False, '.'], ['DM', False, 3]])
checked_args = extractor.extract_and_check_args(args, dataframe)
k15file, measfile, sampfile, aniso_outfile, result_file, specnum, sample_naming_con, location_name, output_dir_path, input_dir_path, data_model_num = extractor.get_vars(
['f', 'F', 'Fsa', 'Fa', 'Fr', 'spc', 'ncn', 'loc', 'WD', 'ID', 'DM'], checked_args)
program_ran, error_message = convert.k15(k15file, specnum=specnum, sample_naming_con=sample_naming_con, location=location_name, meas_file=measfile,
samp_file=sampfile, aniso_outfile=aniso_outfile, result_file=result_file, input_dir_path=input_dir_path, dir_path=output_dir_path, data_model_num=data_model_num)
|
python
|
{
"resource": ""
}
|
q11556
|
main
|
train
|
def main():
"""
NAME
pca.py
DESCRIPTION
calculates best-fit line/plane through demagnetization data
INPUT FORMAT
takes specimen_name treatment intensity declination inclination in space delimited file
SYNTAX
pca.py [command line options][< filename]
OPTIONS
-h prints help and quits
-f FILE
-dir [L,P,F][BEG][END] specify direction type, beginning and end
(L:line, P:plane or F:fisher mean of unit vectors)
BEG: first step (NRM = step zero)
END: last step (NRM = step zero)
< filename for reading from standard input
OUTPUT:
specimen_name calculation_type N beg end MAD dec inc
if calculation_type is 'p', dec and inc are pole to plane, otherwise, best-fit direction
EXAMPLE:
pca.py -dir L 1 5 <ex3.3
         will calculate the best-fit line through demagnetization steps 1 and 5 from file ex3.3
"""
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
else:
data=sys.stdin.readlines() # read in data from standard input
if '-dir' in sys.argv: #
ind=sys.argv.index('-dir')
typ=sys.argv[ind+1]
if typ=='L': calculation_type='DE-BFL'
if typ=='P': calculation_type='DE-BFP'
if typ=='F': calculation_type='DE-FM'
beg_pca = int(sys.argv[ind+2])
end_pca = int(sys.argv[ind+3])
#
#
datablock= [] # set up list for data
s=""
ind=0
for line in data: # read in the data from standard input
rec=line.split() # split each line on space to get records
if s=="":
s=rec[0]
print(s, calculation_type)
print(ind,rec[1],rec[3],rec[4],rec[2])
ind+=1
datablock.append([float(rec[1]),float(rec[3]),float(rec[4]),float(rec[2]),'0']) # treatment,dec,inc,int,dummy
mpars=pmag.domean(datablock,beg_pca,end_pca,calculation_type)
if calculation_type=="DE-FM":
print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s,calculation_type,mpars["specimen_n"],mpars["measurement_step_min"],mpars["measurement_step_max"],mpars["specimen_a95"],mpars["specimen_dec"],mpars["specimen_inc"]))
else:
print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s,calculation_type,mpars["specimen_n"],mpars["measurement_step_min"],mpars["measurement_step_max"],mpars["specimen_mad"],mpars["specimen_dec"],mpars["specimen_inc"]))
|
python
|
{
"resource": ""
}
|
q11557
|
requiredUnless
|
train
|
def requiredUnless(col_name, arg, dm, df, *args):
"""
Arg is a string in the format "str1, str2, ..."
Each string will be a column name.
Col_name is required in df unless each column from arg is present.
"""
# if column name is present, no need to check if it is required
if col_name in df.columns:
return None
arg_list = arg.split(",")
arg_list = [argument.strip('"') for argument in arg_list]
msg = ""
for a in arg_list:
# ignore validations that reference a different table
if "." in a:
continue
if a not in df.columns:
msg += "{} column is required unless {} is present. ".format(col_name, a)
if msg:
return msg
else:
return None
|
python
|
{
"resource": ""
}
|
q11558
|
requiredIfGroup
|
train
|
def requiredIfGroup(col_name, arg, dm, df, *args):
"""
Col_name is required if other columns of
the group arg are present.
"""
group_name = arg
groups = set()
columns = df.columns
for col in columns:
if col not in dm.index:
continue
group = dm.loc[col]['group']
groups.add(group)
if group_name in groups:
if col_name in columns:
return None
else:
return "{} column is required if column group {} is used".format(col_name, group_name)
return None
|
python
|
{
"resource": ""
}
|
q11559
|
required
|
train
|
def required(col_name, arg, dm, df, *args):
"""
Col_name is required in df.columns.
Return error message if not.
"""
if col_name in df.columns:
return None
else:
return '"{}" column is required'.format(col_name)
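
# Hedged usage sketch (added for illustration; not part of the original function).
# Only pandas is assumed; the unused `arg` and `dm` parameters are passed as None.
if __name__ == "__main__":
    import pandas as pd
    example_df = pd.DataFrame(columns=["azimuth", "dip"])
    print(required("azimuth", None, None, example_df))  # None: column present
    print(required("lat", None, None, example_df))      # error message: column missing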
|
python
|
{
"resource": ""
}
|
q11560
|
validate_table
|
train
|
def validate_table(the_con, dtype, verbose=False, output_dir="."):
"""
    Return a tuple (dtype, bad_rows, bad_cols, missing_cols, missing_groups, failing_items)
    describing any problems found, or False if no errors are found.
Calls validate_df then parses its output.
"""
print("-I- Validating {}".format(dtype))
# grab dataframe
current_df = the_con.tables[dtype].df
# grab data model
current_dm = the_con.tables[dtype].data_model.dm[dtype]
# run all validations (will add columns to current_df)
current_df = validate_df(current_df, current_dm, the_con)
# get names of the added columns
value_col_names, present_col_names, type_col_names, missing_groups, validation_col_names = get_validation_col_names(current_df)
# print out failure messages
ofile = os.path.join(output_dir, "{}_errors.txt".format(dtype))
failing_items = get_row_failures(current_df, value_col_names,
type_col_names, verbose, outfile=ofile)
bad_rows, bad_cols, missing_cols = get_bad_rows_and_cols(current_df, validation_col_names,
value_col_names, type_col_names,
verbose=True)
# delete all validation rows
current_df.drop(validation_col_names, axis=1, inplace=True)
current_df.drop(missing_groups, axis=1, inplace=True)
if len(failing_items):
print("-I- Complete list of row errors can be found in {}".format(ofile))
return dtype, bad_rows, bad_cols, missing_cols, missing_groups, failing_items
elif len(missing_cols) or len(missing_groups):
print("-I- You are missing some required headers")
if len(missing_cols):
print("-I- You are missing these required headers: {}".format(", ".join(missing_cols)))
if len(missing_groups):
formatted_groups = [group[11:] for group in missing_groups]
print('-I- You need at least one header from these groups: {}'.format(", ".join(formatted_groups)))
else:
formatted_groups = []
return dtype, bad_rows, bad_cols, missing_cols, formatted_groups, failing_items
else:
print("-I- No row errors found!")
return False
|
python
|
{
"resource": ""
}
|
q11561
|
validate_contribution
|
train
|
def validate_contribution(the_con):
"""
    Go through a Contribution and validate each table.
    Return True if all tables pass, otherwise False.
"""
passing = True
for dtype in list(the_con.tables.keys()):
print("validating {}".format(dtype))
fail = validate_table(the_con, dtype)
if fail:
passing = False
        print('--')
    return passing
|
python
|
{
"resource": ""
}
|
q11562
|
get_degree_cols
|
train
|
def get_degree_cols(df):
"""
Take in a pandas DataFrame, and return a list of columns
that are in that DataFrame AND should be between 0 - 360 degrees.
"""
vals = ['lon_w', 'lon_e', 'lat_lon_precision', 'pole_lon',
'paleolon', 'paleolon_sigma',
'lon', 'lon_sigma', 'vgp_lon', 'paleo_lon', 'paleo_lon_sigma',
'azimuth', 'azimuth_dec_correction', 'dir_dec',
'geographic_precision', 'bed_dip_direction']
relevant_cols = list(set(vals).intersection(df.columns))
return relevant_cols
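
# Hedged usage sketch (added for illustration; not part of the original function).
# Only pandas is assumed; the column names below are just illustrative.
if __name__ == "__main__":
    import pandas as pd
    example_df = pd.DataFrame(columns=["dir_dec", "dir_inc", "azimuth", "site"])
    # prints the subset of columns that should fall in the 0-360 degree range,
    # e.g. "dir_dec" and "azimuth" (order not guaranteed, since a set is used)
    print(get_degree_cols(example_df))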
|
python
|
{
"resource": ""
}
|
q11563
|
extract_col_name
|
train
|
def extract_col_name(string):
"""
    Take a validation column name and split it.
    The string has a form like "presence_pass_azimuth_required",
    where "azimuth" is the MagIC column name, "presence_pass" identifies the
    validation type, and the trailing segment names the specific validation.
    The "_pass_" ending is stripped from the prefix and the trailing segment
    is dropped, so the example above returns ("presence", "azimuth").
"""
prefixes = ["presence_pass_", "value_pass_", "type_pass_"]
end = string.rfind("_")
for prefix in prefixes:
if string.startswith(prefix):
return prefix[:-6], string[len(prefix):end]
return string, string
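
# Hedged usage sketch (added for illustration; not part of the original function).
# Self-contained; the column names below are illustrative examples of the expected format.
if __name__ == "__main__":
    print(extract_col_name("value_pass_azimuth_max"))  # ('value', 'azimuth')
    print(extract_col_name("some_unrelated_column"))   # ('some_unrelated_column', 'some_unrelated_column')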
|
python
|
{
"resource": ""
}
|
q11564
|
main
|
train
|
def main():
"""
NAME
s_magic.py
DESCRIPTION
converts .s format data to measurements format.
SYNTAX
s_magic.py [command line options]
OPTIONS
-h prints help message and quits
-DM DATA_MODEL_NUM data model number (default is 3)
-f SFILE specifies the .s file name
-sig last column has sigma
-typ Anisotropy type: AMS,AARM,ATRM (default is AMS)
-F FILE specifies the specimens formatted file name
-usr USER specify username
-loc location specify location/study name
-spc NUM : specify number of characters to
designate a specimen, default = 0
-spn SPECNAME, this specimen has the name SPECNAME
-n first column has specimen name
-crd [s,g,t], specify coordinate system of data
s=specimen,g=geographic,t=tilt adjusted, default is 's'
-ncn NCON: naming convention
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
[4-Z] XXXXYYY: YYY is sample designation with Z characters from site XXX
[5] sample = site
[6] sample, site, location info in er_samples.txt -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
        NB: all others you will have to either customize yourself
            or e-mail ltauxe@ucsd.edu for help.
DEFAULT
FILE: specimens.txt
INPUT
X11,X22,X33,X12,X23,X13 (.s format file)
X11,X22,X33,X12,X23,X13,sigma (.s format file with -sig option)
SID, X11,X22,X33,X12,X23,X13 (.s format file with -n option)
OUTPUT
specimens.txt format file
NOTE
because .s files do not have specimen names or location information, the output MagIC files
will have to be changed prior to importing to data base.
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
data_model_num = pmag.get_named_arg("-DM", 3)
data_model_num = int(float(data_model_num))
sfile = pmag.get_named_arg("-f", reqd=True)
if data_model_num == 2:
anisfile = pmag.get_named_arg("-F", "rmag_anisotropy.txt")
else:
anisfile = pmag.get_named_arg("-F", "specimens.txt")
location = pmag.get_named_arg("-loc", "unknown")
user = pmag.get_named_arg("-usr", "")
sitename = pmag.get_named_arg("unknown", "")
specnum = pmag.get_named_arg("-spc", 0)
specnum = -int(specnum)
dir_path = pmag.get_named_arg("-WD", ".")
name = pmag.get_flag_arg_from_sys("-n")
sigma = pmag.get_flag_arg_from_sys("-sig")
spec = pmag.get_named_arg("-spn", "unknown")
atype = pmag.get_named_arg("-typ", 'AMS')
samp_con = pmag.get_named_arg("-ncn", "1")
#if '-sig' in sys.argv:
# sigma = 1
#if "-n" in sys.argv:
# name = 1
coord_type = pmag.get_named_arg("-crd", 's')
convert.s_magic(sfile, anisfile, dir_path, atype,
coord_type, sigma, samp_con, specnum,
location, spec, sitename, user, data_model_num, name)
|
python
|
{
"resource": ""
}
|
q11565
|
main
|
train
|
def main():
"""
NAME
incfish.py
DESCRIPTION
calculates fisher parameters from inc only data
INPUT FORMAT
takes inc data
SYNTAX
incfish.py [options] [< filename]
OPTIONS
-h prints help message and quits
-i for interactive filename entry
-f FILE, specify input file name
-F FILE, specify output file name
< filename for reading from standard input
OUTPUT
mean inc,Fisher inc, N, R, k, a95
NOTES
takes the absolute value of inclinations (to take into account reversals),
but returns gaussian mean if < 50.0, because of polarity ambiguity and
lack of bias.
"""
inc=[]
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-i' in sys.argv: # ask for filename
file=input("Enter file name with inc data: ")
inc=numpy.loadtxt(file)
elif '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
inc=numpy.loadtxt(file)
else:
        inc = numpy.loadtxt(sys.stdin,dtype=float)
ofile=""
if '-F' in sys.argv:
ind = sys.argv.index('-F')
ofile= sys.argv[ind+1]
        out = open(ofile, 'a')
#
#get doincfish to do the dirty work:
fpars= pmag.doincfish(inc)
outstring='%7.1f %7.1f %i %8.1f %7.1f %7.1f'%(fpars['ginc'],fpars['inc'],fpars['n'],fpars['r'],fpars['k'],fpars['alpha95'])
if ofile == "":
print(outstring)
    else:
        out.write(outstring+'\n')
        out.close()
|
python
|
{
"resource": ""
}
|
q11566
|
sort_diclist
|
train
|
def sort_diclist(undecorated, sort_on):
"""
Sort a list of dictionaries by the value in each
dictionary for the sorting key
Parameters
----------
undecorated : list of dicts
sort_on : str, numeric
key that is present in all dicts to sort on
Returns
---------
ordered list of dicts
Examples
---------
>>> lst = [{'key1': 10, 'key2': 2}, {'key1': 1, 'key2': 20}]
>>> sort_diclist(lst, 'key1')
[{'key2': 20, 'key1': 1}, {'key2': 2, 'key1': 10}]
>>> sort_diclist(lst, 'key2')
[{'key2': 2, 'key1': 10}, {'key2': 20, 'key1': 1}]
"""
decorated = [(len(dict_[sort_on]) if hasattr(dict_[sort_on], '__len__') else dict_[
sort_on], index) for (index, dict_) in enumerate(undecorated)]
decorated.sort()
return[undecorated[index] for (key, index) in decorated]
|
python
|
{
"resource": ""
}
|
q11567
|
convert_lat
|
train
|
def convert_lat(Recs):
"""
    keeps records that already have model_lat; for records with average_age <= 5 Ma
    substitutes site_lat for model_lat; otherwise tries to use average_inc to
    estimate the paleolatitude (plat).
"""
New = []
for rec in Recs:
if 'model_lat' in list(rec.keys()) and rec['model_lat'] != "":
New.append(rec)
elif 'average_age' in list(rec.keys()) and rec['average_age'] != "" and float(rec['average_age']) <= 5.:
if 'site_lat' in list(rec.keys()) and rec['site_lat'] != "":
rec['model_lat'] = rec['site_lat']
New.append(rec)
elif 'average_inc' in list(rec.keys()) and rec['average_inc'] != "":
rec['model_lat'] = '%7.1f' % (plat(float(rec['average_inc'])))
New.append(rec)
return New
|
python
|
{
"resource": ""
}
|
q11568
|
convert_directory_2_to_3
|
train
|
def convert_directory_2_to_3(meas_fname="magic_measurements.txt", input_dir=".",
output_dir=".", meas_only=False, data_model=None):
"""
    Convert a 2.5 measurements file into a 3.0 measurements file.
Merge and convert specimen, sample, site, and location data.
Also translates criteria data.
Parameters
----------
    meas_fname : name of measurement file (do not include full path,
        default is "magic_measurements.txt")
input_dir : name of input directory (default is ".")
output_dir : name of output directory (default is ".")
meas_only : boolean, convert only measurement data (default is False)
data_model : data_model3.DataModel object (default is None)
Returns
---------
NewMeas : 3.0 measurements data (output of pmag.convert_items)
upgraded : list of files successfully upgraded to 3.0
no_upgrade: list of 2.5 files not upgraded to 3.0
"""
convert = {'specimens': map_magic.spec_magic2_2_magic3_map,
'samples': map_magic.samp_magic2_2_magic3_map,
'sites': map_magic.site_magic2_2_magic3_map,
'locations': map_magic.loc_magic2_2_magic3_map,
'ages': map_magic.age_magic2_2_magic3_map}
full_name = os.path.join(input_dir, meas_fname)
if not os.path.exists(full_name):
print("-W- {} is not a file".format(full_name))
return False, False, False
# read in data model 2.5 measurements file
data2, filetype = magic_read(full_name)
# convert list of dicts to 3.0
NewMeas = convert_items(data2, map_magic.meas_magic2_2_magic3_map)
# write 3.0 output to file
ofile = os.path.join(output_dir, 'measurements.txt')
magic_write(ofile, NewMeas, 'measurements')
upgraded = []
if os.path.exists(ofile):
print("-I- 3.0 format measurements file was successfully created: {}".format(ofile))
upgraded.append("measurements.txt")
else:
print("-W- 3.0 format measurements file could not be created")
#
no_upgrade = []
if not meas_only:
# try to convert specimens, samples, sites, & locations
for dtype in ['specimens', 'samples', 'sites', 'locations', 'ages']:
mapping = convert[dtype]
res = convert_and_combine_2_to_3(
dtype, mapping, input_dir, output_dir, data_model)
if res:
upgraded.append(res)
# try to upgrade criteria file
if os.path.exists(os.path.join(input_dir, 'pmag_criteria.txt')):
crit_file = convert_criteria_file_2_to_3(input_dir=input_dir,
output_dir=output_dir,
data_model=data_model)[0]
if crit_file:
upgraded.append(crit_file)
else:
no_upgrade.append("pmag_criteria.txt")
# create list of all un-upgradeable files
for fname in os.listdir(input_dir):
if fname in ['measurements.txt', 'specimens.txt', 'samples.txt',
'sites.txt', 'locations.txt']:
continue
elif 'rmag' in fname:
no_upgrade.append(fname)
elif fname in ['pmag_results.txt', 'er_synthetics.txt', 'er_images.txt',
'er_plots.txt']:
no_upgrade.append(fname)
return NewMeas, upgraded, no_upgrade
|
python
|
{
"resource": ""
}
|
q11569
|
convert_criteria_file_2_to_3
|
train
|
def convert_criteria_file_2_to_3(fname="pmag_criteria.txt", input_dir=".",
output_dir=".", data_model=None):
"""
Convert a criteria file from 2.5 to 3.0 format and write it out to file
Parameters
----------
fname : string of filename (default "pmag_criteria.txt")
input_dir : string of input directory (default ".")
output_dir : string of output directory (default ".")
data_model : data_model.DataModel object (default None)
Returns
---------
outfile : string output criteria filename, or False
crit_container : cb.MagicDataFrame with 3.0 criteria table
"""
# get criteria from infile
fname = os.path.join(input_dir, fname)
if not os.path.exists(fname):
return False, None
orig_crit, warnings = read_criteria_from_file(fname, initialize_acceptance_criteria(),
data_model=2, return_warnings=True)
converted_crit = {}
# get data model including criteria map
if not data_model:
from . import data_model3 as dm3
DM = dm3.DataModel()
else:
DM = data_model
crit_map = DM.crit_map
# drop all empty mappings
stripped_crit_map = crit_map.dropna(axis='rows')
# go through criteria and get 3.0 name and criterion_operation
for crit in orig_crit:
if orig_crit[crit]['value'] in [-999, '-999', -999.]:
continue
if crit in stripped_crit_map.index:
criterion_operation = stripped_crit_map.loc[crit]['criteria_map']['criterion_operation']
table_col = stripped_crit_map.loc[crit]['criteria_map']['table_column']
orig_crit[crit]['criterion_operation'] = criterion_operation
converted_crit[table_col] = orig_crit[crit]
else:
print('-W- Could not convert {} to 3.0, skipping'.format(crit))
# switch axes
converted_df = pd.DataFrame(converted_crit).transpose()
# name the index
converted_df.index.name = "table_column"
# rename columns to 3.0 values
# 'category' --> criterion (uses defaults from initalize_default_criteria)
# 'pmag_criteria_code' --> criterion (uses what's actually in the translated file)
converted_df.rename(columns={'pmag_criteria_code': 'criterion', 'er_citation_names': 'citations',
'criteria_definition': 'description', 'value': 'criterion_value'},
inplace=True)
# drop unused columns
valid_cols = DM.dm['criteria'].index
drop_cols = set(converted_df.columns) - set(valid_cols)
converted_df.drop(drop_cols, axis='columns', inplace=True)
# move 'table_column' from being the index to being a column
converted_df['table_column'] = converted_df.index
crit_container = cb.MagicDataFrame(dtype='criteria', df=converted_df)
crit_container.write_magic_file(dir_path=output_dir)
return "criteria.txt", crit_container
|
python
|
{
"resource": ""
}
|
q11570
|
orient
|
train
|
def orient(mag_azimuth, field_dip, or_con):
"""
uses specified orientation convention to convert user supplied orientations
to laboratory azimuth and plunge
"""
or_con = str(or_con)
if mag_azimuth == -999:
return "", ""
if or_con == "1": # lab_mag_az=mag_az; sample_dip = -dip
return mag_azimuth, -field_dip
    if or_con == "2":  # lab_mag_az=mag_az-90.; sample_dip = -dip
return mag_azimuth - 90., -field_dip
if or_con == "3": # lab_mag_az=mag_az; sample_dip = 90.-dip
return mag_azimuth, 90. - field_dip
if or_con == "4": # lab_mag_az=mag_az; sample_dip = dip
return mag_azimuth, field_dip
if or_con == "5": # lab_mag_az=mag_az; sample_dip = dip-90.
return mag_azimuth, field_dip - 90.
if or_con == "6": # lab_mag_az=mag_az-90.; sample_dip = 90.-dip
return mag_azimuth - 90., 90. - field_dip
    if or_con == "7":  # lab_mag_az=mag_az-90.; sample_dip = 90.-dip
return mag_azimuth - 90., 90. - field_dip
print("Error in orientation convention")
|
python
|
{
"resource": ""
}
|
q11571
|
get_Sb
|
train
|
def get_Sb(data):
"""
    returns the VGP scatter (Sb), corrected for within-site scatter, for the data set
"""
Sb, N = 0., 0.
for rec in data:
delta = 90. - abs(rec['vgp_lat'])
if rec['average_k'] != 0:
k = rec['average_k']
L = rec['average_lat'] * np.pi / 180. # latitude in radians
Nsi = rec['average_nn']
K = old_div(k, (2. * (1. + 3. * np.sin(L)**2) /
(5. - 3. * np.sin(L)**2)))
Sw = old_div(81., np.sqrt(K))
else:
Sw, Nsi = 0, 1.
Sb += delta**2. - old_div((Sw**2), Nsi)
N += 1.
return np.sqrt(old_div(Sb, float(N - 1.)))
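
# Hedged usage sketch (added for illustration; not part of the original function).
# It assumes the surrounding module's imports (numpy as np, old_div) are available,
# as in pmagpy's pmag module; the record values below are made up.
if __name__ == "__main__":
    example_data = [
        {'vgp_lat': 80., 'average_k': 30., 'average_lat': 45., 'average_nn': 5},
        {'vgp_lat': 70., 'average_k': 20., 'average_lat': 40., 'average_nn': 6},
    ]
    print(get_Sb(example_data))  # VGP scatter in degrees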
|
python
|
{
"resource": ""
}
|
q11572
|
flip
|
train
|
def flip(di_block, combine=False):
"""
    determines the 'normal' direction along the principal eigenvector, then
    flips the reverse-mode directions to their antipodes
    Parameters
    ___________
    di_block : nested list of directions
    combine : if True, return a single nested list of D,I pairs with the
        reverse mode flipped to its antipode
    Return
    D1 : normal mode
    D2 : flipped reverse mode, as two separate DI blocks
"""
ppars = doprinc(di_block) # get principle direction
if combine:
D3 = []
D1, D2 = [], []
for rec in di_block:
ang = angle([rec[0], rec[1]], [ppars['dec'], ppars['inc']])
if ang > 90.:
d, i = (rec[0] - 180.) % 360., -rec[1]
D2.append([d, i])
if combine:
D3.append([d, i])
else:
D1.append([rec[0], rec[1]])
if combine:
D3.append([rec[0], rec[1]])
if combine:
return D3
else:
return D1, D2
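
# Hedged usage sketch (added for illustration; not part of the original function).
# It assumes the pmagpy package is installed, where this function and its helpers
# (doprinc, angle) live in pmagpy.pmag; the directions below are made up.
if __name__ == "__main__":
    from pmagpy import pmag
    di_block = [[5., 45.], [355., 50.], [185., -40.], [175., -45.]]
    normal, flipped_reverse = pmag.flip(di_block)
    print(normal)           # directions near the principal (normal) mode
    print(flipped_reverse)  # reverse-mode directions flipped to their antipodes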
|
python
|
{
"resource": ""
}
|
q11573
|
dovds
|
train
|
def dovds(data):
"""
calculates vector difference sum for demagnetization data
"""
vds, X = 0, []
for rec in data:
X.append(dir2cart(rec))
for k in range(len(X) - 1):
xdif = X[k + 1][0] - X[k][0]
ydif = X[k + 1][1] - X[k][1]
zdif = X[k + 1][2] - X[k][2]
vds += np.sqrt(xdif**2 + ydif**2 + zdif**2)
vds += np.sqrt(X[-1][0]**2 + X[-1][1]**2 + X[-1][2]**2)
return vds
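
# Hedged usage sketch (added for illustration; not part of the original function).
# It assumes the pmagpy package is installed (dir2cart and numpy come from pmagpy.pmag);
# the demagnetization data below are made-up [dec, inc, intensity] steps.
if __name__ == "__main__":
    from pmagpy import pmag
    demag_data = [[350., 30., 1.0], [352., 28., 0.8], [351., 29., 0.4], [350., 30., 0.1]]
    print(pmag.dovds(demag_data))  # vector difference sum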
|
python
|
{
"resource": ""
}
|
q11574
|
get_specs
|
train
|
def get_specs(data):
"""
Takes a magic format file and returns a list of unique specimen names
"""
# sort the specimen names
speclist = []
for rec in data:
try:
spec = rec["er_specimen_name"]
except KeyError as e:
spec = rec["specimen"]
if spec not in speclist:
speclist.append(spec)
speclist.sort()
return speclist
|
python
|
{
"resource": ""
}
|
q11575
|
mark_dmag_rec
|
train
|
def mark_dmag_rec(s, ind, data):
"""
Edits demagnetization data to mark "bad" points with measurement_flag
"""
datablock = []
for rec in data:
if rec['er_specimen_name'] == s:
meths = rec['magic_method_codes'].split(':')
if 'LT-NO' in meths or 'LT-AF-Z' in meths or 'LT-T-Z' in meths:
datablock.append(rec)
dmagrec = datablock[ind]
for k in range(len(data)):
meths = data[k]['magic_method_codes'].split(':')
if 'LT-NO' in meths or 'LT-AF-Z' in meths or 'LT-T-Z' in meths:
if data[k]['er_specimen_name'] == s:
if data[k]['treatment_temp'] == dmagrec['treatment_temp'] and data[k]['treatment_ac_field'] == dmagrec['treatment_ac_field']:
if data[k]['measurement_dec'] == dmagrec['measurement_dec'] and data[k]['measurement_inc'] == dmagrec['measurement_inc'] and data[k]['measurement_magn_moment'] == dmagrec['measurement_magn_moment']:
if data[k]['measurement_flag'] == 'g':
flag = 'b'
else:
flag = 'g'
data[k]['measurement_flag'] = flag
break
return data
|
python
|
{
"resource": ""
}
|
q11576
|
open_file
|
train
|
def open_file(infile, verbose=True):
"""
Open file and return a list of the file's lines.
Try to use utf-8 encoding, and if that fails use Latin-1.
Parameters
----------
infile : str
full path to file
Returns
----------
data: list
all lines in the file
"""
try:
with codecs.open(infile, "r", "utf-8") as f:
lines = list(f.readlines())
# file might not exist
except FileNotFoundError:
if verbose:
print(
'-W- You are trying to open a file: {} that does not exist'.format(infile))
return []
# encoding might be wrong
except UnicodeDecodeError:
try:
with codecs.open(infile, "r", "Latin-1") as f:
print(
'-I- Using less strict decoding for {}, output may have formatting errors'.format(infile))
lines = list(f.readlines())
# if file exists, and encoding is correct, who knows what the problem is
except Exception as ex:
print("-W- ", type(ex), ex)
return []
except Exception as ex:
print("-W- ", type(ex), ex)
return []
# don't leave a blank line at the end
i = 0
    while i < 10 and lines:
if not len(lines[-1].strip("\n").strip("\t")):
lines = lines[:-1]
i += 1
else:
i = 10
return lines
|
python
|
{
"resource": ""
}
|
q11577
|
putout
|
train
|
def putout(ofile, keylist, Rec):
"""
writes out a magic format record to ofile
"""
pmag_out = open(ofile, 'a')
outstring = ""
for key in keylist:
try:
outstring = outstring + '\t' + str(Rec[key]).strip()
except:
print(key, Rec[key])
# raw_input()
outstring = outstring + '\n'
pmag_out.write(outstring[1:])
pmag_out.close()
|
python
|
{
"resource": ""
}
|
q11578
|
first_rec
|
train
|
def first_rec(ofile, Rec, file_type):
"""
opens the file ofile as a magic template file with headers as the keys to Rec
"""
keylist = []
opened = False
# sometimes Windows needs a little extra time to open a file
# or else it throws an error
while not opened:
try:
pmag_out = open(ofile, 'w')
opened = True
except IOError:
time.sleep(1)
outstring = "tab \t" + file_type + "\n"
pmag_out.write(outstring)
keystring = ""
for key in list(Rec.keys()):
keystring = keystring + '\t' + key.strip()
keylist.append(key)
keystring = keystring + '\n'
pmag_out.write(keystring[1:])
pmag_out.close()
return keylist
|
python
|
{
"resource": ""
}
|
q11579
|
magic_write_old
|
train
|
def magic_write_old(ofile, Recs, file_type):
"""
writes out a magic format list of dictionaries to ofile
Parameters
_________
ofile : path to output file
Recs : list of dictionaries in MagIC format
file_type : MagIC table type (e.g., specimens)
Effects :
writes a MagIC formatted file from Recs
"""
if len(Recs) < 1:
print ('nothing to write')
return
pmag_out = open(ofile, 'w')
outstring = "tab \t" + file_type + "\n"
pmag_out.write(outstring)
keystring = ""
keylist = []
for key in list(Recs[0].keys()):
keylist.append(key)
keylist.sort()
for key in keylist:
keystring = keystring + '\t' + key.strip()
keystring = keystring + '\n'
pmag_out.write(keystring[1:])
for Rec in Recs:
outstring = ""
for key in keylist:
try:
outstring = outstring + '\t' + str(Rec[key].strip())
except:
if 'er_specimen_name' in list(Rec.keys()):
print(Rec['er_specimen_name'])
elif 'er_specimen_names' in list(Rec.keys()):
print(Rec['er_specimen_names'])
print(key, Rec[key])
# raw_input()
outstring = outstring + '\n'
pmag_out.write(outstring[1:])
pmag_out.close()
|
python
|
{
"resource": ""
}
|
q11580
|
dotilt_V
|
train
|
def dotilt_V(indat):
"""
Does a tilt correction on an array with rows of dec,inc bedding dip direction and dip.
Parameters
----------
    indat : nested array of [declination, inclination, bedding dip direction, bedding dip] rows,
        i.e. [[dec1, inc1, bed_az1, bed_dip1],[dec2, inc2, bed_az2, bed_dip2]...]
Returns
-------
dec,inc : arrays of rotated declination, inclination
"""
indat = indat.transpose()
# unpack input array into separate arrays
dec, inc, bed_az, bed_dip = indat[0], indat[1], indat[2], indat[3]
rad = old_div(np.pi, 180.) # convert to radians
Dir = np.array([dec, inc]).transpose()
X = dir2cart(Dir).transpose() # get cartesian coordinates
N = np.size(dec)
# get some sines and cosines of new coordinate system
sa, ca = -np.sin(bed_az * rad), np.cos(bed_az * rad)
cdp, sdp = np.cos(bed_dip * rad), np.sin(bed_dip * rad)
# do the rotation
xc = X[0] * (sa * sa + ca * ca * cdp) + X[1] * \
(ca * sa * (1. - cdp)) + X[2] * sdp * ca
yc = X[0] * ca * sa * (1. - cdp) + X[1] * \
(ca * ca + sa * sa * cdp) - X[2] * sa * sdp
zc = X[0] * ca * sdp - X[1] * sdp * sa - X[2] * cdp
# convert back to direction:
cart = np.array([xc, yc, -zc]).transpose()
Dir = cart2dir(cart).transpose()
# return declination, inclination arrays of rotated direction
return Dir[0], Dir[1]
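
# Hedged usage sketch (added for illustration; not part of the original function).
# It assumes the pmagpy package is installed (numpy, dir2cart and cart2dir come from
# pmagpy.pmag); the rows below are made-up [dec, inc, bed_az, bed_dip] values.
if __name__ == "__main__":
    import numpy as np
    from pmagpy import pmag
    indat = np.array([[90., 20., 0., 10.], [270., 45., 90., 20.]])
    decs, incs = pmag.dotilt_V(indat)
    print(decs, incs)  # tilt-corrected declinations and inclinations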
|
python
|
{
"resource": ""
}
|
q11581
|
find_samp_rec
|
train
|
def find_samp_rec(s, data, az_type):
"""
find the orientation info for samp s
"""
datablock, or_error, bed_error = [], 0, 0
orient = {}
orient["sample_dip"] = ""
orient["sample_azimuth"] = ""
orient['sample_description'] = ""
for rec in data:
if rec["er_sample_name"].lower() == s.lower():
if 'sample_orientation_flag' in list(rec.keys()) and rec['sample_orientation_flag'] == 'b':
orient['sample_orientation_flag'] = 'b'
return orient
if "magic_method_codes" in list(rec.keys()) and az_type != "0":
methods = rec["magic_method_codes"].replace(" ", "").split(":")
if az_type in methods and "sample_azimuth" in list(rec.keys()) and rec["sample_azimuth"] != "":
orient["sample_azimuth"] = float(rec["sample_azimuth"])
if "sample_dip" in list(rec.keys()) and rec["sample_dip"] != "":
orient["sample_dip"] = float(rec["sample_dip"])
if "sample_bed_dip_direction" in list(rec.keys()) and rec["sample_bed_dip_direction"] != "":
orient["sample_bed_dip_direction"] = float(
rec["sample_bed_dip_direction"])
if "sample_bed_dip" in list(rec.keys()) and rec["sample_bed_dip"] != "":
orient["sample_bed_dip"] = float(rec["sample_bed_dip"])
else:
if "sample_azimuth" in list(rec.keys()):
orient["sample_azimuth"] = float(rec["sample_azimuth"])
if "sample_dip" in list(rec.keys()):
orient["sample_dip"] = float(rec["sample_dip"])
if "sample_bed_dip_direction" in list(rec.keys()):
orient["sample_bed_dip_direction"] = float(
rec["sample_bed_dip_direction"])
if "sample_bed_dip" in list(rec.keys()):
orient["sample_bed_dip"] = float(rec["sample_bed_dip"])
if 'sample_description' in list(rec.keys()):
orient['sample_description'] = rec['sample_description']
if orient["sample_azimuth"] != "":
break
return orient
|
python
|
{
"resource": ""
}
|
q11582
|
vspec
|
train
|
def vspec(data):
"""
Takes the vector mean of replicate measurements at a given step
"""
vdata, Dirdata, step_meth = [], [], []
tr0 = data[0][0] # set beginning treatment
data.append("Stop")
k, R = 1, 0
for i in range(k, len(data)):
Dirdata = []
if data[i][0] != tr0:
if i == k: # sample is unique
vdata.append(data[i - 1])
step_meth.append(" ")
else: # sample is not unique
for l in range(k - 1, i):
Dirdata.append([data[l][1], data[l][2], data[l][3]])
dir, R = vector_mean(Dirdata)
vdata.append([data[i - 1][0], dir[0], dir[1],
old_div(R, (i - k + 1)), '1', 'g'])
step_meth.append("DE-VM")
tr0 = data[i][0]
k = i + 1
if tr0 == "stop":
break
del data[-1]
return step_meth, vdata
|
python
|
{
"resource": ""
}
|
q11583
|
Vdiff
|
train
|
def Vdiff(D1, D2):
"""
finds the vector difference between two directions D1,D2
"""
A = dir2cart([D1[0], D1[1], 1.])
B = dir2cart([D2[0], D2[1], 1.])
C = []
for i in range(3):
C.append(A[i] - B[i])
return cart2dir(C)
|
python
|
{
"resource": ""
}
|
q11584
|
cart2dir
|
train
|
def cart2dir(cart):
"""
    Converts a direction in cartesian coordinates into declination, inclination
Parameters
----------
cart : input list of [x,y,z] or list of lists [[x1,y1,z1],[x2,y2,z2]...]
Returns
-------
direction_array : returns an array of [declination, inclination, intensity]
Examples
--------
>>> pmag.cart2dir([0,1,0])
array([ 90., 0., 1.])
"""
cart = np.array(cart)
rad = old_div(np.pi, 180.) # constant to convert degrees to radians
if len(cart.shape) > 1:
Xs, Ys, Zs = cart[:, 0], cart[:, 1], cart[:, 2]
else: # single vector
Xs, Ys, Zs = cart[0], cart[1], cart[2]
if np.iscomplexobj(Xs):
Xs = Xs.real
if np.iscomplexobj(Ys):
Ys = Ys.real
if np.iscomplexobj(Zs):
Zs = Zs.real
Rs = np.sqrt(Xs**2 + Ys**2 + Zs**2) # calculate resultant vector length
# calculate declination taking care of correct quadrants (arctan2) and
# making modulo 360.
Decs = (old_div(np.arctan2(Ys, Xs), rad)) % 360.
try:
# calculate inclination (converting to degrees) #
Incs = old_div(np.arcsin(old_div(Zs, Rs)), rad)
except:
print('trouble in cart2dir') # most likely division by zero somewhere
return np.zeros(3)
return np.array([Decs, Incs, Rs]).transpose()
|
python
|
{
"resource": ""
}
|
q11585
|
findrec
|
train
|
def findrec(s, data):
"""
finds all the records belonging to s in data
"""
datablock = []
for rec in data:
if s == rec[0]:
datablock.append([rec[1], rec[2], rec[3], rec[4]])
return datablock
|
python
|
{
"resource": ""
}
|
q11586
|
circ
|
train
|
def circ(dec, dip, alpha):
"""
    function to calculate points on a circle about dec,dip with angle alpha
"""
rad = old_div(np.pi, 180.)
D_out, I_out = [], []
dec, dip, alpha = dec * rad, dip * rad, alpha * rad
dec1 = dec + old_div(np.pi, 2.)
isign = 1
if dip != 0:
isign = (old_div(abs(dip), dip))
dip1 = (dip - isign * (old_div(np.pi, 2.)))
t = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
v = [0, 0, 0]
t[0][2] = np.cos(dec) * np.cos(dip)
t[1][2] = np.sin(dec) * np.cos(dip)
t[2][2] = np.sin(dip)
t[0][1] = np.cos(dec) * np.cos(dip1)
t[1][1] = np.sin(dec) * np.cos(dip1)
t[2][1] = np.sin(dip1)
t[0][0] = np.cos(dec1)
t[1][0] = np.sin(dec1)
t[2][0] = 0
for i in range(101):
psi = float(i) * np.pi / 50.
v[0] = np.sin(alpha) * np.cos(psi)
v[1] = np.sin(alpha) * np.sin(psi)
v[2] = np.sqrt(abs(1. - v[0]**2 - v[1]**2))
elli = [0, 0, 0]
for j in range(3):
for k in range(3):
elli[j] = elli[j] + t[j][k] * v[k]
Dir = cart2dir(elli)
D_out.append(Dir[0])
I_out.append(Dir[1])
return D_out, I_out
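
# Hedged usage sketch (added for illustration; not part of the original function).
# It assumes the pmagpy package is installed (numpy, cart2dir and old_div come from
# pmagpy.pmag); the numbers below are made up.
if __name__ == "__main__":
    from pmagpy import pmag
    # 101 points on a circle of radius 5 degrees about dec=350, dip=60
    D_out, I_out = pmag.circ(350., 60., 5.)
    print(len(D_out), D_out[0], I_out[0])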
|
python
|
{
"resource": ""
}
|
q11587
|
getnames
|
train
|
def getnames():
"""
    interactively collects names and returns them as a colon-delimited string
"""
namestring = ""
addmore = 1
while addmore:
scientist = input("Enter name - <Return> when done ")
if scientist != "":
namestring = namestring + ":" + scientist
else:
namestring = namestring[1:]
addmore = 0
return namestring
|
python
|
{
"resource": ""
}
|
q11588
|
gha
|
train
|
def gha(julian_day, f):
"""
    returns the Greenwich hour angle and declination of the sun
"""
rad = old_div(np.pi, 180.)
d = julian_day - 2451545.0 + f
L = 280.460 + 0.9856474 * d
g = 357.528 + 0.9856003 * d
L = L % 360.
g = g % 360.
# ecliptic longitude
lamb = L + 1.915 * np.sin(g * rad) + .02 * np.sin(2 * g * rad)
# obliquity of ecliptic
epsilon = 23.439 - 0.0000004 * d
# right ascension (in same quadrant as lambda)
t = (np.tan(old_div((epsilon * rad), 2)))**2
r = old_div(1, rad)
rl = lamb * rad
alpha = lamb - r * t * np.sin(2 * rl) + \
(old_div(r, 2)) * t * t * np.sin(4 * rl)
# alpha=mod(alpha,360.0)
# declination
delta = np.sin(epsilon * rad) * np.sin(lamb * rad)
delta = old_div(np.arcsin(delta), rad)
# equation of time
eqt = (L - alpha)
#
utm = f * 24 * 60
H = old_div(utm, 4) + eqt + 180
H = H % 360.0
return H, delta
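
# Hedged usage sketch (added for illustration; not part of the original function).
# It assumes the pmagpy package is installed (numpy and old_div come from pmagpy.pmag,
# and julian() is defined alongside this function); the date below is arbitrary.
if __name__ == "__main__":
    from pmagpy import pmag
    jd = pmag.julian(6, 1, 2007)   # julian day for June 1, 2007
    H, delta = pmag.gha(jd, 0.5)   # f=0.5 is the fraction of a day, i.e. 12:00 UT
    print(H, delta)                # Greenwich hour angle and solar declination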
|
python
|
{
"resource": ""
}
|
q11589
|
julian
|
train
|
def julian(mon, day, year):
"""
returns julian day
"""
ig = 15 + 31 * (10 + 12 * 1582)
if year == 0:
print("Julian no can do")
return
if year < 0:
year = year + 1
if mon > 2:
julian_year = year
julian_month = mon + 1
else:
julian_year = year - 1
julian_month = mon + 13
j1 = int(365.25 * julian_year)
j2 = int(30.6001 * julian_month)
j3 = day + 1720995
julian_day = j1 + j2 + j3
if day + 31 * (mon + 12 * year) >= ig:
jadj = int(0.01 * julian_year)
julian_day = julian_day + 2 - jadj + int(0.25 * jadj)
return julian_day
|
python
|
{
"resource": ""
}
|
q11590
|
fillkeys
|
train
|
def fillkeys(Recs):
"""
reconciles keys of dictionaries within Recs.
"""
keylist, OutRecs = [], []
for rec in Recs:
for key in list(rec.keys()):
if key not in keylist:
keylist.append(key)
for rec in Recs:
for key in keylist:
if key not in list(rec.keys()):
rec[key] = ""
OutRecs.append(rec)
return OutRecs, keylist
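
# Hedged usage sketch (added for illustration; not part of the original function).
# Self-contained; the records below are made up.
if __name__ == "__main__":
    recs = [{'er_site_name': 'A1', 'site_lat': '35.2'}, {'er_site_name': 'A2'}]
    out_recs, keys = fillkeys(recs)
    print(keys)      # union of all keys found in the records
    print(out_recs)  # every record now has every key, missing values filled with ""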
|
python
|
{
"resource": ""
}
|
q11591
|
fisher_mean
|
train
|
def fisher_mean(data):
"""
    Calculates the Fisher mean and associated parameters from a di_block
Parameters
----------
    data : a nested list (di_block) of [dec,inc] or [dec,inc,intensity]
Returns
-------
fpars : dictionary containing the Fisher mean and statistics
dec : mean declination
inc : mean inclination
r : resultant vector length
n : number of data points
k : Fisher k value
csd : Fisher circular standard deviation
alpha95 : Fisher circle of 95% confidence
"""
R, Xbar, X, fpars = 0, [0, 0, 0], [], {}
N = len(data)
if N < 2:
return fpars
X = dir2cart(data)
for i in range(len(X)):
for c in range(3):
Xbar[c] += X[i][c]
for c in range(3):
R += Xbar[c]**2
R = np.sqrt(R)
for c in range(3):
Xbar[c] = Xbar[c]/R
dir = cart2dir(Xbar)
fpars["dec"] = dir[0]
fpars["inc"] = dir[1]
fpars["n"] = N
fpars["r"] = R
if N != R:
k = (N - 1.) / (N - R)
fpars["k"] = k
csd = 81./np.sqrt(k)
else:
fpars['k'] = 'inf'
csd = 0.
b = 20.**(1./(N - 1.)) - 1
a = 1 - b * (N - R) / R
if a < -1:
a = -1
a95 = np.degrees(np.arccos(a))
fpars["alpha95"] = a95
fpars["csd"] = csd
if a < 0:
fpars["alpha95"] = 180.0
return fpars
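
# Hedged usage sketch (added for illustration; not part of the original function).
# It assumes the pmagpy package is installed, where this function and its helpers
# (dir2cart, cart2dir, numpy) live in pmagpy.pmag; the directions below are made up.
if __name__ == "__main__":
    from pmagpy import pmag
    di_block = [[350.5, 41.2], [357.8, 39.0], [345.1, 45.6], [352.3, 38.7]]
    fpars = pmag.fisher_mean(di_block)
    print(fpars['dec'], fpars['inc'], fpars['n'], fpars['alpha95'])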
|
python
|
{
"resource": ""
}
|
q11592
|
gausspars
|
train
|
def gausspars(data):
"""
calculates gaussian statistics for data
"""
N, mean, d = len(data), 0., 0.
if N < 1:
return "", ""
if N == 1:
return data[0], 0
for j in range(N):
mean += old_div(data[j], float(N))
for j in range(N):
d += (data[j] - mean)**2
stdev = np.sqrt(d * (1./(float(N - 1))))
return mean, stdev
|
python
|
{
"resource": ""
}
|
q11593
|
weighted_mean
|
train
|
def weighted_mean(data):
"""
    calculates the weighted mean and standard deviation of data given as [value, weight] pairs
"""
W, N, mean, d = 0, len(data), 0, 0
if N < 1:
return "", ""
if N == 1:
return data[0][0], 0
for x in data:
W += x[1] # sum of the weights
for x in data:
mean += old_div((float(x[1]) * float(x[0])), float(W))
for x in data:
d += (old_div(float(x[1]), float(W))) * (float(x[0]) - mean)**2
stdev = np.sqrt(d * (old_div(1., (float(N - 1)))))
return mean, stdev
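
# Hedged usage sketch (added for illustration; not part of the original function).
# It assumes the surrounding module's imports (numpy as np, old_div) are available,
# as in pmagpy's pmag module; each entry is a made-up [value, weight] pair.
if __name__ == "__main__":
    example_data = [[10.0, 1.], [12.0, 2.], [11.0, 1.]]
    mean, stdev = weighted_mean(example_data)
    print(mean, stdev)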
|
python
|
{
"resource": ""
}
|
q11594
|
vclose
|
train
|
def vclose(L, V):
"""
    gets the vector on the great circle with pole L that lies closest to V
"""
lam, X = 0, []
for k in range(3):
lam = lam + V[k] * L[k]
beta = np.sqrt(1. - lam**2)
for k in range(3):
X.append((old_div((V[k] - lam * L[k]), beta)))
return X
|
python
|
{
"resource": ""
}
|
q11595
|
calculate_best_fit_vectors
|
train
|
def calculate_best_fit_vectors(L, E, V, n_planes):
"""
Calculates the best fit vectors for a set of plane interpretations used in fisher mean calculations
    @param: L - a list of the "EL, EM, EN" arrays of MM88, i.e. the cartesian form of the dec and inc of each plane interpretation
    @param: E - the sum of the cartesian coordinates of all the line fits to be used in the mean
    @param: V - initial direction to start iterating from to get plane best fits
    @param: n_planes - number of plane interpretations in L
    @returns: nested list of n_planes by 3 dimension where the 3 are the cartesian dimensions of the best fit vector
"""
U, XV = E[:], [] # make a copy of E to prevent mutation
for pole in L:
XV.append(vclose(pole, V)) # get some points on the great circle
for c in range(3):
U[c] = U[c] + XV[-1][c]
# iterate to find best agreement
angle_tol = 1.
while angle_tol > 0.1:
angles = []
for k in range(n_planes):
for c in range(3):
U[c] = U[c] - XV[k][c]
R = np.sqrt(U[0]**2 + U[1]**2 + U[2]**2)
for c in range(3):
V[c] = old_div(U[c], R)
XX = vclose(L[k], V)
ang = XX[0] * XV[k][0] + XX[1] * XV[k][1] + XX[2] * XV[k][2]
angles.append(np.arccos(ang) * 180. / np.pi)
for c in range(3):
XV[k][c] = XX[c]
U[c] = U[c] + XX[c]
amax = -1
for ang in angles:
if ang > amax:
amax = ang
angle_tol = amax
return XV
|
python
|
{
"resource": ""
}
|
q11596
|
process_data_for_mean
|
train
|
def process_data_for_mean(data, direction_type_key):
"""
    takes a list of dicts with dec and inc (plus direction_type or method_codes where available),
    sorts the data into lines and planes, and processes it for Fisher mean calculations
@param: data - list of dicts with dec inc and some manner of PCA type info
@param: direction_type_key - key that indicates the direction type variable in the dictionaries of data
@return: tuple with values - (
list of lists with [dec, inc, 1.] for all lines
number of line
list of lists with [EL,EM,EN] of all planes
number of planes
list of sum of the cartezian components of all lines
)
"""
dec_key, inc_key, meth_key = 'dec', 'inc', 'magic_method_codes' # data model 2.5
if 'dir_dec' in data[0].keys(): # this is data model 3.0
dec_key, inc_key, meth_key = 'dir_dec', 'dir_inc', 'method_codes'
n_lines, n_planes = 0, 0
L, fdata = [], []
E = [0, 0, 0]
# sort data into lines and planes and collect cartesian coordinates
for rec in data:
cart = dir2cart([float(rec[dec_key]), float(rec[inc_key])])[0]
if direction_type_key in list(rec.keys()):
if rec[direction_type_key] == 'p': # this is a pole to a plane
n_planes += 1
L.append(cart) # this is the "EL, EM, EN" array of MM88
else: # this is a line
n_lines += 1
# collect data for fisher calculation
fdata.append([float(rec[dec_key]), float(rec[inc_key]), 1.])
E[0] += cart[0]
E[1] += cart[1]
E[2] += cart[2]
elif 'method_codes' in list(rec.keys()):
if "DE-BFP" in rec[meth_key]: # this is a pole to a plane
n_planes += 1
L.append(cart) # this is the "EL, EM, EN" array of MM88
else: # this is a line
n_lines += 1
# collect data for fisher calculation
fdata.append([rec[dec_key], rec[inc_key], 1.])
E[0] += cart[0]
E[1] += cart[1]
E[2] += cart[2]
elif meth_key in list(rec.keys()):
if "DE-BFP" in rec[meth_key]: # this is a pole to a plane
n_planes += 1
L.append(cart) # this is the "EL, EM, EN" array of MM88
else: # this is a line
n_lines += 1
# collect data for fisher calculation
fdata.append([rec[dec_key], rec[inc_key], 1.])
E[0] += cart[0]
E[1] += cart[1]
E[2] += cart[2]
else:
# EVERYTHING IS A LINE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
n_lines += 1
# collect data for fisher calculation
fdata.append([rec[dec_key], rec[inc_key], 1.])
E[0] += cart[0]
E[1] += cart[1]
E[2] += cart[2]
return fdata, n_lines, L, n_planes, E
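
# Hedged usage sketch (added for illustration; not part of the original function).
# It assumes the surrounding module's dir2cart helper is available, as in pmagpy's
# pmag module; the records below are made-up 2.5-style interpretations.
if __name__ == "__main__":
    example_data = [
        {'dec': 350., 'inc': 30., 'magic_method_codes': 'DE-BFL'},   # a line fit
        {'dec': 160., 'inc': -20., 'magic_method_codes': 'DE-BFP'},  # a pole to a plane
    ]
    fdata, n_lines, L, n_planes, E = process_data_for_mean(example_data, 'direction_type')
    print(n_lines, n_planes)  # 1 line and 1 plane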
|
python
|
{
"resource": ""
}
|
q11597
|
cdfout
|
train
|
def cdfout(data, file):
"""
spits out the cdf for data to file
"""
f = open(file, "w")
data.sort()
for j in range(len(data)):
y = old_div(float(j), float(len(data)))
out = str(data[j]) + ' ' + str(y) + '\n'
f.write(out)
f.close()
|
python
|
{
"resource": ""
}
|
q11598
|
dobingham
|
train
|
def dobingham(di_block):
"""
Calculates the Bingham mean and associated statistical parameters from
directions that are input as a di_block
Parameters
----------
di_block : a nested list of [dec,inc] or [dec,inc,intensity]
Returns
-------
bpars : dictionary containing the Bingham mean and associated statistics
dictionary keys
dec : mean declination
inc : mean inclination
n : number of datapoints
Eta : major ellipse
Edec : declination of major ellipse axis
Einc : inclination of major ellipse axis
Zeta : minor ellipse
Zdec : declination of minor ellipse axis
Zinc : inclination of minor ellipse axis
"""
control, X, bpars = [], [], {}
N = len(di_block)
if N < 2:
return bpars
#
# get cartesian coordinates
#
for rec in di_block:
X.append(dir2cart([rec[0], rec[1], 1.]))
#
# put in T matrix
#
T = np.array(Tmatrix(X))
t, V = tauV(T)
w1, w2, w3 = t[2], t[1], t[0]
k1, k2 = binglookup(w1, w2)
PDir = cart2dir(V[0])
EDir = cart2dir(V[1])
ZDir = cart2dir(V[2])
if PDir[1] < 0:
PDir[0] += 180.
PDir[1] = -PDir[1]
PDir[0] = PDir[0] % 360.
bpars["dec"] = PDir[0]
bpars["inc"] = PDir[1]
bpars["Edec"] = EDir[0]
bpars["Einc"] = EDir[1]
bpars["Zdec"] = ZDir[0]
bpars["Zinc"] = ZDir[1]
bpars["n"] = N
#
# now for Bingham ellipses.
#
fac1, fac2 = -2 * N * (k1) * (w3 - w1), -2 * N * (k2) * (w3 - w2)
sig31, sig32 = np.sqrt(old_div(1., fac1)), np.sqrt(old_div(1., fac2))
bpars["Zeta"], bpars["Eta"] = 2.45 * sig31 * \
180. / np.pi, 2.45 * sig32 * 180. / np.pi
return bpars
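
# Hedged usage sketch (added for illustration; not part of the original function).
# It assumes the pmagpy package is installed, where this function and its helpers
# (dir2cart, cart2dir, Tmatrix, tauV, binglookup) live in pmagpy.pmag; the
# directions below are made up.
if __name__ == "__main__":
    from pmagpy import pmag
    di_block = [[350.5, 41.2], [357.8, 39.0], [345.1, 45.6], [352.3, 38.7], [5.2, 40.1]]
    bpars = pmag.dobingham(di_block)
    print(bpars['dec'], bpars['inc'], bpars['Zeta'], bpars['Eta'])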
|
python
|
{
"resource": ""
}
|
q11599
|
doflip
|
train
|
def doflip(dec, inc):
"""
    flips upper hemisphere (negative inclination) directions to the lower hemisphere
"""
if inc < 0:
inc = -inc
dec = (dec + 180.) % 360.
return dec, inc
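
# Hedged usage sketch (added for illustration; not part of the original function).
# Self-contained; a direction with negative (upward) inclination is flipped to its
# antipode, while a direction already in the lower hemisphere is returned unchanged.
if __name__ == "__main__":
    print(doflip(210., -45.))  # -> (30.0, 45.0)
    print(doflip(30., 45.))    # -> (30.0, 45.0), unchanged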
|
python
|
{
"resource": ""
}
|