| _id (string, 2–7 chars) | title (string, 1–88 chars) | partition (string, 3 classes) | text (string, 75–19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q11700
|
Menus.add_drop_down
|
train
|
def add_drop_down(self, col_number, col_label):
"""
Add a correctly formatted drop-down-menu for given col_label, if required.
Otherwise do nothing.
"""
if col_label in ['magic_method_codes', 'magic_method_codes++']:
self.add_method_drop_down(col_number, col_label)
if col_label in vocab.possible_vocabularies:
if col_number not in list(self.choices.keys()): # if not already assigned above
self.grid.SetColLabelValue(col_number, col_label + "**") # mark it as using a controlled vocabulary
url = 'http://api.earthref.org/MagIC/vocabularies/{}.json'.format(col_label)
controlled_vocabulary = pd.io.json.read_json(url)
stripped_list = []
for item in controlled_vocabulary[col_label][0]:
try:
stripped_list.append(str(item['item']))
except UnicodeEncodeError:
# skips items with non ASCII characters
pass
                #stripped_list = [item['item'] for item in controlled_vocabulary[col_label][0]]
if len(stripped_list) > 100:
# split out the list alphabetically, into a dict of lists {'A': ['alpha', 'artist'], 'B': ['beta', 'beggar']...}
dictionary = {}
for item in stripped_list:
letter = item[0].upper()
if letter not in list(dictionary.keys()):
dictionary[letter] = []
dictionary[letter].append(item)
stripped_list = dictionary
two_tiered = True if isinstance(stripped_list, dict) else False
self.choices[col_number] = (stripped_list, two_tiered)
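# A hedged standalone sketch of the alphabetical bucketing above (toy values,
# not from the source), using dict.setdefault instead of the explicit key check:
#   items = ['alpha', 'artist', 'beta']
#   buckets = {}
#   for item in items:
#       buckets.setdefault(item[0].upper(), []).append(item)
#   # buckets == {'A': ['alpha', 'artist'], 'B': ['beta']}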
|
python
|
{
"resource": ""
}
|
q11701
|
Menus.clean_up
|
train
|
    def clean_up(self):
        """
        De-select grid columns and refresh the grid.
        """
if self.selected_col:
col_label_value = self.grid.GetColLabelValue(self.selected_col)
col_label_value = col_label_value.strip('\nEDIT ALL')
self.grid.SetColLabelValue(self.selected_col, col_label_value)
for row in range(self.grid.GetNumberRows()):
self.grid.SetCellBackgroundColour(row, self.selected_col, 'white')
self.grid.ForceRefresh()
|
python
|
{
"resource": ""
}
|
q11702
|
Menus.on_select_menuitem
|
train
|
def on_select_menuitem(self, event, grid, row, col, selection):
"""
sets value of selected cell to value selected from menu
"""
if self.grid.changes: # if user selects a menuitem, that is an edit
self.grid.changes.add(row)
else:
self.grid.changes = {row}
item_id = event.GetId()
item = event.EventObject.FindItemById(item_id)
label = item.Label
cell_value = grid.GetCellValue(row, col)
if str(label) == "CLEAR cell of all values":
label = ""
col_label = grid.GetColLabelValue(col).strip('\nEDIT ALL').strip('**')
if col_label in self.colon_delimited_lst and label:
if not label.lower() in cell_value.lower():
label += (":" + cell_value).rstrip(':')
else:
label = cell_value
if self.selected_col and self.selected_col == col:
for row in range(self.grid.GetNumberRows()):
grid.SetCellValue(row, col, label)
if self.grid.changes:
self.grid.changes.add(row)
else:
self.grid.changes = {row}
#self.selected_col = None
else:
grid.SetCellValue(row, col, label)
if selection:
for cell in selection:
row = cell[0]
grid.SetCellValue(row, col, label)
return
|
python
|
{
"resource": ""
}
|
q11703
|
main
|
train
|
def main():
"""
NAME
plot_cdf.py
DESCRIPTION
        makes a cumulative distribution function (CDF) plot of the data in the input file
SYNTAX
plot_cdf.py [-h][command line options]
OPTIONS
-h prints help message and quits
-f FILE
-t TITLE
-fmt [svg,eps,png,pdf,jpg..] specify format of output figure, default is svg
-sav saves plot and quits
"""
fmt,plot='svg',0
title=""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-sav' in sys.argv:plot=1
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
X=numpy.loadtxt(file)
# else:
# X=numpy.loadtxt(sys.stdin,dtype=numpy.float)
else:
print('-f option required')
print(main.__doc__)
sys.exit()
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-t' in sys.argv:
ind=sys.argv.index('-t')
title=sys.argv[ind+1]
CDF={'X':1}
pmagplotlib.plot_init(CDF['X'],5,5)
pmagplotlib.plot_cdf(CDF['X'],X,title,'r','')
files={'X':'CDF_.'+fmt}
if plot==0:
pmagplotlib.draw_figs(CDF)
ans= input('S[a]ve plot, <Return> to quit ')
if ans=='a':
pmagplotlib.save_plots(CDF,files)
else:
pmagplotlib.save_plots(CDF,files)
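# Example invocation (file name and title are illustrative, not from the source):
#   plot_cdf.py -f cdf_data.txt -t 'My CDF' -fmt png -sav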
|
python
|
{
"resource": ""
}
|
q11704
|
main
|
train
|
def main():
"""
NAME
bootams.py
DESCRIPTION
calculates bootstrap statistics for tensor data
SYNTAX
bootams.py [-h][command line options]
OPTIONS:
-h prints help message and quits
-f FILE specifies input file name
-par specifies parametric bootstrap [by whole data set]
        -n N specifies the number of bootstrap samples, default is N=5000
INPUT
x11 x22 x33 x12 x23 x13
OUTPUT
tau_1 tau_1_sigma V1_dec V1_inc V1_eta V1_eta_dec V1_eta_inc V1_zeta V1_zeta_dec V1_zeta_inc
tau_2 tau_2_sigma V2_dec V2_inc V2_eta V2_eta_dec V2_eta_inc V2_zeta V2_zeta_dec V2_zeta_inc
        tau_3 tau_3_sigma V3_dec V3_inc V3_eta V3_eta_dec V3_eta_inc V3_zeta V3_zeta_dec V3_zeta_inc
"""
# set options
ipar,nb=0,5000
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
Ss=np.loadtxt(file)
#f=open(file,'r')
#data=f.readlines()
if '-par' in sys.argv:ipar=1
if '-n' in sys.argv:
ind=sys.argv.index('-n')
nb=int(sys.argv[ind+1])
# read in the data
print("Doing bootstrap - be patient")
#Ss=[]
#for line in data:
# s=[]
# rec=line.split()
# for i in range(6):
# s.append(float(rec[i]))
# Ss.append(s)
Tmean,Vmean,Taus,Vs=pmag.s_boot(Ss,ipar=ipar,nb=nb)
bpars=pmag.sbootpars(Taus,Vs) # calculate kent parameters for bootstrap
bpars["v1_dec"]=Vmean[0][0]
bpars["v1_inc"]=Vmean[0][1]
bpars["v2_dec"]=Vmean[1][0]
bpars["v2_inc"]=Vmean[1][1]
bpars["v3_dec"]=Vmean[2][0]
bpars["v3_inc"]=Vmean[2][1]
bpars["t1"]=Tmean[0]
bpars["t2"]=Tmean[1]
bpars["t3"]=Tmean[2]
print("""
tau tau_sigma V_dec V_inc V_eta V_eta_dec V_eta_inc V_zeta V_zeta_dec V_zeta_inc
""")
    # eta values precede zeta values, matching the column header above
    outstring='%7.5f %7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(bpars["t1"],bpars["t1_sigma"],bpars["v1_dec"],bpars["v1_inc"],bpars["v1_eta"],bpars["v1_eta_dec"],bpars["v1_eta_inc"],bpars["v1_zeta"],bpars["v1_zeta_dec"],bpars["v1_zeta_inc"])
    print(outstring)
    outstring='%7.5f %7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(bpars["t2"],bpars["t2_sigma"],bpars["v2_dec"],bpars["v2_inc"],bpars["v2_eta"],bpars["v2_eta_dec"],bpars["v2_eta_inc"],bpars["v2_zeta"],bpars["v2_zeta_dec"],bpars["v2_zeta_inc"])
    print(outstring)
    outstring='%7.5f %7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(bpars["t3"],bpars["t3_sigma"],bpars["v3_dec"],bpars["v3_inc"],bpars["v3_eta"],bpars["v3_eta_dec"],bpars["v3_eta_inc"],bpars["v3_zeta"],bpars["v3_zeta_dec"],bpars["v3_zeta_inc"])
    print(outstring)
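# Example invocation (file name is illustrative): parametric bootstrap, 1000 samples:
#   bootams.py -f tensor_data.txt -par -n 1000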
|
python
|
{
"resource": ""
}
|
q11705
|
Vocabulary.get_json_online
|
train
|
def get_json_online(self, url):
"""
        Use the requests module to get json from Earthref.
        If this fails or times out, return [] (or False if requests is unavailable).
Returns
---------
result : requests.models.Response, or [] if unsuccessful
"""
if not requests:
return False
try:
req = requests.get(url, timeout=.2)
if not req.ok:
return []
return req
except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError,
requests.exceptions.ReadTimeout):
return []
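# Hedged usage sketch (the instance name is hypothetical; the URL follows the
# calls elsewhere in this file):
#   req = vocab.get_json_online('https://www2.earthref.org/vocabularies/controlled.json')
#   if req:
#       data = req.json()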
|
python
|
{
"resource": ""
}
|
q11706
|
Vocabulary.get_controlled_vocabularies
|
train
|
def get_controlled_vocabularies(self, vocab_types=default_vocab_types):
"""
Get all non-method controlled vocabularies
"""
if len(VOCAB):
self.set_vocabularies()
return
data = []
controlled_vocabularies = []
# try to get online
if not set_env.OFFLINE:
url = 'https://www2.earthref.org/vocabularies/controlled.json'
try:
raw = self.get_json_online(url)
data = pd.DataFrame(raw.json())
print('-I- Importing controlled vocabularies from https://earthref.org')
except Exception as ex:
pass
#print(ex, type(ex))
        # use cached
if not len(data):
print('-I- Using cached vocabularies')
fname = os.path.join(data_model_dir, "controlled_vocabularies_December_10_2018.json")
data = pd.io.json.read_json(fname, encoding='utf-8-sig')
# parse data
possible_vocabularies = data.columns
        # grab every single controlled vocabulary (this overrides the vocab_types argument)
vocab_types = list(possible_vocabularies)
def get_cv_from_list(lst):
"""
Check a validations list from the data model.
If there is a controlled vocabulary validation,
return which category of controlled vocabulary it is.
This will generally be applied to the validations col
of the data model
"""
try:
for i in lst:
if "cv(" in i:
return i[4:-2]
except TypeError:
return None
else:
return None
vocab_col_names = []
data_model = self.data_model
for dm_key in data_model.dm:
df = data_model.dm[dm_key]
df['vocab_name'] = df['validations'].apply(get_cv_from_list)
lst = list(zip(df[df['vocab_name'].notnull()]['vocab_name'], df[df['vocab_name'].notnull()].index))
# in lst, first value is the name of the controlled vocabulary
# second value is the name of the dataframe column
vocab_col_names.extend(lst)
# vocab_col_names is now a list of tuples
# consisting of the vocabulary name and the column name
# i.e., (u'type', u'geologic_types')
# remove duplicate col_names:
vocab_col_names = sorted(set(vocab_col_names))
# add in boolean category to controlled vocabularies
bool_items = [{'item': True}, {'item': False}, {'item': 'true'},
{'item': 'false'}, {'item': 0}, {'item': 1},
{'item': 0.0}, {'item': 1.0},
{'item': 't'}, {'item': 'f'},
{'item': 'T'}, {'item': 'F'}]
series = Series({'label': 'Boolean', 'items': bool_items})
data['boolean'] = series
# use vocabulary name to get possible values for the column name
for vocab in vocab_col_names[:]:
if vocab[0] == "magic_table_column":
vocab_col_names.remove(("magic_table_column", "table_column"))
continue
items = data[vocab[0]]['items']
stripped_list = [item['item'] for item in items]
controlled_vocabularies.append(stripped_list)
# create series with the column name as the index,
# and the possible values as the values
ind_values = [i[1] for i in vocab_col_names]
vocabularies = pd.Series(controlled_vocabularies, index=ind_values)
return vocabularies
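# Hedged sketch of the cv() parsing done by get_cv_from_list above (toy input):
#   validations = ['required()', 'cv("geologic_types")']
#   # 'cv("geologic_types")'[4:-2] strips the leading cv(" and the trailing ")
#   # so get_cv_from_list(validations) returns 'geologic_types'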
|
python
|
{
"resource": ""
}
|
q11707
|
main
|
train
|
def main():
"""
NAME
uniform.py
DESCRIPTION
draws N directions from uniform distribution on a sphere
SYNTAX
uniform.py [-h][command line options]
-h prints help message and quits
-n N, specify N on the command line (default is 100)
-F file, specify output file name, default is standard output
"""
outf=""
N=100
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
outf=sys.argv[ind+1]
if outf!="": out=open(outf,'w')
if '-n' in sys.argv:
ind=sys.argv.index('-n')
N=int(sys.argv[ind+1])
dirs=pmag.get_unf(N)
if outf=='':
for dir in dirs:
print('%7.1f %7.1f'%(dir[0],dir[1]))
else:
numpy.savetxt(outf,dirs,fmt='%7.1f %7.1f')
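# Example invocation (output file name is illustrative):
#   uniform.py -n 500 -F uniform_dirs.txt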
|
python
|
{
"resource": ""
}
|
q11708
|
iodp_samples_srm
|
train
|
def iodp_samples_srm(df, spec_file='specimens.txt',samp_file="samples.txt",site_file="sites.txt",dir_path='.',
input_dir_path='',comp_depth_key="",lat="",lon=""):
"""
Convert IODP samples data generated from the SRM measurements file into datamodel 3.0 MagIC samples file.
Default is to overwrite the output files in your working directory.
Parameters
----------
df : str
Pandas DataFrame of SRM Archive data
dir_path : str
working directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
spec_file : str
output specimens.txt file name
samp_file : str
output samples.txt file name
site_file : str
output sites.txt file name
comp_depth_key : str
if not "", there is a composite depth model, for example 'Depth CSF-B (m)'
lat : float
latitude of hole location
lon : float
longitude of hole location
NOTE: all output files will overwrite existing files.
Returns
--------
        type - Tuple : (True or False indicating if conversion was successful, file names written)
"""
# define required columns for samples, sites, locations (holes).
spec_reqd_columns=['specimen','sample','result_quality','method_codes','volume',\
'specimen_name_alternatives','citations']
samp_reqd_columns=['sample','site','result_type','result_quality',\
'azimuth','dip','azimuth_correction','method_codes','citations','core_depth','composite_depth']
site_reqd_columns=['site','location','lat','lon','result_type','result_quality','method_codes',\
'core_depth','composite_depth',\
'geologic_types','geologic_classes','geologic_lithologies',\
'age','age_low','age_high','age_unit','citations']
# fix the path names for input and output directories (if different)
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
# find the input sample file name downloaded from LORE
# set the output file names
spec_out = os.path.join(output_dir_path, spec_file)
samp_out = os.path.join(output_dir_path, samp_file)
site_out = os.path.join(output_dir_path, site_file)
# read in the data from the downloaded .csv file
# create the MagIC data model 3.0 DataFrames with the required column
specimens_df=pd.DataFrame(columns = spec_reqd_columns)
samples_df=pd.DataFrame(columns = samp_reqd_columns)
sites_df=pd.DataFrame(columns = site_reqd_columns)
# set some column headers for the sample master .csv file
depth_key = "Depth CSF-A (m)"
text_key = "Text ID"
date_key = "Date sample logged"
volume_key = 'Volume (cm3)'
# convert the meta data in the sample master to MagIC datamodel 3
# construct the sample name
holes,specimens=iodp_sample_names(df)
# put the sample name in the specimen, sample, site
specimens_df['specimen']=specimens
specimens_df['sample']=specimens
samples_df['sample']=specimens
samples_df['site']=specimens
samples_df['core_depth']=df[depth_key]
if comp_depth_key: samples_df['composite_depth']=df[comp_depth_key]
# add in the rest to the sites table
sites_df['site']=specimens
sites_df['core_depth']=df[depth_key]
sites_df['lat']=lat
sites_df['lon']=lon
sites_df['result_type']='i'
sites_df['result_quality']='g'
sites_df['method_codes']='FS-C-DRILL-IODP'
sites_df['location']=holes[0]
sites_df['citations']="This study"
if comp_depth_key: sites_df['composite_depth']=df[comp_depth_key]
# now do the samples
samples_df['method_codes']='FS-C-DRILL-IODP:SP-SS-C:SO-V' # could put in sampling tool...
samples_df['site']=specimens # same as sample and specimen name
    samples_df['result_type']='i'
    samples_df['result_quality']='g'
samples_df['azimuth']='0'
samples_df['dip']='0'
samples_df['citations']='This study'
# NEED TO ADD TIMESTAMP FROM TIMESTAMP KEY SOMEDAY
# and the specimens
specimens_df['result_quality']='g'
specimens_df['method_codes']='FS-C-DRILL-IODP'
specimens_df['citations']='This study'
# fill in the np.nan with blanks
specimens_df.fillna("",inplace=True)
samples_df.fillna("",inplace=True)
sites_df.fillna("",inplace=True)
# save the files in the designated spots spec_out, samp_out, site_out and loc_out
spec_dicts = specimens_df.to_dict('records')
pmag.magic_write(spec_out, spec_dicts, 'specimens')
samp_dicts = samples_df.to_dict('records')
pmag.magic_write(samp_out, samp_dicts, 'samples')
site_dicts = sites_df.to_dict('records')
pmag.magic_write(site_out, site_dicts, 'sites')
return holes[0],specimens
|
python
|
{
"resource": ""
}
|
q11709
|
make_thing
|
train
|
def make_thing():
""" makes example PintPars object """
cwd = os.getcwd()
main_dir = cwd + '/SPD'
try:
import new_lj_thellier_gui_spd as tgs
gui = tgs.Arai_GUI('/magic_measurements.txt', main_dir)
specimens = list(gui.Data.keys())
thing = PintPars(gui.Data, '0238x6011044', 473., 623.)
thing.calculate_all_statistics()
#new_thing = PintPars(gui.Data, '0238x5721062', 100. + 273., 525. + 273.)
#new_thing.calculate_all_statistics()
gui2 = tgs.Arai_GUI('/magic_measurements.txt', '/Users/nebula/Desktop/MagIC_experiments/ODP-SBG_1')
thing2 = PintPars(gui2.Data, '0335x1031411', 273., 743.)
return thing, thing2
except Exception as ex:
print('could not make standard specimen objects')
print(ex)
|
python
|
{
"resource": ""
}
|
q11710
|
PintPars.get_curve_prime
|
train
|
def get_curve_prime(self):
"""not in SPD documentation. same as k, but using the segment instead of the full data set"""
if len(self.x_Arai_segment) < 4:
self.pars['specimen_k_prime'], self.pars['specimen_k_prime_sse'] = 0, 0
return 0
data = lib_k.AraiCurvature(self.x_Arai_segment, self.y_Arai_segment)
self.pars['specimen_k_prime'] = data[0]
self.pars['specimen_k_prime_sse'] = data[3]
|
python
|
{
"resource": ""
}
|
q11711
|
PintPars.get_ptrm_dec_and_inc
|
train
|
def get_ptrm_dec_and_inc(self):
"""not included in spd."""
PTRMS = self.PTRMS[1:]
CART_pTRMS_orig = numpy.array([lib_direct.dir2cart(row[1:4]) for row in PTRMS])
#B_lab_dir = [self.B_lab_dir[0], self.B_lab_dir[1], 1.] # dir
tmin, tmax = self.t_Arai[0], self.t_Arai[-1]
ptrms_dec_Free, ptrms_inc_Free, ptrm_best_fit_vector_Free, ptrm_tau_Free, ptrm_v_Free, ptrm_mass_center_Free, ptrm_PCA_sigma_Free = lib_direct.get_dec_and_inc(CART_pTRMS_orig, self.t_Arai, tmin, tmax, anchored=False)
ptrms_angle = lib_direct.get_ptrms_angle(ptrm_best_fit_vector_Free, self.B_lab_cart)
self.pars['ptrms_dec_Free'], self.pars['ptrms_inc_Free'] = ptrms_dec_Free, ptrms_inc_Free
self.pars['ptrms_tau_Free'] = ptrm_tau_Free
self.pars['ptrms_angle_Free'] = ptrms_angle
|
python
|
{
"resource": ""
}
|
q11712
|
DataModel.get_data_model
|
train
|
def get_data_model(self):
"""
Try to download the data model from Earthref.
If that fails, grab the cached data model.
"""
if len(DM):
self.dm = DM
self.crit_map = CRIT_MAP
return
if not set_env.OFFLINE:
dm = self.get_dm_online()
if dm:
print('-I- Using online data model')
#self.cache_data_model(dm)
return self.parse_response(dm)
# if online is not available, get cached dm
dm = self.get_dm_offline()
print('-I- Using cached data model')
return self.parse_cache(dm)
|
python
|
{
"resource": ""
}
|
q11713
|
DataModel.get_dm_online
|
train
|
def get_dm_online(self):
"""
Use requests module to get data model from Earthref.
        If this fails or times out, return False.
Returns
---------
result : requests.models.Response, False if unsuccessful
"""
if not requests:
return False
try:
req = requests.get("https://earthref.org/MagIC/data-models/3.0.json", timeout=3)
if not req.ok:
return False
return req
except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError,
requests.exceptions.ReadTimeout):
return False
|
python
|
{
"resource": ""
}
|
q11714
|
DataModel.parse_cache
|
train
|
def parse_cache(self, full_df):
"""
Format the cached data model into a dictionary of DataFrames
and a criteria map DataFrame.
Parameters
----------
full_df : DataFrame
result of self.get_dm_offline()
Returns
----------
data_model : dictionary of DataFrames
crit_map : DataFrame
"""
data_model = {}
levels = ['specimens', 'samples', 'sites', 'locations',
'ages', 'measurements', 'criteria', 'contribution',
'images']
criteria_map = pd.DataFrame(full_df['criteria_map'])
for level in levels:
df = pd.DataFrame(full_df['tables'][level]['columns'])
data_model[level] = df.transpose()
# replace np.nan with None
data_model[level] = data_model[level].where((pd.notnull(data_model[level])), None)
return data_model, criteria_map
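# Hedged standalone sketch of the NaN -> None idiom used above (toy data):
#   import numpy as np
#   import pandas as pd
#   df = pd.DataFrame({'a': [1.0, np.nan]})
#   df = df.where(pd.notnull(df), None)  # NaN cells become None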
|
python
|
{
"resource": ""
}
|
q11715
|
DataModel.parse
|
train
|
def parse(self, data_model, crit):
"""
Take the relevant pieces of the data model json
and parse into data model and criteria map.
Parameters
----------
data_model : data model piece of json (nested dicts)
crit : criteria map piece of json (nested dicts)
Returns
----------
data_model : dictionary of DataFrames
crit_map : DataFrame
"""
# data model
tables = pd.DataFrame(data_model)
data_model = {}
for table_name in tables.columns:
data_model[table_name] = pd.DataFrame(tables[table_name]['columns']).T
# replace np.nan with None
data_model[table_name] = data_model[table_name].where((pd.notnull(data_model[table_name])), None)
# criteria map
zipped = list(zip(crit.keys(), crit.values()))
crit_map = pd.DataFrame(zipped)
crit_map.index = crit_map[0]
crit_map.drop(0, axis='columns', inplace=True)
crit_map.rename({1: 'criteria_map'}, axis='columns', inplace=True)
crit_map.index.rename("", inplace=True)
for table_name in ['measurements', 'specimens', 'samples', 'sites', 'locations',
'contribution', 'criteria', 'images', 'ages']:
crit_map.loc[table_name] = np.nan
return data_model, crit_map
|
python
|
{
"resource": ""
}
|
q11716
|
DataModel.parse_response
|
train
|
def parse_response(self, raw):
"""
Format the requested data model into a dictionary of DataFrames
and a criteria map DataFrame.
Take data returned by a requests.get call to Earthref.
Parameters
----------
raw: 'requests.models.Response'
Returns
---------
data_model : dictionary of DataFrames
crit_map : DataFrame
"""
tables = raw.json()['tables']
crit = raw.json()['criteria_map']
return self.parse(tables, crit)
|
python
|
{
"resource": ""
}
|
q11717
|
DataModel.find_cached_dm
|
train
|
def find_cached_dm(self):
"""
Find filename where cached data model json is stored.
Returns
---------
model_file : str
data model json file location
"""
pmag_dir = find_pmag_dir.get_pmag_dir()
if pmag_dir is None:
pmag_dir = '.'
model_file = os.path.join(pmag_dir, 'pmagpy',
'data_model', 'data_model.json')
# for py2app:
if not os.path.isfile(model_file):
model_file = os.path.join(pmag_dir, 'data_model',
'data_model.json')
if not os.path.isfile(model_file):
model_file = os.path.join(os.path.split(os.path.dirname(__file__))[0],'pmagpy', 'data_model','data_model.json')
if not os.path.isfile(model_file):
model_file = os.path.join(os.path.split(os.path.dirname(__file__))[0], 'data_model','data_model.json')
return model_file
|
python
|
{
"resource": ""
}
|
q11718
|
DataModel.cache_data_model
|
train
|
def cache_data_model(self, raw):
"""
Cache the data model json.
Take data returned by a requests.get call to Earthref.
Parameters
----------
raw: requests.models.Response
"""
output_json = json.loads(raw.content)
output_file = self.find_cached_dm()
        with open(output_file, 'w+') as f:
            json.dump(output_json, f)
|
python
|
{
"resource": ""
}
|
q11719
|
DataModel.get_groups
|
train
|
def get_groups(self, table_name):
"""
Return list of all groups for a particular data type
"""
df = self.dm[table_name]
return list(df['group'].unique())
|
python
|
{
"resource": ""
}
|
q11720
|
DataModel.get_group_headers
|
train
|
def get_group_headers(self, table_name, group_name):
"""
Return a list of all headers for a given group
"""
# get all headers of a particular group
df = self.dm[table_name]
cond = df['group'] == group_name
return df[cond].index
|
python
|
{
"resource": ""
}
|
q11721
|
DataModel.get_reqd_headers
|
train
|
def get_reqd_headers(self, table_name):
"""
Return a list of all required headers for a particular table
"""
df = self.dm[table_name]
cond = df['validations'].map(lambda x: 'required()' in str(x))
return df[cond].index
|
python
|
{
"resource": ""
}
|
q11722
|
DataModel.get_group_for_col
|
train
|
def get_group_for_col(self, table_name, col_name):
"""
Check data model to find group name for a given column header
Parameters
----------
table_name: str
col_name: str
Returns
---------
group_name: str
"""
df = self.dm[table_name]
try:
group_name = df.loc[col_name, 'group']
except KeyError:
return ''
return group_name
|
python
|
{
"resource": ""
}
|
q11723
|
main
|
train
|
def main():
"""
NAME
fisher.py
DESCRIPTION
        generates a set of Fisher distributed data from a specified distribution
INPUT (COMMAND LINE ENTRY)
OUTPUT
dec, inc
SYNTAX
fisher.py [-h] [-i] [command line options]
OPTIONS
-h prints help message and quits
-i for interactive entry
-k specify kappa as next argument, default is 20
-n specify N as next argument, default is 100
where:
kappa: fisher distribution concentration parameter
N: number of directions desired
"""
N,kappa=100,20
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
elif '-i' in sys.argv:
ans=input(' Kappa: ')
kappa=float(ans)
ans=input(' N: ')
N=int(ans)
else:
if '-k' in sys.argv:
ind=sys.argv.index('-k')
kappa=float(sys.argv[ind+1])
if '-n' in sys.argv:
ind=sys.argv.index('-n')
N=int(sys.argv[ind+1])
for k in range(N): spitout(kappa)
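# Example invocation (values are illustrative): 50 directions with kappa 30:
#   fisher.py -k 30 -n 50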
|
python
|
{
"resource": ""
}
|
q11724
|
main
|
train
|
def main():
"""
NAME
pmm_redo.py
DESCRIPTION
converts the UCSC PMM files format to PmagPy redo file
SYNTAX
pmm_redo.py [-h] [command line options]
OPTIONS
-h: prints help message and quits
-f FILE: specify input file
-F FILE: specify output file, default is 'zeq_redo'
"""
dir_path='.'
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
zfile=dir_path+'/zeq_redo'
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
inspec=dir_path+'/'+sys.argv[ind+1]
if '-F' in sys.argv:
ind=sys.argv.index('-F')
zfile=dir_path+'/'+sys.argv[ind+1]
zredo=open(zfile,"w")
#
# read in PMM file
#
specs=[]
prior_spec_data=open(inspec,'r').readlines()
for line in prior_spec_data:
rec=line.split(',')
if rec[0][0]!='"' and rec[0]!="ID" and len(rec)>2: # skip all the header stuff
spec=rec[0]
specs.append(spec)
            comp_name=string.ascii_uppercase[specs.count(spec)-1] # assign component names
calculation_type="DE-FM"
if rec[1].strip()=='DirPCA': calculation_type="DE-BFL" # assume default calculation type is best-fit line
if rec[1].strip()=='DirOPCA': calculation_type="DE-BFL-A" # anchored best-fit line
if rec[1].strip()=='GCPCA' or rec[1]=='GCnPCA' : calculation_type="DE-BFP" # best-fit plane
steps=rec[2].strip().split('-')
min,max=steps[0],steps[1]
beg,end="",""
if min=="NRM":
beg=0
elif min[0]=='M' or min[0]=='H':
beg=float(min[1:])*1e-3 # convert to T from mT
elif min[-1]=='M':
beg=float(min[:-1])*1e-3 # convert to T from mT
            elif min[0]=='T':
                beg=float(min[1:])+273 # convert from C to kelvin
            if max[0]=='M' or max[0]=='H':
                end=float(max[1:])*1e-3 # convert to T from mT
            elif max[0]=='T':
                end=float(max[1:])+273 # convert from C to kelvin
            elif max[-1]=='M':
                end=float(max[:-1])*1e-3 # convert to T from mT
            if beg==0:beg=273
outstring='%s %s %s %s %s \n'%(spec,calculation_type,beg,end,comp_name)
zredo.write(outstring)
|
python
|
{
"resource": ""
}
|
q11725
|
get_item_string
|
train
|
def get_item_string(items_list):
"""
take in a list of pmag_objects
return a colon-delimited list of the findable names
"""
if not items_list:
return ''
string_list = []
for item in items_list:
try:
name = item.name
string_list.append(name)
except AttributeError:
pass
return ":".join(string_list)
|
python
|
{
"resource": ""
}
|
q11726
|
combine_dicts
|
train
|
def combine_dicts(new_dict, old_dict):
"""
returns a dictionary with all key, value pairs from new_dict.
also returns key, value pairs from old_dict, if that key does not exist in new_dict.
if a key is present in both new_dict and old_dict, the new_dict value will take precedence.
"""
old_data_keys = list(old_dict.keys())
new_data_keys = list(new_dict.keys())
all_keys = set(old_data_keys).union(new_data_keys)
combined_data_dict = {}
for k in all_keys:
try:
combined_data_dict[k] = new_dict[k]
except KeyError:
combined_data_dict[k] = old_dict[k]
return combined_data_dict
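# Hedged doctest-style sketch (toy values, not from the source); the value from
# new_dict wins for the shared key 'lat':
#   combine_dicts({'lat': 1.0}, {'lat': 2.0, 'lon': 3.0})
#   # -> {'lat': 1.0, 'lon': 3.0}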
|
python
|
{
"resource": ""
}
|
q11727
|
ErMagicBuilder.find_by_name
|
train
|
def find_by_name(self, item_name, items_list, name_list=None):
"""
Return item from items_list with name item_name.
"""
if not name_list:
names = [item.name for item in items_list if item]
else:
names = name_list
if item_name in names:
ind = names.index(item_name)
return items_list[ind]
return False
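# Hedged usage sketch (hypothetical builder instance and objects with .name):
#   from types import SimpleNamespace
#   items = [SimpleNamespace(name='s1'), SimpleNamespace(name='s2')]
#   builder.find_by_name('s2', items)  # -> the second object
#   builder.find_by_name('s3', items)  # -> False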
|
python
|
{
"resource": ""
}
|
q11728
|
ErMagicBuilder.find_or_create_by_name
|
train
|
def find_or_create_by_name(self, item_name, items_list, item_type):
"""
See if item with item_name exists in item_list.
If not, create that item.
Either way, return an item of type item_type.
"""
item = self.find_by_name(item_name, items_list)
if not item:
item = self.data_lists[item_type][2](item_name, None)
return item
|
python
|
{
"resource": ""
}
|
q11729
|
ErMagicBuilder.init_default_headers
|
train
|
def init_default_headers(self):
"""
initialize default required headers.
if there were any pre-existing headers, keep them also.
"""
if not self.data_model:
self.data_model = validate_upload.get_data_model()
if not self.data_model:
print("Can't access MagIC-data-model at the moment.\nIf you are working offline, make sure MagIC-data-model.txt is in your PmagPy directory (or download it from https://github.com/ltauxe/PmagPy and put it in your PmagPy directory).\nOtherwise, check your internet connection")
return False
# actual is at position 0, reqd is at position 1, optional at position 2
self.headers['measurement']['er'][1], self.headers['measurement']['er'][2] = self.get_headers('magic_measurements')
self.headers['specimen']['er'][1], self.headers['specimen']['er'][2] = self.get_headers('er_specimens')
self.headers['sample']['er'][1], self.headers['sample']['er'][2] = self.get_headers('er_samples')
self.headers['site']['er'][1], self.headers['site']['er'][2] = self.get_headers('er_sites')
self.headers['location']['er'][1], self.headers['location']['er'][2] = self.get_headers('er_locations')
self.headers['age']['er'][1], self.headers['age']['er'][2] = self.get_headers('er_ages')
self.headers['result']['pmag'][1], self.headers['result']['pmag'][2] = self.get_headers('pmag_results')
self.headers['specimen']['pmag'][1], self.headers['specimen']['pmag'][2] = self.get_headers('pmag_specimens')
self.headers['sample']['pmag'][1], self.headers['sample']['pmag'][2] = self.get_headers('pmag_samples')
self.headers['site']['pmag'][1], self.headers['site']['pmag'][2] = self.get_headers('pmag_sites')
|
python
|
{
"resource": ""
}
|
q11730
|
ErMagicBuilder.add_measurement
|
train
|
def add_measurement(self, exp_name, meas_num, spec_name=None, er_data=None, pmag_data=None):
"""
Find actual data object for specimen.
Then create a measurement belonging to that specimen and add it to the data object
"""
specimen = self.find_by_name(spec_name, self.specimens)
measurement = Measurement(exp_name, meas_num, specimen, er_data)
self.measurements.append(measurement)
return measurement
|
python
|
{
"resource": ""
}
|
q11731
|
ErMagicBuilder.delete_specimen
|
train
|
def delete_specimen(self, spec_name):
"""
Remove specimen with name spec_name from self.specimens.
If the specimen belonged to a sample, remove it from the sample's specimen list.
"""
specimen = self.find_by_name(spec_name, self.specimens)
if not specimen:
return False
sample = specimen.sample
if sample:
sample.specimens.remove(specimen)
self.specimens.remove(specimen)
del specimen
return []
|
python
|
{
"resource": ""
}
|
q11732
|
ErMagicBuilder.delete_sample
|
train
|
def delete_sample(self, sample_name, replacement_samp=None):
"""
Remove sample with name sample_name from self.samples.
If the sample belonged to a site, remove it from the site's sample list.
If the sample had any specimens, change specimen.sample to "".
"""
sample = self.find_by_name(sample_name, self.samples)
if not sample:
return False
specimens = sample.specimens
site = sample.site
if site:
site.samples.remove(sample)
self.samples.remove(sample)
for spec in specimens:
spec.sample = ""
return specimens
|
python
|
{
"resource": ""
}
|
q11733
|
ErMagicBuilder.add_site
|
train
|
def add_site(self, site_name, location_name=None, er_data=None, pmag_data=None):
"""
Create a Site object and add it to self.sites.
If a location name is provided, add the site to location.sites as well.
"""
if location_name:
location = self.find_by_name(location_name, self.locations)
if not location:
location = self.add_location(location_name)
else:
location = None
## check all declinations/azimuths/longitudes in range 0=>360.
#for key, value in er_data.items():
# er_data[key] = pmag.adjust_to_360(value, key)
new_site = Site(site_name, location, self.data_model, er_data, pmag_data)
self.sites.append(new_site)
if location:
location.sites.append(new_site)
return new_site
|
python
|
{
"resource": ""
}
|
q11734
|
ErMagicBuilder.delete_site
|
train
|
def delete_site(self, site_name, replacement_site=None):
"""
Remove site with name site_name from self.sites.
If the site belonged to a location, remove it from the location's site list.
If the site had any samples, change sample.site to "".
"""
site = self.find_by_name(site_name, self.sites)
if not site:
return False
self.sites.remove(site)
if site.location:
site.location.sites.remove(site)
samples = site.samples
for samp in samples:
samp.site = ''
del site
return samples
|
python
|
{
"resource": ""
}
|
q11735
|
ErMagicBuilder.change_location
|
train
|
def change_location(self, old_location_name, new_location_name, new_parent_name=None,
new_er_data=None, new_pmag_data=None, replace_data=False):
"""
Find actual data object for location with old_location_name.
Then call Location class change method to update location name and data.
"""
location = self.find_by_name(old_location_name, self.locations)
if not location:
print('-W- {} is not a currently existing location, so it cannot be updated.'.format(old_location_name))
return False
location.change_location(new_location_name, new_er_data, new_pmag_data, replace_data)
return location
|
python
|
{
"resource": ""
}
|
q11736
|
ErMagicBuilder.add_location
|
train
|
def add_location(self, location_name, parent_name=None, er_data=None, pmag_data=None):
"""
Create a Location object and add it to self.locations.
"""
if not location_name:
return False
location = Location(location_name, data_model=self.data_model, er_data=er_data, pmag_data=pmag_data)
self.locations.append(location)
return location
|
python
|
{
"resource": ""
}
|
q11737
|
ErMagicBuilder.delete_location
|
train
|
def delete_location(self, location_name):
"""
Remove location with name location_name from self.locations.
If the location had any sites, change site.location to "".
"""
location = self.find_by_name(location_name, self.locations)
if not location:
return False
sites = location.sites
self.locations.remove(location)
for site in sites:
if site:
site.location = ''
del location
return sites
|
python
|
{
"resource": ""
}
|
q11738
|
ErMagicBuilder.change_result
|
train
|
def change_result(self, old_result_name, new_result_name, new_er_data=None,
new_pmag_data=None, spec_names=None, samp_names=None,
site_names=None, loc_names=None, replace_data=False):
"""
Find actual data object for result with old_result_name.
Then call Result class change method to update result name and data.
"""
result = self.find_by_name(old_result_name, self.results)
if not result:
msg = '-W- {} is not a currently existing result, so it cannot be updated.'.format(old_result_name)
print(msg)
return False
else:
specimens, samples, sites, locations = None, None, None, None
if spec_names:
specimens = [self.find_or_create_by_name(spec, self.specimens, 'specimen') for spec in spec_names]
if samp_names:
samples = [self.find_or_create_by_name(samp, self.samples, 'sample') for samp in samp_names]
if site_names:
sites = [self.find_or_create_by_name(site, self.sites, 'site') for site in site_names]
if loc_names:
locations = [self.find_or_create_by_name(loc, self.locations, 'location') for loc in loc_names]
result.change_result(new_result_name, new_pmag_data, specimens, samples,
sites, locations, replace_data)
return result
|
python
|
{
"resource": ""
}
|
q11739
|
ErMagicBuilder.get_data
|
train
|
def get_data(self):
"""
attempt to read measurements file in working directory.
"""
meas_file = os.path.join(self.WD, 'magic_measurements.txt')
if not os.path.isfile(meas_file):
print("-I- No magic_measurements.txt file")
return {}
try:
meas_data, file_type = pmag.magic_read(meas_file)
except IOError:
print("-I- No magic_measurements.txt file")
return {}
        if file_type == 'bad_file':
            print("-E- ERROR: Can't read magic_measurements.txt file. File is corrupted.")
            return {}
old_specimen_name = ''
#start_time = time.time()
meas_name_list = [measurement.name for measurement in self.measurements]
for rec in meas_data:
# get citation information
citation = rec.get('er_citation_names', 'This study')
if 'This study' not in citation:
citation = citation.strip() + ':This study'
er_data = {'er_citation_names': citation}
pmag_data = {'er_citation_names': 'This study'}
specimen_name = rec["er_specimen_name"]
# ignore measurement if there is no specimen
if specimen_name == "" or specimen_name == " ":
continue
# if we've moved onto a new specimen, make sure a sample/site/location
# exists for that specimen
if specimen_name != old_specimen_name:
sample_name = rec["er_sample_name"]
site_name = rec["er_site_name"]
location_name = rec["er_location_name"]
# add items and parents
location = self.find_by_name(location_name, self.locations)
if location_name and not location:
location = self.add_location(location_name, er_data=er_data,
pmag_data=pmag_data)
site = self.find_by_name(site_name, self.sites)
if site_name and not site:
site = self.add_site(site_name, location_name,
er_data, pmag_data)
sample = self.find_by_name(sample_name, self.samples)
if sample_name and not sample:
sample = self.add_sample(sample_name, site_name,
er_data, pmag_data)
specimen = self.find_by_name(specimen_name, self.specimens)
if specimen_name and not specimen:
specimen = self.add_specimen(specimen_name, sample_name,
er_data, pmag_data)
# add child_items
if sample and not self.find_by_name(specimen_name, sample.specimens):
sample.specimens.append(specimen)
if site and not self.find_by_name(sample_name, site.samples):
site.samples.append(sample)
if location and not self.find_by_name(site_name, location.sites):
location.sites.append(site)
exp_name = rec['magic_experiment_name']
meas_num = rec['measurement_number']
meas_name = exp_name + '_' + str(meas_num)
measurement = self.find_by_name(meas_name, self.measurements, meas_name_list)
if not measurement:
self.add_measurement(exp_name, meas_num, specimen.name, rec)
meas_name_list.append(meas_name)
old_specimen_name = specimen_name
|
python
|
{
"resource": ""
}
|
q11740
|
ErMagicBuilder.get_results_info
|
train
|
def get_results_info(self, filename=None):
"""
Read pmag_results.txt file.
Parse information into dictionaries for each item.
Then add it to the item object as object.results_data.
"""
        if not filename:
            short_filename = "pmag_results.txt"
            magic_file = os.path.join(self.WD, short_filename)
        else:
            magic_file = filename
            short_filename = os.path.split(filename)[1]
        if not os.path.isfile(magic_file):
            print('-W- Could not find {} in your working directory {}'.format(short_filename, self.WD))
            return False
# get the data from the pmag_results.txt file
data_dict = self.read_magic_file(magic_file, 'by_line_number')[0]
def make_items_list(string, search_items_list):
names = string.split(':')
items = []
for name in names:
name = name.strip(' ')
item = self.find_by_name(name, search_items_list)
if item:
items.append(item)
return items
for num, result in list(data_dict.items()):
name, specimens, samples, sites, locations = None, None, None, None, None
for key, value in list(result.items()):
#print key, ':', value
if key == 'er_specimen_names':
specimens = make_items_list(value, self.specimens)
if key == 'er_sample_names':
samples = make_items_list(value, self.samples)
if key == 'er_site_names':
sites = make_items_list(value, self.sites)
if key == 'er_location_names':
locations = make_items_list(value, self.locations)
if key == 'pmag_result_name':
name = value
for header_name in ['er_specimen_names', 'er_site_names',
'er_sample_names', 'er_location_names']:
if header_name in list(result.keys()):
result.pop(header_name)
if not name:
name = num
result_item = self.find_by_name(name, self.results)
if not result_item:
result_item = Result(name, specimens, samples, sites, locations, result, self.data_model)
else:
print('-W- Two or more results with name: {} found in your result file.\n Taking only the first.'.format(name))
if result_item and result_item not in self.results:
self.results.append(result_item)
|
python
|
{
"resource": ""
}
|
q11741
|
ErMagicBuilder.write_age_file
|
train
|
def write_age_file(self):
"""
Write er_ages.txt based on updated ErMagicBuilder data object
"""
if not self.write_ages:
print('-I- No age data available to write')
return
first_headers = self.first_age_headers
actual_headers = sorted(self.headers['age']['er'][0])
for header in first_headers:
if header in actual_headers:
actual_headers.remove(header)
add_headers = ['er_specimen_name', 'er_sample_name', 'er_site_name', 'er_location_name']
actual_headers[:0] = first_headers
full_headers = add_headers[:]
full_headers.extend(actual_headers)
header_string = '\t'.join(full_headers)
ages = []
for dtype in ['specimen', 'sample', 'site', 'location']:
ages_list = sorted(self.data_lists[dtype][0], key=lambda item: item.name)
ages.extend(ages_list)
age_strings = []
for age in ages:
ind = self.ancestry.index(age.dtype)
ancestors = ['' for num in range(len(self.ancestry) - (ind+2))]
data_found = False
string = ''
if age.dtype == 'specimen':
string += age.name + '\t'
elif age.dtype == 'sample':
string += '\t' + age.name + '\t'
elif age.dtype == 'site':
string += '\t\t' + age.name + '\t'
elif age.dtype == 'location':
string += '\t\t\t' + age.name + '\t'
parent = age.get_parent()
grandparent = None
if parent:
ancestors[0] = parent.name
grandparent = parent.get_parent()
if grandparent:
ancestors[1] = grandparent.name
greatgrandparent = grandparent.get_parent()
if greatgrandparent:
ancestors[2] = greatgrandparent.name
for ancestor in ancestors:
string += ancestor + '\t'
for key in actual_headers:
try:
add_string = age.age_data[key]
except KeyError:
add_string = ''
age.age_data[key] = ''
if add_string and not key == 'er_citation_names':
data_found = True
if key == 'er_citation_names' and not add_string.strip('\t'):
add_string = 'This study'
string += add_string + '\t'
# prevent extra '' at the end of age string
if string.endswith('\t'):
string = string[:-1]
# only write ages to file if there is data provided
if data_found:
age_strings.append(string)
outfile = open(os.path.join(self.WD, 'er_ages.txt'), 'w')
outfile.write('tab\ter_ages\n')
outfile.write(header_string + '\n')
if not age_strings:
outfile.close()
os.remove(os.path.join(self.WD, 'er_ages.txt'))
return False
for string in age_strings:
outfile.write(string + '\n')
outfile.close()
return outfile
|
python
|
{
"resource": ""
}
|
q11742
|
ErMagicBuilder.validate_data
|
train
|
def validate_data(self):
"""
Validate specimen, sample, site, and location data.
"""
warnings = {}
spec_warnings, samp_warnings, site_warnings, loc_warnings = {}, {}, {}, {}
if self.specimens:
spec_warnings = self.validate_items(self.specimens, 'specimen')
if self.samples:
samp_warnings = self.validate_items(self.samples, 'sample')
if self.sites:
site_warnings = self.validate_items(self.sites, 'site')
if self.locations:
loc_warnings = self.validate_items(self.locations, 'location')
return spec_warnings, samp_warnings, site_warnings, loc_warnings
|
python
|
{
"resource": ""
}
|
q11743
|
Specimen.set_parent
|
train
|
def set_parent(self, new_samp):
"""
Set self.sample as either an empty string, or with a new Sample.
"""
        if new_samp:
            if not isinstance(new_samp, Sample):
                raise Exception
        self.sample = new_samp
        self.propagate_data()
        return new_samp
|
python
|
{
"resource": ""
}
|
q11744
|
Sample.set_parent
|
train
|
def set_parent(self, new_site):
"""
Set self.site as either an empty string, or with a new Site.
"""
if new_site:
if not isinstance(new_site, Site):
raise Exception
self.site = new_site
self.propagate_data()
return new_site
|
python
|
{
"resource": ""
}
|
q11745
|
Site.change_site
|
train
|
def change_site(self, new_name, new_location=None, new_er_data=None,
new_pmag_data=None, replace_data=False):
"""
Update a site's name, location, er_data, and pmag_data.
By default, new data will be added in to pre-existing data, overwriting existing values.
If replace_data is True, the new data dictionary will simply take the place of the existing dict.
"""
self.name = new_name
if new_location:
self.location = new_location
self.update_data(new_er_data, new_pmag_data, replace_data)
|
python
|
{
"resource": ""
}
|
q11746
|
thellier_auto_interpreter.find_close_value
|
train
|
def find_close_value(self, LIST, value):
'''
take a LIST and find the nearest value in LIST to 'value'
'''
        diff = float('inf')
        result = None  # stays None if LIST is empty
for a in LIST:
if abs(value - a) < diff:
diff = abs(value - a)
result = a
return(result)
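# A hedged equivalent one-liner using min() with a key function; like the loop
# above, it assumes LIST is non-empty:
#   nearest = min(LIST, key=lambda a: abs(value - a))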
|
python
|
{
"resource": ""
}
|
q11747
|
main
|
train
|
def main():
"""
NAME
sufar4-asc_magic.py
DESCRIPTION
        converts ascii files generated by SUFAR ver.4.0 to MagIC formatted
files for use with PmagPy plotting software
SYNTAX
sufar4-asc_magic.py -h [command line options]
OPTIONS
-h: prints the help message and quits
-f FILE: specify .asc input file name
-fsp SINFILE: specify er_specimens input file with location, sample, site, etc. information
-F MFILE: specify measurements output file
-Fa AFILE: specify rmag_anisotropy output file # MagIC 2 only
-Fsi SFILE: specify specimens output file
-usr USER: specify who made the measurements
-loc LOC: specify location name for study
-ins INST: specify instrument used
-spc SPEC: specify number of characters to specify specimen from sample
        -ncn NCON: specify naming convention: default is #1 below
-k15 : specify static 15 position mode - default is spinning
-new : replace all existing magic files
DEFAULTS
AFILE: rmag_anisotropy.txt # MagIC 2 only
SFILE: default is to create new specimen file
USER: ""
LOC: "unknown"
INST: ""
SPEC: 0 sample name is same as site (if SPEC is 1, sample is all but last character)
appends to specimen/sample/site files
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
        NB: all others you will have to customize yourself
            or e-mail ltauxe@ucsd.edu for help.
[8] This is a synthetic
[9] ODP naming convention
"""
args = sys.argv
if '-h' in args:
print(main.__doc__)
sys.exit()
dataframe = extractor.command_line_dataframe([ ['WD', False, '.'], ['ID', False, ''],
['usr', False, ''], ['ncn', False, '1'],
['k15', False, False], ['ins', False, ''],
['f', True, ''], ['F', False, 'measurements.txt'],
['Fa', False, 'rmag_anisotropy.txt'],
['Fsi', False, 'specimens.txt'],
['loc', False, 'unknown'], ['spc', False, 0],
['fsi', False, None], ['DM', False, 3] ])
#'WD', 'ID', 'usr', 'ncn', 'k15', 'ins', 'f', 'F', 'Fa', 'Fsi', 'loc', 'spc',
checked_args = extractor.extract_and_check_args(args, dataframe)
output_dir_path, input_dir_path, user, sample_naming_con, static_15_position_mode, instrument, ascfile, meas_output, aniso_output, spec_outfile, locname, specnum, spec_infile, data_model_num = extractor.get_vars(['WD', 'ID', 'usr', 'ncn', 'k15', 'ins', 'f', 'F', 'Fa', 'Fsi', 'loc', 'spc', 'fsi', 'DM'], checked_args)
convert.sufar4(ascfile, meas_output, aniso_output, spec_infile,
spec_outfile, specnum=specnum, sample_naming_con=sample_naming_con,
user=user, locname=locname, instrument=instrument,
static_15_position_mode=static_15_position_mode, dir_path=output_dir_path,
input_dir_path=input_dir_path, data_model_num=data_model_num)
|
python
|
{
"resource": ""
}
|
q11748
|
Demag_GUI.get_coordinate_system
|
train
|
def get_coordinate_system(self):
"""
Check self.Data for available coordinate systems.
Returns
---------
initial_coordinate, coordinate_list : str, list
i.e., 'geographic', ['specimen', 'geographic']
"""
coordinate_list = ['specimen']
initial_coordinate = 'specimen'
for specimen in self.specimens:
if 'geographic' not in coordinate_list and self.Data[specimen]['zijdblock_geo']:
coordinate_list.append('geographic')
initial_coordinate = 'geographic'
if 'tilt-corrected' not in coordinate_list and self.Data[specimen]['zijdblock_tilt']:
coordinate_list.append('tilt-corrected')
return initial_coordinate, coordinate_list
|
python
|
{
"resource": ""
}
|
q11749
|
Demag_GUI.draw_figure
|
train
|
def draw_figure(self, s, update_high_plots=True):
"""
Convenience function that sets current specimen to s and calculates
data for that specimen then redraws all plots.
Parameters
----------
        s : specimen to set the current specimen to
        update_high_plots : bool which decides if high level mean plot
            updates (default: True)
"""
self.initialize_CART_rot(s)
# Draw Zij plot
self.draw_zijderveld()
# Draw specimen equal area
self.draw_spec_eqarea()
# Draw M/M0 plot ( or NLT data on the same area in the GUI)
self.draw_MM0()
# If measurements are selected redisplay selected data
if len(self.selected_meas) > 0:
self.plot_selected_meas()
# Draw high level equal area
if update_high_plots:
self.plot_high_levels_data()
self.canvas4.draw()
|
python
|
{
"resource": ""
}
|
q11750
|
Demag_GUI.plot_high_levels_data
|
train
|
def plot_high_levels_data(self):
"""
Complicated function that draws the high level mean plot on canvas4,
draws all specimen, sample, or site interpretations according to the
UPPER_LEVEL_SHOW variable, draws the fisher mean or fisher mean by
polarity of all interpretations displayed, draws sample orientation
check if on, and if interpretation editor is open it calls the
interpretation editor to have it draw the same things.
"""
# self.toolbar4.home()
high_level = self.level_box.GetValue()
self.UPPER_LEVEL_NAME = self.level_names.GetValue()
self.UPPER_LEVEL_MEAN = self.mean_type_box.GetValue()
draw_net(self.high_level_eqarea)
what_is_it = self.level_box.GetValue()+": "+self.level_names.GetValue()
self.high_level_eqarea.text(-1.2, 1.15, what_is_it, {
'family': self.font_type, 'fontsize': 10*self.GUI_RESOLUTION, 'style': 'normal', 'va': 'center', 'ha': 'left'})
if self.ie_open:
self.ie.draw_net()
self.ie.write(what_is_it)
# plot elements directions
self.plot_high_level_elements()
# plot elements means
self.plot_high_level_means()
# update high level stats after plotting in case of change
self.update_high_level_stats()
        # check sample orientation
if self.check_orient_on:
self.calc_and_plot_sample_orient_check()
self.canvas4.draw()
if self.ie_open:
self.ie.draw()
|
python
|
{
"resource": ""
}
|
q11751
|
Demag_GUI.get_levels_and_coordinates_names
|
train
|
def get_levels_and_coordinates_names(self):
"""
Get the current level of the high level mean plot and the name of
        the corresponding site, study, etc., as well as the code for the
current coordinate system.
Returns
-------
(high_level_type,high_level_name,coordinate_system) : tuple object
containing current high level type, name, and coordinate system
being analyzed
"""
if self.COORDINATE_SYSTEM == "geographic":
dirtype = 'DA-DIR-GEO'
elif self.COORDINATE_SYSTEM == "tilt-corrected":
dirtype = 'DA-DIR-TILT'
else:
dirtype = 'DA-DIR'
if self.level_box.GetValue() == 'sample':
high_level_type = 'samples'
if self.level_box.GetValue() == 'site':
high_level_type = 'sites'
if self.level_box.GetValue() == 'location':
high_level_type = 'locations'
if self.level_box.GetValue() == 'study':
high_level_type = 'study'
high_level_name = str(self.level_names.GetValue())
return (high_level_type, high_level_name, dirtype)
|
python
|
{
"resource": ""
}
|
q11752
|
Demag_GUI.delete_fit
|
train
|
def delete_fit(self, fit, specimen=None):
"""
removes fit from GUI results data
Parameters
----------
fit : fit to remove
specimen : specimen of fit to remove, if not provided and
set to None then the function will find the specimen itself
"""
        if specimen is None:
for spec in self.pmag_results_data['specimens']:
if fit in self.pmag_results_data['specimens'][spec]:
specimen = spec
break
if specimen not in self.pmag_results_data['specimens']:
return
if fit in self.pmag_results_data['specimens'][specimen]:
self.pmag_results_data['specimens'][specimen].remove(fit)
if fit == self.current_fit:
if self.pmag_results_data['specimens'][specimen]:
self.pmag_results_data['specimens'][specimen][-1].select()
else:
self.current_fit = None
self.close_warning = True
self.calculate_high_levels_data()
if self.ie_open:
self.ie.update_editor()
self.update_selection()
|
python
|
{
"resource": ""
}
|
q11753
|
Demag_GUI.generate_warning_text
|
train
|
def generate_warning_text(self):
"""
generates warnings for the current specimen then adds them to the
current warning text for the GUI which will be rendered on a call to
update_warning_box.
"""
self.warning_text = ""
if self.s in list(self.pmag_results_data['specimens'].keys()):
for fit in self.pmag_results_data['specimens'][self.s]:
beg_pca, end_pca = self.get_indices(
fit, fit.tmin, fit.tmax, self.s)
                if beg_pca is None or end_pca is None:
                    self.warning_text += "%s to %s are invalid bounds for fit %s.\n" % (
                        fit.tmin, fit.tmax, fit.name)
                elif end_pca - beg_pca < 2:
                    self.warning_text += "there are not enough points between %s and %s on fit %s.\n" % (
                        fit.tmin, fit.tmax, fit.name)
else:
check_duplicates = []
warning_issued = [] # keep track of warnings issued to avoid redundant warnings
# if within range, attempt to go one additional step beyond
# tmax so that duplicates at the upper bound are caught
if (end_pca + 2) < len(self.Data[self.s]['zijdblock_steps']):
check_endpoint = end_pca + 2
else:
check_endpoint = end_pca + 1
for s, f in zip(self.Data[self.s]['zijdblock_steps'][beg_pca:check_endpoint],
self.Data[self.s]['measurement_flag'][beg_pca:check_endpoint]):
if f == 'g' and [s, 'g'] in check_duplicates:
if s == fit.tmin and s not in warning_issued:
self.warning_text += ("There are multiple good %s " +
"steps at the upper bound of Fit %s. The first " +
"measurement will be used as the lower bound.\n") % (
s, fit.name)
# warning_issued_low.append(s)
warning_issued.append(s)
elif s == fit.tmax and s not in warning_issued:
self.warning_text += ("There are multiple good %s " +
"steps at the upper bound of Fit %s. The first " +
"measurement will be used as the upper bound.\n") % (
s, fit.name)
# warning_issued_high.append(s)
warning_issued.append(s)
elif s not in warning_issued:
self.warning_text += ("Within Fit %s, there are " +
"multiple good measurements at the %s step. All " +
"good measurements are included in the fit.\n") % (
fit.name, s)
warning_issued.append(s)
else:
pass
else:
check_duplicates.append([s, f])
if self.s in list(self.Data.keys()):
if not self.Data[self.s]['zijdblock_geo']:
self.warning_text += "There is no geographic data for this specimen.\n"
if not self.Data[self.s]['zijdblock_tilt']:
self.warning_text += "There is no tilt-corrected data for this specimen.\n"
|
python
|
{
"resource": ""
}
|
q11754
|
Demag_GUI.read_criteria_file
|
train
|
def read_criteria_file(self, criteria_file_name=None):
"""
        reads 2.5 or 3.0 formatted PmagPy criteria file and returns a set of
        nested dictionary 2.5 formatted criteria data that can be passed into
        pmag.grade to filter data.
Parameters
----------
criteria_file : name of criteria file to read in
Returns
-------
        nested dictionary 2.5 formatted criteria data
"""
# import pdb; pdb.set_trace()
acceptance_criteria = pmag.initialize_acceptance_criteria()
if self.data_model == 3:
if criteria_file_name == None:
criteria_file_name = "criteria.txt"
contribution = cb.Contribution(self.WD, read_tables=[
'criteria'], custom_filenames={'criteria': criteria_file_name})
if 'criteria' in contribution.tables:
crit_container = contribution.tables['criteria']
crit_data = crit_container.df
crit_data = crit_data.to_dict('records')
for crit in crit_data:
m2_name = map_magic.convert_direction_criteria(
'magic2', crit['table_column'])
if m2_name != "":
try:
                            if crit['criterion_value'] == 'True':
                                acceptance_criteria[m2_name]['value'] = 1
                            elif crit['criterion_value'] == 'False':
                                acceptance_criteria[m2_name]['value'] = 0
                            else:
                                acceptance_criteria[m2_name]['value'] = float(
                                    crit['criterion_value'])
except ValueError:
self.user_warning("%s is not a valid comparitor for %s, skipping this criteria" % (
str(crit['criterion_value']), m2_name))
continue
acceptance_criteria[m2_name]['pmag_criteria_code'] = crit['criterion']
return acceptance_criteria
else:
if criteria_file_name == None:
criteria_file_name = "pmag_criteria.txt"
try:
acceptance_criteria = pmag.read_criteria_from_file(
os.path.join(self.WD, criteria_file_name), acceptance_criteria)
except (IOError, OSError) as e:
self.user_warning("File %s not found in directory %s aborting opperation" % (
criteria_file_name, self.WD))
return acceptance_criteria
|
python
|
{
"resource": ""
}
|
q11755
|
Demag_GUI.get_PCA_parameters
|
train
|
def get_PCA_parameters(self, specimen, fit, tmin, tmax, coordinate_system, calculation_type):
"""
        Uses pmag.domean to perform a line, line-with-origin, line-anchored,
        or plane least-squares regression or a fisher mean on the
measurement data of specimen in coordinate system between bounds
tmin to tmax
Parameters
----------
specimen : specimen with measurement data in self.Data
fit : fit for which the regression or mean is being applied
(used for calculating measurement index of tmin and tmax)
tmin : lower bound of measurement data
tmax : upper bound of measurement data
coordinate_system : which coordinate system the measurement data
should be in
    calculation_type : type of regression or mean to perform
        (options - DE-BFL: line, DE-BFL-A: line-anchored, DE-BFL-O:
        line-with-origin, DE-FM: Fisher, DE-BFP: plane)
Returns
-------
    mpars : a 2.5 data model dictionary-type specimen record of the dec,
        inc, etc. of the regression or mean
"""
if tmin == '' or tmax == '':
return
beg_pca, end_pca = self.get_indices(fit, tmin, tmax, specimen)
if coordinate_system == 'geographic' or coordinate_system == 'DA-DIR-GEO':
block = self.Data[specimen]['zijdblock_geo']
elif coordinate_system == 'tilt-corrected' or coordinate_system == 'DA-DIR-TILT':
block = self.Data[specimen]['zijdblock_tilt']
else:
block = self.Data[specimen]['zijdblock']
if block == []:
print(("-E- no measurement data for specimen %s in coordinate system %s" %
(specimen, coordinate_system)))
mpars = {}
elif end_pca > beg_pca and end_pca - beg_pca > 1:
try:
            # performs the regression or mean
mpars = pmag.domean(block, beg_pca, end_pca, calculation_type)
except:
print((block, beg_pca, end_pca, calculation_type,
specimen, fit.name, tmin, tmax, coordinate_system))
return
if 'specimen_direction_type' in mpars and mpars['specimen_direction_type'] == 'Error':
print(("-E- no measurement data for specimen %s in coordinate system %s" %
(specimen, coordinate_system)))
return {}
else:
mpars = {}
for k in list(mpars.keys()):
try:
if math.isnan(float(mpars[k])):
mpars[k] = 0
except:
pass
if "DE-BFL" in calculation_type and 'specimen_dang' not in list(mpars.keys()):
mpars['specimen_dang'] = 0
if 'best fit vector' in self.plane_display_box.GetValue():
self.calculate_best_fit_vectors()
return(mpars)
|
python
|
{
"resource": ""
}
|
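For readers without PmagPy at hand, the following is a minimal sketch of the principal-component line fit underlying the DE-BFL calculation types: center the Cartesian demagnetization steps and take the dominant eigenvector of their covariance. Synthetic data and plain numpy, not pmag.domean itself.

import numpy as np

# synthetic, roughly collinear demagnetization trajectory (x, y, z)
steps = np.array([[1.0, 0.9, 0.8],
                  [0.8, 0.72, 0.65],
                  [0.6, 0.55, 0.48],
                  [0.4, 0.36, 0.33],
                  [0.2, 0.18, 0.15]])

centered = steps - steps.mean(axis=0)
evals, evecs = np.linalg.eigh(np.cov(centered.T))
direction = evecs[:, np.argmax(evals)]  # best-fit line direction

x, y, z = direction
dec = np.degrees(np.arctan2(y, x)) % 360.
inc = np.degrees(np.arcsin(z / np.linalg.norm(direction)))
print("dec=%.1f inc=%.1f" % (dec, inc))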
q11756
|
Demag_GUI.autointerpret
|
train
|
def autointerpret(self, event, step_size=None, calculation_type="DE-BFL"):
"""
Clears current interpretations and adds interpretations to every
specimen of type = calculation_type by attempting fits of size =
step size and type = calculation_type and testing the mad or a95
then finding peaks in these to note areas of maximum error then fits
between these peaks excluding them.
Parameters
----------
step_size : int that is the size of fits to make while stepping
through data if None then step size = len(meas data for
specimen)/10 rounded up if that value is greater than 3 else it
is 3 (default: None)
calculation_type : type of fit to make (default: DE-BFL or line)
"""
if not self.user_warning("This feature is in ALPHA and still in development and testing. It is subject to bugs and will often create a LOT of new interpretations. This feature should only be used to get a general idea of the trend of the data before actually mannuely interpreting the data and the output of this function should certainly not be trusted as 100% accurate and useable for publication. Would you like to continue?"):
return
if not self.clear_interpretations():
return
print("Autointerpretation Start")
self.set_test_mode(True)
for specimen in self.specimens:
self.autointerpret_specimen(specimen, step_size, calculation_type)
self.set_test_mode(False)
if self.pmag_results_data['specimens'][self.s] != []:
self.current_fit = self.pmag_results_data['specimens'][self.s][-1]
else:
self.current_fit = None
print("Autointerpretation Complete")
self.update_selection()
if self.ie_open:
self.ie.update_editor()
|
python
|
{
"resource": ""
}
|
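A toy illustration of the scan-and-split idea the docstring describes: score trial fits along the data, then treat local maxima of the score as boundaries between candidate fits. The scores below are stand-ins; the GUI derives them from the MAD or a95 of trial fits, and it also excludes the peak steps themselves.

import numpy as np

# stand-in fit-quality scores (e.g. MAD of a trial fit starting at each step)
scores = np.array([2.0, 2.5, 9.0, 3.0, 2.2, 8.5, 2.8, 2.1])
# local maxima mark steps where trial fits degrade sharply; use them as
# boundaries and fit the segments between them
peaks = [i for i in range(1, len(scores) - 1)
         if scores[i] > scores[i - 1] and scores[i] > scores[i + 1]]
segments = np.split(np.arange(len(scores)), peaks)
print(peaks)      # [2, 5]
print(segments)   # index ranges of candidate fits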
q11757
|
Demag_GUI.calculate_mean
|
train
|
def calculate_mean(self, pars_for_mean, calculation_type):
"""
    Uses pmag.dolnp or pmag.fisher_by_pol to compute a Fisher mean or a
    Fisher mean by polarity on the list of dictionaries in pars_for_mean
    Parameters
    ----------
    pars_for_mean : list of dictionaries with all data to average
    calculation_type : type of mean to take (options: Fisher,
        Fisher by polarity)
Returns
-------
mpars : dictionary with information of mean or empty dictionary
TODO : put Bingham statistics back in once a method for displaying
them is figured out
"""
if len(pars_for_mean) == 0:
return({})
elif len(pars_for_mean) == 1:
return ({"dec": float(pars_for_mean[0]['dec']), "inc": float(pars_for_mean[0]['inc']), "calculation_type": calculation_type, "n": 1})
# elif calculation_type =='Bingham':
# data=[]
# for pars in pars_for_mean:
# # ignore great circle
# if 'direction_type' in pars.keys() and 'direction_type'=='p':
# continue
# else:
# data.append([pars['dec'],pars['inc']])
# mpars=pmag.dobingham(data)
elif calculation_type == 'Fisher':
mpars = pmag.dolnp(pars_for_mean, 'direction_type')
elif calculation_type == 'Fisher by polarity':
mpars = pmag.fisher_by_pol(pars_for_mean)
for key in list(mpars.keys()):
mpars[key]['n_planes'] = 0
mpars[key]['calculation_type'] = 'Fisher'
mpars['calculation_type'] = calculation_type
return mpars
|
python
|
{
"resource": ""
}
|
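Where pmag.dolnp is unavailable, the all-lines case reduces to classic Fisher (1953) statistics; a self-contained sketch on synthetic directions, not PmagPy's implementation (which also handles great-circle planes):

import numpy as np

def fisher_mean(decs, incs):
    """Minimal Fisher (1953) mean of directions given in degrees."""
    d, i = np.radians(decs), np.radians(incs)
    # unit vectors
    x = np.cos(i) * np.cos(d)
    y = np.cos(i) * np.sin(d)
    z = np.sin(i)
    n = len(d)
    R = np.sqrt(x.sum()**2 + y.sum()**2 + z.sum()**2)  # resultant length
    mdec = np.degrees(np.arctan2(y.sum(), x.sum())) % 360.
    minc = np.degrees(np.arcsin(z.sum() / R))
    k = (n - 1.) / (n - R)                             # precision parameter
    a95 = np.degrees(np.arccos(1. - ((n - R) / R) *
                               ((1. / 0.05)**(1. / (n - 1.)) - 1.)))
    return {"dec": mdec, "inc": minc, "n": n, "k": k, "alpha95": a95}

print(fisher_mean([10., 12., 8., 11.], [45., 47., 44., 46.]))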
q11758
|
Demag_GUI.calculate_high_levels_data
|
train
|
def calculate_high_levels_data(self):
"""
calculates high level mean data for the high level mean plot using
information in level_box, level_names, mean_type_box, and
mean_fit_box also updates the information in the ie to match high
level mean data in main GUI.
"""
high_level_type = str(self.level_box.GetValue())
    if high_level_type == 'sample':
        high_level_type = 'samples'
    elif high_level_type == 'site':
        high_level_type = 'sites'
    elif high_level_type == 'location':
        high_level_type = 'locations'
high_level_name = str(self.level_names.GetValue())
calculation_type = str(self.mean_type_box.GetValue())
elements_type = self.UPPER_LEVEL_SHOW
if self.ie_open:
self.ie.mean_type_box.SetStringSelection(calculation_type)
self.calculate_high_level_mean(
high_level_type, high_level_name, calculation_type, elements_type, self.mean_fit)
|
python
|
{
"resource": ""
}
|
q11759
|
Demag_GUI.quiet_reset_backend
|
train
|
def quiet_reset_backend(self, reset_interps=True):
"""
Doesn't update plots or logger or any visable data but resets all
measurement data, hierarchy data, and optionally resets
intepretations.
Parameters
----------
reset_interps : bool to tell the function to reset fits or
not.
"""
new_Data_info = self.get_data_info()
new_Data, new_Data_hierarchy = self.get_data()
if not new_Data:
print("Data read in failed when reseting, aborting reset")
return
else:
self.Data, self.Data_hierarchy, self.Data_info = new_Data, new_Data_hierarchy, new_Data_info
if reset_interps:
self.pmag_results_data = {}
for level in ['specimens', 'samples', 'sites', 'locations', 'study']:
self.pmag_results_data[level] = {}
        self.high_level_means = {}
for high_level in ['samples', 'sites', 'locations', 'study']:
if high_level not in list(self.high_level_means.keys()):
self.high_level_means[high_level] = {}
    # get list of locations
    self.locations = list(self.Data_hierarchy['locations'].keys())
    self.locations.sort()
    # get list of sites
    self.sites = list(self.Data_hierarchy['sites'].keys())
    self.sites.sort(key=spec_key_func)
self.samples = [] # sort the samples within each site
for site in self.sites:
self.samples.extend(
sorted(self.Data_hierarchy['sites'][site]['samples'], key=spec_key_func))
self.specimens = [] # sort the specimens within each sample
for samp in self.samples:
self.specimens.extend(
sorted(self.Data_hierarchy['samples'][samp]['specimens'], key=spec_key_func))
# --------------------------------------------------------------------
# initialize first specimen in list as current specimen
# --------------------------------------------------------------------
if self.s in self.specimens:
pass
elif len(self.specimens) > 0:
self.select_specimen(str(self.specimens[0]))
else:
self.select_specimen("")
try:
self.sample = self.Data_hierarchy['sample_of_specimen'][self.s]
except KeyError:
self.sample = ""
try:
self.site = self.Data_hierarchy['site_of_specimen'][self.s]
except KeyError:
self.site = ""
if self.Data and reset_interps:
self.update_pmag_tables()
if self.ie_open:
self.ie.specimens_list = self.specimens
|
python
|
{
"resource": ""
}
|
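spec_key_func is defined elsewhere in the module; the sorting calls above presumably want natural ordering of specimen names, where numeric chunks compare as numbers. A sketch of such a key (not the module's actual implementation):

import re

def spec_key(name):
    """split a name into (text, number) chunks so 'spc10' sorts after 'spc2'"""
    return [int(c) if c.isdigit() else c for c in re.split(r'(\d+)', name)]

print(sorted(["spc10", "spc2", "spc1"], key=spec_key))
# ['spc1', 'spc2', 'spc10']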
q11760
|
Demag_GUI.reset_backend
|
train
|
def reset_backend(self, warn_user=True, reset_interps=True):
"""
Resets GUI data and updates GUI displays such as plots, boxes, and
logger
Parameters
----------
    warn_user : bool which decides if a warning dialog is displayed to
        the user to ask about resetting data
    reset_interps : bool which decides if interpretations are re-read
        from the pmag tables or left alone
"""
if warn_user and not self.data_loss_warning():
return False
# reset backend, including get_data(), get_data_info()
self.quiet_reset_backend(reset_interps=reset_interps)
# reset specimens box
self.specimens_box.SetItems(self.specimens)
self.specimens_box.SetStringSelection(str(self.s))
# reset site level means box
self.level_names.Clear()
self.level_names.AppendItems(self.sites)
if self.sites:
self.level_names.SetSelection(0)
# reset coordinate system
self.COORDINATE_SYSTEM, self.coordinate_list = self.get_coordinate_system()
self.coordinates_box.Clear()
self.coordinates_box.AppendItems(self.coordinate_list)
self.coordinates_box.SetStringSelection(self.COORDINATE_SYSTEM)
# get cart rot
self.initialize_CART_rot(str(self.s))
# draw everything
if self.Data:
if not self.current_fit:
self.draw_figure(self.s)
self.update_selection()
else:
self.Add_text()
self.update_fit_boxes()
if self.ie_open:
self.ie.update_editor()
|
python
|
{
"resource": ""
}
|
q11761
|
Demag_GUI.recalculate_current_specimen_interpreatations
|
train
|
def recalculate_current_specimen_interpreatations(self):
"""
recalculates all interpretations on all specimens for all coordinate
systems. Does not display recalcuated data.
"""
self.initialize_CART_rot(self.s)
if str(self.s) in self.pmag_results_data['specimens']:
for fit in self.pmag_results_data['specimens'][self.s]:
if fit.get('specimen') and 'calculation_type' in fit.get('specimen'):
fit.put(self.s, 'specimen', self.get_PCA_parameters(
self.s, fit, fit.tmin, fit.tmax, 'specimen', fit.get('specimen')['calculation_type']))
if len(self.Data[self.s]['zijdblock_geo']) > 0 and fit.get('geographic') and 'calculation_type' in fit.get('geographic'):
fit.put(self.s, 'geographic', self.get_PCA_parameters(
self.s, fit, fit.tmin, fit.tmax, 'geographic', fit.get('geographic')['calculation_type']))
if len(self.Data[self.s]['zijdblock_tilt']) > 0 and fit.get('tilt-corrected') and 'calculation_type' in fit.get('tilt-corrected'):
fit.put(self.s, 'tilt-corrected', self.get_PCA_parameters(self.s, fit, fit.tmin,
fit.tmax, 'tilt-corrected', fit.get('tilt-corrected')['calculation_type']))
|
python
|
{
"resource": ""
}
|
q11762
|
Demag_GUI.merge_pmag_recs
|
train
|
def merge_pmag_recs(self, old_recs):
"""
Takes in a list of dictionaries old_recs and returns a list of
dictionaries where every dictionary in the returned list has the
same keys as all the others.
Parameters
----------
old_recs : list of dictionaries to fix
Returns
-------
recs : list of dictionaries with same keys
"""
recs = {}
recs = deepcopy(old_recs)
headers = []
for rec in recs:
for key in list(rec.keys()):
if key not in headers:
headers.append(key)
for rec in recs:
for header in headers:
if header not in list(rec.keys()):
rec[header] = ""
return recs
|
python
|
{
"resource": ""
}
|
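An equivalent, slightly more compact way to write the key-union normalization above, using a set comprehension and dict.setdefault; behavior matches the method for flat records.

from copy import deepcopy

def merge_recs(old_recs):
    recs = deepcopy(old_recs)
    headers = sorted({key for rec in recs for key in rec})  # union of all keys
    for rec in recs:
        for header in headers:
            rec.setdefault(header, "")
    return recs

print(merge_recs([{"a": 1}, {"b": 2}]))
# [{'a': 1, 'b': ''}, {'b': 2, 'a': ''}]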
q11763
|
Demag_GUI.select_specimen
|
train
|
def select_specimen(self, specimen):
"""
Goes through the calculations necessary to plot measurement data for
specimen and sets specimen as current GUI specimen, also attempts to
handle changing current fit.
"""
try:
fit_index = self.pmag_results_data['specimens'][self.s].index(
self.current_fit)
    except (KeyError, ValueError):
        fit_index = None
# sets self.s to specimen calculates params etc.
self.initialize_CART_rot(specimen)
self.list_bound_loc = 0
    if fit_index is not None and self.s in self.pmag_results_data['specimens']:
try:
self.current_fit = self.pmag_results_data['specimens'][self.s][fit_index]
except IndexError:
self.current_fit = None
else:
self.current_fit = None
if self.s != self.specimens_box.GetValue():
self.specimens_box.SetValue(self.s)
|
python
|
{
"resource": ""
}
|
q11764
|
Demag_GUI.clear_interpretations
|
train
|
def clear_interpretations(self, message=None):
"""
Clears all specimen interpretations
Parameters
----------
message : message to display when warning the user that all
fits will be deleted. If None default message is used (None is
default)
"""
if self.total_num_of_interpertations() == 0:
print("There are no interpretations")
return True
    if message is None:
        message = "All interpretations will be deleted and all unsaved data will be irretrievable; continue?"
dlg = wx.MessageDialog(self, caption="Delete?",
message=message, style=wx.OK | wx.CANCEL)
result = self.show_dlg(dlg)
dlg.Destroy()
if result != wx.ID_OK:
return False
for specimen in list(self.pmag_results_data['specimens'].keys()):
self.pmag_results_data['specimens'][specimen] = []
    # later on, when high level means are fixed, remove the below loop and loop over pmag_results_data
for high_level_type in ['samples', 'sites', 'locations', 'study']:
self.high_level_means[high_level_type] = {}
self.current_fit = None
if self.ie_open:
self.ie.update_editor()
return True
|
python
|
{
"resource": ""
}
|
q11765
|
Demag_GUI.mark_meas_good
|
train
|
def mark_meas_good(self, g_index):
"""
    Marks the g_index'th measurement of the current specimen good
Parameters
----------
g_index : int that gives the index of the measurement to mark good,
indexed from 0
"""
meas_index, ind_data = 0, []
for i, meas_data in enumerate(self.mag_meas_data):
if meas_data['er_specimen_name'] == self.s:
ind_data.append(i)
meas_index = ind_data[g_index]
self.Data[self.s]['measurement_flag'][g_index] = 'g'
if len(self.Data[self.s]['zijdblock'][g_index]) < 6:
self.Data[self.s]['zijdblock'][g_index].append('g')
self.Data[self.s]['zijdblock'][g_index][5] = 'g'
if 'zijdblock_geo' in self.Data[self.s] and g_index < len(self.Data[self.s]['zijdblock_geo']):
if len(self.Data[self.s]['zijdblock_geo'][g_index]) < 6:
self.Data[self.s]['zijdblock_geo'][g_index].append('g')
self.Data[self.s]['zijdblock_geo'][g_index][5] = 'g'
if 'zijdblock_tilt' in self.Data[self.s] and g_index < len(self.Data[self.s]['zijdblock_tilt']):
if len(self.Data[self.s]['zijdblock_tilt'][g_index]) < 6:
self.Data[self.s]['zijdblock_tilt'][g_index].append('g')
self.Data[self.s]['zijdblock_tilt'][g_index][5] = 'g'
self.mag_meas_data[meas_index]['measurement_flag'] = 'g'
if self.data_model == 3.0:
meas_name = str(self.Data[self.s]['measurement_names'][g_index])
mdf = self.con.tables['measurements'].df
# check for multiple measurements with the same name
if not isinstance(mdf.loc[meas_name], pd.Series):
res = self.user_warning("Your measurements table has non-unique measurement names.\nYou may end up marking more than one measurement as good.\nRight click this measurement again to undo.")
# mark measurement as good
mdf.loc[meas_name, 'quality'] = 'g'
|
python
|
{
"resource": ""
}
|
q11766
|
Demag_GUI.mark_fit_good
|
train
|
def mark_fit_good(self, fit, spec=None):
"""
Marks fit good so it is used in high level means
Parameters
----------
fit : fit to mark good
spec : specimen of fit to mark good (optional though runtime will
increase if not provided)
"""
if spec == None:
for spec, fits in list(self.pmag_results_data['specimens'].items()):
if fit in fits:
break
samp = self.Data_hierarchy['sample_of_specimen'][spec]
if 'sample_orientation_flag' not in self.Data_info['er_samples'][samp]:
self.Data_info['er_samples'][samp]['sample_orientation_flag'] = 'g'
samp_flag = self.Data_info['er_samples'][samp]['sample_orientation_flag']
    if samp_flag == 'g':
        if fit in self.bad_fits:
            self.bad_fits.remove(fit)
        return True
else:
        self.user_warning(
            "Cannot mark this interpretation good; its sample orientation has been marked bad")
return False
|
python
|
{
"resource": ""
}
|
q11767
|
Demag_GUI.mark_fit_bad
|
train
|
def mark_fit_bad(self, fit):
"""
Marks fit bad so it is excluded from high level means
Parameters
----------
fit : fit to mark bad
"""
if fit not in self.bad_fits:
self.bad_fits.append(fit)
return True
else:
return False
|
python
|
{
"resource": ""
}
|
q11768
|
Demag_GUI.get_preferences
|
train
|
def get_preferences(self):
"""
Gets preferences for certain display variables from
zeq_gui_preferences.
"""
# default
preferences = {}
preferences['gui_resolution'] = 100.
preferences['show_Zij_treatments'] = True
preferences['show_Zij_treatments_steps'] = 2.
preferences['show_eqarea_treatments'] = False
preferences['auto_save'] = True
# preferences['show_statistics_on_gui']=["int_n","int_ptrm_n","frac","scat","gmax","b_beta","int_mad","dang","f","fvds","g","q","drats"]#,'ptrms_dec','ptrms_inc','ptrms_mad','ptrms_angle']
#
# try to read preferences file:
#user_data_dir = find_pmag_dir.find_user_data_dir("demag_gui")
# if not user_data_dir:
# return preferences
# if os.path.exists(user_data_dir):
# pref_file = os.path.join(user_data_dir, "demag_gui_preferences.json")
# if os.path.exists(pref_file):
# with open(pref_file, "r") as pfile:
# return json.load(pfile)
return preferences
|
python
|
{
"resource": ""
}
|
q11769
|
Demag_GUI.read_magic_file
|
train
|
def read_magic_file(self, path, sort_by_this_name):
"""
reads a magic formated data file from path and sorts the keys
according to sort_by_this_name
Parameters
----------
path : path to file to read
sort_by_this_name : variable to sort data by
"""
DATA = {}
try:
with open(path, 'r') as finput:
lines = list(finput.readlines()[1:])
    except FileNotFoundError:
        # return an empty mapping: falsy, and the same type as a
        # successful read
        return {}
# fin=open(path,'r')
# fin.readline()
line = lines[0]
header = line.strip('\n').split('\t')
error_strings = []
for line in lines[1:]:
tmp_data = {}
tmp_line = line.strip('\n').split('\t')
for i in range(len(tmp_line)):
tmp_data[header[i]] = tmp_line[i]
if tmp_data[sort_by_this_name] in list(DATA.keys()):
error_string = "-E- ERROR: magic file %s has more than one line for %s %s" % (
path, sort_by_this_name, tmp_data[sort_by_this_name])
# only print each error message once
if error_string not in error_strings:
print(error_string)
error_strings.append(error_string)
DATA[tmp_data[sort_by_this_name]] = tmp_data
return(DATA)
|
python
|
{
"resource": ""
}
|
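A standalone sketch of the same tab-delimited parse on an in-memory MagIC 2.5-style table: the first line names the table, the second is the header, and later duplicates of the key overwrite earlier ones, as in the method above. Column names are illustrative.

import io

raw = ("tab\ter_specimens\n"
       "er_specimen_name\tspecimen_lat\n"
       "spc01\t10.0\n"
       "spc02\t12.5\n")

def read_magic(buf, sort_by):
    lines = buf.read().splitlines()[1:]        # skip the "tab\t..." line
    header = lines[0].split('\t')
    data = {}
    for line in lines[1:]:
        rec = dict(zip(header, line.split('\t')))
        data[rec[sort_by]] = rec               # last record wins on duplicates
    return data

print(read_magic(io.StringIO(raw), 'er_specimen_name'))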
q11770
|
Demag_GUI.read_redo_file
|
train
|
def read_redo_file(self, redo_file):
"""
    Reads a .redo-formatted file and replaces all current interpretations
    with interpretations taken from the .redo file
Parameters
----------
redo_file : path to .redo file to read
"""
if not self.clear_interpretations():
return
print("-I- read redo file and processing new bounds")
fin = open(redo_file, 'r')
new_s = ""
for Line in fin.read().splitlines():
line = Line.split('\t')
specimen = line[0]
if specimen.startswith("current_"):
specimen = specimen.lstrip("current_")
new_s = specimen
        if len(line) < 6:
            print(("insufficient data for specimen %s and fit %s" %
                   (line[0], line[4] if len(line) > 4 else "unknown")))
            continue
if len(line) == 6:
line.append('g')
if specimen not in self.specimens:
print(
("specimen %s not found in this data set and will be ignored" % (specimen)))
continue
tmin, tmax = self.parse_bound_data(line[2], line[3], specimen)
new_fit = self.add_fit(
specimen, line[4], tmin, tmax, line[1], line[5])
if line[6] == 'b' and new_fit != None:
self.bad_fits.append(new_fit)
fin.close()
if new_s != "":
self.select_specimen(new_s)
if (self.s not in self.pmag_results_data['specimens']) or (not self.pmag_results_data['specimens'][self.s]):
self.current_fit = None
else:
self.current_fit = self.pmag_results_data['specimens'][self.s][-1]
self.calculate_high_levels_data()
if self.ie_open:
self.ie.update_editor()
self.update_selection()
|
python
|
{
"resource": ""
}
|
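The .redo format assumed by the parser above appears to be tab-separated: specimen, calculation type, tmin, tmax, fit name, color, and an optional good/bad flag. A hedged sketch of reading one such line:

# hypothetical .redo line, fields inferred from the parser above
line = "spc01\tDE-BFL\t100\t500\tFit 1\tblue\tg"
fields = line.split('\t')
if len(fields) == 6:          # flag column is optional; default to good
    fields.append('g')
specimen, calc_type, tmin, tmax, name, color, flag = fields[:7]
print(specimen, calc_type, tmin, tmax, name, color, flag)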
q11771
|
Demag_GUI.change_WD
|
train
|
def change_WD(self, new_WD, meas_file=""):
"""
Changes Demag GUI's current WD to new_WD if possible
Parameters
----------
new_WD : WD to change to current GUI's WD
"""
new_WD = os.path.abspath(new_WD)
if not os.path.isdir(new_WD):
return
self.WD = new_WD
if not meas_file:
if self.data_model == None:
if os.path.isfile(os.path.join(self.WD, "measurements.txt")) and os.path.isfile(os.path.join(self.WD, "magic_measurements.txt")):
                ui_dialog = demag_dialogs.user_input(self, ['data_model'], parse_funcs=[
                    float], heading="More than one measurement file found in CWD with different data models; please input preferred data model (2.5, 3.0)", values=[3])
self.show_dlg(ui_dialog)
ui_data = ui_dialog.get_values()
self.data_model = ui_data[1]['data_model']
elif os.path.isfile(os.path.join(self.WD, "measurements.txt")):
self.data_model = 3.0
elif os.path.isfile(os.path.join(self.WD, "magic_measurements.txt")):
self.data_model = 2.5
else:
self.user_warning(
"No measurement file found in chosen directory")
self.data_model = 3
try:
self.data_model = float(self.data_model)
if int(self.data_model) == 3:
meas_file = os.path.join(self.WD, "measurements.txt")
elif int(self.data_model) == 2:
meas_file = os.path.join(self.WD, "magic_measurements.txt")
else:
meas_file = ''
self.data_model = 3
except (ValueError, TypeError) as e:
self.user_warning(
"Provided data model is unrecognized or invalid, assuming you want data model 3")
self.data_model = 3
if os.path.isfile(meas_file):
self.magic_file = meas_file
else:
self.magic_file = self.choose_meas_file()
if not self.data_model:
self.data_model = 3
|
python
|
{
"resource": ""
}
|
q11772
|
Demag_GUI.init_log_file
|
train
|
def init_log_file(self):
"""
redirects stdout to a log file to prevent printing to a hanging
terminal when dealing with the compiled binary.
"""
# redirect terminal output
self.old_stdout = sys.stdout
sys.stdout = open(os.path.join(self.WD, "demag_gui.log"), 'w+')
|
python
|
{
"resource": ""
}
|
q11773
|
Demag_GUI.write_acceptance_criteria_to_file
|
train
|
def write_acceptance_criteria_to_file(self):
"""
Writes current GUI acceptance criteria to criteria.txt or
pmag_criteria.txt depending on data model
"""
crit_list = list(self.acceptance_criteria.keys())
crit_list.sort()
rec = {}
rec['pmag_criteria_code'] = "ACCEPT"
# rec['criteria_definition']=""
rec['criteria_definition'] = "acceptance criteria for study"
rec['er_citation_names'] = "This study"
for crit in crit_list:
if type(self.acceptance_criteria[crit]['value']) == str:
if self.acceptance_criteria[crit]['value'] != "-999" and self.acceptance_criteria[crit]['value'] != "":
rec[crit] = self.acceptance_criteria[crit]['value']
elif type(self.acceptance_criteria[crit]['value']) == int:
if self.acceptance_criteria[crit]['value'] != -999:
rec[crit] = "%.i" % (
self.acceptance_criteria[crit]['value'])
elif type(self.acceptance_criteria[crit]['value']) == float:
if float(self.acceptance_criteria[crit]['value']) == -999:
continue
decimal_points = self.acceptance_criteria[crit]['decimal_points']
if decimal_points != -999:
command = "rec[crit]='%%.%sf'%%(self.acceptance_criteria[crit]['value'])" % (
decimal_points)
exec(command)
else:
rec[crit] = "%e" % (
self.acceptance_criteria[crit]['value'])
pmag.magic_write(os.path.join(self.WD, "pmag_criteria.txt"), [
rec], "pmag_criteria")
|
python
|
{
"resource": ""
}
|
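The variable-precision formatting in the float branch above can be expressed directly, without generating source code for exec; a tiny demonstration:

value, decimal_points = 0.123456, 3
print("%.*f" % (decimal_points, value))        # '0.123'
print(format(value, ".%df" % decimal_points))  # same result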
q11774
|
Demag_GUI.show_dlg
|
train
|
def show_dlg(self, dlg):
"""
Abstraction function that is to be used instead of dlg.ShowModal
Parameters
----------
dlg : dialog to ShowModal if possible
"""
if not self.test_mode:
dlg.Center()
return dlg.ShowModal()
else:
return dlg.GetAffirmativeId()
|
python
|
{
"resource": ""
}
|
q11775
|
Demag_GUI.get_DIR
|
train
|
def get_DIR(self):
"""
Dialog that allows user to choose a working directory
"""
dlg = wx.DirDialog(self, "Choose a directory:", defaultPath=self.currentDirectory,
style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON | wx.DD_CHANGE_DIR)
ok = self.show_dlg(dlg)
if ok == wx.ID_OK:
new_WD = dlg.GetPath()
dlg.Destroy()
else:
new_WD = os.getcwd()
dlg.Destroy()
return new_WD
|
python
|
{
"resource": ""
}
|
q11776
|
Demag_GUI.choose_meas_file
|
train
|
def choose_meas_file(self, event=None):
"""
Opens a dialog allowing the user to pick a measurement file
"""
dlg = wx.FileDialog(
self, message="Please choose a measurement file",
defaultDir=self.WD,
defaultFile="measurements.txt",
wildcard="measurement files (*.magic,*.txt)|*.magic;*.txt",
style=wx.FD_OPEN | wx.FD_CHANGE_DIR
)
if self.show_dlg(dlg) == wx.ID_OK:
meas_file = dlg.GetPath()
dlg.Destroy()
else:
meas_file = ''
self.data_model = 2.5
dlg.Destroy()
return meas_file
|
python
|
{
"resource": ""
}
|
q11777
|
Demag_GUI.saved_dlg
|
train
|
def saved_dlg(self, message, caption='Saved:'):
"""
Shows a dialog that tells the user that a file has been saved
Parameters
----------
message : message to display to user
caption : title for dialog (default: "Saved:")
"""
dlg = wx.MessageDialog(self, caption=caption,
message=message, style=wx.OK)
result = self.show_dlg(dlg)
dlg.Destroy()
|
python
|
{
"resource": ""
}
|
q11778
|
Demag_GUI.user_warning
|
train
|
def user_warning(self, message, caption='Warning!'):
"""
Shows a dialog that warns the user about some action
Parameters
----------
message : message to display to user
caption : title for dialog (default: "Warning!")
Returns
-------
continue_bool : True or False
"""
dlg = wx.MessageDialog(self, message, caption,
wx.OK | wx.CANCEL | wx.ICON_WARNING)
if self.show_dlg(dlg) == wx.ID_OK:
continue_bool = True
else:
continue_bool = False
dlg.Destroy()
return continue_bool
|
python
|
{
"resource": ""
}
|
q11779
|
Demag_GUI.on_close_criteria_box
|
train
|
def on_close_criteria_box(self, dia):
"""
    Function called on close of the change acceptance criteria dialog;
    writes the new criteria to the hard drive and sets them as the
    GUI's current criteria.
Parameters
----------
dia : closed change criteria dialog
"""
window_list_specimens = [
'specimen_n', 'specimen_mad', 'specimen_dang', 'specimen_alpha95']
window_list_samples = ['sample_n', 'sample_n_lines',
'sample_n_planes', 'sample_k', 'sample_r', 'sample_alpha95']
window_list_sites = ['site_n', 'site_n_lines',
'site_n_planes', 'site_k', 'site_r', 'site_alpha95']
demag_gui_supported_criteria = window_list_specimens + \
window_list_samples+window_list_sites
if self.data_model == 3:
new_crits = []
for crit in demag_gui_supported_criteria:
new_crit = {}
command = "dia.set_%s.GetValue()" % (crit)
new_value = pmag.execute(command, dia=dia)
if new_value == None or new_value == '':
continue
d = findall(r"[-+]?\d*\.\d+|\d+", new_value)
if len(d) > 0:
d = d[0]
comp = new_value.strip(str(d))
if comp == '':
comp = '>='
if 'specimen' in crit:
col = "specimens."+map_magic.spec_magic2_2_magic3_map[crit]
elif 'sample' in crit:
col = "samples."+map_magic.samp_magic2_2_magic3_map[crit]
elif 'site' in crit:
col = "sites."+map_magic.site_magic2_2_magic3_map[crit]
else:
print("no way this like is impossible")
continue
new_crit['criterion'] = "ACCEPT"
new_crit['criterion_value'] = d
new_crit['criterion_operation'] = comp
new_crit['table_column'] = col
new_crit['citations'] = "This study"
new_crit['description'] = ''
new_crits.append(new_crit)
cdf = DataFrame(new_crits)
cdf = cdf.set_index("table_column")
cdf["table_column"] = cdf.index
cdf = cdf.reindex_axis(sorted(cdf.columns), axis=1)
if 'criteria' not in self.con.tables:
cols = ['criterion', 'criterion_value', 'criterion_operation',
'table_column', 'citations', 'description']
self.con.add_empty_magic_table('criteria', col_names=cols)
self.con.tables['criteria'].df = cdf
self.con.tables['criteria'].write_magic_file(dir_path=self.WD)
else:
for crit in demag_gui_supported_criteria:
command = "new_value=dia.set_%s.GetValue()" % (crit)
exec(command)
# empty box
if new_value == "":
self.acceptance_criteria[crit]['value'] = -999
continue
# box with no valid number
try:
float(new_value)
except:
self.show_crit_window_err_messege(crit)
continue
self.acceptance_criteria[crit]['value'] = float(new_value)
# message dialog
self.saved_dlg(message="changes saved to criteria")
self.write_acceptance_criteria_to_file()
dia.Destroy()
|
python
|
{
"resource": ""
}
|
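A quick standalone check of the comparator-plus-number parsing used in the data model 3 branch above: re.findall pulls out the number, stripping its characters leaves the comparator, and '>=' is the default for a bare number.

from re import findall

for new_value in (">=3.5", "<10", "3.5"):
    d = findall(r"[-+]?\d*\.\d+|\d+", new_value)[0]
    comp = new_value.strip(d).strip() or '>='
    print(comp, d)
# >= 3.5 / < 10 / >= 3.5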
q11780
|
Demag_GUI.show_crit_window_err_messege
|
train
|
def show_crit_window_err_messege(self, crit):
"""
    error message shown when a valid number is not entered into the
    criteria dialog boxes
"""
        dlg = wx.MessageDialog(
            self, caption="Error:", message="not a valid value for statistic %s\n ignoring value" % crit, style=wx.OK)
result = self.show_dlg(dlg)
if result == wx.ID_OK:
dlg.Destroy()
|
python
|
{
"resource": ""
}
|
q11781
|
Demag_GUI.update_warning_box
|
train
|
def update_warning_box(self):
"""
updates the warning box with whatever the warning_text variable
contains for this specimen
"""
self.warning_box.Clear()
if self.warning_text == "":
self.warning_box.AppendText("No Problems")
else:
self.warning_box.AppendText(self.warning_text)
|
python
|
{
"resource": ""
}
|
q11782
|
Demag_GUI.update_GUI_with_new_interpretation
|
train
|
def update_GUI_with_new_interpretation(self):
"""
    update statistics boxes and figures with a new interpretation when
    selecting a new temperature bound
"""
self.update_fit_bounds_and_statistics()
self.draw_interpretations()
self.calculate_high_levels_data()
self.plot_high_levels_data()
|
python
|
{
"resource": ""
}
|
q11783
|
Demag_GUI.update_high_level_stats
|
train
|
def update_high_level_stats(self):
"""
updates high level statistics in bottom left of GUI.
"""
self.clear_high_level_pars()
dirtype = str(self.coordinates_box.GetValue())
if dirtype == 'specimen':
dirtype = 'DA-DIR'
elif dirtype == 'geographic':
dirtype = 'DA-DIR-GEO'
elif dirtype == 'tilt-corrected':
dirtype = 'DA-DIR-TILT'
if str(self.level_box.GetValue()) == 'sample':
high_level_type = 'samples'
elif str(self.level_box.GetValue()) == 'site':
high_level_type = 'sites'
elif str(self.level_box.GetValue()) == 'location':
high_level_type = 'locations'
elif str(self.level_box.GetValue()) == 'study':
high_level_type = 'study'
high_level_name = str(self.level_names.GetValue())
elements_type = self.UPPER_LEVEL_SHOW
if high_level_name in list(self.high_level_means[high_level_type].keys()):
mpars = []
for mf in list(self.high_level_means[high_level_type][high_level_name].keys()):
            if self.mean_fit == 'All' or mf == self.mean_fit:
if dirtype in list(self.high_level_means[high_level_type][high_level_name][mf].keys()):
mpar = deepcopy(
self.high_level_means[high_level_type][high_level_name][mf][dirtype])
if 'n' in mpar and mpar['n'] == 1:
mpar['calculation_type'] = "Fisher:"+mf
mpars.append(mpar)
elif mpar['calculation_type'] == 'Fisher by polarity':
for k in list(mpar.keys()):
if k == 'color' or k == 'calculation_type':
continue
mpar[k]['calculation_type'] += ':'+k+':'+mf
mpar[k]['color'] = mpar['color']
if 'K' not in mpar[k] and 'k' in mpar[k]:
mpar[k]['K'] = mpar[k]['k']
if 'R' not in mpar[k] and 'r' in mpar[k]:
mpar[k]['R'] = mpar[k]['r']
if 'n_lines' not in mpar[k] and 'n' in mpar[k]:
mpar[k]['n_lines'] = mpar[k]['n']
mpars.append(mpar[k])
else:
mpar['calculation_type'] += ":"+mf
mpars.append(mpar)
self.switch_stats_button.SetRange(0, len(mpars)-1)
self.show_high_levels_pars(mpars)
if self.ie_open:
self.ie.switch_stats_button.SetRange(0, len(mpars)-1)
|
python
|
{
"resource": ""
}
|
q11784
|
Demag_GUI.update_bounds_boxes
|
train
|
def update_bounds_boxes(self):
"""
updates bounds boxes with bounds of current specimen and fit
"""
if self.s not in list(self.Data.keys()):
self.select_specimen(list(self.Data.keys())[0])
self.T_list = self.Data[self.s]['zijdblock_steps']
if self.current_fit:
self.tmin_box.SetItems(self.T_list)
self.tmax_box.SetItems(self.T_list)
if type(self.current_fit.tmin) == str and type(self.current_fit.tmax) == str:
self.tmin_box.SetStringSelection(self.current_fit.tmin)
self.tmax_box.SetStringSelection(self.current_fit.tmax)
if self.ie_open:
self.ie.update_bounds_boxes(self.T_list)
|
python
|
{
"resource": ""
}
|
q11785
|
Demag_GUI.update_PCA_box
|
train
|
def update_PCA_box(self):
"""
updates PCA box with current fit's PCA type
"""
if self.s in list(self.pmag_results_data['specimens'].keys()):
if self.current_fit:
tmin = self.current_fit.tmin
tmax = self.current_fit.tmax
calculation_type = self.current_fit.PCA_type
else:
calculation_type = self.PCA_type_box.GetValue()
PCA_type = "None"
# update calculation type windows
if calculation_type == "DE-BFL":
PCA_type = "line"
elif calculation_type == "DE-BFL-A":
PCA_type = "line-anchored"
elif calculation_type == "DE-BFL-O":
PCA_type = "line-with-origin"
elif calculation_type == "DE-FM":
PCA_type = "Fisher"
elif calculation_type == "DE-BFP":
PCA_type = "plane"
else:
print("no PCA type found setting to line")
PCA_type = "line"
self.PCA_type_box.SetStringSelection(PCA_type)
|
python
|
{
"resource": ""
}
|
q11786
|
Demag_GUI.on_menu_import_meas_file
|
train
|
def on_menu_import_meas_file(self, event):
"""
Open measurement file, reset self.magic_file
and self.WD, and reset everything.
"""
    # use new measurement file and corresponding WD
    meas_file = self.choose_meas_file()
    if not meas_file:
        # user canceled the file dialog; keep the current state
        return
    WD = os.path.split(meas_file)[0]
    self.WD = WD
self.magic_file = meas_file
# reset backend with new files
self.reset_backend()
|
python
|
{
"resource": ""
}
|
q11787
|
Demag_GUI.on_menu_criteria_file
|
train
|
def on_menu_criteria_file(self, event):
"""
read pmag_criteria.txt file
and open changecriteria dialog
"""
if self.data_model == 3:
default_file = "criteria.txt"
else:
default_file = "pmag_criteria.txt"
    read_success = False
dlg = wx.FileDialog(
self, message="choose pmag criteria file",
defaultDir=self.WD,
defaultFile=default_file,
style=wx.FD_OPEN | wx.FD_CHANGE_DIR
)
if self.show_dlg(dlg) == wx.ID_OK:
criteria_file = dlg.GetPath()
print(("-I- Read new criteria file: %s" % criteria_file))
# check if this is a valid pmag_criteria file
try:
mag_meas_data, file_type = pmag.magic_read(criteria_file)
except:
dlg = wx.MessageDialog(
self, caption="Error", message="not a valid pmag_criteria file", style=wx.OK)
result = self.show_dlg(dlg)
if result == wx.ID_OK:
dlg.Destroy()
dlg.Destroy()
return
# initialize criteria
self.acceptance_criteria = self.read_criteria_file(criteria_file)
        read_success = True
dlg.Destroy()
    if read_success:
self.on_menu_change_criteria(None)
|
python
|
{
"resource": ""
}
|
q11788
|
Demag_GUI.right_click_zijderveld
|
train
|
def right_click_zijderveld(self, event):
"""
toggles between zoom and pan effects for the zijderveld on right
click
Parameters
----------
event : the wx.MouseEvent that triggered the call of this function
Alters
------
zijderveld_setting, toolbar1 setting
"""
if event.LeftIsDown() or event.ButtonDClick():
return
elif self.zijderveld_setting == "Zoom":
self.zijderveld_setting = "Pan"
try:
self.toolbar1.pan('off')
except TypeError:
pass
elif self.zijderveld_setting == "Pan":
self.zijderveld_setting = "Zoom"
try:
self.toolbar1.zoom()
except TypeError:
pass
|
python
|
{
"resource": ""
}
|
q11789
|
Demag_GUI.on_zijd_mark
|
train
|
def on_zijd_mark(self, event):
"""
Get mouse position on double right click find the interpretation in
range of mouse
position then mark that interpretation bad or good
Parameters
----------
event : the wx Mouseevent for that click
Alters
------
current_fit
"""
if not array(self.CART_rot).any():
return
pos = event.GetPosition()
width, height = self.canvas1.get_width_height()
pos[1] = height - pos[1]
xpick_data, ypick_data = pos
xdata_org = list(self.CART_rot[:, 0]) + list(self.CART_rot[:, 0])
ydata_org = list(-1*self.CART_rot[:, 1]) + list(-1*self.CART_rot[:, 2])
data_corrected = self.zijplot.transData.transform(
vstack([xdata_org, ydata_org]).T)
xdata, ydata = data_corrected.T
xdata = list(map(float, xdata))
ydata = list(map(float, ydata))
e = 4e0
index = None
for i, (x, y) in enumerate(zip(xdata, ydata)):
if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
index = i
break
    if index is not None:
steps = self.Data[self.s]['zijdblock']
if self.Data[self.s]['measurement_flag'][index % len(steps)] == "g":
self.mark_meas_bad(index % len(steps))
else:
self.mark_meas_good(index % len(steps))
        if self.data_model == 3.0:
            self.con.tables['measurements'].write_magic_file(
                dir_path=self.WD)
        else:
            pmag.magic_write(os.path.join(
                self.WD, "magic_measurements.txt"), self.mag_meas_data, "magic_measurements")
self.recalculate_current_specimen_interpreatations()
if self.ie_open:
self.ie.update_current_fit_data()
self.calculate_high_levels_data()
self.update_selection()
|
python
|
{
"resource": ""
}
|
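A minimal headless sketch of the coordinate mapping this click handler relies on: transData converts data coordinates to display pixels, after which the plotted point nearest a click can be found by Euclidean distance. Synthetic data; the Agg backend keeps it runnable without a display.

import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots()
x = np.arange(5)
y = x ** 2
ax.plot(x, y)
fig.canvas.draw()

# map data coordinates to display (pixel) coordinates, as the handler
# above does with zijplot.transData
pixels = ax.transData.transform(np.column_stack([x, y]))

# find the plotted point nearest a hypothetical click position
click = np.array([100., 100.])
dists = np.hypot(pixels[:, 0] - click[0], pixels[:, 1] - click[1])
print("nearest index:", int(dists.argmin()))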
q11790
|
Demag_GUI.right_click_specimen_equalarea
|
train
|
def right_click_specimen_equalarea(self, event):
"""
toggles between zoom and pan effects for the specimen equal area on
right click
Parameters
----------
event : the wx.MouseEvent that triggered the call of this function
Alters
------
specimen_EA_setting, toolbar2 setting
"""
if event.LeftIsDown() or event.ButtonDClick():
return
elif self.specimen_EA_setting == "Zoom":
self.specimen_EA_setting = "Pan"
try:
self.toolbar2.pan('off')
except TypeError:
pass
elif self.specimen_EA_setting == "Pan":
self.specimen_EA_setting = "Zoom"
try:
self.toolbar2.zoom()
except TypeError:
pass
|
python
|
{
"resource": ""
}
|
q11791
|
Demag_GUI.right_click_high_equalarea
|
train
|
def right_click_high_equalarea(self, event):
"""
toggles between zoom and pan effects for the high equal area on
right click
Parameters
----------
event : the wx.MouseEvent that triggered the call of this function
Alters
------
high_EA_setting, toolbar4 setting
"""
if event.LeftIsDown():
return
elif self.high_EA_setting == "Zoom":
self.high_EA_setting = "Pan"
try:
self.toolbar4.pan('off')
except TypeError:
pass
elif self.high_EA_setting == "Pan":
self.high_EA_setting = "Zoom"
try:
self.toolbar4.zoom()
except TypeError:
pass
|
python
|
{
"resource": ""
}
|
q11792
|
Demag_GUI.Add_text
|
train
|
def Add_text(self):
"""
Add measurement data lines to the text window.
"""
self.selected_meas = []
if self.COORDINATE_SYSTEM == 'geographic':
zijdblock = self.Data[self.s]['zijdblock_geo']
elif self.COORDINATE_SYSTEM == 'tilt-corrected':
zijdblock = self.Data[self.s]['zijdblock_tilt']
else:
zijdblock = self.Data[self.s]['zijdblock']
tmin_index, tmax_index = -1, -1
if self.current_fit and self.current_fit.tmin and self.current_fit.tmax:
tmin_index, tmax_index = self.get_indices(self.current_fit)
TEXT = ""
self.logger.DeleteAllItems()
for i in range(len(zijdblock)):
lab_treatment = self.Data[self.s]['zijdblock_lab_treatments'][i]
Step = ""
methods = lab_treatment.split('-')
if "NO" in methods:
Step = "N"
elif "AF" in methods:
Step = "AF"
elif "ARM" in methods:
Step = "ARM"
elif "IRM" in methods:
Step = "IRM"
elif "T" in methods:
Step = "T"
elif "LT" in methods:
Step = "LT"
Tr = zijdblock[i][0]
Dec = zijdblock[i][1]
Inc = zijdblock[i][2]
Int = zijdblock[i][3]
csd = self.Data[self.s]['csds'][i]
self.logger.InsertItem(i, "%i" % i)
self.logger.SetItem(i, 1, Step)
self.logger.SetItem(i, 2, "%.1f" % Tr)
self.logger.SetItem(i, 3, "%.1f" % Dec)
self.logger.SetItem(i, 4, "%.1f" % Inc)
self.logger.SetItem(i, 5, "%.2e" % Int)
self.logger.SetItem(i, 6, csd)
self.logger.SetItemBackgroundColour(i, "WHITE")
if i >= tmin_index and i <= tmax_index:
self.logger.SetItemBackgroundColour(i, "LIGHT BLUE")
if self.Data[self.s]['measurement_flag'][i] == 'b':
self.logger.SetItemBackgroundColour(i, "red")
|
python
|
{
"resource": ""
}
|
q11793
|
Demag_GUI.on_right_click_listctrl
|
train
|
def on_right_click_listctrl(self, event):
"""
right click on the listctrl toggles measurement bad
"""
g_index = event.GetIndex()
if self.Data[self.s]['measurement_flag'][g_index] == 'g':
self.mark_meas_bad(g_index)
else:
self.mark_meas_good(g_index)
if self.data_model == 3.0:
self.con.tables['measurements'].write_magic_file(dir_path=self.WD)
else:
pmag.magic_write(os.path.join(
self.WD, "magic_measurements.txt"), self.mag_meas_data, "magic_measurements")
self.recalculate_current_specimen_interpreatations()
if self.ie_open:
self.ie.update_current_fit_data()
self.calculate_high_levels_data()
self.update_selection()
|
python
|
{
"resource": ""
}
|
q11794
|
Demag_GUI.on_enter_specimen
|
train
|
def on_enter_specimen(self, event):
"""
upon enter on the specimen box it makes that specimen the current
specimen
"""
new_specimen = self.specimens_box.GetValue()
if new_specimen not in self.specimens:
self.user_warning(
"%s is not a valid specimen with measurement data, aborting" % (new_specimen))
self.specimens_box.SetValue(self.s)
return
self.select_specimen(new_specimen)
if self.ie_open:
self.ie.change_selected(self.current_fit)
self.update_selection()
|
python
|
{
"resource": ""
}
|
q11795
|
Demag_GUI.get_new_PCA_parameters
|
train
|
def get_new_PCA_parameters(self, event):
"""
calculate statistics when temperatures are selected
or PCA type is changed
"""
tmin = str(self.tmin_box.GetValue())
tmax = str(self.tmax_box.GetValue())
if tmin == "" or tmax == "":
return
if tmin in self.T_list and tmax in self.T_list and \
(self.T_list.index(tmax) <= self.T_list.index(tmin)):
return
PCA_type = self.PCA_type_box.GetValue()
if PCA_type == "line":
calculation_type = "DE-BFL"
elif PCA_type == "line-anchored":
calculation_type = "DE-BFL-A"
elif PCA_type == "line-with-origin":
calculation_type = "DE-BFL-O"
elif PCA_type == "Fisher":
calculation_type = "DE-FM"
elif PCA_type == "plane":
calculation_type = "DE-BFP"
coordinate_system = self.COORDINATE_SYSTEM
if self.current_fit:
self.current_fit.put(self.s, coordinate_system, self.get_PCA_parameters(
self.s, self.current_fit, tmin, tmax, coordinate_system, calculation_type))
if self.ie_open:
self.ie.update_current_fit_data()
self.update_GUI_with_new_interpretation()
|
python
|
{
"resource": ""
}
|
q11796
|
Demag_GUI.on_select_fit
|
train
|
def on_select_fit(self, event):
"""
Picks out the fit selected in the fit combobox and sets it to the
current fit of the GUI then calls the select function of the fit to
set the GUI's bounds boxes and alter other such parameters
Parameters
----------
event : the wx.ComboBoxEvent that triggers this function
Alters
------
current_fit, fit_box selection, tmin_box selection, tmax_box
selection
"""
fit_val = self.fit_box.GetValue()
if self.s not in self.pmag_results_data['specimens'] or not self.pmag_results_data['specimens'][self.s] or fit_val == 'None':
self.clear_boxes()
self.current_fit = None
self.fit_box.SetStringSelection('None')
self.tmin_box.SetStringSelection('')
self.tmax_box.SetStringSelection('')
else:
try:
fit_num = list(
map(lambda x: x.name, self.pmag_results_data['specimens'][self.s])).index(fit_val)
except ValueError:
fit_num = -1
self.pmag_results_data['specimens'][self.s][fit_num].select()
if self.ie_open:
self.ie.change_selected(self.current_fit)
|
python
|
{
"resource": ""
}
|
q11797
|
Demag_GUI.on_enter_fit_name
|
train
|
def on_enter_fit_name(self, event):
"""
Allows the entering of new fit names in the fit combobox
Parameters
----------
event : the wx.ComboBoxEvent that triggers this function
Alters
------
current_fit.name
"""
    if self.current_fit is None:
self.on_btn_add_fit(event)
value = self.fit_box.GetValue()
if ':' in value:
name, color = value.split(':')
else:
name, color = value, None
    if name in [x.name for x in self.pmag_results_data['specimens'][self.s]]:
        print('-W- a fit named %s already exists for this specimen, aborting rename' % name)
        return
self.current_fit.name = name
if color in list(self.color_dict.keys()):
self.current_fit.color = self.color_dict[color]
self.update_fit_boxes()
self.plot_high_levels_data()
|
python
|
{
"resource": ""
}
|
q11798
|
Demag_GUI.on_save_interpretation_button
|
train
|
def on_save_interpretation_button(self, event):
"""
on the save button the interpretation is saved to pmag_results_table
data in all coordinate systems
"""
if self.current_fit:
self.current_fit.saved = True
calculation_type = self.current_fit.get(self.COORDINATE_SYSTEM)[
'calculation_type']
tmin = str(self.tmin_box.GetValue())
tmax = str(self.tmax_box.GetValue())
self.current_fit.put(self.s, 'specimen', self.get_PCA_parameters(
self.s, self.current_fit, tmin, tmax, 'specimen', calculation_type))
if len(self.Data[self.s]['zijdblock_geo']) > 0:
self.current_fit.put(self.s, 'geographic', self.get_PCA_parameters(
self.s, self.current_fit, tmin, tmax, 'geographic', calculation_type))
if len(self.Data[self.s]['zijdblock_tilt']) > 0:
self.current_fit.put(self.s, 'tilt-corrected', self.get_PCA_parameters(
self.s, self.current_fit, tmin, tmax, 'tilt-corrected', calculation_type))
# calculate high level data
self.calculate_high_levels_data()
self.plot_high_levels_data()
self.on_menu_save_interpretation(event)
self.update_selection()
self.close_warning = True
|
python
|
{
"resource": ""
}
|
q11799
|
Demag_GUI.on_btn_add_fit
|
train
|
def on_btn_add_fit(self, event):
"""
add a new interpretation to the current specimen
Parameters
----------
event : the wx.ButtonEvent that triggered this function
Alters
------
pmag_results_data
"""
if self.auto_save.GetValue():
self.current_fit = self.add_fit(self.s, None, None, None, saved=True)
else:
self.current_fit = self.add_fit(self.s, None, None, None, saved=False)
self.generate_warning_text()
self.update_warning_box()
if self.ie_open:
self.ie.update_editor()
self.update_fit_boxes(True)
# Draw figures and add text
self.get_new_PCA_parameters(event)
|
python
|
{
"resource": ""
}
|