code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def onDeleteRow(self, event, data_type):
    """
    On button click, remove relevant object from both the data model
    and the grid.

    Parameters
    ----------
    event : wx event (unused; handler signature required by wx)
    data_type : str
        one of the er_magic ancestry levels, e.g. 'site', 'sample'

    Deleting a parent may orphan its children; if so, a warning dialog
    tells the user how to re-assign them.
    """
    # child_type is one step *down* the ancestry list from data_type
    ancestry = self.er_magic_data.ancestry
    child_type = ancestry[ancestry.index(data_type) - 1]
    # names of all currently highlighted rows (column 0 holds the name)
    names = [self.grid.GetCellValue(row, 0) for row in self.selected_rows]
    if data_type == 'site':
        how_to_fix = 'Make sure to select a new site for each orphaned sample in the next step'
    else:
        how_to_fix = 'Go back a step and select a new {} for each orphaned {}'.format(data_type, child_type)
    orphans = []
    for name in names:
        # re-look up the row index each time: remove_row shifts indices
        row = self.grid.row_labels.index(name)
        orphan = self.er_magic_data.delete_methods[data_type](name)
        if orphan:
            orphans.extend(orphan)
        self.grid.remove_row(row)
    if orphans:
        orphan_names = self.er_magic_data.make_name_list(orphans)
        pw.simple_warning('You have deleted:\n\n {}\n\nthe parent(s) of {}(s):\n\n {}\n\n{}'.format(', '.join(names), child_type, ', '.join(orphan_names), how_to_fix))
    self.selected_rows = set()
    # update grid and data model
    self.update_grid(self.grid)#, grids[grid_name])
    self.grid.Refresh()
4.31424
4.190672
1.029486
def onLeftClickLabel(self, event):
    """
    When user clicks on a grid label, determine if it is a row label
    or a col label, and pass the event to the appropriate handler.
    (Either highlights a column for editing all values, or highlights
    a row for deletion.)
    """
    row, col = event.Row, event.Col
    # top-left corner cell: neither a row nor a column label
    if col == -1 and row == -1:
        return
    if col < 0:
        # a row label was clicked
        self.onSelectRow(event)
    elif row < 0:
        # a column label was clicked
        self.drop_down_menu.on_label_click(event)
4.44499
4.031965
1.102438
def onSelectRow(self, event):
    """
    Highlight or unhighlight a row for possible deletion.
    """
    grid = self.grid
    clicked_row = event.Row
    white = (255, 255, 255, 255)
    teal = (191, 216, 216, 255)
    current_colour = grid.GetCellBackgroundColour(clicked_row, 0)
    attr = wx.grid.GridCellAttr()
    if current_colour == white:
        # not yet selected: highlight it and remember it
        attr.SetBackgroundColour(teal)
        self.selected_rows.add(clicked_row)
    else:
        # already highlighted: restore colour and forget it
        attr.SetBackgroundColour(white)
        self.selected_rows.discard(clicked_row)
    # delete button is only usable while something is selected
    if self.selected_rows and self.deleteRowButton:
        self.deleteRowButton.Enable()
    else:
        self.deleteRowButton.Disable()
    grid.SetRowAttr(clicked_row, attr)
    grid.Refresh()
2.447914
2.312391
1.058607
def update_grid(self, grid):
    """
    takes in wxPython grid and ErMagic data object to be updated

    Walks grid.changes (a set of edited row numbers) and pushes each
    edited row's cell values into the er_magic data model via the
    change_* method matching the grid's name.  Resets grid.changes
    when done.
    """
    # dispatch table: grid name -> data-model update method
    data_methods = {'specimen': self.er_magic_data.change_specimen,
                    'sample': self.er_magic_data.change_sample,
                    'site': self.er_magic_data.change_site,
                    'location': self.er_magic_data.change_location,
                    'age': self.er_magic_data.change_age}
    grid_name = str(grid.GetName())
    cols = list(range(grid.GetNumberCols()))
    col_labels = []
    for col in cols:
        col_labels.append(grid.GetColLabelValue(col))
    for row in grid.changes:  # go through changes and update data structures
        if row == -1:
            # -1 is a sentinel meaning "rows were deleted", not an edit
            continue
        else:
            data_dict = {}
            for num, label in enumerate(col_labels):
                if label:
                    data_dict[str(label)] = str(grid.GetCellValue(row, num))
            new_name = str(grid.GetCellValue(row, 0))
            # old name comes from the snapshot taken before editing
            old_name = self.temp_data[grid_name][row]
            data_methods[grid_name](new_name, old_name, data_dict)
    grid.changes = False
3.302321
2.823812
1.169455
def onSave(self, grid):#, age_data_type='site')
    """
    Save grid data in the data object

    Parameters
    ----------
    grid : wx grid (unused here; kept for handler signature compatibility)

    Data is saved only to the in-memory er_magic object; files are
    written later, when the user finishes editing.
    """
    # deselect column, including remove 'EDIT ALL' label
    if self.drop_down_menu:
        self.drop_down_menu.clean_up()
    # save all changes to er_magic data object
    self.grid_builder.save_grid_data()
    # don't actually write data in this step (time-consuming)
    # instead, write to files when user is done editing
    #self.er_magic_data.write_files()
    wx.MessageBox('Saved!', 'Info', style=wx.OK | wx.ICON_INFORMATION)
11.140358
11.125902
1.001299
def main():
    """
    NAME
        update_measurements.py
    DESCRIPTION
        update the magic_measurements table with new orientation info
    SYNTAX
        update_measurements.py [command line options]
    OPTIONS
        -h prints help message and quits
        -f MFILE, specify magic_measurements file; default is magic_measurements.txt
        -fsa SFILE, specify er_samples table; default is er_samples.txt
        -F OFILE, specify output file, default is same as MFILE
    """
    dir_path = '.'
    meas_file = 'magic_measurements.txt'
    samp_file = "er_samples.txt"
    out_file = 'magic_measurements.txt'
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-WD' in sys.argv:
        ind = sys.argv.index('-WD')
        dir_path = sys.argv[ind+1]
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        meas_file = sys.argv[ind+1]
    if '-fsa' in sys.argv:
        ind = sys.argv.index('-fsa')
        samp_file = sys.argv[ind+1]
    if '-F' in sys.argv:
        ind = sys.argv.index('-F')
        out_file = sys.argv[ind+1]
    # read in measurements file
    meas_file = dir_path+'/'+meas_file
    out_file = dir_path+'/'+out_file
    samp_file = dir_path+'/'+samp_file
    data, file_type = pmag.magic_read(meas_file)
    samps, file_type = pmag.magic_read(samp_file)
    MeasRecs = []
    # sampnames tracks (lower-cased) sample names already handled
    sampnames, sflag = [], 0
    for rec in data:
        for samp in samps:
            if samp['er_sample_name'].lower() == rec['er_sample_name'].lower():
                # BUGFIX: compare the lower-cased name against the list
                # (entries are stored lower-cased; the original compared the
                # raw name and could append duplicates)
                if samp['er_sample_name'].lower() not in sampnames:
                    sampnames.append(samp['er_sample_name'].lower())
                rec['er_site_name'] = samp['er_site_name']
                rec['er_location_name'] = samp['er_location_name']
                MeasRecs.append(rec)
                break
        if rec['er_sample_name'].lower() not in sampnames:
            # sample missing from er_samples: record a placeholder entry
            sampnames.append(rec['er_sample_name'].lower())
            sflag = 1
            SampRec = {}
            for key in list(samps[0].keys()):
                SampRec[key] = ""
            SampRec['er_sample_name'] = rec['er_sample_name']
            SampRec['er_citation_names'] = "This study"
            SampRec['er_site_name'] = 'MISSING'
            SampRec['er_location_name'] = 'MISSING'
            # BUGFIX: column name was misspelled 'sample_desription', so the
            # note never landed in the real sample_description column
            SampRec['sample_description'] = 'record added by update_measurements - edit as needed'
            samps.append(SampRec)
            print(rec['er_sample_name'], ' missing from er_samples.txt file - edit orient.txt file and re-import')
            rec['er_site_name'] = 'MISSING'
            rec['er_location_name'] = 'MISSING'
            MeasRecs.append(rec)
    pmag.magic_write(out_file, MeasRecs, 'magic_measurements')
    print("updated measurements file stored in ", out_file)
    if sflag == 1:
        pmag.magic_write(samp_file, samps, 'er_samples')
        print("updated sample file stored in ", samp_file)
2.320462
2.120319
1.094393
def main():
    """
    NAME
        angle.py
    DESCRIPTION
        calculates angle between two input directions D1,D2
    INPUT (COMMAND LINE ENTRY)
        D1_dec D1_inc D2_dec D2_inc
    OUTPUT
        angle
    SYNTAX
        angle.py [-h][-i] [command line options] [< filename]
    OPTIONS
        -h prints help and quits
        -i for interactive data entry
        -f FILE input filename
        -F FILE output filename (required if -F set)
        Standard I/O otherwise
    """
    out = ""
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-F' in sys.argv:
        ind = sys.argv.index('-F')
        o = sys.argv[ind + 1]
        out = open(o, 'w')
    if '-i' in sys.argv:
        # interactive mode loops until the user quits with ctrl-D
        cont = 1
        while cont == 1:
            dir1, dir2 = [], []
            try:
                ans = input('Declination 1: [ctrl-D to quit] ')
                dir1.append(float(ans))
                ans = input('Inclination 1: ')
                dir1.append(float(ans))
                ans = input('Declination 2: ')
                dir2.append(float(ans))
                ans = input('Inclination 2: ')
                dir2.append(float(ans))
            except (EOFError, ValueError, KeyboardInterrupt):
                # BUGFIX: was a bare except; catch only the exit/bad-input cases
                print("\nGood bye\n")
                sys.exit()
            # send dirs to angle and spit out result
            ang = pmag.angle(dir1, dir2)
            print('%7.1f ' % (ang))
    elif '-f' in sys.argv:
        ind = sys.argv.index('-f')
        file = sys.argv[ind + 1]
        file_input = numpy.loadtxt(file)
    else:
        # read from standard input
        # BUGFIX: numpy.float was removed in numpy>=1.24; plain float is equivalent
        file_input = numpy.loadtxt(sys.stdin.readlines(), dtype=float)
    if len(file_input.shape) > 1:  # list of directions
        dir1, dir2 = file_input[:, 0:2], file_input[:, 2:]
    else:
        dir1, dir2 = file_input[0:2], file_input[2:]
    angs = pmag.angle(dir1, dir2)
    for ang in angs:
        print('%7.1f' % (ang))
        if out != "":
            out.write('%7.1f \n' % (ang))
    if out:
        out.close()
2.938923
2.722208
1.07961
def main():
    """
    NAME
        fishrot.py
    DESCRIPTION
        generates set of Fisher distributed data from specified distribution
    SYNTAX
        fishrot.py [-h][-i][command line options]
    OPTIONS
        -h prints help message and quits
        -i for interactive entry
        -k kappa specify kappa, default is 20
        -n N specify N, default is 100
        -D D specify mean Dec, default is 0
        -I I specify mean Inc, default is 90
        where:
            kappa: fisher distribution concentration parameter
            N: number of directions desired
    OUTPUT
        dec, inc
    """
    N, kappa, D, I = 100, 20., 0., 90.
    # BUGFIX(idiom): dropped redundant 'len(sys.argv)!=0 and' -- argv always
    # contains at least the program name, so the test was always true
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    elif '-i' in sys.argv:
        ans = input(' Kappa: ')
        kappa = float(ans)
        ans = input(' N: ')
        N = int(ans)
        ans = input(' Mean Dec: ')
        D = float(ans)
        ans = input(' Mean Inc: ')
        I = float(ans)
    else:
        if '-k' in sys.argv:
            ind = sys.argv.index('-k')
            kappa = float(sys.argv[ind+1])
        if '-n' in sys.argv:
            ind = sys.argv.index('-n')
            N = int(sys.argv[ind+1])
        if '-D' in sys.argv:
            ind = sys.argv.index('-D')
            D = float(sys.argv[ind+1])
        if '-I' in sys.argv:
            ind = sys.argv.index('-I')
            I = float(sys.argv[ind+1])
    for k in range(N):
        dec, inc = pmag.fshdev(kappa)  # draw one Fisher-distributed direction
        drot, irot = pmag.dodirot(dec, inc, D, I)  # rotate to the requested mean
        print('%7.1f %7.1f ' % (drot, irot))
2.735889
2.226606
1.228726
def main():
    """
    NAME
        squish.py
    DESCRIPTION
        takes dec/inc data and "squishes" with specified flattening factor, flt
        using formula tan(Io)=flt*tan(If)
    INPUT
        declination inclination
    OUTPUT
        "squished" declination inclination
    SYNTAX
        squish.py [command line options] [< filename]
    OPTIONS
        -h print help and quit
        -f FILE, input file
        -F FILE, output file
        -flt FLT, flattening factor [required]
    """
    ofile = ""
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-F' in sys.argv:
        ind = sys.argv.index('-F')
        ofile = sys.argv[ind+1]
        out = open(ofile, 'w')
    if '-flt' in sys.argv:
        ind = sys.argv.index('-flt')
        flt = float(sys.argv[ind+1])
    else:
        # flattening factor is mandatory
        print(main.__doc__)
        sys.exit()
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        file = sys.argv[ind+1]
        data = np.loadtxt(file)
    else:
        # read in inclination data from stdin
        # BUGFIX: np.float was removed in numpy>=1.24; plain float is equivalent
        # (also renamed local 'input' -> 'data': it shadowed the builtin)
        data = np.loadtxt(sys.stdin, dtype=float)
    di = data.transpose()
    decs, incs = di[0], di[1]
    incnew = pmag.squish(incs, flt)
    for k in range(data.shape[0]):
        if ofile == "":
            print('%7.1f %7.1f' % (decs[k], incnew[k]))
        else:
            out.write('%7.1f %7.1f' % (decs[k], incnew[k])+'\n')
2.81866
2.43643
1.156881
def main():
    """
    iodp_samples_magic.py
    OPTIONS:
        -f FILE, input csv file
        -Fsa FILE, output samples file for updating, default is to overwrite
             existing samples file

    Thin CLI wrapper: parses arguments via the extractor helper, then hands
    off to convert.iodp_samples.
    """
    if "-h" in sys.argv:
        print(main.__doc__)
        sys.exit()
    # each entry: [flag-name, required?, default]
    dataframe = extractor.command_line_dataframe([['WD', False, '.'], ['ID', False, '.'], ['f', True, ''], ['Fsa', False, 'samples.txt'], ['DM', False, 3]])
    args = sys.argv
    checked_args = extractor.extract_and_check_args(args, dataframe)
    samp_file, output_samp_file, output_dir_path, input_dir_path, data_model_num = extractor.get_vars(['f', 'Fsa', 'WD', 'ID', 'DM'], checked_args)
    # DM may arrive as a string like "3.0"
    data_model_num = int(float(data_model_num))
    # data model 2 keeps the legacy er_samples.txt name unless user overrode it
    if '-Fsa' not in args and data_model_num == 2:
        output_samp_file = "er_samples.txt"
    ran, error = convert.iodp_samples(samp_file, output_samp_file, output_dir_path, input_dir_path, data_model_num=data_model_num)
    if not ran:
        print("-W- " + error)
5.642965
4.551832
1.239713
def cart2dir(self, cart):
    """
    Converts cartesian coordinates to a direction (declination,
    inclination, intensity).

    NOTE(review): the docstring previously said the opposite ("converts a
    direction to cartesian coordinates"); the code clearly converts
    cartesian -> direction.

    Parameters
    ----------
    cart : array-like, either a single [x, y, z] vector or an (n, 3) array

    Returns
    -------
    numpy array [dec, inc, R] (or (n, 3) array), or zeros(3) on failure
    """
    # print "calling cart2dir(), not in anything"
    cart = numpy.array(cart)
    rad = old_div(numpy.pi, 180.)  # constant to convert degrees to radians
    if len(cart.shape) > 1:
        Xs, Ys, Zs = cart[:, 0], cart[:, 1], cart[:, 2]
    else:  # single vector
        Xs, Ys, Zs = cart[0], cart[1], cart[2]
    Rs = numpy.sqrt(Xs**2+Ys**2+Zs**2)  # calculate resultant vector length
    Decs = (old_div(numpy.arctan2(Ys, Xs), rad)) % 360.  # calculate declination taking care of correct quadrants (arctan2) and making modulo 360.
    try:
        Incs = old_div(numpy.arcsin(old_div(Zs, Rs)), rad)  # calculate inclination (converting to degrees) #
    except:
        print('trouble in cart2dir')  # most likely division by zero somewhere
        return numpy.zeros(3)
    return numpy.array([Decs, Incs, Rs]).transpose()
4.322353
4.258452
1.015006
def magic_read(self, infile):
    """
    reads a Magic template file, puts data in a list of dictionaries

    Returns
    -------
    (magic_data, file_type) : list of dicts keyed by column name, and the
        file type string from the header; ([], 'bad_file') if the file
        cannot be opened.
    """
    # print "calling magic_read(self, infile)", infile
    hold, magic_data, magic_record, magic_keys = [], [], {}, []
    try:
        f = open(infile, "r")
    except:
        return [], 'bad_file'
    # first line: delimiter marker + file type, e.g. "tab <tab> er_samples"
    d = f.readline()[:-1].strip('\n')
    if d[0] == "s" or d[1] == "s":
        delim = 'space'
    elif d[0] == "t" or d[1] == "t":
        delim = 'tab'
    else:
        print('error reading ', infile)
        sys.exit()
    if delim == 'space':
        file_type = d.split()[1]
    if delim == 'tab':
        file_type = d.split('\t')[1]
    if file_type == 'delimited':
        if delim == 'space':
            file_type = d.split()[2]
        if delim == 'tab':
            file_type = d.split('\t')[2]
    # second line: column names
    if delim == 'space':
        line = f.readline()[:-1].split()
    if delim == 'tab':
        line = f.readline()[:-1].split('\t')
    for key in line:
        magic_keys.append(key)
    lines = f.readlines()
    # NOTE(review): crashes with IndexError if the file has no data rows
    for line in lines[:-1]:
        line.replace('\n', '')  # NOTE(review): no-op -- result is discarded
        if delim == 'space':
            rec = line[:-1].split()
        if delim == 'tab':
            rec = line[:-1].split('\t')
        hold.append(rec)
    # last line handled separately: it may lack a trailing newline
    line = lines[-1].replace('\n', '')
    if delim == 'space':
        rec = line[:-1].split()
    if delim == 'tab':
        rec = line.split('\t')
    hold.append(rec)
    for rec in hold:
        magic_record = {}
        if len(magic_keys) != len(rec):
            print("Warning: Uneven record lengths detected: ")
            #print magic_keys
            #print rec
        for k in range(len(rec)):
            magic_record[magic_keys[k]] = rec[k].strip('\n')
        magic_data.append(magic_record)
    magictype = file_type.lower().split("_")
    Types = ['er', 'magic', 'pmag', 'rmag']
    # NOTE(review): magictype is a *list*, so 'magictype in Types' is never
    # True -- the intended check was probably 'magictype[0] in Types'
    if magictype in Types:
        file_type = file_type.lower()
    # print "magic data from magic_read:"
    # print str(magic_data)[:500] + "..."
    # print "file_type", file_type
    return magic_data, file_type
3.081265
3.007837
1.024412
def get_specs(self, data):
    """
    takes a magic format file and returns a list of unique specimen names

    Parameters
    ----------
    data : list of dicts, each with an "er_specimen_name" key

    Returns
    -------
    sorted list of unique specimen names
    """
    # collect the unique names, then hand back a sorted list
    unique_names = {rec["er_specimen_name"] for rec in data}
    return sorted(unique_names)
6.132009
4.969864
1.233838
def main():
    """
    NAME
        vgpmap_magic.py
    DESCRIPTION
        makes a map of vgps and a95/dp,dm for site means in a sites table
    SYNTAX
        vgpmap_magic.py [command line options]
    OPTIONS
        -h prints help and quits
        -eye ELAT ELON [specify eyeball location], default is 90., 0.
        -f FILE sites format file, [default is sites.txt]
        -res [c,l,i,h] specify resolution (crude, low, intermediate, high)
        -etp plot the etopo20 topography data (requires high resolution data set)
        -prj PROJ, specify one of: ortho, lcc, moll, merc
        -sym SYM SIZE: choose a symbol and size, e.g. ro 5, bs 10, g^ 20
        -ell plot dp/dm or a95 ellipses
        -rev RSYM RSIZE : flip reverse poles to normal antipode
        -S: plot antipodes of all poles
        -age : plot the ages next to the poles
        -crd [g,t] : choose coordinate system, default is to plot all site VGPs
        -fmt [pdf, png, eps...] specify output format, default is pdf
        -sav save and quit
    DEFAULTS
        FILE: sites.txt; res: c; prj: ortho; ELAT,ELON = 0,0
        SYM SIZE: ro 8; RSYM RSIZE: g^ 8
    """
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    dir_path = pmag.get_named_arg("-WD", ".")
    # plot: default is 0, if -sav in sys.argv should be 1
    interactive = True
    save_plots = pmag.get_flag_arg_from_sys("-sav", true=1, false=0)
    if save_plots:
        interactive = False
    fmt = pmag.get_named_arg("-fmt", "pdf")
    res = pmag.get_named_arg("-res", "c")
    proj = pmag.get_named_arg("-prj", "ortho")
    anti = pmag.get_flag_arg_from_sys("-S", true=1, false=0)
    fancy = pmag.get_flag_arg_from_sys("-etp", true=1, false=0)
    ell = pmag.get_flag_arg_from_sys("-ell", true=1, false=0)
    ages = pmag.get_flag_arg_from_sys("-age", true=1, false=0)
    # -rev takes two positional values: symbol and size
    if '-rev' in sys.argv:
        flip = 1
        ind = sys.argv.index('-rev')
        rsym = (sys.argv[ind + 1])
        rsize = int(sys.argv[ind + 2])
    else:
        flip, rsym, rsize = 0, "g^", 8
    if '-sym' in sys.argv:
        ind = sys.argv.index('-sym')
        sym = (sys.argv[ind + 1])
        size = int(sys.argv[ind + 2])
    else:
        sym, size = 'ro', 8
    if '-eye' in sys.argv:
        ind = sys.argv.index('-eye')
        lat_0 = float(sys.argv[ind + 1])
        lon_0 = float(sys.argv[ind + 2])
    else:
        lat_0, lon_0 = 90., 0.
    crd = pmag.get_named_arg("-crd", "")
    results_file = pmag.get_named_arg("-f", "sites.txt")
    # NOTE: ipmag.vgpmap_magic takes these positionally -- order is critical
    ipmag.vgpmap_magic(dir_path, results_file, crd, sym, size, rsym, rsize,
                       fmt, res, proj, flip, anti, fancy, ell, ages,
                       lat_0, lon_0, save_plots, interactive)
2.785435
2.043481
1.363084
def add_items(self, items_list, incl_pmag=True, incl_parents=True):
    """
    Add items and/or update existing items in grid.

    Parameters
    ----------
    items_list : iterable of data-model items, each with .name, .er_data,
        and .pmag_data attributes
    incl_pmag : bool, also push pmag_data into the grid (default True)
    incl_parents : bool, also add parent columns (default True)

    Items whose name is already shown in column 0 are not re-added as
    rows, but their data is still refreshed via add_data.
    """
    num_rows = self.GetNumberRows()
    current_grid_rows = [self.GetCellValue(num, 0) for num in range(num_rows)]
    er_data = {item.name: item.er_data for item in items_list}
    pmag_data = {item.name: item.pmag_data for item in items_list}
    # set membership instead of repeated list scans (was O(n) per item),
    # and a direct 'not in' test instead of the old if/pass/else pattern
    existing = set(current_grid_rows)
    for item in sorted(items_list, key=lambda item: item.name):
        if item.name not in existing:
            self.add_row(item.name, item)
    self.add_data(er_data)#, pmag=False)
    if incl_pmag:
        self.add_data(pmag_data, pmag=True)
    if incl_parents:
        self.add_parents()
2.649594
2.510265
1.055504
def add_row(self, label='', item=''):
    """
    Add a row to the grid: append one empty row, stamp column 0 with
    *label*, and record label/item in the bookkeeping lists.
    """
    self.AppendRows(1)
    new_row = self.GetNumberRows() - 1  # index of the row just appended
    self.SetCellValue(new_row, 0, str(label))
    self.row_labels.append(label)
    self.row_items.append(item)
2.762397
2.472142
1.117411
def remove_row(self, row_num=None):
    """
    Remove a row from the grid.

    Parameters
    ----------
    row_num : int or None; if None, the last row is removed.

    Also keeps row_labels/row_items in sync and marks the grid changed
    (the -1 sentinel in self.changes means "rows were deleted").
    """
    #DeleteRows(self, pos, numRows, updateLabel
    # treat None (but not 0) as "remove last row"
    if not row_num and row_num != 0:
        row_num = self.GetNumberRows() - 1
    label = self.GetCellValue(row_num, 0)
    self.DeleteRows(pos=row_num, numRows=1, updateLabels=True)
    # remove label from row_labels
    try:
        self.row_labels.remove(label)
    except ValueError:
        # if label name hasn't been saved yet, simply truncate row_labels
        self.row_labels = self.row_labels[:-1]
    self.row_items.pop(row_num)
    if not self.changes:
        self.changes = set()
    self.changes.add(-1)
    # fix #s for rows edited:
    self.update_changes_after_row_delete(row_num)
5.025679
5.039396
0.997278
def add_drop_down(self, col_number, col_label):
    """
    Add a correctly formatted drop-down-menu for given col_label, if
    required.  Otherwise do nothing.

    Fetches the controlled vocabulary for the column from the EarthRef
    API and stores it in self.choices[col_number] as
    (choices, two_tiered) -- choices is a flat list, or a dict of lists
    keyed by first letter when the vocabulary is large.
    """
    if col_label in ['magic_method_codes', 'magic_method_codes++']:
        self.add_method_drop_down(col_number, col_label)
    if col_label in vocab.possible_vocabularies:
        if col_number not in list(self.choices.keys()):  # if not already assigned above
            self.grid.SetColLabelValue(col_number, col_label + "**")  # mark it as using a controlled vocabulary
            # NOTE(review): blocking network fetch inside a GUI handler;
            # pd.io.json.read_json is deprecated in modern pandas (use pd.read_json)
            url = 'http://api.earthref.org/MagIC/vocabularies/{}.json'.format(col_label)
            controlled_vocabulary = pd.io.json.read_json(url)
            stripped_list = []
            for item in controlled_vocabulary[col_label][0]:
                try:
                    stripped_list.append(str(item['item']))
                except UnicodeEncodeError:
                    # skips items with non ASCII characters
                    pass
            #stripped_list = [item['item'] for item in controlled_vocabulary[label][0]]
            if len(stripped_list) > 100:
                # split out the list alphabetically, into a dict of lists {'A': ['alpha', 'artist'], 'B': ['beta', 'beggar']...}
                dictionary = {}
                for item in stripped_list:
                    letter = item[0].upper()
                    if letter not in list(dictionary.keys()):
                        dictionary[letter] = []
                    dictionary[letter].append(item)
                stripped_list = dictionary
            two_tiered = True if isinstance(stripped_list, dict) else False
            self.choices[col_number] = (stripped_list, two_tiered)
4.760268
4.692792
1.014379
def add_method_drop_down(self, col_number, col_label):
    """
    Add drop-down-menu options for magic_method_codes columns.

    Chooses the vocabulary by data type: age columns get age methods,
    pmag-style columns ('++' suffix or 'result' type) get pmag methods,
    everything else gets er methods.  Method menus are always two-tiered.
    """
    if self.data_type == 'age':
        method_list = vocab.age_methods
    elif '++' in col_label or self.data_type == 'result':
        # both conditions selected pmag_methods in the original chain
        method_list = vocab.pmag_methods
    else:
        method_list = vocab.er_methods
    self.choices[col_number] = (method_list, True)
4.610744
4.633088
0.995177
def clean_up(self):#, grid)
    """
    de-select grid cols, refresh grid
    """
    if self.selected_col:
        # drop the '\nEDIT ALL' suffix from the column header
        # (strip works on a character set, which is fine for these labels)
        header = self.grid.GetColLabelValue(self.selected_col)
        self.grid.SetColLabelValue(self.selected_col, header.strip('\nEDIT ALL'))
        # un-highlight every cell in the previously selected column
        for r in range(self.grid.GetNumberRows()):
            self.grid.SetCellBackgroundColour(r, self.selected_col, 'white')
    self.grid.ForceRefresh()
2.798273
2.56485
1.091008
def on_left_click(self, event, grid, choices):
    """
    creates popup menu when user clicks on the column if that column is
    in the list of choices that get a drop-down menu. allows user to
    edit the column, but only from available values

    Three interaction modes:
      * ctrl/cmd-click toggles individual cells in/out of the dispersed
        selection;
      * shift-click extends a contiguous selection within one column;
      * a plain click pops up the menu for the clicked (or previously
        selected) cells.
    """
    color = self.grid.GetCellBackgroundColour(event.GetRow(), event.GetCol())
    # allow user to cherry-pick cells for editing. gets selection of meta key for mac, ctrl key for pc
    if event.ControlDown() or event.MetaDown():
        row, col = event.GetRow(), event.GetCol()
        if (row, col) not in self.dispersed_selection:
            self.dispersed_selection.append((row, col))
            self.grid.SetCellBackgroundColour(row, col, 'light blue')
        else:
            self.dispersed_selection.remove((row, col))
            self.grid.SetCellBackgroundColour(row, col, color)# 'white'
        self.grid.ForceRefresh()
        return
    if event.ShiftDown():  # allow user to highlight multiple consecutive cells in a column
        previous_col = self.grid.GetGridCursorCol()
        previous_row = self.grid.GetGridCursorRow()
        col = event.GetCol()
        row = event.GetRow()
        if col != previous_col:
            # shift-selection only works within a single column
            return
        else:
            if row > previous_row:
                row_range = list(range(previous_row, row+1))
            else:
                row_range = list(range(row, previous_row+1))
        for r in row_range:
            self.grid.SetCellBackgroundColour(r, col, 'light blue')
            self.selection.append((r, col))
        self.grid.ForceRefresh()
        return
    # plain click: figure out which selection (if any) applies
    selection = False
    if self.dispersed_selection:
        is_dispersed = True   # NOTE(review): assigned but not used below
        selection = self.dispersed_selection
    if self.selection:
        is_dispersed = False
        selection = self.selection
    try:
        col = event.GetCol()
        row = event.GetRow()
    except AttributeError:
        # event without coordinates: fall back to the first selected cell
        row, col = selection[0][0], selection[0][1]
    self.grid.SetGridCursor(row, col)
    if col in list(choices.keys()):  # column should have a pop-up menu
        menu = wx.Menu()
        two_tiered = choices[col][1]
        choices = choices[col][0]
        if not two_tiered:  # menu is one tiered
            if 'CLEAR cell of all values' not in choices:
                choices.insert(0, 'CLEAR cell of all values')
            for choice in choices:
                if not choice:
                    choice = " "  # prevents error if choice is an empty string
                menuitem = menu.Append(wx.ID_ANY, str(choice))
                self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), menuitem)
            self.show_menu(event, menu)
        else:  # menu is two_tiered
            clear = menu.Append(-1, 'CLEAR \ncell of all values')
            self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), clear)
            for choice in sorted(choices.items()):
                submenu = wx.Menu()
                for item in choice[1]:
                    menuitem = submenu.Append(-1, str(item))
                    self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), menuitem)
                menu.Append(-1, choice[0], submenu)
            self.show_menu(event, menu)
    if selection:
        # re-whiten the cells that were previously highlighted
        for row, col in selection:
            self.grid.SetCellBackgroundColour(row, col, self.col_color)
        self.dispersed_selection = []
        self.selection = []
    self.grid.ForceRefresh()
2.543674
2.499791
1.017555
def on_select_menuitem(self, event, grid, row, col, selection):
    """
    sets value of selected cell to value selected from menu

    If the whole column was selected (self.selected_col == col), every
    row gets the new value; otherwise only the clicked cell and any
    cells in *selection*.  For colon-delimited columns, the new value is
    prepended to the existing cell value rather than replacing it.
    """
    if self.grid.changes:  # if user selects a menuitem, that is an edit
        self.grid.changes.add(row)
    else:
        self.grid.changes = {row}
    item_id = event.GetId()
    item = event.EventObject.FindItemById(item_id)
    label = item.Label
    cell_value = grid.GetCellValue(row, col)
    if str(label) == "CLEAR cell of all values":
        label = ""
    col_label = grid.GetColLabelValue(col).strip('\nEDIT ALL').strip('**')
    if col_label in self.colon_delimited_lst and label:
        # accumulate values colon-delimited unless already present
        if not label.lower() in cell_value.lower():
            label += (":" + cell_value).rstrip(':')
        else:
            label = cell_value
    if self.selected_col and self.selected_col == col:
        # whole column is selected: apply to every row
        # NOTE(review): this loop rebinds the 'row' parameter
        for row in range(self.grid.GetNumberRows()):
            grid.SetCellValue(row, col, label)
            if self.grid.changes:
                self.grid.changes.add(row)
            else:
                self.grid.changes = {row}
        #self.selected_col = None
    else:
        grid.SetCellValue(row, col, label)
    if selection:
        # apply to every other cell in the current selection
        for cell in selection:
            row = cell[0]
            grid.SetCellValue(row, col, label)
        return
4.095611
3.953412
1.035969
def main():
    """
    NAME
        plot_cdf.py
    DESCRIPTION
        makes plots of cdfs of data in input file
    SYNTAX
        plot_cdf.py [-h][command line options]
    OPTIONS
        -h prints help message and quits
        -f FILE
        -t TITLE
        -fmt [svg,eps,png,pdf,jpg..] specify format of output figure, default is svg
        -sav saves plot and quits
    """
    fmt, plot = 'svg', 0
    title = ""
    argv = sys.argv
    if '-h' in argv:
        print(main.__doc__)
        sys.exit()
    if '-sav' in argv:
        plot = 1
    if '-f' in argv:
        # load the data column from the named file
        X = numpy.loadtxt(argv[argv.index('-f') + 1])
    else:
        print('-f option required')
        print(main.__doc__)
        sys.exit()
    if '-fmt' in argv:
        fmt = argv[argv.index('-fmt') + 1]
    if '-t' in argv:
        title = argv[argv.index('-t') + 1]
    CDF = {'X': 1}
    pmagplotlib.plot_init(CDF['X'], 5, 5)
    pmagplotlib.plot_cdf(CDF['X'], X, title, 'r', '')
    files = {'X': 'CDF_.' + fmt}
    if plot == 0:
        # interactive mode: show the figure, save only on request
        pmagplotlib.draw_figs(CDF)
        ans = input('S[a]ve plot, <Return> to quit ')
        if ans == 'a':
            pmagplotlib.save_plots(CDF, files)
    else:
        pmagplotlib.save_plots(CDF, files)
3.010746
2.98279
1.009373
def get_json_online(self, url):
    """
    Use requests module to get json from Earthref.

    Returns
    -------
    result : requests.models.Response on success;
        [] on HTTP error or connection/timeout failure;
        False when the requests module is unavailable.
    """
    if not requests:
        return False
    network_errors = (requests.exceptions.ConnectTimeout,
                      requests.exceptions.ConnectionError,
                      requests.exceptions.ReadTimeout)
    try:
        response = requests.get(url, timeout=.2)
    except network_errors:
        return []
    # non-2xx responses are treated the same as a failed connection
    return response if response.ok else []
3.485462
3.322058
1.049188
def get_tiered_meth_category(self, mtype, all_codes, code_types):
    """
    Get a tiered list of all er/pmag_age codes, i.e.
    pmag_codes = {'anisotropy_codes': ['code1', 'code2'],
                  'sample_preparation': [code1, code2], ...}

    Parameters
    ----------
    mtype : str, column of code_types selecting the method type
    all_codes : passed through to self.get_one_meth_type
    code_types : DataFrame with boolean method-type columns

    Returns
    -------
    dict mapping category name -> list of codes in that category
    """
    # rows of code_types flagged True for this method type
    relevant = code_types[code_types[mtype] == True].index
    tiered = {}
    for category in Series(relevant):
        tiered[category] = list(self.get_one_meth_type(category, all_codes).index)
    return tiered
5.327725
5.573102
0.955971
def get_controlled_vocabularies(self, vocab_types=default_vocab_types):
    """
    Get all non-method controlled vocabularies.

    Tries the EarthRef web service first (unless set_env.OFFLINE), then
    falls back to the cached December-2018 JSON file.  Returns a pandas
    Series mapping data-model column names to their list of allowed
    values (or None early-returns if the VOCAB module cache is already
    populated).
    """
    if len(VOCAB):
        # vocabularies already cached at module level; just (re)apply them
        self.set_vocabularies()
        return
    data = []
    controlled_vocabularies = []
    # try to get online
    if not set_env.OFFLINE:
        url = 'https://www2.earthref.org/vocabularies/controlled.json'
        try:
            raw = self.get_json_online(url)
            data = pd.DataFrame(raw.json())
            print('-I- Importing controlled vocabularies from https://earthref.org')
        except Exception as ex:
            # any failure silently falls through to the cached file below
            pass
            #print(ex, type(ex))
    # used cached
    if not len(data):
        print('-I- Using cached vocabularies')
        fname = os.path.join(data_model_dir, "controlled_vocabularies_December_10_2018.json")
        data = pd.io.json.read_json(fname, encoding='utf-8-sig')
    # parse data
    possible_vocabularies = data.columns
    ## this line means, grab every single controlled vocabulary
    vocab_types = list(possible_vocabularies)
    def get_cv_from_list(lst):
        # extract the name from a validation string like "cv(\"name\")"
        try:
            for i in lst:
                if "cv(" in i:
                    return i[4:-2]
        except TypeError:
            return None
        else:
            return None
    vocab_col_names = []
    data_model = self.data_model
    for dm_key in data_model.dm:
        df = data_model.dm[dm_key]
        df['vocab_name'] = df['validations'].apply(get_cv_from_list)
        lst = list(zip(df[df['vocab_name'].notnull()]['vocab_name'], df[df['vocab_name'].notnull()].index))
        # in lst, first value is the name of the controlled vocabulary
        # second value is the name of the dataframe column
        vocab_col_names.extend(lst)
    # vocab_col_names is now a list of tuples
    # consisting of the vocabulary name and the column name
    # i.e., (u'type', u'geologic_types')
    # remove duplicate col_names:
    vocab_col_names = sorted(set(vocab_col_names))
    # add in boolean category to controlled vocabularies
    bool_items = [{'item': True}, {'item': False}, {'item': 'true'}, {'item': 'false'}, {'item': 0}, {'item': 1}, {'item': 0.0}, {'item': 1.0}, {'item': 't'}, {'item': 'f'}, {'item': 'T'}, {'item': 'F'}]
    series = Series({'label': 'Boolean', 'items': bool_items})
    data['boolean'] = series
    # use vocabulary name to get possible values for the column name
    for vocab in vocab_col_names[:]:
        if vocab[0] == "magic_table_column":
            # this special vocabulary is not a real column vocabulary
            vocab_col_names.remove(("magic_table_column", "table_column"))
            continue
        items = data[vocab[0]]['items']
        stripped_list = [item['item'] for item in items]
        controlled_vocabularies.append(stripped_list)
    # create series with the column name as the index,
    # and the possible values as the values
    ind_values = [i[1] for i in vocab_col_names]
    vocabularies = pd.Series(controlled_vocabularies, index=ind_values)
    return vocabularies
4.423636
4.402727
1.004749
def loadData(filePath=None):
    '''
    #====================================================================
    in: read measured raw data file, search the line [' Field Remanence ']
        in measured data file, skip all the rows above and the last line,
        otherwise, load data as two columns.
    out: rawDf, fitDf -- the pandas dataframes of measured raw data and
        the log10-interpolated gradient data.
    #====================================================================
    '''
    # locate the data header line to decide how many rows to skip
    skip_from = ' Field Remanence '
    with open(filePath,'rb') as fr:
        #f = fr.read()
        for i,line in enumerate(fr,1):
            #print(line)
            if skip_from in str(line):
                skiprows=i+2
                break
            else:
                skiprows=None
    skiprows = skiprows if isinstance(skiprows,int) else 1
    # NOTE(review): delimiter=',' overrides sep='\s+' here, so the file is
    # effectively parsed as comma-separated
    rawDf = pd.read_csv(filePath, sep='\s+', delimiter=',', names=['field','remanance'], dtype=np.float64, skiprows=skiprows, skipfooter=1,engine='python')
    rawDf = rawDf[(rawDf['field']>0)]
    rawDf = rawDf.sort_values(by=['field'])
    rawDf['field'] = rawDf['field']*10**3 # mT to # T
    y_measure=rawDf['remanance']  # NOTE(review): unused after this point
    rawDf = rawDf[(rawDf['field']>=2)]
    rawDf['field_log'] = np.log10(rawDf['field'])
    rawDf['rem_gradient'] = np.gradient(rawDf['remanance'])
    rawDf['rem_grad_norm'] = rawDf['rem_gradient']/rawDf['rem_gradient'].max()
    # spline-interpolate the remanence gradient onto a uniform log10 grid
    field_fit = np.linspace(np.log10(rawDf['field'].min()), np.log10(rawDf['field'].max()), 100)
    y_gradient = interpolate.splev(field_fit, interpolate.splrep(np.log10(rawDf['field']), np.gradient(rawDf['remanance'])))
    fitDf = pd.DataFrame({'field':field_fit,'remanance':y_gradient})
    # clamp non-positive values (needed for later log work)
    # NOTE(review): chained-indexing assignment; triggers
    # SettingWithCopyWarning on modern pandas
    fitDf.remanance[fitDf.remanance<=0] = 10**-15
    return rawDf,fitDf
3.988074
2.359533
1.690197
def fit_plots(ax, xfit, xraw, yfit, yraw):
    '''
    Plot the fitted results for data fit and refit: each fitted
    component, their sum, and the measured points, on the given axes.

    Side effect: stashes yfit in the module-level global _yfits_.
    '''
    global _yfits_
    _yfits_ = yfit
    ax.plot(xfit, yfit)                   # individual fitted components
    ax.plot(xfit, np.sum(yfit, axis=1))   # summed fit curve
    ax.scatter(xraw, yraw)                # measured data points
    ax.set_xlabel('Field (log10(mT))')
    ax.set_ylabel('IRM normalization')
6.728464
4.510396
1.491768
def main():
    """
    NAME
        uniform.py
    DESCRIPTION
        draws N directions from uniform distribution on a sphere
    SYNTAX
        uniform.py [-h][command line options]
    OPTIONS
        -h prints help message and quits
        -n N, specify N on the command line (default is 100)
        -F file, specify output file name, default is standard output
    """
    outf = ""
    N = 100
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-F' in sys.argv:
        ind = sys.argv.index('-F')
        outf = sys.argv[ind+1]
    # BUGFIX: removed the dead "out=open(outf,'w')" -- the handle was never
    # used or closed (numpy.savetxt opens the file itself below)
    if '-n' in sys.argv:
        ind = sys.argv.index('-n')
        N = int(sys.argv[ind+1])
    dirs = pmag.get_unf(N)
    if outf == '':
        for dir in dirs:
            print('%7.1f %7.1f' % (dir[0], dir[1]))
    else:
        numpy.savetxt(outf, dirs, fmt='%7.1f %7.1f')
2.694431
2.559347
1.052781
def iodp_sample_names(df):
    """
    Convert expedition, hole, section, type, interval to sample name
    in format: exp-hole-type-sect-interval

    Parameters
    ___________
    df : Pandas DataFrame
        dataframe read in from .csv file downloaded from LIMS online database

    Returns
    --------------
    holes : list
        IODP Hole names in format U999A
    specimens : pandas Series
        series with sample names, e.g. 999-U999A-1H-1-W-1
    """
    # the offset column carries either of two names depending on export
    for candidate in ('Top offset (cm)', 'Offset (cm)'):
        if candidate in df.columns:
            offset_key = candidate
            break
    else:
        print('No offset key found')
        return False, []
    # float round-trip normalises the interval (e.g. '5' -> '5.0')
    interval = df[offset_key].astype('float').astype('str')
    holes = df['Site'].astype('str') + df['Hole']
    specimens = (df['Exp'].astype('str') + '-' + holes + '-'
                 + df['Core'].astype('str') + df['Type'].astype('str')
                 + '-' + df['Sect'].astype('str')
                 + '-' + df['A/W'] + '-' + interval)
    return holes, specimens
5.5739
4.689844
1.188504
cwd = os.getcwd() main_dir = cwd + '/SPD' try: import new_lj_thellier_gui_spd as tgs gui = tgs.Arai_GUI('/magic_measurements.txt', main_dir) specimens = list(gui.Data.keys()) thing = PintPars(gui.Data, '0238x6011044', 473., 623.) thing.calculate_all_statistics() #new_thing = PintPars(gui.Data, '0238x5721062', 100. + 273., 525. + 273.) #new_thing.calculate_all_statistics() gui2 = tgs.Arai_GUI('/magic_measurements.txt', '/Users/nebula/Desktop/MagIC_experiments/ODP-SBG_1') thing2 = PintPars(gui2.Data, '0335x1031411', 273., 743.) return thing, thing2 except Exception as ex: print('could not make standard specimen objects') print(ex)
def make_thing()
makes example PintPars object
7.550762
6.709851
1.125325
if len(self.x_Arai_segment) < 4: self.pars['specimen_k_prime'], self.pars['specimen_k_prime_sse'] = 0, 0 return 0 data = lib_k.AraiCurvature(self.x_Arai_segment, self.y_Arai_segment) self.pars['specimen_k_prime'] = data[0] self.pars['specimen_k_prime_sse'] = data[3]
def get_curve_prime(self)
not in SPD documentation. same as k, but using the segment instead of the full data set
4.531971
3.88533
1.166432
PTRMS = self.PTRMS[1:] CART_pTRMS_orig = numpy.array([lib_direct.dir2cart(row[1:4]) for row in PTRMS]) #B_lab_dir = [self.B_lab_dir[0], self.B_lab_dir[1], 1.] # dir tmin, tmax = self.t_Arai[0], self.t_Arai[-1] ptrms_dec_Free, ptrms_inc_Free, ptrm_best_fit_vector_Free, ptrm_tau_Free, ptrm_v_Free, ptrm_mass_center_Free, ptrm_PCA_sigma_Free = lib_direct.get_dec_and_inc(CART_pTRMS_orig, self.t_Arai, tmin, tmax, anchored=False) ptrms_angle = lib_direct.get_ptrms_angle(ptrm_best_fit_vector_Free, self.B_lab_cart) self.pars['ptrms_dec_Free'], self.pars['ptrms_inc_Free'] = ptrms_dec_Free, ptrms_inc_Free self.pars['ptrms_tau_Free'] = ptrm_tau_Free self.pars['ptrms_angle_Free'] = ptrms_angle
def get_ptrm_dec_and_inc(self)
not included in spd.
4.184273
4.147146
1.008953
# # parse command line options # if '-h' in sys.argv: print(main.__doc__) sys.exit() dir_path = pmag.get_named_arg("-WD", default_val=".") input_dir_path = pmag.get_named_arg('-ID', "") input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path) meas_file = pmag.get_named_arg( "-f", default_val="measurements.txt") #spec_file = pmag.get_named_arg( # "-fsp", default_val="specimens.txt") #crit_file = pmag.get_named_arg("-fcr", default_val="criteria.txt") #spec_file = os.path.join(dir_path, spec_file) #crit_file = os.path.join(dir_path, crit_file) meas_file = pmag.resolve_file_name(meas_file, input_dir_path) fmt = pmag.get_named_arg("-fmt", "svg") save_plots = False interactive = True if '-sav' in sys.argv: save_plots = True interactive=False spec = pmag.get_named_arg("-spc", default_val="") n_specs = pmag.get_named_arg("-n", default_val="all") try: n_specs = int(n_specs) except ValueError: pass ipmag.thellier_magic(meas_file, dir_path, input_dir_path, spec, n_specs, save_plots, fmt, interactive)
def main()
NAME thellier_magic.py DESCRIPTION plots Thellier-Thellier data in version 3.0 format Reads saved interpretations from a specimen formatted table, default: specimens.txt SYNTAX thellier_magic.py [command line options] OPTIONS -h prints help message and quits -f MEAS, set measurements input file, default is 'measurements.txt' -WD: directory to output files to (default : current directory) Note: if using Windows, all figures will output to current directory -ID: directory to read files from (default : same as -WD) -fsp PRIOR, set specimens.txt prior interpretations file, default is 'specimens.txt' -fmt [svg,png,jpg], format for images - default is svg -sav, saves plots without review (in format specified by -fmt key or default) -spc SPEC, plots single specimen SPEC, saves plot with specified format with optional -b bounds and quits -n SPECIMENS, number of specimens to plot OUTPUT figures: ALL: numbers refer to temperature steps in command line window 1) Arai plot: closed circles are zero-field first/infield open circles are infield first/zero-field triangles are pTRM checks squares are pTRM tail checks VDS is vector difference sum diamonds are bounds for interpretation 2) Zijderveld plot: closed (open) symbols are X-Y (X-Z) planes X rotated to NRM direction 3) (De/Re)Magnetization diagram: circles are NRM remaining squares are pTRM gained 4) equal area projections: green triangles are pTRM gained direction red (purple) circles are lower(upper) hemisphere of ZI step directions blue (cyan) squares are lower(upper) hemisphere IZ step directions 5) Optional: TRM acquisition 6) Optional: TDS normalization command line window: list is: temperature step numbers, temperatures (C), Dec, Inc, Int (units of measuements) list of possible commands: type letter followed by return to select option saving of plots creates image files with specimen, plot type as name
2.534327
2.072818
1.222648
if len(DM): self.dm = DM self.crit_map = CRIT_MAP return if not set_env.OFFLINE: dm = self.get_dm_online() if dm: print('-I- Using online data model') #self.cache_data_model(dm) return self.parse_response(dm) # if online is not available, get cached dm dm = self.get_dm_offline() print('-I- Using cached data model') return self.parse_cache(dm)
def get_data_model(self)
Try to download the data model from Earthref. If that fails, grab the cached data model.
6.749041
5.930315
1.138058
model_file = self.find_cached_dm() try: f = open(model_file, 'r', encoding='utf-8-sig') except TypeError: f = open(model_file, 'r') string = '\n'.join(f.readlines()) f.close() raw = json.loads(string) full = pd.DataFrame(raw) return full
def get_dm_offline(self)
Grab the 3.0 data model from the PmagPy/pmagpy directory Returns --------- full : DataFrame cached data model json in DataFrame format
3.986563
3.192938
1.248556
if not requests: return False try: req = requests.get("https://earthref.org/MagIC/data-models/3.0.json", timeout=3) if not req.ok: return False return req except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout): return False
def get_dm_online(self)
Use requests module to get data model from Earthref. If this fails or times out, return false. Returns --------- result : requests.models.Response, False if unsuccessful
5.005898
3.694422
1.354988
data_model = {} levels = ['specimens', 'samples', 'sites', 'locations', 'ages', 'measurements', 'criteria', 'contribution', 'images'] criteria_map = pd.DataFrame(full_df['criteria_map']) for level in levels: df = pd.DataFrame(full_df['tables'][level]['columns']) data_model[level] = df.transpose() # replace np.nan with None data_model[level] = data_model[level].where((pd.notnull(data_model[level])), None) return data_model, criteria_map
def parse_cache(self, full_df)
Format the cached data model into a dictionary of DataFrames and a criteria map DataFrame. Parameters ---------- full_df : DataFrame result of self.get_dm_offline() Returns ---------- data_model : dictionary of DataFrames crit_map : DataFrame
4.665666
4.118067
1.132975
# data model tables = pd.DataFrame(data_model) data_model = {} for table_name in tables.columns: data_model[table_name] = pd.DataFrame(tables[table_name]['columns']).T # replace np.nan with None data_model[table_name] = data_model[table_name].where((pd.notnull(data_model[table_name])), None) # criteria map zipped = list(zip(crit.keys(), crit.values())) crit_map = pd.DataFrame(zipped) crit_map.index = crit_map[0] crit_map.drop(0, axis='columns', inplace=True) crit_map.rename({1: 'criteria_map'}, axis='columns', inplace=True) crit_map.index.rename("", inplace=True) for table_name in ['measurements', 'specimens', 'samples', 'sites', 'locations', 'contribution', 'criteria', 'images', 'ages']: crit_map.loc[table_name] = np.nan return data_model, crit_map
def parse(self, data_model, crit)
Take the relevant pieces of the data model json and parse into data model and criteria map. Parameters ---------- data_model : data model piece of json (nested dicts) crit : criteria map piece of json (nested dicts) Returns ---------- data_model : dictionary of DataFrames crit_map : DataFrame
3.095528
2.912444
1.062863
tables = raw.json()['tables'] crit = raw.json()['criteria_map'] return self.parse(tables, crit)
def parse_response(self, raw)
Format the requested data model into a dictionary of DataFrames and a criteria map DataFrame. Take data returned by a requests.get call to Earthref. Parameters ---------- raw: 'requests.models.Response' Returns --------- data_model : dictionary of DataFrames crit_map : DataFrame
13.954059
7.461175
1.870223
pmag_dir = find_pmag_dir.get_pmag_dir() if pmag_dir is None: pmag_dir = '.' model_file = os.path.join(pmag_dir, 'pmagpy', 'data_model', 'data_model.json') # for py2app: if not os.path.isfile(model_file): model_file = os.path.join(pmag_dir, 'data_model', 'data_model.json') if not os.path.isfile(model_file): model_file = os.path.join(os.path.split(os.path.dirname(__file__))[0],'pmagpy', 'data_model','data_model.json') if not os.path.isfile(model_file): model_file = os.path.join(os.path.split(os.path.dirname(__file__))[0], 'data_model','data_model.json') return model_file
def find_cached_dm(self)
Find filename where cached data model json is stored. Returns --------- model_file : str data model json file location
2.055767
1.975929
1.040406
output_json = json.loads(raw.content) output_file = self.find_cached_dm() json.dump(output_json, open(output_file, 'w+'))
def cache_data_model(self, raw)
Cache the data model json. Take data returned by a requests.get call to Earthref. Parameters ---------- raw: requests.models.Response
5.439997
5.576272
0.975562
df = self.dm[table_name] return list(df['group'].unique())
def get_groups(self, table_name)
Return list of all groups for a particular data type
8.107651
6.11915
1.324964
# get all headers of a particular group df = self.dm[table_name] cond = df['group'] == group_name return df[cond].index
def get_group_headers(self, table_name, group_name)
Return a list of all headers for a given group
6.928447
6.926326
1.000306
df = self.dm[table_name] cond = df['validations'].map(lambda x: 'required()' in str(x)) return df[cond].index
def get_reqd_headers(self, table_name)
Return a list of all required headers for a particular table
10.302163
8.977221
1.147589
df = self.dm[table_name] try: group_name = df.loc[col_name, 'group'] except KeyError: return '' return group_name
def get_group_for_col(self, table_name, col_name)
Check data model to find group name for a given column header Parameters ---------- table_name: str col_name: str Returns --------- group_name: str
3.825243
4.017732
0.95209
N,kappa=100,20 if '-h' in sys.argv: print(main.__doc__) sys.exit() elif '-i' in sys.argv: ans=input(' Kappa: ') kappa=float(ans) ans=input(' N: ') N=int(ans) else: if '-k' in sys.argv: ind=sys.argv.index('-k') kappa=float(sys.argv[ind+1]) if '-n' in sys.argv: ind=sys.argv.index('-n') N=int(sys.argv[ind+1]) for k in range(N): spitout(kappa)
def main()
NAME fisher.py DESCRIPTION generates set of Fisher distribed data from specified distribution INPUT (COMMAND LINE ENTRY) OUTPUT dec, inc SYNTAX fisher.py [-h] [-i] [command line options] OPTIONS -h prints help message and quits -i for interactive entry -k specify kappa as next argument, default is 20 -n specify N as next argument, default is 100 where: kappa: fisher distribution concentration parameter N: number of directions desired
2.72541
2.467851
1.104366
dir_path='.' if '-WD' in sys.argv: ind=sys.argv.index('-WD') dir_path=sys.argv[ind+1] zfile=dir_path+'/zeq_redo' if '-h' in sys.argv: print(main.__doc__) sys.exit() if '-f' in sys.argv: ind=sys.argv.index('-f') inspec=dir_path+'/'+sys.argv[ind+1] if '-F' in sys.argv: ind=sys.argv.index('-F') zfile=dir_path+'/'+sys.argv[ind+1] zredo=open(zfile,"w") # # read in PMM file # specs=[] prior_spec_data=open(inspec,'r').readlines() for line in prior_spec_data: rec=line.split(',') if rec[0][0]!='"' and rec[0]!="ID" and len(rec)>2: # skip all the header stuff spec=rec[0] specs.append(spec) comp_name=string.uppercase[specs.count(spec)-1] # assign component names calculation_type="DE-FM" if rec[1].strip()=='DirPCA': calculation_type="DE-BFL" # assume default calculation type is best-fit line if rec[1].strip()=='DirOPCA': calculation_type="DE-BFL-A" # anchored best-fit line if rec[1].strip()=='GCPCA' or rec[1]=='GCnPCA' : calculation_type="DE-BFP" # best-fit plane steps=rec[2].strip().split('-') min,max=steps[0],steps[1] beg,end="","" if min=="NRM": beg=0 elif min[0]=='M' or min[0]=='H': beg=float(min[1:])*1e-3 # convert to T from mT elif min[-1]=='M': beg=float(min[:-1])*1e-3 # convert to T from mT elif min[0]=='T': beg=float(min[1:])+273 # convert to C to kelvin if max[0]=='M' or max[0]=='H': end=float(max[1:])*1e-3 # convert to T from mT elif max[0]=='T': end=float(max[1:])+273 # convert to C to kelvin elif max[-1]=='M': end=float(max[1:])*1e-3 # convert to T from mT if beg==0:beg=273 outstring='%s %s %s %s %s \n'%(spec,calculation_type,beg,end,comp_name) zredo.write(outstring)
def main()
NAME pmm_redo.py DESCRIPTION converts the UCSC PMM files format to PmagPy redo file SYNTAX pmm_redo.py [-h] [command line options] OPTIONS -h: prints help message and quits -f FILE: specify input file -F FILE: specify output file, default is 'zeq_redo'
3.609235
3.248829
1.110934
if not items_list: return '' string_list = [] for item in items_list: try: name = item.name string_list.append(name) except AttributeError: pass return ":".join(string_list)
def get_item_string(items_list)
take in a list of pmag_objects return a colon-delimited list of the findable names
2.639032
2.264948
1.165162
old_data_keys = list(old_dict.keys()) new_data_keys = list(new_dict.keys()) all_keys = set(old_data_keys).union(new_data_keys) combined_data_dict = {} for k in all_keys: try: combined_data_dict[k] = new_dict[k] except KeyError: combined_data_dict[k] = old_dict[k] return combined_data_dict
def combine_dicts(new_dict, old_dict)
returns a dictionary with all key, value pairs from new_dict. also returns key, value pairs from old_dict, if that key does not exist in new_dict. if a key is present in both new_dict and old_dict, the new_dict value will take precedence.
1.720615
1.702668
1.01054
if not name_list: names = [item.name for item in items_list if item] else: names = name_list if item_name in names: ind = names.index(item_name) return items_list[ind] return False
def find_by_name(self, item_name, items_list, name_list=None)
Return item from items_list with name item_name.
2.305834
2.129586
1.082761
item = self.find_by_name(item_name, items_list) if not item: item = self.data_lists[item_type][2](item_name, None) return item
def find_or_create_by_name(self, item_name, items_list, item_type)
See if item with item_name exists in item_list. If not, create that item. Either way, return an item of type item_type.
3.647676
3.54211
1.029803
if not self.data_model: self.data_model = validate_upload.get_data_model() if not self.data_model: print("Can't access MagIC-data-model at the moment.\nIf you are working offline, make sure MagIC-data-model.txt is in your PmagPy directory (or download it from https://github.com/ltauxe/PmagPy and put it in your PmagPy directory).\nOtherwise, check your internet connection") return False # actual is at position 0, reqd is at position 1, optional at position 2 self.headers['measurement']['er'][1], self.headers['measurement']['er'][2] = self.get_headers('magic_measurements') self.headers['specimen']['er'][1], self.headers['specimen']['er'][2] = self.get_headers('er_specimens') self.headers['sample']['er'][1], self.headers['sample']['er'][2] = self.get_headers('er_samples') self.headers['site']['er'][1], self.headers['site']['er'][2] = self.get_headers('er_sites') self.headers['location']['er'][1], self.headers['location']['er'][2] = self.get_headers('er_locations') self.headers['age']['er'][1], self.headers['age']['er'][2] = self.get_headers('er_ages') self.headers['result']['pmag'][1], self.headers['result']['pmag'][2] = self.get_headers('pmag_results') self.headers['specimen']['pmag'][1], self.headers['specimen']['pmag'][2] = self.get_headers('pmag_specimens') self.headers['sample']['pmag'][1], self.headers['sample']['pmag'][2] = self.get_headers('pmag_samples') self.headers['site']['pmag'][1], self.headers['site']['pmag'][2] = self.get_headers('pmag_sites')
def init_default_headers(self)
initialize default required headers. if there were any pre-existing headers, keep them also.
2.320349
2.274033
1.020367
specimen = self.find_by_name(spec_name, self.specimens) measurement = Measurement(exp_name, meas_num, specimen, er_data) self.measurements.append(measurement) return measurement
def add_measurement(self, exp_name, meas_num, spec_name=None, er_data=None, pmag_data=None)
Find actual data object for specimen. Then create a measurement belonging to that specimen and add it to the data object
2.57832
2.440368
1.056529
specimen = self.find_by_name(old_spec_name, self.specimens) if not specimen: print('-W- {} is not a currently existing specimen, so cannot be updated'.format(old_spec_name)) return False if new_sample_name: new_sample = self.find_by_name(new_sample_name, self.samples) if not new_sample: print(.format(new_sample_name, new_sample_name)) new_sample = self.add_sample(new_sample_name) else: new_sample = None specimen.change_specimen(new_spec_name, new_sample, new_er_data, new_pmag_data, replace_data) return specimen
def change_specimen(self, old_spec_name, new_spec_name, new_sample_name=None, new_er_data=None, new_pmag_data=None, replace_data=False)
Find actual data objects for specimen and sample. Then call Specimen class change method to update specimen name and data.
2.198706
2.188006
1.004891
specimen = self.find_by_name(spec_name, self.specimens) if not specimen: return False sample = specimen.sample if sample: sample.specimens.remove(specimen) self.specimens.remove(specimen) del specimen return []
def delete_specimen(self, spec_name)
Remove specimen with name spec_name from self.specimens. If the specimen belonged to a sample, remove it from the sample's specimen list.
2.721157
2.282468
1.192199
if samp_name: sample = self.find_by_name(samp_name, self.samples) if not sample: print(.format(samp_name, samp_name)) sample = self.add_sample(samp_name) else: sample = None specimen = Specimen(spec_name, sample, self.data_model, er_data, pmag_data) self.specimens.append(specimen) if sample: sample.specimens.append(specimen) return specimen
def add_specimen(self, spec_name, samp_name=None, er_data=None, pmag_data=None)
Create a Specimen object and add it to self.specimens. If a sample name is provided, add the specimen to sample.specimens as well.
2.456594
2.38639
1.029418
sample = self.find_by_name(old_samp_name, self.samples) if not sample: print('-W- {} is not a currently existing sample, so it cannot be updated'.format(old_samp_name)) return False if new_site_name: new_site = self.find_by_name(new_site_name, self.sites) if not new_site: print(.format(new_site_name, new_site_name))#sample.site or '*empty*', sample) new_site = self.add_site(new_site_name) else: new_site = None sample.change_sample(new_samp_name, new_site, new_er_data, new_pmag_data, replace_data) return sample
def change_sample(self, old_samp_name, new_samp_name, new_site_name=None, new_er_data=None, new_pmag_data=None, replace_data=False)
Find actual data objects for sample and site. Then call Sample class change method to update sample name and data..
2.896879
2.890532
1.002196
if site_name: site = self.find_by_name(site_name, self.sites) if not site: print(.format(site_name, site_name)) site = self.add_site(site_name) else: site = None sample = Sample(samp_name, site, self.data_model, er_data, pmag_data) self.samples.append(sample) if site: site.samples.append(sample) return sample
def add_sample(self, samp_name, site_name=None, er_data=None, pmag_data=None)
Create a Sample object and add it to self.samples. If a site name is provided, add the sample to site.samples as well.
2.488013
2.336056
1.065049
sample = self.find_by_name(sample_name, self.samples) if not sample: return False specimens = sample.specimens site = sample.site if site: site.samples.remove(sample) self.samples.remove(sample) for spec in specimens: spec.sample = "" return specimens
def delete_sample(self, sample_name, replacement_samp=None)
Remove sample with name sample_name from self.samples. If the sample belonged to a site, remove it from the site's sample list. If the sample had any specimens, change specimen.sample to "".
3.351785
2.387077
1.404138
site = self.find_by_name(old_site_name, self.sites) if not site: print('-W- {} is not a currently existing site, so it cannot be updated.'.format(old_site_name)) return False if new_location_name: if site.location: old_location = self.find_by_name(site.location.name, self.locations) if old_location: old_location.sites.remove(site) new_location = self.find_by_name(new_location_name, self.locations) if not new_location: print(.format(new_location_name, new_location_name)) new_location = self.add_location(new_location_name) new_location.sites.append(site) else: new_location = None ## check all declinations/azimuths/longitudes in range 0=>360. #for key, value in new_er_data.items(): # new_er_data[key] = pmag.adjust_to_360(value, key) site.change_site(new_site_name, new_location, new_er_data, new_pmag_data, replace_data) return site
def change_site(self, old_site_name, new_site_name, new_location_name=None, new_er_data=None, new_pmag_data=None, replace_data=False)
Find actual data objects for site and location. Then call the Site class change method to update site name and data.
2.752392
2.719438
1.012118
if location_name: location = self.find_by_name(location_name, self.locations) if not location: location = self.add_location(location_name) else: location = None ## check all declinations/azimuths/longitudes in range 0=>360. #for key, value in er_data.items(): # er_data[key] = pmag.adjust_to_360(value, key) new_site = Site(site_name, location, self.data_model, er_data, pmag_data) self.sites.append(new_site) if location: location.sites.append(new_site) return new_site
def add_site(self, site_name, location_name=None, er_data=None, pmag_data=None)
Create a Site object and add it to self.sites. If a location name is provided, add the site to location.sites as well.
3.169111
3.027206
1.046877
site = self.find_by_name(site_name, self.sites) if not site: return False self.sites.remove(site) if site.location: site.location.sites.remove(site) samples = site.samples for samp in samples: samp.site = '' del site return samples
def delete_site(self, site_name, replacement_site=None)
Remove site with name site_name from self.sites. If the site belonged to a location, remove it from the location's site list. If the site had any samples, change sample.site to "".
3.656231
2.564705
1.425595
location = self.find_by_name(old_location_name, self.locations) if not location: print('-W- {} is not a currently existing location, so it cannot be updated.'.format(old_location_name)) return False location.change_location(new_location_name, new_er_data, new_pmag_data, replace_data) return location
def change_location(self, old_location_name, new_location_name, new_parent_name=None, new_er_data=None, new_pmag_data=None, replace_data=False)
Find actual data object for location with old_location_name. Then call Location class change method to update location name and data.
3.019348
2.869639
1.05217
if not location_name: return False location = Location(location_name, data_model=self.data_model, er_data=er_data, pmag_data=pmag_data) self.locations.append(location) return location
def add_location(self, location_name, parent_name=None, er_data=None, pmag_data=None)
Create a Location object and add it to self.locations.
2.320904
2.096155
1.10722
location = self.find_by_name(location_name, self.locations) if not location: return False sites = location.sites self.locations.remove(location) for site in sites: if site: site.location = '' del location return sites
def delete_location(self, location_name)
Remove location with name location_name from self.locations. If the location had any sites, change site.location to "".
3.733307
2.953812
1.263894
result = self.find_by_name(old_result_name, self.results) if not result: msg = '-W- {} is not a currently existing result, so it cannot be updated.'.format(old_result_name) print(msg) return False else: specimens, samples, sites, locations = None, None, None, None if spec_names: specimens = [self.find_or_create_by_name(spec, self.specimens, 'specimen') for spec in spec_names] if samp_names: samples = [self.find_or_create_by_name(samp, self.samples, 'sample') for samp in samp_names] if site_names: sites = [self.find_or_create_by_name(site, self.sites, 'site') for site in site_names] if loc_names: locations = [self.find_or_create_by_name(loc, self.locations, 'location') for loc in loc_names] result.change_result(new_result_name, new_pmag_data, specimens, samples, sites, locations, replace_data) return result
def change_result(self, old_result_name, new_result_name, new_er_data=None, new_pmag_data=None, spec_names=None, samp_names=None, site_names=None, loc_names=None, replace_data=False)
Find actual data object for result with old_result_name. Then call Result class change method to update result name and data.
1.867539
1.844017
1.012756
meas_file = os.path.join(self.WD, 'magic_measurements.txt') if not os.path.isfile(meas_file): print("-I- No magic_measurements.txt file") return {} try: meas_data, file_type = pmag.magic_read(meas_file) except IOError: print("-I- No magic_measurements.txt file") return {} if file_type == 'bad_file': print("-E- ERROR: Can't read magic_measurements.txt file. File is corrupted.") old_specimen_name = '' #start_time = time.time() meas_name_list = [measurement.name for measurement in self.measurements] for rec in meas_data: # get citation information citation = rec.get('er_citation_names', 'This study') if 'This study' not in citation: citation = citation.strip() + ':This study' er_data = {'er_citation_names': citation} pmag_data = {'er_citation_names': 'This study'} specimen_name = rec["er_specimen_name"] # ignore measurement if there is no specimen if specimen_name == "" or specimen_name == " ": continue # if we've moved onto a new specimen, make sure a sample/site/location # exists for that specimen if specimen_name != old_specimen_name: sample_name = rec["er_sample_name"] site_name = rec["er_site_name"] location_name = rec["er_location_name"] # add items and parents location = self.find_by_name(location_name, self.locations) if location_name and not location: location = self.add_location(location_name, er_data=er_data, pmag_data=pmag_data) site = self.find_by_name(site_name, self.sites) if site_name and not site: site = self.add_site(site_name, location_name, er_data, pmag_data) sample = self.find_by_name(sample_name, self.samples) if sample_name and not sample: sample = self.add_sample(sample_name, site_name, er_data, pmag_data) specimen = self.find_by_name(specimen_name, self.specimens) if specimen_name and not specimen: specimen = self.add_specimen(specimen_name, sample_name, er_data, pmag_data) # add child_items if sample and not self.find_by_name(specimen_name, sample.specimens): sample.specimens.append(specimen) if site and not self.find_by_name(sample_name, 
site.samples): site.samples.append(sample) if location and not self.find_by_name(site_name, location.sites): location.sites.append(site) exp_name = rec['magic_experiment_name'] meas_num = rec['measurement_number'] meas_name = exp_name + '_' + str(meas_num) measurement = self.find_by_name(meas_name, self.measurements, meas_name_list) if not measurement: self.add_measurement(exp_name, meas_num, specimen.name, rec) meas_name_list.append(meas_name) old_specimen_name = specimen_name
def get_data(self)
attempt to read measurements file in working directory.
2.364628
2.31286
1.022383
# use filename if provided, otherwise find er_ages.txt in WD if not filename: short_filename = 'er_ages.txt' magic_file = os.path.join(self.WD, short_filename) else: magic_file = filename if not os.path.isfile(magic_file): print('-W- Could not find {}'.format(magic_file)) return False data_dict, header, file_type = self.read_magic_file(magic_file, 'by_line_number') # if provided file is not an age_file, # try to read it in as whatever type of file it actually is if file_type != 'er_ages': item_type = file_type.split('_')[1][:-1] self.get_magic_info(item_type, filename=filename, sort_by_file_type=True) return file_type # if it is an age file, # determine level for each age and assign it to the appropriate pmag object for item_dict in list(data_dict.values()): item_type = None for dtype in ['specimen', 'sample', 'site', 'location']: header_name = 'er_' + dtype + '_name' if header_name in list(item_dict.keys()): if item_dict[header_name]: item_type = dtype item_name = item_dict[header_name].strip() break if not item_type: print('-W- You must provide a name for your age') print(' These data:\n{}\n will not be imported'.format(item_dict)) continue items_list = self.data_lists[item_type][0] item = self.find_by_name(item_name, items_list) if not item: ## the following code creates any item in er_ages that does not exist already ## however, we may not WANT that behavior print(.format(item_type, item_name, item_type, item_name)) ind = self.ancestry.index(item_type) parent_type = self.ancestry[ind+1] parent_header, parent_constructor = None, None if parent_type: parent_list, parent_class, parent_constructor = self.data_lists[parent_type] parent_header = 'er_' + parent_type + '_name' parent_name = item_dict.get(parent_header, '') parent = self.find_by_name(parent_name, parent_list) # if the parent item doesn't exist, and should, create it if parent_name and not parent: print(.format(parent_type, parent_name, parent_type, parent_name)) parent = parent_constructor(parent_name, 
None) item_constructor = self.data_lists[item_type][2] if not parent: parent_name = None item = item_constructor(item_name, parent_name) # add the age data to the object item.age_data = remove_dict_headers(item_dict) # note that data is available to write self.write_ages = True return file_type
def get_age_info(self, filename=None)
Read er_ages.txt file. Parse information into dictionaries for each site/sample. Then add it to the site/sample object as site/sample.age_data.
4.013684
3.962181
1.012999
if not filename: short_filename = "pmag_results.txt" magic_file = os.path.join(self.WD, short_filename) else: magic_file = filename if not os.path.isfile(magic_file): print('-W- Could not find {} in your working directory {}'.format(short_filename, self.WD)) return False # get the data from the pmag_results.txt file data_dict = self.read_magic_file(magic_file, 'by_line_number')[0] def make_items_list(string, search_items_list): names = string.split(':') items = [] for name in names: name = name.strip(' ') item = self.find_by_name(name, search_items_list) if item: items.append(item) return items for num, result in list(data_dict.items()): name, specimens, samples, sites, locations = None, None, None, None, None for key, value in list(result.items()): #print key, ':', value if key == 'er_specimen_names': specimens = make_items_list(value, self.specimens) if key == 'er_sample_names': samples = make_items_list(value, self.samples) if key == 'er_site_names': sites = make_items_list(value, self.sites) if key == 'er_location_names': locations = make_items_list(value, self.locations) if key == 'pmag_result_name': name = value for header_name in ['er_specimen_names', 'er_site_names', 'er_sample_names', 'er_location_names']: if header_name in list(result.keys()): result.pop(header_name) if not name: name = num result_item = self.find_by_name(name, self.results) if not result_item: result_item = Result(name, specimens, samples, sites, locations, result, self.data_model) else: print('-W- Two or more results with name: {} found in your result file.\n Taking only the first.'.format(name)) if result_item and result_item not in self.results: self.results.append(result_item)
def get_results_info(self, filename=None)
Read pmag_results.txt file. Parse information into dictionaries for each item. Then add it to the item object as object.results_data.
2.492908
2.424284
1.028307
def read_magic_file(self, path, sort_by_this_name, sort_by_file_type=False):
    """
    Read a magic-formatted, tab- or space-delimited file.

    Parameters
    ----------
    path : str
        full path to the magic file
    sort_by_this_name : str
        column whose value keys the returned dict; the special value
        "by_line_number" keys records by position instead
    sort_by_file_type : bool
        when True, ignore sort_by_this_name and derive the key column
        from the file type found in the header line

    Returns
    -------
    (DATA, header, file_type) on success, where DATA is
    {key: {column: value, ...}, ...};
    (False, None, 'empty_file') or (False, None, 'bad_file') on failure
    """
    DATA = {}
    with open(path, 'r') as fin:
        lines = list(fin.readlines())
    # guard against a truly empty file -- the old code indexed lines[0]
    # unconditionally and raised IndexError instead of returning
    if not lines or not lines[0]:
        return False, None, 'empty_file'
    first_line = lines[0]
    # the header line is e.g. "tab\ter_sites"; sniff the delimiter from its
    # first two characters ('s' -> space, 't' -> tab).  Slicing avoids the
    # old IndexError on a one-character first line.
    if 's' in first_line[:2]:
        delim = ' '
    elif 't' in first_line[:2]:
        delim = '\t'
    else:
        print('-W- error reading ', path)
        return False, None, 'bad_file'
    file_type = first_line.strip('\n').split(delim)[1]
    if sort_by_file_type:
        # e.g. 'er_sites' -> 'site' -> key records on 'er_site_name'
        item_type = file_type.split('_')[1][:-1]
        if item_type == 'age':
            sort_by_this_name = "by_line_number"
        else:
            sort_by_this_name = 'er_' + item_type + '_name'
    line = lines[1]
    header = line.strip('\n').split(delim)
    counter = 0
    for line in lines[2:]:
        tmp_data = {}
        tmp_line = line.strip('\n').split(delim)
        for i in range(len(header)):
            # pad short rows with empty strings
            if i < len(tmp_line):
                tmp_data[header[i]] = tmp_line[i].strip()
            else:
                tmp_data[header[i]] = ""
        if sort_by_this_name == "by_line_number":
            DATA[counter] = tmp_data
            counter += 1
        else:
            # records with a blank key are skipped
            if tmp_data[sort_by_this_name] != "":
                DATA[tmp_data[sort_by_this_name]] = tmp_data
    return DATA, header, file_type
def read_magic_file(self, path, sort_by_this_name, sort_by_file_type=False)
read a magic-formatted tab-delimited file. return a dictionary of dictionaries, with this format: {'Z35.5a': {'specimen_weight': '1.000e-03', 'er_citation_names': 'This study', 'specimen_volume': '', 'er_location_name': '', 'er_site_name': 'Z35.', 'er_sample_name': 'Z35.5', 'specimen_class': '', 'er_specimen_name': 'Z35.5a', 'specimen_lithology': '', 'specimen_type': ''}, ....}
2.343089
2.283092
1.026279
def write_files(self):
    """
    Write all saved data out into er_* and pmag_* files as appropriate.

    Returns
    -------
    (True, None) on success;
    (False, warnings) when validation produced warnings, where warnings
    is the 4-tuple of dicts returned by validate_data()
    """
    warnings = self.validate_data()
    print('-I- Writing all saved data to files')
    if self.measurements:
        self.write_measurements_file()
    for dtype in ['specimen', 'sample', 'site']:
        if self.data_lists[dtype][0]:
            do_pmag = dtype in self.incl_pmag_data
            self.write_magic_file(dtype, do_er=True, do_pmag=do_pmag)
            if not do_pmag:
                # remove a stale pmag_* file left over from a previous run
                pmag_file = os.path.join(self.WD, 'pmag_' + dtype + 's.txt')
                if os.path.isfile(pmag_file):
                    os.remove(pmag_file)
    if self.locations:
        self.write_magic_file('location', do_er=True, do_pmag=False)
    self.write_age_file()
    if self.results:
        self.write_result_file()
    # validate_data returns a 4-tuple of dicts; the old test
    # ("if warnings:") was always True because a non-empty tuple is
    # truthy, so the success path was unreachable -- test the contents
    if any(warnings):
        print('-W- ' + str(warnings))
        return False, warnings
    return True, None
def write_files(self)
write all data out into er_* and pmag_* files as appropriate
3.923313
3.310577
1.185084
# Write er_ages.txt from the ErMagicBuilder data object.  One row per
# specimen/sample/site/location that actually carries age data; returns
# False (and removes the file) when there is nothing to write, otherwise
# returns the (closed) output file object.
if not self.write_ages:
    print('-I- No age data available to write')
    return
# column layout: the four er_*_name columns first, then the priority
# headers (self.first_age_headers), then the remaining age headers sorted
first_headers = self.first_age_headers
actual_headers = sorted(self.headers['age']['er'][0])
for header in first_headers:
    if header in actual_headers:
        actual_headers.remove(header)
add_headers = ['er_specimen_name', 'er_sample_name', 'er_site_name', 'er_location_name']
actual_headers[:0] = first_headers
full_headers = add_headers[:]
full_headers.extend(actual_headers)
header_string = '\t'.join(full_headers)
# gather all items in ancestry order, each dtype group sorted by name
ages = []
for dtype in ['specimen', 'sample', 'site', 'location']:
    ages_list = sorted(self.data_lists[dtype][0], key=lambda item: item.name)
    ages.extend(ages_list)
age_strings = []
for age in ages:
    ind = self.ancestry.index(age.dtype)
    # one slot per ancestor level between this dtype and the top
    ancestors = ['' for num in range(len(self.ancestry) - (ind+2))]
    data_found = False
    string = ''
    # place the item's own name in its dtype column (tab offsets)
    if age.dtype == 'specimen':
        string += age.name + '\t'
    elif age.dtype == 'sample':
        string += '\t' + age.name + '\t'
    elif age.dtype == 'site':
        string += '\t\t' + age.name + '\t'
    elif age.dtype == 'location':
        string += '\t\t\t' + age.name + '\t'
    # walk up the parent chain to fill in the ancestor name columns
    # NOTE(review): assumes ancestors has enough slots whenever a parent
    # exists (i.e. self.ancestry has >= ind+3 entries) -- confirm for the
    # topmost dtypes
    parent = age.get_parent()
    grandparent = None
    if parent:
        ancestors[0] = parent.name
        grandparent = parent.get_parent()
        if grandparent:
            ancestors[1] = grandparent.name
            greatgrandparent = grandparent.get_parent()
            if greatgrandparent:
                ancestors[2] = greatgrandparent.name
    for ancestor in ancestors:
        string += ancestor + '\t'
    for key in actual_headers:
        try:
            add_string = age.age_data[key]
        except KeyError:
            add_string = ''
            age.age_data[key] = ''
        # citations alone don't count as "data"
        if add_string and not key == 'er_citation_names':
            data_found = True
        if key == 'er_citation_names' and not add_string.strip('\t'):
            add_string = 'This study'
        string += add_string + '\t'
    # prevent extra '' at the end of age string
    if string.endswith('\t'):
        string = string[:-1]
    # only write ages to file if there is data provided
    if data_found:
        age_strings.append(string)
outfile = open(os.path.join(self.WD, 'er_ages.txt'), 'w')
outfile.write('tab\ter_ages\n')
outfile.write(header_string + '\n')
if not age_strings:
    # nothing but headers was written -- remove the file again
    outfile.close()
    os.remove(os.path.join(self.WD, 'er_ages.txt'))
    return False
for string in age_strings:
    outfile.write(string + '\n')
outfile.close()
return outfile
def write_age_file(self)
Write er_ages.txt based on updated ErMagicBuilder data object
2.761092
2.692777
1.02537
def validate_data(self):
    """
    Validate specimen, sample, site, and location data.

    Returns
    -------
    tuple of 4 dicts
        (specimen_warnings, sample_warnings, site_warnings,
        location_warnings); a dict is empty when there is nothing of
        that type to validate.
    """
    results = []
    for item_list, item_type in ((self.specimens, 'specimen'),
                                 (self.samples, 'sample'),
                                 (self.sites, 'site'),
                                 (self.locations, 'location')):
        if item_list:
            results.append(self.validate_items(item_list, item_type))
        else:
            results.append({})
    return tuple(results)
def validate_data(self)
Validate specimen, sample, site, and location data.
2.142415
1.666074
1.285906
# Validate every Pmag_object in item_list: check the item's own type,
# its parent link, and its children links.  Returns
# {item_name: {'type': [...], 'parent': [...], 'children': [...]}, ...}
# with empty categories omitted.
def append_or_create_dict_item(warning_type, dictionary, key, value):
    # record warnings under the item's name (falls back to the key itself
    # when it has no .name attribute); no-op for an empty warning list
    if not value:
        return
    try:
        name = key.name
    except AttributeError:
        name = key
    if not name in dictionary:
        dictionary[name] = {}
    if not warning_type in dictionary[name]:
        dictionary[name][warning_type] = []
    for v in value:
        dictionary[name][warning_type].append(v)

def check_item_type(item, item_type):#, warnings=None):
    # item must be an instance of its declared class and registered in
    # the corresponding data list
    warnings = []
    item_list, item_class, item_constructor = self.data_lists[item_type]
    if not isinstance(item, item_class):
        warnings.append(PmagException('wrong type'))
    if item not in item_list:
        warnings.append(PmagException('not in data object'))
    return warnings

def check_item_for_parent(item, item_type, parent_type):
    # the parent must exist, be of the right class, and be registered
    if not parent_type:
        return []
    if not isinstance(item, Pmag_object):
        return []
    warnings = []
    parent = item.get_parent()
    parent_list, parent_class, parent_constructor = self.data_lists[parent_type]
    if not parent or not parent.name:
        warnings.append(PmagException('missing parent'))
        return warnings
    if not isinstance(parent, parent_class):
        warnings.append(PmagException('invalid parent type', parent))
    if not parent in parent_list:
        warnings.append(PmagException('parent not in data object', parent))
    return warnings

def check_item_for_children(item, child_type):
    # every child must be of the right class and registered
    if not child_type:
        return []
    warnings = []
    children = item.children
    child_list, child_class, child_constructor = self.data_lists[child_type]
    for child in children:
        if not isinstance(child, child_class):
            warnings.append(PmagException('child has wrong type', child))
        if not child in child_list:
            warnings.append(PmagException('child not in data object', child))
    return warnings

warnings = {}
# parent/child types come from the ancestry ordering
type_ind = self.ancestry.index(item_type)
parent_type = self.ancestry[type_ind+1]
child_type = self.ancestry[type_ind-1]
for item in item_list:
    #warnings[item] = []
    type_warnings = check_item_type(item, item_type)
    append_or_create_dict_item('type', warnings, item, type_warnings)
    parent_warnings = check_item_for_parent(item, item_type, parent_type)
    append_or_create_dict_item('parent', warnings, item, parent_warnings)
    child_warnings = check_item_for_children(item, child_type)
    append_or_create_dict_item('children', warnings, item, child_warnings)
return warnings
def validate_items(self, item_list, item_type)
Go through a list Pmag_objects and check for: parent errors, children errors, type errors. Return a dictionary of exceptions in this format: {sample1: {'parent': [warning1, warning2, warning3], 'child': [warning1, warning2]}, sample2: {'child': [warning1], 'type': [warning1, warning2]}, ...}
2.11604
1.985278
1.065866
def get_min_max_lat_lon(self, locations):
    """
    Map each location name to its bounding latitudes/longitudes,
    derived from the lat/lon values of that location's sites.

    Parameters
    ----------
    locations : list
        objects with .name and .sites; each site carries an er_data
        dict with 'site_lat'/'site_lon' string entries

    Returns
    -------
    dict
        {name: {'location_begin_lat': min_lat,
                'location_begin_lon': min_lon,
                'location_end_lat': max_lat,
                'location_end_lon': max_lon}, ...}
        values are floats, or '' when no site provides them
    """
    def _bounds(raw_values):
        # convert collected strings to floats; ('', '') when none found
        if not raw_values:
            return '', ''
        nums = [float(v) for v in raw_values]
        return min(nums), max(nums)

    summary = {}
    for loc in locations:
        site_list = loc.sites
        if not any(site_list):
            # no sites at all: record empty bounds
            summary[loc.name] = {'location_begin_lat': '',
                                 'location_begin_lon': '',
                                 'location_end_lat': '',
                                 'location_end_lon': ''}
            continue
        # try to fill in min/max latitudes/longitudes from the sites
        lat_vals = [s.er_data['site_lat'] for s in site_list if s.er_data['site_lat']]
        lon_vals = [s.er_data['site_lon'] for s in site_list if s.er_data['site_lon']]
        min_lat, max_lat = _bounds(lat_vals)
        min_lon, max_lon = _bounds(lon_vals)
        summary[loc.name] = {'location_begin_lat': min_lat,
                             'location_begin_lon': min_lon,
                             'location_end_lat': max_lat,
                             'location_end_lon': max_lon}
    return summary
def get_min_max_lat_lon(self, locations)
Take a list of locations and return a dictionary with: location1: 'location_begin_lat', 'location_begin_lon', 'location_end_lat', 'location_end_lon'. and so on.
1.945969
1.807658
1.076514
def set_parent(self, new_samp):
    """
    Set self.sample to new_samp (a Sample object, or an empty string to
    clear the parent), then propagate the change.

    Raises
    ------
    Exception
        if new_samp is truthy but not a Sample.  Validation now happens
        BEFORE assignment (the old code set self.sample first, leaving
        the object in a corrupted state when the check failed); this
        also matches the Site variant of set_parent.
    """
    if new_samp and not isinstance(new_samp, Sample):
        raise Exception
    self.sample = new_samp
    self.propagate_data()
    return new_samp
def set_parent(self, new_samp)
Set self.sample as either an empty string, or with a new Sample.
4.53259
3.8998
1.162262
def set_parent(self, new_site):
    """
    Set self.site (either an empty string or a new Site object), then
    propagate the change.

    Raises
    ------
    Exception
        if new_site is truthy but not a Site instance
    """
    # a truthy parent must be an actual Site
    if new_site and not isinstance(new_site, Site):
        raise Exception
    self.site = new_site
    self.propagate_data()
    return new_site
def set_parent(self, new_site)
Set self.site as either an empty string, or with a new Site.
4.672548
3.749271
1.246255
def change_site(self, new_name, new_location=None, new_er_data=None,
                new_pmag_data=None, replace_data=False):
    """
    Update a site's name, location, er_data, and pmag_data.

    New data is merged into the existing dictionaries (overwriting
    matching keys) unless replace_data is True, in which case the new
    dictionaries replace the old ones wholesale.
    """
    self.name = new_name
    # only overwrite the location when a new one was actually supplied
    if new_location:
        self.location = new_location
    self.update_data(new_er_data, new_pmag_data, replace_data)
def change_site(self, new_name, new_location=None, new_er_data=None, new_pmag_data=None, replace_data=False)
Update a site's name, location, er_data, and pmag_data. By default, new data will be added in to pre-existing data, overwriting existing values. If replace_data is True, the new data dictionary will simply take the place of the existing dict.
2.081329
2.210835
0.941422
def find_close_value(self, LIST, value):
    """
    Return the element of LIST nearest to value.

    Ties are broken in favor of the earliest element, matching the old
    strict-less-than scan.

    Raises
    ------
    ValueError
        if LIST is empty (the old code raised UnboundLocalError and
        depended on a star-imported `inf`)
    """
    return min(LIST, key=lambda a: abs(value - a))
def find_close_value(self, LIST, value)
take a LIST and find the nearest value in LIST to 'value'
4.288908
2.68985
1.594479
def find_sample_min_std(self, Intensities):
    """
    Find the interpretation set with the minimum relative standard
    deviation (std/mean).

    Every candidate value of every specimen is tried as an anchor; the
    other specimens contribute their value closest to the anchor.

    Parameters
    ----------
    Intensities : dict
        specimen name -> list of candidate intensity values

    Returns
    -------
    (best_interpretations, mean, std)
        dict of specimen -> chosen value, plus the mean and the sample
        standard deviation (ddof=1) of the chosen values
    """
    best_values = []
    best_rel_std = inf
    best_interps = {}
    for anchor_spec in list(Intensities.keys()):
        for candidate in Intensities[anchor_spec]:
            # build a trial set anchored on this candidate
            trial_interps = {anchor_spec: candidate}
            trial_values = [candidate]
            for other_spec in (s for s in Intensities if s != anchor_spec):
                nearest = self.find_close_value(Intensities[other_spec], candidate)
                trial_values.append(nearest)
                trial_interps[other_spec] = nearest
            # keep the trial with the smallest std/mean (first wins ties)
            if std(trial_values, ddof=1) / mean(trial_values) < best_rel_std:
                best_values = trial_values
                best_rel_std = std(best_values, ddof=1) / mean(trial_values)
                best_interps = copy.deepcopy(trial_interps)
    return best_interps, mean(best_values), std(best_values, ddof=1)
def find_sample_min_std(self, Intensities)
find the best interpretation with the minimum standard deviation (in units of percent % !)
2.829079
2.265185
1.248939
def thellier_interpreter_BS_pars_calc(self, Grade_As):
    """
    Calculate sample or site bootstrap paleointensities and statistics
    from the grade-A interpretations in Grade_As ({specimen: [B, ...]}).

    Returns
    -------
    dict
        always contains 'pass_or_fail' and 'fail_criteria'; on success
        also 'bs_bedian' (the bootstrap median -- key name kept for
        backward compatibility), 'bs_std', 'bs_68', 'bs_95', 'bs_n'.
    """
    thellier_interpreter_pars = {}
    thellier_interpreter_pars['fail_criteria'] = []
    thellier_interpreter_pars['pass_or_fail'] = 'pass'
    BOOTSTRAP_N = int(self.preferences['BOOTSTRAP_N'])
    min_specimens = self.acceptance_criteria['sample_int_n']['value']

    # collect the acceptable interpretations per specimen, dropping any
    # specimen whose max/min interpretation ratio is too large
    Grade_A_samples_BS = {}
    if len(Grade_As) >= min_specimens:
        for specimen in list(Grade_As.keys()):
            if specimen not in Grade_A_samples_BS and len(Grade_As[specimen]) > 0:
                Grade_A_samples_BS[specimen] = sorted(Grade_As[specimen])
                specimen_int_max_slope_diff = (max(Grade_A_samples_BS[specimen]) /
                                               min(Grade_A_samples_BS[specimen]))
                if specimen_int_max_slope_diff > self.acceptance_criteria['specimen_int_max_slope_diff']:
                    # old code passed a second argument to write() -> TypeError
                    self.thellier_interpreter_log.write(
                        "-I- specimen %s Failed specimen_int_max_slope_diff\n" % specimen)
                    del Grade_A_samples_BS[specimen]

    if len(Grade_A_samples_BS) < min_specimens:
        # not enough specimens: record the failure and return early (the
        # old code fell through and crashed on undefined names here)
        thellier_interpreter_pars['pass_or_fail'] = 'fail'
        thellier_interpreter_pars['fail_criteria'].append('sample_int_n')
        self.thellier_interpreter_log.write(
            "-I- FAIL: not enough specimens int_n= %i < %i \n"
            % (len(Grade_A_samples_BS), int(min_specimens)))
        return thellier_interpreter_pars

    # bootstrap: BOOTSTRAP_N resampled means, drawing one interpretation
    # per randomly-chosen specimen each round ('bs' picks an existing
    # value, 'bs_par' draws uniformly within the specimen's range)
    interpreter_method = self.acceptance_criteria['interpreter_method']['value']
    specimen_names = list(Grade_A_samples_BS.keys())
    BS_means_collection = []
    for _ in range(BOOTSTRAP_N):
        B_BS = []
        for _ in range(len(specimen_names)):
            specimen = random.choice(specimen_names)
            Bs = Grade_A_samples_BS[specimen]
            if interpreter_method == 'bs':
                B = random.choice(Bs)
            if interpreter_method == 'bs_par':
                B = random.uniform(min(Bs), max(Bs))
            B_BS.append(B)
        BS_means_collection.append(mean(B_BS))

    BS_means = array(BS_means_collection)
    BS_means.sort()
    n = len(BS_means)
    # percentile indices must be ints -- the old float indices raised
    # TypeError on Python 3
    thellier_interpreter_pars['bs_bedian'] = median(BS_means)
    thellier_interpreter_pars['bs_std'] = std(BS_means, ddof=1)
    thellier_interpreter_pars['bs_68'] = [BS_means[int(0.16 * n)],
                                          BS_means[int(0.84 * n)]]
    thellier_interpreter_pars['bs_95'] = [BS_means[int(0.025 * n)],
                                          BS_means[int(0.975 * n)]]
    thellier_interpreter_pars['bs_n'] = len(specimen_names)
    return thellier_interpreter_pars
def thellier_interpreter_BS_pars_calc(self, Grade_As)
calculate sample or site bootstrap paleointensities and statistics Grade_As={}
2.521534
2.281107
1.105399
def mapping(dictionary, mapping):
    """
    Return a copy of `dictionary` with keys renamed per `mapping`, i.e.:

        dictionary = {'a': 1, 'b': 2, 'c': 3}
        mapping = {'a': 'aa', 'c': 'cc'}
        -> {'aa': 1, 'b': 2, 'cc': 3}

    Keys absent from `mapping` are carried over unchanged.  When two
    source keys map onto the same destination key (e.g. er_synthetic_name
    and er_specimen_name both mapping onto one 3.0 column), a non-null
    value wins and a null value never overwrites an existing one.
    Values may be plain objects or numpy-style arrays (anything with
    .any()).
    """
    def _is_null(value):
        # numpy-style objects are null when .any() is falsy; plain
        # objects use ordinary truthiness
        if hasattr(value, 'any'):
            return not value.any()
        return not value

    mapped_dictionary = {}
    for key, value in dictionary.items():
        if key in mapping:
            new_key = mapping[key]
            # the old code had four duplicated branches here, tested
            # hasattr on the dict itself (always False), and crashed when
            # mixing array and plain values; the net rule is simply:
            # keep the old value only when the new one is null
            if new_key in mapped_dictionary and _is_null(value):
                continue
            mapped_dictionary[new_key] = value
        else:
            # unmapped keys pass through unchanged
            mapped_dictionary[key] = value
    return mapped_dictionary
def mapping(dictionary, mapping)
takes in a dictionary and a mapping which contains new key names, and returns a new dictionary with the updated key names, i.e.: dictionary = {'a': 1, 'b': 2, 'c': 3} mapping = {'a': 'aa', 'c': 'cc'} mapped_dictionary = mapping(dictionary, mapping) mapped_dictionary = {'aa': 1, 'b': 2, 'cc': 3}
2.200696
2.243247
0.981032
def cache_mappings(file_path):
    """
    Build the full MagIC 2 --> 3 column mapping and dump it as json to
    file_path (currently PmagPy/pmagpy/mapping/maps.py).

    Parameters
    ----------
    file_path : str
        full path of the file to write ("all_maps = <json>")

    Returns
    -------
    maps : dict
        {table_name: {magic2_col_name: magic3_col_name, ...}, ...}
    """
    def _record(col2, col3, target):
        # warn when a 2.5 column would be remapped to a different 3.0 name
        if col2 in target:
            if col3 != target[col2]:
                print('W- OVERWRITING')
                print('was:', col2, target[col2])
                print('now:', col2, col3)
        target[col2] = col3

    def _table_mapping(dm_type, dm):
        # 3.0 table name -> the 2.5 tables whose columns feed it
        lookup = {'measurements': ['magic_measurements'],
                  'locations': ['er_locations'],
                  'sites': ['er_sites', 'pmag_sites'],
                  'samples': ['er_samples', 'pmag_samples'],
                  'specimens': ['er_specimens', 'pmag_specimens'],
                  'ages': ['er_ages'],
                  'criteria': ['pmag_criteria'],
                  'images': ['er_images'],
                  'contribution': []}
        relevant_tables = lookup[dm_type]
        col_map = {}
        for label, row in dm.iterrows():
            prev_cols = row['previous_columns']
            # only rows with one or more corresponding 2.5 columns
            if not isinstance(prev_cols, list):
                continue
            for prev in prev_cols:
                table2 = prev['table']
                col2 = prev['column']
                if table2 in relevant_tables:
                    _record(col2, label, col_map)
                elif table2 in ("pmag_results", "rmag_results"):
                    # results columns only fill gaps -- never override a
                    # mapping already provided by the table's own columns
                    if label not in col_map.values() and col2 not in col_map.keys():
                        _record(col2, label, col_map)
        return col_map

    data_model = DataModel()
    maps = {}
    for table_name in data_model.dm:
        maps[table_name] = _table_mapping(table_name, data_model.dm[table_name])
    # write maps out to file
    with open(file_path, 'w') as fout:
        fout.write("all_maps = ")
        json.dump(maps, fout)
    return maps
def cache_mappings(file_path)
Make a full mapping for 2 --> 3 columns. Output the mapping to json in the specified file_path. Note: This file is currently called maps.py, full path is PmagPy/pmagpy/mapping/maps.py. Parameters ---------- file_path : string with full file path to dump mapping json. Returns --------- maps : nested dictionary with format {table_name: {magic2_col_name: magic3_col_name, ...}, ...}
3.236662
2.982852
1.08509
def get_thellier_gui_meas_mapping(input_df, output=2):
    """
    Return the measurement-column mapping Thellier GUI should use, with
    special handling for treat_step_num / measurement /
    measurement_number.

    Parameters
    ----------
    input_df : pandas DataFrame
        MagIC records
    output : int
        target MagIC data model (2 or 3)

    Returns
    -------
    mapping : dict
        used in convert_meas_df_thellier_gui
    """
    if int(output) == 2:
        # 3 --> 2
        col_map = meas_magic3_2_magic2_map.copy()
        if 'treat_step_num' in input_df.columns:
            # treat_step_num takes over measurement_number, and the
            # plain 'measurement' mapping is dropped
            col_map.update({'treat_step_num': 'measurement_number'})
            col_map.pop('measurement')
        return col_map
    # 2 --> 3
    col_map = meas_magic2_2_magic3_map.copy()
    if 'measurement' in input_df.columns:
        col_map.pop('measurement_number')
        try:
            # small integers look like step numbers, not measurement ids
            first_num = int(input_df.iloc[0]['measurement_number'])
            if first_num < 100:
                col_map['measurement_number'] = 'treat_step_num'
        except ValueError:
            pass
    return col_map
def get_thellier_gui_meas_mapping(input_df, output=2)
Get the appropriate mapping for translating measurements in Thellier GUI. This requires special handling for treat_step_num/measurement/measurement_number. Parameters ---------- input_df : pandas DataFrame MagIC records output : int output to this MagIC data model (2 or 3) Output -------- mapping : dict (used in convert_meas_df_thellier_gui)
2.300471
2.082084
1.104889
def convert_meas_df_thellier_gui(meas_df_in, output):
    """
    Rename measurement dataframe columns from MagIC 2 --> 3 or vice
    versa for Thellier GUI.  Uses treat_step_num --> measurement_number
    when available, otherwise measurement --> measurement_number.

    Parameters
    ----------
    meas_df_in : pandas DataFrame
        input dataframe with measurement data
    output : int
        output to MagIC 2 or MagIC 3
    """
    col_map = get_thellier_gui_meas_mapping(meas_df_in, int(output))
    converted = meas_df_in.rename(columns=col_map)
    if 'measurement' not in converted.columns:
        # keep the original measurement ids available either way
        converted['measurement'] = meas_df_in['measurement']
    return converted
def convert_meas_df_thellier_gui(meas_df_in, output)
Take a measurement dataframe and convert column names from MagIC 2 --> 3 or vice versa. Use treat_step_num --> measurement_number if available, otherwise measurement --> measurement_number. Parameters ---------- meas_df_in : pandas DataFrame input dataframe with measurement data output : int output to MagIC 2 or MagIC 3
2.549783
3.075717
0.829004
def convert_meas(direction, Rec):
    """
    Convert a measurement record from MagIC 2.5 to 3.0
    (direction='magic3').  The reverse direction ('magic2') is not
    implemented yet and returns None.
    """
    if direction != 'magic3':
        # magic3 -> magic2 conversion hasn't been added yet
        return None
    converted = {}
    # transfer known columns, renaming them to data model 3.0
    for old_col, new_col in meas_magic2_2_magic3_map.items():
        if old_col in Rec:
            converted[new_col] = Rec[old_col]
    return converted
def convert_meas(direction, Rec)
converts measurements tables from magic 2 to 3 (direction=magic3) or from model 3 to 2.5 (direction=magic2) [not available]
9.630214
7.261703
1.326165
# directional # do directional stuff first # a few things need cleaning up dir_df = sites_df.copy().dropna( subset=['dir_dec', 'dir_inc']) # delete blank directions # sort by absolute value of vgp_lat in order to eliminate duplicate rows for # directions put in by accident on intensity rows DirCols = ["Site", "TC (%)", "Dec.", "Inc.", "N", "k ", "R", "a95", "VGP Lat", "VGP Long"] columns = ['site', 'dir_tilt_correction', 'dir_dec', 'dir_inc', 'dir_n_samples', 'dir_k', 'dir_r', 'dir_alpha95', 'vgp_lat', 'vgp_lon'] dm3_to_readable = dict(zip(columns, DirCols)) if len(dir_df) > 0: for col in ['dir_n_samples', 'dir_tilt_correction']: if col in dir_df.columns: dir_df[col] = dir_df[col].values.astype('int') columns = dir_df.columns.intersection(columns) has_vgps = False if 'vgp_lat' in dir_df.columns: test_vgp = dir_df.dropna(subset=['vgp_lat', 'vgp_lon']) if len(test_vgp) > 0: has_vgps = True if has_vgps: dir_df['vgp_lat_abs'] = dir_df.vgp_lat.abs() dir_df.sort_values(by=['site', 'vgp_lat_abs'], ascending=False, inplace=True) dir_df = dir_df[columns] # this will take the first record for each site's directions (including VGP lat if present) dir_df.drop_duplicates( subset=['dir_dec', 'dir_inc', 'site'], inplace=True) else: dir_df.drop_duplicates( subset=['dir_dec', 'dir_inc', 'site'], inplace=True) dir_df = dir_df[['site', 'dir_tilt_correction', 'dir_dec', 'dir_inc', 'dir_n_samples', 'dir_k', 'dir_r', 'dir_alpha95']] dir_df.rename(dm3_to_readable, axis='columns', inplace=True) dir_df.sort_values(by=['Site'], inplace=True, ascending=True) new_cols = list(dir_df.columns.drop('Site')) dir_df = dir_df[['Site'] + new_cols] return dir_df
def convert_site_dm3_table_directions(sites_df)
Convert MagIC site headers to short/readable headers for a figure (used by ipmag.sites_extract) Directional table only. Parameters ---------- sites_df : pandas DataFrame sites information Returns --------- dir_df : pandas DataFrame directional site data with easily readable headers
3.459689
3.370052
1.026598
def get_coordinate_system(self):
    """
    Survey self.Data for the coordinate systems that actually have data.

    Returns
    -------
    initial_coordinate, coordinate_list : str, list
        e.g. 'geographic', ['specimen', 'geographic']; the initial
        system becomes 'geographic' as soon as any specimen has
        geographic data, otherwise it stays 'specimen'.
    """
    available = ['specimen']
    initial = 'specimen'
    for spec in self.specimens:
        spec_data = self.Data[spec]
        if 'geographic' not in available and spec_data['zijdblock_geo']:
            available.append('geographic')
            initial = 'geographic'
        if 'tilt-corrected' not in available and spec_data['zijdblock_tilt']:
            available.append('tilt-corrected')
    return initial, available
def get_coordinate_system(self)
Check self.Data for available coordinate systems. Returns --------- initial_coordinate, coordinate_list : str, list i.e., 'geographic', ['specimen', 'geographic']
3.629201
2.615927
1.387348
self.initialize_CART_rot(s) # Draw Zij plot self.draw_zijderveld() # Draw specimen equal area self.draw_spec_eqarea() # Draw M/M0 plot ( or NLT data on the same area in the GUI) self.draw_MM0() # If measurements are selected redisplay selected data if len(self.selected_meas) > 0: self.plot_selected_meas() # Draw high level equal area if update_high_plots: self.plot_high_levels_data() self.canvas4.draw()
def draw_figure(self, s, update_high_plots=True)
Convenience function that sets the current specimen to s and calculates data for that specimen, then redraws all plots. Parameters ---------- s : specimen to set the current specimen to update_high_plots : bool which decides if the high level mean plot updates (default: True)
11.479923
10.821042
1.060889
# self.toolbar4.home() high_level = self.level_box.GetValue() self.UPPER_LEVEL_NAME = self.level_names.GetValue() self.UPPER_LEVEL_MEAN = self.mean_type_box.GetValue() draw_net(self.high_level_eqarea) what_is_it = self.level_box.GetValue()+": "+self.level_names.GetValue() self.high_level_eqarea.text(-1.2, 1.15, what_is_it, { 'family': self.font_type, 'fontsize': 10*self.GUI_RESOLUTION, 'style': 'normal', 'va': 'center', 'ha': 'left'}) if self.ie_open: self.ie.draw_net() self.ie.write(what_is_it) # plot elements directions self.plot_high_level_elements() # plot elements means self.plot_high_level_means() # update high level stats after plotting in case of change self.update_high_level_stats() # check sample orietation if self.check_orient_on: self.calc_and_plot_sample_orient_check() self.canvas4.draw() if self.ie_open: self.ie.draw()
def plot_high_levels_data(self)
Complicated function that draws the high level mean plot on canvas4, draws all specimen, sample, or site interpretations according to the UPPER_LEVEL_SHOW variable, draws the fisher mean or fisher mean by polarity of all interpretations displayed, draws sample orientation check if on, and if interpretation editor is open it calls the interpretation editor to have it draw the same things.
5.886344
5.306547
1.109261
def get_levels_and_coordinates_names(self):
    """
    Return the current high-level-mean plot selection.

    Returns
    -------
    (high_level_type, high_level_name, dirtype) : tuple
        e.g. ('sites', 'MySite', 'DA-DIR-GEO'); dirtype encodes the
        active coordinate system (DA-DIR / DA-DIR-GEO / DA-DIR-TILT)
    """
    # map the coordinate system onto its direction-data code
    if self.COORDINATE_SYSTEM == "geographic":
        dirtype = 'DA-DIR-GEO'
    elif self.COORDINATE_SYSTEM == "tilt-corrected":
        dirtype = 'DA-DIR-TILT'
    else:
        dirtype = 'DA-DIR'
    # pluralize the selected level for use as the high-level type
    level = self.level_box.GetValue()
    if level == 'sample':
        high_level_type = 'samples'
    elif level == 'site':
        high_level_type = 'sites'
    elif level == 'location':
        high_level_type = 'locations'
    elif level == 'study':
        high_level_type = 'study'
    high_level_name = str(self.level_names.GetValue())
    return (high_level_type, high_level_name, dirtype)
def get_levels_and_coordinates_names(self)
Get the current level of the high level mean plot and the name of the corresponding site, study, etc., as well as the code for the current coordinate system. Returns ------- (high_level_type,high_level_name,coordinate_system) : tuple object containing current high level type, name, and coordinate system being analyzed
2.987525
2.651896
1.126562
# Plot one interpretation (pars, as returned by pmag.domean) on the
# given equal-area axes `fig`, mirroring it to the interpretation editor
# when that window is open.  Best-fit planes are drawn as great circles,
# everything else as a single direction symbol.
if pars == {}:
    pass
elif 'calculation_type' in list(pars.keys()) and pars['calculation_type'] == 'DE-BFP':
    # best-fit plane: draw its great circle (upper hemisphere cyan,
    # lower hemisphere blue)
    ymin, ymax = fig.get_ylim()
    xmin, xmax = fig.get_xlim()
    D_c, I_c = pmag.circ(pars["specimen_dec"], pars["specimen_inc"], 90)
    X_c_up, Y_c_up = [], []
    X_c_d, Y_c_d = [], []
    for k in range(len(D_c)):
        XY = pmag.dimap(D_c[k], I_c[k])
        if I_c[k] < 0:
            X_c_up.append(XY[0])
            Y_c_up.append(XY[1])
        if I_c[k] > 0:
            X_c_d.append(XY[0])
            Y_c_d.append(XY[1])
    fig.plot(X_c_d, Y_c_d, 'b', lw=0.5)
    fig.plot(X_c_up, Y_c_up, 'c', lw=0.5)
    if self.ie_open:
        self.ie.plot(X_c_d, Y_c_d, 'b', lw=0.5)
        self.ie.plot(X_c_up, Y_c_up, 'c', lw=0.5)
    # restore the limits clobbered by plotting
    fig.set_xlim(xmin, xmax)
    fig.set_ylim(ymin, ymax)
# plot best-fit direction
else:
    # accept either specimen_dec/specimen_inc or plain dec/inc keys
    if "specimen_dec" in list(pars.keys()) and "specimen_inc" in list(pars.keys()):
        dec = pars["specimen_dec"]
        inc = pars["specimen_inc"]
    elif "dec" in list(pars.keys()) and "inc" in list(pars.keys()):
        dec = pars["dec"]
        inc = pars["inc"]
    else:
        print(("either dec or inc missing from values recived for high level plot, was given %s, aborting" % (
            str(pars))))
        return
    XY = pmag.dimap(float(dec), float(inc))
    # lower hemisphere (inc > 0): filled symbol; upper: open symbol;
    # pars may carry its own color
    if inc > 0:
        if 'color' in list(pars.keys()):
            FC = pars['color']
            EC = pars['color']
            SIZE = 15*self.GUI_RESOLUTION
        else:
            FC = 'grey'
            EC = 'grey'
            SIZE = 15*self.GUI_RESOLUTION
    else:
        if 'color' in list(pars.keys()):
            FC = 'white'
            EC = pars['color']
            SIZE = 15*self.GUI_RESOLUTION
        else:
            FC = 'white'
            EC = 'grey'
            SIZE = 15*self.GUI_RESOLUTION
    fig.scatter([XY[0]], [XY[1]], marker='o', edgecolor=EC,
                facecolor=FC, s=SIZE, lw=1, clip_on=False)
    if self.ie_open:
        self.ie.scatter([XY[0]], [XY[1]], marker='o', edgecolor=EC,
                        facecolor=FC, s=SIZE, lw=1, clip_on=False)
def plot_eqarea_pars(self, pars, fig)
Given a dictionary of parameters (pars) that is returned from pmag.domean plots those pars to the given fig
2.167434
2.152554
1.006913
# Plot a mean-direction dict (from pmag.dofisher, pmag.dolnp, or
# pmag.dobingham) with its alpha95 confidence circle on equal-area axes
# `fig`; 'Fisher by polarity' means are expanded into one plot per
# polarity mode.  All created artists are appended to
# self.displayed_means.
mpars_to_plot = []
if meanpars == {}:
    return
if meanpars['calculation_type'] == 'Fisher by polarity':
    # each non-empty dict-valued mode is a separate mean to plot
    for mode in list(meanpars.keys()):
        if type(meanpars[mode]) == dict and meanpars[mode] != {}:
            mpars_to_plot.append(meanpars[mode])
else:
    mpars_to_plot.append(meanpars)
ymin, ymax = fig.get_ylim()
xmin, xmax = fig.get_xlim()
if 'color' in meanpars:
    color = meanpars['color']
else:
    color = 'black'
size, alpha = 30, 1.
# put on the mean direction
for mpars in mpars_to_plot:
    XYM = pmag.dimap(float(mpars["dec"]), float(mpars["inc"]))
    # lower hemisphere (inc > 0): filled; upper: open symbol
    if float(mpars["inc"]) > 0:
        FC = color
        EC = 'black'
    else:
        FC = 'white'
        EC = color
    self.displayed_means.append(fig.scatter([XYM[0]], [
        XYM[1]], marker='o', edgecolor=EC, facecolor=FC, s=size, lw=1, clip_on=False, alpha=alpha))
    if "alpha95" in list(mpars.keys()):
        # get the alpha95
        Xcirc, Ycirc = [], []
        Da95, Ia95 = pmag.circ(float(mpars["dec"]), float(
            mpars["inc"]), float(mpars["alpha95"]))
        for k in range(len(Da95)):
            XY = pmag.dimap(Da95[k], Ia95[k])
            Xcirc.append(XY[0])
            Ycirc.append(XY[1])
        self.displayed_means.append(
            fig.plot(Xcirc, Ycirc, color, alpha=alpha))
    if self.ie_open:
        # mirror into the interpretation editor
        self.displayed_means.append(self.ie.scatter([XYM[0]], [
            XYM[1]], marker='o', edgecolor=EC, facecolor=FC, s=size, lw=1, clip_on=False, alpha=alpha))
        if "alpha95" in list(mpars.keys()):
            self.displayed_means.append(
                self.ie.plot(Xcirc, Ycirc, color, alpha=alpha))
        self.ie.eqarea.set_xlim(xmin, xmax)
        self.ie.eqarea.set_ylim(ymin, ymax)
# restore the limits clobbered by plotting
fig.set_xlim(xmin, xmax)
fig.set_ylim(ymin, ymax)
def plot_eqarea_mean(self, meanpars, fig)
Given a dictionary of parameters from pmag.dofisher, pmag.dolnp, or pmag.dobingham (meanpars) plots parameters to fig
2.739015
2.686801
1.019434
# Validate and create a new Fit (interpretation) for `specimen` with the
# given name, bounds and calculation type; returns the new Fit, or None
# when any sanity check fails.
if specimen not in list(self.Data.keys()) and not suppress_warnings:
    self.user_warning(
        "there is no measurement data for %s and therefore no interpretation can be created for this specimen" % (specimen))
    return
# both bounds, when given, must be actual measurement steps
if fmax != None and fmax not in self.Data[specimen]['zijdblock_steps'] or fmin != None and fmin not in self.Data[specimen]['zijdblock_steps']:
    return
if not (specimen in list(self.pmag_results_data['specimens'].keys())):
    self.pmag_results_data['specimens'][specimen] = []
next_fit = str(len(self.pmag_results_data['specimens'][specimen]) + 1)
# unusable names (None, empty, blank, or duplicate) fall back to an
# auto-generated 'Fit N'
if name == None or name in [x.name for x in self.pmag_results_data['specimens'][specimen]] or name == "" or name.replace(" ", "") == "":
    name = ('Fit ' + next_fit)
if name in [x.name for x in self.pmag_results_data['specimens'][specimen]]:
    print('bad name')
    return
if color == None:
    # cycle through the GUI's color palette
    color = self.colors[(int(next_fit)-1) % len(self.colors)]
new_fit = Fit(name, fmax, fmin, color, self, PCA_type, saved)
if fmin != None and fmax != None:
    # compute PCA parameters for the bounded fit right away; bail out if
    # no direction could be calculated
    new_fit.put(specimen, self.COORDINATE_SYSTEM, self.get_PCA_parameters(
        specimen, new_fit, fmin, fmax, self.COORDINATE_SYSTEM, PCA_type))
    if ('specimen_dec' not in list(new_fit.get(self.COORDINATE_SYSTEM).keys()) or 'specimen_inc' not in list(new_fit.get(self.COORDINATE_SYSTEM).keys()))\
            and not suppress_warnings:
        TEXT = "Could not calculate dec or inc for specimen %s component %s with bounds %s and %s in coordinate_system %s, component not added" % (
            specimen, name, fmin, fmax, self.COORDINATE_SYSTEM)
        self.user_warning(TEXT)
        print(TEXT)
        return
self.pmag_results_data['specimens'][specimen].append(new_fit)
samp = self.Data_hierarchy['sample_of_specimen'][specimen]
# mark the fit bad when the parent sample's orientation is flagged bad
# (defaulting an absent flag to 'g' / good)
if samp in list(self.Data_info['er_samples'].keys()):
    if 'sample_orientation_flag' not in self.Data_info['er_samples'][samp]:
        self.Data_info['er_samples'][samp]['sample_orientation_flag'] = 'g'
    samp_flag = self.Data_info['er_samples'][samp]['sample_orientation_flag']
    if samp_flag == 'b':
        self.mark_fit_bad(new_fit)
self.close_warning = True
return new_fit
def add_fit(self, specimen, name, fmin, fmax, PCA_type="DE-BFL", color=None, suppress_warnings=False, saved=True)
Goes through the data checks required to add an interpretation to the param specimen with the name param name, the bounds param fmin and param fmax, and calculation type param PCA_type. Parameters ---------- specimen : specimen with measurement data to add the interpretation to name : name of the new interpretation fmin : lower bound of new interpretation fmax : upper bound of new interpretation PCA_type : type of regression or mean for new interpretation (default: DE-BFL or line) color : color to plot the new interpretation in Returns ------- new Fit object or None if fit could not be added
2.996965
2.955424
1.014056
if specimen == None: for spec in self.pmag_results_data['specimens']: if fit in self.pmag_results_data['specimens'][spec]: specimen = spec break if specimen not in self.pmag_results_data['specimens']: return if fit in self.pmag_results_data['specimens'][specimen]: self.pmag_results_data['specimens'][specimen].remove(fit) if fit == self.current_fit: if self.pmag_results_data['specimens'][specimen]: self.pmag_results_data['specimens'][specimen][-1].select() else: self.current_fit = None self.close_warning = True self.calculate_high_levels_data() if self.ie_open: self.ie.update_editor() self.update_selection()
def delete_fit(self, fit, specimen=None)
removes fit from GUI results data Parameters ---------- fit : fit to remove specimen : specimen of fit to remove, if not provided and set to None then the function will find the specimen itself
2.671973
2.696558
0.990883
if "age" not in list(er_ages_rec.keys()): return(er_ages_rec) if "age_unit" not in list(er_ages_rec.keys()): return(er_ages_rec) if er_ages_rec["age_unit"] == "": return(er_ages_rec) if er_ages_rec["age"] == "": if "age_range_high" in list(er_ages_rec.keys()) and "age_range_low" in list(er_ages_rec.keys()): if er_ages_rec["age_range_high"] != "" and er_ages_rec["age_range_low"] != "": er_ages_rec["age"] = scipy.mean( [float(er_ages_rec["age_range_high"]), float(er_ages_rec["age_range_low"])]) if er_ages_rec["age"] == "": return(er_ages_rec) age_unit = er_ages_rec["age_unit"] # Fix 'age': mutliplier = 1 if age_unit == "Ga": mutliplier = -1e9 if age_unit == "Ma": mutliplier = -1e6 if age_unit == "Ka": mutliplier = -1e3 if age_unit == "Years AD (+/-)" or age_unit == "Years Cal AD (+/-)": mutliplier = 1 if age_unit == "Years BP" or age_unit == "Years Cal BP": mutliplier = 1 age = float(er_ages_rec["age"])*mutliplier if age_unit == "Years BP" or age_unit == "Years Cal BP": age = 1950-age er_ages_rec['age_cal_year'] = age # Fix 'age_range_low': age_range_low = age age_range_high = age age_sigma = 0 if "age_sigma" in list(er_ages_rec.keys()) and er_ages_rec["age_sigma"] != "": age_sigma = float(er_ages_rec["age_sigma"])*mutliplier if age_unit == "Years BP" or age_unit == "Years Cal BP": age_sigma = 1950-age_sigma age_range_low = age-age_sigma age_range_high = age+age_sigma if "age_range_high" in list(er_ages_rec.keys()) and "age_range_low" in list(er_ages_rec.keys()): if er_ages_rec["age_range_high"] != "" and er_ages_rec["age_range_low"] != "": age_range_high = float( er_ages_rec["age_range_high"])*mutliplier if age_unit == "Years BP" or age_unit == "Years Cal BP": age_range_high = 1950-age_range_high age_range_low = float(er_ages_rec["age_range_low"])*mutliplier if age_unit == "Years BP" or age_unit == "Years Cal BP": age_range_low = 1950-age_range_low er_ages_rec['age_cal_year_range_low'] = age_range_low er_ages_rec['age_cal_year_range_high'] = age_range_high 
return(er_ages_rec)
def convert_ages_to_calendar_year(self, er_ages_rec)
convert all age units to calendar year Parameters ---------- er_ages_rec : Dict type object containing preferably at least keys 'age', 'age_unit', and either 'age_range_high', 'age_range_low' or 'age_sigma' Returns ------- er_ages_rec : Same dict object input but altered to have new records 'age_cal_year_range_low' and 'age_cal_year_range_high'
1.647712
1.567827
1.050952
self.warning_text = "" if self.s in list(self.pmag_results_data['specimens'].keys()): for fit in self.pmag_results_data['specimens'][self.s]: beg_pca, end_pca = self.get_indices( fit, fit.tmin, fit.tmax, self.s) if beg_pca == None or end_pca == None: self.warning_text += "%s to %s are invalid bounds, to fit %s.\n" % ( fit.tmin, fit.tmax, fit.name) elif end_pca - beg_pca < 2: self.warning_text += "there are not enough points between %s to %s, on fit %s.\n" % ( fit.tmin, fit.tmax, fit.name) else: check_duplicates = [] warning_issued = [] # keep track of warnings issued to avoid redundant warnings # if within range, attempt to go one additional step beyond # tmax so that duplicates at the upper bound are caught if (end_pca + 2) < len(self.Data[self.s]['zijdblock_steps']): check_endpoint = end_pca + 2 else: check_endpoint = end_pca + 1 for s, f in zip(self.Data[self.s]['zijdblock_steps'][beg_pca:check_endpoint], self.Data[self.s]['measurement_flag'][beg_pca:check_endpoint]): if f == 'g' and [s, 'g'] in check_duplicates: if s == fit.tmin and s not in warning_issued: self.warning_text += ("There are multiple good %s " + "steps at the upper bound of Fit %s. The first " + "measurement will be used as the lower bound.\n") % ( s, fit.name) # warning_issued_low.append(s) warning_issued.append(s) elif s == fit.tmax and s not in warning_issued: self.warning_text += ("There are multiple good %s " + "steps at the upper bound of Fit %s. The first " + "measurement will be used as the upper bound.\n") % ( s, fit.name) # warning_issued_high.append(s) warning_issued.append(s) elif s not in warning_issued: self.warning_text += ("Within Fit %s, there are " + "multiple good measurements at the %s step. 
All " + "good measurements are included in the fit.\n") % ( fit.name, s) warning_issued.append(s) else: pass else: check_duplicates.append([s, f]) if self.s in list(self.Data.keys()): if not self.Data[self.s]['zijdblock_geo']: self.warning_text += "There is no geographic data for this specimen.\n" if not self.Data[self.s]['zijdblock_tilt']: self.warning_text += "There is no tilt-corrected data for this specimen.\n"
def generate_warning_text(self)
generates warnings for the current specimen then adds them to the current warning text for the GUI which will be rendered on a call to update_warning_box.
3.005062
2.953909
1.017317
# import pdb; pdb.set_trace() acceptance_criteria = pmag.initialize_acceptance_criteria() if self.data_model == 3: if criteria_file_name == None: criteria_file_name = "criteria.txt" contribution = cb.Contribution(self.WD, read_tables=[ 'criteria'], custom_filenames={'criteria': criteria_file_name}) if 'criteria' in contribution.tables: crit_container = contribution.tables['criteria'] crit_data = crit_container.df crit_data = crit_data.to_dict('records') for crit in crit_data: m2_name = map_magic.convert_direction_criteria( 'magic2', crit['table_column']) if m2_name != "": try: if crit['criterion_value'] == 'True': acceptance_criteria[m2_name]['value'] = 1 else: acceptance_criteria[m2_name]['value'] = 0 acceptance_criteria[m2_name]['value'] = float( crit['criterion_value']) except ValueError: self.user_warning("%s is not a valid comparitor for %s, skipping this criteria" % ( str(crit['criterion_value']), m2_name)) continue acceptance_criteria[m2_name]['pmag_criteria_code'] = crit['criterion'] return acceptance_criteria else: if criteria_file_name == None: criteria_file_name = "pmag_criteria.txt" try: acceptance_criteria = pmag.read_criteria_from_file( os.path.join(self.WD, criteria_file_name), acceptance_criteria) except (IOError, OSError) as e: self.user_warning("File %s not found in directory %s aborting opperation" % ( criteria_file_name, self.WD)) return acceptance_criteria
def read_criteria_file(self, criteria_file_name=None)
reads 2.5 or 3.0 formatted PmagPy criteria file and returns a set of nested dictionary 2.5 formatted criteria data that can be passed into pmag.grade to filter data. Parameters ---------- criteria_file : name of criteria file to read in Returns ------- nested dictionary 2.5 formatted criteria data
3.141894
3.205125
0.980272
if tmin == '' or tmax == '': return beg_pca, end_pca = self.get_indices(fit, tmin, tmax, specimen) if coordinate_system == 'geographic' or coordinate_system == 'DA-DIR-GEO': block = self.Data[specimen]['zijdblock_geo'] elif coordinate_system == 'tilt-corrected' or coordinate_system == 'DA-DIR-TILT': block = self.Data[specimen]['zijdblock_tilt'] else: block = self.Data[specimen]['zijdblock'] if block == []: print(("-E- no measurement data for specimen %s in coordinate system %s" % (specimen, coordinate_system))) mpars = {} elif end_pca > beg_pca and end_pca - beg_pca > 1: try: # preformes regression mpars = pmag.domean(block, beg_pca, end_pca, calculation_type) except: print((block, beg_pca, end_pca, calculation_type, specimen, fit.name, tmin, tmax, coordinate_system)) return if 'specimen_direction_type' in mpars and mpars['specimen_direction_type'] == 'Error': print(("-E- no measurement data for specimen %s in coordinate system %s" % (specimen, coordinate_system))) return {} else: mpars = {} for k in list(mpars.keys()): try: if math.isnan(float(mpars[k])): mpars[k] = 0 except: pass if "DE-BFL" in calculation_type and 'specimen_dang' not in list(mpars.keys()): mpars['specimen_dang'] = 0 if 'best fit vector' in self.plane_display_box.GetValue(): self.calculate_best_fit_vectors() return(mpars)
def get_PCA_parameters(self, specimen, fit, tmin, tmax, coordinate_system, calculation_type)
Uses pmag.domean to perform a line, line-with-origin, line-anchored, or plane least-squares regression or a fisher mean on the measurement data of specimen in coordinate system between bounds tmin to tmax Parameters ---------- specimen : specimen with measurement data in self.Data fit : fit for which the regression or mean is being applied (used for calculating measurement index of tmin and tmax) tmin : lower bound of measurement data tmax : upper bound of measurement data coordinate_system : which coordinate system the measurement data should be in calculation_type : type of regression or mean to perform (options - DE-BFL:line,DE-BFL-A:line-anchored,DE-BFL-O:line-with- origin,DE-FM:fisher,DE-BFP:plane) Returns ------- mpars : a 2.5 data model dictionary type specimen record of the dec, inc, etc of the regression or mean
3.878672
3.312064
1.171074