code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
DRATS = (old_div(sum_ptrm_checks, x_Arai[end])) * 100.
DRATS_prime = (old_div(sum_abs_ptrm_checks, x_Arai[end])) * 100.
return DRATS, DRATS_prime
def get_DRATS(sum_ptrm_checks, sum_abs_ptrm_checks, x_Arai, end)
input: sum of ptrm check diffs, sum of absolute value of ptrm check diffs, x_Arai set of points, end. output: DRATS (uses sum of diffs), DRATS_prime (uses sum of absolute diffs)
2.101726
2.13367
0.985029
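Both returns are simple percentage ratios of the accumulated pTRM-check differences to the total pTRM gained at the end of the chosen segment. A minimal usage sketch with made-up numbers (not from a real specimen):

import numpy
x_Arai = numpy.array([0., 0.2, 0.5, 0.9, 1.3])  # cumulative pTRM gained
end = 4                                          # last point of the best-fit segment
DRATS, DRATS_prime = get_DRATS(0.04, 0.10, x_Arai, end)
# DRATS = 0.04 / 1.3 * 100 ≈ 3.08; DRATS_prime = 0.10 / 1.3 * 100 ≈ 7.69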
if not n_pTRM:
    return float('nan'), float('nan')
mean_DRAT = ((old_div(1., n_pTRM)) * (old_div(sum_ptrm_checks, L))) * 100
mean_DRAT_prime = ((old_div(1., n_pTRM)) * (old_div(sum_abs_ptrm_checks, L))) * 100
return mean_DRAT, mean_DRAT_prime
def get_mean_DRAT(sum_ptrm_checks, sum_abs_ptrm_checks, n_pTRM, L)
input: sum_ptrm_checks, sum_abs_ptrm_checks, n_pTRM, L output: mean DRAT (the average difference produced by a pTRM check, normalized by the length of the best-fit line)
1.980775
2.110226
0.938656
if not n_pTRM:
    return float('nan'), float('nan')
mean_DEV = ((old_div(1., n_pTRM)) * (old_div(sum_ptrm_checks, delta_x_prime))) * 100
mean_DEV_prime = ((old_div(1., n_pTRM)) * (old_div(sum_abs_ptrm_checks, delta_x_prime))) * 100
return mean_DEV, mean_DEV_prime
def get_mean_DEV(sum_ptrm_checks, sum_abs_ptrm_checks, n_pTRM, delta_x_prime)
input: sum_ptrm_checks, sum_abs_ptrm_checks, n_pTRM, delta_x_prime output: Mean deviation of a pTRM check
2.097305
2.25805
0.928812
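get_mean_DRAT and get_mean_DEV apply the same averaging and differ only in the normalizer: the best-fit line length L versus the horizontal (TRM) length delta_x_prime of the segment. A hedged sketch with made-up values:

sum_ptrm_checks, sum_abs_ptrm_checks, n_pTRM = 0.04, 0.10, 4
mean_DRAT, mean_DRAT_prime = get_mean_DRAT(sum_ptrm_checks, sum_abs_ptrm_checks, n_pTRM, L=1.5)
mean_DEV, mean_DEV_prime = get_mean_DEV(sum_ptrm_checks, sum_abs_ptrm_checks, n_pTRM, delta_x_prime=1.2)
# mean_DRAT_prime = (1/4) * (0.10 / 1.5) * 100 ≈ 1.67
# mean_DEV_prime  = (1/4) * (0.10 / 1.2) * 100 ≈ 2.08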
PTRMS = numpy.array(PTRMS)
PTRM_Checks = numpy.array(PTRM_Checks)
TRM_1 = lib_direct.dir2cart(PTRMS[0, 1:3])
PTRMS_cart = []
Checks_cart = []
for num, ptrm in enumerate(PTRMS):
    ptrm_cart = lib_direct.dir2cart([PTRMS[num][1], PTRMS[num][2], old_div(PTRMS[num][3], NRM)])
    PTRMS_cart.append(ptrm_cart)
for num, check in enumerate(PTRM_Checks):
    check_cart = lib_direct.dir2cart([PTRM_Checks[num][1], PTRM_Checks[num][2], old_div(PTRM_Checks[num][3], NRM)])
    Checks_cart.append(check_cart)
return PTRMS_cart, Checks_cart, TRM_1
def get_delta_pal_vectors(PTRMS, PTRM_Checks, NRM)
takes in PTRM data in this format: [temp, dec, inc, moment, ZI or IZ] -- and PTRM_check data in this format: [temp, dec, inc, moment]. Returns them in vector form (cartesian).
2.379424
2.277274
1.044856
ptrm_temps = numpy.array(ptrms_orig)[:, 0]
check_temps = numpy.array(checks_orig)[:, 0]
index = numpy.zeros(len(ptrm_temps))
for num, temp in enumerate(ptrm_temps):
    if len(numpy.where(check_temps == temp)[0]):
        index[num] = numpy.where(check_temps == temp)[0][0]
    else:
        index[num] = float('nan')
diffs = numpy.zeros((len(ptrms_vectors), 3))
for num, ptrm in enumerate(ptrms_vectors):
    if numpy.isnan(index[num]):
        diffs[num] = numpy.array([0, 0, 0])
    else:
        diffs[num] = ptrm_checks_vectors[int(index[num])] - ptrm
C = numpy.cumsum(diffs, 0)
return diffs, C
def get_diffs(ptrms_vectors, ptrm_checks_vectors, ptrms_orig, checks_orig)
input: ptrms_vectors, ptrm_checks_vectors, ptrms_orig, checks_orig output: vector diffs between original and ptrm check, C
2.517461
2.482919
1.013912
TRM_star = numpy.zeros([len(ptrms_vectors), 3])
TRM_star[0] = [0., 0., 0.]
x_star = numpy.zeros(len(ptrms_vectors))
for num, vec in enumerate(ptrms_vectors[1:]):
    TRM_star[num+1] = vec + C[num]
for num, trm in enumerate(TRM_star):
    x_star[num] = numpy.linalg.norm(trm)
return TRM_star[start:end+1], x_star[start:end+1]
def get_TRM_star(C, ptrms_vectors, start, end)
input: C, ptrms_vectors, start, end output: TRM_star, x_star (for delta_pal statistic)
2.879695
2.785906
1.033665
#print "x_star, should be same as Xcorr / NRM" #print x_star x_star_mean = numpy.mean(x_star) x_err = x_star - x_star_mean b_star = -1* numpy.sqrt( old_div(sum(numpy.array(y_err)**2), sum(numpy.array(x_err)**2)) ) # averaged slope #print "y_segment", y_segment b_star = numpy.sign(sum(x_err * y_err)) * numpy.std(y_segment, ddof=1) / numpy.std(x_star, ddof=1) #print "b_star (should be same as corr_slope)" #print b_star return b_star
def get_b_star(x_star, y_err, y_mean, y_segment)
input: x_star, y_err, y_mean, y_segment output: b_star (corrected slope for delta_pal statistic)
3.929813
3.824688
1.027486
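b_star is the sign of the x-y covariance times the ratio of the sample standard deviations, i.e. an averaged, check-corrected slope. A small sketch with made-up values (note the y_mean argument is accepted but unused):

import numpy
x_star = numpy.array([0.0, 0.3, 0.7, 1.1])       # corrected cumulative pTRM
y_segment = numpy.array([1.0, 0.75, 0.4, 0.05])  # NRM remaining
y_err = y_segment - numpy.mean(y_segment)
b_star = get_b_star(x_star, y_err, numpy.mean(y_segment), y_segment)
# x increases while y decreases, so sign(sum(x_err * y_err)) = -1 and b_star < 0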
delta_pal = numpy.abs(old_div((b - b_star), b)) * 100
return delta_pal
def get_delta_pal(b, b_star)
input: b, b_star (actual and corrected slope) output: delta_pal
4.24024
4.0484
1.047387
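delta_pal is the percent difference between the measured Arai slope b and the check-corrected slope b_star. A one-line worked example:

delta_pal = get_delta_pal(-1.10, -1.00)  # |(-1.10 - (-1.00)) / -1.10| * 100 ≈ 9.09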
#print "-------" #print "calling get_full_delta_pal in lib" # return 0 PTRMS_cart, checks, TRM_1 = get_delta_pal_vectors(PTRMS, PTRM_Checks, NRM) # print "PTRMS_Cart", PTRMS_cart diffs, C = get_diffs(PTRMS_cart, checks, PTRMS, PTRM_Checks) # print "C", C TRM_star, x_star = get_TRM_star(C, PTRMS_cart, start, end) # print "x_star", x_star # print type(x_star) b_star = get_b_star(x_star, y_err, y_mean, y_segment) delta_pal = get_delta_pal(b, b_star) return delta_pal
def get_full_delta_pal(PTRMS, PTRM_Checks, NRM, y_err, y_mean, b, start, end, y_segment)
input: PTRMS, PTRM_Checks, NRM, y_err, y_mean, b, start, end, y_segment. Runs the full sequence necessary to get delta_pal.
3.912563
3.716794
1.052671
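A hedged end-to-end sketch, with made-up demag data in the formats the docstrings above describe (temperatures in kelvin; all numbers invented for illustration):

import numpy
PTRMS = [[273., 10., 45., 0.0, 1], [373., 10., 45., 1.0e-6, 0], [473., 10., 45., 2.0e-6, 1]]
PTRM_Checks = [[373., 10., 45., 1.1e-6]]
NRM = 3.0e-6
y_segment = numpy.array([1.0, 0.6, 0.2])  # NRM remaining, normalized
y_mean = numpy.mean(y_segment)
y_err = y_segment - y_mean
b = -1.2                                  # uncorrected Arai slope
delta_pal = get_full_delta_pal(PTRMS, PTRM_Checks, NRM, y_err, y_mean, b, 0, 2, y_segment)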
ptrms_included = []
checks_included = []
ptrms = numpy.array(ptrms)
for ptrm in ptrms:
    if ptrm[0] <= tmax:
        ptrms_included.append(ptrm)
for check in ptrm_checks:
    if check[0] <= tmax:
        checks_included.append(check)
return ptrms_included, checks_included
def get_segments(ptrms, ptrm_checks, tmax)
input: ptrms, ptrm_checks, tmax. Grabs ptrms that are done below tmax, and ptrm checks that are done below tmax AND whose starting temp is below tmax. output: ptrms_included, checks_included
2.379215
1.922018
1.237873
if self.GUI == None:
    return
self.GUI.current_fit = self
if self.tmax != None and self.tmin != None:
    self.GUI.update_bounds_boxes()
if self.PCA_type != None:
    self.GUI.update_PCA_box()
try:
    self.GUI.zijplot
except AttributeError:
    self.GUI.draw_figure(self.GUI.s)
self.GUI.fit_box.SetStringSelection(self.name)
self.GUI.get_new_PCA_parameters(-1)
def select(self)
Makes this fit the selected fit on the GUI that is its parent (Note: may be moved into GUI soon)
7.076731
5.966331
1.186111
if coordinate_system == 'DA-DIR' or coordinate_system == 'specimen':
    return self.pars
elif coordinate_system == 'DA-DIR-GEO' or coordinate_system == 'geographic':
    return self.geopars
elif coordinate_system == 'DA-DIR-TILT' or coordinate_system == 'tilt-corrected':
    return self.tiltpars
else:
    print("-E- no such parameters to fetch for " + coordinate_system + " in fit: " + self.name)
    return None
def get(self,coordinate_system)
Return the pmagpy parameters dictionary associated with this fit and the given coordinate system
@param: coordinate_system -> the coordinate system whose parameters to return
4.753367
4.107324
1.15729
if specimen != None:
    if type(new_pars) == dict:
        if 'er_specimen_name' not in list(new_pars.keys()):
            new_pars['er_specimen_name'] = specimen
        if 'specimen_comp_name' not in list(new_pars.keys()):
            new_pars['specimen_comp_name'] = self.name
if type(new_pars) != dict or 'measurement_step_min' not in list(new_pars.keys()) or 'measurement_step_max' not in list(new_pars.keys()) or 'calculation_type' not in list(new_pars.keys()):
    print("-E- invalid parameters cannot assign to fit %s for specimen %s - was given:\n%s" % (self.name, specimen, str(new_pars)))
    return self.get(coordinate_system)
self.tmin = new_pars['measurement_step_min']
self.tmax = new_pars['measurement_step_max']
self.PCA_type = new_pars['calculation_type']
if self.GUI != None:
    steps = self.GUI.Data[specimen]['zijdblock_steps']
    tl = [self.tmin, self.tmax]
    for i, t in enumerate(tl):
        if str(t) in steps:
            tl[i] = str(t)
        elif str(int(t)) in steps:
            tl[i] = str(int(t))
        elif "%.1fmT" % t in steps:
            tl[i] = "%.1fmT" % t
        elif "%.0fC" % t in steps:
            tl[i] = "%.0fC" % t
        else:
            print("-E- Step " + str(tl[i]) + " does not exist (func: Fit.put)")
            tl[i] = str(t)
    self.tmin, self.tmax = tl
elif meas_data != None:
    steps = meas_data[specimen]['zijdblock_steps']
    tl = [self.tmin, self.tmax]
    for i, t in enumerate(tl):
        if str(t) in steps:
            tl[i] = str(t)
        elif str(int(t)) in steps:
            tl[i] = str(int(t))
        elif "%.1fmT" % t in steps:
            tl[i] = "%.1fmT" % t
        elif "%.0fC" % t in steps:
            tl[i] = "%.0fC" % t
        else:
            print("-E- Step " + str(tl[i]) + " does not exist (func: Fit.put)")
            tl[i] = str(t)
    self.tmin, self.tmax = tl
else:
    self.tmin, self.tmax = list(map(str, tl))
if coordinate_system == 'DA-DIR' or coordinate_system == 'specimen':
    self.pars = new_pars
elif coordinate_system == 'DA-DIR-GEO' or coordinate_system == 'geographic':
    self.geopars = new_pars
elif coordinate_system == 'DA-DIR-TILT' or coordinate_system == 'tilt-corrected':
    self.tiltpars = new_pars
else:
    print('-E- no such coordinate system could not assign those parameters to fit')
def put(self,specimen,coordinate_system,new_pars)
Given a coordinate system and a new parameters dictionary that follows pmagpy convention, as given by the pmag.py/domean function, alters this fit's bounds and parameters to match the new data.
@param: specimen -> None if fit is for a site or a sample, or a valid specimen from self.GUI
@param: coordinate_system -> the coordinate system to alter
@param: new_pars -> the new parameters to change your fit to
@alters: tmin, tmax, pars, geopars, tiltpars, PCA_type
2.486183
2.259495
1.100327
return str(self.name) == str(name) and str(self.tmin) == str(tmin) and str(self.tmax) == str(tmax)
def has_values(self, name, tmin, tmax)
A basic fit equality checker; compares name and bounds of 2 fits
@param: name -> name of the other fit
@param: tmin -> lower bound of the other fit
@param: tmax -> upper bound of the other fit
@return: boolean comparing 2 fits
2.277257
2.717764
0.837916
#print "tail_temps: {0}, tmax: {0}".format(tail_temps, tmax) t_index = 0 adj_tmax = 0 if tmax < tail_temps[0]: return 0 try: t_index = list(tail_temps).index(tmax) except: # finds correct tmax if there was no tail check performed at tmax for temp in tail_temps: if temp <= tmax: adj_tmax = temp t_index = list(tail_temps).index(adj_tmax) incl_temps = tail_temps[0:t_index+1] # b/c not inclusive return len(incl_temps)
def get_n_tail(tmax, tail_temps)
determines number of included tail checks in best fit segment
3.506437
3.220932
1.088641
if not n_tail:
    return float('nan'), []
tail_compare = []
y_Arai_compare = []
for temp in tail_temps[:n_tail]:
    tail_index = list(tail_temps).index(temp)
    tail_check = y_tail[tail_index]
    tail_compare.append(tail_check)
    arai_index = list(t_Arai).index(temp)
    nrm_orig = y_Arai[arai_index]
    y_Arai_compare.append(nrm_orig)
diffs = numpy.array(y_Arai_compare) - numpy.array(tail_compare)
abs_diffs = abs(diffs)
max_check = max(abs_diffs)
return max_check, diffs
def get_max_tail_check(y_Arai, y_tail, t_Arai, tail_temps, n_tail)
input: y_Arai, y_tail, t_Arai, tail_temps, n_tail output: max_check, diffs
2.593626
2.303735
1.125835
if max_check == 0:
    return float('nan')
DRAT_tail = (old_div(max_check, L)) * 100.
return DRAT_tail
def get_DRAT_tail(max_check, L)
input: tail_check_max, best fit line length output: DRAT_tail
3.794321
3.183594
1.191836
if tail_check_max == 0 or numpy.isnan(tail_check_max):
    return float('nan')
delta_TR = (old_div(tail_check_max, abs(y_int))) * 100.
return delta_TR
def get_delta_TR(tail_check_max, y_int)
input: tail_check_max, y_intercept output: delta_TR
3.201471
3.077257
1.040365
if tail_check_max == 0 or numpy.isnan(tail_check_max):
    return float('nan')
MD_VDS = (old_div(tail_check_max, vds)) * 100
return MD_VDS
def get_MD_VDS(tail_check_max, vds)
input: tail_check_max, vector difference sum output: MD_VDS
3.378304
3.097763
1.090563
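DRAT_tail, delta_TR, and MD_VDS all normalize the same maximum tail-check difference, by the best-fit line length, the y-intercept, and the vector difference sum respectively. A sketch with made-up values:

tail_check_max = 0.05
DRAT_tail = get_DRAT_tail(tail_check_max, L=1.5)     # 0.05 / 1.5 * 100 ≈ 3.33
delta_TR = get_delta_TR(tail_check_max, y_int=1.02)  # 0.05 / 1.02 * 100 ≈ 4.90
MD_VDS = get_MD_VDS(tail_check_max, vds=1.35)        # 0.05 / 1.35 * 100 ≈ 3.70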
dir_path = '.'
zfile = 'zeq_redo'
if '-WD' in sys.argv:
    ind = sys.argv.index('-WD')
    dir_path = sys.argv[ind+1]
if '-h' in sys.argv:
    print(main.__doc__)
    sys.exit()
if '-f' in sys.argv:
    ind = sys.argv.index('-f')
    inspec = sys.argv[ind+1]
if '-F' in sys.argv:
    ind = sys.argv.index('-F')
    zfile = sys.argv[ind+1]
inspec = dir_path + "/" + inspec
zfile = dir_path + "/" + zfile
zredo = open(zfile, "w")
# read in DIR file
specs = []
prior_spec_data = open(inspec, 'r').readlines()
for line in prior_spec_data:
    line = line.replace("Dir", " Dir")
    line = line.replace("OKir", " OKir")
    line = line.replace("Fish", " Fish")
    line = line.replace("Man", " Man")
    line = line.replace("GC", " GC")
    line = line.replace("-T", " - T")
    line = line.replace("-M", " - M")
    rec = line.split()
    if len(rec) < 2:
        sys.exit()
    if rec[1] == 'Dir' or rec[1] == 'GC':  # skip all the other stuff
        spec = rec[0]
        specs.append(spec)
        comp_name = string.ascii_uppercase[specs.count(spec)-1]  # assign component names
        calculation_type = "DE-FM"
        if rec[1] == 'Dir' and rec[2] == "Kir":
            calculation_type = "DE-BFL"  # assume default calculation type is best-fit line
        if rec[1] == 'Dir' and rec[2] == "OKir":
            calculation_type = "DE-BFL-A"  # anchored best-fit line
        if rec[1] == 'Dir' and rec[2] == "Fish":
            calculation_type = "DE-FM"  # fisher mean
        if rec[1] == 'GC':
            calculation_type = "DE-BFP"  # best-fit plane
        min, max = rec[3], rec[5]
        beg, end = "", ""
        if min == "NRM":
            beg = 0
        if min[0] == 'M':
            beg = float(min[1:]) * 1e-3  # convert mT to T
        elif min[0] == 'T':
            beg = float(min[1:]) + 273  # convert C to kelvin
        if max[0] == 'M':
            end = float(max[1:]) * 1e-3  # convert mT to T
        elif max[0] == 'T':
            end = float(max[1:]) + 273  # convert C to kelvin
        if beg == 0:
            beg = 273
        outstring = '%s %s %s %s %s \n' % (spec, calculation_type, beg, end, comp_name)
        zredo.write(outstring)
def main()
NAME
    dir_redo.py
DESCRIPTION
    converts the Cogne DIR format to PmagPy redo file
SYNTAX
    dir_redo.py [-h] [command line options]
OPTIONS
    -h: prints help message and quits
    -f FILE: specify input file
    -F FILE: specify output file, default is 'zeq_redo'
3.47656
3.214935
1.081378
# enable or disable self.btn1a
if self.data_model_num == 3:
    self.btn1a.Enable()
else:
    self.btn1a.Disable()
# set pmag_gui_dialogs
global pmag_gui_dialogs
if self.data_model_num == 2:
    pmag_gui_dialogs = pgd2
    wx.CallAfter(self.get_wd_data2)
elif self.data_model_num == 3:
    pmag_gui_dialogs = pgd3
    wx.CallAfter(self.get_wd_data)
# do / re-do menubar
menubar = pmag_gui_menu.MagICMenu(self, data_model_num=self.data_model_num)
self.SetMenuBar(menubar)
self.menubar = menubar
def set_dm(self, num)
Make GUI changes based on data model num. Get info from WD in appropriate format.
4.307293
3.87906
1.110396
wait = wx.BusyInfo('Reading in data from current working directory, please wait...')
print('-I- Read in any available data from working directory')
self.contribution = cb.Contribution(self.WD, dmodel=self.data_model)
del wait
def get_wd_data(self)
Show dialog to get user input for which directory to set as working directory. Called by self.get_dm_and_wd
16.086525
15.915738
1.010731
wait = wx.BusyInfo('Reading in data from current working directory, please wait...')
print('-I- Read in any available data from working directory (data model 2)')
self.er_magic = builder.ErMagicBuilder(self.WD, data_model=self.data_model)
del wait
def get_wd_data2(self)
Get 2.5 data from self.WD and put it into ErMagicBuilder object. Called by get_dm_and_wd
20.272758
12.926164
1.568351
if "-WD" in sys.argv and self.FIRST_RUN: ind = sys.argv.index('-WD') self.WD = os.path.abspath(sys.argv[ind+1]) os.chdir(self.WD) self.WD = os.getcwd() self.dir_path.SetValue(self.WD) else: self.on_change_dir_button(None) #self.WD = os.getcwd() self.FIRST_RUN = False
def get_dir(self)
Choose a working directory dialog. Called by self.get_dm_and_wd.
3.718477
3.261161
1.140231
if not self.check_for_meas_file():
    return
if not self.check_for_uncombined_files():
    return
outstring = "thellier_gui.py -WD %s" % self.WD
print("-I- running python script:\n %s" % (outstring))
if self.data_model_num == 2.5:
    thellier_gui.main(self.WD, standalone_app=False, parent=self, DM=self.data_model_num)
else:
    # disable and hide Pmag GUI mainframe
    self.Disable()
    self.Hide()
    # show busyinfo
    wait = wx.BusyInfo('Compiling required data, please wait...')
    wx.SafeYield()
    # create custom Thellier GUI closing event and bind it
    ThellierGuiExitEvent, EVT_THELLIER_GUI_EXIT = newevent.NewCommandEvent()
    self.Bind(EVT_THELLIER_GUI_EXIT, self.on_analysis_gui_exit)
    # make and show the Thellier GUI frame
    thellier_gui_frame = thellier_gui.Arai_GUI(self.WD, self, standalone=False, DM=self.data_model_num, evt_quit=ThellierGuiExitEvent)
    if not thellier_gui_frame:
        print("Thellier GUI failed to start; aborting")
        del wait
        return
    thellier_gui_frame.Centre()
    thellier_gui_frame.Show()
    del wait
def on_btn_thellier_gui(self, event)
Open Thellier GUI
5.732827
5.69562
1.006533
if not self.check_for_meas_file():
    return
if not self.check_for_uncombined_files():
    return
outstring = "demag_gui.py -WD %s" % self.WD
print("-I- running python script:\n %s" % (outstring))
if self.data_model_num == 2:
    demag_gui.start(self.WD, standalone_app=False, parent=self, DM=self.data_model_num)
else:
    # disable and hide Pmag GUI mainframe
    self.Disable()
    self.Hide()
    # show busyinfo
    wait = wx.BusyInfo('Compiling required data, please wait...')
    wx.SafeYield()
    # create custom Demag GUI closing event and bind it
    DemagGuiExitEvent, EVT_DEMAG_GUI_EXIT = newevent.NewCommandEvent()
    self.Bind(EVT_DEMAG_GUI_EXIT, self.on_analysis_gui_exit)
    # make and show the Demag GUI frame
    demag_gui_frame = demag_gui.Demag_GUI(self.WD, self, write_to_log_file=False, data_model=self.data_model_num, evt_quit=DemagGuiExitEvent)
    demag_gui_frame.Centre()
    demag_gui_frame.Show()
    del wait
def on_btn_demag_gui(self, event)
Open Demag GUI
5.903833
5.761805
1.02465
dia = pw.UpgradeDialog(None)
dia.Center()
res = dia.ShowModal()
if res == wx.ID_CANCEL:
    webbrowser.open("https://www2.earthref.org/MagIC/upgrade", new=2)
    return
# turn files from 2.5 --> 3.0 (rough translation)
meas, upgraded, no_upgrade = pmag.convert_directory_2_to_3('magic_measurements.txt', input_dir=self.WD, output_dir=self.WD, data_model=self.contribution.data_model)
if not meas:
    wx.MessageBox('2.5 --> 3.0 failed. Do you have a magic_measurements.txt file in your working directory?', 'Info', wx.OK | wx.ICON_INFORMATION)
    return
# create a contribution
self.contribution = cb.Contribution(self.WD)
# make skeleton files with specimen, sample, site, location data
self.contribution.propagate_measurement_info()
# pop up
upgraded_string = ", ".join(upgraded)
if no_upgrade:
    no_upgrade_string = ", ".join(no_upgrade)
    msg = '2.5 --> 3.0 translation completed!\n\nThese 3.0 format files were created: {}.\n\nHowever, these 2.5 format files could not be upgraded: {}.\n\nTo convert all 2.5 files, use the MagIC upgrade tool: https://www2.earthref.org/MagIC/upgrade\n'.format(upgraded_string, no_upgrade_string)
    if 'criteria.txt' in upgraded:
        msg += '\nNote: Please check your criteria file for completeness and accuracy, as not all 2.5 files will be fully upgraded.'
    if 'pmag_criteria.txt' in no_upgrade:
        msg += '\nNote: Not all criteria files can be upgraded, even on the MagIC site. You may need to recreate an old pmag_criteria file from scratch in Thellier GUI or Demag GUI.'
    wx.MessageBox(msg, 'Warning', wx.OK | wx.ICON_INFORMATION)
else:
    msg = '2.5 --> 3.0 translation completed!\nThese files were converted: {}'.format(upgraded_string)
    wx.MessageBox(msg, 'Info', wx.OK | wx.ICON_INFORMATION)
def on_btn_convert_3(self, event)
Open dialog for rough conversion of 2.5 files to 3.0 files. Offer link to earthref for proper upgrade.
5.433988
5.210323
1.042927
# make sure we have a measurements file
if not self.check_for_meas_file():
    return
# make sure all files of the same type have been combined
if not self.check_for_uncombined_files():
    return
if self.data_model_num == 2:
    wait = wx.BusyInfo('Compiling required data, please wait...')
    wx.SafeYield()
    self.ErMagic_frame = ErMagicBuilder.MagIC_model_builder(self.WD, self, self.er_magic)
elif self.data_model_num == 3:
    wait = wx.BusyInfo('Compiling required data, please wait...')
    wx.SafeYield()
    self.ErMagic_frame = ErMagicBuilder.MagIC_model_builder3(self.WD, self, self.contribution)
# self.ErMagic_frame.Show()
self.ErMagic_frame.Center()
# gets total available screen space minus 30%
size = wx.DisplaySize()
size = (size[0] - 0.3 * size[0], size[1] - 0.3 * size[1])
self.ErMagic_frame.Raise()
del wait
def on_btn_metadata(self, event)
Initiate the series of windows to add metadata to the contribution.
4.647913
4.50109
1.03262
self.check_dia = pmag_er_magic_dialogs.ErMagicCheckFrame(self, 'Check Data', self.WD, self.er_magic)
def init_check_window2(self)
initiates the object that will control steps 1-6 of checking headers, filling in cell values, etc.
28.173519
26.812321
1.050768
self.check_dia = pmag_er_magic_dialogs.ErMagicCheckFrame3(self, 'Check Data', self.WD, self.contribution)
def init_check_window(self)
initiates the object that will control steps 1-6 of checking headers, filling in cell values, etc.
41.324818
39.735718
1.039992
wait = wx.BusyInfo('Compiling required data, please wait...')
wx.SafeYield()
size = wx.DisplaySize()
size = (size[0] - 0.1 * size[0], size[1] - 0.1 * size[1])
if self.data_model_num == 3:
    frame = pmag_gui_dialogs.OrientFrameGrid3(self, -1, 'demag_orient.txt', self.WD, self.contribution, size)
else:
    frame = pmag_gui_dialogs.OrientFrameGrid(self, -1, 'demag_orient.txt', self.WD, self.er_magic, size)
frame.Show(True)
frame.Centre()
self.Hide()
del wait
def on_btn_orientation(self, event)
Create and fill wxPython grid for entering orientation data.
5.697572
5.365642
1.061862
dlg = wx.FileDialog(
    None, message="choose txt file to unpack",
    defaultDir=self.WD,
    defaultFile="",
    style=wx.FD_OPEN
)
if dlg.ShowModal() == wx.ID_OK:
    FILE = dlg.GetPath()
    input_dir, f = os.path.split(FILE)
else:
    return False
outstring = "download_magic.py -f {} -WD {} -ID {} -DM {}".format(f, self.WD, input_dir, self.data_model_num)
# run as module:
print("-I- running python script:\n %s" % (outstring))
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
ex = None
try:
    if ipmag.download_magic(f, self.WD, input_dir, overwrite=True, data_model=self.data_model):
        text = "Successfully ran download_magic.py program.\nMagIC files were saved in your working directory.\nSee Terminal/message window for details."
    else:
        text = "Something went wrong. Make sure you chose a valid file downloaded from the MagIC database and try again."
except Exception as error:
    # bind to a separate name: 'except ... as ex' would unbind ex when the block exits
    ex = error
    text = "Something went wrong. Make sure you chose a valid file downloaded from the MagIC database and try again."
del wait
dlg = wx.MessageDialog(self, caption="Saved", message=text, style=wx.OK)
result = dlg.ShowModal()
if result == wx.ID_OK:
    dlg.Destroy()
if ex:
    raise ex
self.contribution = cb.Contribution(self.WD)
def on_btn_unpack(self, event)
Create dialog to choose a file to unpack with download magic. Then run download_magic and create self.contribution.
5.008838
4.635531
1.080532
if not self.check_for_uncombined_files():
    return
outstring = "upload_magic.py"
print("-I- running python script:\n %s" % (outstring))
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
self.contribution.tables['measurements'].add_measurement_names()
if self.data_model_num == 3:
    res, error_message, has_problems, all_failing_items = ipmag.upload_magic(concat=False, dir_path=self.WD, vocab=self.contribution.vocab, contribution=self.contribution)
if self.data_model_num == 2:
    res, error_message, errors = ipmag.upload_magic2(dir_path=self.WD, data_model=self.er_magic.data_model)
del wait
if res:
    text = "You are ready to upload!\n{} was generated in {}".format(os.path.split(res)[1], os.path.split(res)[0])
    dlg = pw.ChooseOne(self, "Go to MagIC for uploading", "Not ready yet", text, "Saved")
else:
    text = "There were some problems with the creation of your upload file.\nError message: {}\nSee Terminal/message window for details".format(error_message)
    dlg = wx.MessageDialog(self, caption="Error", message=text, style=wx.OK)
dlg.Centre()
result = dlg.ShowModal()
if result == wx.ID_OK:
    dlg.Destroy()
if result == wx.ID_YES:
    pw.on_database_upload(None)
if self.data_model_num == 3:
    if not res:
        from programs import magic_gui
        self.Disable()
        self.Hide()
        self.magic_gui_frame = magic_gui.MainFrame(self.WD, dmodel=self.data_model, title="Validations", contribution=self.contribution)
        self.magic_gui_frame.validation_mode = ['specimens']
        self.magic_gui_frame.failing_items = all_failing_items
        self.magic_gui_frame.change_dir_button.Disable()
        self.magic_gui_frame.Centre()
        self.magic_gui_frame.Show()
        self.magic_gui_frame.highlight_problems(has_problems)
        # change name of upload button to 'exit validation mode'
        self.magic_gui_frame.bSizer2.GetStaticBox().SetLabel('return to main GUI')
        self.magic_gui_frame.btn_upload.SetLabel("exit validation mode")
        # bind that button to quitting magic gui and re-enabling Pmag GUI
        self.magic_gui_frame.Bind(wx.EVT_BUTTON, self.on_end_validation, self.magic_gui_frame.btn_upload)
        # do binding so that closing/quitting re-opens the main frame
        self.magic_gui_frame.Bind(wx.EVT_CLOSE, self.on_end_validation)
        # this makes it work with only the validation window open
        self.magic_gui_frame.Bind(wx.EVT_MENU, lambda event: self.menubar.on_quit(event, self.magic_gui_frame), self.magic_gui_frame.menubar.file_quit)
        # this makes it work if an additional grid is open
        self.Bind(wx.EVT_MENU, lambda event: self.menubar.on_quit(event, self.magic_gui_frame), self.magic_gui_frame.menubar.file_quit)
def on_btn_upload(self, event)
Try to run upload_magic. Open validation mode if the upload file has problems.
4.665507
4.541836
1.027229
self.Enable()
self.Show()
self.magic_gui_frame.Destroy()
def on_end_validation(self, event)
Switch back from validation mode to main Pmag GUI mode. Hide validation frame and show main frame.
14.623173
7.372368
1.983511
# also delete appropriate copy file
try:
    self.help_window.Destroy()
except:
    pass
if '-i' in sys.argv:
    self.Destroy()
try:
    sys.exit()  # can raise TypeError if wx inspector was used
except Exception as ex:
    if isinstance(ex, TypeError):
        pass
    else:
        raise ex
def on_menu_exit(self, event)
Exit the GUI
9.182778
9.212513
0.996772
wd_files = os.listdir(self.WD)
if self.data_model_num == 2:
    ftypes = ['er_specimens.txt', 'er_samples.txt', 'er_sites.txt', 'er_locations.txt',
              'pmag_specimens.txt', 'pmag_samples.txt', 'pmag_sites.txt',
              'rmag_specimens.txt', 'rmag_results.txt', 'rmag_anisotropy.txt']
else:
    ftypes = ['specimens.txt', 'samples.txt', 'sites.txt', 'locations.txt']
uncombined = set()
for ftype in ftypes:
    if ftype not in wd_files:
        for f in wd_files:
            if f.endswith('_' + ftype):
                uncombined.add(ftype)
if uncombined:
    msg = 'It looks like you may have uncombined files of type(s) {} in your working directory.\nYou may want to go back to Step 1 and finish combining all files.\nIf you continue, the program will try to extract as much information as possible from your measurement file.'.format(", ".join(list(uncombined)))
    dlg = pw.ChooseOne(self, 'Continue anyway', 'Go back', msg, title="Warning!")
    res = dlg.ShowModal()
    if res == wx.ID_NO:
        return
return True
def check_for_uncombined_files(self)
Go through working directory and check for uncombined files. (I.e., location1_specimens.txt and location2_specimens.txt but no specimens.txt.) Show a warning if uncombined files are found. Return True if no uncombined files are found OR user elects to continue anyway.
3.698585
3.417016
1.082402
if self.data_model_num == 2:
    meas_file_name = "magic_measurements.txt"
    dm = "2.5"
else:
    meas_file_name = "measurements.txt"
    dm = "3.0"
if not os.path.isfile(os.path.join(self.WD, meas_file_name)):
    pw.simple_warning("Your working directory must have a {} format {} file to run this step. Make sure you have fully completed step 1 (import magnetometer file) and ALSO converted to 3.0, if necessary, then try again.\n\nIf you are trying to look at data downloaded from MagIC, you must unpack the txt file first. Some contributions do not contain measurement data, in which case you won't be able to use this function.".format(dm, meas_file_name))
    return False
return True
def check_for_meas_file(self)
Check the working directory for a measurement file. If not found, show a warning and return False. Otherwise return True.
7.966942
7.627094
1.044558
args = sys.argv
if "-h" in args:
    print(main.__doc__)
    sys.exit()
dir_path = pmag.get_named_arg('-WD', '.')
fmt = pmag.get_named_arg('-fmt', 'svg')
save_plots = False
interactive = True
if '-sav' in sys.argv:
    save_plots = True
    interactive = False
infile = pmag.get_named_arg("-f", "specimens.txt")
ipmag.dayplot_magic(dir_path, infile, save=save_plots, fmt=fmt, interactive=interactive)
def main()
NAME
    dayplot_magic.py
DESCRIPTION
    makes 'day plots' (Day et al. 1977) and squareness/coercivity plots;
    plots the 'linear mixing' curve from Dunlop and Carter-Stiglitz (2006);
    squareness/coercivity of remanence (Neel, 1955) plots after Tauxe et al. (2002)
SYNTAX
    dayplot_magic.py [command line options]
OPTIONS
    -h prints help message and quits
    -f: specify input hysteresis file, default is specimens.txt
    -fmt [svg,png,jpg] format for output plots, default svg
    -sav saves plots and quits quietly
3.782117
2.689426
1.406292
pmag_menu_dialogs.MoveFileIntoWD(self.parent, self.parent.WD)
def on_import1(self, event)
initialize window to import an arbitrary file into the working directory
51.942787
35.365383
1.468747
pmag_menu_dialogs.ImportAzDipFile(self.parent, self.parent.WD)
def orient_import2(self, event)
initialize window to import an AzDip format file into the working directory
48.650883
19.050924
2.553728
if 84 >= Lat >= 72: return 'X'
elif 72 > Lat >= 64: return 'W'
elif 64 > Lat >= 56: return 'V'
elif 56 > Lat >= 48: return 'U'
elif 48 > Lat >= 40: return 'T'
elif 40 > Lat >= 32: return 'S'
elif 32 > Lat >= 24: return 'R'
elif 24 > Lat >= 16: return 'Q'
elif 16 > Lat >= 8: return 'P'
elif 8 > Lat >= 0: return 'N'
elif 0 > Lat >= -8: return 'M'
elif -8 > Lat >= -16: return 'L'
elif -16 > Lat >= -24: return 'K'
elif -24 > Lat >= -32: return 'J'
elif -32 > Lat >= -40: return 'H'
elif -40 > Lat >= -48: return 'G'
elif -48 > Lat >= -56: return 'F'
elif -56 > Lat >= -64: return 'E'
elif -64 > Lat >= -72: return 'D'
elif -72 > Lat >= -80: return 'C'
else: return 'Z'
def _UTMLetterDesignator(Lat)
This routine determines the correct UTM letter designator for the given latitude. Returns 'Z' if latitude is outside the UTM limits of 84N to 80S. Written by Chuck Gantz- chuck.gantz@globalstar.com
1.346076
1.270089
1.059828
k0 = 0.9996
a = _ellipsoid[ReferenceEllipsoid][_EquatorialRadius]
eccSquared = _ellipsoid[ReferenceEllipsoid][_eccentricitySquared]
e1 = old_div((1 - sqrt(1 - eccSquared)), (1 + sqrt(1 - eccSquared)))
x = easting - 500000.0  # remove 500,000 meter offset for longitude
y = northing
ZoneLetter = zone[-1]
if ZoneLetter == 'Z':
    raise Exception("Latitude is outside the UTM limits")
ZoneNumber = int(zone[:-1])
if ZoneLetter >= 'N':
    NorthernHemisphere = 1  # point is in northern hemisphere
else:
    NorthernHemisphere = 0  # point is in southern hemisphere
    y -= 10000000.0  # remove 10,000,000 meter offset used for southern hemisphere
LongOrigin = (ZoneNumber - 1) * 6 - 180 + 3  # +3 puts origin in middle of zone
eccPrimeSquared = old_div((eccSquared), (1 - eccSquared))
M = old_div(y, k0)
mu = old_div(M, (a * (1 - old_div(eccSquared, 4) - 3 * eccSquared * eccSquared / 64 - 5 * eccSquared * eccSquared * eccSquared / 256)))
phi1Rad = (mu + (3 * e1 / 2 - 27 * e1 * e1 * e1 / 32) * sin(2 * mu)
           + (21 * e1 * e1 / 16 - 55 * e1 * e1 * e1 * e1 / 32) * sin(4 * mu)
           + (151 * e1 * e1 * e1 / 96) * sin(6 * mu))
phi1 = degrees(phi1Rad)
N1 = old_div(a, sqrt(1 - eccSquared * sin(phi1Rad) * sin(phi1Rad)))
T1 = tan(phi1Rad) * tan(phi1Rad)
C1 = eccPrimeSquared * cos(phi1Rad) * cos(phi1Rad)
R1 = a * (1 - eccSquared) / pow(1 - eccSquared * sin(phi1Rad) * sin(phi1Rad), 1.5)
D = old_div(x, (N1 * k0))
Lat = phi1Rad - (N1 * tan(phi1Rad) / R1) * (D * D / 2 - (5 + 3 * T1 + 10 * C1 - 4 * C1 * C1 - 9 * eccPrimeSquared) * D * D * D * D / 24 + (61 + 90 * T1 + 298 * C1 + 45 * T1 * T1 - 252 * eccPrimeSquared - 3 * C1 * C1) * D * D * D * D * D * D / 720)
Lat = degrees(Lat)
Long = old_div((D - (1 + 2 * T1 + C1) * D * D * D / 6 + (5 - 2 * C1 + 28 * T1 - 3 * C1 * C1 + 8 * eccPrimeSquared + 24 * T1 * T1) * D * D * D * D * D / 120), cos(phi1Rad))
Long = LongOrigin + degrees(Long)
return (Long, Lat)
def UTMtoLL(ReferenceEllipsoid, easting, northing, zone)
converts UTM coords to lat/long. Equations from USGS Bulletin 1532. East longitudes are positive, west longitudes are negative; north latitudes are positive, south latitudes are negative. Lat and Long are in decimal degrees. Written by Chuck Gantz- chuck.gantz@globalstar.com. Converted to Python by Russ Nelson <nelson@crynwr.com>
1.786418
1.800434
0.992215
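A usage sketch, assuming the module's _ellipsoid table follows the original Gantz LLUTM code in which index 23 is WGS-84 (that index is an assumption here); the zone string is the zone number plus the latitude band letter:

# index 23 assumed to be WGS-84, per the original Gantz ellipsoid table
lon, lat = UTMtoLL(23, 500000.0, 4649776.0, '31U')
# easting 500000 is the central meridian of zone 31 (3 E), so lon ≈ 3.0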
currentDirectory = self.WD
change_dir_dialog = wx.DirDialog(self.panel, "Choose your working directory to create or edit a MagIC contribution:", defaultPath=currentDirectory, style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON | wx.DD_CHANGE_DIR)
result = change_dir_dialog.ShowModal()
if result == wx.ID_CANCEL:
    return
if result == wx.ID_OK:
    self.WD = change_dir_dialog.GetPath()
    self.dir_path.SetValue(self.WD)
change_dir_dialog.Destroy()
self.get_wd_data()
def on_change_dir_button(self, event=None)
create change directory frame
3.134894
3.125379
1.003045
if has_problems:
    self.validation_mode = set(has_problems)
    # highlighting doesn't work with Windows
    if sys.platform in ['win32', 'win62']:
        self.message.SetLabel('The following grid(s) have incorrect or incomplete data:\n{}'.format(', '.join(self.validation_mode)))
    # highlighting does work with OSX
    else:
        for dtype in ["specimens", "samples", "sites", "locations", "ages", "measurements"]:
            wind = self.FindWindowByName(dtype + '_btn')
            if dtype not in has_problems:
                wind.Unbind(wx.EVT_PAINT, handler=self.highlight_button)
            else:
                wind.Bind(wx.EVT_PAINT, self.highlight_button)
        self.Refresh()
        self.message.SetLabel('Highlighted grids have incorrect or incomplete data')
    self.bSizer_msg.ShowItems(True)
    # manually fire a paint event to make sure all buttons
    # are highlighted/unhighlighted appropriately
    paintEvent = wx.CommandEvent(wx.wxEVT_PAINT, self.GetId())
    self.GetEventHandler().ProcessEvent(paintEvent)
else:
    self.message.SetLabel("Validated!")
    self.bSizer_msg.ShowItems(True)
self.hbox.Fit(self)
def highlight_problems(self, has_problems)
Outline grid buttons in red if they have validation errors
4.93057
4.735405
1.041214
for dtype in ["specimens", "samples", "sites", "locations", "ages"]: wind = self.FindWindowByName(dtype + '_btn') wind.Unbind(wx.EVT_PAINT, handler=self.highlight_button) self.Refresh() #self.message.SetLabel('Highlighted grids have incorrect or incomplete data') self.bSizer_msg.ShowItems(False) self.hbox.Fit(self)
def reset_highlights(self)
Remove red outlines from all buttons
10.976931
10.098999
1.086933
wind = event.GetEventObject()
pos = wind.GetPosition()
size = wind.GetSize()
try:
    dc = wx.PaintDC(self)
except wx._core.PyAssertionError:
    # if it's not a native paint event, we can't use wx.PaintDC
    dc = wx.ClientDC(self)
dc.SetPen(wx.Pen('red', 5, wx.SOLID))
dc.DrawRectangle(pos[0], pos[1], size[0], size[1])
event.Skip()
def highlight_button(self, event)
Draw a red highlight line around the event object
3.302558
3.064418
1.077711
dia = pmag_menu_dialogs.ClearWD(self.parent, self.parent.WD)
clear = dia.do_clear()
if clear:
    print('-I- Clear data object')
    self.contribution = cb.Contribution(self.WD, dmodel=self.data_model)
    self.edited = False
def on_clear(self, event)
initialize window to allow user to empty the working directory
12.605948
11.702265
1.077223
if self.parent.grid_frame:
    self.parent.grid_frame.onSave(None)
    self.parent.grid_frame.Destroy()
def on_close_grid(self, event)
If there is an open grid, save its data and close it.
5.050507
3.560501
1.418482
firstline, itilt, igeo, linecnt, key = 1, 0, 0, 0, ""
out = ""
data, k15 = [], []
dir = './'
ofile = ""
if '-WD' in sys.argv:
    ind = sys.argv.index('-WD')
    dir = sys.argv[ind+1] + '/'
if '-h' in sys.argv:
    print(main.__doc__)
    sys.exit()
if '-i' in sys.argv:
    file = input("Input file name [.k15 format]: ")
    f = open(file, 'r')
    data = f.readlines()
    f.close()
    file = input("Output file name [.s format]: ")
    out = open(file, 'w')
    print(" [g]eographic, [t]ilt corrected, ")
    tg = input(" [return for specimen coordinates]: ")
    if tg == 'g':
        igeo = 1
    elif tg == 't':
        igeo, itilt = 1, 1
elif '-f' in sys.argv:
    ind = sys.argv.index('-f')
    file = dir + sys.argv[ind+1]
    f = open(file, 'r')
    data = f.readlines()
    f.close()
else:
    data = sys.stdin.readlines()
if len(data) == 0:
    print(main.__doc__)
    sys.exit()
if '-F' in sys.argv:
    ind = sys.argv.index('-F')
    ofile = dir + sys.argv[ind+1]
    out = open(ofile, 'w')
if '-crd' in sys.argv:
    ind = sys.argv.index('-crd')
    tg = sys.argv[ind+1]
    if tg == 'g':
        igeo = 1
    if tg == 't':
        igeo, itilt = 1, 1
for line in data:
    rec = line.split()
    if firstline == 1:
        firstline = 0
        nam = rec[0]
        if igeo == 1:
            az, pl = float(rec[1]), float(rec[2])
        if itilt == 1:
            bed_az, bed_dip = 90. + float(rec[3]), float(rec[4])
    else:
        linecnt += 1
        for i in range(5):
            k15.append(float(rec[i]))
        if linecnt == 3:
            sbar, sigma, bulk = pmag.dok15_s(k15)
            if igeo == 1:
                sbar = pmag.dosgeo(sbar, az, pl)
            if itilt == 1:
                sbar = pmag.dostilt(sbar, bed_az, bed_dip)
            outstring = ""
            for s in sbar:
                outstring += '%10.8f ' % (s)
            outstring += '%10.8f' % (sigma)
            if out == "":
                print(outstring)
            else:
                out.write(outstring + '\n')
            linecnt, firstline, k15 = 0, 1, []
if ofile != "":
    print('Output saved in ', ofile)
def main()
NAME
    k15_s.py
DESCRIPTION
    converts .k15 format data to .s format.
    assumes Jelinek Kappabridge measurement scheme
SYNTAX
    k15_s.py [-h][-i][command line options][<filename]
OPTIONS
    -h prints help message and quits
    -i allows interactive entry of options
    -f FILE, specifies input file, default: standard input
    -F FILE, specifies output file, default: standard output
    -crd [g, t] specifies [g]eographic rotation, or geographic AND tectonic rotation
INPUT
    name [az,pl,strike,dip], followed by 3 rows of 5 measurements for each specimen
OUTPUT
    least squares matrix elements and sigma: x11,x22,x33,x12,x23,x13,sigma
3.311236
3.10525
1.066335
if '-h' in sys.argv:
    print(main.__doc__)
    sys.exit()
elif '-f' in sys.argv:
    ind = sys.argv.index('-f')
    file = sys.argv[ind+1]
    f = open(file, 'r')
    data = f.readlines()
elif '-i' not in sys.argv:
    data = sys.stdin.readlines()
if '-i' not in sys.argv:
    for line in data:
        rec = line.split()
        print('%7.1f' % (pmag.plat(float(rec[0]))))
else:
    while 1:
        try:
            inc = input("Inclination for converting to paleolatitude: <cntl-D> to quit ")
            print('%7.1f' % (pmag.plat(float(inc))))
        except:
            print('\n Good-bye \n')
            sys.exit()
def main()
NAME
    dipole_plat.py
DESCRIPTION
    gives paleolatitude from given inclination, assuming GAD field
SYNTAX
    dipole_plat.py [command line options] <filename
OPTIONS
    -h prints help message and quits
    -i allows interactive entry of inclination
    -f FILE, specifies file name on command line
4.174235
3.27567
1.274315
if '-h' in sys.argv:
    print(main.__doc__)
    return
dir_path = pmag.get_named_arg("-WD", default_val=os.getcwd())
meas_file = pmag.get_named_arg("-f", default_val="measurements.txt")
spec_file = pmag.get_named_arg("-fsp", default_val="specimens.txt")
samp_file = pmag.get_named_arg("-fsa", default_val="samples.txt")
site_file = pmag.get_named_arg("-fsi", default_val="sites.txt")
plot_file = pmag.get_named_arg("-Fp", default_val="")
crd = pmag.get_named_arg("-crd", default_val="s")
fmt = pmag.get_named_arg("-fmt", "svg")
specimen = pmag.get_named_arg("-spc", default_val="")
interactive = True
save_plots = False
if "-sav" in sys.argv:
    interactive = False
    save_plots = True
ipmag.zeq_magic(meas_file, spec_file, crd, dir_path, n_plots="all",
                save_plots=save_plots, fmt=fmt, interactive=interactive, specimen=specimen)
def main()
NAME
    zeq_magic.py
DESCRIPTION
    reads in a MagIC measurements formatted file, makes plots of remanence decay
    during demagnetization experiments. Reads in prior interpretations saved in
    a specimens formatted file; interpretations are saved in the coordinate system used.
SYNTAX
    zeq_magic.py [command line options]
OPTIONS
    -h prints help message and quits
    -f MEASFILE: sets measurements format input file, default: measurements.txt
    -fsp SPECFILE: sets specimens format file with prior interpretations, default: specimens.txt
    -fsa SAMPFILE: sets samples format file with sample=>site information, default: samples.txt
    -fsi SITEFILE: sets sites format file with site=>location information, default: sites.txt
    -Fp PLTFILE: sets filename for saved plot, default is name_type.fmt (where type is zijd, eqarea or decay curve)
    -crd [s,g,t]: sets coordinate system, g=geographic, t=tilt adjusted, default: specimen coordinate system
    -spc SPEC: plots single specimen SPEC, saves plot with specified format with optional -dir settings and quits
    -dir [L,P,F][beg][end]: sets calculation type for principal component analysis, default is none
        beg: starting step for PCA calculation
        end: ending step for PCA calculation
        [L,P,F]: calculation type for line, plane or fisher mean; must be used with -spc option
    -fmt FMT: set format of saved plot [png,svg,jpg]
    -A: suppresses averaging of replicate measurements, default is to average
    -sav: saves all plots without review
2.63472
2.130167
1.236861
sx, sx2, sx3, sy, sy2, sy3, sxy, sxy2, syx2 = (0,) * 9
for i in range(n):
    sx = sx + x[i]
    sx2 = sx2 + x[i]**2
    sx3 = sx3 + x[i]**3
    sy = sy + y[i]
    sy2 = sy2 + y[i]**2
    sy3 = sy3 + y[i]**3
    sxy = sxy + x[i] * y[i]
    sxy2 = sxy2 + x[i] * y[i]**2
    syx2 = syx2 + y[i] * x[i]**2
A = n * sx2 - sx**2
B = n * sxy - sx * sy
C = n * sy2 - sy**2
D = 0.5 * (n * sxy2 - sx * sy2 + n * sx3 - sx * sx2)
E = 0.5 * (n * syx2 - sy * sx2 + n * sy3 - sy * sy2)
xo = old_div((D * C - B * E), (A * C - B**2))
yo = old_div((A * E - B * D), (A * C - B**2))
r = 0
for z in range(n):
    r = r + old_div(numpy.sqrt((x[z] - xo)**2 + (y[z] - yo)**2), n)
if xo <= numpy.mean(x) and yo <= numpy.mean(y):
    k = old_div(-1., r)
else:
    k = old_div(1., r)
SSE = lib_k.get_SSE(xo, yo, r, x, y)
return k, xo, yo, SSE
def fitcircle(n, x, y)
Fit circle to arbitrary number of x,y pairs, based on the modified least squares method of Umbach and Jones (2000), IEEE Transactions on Instrumentation and Measurement.
2.260797
2.285125
0.989354
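A quick check of fitcircle on synthetic data: points on a unit circle centered at (2, 3) should recover the center, with the magnitude of 1/k approximating the radius:

import numpy
theta = numpy.linspace(0, 2 * numpy.pi, 20, endpoint=False)
x = 2 + numpy.cos(theta)
y = 3 + numpy.sin(theta)
k, xo, yo, SSE = fitcircle(len(x), x, y)
# expect (xo, yo) ≈ (2, 3) and abs(1 / k) ≈ 1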
if '-h' in sys.argv:
    print(main.__doc__)
    sys.exit()
if '-i' in sys.argv:
    while 1:
        try:
            ans = input("Input Pole Latitude [positive north]: <cntrl-D to quit> ")
            plat = float(ans)  # assign input to plat, after conversion to floating point
            ans = input("Input Pole Longitude [positive east]: ")
            plon = float(ans)
            ans = input("Input Site Latitude: ")
            slat = float(ans)
            ans = input("Input Site Longitude: ")
            slong = float(ans)
            dec, inc = pmag.vgp_di(plat, plon, slat, slong)  # call vgp_di function from pmag module
            print('%7.1f %7.1f' % (dec, inc))  # print out returned values
        except EOFError:
            print("\n Good-bye\n")
            sys.exit()
elif '-f' in sys.argv:  # manual input of file name
    ind = sys.argv.index('-f')
    file = sys.argv[ind+1]
    f = open(file, 'r')
    inp = f.readlines()  # read from file
    for line in inp:  # read in the data (as string variable), line by line
        dec, inc = spitout(line)
else:
    inp = sys.stdin.readlines()  # read from standard input
    for line in inp:  # read in the data (as string variable), line by line
        spitout(line)
def main()
NAME
    vgp_di.py
DESCRIPTION
    converts site latitude, longitude and pole latitude, longitude to declination, inclination
SYNTAX
    vgp_di.py [-h] [-i] [-f FILE] [< filename]
OPTIONS
    -h prints help message and quits
    -i interactive data entry
    -f FILE to specify file name on the command line
INPUT
    for file entry:
        PLAT PLON SLAT SLON
    where:
        PLAT: pole latitude
        PLON: pole longitude (positive east)
        SLAT: site latitude (positive north)
        SLON: site longitude (positive east)
OUTPUT
    D I
    where:
        D: declination
        I: inclination
4.297571
3.566052
1.205134
PmagPyDir = os.path.abspath(".")
COMMAND = % (PmagPyDir, PmagPyDir, PmagPyDir, PmagPyDir)
frc_path = os.path.join(os.environ["HOME"], ".bashrc")  # not recommended, but hey it freaking works
fbprof_path = os.path.join(os.environ["HOME"], ".bash_profile")
fprof_path = os.path.join(os.environ["HOME"], ".profile")
all_paths = [frc_path, fbprof_path, fprof_path]
for f_path in all_paths:
    open_type = 'a'
    if not os.path.isfile(f_path):
        open_type = 'w+'
        fout = open(f_path, open_type)
        fout.write(COMMAND)
        fout.close()
    else:
        fin = open(f_path, 'r')
        current_f = fin.read()
        fin.close()
        if COMMAND not in current_f:
            fout = open(f_path, open_type)
            fout.write(COMMAND)
            fout.close()
print("Install complete. Please restart the shell to complete install.\nIf you are seeing strange or non-existent paths in your PATH or PYTHONPATH variable please manually check your .bashrc, .bash_profile, and .profile or attempt to reinstall.")
def unix_install()
Edits or creates .bashrc, .bash_profile, and .profile files in the user's HOME directory in order to add your current directory (hopefully your PmagPy directory) and assorted lower directories in the PmagPy/programs directory to your PATH environment variable. It also adds the PmagPy and the PmagPy/programs directories to PYTHONPATH.
3.868872
3.296403
1.173665
if not path_to_python:
    print("Please enter the path to your python.exe you wish Windows to use to run python files. If you do not, this script will not be able to set up a full python environment in Windows. If you already have a python environment set up in Windows such that you can run python scripts from command prompt with just a file name then ignore this message. Otherwise, you will need to run dev_setup.py again with the command line option '-p' followed by the correct full path to python.\nRun dev_setup.py with the -h flag for more details")
    print("Would you like to continue? [y/N] ")
    ans = input()
    if ans == 'y':
        pass
    else:
        return
# be sure to add python.exe if the user forgets to include the file name
if os.path.isdir(path_to_python):
    path_to_python = os.path.join(path_to_python, "python.exe")
if not os.path.isfile(path_to_python):
    print("The path to python provided is not a full path to the python.exe file or this path does not exist, was given %s.\nPlease run again with the command line option '-p' followed by the correct full path to python.\nRun dev_setup.py with the -h flag for more details" % path_to_python)
    return
# make windows associate .py with python
subprocess.check_call('assoc .py=Python', shell=True)
subprocess.check_call('ftype Python=%s ' % path_to_python + '"%1" %*', shell=True)
PmagPyDir = os.path.abspath(".")
ProgramsDir = os.path.join(PmagPyDir, 'programs')
dirs_to_add = [ProgramsDir]
for d in next(os.walk(ProgramsDir))[1]:
    dirs_to_add.append(os.path.join(ProgramsDir, d))
path = str(subprocess.check_output('echo %PATH%', shell=True)).strip('\n')
if "PATH" in path:
    path = ''
pypath = str(subprocess.check_output('echo %PYTHONPATH%', shell=True)).strip('\n')
if "PYTHONPATH" in pypath:
    pypath = PmagPyDir + ';' + ProgramsDir
else:
    pypath += ';' + PmagPyDir + ';' + ProgramsDir
for d_add in dirs_to_add:
    path += ';' + d_add
unique_path_list = []
for p in path.split(';'):
    p = p.replace('"', '')
    if p not in unique_path_list:
        unique_path_list.append(p)
unique_pypath_list = []
for p in pypath.split(';'):
    p = p.replace('"', '')
    if p not in unique_pypath_list:
        unique_pypath_list.append(p)
path = functools.reduce(lambda x, y: x + ';' + y, unique_path_list)
pypath = functools.reduce(lambda x, y: x + ';' + y, unique_pypath_list)
print('setx PATH "%s"' % path)
subprocess.call('setx PATH "%s"' % path, shell=True)
print('setx PYTHONPATH "%s"' % pypath)
subprocess.call('setx PYTHONPATH "%s"' % (pypath), shell=True)
print("Install complete. Please restart the command prompt to complete install")
def windows_install(path_to_python="")
Sets the .py extension to be associated with the ftype Python, which is then set to the python.exe you provide in the path_to_python variable or after the -p flag if run as a script. Once the python environment is set up, the function proceeds to set PATH and PYTHONPATH using setx.
Parameters
----------
path_to_python : the path to the python.exe you want Windows to execute when running .py files
3.212929
3.14318
1.02219
do_help = pmag.get_flag_arg_from_sys('-h')
if do_help:
    print(main.__doc__)
    return False
res_file = pmag.get_named_arg('-f', 'pmag_results.txt')
crit_file = pmag.get_named_arg('-fcr', '')
spec_file = pmag.get_named_arg('-fsp', '')
age_file = pmag.get_named_arg('-fa', '')
grade = pmag.get_flag_arg_from_sys('-g')
latex = pmag.get_flag_arg_from_sys('-tex')
WD = pmag.get_named_arg('-WD', os.getcwd())
ipmag.pmag_results_extract(res_file, crit_file, spec_file, age_file, latex, grade, WD)
def main()
NAME
    pmag_results_extract.py
DESCRIPTION
    make a tab delimited output file from pmag_results table
SYNTAX
    pmag_results_extract.py [command line options]
OPTIONS
    -h prints help message and quits
    -f RFILE, specify pmag_results table; default is pmag_results.txt
    -fa AFILE, specify er_ages table; default is NONE
    -fsp SFILE, specify pmag_specimens table, default is NONE
    -fcr CFILE, specify pmag_criteria table, default is NONE
    -g include specimen_grade in table - only works for PmagPy generated pmag_specimen formatted files.
    -tex, output in LaTeX format
2.931121
2.361638
1.241139
dir_path = '.'
tspec = "thellier_specimens.txt"
aspec = "AC_specimens.txt"
ofile = "TorAC_specimens.txt"
critfile = "pmag_criteria.txt"
ACSamplist, Samplist, sigmin = [], [], 10000
GoodSamps, SpecOuts = [], []
# get arguments from command line
if '-h' in sys.argv:
    print(main.__doc__)
    sys.exit()
if '-fu' in sys.argv:
    ind = sys.argv.index('-fu')
    tspec = sys.argv[ind+1]
if '-fc' in sys.argv:
    ind = sys.argv.index('-fc')
    aspec = sys.argv[ind+1]
if '-F' in sys.argv:
    ind = sys.argv.index('-F')
    ofile = sys.argv[ind+1]
if '-WD' in sys.argv:
    ind = sys.argv.index('-WD')
    dir_path = sys.argv[ind+1]
# read in pmag_specimens file
tspec = dir_path + '/' + tspec
aspec = dir_path + '/' + aspec
ofile = dir_path + '/' + ofile
Specs, file_type = pmag.magic_read(tspec)
Speclist = pmag.get_specs(Specs)
ACSpecs, file_type = pmag.magic_read(aspec)
ACspeclist = pmag.get_specs(ACSpecs)
for spec in Specs:
    if spec["er_sample_name"] not in Samplist:
        Samplist.append(spec["er_sample_name"])
for spec in ACSpecs:
    if spec["er_sample_name"] not in ACSamplist:
        ACSamplist.append(spec["er_sample_name"])
for samp in Samplist:
    useAC, Ints, ACInts, GoodSpecs, AC, UC = 0, [], [], [], [], []
    for spec in Specs:
        if spec["er_sample_name"].lower() == samp.lower():
            UC.append(spec)
    if samp in ACSamplist:
        for spec in ACSpecs:
            if spec["er_sample_name"].lower() == samp.lower():
                AC.append(spec)
    if len(AC) > 0:
        AClist = []
        for spec in AC:
            SpecOuts.append(spec)
            AClist.append(spec['er_specimen_name'])
            print('using AC: ', spec['er_specimen_name'], '%7.1f' % (1e6 * float(spec['specimen_int'])))
        for spec in UC:
            if spec['er_specimen_name'] not in AClist:
                SpecOuts.append(spec)
    else:
        for spec in UC:
            SpecOuts.append(spec)
SpecOuts, keys = pmag.fillkeys(SpecOuts)
pmag.magic_write(ofile, SpecOuts, 'pmag_specimens')
print('thellier data assessed for AC correction put in ', ofile)
def main()
NAME
    replace_AC_specimens.py
DESCRIPTION
    finds anisotropy corrected data and replaces that specimen with it.
    puts in pmag_specimen format file
SYNTAX
    replace_AC_specimens.py [command line options]
OPTIONS
    -h prints help message and quits
    -i allows interactive setting of file names
    -fu TFILE uncorrected pmag_specimen format file with thellier interpretations
        created by thellier_magic_redo.py
    -fc AFILE anisotropy corrected pmag_specimen format file
        created by thellier_magic_redo.py
    -F FILE pmag_specimens format output file
DEFAULTS
    TFILE: thellier_specimens.txt
    AFILE: AC_specimens.txt
    FILE: TorAC_specimens.txt
2.815912
2.447563
1.150496
pars['specimen_fail_criteria'] = []
for crit in list(acceptance_criteria.keys()):
    if crit not in list(pars.keys()):
        continue
    if acceptance_criteria[crit]['value'] == -999:
        continue
    if acceptance_criteria[crit]['category'] != 'IE-SPEC':
        continue
    cutoff_value = acceptance_criteria[crit]['value']
    if crit == 'specimen_scat':
        if pars["specimen_scat"] in ["Fail", 'b', 0, '0', 'FALSE', "False", False, "f"]:
            pars['specimen_fail_criteria'].append('specimen_scat')
    elif crit == 'specimen_k' or crit == 'specimen_k_prime':
        if abs(pars[crit]) > cutoff_value:
            pars['specimen_fail_criteria'].append(crit)
    # high threshold value:
    elif acceptance_criteria[crit]['threshold_type'] == "high":
        if pars[crit] > cutoff_value:
            pars['specimen_fail_criteria'].append(crit)
    elif acceptance_criteria[crit]['threshold_type'] == "low":
        if pars[crit] < cutoff_value:
            pars['specimen_fail_criteria'].append(crit)
return pars
def check_specimen_PI_criteria(pars,acceptance_criteria)
Check if a specimen passes the acceptance criteria
3.699362
3.523345
1.049957
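A hedged sketch of the dictionary shapes this expects; the criterion names below are illustrative pmag-style keys, not an exhaustive list:

acceptance_criteria = {
    'specimen_f': {'value': 0.8, 'category': 'IE-SPEC', 'threshold_type': 'low'},
    'specimen_b_beta': {'value': 0.1, 'category': 'IE-SPEC', 'threshold_type': 'high'},
}
pars = {'specimen_f': 0.35, 'specimen_b_beta': 0.05}
pars = check_specimen_PI_criteria(pars, acceptance_criteria)
# 0.35 < 0.8 fails the 'low' threshold; 0.05 <= 0.1 passes the 'high' one,
# so pars['specimen_fail_criteria'] == ['specimen_f']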
dir_path = "./" if '-WD' in sys.argv: ind = sys.argv.index('-WD') dir_path = sys.argv[ind+1] if '-h' in sys.argv: print(main.__doc__) sys.exit() if '-f' in sys.argv: ind = sys.argv.index('-f') magic_file = dir_path+'/'+sys.argv[ind+1] else: print(main.__doc__) sys.exit() if '-key' in sys.argv: ind = sys.argv.index('-key') grab_key = sys.argv[ind+1] else: print(main.__doc__) sys.exit() # # # get data read in Data, file_type = pmag.magic_read(magic_file) if len(Data) > 0: for rec in Data: print(rec[grab_key]) else: print('bad file name')
def main()
NAME
    grab_magic_key.py
DESCRIPTION
    picks out a key and prints it to standard output
SYNTAX
    grab_magic_key.py [command line options]
OPTIONS
    -h prints help message and quits
    -f FILE: specify input magic format file
    -key KEY: specify key to print to standard output
2.706456
2.375403
1.139367
if '-h' in sys.argv:
    print(main.__doc__)
    sys.exit()
if '-i' in sys.argv:
    while 1:
        try:
            ans = input("Input Declination: <cntrl-D to quit> ")
            Dec = float(ans)  # assign input to Dec, after conversion to floating point
            ans = input("Input Inclination: ")
            Inc = float(ans)
            ans = input("Input Alpha 95: ")
            a95 = float(ans)
            ans = input("Input Site Latitude: ")
            slat = float(ans)
            ans = input("Input Site Longitude: ")
            slong = float(ans)
            plong, plat, dp, dm = pmag.dia_vgp(Dec, Inc, a95, slat, slong)  # call dia_vgp function from pmag module
            print('%7.1f %7.1f %7.1f %7.1f' % (plong, plat, dp, dm))  # print out returned values
        except:
            print("\n Good-bye\n")
            sys.exit()
elif '-f' in sys.argv:  # manual input of file name
    ind = sys.argv.index('-f')
    file = sys.argv[ind+1]
    f = open(file, 'r')
    inlist = []
    for line in f.readlines():
        inlist.append([])
        # loop over the elements, split by whitespace
        for el in line.split():
            inlist[-1].append(float(el))
    spitout(inlist)
else:
    input = sys.stdin.readlines()  # read from standard input
    inlist = []
    for line in input:  # read in the data (as string variable), line by line
        inlist.append([])
        # loop over the elements, split by whitespace
        for el in line.split():
            inlist[-1].append(float(el))
    spitout(inlist)
def main()
NAME
    dia_vgp.py
DESCRIPTION
    converts declination, inclination, alpha95 to virtual geomagnetic pole, dp and dm
SYNTAX
    dia_vgp.py [-h] [-i] [-f FILE] [< filename]
OPTIONS
    -h prints help message and quits
    -i interactive data entry
    -f FILE to specify file name on the command line
INPUT
    for file entry:
        D I A95 SLAT SLON
    where:
        D: declination
        I: inclination
        A95: alpha_95
        SLAT: site latitude (positive north)
        SLON: site longitude (positive east)
OUTPUT
    PLON PLAT DP DM
    where:
        PLAT: pole latitude
        PLON: pole longitude (positive east)
        DP: 95% confidence angle in parallel
        DM: 95% confidence angle in meridian
4.087254
3.362762
1.215446
fmt = 'svg'
title = ""
if '-h' in sys.argv:
    print(main.__doc__)
    sys.exit()
if '-f' in sys.argv:
    ind = sys.argv.index('-f')
    file = sys.argv[ind+1]
    X = numpy.loadtxt(file)
    file = sys.argv[ind+2]
    X2 = numpy.loadtxt(file)
else:
    print('-f option required')
    print(main.__doc__)
    sys.exit()
if '-fmt' in sys.argv:
    ind = sys.argv.index('-fmt')
    fmt = sys.argv[ind+1]
if '-t' in sys.argv:
    ind = sys.argv.index('-t')
    title = sys.argv[ind+1]
CDF = {'X': 1}
pmagplotlib.plot_init(CDF['X'], 5, 5)
pmagplotlib.plot_cdf(CDF['X'], X, '', 'r', '')
pmagplotlib.plot_cdf(CDF['X'], X2, title, 'b', '')
D, p = scipy.stats.ks_2samp(X, X2)
if p >= .05:
    print(D, p, ' not rejected at 95%')
else:
    print(D, p, ' rejected at 95%')
pmagplotlib.draw_figs(CDF)
ans = input('S[a]ve plot, <Return> to quit ')
if ans == 'a':
    files = {'X': 'CDF_.' + fmt}
    pmagplotlib.save_plots(CDF, files)
def main()
NAME plot_2cdfs.py DESCRIPTION makes plots of cdfs of data in input file SYNTAX plot_2cdfs.py [-h][command line options] OPTIONS -h prints help message and quits -f FILE1 FILE2 -t TITLE -fmt [svg,eps,png,pdf,jpg..] specify format of output figure, default is svg
2.899871
2.868151
1.011059
infile = 'pmag_specimens.txt'
sampfile = "er_samples.txt"
outfile = "er_samples.txt"
# get command line stuff
if "-h" in sys.argv:
    print(main.__doc__)
    sys.exit()
if '-fsp' in sys.argv:
    ind = sys.argv.index("-fsp")
    infile = sys.argv[ind+1]
if '-fsm' in sys.argv:
    ind = sys.argv.index("-fsm")
    sampfile = sys.argv[ind+1]
if '-F' in sys.argv:
    ind = sys.argv.index("-F")
    outfile = sys.argv[ind+1]
if '-WD' in sys.argv:
    ind = sys.argv.index("-WD")
    dir_path = sys.argv[ind+1]
    infile = dir_path + '/' + infile
    sampfile = dir_path + '/' + sampfile
    outfile = dir_path + '/' + outfile
# now do re-ordering
pmag.ReorderSamples(infile, sampfile, outfile)
def main()
NAME
    reorder_samples.py

DESCRIPTION
    takes specimen file and reorders sample file with selected orientation methods placed first

SYNTAX
    reorder_samples.py [command line options]

OPTIONS
    -h prints help message and quits
    -fsp: specimen input pmag_specimens format file, default is "pmag_specimens.txt"
    -fsm: sample input er_samples format file, default is "er_samples.txt"
    -F: output er_samples format file, default is "er_samples.txt"

OUTPUT
    writes re-ordered er_samples.txt file
2.422288
1.903953
1.272242
def can_iter(x):
    # True if x is iterable (even if empty)
    try:
        any(x)
        return True
    except TypeError:
        return False

def not_empty(x):
    if len(x):
        return True
    return False

def exists(x):
    if x:
        return True
    return False

def is_nan(x):
    try:
        if np.isnan(x):
            return True
    except TypeError:
        return False
    return False

# return True iff you have a non-empty iterable
# and False for an empty iterable (including an empty string)
if can_iter(val):
    return not_empty(val)
# if value is not iterable, return False for np.nan, None, 0, or False
# & True for all else
else:
    if is_nan(val):
        return False
    if not zero_as_null:
        if val == 0:
            return True
    return exists(val)
def not_null(val, zero_as_null=True)
Comprehensive check to see if a value is null or not.
Returns True for: non-empty iterables, True, non-zero floats and ints, non-empty strings.
Returns False for: empty iterables, False, zero, empty strings.

Parameters
----------
val : any Python object
zero_as_null : bool
    treat zero as null, default True

Returns
---------
boolean
3.721386
3.553366
1.047285
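A quick usage sketch of not_null; the import path is an assumption (this helper lives in pmagpy's contribution_builder module in recent versions):

import numpy as np
from pmagpy.contribution_builder import not_null  # path may vary by version

print(not_null([1, 2, 3]))               # True: non-empty iterable
print(not_null(""))                      # False: empty string
print(not_null(np.nan))                  # False: nan is null
print(not_null(0))                       # False: zero_as_null defaults to True
print(not_null(0, zero_as_null=False))   # True: zero allowed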
# possible intensity columns
intlist = ['magn_moment', 'magn_volume', 'magn_mass', 'magn_uncal']
# intensity columns that are in the data
int_meths = [col_name for col_name in data.columns if col_name in intlist]
# drop fully null columns
data = data.dropna(axis='columns', how='all')
# ignore columns that were dropped or hold only blank values (including "")
for col_name in int_meths[:]:
    if col_name not in data.columns or not data[col_name].any():
        int_meths.remove(col_name)
if len(int_meths):
    # prefer 'magn_moment' if it is available
    if 'magn_moment' in int_meths:
        return 'magn_moment'
    return int_meths[0]
return ""
def get_intensity_col(data)
Check measurement dataframe for intensity columns
'magn_moment', 'magn_volume', 'magn_mass', 'magn_uncal'.
Return the first intensity column that is in the dataframe AND has data,
preferring 'magn_moment' when present.

Parameters
----------
data : pandas DataFrame

Returns
---------
str
    intensity method column or ""
4.535319
3.136352
1.446049
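A minimal behavior sketch for get_intensity_col with a toy dataframe (import path assumed, as above):

import numpy as np
import pandas as pd
from pmagpy.contribution_builder import get_intensity_col  # path may vary

meas = pd.DataFrame({'magn_volume': [np.nan, np.nan],   # present but empty
                     'magn_mass': [5.0e-6, 6.1e-6],
                     'magn_moment': [1.2e-9, 3.4e-9]})
print(get_intensity_col(meas))  # 'magn_moment' (preferred when filled)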
reqd_tables = ['measurements', 'specimens', 'samples', 'sites']
con = Contribution(dir_path, read_tables=reqd_tables)
# check that all required tables are available
missing_tables = []
for table in reqd_tables:
    if table not in con.tables:
        missing_tables.append(table)
if missing_tables:
    return False, "You are missing {} tables".format(", ".join(missing_tables))
# put sample column into the measurements table
con.propagate_name_down('sample', 'measurements')
# put site column into the measurements table
con.propagate_name_down('site', 'measurements')
# check that column propagation was successful
if 'site' not in con.tables['measurements'].df.columns:
    return False, "Something went wrong with propagating sites down to the measurement level"
return True, con.tables['measurements'].df
def add_sites_to_meas_table(dir_path)
Add site columns to measurements table (e.g., to plot intensity data),
or generate an informative error message.

Parameters
----------
dir_path : str
    directory with data files

Returns
----------
status : bool
    True if successful, else False
data : pandas DataFrame or str
    measurement data with site/sample columns, or an error message
3.589623
3.80361
0.943741
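Hypothetical usage, assuming a MagIC directory containing measurements, specimens, samples, and sites files:

status, result = add_sites_to_meas_table("path/to/magic_dir")  # directory name is hypothetical
if status:
    print(result[['sample', 'site']].head())  # site/sample now on each measurement row
else:
    print(result)  # informative error message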
# initialize
dropna = list(dropna)
reqd_cols = list(reqd_cols)
# get intensity column
try:
    magn_col = get_intensity_col(data)
except AttributeError:
    return False, "Could not get intensity method from data"
# drop rows that are missing values in required columns
if magn_col not in dropna:
    dropna.append(magn_col)
data = data.dropna(axis=0, subset=dropna)
# add to reqd_cols list
if 'method_codes' not in reqd_cols:
    reqd_cols.append('method_codes')
if magn_col not in reqd_cols:
    reqd_cols.append(magn_col)
# drop non-required cols, make sure all required cols are present
try:
    data = data[reqd_cols]
except KeyError as ex:
    print(ex)
    missing = set(reqd_cols).difference(data.columns)
    return False, "missing these required columns: {}".format(", ".join(missing))
# filter out records without the correct method code
data = data[data['method_codes'].str.contains(meth_code).astype(bool)]
return True, data
def prep_for_intensity_plot(data, meth_code, dropna=(), reqd_cols=())
Strip down measurement data to what is needed for an intensity plot.
Find the column with intensity data.
Drop rows missing required values, keep only required columns,
and keep only records with the specified method code.

Parameters
----------
data : pandas DataFrame
    measurement dataframe
meth_code : str
    MagIC method code to include, i.e. 'LT-AF-Z'
dropna : list
    columns that must not be empty
reqd_cols : list
    columns that must be present

Returns
----------
status : bool
    True if successful, else False
data : pandas DataFrame or str
    measurement data with required columns, or an error message
2.91621
2.727689
1.069114
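A hedged usage sketch; meas_df and the dropna/reqd_cols column names are illustrative, not required by the function:

# keep only AF demagnetization records with a treatment field and intensity
status, af_data = prep_for_intensity_plot(meas_df, 'LT-AF-Z',
                                          dropna=['treat_ac_field'],
                                          reqd_cols=['specimen', 'treat_ac_field'])
if status:
    print(af_data.head())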
df = df.copy()
df[col_name] = df[col_name].fillna("")
df[col_name] = df[col_name].astype(str)
return df
def stringify_col(df, col_name)
Take a dataframe and stringify a column of values.
Turn nan/None into "" and all other values into strings.

Parameters
----------
df : dataframe
col_name : string
2.03361
2.254746
0.901924
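Behavior sketch with a toy dataframe (assumes stringify_col, defined above, is in scope):

import numpy as np
import pandas as pd

df = pd.DataFrame({'site': ['a1', np.nan, 3]})
df = stringify_col(df, 'site')
print(df['site'].tolist())  # ['a1', '', '3'] -- all strings, nan becomes ""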
if dtype not in self.table_names:
    print("-W- {} is not a valid MagIC table name".format(dtype))
    print("-I- Valid table names are: {}".format(", ".join(self.table_names)))
    return
data_container = MagicDataFrame(dtype=dtype, columns=col_names, groups=groups)
self.tables[dtype] = data_container
def add_empty_magic_table(self, dtype, col_names=None, groups=None)
Add a blank MagicDataFrame to the contribution. You can provide either a list of column names, or a list of column group names. If provided, col_names takes precedence.
4.107532
4.045399
1.015359
self.tables[dtype] = MagicDataFrame(dtype=dtype, data=data)
if dtype == 'measurements':
    # measurements get a sequence column
    self.tables['measurements'].add_sequence()
return dtype, self.tables[dtype]
def add_magic_table_from_data(self, dtype, data)
Add a MagIC table to the contribution from a data list.

Parameters
----------
dtype : str
    MagIC table type, i.e. 'specimens'
data : list of dicts
    data list with format [{'key1': 'val1', ...}, {'key1': 'val2', ...}, ...]
5.32988
5.555773
0.959341
if df is None:
    # if providing a filename but no data type
    if dtype == "unknown":
        filename = os.path.join(self.directory, fname)
        if not os.path.exists(filename):
            return False, False
        data_container = MagicDataFrame(filename, dmodel=self.data_model)
        dtype = data_container.dtype
        if dtype == 'empty':
            return False, False
        else:
            self.tables[dtype] = data_container
            return dtype, data_container
    # if providing a data type, use the canonical filename
    elif dtype not in self.filenames:
        print('-W- "{}" is not a valid MagIC table type'.format(dtype))
        print("-I- Available table types are: {}".format(", ".join(self.table_names)))
        return False, False
    filename = pmag.resolve_file_name(self.filenames[dtype], self.directory)
    if os.path.exists(filename):
        data_container = MagicDataFrame(filename, dtype=dtype, dmodel=self.data_model)
        if data_container.dtype != "empty":
            self.tables[dtype] = data_container
            return dtype, data_container
        else:
            return False, False
    else:
        return False, False
# df is not None
else:
    if not dtype:
        print("-W- Must provide dtype")
        return False, False
    data_container = MagicDataFrame(dtype=dtype, df=df)
    self.tables[dtype] = data_container
    self.tables[dtype].sort_dataframe_cols()
    return dtype, self.tables[dtype]
def add_magic_table(self, dtype, fname=None, df=None)
Read in a new file to add a table to self.tables.
Requires dtype argument and EITHER filename or df.

Parameters
----------
dtype : str
    MagIC table name (plural, i.e. 'specimens')
fname : str
    filename of MagIC format file
    (short path, directory is self.directory)
    default: None
df : pandas DataFrame
    data to create the new table with
    default: None
3.012653
2.940735
1.024456
meas_df = self.tables['measurements'].df
names_list = ['specimen', 'sample', 'site', 'location']
# add in any tables that you can
for num, name in enumerate(names_list):
    # don't replace tables that already exist
    if (name + "s") in self.tables:
        continue
    elif name in meas_df.columns:
        items = meas_df[name].unique()
        df = pd.DataFrame(columns=[name], index=items)
        df[name] = df.index
        # add in parent name if possible
        # (i.e., sample name to specimens table)
        if num < (len(names_list) - 1):
            parent = names_list[num+1]
            if parent in meas_df.columns:
                meas_df = meas_df.where(meas_df.notnull(), "")
                df[parent] = meas_df.drop_duplicates(subset=[name])[parent].values.astype(str)
        df = df.where(df != "", np.nan)
        df = df.dropna(how='all', axis='rows')
        if len(df):
            self.tables[name + "s"] = MagicDataFrame(dtype=name + "s", df=df)
            self.write_table_to_file(name + "s")
def propagate_measurement_info(self)
Take a contribution with a measurement table. Create specimen, sample, site, and location tables using the unique names in the measurement table to fill in the index.
3.997929
3.63149
1.100906
if table_name not in self.ancestry:
    return None, None
parent_ind = self.ancestry.index(table_name) + 1
if parent_ind + 1 > len(self.ancestry):
    parent_name = None
else:
    parent_name = self.ancestry[parent_ind]
child_ind = self.ancestry.index(table_name) - 1
if child_ind < 0:
    child_name = None
else:
    child_name = self.ancestry[child_ind]
return parent_name, child_name
def get_parent_and_child(self, table_name)
Get the name of the parent table and the child table
for a given MagIC table name.

Parameters
----------
table_name : str
    MagIC table name ['specimens', 'samples', 'sites', 'locations']

Returns
-------
parent_name : str
    name of parent table
child_name : str
    name of child table
1.693325
1.750224
0.967491
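Usage sketch, assuming the contribution's ancestry runs measurements -> specimens -> samples -> sites -> locations:

# con is a hypothetical Contribution instance
parent, child = con.get_parent_and_child('samples')
print(parent, child)  # 'sites' 'specimens'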
cols = ['lithologies', 'geologic_types', 'geologic_classes']
self.propagate_cols(cols, 'samples', 'sites')
self.propagate_cols(cols, 'specimens', 'samples')
# if sites table is missing any values,
# go ahead and propagate values UP as well
if 'sites' not in self.tables:
    return
for col in cols:
    if col not in self.tables['sites'].df.columns:
        self.tables['sites'].df[col] = None
if not all(self.tables['sites'].df[cols].values.ravel()):
    print('-I- Propagating values up from samples to sites...')
    self.propagate_cols_up(cols, 'sites', 'samples')
def propagate_lithology_cols(self)
Propagate any data from lithologies, geologic_types, or geologic_classes
from the sites table down to the samples and specimens tables.
In the samples/specimens tables, null or "Not Specified" values
will be overwritten based on the data from their parent site.
3.951835
3.232747
1.222439
# define some helper methods:
def put_together_if_list(item):
    # re-join a split list with colons; leave non-lists alone
    try:
        return ":".join(item)
    except TypeError:
        return item

def replace_colon_delimited_value(df, col_name, old_value, new_value):
    # replace old_value with new_value inside each list in the column
    def fix(names_list):
        if not isinstance(names_list, list):
            return names_list
        return [new_value if name.strip() == old_value else name.strip()
                for name in names_list]
    df[col_name] = df[col_name].apply(fix)

# initialize some things
item_type = table_name
col_name = item_type[:-1]
col_name_plural = col_name + "s"
table_df = self.tables[item_type].df
if item_old_name == '':
    # just add a new item
    self.add_item(table_name, {col_name: item_new_name}, item_new_name)
    return
# rename item in its own table
table_df.rename(index={item_old_name: item_new_name}, inplace=True)
# rename in any parent/child tables
for table_name in self.tables:
    df = self.tables[table_name].df
    col_names = df.columns
    # change anywhere col_name (singular, i.e. site) is found
    if col_name in col_names:
        df[col_name].where(df[col_name] != item_old_name, item_new_name, inplace=True)
    # change anywhere col_name (plural, i.e. sites) is found
    if col_name_plural in col_names:
        df[col_name_plural + "_list"] = df[col_name_plural].str.split(":")
        replace_colon_delimited_value(df, col_name_plural + "_list", item_old_name, item_new_name)
        df[col_name_plural] = df[col_name_plural + "_list"].apply(put_together_if_list)
        df.drop(col_name_plural + "_list", axis=1, inplace=True)
    self.tables[table_name].df = df
def rename_item(self, table_name, item_old_name, item_new_name)
Rename item (such as a site) everywhere that it occurs. This change often spans multiple tables. For example, a site name will occur in the sites table, the samples table, and possibly in the locations/ages tables.
2.736309
2.716994
1.007109
if ind >= len(self.ancestry):
    return "", ""
if ind > -1:
    table_name = self.ancestry[ind]
    name = table_name[:-1]
    return table_name, name
return "", ""
def get_table_name(self, ind)
Return both the table_name (i.e., 'specimens') and the col_name (i.e., 'specimen') for a given index in self.ancestry.
4.931142
3.739619
1.318622
print("-I- Trying to propagate {} columns from {} table into {} table".format(cols, source_df_name, target_df_name)) # make sure target table is read in if target_df_name not in self.tables: self.add_magic_table(target_df_name) if target_df_name not in self.tables: print("-W- Couldn't read in {} table".format(target_df_name)) return # make sure source table is read in if source_df_name not in self.tables: self.add_magic_table(source_df_name) print("-W- Couldn't read in {} table".format(source_df_name)) return target_df = self.tables[target_df_name] source_df = self.tables[source_df_name] target_name = target_df_name[:-1] # make sure source_df has relevant columns for col in cols: if col not in source_df.df.columns: source_df.df[col] = None # if target_df has info, propagate that into all rows target_df.front_and_backfill(cols) # make sure target_name is in source_df for merging if target_name not in source_df.df.columns: print("-W- You can't merge data from {} table into {} table".format(source_df_name, target_df_name)) print(" Your {} table is missing {} column".format(source_df_name, target_name)) self.tables[target_df_name] = target_df return target_df source_df.front_and_backfill([target_name]) # group source df by target_name grouped = source_df.df.groupby(source_df.df[target_name]) if not len(grouped): print("-W- Couldn't propagate from {} to {}".format(source_df_name, target_df_name)) return target_df # function to generate capitalized, sorted, colon-delimited list # of unique, non-null values from a column def func(group, col_name): lst = group[col_name][group[col_name].notnull()].unique() split_lst = [col.split(':') for col in lst if col] sorted_lst = sorted(np.unique([item.capitalize() for sublist in split_lst for item in sublist])) group_col = ":".join(sorted_lst) return group_col # apply func to each column for col in cols: res = grouped.apply(func, col) target_df.df['new_' + col] = res target_df.df[col] = np.where(target_df.df[col], target_df.df[col], target_df.df['new_' + col]) target_df.df.drop(['new_' + col], axis='columns', inplace=True) # set table self.tables[target_df_name] = target_df return target_df
def propagate_cols_up(self, cols, target_df_name, source_df_name)
Take values from source table, compile them into a colon-delimited list, and apply them to the target table. This method won't overwrite values in the target table, it will only supply values where they are missing. Parameters ---------- cols : list-like list of columns to propagate target_df_name : str name of table to propagate values into source_df_name: name of table to propagate values from Returns --------- target_df : MagicDataFrame updated MagicDataFrame with propagated values
2.65999
2.559061
1.03944
def get_level(ser, levels=('specimen', 'sample', 'site', 'location')):
    # return the lowest level that is filled in for this row
    for level in levels:
        if pd.notnull(ser[level]):
            if len(ser[level]):  # guard against empty strings
                return level
    return

# get available levels in age table
possible_levels = ['specimen', 'sample', 'site', 'location']
levels = [level for level in possible_levels if level in self.tables['ages'].df.columns]
# find level for each age row
age_levels = self.tables['ages'].df.apply(get_level, axis=1, args=[levels])
if any(age_levels):
    self.tables['ages'].df.loc[:, 'level'] = age_levels
return self.tables['ages']
def get_age_levels(self)
Method to add a "level" column to the ages table.
Finds the lowest filled in level (i.e., specimen, sample, etc.)
for that particular row.
I.e., a row with both site and sample name filled in
is considered a sample-level age.

Returns
---------
self.tables['ages'] : MagicDataFrame
    updated ages table
4.095322
3.133826
1.306812
# if there is no age table, skip
if 'ages' not in self.tables:
    return
# if age table has no data, skip
if not len(self.tables['ages'].df):
    return
# get levels in age table
self.get_age_levels()
# if age levels could not be determined, skip
if "level" not in self.tables["ages"].df.columns:
    return
if not any(self.tables["ages"].df["level"]):
    return
# go through each level of age data
for level in self.tables['ages'].df['level'].unique():
    if not level:
        continue
    table_name = level + 's'
    age_headers = self.data_model.get_group_headers(table_name, 'Age')
    # find age headers that are actually in the table
    actual_age_headers = list(set(self.tables[table_name].df.columns).intersection(age_headers))
    # find age headers that are available in the ages table
    available_age_headers = list(set(self.tables['ages'].df.columns).intersection(age_headers))
    # fill in all available age info to all rows
    self.tables[table_name].front_and_backfill(actual_age_headers)
    # add any available headers to the table
    add_headers = set(available_age_headers).difference(actual_age_headers)
    for header in add_headers:
        self.tables[table_name].df[header] = None

    # propagate values from ages into the table
    def move_values(ser, level, available_headers):
        name = ser.name
        cond1 = self.tables['ages'].df[level] == name
        cond2 = self.tables['ages'].df['level'] == level
        mask = cond1 & cond2
        sli = self.tables['ages'].df[mask]
        if len(sli):
            return list(sli[available_headers].values[0])
        return [None] * len(available_headers)

    res = self.tables[table_name].df.apply(move_values, axis=1, args=[level, available_age_headers])
    # fill in table with values gleaned from ages
    new_df = pd.DataFrame(data=list(res.values), index=res.index, columns=available_age_headers)
    age_values = np.where(self.tables[table_name].df[available_age_headers],
                          self.tables[table_name].df[available_age_headers], new_df)
    self.tables[table_name].df[available_age_headers] = age_values
# put age_high, age_low into locations table
print("-I- Adding age_high and age_low to locations table based on minimum/maximum ages found in sites table")
self.propagate_min_max_up(cols=['age'], target_df_name='locations', source_df_name='sites')
def propagate_ages(self)
Mine ages table for any age data, and write it into specimens, samples, sites, locations tables. Do not overwrite existing age data.
3.467269
3.36473
1.030475
for table_name in self.tables:
    table = self.tables[table_name]
    table.remove_non_magic_cols_from_table()
def remove_non_magic_cols(self)
Remove all non-MagIC columns from all tables.
3.44949
2.715306
1.270387
if custom_name:
    fname = custom_name
else:
    fname = self.filenames[dtype]
if not dir_path:
    dir_path = self.directory
if dtype in self.tables:
    write_df = self.remove_names(dtype)
    outfile = self.tables[dtype].write_magic_file(custom_name=fname,
                                                  dir_path=dir_path,
                                                  append=append, df=write_df)
    return outfile
def write_table_to_file(self, dtype, custom_name=None, append=False, dir_path=None)
Write out a MagIC table to file, using custom filename
as specified in self.filenames.

Parameters
----------
dtype : str
    magic table name
custom_name : str
    custom file name, default None (use self.filenames[dtype])
append : bool
    append to an existing file, default False
dir_path : str
    output directory, default None (use self.directory)
4.330293
4.009305
1.080061
if dtype not in self.ancestry:
    return
if dtype in self.tables:
    # remove extra name columns here
    self_ind = self.ancestry.index(dtype)
    parent_ind = self_ind + 1 if self_ind < (len(self.ancestry) - 1) else self_ind
    remove = set(self.ancestry).difference([self.ancestry[self_ind], self.ancestry[parent_ind]])
    remove = [name[:-1] for name in remove]
    columns = self.tables[dtype].df.columns.difference(remove)
    return self.tables[dtype].df[columns]
def remove_names(self, dtype)
Remove unneeded name columns ('specimen'/'sample'/etc.)
from the specified table.

Parameters
----------
dtype : str

Returns
---------
pandas DataFrame without the unneeded columns

Example
---------
Contribution.tables['specimens'].df = Contribution.remove_names('specimens')
# takes out 'location', 'site', and/or 'sample' columns from the
# specimens dataframe if those columns have been added
3.732815
3.80388
0.981318
parent_dtype, child_dtype = self.get_parent_and_child(dtype)
if child_dtype not in self.tables:
    return set()
items = set(self.tables[dtype].df.index.unique())
items_in_child_table = set(self.tables[child_dtype].df[dtype[:-1]].unique())
return {i for i in (items_in_child_table - items) if not_null(i)}
def find_missing_items(self, dtype)
Find any items that are referenced in a child table
but are missing in their own table.
For example, a site that is listed in the samples table,
but has no entry in the sites table.

Parameters
----------
dtype : str
    table name, e.g. 'specimens'

Returns
---------
set of missing values
3.924477
3.827021
1.025465
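Usage sketch with a hypothetical Contribution instance con:

# sites referenced by rows in the samples table (the child table)
# but absent from the sites table itself
missing_sites = con.find_missing_items('sites')
print(missing_sites)  # e.g. {'site_7'} (hypothetical)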
con_id = "" if "contribution" in self.tables: if "id" in self.tables["contribution"].df.columns: con_id = str(self.tables["contribution"].df["id"].values[0]) return con_id
def get_con_id(self)
Return contribution id if available
3.385219
2.71224
1.248127
def stringify(x):
    # float --> string,
    # truncating floats like 3.0 --> 3
    if isinstance(x, float):
        if x.is_integer():
            return str(x).rstrip('0').rstrip('.')
        return str(x)
    # keep strings as they are,
    # unless it is a string like "3.0",
    # in which case truncate that too
    if isinstance(x, str):
        try:
            float(x)
            if x.endswith('0'):
                if x.rstrip('0').endswith('.'):
                    return x.rstrip('0').rstrip('.')
        except (ValueError, TypeError):
            pass
    # integer --> string
    if isinstance(x, int):
        return str(x)
    # if it is not int/str/float, just return as is
    return x

def remove_extra_digits(x, prog):
    # strip floating-point rounding artifacts, i.e. long runs
    # of 0s or 9s after the decimal point
    if not isinstance(x, str):
        return x
    result = prog.match(x)
    if result:
        decimals = result.string.split('.')[1]
        result = result.string
        if decimals[-3] == '0':
            result = x[:-2].rstrip('0')
        if decimals[-3] == '9':
            result = x[:-2].rstrip('9')
            try:
                last_digit = int(result[-1])
                result = result[:-1] + str(last_digit + 1)
            except ValueError:
                result = float(result[:-1]) + 1
        return result
    return x

for col in self.df.columns:
    self.df[col] = self.df[col].apply(stringify)
prog = re.compile(r"\d*[.]\d*([0]{5,100}|[9]{5,100})\d*\Z")
for col in self.df.columns:
    self.df[col] = self.df[col].apply(lambda x: remove_extra_digits(x, prog))
def all_to_str(self)
In all columns, turn all floats/ints into strings. If a float ends with .0, strip off '.0' from the resulting string.
3.056505
2.947147
1.037106
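Behavior sketch on a hypothetical MagicDataFrame; the artifact regex targets long runs of 0s or 9s after the decimal point:

# sites_container is a hypothetical MagicDataFrame
sites_container.all_to_str()
# 3.0                  -> '3'    (trailing .0 stripped)
# 7                    -> '7'
# 1.7000000000000002   -> '1.7'  (rounding artifact removed)
# 2.6999999999999996   -> '2.7'  (rounded up after stripping 9s)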
unrecognized_cols = self.get_non_magic_cols()
for col in ignore_cols:
    if col in unrecognized_cols:
        unrecognized_cols.remove(col)
if unrecognized_cols:
    print('-I- Removing non-MagIC column names from {}:'.format(self.dtype), end=' ')
    for col in unrecognized_cols:
        self.df.drop(col, axis='columns', inplace=True)
        print(col, end=' ')
    print("\n")
return unrecognized_cols
def remove_non_magic_cols_from_table(self, ignore_cols=())
Remove all non-MagIC columns from self.df.
Changes in place.

Parameters
----------
ignore_cols : list-like
    columns not to remove, whether they are proper MagIC columns or not

Returns
---------
unrecognized_cols : list
    any columns that were removed
3.052277
2.79167
1.093352
if sorted(row_data.keys()) != sorted(self.df.columns):
    # add any new column names
    for key in row_data:
        if key not in self.df.columns:
            self.df[key] = None
    # add missing column names into row_data
    for col_label in self.df.columns:
        if col_label not in list(row_data.keys()):
            row_data[col_label] = None
try:
    self.df.iloc[ind] = pd.Series(row_data)
except IndexError:
    return False
return self.df
def update_row(self, ind, row_data)
Update a row with data. Must provide the specific numeric index (not row label). If any new keys are present in row_data dictionary, that column will be added to the dataframe. This is done inplace.
2.613307
2.425878
1.077262
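Usage sketch; positions are numeric (iloc-style), and unknown keys become new columns:

# spec_container is a hypothetical MagicDataFrame of specimens
spec_container.update_row(2, {'method_codes': 'LP-DIR-AF',
                              'analyst_names': 'A. Researcher'})
# row at position 2 is overwritten; any new keys add columns to the table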
# use provided column order, making sure you don't lose any values
# from self.df.columns
if len(columns):
    if sorted(self.df.columns) == sorted(columns):
        self.df.columns = columns
    else:
        new_columns = []
        new_columns.extend(columns)
        for col in self.df.columns:
            if col not in new_columns:
                new_columns.append(col)
        # reorder with the provided columns first, keeping any extras
        self.df = self.df.reindex(columns=new_columns)
# make sure all columns have data or None
if sorted(row_data.keys()) != sorted(self.df.columns):
    # add any new column names
    for key in row_data:
        if key not in self.df.columns:
            self.df[key] = None
    # add missing column names into row_data
    for col_label in self.df.columns:
        if col_label not in list(row_data.keys()):
            row_data[col_label] = None
# (make sure you are working with strings)
self.df.index = self.df.index.astype(str)
label = str(label)
# create a new row with suffix "new"
# (this ensures that you get a unique, new row,
#  instead of adding on to an existing row with the same label)
self.df.loc[label + "new"] = pd.Series(row_data)
# rename it to be correct
self.df.rename(index={label + "new": label}, inplace=True)
# use next line to sort index inplace
#self.df.sort_index(inplace=True)
return self.df
def add_row(self, label, row_data, columns="")
Add a row with data. If any new keys are present in row_data dictionary, that column will be added to the dataframe. This is done inplace
3.53629
3.499171
1.010608
# add append option later
df = pd.DataFrame(data)
name, dtype = self.get_singular_and_plural_dtype(self.dtype)
if name in df.columns:
    df.index = df[name]
df.index.name = name + " name"
self.df = df
def add_data(self, data)
Add df to a MagicDataFrame using a data list.

Parameters
----------
data : list of dicts
    data list with format [{'key1': 'val1', ...}, {'key1': 'val2', ...}, ...]
dtype : str
    MagIC table type
10.507619
13.483824
0.779276
col_labels = self.df.columns
blank_item = pd.Series({}, index=col_labels, name=label)
# use .loc to add in place (append won't do that)
self.df.loc[blank_item.name] = blank_item
return self.df
def add_blank_row(self, label)
Add a blank row with only an index value to self.df. This is done inplace.
6.161639
5.352903
1.151084
# drop the row at numeric position ind
self.df = pd.concat([self.df[:ind], self.df[ind+1:]], sort=True)
return self.df
def delete_row(self, ind)
remove self.df row at ind inplace
3.148049
2.826414
1.113796
# add a temporary numeric index
self.df['num'] = list(range(len(self.df)))
df_data = self.df
# delete all records that meet condition
if len(df_data[condition]) > 0:
    # we have one or more records to delete
    inds = df_data[condition]['num']  # all rows where condition is True
    for ind in inds[::-1]:
        df_data = self.delete_row(ind)
        if info_str:
            print("-I- Deleting {}. ".format(info_str), end=' ')
            print('deleting row {}'.format(str(ind)))
# sort so that all rows for an item are together
df_data.sort_index(inplace=True)
# redo temporary index
df_data['num'] = list(range(len(df_data)))
self.df = df_data
return df_data
def delete_rows(self, condition, info_str=None)
Delete all rows where condition == True, inplace.

Parameters
----------
condition : pandas DataFrame indexer
    all self.df rows that meet this condition will be deleted
info_str : str
    description of the kind of rows to be deleted,
    e.g. "specimen rows with blank method codes"

Returns
--------
df_data : pandas DataFrame
    updated self.df
4.728871
4.604872
1.026928
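Usage sketch; the condition is an ordinary boolean indexer over self.df:

# magic_df is a hypothetical MagicDataFrame
cond = magic_df.df['method_codes'].isnull()
magic_df.delete_rows(cond, info_str="rows with blank method codes")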
# ignore citations if they just say 'This study'
if 'citations' in self.df.columns:
    if list(self.df['citations'].unique()) == ['This study']:
        ignore_cols = ignore_cols + ('citations',)
drop_cols = self.df.columns.difference(ignore_cols)
self.df.dropna(axis='index', subset=drop_cols, how='all', inplace=True)
return self.df
def drop_stub_rows(self, ignore_cols=('specimen', 'sample', 'software_packages', 'num'))
Drop self.df rows that have only null values,
ignoring certain columns.

Parameters
----------
ignore_cols : list-like
    list of column names to ignore

Returns
---------
self.df : pandas DataFrame
3.194098
3.75672
0.850236
# keep any row with a unique index
unique_index = self.df.index.unique()
cond1 = ~self.df.index.duplicated(keep=False)
# or with actual data
ignore_cols = [col for col in ignore_cols if col in self.df.columns]
relevant_df = self.df.drop(ignore_cols, axis=1)
cond2 = relevant_df.notnull().any(axis=1)
orig_len = len(self.df)
new_df = self.df[cond1 | cond2]
# make sure we haven't lost anything important
if any(unique_index.difference(new_df.index.unique())):
    cond1 = ~self.df.index.duplicated(keep="first")
    self.df = self.df[cond1 | cond2]
else:
    self.df = new_df
end_len = len(self.df)
removed = orig_len - end_len
if removed:
    print('-I- Removed {} redundant records from {} table'.format(removed, self.dtype))
return self.df
def drop_duplicate_rows(self, ignore_cols=['specimen', 'sample'])
Drop self.df rows that have only null values,
ignoring certain columns, BUT only if those rows
do not have a unique index.
Different from drop_stub_rows because it only drops
empty rows if there is another row with that index.

Parameters
----------
ignore_cols : list-like
    list of column names to ignore

Returns
----------
self.df : pandas DataFrame
3.609466
3.51989
1.025448
# add numeric index column temporarily
self.df['num'] = list(range(len(self.df)))
df_data = self.df
condition2 = (df_data.index == name)
# edit first of existing data that meets condition
if len(df_data[condition & condition2]) > 0:
    # we have one or more records to update or delete;
    # list all rows where condition is true and index == name
    inds = df_data[condition & condition2]['num']
    existing_data = dict(df_data.iloc[inds.iloc[0]])  # get first record of existing_data from dataframe
    existing_data.update(new_data)  # update existing data with new interpretations
    # update row
    self.update_row(inds.iloc[0], existing_data)
    # now remove all the remaining records of same condition
    if len(inds) > 1:
        for ind in inds[1:]:
            print("deleting redundant records for:", name)
            df_data = self.delete_row(ind)
else:
    if update_only:
        print("no record found for that condition, not updating ", name)
    else:
        print('no record found - creating new one for ', name)
        # add new row
        df_data = self.add_row(name, new_data)
# sort so that all rows for an item are together
df_data.sort_index(inplace=True)
# redo temporary index
df_data['num'] = list(range(len(df_data)))
self.df = df_data
return df_data
def update_record(self, name, new_data, condition, update_only=False, debug=False)
Find the first row in self.df with index == name and condition == True. Update that record with new_data, then delete any additional records where index == name and condition == True. Change is inplace
4.491676
4.207624
1.067509
cols = list(cols)
for col in cols:
    if col not in self.df.columns:
        self.df[col] = np.nan
short_df = self.df[cols]
# horrible, bizarre hack to test for pandas malfunction
tester = short_df.groupby(short_df.index, sort=False).fillna(method='ffill')
if not_null(tester):
    # group by index name, then front-fill and back-fill each group
    short_df = short_df.groupby(short_df.index, sort=False).fillna(method='ffill').groupby(short_df.index, sort=False).fillna(method='bfill')
else:
    print('-W- Was not able to front/back fill table {} with these columns: {}'.format(self.dtype, ', '.join(cols)))
if inplace:
    self.df[cols] = short_df[cols]
    return self.df
return short_df
def front_and_backfill(self, cols, inplace=True)
Group dataframe by index name, then replace null values
in selected columns with front/backfilled values if available.
Changes self.df inplace.

Parameters
----------
self : MagicDataFrame
cols : array-like
    list of column names

Returns
---------
self.df
3.835676
3.941434
0.973168
# get the group for each column
cols = self.df.columns
groups = list(map(lambda x: self.data_model.get_group_for_col(self.dtype, x), cols))
sorted_cols = cols.groupby(groups)
ordered_cols = []
# put names first
try:
    names = sorted_cols.pop('Names')
except KeyError:
    names = []
ordered_cols.extend(list(names))
# remove ungrouped columns
no_group = []
if '' in sorted_cols:
    no_group = sorted_cols.pop('')
# flatten list of columns
for k in sorted(sorted_cols):
    ordered_cols.extend(sorted(sorted_cols[k]))
# add back in ungrouped columns
ordered_cols.extend(no_group)
# put name first
try:
    if self.name in ordered_cols:
        ordered_cols.remove(self.name)
        ordered_cols[:0] = [self.name]
except AttributeError:
    pass
self.df = self.df[ordered_cols]
return self.df
def sort_dataframe_cols(self)
Sort self.df so that self.name is the first column, and the rest of the columns are sorted by group.
3.282994
3.036601
1.081141
for col in col_list:
    if col in self.df.columns:
        if not all([is_null(val, False) for val in self.df[col]]):
            return col
def find_filled_col(self, col_list)
Return the first col_name from the list that is both:
a. present in self.df.columns, and
b. has at least one non-null value in self.df[col_name].

Parameters
----------
self : MagicDataFrame
col_list : iterable
    list of columns to check

Returns
----------
col_name : str
4.128403
3.875601
1.065229
if isinstance(df, type(None)):
    df = self.df
# replace np.nan / None with ""
df = df.where(df.notnull(), "")
# stringify everything
df = df.astype(str)
if lst_or_dict == "lst":
    return list(df.T.apply(dict))
else:
    return {str(i[df.index.name.split(' ')[0]]): dict(i) for i in list(df.T.apply(dict))}
def convert_to_pmag_data_list(self, lst_or_dict="lst", df=None)
Take MagicDataFrame and turn it into a list of dictionaries.
This will have the same format as reading in a 2.5 file
with pmag.magic_read(), i.e.:

if "lst":
    [{"sample": "samp_name", "azimuth": 12, ...}, {...}]
if "dict":
    {"samp_name": {"azimuth": 12, ...}, "samp_name2": {...}, ...}

NOTE: "dict" is not recommended with 3.0, as one sample can have
many rows, which means that dictionary items can be overwritten.
4.31919
4.61182
0.936548
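Usage sketch; "lst" is the safe choice for 3.0-style tables, where one item can span many rows:

# sites_container is a hypothetical MagicDataFrame
recs = sites_container.convert_to_pmag_data_list("lst")
# [{'site': 'a1', 'lat': '12.3', ...}, ...]  -- every value is a string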
# if a slice is provided, use it
if any(df_slice):
    df_slice = df_slice
# if given index_names, grab a slice using fancy indexing
elif index_names:
    df_slice = self.df.loc[index_names]
# otherwise, use the full DataFrame
else:
    df_slice = self.df
# if the slice is empty, return ""
if len(df_slice) == 0:
    return ""
# if the column name isn't present in the slice, return ""
if col_name not in df_slice.columns:
    return ""
# otherwise, return the first value from that column
first_val = list(df_slice[col_name].dropna())
if any(first_val):
    return first_val[0]
else:
    return ""
def get_name(self, col_name, df_slice="", index_names="")
Takes in a column name, and either a DataFrame slice or a list of index_names to slice self.df using fancy indexing. Then return the value for that column in the relevant slice. (Assumes that all values for column will be the same in the chosen slice, so return the first one.)
2.761145
2.37894
1.160662
tilt_corr = int(tilt_corr)
if isinstance(df_slice, str):
    if df_slice.lower() == "all":
        # use entire DataFrame
        df_slice = self.df
elif do_index:
    # use fancy indexing (but note this will give duplicates)
    df_slice = self.df.loc[item_names]
elif not do_index:
    # otherwise use the provided slice
    df_slice = df_slice
# once you have the slice, fix up the data;
# tilt correction must match
if not ignore_tilt:
    if tilt_corr != 0:
        df_slice = df_slice[df_slice['dir_tilt_correction'] == tilt_corr]
    else:
        # if geographic ("0"),
        # use records with no tilt_corr and assume geographic
        cond1 = df_slice['dir_tilt_correction'].isnull()
        cond2 = df_slice['dir_tilt_correction'] == tilt_corr
        df_slice = df_slice[cond1 | cond2]
# exclude data with unwanted codes
if excl:
    for ex in excl:
        df_slice = self.get_records_for_code(ex, incl=False, use_slice=True, sli=df_slice)
# require both dec and inc to be present
df_slice = df_slice[df_slice['dir_inc'].notnull() & df_slice['dir_dec'].notnull()]
# possible add in:
# split out di_block from this study from di_block from other studies (in citations column)
# previously just used "This study", but it is no longer required
#if 'citations' in df_slice.columns:
#    df_slice = df_slice[df_slice['citations'].str.contains("This study")]
# convert values into DIblock format
di_block = [[float(row['dir_dec']), float(row['dir_inc'])] for ind, row in df_slice.iterrows()]
return di_block
def get_di_block(self, df_slice=None, do_index=False, item_names=None, tilt_corr='100', excl=None, ignore_tilt=False)
Input either a DataFrame slice, or do_index=True plus a list of index_names.
Optional arguments:
    tilt_corr : tilt correction to use, default '100'
    excl : list of method codes to exclude
    ignore_tilt : skip the tilt-correction filter
Output dec/inc pairs from the slice in this format:
[[dec1, inc1], [dec2, inc2], ...].
Not inplace.
4.582019
4.530319
1.011412
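Usage sketch with hypothetical site names; tilt_corr is passed as a string, matching the signature default:

# sites_container is a hypothetical MagicDataFrame of sites
di_block = sites_container.get_di_block(do_index=True,
                                        item_names=['site1', 'site2'],
                                        tilt_corr='100')
# [[350.1, 58.2], [12.4, 61.0], ...]  -- dec/inc pairs, e.g. for Fisher statistics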