code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
|---|---|---|---|---|---|
fmt, plot = 'svg', 0
if '-h' in sys.argv: # check if help is needed
    print(main.__doc__)
    sys.exit() # graceful quit
if '-sav' in sys.argv: plot = 1
if '-fmt' in sys.argv:
    ind = sys.argv.index('-fmt')
    fmt = sys.argv[ind+1]
if '-f' in sys.argv: # ask for filename
    ind = sys.argv.index('-f')
    file = sys.argv[ind+1]
    with open(file, 'r') as f: # close the file once it has been read
        data = f.readlines()
else:
    # no -f given: read from standard input (matches INPUT FORMAT docs);
    # previously `data` was undefined on this path and raised NameError
    data = sys.stdin.readlines()
X = [] # set up list for data
for line in data: # read in the data
    rec = line.split() # split each line on space to get records
    X.append(float(rec[0])) # append data to X
#
QQ = {'qq': 1}
pmagplotlib.plot_init(QQ['qq'], 5, 5)
pmagplotlib.plot_qq_norm(QQ['qq'], X, 'Q-Q Plot') # make plot
if plot == 0:
    pmagplotlib.draw_figs(QQ)
files = {}
for key in list(QQ.keys()):
    files[key] = key + '.' + fmt
if pmagplotlib.isServer:
    black = '#000000'
    purple = '#800080'
    titles = {}
    titles['eq'] = 'Q-Q Plot'
    # bug fix: previously passed undefined name EQ (NameError);
    # the figure dict in this script is QQ
    QQ = pmagplotlib.add_borders(QQ, titles, black, purple)
    pmagplotlib.save_plots(QQ, files)
elif plot == 0:
    ans = input(" S[a]ve to save plot, [q]uit without saving: ")
    if ans == "a":
        pmagplotlib.save_plots(QQ, files)
else:
    pmagplotlib.save_plots(QQ, files)
|
def main()
|
NAME
qqplot.py
DESCRIPTION
makes qq plot of input data against a Normal distribution.
INPUT FORMAT
takes real numbers in single column
SYNTAX
qqplot.py [-h][-i][-f FILE]
OPTIONS
-f FILE, specify file on command line
-fmt [png,svg,jpg,eps] set plot output format [default is svg]
-sav saves and quits
OUTPUT
calculates the K-S D and the D expected for a normal distribution
when D<Dc, distribution is normal (at 95% level of confidence).
| 3.695779
| 3.633422
| 1.017162
|
keycode = event.GetKeyCode()
meta_down = event.MetaDown() or event.GetCmdDown()
if keycode == 86 and meta_down:
# treat it as if it were a wx.EVT_TEXT_SIZE
self.do_fit(event)
|
def on_key_down(self, event)
|
If user does command v,
re-size window in case pasting has changed the content size.
| 6.863647
| 5.940163
| 1.155464
|
#self.grid.ShowScrollbars(wx.SHOW_SB_NEVER, wx.SHOW_SB_NEVER)
if event:
event.Skip()
self.main_sizer.Fit(self)
disp_size = wx.GetDisplaySize()
actual_size = self.GetSize()
# if there isn't enough room to display new content
# resize the frame
if disp_size[1] - 75 < actual_size[1]:
self.SetSize((actual_size[0], disp_size[1] * .95))
# make sure you adhere to a minimum size
if min_size:
actual_size = self.GetSize()
larger_width = max([actual_size[0], min_size[0]])
larger_height = max([actual_size[1], min_size[1]])
if larger_width > actual_size[0] or larger_height > actual_size[1]:
self.SetSize((larger_width, larger_height))
self.Centre()
|
def do_fit(self, event, min_size=None)
|
Re-fit the window to the size of the content.
| 2.825481
| 2.777517
| 1.017269
|
# if mode == 'open', show no matter what.
# if mode == 'close', close. otherwise, change state
btn = self.toggle_help_btn
shown = self.help_msg_boxsizer.GetStaticBox().IsShown()
# if mode is specified, do that mode
if mode == 'open':
self.help_msg_boxsizer.ShowItems(True)
btn.SetLabel('Hide help')
elif mode == 'close':
self.help_msg_boxsizer.ShowItems(False)
btn.SetLabel('Show help')
# otherwise, simply toggle states
else:
if shown:
self.help_msg_boxsizer.ShowItems(False)
btn.SetLabel('Show help')
else:
self.help_msg_boxsizer.ShowItems(True)
btn.SetLabel('Hide help')
self.do_fit(None)
|
def toggle_help(self, event, mode=None)
|
Show/hide help message on help button click.
| 2.999309
| 2.857282
| 1.049707
|
btn = event.GetEventObject()
if btn.Label == 'Show method codes':
self.code_msg_boxsizer.ShowItems(True)
btn.SetLabel('Hide method codes')
else:
self.code_msg_boxsizer.ShowItems(False)
btn.SetLabel('Show method codes')
self.do_fit(None)
|
def toggle_codes(self, event)
|
Show/hide method code explanation widget on button click
| 3.354722
| 2.891415
| 1.160235
|
if event:
col = event.GetCol()
if not col:
return
label = self.grid.GetColLabelValue(col)
if '**' in label:
label = label.strip('**')
elif '^^' in label:
label = label.strip('^^')
if label in self.reqd_headers:
pw.simple_warning("That header is required, and cannot be removed")
return False
else:
print('That header is not required:', label)
# remove column from wxPython grid
self.grid.remove_col(col)
# remove column from DataFrame if present
if self.grid_type in self.contribution.tables:
if label in self.contribution.tables[self.grid_type].df.columns:
del self.contribution.tables[self.grid_type].df[label]
# causes resize on each column header delete
# can leave this out if we want.....
self.main_sizer.Fit(self)
|
def remove_col_label(self, event=None, col=None)
|
check to see if column is required
if it is not, delete it from grid
| 5.201548
| 4.889103
| 1.063906
|
col_labels = self.grid.col_labels
dia = pw.ChooseOne(self, yes="Add single columns", no="Add groups")
result1 = dia.ShowModal()
if result1 == wx.ID_CANCEL:
return
elif result1 == wx.ID_YES:
items = sorted([col_name for col_name in self.dm.index if col_name not in col_labels])
dia = pw.HeaderDialog(self, 'columns to add',
items1=list(items), groups=[])
dia.Centre()
result2 = dia.ShowModal()
else:
groups = self.dm['group'].unique()
dia = pw.HeaderDialog(self, 'groups to add',
items1=list(groups), groups=True)
dia.Centre()
result2 = dia.ShowModal()
new_headers = []
if result2 == 5100:
new_headers = dia.text_list
# if there is nothing to add, quit
if not new_headers:
return
if result1 == wx.ID_YES:
# add individual headers
errors = self.add_new_grid_headers(new_headers)
else:
# add header groups
errors = self.add_new_header_groups(new_headers)
if errors:
errors_str = ', '.join(errors)
pw.simple_warning('You are already using the following headers: {}\nSo they will not be added'.format(errors_str))
# problem: if widgets above the grid are too wide,
# the grid does not re-size when adding columns
# awkward solution (causes flashing):
if self.grid.GetWindowStyle() != wx.DOUBLE_BORDER:
self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
self.main_sizer.Fit(self)
self.grid.SetWindowStyle(wx.NO_BORDER)
self.Centre()
self.main_sizer.Fit(self)
#
self.grid.changes = set(range(self.grid.GetNumberRows()))
dia.Destroy()
|
def on_add_cols(self, event)
|
Show simple dialog that allows user to add a new column name
| 4.433463
| 4.457085
| 0.9947
|
already_present = []
for group in groups:
col_names = self.dm[self.dm['group'] == group].index
for col in col_names:
if col not in self.grid.col_labels:
col_number = self.grid.add_col(col)
# add to appropriate headers list
# add drop down menus for user-added column
if col in self.contribution.vocab.vocabularies:
self.drop_down_menu.add_drop_down(col_number, col)
elif col in self.contribution.vocab.suggested:
self.drop_down_menu.add_drop_down(col_number, col)
elif col in ['specimen', 'sample', 'site', 'location',
'specimens', 'samples', 'sites']:
self.drop_down_menu.add_drop_down(col_number, col)
elif col == 'experiments':
self.drop_down_menu.add_drop_down(col_number, col)
if col == "method_codes":
self.drop_down_menu.add_method_drop_down(col_number, col)
else:
already_present.append(col)
return already_present
|
def add_new_header_groups(self, groups)
|
compile list of all headers belonging to all specified groups
eliminate all headers that are already included
add any req'd drop-down menus
return errors
| 3.42226
| 3.25112
| 1.05264
|
already_present = []
for name in new_headers:
if name:
if name not in self.grid.col_labels:
col_number = self.grid.add_col(name)
# add to appropriate headers list
# add drop down menus for user-added column
if name in self.contribution.vocab.vocabularies:
self.drop_down_menu.add_drop_down(col_number, name)
elif name in self.contribution.vocab.suggested:
self.drop_down_menu.add_drop_down(col_number, name)
elif name in ['specimen', 'sample', 'site',
'specimens', 'samples', 'sites']:
self.drop_down_menu.add_drop_down(col_number, name)
elif name == 'experiments':
self.drop_down_menu.add_drop_down(col_number, name)
if name == "method_codes":
self.drop_down_menu.add_method_drop_down(col_number, name)
else:
already_present.append(name)
#pw.simple_warning('You are already using column header: {}'.format(name))
return already_present
|
def add_new_grid_headers(self, new_headers)
|
Add in all user-added headers.
If those new headers depend on other headers,
add the other headers too.
| 3.575727
| 3.438666
| 1.039859
|
num_rows = self.rows_spin_ctrl.GetValue()
#last_row = self.grid.GetNumberRows()
for row in range(num_rows):
self.grid.add_row()
#if not self.grid.changes:
# self.grid.changes = set([])
#self.grid.changes.add(last_row)
#last_row += 1
self.main_sizer.Fit(self)
|
def on_add_rows(self, event)
|
add rows to grid
| 3.522084
| 3.290507
| 1.070377
|
text = "Are you sure? If you select delete you won't be able to retrieve these rows..."
dia = pw.ChooseOne(self, "Yes, delete rows", "Leave rows for now", text)
dia.Centre()
result = dia.ShowModal()
if result == wx.ID_NO:
return
default = (255, 255, 255, 255)
if row_num == -1:
# unhighlight any selected rows:
for row in self.selected_rows:
attr = wx.grid.GridCellAttr()
attr.SetBackgroundColour(default)
self.grid.SetRowAttr(row, attr)
row_num = self.grid.GetNumberRows() - 1
self.deleteRowButton.Disable()
self.selected_rows = {row_num}
# remove row(s) from the contribution
df = self.contribution.tables[self.grid_type].df
row_nums = list(range(len(df)))
df = df.iloc[[i for i in row_nums if i not in self.selected_rows]]
self.contribution.tables[self.grid_type].df = df
# now remove row(s) from grid
# delete rows, adjusting the row # appropriately as you delete
for num, row in enumerate(self.selected_rows):
row -= num
if row < 0:
row = 0
self.grid.remove_row(row)
attr = wx.grid.GridCellAttr()
attr.SetBackgroundColour(default)
self.grid.SetRowAttr(row, attr)
# reset the grid
self.selected_rows = set()
self.deleteRowButton.Disable()
self.grid.Refresh()
self.main_sizer.Fit(self)
|
def on_remove_row(self, event, row_num=-1)
|
Remove specified grid row.
If no row number is given, remove the last row.
| 3.450295
| 3.470762
| 0.994103
|
if event.Col == -1 and event.Row == -1:
pass
if event.Row < 0:
if self.remove_cols_mode:
self.remove_col_label(event)
else:
self.drop_down_menu.on_label_click(event)
else:
if event.Col < 0 and self.grid_type != 'age':
self.onSelectRow(event)
|
def onLeftClickLabel(self, event)
|
When user clicks on a grid label,
determine if it is a row label or a col label.
Pass along the event to the appropriate function.
(It will either highlight a column for editing all values,
or highlight a row for deletion).
| 5.457751
| 4.836177
| 1.128526
|
if self.grid.changes:
print("-W- Your changes will be overwritten...")
wind = pw.ChooseOne(self, "Import file anyway", "Save grid first",
"-W- Your grid has unsaved changes which will be overwritten if you import a file now...")
wind.Centre()
res = wind.ShowModal()
# save grid first:
if res == wx.ID_NO:
self.onSave(None, alert=True, destroy=False)
# reset self.changes
self.grid.changes = set()
openFileDialog = wx.FileDialog(self, "Open MagIC-format file", self.WD, "",
"MagIC file|*.*", wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
result = openFileDialog.ShowModal()
if result == wx.ID_OK:
# get filename
filename = openFileDialog.GetPath()
# make sure the dtype is correct
f = open(filename)
line = f.readline()
if line.startswith("tab"):
delim, dtype = line.split("\t")
else:
delim, dtype = line.split("")
f.close()
dtype = dtype.strip()
if (dtype != self.grid_type) and (dtype + "s" != self.grid_type):
text = "You are currently editing the {} grid, but you are trying to import a {} file.\nPlease open the {} grid and then re-try this import.".format(self.grid_type, dtype, dtype)
pw.simple_warning(text)
return
# grab old data for concatenation
if self.grid_type in self.contribution.tables:
old_df_container = self.contribution.tables[self.grid_type]
else:
old_df_container = None
old_col_names = self.grid.col_labels
# read in new file and update contribution
df_container = cb.MagicDataFrame(filename, dmodel=self.dm,
columns=old_col_names)
# concatenate if possible
if not isinstance(old_df_container, type(None)):
df_container.df = pd.concat([old_df_container.df, df_container.df],
axis=0, sort=True)
self.contribution.tables[df_container.dtype] = df_container
self.grid_builder = GridBuilder(self.contribution, self.grid_type,
self.panel, parent_type=self.parent_type,
reqd_headers=self.reqd_headers)
# delete old grid
self.grid_box.Hide(0)
self.grid_box.Remove(0)
# create new, updated grid
self.grid = self.grid_builder.make_grid()
self.grid.InitUI()
# add data to new grid
self.grid_builder.add_data_to_grid(self.grid, self.grid_type)
# add new grid to sizer and fit everything
self.grid_box.Add(self.grid, flag=wx.ALL, border=5)
self.main_sizer.Fit(self)
self.Centre()
# add any needed drop-down-menus
self.drop_down_menu = drop_down_menus.Menus(self.grid_type,
self.contribution,
self.grid)
# done!
return
|
def onImport(self, event)
|
Import a MagIC-format file
| 4.224576
| 4.1271
| 1.023619
|
if self.grid.changes:
dlg1 = wx.MessageDialog(self, caption="Message:",
message="Are you sure you want to exit this grid?\nYour changes will not be saved.\n ",
style=wx.OK|wx.CANCEL)
result = dlg1.ShowModal()
if result == wx.ID_OK:
dlg1.Destroy()
self.Destroy()
else:
self.Destroy()
if self.main_frame:
self.main_frame.Show()
self.main_frame.Raise()
|
def onCancelButton(self, event)
|
Quit grid with warning if unsaved changes present
| 3.30761
| 2.824442
| 1.171066
|
# tidy up drop_down menu
if self.drop_down_menu:
self.drop_down_menu.clean_up()
# then save actual data
self.grid_builder.save_grid_data()
if not event and not alert:
return
# then alert user
wx.MessageBox('Saved!', 'Info',
style=wx.OK | wx.ICON_INFORMATION)
if destroy:
self.Destroy()
|
def onSave(self, event, alert=False, destroy=True)
|
Save grid data
| 5.236491
| 4.679697
| 1.118981
|
if self.grid.GetSelectionBlockTopLeft():
#top_left = self.grid.GetSelectionBlockTopLeft()
#bottom_right = self.grid.GetSelectionBlockBottomRight()
# awkward hack to fix wxPhoenix memory problem, (Github issue #221)
bottom_right = eval(repr(self.grid.GetSelectionBlockBottomRight()).replace("GridCellCoordsArray: ", "").replace("GridCellCoords", ""))
top_left = eval(repr(self.grid.GetSelectionBlockTopLeft()).replace("GridCellCoordsArray: ", "").replace("GridCellCoords", ""))
#
top_left = top_left[0]
bottom_right = bottom_right[0]
else:
return
# GetSelectionBlock returns (row, col)
min_col = top_left[1]
max_col = bottom_right[1]
min_row = top_left[0]
max_row = bottom_right[0]
self.df_slice = self.contribution.tables[self.grid_type].df.iloc[min_row:max_row+1, min_col:max_col+1]
|
def onDragSelection(self, event)
|
Set self.df_slice based on user's selection
| 3.253
| 3.007522
| 1.081621
|
if event.CmdDown() or event.ControlDown():
if event.GetKeyCode() == 67:
self.onCopySelection(None)
|
def onKey(self, event)
|
Copy selection if control down and 'c'
| 5.228439
| 3.644622
| 1.434563
|
# do clean up here!!!
if self.drop_down_menu:
self.drop_down_menu.clean_up()
# save all grid data
self.grid_builder.save_grid_data()
df = self.contribution.tables[self.grid_type].df
# write df to clipboard for pasting
# header arg determines whether columns are taken
# index arg determines whether index is taken
pd.DataFrame.to_clipboard(df, header=False, index=False)
print('-I- You have copied all cells! You may paste them into a text document or spreadsheet using Command v.')
|
def onSelectAll(self, event)
|
Selects full grid and copies it to the Clipboard
| 11.362897
| 10.514751
| 1.080662
|
if self.df_slice is not None:
pd.DataFrame.to_clipboard(self.df_slice, header=False, index=False)
self.grid.ClearSelection()
self.df_slice = None
print('-I- You have copied the selected cells. You may paste them into a text document or spreadsheet using Command v.')
else:
print('-W- No cells were copied! You must highlight a selection cells before hitting the copy button. You can do this by clicking and dragging, or by using the Shift key and click.')
|
def onCopySelection(self, event)
|
Copies self.df_slice to the Clipboard if slice exists
| 9.071151
| 7.221694
| 1.256097
|
changes = None
# if there is a MagicDataFrame, extract data from it
if isinstance(self.magic_dataframe, cb.MagicDataFrame):
# get columns and reorder slightly
col_labels = list(self.magic_dataframe.df.columns)
for ex_col in self.exclude_cols:
col_labels.pop(ex_col)
if self.grid_type == 'ages':
levels = ['specimen', 'sample', 'site', 'location']
for label in levels[:]:
if label in col_labels:
col_labels.remove(label)
else:
levels.remove(label)
col_labels[:0] = levels
else:
if self.parent_type:
if self.parent_type[:-1] in col_labels:
col_labels.remove(self.parent_type[:-1])
col_labels[:0] = [self.parent_type[:-1]]
if self.grid_type[:-1] in col_labels:
col_labels.remove(self.grid_type[:-1])
col_labels[:0] = (self.grid_type[:-1],)
for col in col_labels:
if col not in self.magic_dataframe.df.columns:
self.magic_dataframe.df[col] = None
self.magic_dataframe.df = self.magic_dataframe.df[col_labels]
self.magic_dataframe.sort_dataframe_cols()
col_labels = list(self.magic_dataframe.df.columns)
row_labels = self.magic_dataframe.df.index
# make sure minimum defaults are present
for header in self.reqd_headers:
if header not in col_labels:
changes = set([1])
col_labels.append(header)
# if there is no pre-existing MagicDataFrame,
# make a blank grid with do some defaults:
else:
# default headers
#col_labels = list(self.data_model.get_headers(self.grid_type, 'Names'))
#col_labels[:0] = self.reqd_headers
col_labels = list(self.reqd_headers)
if self.grid_type in ['specimens', 'samples', 'sites']:
col_labels.extend(['age', 'age_sigma'])
## use the following line if you want sorted column labels:
#col_labels = sorted(set(col_labels))
# defaults are different for ages
if self.grid_type == 'ages':
levels = ['specimen', 'sample', 'site', 'location']
for label in levels:
if label in col_labels:
col_labels.remove(label)
col_labels[:0] = levels
else:
if self.parent_type:
col_labels.remove(self.parent_type[:-1])
col_labels[:0] = [self.parent_type[:-1]]
col_labels.remove(self.grid_type[:-1])
col_labels[:0] = [self.grid_type[:-1]]
# make sure all reqd cols are in magic_dataframe
for col in col_labels:
if col not in self.magic_dataframe.df.columns:
self.magic_dataframe.df[col] = None
# make the grid
if not self.huge:
grid = magic_grid.MagicGrid(parent=self.panel, name=self.grid_type,
row_labels=[], col_labels=col_labels)
# make the huge grid
else:
row_labels = self.magic_dataframe.df.index
grid = magic_grid.HugeMagicGrid(parent=self.panel, name=self.grid_type,
row_labels=row_labels, col_labels=col_labels)
grid.do_event_bindings()
grid.changes = changes
self.grid = grid
return grid
|
def make_grid(self)
|
return grid
| 2.816108
| 2.812181
| 1.001397
|
if isinstance(self.magic_dataframe, cb.MagicDataFrame):
for col in ['age', 'age_unit']:
if col not in self.grid.col_labels:
self.grid.add_col(col)
for level in ['locations', 'sites', 'samples', 'specimens']:
if level in self.contribution.tables:
if level[:-1] not in self.grid.col_labels:
self.grid.add_col(level[:-1])
|
def add_age_defaults(self)
|
Add columns as needed:
age, age_unit, specimen, sample, site, location.
| 5.023109
| 3.827202
| 1.312475
|
empty = True
# df IS empty if there are no rows
if not any(self.magic_dataframe.df.index):
empty = True
# df is NOT empty if there are at least two rows
elif len(self.grid.row_labels) > 1:
empty = False
# if there is one row, df MIGHT be empty
else:
# check all the non-null values
non_null_vals = [val for val in self.magic_dataframe.df.values[0] if cb.not_null(val, False)]
for val in non_null_vals:
if not isinstance(val, str):
empty = False
break
# if there are any non-default values, grid is not empty
if val.lower() not in ['this study', 'g', 'i']:
empty = False
break
return empty
|
def current_grid_empty(self)
|
Check to see if grid is empty except for default values
| 5.083438
| 4.912947
| 1.034702
|
if not self.grid.changes:
print('-I- No changes to save')
return
starred_cols = self.grid.remove_starred_labels()
# locks in value in cell currently edited
self.grid.SaveEditControlValue()
# changes is a dict with key values == row number
if self.grid.changes:
new_data = self.grid.save_items()
# HugeMagicGrid will return a pandas dataframe
if self.huge:
self.magic_dataframe.df = new_data
# MagicGrid will return a dictionary with
# new/updated data that must be incorporated
# into the dataframe
else:
for key in new_data:
data = new_data[key]
# update the row if it exists already,
# otherwise create a new row
updated = self.magic_dataframe.update_row(key, data)
if not isinstance(updated, pd.DataFrame):
if self.grid_type == 'ages':
label = key
else:
label = self.grid_type[:-1]
self.magic_dataframe.add_row(label, data,
self.grid.col_labels)
# update the contribution with the new dataframe
self.contribution.tables[self.grid_type] = self.magic_dataframe
# *** probably don't actually want to write to file, here (but maybe)
self.contribution.write_table_to_file(self.grid_type)
#self.magic_dataframe.write_magic_file("{}.txt".format(self.grid_type),
# self.contribution.directory)
# propagate age info if age table was edited
if self.grid_type == 'ages':
self.contribution.propagate_ages()
return
|
def save_grid_data(self)
|
Save grid data in the data object
| 6.876109
| 6.84423
| 1.004658
|
defaults = {'result_quality': 'g',
'result_type': 'i',
'orientation_quality': 'g',
'citations': 'This study'}
for col_name in defaults:
if col_name in self.grid.col_labels:
# try to grab existing values from contribution
if self.grid_type in self.contribution.tables:
if col_name in self.contribution.tables[self.grid_type].df.columns:
old_vals = self.contribution.tables[self.grid_type].df[col_name]
# if column is completely filled in, skip
if all([cb.not_null(val, False) for val in old_vals]):
continue
new_val = defaults[col_name]
vals = list(np.where((old_vals.notnull()) & (old_vals != ''), old_vals, new_val))
else:
vals = [defaults[col_name]] * self.grid.GetNumberRows()
# if values not available in contribution, use defaults
else:
vals = [defaults[col_name]] * self.grid.GetNumberRows()
# if col_name not present in grid, skip
else:
vals = None
#
if vals:
print('-I- Updating column "{}" with default values'.format(col_name))
if self.huge:
self.grid.SetColumnValues(col_name, vals)
else:
col_ind = self.grid.col_labels.index(col_name)
for row, val in enumerate(vals):
self.grid.SetCellValue(row, col_ind, val)
self.grid.changes = set(range(self.grid.GetNumberRows()))
|
def fill_defaults(self)
|
Fill in self.grid with default values in certain columns.
Only fill in new values if grid is missing those values.
| 3.754047
| 3.52992
| 1.063494
|
specimens, samples, sites, locations = "", "", "", ""
children = {'specimen': specimens, 'sample': samples,
'site': sites, 'location': locations}
for dtype in children:
header_name = 'er_' + dtype + '_names'
if result_data[header_name]:
children[dtype] = result_data[header_name].split(":")
# make sure there are no extra spaces in names
children[dtype] = [child.strip() for child in children[dtype]]
return children['specimen'], children['sample'], children['site'], children['location']
|
def get_result_children(self, result_data)
|
takes in dict in form of {'er_specimen_names': 'name1:name2:name3'}
and so forth.
returns lists of specimens, samples, sites, and locations
| 3.542925
| 2.73432
| 1.295725
|
# extract arguments from sys.argv
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
dir_path = pmag.get_named_arg("-WD", default_val=".")
input_dir_path = pmag.get_named_arg('-ID', '')
if not input_dir_path:
input_dir_path = dir_path
in_file = pmag.get_named_arg("-f", default_val="sites.txt")
in_file = pmag.resolve_file_name(in_file, input_dir_path)
if "-ID" not in sys.argv:
input_dir_path = os.path.split(in_file)[0]
plot_by = pmag.get_named_arg("-obj", default_val="all").lower()
spec_file = pmag.get_named_arg("-fsp", default_val="specimens.txt")
samp_file = pmag.get_named_arg("-fsa", default_val="samples.txt")
site_file = pmag.get_named_arg("-fsi", default_val="sites.txt")
loc_file = pmag.get_named_arg("-flo", default_val="locations.txt")
ignore_tilt = False
if '-no-tilt' in sys.argv:
ignore_tilt = True
color_map = "coolwarm"
if '-c' in sys.argv:
contour = True
if '-cm' in sys.argv:
ind = sys.argv.index('-cm')
color_map = sys.argv[ind+1]
else:
color_map = 'coolwarm'
else:
contour = False
interactive = True
save_plots = False
if '-sav' in sys.argv:
save_plots = True
interactive = False
plot_ell = False
if '-ell' in sys.argv:
plot_ell = pmag.get_named_arg("-ell", "F")
crd = pmag.get_named_arg("-crd", default_val="g")
fmt = pmag.get_named_arg("-fmt", "svg")
ipmag.eqarea_magic(in_file, dir_path, input_dir_path, spec_file, samp_file, site_file, loc_file,
plot_by, crd, ignore_tilt, save_plots, fmt, contour, color_map,
plot_ell, "all", interactive)
|
def main()
|
NAME
eqarea_magic.py
DESCRIPTION
makes equal area projections from declination/inclination data
SYNTAX
eqarea_magic.py [command line options]
INPUT
takes magic formatted sites, samples, specimens, or measurements
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file from magic, default='sites.txt'
supported types=[measurements, specimens, samples, sites]
-fsp FILE: specify specimen file name, (required if you want to plot measurements by sample)
default='specimens.txt'
-fsa FILE: specify sample file name, (required if you want to plot specimens by site)
default='samples.txt'
-fsi FILE: specify site file name, default='sites.txt'
-flo FILE: specify location file name, default='locations.txt'
-obj OBJ: specify level of plot [all, sit, sam, spc], default is all
-crd [s,g,t]: specify coordinate system, [s]pecimen, [g]eographic, [t]ilt adjusted
default is geographic, unspecified assumed geographic
-fmt [svg,png,jpg] format for output plots
-ell [F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Boostrap eigenvectors
-c plot as colour contour
-cm CM use color map CM [default is coolwarm]
-sav save plot and quit quietly
-no-tilt data are unoriented, allows plotting of measurement dec/inc
NOTE
all: entire file; sit: site; sam: sample; spc: specimen
| 2.539676
| 1.972078
| 1.287817
|
#self.grid.ShowScrollbars(wx.SHOW_SB_NEVER, wx.SHOW_SB_NEVER)
if event:
    event.Skip()
self.main_sizer.Fit(self)
disp_size = wx.GetDisplaySize()
actual_size = self.GetSize()
# (removed unused local `rows = self.grid.GetNumberRows()`)
# if there isn't enough room to display new content
# resize the frame
if disp_size[1] - 75 < actual_size[1]:
    self.SetSize((actual_size[0], disp_size[1] * .95))
self.Centre()
|
def do_fit(self, event)
|
Re-fit the window to the size of the content.
| 3.974355
| 3.790308
| 1.048557
|
if self.grid.changes:
self.onSave(None)
label = event.GetEventObject().Label
self.er_magic.age_type = label
self.grid.Destroy()
# normally grid_frame is reset to None when grid is destroyed
# in this case we are simply replacing the grid, so we need to
# reset grid_frame
self.parent.Parent.grid_frame = self
self.parent.Parent.Hide()
self.grid = self.grid_builder.make_grid()
self.grid.InitUI()
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
self.grid_builder.add_data_to_grid(self.grid, self.grid_type)
self.grid_builder.add_age_data_to_grid()
self.drop_down_menu = drop_down_menus.Menus(self.grid_type, self, self.grid, None)
self.grid.SetColLabelValue(0, 'er_' + label + '_name')
self.grid.size_grid()
self.grid_box.Add(self.grid, flag=wx.ALL, border=5)
self.main_sizer.Fit(self)
if self.parent.Parent.validation_mode:
if 'age' in self.parent.Parent.validation_mode:
self.grid.paint_invalid_cells(self.parent.Parent.warn_dict['age'])
self.grid.ForceRefresh()
# the grid show up if it's the same size as the previous grid
# awkward solution (causes flashing):
if self.grid.Size[0] < 100:
if self.grid.GetWindowStyle() != wx.DOUBLE_BORDER:
self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
self.main_sizer.Fit(self)
self.grid.SetWindowStyle(wx.NO_BORDER)
self.main_sizer.Fit(self)
|
def toggle_ages(self, event)
|
Switch the type of grid between site/sample
(Users may add ages at either level)
| 4.728818
| 4.594127
| 1.029318
|
er_possible_headers = self.grid_headers[self.grid_type]['er'][2]
pmag_possible_headers = self.grid_headers[self.grid_type]['pmag'][2]
er_actual_headers = self.grid_headers[self.grid_type]['er'][0]
pmag_actual_headers = self.grid_headers[self.grid_type]['pmag'][0]
col = event.GetCol()
label = self.grid.GetColLabelValue(col)
if '**' in label:
label = label.strip('**')
if label in self.grid_headers[self.grid_type]['er'][1]:
pw.simple_warning("That header is required, and cannot be removed")
return False
#elif include_pmag and label in self.grid_headers[self.grid_type]['pmag'][1]:
# pw.simple_warning("That header is required, and cannot be removed")
# return False
else:
print('That header is not required:', label)
self.grid.remove_col(col)
#if label in er_possible_headers:
try:
print('removing {} from er_actual_headers'.format(label))
er_actual_headers.remove(label)
except ValueError:
pass
#if label in pmag_possible_headers:
try:
pmag_actual_headers.remove(label)
except ValueError:
pass
# causes resize on each column header delete
# can leave this out if we want.....
self.main_sizer.Fit(self)
|
def remove_col_label(self, event):#, include_pmag=True)
|
check to see if column is required
if it is not, delete it from grid
| 3.02886
| 2.953706
| 1.025444
|
col_labels = self.grid.col_labels
# do not list headers that are already column labels in the grid
er_items = [head for head in self.grid_headers[self.grid_type]['er'][2] if head not in col_labels]
# remove unneeded headers
er_items = builder.remove_list_headers(er_items)
pmag_headers = sorted(list(set(self.grid_headers[self.grid_type]['pmag'][2]).union(self.grid_headers[self.grid_type]['pmag'][1])))
# do not list headers that are already column labels in the grid
# make sure that pmag_specific columns are marked with '++'
to_add = [i + '++' for i in self.er_magic.double if i in pmag_headers and i + '++' not in col_labels]
pmag_headers.extend(to_add)
pmag_items = [head for head in pmag_headers if head not in er_items and head not in col_labels]
# remove unneeded headers
pmag_items = sorted(builder.remove_list_headers(pmag_items))
dia = pw.HeaderDialog(self, 'columns to add', items1=er_items, items2=pmag_items)
dia.Centre()
result = dia.ShowModal()
new_headers = []
if result == 5100:
new_headers = dia.text_list
if not new_headers:
return
errors = self.add_new_grid_headers(new_headers, er_items, pmag_items)
if errors:
errors_str = ', '.join(errors)
pw.simple_warning('You are already using the following headers: {}\nSo they will not be added'.format(errors_str))
# problem: if widgets above the grid are too wide,
# the grid does not re-size when adding columns
# awkward solution (causes flashing):
if self.grid.GetWindowStyle() != wx.DOUBLE_BORDER:
self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
self.main_sizer.Fit(self)
self.grid.SetWindowStyle(wx.NO_BORDER)
self.Centre()
self.main_sizer.Fit(self)
#
self.grid.changes = set(range(self.grid.GetNumberRows()))
dia.Destroy()
|
def on_add_cols(self, event)
|
Show simple dialog that allows user to add a new column name
| 4.546682
| 4.5315
| 1.00335
|
def add_pmag_reqd_headers():
if self.grid_type == 'result':
return []
add_in = []
col_labels = self.grid.col_labels
for reqd_head in self.grid_headers[self.grid_type]['pmag'][1]:
if reqd_head in self.er_magic.double:
if reqd_head + "++" not in col_labels:
add_in.append(reqd_head + "++")
else:
if reqd_head not in col_labels:
add_in.append(reqd_head)
add_in = builder.remove_list_headers(add_in)
return add_in
#
already_present = []
for name in new_headers:
if name:
if name not in self.grid.col_labels:
col_number = self.grid.add_col(name)
# add to appropriate headers list
if name in er_items:
self.grid_headers[self.grid_type]['er'][0].append(str(name))
if name in pmag_items:
name = name.strip('++')
if name not in self.grid_headers[self.grid_type]['pmag'][0]:
self.grid_headers[self.grid_type]['pmag'][0].append(str(name))
# add any required pmag headers that are not in the grid already
for header in add_pmag_reqd_headers():
col_number = self.grid.add_col(header)
# add drop_down_menus for added reqd columns
if header in vocab.possible_vocabularies:
self.drop_down_menu.add_drop_down(col_number, name)
if header in ['magic_method_codes++']:
self.drop_down_menu.add_method_drop_down(col_number, header)
# add drop down menus for user-added column
if name in vocab.possible_vocabularies:
self.drop_down_menu.add_drop_down(col_number, name)
if name in ['magic_method_codes', 'magic_method_codes++']:
self.drop_down_menu.add_method_drop_down(col_number, name)
else:
already_present.append(name)
#pw.simple_warning('You are already using column header: {}'.format(name))
return already_present
|
def add_new_grid_headers(self, new_headers, er_items, pmag_items)
|
Add in all user-added headers.
If those new headers depend on other headers, add the other headers too.
| 3.093278
| 3.044289
| 1.016092
|
# Put the frame into "delete column" mode: a click on a column header
# now deletes that column (handled by the re-bound button / grid events).
# open the help message
self.toggle_help(event=None, mode='open')
# first unselect any selected cols/cells
self.remove_cols_mode = True
self.grid.ClearSelection()
self.remove_cols_button.SetLabel("end delete column mode")
# change button to exit the delete columns mode
self.Unbind(wx.EVT_BUTTON, self.remove_cols_button)
self.Bind(wx.EVT_BUTTON, self.exit_col_remove_mode, self.remove_cols_button)
# then disable all other buttons
for btn in [self.add_cols_button, self.remove_row_button, self.add_many_rows_button]:
    btn.Disable()
# then make some visual changes
self.msg_text.SetLabel("Remove grid columns: click on a column header to delete it. Required headers for {}s may not be deleted.".format(self.grid_type))
self.help_msg_boxsizer.Fit(self.help_msg_boxsizer.GetStaticBox())
self.main_sizer.Fit(self)
# double border is the visual cue that delete mode is active
self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
self.grid_box.GetStaticBox().SetWindowStyle(wx.DOUBLE_BORDER)
self.grid.Refresh()
self.main_sizer.Fit(self) # might not need this one
# mark all rows as changed so the deletion is picked up on save
self.grid.changes = set(range(self.grid.GetNumberRows()))
|
def on_remove_cols(self, event)
|
enter 'remove columns' mode
| 4.807821
| 4.603715
| 1.044335
|
# Delete the selected grid row(s) and the corresponding er_magic objects.
if row_num == -1:
    # -1 means "no specific row": clear highlighting and target the
    # last row of the grid instead.
    default = (255, 255, 255, 255)  # white / unhighlighted
    # unhighlight any selected rows:
    for row in self.selected_rows:
        attr = wx.grid.GridCellAttr()
        attr.SetBackgroundColour(default)
        self.grid.SetRowAttr(row, attr)
    row_num = self.grid.GetNumberRows() - 1
    self.deleteRowButton.Disable()
    self.selected_rows = {row_num}
# dispatch table: er_magic deletion routine for each grid type
function_mapping = {'specimen': self.er_magic.delete_specimen,
                    'sample': self.er_magic.delete_sample,
                    'site': self.er_magic.delete_site,
                    'location': self.er_magic.delete_location,
                    'result': self.er_magic.delete_result}
# column 0 holds the object name for every grid type
names = [self.grid.GetCellValue(row, 0) for row in self.selected_rows]
orphans = []
for name in names:
    if name:
        try:
            row = self.grid.row_labels.index(name)
            function_mapping[self.grid_type](name)
            orphans.extend([name])
        # if user entered a name, then deletes the row before saving,
        # there will be a ValueError
        except ValueError:
            pass
        # NOTE(review): if the lookup above raised ValueError, `row` is
        # either unbound (first iteration) or stale from a previous
        # iteration here -- confirm this is the intended behavior.
        self.grid.remove_row(row)
self.selected_rows = set()
self.deleteRowButton.Disable()
self.grid.Refresh()
self.main_sizer.Fit(self)
|
def on_remove_row(self, event, row_num=-1)
|
Remove specified grid row.
If no row number is given, remove the last row.
| 3.441136
| 3.461353
| 0.994159
|
# Leave "delete column" mode and restore the frame's normal state.
# close help message
self.toggle_help(event=None, mode='close')
# update mode
self.remove_cols_mode = False
# re-enable all buttons
for btn in [self.add_cols_button, self.remove_row_button, self.add_many_rows_button]:
    btn.Enable()
# unbind grid click for deletion
self.Unbind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK)
# undo visual cues
self.grid.SetWindowStyle(wx.DEFAULT)
self.grid_box.GetStaticBox().SetWindowStyle(wx.DEFAULT)
self.msg_text.SetLabel(self.default_msg_text)
self.help_msg_boxsizer.Fit(self.help_msg_boxsizer.GetStaticBox())
self.main_sizer.Fit(self)
# re-bind self.remove_cols_button
self.Bind(wx.EVT_BUTTON, self.on_remove_cols, self.remove_cols_button)
self.remove_cols_button.SetLabel("Remove columns")
|
def exit_col_remove_mode(self, event)
|
go back from 'remove cols' mode to normal
| 4.188779
| 4.060575
| 1.031573
|
# Build the MagicGrid for this grid type, assembling its column headers
# from the er header list and (optionally) the pmag header list.
if incl_pmag and self.grid_type in self.er_magic.incl_pmag_data:
    incl_pmag = True
else:
    incl_pmag = False
er_header = self.grid_headers[self.grid_type]['er'][0]
if incl_pmag:
    pmag_header = self.grid_headers[self.grid_type]['pmag'][0]
else:
    pmag_header = []
# if we need to use '++' to distinguish pmag magic_method_codes from er
if incl_pmag and self.grid_type in ('specimen', 'sample', 'site'):
    for double_header in self.er_magic.double:
        try:
            pmag_header.remove(double_header)
            pmag_header.append(double_header + '++')
        except ValueError:
            # header not present in pmag_header; nothing to rename
            pass
header = sorted(list(set(er_header).union(pmag_header)))
first_headers = []
# pull a few descriptive headers out of the sorted list so they can be
# re-inserted at the front below
for string in ['citation', '{}_class'.format(self.grid_type),
               '{}_lithology'.format(self.grid_type), '{}_type'.format(self.grid_type),
               'site_definition']:
    for head in header[:]:
        if string in head:
            header.remove(head)
            first_headers.append(head)
# the way we work it, each specimen is assigned to a sample
# each sample is assigned to a site
# specimens can not be added en masse to a site object, for example
# this data will be written in
for string in ['er_specimen_names', 'er_sample_names', 'er_site_names']:
    for head in header[:]:
        if string in head:
            header.remove(head)
# do headers for results type grid
if self.grid_type == 'result':
    #header.remove('pmag_result_name')
    header[:0] = ['pmag_result_name', 'er_citation_names', 'er_specimen_names',
                  'er_sample_names', 'er_site_names', 'er_location_names']
elif self.grid_type == 'age':
    # age grids get their canonical first headers from er_magic
    for header_type in self.er_magic.first_age_headers:
        if header_type in header:
            header.remove(header_type)
    lst = ['er_' + self.grid_type + '_name']
    lst.extend(self.er_magic.first_age_headers)
    header[:0] = lst
# do headers for all other data types without parents
elif not self.parent_type:
    lst = ['er_' + self.grid_type + '_name']
    lst.extend(first_headers)
    header[:0] = lst
# do headers for all data types with parents
else:
    lst = ['er_' + self.grid_type + '_name', 'er_' + self.parent_type + '_name']
    lst.extend(first_headers)
    header[:0] = lst
grid = magic_grid.MagicGrid(parent=self.panel, name=self.grid_type,
                            row_labels=[], col_labels=header,
                            double=self.er_magic.double)
grid.do_event_bindings()
self.grid = grid
return grid
|
def make_grid(self, incl_pmag=True)
|
return grid
| 3.426454
| 3.420084
| 1.001863
|
#
# get command line arguments
#
args = sys.argv
if "-h" in args:
    print(main.__doc__)
    sys.exit()
dir_path = pmag.get_named_arg("-WD", ".")
user = pmag.get_named_arg("-usr", "")
labfield = pmag.get_named_arg("-dc", '0.5')
meas_file = pmag.get_named_arg("-F", "measurements.txt")
samp_file = pmag.get_named_arg("-fsa", "samples.txt")
try:
    infile = pmag.get_named_arg("-f", reqd=True)
except pmag.MissingCommandLineArgException:
    print(main.__doc__)
    print("-f is required option")
    sys.exit()
specnum = int(pmag.get_named_arg("-spc", 0))
location = pmag.get_named_arg("-loc", "")
# handle a missing required -spn the same graceful way as -f,
# instead of letting the exception propagate to the user
try:
    specimen_name = pmag.get_named_arg("-spn", reqd=True)
except pmag.MissingCommandLineArgException:
    print(main.__doc__)
    print("-spn is required option")
    sys.exit()
syn = 0
if "-syn" in args:
    syn = 1
# sample naming convention; get_named_arg already returns the value
# following -ncn or the default "1" (removed the redundant manual
# re-parse of sys.argv that duplicated this lookup)
samp_con = pmag.get_named_arg("-ncn", "1")
data_model_num = int(pmag.get_named_arg("-DM", 3))
convert.mst(infile, specimen_name, dir_path, "", meas_file, samp_file,
            user, specnum, samp_con, labfield, location, syn, data_model_num)
|
def main()
|
NAME
mst_magic.py
DESCRIPTION
converts MsT data (T,M) to measurements format files
SYNTAX
mst_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: identify user, default is ""
-f FILE: specify T,M format input file, required
-spn SPEC: specimen name, required
-fsa SFILE: name with sample, site, location information
-F FILE: specify output file, default is measurements.txt
-dc H: specify applied field during measurement, default is 0.5 T
-DM NUM: output to MagIC data model 2.5 or 3, default 3
-syn : This is a synthetic specimen and has no sample/site/location information
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
-ncn NCON: specify naming convention: default is #1 below
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail ltauxe@ucsd.edu for help.
INPUT files:
T M: T is in Centigrade and M is uncalibrated magnitude
| 3.246791
| 2.39075
| 1.358063
|
# Default input/output file names; overridden by command-line flags.
infile = 'magic_measurements.txt'
sitefile = ""
specout = "er_specimens.txt"
instout = "magic_instruments.txt"
argv = sys.argv
# get command line stuff
if "-h" in argv:
    print(main.__doc__)
    sys.exit()

def _flag_value(flag, default):
    # Return the token following `flag` on the command line, or `default`.
    if flag in argv:
        return argv[argv.index(flag) + 1]
    return default

infile = _flag_value('-f', infile)
sitefile = _flag_value('-fsi', sitefile)
specout = _flag_value('-Fsp', specout)
instout = _flag_value('-Fin', instout)
if '-WD' in argv:
    # prefix every path with the chosen working directory
    dir_path = argv[argv.index('-WD') + 1]
    infile = dir_path + '/' + infile
    if sitefile != "":
        sitefile = dir_path + '/' + sitefile
    specout = dir_path + '/' + specout
    instout = dir_path + '/' + instout
# now do re-ordering
pmag.ParseMeasFile(infile, sitefile, instout, specout)
|
def main()
|
NAME
parse_measurements.py
DESCRIPTION
takes measurements file and creates specimen and instrument files
SYNTAX
parse_measurements.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE magic_measurements input file, default is "magic_measurements.txt"
-fsi FILE er_sites input file, default is none
-Fsp FILE specimen output er_specimens format file, default is "er_specimens.txt"
-Fin FILE instrument output magic_instruments format file, default is "magic_instruments.txt"
OUTPUT
writes er_specimens and magic_instruments formatted files
| 2.492892
| 1.897644
| 1.313677
|
# Gather user input from the dialog widgets, build the equivalent
# command line (for the record shown to the user), and run the HUJI
# conversion as a module call.
os.chdir(self.WD)
options = {}
HUJI_file = self.bSizer0.return_value()
if not HUJI_file:
    pw.simple_warning("You must select a HUJI format file")
    return False
options['magfile'] = HUJI_file
# output goes to <input file name>.magic in the working directory
magicoutfile=os.path.split(HUJI_file)[1]+".magic"
outfile=os.path.join(self.WD, magicoutfile)
options['meas_file'] = outfile
user = self.bSizer1.return_value()
options['user'] = user
if user:
    user = '-usr ' + user
experiment_type = self.bSizer2.return_value()
options['codelist'] = experiment_type
if not experiment_type:
    pw.simple_warning("You must select an experiment type")
    return False
cooling_rate = self.cooling_rate.GetValue() or 0
if cooling_rate:
    # cooling rate is appended to the experiment-type code list
    experiment_type = experiment_type + " " + cooling_rate
lab_field = self.bSizer3.return_value()
if not lab_field:
    # no lab field given: zero field, zero phi/theta
    lab_field = "0 0 0"
lab_field_list = lab_field.split()
options['labfield'] = lab_field_list[0]
options['phi'] = lab_field_list[1]
options['theta'] = lab_field_list[2]
lab_field = '-dc ' + lab_field
spc = self.bSizer4.return_value()
options['specnum'] = spc or 0
if not spc:
    spc = '-spc 0'
else:
    spc = '-spc ' + spc
ncn = self.bSizer5.return_value()
options['samp_con'] = ncn
loc_name = self.bSizer6.return_value()
options['er_location_name'] = loc_name
if loc_name:
    loc_name = '-loc ' + loc_name
peak_AF = self.bSizer7.return_value()
options['peakfield'] = peak_AF
replicate = self.bSizer8.return_value()
if replicate:
    # checkbox ticked -> average replicate measurements (noave = 0)
    options['noave'] = 0
    replicate = ''
else:
    options['noave'] = 1
    replicate = '-A'
old_format= self.bSizer0a.return_value()
if old_format:
    COMMAND = "huji_magic.py -f {} -F {} {} -LP {} {} -ncn {} {} {} {} {}".format(HUJI_file, outfile, user, experiment_type, loc_name, ncn, lab_field, spc, peak_AF, replicate)
    program_ran, error_message = huji_magic.main(False, **options)
    if program_ran:
        pw.close_window(self, COMMAND, outfile)
    else:
        pw.simple_warning(error_message)
else: # new format
    COMMAND = "huji_magic_new.py -f {} -F {} {} -LP {} {} -ncn {} {} {} {}".format(HUJI_file, outfile, user, experiment_type, loc_name, ncn, lab_field, spc, peak_AF)
    program_ran, error_message = huji_magic_new.main(False, **options)
    if program_ran:
        pw.close_window(self, COMMAND, outfile)
    else:
        pw.simple_warning(error_message)
|
def on_okButton(self, event)
|
grab user input values, format them, and run huji_magic.py with the appropriate flags
| 2.834388
| 2.696018
| 1.051324
|
os.chdir(self.WD)
options_dict = {}
wd = self.WD
options_dict['dir_path'] = wd
full_file = self.bSizer0.return_value()
if not full_file:
pw.simple_warning('You must provide a Utrecht format file')
return False
input_directory, Utrecht_file = os.path.split(full_file)
options_dict['mag_file'] = Utrecht_file
options_dict['input_dir_path'] = input_directory
if input_directory:
ID = "-ID " + input_directory
else:
ID = ''
outfile = Utrecht_file + ".magic"
options_dict['meas_file'] = outfile
samp_outfile = Utrecht_file[:Utrecht_file.find('.')] + "_er_samples.txt"
options_dict['samp_file'] = samp_outfile
spec_outfile = Utrecht_file[:Utrecht_file.find('.')] + "_er_specimens.txt"
options_dict['spec_file'] = spec_outfile
site_outfile = Utrecht_file[:Utrecht_file.find('.')] + "_er_sites.txt"
options_dict['site_file'] = site_outfile
dc_flag,dc_params = '',''
if self.bSizer6.return_value() != '':
dc_params = list(map(float,self.bSizer6.return_value().split()))
options_dict['dc_params'] = dc_params
dc_flag = '-dc'
spec_num = self.bSizer3.return_value()
options_dict['specnum'] = spec_num
if spec_num:
spec_num = "-spc " + str(spec_num)
else:
spec_num = "-spc 0" # defaults to 0 if user doesn't choose number
loc_name = self.bSizer4.return_value()
options_dict['location_name'] = loc_name
if loc_name:
loc_name = "-loc " + loc_name
ncn = self.bSizer2.return_value()
options_dict['samp_con'] = ncn
particulars = self.bSizer1.return_value()
options_dict['meth_code'] = particulars
if particulars:
particulars = "-mcd " + particulars
euro_date = self.bSizer7.return_value()
if euro_date: options_dict['dmy_flag'] = True; dmy_flag='-dmy'
else: options_dict['dmy_flag'] = False; dmy_flag=''
try: site_lat,site_lon = self.bSizer8.return_value().split()
except ValueError: site_lat,site_lon = '',''
options_dict['site_lat'] = site_lat
options_dict['site_lon'] = site_lon
replicate = self.bSizer5.return_value()
if replicate:
options_dict['avg'] = False
replicate = ''
else:
options_dict['avg'] = True
replicate = '-A'
COMMAND = "cit_magic.py -WD {} -f {} -F {} {} {} {} -ncn {} {} -Fsp {} -Fsi {} -Fsa {} {} {} {} {} -lat {} -lon {}".format(wd, Utrecht_file, outfile, particulars, spec_num, loc_name, ncn, ID, spec_outfile, site_outfile, samp_outfile, replicate, dc_flag, dc_params, dmy_flag, site_lon, site_lat)
# to run as module:
program_ran, error_message = utrecht_magic.main(command_line=False, **options_dict)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message)
|
def on_okButton(self, event)
|
Complies information input in GUI into a kwargs dictionary which can
be passed into the utrecht_magic script and run to output magic files
| 3.14523
| 2.984625
| 1.053811
|
'''
This function does exactly what the 'import orientation' function does in MagIC.py
after some dialog boxes the function calls orientation_magic.py
'''
# first see if demag_orient.txt
self.on_m_save_file(None)
# ask the user for the orientation / declination / GMT conventions
orient_convention_dia = orient_convention(None)
orient_convention_dia.Center()
#orient_convention_dia.ShowModal()
if orient_convention_dia.ShowModal() == wx.ID_OK:
    ocn_flag = orient_convention_dia.ocn_flag
    dcn_flag = orient_convention_dia.dcn_flag
    gmt_flags = orient_convention_dia.gmt_flags
    orient_convention_dia.Destroy()
else:
    # user cancelled: abort without calculating
    return
or_con = orient_convention_dia.ocn
dec_correction_con = int(orient_convention_dia.dcn)
try:
    hours_from_gmt = float(orient_convention_dia.gmt)
except:
    hours_from_gmt = 0
try:
    dec_correction = float(orient_convention_dia.correct_dec)
except:
    dec_correction = 0
# ask the user for method codes / bedding options
method_code_dia=method_code_dialog(None)
method_code_dia.Center()
if method_code_dia.ShowModal() == wx.ID_OK:
    bedding_codes_flags=method_code_dia.bedding_codes_flags
    methodcodes_flags=method_code_dia.methodcodes_flags
    method_code_dia.Destroy()
else:
    print("-I- Canceling calculation")
    return
method_codes = method_code_dia.methodcodes
average_bedding = method_code_dia.average_bedding
bed_correction = method_code_dia.bed_correction
# assemble the equivalent command line purely for logging purposes
command_args=['orientation_magic.py']
command_args.append("-WD %s"%self.WD)
command_args.append("-Fsa er_samples_orient.txt")
command_args.append("-Fsi er_sites_orient.txt ")
command_args.append("-f %s"%"demag_orient.txt")
command_args.append(ocn_flag)
command_args.append(dcn_flag)
command_args.append(gmt_flags)
command_args.append(bedding_codes_flags)
command_args.append(methodcodes_flags)
commandline = " ".join(command_args)
print("-I- executing command: %s" %commandline)
os.chdir(self.WD)
# append to existing er files rather than overwriting them
if os.path.exists(os.path.join(self.WD, 'er_samples.txt')) or os.path.exists(os.path.join(self.WD, 'er_sites.txt')):
    append = True
else:
    append = False
samp_file = "er_samples.txt"
site_file = "er_sites.txt"
success, error_message = ipmag.orientation_magic(or_con, dec_correction_con, dec_correction,
                                                 bed_correction, hours_from_gmt=hours_from_gmt,
                                                 method_codes=method_codes, average_bedding=average_bedding,
                                                 orient_file='demag_orient.txt', samp_file=samp_file,
                                                 site_file=site_file, input_dir_path=self.WD,
                                                 output_dir_path=self.WD, append=append, data_model=3)
if not success:
    dlg1 = wx.MessageDialog(None,caption="Message:", message="-E- ERROR: Error in running orientation_magic.py\n{}".format(error_message) ,style=wx.OK|wx.ICON_INFORMATION)
    dlg1.ShowModal()
    dlg1.Destroy()
    print("-E- ERROR: Error in running orientation_magic.py")
    return
else:
    dlg2 = wx.MessageDialog(None,caption="Message:", message="-I- Successfully ran orientation_magic", style=wx.OK|wx.ICON_INFORMATION)
    dlg2.ShowModal()
    dlg2.Destroy()
    self.Parent.Show()
    self.Parent.Raise()
    self.Destroy()
    self.contribution.add_magic_table('samples')
    return
|
def on_m_calc_orient(self,event)
|
This function does exactly what the 'import orientation' function does in MagIC.py
after some dialog boxes the function calls orientation_magic.py
| 3.20803
| 2.839883
| 1.129634
|
file=""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
elif '-i' in sys.argv:
file=input("Enter eigenparameters data file name: ")
elif '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
if file!="":
f=open(file,'r')
data=f.readlines()
f.close()
else:
data=sys.stdin.readlines()
ofile=""
if '-F' in sys.argv:
ind = sys.argv.index('-F')
ofile= sys.argv[ind+1]
out = open(ofile, 'w + a')
file_outstring = ""
for line in data:
tau,Vdirs=[],[]
rec=line.split()
for k in range(0,9,3):
tau.append(float(rec[k]))
Vdirs.append((float(rec[k+1]),float(rec[k+2])))
srot=pmag.doeigs_s(tau,Vdirs)
outstring=""
for s in srot:outstring+='%10.8f '%(s)
if ofile=="":
print(outstring)
else:
out.write(outstring+'\n')
|
def main()
|
NAME
eigs_s.py
DESCRIPTION
converts eigenparamters format data to s format
SYNTAX
eigs_s.py [-h][-i][command line options][<filename]
OPTIONS
-h prints help message and quits
-i allows interactive file name entry
-f FILE, specifies input file name
-F FILE, specifies output file name
< filename, reads file from standard input (Unix-like operating systems only)
INPUT
tau_i, dec_i inc_i of eigenvectors
OUTPUT
x11,x22,x33,x12,x23,x13
| 3.657728
| 3.185416
| 1.148273
|
if ndarray is not FakeObject:
# NumPy is available
import numpy as np
if isinstance(obj, np.generic) or isinstance(obj, np.ndarray):
# Numpy scalars all inherit from np.generic.
# Numpy arrays all inherit from np.ndarray.
# If we check that we are certain we have one of these
# types then we are less likely to generate an exception below.
try:
return obj.dtype.type
except (AttributeError, RuntimeError):
# AttributeError: some NumPy objects have no dtype attribute
# RuntimeError: happens with NetCDF objects (Issue 998)
return
|
def get_numpy_dtype(obj)
|
Return NumPy data type associated to obj
Return None if NumPy is not available
or if obj is not a NumPy array or scalar
| 7.747027
| 7.774215
| 0.996503
|
return "<%s @ %s>" % (obj.__class__.__name__,
hex(id(obj)).upper().replace('X', 'x'))
|
def address(obj)
|
Return object address as a string: '<classname @ address>
| 5.217271
| 3.767188
| 1.384925
|
if isinstance(item, (list, set, tuple, dict)):
return len(item)
elif isinstance(item, (ndarray, MaskedArray)):
return item.shape
elif isinstance(item, Image):
return item.size
if isinstance(item, (DataFrame, Index, Series)):
return item.shape
else:
return 1
|
def get_size(item)
|
Return size of an item of arbitrary type
| 2.870868
| 2.646468
| 1.084792
|
attrs = [k for k in dir(obj) if not k.startswith('__')]
if not attrs:
attrs = dir(obj)
return attrs
|
def get_object_attrs(obj)
|
Get the attributes of an object using dir.
This filters protected attributes
| 2.901624
| 3.536953
| 0.820374
|
m = re.match(r'^(?:(?:datetime\.)?timedelta)?'
r'\(?'
r'([^)]*)'
r'\)?$', value)
if not m:
raise ValueError('Invalid string for datetime.timedelta')
args = [int(a.strip()) for a in m.group(1).split(',')]
return datetime.timedelta(*args)
|
def str_to_timedelta(value)
|
Convert a string to a datetime.timedelta value.
The following strings are accepted:
- 'datetime.timedelta(1, 5, 12345)'
- 'timedelta(1, 5, 12345)'
- '(1, 5, 12345)'
- '1, 5, 12345'
- '1'
if there are less then three parameters, the missing parameters are
assumed to be 0. Variations in the spacing of the parameters are allowed.
Raises:
ValueError for strings not matching the above criterion.
| 3.708518
| 3.497791
| 1.060246
|
if not is_known_type(value):
return CUSTOM_TYPE_COLOR
for typ, name in list(COLORS.items()):
if isinstance(value, typ):
return name
else:
np_dtype = get_numpy_dtype(value)
if np_dtype is None or not hasattr(value, 'size'):
return UNSUPPORTED_COLOR
elif value.size == 1:
return SCALAR_COLOR
else:
return ARRAY_COLOR
|
def get_color_name(value)
|
Return color name depending on value type
| 3.880356
| 3.674377
| 1.056058
|
try:
return [item for _, item in
sorted(zip(list2, list1), key=lambda x: x[0], reverse=reverse)]
except:
return list1
|
def sort_against(list1, list2, reverse=False)
|
Arrange items of list1 in the same order as sorted(list2).
In other words, apply to list1 the permutation which takes list2
to sorted(list2, reverse).
| 2.753498
| 2.707296
| 1.017066
|
object_type = type(value)
try:
name = object_type.__name__
module = object_type.__module__
if with_module:
return name + ' object of ' + module + ' module'
else:
return name
except:
type_str = to_text_string(object_type)
return type_str[1:-1]
|
def default_display(value, with_module=True)
|
Default display for unknown objects.
| 2.977007
| 2.949689
| 1.009261
|
# Build a short, depth- and length-limited textual preview of a
# collection, bracketed according to its type.
is_dict = isinstance(value, dict)
is_set = isinstance(value, set)
# Get elements
if is_dict:
    elements = iteritems(value)
else:
    elements = value
# Truncate values: at most 10 elements at depth 1, 5 at depth 2
truncate = False
if level == 1 and len(value) > 10:
    # islice for unordered/iterator-based containers, slicing otherwise
    elements = islice(elements, 10) if is_dict or is_set else value[:10]
    truncate = True
elif level == 2 and len(value) > 5:
    elements = islice(elements, 5) if is_dict or is_set else value[:5]
    truncate = True
# Get display of each element (recursing via value_to_display)
if level <= 2:
    if is_dict:
        displays = [value_to_display(k, level=level) + ':' +
                    value_to_display(v, level=level)
                    for (k, v) in list(elements)]
    else:
        displays = [value_to_display(e, level=level)
                    for e in elements]
    if truncate:
        displays.append('...')
    display = ', '.join(displays)
else:
    # too deep: don't expand nested collections any further
    display = '...'
# Return display wrapped in type-appropriate brackets
if is_dict:
    display = '{' + display + '}'
elif isinstance(value, list):
    display = '[' + display + ']'
elif isinstance(value, set):
    display = '{' + display + '}'
else:
    display = '(' + display + ')'
return display
|
def collections_display(value, level)
|
Display for collections (i.e. list, set, tuple and dict).
| 2.174235
| 2.10969
| 1.030594
|
# Convert a text representation (edited in the GUI) back into a Python
# value, using the type of `default_value` to pick the conversion.
from qtpy.compat import from_qvariant
value = from_qvariant(value, to_text_string)
try:
    np_dtype = get_numpy_dtype(default_value)
    if isinstance(default_value, bool):
        # We must test for boolean before NumPy data types
        # because `bool` class derives from `int` class
        try:
            value = bool(float(value))
        except ValueError:
            value = value.lower() == "true"
    elif np_dtype is not None:
        # preserve the exact NumPy dtype of the original value
        if 'complex' in str(type(default_value)):
            value = np_dtype(complex(value))
        else:
            value = np_dtype(value)
    elif is_binary_string(default_value):
        value = to_binary_string(value, 'utf8')
    elif is_text_string(default_value):
        value = to_text_string(value)
    elif isinstance(default_value, complex):
        value = complex(value)
    elif isinstance(default_value, float):
        value = float(value)
    elif isinstance(default_value, int):
        try:
            value = int(value)
        except ValueError:
            # e.g. the user typed '3.5' where an int lived before
            value = float(value)
    elif isinstance(default_value, datetime.datetime):
        value = datestr_to_datetime(value)
    elif isinstance(default_value, datetime.date):
        value = datestr_to_datetime(value).date()
    elif isinstance(default_value, datetime.timedelta):
        value = str_to_timedelta(value)
    elif ignore_errors:
        value = try_to_eval(value)
    else:
        # NOTE(review): eval on user-supplied text; unsafe for
        # untrusted input, kept for backward compatibility
        value = eval(value)
except (ValueError, SyntaxError):
    if ignore_errors:
        value = try_to_eval(value)
    else:
        # conversion failed: keep the original value
        return default_value
return value
|
def display_to_value(value, default_value, ignore_errors=True)
|
Convert back to value
| 2.323314
| 2.315202
| 1.003504
|
if isinstance(item, DataFrame):
return "DataFrame"
if isinstance(item, Index):
return type(item).__name__
if isinstance(item, Series):
return "Series"
found = re.findall(r"<(?:type|class) '(\S*)'>",
to_text_string(type(item)))
if found:
return found[0]
|
def get_type_string(item)
|
Return type string of an object.
| 3.528458
| 3.31424
| 1.064636
|
if isinstance(item, (ndarray, MaskedArray)):
return item.dtype.name
elif isinstance(item, Image):
return "Image"
else:
text = get_type_string(item)
if text is None:
text = to_text_string('unknown')
else:
return text[text.find('.')+1:]
|
def get_human_readable_type(item)
|
Return human-readable type string of an item
| 4.471699
| 4.271468
| 1.046876
|
assert filters is not None
if value is None:
return True
if not is_editable_type(value):
return False
elif not isinstance(value, filters):
return False
elif iterate:
if isinstance(value, (list, tuple, set)):
valid_count = 0
for val in value:
if is_supported(val, filters=filters, iterate=check_all):
valid_count += 1
if not check_all:
break
return valid_count > 0
elif isinstance(value, dict):
for key, val in list(value.items()):
if not is_supported(key, filters=filters, iterate=check_all) \
or not is_supported(val, filters=filters,
iterate=check_all):
return False
if not check_all:
break
return True
|
def is_supported(value, check_all=False, filters=None, iterate=False)
|
Return True if the value is supported, False otherwise
| 2.238632
| 2.247565
| 0.996026
|
output_dict = {}
for key, value in list(input_dict.items()):
excluded = (exclude_private and key.startswith('_')) or \
(exclude_capitalized and key[0].isupper()) or \
(exclude_uppercase and key.isupper()
and len(key) > 1 and not key[1:].isdigit()) or \
(key in excluded_names) or \
(exclude_unsupported and \
not is_supported(value, check_all=check_all,
filters=filters))
if not excluded:
output_dict[key] = value
return output_dict
|
def globalsfilter(input_dict, check_all=False, filters=None,
exclude_private=None, exclude_capitalized=None,
exclude_uppercase=None, exclude_unsupported=None,
excluded_names=None)
|
Keep only objects that can be pickled
| 1.985909
| 1.99222
| 0.996832
|
from datetime import date, timedelta
editable_types = [int, float, complex, list, set, dict, tuple, date,
timedelta] + list(TEXT_TYPES) + list(INT_TYPES)
try:
from numpy import ndarray, matrix, generic
editable_types += [ndarray, matrix, generic]
except:
pass
try:
from pandas import DataFrame, Series, DatetimeIndex
editable_types += [DataFrame, Series, Index]
except:
pass
picklable_types = editable_types[:]
try:
from spyder.pil_patch import Image
editable_types.append(Image.Image)
except:
pass
return dict(picklable=picklable_types, editable=editable_types)
|
def get_supported_types()
|
Return a dictionary containing types lists supported by the
namespace browser.
Note:
If you update this list, don't forget to update variablexplorer.rst
in spyder-docs
| 3.578651
| 3.759381
| 0.951926
|
supported_types = get_supported_types()
assert mode in list(supported_types.keys())
excluded_names = settings['excluded_names']
if more_excluded_names is not None:
excluded_names += more_excluded_names
return globalsfilter(data, check_all=settings['check_all'],
filters=tuple(supported_types[mode]),
exclude_private=settings['exclude_private'],
exclude_uppercase=settings['exclude_uppercase'],
exclude_capitalized=settings['exclude_capitalized'],
exclude_unsupported=settings['exclude_unsupported'],
excluded_names=excluded_names)
|
def get_remote_data(data, settings, mode, more_excluded_names=None)
|
Return globals according to filter described in *settings*:
* data: data to be filtered (dictionary)
* settings: variable explorer settings (dictionary)
* mode (string): 'editable' or 'picklable'
* more_excluded_names: additional excluded names (list)
| 3.492516
| 3.336998
| 1.046604
|
data = get_remote_data(data, settings, mode='editable',
more_excluded_names=more_excluded_names)
remote = {}
for key, value in list(data.items()):
view = value_to_display(value, minmax=settings['minmax'])
remote[key] = {'type': get_human_readable_type(value),
'size': get_size(value),
'color': get_color_name(value),
'view': view}
return remote
|
def make_remote_view(data, settings, more_excluded_names=None)
|
Make a remote view of dictionary *data*
-> globals explorer
| 4.256175
| 4.400013
| 0.96731
|
if self._pdb_obj is not None and self._pdb_obj.curframe is not None:
return self._pdb_obj.curframe
|
def _pdb_frame(self)
|
Return current Pdb frame if there is any
| 3.502031
| 2.845018
| 1.230935
|
from spyder_kernels.utils.nsview import make_remote_view
settings = self.namespace_view_settings
if settings:
ns = self._get_current_namespace()
view = repr(make_remote_view(ns, settings, EXCLUDED_NAMES))
return view
else:
return repr(None)
|
def get_namespace_view(self)
|
Return the namespace view
This is a dictionary with the following structure
{'a': {'color': '#800000', 'size': 1, 'type': 'str', 'view': '1'}}
Here:
* 'a' is the variable name
* 'color' is the color used to show it
* 'size' and 'type' are self-evident
* and'view' is its value or the text shown in the last column
| 6.669168
| 7.401915
| 0.901006
|
from spyder_kernels.utils.nsview import get_remote_data
settings = self.namespace_view_settings
if settings:
ns = self._get_current_namespace()
data = get_remote_data(ns, settings, mode='editable',
more_excluded_names=EXCLUDED_NAMES)
properties = {}
for name, value in list(data.items()):
properties[name] = {
'is_list': isinstance(value, (tuple, list)),
'is_dict': isinstance(value, dict),
'is_set': isinstance(value, set),
'len': self._get_len(value),
'is_array': self._is_array(value),
'is_image': self._is_image(value),
'is_data_frame': self._is_data_frame(value),
'is_series': self._is_series(value),
'array_shape': self._get_array_shape(value),
'array_ndim': self._get_array_ndim(value)
}
return repr(properties)
else:
return repr(None)
|
def get_var_properties(self)
|
Get some properties of the variables in the current
namespace
| 3.428009
| 3.373716
| 1.016093
|
# Publish a custom 'spyder_msg' message on the IOPub channel; `data` is
# serialized with cloudpickle and shipped as the first message buffer.
import cloudpickle
if content is None:
    content = {}
# tag the payload so the frontend can dispatch on the message subtype
content['spyder_msg_type'] = spyder_msg_type
msg = self.session.send(
    self.iopub_socket,
    'spyder_msg',
    content=content,
    buffers=[cloudpickle.dumps(data, protocol=PICKLE_PROTOCOL)],
    parent=self._parent_header,
)
self.log.debug(msg)
|
def send_spyder_msg(self, spyder_msg_type, content=None, data=None)
|
Publish custom messages to the Spyder frontend.
Parameters
----------
spyder_msg_type: str
The spyder message type
content: dict
The (JSONable) content of the message
data: any
Any object that is serializable by cloudpickle (should be most
things). Will arrive as cloudpickled bytes in `.buffers[0]`.
| 3.525323
| 3.17798
| 1.109297
|
# Look `name` up in the current namespace and ship its value to the
# frontend via a 'data' spyder message.
ns = self._get_current_namespace()
value = ns[name]
try:
    self.send_spyder_msg('data', data=value)
except:
    # * There is no need to inform users about
    #   these errors.
    # * value = None makes Spyder to ignore
    #   petitions to display a value
    self.send_spyder_msg('data', data=None)
# skip the next pdb-state publication after serving a value request
self._do_publish_pdb_state = False
|
def get_value(self, name)
|
Get the value of a variable
| 10.263248
| 10.385018
| 0.988274
|
import cloudpickle
ns = self._get_reference_namespace(name)
# We send serialized values in a list of one element
# from Spyder to the kernel, to be able to send them
# at all in Python 2
svalue = value[0]
# We need to convert svalue to bytes if the frontend
# runs in Python 2 and the kernel runs in Python 3
if PY2_frontend and not PY2:
    svalue = bytes(svalue, 'latin-1')
# Deserialize and set value in namespace.
# NOTE(review): cloudpickle.loads executes arbitrary code; input is
# assumed to come only from the attached Spyder frontend.
dvalue = cloudpickle.loads(svalue)
ns[name] = dvalue
self.log.debug(ns)
|
def set_value(self, name, value, PY2_frontend)
|
Set the value of a variable
| 6.640599
| 6.946514
| 0.955961
|
def remove_value(self, name):
    """Remove a variable from the namespace where it is defined.

    Raises KeyError if *name* is not bound there.
    """
    namespace = self._get_reference_namespace(name)
    del namespace[name]
|
def remove_value(self, name)
|
Remove a variable
| 10.287585
| 11.331714
| 0.907858
|
def copy_value(self, orig_name, new_name):
    """Bind the value of *orig_name* to *new_name* in the same namespace."""
    namespace = self._get_reference_namespace(orig_name)
    namespace[new_name] = namespace[orig_name]
|
def copy_value(self, orig_name, new_name)
|
Copy a variable
| 5.075315
| 5.369645
| 0.945186
|
from spyder_kernels.utils.iofuncs import iofunctions
from spyder_kernels.utils.misc import fix_reference_name
glbs = self._mglobals()
# Pick the loader registered for this file extension.
load_func = iofunctions.load_funcs[ext]
data, error_message = load_func(filename)
if error_message:
    return error_message
# Rename loaded keys that would collide with existing globals
# (or are not valid identifiers) before merging them in.
for key in list(data.keys()):
    new_key = fix_reference_name(key, blacklist=list(glbs.keys()))
    if new_key != key:
        data[new_key] = data.pop(key)
try:
    glbs.update(data)
except Exception as error:
    return str(error)
# None signals success to the caller.
return None
|
def load_data(self, filename, ext)
|
Load data from filename
| 3.918907
| 3.969607
| 0.987228
|
from spyder_kernels.utils.nsview import get_remote_data
from spyder_kernels.utils.iofuncs import iofunctions
ns = self._get_current_namespace()
settings = self.namespace_view_settings
# Only picklable values can be persisted; copy so the save routine
# does not mutate the live namespace snapshot.
data = get_remote_data(ns, settings, mode='picklable',
                       more_excluded_names=EXCLUDED_NAMES).copy()
return iofunctions.save(data, filename)
|
def save_namespace(self, filename)
|
Save namespace into filename
| 7.439115
| 7.4331
| 1.000809
|
# Publish Variable Explorer state and current Pdb step to the frontend,
# but only while a debugging session is active and publishing is enabled.
if self._pdb_obj and self._do_publish_pdb_state:
    state = dict(namespace_view = self.get_namespace_view(),
                 var_properties = self.get_var_properties(),
                 step = self._pdb_step)
    self.send_spyder_msg('pdb_state', content={'pdb_state': state})
# Re-arm publishing for the next call.
self._do_publish_pdb_state = True
|
def publish_pdb_state(self)
|
Publish Variable Explorer state and Pdb step through
send_spyder_msg.
| 5.546028
| 4.127784
| 1.343585
|
# Return True if *obj* is defined in the current namespace
# (IPython magics included, so help works for them too).
from spyder_kernels.utils.dochelpers import isdefined
ns = self._get_current_namespace(with_magics=True)
return isdefined(obj, force_import=force_import, namespace=ns)
|
def is_defined(self, obj, force_import=False)
|
Return True if object is defined in current namespace
| 6.290189
| 5.431157
| 1.158167
|
def get_doc(self, objtxt):
    """Get object documentation dictionary.

    *objtxt* is evaluated in the current namespace; returns None when
    it cannot be evaluated.
    """
    try:
        # Ask matplotlib for plain-text docstrings, if it is installed.
        import matplotlib
        matplotlib.rcParams['docstring.hardcopy'] = True
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrow it while keeping best-effort behavior.
        pass
    from spyder_kernels.utils.dochelpers import getdoc
    obj, valid = self._eval(objtxt)
    if valid:
        return getdoc(obj)
|
def get_doc(self, objtxt)
|
Get object documentation dictionary
| 7.571845
| 7.09606
| 1.067049
|
# Get the source code of the object named by *objtxt*; returns None
# implicitly when the expression cannot be evaluated.
from spyder_kernels.utils.dochelpers import getsource
obj, valid = self._eval(objtxt)
if valid:
    return getsource(obj)
|
def get_source(self, objtxt)
|
Get object source
| 8.463636
| 7.526487
| 1.124514
|
def _get_current_namespace(self, with_magics=False):
    """Return current namespace.

    This is globals() if not debugging, or a dictionary containing
    both locals() and globals() for current frame when debugging.
    """
    ns = {}
    # Both branches of the original code updated with globals first;
    # hoist that out and only add frame locals when debugging.
    ns.update(self._mglobals())
    if self._pdb_frame is not None:
        # Locals of the current Pdb frame shadow globals.
        ns.update(self._pdb_locals)
    # Add magics to ns so we can show help about them on the Help
    # plugin
    if with_magics:
        magics = self.shell.magics_manager.magics
        ns.update(magics['line'])
        ns.update(magics['cell'])
    return ns
|
def _get_current_namespace(self, with_magics=False)
|
Return current namespace
This is globals() if not debugging, or a dictionary containing
both locals() and globals() for current frame when debugging
| 3.682627
| 3.511369
| 1.048772
|
def _get_reference_namespace(self, name):
    """Return namespace where reference name is defined.

    It returns the globals() if reference has not yet been defined.
    """
    globals_ns = self._mglobals()
    if self._pdb_frame is None:
        # Not debugging: globals is the only namespace.
        return globals_ns
    locals_ns = self._pdb_locals
    return locals_ns if name in locals_ns else globals_ns
|
def _get_reference_namespace(self, name)
|
Return namespace where reference name is defined
It returns the globals() if reference has not yet been defined
| 6.904901
| 5.73631
| 1.203718
|
if self._pdb_frame is not None:
return self._pdb_frame.f_globals
else:
return self.shell.user_ns
|
def _mglobals(self)
|
Return current globals -- handles Pdb frames
| 5.023277
| 3.146452
| 1.596489
|
try:
from PIL import Image
return isinstance(var, Image.Image)
except:
return False
|
def _is_image(self, var)
|
Return True if variable is a PIL.Image image
| 3.476019
| 2.759713
| 1.259558
|
# No-op unless a Pdb session is active.
if not self._pdb_obj:
    return
# Breakpoints come serialized from Spyder. We send them
# in a list of one element to be able to send them at all
# in Python 2
serialized_breakpoints = breakpoints[0]
# NOTE(review): pickle.loads runs arbitrary code; data is assumed to
# come only from the attached Spyder frontend.
breakpoints = pickle.loads(serialized_breakpoints)
self._pdb_obj.set_spyder_breakpoints(breakpoints)
|
def _set_spyder_breakpoints(self, breakpoints)
|
Set all Spyder breakpoints in an active pdb session
| 7.193348
| 6.305001
| 1.140896
|
def _eval(self, text):
    """Evaluate text and return (obj, valid).

    *obj* is the object represented by *text* and *valid* is True if
    object evaluation did not raise any exception.
    """
    from spyder_kernels.py3compat import is_text_string
    assert is_text_string(text)
    # NOTE: eval of frontend-supplied text -- trusted by design here.
    ns = self._get_current_namespace(with_magics=True)
    try:
        return eval(text, ns), True
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; any evaluation error means "not valid".
        return None, False
|
def _eval(self, text)
|
Evaluate text and return (obj, valid)
where *obj* is the object represented by *text*
and *valid* is True if object evaluation did not raise any exception
| 5.175825
| 4.766314
| 1.085918
|
import traceback
from IPython.core.getipython import get_ipython
# Banner wrapped around any traceback produced while switching backends.
generic_error = (
    "\n" + "="*73 + "\n"
    "NOTE: The following error appeared when setting "
    "your Matplotlib backend!!\n" + "="*73 + "\n\n"
    "{0}"
)
magic = 'pylab' if pylab else 'matplotlib'
error = None
try:
    get_ipython().run_line_magic(magic, backend)
except RuntimeError as err:
    # This catches errors generated by ipykernel when
    # trying to set a backend. See issue 5541
    if "GUI eventloops" in str(err):
        import matplotlib
        previous_backend = matplotlib.get_backend()
        if not backend in previous_backend.lower():
            # Only inform about an error if the user selected backend
            # and the one set by Matplotlib are different. Else this
            # message is very confusing.
            error = (
                "\n"
                "NOTE: Spyder *can't* set your selected Matplotlib "
                "backend because there is a previous backend already "
                "in use.\n\n"
                "Your backend will be {0}".format(previous_backend)
            )
        del matplotlib
    # This covers other RuntimeError's
    else:
        error = generic_error.format(traceback.format_exc())
except Exception:
    error = generic_error.format(traceback.format_exc())
# Stored rather than raised so the frontend can report it later.
self._mpl_backend_error = error
|
def _set_mpl_backend(self, backend, pylab=False)
|
Set a backend for Matplotlib.
backend: A parameter that can be passed to %matplotlib
(e.g. 'inline' or 'tk').
| 5.108315
| 5.150647
| 0.991781
|
def _load_autoreload_magic(self):
    """Load %autoreload magic."""
    from IPython.core.getipython import get_ipython
    try:
        shell = get_ipython()
        # '2' reloads all modules (except excluded ones) before execution.
        shell.run_line_magic('reload_ext', 'autoreload')
        shell.run_line_magic('autoreload', '2')
    except Exception:
        # Best effort: a missing extension must not break the kernel.
        pass
|
def _load_autoreload_magic(self)
|
Load %autoreload magic.
| 2.535362
| 2.259899
| 1.121892
|
# Load the wurlitzer extension (captures C-level stdout/stderr).
# Wurlitzer has no effect on Windows
if not os.name == 'nt':
    from IPython.core.getipython import get_ipython
    # Enclose this in a try/except because if it fails the
    # console will be totally unusable.
    # Fixes spyder-ide/spyder#8668
    try:
        get_ipython().run_line_magic('reload_ext', 'wurlitzer')
    except Exception:
        pass
|
def _load_wurlitzer(self)
|
Load wurlitzer extension.
| 5.037004
| 4.782699
| 1.053172
|
# Import our customizations into the kernel from the sibling
# 'customize' package directory.
here = osp.dirname(__file__)
parent = osp.dirname(here)
customize_dir = osp.join(parent, 'customize')
# Remove current directory from sys.path to prevent kernel
# crashes when people name Python files or modules with
# the same name as standard library modules.
# See spyder-ide/spyder#8007
while '' in sys.path:
    sys.path.remove('')
# Import our customizations
site.addsitedir(customize_dir)
import spydercustomize
# Remove our customize path from sys.path
try:
    sys.path.remove(customize_dir)
except ValueError:
    pass
|
def import_spydercustomize()
|
Import our customizations into the kernel.
| 4.097641
| 3.847428
| 1.065034
|
# Variable Explorer magic: plot/show the named user variable using
# guiqwt when available, matplotlib otherwise.
ip = get_ipython() #analysis:ignore
funcname, name = line.split()
try:
    import guiqwt.pyplot as pyplot
except:
    import matplotlib.pyplot as pyplot
__fig__ = pyplot.figure();
# funcname arrives as a '--plot'-style flag; [2:] drops the leading
# dashes to get the pyplot function name -- TODO confirm with callers.
__items__ = getattr(pyplot, funcname[2:])(ip.user_ns[name])
pyplot.show()
del __fig__, __items__
|
def varexp(line)
|
Spyder's variable explorer magic
Used to generate plots, histograms and images of the variables displayed
on it.
| 7.410586
| 6.993644
| 1.059617
|
def fix_reference_name(name, blacklist=None):
    """Return a syntax-valid Python reference name from an arbitrary name.

    Falls back to "data" when nothing usable remains; appends a numeric
    suffix when the result collides with a name in *blacklist*.
    """
    # Drop every character that is not valid in an ASCII identifier.
    name = re.sub(r'[^0-9a-zA-Z_]', '', name)
    # An identifier must start with a letter: shed leading digits and
    # underscores (only word characters remain at this point).
    while name and not re.match(r'[a-zA-Z]', name):
        name = name[1:]
    if not name:
        name = "data"
    # Avoid clashing with names already in use.
    if blacklist is not None and name in blacklist:
        index = 0
        while name + ('%03d' % index) in blacklist:
            index += 1
        name = name + ('%03d' % index)
    return name
|
def fix_reference_name(name, blacklist=None)
|
Return a syntax-valid Python reference name from an arbitrary name
| 2.707836
| 2.654731
| 1.020004
|
# This is useful when debugging in an active interpreter (otherwise,
# the debugger will stop before reaching the target file)
if self._wait_for_mainpyfile:
    # Keep ignoring return traps until execution is actually inside
    # the target script (matching file and a real line number).
    if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
        or frame.f_lineno<= 0):
        return
    self._wait_for_mainpyfile = 0
# Delegate to the saved original Pdb implementation.
self._old_Pdb_user_return(frame, return_value)
|
def user_return(self, frame, return_value)
|
This function is called when a return trap is set here.
| 6.388863
| 6.264694
| 1.01982
|
# Print the traceback, then drop into an interactive Pdb session on
# the failed program's stack (skipped for SyntaxError, where there is
# no runnable frame to inspect).
clear_post_mortem()
ipython_shell = get_ipython()
ipython_shell.showtraceback((type, value, tb))
p = pdb.Pdb(ipython_shell.colors)
if not type == SyntaxError:
    # wait for stderr to print (stderr.flush does not work in this case)
    time.sleep(0.1)
    _print('*' * 40)
    _print('Entering post mortem debugging...')
    _print('*' * 40)
    # add ability to move between frames
    p.send_initial_notification = False
    p.reset()
    # Walk towards the outermost frame so the user can then navigate
    # down through the whole stack.
    frame = tb.tb_frame
    prev = frame
    while frame.f_back:
        prev = frame
        frame = frame.f_back
    frame = prev
    # wait for stdout to print
    time.sleep(0.1)
    p.interaction(frame, tb)
|
def post_mortem_excepthook(type, value, tb)
|
For post mortem exception handling, print a banner and enable post
mortem debugging.
| 4.735547
| 4.606833
| 1.02794
|
# Register a custom IPython exception handler that enters post-mortem
# debugging for any uncaught Exception.
def ipython_post_mortem_debug(shell, etype, evalue, tb,
                              tb_offset=None):
    post_mortem_excepthook(etype, evalue, tb)
ipython_shell = get_ipython()
ipython_shell.set_custom_exc((Exception,), ipython_post_mortem_debug)
|
def set_post_mortem()
|
Enable the post mortem debugging excepthook.
| 3.464378
| 3.198006
| 1.083293
|
try:
    filename = filename.decode('utf-8')
except (UnicodeError, TypeError, AttributeError):
    # UnicodeError, TypeError --> eventually raised in Python 2
    # AttributeError --> systematically raised in Python 3
    pass
# Reload user modules first (User Module Reloader), if enabled.
if __umr__.enabled:
    __umr__.run()
if args is not None and not isinstance(args, basestring):
    raise TypeError("expected a character buffer object")
if namespace is None:
    namespace = _get_globals()
namespace['__file__'] = filename
# Make the script see its own name and arguments in sys.argv.
sys.argv = [filename]
if args is not None:
    for arg in shlex.split(args):
        sys.argv.append(arg)
if wdir is not None:
    try:
        wdir = wdir.decode('utf-8')
    except (UnicodeError, TypeError, AttributeError):
        # UnicodeError, TypeError --> eventually raised in Python 2
        # AttributeError --> systematically raised in Python 3
        pass
    os.chdir(wdir)
if post_mortem:
    set_post_mortem()
if __umr__.has_cython:
    # Cython files
    with io.open(filename, encoding='utf-8') as f:
        ipython_shell = get_ipython()
        ipython_shell.run_cell_magic('cython', '', f.read())
else:
    execfile(filename, namespace)
# Restore state for the interactive session.
clear_post_mortem()
sys.argv = ['']
# Avoid error when running `%reset -f` programmatically
# See issue spyder-ide/spyder-kernels#91
try:
    namespace.pop('__file__')
except KeyError:
    pass
|
def runfile(filename, args=None, wdir=None, namespace=None, post_mortem=False)
|
Run filename
args: command line arguments (string)
wdir: working directory
post_mortem: boolean, whether to enter post-mortem mode on error
| 3.274054
| 3.338141
| 0.980801
|
try:
    filename = filename.decode('utf-8')
except (UnicodeError, TypeError, AttributeError):
    # UnicodeError, TypeError --> eventually raised in Python 2
    # AttributeError --> systematically raised in Python 3
    pass
ipython_shell = get_ipython()
namespace = _get_globals()
namespace['__file__'] = filename
# The editor is expected to have stashed the cell's source on the
# shell as `cell_code` before invoking this function.
try:
    cell_code = ipython_shell.cell_code
except AttributeError:
    _print("--Run Cell Error--\n"
           "Please use only through Spyder's Editor; "
           "shouldn't be called manually from the console")
    return
# Trigger `post_execute` to exit the additional pre-execution.
# See Spyder PR #7310.
ipython_shell.events.trigger('post_execute')
ipython_shell.run_cell(cell_code)
# Clean up so a stale cell cannot be re-run by accident.
namespace.pop('__file__')
del ipython_shell.cell_code
|
def runcell(cellname, filename)
|
Run a code cell from an editor as a file.
Currently looks for code in an `ipython` property called `cell_code`.
This property must be set by the editor prior to calling this function.
This function deletes the contents of `cell_code` upon completion.
Parameters
----------
cellname : str
Used as a reference in the history log of which
cell was run with the fuction. This variable is not used.
filename : str
Needed to allow for proper traceback links.
| 7.011564
| 6.387506
| 1.0977
|
# Run *filename* under a fresh Pdb instance; execution itself is
# delegated to runfile() inside the debugger.
debugger = pdb.Pdb()
filename = debugger.canonic(filename)
# Defer tracing until execution actually enters the target file.
debugger._wait_for_mainpyfile = 1
debugger.mainpyfile = filename
debugger._user_requested_quit = 0
if os.name == 'nt':
    # NOTE(review): normalized to forward slashes on Windows --
    # presumably so the %r-quoted path matches canonic paths; confirm.
    filename = filename.replace('\\', '/')
debugger.run("runfile(%r, args=%r, wdir=%r)" % (filename, args, wdir))
|
def debugfile(filename, args=None, wdir=None, post_mortem=False)
|
Debug filename
args: command line arguments (string)
wdir: working directory
post_mortem: boolean, included for compatiblity with runfile
| 3.812342
| 4.054632
| 0.940244
|
# Get standard installation paths
try:
paths = sysconfig.get_paths()
standard_paths = [paths['stdlib'],
paths['purelib'],
paths['scripts'],
paths['data']]
except Exception:
standard_paths = []
# Get user installation path
# See Spyder issue 8776
try:
import site
if getattr(site, 'getusersitepackages', False):
# Virtualenvs don't have this function but
# conda envs do
user_path = [site.getusersitepackages()]
elif getattr(site, 'USER_SITE', False):
# However, it seems virtualenvs have this
# constant
user_path = [site.USER_SITE]
else:
user_path = []
except Exception:
user_path = []
return initial_pathlist + standard_paths + user_path
|
def create_pathlist(self, initial_pathlist)
|
Add to pathlist Python library paths to be skipped from module
reloading.
| 3.853182
| 3.621359
| 1.064015
|
def is_module_reloadable(self, module, modname):
    """Decide if a module is reloadable or not."""
    if self.has_cython:
        # Don't return cached inline compiled .PYX files.
        return False
    # Reloadable only when it is neither blacklisted by path nor by name.
    return not (self.is_module_in_pathlist(module)
                or self.is_module_in_namelist(modname))
|
def is_module_reloadable(self, module, modname)
|
Decide if a module is reloadable or not.
| 6.870707
| 6.712466
| 1.023574
|
# Returning True means "skip this module" (do not reload it).
modpath = getattr(module, '__file__', None)
# Skip module according to different criteria
if modpath is None:
    # *module* is a C module that is statically linked into the
    # interpreter. There is no way to know its path, so we
    # choose to ignore it.
    return True
elif any([p in modpath for p in self.pathlist]):
    # We don't want to reload modules that belong to the
    # standard library or installed to site-packages,
    # just modules created by the user.
    return True
elif not os.name == 'nt':
    # Module paths containing the strings below can be inherited
    # from the default Linux installation, Homebrew or the user
    # site-packages in a virtualenv.
    patterns = [r'^/usr/lib.*',
                r'^/usr/local/lib.*',
                r'^/usr/.*/dist-packages/.*',
                r'^/home/.*/.local/lib.*',
                r'^/Library/.*',
                r'^/Users/.*/Library/.*',
                r'^/Users/.*/.local/.*',
                ]
    if [p for p in patterns if re.search(p, modpath)]:
        return True
    else:
        return False
else:
    return False
|
def is_module_in_pathlist(self, module)
|
Decide if a module can be reloaded or not according to its path.
| 5.345763
| 5.179714
| 1.032057
|
# Cython support is opt-in through an environment variable set by Spyder.
run_cython = os.environ.get("SPY_RUN_CYTHON") == "True"
if run_cython:
    try:
        __import__('Cython')
        self.has_cython = True
    except Exception:
        # Cython not importable: silently leave support disabled.
        pass
if self.has_cython:
    # Import pyximport to enable Cython files support for
    # import statement
    import pyximport
    pyx_setup_args = {}
    # Add Numpy include dir to pyximport/distutils
    try:
        import numpy
        pyx_setup_args['include_dirs'] = numpy.get_include()
    except Exception:
        pass
    # Setup pyximport and enable Cython files reload
    pyximport.install(setup_args=pyx_setup_args,
                      reload_support=True)
|
def activate_cython(self)
|
Activate Cython support.
We need to run this here because if the support is
active, we don't to run the UMR at all.
| 3.669233
| 3.644566
| 1.006768
|
self.modnames_to_reload = []
for modname, module in list(sys.modules.items()):
    # Only consider modules imported after the baseline snapshot.
    if modname not in self.previous_modules:
        # Decide if a module can be reloaded or not
        if self.is_module_reloadable(module, modname):
            self.modnames_to_reload.append(modname)
            # Dropping the entry forces Python to re-import it later.
            del sys.modules[modname]
        else:
            continue
# Report reloaded modules
if self.verbose and self.modnames_to_reload:
    modnames = self.modnames_to_reload
    _print("\x1b[4;33m%s\x1b[24m%s\x1b[0m"\
           % ("Reloaded modules", ": "+", ".join(modnames)))
|
def run(self)
|
Delete user modules to force Python to deeply reload them
Do not del modules which are considered as system modules, i.e.
modules installed in subdirectories of Python interpreter's binary
Do not del C modules
| 3.62501
| 3.382074
| 1.071831
|
def get_matlab_value(val):
    """Extract a value from a Matlab file.

    From the oct2py project, see
    https://pythonhosted.org/oct2py/conversions.html
    """
    import numpy as np
    # Lists are converted element by element.
    if isinstance(val, list):
        return [get_matlab_value(item) for item in val]
    # Anything that is not an ndarray is already a leaf value.
    if not isinstance(val, np.ndarray):
        return val
    if hasattr(val, 'classname'):
        # User-defined Matlab class: rebuild it as a fresh Python type.
        attributes = {}
        for field in val.dtype.names:
            attributes[field] = get_matlab_value(
                val[field].squeeze().tolist())
        return type(val.classname, (object,), attributes)()
    elif val.dtype.names:
        # Matlab struct: convert to a MatlabStruct mapping.
        struct = MatlabStruct()
        for field in val.dtype.names:
            struct[field] = get_matlab_value(val[field].squeeze().tolist())
        val = struct
    elif val.dtype.kind == 'O':
        # Cell array: flatten and recurse.
        val = val.squeeze().tolist()
        if not isinstance(val, list):
            val = [val]
        val = get_matlab_value(val)
    elif val.size == 1:
        # Singleton array: unwrap to a plain Python scalar.
        val = val.item()
    elif val.size == 0:
        # Empty array: '' for string dtypes, [] otherwise.
        val = '' if val.dtype.kind in 'US' else []
    return val
|
def get_matlab_value(val)
|
Extract a value from a Matlab file
From the oct2py project, see
https://pythonhosted.org/oct2py/conversions.html
| 2.90379
| 2.987105
| 0.972109
|
# Returns (data, error_message); exactly one of the two is None.
try:
    if pd:
        # pandas' read_pickle also handles legacy pandas-object pickles.
        return pd.read_pickle(filename), None
    else:
        with open(filename, 'rb') as fid:
            data = pickle.load(fid)
        return data, None
except Exception as err:
    return None, str(err)
|
def load_pickle(filename)
|
Load a pickle file as a dictionary
| 2.701074
| 2.841034
| 0.950736
|
# Returns (data, error_message); exactly one of the two is None.
try:
    # Python 2 needs binary mode here; Python 3 uses text mode.
    if PY2:
        args = 'rb'
    else:
        args = 'r'
    with open(filename, args) as fid:
        data = json.load(fid)
    return data, None
except Exception as err:
    return None, str(err)
|
def load_json(filename)
|
Load a json file as a dictionary
| 3.006442
| 3.105153
| 0.968211
|
filename = osp.abspath(filename)
old_cwd = getcwd()
# Work from the target directory so tar members get bare file names.
os.chdir(osp.dirname(filename))
error_message = None
skipped_keys = []
data_copy = {}
try:
    # Copy dictionary before modifying it to fix #6689
    for obj_name, obj_value in data.items():
        # Skip modules, since they can't be pickled, users virtually never
        # would want them to be and so they don't show up in the skip list.
        # Skip callables, since they are only pickled by reference and thus
        # must already be present in the user's environment anyway.
        if not (callable(obj_value) or isinstance(obj_value,
                                                  types.ModuleType)):
            # If an object cannot be deepcopied, then it cannot be pickled.
            # Ergo, we skip it and list it later.
            try:
                data_copy[obj_name] = copy.deepcopy(obj_value)
            except Exception:
                skipped_keys.append(obj_name)
    data = data_copy
    if not data:
        raise RuntimeError('No supported objects to save')
    saved_arrays = {}
    if load_array is not None:
        # Saving numpy arrays with np.save
        arr_fname = osp.splitext(filename)[0]
        for name in list(data.keys()):
            try:
                if isinstance(data[name],
                              np.ndarray) and data[name].size > 0:
                    # Save arrays at data root
                    fname = __save_array(data[name], arr_fname,
                                         len(saved_arrays))
                    saved_arrays[(name, None)] = osp.basename(fname)
                    data.pop(name)
                elif isinstance(data[name], (list, dict)):
                    # Save arrays nested in lists or dictionaries
                    if isinstance(data[name], list):
                        iterator = enumerate(data[name])
                    else:
                        iterator = iter(list(data[name].items()))
                    to_remove = []
                    for index, value in iterator:
                        if isinstance(value,
                                      np.ndarray) and value.size > 0:
                            fname = __save_array(value, arr_fname,
                                                 len(saved_arrays))
                            saved_arrays[(name, index)] = (
                                osp.basename(fname))
                            to_remove.append(index)
                    # Remove from the end so earlier indices stay valid.
                    for index in sorted(to_remove, reverse=True):
                        data[name].pop(index)
            except (RuntimeError, pickle.PicklingError, TypeError,
                    AttributeError, IndexError):
                # If an array can't be saved with numpy for some reason,
                # leave the object intact and try to save it normally.
                pass
        if saved_arrays:
            # Bookkeeping entry read back by load_dictionary().
            data['__saved_arrays__'] = saved_arrays
    pickle_filename = osp.splitext(filename)[0] + '.pickle'
    # Attempt to pickle everything.
    # If pickling fails, iterate through to eliminate problem objs & retry.
    with open(pickle_filename, 'w+b') as fdesc:
        try:
            pickle.dump(data, fdesc, protocol=2)
        except (pickle.PicklingError, AttributeError, TypeError,
                ImportError, IndexError, RuntimeError):
            data_filtered = {}
            for obj_name, obj_value in data.items():
                try:
                    pickle.dumps(obj_value, protocol=2)
                except Exception:
                    skipped_keys.append(obj_name)
                else:
                    data_filtered[obj_name] = obj_value
            if not data_filtered:
                raise RuntimeError('No supported objects to save')
            pickle.dump(data_filtered, fdesc, protocol=2)
    # Use PAX (POSIX.1-2001) format instead of default GNU.
    # This improves interoperability and UTF-8/long variable name support.
    with tarfile.open(filename, "w", format=tarfile.PAX_FORMAT) as tar:
        for fname in ([pickle_filename]
                      + [fn for fn in list(saved_arrays.values())]):
            tar.add(osp.basename(fname))
            os.remove(fname)
except (RuntimeError, pickle.PicklingError, TypeError) as error:
    error_message = to_text_string(error)
else:
    if skipped_keys:
        skipped_keys.sort()
        error_message = ('Some objects could not be saved: '
                         + ', '.join(skipped_keys))
finally:
    os.chdir(old_cwd)
return error_message
|
def save_dictionary(data, filename)
|
Save dictionary in a single file .spydata file
| 3.383953
| 3.37447
| 1.00281
|
filename = osp.abspath(filename)
old_cwd = getcwd()
# Extract into a private temporary dir so stray files cannot collide.
tmp_folder = tempfile.mkdtemp()
os.chdir(tmp_folder)
data = None
error_message = None
try:
    with tarfile.open(filename, "r") as tar:
        # NOTE(review): extractall on an untrusted archive allows path
        # traversal; .spydata files are assumed to be user-created.
        tar.extractall()
    pickle_filename = glob.glob('*.pickle')[0]
    # 'New' format (Spyder >=2.2 for Python 2 and Python 3)
    with open(pickle_filename, 'rb') as fdesc:
        data = pickle.loads(fdesc.read())
    saved_arrays = {}
    if load_array is not None:
        # Loading numpy arrays saved with np.save
        try:
            saved_arrays = data.pop('__saved_arrays__')
            for (name, index), fname in list(saved_arrays.items()):
                arr = np.load( osp.join(tmp_folder, fname) )
                if index is None:
                    data[name] = arr
                elif isinstance(data[name], dict):
                    data[name][index] = arr
                else:
                    data[name].insert(index, arr)
        except KeyError:
            pass
# Except AttributeError from e.g. trying to load function no longer present
except (AttributeError, EOFError, ValueError) as error:
    error_message = to_text_string(error)
# To ensure working dir gets changed back and temp dir wiped no matter what
finally:
    os.chdir(old_cwd)
    try:
        shutil.rmtree(tmp_folder)
    except OSError as error:
        error_message = to_text_string(error)
return data, error_message
|
def load_dictionary(filename)
|
Load dictionary from .spydata file
| 3.956274
| 3.825374
| 1.034219
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.