sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def _find_parameter(self, name_list, create_missing=False, quiet=False):
"""
Tries to find and return the parameter of the specified name. The name
should be of the form
['branch1','branch2', 'parametername']
Setting create_missing=True means if it doesn't find a branch it
will create one.
Setting quiet=True will suppress error messages (for checking)
"""
# make a copy so this isn't destructive to the supplied list
s = list(name_list)
# if the length is zero, return the root widget
if len(s)==0: return self._widget
# the first name must be treated differently because it is
# the main widget, not a branch
r = self._clean_up_name(s.pop(0))
# search for the root name
result = self._widget.findItems(r, _g.QtCore.Qt.MatchCaseSensitive | _g.QtCore.Qt.MatchFixedString)
# if it pooped and we're not supposed to create it, quit
if len(result) == 0 and not create_missing:
if not quiet: self.print_message("ERROR: Could not find '"+r+"'")
return None
# otherwise use the first value
elif len(result): x = result[0].param
# otherwise, if there are more names in the list,
# create the branch and keep going
else:
x = _g.parametertree.Parameter.create(name=r, type='group', children=[])
self._widget.addParameters(x)
# loop over the remaining names, and use a different search method
for n in s:
# first clean up
n = self._clean_up_name(n)
# try to search for the name
try: x = x.param(n)
# name doesn't exist
except:
# if we're supposed to, create the new branch
if create_missing: x = x.addChild(_g.parametertree.Parameter.create(name=n, type='group', children=[]))
# otherwise poop out
else:
if not quiet: self.print_message("ERROR: Could not find '"+n+"' in '"+x.name()+"'")
return None
# return the last one we found / created.
return x | Tries to find and return the parameter of the specified name. The name
should be of the form
['branch1','branch2', 'parametername']
Setting create_missing=True means if it doesn't find a branch it
will create one.
Setting quiet=True will suppress error messages (for checking) | entailment |
def _clean_up_name(self, name):
"""
Cleans up the name according to the rules specified in this exact
function. Uses self.naughty, a list of naughty characters.
"""
for n in self.naughty: name = name.replace(n, '_')
return name | Cleans up the name according to the rules specified in this exact
function. Uses self.naughty, a list of naughty characters. | entailment |
def add_button(self, name, checkable=False, checked=False):
"""
Adds (and returns) a button at the specified location.
"""
# first clean up the name
name = self._clean_up_name(name)
# split into (presumably existing) branches and parameter
s = name.split('/')
# make sure it doesn't already exist
if not self._find_parameter(s, quiet=True) == None:
self.print_message("Error: '"+name+"' already exists.")
return None
# get the leaf name off the list.
p = s.pop(-1)
# create / get the branch on which to add the leaf
b = self._find_parameter(s, create_missing=True)
# quit out if it pooped
if b == None: return None
# create the leaf object
ap = _g.parametertree.Parameter.create(name=p, type='action')
# add it to the tree (different methods for root)
if b == self._widget: b.addParameters(ap)
else: b.addChild(ap)
# modify the existing class to fit our conventions
ap.signal_clicked = ap.sigActivated
# Now set the default value if any
if name in self._lazy_load:
v = self._lazy_load.pop(name)
self._set_value_safe(name, v, True, True)
# Connect it to autosave (will only create unique connections)
self.connect_any_signal_changed(self.autosave)
return Button(name, checkable, checked, list(ap.items.keys())[0].button) | Adds (and returns) a button at the specified location. | entailment |
def add_parameter(self, name='test', value=42.0, **kwargs):
"""
Adds a parameter "leaf" to the tree.
Parameters
----------
name='test'
The name of the leaf. It should be a string of the form
"branch1/branch2/parametername" and will be nested as indicated.
value=42.0
Value of the leaf.
Common Keyword Arguments
------------------------
type=None
If set to None, type will be automatically set to type(value).__name__.
This will not work for all data types, but is
a nice shortcut for floats, ints, strings, etc.
If it doesn't work, just specify the type manually (see below).
values
Not used by default. Only relevant for 'list' type, and should then
be a list of possible values.
step=1
Step size of incrementing numbers
dec=False
Set to True to enable decade increments.
limits
Not used by default. Should be a 2-element tuple or list used to
bound numerical values.
default
Not used by default. Used to specify the default numerical value
siPrefix=False
Set to True to display units on numbers
suffix
Not used by default. Used to add unit labels to elements.
See pyqtgraph ParameterTree for more options. Particularly useful is the
tip='insert your text' option, which supplies a tooltip!
"""
# update the default kwargs
other_kwargs = dict(type=None)
other_kwargs.update(kwargs)
# Auto typing
if other_kwargs['type'] == None: other_kwargs['type'] = type(value).__name__
# Fix 'values' for list objects to be only strings
if other_kwargs['type'] == 'list':
for n in range(len(other_kwargs['values'])):
other_kwargs['values'][n] = str(other_kwargs['values'][n])
# split into (presumably existing) branches and parameter
s = name.split('/')
# make sure it doesn't already exist
if not self._find_parameter(s, quiet=True) == None:
self.print_message("Error: '"+name+"' already exists.")
return self
# get the leaf name off the list.
p = s.pop(-1)
# create / get the branch on which to add the leaf
b = self._find_parameter(s, create_missing=True)
# quit out if it pooped
if b == None: return self
# create the leaf object
leaf = _g.parametertree.Parameter.create(name=p, value=value, **other_kwargs)
# add it to the tree (different methods for root)
if b == self._widget: b.addParameters(leaf)
else: b.addChild(leaf)
# Now set the default value if any
if name in self._lazy_load:
v = self._lazy_load.pop(name)
self._set_value_safe(name, v, True, True)
# Connect it to autosave (will only create unique connections)
self.connect_any_signal_changed(self.autosave)
return self | Adds a parameter "leaf" to the tree.
Parameters
----------
name='test'
The name of the leaf. It should be a string of the form
"branch1/branch2/parametername" and will be nested as indicated.
value=42.0
Value of the leaf.
Common Keyword Arguments
------------------------
type=None
If set to None, type will be automatically set to type(value).__name__.
This will not work for all data types, but is
a nice shortcut for floats, ints, strings, etc.
If it doesn't work, just specify the type manually (see below).
values
Not used by default. Only relevant for 'list' type, and should then
be a list of possible values.
step=1
Step size of incrementing numbers
dec=False
Set to True to enable decade increments.
limits
Not used by default. Should be a 2-element tuple or list used to
bound numerical values.
default
Not used by default. Used to specify the default numerical value
siPrefix=False
Set to True to display units on numbers
suffix
Not used by default. Used to add unit labels to elements.
See pyqtgraph ParameterTree for more options. Particularly useful is the
tip='insert your text' option, which supplies a tooltip! | entailment |
def _get_parameter_dictionary(self, base_name, dictionary, sorted_keys, parameter):
"""
Recursively loops over the parameter's children, adding
keys (starting with base_name) and values to the supplied dictionary
(provided they do not have a value of None).
"""
# assemble the key for this parameter
k = base_name + "/" + parameter.name()
# first add this parameter (if it has a value)
if not parameter.value()==None:
sorted_keys.append(k[1:])
dictionary[sorted_keys[-1]] = parameter.value()
# now loop over the children
for p in parameter.children():
self._get_parameter_dictionary(k, dictionary, sorted_keys, p) | Recursively loops over the parameter's children, adding
keys (starting with base_name) and values to the supplied dictionary
(provided they do not have a value of None). | entailment |
def send_to_databox_header(self, destination_databox):
"""
Sends all the information currently in the tree to the supplied
databox's header, in alphabetical order. If the entries already
exists, just updates them.
"""
k, d = self.get_dictionary()
destination_databox.update_headers(d,k) | Sends all the information currently in the tree to the supplied
databox's header, in alphabetical order. If the entries already
exists, just updates them. | entailment |
def get_dictionary(self):
"""
Returns the list of parameters and a dictionary of values
(good for writing to a databox header!)
Return format is sorted_keys, dictionary
"""
# output
k = list()
d = dict()
# loop over the root items
for i in range(self._widget.topLevelItemCount()):
# grab the parameter item, and start building the name
x = self._widget.topLevelItem(i).param
# now start the recursive loop
self._get_parameter_dictionary('', d, k, x)
return k, d | Returns the list of parameters and a dictionary of values
(good for writing to a databox header!)
Return format is sorted_keys, dictionary | entailment |
def get_value(self, name):
"""
Returns the value of the parameter with the specified name.
"""
# first clean up the name
name = self._clean_up_name(name)
# now get the parameter object
x = self._find_parameter(name.split('/'))
# quit if it pooped.
if x == None: return None
# get the value and test the bounds
value = x.value()
# handles the two versions of pyqtgraph
bounds = None
# For lists, just make sure it's a valid value
if x.opts['type'] == 'list':
# If it's not one from the master list, choose
# and return the default value.
if not value in x.opts['values']:
# Only choose a default if there exists one
if len(x.opts('values')):
self.set_value(name, x.opts['values'][0])
return x.opts['values'][0]
# Otherwise, just return None and do nothing
else: return None
# For strings, make sure the returned value is always a string.
elif x.opts['type'] in ['str']: return str(value)
# Otherwise assume it is a value with bounds or limits (depending on
# the version of pyqtgraph)
else:
if 'limits' in x.opts: bounds = x.opts['limits']
elif 'bounds' in x.opts: bounds = x.opts['bounds']
if not bounds == None:
if not bounds[1]==None and value > bounds[1]: value = bounds[1]
if not bounds[0]==None and value < bounds[0]: value = bounds[0]
# return it
return value | Returns the value of the parameter with the specified name. | entailment |
def get_list_values(self, name):
"""
Returns the values for a list item of the specified name.
"""
# Make sure it's a list
if not self.get_type(name) in ['list']:
self.print_message('ERROR: "'+name+'" is not a list.')
return
# Return a copy of the list values
return list(self.get_widget(name).opts['values']) | Returns the values for a list item of the specified name. | entailment |
def set_value(self, name, value, ignore_error=False, block_user_signals=False):
"""
Sets the variable of the supplied name to the supplied value.
Setting block_user_signals=True will temporarily block the widget from
sending any signals when setting the value.
"""
# first clean up the name
name = self._clean_up_name(name)
# If we're supposed to, block the user signals for this parameter
if block_user_signals: self.block_user_signals(name, ignore_error)
# now get the parameter object
x = self._find_parameter(name.split('/'), quiet=ignore_error)
# quit if it pooped.
if x == None: return None
# for lists, make sure the value exists!!
if x.type() in ['list']:
# Make sure it exists before trying to set it
if str(value) in list(x.forward.keys()): x.setValue(str(value))
# Otherwise default to the first key
else: x.setValue(list(x.forward.keys())[0])
# Bail to a hopeful set method for other types
else: x.setValue(eval(x.opts['type'])(value))
# If we're supposed to unblock the user signals for this parameter
if block_user_signals: self.unblock_user_signals(name, ignore_error)
return self | Sets the variable of the supplied name to the supplied value.
Setting block_user_signals=True will temporarily block the widget from
sending any signals when setting the value. | entailment |
def save(self, path=None):
"""
Saves all the parameters to a text file using the databox
functionality. If path=None, saves to self._autosettings_path. If
self._autosettings_path=None, does not save.
"""
if path==None:
if self._autosettings_path == None: return self
# Get the gui settings directory
gui_settings_dir = _os.path.join(_cwd, 'egg_settings')
# make sure the directory exists
if not _os.path.exists(gui_settings_dir): _os.mkdir(gui_settings_dir)
# Assemble the path
path = _os.path.join(gui_settings_dir, self._autosettings_path)
# make the databox object
d = _d.databox()
# get the keys and dictionary
keys, dictionary = self.get_dictionary()
# loop over the keys and add them to the databox header
for k in keys:
d.insert_header(k, dictionary[k])
# save it
try:
d.save_file(path, force_overwrite=True, header_only=True)
except:
print('Warning: could not save '+path.__repr__()+' once. Could be that this is being called too rapidly.')
return self | Saves all the parameters to a text file using the databox
functionality. If path=None, saves to self._autosettings_path. If
self._autosettings_path=None, does not save. | entailment |
def load(self, path=None, ignore_errors=True, block_user_signals=False):
"""
Loads all the parameters from a databox text file. If path=None,
loads from self._autosettings_path (provided this is not None).
Parameters
----------
path=None
Path to load the settings from. If None, will load from the
specified autosettings_path.
ignore_errors=True
Whether we should raise a stink when a setting doesn't exist.
When settings do not exist, they are stuffed into the dictionary
self._lazy_load.
block_user_signals=False
If True, the load will not trigger any signals.
"""
if path==None:
# Bail if there is no path
if self._autosettings_path == None: return self
# Get the gui settings directory
gui_settings_dir = _os.path.join(_cwd, 'egg_settings')
# Get the final path
path = _os.path.join(gui_settings_dir, self._autosettings_path)
# make the databox object
d = _d.databox()
# only load if it exists
if _os.path.exists(path): d.load_file(path, header_only=True)
else: return None
# update the settings
self.update(d, ignore_errors=ignore_errors, block_user_signals=block_user_signals)
return self | Loads all the parameters from a databox text file. If path=None,
loads from self._autosettings_path (provided this is not None).
Parameters
----------
path=None
Path to load the settings from. If None, will load from the
specified autosettings_path.
ignore_errors=True
Whether we should raise a stink when a setting doesn't exist.
When settings do not exist, they are stuffed into the dictionary
self._lazy_load.
block_user_signals=False
If True, the load will not trigger any signals. | entailment |
def update(self, d, ignore_errors=True, block_user_signals=False):
"""
Supply a dictionary or databox with a header of the same format
and see what happens! (Hint: it updates the existing values.)
This will store non-existent key-value pairs in the dictionary
self._lazy_load. When you add settings in the future,
these will be checked for the default values.
"""
if not type(d) == dict: d = d.headers
# Update the lazy load
self._lazy_load.update(d)
# loop over the dictionary and update
for k in list(self._lazy_load.keys()):
# Only proceed if the parameter exists
if not self._find_parameter(k.split('/'), False, True) == None:
# Pop the value so it's not set again in the future
v = self._lazy_load.pop(k)
self._set_value_safe(k, v, ignore_errors, block_user_signals)
return self | Supply a dictionary or databox with a header of the same format
and see what happens! (Hint: it updates the existing values.)
This will store non-existent key-value pairs in the dictionary
self._lazy_load. When you add settings in the future,
these will be checked for the default values. | entailment |
def _set_value_safe(self, k, v, ignore_errors=False, block_user_signals=False):
"""
Actually sets the value, first by trying it directly, then by
"""
# for safety: by default assume it's a repr() with python code
try:
self.set_value(k, v, ignore_error = ignore_errors,
block_user_signals = block_user_signals)
except:
print("TreeDictionary ERROR: Could not set '"+k+"' to '"+v+"'") | Actually sets the value, first by trying it directly, then by | entailment |
def _button_autosave_clicked(self, checked):
"""
Called whenever the button is clicked.
"""
if checked:
# get the path from the user
path = _spinmob.dialogs.save(filters=self.file_type)
# abort if necessary
if not path:
self.button_autosave.set_checked(False)
return
# otherwise, save the info!
self._autosave_directory, filename = _os.path.split(path)
self._label_path.set_text(filename)
self.save_gui_settings() | Called whenever the button is clicked. | entailment |
def save_file(self, path=None, force_overwrite=False, just_settings=False, **kwargs):
"""
Saves the data in the databox to a file.
Parameters
----------
path=None
Path for output. If set to None, use a save dialog.
force_overwrite=False
Do not question the overwrite if the file already exists.
just_settings=False
Set to True to save only the state of the DataboxPlot controls
**kwargs are sent to the normal databox save_file() function.
"""
# Update the binary mode
if not 'binary' in kwargs: kwargs['binary'] = self.combo_binary.get_text()
# if it's just the settings file, make a new databox
if just_settings: d = _d.databox()
# otherwise use the internal databox
else: d = self
# add all the controls settings to the header
for x in self._autosettings_controls: self._store_gui_setting(d, x)
# save the file using the skeleton function, so as not to recursively
# call this one again!
_d.databox.save_file(d, path, self.file_type, self.file_type, force_overwrite, **kwargs) | Saves the data in the databox to a file.
Parameters
----------
path=None
Path for output. If set to None, use a save dialog.
force_overwrite=False
Do not question the overwrite if the file already exists.
just_settings=False
Set to True to save only the state of the DataboxPlot controls
**kwargs are sent to the normal databox save_file() function. | entailment |
def load_file(self, path=None, just_settings=False):
"""
Loads a data file. After the file is loaded, calls self.after_load_file(self),
which you can overwrite if you like!
just_settings=True will only load the configuration of the controls,
and will not plot anything or run after_load_file
"""
# if it's just the settings file, make a new databox
if just_settings:
d = _d.databox()
header_only = True
# otherwise use the internal databox
else:
d = self
header_only = False
# import the settings if they exist in the header
if not None == _d.databox.load_file(d, path, filters=self.file_type, header_only=header_only, quiet=just_settings):
# loop over the autosettings and update the gui
for x in self._autosettings_controls: self._load_gui_setting(x,d)
# always sync the internal data
self._synchronize_controls()
# plot the data if this isn't just a settings load
if not just_settings:
self.plot()
self.after_load_file() | Loads a data file. After the file is loaded, calls self.after_load_file(self),
which you can overwrite if you like!
just_settings=True will only load the configuration of the controls,
and will not plot anything or run after_load_file | entailment |
def _autoscript(self):
"""
Automatically generates a python script for plotting.
"""
# This should never happen unless I screwed up.
if self.combo_autoscript.get_index() == 0: return "ERROR: Ask Jack."
# if there is no data, leave it blank
if len(self)==0: return "x = []; y = []; xlabels=[]; ylabels=[]"
# if there is one column, make up a one-column script
elif len(self)==1: return "x = [None]\ny = [ d[0] ]\n\nxlabels=[ 'Data Point' ]\nylabels=[ 'd[0]' ]"
# Shared x-axis (column 0)
elif self.combo_autoscript.get_index() == 1:
# hard code the first columns
sx = "x = [ d[0]"
sy = "y = [ d[1]"
# hard code the first labels
sxlabels = "xlabels = '" +self.ckeys[0]+"'"
sylabels = "ylabels = [ '"+self.ckeys[1]+"'"
# loop over any remaining columns and append.
for n in range(2,len(self)):
sy += ", d["+str(n)+"]"
sylabels += ", '"+self.ckeys[n]+"'"
return sx+" ]\n"+sy+" ]\n\n"+sxlabels+"\n"+sylabels+" ]\n"
# Column pairs
elif self.combo_autoscript.get_index() == 2:
# hard code the first columns
sx = "x = [ d[0]"
sy = "y = [ d[1]"
# hard code the first labels
sxlabels = "xlabels = [ '"+self.ckeys[0]+"'"
sylabels = "ylabels = [ '"+self.ckeys[1]+"'"
# Loop over the remaining columns and append
for n in range(1,int(len(self)/2)):
sx += ", d["+str(2*n )+"]"
sy += ", d["+str(2*n+1)+"]"
sxlabels += ", '"+self.ckeys[2*n ]+"'"
sylabels += ", '"+self.ckeys[2*n+1]+"'"
return sx+" ]\n"+sy+" ]\n\n"+sxlabels+" ]\n"+sylabels+" ]\n"
print("test")
# Column triples
elif self.combo_autoscript.get_index() == 3:
# hard code the first columns
sx = "x = [ d[0], d[0]"
sy = "y = [ d[1], d[2]"
# hard code the first labels
sxlabels = "xlabels = [ '"+self.ckeys[0]+"', '"+self.ckeys[0]+"'"
sylabels = "ylabels = [ '"+self.ckeys[1]+"', '"+self.ckeys[2]+"'"
# Loop over the remaining columns and append
for n in range(1,int(len(self)/3)):
sx += ", d["+str(3*n )+"], d["+str(3*n )+"]"
sy += ", d["+str(3*n+1)+"], d["+str(3*n+2)+"]"
sxlabels += ", '"+self.ckeys[3*n ]+"', '"+self.ckeys[3*n ]+"'"
sylabels += ", '"+self.ckeys[3*n+1]+"', '"+self.ckeys[3*n+2]+"'"
return sx+" ]\n"+sy+" ]\n\n"+sxlabels+" ]\n"+sylabels+" ]\n"
else: return self.autoscript_custom() | Automatically generates a python script for plotting. | entailment |
def plot(self):
"""
Sets the internal databox to the supplied value and plots it.
If databox=None, this will plot the internal databox.
"""
# if we're disabled or have no data columns, clear everything!
if not self.button_enabled.is_checked() or len(self) == 0:
self._set_number_of_plots(0)
return self
# if there is no script, create a default
if not self.combo_autoscript.get_index()==0:
self.script.set_text(self._autoscript())
##### Try the script and make the curves / plots match
try:
# get globals for sin, cos etc
g = _n.__dict__
g.update(dict(d=self))
g.update(dict(xlabels='x', ylabels='y'))
# run the script.
exec(self.script.get_text(), g)
# x & y should now be data arrays, lists of data arrays or Nones
x = g['x']
y = g['y']
# make it the right shape
if x == None: x = [None]
if y == None: y = [None]
if not _spinmob.fun.is_iterable(x[0]) and not x[0] == None: x = [x]
if not _spinmob.fun.is_iterable(y[0]) and not y[0] == None: y = [y]
if len(x) == 1 and not len(y) == 1: x = x*len(y)
if len(y) == 1 and not len(x) == 1: y = y*len(x)
# xlabels and ylabels should be strings or lists of strings
xlabels = g['xlabels']
ylabels = g['ylabels']
# make sure we have exactly the right number of plots
self._set_number_of_plots(len(x))
self._update_linked_axes()
# return if there is nothing.
if len(x) == 0: return
# now plot everything
for n in range(max(len(x),len(y))-1,-1,-1):
# Create data for "None" cases.
if x[n] is None: x[n] = list(range(len(y[n])))
if y[n] is None: y[n] = list(range(len(x[n])))
self._curves[n].setData(x[n],y[n])
# get the labels for the curves
# if it's a string, use the same label for all axes
if type(xlabels) in [str,type(None)]: xlabel = xlabels
elif n < len(xlabels): xlabel = xlabels[n]
else: xlabel = ''
if type(ylabels) in [str,type(None)]: ylabel = ylabels
elif n < len(ylabels): ylabel = ylabels[n]
else: ylabel = ''
# set the labels
i = min(n, len(self.plot_widgets)-1)
self.plot_widgets[i].setLabel('left', ylabel)
self.plot_widgets[i].setLabel('bottom', xlabel)
# special case: hide if None
if xlabel == None: self.plot_widgets[i].getAxis('bottom').showLabel(False)
if ylabel == None: self.plot_widgets[i].getAxis('left') .showLabel(False)
# unpink the script, since it seems to have worked
self.script.set_colors('black','white')
# otherwise, look angry and don't autosave
except: self.script.set_colors('black','pink')
return self | Sets the internal databox to the supplied value and plots it.
If databox=None, this will plot the internal databox. | entailment |
def autosave(self):
"""
Autosaves the currently stored data, but only if autosave is checked!
"""
# make sure we're suppoed to
if self.button_autosave.is_checked():
# save the file
self.save_file(_os.path.join(self._autosave_directory, "%04d " % (self.number_file.get_value()) + self._label_path.get_text()))
# increment the counter
self.number_file.increment() | Autosaves the currently stored data, but only if autosave is checked! | entailment |
def autozoom(self, n=None):
"""
Auto-scales the axes to fit all the data in plot index n. If n == None,
auto-scale everyone.
"""
if n==None:
for p in self.plot_widgets: p.autoRange()
else: self.plot_widgets[n].autoRange()
return self | Auto-scales the axes to fit all the data in plot index n. If n == None,
auto-scale everyone. | entailment |
def _synchronize_controls(self):
"""
Updates the gui based on button configs.
"""
# whether the script is visible
self.grid_script._widget.setVisible(self.button_script.get_value())
# whether we should be able to edit it.
if not self.combo_autoscript.get_index()==0: self.script.disable()
else: self.script.enable() | Updates the gui based on button configs. | entailment |
def _set_number_of_plots(self, n):
"""
Adjusts number of plots & curves to the desired value the gui.
"""
# multi plot, right number of plots and curves = great!
if self.button_multi.is_checked() \
and len(self._curves) == len(self.plot_widgets) \
and len(self._curves) == n: return
# single plot, right number of curves = great!
if not self.button_multi.is_checked() \
and len(self.plot_widgets) == 1 \
and len(self._curves) == n: return
# time to rebuild!
# don't show the plots as they are built
self.grid_plot.block_events()
# make sure the number of curves is on target
while len(self._curves) > n: self._curves.pop(-1)
while len(self._curves) < n: self._curves.append(_g.PlotCurveItem(pen = (len(self._curves), n)))
# figure out the target number of plots
if self.button_multi.is_checked(): n_plots = n
else: n_plots = min(n,1)
# clear the plots
while len(self.plot_widgets):
# pop the last plot widget and remove all items
p = self.plot_widgets.pop(-1)
p.clear()
# remove it from the grid
self.grid_plot.remove_object(p)
# add new plots
for i in range(n_plots):
self.plot_widgets.append(self.grid_plot.place_object(_g.PlotWidget(), 0, i, alignment=0))
# loop over the curves and add them to the plots
for i in range(n):
self.plot_widgets[min(i,len(self.plot_widgets)-1)].addItem(self._curves[i])
# loop over the ROI's and add them
if self.ROIs is not None:
for i in range(len(self.ROIs)):
# get the ROIs for this plot
ROIs = self.ROIs[i]
if not _spinmob.fun.is_iterable(ROIs): ROIs = [ROIs]
# loop over the ROIs for this plot
for ROI in ROIs:
# determine which plot to add the ROI to
m = min(i, len(self.plot_widgets)-1)
# add the ROI to the appropriate plot
if m>=0: self.plot_widgets[m].addItem(ROI)
# show the plots
self.grid_plot.unblock_events() | Adjusts number of plots & curves to the desired value the gui. | entailment |
def _update_linked_axes(self):
"""
Loops over the axes and links / unlinks them.
"""
# no axes to link!
if len(self.plot_widgets) <= 1: return
# get the first plotItem
a = self.plot_widgets[0].plotItem.getViewBox()
# now loop through all the axes and link / unlink the axes
for n in range(1,len(self.plot_widgets)):
# Get one of the others
b = self.plot_widgets[n].plotItem.getViewBox()
# link the axis, but only if it isn't already
if self.button_link_x.is_checked() and b.linkedView(b.XAxis) == None:
b.linkView(b.XAxis, a)
# Otherwise, unlink the guy, but only if it's linked to begin with
elif not self.button_link_x.is_checked() and not b.linkedView(b.XAxis) == None:
b.linkView(b.XAxis, None) | Loops over the axes and links / unlinks them. | entailment |
def resolve_const_spec(self, name, lineno):
"""Finds and links the ConstSpec with the given name."""
if name in self.const_specs:
return self.const_specs[name].link(self)
if '.' in name:
include_name, component = name.split('.', 1)
if include_name in self.included_scopes:
return self.included_scopes[include_name].resolve_const_spec(
component, lineno
)
raise ThriftCompilerError(
'Unknown constant "%s" referenced at line %d%s' % (
name, lineno, self.__in_path()
)
) | Finds and links the ConstSpec with the given name. | entailment |
def resolve_type_spec(self, name, lineno):
"""Finds and links the TypeSpec with the given name."""
if name in self.type_specs:
return self.type_specs[name].link(self)
if '.' in name:
include_name, component = name.split('.', 1)
if include_name in self.included_scopes:
return self.included_scopes[include_name].resolve_type_spec(
component, lineno
)
raise ThriftCompilerError(
'Unknown type "%s" referenced at line %d%s' % (
name, lineno, self.__in_path()
)
) | Finds and links the TypeSpec with the given name. | entailment |
def resolve_service_spec(self, name, lineno):
"""Finds and links the ServiceSpec with the given name."""
if name in self.service_specs:
return self.service_specs[name].link(self)
if '.' in name:
include_name, component = name.split('.', 2)
if include_name in self.included_scopes:
return self.included_scopes[
include_name
].resolve_service_spec(component, lineno)
raise ThriftCompilerError(
'Unknown service "%s" referenced at line %d%s' % (
name, lineno, self.__in_path()
)
) | Finds and links the ServiceSpec with the given name. | entailment |
def add_include(self, name, included_scope, module):
"""Register an imported module into this scope.
Raises ``ThriftCompilerError`` if the name has already been used.
"""
# The compiler already ensures this. If we still get here with a
# conflict, that's a bug.
assert name not in self.included_scopes
self.included_scopes[name] = included_scope
self.add_surface(name, module) | Register an imported module into this scope.
Raises ``ThriftCompilerError`` if the name has already been used. | entailment |
def add_service_spec(self, service_spec):
"""Registers the given ``ServiceSpec`` into the scope.
Raises ``ThriftCompilerError`` if the name has already been used.
"""
assert service_spec is not None
if service_spec.name in self.service_specs:
raise ThriftCompilerError(
'Cannot define service "%s". That name is already taken.'
% service_spec.name
)
self.service_specs[service_spec.name] = service_spec | Registers the given ``ServiceSpec`` into the scope.
Raises ``ThriftCompilerError`` if the name has already been used. | entailment |
def add_const_spec(self, const_spec):
"""Adds a ConstSpec to the compliation scope.
If the ConstSpec's ``save`` attribute is True, the constant will be
added to the module at the top-level.
"""
if const_spec.name in self.const_specs:
raise ThriftCompilerError(
'Cannot define constant "%s". That name is already taken.'
% const_spec.name
)
self.const_specs[const_spec.name] = const_spec | Adds a ConstSpec to the compliation scope.
If the ConstSpec's ``save`` attribute is True, the constant will be
added to the module at the top-level. | entailment |
def add_surface(self, name, surface):
"""Adds a top-level attribute with the given name to the module."""
assert surface is not None
if hasattr(self.module, name):
raise ThriftCompilerError(
'Cannot define "%s". The name has already been used.' % name
)
setattr(self.module, name, surface) | Adds a top-level attribute with the given name to the module. | entailment |
def add_type_spec(self, name, spec, lineno):
"""Adds the given type to the scope.
:param str name:
Name of the new type
:param spec:
``TypeSpec`` object containing information on the type, or a
``TypeReference`` if this is meant to be resolved during the
``link`` stage.
:param lineno:
Line number on which this type is defined.
"""
assert type is not None
if name in self.type_specs:
raise ThriftCompilerError(
'Cannot define type "%s" at line %d. '
'Another type with that name already exists.'
% (name, lineno)
)
self.type_specs[name] = spec | Adds the given type to the scope.
:param str name:
Name of the new type
:param spec:
``TypeSpec`` object containing information on the type, or a
``TypeReference`` if this is meant to be resolved during the
``link`` stage.
:param lineno:
Line number on which this type is defined. | entailment |
def coarsen_array(a, level=2, method='mean'):
"""
Returns a coarsened (binned) version of the data. Currently supports
any of the numpy array operations, e.g. min, max, mean, std, ...
level=2 means every two data points will be binned.
level=0 or 1 just returns a copy of the array
"""
if a is None: return None
# make sure it's a numpy array
a = _n.array(a)
# quickest option
if level in [0,1,False]: return a
# otherwise assemble the python code to execute
code = 'a.reshape(-1, level).'+method+'(axis=1)'
# execute, making sure the array can be reshaped!
try: return eval(code, dict(a=a[0:int(len(a)/level)*level], level=level))
except:
print("ERROR: Could not coarsen array with method "+repr(method))
return a | Returns a coarsened (binned) version of the data. Currently supports
any of the numpy array operations, e.g. min, max, mean, std, ...
level=2 means every two data points will be binned.
level=0 or 1 just returns a copy of the array | entailment |
def coarsen_data(x, y, ey=None, ex=None, level=2, exponential=False):
"""
Coarsens the supplied data set. Returns coarsened arrays of x, y, along with
quadrature-coarsened arrays of ey and ex if specified.
Parameters
----------
x, y
Data arrays. Can be lists (will convert to numpy arrays).
These are coarsened by taking an average.
ey=None, ex=None
y and x uncertainties. Accepts arrays, lists, or numbers.
These are coarsened by averaging in quadrature.
level=2
For linear coarsening (default, see below), every n=level points will
be averaged together (in quadrature for errors). For exponential
coarsening, bins will be spaced by the specified scaling=level factor;
for example, level=1.4 will group points within 40% of each other's x
values. This is a great option for log-x plots, as the outcome will be
evenly spaced.
exponential=False
If False, coarsen using linear spacing. If True, the bins will be
exponentially spaced by the specified level.
"""
# Normal coarsening
if not exponential:
# Coarsen the data
xc = coarsen_array(x, level, 'mean')
yc = coarsen_array(y, level, 'mean')
# Coarsen the y error in quadrature
if not ey is None:
if not is_iterable(ey): ey = [ey]*len(y)
eyc = _n.sqrt(coarsen_array(_n.power(ey,2)/level, level, 'mean'))
# Coarsen the x error in quadrature
if not ex is None:
if not is_iterable(ey): ex = [ex]*len(x)
exc = _n.sqrt(coarsen_array(_n.power(ex,2)/level, level, 'mean'))
# Exponential coarsen
else:
# Make sure the data are arrays
x = _n.array(x)
y = _n.array(y)
# Create the new arrays to fill
xc = []
yc = []
if not ey is None:
if not is_iterable(ey): ey = _n.array([ey]*len(y))
eyc = []
if not ex is None:
if not is_iterable(ex): ex = _n.array([ex]*len(x))
exc = []
# Find the first element that is greater than zero
x0 = x[x>0][0]
# Now loop over the exponential bins
n = 0
while x0*level**n < x[-1]:
# Get all the points between x[n] and x[n]*r
mask = _n.logical_and(x0*level**n <= x, x < x0*level**(n+1))
# Only do something if points exist from this range!
if len(x[mask]):
# Take the average x value
xc.append(_n.average(x[mask]))
yc.append(_n.average(y[mask]))
# do the errors in quadrature
if not ey is None: eyc.append(_n.sqrt(_n.average((ey**2)[mask])/len(ey[mask])))
if not ex is None: exc.append(_n.sqrt(_n.average((ex**2)[mask])/len(ex[mask])))
# Increment the counter
n += 1
# Done exponential loop
# Done coarsening
# Return depending on situation
if ey is None and ex is None: return _n.array(xc), _n.array(yc)
elif ex is None : return _n.array(xc), _n.array(yc), _n.array(eyc)
elif ey is None : return _n.array(xc), _n.array(yc), _n.array(exc)
else : return _n.array(xc), _n.array(yc), _n.array(eyc), _n.array(exc) | Coarsens the supplied data set. Returns coarsened arrays of x, y, along with
quadrature-coarsened arrays of ey and ex if specified.
Parameters
----------
x, y
Data arrays. Can be lists (will convert to numpy arrays).
These are coarsened by taking an average.
ey=None, ex=None
y and x uncertainties. Accepts arrays, lists, or numbers.
These are coarsened by averaging in quadrature.
level=2
For linear coarsening (default, see below), every n=level points will
be averaged together (in quadrature for errors). For exponential
coarsening, bins will be spaced by the specified scaling=level factor;
for example, level=1.4 will group points within 40% of each other's x
values. This is a great option for log-x plots, as the outcome will be
evenly spaced.
exponential=False
If False, coarsen using linear spacing. If True, the bins will be
exponentially spaced by the specified level. | entailment |
def coarsen_matrix(Z, xlevel=0, ylevel=0, method='average'):
"""
This returns a coarsened numpy matrix.
method can be 'average', 'maximum', or 'minimum'
"""
# coarsen x
if not ylevel:
Z_coarsened = Z
else:
temp = []
for z in Z: temp.append(coarsen_array(z, ylevel, method))
Z_coarsened = _n.array(temp)
# coarsen y
if xlevel:
Z_coarsened = Z_coarsened.transpose()
temp = []
for z in Z_coarsened: temp.append(coarsen_array(z, xlevel, method))
Z_coarsened = _n.array(temp).transpose()
return Z_coarsened
# first coarsen the columns (if necessary)
if ylevel:
Z_ycoarsened = []
for c in Z: Z_ycoarsened.append(coarsen_array(c, ylevel, method))
Z_ycoarsened = _n.array(Z_ycoarsened)
# now coarsen the rows
if xlevel: return coarsen_array(Z_ycoarsened, xlevel, method)
else: return _n.array(Z_ycoarsened) | This returns a coarsened numpy matrix.
method can be 'average', 'maximum', or 'minimum' | entailment |
def erange(start, end, steps):
"""
Returns a numpy array over the specified range taking geometric steps.
See also numpy.logspace()
"""
if start == 0:
print("Nothing you multiply zero by gives you anything but zero. Try picking something small.")
return None
if end == 0:
print("It takes an infinite number of steps to get to zero. Try a small number?")
return None
# figure out our multiplication scale
x = (1.0*end/start)**(1.0/(steps-1))
# now generate the array
ns = _n.array(list(range(0,steps)))
a = start*_n.power(x,ns)
# tidy up the last element (there's often roundoff error)
a[-1] = end
return a | Returns a numpy array over the specified range taking geometric steps.
See also numpy.logspace() | entailment |
def is_a_number(s):
"""
This takes an object and determines whether it's a number or a string
representing a number.
"""
if _s.fun.is_iterable(s) and not type(s) == str: return False
try:
float(s)
return 1
except:
try:
complex(s)
return 2
except:
try:
complex(s.replace('(','').replace(')','').replace('i','j'))
return 2
except:
return False | This takes an object and determines whether it's a number or a string
representing a number. | entailment |
def array_shift(a, n, fill="average"):
"""
This will return an array with all the elements shifted forward in index by n.
a is the array
n is the amount by which to shift (can be positive or negative)
fill="average" fill the new empty elements with the average of the array
fill="wrap" fill the new empty elements with the lopped-off elements
fill=37.2 fill the new empty elements with the value 37.2
"""
new_a = _n.array(a)
if n==0: return new_a
fill_array = _n.array([])
fill_array.resize(_n.abs(n))
# fill up the fill array before we do the shift
if fill is "average": fill_array = 0.0*fill_array + _n.average(a)
elif fill is "wrap" and n >= 0:
for i in range(0,n): fill_array[i] = a[i-n]
elif fill is "wrap" and n < 0:
for i in range(0,-n): fill_array[i] = a[i]
else: fill_array = 0.0*fill_array + fill
# shift and fill
if n > 0:
for i in range(n, len(a)): new_a[i] = a[i-n]
for i in range(0, n): new_a[i] = fill_array[i]
else:
for i in range(0, len(a)+n): new_a[i] = a[i-n]
for i in range(0, -n): new_a[-i-1] = fill_array[-i-1]
return new_a | This will return an array with all the elements shifted forward in index by n.
a is the array
n is the amount by which to shift (can be positive or negative)
fill="average" fill the new empty elements with the average of the array
fill="wrap" fill the new empty elements with the lopped-off elements
fill=37.2 fill the new empty elements with the value 37.2 | entailment |
def assemble_covariance(error, correlation):
"""
This takes an error vector and a correlation matrix and assembles the covariance
"""
covariance = []
for n in range(0, len(error)):
covariance.append([])
for m in range(0, len(error)):
covariance[n].append(correlation[n][m]*error[n]*error[m])
return _n.array(covariance) | This takes an error vector and a correlation matrix and assembles the covariance | entailment |
def combine_dictionaries(a, b):
"""
returns the combined dictionary. a's values preferentially chosen
"""
c = {}
for key in list(b.keys()): c[key]=b[key]
for key in list(a.keys()): c[key]=a[key]
return c | returns the combined dictionary. a's values preferentially chosen | entailment |
def decompose_covariance(c):
"""
This decomposes a covariance matrix into an error vector and a correlation matrix
"""
# make it a kickass copy of the original
c = _n.array(c)
# first get the error vector
e = []
for n in range(0, len(c[0])): e.append(_n.sqrt(c[n][n]))
# now cycle through the matrix, dividing by e[1]*e[2]
for n in range(0, len(c[0])):
for m in range(0, len(c[0])):
c[n][m] = c[n][m] / (e[n]*e[m])
return [_n.array(e), _n.array(c)] | This decomposes a covariance matrix into an error vector and a correlation matrix | entailment |
def derivative(xdata, ydata):
"""
performs d(ydata)/d(xdata) with nearest-neighbor slopes
must be well-ordered, returns new arrays [xdata, dydx_data]
neighbors:
"""
D_ydata = []
D_xdata = []
for n in range(1, len(xdata)-1):
D_xdata.append(xdata[n])
D_ydata.append((ydata[n+1]-ydata[n-1])/(xdata[n+1]-xdata[n-1]))
return [D_xdata, D_ydata] | performs d(ydata)/d(xdata) with nearest-neighbor slopes
must be well-ordered, returns new arrays [xdata, dydx_data]
neighbors: | entailment |
def derivative_fit(xdata, ydata, neighbors=1):
"""
loops over the data points, performing a least-squares linear fit of the
nearest neighbors at each point. Returns an array of x-values and slopes.
xdata should probably be well-ordered.
neighbors How many data point on the left and right to include.
"""
x = []
dydx = []
nmax = len(xdata)-1
for n in range(nmax+1):
# get the indices of the data to fit
i1 = max(0, n-neighbors)
i2 = min(nmax, n+neighbors)
# get the sub data to fit
xmini = _n.array(xdata[i1:i2+1])
ymini = _n.array(ydata[i1:i2+1])
slope, intercept = fit_linear(xmini, ymini)
# make x the average of the xmini
x.append(float(sum(xmini))/len(xmini))
dydx.append(slope)
return _n.array(x), _n.array(dydx) | loops over the data points, performing a least-squares linear fit of the
nearest neighbors at each point. Returns an array of x-values and slopes.
xdata should probably be well-ordered.
neighbors How many data point on the left and right to include. | entailment |
def distort_matrix_X(Z, X, f, new_xmin, new_xmax, subsample=3):
"""
Applies a distortion (remapping) to the matrix Z (and x-values X) using function f.
returns new_Z, new_X
f is an INVERSE function old_x(new_x)
Z is a matrix. X is an array where X[n] is the x-value associated with the array Z[n].
new_xmin, new_xmax is the possible range of the distorted x-variable for generating Z
points is how many elements the stretched Z should have. "auto" means use the same number of bins
"""
Z = _n.array(Z)
X = _n.array(X)
points = len(Z)*subsample
# define a function for searching
def zero_me(new_x): return f(new_x)-target_old_x
# do a simple search to find the new_x that gives old_x = min(X)
target_old_x = min(X)
new_xmin = find_zero_bisect(zero_me, new_xmin, new_xmax, _n.abs(new_xmax-new_xmin)*0.0001)
target_old_x = max(X)
new_xmax = find_zero_bisect(zero_me, new_xmin, new_xmax, _n.abs(new_xmax-new_xmin)*0.0001)
# now loop over all the new x values
new_X = []
new_Z = []
bin_width = float(new_xmax-new_xmin)/(points)
for new_x in frange(new_xmin, new_xmax, bin_width):
# make sure we're in the range of X
if f(new_x) <= max(X) and f(new_x) >= min(X):
# add this guy to the array
new_X.append(new_x)
# get the interpolated column
new_Z.append( interpolate(X,Z,f(new_x)) )
return _n.array(new_Z), _n.array(new_X) | Applies a distortion (remapping) to the matrix Z (and x-values X) using function f.
returns new_Z, new_X
f is an INVERSE function old_x(new_x)
Z is a matrix. X is an array where X[n] is the x-value associated with the array Z[n].
new_xmin, new_xmax is the possible range of the distorted x-variable for generating Z
points is how many elements the stretched Z should have. "auto" means use the same number of bins | entailment |
def distort_matrix_Y(Z, Y, f, new_ymin, new_ymax, subsample=3):
"""
Applies a distortion (remapping) to the matrix Z (and y-values Y) using function f.
returns new_Z, new_Y
f is a function old_y(new_y)
Z is a matrix. Y is an array where Y[n] is the y-value associated with the array Z[:,n].
new_ymin, new_ymax is the range of the distorted x-variable for generating Z
points is how many elements the stretched Z should have. "auto" means use the same number of bins
"""
# just use the same methodology as before by transposing, distorting X, then
# transposing back
new_Z, new_Y = distort_matrix_X(Z.transpose(), Y, f, new_ymin, new_ymax, subsample)
return new_Z.transpose(), new_Y | Applies a distortion (remapping) to the matrix Z (and y-values Y) using function f.
returns new_Z, new_Y
f is a function old_y(new_y)
Z is a matrix. Y is an array where Y[n] is the y-value associated with the array Z[:,n].
new_ymin, new_ymax is the range of the distorted x-variable for generating Z
points is how many elements the stretched Z should have. "auto" means use the same number of bins | entailment |
def dumbguy_minimize(f, xmin, xmax, xstep):
"""
This just steps x and looks for a peak
returns x, f(x)
"""
prev = f(xmin)
this = f(xmin+xstep)
for x in frange(xmin+xstep,xmax,xstep):
next = f(x+xstep)
# see if we're on top
if this < prev and this < next: return x, this
prev = this
this = next
return x, this | This just steps x and looks for a peak
returns x, f(x) | entailment |
def elements_are_numbers(array):
"""
Tests whether the elements of the supplied array are numbers.
"""
# empty case
if len(array) == 0: return 0
output_value = 1
for x in array:
# test it and die if it's not a number
test = is_a_number(x)
if not test: return False
# mention if it's complex
output_value = max(output_value,test)
return output_value | Tests whether the elements of the supplied array are numbers. | entailment |
def equalize_list_lengths(a,b):
"""
Modifies the length of list a to match b. Returns a.
a can also not be a list (will convert it to one).
a will not be modified.
"""
if not _s.fun.is_iterable(a): a = [a]
a = list(a)
while len(a)>len(b): a.pop(-1)
while len(a)<len(b): a.append(a[-1])
return a | Modifies the length of list a to match b. Returns a.
a can also not be a list (will convert it to one).
a will not be modified. | entailment |
def find_N_peaks(array, N=4, max_iterations=100, rec_max_iterations=3, recursion=1):
"""
This will run the find_peaks algorythm, adjusting the baseline until exactly N peaks are found.
"""
if recursion<0: return None
# get an initial guess as to the baseline
ymin = min(array)
ymax = max(array)
for n in range(max_iterations):
# bisect the range to estimate the baseline
y1 = (ymin+ymax)/2.0
# now see how many peaks this finds. p could have 40 for all we know
p, s, i = find_peaks(array, y1, True)
# now loop over the subarrays and make sure there aren't two peaks in any of them
for n in range(len(i)):
# search the subarray for two peaks, iterating 3 times (75% selectivity)
p2 = find_N_peaks(s[n], 2, rec_max_iterations, rec_max_iterations=rec_max_iterations, recursion=recursion-1)
# if we found a double-peak
if not p2 is None:
# push these non-duplicate values into the master array
for x in p2:
# if this point is not already in p, push it on
if not x in p: p.append(x+i[n]) # don't forget the offset, since subarrays start at 0
# if we nailed it, finish up
if len(p) == N: return p
# if we have too many peaks, we need to increase the baseline
if len(p) > N: ymin = y1
# too few? decrease the baseline
else: ymax = y1
return None | This will run the find_peaks algorythm, adjusting the baseline until exactly N peaks are found. | entailment |
def find_peaks(array, baseline=0.1, return_subarrays=False):
"""
This will try to identify the indices of the peaks in array, returning a list of indices in ascending order.
Runs along the data set until it jumps above baseline. Then it considers all the subsequent data above the baseline
as part of the peak, and records the maximum of this data as one peak value.
"""
peaks = []
if return_subarrays:
subarray_values = []
subarray_indices = []
# loop over the data
n = 0
while n < len(array):
# see if we're above baseline, then start the "we're in a peak" loop
if array[n] > baseline:
# start keeping track of the subarray here
if return_subarrays:
subarray_values.append([])
subarray_indices.append(n)
# find the max
ymax=baseline
nmax = n
while n < len(array) and array[n] > baseline:
# add this value to the subarray
if return_subarrays:
subarray_values[-1].append(array[n])
if array[n] > ymax:
ymax = array[n]
nmax = n
n = n+1
# store the max
peaks.append(nmax)
else: n = n+1
if return_subarrays: return peaks, subarray_values, subarray_indices
else: return peaks | This will try to identify the indices of the peaks in array, returning a list of indices in ascending order.
Runs along the data set until it jumps above baseline. Then it considers all the subsequent data above the baseline
as part of the peak, and records the maximum of this data as one peak value. | entailment |
def find_zero_bisect(f, xmin, xmax, xprecision):
"""
This will bisect the range and zero in on zero.
"""
if f(xmax)*f(xmin) > 0:
print("find_zero_bisect(): no zero on the range",xmin,"to",xmax)
return None
temp = min(xmin,xmax)
xmax = max(xmin,xmax)
xmin = temp
xmid = (xmin+xmax)*0.5
while xmax-xmin > xprecision:
y = f(xmid)
# pick the direction with one guy above and one guy below zero
if y > 0:
# move left or right?
if f(xmin) < 0: xmax=xmid
else: xmin=xmid
# f(xmid) is below zero
elif y < 0:
# move left or right?
if f(xmin) > 0: xmax=xmid
else: xmin=xmid
# yeah, right
else: return xmid
# bisect again
xmid = (xmin+xmax)*0.5
return xmid | This will bisect the range and zero in on zero. | entailment |
def fit_linear(xdata, ydata):
"""
Returns slope and intercept of line of best fit:
y = a*x + b
through the supplied data.
Parameters
----------
xdata, ydata:
Arrays of x data and y data (having matching lengths).
"""
x = _n.array(xdata)
y = _n.array(ydata)
ax = _n.average(x)
ay = _n.average(y)
axx = _n.average(x*x)
ayx = _n.average(y*x)
slope = (ayx - ay*ax) / (axx - ax*ax)
intercept = ay - slope*ax
return slope, intercept | Returns slope and intercept of line of best fit:
y = a*x + b
through the supplied data.
Parameters
----------
xdata, ydata:
Arrays of x data and y data (having matching lengths). | entailment |
def frange(start, end, inc=1.0):
"""
A range function, that accepts float increments and reversed direction.
See also numpy.linspace()
"""
start = 1.0*start
end = 1.0*end
inc = 1.0*inc
# if we got a dumb increment
if not inc: return _n.array([start,end])
# if the increment is going the wrong direction
if 1.0*(end-start)/inc < 0.0:
inc = -inc
# get the integer steps
ns = _n.array(list(range(0, int(1.0*(end-start)/inc)+1)))
return start + ns*inc | A range function, that accepts float increments and reversed direction.
See also numpy.linspace() | entailment |
def generate_fake_data(f='2*x-5', x=_n.linspace(-5,5,11), ey=1, ex=0, include_errors=False, **kwargs):
"""
Generates a set of fake data from the underlying "reality" (or mean
behavior) function f.
Parameters
----------
f:
Underlying "reality" function or mean behavior. This can be any
python-evaluable string, and will have access to all the numpy
functions (e.g., cos), scipy's special functions (e.g., erf), and
any other variables defined by keyword arguments
ex, ey:
Uncertainty "strength" for x and y data. This can be a constant or an
array of values. If the distributions (below) are normal, this
corresponds to the standard deviation.
include_errors=True
Whether the databox should include a column for ex and ey.
Keyword arguments are used as additional globals in the function evaluation.
Returns a databox containing the data and other relevant information in the
header.
"""
# Make a fitter object, which handily interprets string functions
# The "+0*x" is a trick to ensure the function takes x as an argument
# (makes it a little more idiot proof).
fitty = _s.data.fitter().set_functions(f+"+0*x",'')
# Make sure both errors are arrays of the right length
if not _s.fun.is_iterable(ex): ex = _n.array([ex]*len(x))
if not _s.fun.is_iterable(ey): ey = _n.array([ey]*len(x))
# Get the x and y exact values first, then randomize
x = _n.array(x)
y = fitty.f[0](x)
x = _n.random.normal(_n.array(x),ex)
y = _n.random.normal(y, ey)
# make a databox
d = _s.data.databox()
d['x'] = x
d['y'] = y
if include_errors:
d['ey'] = ey
d['ex'] = ex
d.h(reality=f, ey=ey[0], ex=ex[0])
return d | Generates a set of fake data from the underlying "reality" (or mean
behavior) function f.
Parameters
----------
f:
Underlying "reality" function or mean behavior. This can be any
python-evaluable string, and will have access to all the numpy
functions (e.g., cos), scipy's special functions (e.g., erf), and
any other variables defined by keyword arguments
ex, ey:
Uncertainty "strength" for x and y data. This can be a constant or an
array of values. If the distributions (below) are normal, this
corresponds to the standard deviation.
include_errors=True
Whether the databox should include a column for ex and ey.
Keyword arguments are used as additional globals in the function evaluation.
Returns a databox containing the data and other relevant information in the
header. | entailment |
def get_shell_history():
"""
This only works with some shells.
"""
# try for ipython
if 'get_ipython' in globals():
a = list(get_ipython().history_manager.input_hist_raw)
a.reverse()
return a
elif 'SPYDER_SHELL_ID' in _os.environ:
try:
p = _os.path.join(_settings.path_user, ".spyder2", "history.py")
a = read_lines(p)
a.reverse()
return a
except:
pass
# otherwise try pyshell or pycrust (requires wx)
else:
try:
import wx
for x in wx.GetTopLevelWindows():
if type(x) in [wx.py.shell.ShellFrame, wx.py.crust.CrustFrame]:
a = x.shell.GetText().split(">>>")
a.reverse()
return a
except:
pass
return ['shell history not available'] | This only works with some shells. | entailment |
def index(value, array):
"""
Array search that behaves like I want it to. Totally dumb, I know.
"""
i = array.searchsorted(value)
if i == len(array): return -1
else: return i | Array search that behaves like I want it to. Totally dumb, I know. | entailment |
def index_nearest(value, array):
"""
expects a _n.array
returns the global minimum of (value-array)^2
"""
a = (array-value)**2
return index(a.min(), a) | expects a _n.array
returns the global minimum of (value-array)^2 | entailment |
def index_next_crossing(value, array, starting_index=0, direction=1):
"""
starts at starting_index, and walks through the array until
it finds a crossing point with value
set direction=-1 for down crossing
"""
for n in range(starting_index, len(array)-1):
if (value-array[n] )*direction >= 0 \
and (value-array[n+1])*direction < 0: return n
# no crossing found
return -1 | starts at starting_index, and walks through the array until
it finds a crossing point with value
set direction=-1 for down crossing | entailment |
def insert_ordered(value, array):
"""
This will insert the value into the array, keeping it sorted, and returning the
index where it was inserted
"""
index = 0
# search for the last array item that value is larger than
for n in range(0,len(array)):
if value >= array[n]: index = n+1
array.insert(index, value)
return index | This will insert the value into the array, keeping it sorted, and returning the
index where it was inserted | entailment |
def integrate_data(xdata, ydata, xmin=None, xmax=None, autozero=0):
"""
Numerically integrates up the ydata using the trapezoid approximation.
estimate the bin width (scaled by the specified amount).
Returns (xdata, integrated ydata).
autozero is the number of data points to use as an estimate of the background
(then subtracted before integrating).
"""
# sort the arrays and make sure they're numpy arrays
[xdata, ydata] = sort_matrix([xdata,ydata],0)
xdata = _n.array(xdata)
ydata = _n.array(ydata)
if xmin is None: xmin = min(xdata)
if xmax is None: xmax = max(xdata)
# find the index range
imin = xdata.searchsorted(xmin)
imax = xdata.searchsorted(xmax)
xint = [xdata[imin]]
yint = [0]
# get the autozero
if autozero >= 1:
zero = _n.average(ydata[imin:imin+int(autozero)])
ydata = ydata-zero
for n in range(imin+1,imax):
if len(yint):
xint.append(xdata[n])
yint.append(yint[-1]+0.5*(xdata[n]-xdata[n-1])*(ydata[n]+ydata[n-1]))
else:
xint.append(xdata[n])
yint.append(0.5*(xdata[n]-xdata[n-1])*(ydata[n]+ydata[n-1]))
return _n.array(xint), _n.array(yint) | Numerically integrates up the ydata using the trapezoid approximation.
estimate the bin width (scaled by the specified amount).
Returns (xdata, integrated ydata).
autozero is the number of data points to use as an estimate of the background
(then subtracted before integrating). | entailment |
def invert_increasing_function(f, f0, xmin, xmax, tolerance, max_iterations=100):
"""
This will try try to qickly find a point on the f(x) curve between xmin and xmax that is
equal to f0 within tolerance.
"""
for n in range(max_iterations):
# start at the middle
x = 0.5*(xmin+xmax)
df = f(x)-f0
if _n.fabs(df) < tolerance: return x
# if we're high, set xmin to x etc...
if df > 0: xmin=x
else: xmax=x
print("Couldn't find value!")
return 0.5*(xmin+xmax) | This will try try to qickly find a point on the f(x) curve between xmin and xmax that is
equal to f0 within tolerance. | entailment |
def fft(t, y, pow2=False, window=None, rescale=False):
"""
FFT of y, assuming complex or real-valued inputs. This goes through the
numpy fourier transform process, assembling and returning (frequencies,
complex fft) given time and signal data y.
Parameters
----------
t,y
Time (t) and signal (y) arrays with which to perform the fft. Note the t
array is assumed to be evenly spaced.
pow2 = False
Set this to true if you only want to keep the first 2^n data
points (speeds up the FFT substantially)
window = None
Can be set to any of the windowing functions in numpy that require only
the number of points as the argument, e.g. window='hanning'.
rescale = False
If True, the FFT will be rescaled by the square root of the ratio of
variances before and after windowing, such that the sum of component
amplitudes squared is equal to the actual variance.
"""
# make sure they're numpy arrays, and make copies to avoid the referencing error
y = _n.array(y)
t = _n.array(t)
# if we're doing the power of 2, do it
if pow2:
keep = 2**int(_n.log2(len(y)))
# now resize the data
y.resize(keep)
t.resize(keep)
# Window the data
if not window in [None, False, 0]:
try:
# Get the windowing array
w = eval("_n."+window, dict(_n=_n))(len(y))
# Store the original variance
v0 = _n.average(abs(y)**2)
# window the time domain data
y = y * w
# Rescale by the variance ratio
if rescale: y = y * _n.sqrt(v0 / _n.average(abs(y)**2))
except:
print("ERROR: Bad window!")
return
# do the actual fft, and normalize
Y = _n.fft.fftshift( _n.fft.fft(y) / len(t) )
f = _n.fft.fftshift( _n.fft.fftfreq(len(t), t[1]-t[0]) )
return f, Y | FFT of y, assuming complex or real-valued inputs. This goes through the
numpy fourier transform process, assembling and returning (frequencies,
complex fft) given time and signal data y.
Parameters
----------
t,y
Time (t) and signal (y) arrays with which to perform the fft. Note the t
array is assumed to be evenly spaced.
pow2 = False
Set this to true if you only want to keep the first 2^n data
points (speeds up the FFT substantially)
window = None
Can be set to any of the windowing functions in numpy that require only
the number of points as the argument, e.g. window='hanning'.
rescale = False
If True, the FFT will be rescaled by the square root of the ratio of
variances before and after windowing, such that the sum of component
amplitudes squared is equal to the actual variance. | entailment |
def psd(t, y, pow2=False, window=None, rescale=False):
"""
Single-sided power spectral density, assuming real valued inputs. This goes
through the numpy fourier transform process, assembling and returning
(frequencies, psd) given time and signal data y.
Note it is defined such that sum(psd)*df, where df is the frequency
spacing, is the variance of the original signal for any range of frequencies.
This includes the DC and Nyquist components:
sqrt(psd[0]*df) = average value of original time trace
sqrt(psd[-1]*df) = amplitude of Nyquist component (for even # points)
Parameters
----------
t,y
Time (t) and signal (y) arrays with which to perform the PSD. Note the t
array is assumed to be evenly spaced.
pow2 = False
Set this to true if you only want to keep the first 2^n data
points (speeds up the FFT substantially)
window = None
can be set to any of the windowing functions in numpy,
e.g. window='hanning'.
rescale = False
If True, the FFT will be rescaled by the square root of the ratio of
variances before and after windowing, such that the integral
sum(PSD)*df is the variance of the *original* time-domain data.
returns frequencies, psd (y^2/Hz)
"""
# do the actual fft
f, Y = fft(t,y,pow2,window,rescale)
# take twice the negative frequency branch, because it contains the
# extra frequency point when the number of points is odd.
f = _n.abs(f[int(len(f)/2)::-1])
P = _n.abs(Y[int(len(Y)/2)::-1])**2 / (f[1]-f[0])
# Since this is the same as the positive frequency branch, double the
# appropriate frequencies. For even number of points, there is one
# extra negative frequency to avoid doubling. For odd, you only need to
# avoid the DC value.
# For the even
if len(t)%2 == 0: P[1:len(P)-1] = P[1:len(P)-1]*2
else: P[1:] = P[1:]*2
return f, P | Single-sided power spectral density, assuming real valued inputs. This goes
through the numpy fourier transform process, assembling and returning
(frequencies, psd) given time and signal data y.
Note it is defined such that sum(psd)*df, where df is the frequency
spacing, is the variance of the original signal for any range of frequencies.
This includes the DC and Nyquist components:
sqrt(psd[0]*df) = average value of original time trace
sqrt(psd[-1]*df) = amplitude of Nyquist component (for even # points)
Parameters
----------
t,y
Time (t) and signal (y) arrays with which to perform the PSD. Note the t
array is assumed to be evenly spaced.
pow2 = False
Set this to true if you only want to keep the first 2^n data
points (speeds up the FFT substantially)
window = None
can be set to any of the windowing functions in numpy,
e.g. window='hanning'.
rescale = False
If True, the FFT will be rescaled by the square root of the ratio of
variances before and after windowing, such that the integral
sum(PSD)*df is the variance of the *original* time-domain data.
returns frequencies, psd (y^2/Hz) | entailment |
def replace_in_files(search, replace, depth=0, paths=None, confirm=True):
    """
    Does a line-by-line search and replace, but only up to the "depth" line.

    Parameters
    ----------
    search : str
        Substring to look for in each line.
    replace : str
        Replacement substring.
    depth=0
        Only the first `depth` lines of each file are searched; 0 means
        search every line.
    paths=None
        List of file paths to process; None pops up a multi-file dialog.
    confirm=True
        On the first pass only prints the would-be changes; typing "yes"
        at the prompt re-runs with confirm=False, which renames each file
        to path+".backup" and writes the modified lines in its place.
    """
    # have the user select some files
    if paths==None:
        paths = _s.dialogs.MultipleFiles('DIS AND DAT|*.*')
    if paths == []: return
    for path in paths:
        lines = read_lines(path)
        # depth=0 means "search the whole file"
        if depth: N=min(len(lines),depth)
        else: N=len(lines)
        for n in range(0,N):
            if lines[n].find(search) >= 0:
                lines[n] = lines[n].replace(search,replace)
                # echo the changed line (pathsep split: presumably meant to
                # show just the file name — TODO confirm, os.sep may be meant)
                print(path.split(_os.path.pathsep)[-1]+ ': "'+lines[n]+'"')
        # only write if we're not confirming
        if not confirm:
            _os.rename(path, path+".backup")
            # `join` is this module's own helper (sequence, separator)
            write_to_file(path, join(lines, ''))
    if confirm:
        # re-run destructively on the SAME paths after user approval
        if input("yes? ")=="yes":
            replace_in_files(search,replace,depth,paths,False)
return | Does a line-by-line search and replace, but only up to the "depth" line. | entailment |
def replace_lines_in_files(search_string, replacement_line):
    """
    Finds lines containing the search string and replaces the whole line with
    the specified replacement string.

    Files are chosen via a multi-file dialog. A backup copy of each file is
    saved to path+".backup" before the file is overwritten in place.
    """
    # have the user select some files
    paths = _s.dialogs.MultipleFiles('DIS AND DAT|*.*')
    if paths == []: return
    for path in paths:
        # keep an untouched copy alongside the original
        _shutil.copy(path, path+".backup")
        lines = read_lines(path)
        for n in range(0,len(lines)):
            if lines[n].find(search_string) >= 0:
                # echo the line being replaced
                print(lines[n])
                lines[n] = replacement_line.strip() + "\n"
        # `join` is this module's own helper (sequence, separator)
        write_to_file(path, join(lines, ''))
return | Finds lines containing the search string and replaces the whole line with
the specified replacement string. | entailment |
def reverse(array):
    """
    Return a reversed copy of the supplied sequence as a numpy array.
    """
    # Copy into a list, flip with a negative-stride slice, wrap in numpy.
    return _n.array(list(array)[::-1])
def round_sigfigs(x, n=2):
    """
    Round to the specified number of significant figures. x can also be
    a list or array of numbers (in these cases, a numpy array is returned).

    Parameters
    ----------
    x : number or iterable of numbers
        Value(s) to round. None, nan, +/-inf, and 0 are passed through
        unchanged, since they have no meaningful significant figures.
    n=2 : int
        Number of significant figures to keep.
    """
    iterable = is_iterable(x)
    if not iterable: x = [x]

    # make a copy to be safe
    x = _n.array(x)

    # loop over the elements
    for i in range(len(x)):
        v = x[i]

        # Skip values with no meaningful significant figures. The previous
        # membership test (v in [None, inf, nan]) never caught nan
        # (nan != nan) and missed -inf and 0, all of which blow up log10.
        if v is None or not _n.isfinite(v) or v == 0: continue

        # decimal places that keep n significant figures
        sig_figs = -int(_n.floor(_n.log10(abs(v))))+n-1
        x[i] = _n.round(v, sig_figs)

    if iterable: return x
    else:        return x[0]
def shift_feature_to_x0(xdata, ydata, x0=0, feature=imax):
    """
    Locate a feature in ydata (by default its maximum) and translate xdata
    so the feature sits at x0. Returns (shifted_xdata, ydata).
    Try me with plot.tweaks.manipulate_shown_data()!

    xdata, ydata : data set
    x0=0         : x-value at which to place the feature
    feature=imax : callable mapping an array/list to the index of the feature
    """
    index = feature(ydata)
    # shift so xdata[index] lands exactly on x0
    return xdata + (x0 - xdata[index]), ydata
def smooth_data(xdata, ydata, yerror, amount=1):
    """
    Return smoothed [xdata, ydata, yerror] without modifying the inputs.
    yerror may be None, in which case it is passed through as None.
    """
    smooth = lambda a: smooth_array(_n.array(a), amount)
    return [smooth(xdata),
            smooth(ydata),
            None if yerror is None else smooth(yerror)]
def sort_matrix(a,n=0):
    """
    Return a numpy array whose columns are reordered so that row a[n] is
    ascending; every other row is permuted the same way. Simple and slow.
    """
    matrix = _n.array(a)
    # column order that sorts row n
    order = matrix[n, :].argsort()
    return matrix[:, order]
def submatrix(matrix,i1,i2,j1,j2):
    """
    Return the submatrix covering rows i1..i2 and columns j1..j2
    (both endpoints included!) as a numpy array.
    """
    rows = [matrix[i][j1:j2+1] for i in range(i1, i2+1)]
    return _n.array(rows)
def trim_data(xmin, xmax, xdata, *args):
    """
    Keep only the points for which xmin <= xdata <= xmax. Returns the list
    [trimmed_xdata, trimmed_arg0, ...] without mutilating the inputs.
    Additional arrays supplied via args must match xdata in shape.
    xmin and xmax can be None, meaning "no bound on that side".
    """
    # make sure it's a numpy array
    if not isinstance(xdata, _n.ndarray): xdata = _n.array(xdata)

    # open-ended bounds default to the data extremes
    if xmin is None: xmin = min(xdata)
    if xmax is None: xmax = max(xdata)

    # indices of the surviving points
    keep = _n.argwhere((xdata >= xmin) & (xdata <= xmax)).transpose()[0]

    trimmed = [xdata[keep]]
    for extra in args:
        # make sure it's a numpy array
        if not isinstance(extra, _n.ndarray): extra = _n.array(extra)
        trimmed.append(extra[keep])
    return trimmed
def trim_data_uber(arrays, conditions):
"""
Non-destructively selects data from the supplied list of arrays based on
the supplied list of conditions. Importantly, if any of the conditions are
not met for the n'th data point, the n'th data point is rejected for
all supplied arrays.
Example
-------
x = numpy.linspace(0,10,20)
y = numpy.sin(x)
trim_data_uber([x,y], [x>3,x<9,y<0.7])
This will keep only the x-y pairs in which 3<x<9 and y<0.7, returning
a list of shorter arrays (all having the same length, of course).
"""
# dumb conditions
if len(conditions) == 0: return arrays
if len(arrays) == 0: return []
# find the indices to keep
all_conditions = conditions[0]
for n in range(1,len(conditions)): all_conditions = all_conditions & conditions[n]
ns = _n.argwhere(all_conditions).transpose()[0]
# assemble and return trimmed data
output = []
for n in range(len(arrays)):
if not arrays[n] is None: output.append(arrays[n][ns])
else: output.append(None)
return output | Non-destructively selects data from the supplied list of arrays based on
the supplied list of conditions. Importantly, if any of the conditions are
not met for the n'th data point, the n'th data point is rejected for
all supplied arrays.
Example
-------
x = numpy.linspace(0,10,20)
y = numpy.sin(x)
trim_data_uber([x,y], [x>3,x<9,y<0.7])
This will keep only the x-y pairs in which 3<x<9 and y<0.7, returning
a list of shorter arrays (all having the same length, of course). | entailment |
def _fetch(self, searchtype, fields, **kwargs):
    '''
    Query the Geocoding API and parse the response.

    searchtype : endpoint name ('address', 'coordinates', ...)
    fields     : query parameters; vintage/benchmark/format are filled in
                 here (note: the caller's dict is modified in place)
    kwargs     : may carry 'layers', 'returntype' and 'timeout'

    Returns an AddressResult or GeographyResult. Raises ValueError when
    the response cannot be parsed; network errors propagate.
    '''
    fields['vintage']   = self.vintage
    fields['benchmark'] = self.benchmark
    fields['format']    = 'json'
    if 'layers' in kwargs: fields['layers'] = kwargs['layers']

    url = self._geturl(searchtype, kwargs.get('returntype', 'geographies'))
    try:
        with requests.get(url, params=fields, timeout=kwargs.get('timeout')) as response:
            payload = response.json()
            result  = payload.get('result', {})
            if "addressMatches" in result: return AddressResult(payload)
            if "geographies"    in result: return GeographyResult(payload)
            raise ValueError()
    except (ValueError, KeyError):
        raise ValueError("Unable to parse response from Census")
    except RequestException as e:
        raise e
def coordinates(self, x, y, **kwargs):
    '''Geocode a single (lon, lat) coordinate pair.'''
    kwargs['returntype'] = 'geographies'
    return self._fetch('coordinates', {'x': x, 'y': y}, **kwargs)
def address(self, street, city=None, state=None, zipcode=None, **kwargs):
    '''Geocode an address given as separate components.'''
    fields = {'street': street,
              'city':   city,
              'state':  state,
              'zip':    zipcode}
    return self._fetch('address', fields, **kwargs)
def onelineaddress(self, address, **kwargs):
    '''
    Geocode an address passed as one string,
    e.g. "4600 Silver Hill Rd, Suitland, MD 20746".
    '''
    return self._fetch('onelineaddress', {'address': address}, **kwargs)
def addressbatch(self, data, **kwargs):
    '''
    Send either a CSV file or data to the addressbatch API.
    According to the Census, "there is currently an upper limit of 1000
    records per batch file." A file must have no header and the fields
    id,street,city,state,zip. Data should be a list of dicts with those
    fields (although ID is optional).
    '''
    # file-like object (quacks like a file handle): hand it over directly
    if hasattr(data, 'read'):
        return self._post_batch(f=data, **kwargs)

    # a path on disk: open and post the contents
    if isinstance(data, string_types):
        with open(data, 'rb') as handle:
            return self._post_batch(f=handle, **kwargs)

    # otherwise, assume a list of dicts
    return self._post_batch(data=data, **kwargs)
def load_colormap(self, name=None):
    """
    Load the colormap of the supplied name from the colormaps directory
    and update the displayed image. None means use the internal name
    (see self.get_name()). If the file does not exist it is created from
    the current colorpoints instead.
    """
    if name is None: name = self.get_name()
    if name == "" or not type(name)==str: return "Error: Bad name."

    # assemble the path to the colormap
    path = _os.path.join(_settings.path_home, "colormaps", name+".cmap")

    # if it has never been saved, create it rather than failing
    if not _os.path.exists(path):
        print("load_colormap(): Colormap '"+name+"' does not exist. Creating.")
        self.save_colormap(name)
        return

    # read the whole file; 'with' closes the handle even on error
    with open(path, 'r') as f:
        x = f.read()

    # The file stores a python literal list of colorpoints.
    # NOTE: eval executes arbitrary code; acceptable only because these
    # are trusted local files written by save_colormap().
    try:
        self._colorpoint_list = eval(x)
    except Exception:
        # a bare 'except:' here would also swallow KeyboardInterrupt
        print("Invalid colormap. Overwriting.")
        self.save_colormap()

    # update the image
    self.update_image()
    return self
def save_colormap(self, name=None):
    """
    Save the current colorpoint list to the colormaps directory under the
    specified name. None means use the internal name (see get_name()).
    """
    if name is None: name = self.get_name()
    if name == "" or not type(name)==str: return "Error: invalid name."

    # get the colormaps directory and make sure it exists
    colormaps = _os.path.join(_settings.path_home, 'colormaps')
    _settings.MakeDir(colormaps)

    # assemble the path to the colormap
    path = _os.path.join(colormaps, name+".cmap")

    # write the colorpoint list as a python literal; 'with' closes the
    # file even if the write fails (the old code leaked the handle then)
    with open(path, 'w') as f:
        f.write(str(self._colorpoint_list))

    return self
def delete_colormap(self, name=None):
    """
    Delete the colormap file of the given name. None means use the
    internal name (see get_name()).
    """
    if name is None: name = self.get_name()
    if name == "" or not type(name)==str: return "Error: invalid name."

    # remove the file from the colormaps directory
    _os.unlink(_os.path.join(_settings.path_home, 'colormaps', name+".cmap"))
    return self
def set_name(self, name="My Colormap"):
    """
    Set the colormap's name; it should be something your OS could use as
    a file name. Returns self, or None if the name is not a string.
    """
    if type(name) is not str:
        print("set_name(): Name must be a string.")
        return
    self._name = name
    return self
def set_image(self, image='auto'):
    """
    Choose which pylab image this colormap controls; 'auto' grabs the
    first image of the current pylab axes.
    """
    if image == "auto": image = _pylab.gca().images[0]
    self._image = image
    self.update_image()
def update_image(self):
    """
    Push the current colormap onto the tracked image and redraw.
    Does nothing when no image is set.
    """
    if not self._image: return
    self._image.set_cmap(self.get_cmap())
    _pylab.draw()
def pop_colorpoint(self, n=0):
    """
    Remove and return colorpoint n, always leaving at least two points
    behind. When only two remain, the indexed point is returned without
    being removed. After a removal the endpoints are pinned to positions
    0.0 and 1.0 and the image is refreshed.
    """
    # refuse to shrink below two points; just hand the item back
    if len(self._colorpoint_list) <= 2: return self[n]

    popped = self._colorpoint_list.pop(n)

    # keep the colormap spanning the full [0, 1] interval
    self._colorpoint_list[0][0]  = 0.0
    self._colorpoint_list[-1][0] = 1.0

    self.update_image()
    return popped
def insert_colorpoint(self, position=0.5, color1=[1.0,1.0,0.0], color2=[1.0,1.0,0.0]):
    """
    Insert a new colorpoint [position, color1, color2] at its ordered
    spot in the list, then refresh the image. Positions <= 0 or >= 1 are
    clamped onto the corresponding endpoint.
    """
    points = self._colorpoint_list

    if position <= 0.0:
        # belongs at the very bottom
        points.insert(0, [0.0, color1, color2])
    elif position >= 1.0:
        # belongs at the very top
        points.append([1.0, color1, color2])
    else:
        # walk the list and drop it in front of the first point at or
        # above the requested position
        for k in range(len(points)):
            if position <= points[k+1][0]:
                points.insert(k+1, [position, color1, color2])
                break

    # update the image with the new cmap
    self.update_image()
    return self
def modify_colorpoint(self, n, position=0.5, color1=[1.0,1.0,1.0], color2=[1.0,1.0,1.0]):
    """
    Overwrite colorpoint n with the supplied position and colors, refresh
    the image, and stash the result as "Last Used". The first and last
    points are pinned to positions 0.0 and 1.0; an interior point may not
    move below its left neighbour.
    """
    last = len(self._colorpoint_list) - 1
    if   n == 0:    position = 0.0
    elif n == last: position = 1.0
    else:           position = max(self._colorpoint_list[n-1][0], position)

    self._colorpoint_list[n] = [position, color1, color2]
    self.update_image()
    self.save_colormap("Last Used")
def get_cmap(self):
    """
    Build and return a matplotlib LinearSegmentedColormap from the
    current colorpoint list.
    """
    # matplotlib wants per-channel lists of (position, below, above)
    reds, greens, blues = [], [], []
    for position, below, above in self._colorpoint_list:
        reds.append(  (position, below[0]*1.0, above[0]*1.0))
        greens.append((position, below[1]*1.0, above[1]*1.0))
        blues.append( (position, below[2]*1.0, above[2]*1.0))

    # the formatted segment dictionary
    segments = {'red': reds, 'green': greens, 'blue': blues}

    # now make the dang thing
    return _mpl.colors.LinearSegmentedColormap('custom', segments)
def _build_gui(self):
    """
    Removes all existing sliders and rebuilds them based on the colormap.

    Destroys the central widget (and with it every child control), then
    recreates the combobox/save/delete header row plus one row of
    controls per colorpoint: bottom-color button, link checkbox,
    top-color button, position slider, and +/- buttons.
    """
    # remove all widgets (should destroy all children too)
    self._central_widget.deleteLater()
    # remove all references to other controls
    self._sliders = []
    self._buttons_top_color = []
    self._buttons_bottom_color = []
    self._checkboxes = []
    self._buttons_plus = []
    self._buttons_minus = []
    self._color_dialogs_top = []
    self._color_dialogs_bottom = []
    # create the new central widget
    self._central_widget = _qtw.QWidget()
    self._window.setCentralWidget(self._central_widget)
    # layout for main widget
    self._layout = _qtw.QGridLayout(self._central_widget)
    self._central_widget.setLayout(self._layout)
    # add the list of cmaps
    self._combobox_cmaps = _qtw.QComboBox(self._central_widget)
    self._combobox_cmaps.setEditable(True)
    self._load_cmap_list()
    # add the save and delete buttons
    self._button_save = _qtw.QPushButton("Save", self._central_widget)
    self._button_delete = _qtw.QPushButton("Delete", self._central_widget)
    self._button_save.setFixedWidth(70)
    self._button_delete.setFixedWidth(70)
    # layouts
    self._layout.addWidget(self._combobox_cmaps, 1,1, 1,3, _qtcore.Qt.Alignment(0))
    self._layout.addWidget(self._button_save, 1,5, 1,1, _qtcore.Qt.Alignment(1))
    self._layout.addWidget(self._button_delete, 1,6, 1,2, _qtcore.Qt.Alignment(1))
    # actions
    self._combobox_cmaps.currentIndexChanged.connect(self._signal_load)
    self._button_save .clicked.connect(self._button_save_clicked)
    self._button_delete.clicked.connect(self._button_delete_clicked)
    # ensmallen the window
    self._window.resize(10,10)
    # now create a control set for each color point
    for n in range(len(self._colorpoint_list)):
        # c1/c2 are the bottom/top colors of colorpoint n
        c1 = self._colorpoint_list[n][1]
        c2 = self._colorpoint_list[n][2]
        # create a top-color button
        self._buttons_top_color.append(_qtw.QPushButton(self._central_widget))
        self._buttons_top_color[-1].setStyleSheet("background-color: rgb("+str(int(c2[0]*255))+","+str(int(c2[1]*255))+","+str(int(c2[2]*255))+"); border-radius: 3px;")
        # create a bottom-color button
        self._buttons_bottom_color.append(_qtw.QPushButton(self._central_widget))
        self._buttons_bottom_color[-1].setStyleSheet("background-color: rgb("+str(int(c1[0]*255))+","+str(int(c1[1]*255))+","+str(int(c1[2]*255))+"); border-radius: 3px;")
        # create color dialogs
        self._color_dialogs_top.append(_qtw.QColorDialog(self._central_widget))
        self._color_dialogs_top[-1].setCurrentColor(self._buttons_top_color[-1].palette().color(1))
        # NOTE(review): the bottom dialog is seeded from the TOP button's
        # palette — possibly should be _buttons_bottom_color; confirm.
        self._color_dialogs_bottom.append(_qtw.QColorDialog(self._central_widget))
        self._color_dialogs_bottom[-1].setCurrentColor(self._buttons_top_color[-1].palette().color(1))
        # create link checkboxes (checked when the two colors match)
        self._checkboxes.append(_qtw.QCheckBox(self._central_widget))
        self._checkboxes[-1].setChecked(c1==c2)
        # create a slider; 0-1000 integer range maps to position 0.0-1.0
        self._sliders.append(_qtw.QSlider(self._central_widget))
        self._sliders[-1].setOrientation(_qtcore.Qt.Horizontal)
        self._sliders[-1].setMaximum(1000)
        self._sliders[-1].setValue(int(self._colorpoint_list[n][0]*1000))
        self._sliders[-1].setFixedWidth(250)
        # create + and - buttons
        self._buttons_plus.append(_qtw.QPushButton(self._central_widget))
        self._buttons_plus[-1].setText("+")
        self._buttons_plus[-1].setFixedWidth(25)
        self._buttons_minus.append(_qtw.QPushButton(self._central_widget))
        self._buttons_minus[-1].setText("-")
        self._buttons_minus[-1].setFixedWidth(25)
        # layout
        self._layout.addWidget(self._buttons_bottom_color[-1], n+3,1, _qtcore.Qt.AlignCenter)
        self._layout.addWidget(self._checkboxes[-1], n+3,2, 1,1, _qtcore.Qt.AlignCenter)
        self._layout.addWidget(self._buttons_top_color[-1], n+3,3, _qtcore.Qt.AlignCenter)
        self._layout.addWidget(self._sliders[-1], n+3,4, 1,2, _qtcore.Qt.AlignCenter)
        self._layout.setColumnStretch(5,100)
        self._layout.addWidget(self._buttons_minus[-1], n+3,7, _qtcore.Qt.AlignCenter)
        self._layout.addWidget(self._buttons_plus[-1], n+3,6, _qtcore.Qt.AlignCenter)
        # connect the buttons and slider actions to the calls
        # (functools.partial bakes the row index into each callback)
        self._buttons_bottom_color[-1] .clicked.connect(_partial(self._color_button_clicked, n, 0))
        self._buttons_top_color[-1] .clicked.connect(_partial(self._color_button_clicked, n, 1))
        self._color_dialogs_bottom[-1].currentColorChanged.connect(_partial(self._color_dialog_changed, n, 0))
        self._color_dialogs_top[-1] .currentColorChanged.connect(_partial(self._color_dialog_changed, n, 1))
        self._buttons_plus[-1] .clicked.connect(_partial(self._button_plus_clicked, n))
        self._buttons_minus[-1] .clicked.connect(_partial(self._button_minus_clicked, n))
        self._sliders[-1] .valueChanged.connect(_partial(self._slider_changed, n))
    # disable the appropriate sliders (endpoints are pinned at 0 and 1)
    self._sliders[0] .setDisabled(True)
self._sliders[-1].setDisabled(True) | Removes all existing sliders and rebuilds them based on the colormap. | entailment |
def _signal_load(self):
"""
Load the selected cmap.
"""
# set our name
self.set_name(str(self._combobox_cmaps.currentText()))
# load the colormap
self.load_colormap()
# rebuild the interface
self._build_gui()
self._button_save.setEnabled(False) | Load the selected cmap. | entailment |
def _button_save_clicked(self):
"""
Save the selected cmap.
"""
self.set_name(str(self._combobox_cmaps.currentText()))
self.save_colormap()
self._button_save.setEnabled(False)
self._load_cmap_list() | Save the selected cmap. | entailment |
def _button_delete_clicked(self):
"""
Save the selected cmap.
"""
name = str(self._combobox_cmaps.currentText())
self.delete_colormap(name)
self._combobox_cmaps.setEditText("")
self._load_cmap_list() | Save the selected cmap. | entailment |
def _color_dialog_changed(self, n, top, c):
"""
Updates the color of the slider.
"""
self._button_save.setEnabled(True)
cp = self._colorpoint_list[n]
# if they're linked, set both
if self._checkboxes[n].isChecked():
self.modify_colorpoint(n, cp[0], [c.red()/255.0, c.green()/255.0, c.blue()/255.0],
[c.red()/255.0, c.green()/255.0, c.blue()/255.0])
self._buttons_top_color [n].setStyleSheet("background-color: rgb("+str(c.red())+","+str(c.green())+","+str(c.green())+"); border-radius: 3px;")
self._buttons_bottom_color[n].setStyleSheet("background-color: rgb("+str(c.red())+","+str(c.green())+","+str(c.green())+"); border-radius: 3px;")
elif top:
self.modify_colorpoint(n, cp[0], cp[1], [c.red()/255.0, c.green()/255.0, c.blue()/255.0])
self._buttons_top_color [n].setStyleSheet("background-color: rgb("+str(c.red())+","+str(c.green())+","+str(c.green())+"); border-radius: 3px;")
else:
self.modify_colorpoint(n, cp[0], [c.red()/255.0, c.green()/255.0, c.blue()/255.0], cp[2])
self._buttons_bottom_color[n].setStyleSheet("background-color: rgb("+str(c.red())+","+str(c.green())+","+str(c.green())+"); border-radius: 3px;") | Updates the color of the slider. | entailment |
def _button_plus_clicked(self, n):
"""
Create a new colorpoint.
"""
self._button_save.setEnabled(True)
self.insert_colorpoint(self._colorpoint_list[n][0],
self._colorpoint_list[n][1],
self._colorpoint_list[n][2])
self._build_gui() | Create a new colorpoint. | entailment |
def _button_minus_clicked(self, n):
"""
Remove a new colorpoint.
"""
self._button_save.setEnabled(True)
self.pop_colorpoint(n)
self._build_gui() | Remove a new colorpoint. | entailment |
def _slider_changed(self, n):
"""
updates the colormap / plot
"""
self._button_save.setEnabled(True)
self.modify_colorpoint(n, self._sliders[n].value()*0.001, self._colorpoint_list[n][1], self._colorpoint_list[n][2]) | updates the colormap / plot | entailment |
def _color_button_clicked(self, n,top):
"""
Opens the dialog.
"""
self._button_save.setEnabled(True)
if top: self._color_dialogs_top[n].open()
else: self._color_dialogs_bottom[n].open() | Opens the dialog. | entailment |
def _load_cmap_list(self):
    """
    Repopulate the combobox from the files in the colormaps directory,
    keeping the current selection where possible.
    """
    # remember the selection, and silence signals while rebuilding
    name = self.get_name()
    self._combobox_cmaps.blockSignals(True)
    self._combobox_cmaps.clear()

    # one combobox entry per colormap file (extension stripped)
    for path in _settings.ListDir('colormaps'):
        self._combobox_cmaps.addItem(_os.path.splitext(path)[0])

    # restore the selection and re-enable signals
    self._combobox_cmaps.setCurrentIndex(self._combobox_cmaps.findText(name))
    self._combobox_cmaps.blockSignals(False)
def save(filters='*.*', text='Save THIS, facehead!', default_directory='default_directory', force_extension=None):
    """
    Pops up a save dialog and returns the string path of the selected file,
    or None if the dialog was cancelled.

    Parameters
    ----------
    filters='*.*'
        Which file types should appear in the dialog.
    text='Save THIS, facehead!'
        Title text for the dialog.
    default_directory='default_directory'
        Key for the spinmob.settings default directory. If you use a name, e.g.
        'my_defaultypoo', for one call of this function, the next time you use
        the same name, it will start in the last dialog's directory by default.
    force_extension=None
        Setting this to a string, e.g. 'txt', will enforce that the filename
        will have this extension.
    """
    # make sure the filters contains "*.*" as an option!
    if not '*' in filters.split(';'): filters = filters + ";;All files (*)"

    # look up the remembered starting directory for this key
    if default_directory in _settings.keys(): default = _settings[default_directory]
    else:                                     default = ""

    # pop up the dialog
    result = _qtw.QFileDialog.getSaveFileName(None,text,default,filters)

    # PyQt5 returns (filename, chosen_filter); keep the filename
    if _s._qt.VERSION_INFO[0:5] == "PyQt5": result = result[0]
    result = str(result)

    # BUGFIX: detect cancellation *before* enforcing the extension.
    # Previously a cancelled dialog ('') with force_extension set came
    # back as a bogus path like '.txt' instead of None.
    if result == '': return None

    # Enforce the extension if necessary
    if force_extension is not None:
        # In case the user put "*.txt" instead of just "txt"
        force_extension = force_extension.replace('*','').replace('.','')

        # If it doesn't end with the right extension, add it.
        if not _os.path.splitext(result)[-1][1:] == force_extension:
            result = result + '.' + force_extension

    # remember this directory for next time
    _settings[default_directory] = _os.path.split(result)[0]
    return result
def load(filters="*.*", text='Select a file, FACEFACE!', default_directory='default_directory'):
    """
    Pops up a dialog for opening a single file. Returns a string path or None.
    """
    # guarantee an "all files" option in the filter list
    if not '*' in filters.split(';'): filters = filters + ";;All files (*)"

    # start from the remembered directory for this settings key
    default = _settings[default_directory] if default_directory in _settings.keys() else ""

    # pop up the dialog
    result = _qtw.QFileDialog.getOpenFileName(None,text,default,filters)

    # PyQt5 returns (filename, chosen_filter); keep the filename
    if _s._qt.VERSION_INFO[0:5] == "PyQt5": result = result[0]
    result = str(result)

    if result == '': return None

    # remember the directory for next time
    _settings[default_directory] = _os.path.split(result)[0]
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.