sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def p_ref_type(self, p):
'''ref_type : IDENTIFIER'''
p[0] = ast.DefinedType(p[1], lineno=p.lineno(1)) | ref_type : IDENTIFIER | entailment |
def p_base_type(self, p): # noqa
'''base_type : BOOL annotations
| BYTE annotations
| I8 annotations
| I16 annotations
| I32 annotations
| I64 annotations
| DOUBLE annotations
| STRING annotations
| BINARY annotations'''
name = p[1]
if name == 'i8':
name = 'byte'
p[0] = ast.PrimitiveType(name, p[2]) | base_type : BOOL annotations
| BYTE annotations
| I8 annotations
| I16 annotations
| I32 annotations
| I64 annotations
| DOUBLE annotations
| STRING annotations
| BINARY annotations | entailment |
def p_map_type(self, p):
'''map_type : MAP '<' field_type ',' field_type '>' annotations'''
p[0] = ast.MapType(key_type=p[3], value_type=p[5], annotations=p[7]) | map_type : MAP '<' field_type ',' field_type '>' annotations | entailment |
def p_list_type(self, p):
'''list_type : LIST '<' field_type '>' annotations'''
p[0] = ast.ListType(value_type=p[3], annotations=p[5]) | list_type : LIST '<' field_type '>' annotations | entailment |
def p_set_type(self, p):
'''set_type : SET '<' field_type '>' annotations'''
p[0] = ast.SetType(value_type=p[3], annotations=p[5]) | set_type : SET '<' field_type '>' annotations | entailment |
def p_annotation(self, p):
'''annotation : IDENTIFIER '=' LITERAL
| IDENTIFIER'''
if len(p) == 4:
p[0] = ast.Annotation(p[1], p[3], lineno=p.lineno(1))
else:
p[0] = ast.Annotation(p[1], True, lineno=p.lineno(1)) | annotation : IDENTIFIER '=' LITERAL
| IDENTIFIER | entailment |
def _parse_seq(self, p):
"""Helper to parse sequence rules.
Sequence rules are in the form::
foo : foo_item sep foo
| foo_item foo
|
This function builds a deque of the items in-order.
If the number of tokens doesn't match, an exception is raised.
"""
# This basically says:
#
# - When you reach the end of the list, construct and return an empty
# deque.
# - Otherwise, prepend to start of what you got from the parser.
#
# So this ends up constructing an in-order list.
if len(p) == 4:
p[3].appendleft(p[1])
p[0] = p[3]
elif len(p) == 3:
p[2].appendleft(p[1])
p[0] = p[2]
elif len(p) == 1:
p[0] = deque()
else:
raise ThriftParserError(
'Wrong number of tokens received for expression at line %d'
% p.lineno(1)
) | Helper to parse sequence rules.
Sequence rules are in the form::
foo : foo_item sep foo
| foo_item foo
|
This function builds a deque of the items in-order.
If the number of tokens doesn't match, an exception is raised. | entailment |
def parse(self, input, **kwargs):
"""Parse the given input.
:param input:
String containing the text to be parsed.
:raises thriftrw.errors.ThriftParserError:
For parsing errors.
"""
return self._parser.parse(input, lexer=self._lexer, **kwargs) | Parse the given input.
:param input:
String containing the text to be parsed.
:raises thriftrw.errors.ThriftParserError:
For parsing errors. | entailment |
def _get_comments(session, group_or_user_id, wall_id):
"""
https://vk.com/dev/wall.getComments
"""
return session.fetch_items("wall.getComments", Comment.from_json, count=100, owner_id=group_or_user_id, post_id=wall_id, need_likes=1) | https://vk.com/dev/wall.getComments | entailment |
def _get_comments_count(session, group_or_user_id, wall_id):
"""
https://vk.com/dev/wall.getComments
"""
response = session.fetch("wall.getComments", count=100, owner_id=group_or_user_id, post_id=wall_id)
return response.get('count') | https://vk.com/dev/wall.getComments | entailment |
def get_likes(self):
"""
https://vk.com/dev/likes.getList
"""
from .users import User
return self._session.fetch_items('likes.getList', User._get_user, count=100, type='post', owner_id=self.from_id, item_id=self.id) | https://vk.com/dev/likes.getList | entailment |
def get_likes_count(self):
"""
https://vk.com/dev/likes.getList
"""
response = self._session.fetch('likes.getList', count=1, type='post', owner_id=self.from_id, item_id=self.id)
likes_count = response.get('count')
return likes_count | https://vk.com/dev/likes.getList | entailment |
def get_reposts(self):
"""
https://vk.com/dev/wall.getReposts
"""
return self._session.fetch_items('wall.getReposts', self.from_json, count=1000, owner_id=self.from_id, post_id=self.id) | https://vk.com/dev/wall.getReposts | entailment |
def _wall_post(session, owner_id, message=None, attachments=None, from_group=True):
"""
https://vk.com/dev/wall.post
attachments: "photo100172_166443618,photo-1_265827614"
"""
response = session.fetch("wall.post", owner_id=owner_id, message=message, attachments=attachments, from_group=from_group)
return response | https://vk.com/dev/wall.post
attachments: "photo100172_166443618,photo-1_265827614" | entailment |
def _get_friends_count(session, user_id):
"""
https://vk.com/dev/friends.get
"""
response = session.fetch('friends.get', user_id=user_id, count=1)
return response["count"] | https://vk.com/dev/friends.get | entailment |
def acquire_fake_data(number_of_points=1000):
"""
This function generates some fake data and returns two channels of data
in the form
time_array, [channel1, channel2]
"""
# time array
t = _n.linspace(0,10,number_of_points)
return(t, [_n.cos(t)*(1.0+0.2*_n.random.random(number_of_points)),
_n.sin(t +0.5*_n.random.random(number_of_points))]) | This function generates some fake data and returns two channels of data
in the form
time_array, [channel1, channel2] | entailment |
def load(path=None, first_data_line='auto', filters='*.*', text='Select a file, FACEHEAD.', default_directory='default_directory', quiet=True, header_only=False, transpose=False, **kwargs):
"""
Loads a data file into the databox data class. Returns the data object.
Most keyword arguments are sent to databox.load() so check there
for documentation.(if their function isn't obvious).
Parameters
----------
path=None
Supply a path to a data file; None means use a dialog.
first_data_line="auto"
Specify the index of the first data line, or have it figure this out
automatically.
filters="*.*"
Specify file filters.
text="Select a file, FACEHEAD."
Window title text.
default_directory="default_directory"
Which directory to start in (by key). This lives in spinmob.settings.
quiet=True
Don't print stuff while loading.
header_only=False
Load only the header information.
transpose = False
Return databox.transpose().
Additioinal optional keyword arguments are sent to spinmob.data.databox(),
so check there for more information.
"""
d = databox(**kwargs)
d.load_file(path=path, first_data_line=first_data_line,
filters=filters, text=text, default_directory=default_directory,
header_only=header_only)
if not quiet: print("\nloaded", d.path, "\n")
if transpose: return d.transpose()
return d | Loads a data file into the databox data class. Returns the data object.
Most keyword arguments are sent to databox.load() so check there
for documentation.(if their function isn't obvious).
Parameters
----------
path=None
Supply a path to a data file; None means use a dialog.
first_data_line="auto"
Specify the index of the first data line, or have it figure this out
automatically.
filters="*.*"
Specify file filters.
text="Select a file, FACEHEAD."
Window title text.
default_directory="default_directory"
Which directory to start in (by key). This lives in spinmob.settings.
quiet=True
Don't print stuff while loading.
header_only=False
Load only the header information.
transpose = False
Return databox.transpose().
Additioinal optional keyword arguments are sent to spinmob.data.databox(),
so check there for more information. | entailment |
def load_multiple(paths=None, first_data_line="auto", filters="*.*", text="Select some files, FACEHEAD.", default_directory="default_directory", quiet=True, header_only=False, transpose=False, **kwargs):
"""
Loads a list of data files into a list of databox data objects.
Returns said list.
Parameters
----------
path=None
Supply a path to a data file; None means pop up a dialog.
first_data_line="auto"
Specify the index of the first data line, or have it figure this out
automatically.
filters="*.*"
Specify file filters.
text="Select some files, FACEHEAD."
Window title text.
default_directory="default_directory"
Which directory to start in (by key). This lives in spinmob.settings.
quiet=True
Don't print stuff while loading.
header_only=False
Load only the header information.
transpose = False
Return databox.transpose().
Optional keyword arguments are sent to spinmob.data.load(), so check there for more information.
"""
if paths == None: paths = _s.dialogs.load_multiple(filters, text, default_directory)
if paths is None : return
datas = []
for path in paths:
if _os.path.isfile(path): datas.append(load(path=path, first_data_line=first_data_line,
filters=filters, text=text, default_directory=default_directory,
header_only=header_only, transpose=transpose, **kwargs))
return datas | Loads a list of data files into a list of databox data objects.
Returns said list.
Parameters
----------
path=None
Supply a path to a data file; None means pop up a dialog.
first_data_line="auto"
Specify the index of the first data line, or have it figure this out
automatically.
filters="*.*"
Specify file filters.
text="Select some files, FACEHEAD."
Window title text.
default_directory="default_directory"
Which directory to start in (by key). This lives in spinmob.settings.
quiet=True
Don't print stuff while loading.
header_only=False
Load only the header information.
transpose = False
Return databox.transpose().
Optional keyword arguments are sent to spinmob.data.load(), so check there for more information. | entailment |
def more_info(self):
"""
Prints out more information about the databox.
"""
print("\nDatabox Instance", self.path)
print("\nHeader")
for h in self.hkeys: print(" "+h+":", self.h(h))
s = "\nColumns ("+str(len(self.ckeys))+"): "
for c in self.ckeys: s = s+c+", "
print(s[:-2]) | Prints out more information about the databox. | entailment |
def _globals(self):
"""
Returns the globals needed for eval() statements.
"""
# start with numpy
globbies = dict(_n.__dict__)
globbies.update(_special.__dict__)
# update with required stuff
globbies.update({'h':self.h, 'c':self.c, 'd':self, 'self':self})
# update with user stuff
globbies.update(self.extra_globals)
return globbies | Returns the globals needed for eval() statements. | entailment |
def load_file(self, path=None, first_data_line='auto', filters='*.*', text='Select a file, FACEPANTS.', default_directory=None, header_only=False, quiet=False):
"""
This will clear the databox, load a file, storing the header info in
self.headers, and the data in self.columns
If first_data_line="auto", then the first data line is assumed to be
the first line where all the elements are numbers.
If you specify a first_data_line (index, starting at 0), the columns
need not be numbers. Everything above will be considered header
information and below will be data columns.
In both cases, the line used to label the columns will always be the
last header line with the same (or more) number of elements as the
first data line.
Parameters
----------
path=None
Path to the file. Using None will bring up a dialog.
filters='*.*'
Filter for the file dialog (if path isn't specified)
text='Select a file, FACEPANTS.'
Prompt on file dialog
default_directory=None
Which spinmob.settings key to use for the dialog's default
directory. Will create one if it doesn't already exist.
header_only=False
Only load the header
quiet=False
Don't print anything while loading.
"""
# Set the default directory
if default_directory is None: default_directory = self.directory
# Ask user for a file to open
if path == None:
path = _s.dialogs.load(filters=filters,
default_directory=self.directory,
text=text)
self.path = path
if path is None:
return None
# make sure the file exists!
if not _os.path.exists(path):
if not quiet: print("ERROR: "+repr(path)+" does not exist.")
return None
# clear all the existing data
self.clear()
# First check if the file is SPINMOB_BINARY format!
f = open(path, 'rb')
# If this file is in SPINMOB_BINARY mode!
if f.read(14).decode('utf-8') == 'SPINMOB_BINARY':
# Next character is the delimiter
self.delimiter = f.read(1).decode('utf-8')
# Find the newline and get the data type
s = ' '
while not s[-1] == '\n': s = s+f.read(1).decode('utf-8')
# Rest of the line is the binary dtype
self.h(SPINMOB_BINARY = s.strip())
# Now manually assemble the header lines to use in the analysis
# below. If I try readline() on the binary file, it will crash.
lines = ['\n']
# The end of the header is specified by 'SPINMOB_BINARY' on its own line.
while not lines[-1] == 'SPINMOB_BINARY':
# Get the next line, one character at a time.
s = ' '
while not s[-1] == '\n': s = s+f.read(1).decode('utf-8')
# Okay we have it
lines.append(s.strip())
# Pop that last line, which should be 'SPINMOB_BINARY'
lines.pop(-1)
# We've reached the end of the header.
# Close the binary read.
f.close()
# If we're not in binary mode, we can read all the lines and find
# the delimiter as usual. (In binary mode, the delimiter is specified)
if not 'SPINMOB_BINARY' in self.hkeys:
# For non-binary files, we always read all the lines.
f = open(path, 'r')
lines = f.readlines()
f.close()
# Determine the delimiter
if self.delimiter is None:
# loop from the end of the file until we get something other than white space
for n in range(len(lines)):
# strip away the white space
s = lines[-n-1].strip()
# if this line has any content
if len(s) > 0:
# try the different delimiter schemes until we find one
# that produces a number. Otherwise it's ambiguous.
if _s.fun.is_a_number(s.split(None)[0]): self.delimiter = None
elif _s.fun.is_a_number(s.split(',') [0]): self.delimiter = ','
elif _s.fun.is_a_number(s.split(';') [0]): self.delimiter = ';'
# quit the loop!
break
# Done reading lines and auto-determining delimiter.
##### Pares the header from lines
self.header_lines = []
for n in range(len(lines)):
# split the line by the delimiter
s = lines[n].strip().split(self.delimiter)
# remove a trailing whitespace entry if it exists.
if len(s) and s[-1].strip() == '': s.pop(-1)
# first check and see if this is a data line (all elements are numbers)
if first_data_line == "auto" and _s.fun.elements_are_numbers(s):
# we've reached the first data line
first_data_line = n
# quit the header loop
break;
### after that check, we know it's a header line
# save the lines for the avid user.
self.header_lines.append(lines[n].strip())
# store the hkey and the rest of it
if len(s):
hkey = s[0]
if self.delimiter is None: remainder = ' '.join(s[1:])
else: remainder = self.delimiter.join(s[1:])
# first thing to try is simply evaluating the remaining string
try: self.insert_header(hkey, eval(remainder, self._globals()))
# otherwise store the string
except: self.insert_header(hkey, remainder)
# now we have a valid set of column ckeys one way or another, and we know first_data_line.
if header_only: return self
# Deal with the binary mode
if 'SPINMOB_BINARY' in self.hkeys:
# Read the binary file
f = open(path, 'rb')
s = f.read()
f.close()
# Get the delimiter for easier coding
delimiter = self.delimiter.encode('utf-8')
# Get the binary mode, e.g., 'float32'
binary = self.h('SPINMOB_BINARY')
# Number of bytes per element
size = eval('_n.'+binary+'().itemsize', dict(_n=_n))
# Location of first ckey
start = s.find(b'SPINMOB_BINARY',14) + 15
# Continue until we reach the last character.
while not start >= len(s):
# Get the location of the end of the ckey
stop = s.find(delimiter, start)
# Woa, Nelly! We're at the end of the file.
if stop == -1: break
ckey = s[start:stop].decode('utf-8').strip()
# Get the array length
start = stop+1
stop = s.find(b'\n', start)
length = int(s[start:stop].strip())
# Get the data!
start = stop+1
stop = start+size*length
self[ckey] = _n.fromstring(s[start:stop], binary)
# Go to next ckey
start = stop+1
# Otherwise we have a text file to load.
else:
# Make sure first_data_line isn't still 'auto'
# which happens if there's no data, or if it's a binary file
if first_data_line == "auto" and not 'SPINMOB_BINARY' in self.hkeys:
if not quiet: print("\ndatabox.load_file(): Could not find a line of pure data! Perhaps check the delimiter?")
return self
##### at this point we've found the first_data_line,
# look for the ckeys
# special case: no header
if first_data_line == 0: ckeys = []
# start by assuming it's the previous line
else: ckeys = lines[first_data_line-1].strip().split(self.delimiter)
# count the number of actual data columns for comparison
column_count = len(lines[first_data_line].strip().split(self.delimiter))
# check to see if ckeys is equal in length to the
# number of data columns. If it isn't, it's a false ckeys line
if len(ckeys) >= column_count:
# it is close enough
# if we have too many column keys, mention it
while len(ckeys) > column_count:
extra = ckeys.pop(-1)
if not quiet: print("Extra ckey: "+extra)
else:
# it is an invalid ckeys line. Generate our own!
ckeys = []
for m in range(0, column_count): ckeys.append("c"+str(m))
# last step with ckeys: make sure they're all different!
self.ckeys = []
while len(ckeys):
# remove the key
ckey = ckeys.pop(0)
# if there is a duplicate
if (ckey in ckeys) or (ckey in self.ckeys):
# increase the label index until it's unique
n=0
while (ckey+"_"+str(n) in ckeys) or (ckey+"_"+str(n) in self.ckeys): n+=1
ckey = ckey+"_"+str(n)
self.ckeys.append(ckey)
# initialize the columns arrays
# I did benchmarks and there's not much improvement by using numpy-arrays here.
for label in self.ckeys: self.columns[label] = []
# Python 2 format
#if _sys.version_info[0] == 2:
try:
def fix(x): return str(x.replace('i','j'))
# loop over the remaining data lines, converting to numbers
z = _n.genfromtxt((fix(x) for x in lines[first_data_line:]),
delimiter=self.delimiter,
dtype=_n.complex)
# Python 3 format
except:
def fix(x): return bytearray(x.replace('i','j'), encoding='utf-8')
# loop over the remaining data lines, converting to numbers
z = _n.genfromtxt((fix(x) for x in lines[first_data_line:]),
delimiter=self.delimiter,
dtype=_n.complex)
# genfromtxt returns a 1D array if there is only one data line.
# highly confusing behavior, numpy!
if len(_n.shape(z)) == 1:
# check to make sure the data file contains only 1 column of data
rows_of_data = len(lines) - first_data_line
if rows_of_data == 1: z = _n.array([z])
else: z = _n.array(z)
# fix for different behavior of genfromtxt on single columns
if len(z.shape) == 2: z = z.transpose()
else: z = [z]
# Add all the columns
for n in range(len(self.ckeys)):
# if any of the imaginary components are non-zero, use complex
if _n.any(_n.imag(z[n])): self[n] = z[n]
else: self[n] = _n.real(z[n])
# Done with loading in the columns of data
# now, as an added bonus, rename some of the obnoxious headers
for k in self.obnoxious_ckeys:
if k in self.columns:
self.columns[self.obnoxious_ckeys[k]] = self.columns[k]
return self | This will clear the databox, load a file, storing the header info in
self.headers, and the data in self.columns
If first_data_line="auto", then the first data line is assumed to be
the first line where all the elements are numbers.
If you specify a first_data_line (index, starting at 0), the columns
need not be numbers. Everything above will be considered header
information and below will be data columns.
In both cases, the line used to label the columns will always be the
last header line with the same (or more) number of elements as the
first data line.
Parameters
----------
path=None
Path to the file. Using None will bring up a dialog.
filters='*.*'
Filter for the file dialog (if path isn't specified)
text='Select a file, FACEPANTS.'
Prompt on file dialog
default_directory=None
Which spinmob.settings key to use for the dialog's default
directory. Will create one if it doesn't already exist.
header_only=False
Only load the header
quiet=False
Don't print anything while loading. | entailment |
def save_file(self, path=None, filters='*.dat', force_extension=None, force_overwrite=False, header_only=False, delimiter='use current', binary=None):
"""
This will save all the header info and columns to an ascii file with
the specified path.
Parameters
----------
path=None
Path for saving the data. If None, this will bring up
a save file dialog.
filters='*.dat'
File filter for the file dialog (for path=None)
force_extension=None
If set to a string, e.g., 'txt', it will enforce that the chosen
filename will have this extension.
force_overwrite=False
Normally, if the file * exists, this will copy that
to *.backup. If the backup already exists, this
function will abort. Setting this to True will
force overwriting the backup file.
header_only=False
Only output the header?
delimiter='use current'
This will set the delimiter of the output file
'use current' means use self.delimiter
binary=None
Set to one of the allowed numpy dtypes, e.g., float32, float64,
complex64, int32, etc. Setting binary=True defaults to float64.
Note if the header contains the key SPINMOB_BINARY and binary=None,
it will save as binary using the header specification.
"""
# Make sure there isn't a problem later with no-column databoxes
if len(self)==0: header_only=True
# This is the final path. We now write to a temporary file in the user
# directory, then move it to the destination. This (hopefully) fixes
# problems with sync programs.
if path in [None]: path = _s.dialogs.save(filters, default_directory=self.directory)
if path in ["", None]:
print("Aborted.")
return False
# Force the extension (we do this here redundantly, because the user may have also
# specified a path explicitly)
if not force_extension == None:
# In case the user put "*.txt" instead of just "txt"
force_extension = force_extension.replace('*','').replace('.','')
# If the file doesn't end with the extension, add it
if not _os.path.splitext(path)[-1][1:] == force_extension:
path = path + '.' + force_extension
# Save the path for future reference
self.path=path
# if the path exists, make a backup
if _os.path.exists(path) and not force_overwrite:
_os.rename(path,path+".backup")
# get the delimiter
if delimiter == "use current":
if self.delimiter is None: delimiter = "\t"
else: delimiter = self.delimiter
# figure out the temporary path
temporary_path = _os.path.join(_s.settings.path_home, "temp-"+str(int(1e3*_time.time()))+'-'+str(int(1e9*_n.random.rand(1))))
# open the temporary file
f = open(temporary_path, 'w')
# Override any existing binary if we're supposed to
if binary in [False, 'text', 'Text', 'ASCII', 'csv', 'CSV']:
self.pop_header('SPINMOB_BINARY', True)
binary = None
# If the binary flag is any kind of binary format, add the key
if not binary in [None, False, 'text', 'Text', 'ASCII', 'csv', 'CSV']:
self.h(SPINMOB_BINARY=binary)
# Now use the header element to determine the binary mode
if 'SPINMOB_BINARY' in self.hkeys:
# Get the binary mode (we'll use this later)
binary = self.pop_header('SPINMOB_BINARY')
# If it's "True", default to float32
if binary in ['True', True, 1]: binary = 'float32'
# Write the special first key.
f.write('SPINMOB_BINARY' + delimiter + binary + '\n')
# Write the usual header
for k in self.hkeys: f.write(k + delimiter + repr(self.headers[k]) + "\n")
f.write('\n')
# if we're not just supposed to write the header
if not header_only:
# Normal ascii saving mode.
if binary in [None, 'None', False, 'False']:
# write the ckeys
elements = []
for ckey in self.ckeys:
elements.append(str(ckey).replace(delimiter,'_'))
f.write(delimiter.join(elements) + "\n")
# now loop over the data
for n in range(0, len(self[0])):
# loop over each column
elements = []
for m in range(0, len(self.ckeys)):
# write the data if there is any, otherwise, placeholder
if n < len(self[m]):
elements.append(str(self[m][n]))
else:
elements.append('_')
f.write(delimiter.join(elements) + "\n")
# Binary mode
else:
# Announce that we're done with the header. It's binary time
f.write('SPINMOB_BINARY\n')
# Loop over the ckeys
for n in range(len(self.ckeys)):
# Get the binary data string
data_string = _n.array(self[n]).astype(binary).tostring()
# Write the column
# ckey + delimiter + count + \n + datastring + \n
f.write(str(self.ckeys[n]).replace(delimiter,'_')
+ delimiter + str(len(self[n])) + '\n')
f.close()
f = open(temporary_path, 'ab')
f.write(data_string)
f.close()
f = open(temporary_path, 'a')
f.write('\n')
f.close()
# now move it
_shutil.move(temporary_path, path)
return self | This will save all the header info and columns to an ascii file with
the specified path.
Parameters
----------
path=None
Path for saving the data. If None, this will bring up
a save file dialog.
filters='*.dat'
File filter for the file dialog (for path=None)
force_extension=None
If set to a string, e.g., 'txt', it will enforce that the chosen
filename will have this extension.
force_overwrite=False
Normally, if the file * exists, this will copy that
to *.backup. If the backup already exists, this
function will abort. Setting this to True will
force overwriting the backup file.
header_only=False
Only output the header?
delimiter='use current'
This will set the delimiter of the output file
'use current' means use self.delimiter
binary=None
Set to one of the allowed numpy dtypes, e.g., float32, float64,
complex64, int32, etc. Setting binary=True defaults to float64.
Note if the header contains the key SPINMOB_BINARY and binary=None,
it will save as binary using the header specification. | entailment |
def get_data_point(self, n):
"""
Returns the n'th data point (starting at 0) from all columns.
Parameters
----------
n
Index of data point to return.
"""
# loop over the columns and pop the data
point = []
for k in self.ckeys: point.append(self[k][n])
return point | Returns the n'th data point (starting at 0) from all columns.
Parameters
----------
n
Index of data point to return. | entailment |
def pop_data_point(self, n):
"""
This will remove and return the n'th data point (starting at 0) from
all columns.
Parameters
----------
n
Index of data point to pop.
"""
# loop over the columns and pop the data
popped = []
for k in self.ckeys:
# first convert to a list
data = list(self.c(k))
# pop the data
popped.append(data.pop(n))
# now set this column again
self.insert_column(_n.array(data), k)
return popped | This will remove and return the n'th data point (starting at 0) from
all columns.
Parameters
----------
n
Index of data point to pop. | entailment |
def insert_data_point(self, new_data, index=None):
"""
Inserts a data point at index n.
Parameters
----------
new_data
A list or array of new data points, one for each column.
index
Where to insert the point(s) in each column. None => append.
"""
if not len(new_data) == len(self.columns) and not len(self.columns)==0:
print("ERROR: new_data must have as many elements as there are columns.")
return
# otherwise, we just auto-add this data point as new columns
elif len(self.columns)==0:
for i in range(len(new_data)): self[i] = [new_data[i]]
# otherwise it matches length so just insert it.
else:
for i in range(len(new_data)):
# get the array and turn it into a list
data = list(self[i])
# append or insert
if index is None: data.append( new_data[i])
else: data.insert(index, new_data[i])
# reconvert to an array
self[i] = _n.array(data)
return self | Inserts a data point at index n.
Parameters
----------
new_data
A list or array of new data points, one for each column.
index
Where to insert the point(s) in each column. None => append. | entailment |
def execute_script(self, script, g=None):
"""
Runs a script, returning the result.
Parameters
----------
script
String script to be evaluated (see below).
g=None
Optional dictionary of additional globals for the script evaluation.
These will automatically be inserted into self.extra_globals.
Usage
-----
Scripts are of the form:
"3.0 + x/y - d[0] where x=3.0*c('my_column')+h('setting'); y=d[1]"
By default, "d" refers to the databox object itself, giving access to
everything and enabling complete control over the universe. Meanwhile,
c() and h() give quick reference to d.c() and d.h() to get columns and
header lines. Additionally, these scripts can see all of the numpy
functions like sin, cos, sqrt, etc.
If you would like access to additional globals in a script,
there are a few options in addition to specifying the g parametres.
You can set self.extra_globals to the appropriate globals dictionary
or add globals using self.insert_global(). Setting g=globals() will
automatically insert all of your current globals into this databox
instance.
There are a few shorthand scripts available as well. You can simply type
a column name such as 'my_column' or a column number like 2. However, I
only added this functionality as a shortcut, and something like
"2.0*a where a=my_column" will not work unless 'my_column is otherwise
defined. I figure since you're already writing a complicated script in
that case, you don't want to accidentally shortcut your way into using
a column instead of a constant! Use "2.0*a where a=c('my_column')"
instead.
"""
# add any extra user-supplied global variables for the eventual eval() call.
if not g==None: self.extra_globals.update(g)
# If the script is not a list of scripts, return the script value.
# This is the termination of a recursive call.
if not _s.fun.is_iterable(script):
# special case
if script is None: return None
# get the expression and variables dictionary
[expression, v] = self._parse_script(script)
# if there was a problem parsing the script
if v is None:
print("ERROR: Could not parse '"+script+"'")
return None
# get all the numpy stuff too
g = self._globals()
g.update(v)
# otherwise, evaluate the script using python's eval command
return eval(expression, g)
# Otherwise, this is a list of (lists of) scripts. Make the recursive call.
output = []
for s in script: output.append(self.execute_script(s))
return output | Runs a script, returning the result.
Parameters
----------
script
String script to be evaluated (see below).
g=None
Optional dictionary of additional globals for the script evaluation.
These will automatically be inserted into self.extra_globals.
Usage
-----
Scripts are of the form:
"3.0 + x/y - d[0] where x=3.0*c('my_column')+h('setting'); y=d[1]"
By default, "d" refers to the databox object itself, giving access to
everything and enabling complete control over the universe. Meanwhile,
c() and h() give quick reference to d.c() and d.h() to get columns and
header lines. Additionally, these scripts can see all of the numpy
functions like sin, cos, sqrt, etc.
If you would like access to additional globals in a script,
there are a few options in addition to specifying the g parametres.
You can set self.extra_globals to the appropriate globals dictionary
or add globals using self.insert_global(). Setting g=globals() will
automatically insert all of your current globals into this databox
instance.
There are a few shorthand scripts available as well. You can simply type
a column name such as 'my_column' or a column number like 2. However, I
only added this functionality as a shortcut, and something like
"2.0*a where a=my_column" will not work unless 'my_column is otherwise
defined. I figure since you're already writing a complicated script in
that case, you don't want to accidentally shortcut your way into using
a column instead of a constant! Use "2.0*a where a=c('my_column')"
instead. | entailment |
def _parse_script(self, script, n=0):
"""
This takes a script such as "a/b where a=c('current'), b=3.3" and returns
["a/b", {"a":self.columns["current"], "b":3.3}]
You can also just use an integer for script to reference columns by number
or use the column label as the script.
n is for internal use. Don't use it. In fact, don't use this function, user.
"""
if n > 1000:
print("This script ran recursively 1000 times!")
a = input("<enter> or (q)uit: ")
if a.strip().lower() in ['q', 'quit']:
script = None
if script is None: return [None, None]
# check if the script is simply an integer
if type(script) in [int,int]:
if script<0: script = script+len(self.ckeys)
return ["___"+str(script), {"___"+str(script):self[script]}]
# the scripts would like to use calls like "h('this')/3.0*c('that')",
# so to make eval() work we should add these functions to a local list
# first split up by "where"
split_script = script.split(" where ")
########################################
# Scripts without a "where" statement:
########################################
# if it's a simple script, like "column0" or "c(3)/2.0"
if len(split_script) is 1:
if self.debug: print("script of length 1")
# try to evaluate the script
# first try to evaluate it as a simple column label
if n==0 and script in self.ckeys:
# only try this on the zero'th attempt
# if this is a recursive call, there can be ambiguities if the
# column names are number strings
return ['___', {'___':self[script]}]
# Otherwise, evaluate it.
try:
b = eval(script, self._globals())
return ['___', {'___':b}]
except:
print()
print("ERROR: Could not evaluate '"+str(script)+"'")
return [None, None]
#######################################
# Full-on fancy scripts
#######################################
# otherwise it's a complicated script like "c(1)-a/2 where a=h('this')"
# tidy up the expression
expression = split_script[0].strip()
# now split the variables list up by ,
varsplit = split_script[1].split(';')
# loop over the entries in the list of variables, storing the results
# of evaluation in the "stuff" dictionary
stuff = dict()
for var in varsplit:
# split each entry by the "=" sign
s = var.split("=")
if len(s) == 1:
print(s, "has no '=' in it")
return [None, None]
# tidy up into "variable" and "column label"
v = s[0].strip()
c = s[1].strip()
# now try to evaluate c, given our current globbies
# recursively call this sub-script. At the end of all this mess
# we want the final return value to be the first expression
# and a full dictionary of variables to fill it
[x,y] = self._parse_script(c, n+1)
# if it's not working, just quit out.
if y is None: return [None, None]
stuff[v] = y[x]
# at this point we've found or generated the list
return [expression, stuff] | This takes a script such as "a/b where a=c('current'), b=3.3" and returns
["a/b", {"a":self.columns["current"], "b":3.3}]
You can also just use an integer for script to reference columns by number
or use the column label as the script.
n is for internal use. Don't use it. In fact, don't use this function, user. | entailment |
def copy_headers(self, source_databox):
"""
Loops over the hkeys of the source_databox, updating this databoxes' header.
"""
for k in source_databox.hkeys: self.insert_header(k, source_databox.h(k))
return self | Loops over the hkeys of the source_databox, updating this databoxes' header. | entailment |
def copy_columns(self, source_databox):
"""
Loops over the ckeys of the source_databox, updating this databoxes' columns.
"""
for k in source_databox.ckeys: self.insert_column(source_databox[k], k)
return self | Loops over the ckeys of the source_databox, updating this databoxes' columns. | entailment |
def copy_all(self, source_databox):
"""
Copies the header and columns from source_databox to this databox.
"""
self.copy_headers(source_databox)
self.copy_columns(source_databox)
return self | Copies the header and columns from source_databox to this databox. | entailment |
def insert_globals(self, *args, **kwargs):
"""
Appends or overwrites the supplied object in the self.extra_globals.
Use this to expose execute_script() or _parse_script() etc... to external
objects and functions.
Regular arguments are assumed to have a __name__ attribute (as is the
case for functions) to use as the key, and keyword arguments will just
be added as dictionary elements.
"""
for a in args: kwargs[a.__name__] = a
self.extra_globals.update(kwargs) | Appends or overwrites the supplied object in the self.extra_globals.
Use this to expose execute_script() or _parse_script() etc... to external
objects and functions.
Regular arguments are assumed to have a __name__ attribute (as is the
case for functions) to use as the key, and keyword arguments will just
be added as dictionary elements. | entailment |
def insert_header(self, hkey, value, index=None):
"""
This will insert/overwrite a value to the header and hkeys.
Parameters
----------
hkey
Header key. Will be appended to self.hkeys if non existent, or
inserted at the specified index.
If hkey is an integer, uses self.hkeys[hkey].
value
Value of the header.
index=None
If specified (integer), hkey will be inserted at this location in
self.hkeys.
"""
#if hkey is '': return
# if it's an integer, use the hkey from the list
if type(hkey) in [int, int]: hkey = self.hkeys[hkey]
# set the data
self.headers[str(hkey)] = value
if not hkey in self.hkeys:
if index is None: self.hkeys.append(str(hkey))
else: self.hkeys.insert(index, str(hkey))
return self | This will insert/overwrite a value to the header and hkeys.
Parameters
----------
hkey
Header key. Will be appended to self.hkeys if non existent, or
inserted at the specified index.
If hkey is an integer, uses self.hkeys[hkey].
value
Value of the header.
index=None
If specified (integer), hkey will be inserted at this location in
self.hkeys. | entailment |
def is_same_as(self, other_databox, headers=True, columns=True, header_order=True, column_order=True, ckeys=True):
"""
Tests that the important (i.e. savable) information in this databox
is the same as that of the other_databox.
Parameters
----------
other_databox
Databox with which to compare.
headers=True
Make sure all header elements match.
columns=True
Make sure every element of every column matches.
header_order=True
Whether the order of the header elements must match.
column_order=True
Whether the order of the columns must match. This is only a sensible
concern if ckeys=True.
ckeys=True
Whether the actual ckeys matter, or just the ordered columns of data.
Note the == symbol runs this function with everything True.
"""
d = other_databox
if not hasattr(other_databox, '_is_spinmob_databox'): return False
# Proceed by testing things one at a time, returning false if one fails
if headers:
# Same number of elements
if not len(self.hkeys) == len(d.hkeys): return False
# Elements
if header_order and not self.hkeys == d.hkeys: return False
# Each value
for k in self.hkeys:
# Make sure the key exists
if not k in d.hkeys: return False
# Make sure it's the same.
if not self.h(k) == d.h(k): return False
if columns:
# Same number of columns
if not len(self.ckeys) == len(d.ckeys): return False
# If we're checking columns by ckeys
if ckeys:
# Columns
if column_order and not self.ckeys == d.ckeys: return False
# Each value of each array
for k in self.ckeys:
# Make sure the key exists
if not k in d.ckeys: return False
# Check the values
if not (_n.array(self[k]) == _n.array(d[k])).all(): return False
# Otherwise we're ignoring ckeys
else:
for n in range(len(self.ckeys)):
if not (_n.array(self[n]) == _n.array(d[n])).all(): return False
# Passes all tests
return True | Tests that the important (i.e. savable) information in this databox
is the same as that of the other_databox.
Parameters
----------
other_databox
Databox with which to compare.
headers=True
Make sure all header elements match.
columns=True
Make sure every element of every column matches.
header_order=True
Whether the order of the header elements must match.
column_order=True
Whether the order of the columns must match. This is only a sensible
concern if ckeys=True.
ckeys=True
Whether the actual ckeys matter, or just the ordered columns of data.
Note the == symbol runs this function with everything True. | entailment |
def pop_header(self, hkey, ignore_error=False):
"""
This will remove and return the specified header value.
Parameters
----------
hkey
Header key you wish to pop.
You can specify either a key string or an index.
ignore_error=False
Whether to quietly ignore any errors (i.e., hkey not found).
"""
# try the integer approach first to allow negative values
if type(hkey) is not str:
try:
return self.headers.pop(self.hkeys.pop(hkey))
except:
if not ignore_error:
print("ERROR: pop_header() could not find hkey "+str(hkey))
return None
else:
try:
# find the key integer and pop it
hkey = self.hkeys.index(hkey)
# pop it!
return self.headers.pop(self.hkeys.pop(hkey))
except:
if not ignore_error:
print("ERROR: pop_header() could not find hkey "+str(hkey))
return | This will remove and return the specified header value.
Parameters
----------
hkey
Header key you wish to pop.
You can specify either a key string or an index.
ignore_error=False
Whether to quietly ignore any errors (i.e., hkey not found). | entailment |
def pop_column(self, ckey):
"""
This will remove and return the data in the specified column.
You can specify either a key string or an index.
"""
# try the integer approach first to allow negative values
if type(ckey) is not str:
return self.columns.pop(self.ckeys.pop(ckey))
else:
# find the key integer and pop it
ckey = self.ckeys.index(ckey)
# if we didn't find the column, quit
if ckey < 0:
print("Column does not exist (yes, we looked).")
return
# pop it!
return self.columns.pop(self.ckeys.pop(ckey)) | This will remove and return the data in the specified column.
You can specify either a key string or an index. | entailment |
def insert_column(self, data_array, ckey='temp', index=None):
"""
This will insert/overwrite a new column and fill it with the data from the
the supplied array.
Parameters
----------
data_array
Data; can be a list, but will be converted to numpy array
ckey
Name of the column; if an integer is supplied, uses self.ckeys[ckey]
index
Where to insert this column. None => append to end.
"""
# if it's an integer, use the ckey from the list
if type(ckey) in [int, int]: ckey = self.ckeys[ckey]
# append/overwrite the column value
self.columns[ckey] = _n.array(data_array)
if not ckey in self.ckeys:
if index is None: self.ckeys.append(ckey)
else: self.ckeys.insert(index, ckey)
return self | This will insert/overwrite a new column and fill it with the data from the
the supplied array.
Parameters
----------
data_array
Data; can be a list, but will be converted to numpy array
ckey
Name of the column; if an integer is supplied, uses self.ckeys[ckey]
index
Where to insert this column. None => append to end. | entailment |
def append_column(self, data_array, ckey='temp'):
"""
This will append a new column and fill it with the data from the
the supplied array.
Parameters
----------
data_array
Data; can be a list, but will be converted to numpy array
ckey
Name of the column.
"""
if not type(ckey) is str:
print("ERROR: ckey should be a string!")
return
if ckey in self.ckeys:
print("ERROR: ckey '"+ckey+"' already exists!")
return
return self.insert_column(data_array, ckey) | This will append a new column and fill it with the data from the
the supplied array.
Parameters
----------
data_array
Data; can be a list, but will be converted to numpy array
ckey
Name of the column. | entailment |
def rename_header(self, old_name, new_name):
"""
This will rename the header. The supplied names need to be strings.
"""
self.hkeys[self.hkeys.index(old_name)] = new_name
self.headers[new_name] = self.headers.pop(old_name)
return self | This will rename the header. The supplied names need to be strings. | entailment |
def rename_column(self, column, new_name):
"""
This will rename the column.
The supplied column can be an integer or the old column name.
"""
if type(column) is not str: column = self.ckeys[column]
self.ckeys[self.ckeys.index(column)] = new_name
self.columns[new_name] = self.columns.pop(column)
return self | This will rename the column.
The supplied column can be an integer or the old column name. | entailment |
def trim(self, *conditions):
"""
Removes data points not satisfying the supplied conditions. Conditions
can be truth arrays (having the same length as the columns!)
or scripted strings.
Example Workflow
----------------
d1 = spinmob.data.load()
d2 = d1.trim( (2<d1[0]) & (d1[0]<10) | (d1[3]==22), 'sin(d[2])*h("gain")<32.2')
Note this will not modify the databox, rather it will generate a new
one with the same header information and return it.
"""
conditions = list(conditions)
# if necessary, evaluate string scripts
for n in range(len(conditions)):
if type(conditions[n]) is str:
conditions[n] = self.execute_script(conditions[n])
# make a new databox with the same options and headers
new_databox = databox(delimiter=self.delimiter)
new_databox.copy_headers(self)
# trim it up, send it out.
cs = _s.fun.trim_data_uber(self, conditions)
for n in range(len(cs)): new_databox.append_column(cs[n], self.ckeys[n])
return new_databox | Removes data points not satisfying the supplied conditions. Conditions
can be truth arrays (having the same length as the columns!)
or scripted strings.
Example Workflow
----------------
d1 = spinmob.data.load()
d2 = d1.trim( (2<d1[0]) & (d1[0]<10) | (d1[3]==22), 'sin(d[2])*h("gain")<32.2')
Note this will not modify the databox, rather it will generate a new
one with the same header information and return it. | entailment |
def transpose(self):
"""
Returns a copy of this databox with the columns as rows.
Currently requires that the databox has equal-length columns.
"""
# Create an empty databox with the same headers and delimiter.
d = databox(delimter=self.delimiter)
self.copy_headers(d)
# Get the transpose
z = _n.array(self[:]).transpose()
# Build the columns of the new databox
for n in range(len(z)): d['c'+str(n)] = z[n]
return d | Returns a copy of this databox with the columns as rows.
Currently requires that the databox has equal-length columns. | entailment |
def update_headers(self, dictionary, keys=None):
"""
Updates the header with the supplied dictionary. If keys=None, it
will be unsorted. Otherwise it will loop over the supplied keys
(a list) in order.
"""
if keys is None: keys = list(dictionary.keys())
for k in keys: self.insert_header(k, dictionary[k])
return self | Updates the header with the supplied dictionary. If keys=None, it
will be unsorted. Otherwise it will loop over the supplied keys
(a list) in order. | entailment |
def c(self, *args, **kwargs):
"""
Takes a single argument or keyword argument, and returns the specified
column. If the argument (or keyword argument) is an integer, return the
n'th column, otherwise return the column based on key.
If no arguments are supplied, simply print the column information.
"""
# If not arguments, print everything
if len(args) + len(kwargs) == 0:
print("Columns")
if len(self.ckeys)==0: print (' No columns of data yet.')
# Loop over the ckeys and display their information
for n in range(len(self.ckeys)):
print(' '+str(n)+': '+str(self.ckeys[n])+' '+str(_n.shape(self[n])))
return
# Otherwise, find n
elif len(args): n = args[0]
elif len(kwargs):
for k in kwargs: n = kwargs[k]
# Nothing to do here.
if len(self.columns) == 0: return None
# if it's a string, use it as a key for the dictionary
if type(n) is str: return self.columns[n]
# if it's a list, return the specified columns
if type(n) in [list, tuple, range]:
output = []
for i in n: output.append(self[i])
return output
# If it's a slice, do the slice thing
if type(n) is slice:
start = n.start
stop = n.stop
step = n.step
# Fix up the unspecifieds
if start == None: start = 0
if stop == None or stop>len(self): stop = len(self)
if step == None: step = 1
# Return what was asked for
return self[range(start, stop, step)]
# Otherwise assume it's an integer
return self.columns[self.ckeys[n]] | Takes a single argument or keyword argument, and returns the specified
column. If the argument (or keyword argument) is an integer, return the
n'th column, otherwise return the column based on key.
If no arguments are supplied, simply print the column information. | entailment |
def h(self, *args, **kwargs):
"""
This function searches through hkeys for one *containing* a key string
supplied by args[0] and returns that header value.
Also can take integers, returning the key'th header value.
kwargs can be specified to set header elements.
Finally, if called with no arguments or keyword arguments, this
simply prints the header information.
"""
# If not arguments, print everything
if len(args) + len(kwargs) == 0:
print("Headers")
for n in range(len(self.hkeys)):
print(' '+str(n)+': '+str(self.hkeys[n])+' = '+repr(self.h(n)))
return
# first loop over kwargs if there are any to set header elements
for k in list(kwargs.keys()):
self.insert_header(k, kwargs[k])
# Meow search for a key if specified
if len(args):
# this can be shortened. Eventually, it'd be nice to get a tuple back!
hkey = args[0]
# if this is an index
if type(hkey) in [int, int]: return self.headers[self.hkeys[hkey]]
# if this is an exact match
elif hkey in self.hkeys: return self.headers[hkey]
# Look for a fragment.
else:
for k in self.hkeys:
if k.find(hkey) >= 0:
return self.headers[k]
print()
print("ERROR: Couldn't find '"+str(hkey) + "' in header.")
print("Possible values:")
for k in self.hkeys: print(k)
print()
return None | This function searches through hkeys for one *containing* a key string
supplied by args[0] and returns that header value.
Also can take integers, returning the key'th header value.
kwargs can be specified to set header elements.
Finally, if called with no arguments or keyword arguments, this
simply prints the header information. | entailment |
def set(self, **kwargs):
"""
Changes a setting or multiple settings. Can also call self() or
change individual parameters with self['parameter'] = value
"""
if len(kwargs)==0: return self
# Set settings
for k in list(kwargs.keys()): self[k] = kwargs[k]
# Plot if we're supposed to.
if self['autoplot'] and not self._initializing: self.plot()
return self | Changes a setting or multiple settings. Can also call self() or
change individual parameters with self['parameter'] = value | entailment |
def print_fit_parameters(self):
"""
Just prints them out in a way that's easy to copy / paste into python.
"""
s = ''
if self.results and self.results[1] is not None:
s = s + "\n# FIT RESULTS (reduced chi squared = {:s})\n".format(str(self.reduced_chi_squareds()))
for n in range(len(self._pnames)):
s = s + "{:10s} = {:G}\n".format(self._pnames[n], self.results[0][n])
elif self.results and self.results[1] is None:
s = s + "\n# FIT DID NOT CONVERGE\n"
for n in range(len(self._pnames)):
s = s + "{:10s} = {:G}\n".format(self._pnames[n], self.results[0][n])
else: s = s + "\n# NO FIT RESULTS\n"
print(s) | Just prints them out in a way that's easy to copy / paste into python. | entailment |
def set_functions(self, f='a*x*cos(b*x)+c', p='a=-0.2, b, c=3', c=None, bg=None, **kwargs):
"""
Sets the function(s) used to describe the data.
Parameters
----------
f=['a*x*cos(b*x)+c', 'a*x+c']
This can be a string function, a defined function
my_function(x,a,b), or a list of some combination
of these two types of objects. The length of such
a list must be equal to the number of data sets
supplied to the fit routine.
p='a=1.5, b'
This must be a comma-separated string list of
parameters used to fit. If an initial guess value is
not specified, 1.0 will be used.
If a function object is supplied, it is assumed that
this string lists the parameter names in order.
c=None
Fit _constants; like p, but won't be allowed to float
during the fit. This can also be None.
bg=None
Can be functions in the same format as f describing a
background (which can be subtracted during fits, etc)
Additional keyword arguments are added to the globals used when
evaluating the functions.
"""
# initialize everything
self._pnames = []
self._cnames = []
self._pguess = []
self._constants = []
# Update the globals
self._globals.update(kwargs)
# store these for later
self._f_raw = f
self._bg_raw = bg
# break up the constant names and initial values.
if c:
for s in c.split(','):
# split by '=' and see if there is an initial value
s = s.split('=')
# add the name to the list
self._cnames.append(s[0].strip())
# if there is a guess value, add this (or 1.0)
if len(s) > 1: self._constants.append(float(s[1]))
else: self._constants.append(1.0)
# break up the parameter names and initial values.
for s in p.split(','):
# split by '=' and see if there is an initial value
s = s.split('=')
# add the name to the list
self._pnames.append(s[0].strip())
# if there is a guess value, add this (or 1.0)
if len(s) > 1: self._pguess.append(float(s[1]))
else: self._pguess.append(1.0)
# use the internal settings we just set to create the functions
self._update_functions()
if self['autoplot']: self.plot()
return self | Sets the function(s) used to describe the data.
Parameters
----------
f=['a*x*cos(b*x)+c', 'a*x+c']
This can be a string function, a defined function
my_function(x,a,b), or a list of some combination
of these two types of objects. The length of such
a list must be equal to the number of data sets
supplied to the fit routine.
p='a=1.5, b'
This must be a comma-separated string list of
parameters used to fit. If an initial guess value is
not specified, 1.0 will be used.
If a function object is supplied, it is assumed that
this string lists the parameter names in order.
c=None
Fit _constants; like p, but won't be allowed to float
during the fit. This can also be None.
bg=None
Can be functions in the same format as f describing a
background (which can be subtracted during fits, etc)
Additional keyword arguments are added to the globals used when
evaluating the functions. | entailment |
def _update_functions(self):
"""
Uses internal settings to update the functions.
"""
self.f = []
self.bg = []
self._fnames = []
self._bgnames = []
self._odr_models = [] # Like f, but different parameters, for use in ODR
f = self._f_raw
bg = self._bg_raw
# make sure f and bg are lists of matching length
if not _s.fun.is_iterable(f) : f = [f]
if not _s.fun.is_iterable(bg): bg = [bg]
while len(bg) < len(f): bg.append(None)
# get a comma-delimited string list of parameter names for the "normal" function
pstring = 'x, ' + ', '.join(self._pnames)
# get the comma-delimited string for the ODR function
pstring_odr = 'x, '
for n in range(len(self._pnames)): pstring_odr = pstring_odr+'p['+str(n)+'], '
# update the globals for the functions
# the way this is done, we must redefine the functions
# every time we change a constant
for cname in self._cnames: self._globals[cname] = self[cname]
# loop over all the functions and create the master list
for n in range(len(f)):
# if f[n] is a string, define a function on the fly.
if isinstance(f[n], str):
# "Normal" least squares function (y-error bars only)
self.f.append( eval('lambda ' + pstring + ': ' + f[n], self._globals))
self._fnames.append(f[n])
# "ODR" compatible function (for x-error bars), based on self.f
self._odr_models.append( _odr.Model(eval('lambda p,x: self.f[n]('+pstring_odr+')', dict(self=self, n=n))))
# Otherwise, just append it.
else:
self.f.append(f[n])
self._fnames.append(f[n].__name__)
# if bg[n] is a string, define a function on the fly.
if isinstance(bg[n], str):
self.bg.append(eval('lambda ' + pstring + ': ' + bg[n], self._globals))
self._bgnames.append(bg[n])
else:
self.bg.append(bg[n])
if bg[n] is None: self._bgnames.append("None")
else: self._bgnames.append(bg[n].__name__)
# update the format of all the settings
for k in list(self._settings.keys()): self[k] = self[k]
# make sure we don't think our fit results are valid!
self.clear_results() | Uses internal settings to update the functions. | entailment |
def set_data(self, xdata=[1,2,3,4,5], ydata=[1.7,2,3,4,3], eydata=None, **kwargs):
"""
This will handle the different types of supplied data and put everything
in a standard format for processing.
Parameters
----------
xdata, ydata
These can be a single array of data or a list of data arrays.
eydata=None
Error bars for ydata. These can be None (for guessed error) or data
/ numbers matching the dimensionality of xdata and ydata
Notes
-----
xdata, ydata, and eydata can all be scripts or lists of scripts that
produce arrays. Any python code will work, and the scripts
automatically know about all numpy functions, the guessed parameters,
and the data itself (as x, y, ey). However, the scripts are
executed in order -- xdata, ydata, and eydata -- so the xdata script
cannot know about ydata or eydata, the ydata script cannot know about
eydata, and the eydata script knows about xdata and ydata.
Example:
xdata = [1,2,3,4,5]
ydata = [[1,2,1,2,1], 'cos(x[0])']
eydata = ['arctan(y[1])*a+b', 5]
In this example, there will be two data sets to fit (so there better be
two functions!), they will share the same xdata, the second ydata set
will be the array cos([1,2,3,4,5]) (note since there are multiple data
sets assumed (always), you have to select the data set with an index
on x and y), the error on the first data set will be this weird
functional dependence on the second ydata set and fit parameters a and
b (note, if a and b are not fit parameters, then you must
send them as keyword arguments so that they are defined) and the second
data set error bar will be a constant, 5.
Note this function is "somewhat" smart about reshaping the input
data to ease life a bit, but it can't handle ambiguities. If you
want to play it safe, supply lists for all three arguments that
match in dimensionality.
results can be obtained by calling get_data()
Additional optional keyword arguments are added to the globals for
script evaluation.
"""
# SET UP DATA SETS TO MATCH EACH OTHER AND NUMBER OF FUNCTIONS
# At this stage:
# xdata, ydata 'script', [1,2,3], [[1,2,3],'script'], ['script', [1,2,3]]
# eydata, exdata 'script', [1,1,1], [[1,1,1],'script'], ['script', [1,1,1]], 3, [3,[1,2,3]], None
# if xdata, ydata, or eydata are bare scripts, make them into lists
if type(xdata) is str: xdata = [xdata]
if type(ydata) is str: ydata = [ydata]
if type(eydata) is str or _s.fun.is_a_number(eydata) or eydata is None: eydata = [eydata]
#if type(exdata) is str or _s.fun.is_a_number(exdata) or exdata is None: exdata = [exdata]
# xdata and ydata ['script'], [1,2,3], [[1,2,3],'script'], ['script', [1,2,3]]
# eydata ['script'], [1,1,1], [[1,1,1],'script'], ['script', [1,1,1]], [3], [3,[1,2,3]], [None]
# if the first element of data is a number, then this is a normal array
if _s.fun.is_a_number(xdata[0]): xdata = [xdata]
if _s.fun.is_a_number(ydata[0]): ydata = [ydata]
# xdata and ydata ['script'], [[1,2,3]], [[1,2,3],'script'], ['script', [1,2,3]]
# eydata ['script'], [1,1,1], [[1,1,1],'script'], ['script', [1,1,1]], [3], [3,[1,2,3]], [None]
# if the first element of eydata is a number, this could also just be an error bar value
# Note: there is some ambiguity here, if the number of data sets equals the number of data points!
if _s.fun.is_a_number(eydata[0]) and len(eydata) == len(ydata[0]): eydata = [eydata]
#if _s.fun.is_a_number(exdata[0]) and len(exdata) == len(xdata[0]): exdata = [exdata]
# xdata and ydata ['script'], [[1,2,3]], [[1,2,3],'script'], ['script', [1,2,3]]
# eydata ['script'], [[1,1,1]], [[1,1,1],'script'], ['script', [1,1,1]], [3], [3,[1,2,3]], [None]
# Inflate the x, ex, and ey data sets to match the ydata sets
while len(xdata) < len(ydata): xdata .append( xdata[0])
while len(ydata) < len(xdata): ydata .append( ydata[0])
#while len(exdata) < len(xdata): exdata.append(exdata[0])
while len(eydata) < len(ydata): eydata.append(eydata[0])
# make sure these lists are the same length as the number of functions
while len(ydata) < len(self.f): ydata.append(ydata[0])
while len(xdata) < len(self.f): xdata.append(xdata[0])
while len(eydata) < len(self.f): eydata.append(eydata[0])
#while len(exdata) < len(self.f): exdata.append(exdata[0])
# xdata and ydata ['script','script'], [[1,2,3],[1,2,3]], [[1,2,3],'script'], ['script', [1,2,3]]
# eydata ['script','script'], [[1,1,1],[1,1,1]], [[1,1,1],'script'], ['script', [1,1,1]], [3,3], [3,[1,2,3]], [None,None]
# Clean up exdata. If any element isn't None, the other None elements need
# to be set to 0 so that ODR works.
# if not exdata.count(None) == len(exdata):
# # Search for and replace all None's with 0
# for n in range(len(exdata)):
# if exdata[n] == None: exdata[n] = 0
#
# store the data, script, or whatever it is!
self._set_xdata = xdata
self._set_ydata = ydata
self._set_eydata = eydata
#self._set_exdata = exdata
self._set_data_globals.update(kwargs)
# set the eyscale to 1 for each data set
self['scale_eydata'] = [1.0]*len(self._set_xdata)
#self['scale_exdata'] = [1.0]*len(self._set_xdata)
# Update the settings so they match the number of data sets.
for k in self._settings.keys(): self[k] = self[k]
# Plot if necessary
if self['autoplot']: self.plot()
return self | This will handle the different types of supplied data and put everything
in a standard format for processing.
Parameters
----------
xdata, ydata
These can be a single array of data or a list of data arrays.
eydata=None
Error bars for ydata. These can be None (for guessed error) or data
/ numbers matching the dimensionality of xdata and ydata
Notes
-----
xdata, ydata, and eydata can all be scripts or lists of scripts that
produce arrays. Any python code will work, and the scripts
automatically know about all numpy functions, the guessed parameters,
and the data itself (as x, y, ey). However, the scripts are
executed in order -- xdata, ydata, and eydata -- so the xdata script
cannot know about ydata or eydata, the ydata script cannot know about
eydata, and the eydata script knows about xdata and ydata.
Example:
xdata = [1,2,3,4,5]
ydata = [[1,2,1,2,1], 'cos(x[0])']
eydata = ['arctan(y[1])*a+b', 5]
In this example, there will be two data sets to fit (so there better be
two functions!), they will share the same xdata, the second ydata set
will be the array cos([1,2,3,4,5]) (note since there are multiple data
sets assumed (always), you have to select the data set with an index
on x and y), the error on the first data set will be this weird
functional dependence on the second ydata set and fit parameters a and
b (note, if a and b are not fit parameters, then you must
send them as keyword arguments so that they are defined) and the second
data set error bar will be a constant, 5.
Note this function is "somewhat" smart about reshaping the input
data to ease life a bit, but it can't handle ambiguities. If you
want to play it safe, supply lists for all three arguments that
match in dimensionality.
results can be obtained by calling get_data()
Additional optional keyword arguments are added to the globals for
script evaluation. | entailment |
def evaluate_script(self, script, **kwargs):
"""
Evaluates the supplied script (python-executable string).
Useful for testing your scripts!
globals already include all of numpy objects plus
self = self
f = self.f
bg = self.bg
and all the current guess parameters and constants
kwargs are added to globals for script evaluation.
"""
self._set_data_globals.update(kwargs)
return eval(script, self._set_data_globals) | Evaluates the supplied script (python-executable string).
Useful for testing your scripts!
globals already include all of numpy objects plus
self = self
f = self.f
bg = self.bg
and all the current guess parameters and constants
kwargs are added to globals for script evaluation. | entailment |
def get_data(self):
"""
Returns current xdata, ydata, eydata, after set_data()
has been run.
"""
# make sure we've done a "set data" call
if len(self._set_xdata)==0 or len(self._set_ydata)==0: return [[],[],[]]
# update the globals with the current fit parameter guess values
for n in range(len(self._pnames)): self._set_data_globals[self._pnames[n]] = self._pguess[n]
for n in range(len(self._cnames)): self._set_data_globals[self._cnames[n]] = self._constants[n]
# update the globals with x, y, and ey, and the functions
self._set_data_globals['f'] = self.f
self._set_data_globals['bg'] = self.bg
self._set_data_globals['self'] = self
# possibilities after calling set_data():
# xdata and ydata ['script','script'], [[1,2,3],[1,2,3]], [[1,2,3],'script'], ['script', [1,2,3]]
# eydata ['script','script'], [[1,1,1],[1,1,1]], [[1,1,1],'script'], ['script', [1,1,1]], [3,3], [3,[1,2,3]], [None,None]
# make a copy
xdata = list(self._set_xdata)
ydata = list(self._set_ydata)
eydata = list(self._set_eydata)
#exdata = list(self._set_exdata)
# make sure they're all lists of numpy arrays
for n in range(len(xdata)):
# For xdata, handle scripts or arrays
if type(xdata[n]) is str: xdata[n] = self.evaluate_script(xdata[n], **self._set_data_globals)
else: xdata[n] = _n.array(xdata[n])*1.0
# update the globals
self._set_data_globals['x'] = xdata
# make sure they're all lists of numpy arrays
for n in range(len(ydata)):
# For ydata, handle scripts or arrays
if type(ydata[n]) is str: ydata[n] = self.evaluate_script(ydata[n], **self._set_data_globals)
else: ydata[n] = _n.array(ydata[n])*1.0
# update the globals
self._set_data_globals['y'] = ydata
# make sure they're all lists of numpy arrays
self._guessed_eydata = False
for n in range(len(eydata)):
# handle scripts
if type(eydata[n]) is str:
eydata[n] = self.evaluate_script(eydata[n], **self._set_data_globals)
# handle None (possibly returned by script): take a visually-appealing guess at the error
if eydata[n] is None:
eydata[n] = _n.ones(len(xdata[n])) * (max(ydata[n])-min(ydata[n]))*0.05
self._guessed_eydata = True
# handle constant error bars (possibly returned by script)
if _s.fun.is_a_number(eydata[n]):
eydata[n] = _n.ones(len(xdata[n])) * eydata[n]
# make it an array
eydata[n] = _n.array(eydata[n]) * self["scale_eydata"][n]
# # make sure they're all lists of numpy arrays
# for n in range(len(exdata)):
#
# # handle scripts
# if type(exdata[n]) is str:
# exdata[n] = self.evaluate_script(exdata[n], **self._set_data_globals)
#
# # None is okay for exdata
#
# # handle constant error bars (possibly returned by script)
# if _s.fun.is_a_number(exdata[n]):
# exdata[n] = _n.ones(len(xdata[n])) * exdata[n]
#
# # make it an array
# if not exdata[n] == None:
# exdata[n] = _n.array(exdata[n]) * self["scale_exdata"][n]
# return it
return xdata, ydata, eydata | Returns current xdata, ydata, eydata, after set_data()
has been run. | entailment |
def set_guess_to_fit_result(self):
"""
If you have a fit result, set the guess parameters to the
fit parameters.
"""
if self.results is None:
print("No fit results to use! Run fit() first.")
return
# loop over the results and set the guess values
for n in range(len(self._pguess)): self._pguess[n] = self.results[0][n]
if self['autoplot']: self.plot()
return self | If you have a fit result, set the guess parameters to the
fit parameters. | entailment |
def get_processed_data(self, do_coarsen=True, do_trim=True):
"""
This will coarsen and then trim the data sets according to settings.
Returns processed xdata, ydata, eydata.
Parameters
----------
do_coarsen=True
Whether we should coarsen the data
do_trim=True
Whether we should trim the data
Settings
--------
xmin, xmax, ymin, ymax
Limits on x and y data points for trimming.
coarsen
Break the data set(s) into this many groups of points, and average
each group into one point, propagating errors.
"""
# get the data
xdatas, ydatas, eydatas = self.get_data()
# get the trim limits (trimits)
xmins = self['xmin']
xmaxs = self['xmax']
ymins = self['ymin']
ymaxs = self['ymax']
coarsen = self['coarsen']
# make sure we have one limit for each data set
if type(xmins) is not list: xmins = [xmins] *len(xdatas)
if type(xmaxs) is not list: xmaxs = [xmaxs] *len(xdatas)
if type(ymins) is not list: ymins = [ymins] *len(xdatas)
if type(ymaxs) is not list: ymaxs = [ymaxs] *len(xdatas)
if type(coarsen) is not list: coarsen = [coarsen]*len(xdatas)
# this should cover all the data sets (dimensions should match!)
xdata_massaged = []
ydata_massaged = []
eydata_massaged = []
#exdata_massaged = []
for n in range(len(xdatas)):
x = xdatas[n]
y = ydatas[n]
ey = eydatas[n]
#ex = exdatas[n]
# coarsen the data
if do_coarsen:
x = _s.fun.coarsen_array(x, self['coarsen'][n], 'mean')
y = _s.fun.coarsen_array(y, self['coarsen'][n], 'mean')
ey = _n.sqrt(_s.fun.coarsen_array(ey**2, self['coarsen'][n], 'mean')/self['coarsen'][n])
# if not ex == None:
# ex = _n.sqrt(_s.fun.coarsen_array(ex**2, self['coarsen'][n], 'mean')/self['coarsen'][n])
if do_trim:
# Create local mins and maxes
xmin = xmins[n]
ymin = ymins[n]
xmax = xmaxs[n]
ymax = ymaxs[n]
# handle "None" limits
if xmin is None: xmin = min(x)
if xmax is None: xmax = max(x)
if ymin is None: ymin = min(y)
if ymax is None: ymax = max(y)
# trim the data
[xt, yt, eyt] = _s.fun.trim_data_uber([x, y, ey],
[x>=xmin, x<=xmax,
y>=ymin, y<=ymax])
# Catch the over-trimmed case
if(len(xt)==0):
self._error("\nDATA SET "+str(n)+": OOPS! OOPS! Specified limits (xmin, xmax, ymin, ymax) eliminate all data! Ignoring.")
else:
x = xt
y = yt
ey = eyt
#ex = ext
# store the result
xdata_massaged.append(x)
ydata_massaged.append(y)
eydata_massaged.append(ey)
#exdata_massaged.append(ex)
return xdata_massaged, ydata_massaged, eydata_massaged | This will coarsen and then trim the data sets according to settings.
Returns processed xdata, ydata, eydata.
Parameters
----------
do_coarsen=True
Whether we should coarsen the data
do_trim=True
Whether we should trim the data
Settings
--------
xmin, xmax, ymin, ymax
Limits on x and y data points for trimming.
coarsen
Break the data set(s) into this many groups of points, and average
each group into one point, propagating errors. | entailment |
def _massage_data(self):
"""
Processes the data and stores it.
"""
self._xdata_massaged, self._ydata_massaged, self._eydata_massaged = self.get_processed_data()
# # Create the odr data.
# self._odr_datas = []
# for n in range(len(self._xdata_massaged)):
# # Only exdata can be None; make sure it's zeros at least.
# ex = self._exdata_massaged[n]
# if ex == None: ex = _n.zeros(len(self._eydata_massaged[n]))
# self._odr_datas.append(_odr.RealData(self._xdata_massaged[n],
# self._ydata_massaged[n],
# sx=ex,
# sy=self._eydata_massaged[n]))
return self | Processes the data and stores it. | entailment |
def fit(self, **kwargs):
"""
This will try to determine fit parameters using scipy.optimize.leastsq
algorithm. This function relies on a previous call of set_data() and
set_functions().
Notes
-----
results of the fit algorithm are stored in self.results.
See scipy.optimize.leastsq for more information.
Optional keyword arguments are sent to self.set() prior to
fitting.
"""
if len(self._set_xdata)==0 or len(self._set_ydata)==0:
return self._error("No data. Please use set_data() prior to fitting.")
if self._f_raw is None:
return self._error("No functions. Please use set_functions() prior to fitting.")
# Do the processing once, to increase efficiency
self._massage_data()
# Send the keyword arguments to the settings
self.set(**kwargs)
# do the actual optimization
self.results = _opt.leastsq(self._studentized_residuals_concatenated, self._pguess, full_output=1)
# plot if necessary
if self['autoplot']: self.plot()
return self | This will try to determine fit parameters using scipy.optimize.leastsq
algorithm. This function relies on a previous call of set_data() and
set_functions().
Notes
-----
results of the fit algorithm are stored in self.results.
See scipy.optimize.leastsq for more information.
Optional keyword arguments are sent to self.set() prior to
fitting. | entailment |
def fix(self, *args, **kwargs):
"""
Turns parameters to constants. As arguments, parameters must be strings.
As keyword arguments, they can be set at the same time.
Note this will NOT work when specifying a non-string fit function,
because there is no flexibility in the number of arguments. To get
around this, suppose you've defined a function stuff(x,a,b). Instead
of sending the stuff object to self.set_functions() directly, make it
a string function, e.g.:
self.set_functions('stuff(x,a,b)', 'a,b', stuff=stuff)
"""
# first set all the keyword argument values
self.set(**kwargs)
# get everything into one big list
pnames = list(args) + list(kwargs.keys())
# move each pname to the constants
for pname in pnames:
if not pname in self._pnames:
self._error("Naughty. '"+pname+"' is not a valid fit parameter name.")
else:
n = self._pnames.index(pname)
# use the fit result if it exists
if self.results: value = self.results[0][n]
# otherwise use the guess value
else: value = self._pguess[n]
# make the switcheroo
if type(self._pnames) is not list: self._pnames = list(self._pnames)
if type(self._pguess) is not list: self._pguess = list(self._pguess)
if type(self._cnames) is not list: self._cnames = list(self._cnames)
if type(self._constants) is not list: self._constants = list(self._constants)
self._pnames.pop(n)
self._pguess.pop(n)
self._cnames.append(pname)
self._constants.append(value)
# update
self._update_functions()
return self | Turns parameters to constants. As arguments, parameters must be strings.
As keyword arguments, they can be set at the same time.
Note this will NOT work when specifying a non-string fit function,
because there is no flexibility in the number of arguments. To get
around this, suppose you've defined a function stuff(x,a,b). Instead
of sending the stuff object to self.set_functions() directly, make it
a string function, e.g.:
self.set_functions('stuff(x,a,b)', 'a,b', stuff=stuff) | entailment |
def free(self, *args, **kwargs):
"""
Turns a constant into a parameter. As arguments, parameters must be strings.
As keyword arguments, they can be set at the same time.
"""
# first set all the keyword argument values
self.set(**kwargs)
# get everything into one big list
cnames = list(args) + list(kwargs.keys())
# move each pname to the constants
for cname in cnames:
if not cname in self._cnames:
self._error("Naughty. '"+cname+"' is not a valid constant name.")
else:
n = self._cnames.index(cname)
# make the switcheroo
if type(self._pnames) is not list: self._pnames = list(self._pnames)
if type(self._pguess) is not list: self._pguess = list(self._pguess)
if type(self._cnames) is not list: self._cnames = list(self._cnames)
if type(self._constants) is not list: self._constants = list(self._constants)
self._pnames.append(self._cnames.pop(n))
self._pguess.append(self._constants.pop(n))
# update
self._update_functions()
return self | Turns a constant into a parameter. As arguments, parameters must be strings.
As keyword arguments, they can be set at the same time. | entailment |
def _evaluate_all_functions(self, xdata, p=None):
"""
This returns a list of function outputs given the stored data sets.
This function relies on a previous call of set_data().
p=None means use the fit results
"""
if p is None: p = self.results[0]
output = []
for n in range(len(self.f)):
output.append(self._evaluate_f(n, self._xdata_massaged[n], p) )
return output | This returns a list of function outputs given the stored data sets.
This function relies on a previous call of set_data().
p=None means use the fit results | entailment |
def _evaluate_f(self, n, xdata, p=None):
"""
Evaluates a single function n for arbitrary xdata and p tuple.
p=None means use the fit results
"""
# by default, use the fit values, otherwise, use the guess values.
if p is None and self.results is not None: p = self.results[0]
elif p is None and self.results is None: p = self._pguess
# assemble the arguments for the function
args = (xdata,) + tuple(p)
# evaluate this function.
return self.f[n](*args) | Evaluates a single function n for arbitrary xdata and p tuple.
p=None means use the fit results | entailment |
def _evaluate_bg(self, n, xdata, p=None):
"""
Evaluates a single background function n for arbitrary xdata and p tuple.
p=None means use the fit results
"""
# by default, use the fit values, otherwise, use the guess values.
if p is None and self.results is not None: p = self.results[0]
elif p is None and self.results is None: p = self._pguess
# return None if there is no background function
if self.bg[n] is None: return None
# assemble the arguments for the function
args = (xdata,) + tuple(p)
# evaluate the function
return self.bg[n](*args) | Evaluates a single background function n for arbitrary xdata and p tuple.
p=None means use the fit results | entailment |
def _format_value_error(self, v, e, pm=" +/- "):
"""
Returns a string v +/- e with the right number of sig figs.
"""
# If we have weird stuff
if not _s.fun.is_a_number(v) or not _s.fun.is_a_number(e) \
or v in [_n.inf, _n.nan, _n.NAN] or e in [_n.inf, _n.nan, _n.NAN]:
return str(v)+pm+str(e)
# Normal values.
try:
sig_figs = -int(_n.floor(_n.log10(abs(e))))+1
return str(_n.round(v, sig_figs)) + pm + str(_n.round(e, sig_figs))
except:
return str(v)+pm+str(e) | Returns a string v +/- e with the right number of sig figs. | entailment |
def _studentized_residuals_fast(self, p=None):
"""
Returns a list of studentized residuals, (ydata - model)/error
This function relies on a previous call to set_data(), and assumes
self._massage_data() has been called (to increase speed).
Parameters
----------
p=None
Function parameters to use. None means use the fit results; if no fit, use guess results.
"""
if len(self._set_xdata)==0 or len(self._set_ydata)==0: return
if p is None:
if self.results is None: p = self._pguess
else: p = self.results[0]
# evaluate the function for all the data, returns a list!
f = self._evaluate_all_functions(self._xdata_massaged, p)
# get the full residuals list
residuals = []
for n in range(len(f)):
numerator = self._ydata_massaged[n]-f[n]
denominator = _n.absolute(self._eydata_massaged[n])
residuals.append(numerator/denominator)
return residuals | Returns a list of studentized residuals, (ydata - model)/error
This function relies on a previous call to set_data(), and assumes
self._massage_data() has been called (to increase speed).
Parameters
----------
p=None
Function parameters to use. None means use the fit results; if no fit, use guess results. | entailment |
def chi_squareds(self, p=None):
"""
Returns a list of chi squared for each data set. Also uses ydata_massaged.
p=None means use the fit results
"""
if len(self._set_xdata)==0 or len(self._set_ydata)==0: return None
if p is None: p = self.results[0]
# get the residuals
rs = self.studentized_residuals(p)
# Handle the none case
if rs == None: return None
# square em and sum em.
cs = []
for r in rs: cs.append(sum(r**2))
return cs | Returns a list of chi squared for each data set. Also uses ydata_massaged.
p=None means use the fit results | entailment |
def chi_squared(self, p=None):
"""
Returns the total chi squared (summed over all massaged data sets).
p=None means use the fit results.
"""
chi2s = self.chi_squareds(p)
if chi2s == None: return None
return sum(self.chi_squareds(p)) | Returns the total chi squared (summed over all massaged data sets).
p=None means use the fit results. | entailment |
def degrees_of_freedom(self):
"""
Returns the number of degrees of freedom.
"""
if len(self._set_xdata)==0 or len(self._set_ydata)==0: return None
# Temporary hack: get the studentized residuals, which uses the massaged data
# This should later be changed to get_massaged_data()
r = self.studentized_residuals()
# Happens if data / functions not defined
if r == None: return
# calculate the number of points
N = 0.0
for i in range(len(r)): N += len(r[i])
return N-len(self._pnames) | Returns the number of degrees of freedom. | entailment |
def reduced_chi_squareds(self, p=None):
"""
Returns the reduced chi squared for each massaged data set.
p=None means use the fit results.
"""
if len(self._set_xdata)==0 or len(self._set_ydata)==0: return None
if p is None: p = self.results[0]
r = self.studentized_residuals(p)
# In case it's not possible to calculate
if r is None: return
# calculate the number of points
N = 0
for i in range(len(r)): N += len(r[i])
# degrees of freedom
dof_per_point = self.degrees_of_freedom()/N
for n in range(len(r)):
r[n] = sum(r[n]**2)/(len(r[n])*dof_per_point)
return r | Returns the reduced chi squared for each massaged data set.
p=None means use the fit results. | entailment |
def reduced_chi_squared(self, p=None):
"""
Returns the reduced chi squared for all massaged data sets.
p=None means use the fit results.
"""
if len(self._set_xdata)==0 or len(self._set_ydata)==0: return None
if p is None: p = self.results[0]
chi2 = self.chi_squared(p)
dof = self.degrees_of_freedom()
if not _s.fun.is_a_number(chi2) or not _s.fun.is_a_number(dof):
return None
return _n.divide(self.chi_squared(p), self.degrees_of_freedom()) | Returns the reduced chi squared for all massaged data sets.
p=None means use the fit results. | entailment |
def autoscale_eydata(self):
"""
Rescales the error so the next fit will give reduced chi squareds of 1.
Each data set will be scaled independently, and you may wish to run
this a few times until it converges.
"""
if not self.results:
self._error("You must complete a fit first.")
return
r = self.reduced_chi_squareds()
# loop over the eydata and rescale
for n in range(len(r)): self["scale_eydata"][n] *= _n.sqrt(r[n])
# the fit is no longer valid
self.clear_results()
# replot
if self['autoplot']: self.plot()
return self | Rescales the error so the next fit will give reduced chi squareds of 1.
Each data set will be scaled independently, and you may wish to run
this a few times until it converges. | entailment |
def plot(self, **kwargs):
"""
This will plot the data (with error) for inspection.
Setting self.figures to a figure instance or list of figure instances
will override the creation of new figures. If you specify
a list, its length had better be at least as large as the
number of data sets.
kwargs will update the settings
"""
# Make sure there is data to plot.
if len(self._set_xdata)==0 or len(self._set_ydata)==0: return self
# Make sure the figures is a list
if not self.figures == None and not type(self.figures) == list:
self.figures = [self.figures]
# Get the trimmed and full processed data
xts, yts, eyts = self.get_processed_data()
xas, yas, eyas = self.get_processed_data(do_trim=False)
# update settings
for k in kwargs: self[k] = kwargs[k]
# Calculate all studentized residuals
if len(self.f) > 0: rt = self.studentized_residuals()
# make a new figure for each data set
for n in range(len(self._set_xdata)):
xt = xts[n]
xa = xas[n]
yt = yts[n]
ya = yas[n]
eyt = eyts[n]
eya = eyas[n]
#ext = exts[n]
eyt = eyts[n]
# get the next figure
if self.figures == None: fig = _p.figure(self['first_figure']+n)
else: fig = self.figures[n]
# turn off interactive mode and clear the figure
_p.ioff()
fig.clear()
# set up two axes. One for data and one for residuals.
a1 = fig.add_subplot(211) # Residuals
a2 = fig.add_subplot(212, sharex=a1) # Data
a1.set_position([0.15, 0.72, 0.75, 0.15])
a2.set_position([0.15, 0.10, 0.75, 0.60])
# set the scales
a1.set_xscale(self['xscale'][n])
a2.set_xscale(self['xscale'][n])
a2.set_yscale(self['yscale'][n])
# Get the function xdata
fxa = self._get_xdata_for_function(n,xa)
fxt = self._get_xdata_for_function(n,xt)
# get the values to subtract from ydata if subtracting the background
if self['subtract_bg'][n] and not self.bg[n] is None:
# if we have a fit, use that for the background
if self.results: p = self.results[0]
else: p = self._pguess
# Get the background data
d_ya = self._evaluate_bg(n, xa, p)
d_fya = self._evaluate_bg(n, fxa, p)
d_yt = self._evaluate_bg(n, xt, p)
d_fyt = self._evaluate_bg(n, fxt, p)
# Otherwise just make some zero arrays
else:
d_ya = 0*xa
d_fya = 0*fxa
d_yt = 0*xt
d_fyt = 0*fxt
# PLOT DATA FIRST
# If we're supposed to, add the "all" data and function
if self['plot_all_data'][n]:
# Make it faint.
style_data = dict(self['style_data' ][n]); style_data ['alpha'] = 0.3
if self['plot_errors'][n]: a2.errorbar(xa, ya-d_ya, eya, zorder=5, **style_data)
else: a2.plot (xa, ya-d_ya, zorder=5, **style_data)
# add the trimmed data
if self['plot_errors'][n]: a2.errorbar(xt, yt-d_yt, eyt, zorder=7, **self['style_data'][n])
else: a2.plot( xt, yt-d_yt, zorder=7, **self['style_data'][n])
# Zoom on just the data for now
_s.tweaks.auto_zoom(axes=a2, draw=False)
# PLOT FUNCTIONS
if n < len(self.f): # If there are any functions to plot
# Plot the GUESS under the fit
if self['plot_guess'][n]:
# FULL GUESS
if self['plot_all_data'][n]:
# Make it faint.
style_guess = dict(self['style_guess'][n]); style_guess['alpha'] = 0.3
# FULL background GUESS
if self['plot_bg'][n] and self.bg[n] is not None:
bg_gya = self._evaluate_bg(n, fxa, self._pguess)
a2.plot(fxa, bg_gya-d_fya, zorder=9, **style_guess)
# FULL guess
gya = self._evaluate_f (n, fxa, self._pguess)
a2.plot(fxa, gya-d_fya, zorder=9, **style_guess)
# Update the trimmed datas so that the points line up
[fxt] = _s.fun.trim_data_uber([fxt], [fxt>=min(xt), fxt<=max(xt)])
# TRIMMED GUESS BACKGROUND
# TRIMMED guess background curve
if self['plot_bg'][n] and self.bg[n] is not None:
bg_gyt = self._evaluate_bg(n, fxt, self._pguess)
a2.plot(fxt, bg_gyt-d_fyt, zorder=9, **self['style_guess'][n])
# TRIMMED main guess curve
gyt = self._evaluate_f (n, fxt, self._pguess)
a2.plot(fxt, gyt-d_fyt, zorder=9, **self['style_guess'][n])
# Plot the FIT if there is one
if not self.results == None:
# FULL FIT
if self['plot_all_data'][n]:
# Make it faint.
style_fit = dict(self['style_fit'][n]); style_fit['alpha'] = 0.3
# FULL background fit
if self['plot_bg'][n] and self.bg[n] is not None:
bg_fya = self._evaluate_bg(n, fxa, self.results[0])
a2.plot(fxa, bg_fya-d_fya, zorder=10, **style_fit)
# FULL fit
fya = self._evaluate_f (n, fxa, self.results[0])
a2.plot(fxa, fya-d_fya, zorder=10, **style_fit)
# Update the trimmed datas so that the points line up
[fxt] = _s.fun.trim_data_uber([fxt], [fxt>=min(xt), fxt<=max(xt)])
# TRIMMED FIT BACKGROUND
if self['plot_bg'][n] and self.bg[n] is not None:
bg_fyt = self._evaluate_bg(n, fxt, self.results[0])
a2.plot(fxt, bg_fyt-d_fyt, zorder=10, **self['style_fit'][n])
# TRIMMED main curve
fyt = self._evaluate_f(n, fxt, self.results[0])
a2.plot(fxt, fyt-d_fyt, zorder=10, **self['style_fit'][n])
if self['plot_guess_zoom'][n]: _s.tweaks.auto_zoom(axes=a2, draw=False)
# plot the residuals only if there are functions defined
if len(self.f):
# If we're supposed to also plot all the data, we have to
# Manually calculate the residuals. Clunky, I know.
if self['plot_all_data'][n]:
# Figure out what guy to use for the residuals
if self.results is None:
p = self._pguess
style = style_guess
else:
p = self.results[0]
style = style_fit
# Calculate them
ra = (ya-self._evaluate_f(n, xa, p))/eya
# style_data, style_guess, and style_fit should already be faint
a1.errorbar(xa, ra, _n.ones(len(ra)), **style_data)
# Put the line on top
a1.plot([min(xa), max(xa)], [0,0], **style)
# Figure out what style to use for the line
if self.results is None: style = self['style_guess'][n]
else: style = self['style_fit' ][n]
# Main residuals plot
a1.errorbar (xt, rt[n], _n.ones(len(xt)), **self['style_data'][n])
a1.plot([min(xt), max(xt)], [0,0], **style)
# Tidy up
yticklabels = a1.get_yticklabels()
for m in range(2,len(yticklabels)-2): yticklabels[m].set_visible(False)
for m in a1.get_xticklabels(): m.set_visible(False)
# Add labels to the axes
if self['xlabel'][n] is None: a2.set_xlabel('xdata['+str(n)+']')
else: a2.set_xlabel(self['xlabel'][n])
if self['ylabel'][n] is None:
ylabel='ydata['+str(n)+']'
if self['subtract_bg'][n] and self.bg[n] is not None:
ylabel=ylabel+' - bg['+str(n)+']'
a2.set_ylabel(ylabel)
else: a2.set_ylabel(self['ylabel'][n])
a1.set_ylabel('Studentized\nResiduals')
# Assemble the title
wrap = 80
indent = ' '
# Include the function names if available
if n < len(self.f):
t = _textwrap.fill('Function ('+str(n)+'/'+str(len(self._fnames)-1)+'): y = '+self._fnames[n], wrap, subsequent_indent=indent)
else:
t = "No functions defined. Use set_functions()."
if len(self._cnames):
t1 = "Constants: "
for i in range(len(self._cnames)):
t1 = t1 + self._cnames[i] + "={:G}, ".format(self._constants[i])
t = t + '\n' + _textwrap.fill(t1, wrap, subsequent_indent=indent)
if self.results and not self.results[1] is None:
t1 = "Fit: "
for i in range(len(self._pnames)):
t1 = t1 + self._pnames[i] + "={:s}, ".format(self._format_value_error(self.results[0][i], _n.sqrt(self.results[1][i][i]), '$\pm$'))
t1 = t1 + '$\chi^2_r$={} ({} DOF)'.format(
self._format_value_error(self.reduced_chi_squared(), _n.sqrt(_n.divide(2.0,self.degrees_of_freedom())), '$\pm$'),
int(self.degrees_of_freedom()))
t = t + '\n' + _textwrap.fill(t1, wrap, subsequent_indent=indent)
elif self.results:
t1 = "Fit did not converge: "
for i in range(len(self._pnames)):
t1 = t1 + self._pnames[i] + "={:8G}$, "
t = t + '\n' + _textwrap.fill(t1, wrap, subsequent_indent=indent)
a1.set_title(t, fontsize=10, ha='left', position=(0,1))
# turn back to interactive and show the plots.
_p.ion()
if self.figures == None:
_p.draw()
_p.show()
# End of new figure for each data set loop
return self | This will plot the data (with error) for inspection.
Setting self.figures to a figure instance or list of figure instances
will override the creation of new figures. If you specify
a list, its length had better be at least as large as the
number of data sets.
kwargs will update the settings | entailment |
def _get_xdata_for_function(self, n, xdata):
"""
Generates the x-data for plotting the function.
Parameters
----------
n
Which data set we're using
xdata
Data set upon which to base this
Returns
-------
float
"""
# Use the xdata itself for the function
if self['fpoints'][n] in [None, 0]: return _n.array(xdata)
# Otherwise, generate xdata with the number of fpoints
# do exponential ranging if xscale is log
if self['xscale'][n] == 'log':
return _n.logspace(_n.log10(min(xdata)), _n.log10(max(xdata)),
self['fpoints'][n], True, 10.0)
# otherwise do linear spacing
else:
return _n.linspace(min(xdata), max(xdata), self['fpoints'][n]) | Generates the x-data for plotting the function.
Parameters
----------
n
Which data set we're using
xdata
Data set upon which to base this
Returns
-------
float | entailment |
def trim(self, n='all', x=True, y=True):
"""
This will set xmin and xmax based on the current zoom-level of the
figures.
n='all' Which figure to use for setting xmin and xmax.
'all' means all figures. You may also specify a list.
x=True Trim the x-range
y=True Trim the y-range
"""
if len(self._set_xdata)==0 or len(self._set_ydata)==0:
self._error("No data. Please use set_data() and plot() prior to trimming.")
return
if _s.fun.is_a_number(n): n = [n]
elif isinstance(n,str): n = list(range(len(self._set_xdata)))
# loop over the specified plots
for i in n:
try:
if x:
xmin, xmax = _p.figure(self['first_figure']+i).axes[1].get_xlim()
self['xmin'][i] = xmin
self['xmax'][i] = xmax
if y:
ymin, ymax = _p.figure(self['first_figure']+i).axes[1].get_ylim()
self['ymin'][i] = ymin
self['ymax'][i] = ymax
except:
self._error("Data "+str(i)+" is not currently plotted.")
# now show the update.
self.clear_results()
if self['autoplot']: self.plot()
return self | This will set xmin and xmax based on the current zoom-level of the
figures.
n='all' Which figure to use for setting xmin and xmax.
'all' means all figures. You may also specify a list.
x=True Trim the x-range
y=True Trim the y-range | entailment |
def untrim(self, n='all'):
"""
Removes xmin, xmax, ymin, and ymax.
Parameters
----------
n='all'
Which data set to perform this action upon. 'all' means all data
sets, or you can specify a list.
"""
if len(self._set_xdata)==0 or len(self._set_ydata)==0:
self._error("No data. Please use set_data() and plot() prior to zooming.")
return
if _s.fun.is_a_number(n): n = [n]
elif isinstance(n,str): n = list(range(len(self._set_xdata)))
# loop over the specified plots
for i in n:
self['xmin'][i] = None
self['xmax'][i] = None
self['ymin'][i] = None
self['ymax'][i] = None
# now show the update.
self.clear_results()
if self['autoplot']: self.plot()
return self | Removes xmin, xmax, ymin, and ymax.
Parameters
----------
n='all'
Which data set to perform this action upon. 'all' means all data
sets, or you can specify a list. | entailment |
def zoom(self, n='all', xfactor=2.0, yfactor=2.0):
"""
This will scale the chosen data set's plot range by the
specified xfactor and yfactor, respectively, and set the trim limits
xmin, xmax, ymin, ymax accordingly
Parameters
----------
n='all'
Which data set to perform this action upon. 'all' means all data
sets, or you can specify a list.
xfactor=2.0
Factor by which to scale the x range.
yfactor=2.0
Factor by which to scale the y range.
"""
if len(self._set_xdata)==0 or len(self._set_ydata)==0:
self._error("No data. Please use set_data() and plot() prior to zooming.")
return
# get the data
xdata, ydata, eydata = self.get_data()
if _s.fun.is_a_number(n): n = [n]
elif isinstance(n,str): n = list(range(len(xdata)))
# loop over the specified plots
for i in n:
fig = self['first_figure']+i
try:
xmin, xmax = _p.figure(fig).axes[1].get_xlim()
xc = 0.5*(xmin+xmax)
xs = 0.5*abs(xmax-xmin)
self['xmin'][i] = xc - xfactor*xs
self['xmax'][i] = xc + xfactor*xs
ymin, ymax = _p.figure(fig).axes[1].get_ylim()
yc = 0.5*(ymin+ymax)
ys = 0.5*abs(ymax-ymin)
self['ymin'][i] = yc - yfactor*ys
self['ymax'][i] = yc + yfactor*ys
except:
self._error("Data "+str(fig)+" is not currently plotted.")
# now show the update.
self.clear_results()
if self['autoplot']: self.plot()
return self | This will scale the chosen data set's plot range by the
specified xfactor and yfactor, respectively, and set the trim limits
xmin, xmax, ymin, ymax accordingly
Parameters
----------
n='all'
Which data set to perform this action upon. 'all' means all data
sets, or you can specify a list.
xfactor=2.0
Factor by which to scale the x range.
yfactor=2.0
Factor by which to scale the y range. | entailment |
def ginput(self, data_set=0, **kwargs):
"""
Pops up the figure for the specified data set.
Returns value from pylab.ginput().
kwargs are sent to pylab.ginput()
"""
# this will temporarily fix the deprecation warning
import warnings
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
_s.tweaks.raise_figure_window(data_set+self['first_figure'])
return _p.ginput(**kwargs) | Pops up the figure for the specified data set.
Returns value from pylab.ginput().
kwargs are sent to pylab.ginput() | entailment |
def _match_data_sets(x,y):
"""
Makes sure everything is the same shape. "Intelligently".
"""
# Handle the None for x or y
if x is None:
# If x is none, y can be either [1,2] or [[1,2],[1,2]]
if _fun.is_iterable(y[0]):
# make an array of arrays to match
x = []
for n in range(len(y)):
x.append(list(range(len(y[n]))))
else: x = list(range(len(y)))
if y is None:
# If x is none, y can be either [1,2] or [[1,2],[1,2]]
if _fun.is_iterable(x[0]):
# make an array of arrays to match
y = []
for n in range(len(x)):
y.append(list(range(len(x[n]))))
else: y = list(range(len(x)))
# At this point they should be matched, but may still be 1D
# Default behavior: if all elements are numbers in both, assume they match
if _fun.elements_are_numbers(x) and _fun.elements_are_numbers(y):
x = [x]
y = [y]
# Second default behavior: shared array [1,2,3], [[1,2,1],[1,2,1]] or vis versa
if _fun.elements_are_numbers(x) and not _fun.elements_are_numbers(y): x = [x]*len(y)
if _fun.elements_are_numbers(y) and not _fun.elements_are_numbers(x): y = [y]*len(x)
# Clean up any remaining Nones
for n in range(len(x)):
if x[n] is None: x[n] = list(range(len(y[n])))
if y[n] is None: y[n] = list(range(len(x[n])))
return x, y | Makes sure everything is the same shape. "Intelligently". | entailment |
def _match_error_to_data_set(x, ex):
    """
    Inflates ex to match the dimensionality of x, "intelligently".
    x is assumed to be a 2D array (a list of data sets).

    Returns ex as a list with one entry per data set in x, where each
    entry is either None or a list matching the length of x[n].
    """
    # Simplest case, ex is None or a number
    if not _fun.is_iterable(ex):

        # Just make a matched list of Nones
        if ex is None: ex = [ex]*len(x)

        # Make arrays of numbers (one constant error array per data set)
        if _fun.is_a_number(ex):
            value = ex # temporary storage
            ex = []
            for n in range(len(x)):
                ex.append([value]*len(x[n]))

    # Otherwise, ex is iterable

    # Default behavior: If the elements are all numbers and the length matches
    # that of the first x-array, assume this is meant to match all the x
    # data sets
    # NOTE(review): this heuristic is ambiguous when len(x) == len(x[0]);
    # in that case a per-data-set error list is replicated instead -- confirm.
    if _fun.elements_are_numbers(ex) and len(ex) == len(x[0]): ex = [ex]*len(x)

    # The user may specify a list of some iterable and some not. Assume
    # in this case that at least the lists are the same length
    for n in range(len(x)):
        # do nothing to the None's
        # Inflate single numbers to match
        if _fun.is_a_number(ex[n]): ex[n] = [ex[n]]*len(x[n])

    return ex
def complex_data(data, edata=None, draw=True, **kwargs):
    """
    Plots the imaginary vs real for complex data.

    Parameters
    ----------
    data
        Array of complex data
    edata=None
        Array of complex error bars
    draw=True
        Draw the plot after it's assembled?

    See spinmob.plot.xy.data() for additional optional keyword arguments.
    """
    _pylab.ioff()

    # generate the data the easy way: numpy can split a regular array
    # into real and imaginary parts in one shot
    try:
        rdata = _n.real(data)
        idata = _n.imag(data)
        if edata is None:
            erdata = None
            eidata = None
        else:
            erdata = _n.real(edata)
            eidata = _n.imag(edata)

    # generate the data the hard way: element by element (e.g. ragged
    # lists of data sets that numpy cannot broadcast).
    # NOTE(review): bare except swallows everything, including
    # KeyboardInterrupt -- consider narrowing to Exception.
    except:
        rdata = []
        idata = []
        if edata is None:
            erdata = None
            eidata = None
        else:
            erdata = []
            eidata = []

        for n in range(len(data)):
            rdata.append(_n.real(data[n]))
            idata.append(_n.imag(data[n]))

            if not edata is None:
                erdata.append(_n.real(edata[n]))
                eidata.append(_n.imag(edata[n]))

    # default complex-plane axis labels; user-supplied kwargs win
    if 'xlabel' not in kwargs: kwargs['xlabel'] = 'Real'
    if 'ylabel' not in kwargs: kwargs['ylabel'] = 'Imaginary'

    # x = real part, y = imaginary part; positionally eydata=eidata (y error)
    # and exdata=erdata (x error) per xy_data's signature
    xy_data(rdata, idata, eidata, erdata, draw=False, **kwargs)

    if draw:
        _pylab.ion()
        _pylab.draw()
        _pylab.show()
def complex_databoxes(ds, script='d[1]+1j*d[2]', escript=None, **kwargs):
    """
    Uses databoxes and specified script to generate data and send to
    spinmob.plot.complex_data()

    Parameters
    ----------
    ds
        List of databoxes
    script='d[1]+1j*d[2]'
        Complex-valued script for data array.
    escript=None
        Complex-valued script for error bars

    Returns the list of databoxes.

    See spinmob.plot.complex.data() for additional optional keyword arguments.
    See spinmob.data.databox.execute_script() for more information about scripts.
    """
    datas  = []
    labels = []

    # only build an error list when an error-bar script was supplied
    if escript is None: errors = None
    else:               errors = []

    # evaluate the scripts on each databox, labeling curves by file name
    for d in ds:
        datas.append(d(script))
        labels.append(_os.path.split(d.path)[-1])
        if not escript is None: errors.append(d(escript))

    complex_data(datas, errors, label=labels, **kwargs)

    # BUGFIX: previously returned None when draw=False, inconsistent with
    # the normal path below; always return the databoxes.
    if "draw" in kwargs and not kwargs["draw"]: return ds

    _pylab.ion()
    _pylab.draw()
    _pylab.show()
    return ds
def complex_files(script='d[1]+1j*d[2]', escript=None, paths=None, **kwargs):
    """
    Loads files and plots complex data in the real-imaginary plane.

    Parameters
    ----------
    script='d[1]+1j*d[2]'
        Complex-valued script for data array.
    escript=None
        Complex-valued script for error bars
    paths=None
        List of paths to open. None means use a dialog

    See spinmob.plot.complex.data() for additional optional keyword arguments.
    See spinmob.data.databox.execute_script() for more information about scripts.

    Common additional parameters
    ----------------------------
    filters="*.*"
        Set the file filters for the dialog.
    """
    ds = _data.load_multiple(paths=paths)
    if len(ds) == 0: return

    # default title is the directory the files came from
    if 'title' not in kwargs: kwargs['title'] = _os.path.split(ds[0].path)[0]

    # BUGFIX: escript was accepted and documented but silently dropped;
    # forward it so error bars are actually plotted.
    return complex_databoxes(ds, script=script, escript=escript, **kwargs)
def complex_function(f='1.0/(1+1j*x)', xmin=-1, xmax=1, steps=200, p='x', g=None, erange=False, **kwargs):
    """
    Evaluates and plots one or more complex-valued functions in the
    complex plane over the specified range.

    Parameters
    ----------
    f='1.0/(1+1j*x)'
        Complex-valued function or list of functions to plot.
        These can be string functions or single-argument python functions;
        additional globals can be supplied by g.
    xmin=-1, xmax=1, steps=200
        Range over which to plot and how many points to plot
    p='x'
        If using strings for functions, p is the independent parameter name.
    g=None
        Optional dictionary of extra globals. Try g=globals()!
    erange=False
        Use exponential spacing of the x data?

    See spinmob.plot.xy.data() for additional optional keyword arguments.
    """
    # Default the axis labels to the complex plane; caller-supplied
    # labels take precedence.
    opts = dict(xlabel='Real', ylabel='Imaginary')
    opts.update(kwargs)
    function(f, xmin, xmax, steps, p, g, erange,
             plotter=xy_data, complex_plane=True, draw=True, **opts)
def magphase_data(xdata, ydata, eydata=None, exdata=None, xscale='linear', mscale='linear', pscale='linear', mlabel='Magnitude', plabel='Phase', phase='degrees', figure='gcf', clear=1, draw=True, **kwargs):
    """
    Plots the magnitude and phase of complex ydata vs xdata.

    Parameters
    ----------
    xdata
        Real-valued x-axis data
    ydata
        Complex-valued y-axis data
    eydata=None
        Complex-valued y-error
    exdata=None
        Real-valued x-error
    xscale='linear'
        'log' or 'linear' scale of the x axis
    mscale='linear'
        'log' or 'linear' scale of the magnitude axis
    pscale='linear'
        'log' or 'linear' scale of the phase axis
    mlabel='Magnitude'
        y-axis label for magnitude plot
    plabel='Phase'
        y-axis label for phase plot
    phase='degrees'
        'degrees' or 'radians' for the phase axis
    figure='gcf'
        Plot on the specified figure instance or 'gcf' for current figure.
    clear=1
        Clear the figure?
    draw=True
        Draw the figure when complete?

    See spinmob.plot.xy.data() for additional optional keyword arguments.
    """
    _pylab.ioff()

    # set up the figure and axes: magnitude on top, phase below, shared x.
    # NOTE(review): if figure is an actual Figure instance (not 'gcf'),
    # f is never assigned and f.clear() raises NameError -- confirm
    # whether only 'gcf' is the supported usage.
    if figure == 'gcf': f = _pylab.gcf()
    if clear: f.clear()
    axes1 = _pylab.subplot(211)
    axes2 = _pylab.subplot(212,sharex=axes1)

    # Make sure the dimensionality of the data sets matches
    xdata, ydata = _match_data_sets(xdata, ydata)
    exdata = _match_error_to_data_set(xdata, exdata)
    eydata = _match_error_to_data_set(ydata, eydata)

    # convert to magnitude and phase
    m  = []
    p  = []
    em = []
    ep = []

    # Note this is a loop over data sets, not points.
    for l in range(len(ydata)):
        m.append(_n.abs(ydata[l]))
        p.append(_n.angle(ydata[l]))

        # get the mag - phase errors
        if eydata[l] is None:
            em.append(None)
            ep.append(None)
        else:
            # propagate real/imaginary errors into magnitude/phase errors
            # NOTE(review): presumably a first-order propagation mixing the
            # two error components through cos(phase) -- verify the formula.
            er = _n.real(eydata[l])
            ei = _n.imag(eydata[l])
            em.append(0.5*((er+ei) + (er-ei)*_n.cos(p[l])) )
            ep.append(0.5*((er+ei) - (er-ei)*_n.cos(p[l]))/m[l] )

        # convert to degrees (p[-1] is the phase array appended this pass)
        if phase=='degrees':
            p[-1] = p[-1]*180.0/_n.pi
            if not ep[l] is None:
                ep[l] = ep[l]*180.0/_n.pi

    # append the units to the phase axis label
    if phase=='degrees': plabel = plabel + " (degrees)"
    else:                plabel = plabel + " (radians)"

    # the shared x label belongs on the bottom axes only
    if 'xlabel' in kwargs: xlabel=kwargs.pop('xlabel')
    else:                  xlabel=''

    # a single ylabel makes no sense for two stacked plots; discard it
    if 'ylabel' in kwargs: kwargs.pop('ylabel')

    # only autoformat once, after the second (bottom) plot is drawn
    if 'autoformat' not in kwargs: kwargs['autoformat'] = True
    autoformat = kwargs['autoformat']
    kwargs['autoformat'] = False
    kwargs['xlabel'] = ''

    # magnitude on the top axes
    xy_data(xdata, m, em, exdata, ylabel=mlabel, axes=axes1, clear=0, xscale=xscale, yscale=mscale, draw=False, **kwargs)

    # phase on the bottom axes, restoring autoformat and the x label
    kwargs['autoformat'] = autoformat
    kwargs['xlabel'] = xlabel
    xy_data(xdata, p, ep, exdata, ylabel=plabel, axes=axes2, clear=0, xscale=xscale, yscale=pscale, draw=False, **kwargs)

    axes2.set_title('')

    if draw:
        _pylab.ion()
        _pylab.draw()
        _pylab.show()
def magphase_databoxes(ds, xscript=0, yscript='d[1]+1j*d[2]', eyscript=None, exscript=None, g=None, **kwargs):
    """
    Generates data from the given databoxes via the supplied scripts, then
    plots the complex magnitude and phase versus the x data.

    Parameters
    ----------
    ds
        List of databoxes
    xscript=0
        Script for x data
    yscript='d[1]+1j*d[2]'
        Script for y data
    eyscript=None
        Script for y error
    exscript=None
        Script for x error
    g=None
        Optional dictionary of globals for the scripts

    See spinmob.plot.magphase.data() for additional optional keyword arguments.
    See spinmob.data.databox.execute_script() for more information about scripts.
    """
    # Delegate to the generic databox plotter with the magnitude/phase
    # renderer selected.
    databoxes(ds, xscript=xscript, yscript=yscript, eyscript=eyscript,
              exscript=exscript, plotter=magphase_data, g=g, **kwargs)
def magphase_files(xscript=0, yscript='d[1]+1j*d[2]', eyscript=None, exscript=None, paths=None, g=None, **kwargs):
    """
    Loads a set of data files, builds data from the supplied scripts, and
    plots the y data's magnitude and phase versus the x data.

    Parameters
    ----------
    xscript=0
        Script for x data
    yscript='d[1]+1j*d[2]'
        Script for y data
    eyscript=None
        Script for y error
    exscript=None
        Script for x error
    paths=None
        List of paths to open.
    g=None
        Optional dictionary of globals for the scripts

    See spinmob.plot.magphase.data() for additional optional arguments.
    See spinmob.data.databox.execute_script() for more information about scripts.

    Common additional parameters
    ----------------------------
    filters="*.*"
        Set the file filters for the dialog.
    """
    # Loading and dispatch are handled by the generic file plotter; we
    # just select the magnitude/phase databox renderer.
    return files(xscript, yscript, eyscript, exscript,
                 plotter=magphase_databoxes, paths=paths, g=g, **kwargs)
def magphase_function(f='1.0/(1+1j*x)', xmin=-1, xmax=1, steps=200, p='x', g=None, erange=False, **kwargs):
    """
    Evaluates one or more functions over the specified range and plots
    their magnitude and phase.

    Parameters
    ----------
    f='1.0/(1+1j*x)'
        Complex-valued function or list of functions to plot.
        These can be string functions or single-argument python functions;
        additional globals can be supplied by g.
    xmin=-1, xmax=1, steps=200
        Range over which to plot and how many points to plot
    p='x'
        If using strings for functions, p is the independent parameter name.
    g=None
        Optional dictionary of extra globals. Try g=globals()!
    erange=False
        Use exponential spacing of the x data?

    See spinmob.plot.xy.data() for additional optional keyword arguments.
    """
    # The generic function plotter does the evaluation; we just select
    # the magnitude/phase renderer.
    function(f, xmin, xmax, steps, p, g, erange, plotter=magphase_data, **kwargs)
def realimag_data(xdata, ydata, eydata=None, exdata=None, xscale='linear', rscale='linear', iscale='linear', rlabel='Real', ilabel='Imaginary', figure='gcf', clear=1, draw=True, **kwargs):
    """
    Plots the real and imaginary parts of complex ydata vs xdata.

    Parameters
    ----------
    xdata
        Real-valued x-axis data
    ydata
        Complex-valued y-axis data
    eydata=None
        Complex-valued y-error
    exdata=None
        Real-valued x-error
    xscale='linear'
        'log' or 'linear' scale of the x axis
    rscale='linear'
        'log' or 'linear' scale of the real axis
    iscale='linear'
        'log' or 'linear' scale of the imaginary axis
    rlabel='Real'
        y-axis label for real value plot
    ilabel='Imaginary'
        y-axis label for imaginary value plot
    figure='gcf'
        Plot on the specified figure instance or 'gcf' for current figure.
    clear=1
        Clear the figure?
    draw=True
        Draw the figure when completed?

    See spinmob.plot.xy.data() for additional optional keyword arguments.
    """
    _pylab.ioff()

    # Make sure the dimensionality of the data sets matches
    xdata, ydata = _match_data_sets(xdata, ydata)
    exdata = _match_error_to_data_set(xdata, exdata)
    eydata = _match_error_to_data_set(ydata, eydata)

    # convert to real imag, and get error bars (loop over data sets)
    rdata  = []
    idata  = []
    erdata = []
    eidata = []
    for l in range(len(ydata)):
        rdata.append(_n.real(ydata[l]))
        idata.append(_n.imag(ydata[l]))

        if eydata[l] is None:
            erdata.append(None)
            eidata.append(None)
        else:
            erdata.append(_n.real(eydata[l]))
            eidata.append(_n.imag(eydata[l]))

    # set up the figure and axes: real part on top, imaginary below.
    # NOTE(review): if figure is an actual Figure instance (not 'gcf'),
    # f is never assigned and f.clear() raises NameError -- confirm
    # whether only 'gcf' is the supported usage.
    if figure == 'gcf': f = _pylab.gcf()
    if clear: f.clear()
    axes1 = _pylab.subplot(211)
    axes2 = _pylab.subplot(212,sharex=axes1)

    # the shared x label belongs on the bottom axes only
    if 'xlabel' in kwargs : xlabel=kwargs.pop('xlabel')
    else: xlabel=''

    # a single ylabel makes no sense for two stacked plots; discard it
    if 'ylabel' in kwargs : kwargs.pop('ylabel')
    if 'tall' not in kwargs: kwargs['tall'] = False

    # only autoformat once, after the second (bottom) plot is drawn
    if 'autoformat' not in kwargs: kwargs['autoformat'] = True
    autoformat = kwargs['autoformat']
    kwargs['autoformat'] = False
    kwargs['xlabel'] = ''

    # real part on the top axes
    xy_data(xdata, rdata, eydata=erdata, exdata=exdata, ylabel=rlabel, axes=axes1, clear=0, xscale=xscale, yscale=rscale, draw=False, **kwargs)

    # imaginary part on the bottom axes, restoring autoformat and x label
    kwargs['autoformat'] = autoformat
    kwargs['xlabel'] = xlabel
    xy_data(xdata, idata, eydata=eidata, exdata=exdata, ylabel=ilabel, axes=axes2, clear=0, xscale=xscale, yscale=iscale, draw=False, **kwargs)

    axes2.set_title('')

    if draw:
        _pylab.ion()
        _pylab.draw()
        _pylab.show()
def realimag_databoxes(ds, xscript=0, yscript="d[1]+1j*d[2]", eyscript=None, exscript=None, g=None, **kwargs):
    """
    Generates data from the given databoxes via the supplied scripts, then
    plots the real and imaginary y data versus the x data.

    Parameters
    ----------
    ds
        List of databoxes
    xscript=0
        Script for x data
    yscript='d[1]+1j*d[2]'
        Script for y data
    eyscript=None
        Script for y error
    exscript=None
        Script for x error
    g=None
        Optional dictionary of globals for the scripts

    See spinmob.plot.realimag.data() for additional optional keyword arguments.
    See spinmob.data.databox.execute_script() for more information about scripts.
    """
    # Delegate to the generic databox plotter with the real/imaginary
    # renderer selected.
    databoxes(ds, xscript=xscript, yscript=yscript, eyscript=eyscript,
              exscript=exscript, plotter=realimag_data, g=g, **kwargs)
def realimag_files(xscript=0, yscript="d[1]+1j*d[2]", eyscript=None, exscript=None, paths=None, g=None, **kwargs):
    """
    Loads a set of data files, builds data from the supplied scripts, and
    plots the y data's real and imaginary parts versus the x data.

    Parameters
    ----------
    xscript=0
        Script for x data
    yscript='d[1]+1j*d[2]'
        Script for y data
    eyscript=None
        Script for y error
    exscript=None
        Script for x error
    paths=None
        List of paths to open.
    g=None
        Optional dictionary of globals for the scripts

    See spinmob.plot.realimag.data() for additional optional arguments.
    See spinmob.data.databox.execute_script() for more information about scripts.

    Common additional parameters
    ----------------------------
    filters="*.*"
        Set the file filters for the dialog.
    """
    # Loading and dispatch are handled by the generic file plotter; we
    # just select the real/imaginary databox renderer.
    return files(xscript, yscript, eyscript, exscript,
                 plotter=realimag_databoxes, paths=paths, g=g, **kwargs)
def realimag_function(f='1.0/(1+1j*x)', xmin=-1, xmax=1, steps=200, p='x', g=None, erange=False, **kwargs):
    """
    Evaluates one or more functions over the specified range and plots
    their real and imaginary parts.

    Parameters
    ----------
    f='1.0/(1+1j*x)'
        Complex-valued function or list of functions to plot.
        These can be string functions or single-argument python functions;
        additional globals can be supplied by g.
    xmin=-1, xmax=1, steps=200
        Range over which to plot and how many points to plot
    p='x'
        If using strings for functions, p is the independent parameter name.
    g=None
        Optional dictionary of extra globals. Try g=globals()!
    erange=False
        Use exponential spacing of the x data?

    See spinmob.plot.xy.data() for additional optional keyword arguments.
    """
    # The generic function plotter does the evaluation; we just select
    # the real/imaginary renderer.
    function(f, xmin, xmax, steps, p, g, erange, plotter=realimag_data, **kwargs)
def xy_data(xdata, ydata, eydata=None, exdata=None, label=None, xlabel='', ylabel='', \
            title='', shell_history=0, xshift=0, yshift=0, xshift_every=1, yshift_every=1, \
            coarsen=0, style=None, clear=True, axes=None, xscale='linear', yscale='linear', grid=False, \
            legend='best', legend_max=20, autoformat=True, autoformat_window=True, tall=False, draw=True, **kwargs):
    """
    Plots specified data.

    Parameters
    ----------
    xdata, ydata
        Arrays (or arrays of arrays) of data to plot
    eydata=None, exdata=None
        Arrays of x and y errorbar values
    label=None
        String or array of strings for the line labels
    xlabel=''
        Label for the x-axis
    ylabel=''
        Label for the y-axis
    title=''
        Title for the axes; set to None to have nothing.
    shell_history=0
        How many commands from the pyshell history to include with the title
    xshift=0, yshift=0
        Progressive shifts on the data, to make waterfall plots
    xshift_every=1
        Perform the progressive shift every 1 or n'th line.
    yshift_every=1
        Perform the progressive shift every 1 or n'th line.
    coarsen=0
        Optional coarsening of the data (see spinmob coarsen_array).
    style=None
        Style cycle object.
    clear=True
        If no axes are specified (see below), clear the figure, otherwise clear just the axes.
    axes=None
        Which matplotlib axes to use, or "gca" for the current axes
    xscale='linear', yscale='linear'
        'linear' or 'log' x and y axis scales.
    grid=False
        Should we draw a grid on the axes?
    legend='best'
        Where to place the legend (see pylab.legend() for options)
        Set this to None to ignore the legend.
    legend_max=20
        Number of legend entries before it's truncated with '...'
    autoformat=True
        Should we format the figure for printing?
    autoformat_window=True
        Should we resize and reposition the window when autoformatting?
    tall=False
        Should the format be tall?
    draw=True
        Whether or not to draw the plot after plotting.

    Returns the matplotlib axes used for plotting.

    See matplotlib's errorbar() function for additional optional keyword arguments.
    """
    _pylab.ioff()

    # Make sure the dimensionality of the data sets matches
    xdata, ydata = _match_data_sets(xdata, ydata)
    exdata = _match_error_to_data_set(xdata, exdata)
    eydata = _match_error_to_data_set(ydata, eydata)

    # check that the labels is a list of strings of the same length
    if not _fun.is_iterable(label): label = [label]*len(xdata)
    while len(label) < len(ydata): label.append(label[0])

    # truncate the legend if there are too many entries
    if len(label) > legend_max:
        label[legend_max-2] = '...'
        for n in range(legend_max-1, len(label)-1): label[n] = "_nolegend_"

    # clear the figure?
    if clear and not axes: _pylab.gcf().clear() # axes cleared later

    # setup axes
    if axes=="gca" or axes is None: axes = _pylab.gca()

    # if we're clearing the axes
    if clear: axes.clear()

    # set the current axes
    _pylab.axes(axes)

    # now loop over the list of data in xdata and ydata
    for n in range(0, len(xdata)):

        # get the label
        if label[n]=='_nolegend_': l = '_nolegend_'
        else:                      l = str(n)+": "+str(label[n])

        # calculate the x and y progressive shifts.
        # BUGFIX: use floor division so the shift steps only every
        # xshift_every'th / yshift_every'th line, as documented; Python 3
        # true division made the shift continuous.
        dx = xshift*(n//xshift_every)
        dy = yshift*(n//yshift_every)

        # if we're supposed to coarsen the data, do so.
        x  = _fun.coarsen_array(xdata[n],  coarsen)
        y  = _fun.coarsen_array(ydata[n],  coarsen)
        ey = _fun.coarsen_array(eydata[n], coarsen, 'quadrature')
        ex = _fun.coarsen_array(exdata[n], coarsen, 'quadrature')

        # update the style
        if not style is None: kwargs.update(next(style))
        axes.errorbar(x+dx, y+dy, label=l, yerr=ey, xerr=ex, **kwargs)

    _pylab.xscale(xscale)
    _pylab.yscale(yscale)
    if legend: axes.legend(loc=legend)
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)

    # for some arguments there should be no title.
    if title in [None, False, 0]:
        axes.set_title('')

    # add the commands to the title
    else:
        title = str(title)
        history = _fun.get_shell_history()
        for n in range(0, min(shell_history, len(history))):
            title = title + "\n" + history[n].split('\n')[0].strip()

        title = title + '\nPlot created ' + _time.asctime()
        axes.set_title(title)

    if grid: _pylab.grid(True)

    if autoformat:
        _pt.format_figure(draw=False, modify_geometry=autoformat_window)
        _pt.auto_zoom(axes=axes, draw=False)

    # update the canvas
    if draw:
        _pylab.ion()
        _pylab.draw()
        _pylab.show()

    return axes
def xy_databoxes(ds, xscript=0, yscript='d[1]', eyscript=None, exscript=None, g=None, **kwargs):
    """
    Generates x/y data from the supplied databoxes and scripts, then plots
    y versus x.

    Parameters
    ----------
    ds
        List of databoxes
    xscript=0
        Script for x data
    yscript='d[1]'
        Script for y data
    eyscript=None
        Script for y error
    exscript=None
        Script for x error
    g=None
        Optional dictionary of globals for the scripts

    See spinmob.plot.xy.data() for additional optional keyword arguments.
    See spinmob.data.databox.execute_script() for more information about scripts.
    """
    # Delegate to the generic databox plotter with the plain x-y renderer.
    databoxes(ds, xscript=xscript, yscript=yscript, eyscript=eyscript,
              exscript=exscript, plotter=xy_data, g=g, **kwargs)
def xy_files(xscript=0, yscript='d[1]', eyscript=None, exscript=None, paths=None, g=None, **kwargs):
    """
    Loads a set of data files, builds x/y data from the supplied scripts,
    and plots y versus x.

    Parameters
    ----------
    xscript=0
        Script for x data
    yscript='d[1]'
        Script for y data
    eyscript=None
        Script for y error
    exscript=None
        Script for x error
    paths=None
        List of paths to open.
    g=None
        Optional dictionary of globals for the scripts

    See spinmob.plot.xy.data() for additional optional arguments.
    See spinmob.data.databox.execute_script() for more information about scripts.

    Common additional parameters
    ----------------------------
    filters="*.*"
        Set the file filters for the dialog.
    """
    # Loading and dispatch are handled by the generic file plotter; we
    # just select the x-y databox renderer.
    return files(xscript, yscript, eyscript, exscript,
                 plotter=xy_databoxes, paths=paths, g=g, **kwargs)
def xy_function(f='sin(x)', xmin=-1, xmax=1, steps=200, p='x', g=None, erange=False, **kwargs):
    """
    Evaluates and plots one or more functions over the specified range.

    Parameters
    ----------
    f='sin(x)'
        Function or list of functions to plot.
        These can be string functions or single-argument python functions;
        additional globals can be supplied by g.
    xmin=-1, xmax=1, steps=200
        Range over which to plot and how many points to plot
    p='x'
        If using strings for functions, p is the independent parameter name.
    g=None
        Optional dictionary of extra globals. Try g=globals()!
    erange=False
        Use exponential spacing of the x data?

    See spinmob.plot.xy.data() for additional optional keyword arguments.
    """
    # The generic function plotter does the evaluation; we just select
    # the plain x-y renderer.
    function(f, xmin, xmax, steps, p, g, erange, plotter=xy_data, **kwargs)
def databoxes(ds, xscript=0, yscript=1, eyscript=None, exscript=None, g=None, plotter=xy_data, transpose=False, **kwargs):
    """
    Plots the listed databox objects with the specified scripts.

    ds         list of databoxes (a single databox is also accepted)
    xscript    script for x data
    yscript    script for y data
    eyscript   script for y error
    exscript   script for x error
    plotter    function used to do the plotting
    transpose  applies databox.transpose() prior to plotting
    g          optional dictionary of globals for the supplied scripts

    **kwargs are sent to plotter()
    """
    if not _fun.is_iterable(ds): ds = [ds]

    # Default the axis labels to the scripts that generated the data.
    if 'xlabel' not in kwargs: kwargs['xlabel'] = str(xscript)
    if 'ylabel' not in kwargs: kwargs['ylabel'] = str(yscript)

    # First make sure everything is a list of scripts (or None's)
    if not _fun.is_iterable(xscript):  xscript  = [xscript]
    if not _fun.is_iterable(yscript):  yscript  = [yscript]
    if not _fun.is_iterable(exscript): exscript = [exscript]
    if not _fun.is_iterable(eyscript): eyscript = [eyscript]

    # Pad each error script so it matches shape with its data script.
    if len(exscript) < len(xscript):
        for n in range(len(xscript)-1): exscript.append(exscript[0])
    if len(eyscript) < len(yscript):
        for n in range(len(yscript)-1): eyscript.append(eyscript[0])

    # Make xscript and exscript match in shape with yscript and eyscript
    if len(xscript) < len(yscript):
        for n in range(len(yscript)-1):
            xscript.append(xscript[0])
            exscript.append(exscript[0])

    # check for the reverse possibility
    if len(yscript) < len(xscript):
        for n in range(len(xscript)-1):
            yscript.append(yscript[0])
            eyscript.append(eyscript[0])

    # Replace None scripts: plot the other script against its index.
    for n in range(len(xscript)):
        if xscript[n] is None and yscript[n] is None:
            print("Two None scripts? But why?")
            return

        if xscript[n] is None:
            if isinstance(yscript[n], str): xscript[n] = 'range(len('+yscript[n]+'))'
            else:                           xscript[n] = 'range(len(c('+str(yscript[n])+')))'

        if yscript[n] is None:
            if isinstance(xscript[n], str): yscript[n] = 'range(len('+xscript[n]+'))'
            else:                           yscript[n] = 'range(len(c('+str(xscript[n])+')))'

    xdatas  = []
    ydatas  = []
    exdatas = []
    eydatas = []
    labels  = []

    # Loop over all the data boxes
    for i in range(len(ds)):

        # Reset the default globals (n counts up, m counts down)
        all_globals = dict(n=i, m=len(ds)-1-i)

        # Update them with the user-specified globals
        if g is not None: all_globals.update(g)

        # For ease of coding
        d = ds[i]

        # Take the transpose if necessary
        if transpose: d = d.transpose()

        # Generate the x-data; returns a list of outputs, one for each xscript
        xdata = d(xscript, all_globals)

        # Loop over each xdata, appending to the master list, and generating a label
        for n in range(len(xdata)):
            xdatas.append(xdata[n])
            if len(xdata) > 1: labels.append(str(n)+": "+_os.path.split(d.path)[-1])
            else:              labels.append(_os.path.split(d.path)[-1])

        # Append the other data sets to their master lists
        for y in d( yscript, all_globals): ydatas.append(y)
        for x in d(exscript, all_globals): exdatas.append(x)
        for y in d(eyscript, all_globals): eydatas.append(y)

    # A user-supplied label overrides the auto-generated ones.
    if "label" in kwargs: labels = kwargs.pop("label")

    plotter(xdatas, ydatas, eydatas, exdatas, label=labels, **kwargs)
ds list of databoxes
xscript script for x data
yscript script for y data
eyscript script for y error
exscript script for x error
plotter function used to do the plotting
transpose applies databox.transpose() prior to plotting
g optional dictionary of globals for the supplied scripts
**kwargs are sent to plotter() | entailment |
def files(xscript=0, yscript=1, eyscript=None, exscript=None, g=None, plotter=xy_databoxes, paths=None, **kwargs):
    """
    Loads a bunch of data files, generates data based on the supplied
    scripts, and plots this data using the specified databox plotter.

    xscript, yscript, eyscript, exscript   scripts to generate x, y, and errors
    g                                      optional dictionary of globals
    paths                                  list of paths to open

    optional: filters="*.*" to set the file filters for the dialog.

    **kwargs are sent to plotter()

    Returns the list of loaded databoxes (or None if nothing was loaded).
    """
    # Pop the loader-specific options so they aren't forwarded to the plotter.
    delimiter = kwargs.pop('delimiter', None)
    filters   = kwargs.pop('filters', '*.*')

    ds = _data.load_multiple(paths=paths, delimiter=delimiter, filters=filters)
    if ds is None or len(ds) == 0: return

    # generate a default title (the directory)
    if 'title' not in kwargs: kwargs['title'] = _os.path.split(ds[0].path)[0]

    # run the databox plotter
    plotter(ds, xscript=xscript, yscript=yscript, eyscript=eyscript, exscript=exscript, g=g, **kwargs)

    return ds
scripts, and then plot this data using the specified databox plotter.
xscript, yscript, eyscript, exscript scripts to generate x, y, and errors
g optional dictionary of globals
optional: filters="*.*" to set the file filters for the dialog.
**kwargs are sent to plotter() | entailment |
def image_data(Z, X=[0,1.0], Y=[0,1.0], aspect=1.0, zmin=None, zmax=None, clear=1, clabel='z', autoformat=True, colormap="Last Used", shell_history=0, **kwargs):
    """
    Generates an image plot.

    Parameters
    ----------
    Z
        2-d array of z-values
    X=[0,1.0], Y=[0,1.0]
        1-d arrays of x- and y-values (only the first and last element of
        each are used, to set the extent of the image)
    aspect=1.0
        Aspect ratio passed to _pt.image_set_aspect()
    zmin=None, zmax=None
        Color-scale limits passed to _pt.image_set_clim()
    clear=1
        Clear the current figure before plotting?
    clabel='z'
        Label for the colorbar
    autoformat=True
        Run _pt.image_format_figure() on the figure afterwards?
    shell_history=0
        How many lines of shell history to prepend to the title
    colormap="Last Used"
        Colormap name handed to _pt.image_colormap(); any falsy value
        skips the colormap step entirely.

    See matplotlib's imshow() for additional optional arguments.
    """
    # The active colormap widget is stored module-wide so it can be closed
    # and replaced on the next call.
    global _colormap

    # Set interpolation to something more relevant for every day science
    if not 'interpolation' in kwargs.keys(): kwargs['interpolation'] = 'nearest'

    # Suspend interactive drawing while we rebuild the figure.
    _pylab.ioff()

    fig = _pylab.gcf()
    if clear:
        fig.clear()
        _pylab.axes()

    # generate the 3d axes
    X = _n.array(X)
    Y = _n.array(Y)
    Z = _n.array(Z)

    # assume X and Y are the bin centers and figure out the bin widths
    x_width = abs(float(X[-1] - X[0])/(len(Z[0])-1))
    y_width = abs(float(Y[-1] - Y[0])/(len(Z)-1))

    # reverse the Z's
    # Transpose and reverse so the array rows map onto the y-axis the way
    # imshow expects (imshow draws row 0 at the top).
    Z = Z.transpose()
    Z = Z[-1::-1]

    # get rid of the label and title kwargs (they are applied to the axes
    # below rather than forwarded to imshow)
    xlabel=''
    ylabel=''
    title =''
    if 'xlabel' in kwargs: xlabel = kwargs.pop('xlabel')
    if 'ylabel' in kwargs: ylabel = kwargs.pop('ylabel')
    if 'title'  in kwargs: title  = kwargs.pop('title')

    # Extend the extent by half a bin on each side so pixels are centered
    # on the supplied coordinates.
    _pylab.imshow(Z, extent=[X[0]-x_width/2.0, X[-1]+x_width/2.0,
                             Y[0]-y_width/2.0, Y[-1]+y_width/2.0], **kwargs)
    cb = _pylab.colorbar()
    _pt.image_set_clim(zmin,zmax)
    _pt.image_set_aspect(aspect)
    cb.set_label(clabel)

    a = _pylab.gca()
    a.set_xlabel(xlabel)
    a.set_ylabel(ylabel)

    #_pt.close_sliders()
    #_pt.image_sliders()

    # title: optionally prepend recent shell-history lines, then stamp the
    # creation time.
    history = _fun.get_shell_history()
    for n in range(0, min(shell_history, len(history))):
        title = title + "\n" + history[n].split('\n')[0].strip()

    title = title + '\nPlot created ' + _time.asctime()
    a.set_title(title.strip())

    if autoformat: _pt.image_format_figure(fig)

    # Re-enable interactive mode and push the result to the screen.
    _pylab.ion()
    _pylab.show()

    #_pt.raise_figure_window()
    #_pt.raise_pyshell()
    _pylab.draw()

    # add the color sliders, closing any previously-open colormap widget
    if colormap:
        if _colormap: _colormap.close()
        _colormap = _pt.image_colormap(colormap, image=a.images[0])
Parameters
----------
Z
2-d array of z-values
X=[0,1.0], Y=[0,1.0]
1-d array of x-values (only the first and last element are used)
See matplotlib's imshow() for additional optional arguments. | entailment |
def image_function(f='sin(5*x)*cos(5*y)', xmin=-1, xmax=1, ymin=-1, ymax=1, xsteps=100, ysteps=100, p='x,y', g=None, **kwargs):
    """
    Plots a 2-d function over the specified range.

    Parameters
    ----------
    f='sin(5*x)*cos(5*y)'
        Takes two inputs and returns one value. Can also
        be a string function such as sin(x*y)
    xmin=-1, xmax=1, ymin=-1, ymax=1
        Range over which to generate/plot the data
    xsteps=100, ysteps=100
        How many points to plot on the specified range
    p='x,y'
        If using strings for functions, this is a string of parameters.
    g=None
        Optional additional globals. Try g=globals()!

    See spinmob.plot.image.data() for additional optional keyword arguments.
    """
    default_kwargs = dict(clabel=str(f), xlabel='x', ylabel='y')
    default_kwargs.update(kwargs)

    # Aggregate globals for evaluating string functions. Work on a copy so
    # the caller's dictionary is never mutated (the original modified g in
    # place). Entries in g take precedence over this module's globals.
    globs = dict(globals())
    if g: globs.update(g)

    if isinstance(f, str):
        f = eval('lambda ' + p + ': ' + f, globs)

    # generate the grid x and y coordinates
    xones = _n.linspace(1, 1, ysteps)
    x     = _n.linspace(xmin, xmax, xsteps)
    xgrid = _n.outer(xones, x)

    yones = _n.linspace(1, 1, xsteps)
    y     = _n.linspace(ymin, ymax, ysteps)
    ygrid = _n.outer(y, yones)

    # now get the z-grid
    try:
        # try it the fast numpy way. Add 0 to assure dimensions
        zgrid = f(xgrid, ygrid) + xgrid*0.0
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate.
        print("Notice: function is not rocking hardcore. Generating grid the slow way...")

        # manually loop over the data to generate the z-grid
        zgrid = []
        for ny in range(0, len(y)):
            zgrid.append([])
            for nx in range(0, len(x)):
                zgrid[ny].append(f(x[nx], y[ny]))

        zgrid = _n.array(zgrid)

    # now plot!
    image_data(zgrid.transpose(), x, y, **default_kwargs)
Parameters
----------
f='sin(5*x)*cos(5*y)'
Takes two inputs and returns one value. Can also
be a string function such as sin(x*y)
xmin=-1, xmax=1, ymin=-1, ymax=1
Range over which to generate/plot the data
xsteps=100, ysteps=100
How many points to plot on the specified range
p='x,y'
If using strings for functions, this is a string of parameters.
g=None
Optional additional globals. Try g=globals()!
See spinmob.plot.image.data() for additional optional keyword arguments. | entailment |
def image_file(path=None, zscript='self[1:]', xscript='[0,1]', yscript='d[0]', g=None, **kwargs):
    """
    Loads a data file and plots it with color. The data file must have
    columns of the same length!

    Parameters
    ----------
    path=None
        Path to data file.
    zscript='self[1:]'
        Determines how to get data from the columns
    xscript='[0,1]', yscript='d[0]'
        Determine the x and y arrays used for setting the axes bounds
    g=None
        Optional dictionary of globals for the scripts

    See spinmob.plot.image.data() for additional optional keyword arguments.
    See spinmob.data.databox.execute_script() for more information about scripts.
    """
    # Pop the loader-specific option so it isn't forwarded to the plotter.
    delimiter = kwargs.pop('delimiter', None)

    d = _data.load(paths=path, delimiter=delimiter)
    if d is None or len(d) == 0: return

    # allows the user to overwrite the defaults
    default_kwargs = dict(xlabel = str(xscript),
                          ylabel = str(yscript),
                          title  = d.path,
                          clabel = str(zscript))
    default_kwargs.update(kwargs)

    # get the data
    X = d(xscript, g)
    Y = d(yscript, g)
    Z = _n.array(d(zscript, g))

    # plot!
    image_data(Z, X, Y, **default_kwargs)
same length!
Parameters
----------
path=None
Path to data file.
zscript='self[1:]'
Determines how to get data from the columns
xscript='[0,1]', yscript='d[0]'
Determine the x and y arrays used for setting the axes bounds
g=None
Optional dictionary of globals for the scripts
See spinmob.plot.image.data() for additional optional keyword arguments.
See spinmob.data.databox.execute_script() for more information about scripts. | entailment |
def parametric_function(fx='sin(t)', fy='cos(t)', tmin=-1, tmax=1, steps=200, p='t', g=None, erange=False, **kwargs):
    """
    Plots the parametric function over the specified range.

    Parameters
    ----------
    fx='sin(t)', fy='cos(t)'
        Functions or (matching) lists of functions to plot;
        can be string functions or python functions taking one argument
    tmin=-1, tmax=1, steps=200
        Range over which to plot, and how many points to plot
    p='t'
        If using strings for functions, p is the parameter name
    g=None
        Optional dictionary of extra globals. Try g=globals()!
    erange=False
        Use exponential spacing of the t data?

    See spinmob.plot.xy.data() for additional optional keyword arguments.
    """
    # Aggregate globals for evaluating string functions. Use a copy so the
    # caller's dictionary is never mutated (the original modified g in
    # place). Entries in g take precedence over this module's globals.
    globs = dict(globals())
    if g: globs.update(g)

    # if the x-axis is a log scale, use erange
    if erange: r = _fun.erange(tmin, tmax, steps)
    else:      r = _n.linspace(tmin, tmax, steps)

    # make sure they're lists so we can loop over them
    if not isinstance(fy, (list, tuple)): fy = [fy]
    if not isinstance(fx, (list, tuple)): fx = [fx]

    # loop over the list of functions
    xdatas = []
    ydatas = []
    labels = []
    for fs in fx:
        if isinstance(fs, str):
            a = eval('lambda ' + p + ': ' + fs, globs)
            a.__name__ = fs
        else:
            a = fs

        xdatas.append([a(z) for z in r])
        labels.append(a.__name__)

    for n in range(len(fy)):
        fs = fy[n]
        if isinstance(fs, str):
            a = eval('lambda ' + p + ': ' + fs, globs)
            a.__name__ = fs
        else:
            a = fs

        ydatas.append([a(z) for z in r])

        # NOTE(review): assumes len(fy) <= len(fx); a longer fy raises
        # IndexError here, exactly as in the original.
        labels[n] = labels[n] + ', ' + a.__name__

    # plot!
    xy_data(xdatas, ydatas, label=labels, **kwargs)
Parameters
----------
fx='sin(t)', fy='cos(t)'
Functions or (matching) lists of functions to plot;
can be string functions or python functions taking one argument
tmin=-1, tmax=1, steps=200
Range over which to plot, and how many points to plot
p='t'
If using strings for functions, p is the parameter name
g=None
Optional dictionary of extra globals. Try g=globals()!
erange=False
Use exponential spacing of the t data?
See spinmob.plot.xy.data() for additional optional keyword arguments. | entailment |
def reset(self):
    """
    Restarts every style cycle from the beginning of its sequence.

    Returns self so calls can be chained.
    """
    # Rebuild the cycle iterator for each registered style key.
    self.iterators.update({key: _itertools.cycle(self[key]) for key in self})
    return self
def add_text(text, x=0.01, y=0.01, axes="gca", draw=True, **kwargs):
    """
    Places the supplied text on the axes at the specified position
    (in axes coordinates).

    **kwargs go to the axes.text() function.
    """
    # "gca" is a sentinel meaning "use the current axes".
    target = _pylab.gca() if axes == "gca" else axes
    target.text(x, y, text, transform=target.transAxes, **kwargs)
    if draw: _pylab.draw()
**kwargs go to the axes.text() function. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.