| sentence1 | sentence2 | label |
|---|---|---|
def save(self):
    '''Save all changes on this item (if any) back to Redmine.'''
    self._check_custom_fields()
    if not self._changes:
        return None
    for tag in self._remap_to_id:
        self._remap_tag_to_tag_id(tag, self._changes)
    # Check for custom handlers for tags
    for tag, field_type in self._field_type.items():
        try:
            raw_data = self._changes[tag]
        except KeyError:
            continue
        # Convert datetime type to the datetime string that Redmine expects
        if field_type == 'datetime':
            try:
                self._changes[tag] = raw_data.strftime('%Y-%m-%dT%H:%M:%S%z')
            except AttributeError:
                continue
        # Convert date type to the date string that Redmine expects
        if field_type == 'date':
            try:
                self._changes[tag] = raw_data.strftime('%Y-%m-%d')
            except AttributeError:
                continue
    self._update(self._changes)
    # Successful save, woot! Now clear the changes dict
    self._changes.clear()
|
Save all changes on this item (if any) back to Redmine.
|
entailment
|
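A minimal standalone sketch of the date handling save() applies to changed fields; the 'created_on' and 'due_date' field names here are hypothetical examples, not part of the library:

# Sketch of the datetime/date conversions save() performs on changed fields.
import datetime

changes = {
    'created_on': datetime.datetime(2020, 1, 15, 9, 30, 0),  # a 'datetime' field
    'due_date': datetime.date(2020, 2, 1),                   # a 'date' field
}
changes['created_on'] = changes['created_on'].strftime('%Y-%m-%dT%H:%M:%S%z')
changes['due_date'] = changes['due_date'].strftime('%Y-%m-%d')
# naive datetimes render %z as '', giving '2020-01-15T09:30:00'
print(changes)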
def refresh(self):
'''Refresh this item from data on the server.
Will save any unsaved data first.'''
if not self._item_path:
raise AttributeError('refresh is not available for %s' % self._type)
if not self.id:
raise RedmineError('%s did not come from the Redmine server - no link.' % self._type)
    try:
        self.save()
    except Exception:
        # Best-effort save; a failure here should not block the refresh
        pass
# Mimic the Redmine_Item_Manager.get command
target = self._item_path % self.id
json_data = self._redmine.get(target)
data = self._redmine.unwrap_json(self._type, json_data)
self._update_data(data=data)
|
Refresh this item from data on the server.
Will save any unsaved data first.
|
entailment
|
def _get_changes(self):
'''Get all changed values.'''
result = dict( (f['id'], f.get('value','')) for f in self._data if f.get('changed', False) )
    self._clear_changes()
return result
|
Get all changed values.
|
entailment
|
def iteritems(self, **options):
    '''Return a query iterator with (id, object) pairs.'''
    for obj in self.query(**options):
        yield (obj.id, obj)
|
Return a query iterator with (id, object) pairs.
|
entailment
|
def _objectify(self, json_data=None, data=None):
    '''Return an object derived from the given json data.'''
    if data is None:
        data = {}
    if json_data:
# Parse the data
try:
data = json.loads(json_data)
except ValueError:
# If parsing failed, then raise the string which likely contains an error message instead of data
raise RedmineError(json_data)
# Check to see if there is a data wrapper
# Some replies will have {'issue':{<data>}} instead of just {<data>}
try:
data = data[self._item_type]
except KeyError:
pass
# Either returns a new item or updates the item in the cache and returns that
return self._redmine.check_cache(self._item_type, data, self._object)
|
Return an object derived from the given json data.
|
entailment
|
def new(self, **attributes):
    '''Create a new item with the provided dict information. Returns the new item.'''
    if not self._item_new_path:
        raise AttributeError('new is not available for %s' % self._item_name)
    # Remap various tags to tag_id
    for tag in self._object._remap_to_id:
        self._object._remap_tag_to_tag_id(tag, attributes)
    target = self._item_new_path
    payload = json.dumps({self._item_type: attributes})
json_data = self._redmine.post(target, payload)
data = self._redmine.unwrap_json(self._item_type, json_data)
data['_source_path'] = target
return self._objectify(data=data)
|
Create a new item with the provided dict information. Returns the new item.
|
entailment
|
def get(self, id, **options):
'''Get a single item with the given ID'''
if not self._item_path:
raise AttributeError('get is not available for %s' % self._item_name)
target = self._item_path % id
json_data = self._redmine.get(target, **options)
data = self._redmine.unwrap_json(self._item_type, json_data)
data['_source_path'] = target
return self._objectify(data=data)
|
Get a single item with the given ID
|
entailment
|
def update(self, id, **attributes):
    '''Update a given item with the passed data.'''
    if not self._item_path:
        raise AttributeError('update is not available for %s' % self._item_name)
    target = (self._update_path or self._item_path) % id
    payload = json.dumps({self._item_type: attributes})
self._redmine.put(target, payload)
return None
|
Update a given item with the passed data.
|
entailment
|
def delete(self, id):
'''Delete a single item with the given ID'''
if not self._item_path:
raise AttributeError('delete is not available for %s' % self._item_name)
target = self._item_path % id
self._redmine.delete(target)
return None
|
Delete a single item with the given ID
|
entailment
|
def query(self, **options):
    '''Return an iterator for the given items.'''
    if not self._query_path:
        raise AttributeError('query is not available for %s' % self._item_name)
    offset = 0
    limit = options.get('limit', 25)
    options['limit'] = limit
    target = self._query_path
    while True:
        options['offset'] = offset
        # Go get the data with the given offset
        json_data = self._redmine.get(target, options)
        # Try and read the json
        try:
            data = json.loads(json_data)
        except ValueError:
            raise RedmineError(json_data)
        # The data is enclosed in the _query_container item
        # That is, {'issues':[{issue1},{issue2}...], 'total_count':##}
        data_container = data[self._query_container]
        for item_data in data_container:
            yield self._objectify(data=item_data)
        # If the container was empty, we requested past the end, just exit
        if not data_container:
            break
        try:
            if int(data['total_count']) > (offset + len(data_container)):
                # moar data!
                offset += limit
            else:
                break
        except (KeyError, ValueError):
            # If we don't even have a 'total_count', we're done.
            break
|
Return an iterator for the given items.
|
entailment
|
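The paging loop in query() is easier to follow in isolation. A sketch against a stubbed fetch function; fetch_page and ITEMS are illustrative stand-ins for self._redmine.get plus json.loads, not library API:

ITEMS = [{'id': i} for i in range(1, 8)]  # pretend server-side data

def fetch_page(offset, limit):
    return {'issues': ITEMS[offset:offset + limit], 'total_count': len(ITEMS)}

def query_all(limit=3):
    offset = 0
    while True:
        data = fetch_page(offset, limit)
        container = data['issues']
        for item in container:
            yield item
        if not container:
            break  # requested past the end
        if int(data['total_count']) > offset + len(container):
            offset += limit  # moar data!
        else:
            break

print([item['id'] for item in query_all()])  # [1, 2, 3, 4, 5, 6, 7]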
def _setup_authentication(self, username, password):
'''Create the authentication object with the given credentials.'''
## BUG WORKAROUND
if self.version < 1.1:
# Version 1.0 had a bug when using the key parameter.
# Later versions have the opposite bug (a key in the username doesn't function)
if not username:
username = self._key
self._key = None
if not username:
return
if not password:
password = '12345' #the same combination on my luggage! (required dummy value)
#realm = 'Redmine API' - doesn't always work
# create a password manager
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, self._url, username, password )
handler = urllib2.HTTPBasicAuthHandler( password_mgr )
# create "opener" (OpenerDirector instance)
self._opener = urllib2.build_opener( handler )
# set the opener when we fetch the URL
self._opener.open( self._url )
# Install the opener.
urllib2.install_opener( self._opener )
|
Create the authentication object with the given credentials.
|
entailment
|
def open_raw(self, page, parms=None, payload=None, HTTPrequest=None, payload_type='application/json' ):
    '''Opens a page from the server with an optional payload. Returns a response file-like object'''
if not parms:
parms={}
# if we're using a key, but it's not going in the header, add it to the parms array
if self._key and not self.key_in_header:
parms['key'] = self._key
# encode any data
urldata = ''
if parms:
urldata = '?' + urllib.urlencode( parms )
fullUrl = self._url + page
#debug
if self.debug:
print fullUrl + urldata
# register this url to be used with the opener
# must be registered for each unique path
try:
self._opener.open( fullUrl )
except AttributeError:
# No authentication
pass
# Set up the request
if HTTPrequest:
request = HTTPrequest( fullUrl + urldata )
else:
request = urllib2.Request( fullUrl + urldata )
# If the key is set and in the header, add it
if self._key and self.key_in_header:
request.add_header('X-Redmine-API-Key', self._key)
# If impersonation is set, add header
if self.impersonate and self.impersonation_supported:
request.add_header('X-Redmine-Switch-User', self.impersonate)
# get the data and return XML object
if payload:
request.add_header('Content-Type', payload_type)
response = urllib2.urlopen( request, payload )
else:
response = urllib2.urlopen( request )
return response
|
Opens a page from the server with an optional payload. Returns a response file-like object
|
entailment
|
def open(self, page, parms=None, payload=None, HTTPrequest=None ):
'''Opens a page from the server with optional content. Returns the string response.'''
response = self.open_raw( page, parms, payload, HTTPrequest )
return response.read()
|
Opens a page from the server with optional content. Returns the string response.
|
entailment
|
def post(self, page, payload, parms=None ):
    '''Posts a string payload to the server - used to make new Redmine items. Returns a JSON string or error.'''
if self.readonlytest:
print 'Redmine read only test: Pretending to create: ' + page
return payload
else:
return self.open( page, parms, payload )
|
Posts a string payload to the server - used to make new Redmine items. Returns a JSON string or error.
|
entailment
|
def put(self, page, payload, parms=None ):
    '''Puts a payload string on the server - used to update Redmine items. Returns nothing useful.'''
if self.readonlytest:
print 'Redmine read only test: Pretending to update: ' + page
else:
return self.open( page, parms, payload, HTTPrequest=self.PUT_Request )
|
Puts a payload string on the server - used to update Redmine items. Returns nothing useful.
|
entailment
|
def delete(self, page ):
'''Deletes a given object on the server - used to remove items from Redmine. Use carefully!'''
if self.readonlytest:
print 'Redmine read only test: Pretending to delete: ' + page
else:
return self.open( page, HTTPrequest=self.DELETE_Request )
|
Deletes a given object on the server - used to remove items from Redmine. Use carefully!
|
entailment
|
def unwrap_json(self, type, json_data):
'''Decodes a json string, and unwraps any 'type' it finds within.'''
# Parse the data
try:
data = json.loads(json_data)
except ValueError:
# If parsing failed, then raise the string which likely contains an error message instead of data
raise RedmineError(json_data)
# Check to see if there is a data wrapper
# Some replies will have {'issue':{<data>}} instead of just {<data>}
try:
data = data[type]
except KeyError:
pass
return data
|
Decodes a json string, and unwraps any 'type' it finds within.
|
entailment
|
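The wrapper-stripping is easiest to see on literal inputs; a two-line re-implementation, for illustration only:

import json

def unwrap(item_type, json_data):
    data = json.loads(json_data)
    return data.get(item_type, data)  # strip the wrapper if present

print(unwrap('issue', '{"issue": {"id": 1}}'))  # {'id': 1} - wrapper removed
print(unwrap('issue', '{"id": 1}'))             # {'id': 1} - already bare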
def find_all_item_classes(self):
'''Finds and stores a reference to all Redmine_Item subclasses for later use.'''
    # This is a circular import, but performed after the class is defined and an object is instantiated.
# We do this in order to get references to any objects definitions in the redmine.py file
# without requiring anyone editing the file to do anything other than create a class with the proper name.
import redmine as public_classes
item_class = {}
for key, value in public_classes.__dict__.items():
        try:
            if issubclass(value, Redmine_Item):
                item_class[key.lower()] = value
        except TypeError:
            # value is not a class at all
            continue
self.item_class = item_class
|
Finds and stores a reference to all Redmine_Item subclasses for later use.
|
entailment
|
def check_cache(self, type, data, obj=None):
'''Returns the updated cached version of the given dict'''
    try:
        id = data['id']
    except (KeyError, TypeError):
        # Not an identifiable item
        #print 'don\'t know this item %r:%r' % (type, data)
        return data
    # If obj was passed in, its type takes precedence
    try:
        type = obj._get_type()
    except AttributeError:
        pass
# Find the item in the cache, update and return if it's there
try:
hit = self.item_cache[type][id]
except KeyError:
pass
else:
hit._update_data(data)
#print 'cache hit for %s at %s' % (type, id)
return hit
# Not there? Let's make us a new item
# If we weren't given the object ref, find the name in the global scope
if not obj:
# Default to Redmine_Item if it's not found
obj = self.item_class.get(type, Redmine_Item)
new_item = obj(redmine=self, data=data, type=type)
# Store it
self.item_cache.setdefault(type, {})[id] = new_item
#print 'set new %s at %s' % (type, id)
return new_item
|
Returns the updated cached version of the given dict
|
entailment
|
def substract(self, pt):
"""Return a Point instance as the displacement of two points."""
if isinstance(pt, Point):
return Point(pt.x - self.x, pt.y - self.y, pt.z - self.z)
else:
raise TypeError
|
Return a Point instance as the displacement of two points.
|
entailment
|
def from_list(cls, l):
"""Return a Point instance from a given list"""
if len(l) == 3:
x, y, z = map(float, l)
return cls(x, y, z)
elif len(l) == 2:
x, y = map(float, l)
return cls(x, y)
else:
raise AttributeError
|
Return a Point instance from a given list
|
entailment
|
def multiply(self, number):
"""Return a Vector as the product of the vector and a real number."""
return self.from_list([x * number for x in self.to_list()])
|
Return a Vector as the product of the vector and a real number.
|
entailment
|
def magnitude(self):
"""Return magnitude of the vector."""
return math.sqrt(
reduce(lambda x, y: x + y, [x ** 2 for x in self.to_list()])
)
|
Return magnitude of the vector.
|
entailment
|
def sum(self, vector):
    """Return a Vector instance as the vector sum of two vectors."""
    return self.from_list(
        [x + vector.vector[i] for i, x in enumerate(self.to_list())]
    )
|
Return a Vector instance as the vector sum of two vectors.
|
entailment
|
def dot(self, vector, theta=None):
    """Return the dot product of two vectors.
    If theta is given then the dot product is computed as
    v1*v2 = |v1||v2|cos(theta). Argument theta
    is measured in degrees.
    """
    if theta is not None:
        return (self.magnitude() * vector.magnitude() *
                math.cos(math.radians(theta)))
    return (reduce(lambda x, y: x + y,
                   [x * vector.vector[i] for i, x in enumerate(self.to_list())]))
|
Return the dot product of two vectors.
If theta is given then the dot product is computed as
v1*v2 = |v1||v2|cos(theta). Argument theta
is measured in degrees.
|
entailment
|
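A quick numeric check of the degree handling fixed above: for unit vectors 60 degrees apart, the dot product should be cos(60°) = 0.5:

import math

theta = 60.0
print(math.cos(math.radians(theta)))  # 0.5 - the corrected expression
print(math.degrees(math.cos(theta)))  # ~-54.57 - what the original computed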
def cross(self, vector):
"""Return a Vector instance as the cross product of two vectors"""
return Vector((self.y * vector.z - self.z * vector.y),
(self.z * vector.x - self.x * vector.z),
(self.x * vector.y - self.y * vector.x))
|
Return a Vector instance as the cross product of two vectors
|
entailment
|
def unit(self):
    """Return a Vector instance of the unit vector"""
    mag = self.magnitude()
    return Vector(self.x / mag, self.y / mag, self.z / mag)
|
Return a Vector instance of the unit vector
|
entailment
|
def angle(self, vector):
"""Return the angle between two vectors in degrees."""
return math.degrees(
math.acos(
self.dot(vector) /
(self.magnitude() * vector.magnitude())
)
)
|
Return the angle between two vectors in degrees.
|
entailment
|
def non_parallel(self, vector):
    """Return True if vectors are non-parallel.
    Non-parallel vectors are vectors which are neither parallel
    nor perpendicular to each other.
    """
    return (not self.is_parallel(vector) and
            not self.is_perpendicular(vector))
|
Return True if vectors are non-parallel.
Non-parallel vectors are vectors which are neither parallel
nor perpendicular to each other.
|
entailment
|
def rotate(self, angle, axis=(0, 0, 1)):
"""Returns the rotated vector. Assumes angle is in radians"""
if not all(isinstance(a, int) for a in axis):
raise ValueError
x, y, z = self.x, self.y, self.z
# Z axis rotation
if(axis[2]):
x = (self.x * math.cos(angle) - self.y * math.sin(angle))
y = (self.x * math.sin(angle) + self.y * math.cos(angle))
# Y axis rotation
if(axis[1]):
x = self.x * math.cos(angle) + self.z * math.sin(angle)
z = -self.x * math.sin(angle) + self.z * math.cos(angle)
# X axis rotation
if(axis[0]):
y = self.y * math.cos(angle) - self.z * math.sin(angle)
z = self.y * math.sin(angle) + self.z * math.cos(angle)
return Vector(x, y, z)
|
Returns the rotated vector. Assumes angle is in radians
|
entailment
|
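A sanity check for the z-axis branch of rotate(): rotating the x unit vector by pi/2 about z should give the y unit vector:

import math

angle = math.pi / 2
x, y = 1.0, 0.0
xr = x * math.cos(angle) - y * math.sin(angle)
yr = x * math.sin(angle) + y * math.cos(angle)
print(round(xr, 10), round(yr, 10))  # 0.0 1.0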
def from_points(cls, point1, point2):
"""Return a Vector instance from two given points."""
if isinstance(point1, Point) and isinstance(point2, Point):
displacement = point1.substract(point2)
return cls(displacement.x, displacement.y, displacement.z)
raise TypeError
|
Return a Vector instance from two given points.
|
entailment
|
def spherical(cls, mag, theta, phi=0):
'''Returns a Vector instance from spherical coordinates'''
return cls(
mag * math.sin(phi) * math.cos(theta), # X
mag * math.sin(phi) * math.sin(theta), # Y
mag * math.cos(phi) # Z
)
|
Returns a Vector instance from spherical coordinates
|
entailment
|
def cylindrical(cls, mag, theta, z=0):
    '''Returns a Vector instance from cylindrical coordinates'''
return cls(
mag * math.cos(theta), # X
mag * math.sin(theta), # Y
z # Z
)
|
Returns a Vector instance from cylindrical coordinates
|
entailment
|
def amod(a, b):
    '''Modulus function which returns the divisor if the modulus is zero'''
    modded = int(a % b)
    return b if modded == 0 else modded
|
Modulus function which returns the divisor if the modulus is zero
|
entailment
|
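amod in action, with the == 0 fix applied; plain % returns 0 exactly where amod returns the divisor:

def amod(a, b):
    modded = int(a % b)
    return b if modded == 0 else modded

print(amod(10, 12))  # 10, same as 10 % 12
print(amod(24, 12))  # 12, where 24 % 12 would give 0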
def search_weekday(weekday, jd, direction, offset):
'''Determine the Julian date for the next or previous weekday'''
return weekday_before(weekday, jd + (direction * offset))
|
Determine the Julian date for the next or previous weekday
|
entailment
|
def nth_day_of_month(n, weekday, month, year):
"""
Return (year, month, day) tuple that represents nth weekday of month in year.
If n==0, returns last weekday of month. Weekdays: Monday=0
"""
if not (0 <= n <= 5):
raise IndexError("Nth day of month must be 0-5. Received: {}".format(n))
if not (0 <= weekday <= 6):
raise IndexError("Weekday must be 0-6")
firstday, daysinmonth = calendar.monthrange(year, month)
# Get first WEEKDAY of month
first_weekday_of_kind = 1 + (weekday - firstday) % 7
if n == 0:
        # find last weekday of kind, which is 5 if these conditions are met, else 4
        if first_weekday_of_kind in [1, 2, 3] and first_weekday_of_kind + 28 <= daysinmonth:
n = 5
else:
n = 4
day = first_weekday_of_kind + ((n - 1) * 7)
if day > daysinmonth:
raise IndexError("No {}th day of month {}".format(n, month))
return (year, month, day)
|
Return (year, month, day) tuple that represents nth weekday of month in year.
If n==0, returns last weekday of month. Weekdays: Monday=0
|
entailment
|
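Two worked examples for nth_day_of_month, assuming the function and its calendar import are in scope. January 1st 2021 was a Friday, and the last-Sunday case lands exactly on the first_weekday_of_kind + 28 == daysinmonth boundary that the <= comparison above has to accept:

print(nth_day_of_month(3, 0, 1, 2021))  # (2021, 1, 18) - 3rd Monday
print(nth_day_of_month(0, 6, 1, 2021))  # (2021, 1, 31) - last Sunday: 3 + 28 == 31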
def irafglob(inlist, atfile=None):
""" Returns a list of filenames based on the type of IRAF input.
Handles lists, wild-card characters, and at-files. For special
at-files, use the atfile keyword to process them.
This function is recursive, so IRAF lists can also contain at-files
and wild-card characters, e.g. `a.fits`, `@file.lst`, `*flt.fits`.
"""
# Sanity check
if inlist is None or len(inlist) == 0:
return []
# Determine which form of input was provided:
if isinstance(inlist, list):
# python list
flist = []
for f in inlist:
flist += irafglob(f)
elif ',' in inlist:
# comma-separated string list
flist = []
for f in inlist.split(','):
f = f.strip()
flist += irafglob(f)
elif inlist[0] == '@':
# file list
        flist = []
        with open(inlist[1:], 'r') as fin:
            lines = fin.readlines()
        for f in lines:
            f = f.rstrip()
# hook for application specific atfiles.
if atfile:
f = atfile(f)
flist += irafglob(f)
else:
# shell globbing
if osfn:
inlist = osfn(inlist)
flist = glob.glob(inlist)
return flist
|
Returns a list of filenames based on the type of IRAF input.
Handles lists, wild-card characters, and at-files. For special
at-files, use the atfile keyword to process them.
This function is recursive, so IRAF lists can also contain at-files
and wild-card characters, e.g. `a.fits`, `@file.lst`, `*flt.fits`.
|
entailment
|
def pack(self):
"""Return binary format of packet.
The returned string is the binary format of the packet with
stuffing and framing applied. It is ready to be sent to
the GPS.
"""
# Possible structs for packet ID.
#
try:
structs_ = get_structs_for_fields([self.fields[0]])
    except TypeError:
# TypeError, if self.fields[0] is a wrong argument to `chr()`.
raise PackError(self)
# Possible structs for packet ID + subcode
#
if structs_ == []:
try:
structs_ = get_structs_for_fields([self.fields[0], self.fields[1]])
except (IndexError, TypeError):
# IndexError, if no self.fields[1]
# TypeError, if self.fields[1] is a wrong argument to `chr()`.
raise PackError(self)
# Try to pack the packet with any of the possible structs.
#
for struct_ in structs_:
try:
return struct_.pack(*self.fields)
except struct.error:
pass
# We only get here if the ``return`` inside the``for`` loop
# above wasn't reached, i.e. none of the `structs_` matched.
#
raise PackError(self)
|
Return binary format of packet.
The returned string is the binary format of the packet with
stuffing and framing applied. It is ready to be sent to
the GPS.
|
entailment
|
def unpack(cls, rawpacket):
"""Instantiate `Packet` from binary string.
:param rawpacket: TSIP pkt in binary format.
:type rawpacket: String.
`rawpacket` must already have framing (DLE...DLE/ETX) removed and
byte stuffing reversed.
"""
structs_ = get_structs_for_rawpacket(rawpacket)
for struct_ in structs_:
        try:
            return cls(*struct_.unpack(rawpacket))
        except struct.error:
            # Try the next candidate struct.
            pass
# Packet ID 0xff is a pseudo-packet representing
# packets unknown to `python-TSIP` in their raw format.
#
return cls(0xff, rawpacket)
|
Instantiate `Packet` from binary string.
:param rawpacket: TSIP pkt in binary format.
:type rawpacket: String.
`rawpacket` must already have framing (DLE...DLE/ETX) removed and
byte stuffing reversed.
|
entailment
|
def ch_handler(offset=0, length=-1, **kw):
""" Handle standard PRIMARY clipboard access. Note that offset and length
are passed as strings. This differs from CLIPBOARD. """
global _lastSel
offset = int(offset)
length = int(length)
if length < 0: length = len(_lastSel)
return _lastSel[offset:offset+length]
|
Handle standard PRIMARY clipboard access. Note that offset and length
are passed as strings. This differs from CLIPBOARD.
|
entailment
|
def put(text, cbname):
""" Put the given string into the given clipboard. """
global _lastSel
_checkTkInit()
if cbname == 'CLIPBOARD':
_theRoot.clipboard_clear()
if text:
# for clipboard_append, kwds can be -displayof, -format, or -type
_theRoot.clipboard_append(text)
return
if cbname == 'PRIMARY':
_lastSel = text
_theRoot.selection_handle(ch_handler, selection='PRIMARY')
# we need to claim/own it so that ch_handler is used
_theRoot.selection_own(selection='PRIMARY')
# could add command arg for a func to be called when we lose ownership
return
raise RuntimeError("Unexpected clipboard name: "+str(cbname))
|
Put the given string into the given clipboard.
|
entailment
|
def get(cbname):
""" Get the contents of the given clipboard. """
_checkTkInit()
    if cbname == 'PRIMARY':
        try:
            return _theRoot.selection_get(selection='PRIMARY')
        except Exception:
            # e.g. the selection is empty
            return None
    if cbname == 'CLIPBOARD':
        try:
            return _theRoot.selection_get(selection='CLIPBOARD')
        except Exception:
            return None
raise RuntimeError("Unexpected clipboard name: "+str(cbname))
|
Get the contents of the given clipboard.
|
entailment
|
def createDevice(self, deviceCfg):
"""
Creates a measurement deviceCfg from the input configuration.
:param: deviceCfg: the deviceCfg cfg.
:param: handlers: the loaded handlers.
:return: the constructed deviceCfg.
"""
ioCfg = deviceCfg['io']
type = deviceCfg['type']
if type == 'mpu6050':
fs = deviceCfg.get('fs')
name = deviceCfg.get('name')
if ioCfg['type'] == 'mock':
provider = ioCfg.get('provider')
            if provider == 'white noise':
                dataProvider = WhiteNoiseProvider()
            else:
                raise ValueError(str(provider) + " is not a supported mock io data provider")
self.logger.warning("Loading mock data provider for mpu6050")
io = mock_io(dataProvider=dataProvider.provide)
elif ioCfg['type'] == 'smbus':
busId = ioCfg['busId']
self.logger.warning("Loading smbus %d", busId)
io = smbus_io(busId)
else:
raise ValueError(ioCfg['type'] + " is not a supported io provider")
self.logger.warning("Loading mpu6050 " + name + "/" + str(fs))
return mpu6050(io, name=name, fs=fs) if name is not None else mpu6050(io, fs=fs)
else:
raise ValueError(type + " is not a supported device")
|
Creates a measurement device from the input configuration.
:param deviceCfg: the device cfg.
:return: the constructed device.
|
entailment
|
def _loadRecordingDevices(self):
"""
    Loads the recordingDevices specified in the configuration.
    :return: the constructed recordingDevices in a dict keyed by name.
"""
return {device.name: device for device in
[self.createDevice(deviceCfg) for deviceCfg in self.config['accelerometers']]}
|
Loads the recordingDevices specified in the configuration.
:return: the constructed recordingDevices in a dict keyed by name.
|
entailment
|
def createHandler(self, handler):
"""
Creates a data handler from the input configuration.
:param handler: the handler cfg.
:return: the constructed handler.
"""
target = handler['target']
if handler['type'] == 'log':
self.logger.warning("Initialising csvlogger to log data to " + target)
return CSVLogger('recorder', handler['name'], target)
elif handler['type'] == 'post':
self.logger.warning("Initialising http logger to log data to " + target)
return HttpPoster(handler['name'], target)
|
Creates a data handler from the input configuration.
:param handler: the handler cfg.
:return: the constructed handler.
|
entailment
|
def _loadHandlers(self):
"""
creates a dictionary of named handler instances
:return: the dictionary
"""
return {handler.name: handler for handler in map(self.createHandler, self.config['handlers'])}
|
creates a dictionary of named handler instances
:return: the dictionary
|
entailment
|
def put(self, filename, chunkIdx, totalChunks):
"""
stores a chunk of new file, this is a nop if the file already exists.
:param filename: the filename.
:param chunkIdx: the chunk idx.
:param totalChunks: the no of chunks expected.
:return: the no of bytes written and 200 or 400 if nothing was written.
"""
logger.info('handling chunk ' + chunkIdx + ' of ' + totalChunks + ' for ' + filename)
import flask
bytesWritten = self._uploadController.writeChunk(flask.request.stream, filename, int(chunkIdx))
return str(bytesWritten), 200 if bytesWritten > 0 else 400
|
stores a chunk of new file, this is a nop if the file already exists.
:param filename: the filename.
:param chunkIdx: the chunk idx.
:param totalChunks: the no of chunks expected.
:return: the no of bytes written and 200 or 400 if nothing was written.
|
entailment
|
def delete(self, name):
"""
Deletes the named file.
:param name: the name.
:return: 200 if it was deleted, 404 if it doesn't exist or 500 for anything else.
"""
try:
result = self._uploadController.delete(name)
return None, 200 if result is not None else 404
except Exception as e:
return str(e), 500
|
Deletes the named file.
:param name: the name.
:return: 200 if it was deleted, 404 if it doesn't exist or 500 for anything else.
|
entailment
|
def put(self, name, start, end):
"""
Stores a new target.
:param name: the name.
:param start: start time.
:param end: end time.
:return:
"""
entry = self._uploadController.getEntry(name)
if entry is not None:
return None, 200 if self._targetController.storeFromWav(entry, start, end) else 500
else:
return None, 404
|
Stores a new target.
:param name: the name.
:param start: start time.
:param end: end time.
:return:
|
entailment
|
def get(self, name, start, end, resolution, window):
"""
:param name:
:param start:
:param end:
:param resolution:
:param window:
:return: an analysed file.
"""
logger.info(
'Analysing ' + name + ' from ' + start + ' to ' + end + ' at ' + resolution + 'x resolution using ' + window + ' window')
signal = self._uploadController.loadSignal(name,
start=start if start != 'start' else None,
end=end if end != 'end' else None)
if signal is not None:
window = tuple(filter(None, window.split(' ')))
if len(window) == 2:
window = (window[0], float(window[1]))
import time
data = {
'spectrum': self._jsonify(
signal.spectrum(ref=SPECLAB_REFERENCE, segmentLengthMultiplier=int(resolution), window=window)
),
'peakSpectrum': self._jsonify(
signal.peakSpectrum(ref=SPECLAB_REFERENCE, segmentLengthMultiplier=int(resolution), window=window)
),
'analysedAt': int(time.time() * 1000)
}
return data, 200
else:
return None, 404
|
:param name:
:param start:
:param end:
:param resolution:
:param window:
:return: an analysed file.
|
entailment
|
def put(self, filename, totalChunks, status):
"""
Completes the specified upload.
:param filename: the filename.
:param totalChunks: the no of chunks.
:param status: the status of the upload.
:return: 200.
"""
logger.info('Completing ' + filename + ' - ' + status)
self._uploadController.finalise(filename, int(totalChunks), status)
return None, 200
|
Completes the specified upload.
:param filename: the filename.
:param totalChunks: the no of chunks.
:param status: the status of the upload.
:return: 200.
|
entailment
|
def patch(self, measurementId):
"""
Patches the metadata associated with the new measurement, if this impacts the measurement length then a new
measurement is created otherwise it just updates it in place.
:param measurementId:
:return:
"""
data = request.get_json()
if data is not None:
logger.debug('Received payload for ' + measurementId + ' - ' + str(data))
if self._measurementController.editMeasurement(measurementId, data):
return None, 200
else:
logger.warning('Unable to edit payload ' + measurementId)
return None, 404
else:
logger.error('Invalid data payload received ' + measurementId)
return None, 400
|
Patches the metadata associated with the new measurement, if this impacts the measurement length then a new
measurement is created otherwise it just updates it in place.
:param measurementId:
:return:
|
entailment
|
def put(self, measurementId):
"""
Initiates a new measurement. Accepts a json payload with the following attributes;
* duration: in seconds
* startTime OR delay: a date in YMD_HMS format or a delay in seconds
* description: some free text information about the measurement
:return:
"""
json = request.get_json()
try:
start = self._calculateStartTime(json)
except ValueError:
return 'invalid date format in request', 400
duration = json['duration'] if 'duration' in json else 10
if start is None:
# should never happen but just in case
return 'no start time', 400
else:
scheduled, message = self._measurementController.schedule(measurementId, duration, start,
description=json.get('description'))
return message, 200 if scheduled else 400
|
Initiates a new measurement. Accepts a json payload with the following attributes;
* duration: in seconds
* startTime OR delay: a date in YMD_HMS format or a delay in seconds
* description: some free text information about the measurement
:return:
|
entailment
|
def _calculateStartTime(self, json):
"""
Calculates an absolute start time from the json payload. This is either the given absolute start time (+2s) or
the time in delay seconds time. If the resulting date is in the past then now is returned instead.
:param json: the payload from the UI
:return: the absolute start time.
"""
start = json['startTime'] if 'startTime' in json else None
delay = json['delay'] if 'delay' in json else None
if start is None and delay is None:
return self._getAbsoluteTime(datetime.datetime.utcnow(), 2)
elif start is not None:
target = datetime.datetime.strptime(start, DATETIME_FORMAT)
if target <= datetime.datetime.utcnow():
time = self._getAbsoluteTime(datetime.datetime.utcnow(), 2)
logger.warning('Date requested is in the past (' + start + '), defaulting to ' +
time.strftime(DATETIME_FORMAT))
return time
else:
return target
elif delay is not None:
return self._getAbsoluteTime(datetime.datetime.utcnow(), delay)
else:
return None
|
Calculates an absolute start time from the json payload. This is either the given absolute start time (+2s) or
the time in delay seconds time. If the resulting date is in the past then now is returned instead.
:param json: the payload from the UI
:return: the absolute start time.
|
entailment
|
def _getAbsoluteTime(self, start, delay):
"""
Adds the delay in seconds to the start time.
:param start:
:param delay:
    :return: a datetime for the specified point in time.
"""
return start + datetime.timedelta(days=0, seconds=delay)
|
Adds the delay in seconds to the start time.
:param start:
:param delay:
:return: a datetime for the specified point in time.
|
entailment
|
def delete(self, measurementId):
"""
Deletes the named measurement.
:return: 200 if something was deleted, 404 if the measurement doesn't exist, 500 in any other case.
"""
message, count, deleted = self._measurementController.delete(measurementId)
if count == 0:
return message, 404
elif deleted is None:
return message, 500
else:
return deleted, 200
|
Deletes the named measurement.
:return: 200 if something was deleted, 404 if the measurement doesn't exist, 500 in any other case.
|
entailment
|
def put(self, measurementId, deviceId):
"""
Initialises the measurement session from the given device.
:param measurementId:
:param deviceId:
:return:
"""
logger.info('Starting measurement ' + measurementId + ' for ' + deviceId)
if self._measurementController.startMeasurement(measurementId, deviceId):
logger.info('Started measurement ' + measurementId + ' for ' + deviceId)
return None, 200
else:
logger.warning('Failed to start measurement ' + measurementId + ' for ' + deviceId)
return None, 404
|
Initialises the measurement session from the given device.
:param measurementId:
:param deviceId:
:return:
|
entailment
|
def put(self, measurementId, deviceId):
"""
Store a bunch of data for this measurement session.
:param measurementId:
:param deviceId:
:return:
"""
data = request.get_json()
if data is not None:
parsedData = json.loads(data)
logger.debug('Received payload ' + measurementId + '/' + deviceId + ': ' +
str(len(parsedData)) + ' records')
if self._measurementController.recordData(measurementId, deviceId, parsedData):
return None, 200
else:
logger.warning('Unable to record payload ' + measurementId + '/' + deviceId)
return None, 404
else:
logger.error('Invalid data payload received ' + measurementId + '/' + deviceId)
return None, 400
|
Store a bunch of data for this measurement session.
:param measurementId:
:param deviceId:
:return:
|
entailment
|
def put(self, measurementId, deviceId):
"""
Fails the measurement for this device.
:param measurementId: the measurement name.
:param deviceId: the device name.
    :return: 200 if the measurement was failed, 404 otherwise.
"""
payload = request.get_json()
failureReason = json.loads(payload).get('failureReason') if payload is not None else None
logger.warning('Failing measurement ' + measurementId + ' for ' + deviceId + ' because ' + str(failureReason))
if self._measurementController.failMeasurement(measurementId, deviceId, failureReason=failureReason):
logger.warning('Failed measurement ' + measurementId + ' for ' + deviceId)
return None, 200
else:
logger.error('Unable to fail measurement ' + measurementId + ' for ' + deviceId)
return None, 404
|
Fails the measurement for this device.
:param measurementId: the measurement name.
:param deviceId: the device name.
:return: 200 if the measurement was failed, 404 otherwise.
|
entailment
|
def put(self, deviceId):
"""
Puts a new device into the device store
:param deviceId:
:return:
"""
device = request.get_json()
logger.debug("Received /devices/" + deviceId + " - " + str(device))
self._deviceController.accept(deviceId, device)
return None, 200
|
Puts a new device into the device store
:param deviceId:
:return:
|
entailment
|
def printColsAuto(in_strings, term_width=80, min_pad=1):
""" Print a list of strings centered in columns. Determine the number
of columns and lines on the fly. Return the result, ready to print.
in_strings is a list/tuple/iterable of strings
min_pad is number of spaces to appear on each side of a single string (so
you will see twice this many spaces between 2 strings)
"""
# sanity check
assert in_strings and len(in_strings)>0, 'Unexpected: '+repr(in_strings)
# get max width in input
maxWidth = len(max(in_strings, key=len)) + (2*min_pad) # width with pad
numCols = term_width//maxWidth # integer div
# set numCols so we take advantage of the whole line width
numCols = min(numCols, len(in_strings))
# easy case - single column or too big
if numCols < 2:
# one or some items are too big but print one item per line anyway
lines = [x.center(term_width) for x in in_strings]
return '\n'.join(lines)
# normal case - 2 or more columns
colWidth = term_width//numCols # integer div
# colWidth is guaranteed to be larger than all items in input
retval = ''
for i in range(len(in_strings)):
retval+=in_strings[i].center(colWidth)
if (i+1)%numCols == 0:
retval += '\n'
return retval.rstrip()
|
Print a list of strings centered in columns. Determine the number
of columns and lines on the fly. Return the result, ready to print.
in_strings is a list/tuple/iterable of strings
min_pad is number of spaces to appear on each side of a single string (so
you will see twice this many spaces between 2 strings)
|
entailment
|
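A small usage example for printColsAuto, assuming the function above is in scope; with a 40-character line and 'epsilon' (7 chars) as the widest entry, maxWidth is 9, so four columns of width 10 are used:

names = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta']
# prints the six names centered in four columns of width 10
print(printColsAuto(names, term_width=40, min_pad=1))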
def printCols(strlist,cols=5,width=80):
"""Print elements of list in cols columns"""
# This may exist somewhere in the Python standard libraries?
# Should probably rewrite this, it is pretty crude.
nlines = (len(strlist)+cols-1)//cols
line = nlines*[""]
for i in range(len(strlist)):
c, r = divmod(i,nlines)
nwid = c*width//cols - len(line[r])
if nwid>0:
line[r] = line[r] + nwid*" " + strlist[i]
else:
line[r] = line[r] + " " + strlist[i]
for s in line:
print(s)
|
Print elements of list in cols columns
|
entailment
|
def stripQuotes(value):
"""Strip single or double quotes off string; remove embedded quote pairs"""
if value[:1] == '"':
value = value[1:]
if value[-1:] == '"':
value = value[:-1]
# replace "" with "
value = re.sub(_re_doubleq2, '"', value)
elif value[:1] == "'":
value = value[1:]
if value[-1:] == "'":
value = value[:-1]
# replace '' with '
value = re.sub(_re_singleq2, "'", value)
return value
|
Strip single or double quotes off string; remove embedded quote pairs
|
entailment
|
def csvSplit(line, delim=',', allowEol=True):
""" Take a string as input (e.g. a line in a csv text file), and break
it into tokens separated by commas while ignoring commas embedded inside
quoted sections. This is exactly what the 'csv' module is meant for, so
we *should* be using it, save that it has two bugs (described next) which
limit our use of it. When these bugs are fixed, this function should be
forsaken in favor of direct use of the csv module (or similar).
The basic use case is to split a function signature string, so for:
afunc(arg1='str1', arg2='str, with, embedded, commas', arg3=7)
we want a 3 element sequence:
["arg1='str1'", "arg2='str, with, embedded, commas'", "arg3=7"]
but:
>>> import csv
>>> y = "arg1='str1', arg2='str, with, embedded, commas', arg3=7"
>>> rdr = csv.reader( (y,), dialect='excel', quotechar="'", skipinitialspace=True)
>>> l = rdr.next(); print(len(l), str(l)) # doctest: +SKIP
6 ["arg1='str1'", "arg2='str", 'with', 'embedded', "commas'", "arg3=7"]
which we can see is not correct - we wanted 3 tokens. This occurs in
Python 2.5.2 and 2.6. It seems to be due to the text at the start of each
token ("arg1=") i.e. because the quote isn't for the whole token. If we
were to remove the names of the args and the equal signs, it works:
>>> x = "'str1', 'str, with, embedded, commas', 7"
>>> rdr = csv.reader( (x,), dialect='excel', quotechar="'", skipinitialspace=True)
>>> l = rdr.next(); print(len(l), str(l)) # doctest: +SKIP
3 ['str1', 'str, with, embedded, commas', '7']
But even this usage is delicate - when we turn off skipinitialspace, it
fails:
>>> x = "'str1', 'str, with, embedded, commas', 7"
>>> rdr = csv.reader( (x,), dialect='excel', quotechar="'")
>>> l = rdr.next(); print(len(l), str(l)) # doctest: +SKIP
6 ['str1', " 'str", ' with', ' embedded', " commas'", ' 7']
So, for now, we'll roll our own.
"""
# Algorithm: read chars left to right, go from delimiter to delimiter,
# but as soon as a single/double/triple quote is hit, scan forward
# (ignoring all else) until its matching end-quote is found.
# For now, we will not specially handle escaped quotes.
tokens = []
ldl = len(delim)
keepOnRollin = line is not None and len(line) > 0
while keepOnRollin:
tok = _getCharsUntil(line, delim, True, allowEol=allowEol)
# len of token should always be > 0 because it includes end delimiter
# except on last token
if len(tok) > 0:
# append it, but without the delimiter
if tok[-ldl:] == delim:
tokens.append(tok[:-ldl])
else:
tokens.append(tok) # tok goes to EOL - has no delimiter
keepOnRollin = False
line = line[len(tok):]
else:
# This is the case of the empty end token
tokens.append('')
keepOnRollin = False
return tokens
|
Take a string as input (e.g. a line in a csv text file), and break
it into tokens separated by commas while ignoring commas embedded inside
quoted sections. This is exactly what the 'csv' module is meant for, so
we *should* be using it, save that it has two bugs (described next) which
limit our use of it. When these bugs are fixed, this function should be
forsaken in favor of direct use of the csv module (or similar).
The basic use case is to split a function signature string, so for:
afunc(arg1='str1', arg2='str, with, embedded, commas', arg3=7)
we want a 3 element sequence:
["arg1='str1'", "arg2='str, with, embedded, commas'", "arg3=7"]
but:
>>> import csv
>>> y = "arg1='str1', arg2='str, with, embedded, commas', arg3=7"
>>> rdr = csv.reader( (y,), dialect='excel', quotechar="'", skipinitialspace=True)
>>> l = rdr.next(); print(len(l), str(l)) # doctest: +SKIP
6 ["arg1='str1'", "arg2='str", 'with', 'embedded', "commas'", "arg3=7"]
which we can see is not correct - we wanted 3 tokens. This occurs in
Python 2.5.2 and 2.6. It seems to be due to the text at the start of each
token ("arg1=") i.e. because the quote isn't for the whole token. If we
were to remove the names of the args and the equal signs, it works:
>>> x = "'str1', 'str, with, embedded, commas', 7"
>>> rdr = csv.reader( (x,), dialect='excel', quotechar="'", skipinitialspace=True)
>>> l = rdr.next(); print(len(l), str(l)) # doctest: +SKIP
3 ['str1', 'str, with, embedded, commas', '7']
But even this usage is delicate - when we turn off skipinitialspace, it
fails:
>>> x = "'str1', 'str, with, embedded, commas', 7"
>>> rdr = csv.reader( (x,), dialect='excel', quotechar="'")
>>> l = rdr.next(); print(len(l), str(l)) # doctest: +SKIP
6 ['str1', " 'str", ' with', ' embedded', " commas'", ' 7']
So, for now, we'll roll our own.
|
entailment
|
def rglob(root, pattern):
""" Same thing as glob.glob, but recursively checks subdirs. """
# Thanks to Alex Martelli for basics on Stack Overflow
retlist = []
if None not in (pattern, root):
for base, dirs, files in os.walk(root):
goodfiles = fnmatch.filter(files, pattern)
retlist.extend(os.path.join(base, f) for f in goodfiles)
return retlist
|
Same thing as glob.glob, but recursively checks subdirs.
|
entailment
|
def setWritePrivs(fname, makeWritable, ignoreErrors=False):
""" Set a file named fname to be writable (or not) by user, with the
option to ignore errors. There is nothing ground-breaking here, but I
    was annoyed with having to repeat this little bit of code. """
privs = os.stat(fname).st_mode
try:
if makeWritable:
os.chmod(fname, privs | stat.S_IWUSR)
else:
os.chmod(fname, privs & (~ stat.S_IWUSR))
except OSError:
if ignoreErrors:
pass # just try, don't whine
else:
raise
|
Set a file named fname to be writable (or not) by user, with the
option to ignore errors. There is nothing ground-breaking here, but I
was annoyed with having to repeat this little bit of code.
|
entailment
|
def removeEscapes(value, quoted=0):
"""Remove escapes from in front of quotes (which IRAF seems to
just stick in for fun sometimes.) Remove \-newline too.
If quoted is true, removes all blanks following \-newline
(which is a nasty thing IRAF does for continuations inside
quoted strings.)
XXX Should we remove \\ too?
"""
i = value.find(r'\"')
while i>=0:
value = value[:i] + value[i+1:]
i = value.find(r'\"',i+1)
i = value.find(r"\'")
while i>=0:
value = value[:i] + value[i+1:]
i = value.find(r"\'",i+1)
# delete backslash-newlines
i = value.find("\\\n")
while i>=0:
j = i+2
if quoted:
# ignore blanks and tabs following \-newline in quoted strings
for c in value[i+2:]:
if c not in ' \t':
break
j = j+1
value = value[:i] + value[j:]
i = value.find("\\\n",i+1)
return value
|
Remove escapes from in front of quotes (which IRAF seems to
just stick in for fun sometimes.) Remove \-newline too.
If quoted is true, removes all blanks following \-newline
(which is a nasty thing IRAF does for continuations inside
quoted strings.)
XXX Should we remove \\ too?
|
entailment
|
def translateName(s, dot=0):
"""Convert CL parameter or variable name to Python-acceptable name
Translate embedded dollar signs to 'DOLLAR'
Add 'PY' prefix to components that are Python reserved words
    Add 'PY' prefix to components that start with a number
If dot != 0, also replaces '.' with 'DOT'
"""
s = s.replace('$', 'DOLLAR')
sparts = s.split('.')
for i in range(len(sparts)):
if sparts[i] == "" or sparts[i][0] in string.digits or \
keyword.iskeyword(sparts[i]):
sparts[i] = 'PY' + sparts[i]
if dot:
return 'DOT'.join(sparts)
else:
return '.'.join(sparts)
|
Convert CL parameter or variable name to Python-acceptable name
Translate embedded dollar signs to 'DOLLAR'
Add 'PY' prefix to components that are Python reserved words
Add 'PY' prefix to components that start with a number
If dot != 0, also replaces '.' with 'DOT'
|
entailment
|
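A few examples of translateName, assuming the module-level string and keyword imports this function relies on:

print(translateName('a$b'))               # aDOLLARb
print(translateName('pkg.lambda'))        # pkg.PYlambda  (reserved word)
print(translateName('pkg.2fast', dot=1))  # pkgDOTPY2fast (leading digit)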
def init_tk_default_root(withdraw=True):
""" In case the _default_root value is required, you may
safely call this ahead of time to ensure that it has been
initialized. If it has already been, this is a no-op.
"""
if not capable.OF_GRAPHICS:
raise RuntimeError("Cannot run this command without graphics")
if not TKNTR._default_root: # TKNTR imported above
junk = TKNTR.Tk()
# tkinter._default_root is now populated (== junk)
retval = TKNTR._default_root
if withdraw and retval:
retval.withdraw()
return retval
|
In case the _default_root value is required, you may
safely call this ahead of time to ensure that it has been
initialized. If it has already been, this is a no-op.
|
entailment
|
def tkreadline(file=None):
"""Read a line from file while running Tk mainloop.
If the file is not line-buffered then the Tk mainloop will stop
running after one character is typed. The function will still work
but Tk widgets will stop updating. This should work OK for stdin and
other line-buffered filehandles. If file is omitted, reads from
sys.stdin.
The file must have a readline method. If it does not have a fileno
method (which can happen e.g. for the status line input on the
graphics window) then the readline method is simply called directly.
"""
if file is None:
file = sys.stdin
if not hasattr(file, "readline"):
raise TypeError("file must be a filehandle with a readline method")
# Call tkread now...
# BUT, if we get in here for something not GUI-related (e.g. terminal-
# focused code in a sometimes-GUI app) then skip tkread and simply call
# readline on the input eg. stdin. Otherwise we'd fail in _TkRead().read()
    try:
        fd = file.fileno()
    except Exception:
        fd = None
    if fd is not None and capable.OF_GRAPHICS:
tkread(fd, 0)
# if EOF was encountered on a tty, avoid reading again because
# it actually requests more data
if not select.select([fd],[],[],0)[0]:
return ''
return file.readline()
|
Read a line from file while running Tk mainloop.
If the file is not line-buffered then the Tk mainloop will stop
running after one character is typed. The function will still work
but Tk widgets will stop updating. This should work OK for stdin and
other line-buffered filehandles. If file is omitted, reads from
sys.stdin.
The file must have a readline method. If it does not have a fileno
method (which can happen e.g. for the status line input on the
graphics window) then the readline method is simply called directly.
|
entailment
|
def launchBrowser(url, brow_bin='mozilla', subj=None):
""" Given a URL, try to pop it up in a browser on most platforms.
brow_bin is only used on OS's where there is no "open" or "start" cmd.
"""
if not subj: subj = url
# Tries to use webbrowser module on most OSes, unless a system command
# is needed. (E.g. win, linux, sun, etc)
    if sys.platform not in ('os2warp', 'iphone'): # try webbrowser w/ everything?
import webbrowser
if not webbrowser.open(url):
print("Error opening URL: "+url)
else:
print('Help on "'+subj+'" is now being displayed in a web browser')
return
# Go ahead and fork a subprocess to call the correct binary
pid = os.fork()
if pid == 0: # child
if sys.platform == 'darwin':
if 0 != os.system('open "'+url+'"'): # does not seem to keep '#.*'
print("Error opening URL: "+url)
os._exit(0)
# The following retries if "-remote" doesnt work, opening a new browser
# cmd = brow_bin+" -remote 'openURL("+url+")' '"+url+"' 1> /dev/null 2>&1"
# if 0 != os.system(cmd)
# print "Running "+brow_bin+" for HTML help..."
# os.execvp(brow_bin,[brow_bin,url])
# os._exit(0)
    else: # parent
        print('Help on "'+subj+'" is now being displayed in a browser')
|
Given a URL, try to pop it up in a browser on most platforms.
brow_bin is only used on OS's where there is no "open" or "start" cmd.
|
entailment
|
def read(self, file, nbytes):
"""Read nbytes characters from file while running Tk mainloop"""
if not capable.OF_GRAPHICS:
raise RuntimeError("Cannot run this command without graphics")
if isinstance(file, int):
fd = file
else:
# Otherwise, assume we have Python file object
        try:
            fd = file.fileno()
        except (AttributeError, TypeError):
            raise TypeError("file must be an integer or a filehandle/socket")
init_tk_default_root() # harmless if already done
self.widget = TKNTR._default_root
if not self.widget:
# no Tk widgets yet, so no need for mainloop
            # (shouldn't happen now with init_tk_default_root)
s = []
while nbytes>0:
snew = os.read(fd, nbytes) # returns bytes in PY3K
if snew:
if PY3K: snew = snew.decode('ascii','replace')
s.append(snew)
nbytes -= len(snew)
else:
# EOF -- just return what we have so far
break
return "".join(s)
else:
self.nbytes = nbytes
self.value = []
self.widget.tk.createfilehandler(fd,
TKNTR.READABLE | TKNTR.EXCEPTION,
self._read)
try:
self.widget.mainloop()
finally:
self.widget.tk.deletefilehandler(fd)
return "".join(self.value)
|
Read nbytes characters from file while running Tk mainloop
|
entailment
|
def _read(self, fd, mask):
"""Read waiting data and terminate Tk mainloop if done"""
try:
# if EOF was encountered on a tty, avoid reading again because
# it actually requests more data
if select.select([fd],[],[],0)[0]:
snew = os.read(fd, self.nbytes) # returns bytes in PY3K
if PY3K: snew = snew.decode('ascii','replace')
self.value.append(snew)
self.nbytes -= len(snew)
else:
snew = ''
if (self.nbytes <= 0 or len(snew) == 0) and self.widget:
# stop the mainloop
self.widget.quit()
except OSError:
raise IOError("Error reading from %s" % (fd,))
|
Read waiting data and terminate Tk mainloop if done
|
entailment
|
def loadSignalFromDelimitedFile(filename, timeColumnIdx=0, dataColumnIdx=1, delimiter=',', skipHeader=0) -> Signal:
""" reads a delimited file and converts into a Signal
:param filename: string
:param timeColumnIdx: 0 indexed column number
:param dataColumnIdx: 0 indexed column number
:param delimiter: char
:return a Signal instance
"""
data = np.genfromtxt(filename, delimiter=delimiter, skip_header=skipHeader)
columnCount = data.shape[1]
    if columnCount < timeColumnIdx + 1:
        raise ValueError(
            "%s has only %d columns, time values can't be at column %d" % (filename, columnCount, timeColumnIdx))
    if columnCount < dataColumnIdx + 1:
        raise ValueError(
            "%s has only %d columns, data values can't be at column %d" % (filename, columnCount, dataColumnIdx))
t = data[:, [timeColumnIdx]]
samples = data[:, [dataColumnIdx]]
# calculate fs as the interval between the time samples
fs = int(round(1 / (np.diff(t, n=1, axis=0).mean()), 0))
source = Signal(samples.ravel(), fs)
return source
|
reads a delimited file and converts into a Signal
:param filename: string
:param timeColumnIdx: 0 indexed column number
:param dataColumnIdx: 0 indexed column number
:param delimiter: char
:return a Signal instance
|
entailment
|
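How the sample rate falls out of the time column: fs is the reciprocal of the mean spacing between consecutive time stamps, so a 2 ms grid yields 500 Hz:

import numpy as np

t = np.array([[0.000], [0.002], [0.004], [0.006]])  # 2 ms spacing
fs = int(round(1 / (np.diff(t, n=1, axis=0).mean()), 0))
print(fs)  # 500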
def loadSignalFromWav(inputSignalFile, calibrationRealWorldValue=None, calibrationSignalFile=None, start=None,
end=None) -> Signal:
""" reads a wav file into a Signal and scales the input so that the sample are expressed in real world values
(as defined by the calibration signal).
:param inputSignalFile: a path to the input signal file
:param calibrationSignalFile: a path the calibration signal file
:param calibrationRealWorldValue: the real world value represented by the calibration signal
:param bitDepth: the bit depth of the input signal, used to rescale the value to a range of +1 to -1
:returns: a Signal
"""
inputSignal = readWav(inputSignalFile, start=start, end=end)
if calibrationSignalFile is not None:
calibrationSignal = readWav(calibrationSignalFile)
scalingFactor = calibrationRealWorldValue / np.max(calibrationSignal.samples)
return Signal(inputSignal.samples * scalingFactor, inputSignal.fs)
else:
return inputSignal
|
reads a wav file into a Signal and scales the input so that the samples are expressed in real world values
(as defined by the calibration signal).
:param inputSignalFile: a path to the input signal file
:param calibrationSignalFile: a path the calibration signal file
:param calibrationRealWorldValue: the real world value represented by the calibration signal
:param bitDepth: the bit depth of the input signal, used to rescale the value to a range of +1 to -1
:returns: a Signal
|
entailment
|
def readWav(inputSignalFile, selectedChannel=1, start=None, end=None) -> Signal:
""" reads a wav file into a Signal.
:param inputSignalFile: a path to the input signal file
:param selectedChannel: the channel to read.
:param start: the time to start reading from in HH:mm:ss.SSS format.
    :param end: the time to stop reading at in HH:mm:ss.SSS format.
:returns: Signal.
"""
def asFrames(time, fs):
hours, minutes, seconds = (time.split(":"))[-3:]
hours = int(hours)
minutes = int(minutes)
seconds = float(seconds)
millis = int((3600000 * hours) + (60000 * minutes) + (1000 * seconds))
return int(millis * (fs / 1000))
import soundfile as sf
if start is not None or end is not None:
info = sf.info(inputSignalFile)
startFrame = 0 if start is None else asFrames(start, info.samplerate)
endFrame = None if end is None else asFrames(end, info.samplerate)
ys, frameRate = sf.read(inputSignalFile, start=startFrame, stop=endFrame)
else:
ys, frameRate = sf.read(inputSignalFile)
    # sf.read returns a (frames, channels) array for multi-channel files, so
    # select the requested channel; a step-slice would drop frames instead
    if ys.ndim == 2:
        ys = ys[:, selectedChannel - 1]
    return Signal(ys, frameRate)
|
reads a wav file into a Signal.
:param inputSignalFile: a path to the input signal file
:param selectedChannel: the channel to read.
:param start: the time to start reading from in HH:mm:ss.SSS format.
:param end: the time to stop reading at in HH:mm:ss.SSS format.
:returns: Signal.
|
entailment
|
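The HH:mm:ss.SSS to frame conversion inside asFrames, worked through by hand for 48 kHz audio:

hours, minutes, seconds = 0, 1, 30.5  # i.e. '00:01:30.5'
millis = int((3600000 * hours) + (60000 * minutes) + (1000 * seconds))
frames = int(millis * (48000 / 1000))
print(frames)  # 4344000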
def loadTriAxisSignalFromFile(filename, timeColumnIdx=0, xIdx=1, yIdx=2, zIdx=3, delimiter=',',
skipHeader=0) -> TriAxisSignal:
"""
A factory method for loading a tri axis measurement from a single file.
:param filename: the file to load from.
:param timeColumnIdx: the column containing time data.
:param xIdx: the column containing x axis data.
:param yIdx: the column containing y axis data.
:param zIdx: the column containing z axis data.
:param delimiter: the delimiter.
:param skipHeader: how many rows of headers to skip.
:return: the measurement
"""
return TriAxisSignal(
x=loadSignalFromDelimitedFile(filename, timeColumnIdx=timeColumnIdx, dataColumnIdx=xIdx,
delimiter=delimiter, skipHeader=skipHeader),
y=loadSignalFromDelimitedFile(filename, timeColumnIdx=timeColumnIdx, dataColumnIdx=yIdx,
delimiter=delimiter, skipHeader=skipHeader),
z=loadSignalFromDelimitedFile(filename, timeColumnIdx=timeColumnIdx, dataColumnIdx=zIdx,
delimiter=delimiter, skipHeader=skipHeader))
|
A factory method for loading a tri axis measurement from a single file.
:param filename: the file to load from.
:param timeColumnIdx: the column containing time data.
:param xIdx: the column containing x axis data.
:param yIdx: the column containing y axis data.
:param zIdx: the column containing z axis data.
:param delimiter: the delimiter.
:param skipHeader: how many rows of headers to skip.
:return: the measurement
|
entailment
|
def psd(self, ref=None, segmentLengthMultiplier=1, mode=None, **kwargs):
"""
    analyses the source and returns a PSD; the segment length is set to give ~1Hz frequency resolution
:param ref: the reference value for dB purposes.
:param segmentLengthMultiplier: allow for increased resolution.
:param mode: cq or none.
:return:
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density.
"""
def analysisFunc(x, nperseg, **kwargs):
f, Pxx_den = signal.welch(self.samples, self.fs, nperseg=nperseg, detrend=False, **kwargs)
if ref is not None:
Pxx_den = librosa.power_to_db(Pxx_den, ref)
return f, Pxx_den
if mode == 'cq':
return self._cq(analysisFunc, segmentLengthMultiplier)
else:
return analysisFunc(0, self.getSegmentLength() * segmentLengthMultiplier, **kwargs)
|
analyses the source and returns a PSD; the segment length is set to give ~1Hz frequency resolution
:param ref: the reference value for dB purposes.
:param segmentLengthMultiplier: allow for increased resolution.
:param mode: cq or none.
:return:
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density.
|
entailment
|
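A minimal sketch of calling psd on a Signal instance (here named sig, an assumption); ref=1.0 converts the density to dB, and doubling the segment length halves the bin spacing:

f, Pxx = sig.psd(ref=1.0, segmentLengthMultiplier=2)
# with a base segment tuned for ~1Hz resolution, a multiplier of 2 gives ~0.5Hz bins
print(f[1] - f[0])
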
def spectrum(self, ref=None, segmentLengthMultiplier=1, mode=None, **kwargs):
"""
analyses the source to generate the linear spectrum.
:param ref: the reference value for dB purposes.
:param segmentLengthMultiplier: allow for increased resolution.
:param mode: cq or none.
:return:
f : ndarray
Array of sample frequencies.
Pxx : ndarray
linear spectrum.
"""
def analysisFunc(x, nperseg, **kwargs):
f, Pxx_spec = signal.welch(self.samples, self.fs, nperseg=nperseg, scaling='spectrum', detrend=False,
**kwargs)
Pxx_spec = np.sqrt(Pxx_spec)
# it seems a 3dB adjustment is required to account for the change in nperseg
if x > 0:
Pxx_spec = Pxx_spec / (10 ** ((3 * x) / 20))
if ref is not None:
Pxx_spec = librosa.amplitude_to_db(Pxx_spec, ref)
return f, Pxx_spec
if mode == 'cq':
return self._cq(analysisFunc, segmentLengthMultiplier)
else:
return analysisFunc(0, self.getSegmentLength() * segmentLengthMultiplier, **kwargs)
|
analyses the source to generate the linear spectrum.
:param ref: the reference value for dB purposes.
:param segmentLengthMultiplier: allow for increased resolution.
:param mode: cq or none.
:return:
f : ndarray
Array of sample frequencies.
Pxx : ndarray
linear spectrum.
|
entailment
|
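The 3dB-per-doubling correction can be checked in isolation: each doubling of nperseg (x counts the doublings in cq mode) divides the amplitude by 10 ** ((3 * x) / 20), i.e. roughly sqrt(2) per doubling:

for x in range(3):
    print(x, 10 ** ((3 * x) / 20))  # 1.0, ~1.413, ~1.995
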
def peakSpectrum(self, ref=None, segmentLengthMultiplier=1, mode=None, window='hann'):
"""
analyses the source to generate the max values per bin per segment
:param ref: the reference value for dB purposes.
:param segmentLengthMultiplier: allow for increased resolution.
        :param mode: cq or none.
        :param window: the window function to apply to each segment.
:return:
f : ndarray
Array of sample frequencies.
Pxx : ndarray
linear spectrum max values.
"""
def analysisFunc(x, nperseg):
freqs, _, Pxy = signal.spectrogram(self.samples,
self.fs,
window=window,
nperseg=int(nperseg),
noverlap=int(nperseg // 2),
detrend=False,
scaling='spectrum')
Pxy_max = np.sqrt(Pxy.max(axis=-1).real)
if x > 0:
Pxy_max = Pxy_max / (10 ** ((3 * x) / 20))
if ref is not None:
Pxy_max = librosa.amplitude_to_db(Pxy_max, ref=ref)
return freqs, Pxy_max
if mode == 'cq':
return self._cq(analysisFunc, segmentLengthMultiplier)
else:
return analysisFunc(0, self.getSegmentLength() * segmentLengthMultiplier)
|
analyses the source to generate the max values per bin per segment
:param ref: the reference value for dB purposes.
:param segmentLengthMultiplier: allow for increased resolution.
:param mode: cq or none.
:param window: the window function to apply to each segment.
:return:
f : ndarray
Array of sample frequencies.
Pxx : ndarray
linear spectrum max values.
|
entailment
|
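A sketch contrasting peakSpectrum with spectrum on the same Signal (sig is an assumed instance); the per-bin max over segments upper-bounds the Welch average, which makes intermittent content visible:

f_avg, Pxx_avg = sig.spectrum(ref=1.0)
f_pk, Pxx_pk = sig.peakSpectrum(ref=1.0)
# for a matching analysis setup, Pxx_pk >= Pxx_avg bin by bin
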
def spectrogram(self, ref=None, segmentLengthMultiplier=1, window='hann'):
"""
analyses the source to generate a spectrogram
:param ref: the reference value for dB purposes.
        :param segmentLengthMultiplier: allow for increased resolution.
        :param window: the window function to apply to each segment.
:return:
t : ndarray
Array of time slices.
f : ndarray
Array of sample frequencies.
Pxx : ndarray
linear spectrum values.
"""
        # scipy returns (f, t, Sxx), so unpack in that order; the method then returns
        # (t, f, Sxx) to match the docstring above
        f, t, Sxx = signal.spectrogram(self.samples,
self.fs,
window=window,
nperseg=self.getSegmentLength() * segmentLengthMultiplier,
detrend=False,
scaling='spectrum')
Sxx = np.sqrt(Sxx)
if ref is not None:
Sxx = librosa.amplitude_to_db(Sxx, ref)
return t, f, Sxx
|
analyses the source to generate a spectrogram
:param ref: the reference value for dB purposes.
:param segmentLengthMultiplier: allow for increased resolution.
:param window: the window function to apply to each segment.
:return:
t : ndarray
Array of time slices.
f : ndarray
Array of sample frequencies.
Pxx : ndarray
linear spectrum values.
|
entailment
|
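A plotting sketch for the spectrogram output, assuming matplotlib is available and sig is a Signal instance; the (t, f, Sxx) ordering follows the docstring above:

import matplotlib.pyplot as plt

t, f, Sxx = sig.spectrogram(ref=1.0)
plt.pcolormesh(t, f, Sxx, shading='auto')
plt.xlabel('Time [s]')
plt.ylabel('Frequency [Hz]')
plt.show()
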
def lowPass(self, *args):
"""
        Creates a copy of the signal with the low pass filter applied; args specified are passed through to _butter.
:return:
"""
return Signal(self._butter(self.samples, 'low', *args), fs=self.fs)
|
Creates a copy of the signal with the low pass filter applied; args specified are passed through to _butter.
:return:
|
entailment
|
def highPass(self, *args):
"""
        Creates a copy of the signal with the high pass filter applied; args specified are passed through to _butter.
:return:
"""
return Signal(self._butter(self.samples, 'high', *args), fs=self.fs)
|
Creates a copy of the signal with the high pass filter applied; args specified are passed through to _butter.
:return:
|
entailment
|
def _butter(self, data, btype, f3=2, order=2):
"""
        Applies a digital Butterworth filter via filtfilt at the specified f3 and order. Default values are set to
correspond to apparently sensible filters that distinguish between vibration and tilt from an accelerometer.
:param data: the data to filter.
:param btype: high or low.
:param f3: the f3 of the filter.
:param order: the filter order.
:return: the filtered signal.
"""
b, a = signal.butter(order, f3 / (0.5 * self.fs), btype=btype)
y = signal.filtfilt(b, a, data)
return y
|
Applies a digital Butterworth filter via filtfilt at the specified f3 and order. Default values are set to
correspond to apparently sensible filters that distinguish between vibration and tilt from an accelerometer.
:param data: the data to filter.
:param btype: high or low.
:param f3: the f3 of the filter.
:param order: the filter order.
:return: the filtered signal.
|
entailment
|
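A standalone check of the default high pass response via scipy's freqz, under an assumed fs of 500; note that filtfilt runs the filter forward and backward, so the realised attenuation at the corner is roughly double the single-pass figure:

import numpy as np
from scipy import signal

fs = 500  # assumed sample rate
b, a = signal.butter(2, 2 / (0.5 * fs), btype='high')
w, h = signal.freqz(b, a, fs=fs)
idx = np.argmin(np.abs(w - 2))
print(20 * np.log10(np.abs(h[idx])))  # ~-3dB at the 2Hz corner for a single pass
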
def _getAnalysis(self, axis, analysis, ref=None):
"""
        gets the named analysis on the given axis and caches the result (or reads from the cache if the data is
        already available).
:param axis: the named axis.
:param analysis: the analysis name.
:return: the analysis tuple.
"""
cache = self.cache.get(str(ref))
if cache is None:
cache = {'x': {}, 'y': {}, 'z': {}, 'sum': {}}
self.cache[str(ref)] = cache
if axis in cache:
data = self.cache['raw'].get(axis, None)
cachedAxis = cache.get(axis)
if cachedAxis.get(analysis) is None:
if axis == 'sum':
if self._canSum(analysis):
fx, Pxx = self._getAnalysis('x', analysis)
fy, Pxy = self._getAnalysis('y', analysis)
fz, Pxz = self._getAnalysis('z', analysis)
                        # combine as the root of the weighted sum of squares, with extra weight on x and y
Psum = (((Pxx * 2.2) ** 2) + ((Pxy * 2.4) ** 2) + (Pxz ** 2)) ** 0.5
if ref is not None:
Psum = librosa.amplitude_to_db(Psum, ref)
cachedAxis[analysis] = (fx, Psum)
else:
return None
else:
cachedAxis[analysis] = getattr(data.highPass(), analysis)(ref=ref)
return cachedAxis[analysis]
else:
return None
|
gets the named analysis on the given axis and caches the result (or reads from the cache if the data is
already available).
:param axis: the named axis.
:param analysis: the analysis name.
:return: the analysis tuple.
|
entailment
|
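The sum axis combines the per-axis magnitudes as a weighted root-sum-of-squares; the combination in isolation, with the weights from the code and purely illustrative values:

import numpy as np

Pxx = np.array([1.0, 2.0])
Pxy = np.array([0.5, 1.0])
Pxz = np.array([2.0, 0.1])
Psum = (((Pxx * 2.2) ** 2) + ((Pxy * 2.4) ** 2) + (Pxz ** 2)) ** 0.5
print(Psum)
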
def legal_date(year, month, day):
'''Checks if a given date is a legal positivist date'''
try:
assert year >= 1
assert 0 < month <= 14
assert 0 < day <= 28
if month == 14:
if isleap(year + YEAR_EPOCH - 1):
assert day <= 2
else:
assert day == 1
except AssertionError:
raise ValueError("Invalid Positivist date: ({}, {}, {})".format(year, month, day))
return True
|
Checks if a given date is a legal positivist date
|
entailment
|
def to_jd(year, month, day):
'''Convert a Positivist date to Julian day count.'''
legal_date(year, month, day)
gyear = year + YEAR_EPOCH - 1
return (
gregorian.EPOCH - 1 + (365 * (gyear - 1)) +
floor((gyear - 1) / 4) + (-floor((gyear - 1) / 100)) +
floor((gyear - 1) / 400) + (month - 1) * 28 + day
)
|
Convert a Positivist date to Julian day count.
|
entailment
|
def from_jd(jd):
'''Convert a Julian day count to Positivist date.'''
try:
assert jd >= EPOCH
except AssertionError:
raise ValueError('Invalid Julian day')
depoch = floor(jd - 0.5) + 0.5 - gregorian.EPOCH
quadricent = floor(depoch / gregorian.INTERCALATION_CYCLE_DAYS)
dqc = depoch % gregorian.INTERCALATION_CYCLE_DAYS
cent = floor(dqc / gregorian.LEAP_SUPPRESSION_DAYS)
dcent = dqc % gregorian.LEAP_SUPPRESSION_DAYS
quad = floor(dcent / gregorian.LEAP_CYCLE_DAYS)
dquad = dcent % gregorian.LEAP_CYCLE_DAYS
yindex = floor(dquad / gregorian.YEAR_DAYS)
year = (
quadricent * gregorian.INTERCALATION_CYCLE_YEARS +
cent * gregorian.LEAP_SUPPRESSION_YEARS +
quad * gregorian.LEAP_CYCLE_YEARS + yindex
)
if yindex == 4:
yearday = 365
year = year - 1
else:
yearday = (
depoch -
quadricent * gregorian.INTERCALATION_CYCLE_DAYS -
cent * gregorian.LEAP_SUPPRESSION_DAYS -
quad * gregorian.LEAP_CYCLE_DAYS -
yindex * gregorian.YEAR_DAYS
)
month = floor(yearday / 28)
return (year - YEAR_EPOCH + 2, month + 1, int(yearday - (month * 28)) + 1)
|
Convert a Julian day count to Positivist date.
|
entailment
|
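A hedged round-trip check of the two conversions, assuming EPOCH and YEAR_EPOCH are defined elsewhere in the module as these functions require:

jd = to_jd(231, 5, 17)  # an arbitrary legal Positivist date
print(from_jd(jd))      # expected to round-trip to (231, 5, 17)
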
def dayname(year, month, day):
'''
Give the name of the month and day for a given date.
Returns:
tuple month_name, day_name
'''
legal_date(year, month, day)
yearday = (month - 1) * 28 + day
if isleap(year + YEAR_EPOCH - 1):
dname = data.day_names_leap[yearday - 1]
else:
dname = data.day_names[yearday - 1]
return MONTHS[month - 1], dname
|
Give the name of the month and day for a given date.
Returns:
tuple month_name, day_name
|
entailment
|
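A usage sketch for dayname; the exact strings come from the module's MONTHS and data tables, so the output shown is only indicative:

month_name, day_name = dayname(1, 1, 1)
print(month_name, day_name)  # e.g. the first Positivist month (Moses) and its first day name
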
def hasExpired(self):
"""
:return: true if the lastUpdateTime is more than maxAge seconds ago.
"""
return (datetime.datetime.utcnow() - self.lastUpdateTime).total_seconds() > self.maxAgeSeconds
|
:return: true if the lastUpdateTime is more than maxAge seconds ago.
|
entailment
|
def accept(self, deviceId, device):
"""
Adds the named device to the store.
:param deviceId:
:param device:
:return:
"""
storedDevice = self.devices.get(deviceId)
if storedDevice is None:
logger.info('Initialising device ' + deviceId)
storedDevice = Device(self.maxAgeSeconds)
storedDevice.deviceId = deviceId
            # this uses an async handler to decouple the recorder put (of the data) from the analyser handling
            # that data, thus the recorder will become free as soon as it has handed off the data. This means
            # delivery is only guaranteed as long as the analyser stays up, but this is not a system that sits
            # on top of a bulletproof message bus so unlucky :P
storedDevice.dataHandler = AsyncHandler('analyser', CSVLogger('analyser', deviceId, self.dataDir))
else:
logger.debug('Pinged by device ' + deviceId)
storedDevice.payload = device
storedDevice.lastUpdateTime = datetime.datetime.utcnow()
# TODO if device has FAILED, do something?
self.devices.update({deviceId: storedDevice})
self.targetStateController.updateDeviceState(storedDevice.payload)
|
Adds the named device to the store.
:param deviceId:
:param device:
:return:
|
entailment
|
def getDevices(self, status=None):
"""
        The devices in the given state, or all devices if the arg is None.
:param status: the state to match against.
:return: the devices
"""
return [d for d in self.devices.values() if status is None or d.payload.get('status') == status]
|
The devices in the given state, or all devices if the arg is None.
:param status: the state to match against.
:return: the devices
|
entailment
|
def getDevice(self, id):
"""
gets the named device.
:param id: the id.
:return: the device
"""
return next(iter([d for d in self.devices.values() if d.deviceId == id]), None)
|
gets the named device.
:param id: the id.
:return: the device
|
entailment
|
def _evictStaleDevices(self):
"""
A housekeeping function which runs in a worker thread and which evicts devices that haven't sent an update for a
while.
"""
while self.running:
expiredDeviceIds = [key for key, value in self.devices.items() if value.hasExpired()]
for key in expiredDeviceIds:
logger.warning("Device timeout, removing " + key)
del self.devices[key]
time.sleep(1)
# TODO send reset after a device fails
logger.warning("DeviceCaretaker is now shutdown")
|
A housekeeping function which runs in a worker thread and which evicts devices that haven't sent an update for a
while.
|
entailment
|
def scheduleMeasurement(self, measurementId, duration, start):
"""
Schedules the requested measurement session with all INITIALISED devices.
:param measurementId:
:param duration:
:param start:
:return: a dict of device vs status.
"""
# TODO subtract 1s from start and format
results = {}
for device in self.getDevices(RecordingDeviceStatus.INITIALISED.name):
logger.info('Sending measurement ' + measurementId + ' to ' + device.payload['serviceURL'])
try:
resp = self.httpclient.put(device.payload['serviceURL'] + '/measurements/' + measurementId,
json={'duration': duration, 'at': start.strftime(DATETIME_FORMAT)})
logger.info('Response for ' + measurementId + ' from ' + device.payload['serviceURL'] + ' is ' +
str(resp.status_code))
results[device] = resp.status_code
except Exception as e:
logger.exception(e)
results[device] = 500
return results
|
Schedules the requested measurement session with all INITIALISED devices.
:param measurementId:
:param duration:
:param start:
:return: a dict of device vs status.
|
entailment
|
def patch(self):
"""
Allows the UI to update parameters ensuring that all devices are kept in sync. Payload is json in TargetState
format.
:return:
"""
# TODO block until all devices have updated?
json = request.get_json()
logger.info("Updating target state with " + str(json))
self._targetStateController.updateTargetState(json)
return None, 200
|
Allows the UI to update parameters ensuring that all devices are kept in sync. Payload is json in TargetState
format.
:return:
|
entailment
|
def list_parse(name_list):
"""Parse a comma-separated list of values, or a filename (starting with @)
containing a list value on each line.
"""
if name_list and name_list[0] == '@':
value = name_list[1:]
if not os.path.exists(value):
log.warning('The file %s does not exist' % value)
return
try:
            with open(value, 'r') as f:
                return [v.strip() for v in f.readlines()]
except IOError as e:
log.warning('reading %s failed: %s; ignoring this file' %
(value, e))
else:
return [v.strip() for v in name_list.split(',')]
|
Parse a comma-separated list of values, or a filename (starting with @)
containing a list value on each line.
|
entailment
|
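Two illustrative calls, one inline list and one @file indirection; 'names.txt' is an assumed file with one value per line:

print(list_parse('a, b, c'))     # ['a', 'b', 'c']
print(list_parse('@names.txt'))  # the stripped lines of names.txt
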
def _mmInit(self):
"""Create the minimum match dictionary of keys"""
# cache references to speed up loop a bit
mmkeys = {}
mmkeysGet = mmkeys.setdefault
minkeylength = self.minkeylength
for key in self.data.keys():
# add abbreviations as short as minkeylength
# always add at least one entry (even for key="")
lenkey = len(key)
start = min(minkeylength,lenkey)
for i in range(start,lenkey+1):
mmkeysGet(key[0:i],[]).append(key)
self.mmkeys = mmkeys
|
Create the minimum match dictionary of keys
|
entailment
|
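A standalone sketch of the abbreviation index that _mmInit builds, assuming minkeylength=1 and two keys; each prefix maps to every full key it could stand for:

mmkeys = {}
for key in ('spam', 'spin'):
    for i in range(1, len(key) + 1):
        mmkeys.setdefault(key[:i], []).append(key)
print(mmkeys['sp'])   # ['spam', 'spin'] -> ambiguous, resolve() would raise
print(mmkeys['spa'])  # ['spam'] -> unique match
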
def resolve(self, key, keylist):
"""Hook to resolve ambiguities in selected keys"""
raise AmbiguousKeyError("Ambiguous key "+ repr(key) +
", could be any of " + str(sorted(keylist)))
|
Hook to resolve ambiguities in selected keys
|
entailment
|
def add(self, key, item):
"""Add a new key/item pair to the dictionary. Resets an existing
key value only if this is an exact match to a known key."""
mmkeys = self.mmkeys
if mmkeys is not None and not (key in self.data):
# add abbreviations as short as minkeylength
# always add at least one entry (even for key="")
lenkey = len(key)
start = min(self.minkeylength,lenkey)
# cache references to speed up loop a bit
mmkeysGet = mmkeys.setdefault
for i in range(start,lenkey+1):
mmkeysGet(key[0:i],[]).append(key)
self.data[key] = item
|
Add a new key/item pair to the dictionary. Resets an existing
key value only if this is an exact match to a known key.
|
entailment
|