code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def getDigestableArgs(Argv):
    r"""Split the given Argv into positional *Args and keyword **KwArgs.

    Positional args are everything before the first kwarg-shaped token;
    every token after that point must itself be kwarg-shaped, otherwise a
    HandledException is raised.
    """
    split_at = len(Argv)
    for pos, candidate in enumerate(Argv):
        if KWARG_VALIDATOR.search(candidate):
            split_at = pos
            break
    # Once kwargs begin, every remaining token must be a valid kwarg.
    for candidate in Argv[split_at:]:
        if not KWARG_VALIDATOR.search(candidate):
            raise HandledException('Could not parse the arg "%s".' % candidate)
    return Argv[:split_at], list2dict(Argv[split_at:])
def calc_q1_lz_v1(self):
    """Calculate the slow (lower zone) runoff response.

    Required control parameters: |K4|, |Gamma|
    Calculated fluxes sequence: |Q1|
    Updated state sequence: |LZ|

    Basic equations:
        dLZ/dt = -Q1
        Q1 = K4 * LZ^(1+Gamma)   for LZ > 0
        Q1 = 0                   for LZ <= 0
    """
    control = self.parameters.control.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    states = self.sequences.states.fastaccess
    # An empty (or negative) lower zone storage produces no outflow.
    if states.lz > 0.:
        fluxes.q1 = control.k4 * states.lz ** (1. + control.gamma)
    else:
        fluxes.q1 = 0.
    states.lz -= fluxes.q1
Required control parameters:
|K4|
|Gamma|
Calculated fluxes sequence:
|Q1|
Updated state sequence:
|LZ|
Basic equations:
:math:`\\frac{dLZ}{dt} = -Q1` \n
:math:`Q1 = \\Bigl \\lbrace
{
{K4 \\cdot LZ^{1+Gamma} \\ | \\ LZ > 0}
\\atop
{0 \\ | \\ LZ\\leq 0}
}`
Examples:
As long as the lower zone storage is negative...
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> simulationstep('12h')
>>> k4(0.2)
>>> gamma(0.0)
>>> states.lz = -2.0
>>> model.calc_q1_lz_v1()
>>> fluxes.q1
q1(0.0)
>>> states.lz
lz(-2.0)
...or zero, no slow discharge response occurs:
>>> states.lz = 0.0
>>> model.calc_q1_lz_v1()
>>> fluxes.q1
q1(0.0)
>>> states.lz
lz(0.0)
For storage values above zero the linear...
>>> states.lz = 2.0
>>> model.calc_q1_lz_v1()
>>> fluxes.q1
q1(0.2)
>>> states.lz
lz(1.8)
...or nonlinear storage routing equation applies:
>>> gamma(1.)
>>> states.lz = 2.0
>>> model.calc_q1_lz_v1()
>>> fluxes.q1
q1(0.4)
>>> states.lz
lz(1.6)
Note that the assumed length of the simulation step is only a
half day. Hence the effective value of the storage coefficient
is not 0.2 but 0.1:
>>> k4
k4(0.2)
>>> k4.value
0.1 |
def check_ordered(self):
    """Check whether the lines are ordered by genomic position.

    True if each chromosome's lines form one contiguous chunk and the
    range starts within a chromosome are non-decreasing, otherwise False.
    Lines without a range are ignored.

    :return: is it ordered?
    :rtype: bool
    """
    # The previous version unconditionally wrote "error unimplemented"
    # to stderr and called sys.exit(), making the (complete) logic below
    # unreachable; that dead guard has been removed.
    seen_chrs = set()
    curr_chr = None
    prevstart = 0
    for l in self._lines:
        if not l['rng']:
            continue
        if l['rng'].chr != curr_chr:
            # Entering a new chromosome chunk: it must not reappear later.
            prevstart = 0
            if l['rng'].chr in seen_chrs:
                return False
            curr_chr = l['rng'].chr
            seen_chrs.add(curr_chr)
        if l['rng'].start < prevstart:
            return False
        prevstart = l['rng'].start
    return True
:return: is it ordered?
:rtype: bool |
def poll(self, timeout=-1, maxevents=-1):
    """
    Poll for events

    :param timeout:
        Seconds to wait for events before giving up; the default, -1,
        means wait forever.  Unlike the underlying ``epoll_wait()`` this
        is a fractional number of **seconds**, not milliseconds.
    :param maxevents:
        Maximum number of events to report; the default (-1) selects a
        reasonably-sized maximum, identical to the one Python 3.4 uses.
    :returns:
        A list of (fd, events) tuples, or an empty list if the timeout
        elapsed.
    :raises ValueError:
        If :meth:`closed()` is True
    :raises OSError:
        If the underlying ``epoll_wait(2)`` fails. The error message
        matches those found in the manual page.
    """
    if self._epfd < 0:
        _err_closed()
    # epoll_wait(2) expects milliseconds; -1 (infinite) passes through.
    wait_timeout = timeout if timeout == -1 else int(timeout * 1000)
    event_count = FD_SETSIZE - 1 if maxevents == -1 else maxevents
    event_buffer = (epoll_event * event_count)()
    num_ready = epoll_wait(
        self._epfd, cast(byref(event_buffer), POINTER(epoll_event)),
        event_count, wait_timeout)
    ready = []
    for index in range(num_ready):
        ready.append((event_buffer[index].data.fd, event_buffer[index].events))
    return ready
:param timeout:
The amount of seconds to wait for events before giving up. The
default value, -1, represents infinity. Note that unlike the
underlying ``epoll_wait()`` timeout is a fractional number
representing **seconds**.
:param maxevents:
The maximum number of events to report. The default is a
reasonably-sized maximum, identical to the one selected by
Python 3.4.
:returns:
A list of (fd, events) that were reported or an empty list if the
timeout elapsed.
:raises ValueError:
If :meth:`closed()` is True
:raises OSError:
If the underlying ``epoll_wait(2)`` fails. The error message
matches those found in the manual page. |
def find_first(self, attr_name, resources, extra_prefix=''):
    """
    Returns the boto object for the first resource in ``resources`` that
    belongs to this stack. Uses the attribute specified by ``attr_name``
    to match the stack name.

    E.g. An RDS instance for a stack named ``foo`` might be named
    ``foo-mydb-fis8932ifs``. This call::

        find_first('id', conn.get_all_dbinstances())

    would return the boto.rds.dbinstance.DBInstance object whose ``id`` is
    ``foo-mydb-fis8932ifs``.

    Returns None if a matching resource is not found.

    If specified, ``extra_prefix`` is appended to the stack name prefix
    before matching.
    """
    if extra_prefix:
        prefix = '%s-%s-' % (self.name, extra_prefix)
    else:
        prefix = self.name + '-'
    matches = (res for res in resources
               if getattr(res, attr_name).startswith(prefix))
    return next(matches, None)
belongs to this stack. Uses the attribute specified by ``attr_name``
to match the stack name.
E.g. An RDS instance for a stack named ``foo`` might be named
``foo-mydb-fis8932ifs``. This call::
find_first('id', conn.get_all_dbinstances())
would return the boto.rds.dbinstance.DBInstance object whose ``id`` is
``foo-mydb-fis8932ifs``.
Returns None if a matching resource is not found.
If specified, ``extra_prefix`` is appended to the stack name prefix
before matching. |
def geocode_addresses(self, project_id, dataset_id, address_field,
                      geometry_field, **extra_params):
    """
    Geocode addresses in a dataset. The dataset must have a string field
    with the addresses to geocode and a geometry field (points) for the
    geocoding results.

    :param project_id: Must be a string.
    :param dataset_id: Must be a string.
    :param address_field: Name of the address field in the dataset.
    :param geometry_field: Name of the geometry field in the dataset.
    :param extra_params: Dictionary to filter the Geocoding response.
        For example: {'country':'PE'}
        More information:
        https://developers.google.com/maps/documentation/geocoding/intro#ComponentFiltering
    """
    project_url = ('/projects/{project_id}'
                   ).format(project_id=project_id)
    dataset_url = ('{project_url}/datasets/{dataset_id}'
                   ).format(project_url=project_url,
                            dataset_id=dataset_id)
    project_query_url = ('{project_url}/sql'
                        ).format(project_url=project_url)
    dataset_data = self.get(dataset_url)
    dataset_count = dataset_data['feature_count']
    print('%d rows to process' % dataset_count)
    # Rough estimate: ~2 seconds per row — TODO confirm against API rate.
    print('Estimated time: %d minutes' % (dataset_count * 2 / 60))
    start = datetime.now()
    print('Started at %s hrs' % start.strftime('%H:%M'))
    get_query = ('SELECT {address_column}, amigo_id '
                 'FROM dataset_{dataset_id}'
                 ).format(address_column=address_field,
                          dataset_id=dataset_id)
    rows = []
    print('Exporting addresses...')
    # Page through the dataset 1000 rows at a time.
    for i in range(0, dataset_count, 1000):
        response = self.get(
            project_query_url,
            params={
                'query': get_query,
                'offset': i,
                'limit': 1000
            }
        )
        dataset_rows = response['data']
        rows.extend(dataset_rows)
    print('Done!')
    print('Geocoding addresses...')
    geocoder_url = '/me/geocoder/search'
    geocoder_params = {'focus.point.lat': 0, 'focus.point.lon': 0}
    # Build the component filter string, e.g. "country:PE|region:x";
    # the trailing '|' is stripped below.
    components = ''
    for key, value in extra_params.items():
        components += '{key}:{value}|'.format(key=key, value=value)
    geocoder_params['components'] = components[:-1]

    def geocode_address(row_data):
        # Geocode one row; returns a SQL VALUES fragment (with trailing
        # comma) on success, or '' when the geocoder call fails.
        address = row_data[address_field]
        amigo_id = row_data['amigo_id']
        geocoder_params['text'] = address
        geocoder_result = self.get(geocoder_url,
                                   params=geocoder_params,
                                   stream=True)
        if geocoder_result.status_code == 200:
            # Takes the first (best) geocoder feature only.
            coordinates = json.loads(geocoder_result.text)[
                'features'][0]['geometry']['coordinates']
            lng = str(coordinates[0])
            lat = str(coordinates[1])
            return ("('{amigo_id}', "
                    "ST_SetSRID(ST_MakePoint({lng}, {lat}), 4326)),"
                    ).format(amigo_id=amigo_id,
                             lng=lng,
                             lat=lat)
        return ''
    processed = 0
    steps = 30
    # Geocode in batches of `steps` rows, one gevent greenlet per row,
    # then push each batch back with a single UPDATE.
    for i in range(0, len(rows), steps):
        rows_to_geocode = rows[i: i + steps]
        threads = []
        for row in rows_to_geocode:
            threads.append(gevent.spawn(geocode_address,
                                        row))
        gevent.joinall(threads)
        values = ''.join([thread.value for thread in threads])
        if values != '':
            data = {
                'query': ('UPDATE dataset_{dataset_id} as d '
                          'SET {geo_column} = c.{geo_column} '
                          'FROM (values {values}) '
                          'as c(amigo_id, {geo_column}) '
                          'WHERE c.amigo_id = d.amigo_id'
                          ).format(dataset_id=dataset_id,
                                   geo_column=geometry_field,
                                   values=values[:-1])
            }
            self.post(project_query_url, data=data)
        processed += len(rows_to_geocode)
        print('%d%%' % (float(processed) / dataset_count * 100))
    # Count how many rows actually received a geometry.
    count_query = ('SELECT count(*) '
                   'FROM dataset_{dataset_id} '
                   'WHERE {geo_column} IS NOT NULL'
                   ).format(dataset_id=dataset_id,
                            geo_column=geometry_field)
    points_count = self.get(
        project_query_url,
        params={'query': count_query})['data'][0]['count']
    print('Done!')
    print('Finished at %s hrs' % datetime.now().strftime('%H:%M'))
    print('Success rate: %d of %d points created' %
          (points_count, dataset_count))
    total_time = datetime.now() - start
    print('Total time: %s' % total_time)
    average_time = total_time.total_seconds() / dataset_count
    print('Average time per request: %.3f seconds' % average_time)
with the addresses to geocode and a geometry field (points) for the
geocoding results.
:param project_id: Must be a string.
:param dataset_id: Must be a string.
:param address_field: Name of the address field in the dataset.
:param geometry_field: Name of the geometry field in the dataset.
:param extra_params: Dictionary to filter the Geocoding response.
For example: {'country':'PE'}
More information:
https://developers.google.com/maps/documentation/geocoding/intro#ComponentFiltering |
def result_sort(result_list, start_index=0):
    """Sorts a list of results in O(n) in place (since every run is unique).

    Counting-sort style bucketing by run index; assumes run indices in
    the sorted portion are unique.

    :param result_list: List of tuples ``[(run_idx, res), ...]``
    :param start_index:
        Index with which to start; every entry before `start_index` is
        ignored.
    :return: The (in-place sorted) ``result_list``.
    """
    if len(result_list) < 2:
        return result_list
    to_sort = result_list[start_index:]
    if not to_sort:
        # start_index is at/past the end: nothing to sort. Previously
        # this crashed with ValueError on min()/max() of an empty list.
        return result_list
    run_indices = [entry[0] for entry in to_sort]
    minimum = min(run_indices)
    maximum = max(run_indices)
    # Bucket each entry by its run index, offset to start at zero.
    sorted_list = [None] * (maximum - minimum + 1)
    for elem in to_sort:
        sorted_list[elem[0] - minimum] = elem
    # Write the non-empty buckets back in order.
    idx_count = start_index
    for elem in sorted_list:
        if elem is not None:
            result_list[idx_count] = elem
            idx_count += 1
    return result_list
:param result_list: List of tuples [(run_idx, res), ...]
:param start_index: Index with which to start, every entry before `start_index` is ignored |
def is_premium(self, media_type):
    """Return whether the session is premium for a given media type.

    @param str media_type Should be one of ANDROID.MEDIA_TYPE_*
    @return bool
    """
    # Only a logged-in session can be premium for any media type.
    return bool(self.logged_in and media_type in self._user_data['premium'])
@param str media_type Should be one of ANDROID.MEDIA_TYPE_*
@return bool |
def int_gps_time_to_str(t):
    """Takes an integer GPS time, either given as int or lal.LIGOTimeGPS, and
    converts it to a string. If a LIGOTimeGPS with nonzero decimal part is
    given, raises a ValueError."""
    if isinstance(t, int):
        return str(t)
    if isinstance(t, float):
        # Accept floats only when they carry no fractional part.
        truncated = int(t)
        if abs(t - truncated) > 0.:
            raise ValueError('Need an integer GPS time, got %s' % str(t))
        return str(truncated)
    if isinstance(t, lal.LIGOTimeGPS):
        if t.gpsNanoSeconds != 0:
            raise ValueError('Need an integer GPS time, got %s' % str(t))
        return str(t.gpsSeconds)
    raise ValueError("Didn't understand input type {}".format(type(t)))
converts it to a string. If a LIGOTimeGPS with nonzero decimal part is
given, raises a ValueError. |
def show(closeToo=False):
    """Alternative to pylab.show() that updates the IPython window."""
    current_figure = pylab.gcf()
    IPython.display.display(current_figure)
    if closeToo:
        # Release every open figure once it has been rendered.
        pylab.close('all')
def _init_map(self):
    """stub

    Initialise the record map by letting each answer-form record mixin
    contribute its fields, then defer to the normal MRO chain.
    """
    # Each mixin adds its own entries to the shared map; the explicit
    # calls bypass the MRO so all three run, then super() continues it.
    MultiChoiceAnswerFormRecord._init_map(self)
    FilesAnswerFormRecord._init_map(self)
    FeedbackAnswerFormRecord._init_map(self)
    super(MultiChoiceFeedbackAndFilesAnswerFormRecord, self)._init_map()
def random_subset_ids_by_count(self, count_per_class=1):
    """
    Returns a random subset of sample ids of specified size by count,
    within each class.

    Parameters
    ----------
    count_per_class : int
        Exact number of samples per each class.

    Returns
    -------
    subset : list
        Combined list of sample ids from all classes.
    """
    class_sizes = self.class_sizes
    subsets = list()
    if count_per_class < 1:
        warnings.warn('At least one sample must be selected from each class')
        return list()
    elif count_per_class >= self.num_samples:
        # Return an actual copy, as the warning promises.
        warnings.warn('All samples requested - returning a copy!')
        return list(self.keys)
    for class_id, class_size in class_sizes.items():
        # samples belonging to the class
        this_class = self.keys_with_value(self.classes, class_id)
        # shuffling the sample order; shuffling works in-place!
        random.shuffle(this_class)
        # clipping the requested count to [0, class_size]; previously this
        # value was computed but the unclipped count was used in the slice
        subset_size_this_class = max(0, min(class_size, count_per_class))
        if subset_size_this_class < 1:
            # warning if none were selected
            warnings.warn('No subjects from class {} were selected.'.format(class_id))
        else:
            subsets.extend(this_class[:subset_size_this_class])
    if subsets:
        return subsets
    warnings.warn('Zero samples were selected. Returning an empty list!')
    return list()
within each class.
Parameters
----------
count_per_class : int
Exact number of samples per each class.
Returns
-------
subset : list
Combined list of sample ids from all classes. |
def remove(name, **kwargs):
    '''
    Remove system rc configuration variables

    CLI Example:

    .. code-block:: bash

        salt '*' sysrc.remove name=sshd_enable
    '''
    cmd_parts = ['sysrc -v']
    if 'file' in kwargs:
        cmd_parts.append(' -f ' + kwargs['file'])
    if 'jail' in kwargs:
        cmd_parts.append(' -j ' + kwargs['jail'])
    cmd_parts.append(' -x ' + name)
    sysrcs = __salt__['cmd.run'](''.join(cmd_parts))
    # sysrc(8) reports missing variables on its output stream.
    if "sysrc: unknown variable" in sysrcs:
        raise CommandExecutionError(sysrcs)
    return name + " removed"
CLI Example:
.. code-block:: bash
salt '*' sysrc.remove name=sshd_enable |
def RegisterDefinition(self, data_type_definition):
    """Registers a data type definition.

    The data type definitions are identified based on their lower case name.

    Args:
      data_type_definition (DataTypeDefinition): data type definitions.

    Raises:
      KeyError: if data type definition is already set for the corresponding
          name.
    """
    definition_name = data_type_definition.name
    name_lower = definition_name.lower()
    if name_lower in self._definitions:
        raise KeyError('Definition already set for name: {0:s}.'.format(
            definition_name))
    if definition_name in self._aliases:
        raise KeyError('Alias already set for name: {0:s}.'.format(
            definition_name))
    for alias in data_type_definition.aliases:
        if alias in self._aliases:
            raise KeyError('Alias already set for name: {0:s}.'.format(alias))
    self._definitions[name_lower] = data_type_definition
    for alias in data_type_definition.aliases:
        self._aliases[alias] = name_lower
    # Format definitions are tracked separately for later enumeration.
    if data_type_definition.TYPE_INDICATOR == definitions.TYPE_INDICATOR_FORMAT:
        self._format_definitions.append(name_lower)
The data type definitions are identified based on their lower case name.
Args:
data_type_definition (DataTypeDefinition): data type definitions.
Raises:
KeyError: if data type definition is already set for the corresponding
name. |
def helper(*commands):
    """Decorate a function to be the helper function of commands.

    Arguments:
        commands: Names of command that should trigger this function object.

    ---------------------------
    Interface of helper methods:

        @helper('some-command')
        def help_foo(self, args):
            '''
            Arguments:
                args: A list of arguments.
            Returns:
                A string that is the help message.
            '''
            pass
    """
    def attach_targets(func):
        # Record the triggering command names on the function object.
        func.__help_targets__ = list(commands)
        return func
    return attach_targets
Arguments:
commands: Names of command that should trigger this function object.
---------------------------
Interface of helper methods:
@helper('some-command')
def help_foo(self, args):
'''
Arguments:
args: A list of arguments.
Returns:
A string that is the help message.
'''
pass |
def load_and_parse(self):
    """
    Load the metrics file from ``self.file_path`` and parse it as JSON.

    Stores the parsed result on ``self.metrics``.
    """
    # Use a context manager so the file handle is always closed; the
    # previous implementation opened the file and never closed it.
    with open(self.file_path, "r") as metrics_file:
        self.metrics = json.load(metrics_file)
def _set_dpod(self, v, load=False):
    """
    Setter method for dpod, mapped from YANG variable /dpod (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_dpod is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_dpod() directly.
    """
    # Unwrap a typed value back to its underlying type before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the value into the generated YANG container class.
        t = YANGDynClass(v,base=dpod.dpod, is_container='container', presence=False, yang_name="dpod", rest_name="dpod", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Manage and display DPOD license assignments.\nUsage: dpod [slot/port] [reserve|release]', u'display-when': u'(/c:capabilities/c:license/c:dpod_display = "true")'}}, namespace='urn:brocade.com:mgmt:brocade-license', defining_module='brocade-license', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """dpod must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=dpod.dpod, is_container='container', presence=False, yang_name="dpod", rest_name="dpod", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Manage and display DPOD license assignments.\nUsage: dpod [slot/port] [reserve|release]', u'display-when': u'(/c:capabilities/c:license/c:dpod_display = "true")'}}, namespace='urn:brocade.com:mgmt:brocade-license', defining_module='brocade-license', yang_type='container', is_config=True)""",
        })
    self.__dpod = t
    # Notify the backend, if the generated class provides a hook.
    if hasattr(self, '_set'):
        self._set()
If this variable is read-only (config: false) in the
source YANG file, then _set_dpod is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dpod() directly. |
def build_clustbits(data, ipyclient, force):
    """
    Reconstitutes clusters from .utemp and htemp files and writes them
    to chunked files for aligning in muscle.

    :param data: assembly object carrying tmpdir/dirs/name attributes.
    :param ipyclient: ipyparallel client used to dispatch the jobs.
    :param force: when True, re-sort the utemp file even if a sorted
        copy already exists.
    """
    ## If you run this step then we clear all tmp .fa and .indel.h5 files
    if os.path.exists(data.tmpdir):
        shutil.rmtree(data.tmpdir)
    os.mkdir(data.tmpdir)
    ## parallel client
    lbview = ipyclient.load_balanced_view()
    start = time.time()
    printstr = " building clusters | {} | s6 |"
    elapsed = datetime.timedelta(seconds=int(time.time()-start))
    progressbar(3, 0, printstr.format(elapsed), spacer=data._spacer)
    uhandle = os.path.join(data.dirs.across, data.name+".utemp")
    usort = os.path.join(data.dirs.across, data.name+".utemp.sort")
    # async1 stays "" when sorting is skipped; see the error check below.
    async1 = ""
    ## skip usorting if not force and already exists
    if not os.path.exists(usort) or force:
        ## send sort job to engines. Sorted seeds allows us to work through
        ## the utemp file one locus at a time instead of reading all into mem.
        LOGGER.info("building reads file -- loading utemp file into mem")
        async1 = lbview.apply(sort_seeds, *(uhandle, usort))
        while 1:
            elapsed = datetime.timedelta(seconds=int(time.time()-start))
            progressbar(3, 0, printstr.format(elapsed), spacer=data._spacer)
            if async1.ready():
                break
            else:
                time.sleep(0.1)
    ## send count seeds job to engines.
    async2 = lbview.apply(count_seeds, usort)
    while 1:
        elapsed = datetime.timedelta(seconds=int(time.time()-start))
        progressbar(3, 1, printstr.format(elapsed), spacer=data._spacer)
        if async2.ready():
            break
        else:
            time.sleep(0.1)
    ## wait for both to finish while printing progress timer
    nseeds = async2.result()
    ## send the clust bit building job to work and track progress
    async3 = lbview.apply(sub_build_clustbits, *(data, usort, nseeds))
    while 1:
        elapsed = datetime.timedelta(seconds=int(time.time()-start))
        progressbar(3, 2, printstr.format(elapsed), spacer=data._spacer)
        if async3.ready():
            break
        else:
            time.sleep(0.1)
    elapsed = datetime.timedelta(seconds=int(time.time()-start))
    progressbar(3, 3, printstr.format(elapsed), spacer=data._spacer)
    print("")
    ## check for errors
    for job in [async1, async2, async3]:
        try:
            if not job.successful():
                raise IPyradWarningExit(job.result())
        except AttributeError:
            ## If we skip usorting then async1 == "" so the call to
            ## successful() raises, but we can ignore it.
            pass
to chunked files for aligning in muscle. |
def _compute_ogg_page_crc(page):
    """ Compute CRC of an Ogg page. """
    # The CRC field itself must be zeroed out before checksumming.
    crc_start = OGG_FIRST_PAGE_HEADER_CRC_OFFSET
    crc_end = crc_start + OGG_FIRST_PAGE_HEADER_CRC.size
    zeroed_page = (page[:crc_start] +
                   b"\x00" * OGG_FIRST_PAGE_HEADER_CRC.size +
                   page[crc_end:])
    return ogg_page_crc(zeroed_page)
def update(self, capacity=values.unset, available=values.unset):
    """
    Update the WorkerChannelInstance

    :param unicode capacity: The total number of Tasks worker should handle for this TaskChannel type.
    :param bool available: Toggle the availability of the WorkerChannel.

    :returns: Updated WorkerChannelInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.worker.worker_channel.WorkerChannelInstance
    """
    # values.of() drops unset entries from the request payload.
    request_data = values.of({
        'Capacity': capacity,
        'Available': available,
    })
    payload = self._version.update('POST', self._uri, data=request_data)
    return WorkerChannelInstance(
        self._version,
        payload,
        workspace_sid=self._solution['workspace_sid'],
        worker_sid=self._solution['worker_sid'],
        sid=self._solution['sid'],
    )
:param unicode capacity: The total number of Tasks worker should handle for this TaskChannel type.
:param bool available: Toggle the availability of the WorkerChannel.
:returns: Updated WorkerChannelInstance
:rtype: twilio.rest.taskrouter.v1.workspace.worker.worker_channel.WorkerChannelInstance |
def debug_mode(self, toggle):
    """
    Toggle debug mode for more detailed output

        obj.debug_mode(True)  - Turn debug mode on
        obj.debug_mode(False) - Turn debug mode off
    """
    level = logging.DEBUG if toggle else logging.ERROR
    self.log.setLevel(level)
obj.debug_mode(True) - Turn debug mode on
obj.debug_mode(False) - Turn debug mode off |
def httperror_handler(error):
    """Format error responses properly, return the response body.

    This function can be attached to the Bottle instance as the
    default_error_handler function. It is also used by the
    FormatExceptionMiddleware.

    :param error: a bottle.HTTPError being rendered.
    :returns: the encoded response body (list with one bytes element).
    """
    status_code = error.status_code or 500
    output = {
        'code': status_code,
        'message': error.body or UNEXPECTED_ERROR,
        'reason': bottle.HTTP_CODES.get(status_code) or None,
    }
    if bottle.DEBUG:
        # Debug mode only: expose exception/traceback details to clients.
        LOG.warning("Debug-mode server is returning traceback and error "
                    "details in the response with a %s status.",
                    error.status_code)
        if error.exception:
            output['exception'] = repr(error.exception)
        else:
            if any(sys.exc_info()):
                output['exception'] = repr(sys.exc_info()[1])
            else:
                output['exception'] = None
        if error.traceback:
            output['traceback'] = error.traceback
        else:
            if any(sys.exc_info()):
                # Otherwise, format_exc() returns "None\n"
                # which is pretty silly.
                output['traceback'] = traceback.format_exc()
            else:
                output['traceback'] = None
    # overwrite previous body attr with json
    if isinstance(output['message'], bytes):
        output['message'] = output['message'].decode(
            'utf-8', errors='replace')
    # Default type and writer to json.
    accept = bottle.request.get_header('accept') or 'application/json'
    writer = functools.partial(
        json.dumps, sort_keys=True, indent=4)
    error.set_header('Content-Type', 'application/json')
    if 'json' not in accept:
        if 'yaml' in accept:
            if not yaml:
                LOG.warning("Yaml requested but pyyaml is not installed.")
            else:
                error.set_header('Content-Type', 'application/x-yaml')
                writer = functools.partial(
                    yaml.safe_dump,
                    default_flow_style=False,
                    indent=4)
        # html could be added here.
    error.body = [writer(output).encode('utf8')]
    return error.body
This function can be attached to the Bottle instance as the
default_error_handler function. It is also used by the
FormatExceptionMiddleware. |
def shared_options(rq):
    """Default class options to pass to the CLI commands."""
    return dict(
        url=rq.redis_url,
        config=None,
        worker_class=rq.worker_class,
        job_class=rq.job_class,
        queue_class=rq.queue_class,
        connection_class=rq.connection_class,
    )
def node_radius(self, node):
    """
    Computes the radial position of the node.
    """
    # Radial distance grows linearly with the node's index.
    scaled_offset = self.get_idx(node) * self.scale
    return scaled_offset + self.internal_radius
def gradient(self):
    r"""Gradient of the log of the marginal likelihood.

    Returns
    -------
    dict
        Map between variables to their gradient values.
    """
    self._update_approx()
    raw = self._ep.lml_derivatives(self._X)
    inv_delta = exp(-self.logitdelta)
    scale = exp(self.logscale)
    # Chain rule through the logit/log reparameterisations.
    return {
        "logitdelta": raw["delta"] * (inv_delta / (1 + inv_delta)) / (1 + inv_delta),
        "logscale": raw["scale"] * scale,
        "beta": raw["mean"],
    }
Returns
-------
dict
Map between variables to their gradient values. |
def is_mouse_over(self, event):
    """
    Check whether a MouseEvent is over this scroll bar.

    :param event: The MouseEvent to check.
    :returns: True if the mouse event is over the scroll bar.
    """
    on_column = event.x == self._x
    within_rows = self._y <= event.y < self._y + self._height
    return on_column and within_rows
def load_saved_records(self, status, records):
    """Load ALDB records from a set of saved records.

    :param status: ALDBStatus value (or a raw value coercible to it).
    :param records: mapping of memory address -> saved record dict.
    """
    if isinstance(status, ALDBStatus):
        self._status = status
    else:
        # Coerce a raw persisted value back into the enum.
        self._status = ALDBStatus(status)
    for mem_addr in records:
        rec = records[mem_addr]
        # Missing fields default to zero / the null device address.
        control_flags = int(rec.get('control_flags', 0))
        group = int(rec.get('group', 0))
        rec_addr = rec.get('address', '000000')
        data1 = int(rec.get('data1', 0))
        data2 = int(rec.get('data2', 0))
        data3 = int(rec.get('data3', 0))
        self[int(mem_addr)] = ALDBRecord(int(mem_addr), control_flags,
                                         group, rec_addr,
                                         data1, data2, data3)
    if self._status == ALDBStatus.LOADED:
        # Track the highest memory address as the first record address.
        keys = list(self._records.keys())
        keys.sort(reverse=True)
        first_key = keys[0]
        self._mem_addr = first_key
def FromMicroseconds(self, micros):
    """Converts microseconds since epoch to Timestamp."""
    whole_seconds, leftover_micros = divmod(micros, _MICROS_PER_SECOND)
    self.seconds = whole_seconds
    self.nanos = leftover_micros * _NANOS_PER_MICROSECOND
def get_details(self, language=None):
    """Retrieves full information on the place matching the place_id.

    Further attributes will be made available on the instance once this
    method has been invoked.

    keyword arguments:
      language -- The language code, indicating in which language the
          results should be returned, if possible. This value defaults
          to the language that was used to generate the
          GooglePlacesSearchResult instance.
    """
    # Lazy fetch: only hit the Places API the first time.
    if self._details is None:
        if language is None:
            try:
                # Fall back to the language of the originating query.
                language = self._query_instance._request_params['language']
            except KeyError:
                language = lang.ENGLISH
        self._details = _get_place_details(
            self.place_id, self._query_instance.api_key,
            self._query_instance.sensor, language=language)
Further attributes will be made available on the instance once this
method has been invoked.
keyword arguments:
language -- The language code, indicating in which language the
results should be returned, if possible. This value defaults
to the language that was used to generate the
GooglePlacesSearchResult instance. |
def get_client(self, name):
    """Like :meth:`.get`, but only mechanisms inheriting
    :class:`ClientMechanism` will be returned.

    Args:
        name: The SASL mechanism name.

    Returns:
        The mechanism object or ``None``
    """
    candidate = self.get(name)
    if isinstance(candidate, ClientMechanism):
        return candidate
    return None
:class:`ClientMechanism` will be returned.
Args:
name: The SASL mechanism name.
Returns:
The mechanism object or ``None`` |
def ICALImporter(ctx, filename, all, owner, calendar, create_calendar, clear_calendar, dry, execfilter):
    """Calendar Importer for iCal (ics) files

    Resolves owner and target calendar, optionally clears existing
    events, parses the ics file and bulk-creates event objects.
    """
    log('iCal importer running')
    objectmodels = ctx.obj['db'].objectmodels
    # Resolve the owner either by name or by uuid.
    if objectmodels['user'].count({'name': owner}) > 0:
        owner_object = objectmodels['user'].find_one({'name': owner})
    elif objectmodels['user'].count({'uuid': owner}) > 0:
        owner_object = objectmodels['user'].find_one({'uuid': owner})
    else:
        log('User unknown. Specify either uuid or name.', lvl=warn)
        return
    log('Found user')
    # Resolve the target calendar by name or uuid, optionally creating it.
    if objectmodels['calendar'].count({'name': calendar}) > 0:
        calendar = objectmodels['calendar'].find_one({'name': calendar})
    # NOTE(review): this counts by {'uuid': owner} but fetches by
    # {'uuid': calendar} — looks like a copy-paste bug; confirm intent.
    elif objectmodels['calendar'].count({'uuid': owner}) > 0:
        calendar = objectmodels['calendar'].find_one({'uuid': calendar})
    elif create_calendar:
        calendar = objectmodels['calendar']({
            'uuid': std_uuid(),
            'name': calendar
        })
    else:
        log('Calendar unknown and no --create-calendar specified. Specify either uuid or name of an existing calendar.',
            lvl=warn)
        return
    log('Found calendar')
    if clear_calendar is True:
        log('Clearing calendar events')
        for item in objectmodels['event'].find({'calendar': calendar.uuid}):
            item.delete()
    with open(filename, 'rb') as file_object:
        caldata = Calendar.from_ical(file_object.read())
    # iCal fields to copy, tagged with how to convert them
    # ('str' -> plain string, 'dt' -> ISO-formatted datetime).
    keys = {
        'class': 'str',
        'created': 'dt',
        'description': 'str',
        'dtstart': 'dt',
        'dtend': 'dt',
        'timestamp': 'dt',
        'modified': 'dt',
        'location': 'str',
        'status': 'str',
        'summary': 'str',
        'uid': 'str'
    }
    # Renames applied when storing onto the event object.
    mapping = {
        'description': 'summary',
        'summary': 'name'
    }
    imports = []

    def ical_import_filter(original, logfacilty):
        # Default no-op filter; replaced below when --execfilter is given.
        log('Passthrough filter')
        return original
    if execfilter is not None:
        import os
        # Load a user-supplied module and use its ical_import_filter hook.
        textFilePath = os.path.abspath(os.path.join(os.path.curdir, execfilter))
        textFileFolder = os.path.dirname(textFilePath)
        from importlib.machinery import SourceFileLoader
        filter_module = SourceFileLoader("importfilter", textFilePath).load_module()
        ical_import_filter = filter_module.ical_import_filter
    for event in caldata.walk():
        if event.name == 'VEVENT':
            log(event, lvl=verbose, pretty=True)
            initializer = {
                'uuid': std_uuid(),
                'calendar': calendar.uuid,
            }
            for item in keys:
                thing = event.get(item, None)
                if thing is None:
                    # Missing fields are stored as a "NO-<field>" marker.
                    thing = 'NO-' + item
                else:
                    if keys[item] == 'str':
                        thing = str(thing)
                    else:
                        thing = parser.parse(str(thing.dt))
                        thing = thing.isoformat()
                if item in mapping:
                    item_assignment = mapping[item]
                else:
                    item_assignment = item
                initializer[item_assignment] = thing
            new_event = objectmodels['event'](initializer)
            new_event = ical_import_filter(new_event, log)
            imports.append(new_event)
            log(new_event, lvl=debug)
    for ev in imports:
        log(ev.summary)
    if not dry:
        log('Bulk creating events')
        objectmodels['event'].bulk_create(imports)
        calendar.save()
    else:
        log('Dry run - nothing stored.', lvl=warn)
def ystep(self):
    r"""Solve the :math:`\mathbf{y}` subproblem of the Augmented
    Lagrangian using the weighted :math:`\ell_1` proximal operator."""
    threshold = (self.lmbda / self.rho) * self.wl1
    self.Y = sp.prox_l1(self.AX + self.U, threshold)
    super(ConvBPDN, self).ystep()
:math:`\mathbf{y}`. |
def time_to_first_byte(self):
    """
    Time to first byte of the page request in ms.

    Sums the non-'receive' timing phases of the first entry that returned
    HTTP 200; every entry before that one (e.g. redirects) contributes its
    full 'time' instead.  Returns None for the placeholder 'unknown' page.
    """
    # The unknown page is just a placeholder for entries with no page ID.
    # As such, it would not have a TTFB
    if self.page_id == 'unknown':
        return None
    ttfb = 0
    for entry in self.entries:
        if entry['response']['status'] == 200:
            # First successful response: TTFB ends when the first byte
            # arrives, so count every timing phase except 'receive',
            # then stop scanning entries.
            for k, v in iteritems(entry['timings']):
                if k != 'receive':
                    # presumably negative values mark unused phases
                    # (HAR convention, -1) — TODO confirm
                    if v > 0:
                        ttfb += v
            break
        else:
            ttfb += entry['time']
    return ttfb | Time to first byte of the page request in ms
def imatch(pattern, name):
    # type: (Text, Text) -> bool
    """Test whether a name matches a wildcard pattern (case insensitive).
    Arguments:
        pattern (str): A wildcard pattern, e.g. ``"*.py"``.
        name (str): A filename.
    Returns:
        bool: `True` if the filename matches the pattern.
    """
    cache_key = (pattern, False)
    if cache_key not in _PATTERN_CACHE:
        expression = "(?ms)" + _translate(pattern, case_sensitive=False) + r"\Z"
        _PATTERN_CACHE[cache_key] = re.compile(expression, re.IGNORECASE)
    return _PATTERN_CACHE[cache_key].match(name) is not None
Arguments:
pattern (str): A wildcard pattern, e.g. ``"*.py"``.
name (bool): A filename.
Returns:
bool: `True` if the filename matches the pattern. |
def rgb_to_yiq(rgb):
    """
    Convert an RGB color representation to a YIQ color representation.
    (r, g, b) :: r -> [0, 255]
                 g -> [0, 255]
                 b -> [0, 255]
    :param rgb: A tuple of three numeric values corresponding to the red, green, and blue value.
    :return: YIQ representation of the input RGB value.
    :rtype: tuple
    """
    scaled = tuple(channel / 255 for channel in rgb[:3])
    # Standard NTSC YIQ transform matrix, one row per output component.
    matrix = (
        (0.299, 0.587, 0.114),
        (0.596, -0.275, -0.321),
        (0.212, -0.528, 0.311),
    )
    y, i, q = (
        round(sum(weight * channel for weight, channel in zip(row, scaled)), 3)
        for row in matrix
    )
    return y, i, q
(r, g, b) :: r -> [0, 255]
g -> [0, 255]
b -> [0, 255]
:param rgb: A tuple of three numeric values corresponding to the red, green, and blue value.
:return: YIQ representation of the input RGB value.
:rtype: tuple |
def sspro8_results(self):
    """Parse the SSpro8 output file and return a dict of secondary structure compositions.

    Returns:
        dict: mapping of sequence ID to its secondary-structure string,
            parsed from the FASTA-formatted file at ``self.out_sspro8``
            (presumably 8-state SSpro8 codes — TODO confirm).
    """
    return ssbio.protein.sequence.utils.fasta.load_fasta_file_as_dict_of_seqs(self.out_sspro8) | Parse the SSpro8 output file and return a dict of secondary structure compositions.
def cookie_get(self, name):
    """
    Check for a cookie value by name.
    :param str name: Name of the cookie value to retrieve.
    :return: Returns the cookie value if it's set or None if it's not found.
    """
    if not hasattr(self, 'cookies'):
        return None
    # Single lookup instead of the previous get-then-get double lookup.
    morsel = self.cookies.get(name)
    if morsel:
        return morsel.value
    return None
:param str name: Name of the cookie value to retrieve.
:return: Returns the cookie value if it's set or None if it's not found. |
def load_commands(self, parser):
    """ Load commands of this profile.
    :param parser: argparse parser on which to add commands
    """
    seen = set()
    for entrypoint in self._get_entrypoints():
        # Each entrypoint name is prepared at most once.
        if entrypoint.name in seen:
            continue
        command_class = entrypoint.load()
        command_class(entrypoint.name, self, parser).prepare()
        seen.add(entrypoint.name)
:param parser: argparse parser on which to add commands |
def transmit(self, channel, message):
    """
    Send the message to Slack.
    :param channel: channel or user to whom the message should be sent.
        If a ``thread`` attribute is present, that thread ID is used.
    :param str message: message to send.
    """
    destination = self.slack.server.channels.find(channel)
    if not destination:
        # Not a channel: fall back to a direct-message channel for the user.
        destination = self._find_user_channel(username=channel)
    expanded = self._expand_references(message)
    destination.send_message(expanded, thread=getattr(channel, 'thread', None))
:param channel: channel or user to whom the message should be sent.
If a ``thread`` attribute is present, that thread ID is used.
:param str message: message to send. |
def top_sections(self):
    """
    The number of sections that touch the top side.
    Returns
    -------
    sections : int
        The number of sections on the top
    """
    first_row = self.text.partition('\n')[0]
    # N '+' separators on the border delimit N - 1 sections.
    return first_row.count('+') - 1
Returns
-------
sections : int
The number of sections on the top |
def is_current_manager_equals_to(cls, pm):
    """Returns True if this package manager is usable, False otherwise."""
    # Cache the (boolean) answer on the class after the first probe.
    missing = object()
    cached = getattr(cls, 'works_result', missing)
    if cached is not missing:
        return cached
    result = bool(cls._try_get_current_manager() == pm)
    cls.works_result = result
    return result
def get_storage(self, script_hash, key, **kwargs):
    """ Returns the value stored in the storage of a contract script hash for a given key.
    :param script_hash: contract script hash
    :param key: key to look up in the storage
    :type script_hash: str
    :type key: str
    :return: value associated with the storage key (the raw RPC result is
        returned unchanged when it is empty/None)
    :rtype: bytearray
    """
    hexkey = binascii.hexlify(key.encode('utf-8')).decode('utf-8')
    hexresult = self._call(
        JSONRPCMethods.GET_STORAGE.value, params=[script_hash, hexkey, ], **kwargs)
    # BUGFIX: the previous assert/except-AssertionError control flow is
    # silently removed under `python -O`; use an explicit truthiness check.
    if hexresult:
        return bytearray(binascii.unhexlify(hexresult.encode('utf-8')))
    return hexresult
:param script_hash: contract script hash
:param key: key to look up in the storage
:type script_hash: str
:type key: str
:return: value associated with the storage key
:rtype: bytearray |
def create_dashboard(self, panel_file, data_sources=None, strict=True):
    """Upload a panel to Elasticsearch if it does not exist yet.
    If a list of data sources is specified, upload only those
    elements (visualizations, searches) that match that data source.
    :param panel_file: file name of panel (dashobard) to upload
    :param data_sources: list of data sources
    :param strict: only upload a dashboard if it is newer than the one already existing
    """
    es_enrich = self.conf['es_enrichment']['url']
    kibana_url = self.conf['panels']['kibiter_url']
    # Panels use different names for some data sources; whenever one of the
    # trigger sources was requested, also request its panel alias.
    aliases = (
        ({'pipermail', 'hyperkitty', 'groupsio', 'nntp'}, 'mbox'),
        ({'supybot'}, 'irc'),
        ({'google_hits'}, 'googlehits'),
        # stackexchange is called stackoverflow in panels
        ({'stackexchange'}, 'stackoverflow'),
        ({'phabricator'}, 'maniphest'),
    )
    if data_sources:
        for triggers, alias in aliases:
            if triggers.intersection(data_sources):
                data_sources = list(data_sources) + [alias]
    try:
        import_dashboard(es_enrich, kibana_url, panel_file, data_sources=data_sources, strict=strict)
    except ValueError:
        logger.error("%s does not include release field. Not loading the panel.", panel_file)
    except RuntimeError:
        logger.error("Can not load the panel %s", panel_file)
If a list of data sources is specified, upload only those
elements (visualizations, searches) that match that data source.
:param panel_file: file name of panel (dashobard) to upload
:param data_sources: list of data sources
:param strict: only upload a dashboard if it is newer than the one already existing |
def _check_configs(self):
    """
    Reloads the configuration files.
    """
    current = set(self._find_configs())
    tracked = set(self.configs)
    # Drop configurations that disappeared from disk...
    for path in tracked - current:
        self.log.debug("Compass configuration has been removed: " + path)
        del self.configs[path]
    # ...and register any newly discovered ones.
    for path in current - tracked:
        self.log.debug("Found new compass configuration: " + path)
        self.configs[path] = CompassConfig(path)
def yield_expr__26(self, yield_loc, exprs):
    """(2.6, 2.7, 3.0, 3.1, 3.2) yield_expr: 'yield' [testlist]"""
    if exprs is None:
        # Bare `yield` — the node covers only the keyword itself.
        return ast.Yield(value=None,
                         yield_loc=yield_loc, loc=yield_loc)
    return ast.Yield(value=exprs,
                     yield_loc=yield_loc, loc=yield_loc.join(exprs.loc))
def getAttributeData(self, name, channel=None):
    """Return the data of the attribute *name*, optionally scoped to *channel*.

    Thin wrapper around ``self._getNodeData`` using the attribute node type.
    """
    return self._getNodeData(name, self._ATTRIBUTENODE, channel) | Returns a attribut
def delete(self, *args, **kwargs):
    """
    Delete clonable relations first, since they may be
    objects that wouldn't otherwise be deleted.
    Calls super to actually delete the object.
    """
    # `skip_reverses` is consumed here so super() never sees it.
    if not kwargs.pop('skip_reverses', False):
        self._delete_reverses()
    return super(Cloneable, self).delete(*args, **kwargs)
objects that wouldn't otherwise be deleted.
Calls super to actually delete the object. |
def get_custom_annotations_for_alias(data_type):
    """
    Given a Stone data type, returns all custom annotations applied to it.
    """
    # Annotations attach only to Aliases (possibly wrapped in Nullable), and
    # aliases of aliases do not inherit them automatically, so walk the whole
    # alias chain and collect annotations from every link.
    annotations = []
    current, _ = unwrap_nullable(data_type)
    while is_alias(current):
        annotations += current.custom_annotations
        current, _ = unwrap_nullable(current.data_type)
    return annotations
def load_waypoints(self, filename):
    '''Load waypoints from a file and upload them all to the vehicle.

    :param filename: path of the waypoint file to load
    Prints an error and returns early if the file cannot be parsed.
    '''
    # Stamp the loader with the current MAVLink target before loading.
    self.wploader.target_system = self.target_system
    self.wploader.target_component = self.target_component
    try:
        self.wploader.load(filename)
    except Exception as msg:
        print("Unable to load %s - %s" % (filename, msg))
        return
    print("Loaded %u waypoints from %s" % (self.wploader.count(), filename))
    self.send_all_waypoints() | load waypoints from a file
def to_text(value, encoding='utf-8'):
    """Convert value to unicode, default encoding is utf-8
    :param value: Value to be converted
    :param encoding: Desired encoding
    """
    # Falsy values (None, '', 0, ...) all normalize to the empty string.
    if not value:
        return ''
    if isinstance(value, six.binary_type):
        return value.decode(encoding)
    if isinstance(value, six.text_type):
        return value
    return six.text_type(value)
:param value: Value to be converted
:param encoding: Desired encoding |
def legacy_decrypt(jwe, jwk, adata='', validate_claims=True,
        expiry_seconds=None):
    """ Decrypts a deserialized :class:`~jose.JWE`
    :param jwe: An instance of :class:`~jose.JWE`
    :param jwk: A `dict` representing the JWK required to decrypt the content
                of the :class:`~jose.JWE`.
    :param adata: Arbitrary string data used during encryption for additional
                  authentication.
    :param validate_claims: A `bool` indicating whether or not the `exp`, `iat`
                            and `nbf` claims should be validated. Defaults to
                            `True`.
    :param expiry_seconds: An `int` containing the JWT expiry in seconds, used
                           when evaluating the `iat` claim. Defaults to `None`,
                           which disables `iat` claim validation.
    :rtype: :class:`~jose.JWT`
    :raises: :class:`~jose.Expired` if the JWT has expired
    :raises: :class:`~jose.NotYetValid` if the JWT is not yet valid
    :raises: :class:`~jose.Error` if there is an error decrypting the JWE
    """
    protected_header, encrypted_key, iv, ciphertext, authentication_tag = map(
        b64decode_url, jwe)
    header = json_decode(protected_header)
    alg = header[HEADER_ALG]
    enc = header[HEADER_ENC]
    # decrypt cek
    encryption_key = _decrypt_key(encrypted_key, jwk, alg)
    # decrypt body
    ((_, decipher), _), ((hash_fn, _), mod) = JWA[enc]
    version = header.get(_TEMP_VER_KEY)
    if version:
        # BUGFIX: `mod.digest_size / 2` is float division on Python 3 and a
        # float is not a valid slice index; `//` matches Python 2 behavior.
        plaintext = decipher(ciphertext, encryption_key[-mod.digest_size // 2:],
                iv)
        hash = hash_fn(_jwe_hash_str(ciphertext, iv, adata, version),
                encryption_key[:-mod.digest_size // 2], mod=mod)
    else:
        plaintext = decipher(ciphertext, encryption_key[:-mod.digest_size], iv)
        hash = hash_fn(_jwe_hash_str(ciphertext, iv, adata, version),
                encryption_key[-mod.digest_size:], mod=mod)
    if not const_compare(auth_tag(hash), authentication_tag):
        raise Error('Mismatched authentication tags')
    if HEADER_ZIP in header:
        try:
            (_, decompress) = COMPRESSION[header[HEADER_ZIP]]
        except KeyError:
            raise Error('Unsupported compression algorithm: {}'.format(
                header[HEADER_ZIP]))
        plaintext = decompress(plaintext)
    claims = json_decode(plaintext)
    try:
        del claims[_TEMP_VER_KEY]
    except KeyError:
        # expected when decrypting legacy tokens
        pass
    _validate(claims, validate_claims, expiry_seconds)
    return JWT(header, claims)
:param jwe: An instance of :class:`~jose.JWE`
:param jwk: A `dict` representing the JWK required to decrypt the content
of the :class:`~jose.JWE`.
:param adata: Arbitrary string data used during encryption for additional
authentication.
:param validate_claims: A `bool` indicating whether or not the `exp`, `iat`
and `nbf` claims should be validated. Defaults to
`True`.
:param expiry_seconds: An `int` containing the JWT expiry in seconds, used
when evaluating the `iat` claim. Defaults to `None`,
which disables `iat` claim validation.
:rtype: :class:`~jose.JWT`
:raises: :class:`~jose.Expired` if the JWT has expired
:raises: :class:`~jose.NotYetValid` if the JWT is not yet valid
:raises: :class:`~jose.Error` if there is an error decrypting the JWE |
def get_argument_values(
    arg_defs,  # type: Union[Dict[str, GraphQLArgument], Dict]
    arg_asts,  # type: Optional[List[Argument]]
    variables=None,  # type: Optional[Dict[str, Union[List, Dict, int, float, bool, str, None]]]
):
    # type: (...) -> Dict[str, Any]
    """Prepares an object map of argument values given a list of argument
    definitions and list of argument AST nodes."""
    if not arg_defs:
        return {}
    # Index AST arguments by name for O(1) lookup per definition.
    if arg_asts:
        arg_ast_map = {
            arg.name.value: arg for arg in arg_asts
        }  # type: Dict[str, Argument]
    else:
        arg_ast_map = {}
    result = {}
    for name, arg_def in arg_defs.items():
        arg_type = arg_def.type
        arg_ast = arg_ast_map.get(name)
        if name not in arg_ast_map:
            # Argument omitted entirely: fall back to the default value, or
            # fail for non-null arguments (silently skipped otherwise).
            if arg_def.default_value is not None:
                result[arg_def.out_name or name] = arg_def.default_value
                continue
            elif isinstance(arg_type, GraphQLNonNull):
                # BUGFIX: the message previously read `type {arg_type}"`
                # with an unbalanced quote around the type name.
                raise GraphQLError(
                    'Argument "{name}" of required type "{arg_type}" was not provided.'.format(
                        name=name, arg_type=arg_type
                    ),
                    arg_asts,
                )
        elif isinstance(arg_ast.value, ast.Variable):  # type: ignore
            # Argument supplied via a variable reference.
            variable_name = arg_ast.value.name.value  # type: ignore
            if variables and variable_name in variables:
                result[arg_def.out_name or name] = variables[variable_name]
            elif arg_def.default_value is not None:
                result[arg_def.out_name or name] = arg_def.default_value
            elif isinstance(arg_type, GraphQLNonNull):
                # BUGFIX: same unbalanced-quote fix as above.
                raise GraphQLError(
                    'Argument "{name}" of required type "{arg_type}" provided the variable "${variable_name}" which was not provided'.format(
                        name=name, arg_type=arg_type, variable_name=variable_name
                    ),
                    arg_asts,
                )
            continue
        else:
            # Argument supplied as a literal value in the AST.
            value = value_from_ast(arg_ast.value, arg_type, variables)  # type: ignore
            if value is None:
                if arg_def.default_value is not None:
                    value = arg_def.default_value
                    result[arg_def.out_name or name] = value
            else:
                # We use out_name as the output name for the
                # dict if exists
                result[arg_def.out_name or name] = value
    return result
definitions and list of argument AST nodes. |
def remove_usb_device_source(self, id_p):
    """Removes a previously added USB device source.

    in id_p of type str
        The identifier used when the source was added.

    :raises TypeError: if ``id_p`` is not a string
    """
    # NOTE(review): `basestring` exists only on Python 2; presumably the
    # surrounding module provides a Python 3 shim — TODO confirm.
    if not isinstance(id_p, basestring):
        raise TypeError("id_p can only be an instance of type basestring")
    self._call("removeUSBDeviceSource",
               in_p=[id_p]) | Removes a previously added USB device source.
in id_p of type str
The identifier used when the source was added. |
def sigusr2_handler(self, unused_signum, unused_frame):
    """
    Handle SIGUSR2 signal. Call function which is defined in the
    **settings.SIGUSR2_HANDLER**.
    """
    # BUGFIX: previously guarded on `_sigusr1_handler_func` (copy-paste from
    # the SIGUSR1 handler), so SIGUSR2 handling depended on the wrong handler
    # being configured.
    if self._sigusr2_handler_func is not None:
        self._sigusr2_handler_func(self.context)
**settings.SIGUSR2_HANDLER**. |
def assign_default_log_values(self, fpath, line, formatter):
    '''Build the skeleton log record for one raw input line.

    Every field is populated with its default; the raw line, source file,
    formatter name and collector host are filled in from the arguments,
    and the timestamp is the current UTC time in ISO format.
    '''
    return {
        'id': None,
        'file': fpath,
        'host': self.HOST,
        'formatter': formatter,
        'event': 'event',
        'data': {},
        'raw': line,
        'timestamp': datetime.datetime.utcnow().isoformat(),
        'type': 'log',
        'level': 'debug',
        'error': False,
        'error_tb': '',
    }
>>> from pprint import pprint
>>> formatter = 'logagg.formatters.mongodb'
>>> fpath = '/var/log/mongodb/mongodb.log'
>>> line = 'some log line here'
>>> default_log = lc.assign_default_log_values(fpath, line, formatter)
>>> pprint(default_log) #doctest: +ELLIPSIS
{'data': {},
'error': False,
'error_tb': '',
'event': 'event',
'file': '/var/log/mongodb/mongodb.log',
'formatter': 'logagg.formatters.mongodb',
'host': '...',
'id': None,
'level': 'debug',
'raw': 'some log line here',
'timestamp': '...',
'type': 'log'} |
def getCovariance(self,normalize=True,i0=None,i1=None,pos0=None,pos1=None,chrom=None,center=True,unit=True,pos_cum0=None,pos_cum1=None,blocksize=None,X=None,**kw_args):
    """calculate the empirical genotype covariance in a region

    The region is given either by SNP indices (i0, i1), by chromosome
    positions (chrom, pos0, pos1), by cumulative positions
    (pos_cum0, pos_cum1), or by passing a genotype matrix X directly.
    When normalize is True, K is scaled so its mean diagonal is 1;
    otherwise it is divided by the number of SNPs used.
    """
    if X is not None:
        K=X.dot(X.T)
        Nsnp=X.shape[1]
    else:
        if (i0 is None) and (i1 is None) and ((pos0 is not None) & (pos1 is not None) & (chrom is not None)) or ((pos_cum0 is not None) & (pos_cum1 is not None)):
            # BUGFIX: `pose_cum1` was a typo for `pos_cum1` and raised a
            # NameError whenever this branch executed.
            i0,i1=self.getGenoIndex(pos0=pos0,pos1=pos1,chrom=chrom,pos_cum0=pos_cum0,pos_cum1=pos_cum1)
        [N,M]=self.genoM.shape
        if blocksize is None:
            blocksize=M
        if i0 is None:
            i0=0
        if i1 is None:
            i1=M
        nread = i0
        K=None
        Nsnp=i1-i0
        # Accumulate X X^T block by block to bound memory use.
        while nread<i1:
            thisblock=min(blocksize,i1-nread)
            X=self.getGenotypes(i0=nread,i1=(nread+thisblock),center=center,unit=unit,**kw_args)
            if K is None:
                K=X.dot(X.T)
            else:
                K+=X.dot(X.T)
            nread+=thisblock
    if normalize:
        K/=(K.diagonal().mean())
    else:#divide by number of SNPs in K
        K/=Nsnp
    return K
def add_document(self, key, url, **kwargs):
    """
    Adds document to record
    Args:
        key (string): document key
        url (string): document url
    Keyword Args:
        description (string): simple description
        fulltext (bool): mark if this is a full text
        hidden (bool): is document should be hidden
        material (string):
        original_url (string): original url
        filename (string): current url
    Returns: None
    """
    document = self._check_metadata_for_file(key=key, url=url, **kwargs)
    optional_fields = (
        'description',
        'fulltext',
        'hidden',
        'material',
        'original_url',
        'url',
        'filename',
    )
    # Copy only the optional fields that were supplied with a truthy value.
    document.update(
        (field, kwargs[field]) for field in optional_fields if kwargs.get(field)
    )
    if key_already_there(document, self.record.get('documents', ())):
        raise ValueError(
            'There\'s already a document with the key %s.'
            % document['key']
        )
    self._append_to('documents', document)
Args:
key (string): document key
url (string): document url
Keyword Args:
description (string): simple description
fulltext (bool): mark if this is a full text
hidden (bool): is document should be hidden
material (string):
original_url (string): original url
filename (string): current url
Returns: None |
def data(self, namespace):
    """
    Gets the thread.local data (dict) for a given namespace.
    Args:
        namespace (string): The namespace, or key, of the data dict.
    Returns:
        (dict)
    """
    assert namespace
    # setdefault performs the lookup-or-create in a single step, replacing
    # the previous check / create / store sequence.
    return self._data.setdefault(namespace, {})
Args:
namespace (string): The namespace, or key, of the data dict.
Returns:
(dict) |
def add_native(cls, name, func, ret, interp=None, send_interp=False):
    """Register the Python callable ``func`` as a native pfp function.

    The function becomes callable from template scripts under ``name`` and
    its return value is cast to the field class ``ret``.  Native functions
    must have the signature
    ``def func(params, ctxt, scope, stream, coord [, interp])``, the trailing
    interpreter argument being passed only when ``send_interp`` is True.
    The :any:`@native <pfp.native.native>` decorator wraps this method.

    :param basestring name: The name the function will be exposed as in the interpreter.
    :param function func: The native python function that will be referenced.
    :param type(pfp.fields.Field) ret: The field class that the return value should be cast to.
    :param pfp.interp.PfpInterp interp: The specific pfp interpreter to define the
        function in; the class-wide registry is used when None.
    :param bool send_interp: If true, the current pfp interpreter will be added
        as an argument to the function.
    """
    registry = cls._natives if interp is None else interp._natives
    registry[name] = functions.NativeFunction(name, func, ret, send_interp)
name ``name`` and return value ``ret`` so that it can be called from
within a template script.
.. note::
The :any:`@native <pfp.native.native>` decorator exists to simplify this.
All native functions must have the signature ``def func(params, ctxt, scope, stream, coord [,interp])``,
optionally allowing an interpreter param if ``send_interp`` is ``True``.
Example:
The example below defines a function ``Sum`` using the ``add_native`` method. ::
import pfp.fields
from pfp.fields import PYVAL
def native_sum(params, ctxt, scope, stream, coord):
return PYVAL(params[0]) + PYVAL(params[1])
pfp.interp.PfpInterp.add_native("Sum", native_sum, pfp.fields.Int64)
:param basestring name: The name the function will be exposed as in the interpreter.
:param function func: The native python function that will be referenced.
:param type(pfp.fields.Field) ret: The field class that the return value should be cast to.
:param pfp.interp.PfpInterp interp: The specific pfp interpreter the function should be defined in.
:param bool send_interp: If true, the current pfp interpreter will be added as an argument to the function. |
def show():
    """
    Show the modifiers and colors
    """
    modifiers = ('bold', 'dimmed', 'italic', 'underlined', 'inversed',
                 'concealed', 'struckthrough')
    colors = ('red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
    out = sys.stdout.write
    # modifiers, space separated, newline after the last one
    for style in modifiers[:-1]:
        out(getattr(colorful, style)(style) + ' ')
    out(getattr(colorful, modifiers[-1])(modifiers[-1]) + '\n')
    # foreground colors
    for color in colors[:-1]:
        out(getattr(colorful, color)(color) + ' ')
    out(colorful.white('white') + '\n')
    # background colors (the printed label stays the bare color name)
    for color in colors[:-1]:
        out(getattr(colorful, 'on_' + color)(color) + ' ')
    out(colorful.on_white('white') + '\n')
def autocrop(im, bgcolor):
    "Crop away a border of the given background color."
    rgb = im if im.mode == "RGB" else im.convert("RGB")
    # The bounding box of the difference against a solid background image
    # is the smallest region containing non-background pixels.
    background = Image.new("RGB", rgb.size, bgcolor)
    bbox = ImageChops.difference(rgb, background).getbbox()
    return rgb.crop(bbox) if bbox else rgb
def definition_to_message(
definition, message=None, table_of_contents=None, heading_level=None):
"""Helper function to render a definition to a message.
:param definition: A definition dictionary (see definitions package).
:type definition: dict
:param message: The message that the definition should be appended to.
:type message: parameters.message.Message
:param table_of_contents: Table of contents that the headings should be
included in.
:type message: parameters.message.Message
:param heading_level: Optional style to apply to the definition
heading. See HEADING_LOOKUPS
:type heading_level: int
:returns: Message
:rtype: str
"""
if message is None:
message = m.Message()
if table_of_contents is None:
table_of_contents = m.Message()
if heading_level:
_create_section_header(
message,
table_of_contents,
definition['name'].replace(' ', '-'),
definition['name'],
heading_level=heading_level)
else:
header = m.Paragraph(m.ImportantText(definition['name']))
message.add(header)
# If the definition has an icon, we put the icon and description side by
# side in a table otherwise just show the description as a paragraph
url = _definition_icon_url(definition)
if url is None:
message.add(m.Paragraph(definition['description']))
if 'citations' in definition:
_citations_to_message(message, definition)
else:
LOGGER.info('Creating mini table for definition description: ' + url)
table = m.Table(style_class='table table-condensed')
row = m.Row()
row.add(m.Cell(m.Image(url, **MEDIUM_ICON_STYLE)))
row.add(m.Cell(definition['description']))
table.add(row)
for citation in definition['citations']:
if citation['text'] in [None, '']:
continue
row = m.Row()
row.add(m.Cell(''))
if citation['link'] in [None, '']:
row.add(m.Cell(citation['text']))
else:
row.add(m.Cell(m.Link(citation['link'], citation['text'])))
table.add(row)
message.add(table)
url = _definition_screenshot_url(definition)
if url:
message.add(m.Paragraph(m.Image(url), style_class='text-center'))
# types contains e.g. hazard_all
if 'types' in definition:
for sub_definition in definition['types']:
definition_to_message(
sub_definition,
message,
table_of_contents,
heading_level=3)
#
# Notes section if available
#
if 'notes' in definition:
# Start a notes details group too since we have an exposure
message.add(m.Heading(
tr('Notes:'), **DETAILS_STYLE))
message.add(m.Heading(
tr('General notes:'), **DETAILS_SUBGROUP_STYLE))
bullets = m.BulletedList()
for note in definition['notes']:
if isinstance(note, dict):
bullets = _add_dict_to_bullets(bullets, note)
elif note:
bullets.add(m.Text(note))
message.add(bullets)
if 'citations' in definition:
_citations_to_message(message, definition)
# This only for EQ
if 'earthquake_fatality_models' in definition:
current_function = current_earthquake_model_name()
paragraph = m.Paragraph(tr(
'The following earthquake fatality models are available in '
'InaSAFE. Note that you need to set one of these as the '
'active model in InaSAFE Options. The currently active model '
'is: '),
m.ImportantText(current_function)
)
message.add(paragraph)
models_definition = definition['earthquake_fatality_models']
for model in models_definition:
message.add(m.Heading(model['name'], **DETAILS_SUBGROUP_STYLE))
if 'description' in model:
paragraph = m.Paragraph(model['description'])
message.add(paragraph)
for note in model['notes']:
paragraph = m.Paragraph(note)
message.add(paragraph)
_citations_to_message(message, model)
for exposure in exposure_all:
extra_exposure_notes = specific_notes(definition, exposure)
if extra_exposure_notes:
title = tr('Notes for exposure : {exposure_name}').format(
exposure_name=exposure['name'])
message.add(m.Heading(title, **DETAILS_SUBGROUP_STYLE))
bullets = m.BulletedList()
for note in extra_exposure_notes:
if isinstance(note, dict):
bullets = _add_dict_to_bullets(bullets, note)
elif note:
bullets.add(m.Text(note))
message.add(bullets)
if 'continuous_notes' in definition:
message.add(m.Heading(
tr('Notes for continuous datasets:'),
**DETAILS_SUBGROUP_STYLE))
bullets = m.BulletedList()
for note in definition['continuous_notes']:
bullets.add(m.Text(note))
message.add(bullets)
if 'classified_notes' in definition:
message.add(m.Heading(
tr('Notes for classified datasets:'),
**DETAILS_SUBGROUP_STYLE))
bullets = m.BulletedList()
for note in definition['classified_notes']:
bullets.add(m.Text(note))
message.add(bullets)
if 'single_event_notes' in definition:
message.add(
m.Heading(tr('Notes for single events'), **DETAILS_STYLE))
if len(definition['single_event_notes']) < 1:
message.add(m.Paragraph(tr('No single event notes defined.')))
else:
bullets = m.BulletedList()
for note in definition['single_event_notes']:
bullets.add(m.Text(note))
message.add(bullets)
if 'multi_event_notes' in definition:
message.add(
m.Heading(
tr('Notes for multi events / scenarios:'),
**DETAILS_STYLE))
if len(definition['multi_event_notes']) < 1:
message.add(m.Paragraph(tr('No multi-event notes defined.')))
else:
bullets = m.BulletedList()
for note in definition['multi_event_notes']:
bullets.add(m.Text(note))
message.add(bullets)
if 'actions' in definition:
message.add(m.Paragraph(m.ImportantText(tr('Actions:'))))
bullets = m.BulletedList()
for note in definition['actions']:
if isinstance(note, dict):
bullets = _add_dict_to_bullets(bullets, note)
elif note:
bullets.add(m.Text(note))
message.add(bullets)
for exposure in exposure_all:
extra_exposure_actions = specific_actions(definition, exposure)
if extra_exposure_actions:
title = tr('Actions for exposure : {exposure_name}').format(
exposure_name=exposure['name'])
message.add(m.Heading(title, **DETAILS_SUBGROUP_STYLE))
bullets = m.BulletedList()
for note in extra_exposure_actions:
if isinstance(note, dict):
bullets = _add_dict_to_bullets(bullets, note)
elif note:
bullets.add(m.Text(note))
message.add(bullets)
if 'continuous_hazard_units' in definition:
message.add(m.Paragraph(m.ImportantText(tr('Units:'))))
table = m.Table(style_class='table table-condensed table-striped')
row = m.Row()
row.add(m.Cell(tr('Name'), header=True))
row.add(m.Cell(tr('Plural'), header=True))
row.add(m.Cell(tr('Abbreviation'), header=True))
row.add(m.Cell(tr('Details'), header=True))
table.add(row)
for unit in definition['continuous_hazard_units']:
row = m.Row()
row.add(m.Cell(unit['name']))
row.add(m.Cell(unit['plural_name']))
row.add(m.Cell(unit['abbreviation']))
row.add(m.Cell(unit['description']))
table.add(row)
message.add(table)
if 'fields' in definition:
message.add(m.Paragraph(m.ImportantText(tr('Fields:'))))
table = _create_fields_table()
if 'extra_fields' in definition:
all_fields = definition['fields'] + definition['extra_fields']
else:
all_fields = definition['fields']
for field in all_fields:
_add_field_to_table(field, table)
message.add(table)
if 'classifications' in definition:
message.add(m.Heading(
tr('Hazard classifications'),
**DETAILS_STYLE))
message.add(m.Paragraph(
definitions.hazard_classification['description']))
for inasafe_class in definition['classifications']:
definition_to_message(
inasafe_class,
message,
table_of_contents,
heading_level=3)
if 'classes' in definition:
message.add(m.Paragraph(m.ImportantText(tr('Classes:'))))
is_hazard = definition['type'] == hazard_classification_type
if is_hazard:
table = _make_defaults_hazard_table()
else:
table = _make_defaults_exposure_table()
for inasafe_class in definition['classes']:
row = m.Row()
if is_hazard:
# name() on QColor returns its hex code
if 'color' in inasafe_class:
colour = inasafe_class['color'].name()
row.add(m.Cell(
'', attributes='style="background: %s;"' % colour))
else:
row.add(m.Cell(' '))
row.add(m.Cell(inasafe_class['name']))
if is_hazard:
if 'affected' in inasafe_class:
row.add(m.Cell(tr(inasafe_class['affected'])))
else:
row.add(m.Cell(tr('unspecified')))
if is_hazard:
if inasafe_class.get('fatality_rate') is None or \
inasafe_class.get('fatality_rate') < 0:
row.add(m.Cell(tr('unspecified')))
elif inasafe_class.get('fatality_rate') > 0:
# we want to show the rate as a scientific notation
rate = html_scientific_notation_rate(
inasafe_class['fatality_rate'])
rate = '%s%%' % rate
row.add(m.Cell(rate))
else: # == 0
row.add(m.Cell('0%'))
if is_hazard:
if 'displacement_rate' in inasafe_class:
rate = inasafe_class['displacement_rate'] * 100
rate = '%.0f%%' % rate
row.add(m.Cell(rate))
else:
row.add(m.Cell(tr('unspecified')))
if 'string_defaults' in inasafe_class:
defaults = None
for default in inasafe_class['string_defaults']:
if defaults:
defaults += ',%s' % default
else:
defaults = default
row.add(m.Cell(defaults))
else:
row.add(m.Cell(tr('unspecified')))
if is_hazard:
# Min may be a single value or a dict of values so we need
# to check type and deal with it accordingly
if 'numeric_default_min' in inasafe_class:
if isinstance(inasafe_class['numeric_default_min'], dict):
bullets = m.BulletedList()
minima = inasafe_class['numeric_default_min']
for key, value in sorted(minima.items()):
bullets.add('%s : %s' % (key, value))
row.add(m.Cell(bullets))
else:
row.add(m.Cell(inasafe_class['numeric_default_min']))
else:
row.add(m.Cell(tr('unspecified')))
if is_hazard:
# Max may be a single value or a dict of values so we need
# to check type and deal with it accordingly
if 'numeric_default_max' in inasafe_class:
if isinstance(inasafe_class['numeric_default_max'], dict):
bullets = m.BulletedList()
maxima = inasafe_class['numeric_default_max']
for key, value in sorted(maxima.items()):
bullets.add('%s : %s' % (key, value))
row.add(m.Cell(bullets))
else:
row.add(m.Cell(inasafe_class['numeric_default_max']))
else:
row.add(m.Cell(tr('unspecified')))
table.add(row)
# Description goes in its own row with spanning
row = m.Row()
row.add(m.Cell(''))
row.add(m.Cell(inasafe_class['description'], span=7))
table.add(row)
# For hazard classes we also add the 'not affected' class manually:
if definition['type'] == definitions.hazard_classification_type:
row = m.Row()
colour = definitions.not_exposed_class['color'].name()
row.add(m.Cell(
'', attributes='style="background: %s;"' % colour))
description = definitions.not_exposed_class['description']
row.add(m.Cell(description, span=7))
table.add(row)
message.add(table)
if 'affected' in definition:
if definition['affected']:
message.add(m.Paragraph(tr(
'Exposure entities in this class ARE considered affected')))
else:
message.add(m.Paragraph(tr(
'Exposure entities in this class are NOT considered '
'affected')))
if 'optional' in definition:
if definition['optional']:
message.add(m.Paragraph(tr(
'This class is NOT required in the hazard keywords.')))
else:
message.add(m.Paragraph(tr(
'This class IS required in the hazard keywords.')))
return message | Helper function to render a definition to a message.
:param definition: A definition dictionary (see definitions package).
:type definition: dict
:param message: The message that the definition should be appended to.
:type message: parameters.message.Message
:param table_of_contents: Table of contents that the headings should be
included in.
:type message: parameters.message.Message
:param heading_level: Optional style to apply to the definition
heading. See HEADING_LOOKUPS
:type heading_level: int
:returns: Message
:rtype: str |
def _from_any(cls, spec):
"""Generic creation method for all types accepted as ``spec``"""
if isinstance(spec, str):
spec = cls.from_file(spec)
elif isinstance(spec, dict):
spec = cls.from_dict(spec)
elif not isinstance(spec, cls):
raise context.TypeError("spec must be either an ApplicationSpec, "
"path, or dict, got "
"%s" % type(spec).__name__)
return spec | Generic creation method for all types accepted as ``spec`` |
def show(thing, domain=(0, 1), **kwargs):
  """Display a numpy array without having to specify what it represents.

  The renderer is inferred from the value: rank-4 arrays are displayed as
  image grids, rank 2 and 3 arrays as single images, and lists/tuples as
  collections of images. Anything else falls back to ``repr``.
  """
  if isinstance(thing, np.ndarray):
    rank = thing.ndim
    if rank == 4:
      log.debug("Show is assuming rank 4 tensor to be a list of images.")
      images(thing, domain=domain, **kwargs)
    elif rank in (2, 3):
      log.debug("Show is assuming rank 2 or 3 tensor to be an image.")
      image(thing, domain=domain, **kwargs)
    else:
      log.warning("Show only supports numpy arrays of rank 2-4. Using repr().")
      print(repr(thing))
  elif isinstance(thing, (list, tuple)):
    log.debug("Show is assuming list or tuple to be a collection of images.")
    images(thing, domain=domain, **kwargs)
  else:
    log.warning("Show only supports numpy arrays so far. Using repr().")
    print(repr(thing))
This module will attempt to infer how to display your tensor based on its
rank, shape and dtype. rank 4 tensors will be displayed as image grids, rank
2 and 3 tensors as images. |
def _generate_docstring_for_func(self, namespace, arg_data_type,
                                 result_data_type=None, error_data_type=None,
                                 overview=None, extra_request_args=None,
                                 extra_return_arg=None, footer=None):
    """
    Generates a docstring for a function or method.

    This function is versatile. It will create a docstring using all the
    data that is provided.

    :param namespace: Namespace used to resolve type names in the emitted
        references (passed through to ``_format_type_in_doc``).
    :param arg_data_type: The data type describing the argument to the
        route. The data type should be a struct, and each field will be
        treated as an input parameter of the method.
    :param result_data_type: The data type of the route result.
    :param error_data_type: The data type of the route result in the case
        of an error.
    :param str overview: A description of the route that will be located
        at the top of the docstring.
    :param extra_request_args: [(field name, field type, field doc), ...]
        Describes any additional parameters for the method that aren't a
        field in arg_data_type.
    :param str extra_return_arg: Name of an additional return type that. If
        this is specified, it is assumed that the return of the function
        will be a tuple of return_data_type and extra_return-arg.
    :param str footer: Additional notes at the end of the docstring.
    """
    # A void argument type means the route takes no request fields.
    fields = [] if is_void_type(arg_data_type) else arg_data_type.fields
    if not fields and not overview:
        # If we don't have an overview or any input parameters, we skip the
        # docstring altogether.
        return
    # Open the generated docstring.
    self.emit('"""')
    if overview:
        self.emit_wrapped_text(overview)
    # Description of all input parameters
    if extra_request_args or fields:
        if overview:
            # Add a blank line if we had an overview
            self.emit()
        if extra_request_args:
            for name, data_type_name, doc in extra_request_args:
                if data_type_name:
                    field_doc = ':param {} {}: {}'.format(data_type_name,
                                                          name, doc)
                    self.emit_wrapped_text(field_doc,
                                           subsequent_prefix='    ')
                else:
                    # No type available; emit an untyped :param: line.
                    self.emit_wrapped_text(
                        ':param {}: {}'.format(name, doc),
                        subsequent_prefix='    ')
        if is_struct_type(arg_data_type):
            for field in fields:
                if field.doc:
                    if is_user_defined_type(field.data_type):
                        field_doc = ':param {}: {}'.format(
                            field.name, self.process_doc(field.doc, self._docf))
                    else:
                        field_doc = ':param {} {}: {}'.format(
                            self._format_type_in_doc(namespace, field.data_type),
                            field.name,
                            self.process_doc(field.doc, self._docf),
                        )
                    self.emit_wrapped_text(
                        field_doc, subsequent_prefix='    ')
                    if is_user_defined_type(field.data_type):
                        # It's clearer to declare the type of a composite on
                        # a separate line since it references a class in
                        # another module
                        self.emit(':type {}: {}'.format(
                            field.name,
                            self._format_type_in_doc(namespace, field.data_type),
                        ))
                else:
                    # If the field has no docstring, then just document its
                    # type.
                    field_doc = ':type {}: {}'.format(
                        field.name,
                        self._format_type_in_doc(namespace, field.data_type),
                    )
                    self.emit_wrapped_text(field_doc)
        elif is_union_type(arg_data_type):
            # Unions are passed as a single ``arg`` parameter.
            if arg_data_type.doc:
                self.emit_wrapped_text(':param arg: {}'.format(
                    self.process_doc(arg_data_type.doc, self._docf)),
                    subsequent_prefix='    ')
            self.emit(':type arg: {}'.format(
                self._format_type_in_doc(namespace, arg_data_type)))
    if overview and not (extra_request_args or fields):
        # Only output an empty line if we had an overview and haven't
        # started a section on declaring types.
        self.emit()
    if extra_return_arg:
        # Special case where the function returns a tuple. The first
        # element is the JSON response. The second element is the
        # the extra_return_arg param.
        args = []
        if is_void_type(result_data_type):
            args.append('None')
        else:
            rtype = self._format_type_in_doc(namespace,
                                             result_data_type)
            args.append(rtype)
        args.append(extra_return_arg)
        self.generate_multiline_list(args, ':rtype: ')
    else:
        if is_void_type(result_data_type):
            self.emit(':rtype: None')
        else:
            rtype = self._format_type_in_doc(namespace, result_data_type)
            self.emit(':rtype: {}'.format(rtype))
    if not is_void_type(error_data_type) and error_data_type.fields:
        self.emit(':raises: :class:`{}`'.format(self.args.error_class_path))
        self.emit()
        # To provide more clarity to a dev who reads the docstring, suggest
        # the route's error class. This is confusing, however, because we
        # don't know where the error object that's raised will store
        # the more detailed route error defined in stone.
        error_class_name = self.args.error_class_path.rsplit('.', 1)[-1]
        self.emit('If this raises, {} will contain:'.format(error_class_name))
        with self.indent():
            self.emit(self._format_type_in_doc(namespace, error_data_type))
    if footer:
        self.emit()
        self.emit_wrapped_text(footer)
    # Close the generated docstring.
    self.emit('"""')
This function is versatile. It will create a docstring using all the
data that is provided.
:param arg_data_type: The data type describing the argument to the
route. The data type should be a struct, and each field will be
treated as an input parameter of the method.
:param result_data_type: The data type of the route result.
:param error_data_type: The data type of the route result in the case
of an error.
:param str overview: A description of the route that will be located
at the top of the docstring.
:param extra_request_args: [(field name, field type, field doc), ...]
Describes any additional parameters for the method that aren't a
field in arg_data_type.
:param str extra_return_arg: Name of an additional return type that. If
this is specified, it is assumed that the return of the function
will be a tuple of return_data_type and extra_return-arg.
:param str footer: Additional notes at the end of the docstring. |
def compute_from_text(self,text,beta=0.001):
    """
    m.compute_from_text(text, beta=0.001) -- Compute matrix values from a
    text string of IUPAC ambiguity codes.

    Use the Motif_from_text utility instead to build motifs on the fly.
    """
    prevlett = {'B':'A', 'D':'C', 'V':'T', 'H':'G'}
    countmat = []
    # Gap characters ('.' and '-') are treated as fully ambiguous ('N').
    # Raw string avoids relying on lenient escape handling in the pattern.
    text = re.sub(r'[.\-]', 'N', text.upper())
    for letter in text:
        D = {'A': 0, 'C': 0, 'T': 0, 'G': 0}
        if letter in ('B', 'D', 'V', 'H'):  # B == "everything but A", etc...
            _omit = prevlett[letter]
            for L in ACGT:
                if L != _omit:
                    D[L] = 0.3333
        elif letter in one2two:  # Covers the two-base codes WSMYRK.
            # BUGFIX: dict.has_key() was removed in Python 3; use "in".
            for L in list(one2two[letter]):
                D[L] = 0.5
        elif letter == 'N':
            # Unknown position: fall back to the background distribution.
            for L in D.keys():
                D[L] = self.background[L]
        elif letter == '@':
            # Background with a slight bias toward 'A'.
            for L in D.keys():
                D[L] = self.background[L] - 0.0001
            D['A'] = D['A'] + 0.0004
        else:
            # Unambiguous base.
            D[letter] = 1.0
        countmat.append(D)
    self.compute_from_counts(countmat, beta)
Use Motif_from_text utility instead to build motifs on the fly. |
def _split_chemical_equations(value):
    """
    Split a string with sequential chemical equations into separate strings.

    Each string in the returned list represents a single chemical equation
    of the input; consecutive equations share their middle species term.
    See the docstrings of `ChemicalEquation` and `ChemicalSystem` for more.

    Parameters
    ----------
    value : `str`
        A string with sequential chemical equations in the mini-language (see
        notes on `ChemicalEquation`).

    Returns
    -------
    iterable of `str`
        An iterable of strings in the format specified by the mini-language
        (see notes on `ChemicalEquation`).

    Examples
    --------
    >>> from pyrrole.core import _split_chemical_equations
    >>> _split_chemical_equations('A + B -> C + D -> D + E <=> F + G <- H + I')
    ['A + B -> C + D', 'C + D -> D + E', 'D + E <=> F + G', 'F + G <- H + I']
    """
    # _split_arrows alternates species groups and arrow tokens; every
    # (species, arrow, species) triple is one equation.
    tokens = _split_arrows(value)
    equations = []
    for start in range(0, len(tokens) - 2, 2):
        lhs, arrow, rhs = tokens[start:start + 3]
        equations.append((lhs + arrow + rhs).strip())
    return equations
Each string in the returned iterable represents a single chemical equation
of the input.
See the docstrings of `ChemicalEquation` and `ChemicalSystem` for more.
Parameters
----------
value : `str`
A string with sequential chemical equations in the mini-language (see
notes on `ChemicalEquation`).
Returns
-------
iterable of `str`
An iterable of strings in the format specified by the mini-language
(see notes on `ChemicalEquation`).
Examples
--------
>>> from pyrrole.core import _split_chemical_equations
>>> _split_chemical_equations('A + B -> C + D -> D + E <=> F + G <- H + I')
['A + B -> C + D', 'C + D -> D + E', 'D + E <=> F + G', 'F + G <- H + I'] |
def set_hook(fn, key, **kwargs):
    """Mark decorated function as a hook to be picked up later.

    .. note::
        Currently only works with functions and instance methods. Class and
        static methods are not supported.

    :return: Decorated function if supplied, else this decorator with its args
        bound.
    """
    # Called without a function: act as a decorator factory.
    if fn is None:
        return functools.partial(set_hook, key=key, **kwargs)
    # Stash hook metadata on the function itself rather than wrapping it,
    # so it still behaves as a normal (unbound) method.
    if not hasattr(fn, '__marshmallow_hook__'):
        fn.__marshmallow_hook__ = {}
    # Record the kwargs for the tagged function, keyed by (<tag>, <pass_many>).
    fn.__marshmallow_hook__[key] = kwargs
    return fn
.. note::
Currently only works with functions and instance methods. Class and
static methods are not supported.
:return: Decorated function if supplied, else this decorator with its args
bound. |
def _golden(self, triplet, fun):
    """Reduce the size of the bracket until the minimum is found"""
    # Golden-section-style search: `triplet` is three (q, f(q)) pairs
    # bracketing a minimum -- presumably fun is unimodal on it; TODO confirm.
    self.num_golden = 0
    (qa, fa), (qb, fb), (qc, fc) = triplet
    while True:
        self.num_golden += 1
        # Trial point divides [qa, qb] in the golden ratio
        # (phi is a module-level constant not visible in this chunk).
        qd = qa + (qb-qa)*phi/(1+phi)
        fd = fun(qd)
        if fd < fb:
            #print "golden d"
            # Trial beats the current best: shift the bracket toward qd.
            (qa, fa), (qb, fb) = (qb, fb), (qd, fd)
        else:
            #print "golden b"
            # Trial is worse: tighten the bracket around qb instead.
            (qa, fa), (qc, fc) = (qd, fd), (qa, fa)
        if abs(qa-qb) < self.qtol:
            # Bracket narrower than the tolerance: converged.
            return qc, fc
def dump_img(fname):
    """ output the image as text """
    img = Image.open(fname)
    # Only the width is used; the height is discarded.
    width, _ = img.size
    txt = ''
    # Flat list of pixel values in row-major order.
    pixels = list(img.getdata())
    for col in range(width):
        # NOTE(review): these slices overlap (col:col+width) and only cover
        # roughly the first two rows of pixels; dumping full rows would be
        # pixels[row*width:(row+1)*width] -- confirm intent before changing.
        txt += str(pixels[col:col+width])
    return txt
def perform(action_name, container, **kwargs):
    """
    Performs an action on the given container map and configuration.

    :param action_name: Name of the action (e.g. ``update``).
    :param container: Container configuration name.
    :param kwargs: Keyword arguments for the action implementation.
    """
    # Build the fabric wrapper and dispatch the named action on it.
    container_fabric().call(action_name, container, **kwargs)
:param action_name: Name of the action (e.g. ``update``).
:param container: Container configuration name.
:param kwargs: Keyword arguments for the action implementation. |
def generate(env,**kw):
    """ Generate the `msginit` tool and set up its construction variables. """
    import SCons.Util
    from SCons.Tool.GettextCommon import _detect_msginit
    try:
        env['MSGINIT'] = _detect_msginit(env)
    except Exception:
        # BUGFIX: a bare "except:" would also swallow SystemExit and
        # KeyboardInterrupt. Fall back to the plain command name when
        # detection fails for any ordinary reason.
        env['MSGINIT'] = 'msginit'
    msginitcom = '$MSGINIT ${_MSGNoTranslator(__env__)} -l ${_MSGINITLOCALE}' \
        + ' $MSGINITFLAGS -i $SOURCE -o $TARGET'
    # NOTE: We set POTSUFFIX here, in case the 'xgettext' is not loaded
    # (sometimes we really don't need it)
    env.SetDefault(
        POSUFFIX = ['.po'],
        POTSUFFIX = ['.pot'],
        _MSGINITLOCALE = '${TARGET.filebase}',
        _MSGNoTranslator = _optional_no_translator_flag,
        MSGINITCOM = msginitcom,
        MSGINITCOMSTR = '',
        MSGINITFLAGS = [ ],
        POAUTOINIT = False,
        POCREATE_ALIAS = 'po-create'
    )
    env.Append( BUILDERS = { '_POInitBuilder' : _POInitBuilder(env) } )
    env.AddMethod(_POInitBuilderWrapper, 'POInit')
    env.AlwaysBuild(env.Alias('$POCREATE_ALIAS'))
def _check_team_login(team):
    """
    Disallow simultaneous public cloud and team logins.
    """
    auth_contents = _load_auth()
    for auth in itervalues(auth_contents):
        existing_team = auth.get('team')
        if team:
            # Logging in as a team: any session for a different team blocks it.
            if team != existing_team:
                raise CommandException(
                    "Can't log in as team %r; log out first." % team
                )
        elif existing_team:
            # Logging in as a public user while a team session exists.
            raise CommandException(
                "Can't log in as a public user; log out from team %r first." % existing_team
            )
def serialize_footer(signer):
    """Uses the signer object which has been used to sign the message to generate
    the signature, then serializes that signature.

    :param signer: Cryptographic signer object (may be ``None`` for unsigned messages)
    :type signer: aws_encryption_sdk.internal.crypto.Signer
    :returns: Serialized footer (empty bytes when there is no signer)
    :rtype: bytes
    """
    if signer is None:
        return b""
    signature = signer.finalize()
    # Big-endian length prefix followed by the raw signature bytes.
    return struct.pack(">H{sig_len}s".format(sig_len=len(signature)), len(signature), signature)
the signature, then serializes that signature.
:param signer: Cryptographic signer object
:type signer: aws_encryption_sdk.internal.crypto.Signer
:returns: Serialized footer
:rtype: bytes |
def _integrate_variable_trajectory(self, h, g, tol, step, relax):
"""Generates a solution trajectory of variable length."""
# initialize the solution using initial condition
solution = np.hstack((self.t, self.y))
while self.successful():
self.integrate(self.t + h, step, relax)
current_step = np.hstack((self.t, self.y))
solution = np.vstack((solution, current_step))
if g(self.t, self.y, *self.f_params) < tol:
break
else:
continue
return solution | Generates a solution trajectory of variable length. |
def load_user_from_request(req):
    """
    Just like the Flask.login load_user_from_request

    If you need to customize the user loading from your database,
    the FlaskBitjws.get_user_by_key method is the one to modify.

    :param req: The flask request to load a user based on.
    :returns: A FlaskUser on success, or None when the JWS is missing,
        invalid, replayed, or references an unknown key.
    """
    # Populates req.jws_header / req.jws_payload as side effects.
    load_jws_from_request(req)
    if not hasattr(req, 'jws_header') or req.jws_header is None or not \
            'iat' in req.jws_payload:
        current_app.logger.info("invalid jws request.")
        return None
    # Replay protection: the issued-at timestamp must be strictly newer
    # than the last nonce recorded for this key id.
    ln = current_app.bitjws.get_last_nonce(current_app,
                                           req.jws_header['kid'],
                                           req.jws_payload['iat'])
    # NOTE(review): 'iat' was already checked above, so the second
    # membership test here is redundant but harmless.
    if (ln is None or 'iat' not in req.jws_payload or
            req.jws_payload['iat'] * 1000 <= ln):
        current_app.logger.info("invalid nonce. lastnonce: %s" % ln)
        return None
    rawu = current_app.bitjws.get_user_by_key(current_app,
                                              req.jws_header['kid'])
    if rawu is None:
        return None
    current_app.logger.info("logging in user: %s" % rawu)
    return FlaskUser(rawu)
If you need to customize the user loading from your database,
the FlaskBitjws.get_user_by_key method is the one to modify.
:param req: The flask request to load a user based on. |
def add_backend(self, backend):
    "Add a RapidSMS backend to this tenant"
    # No-op when the backend is already linked to this tenant.
    if backend not in self.get_backends():
        link, _created = BackendLink.all_tenants.get_or_create(backend=backend)
        self.backendlink_set.add(link)
def extract_payload(self):
    """Extract payload from request."""
    if not self.check_signature():
        raise InvalidSignature('Invalid Signature')
    if request.is_json:
        # Request.get_json() could be first called with silent=True, so drop
        # any cached result to surface parse errors here.
        delete_cached_json_for(request)
        return request.get_json(silent=False, cache=False)
    if request.content_type == 'application/x-www-form-urlencoded':
        return dict(request.form)
    raise InvalidPayload(request.content_type)
def _fingerprint_dict_with_files(self, option_val):
    """Returns a fingerprint of the given dictionary containing file paths.

    Any value which is a file path which exists on disk will be fingerprinted by that file's
    contents rather than by its path.

    This assumes the files are small enough to be read into memory.

    NB: The keys of the dict are assumed to be strings -- if they are not, the dict should be
    converted to encode its keys with `stable_option_fingerprint()`, as is done in the `fingerprint()`
    method.
    """
    expanded = {}
    for key, value in option_val.items():
        # Replace on-disk paths with their contents before hashing.
        expanded[key] = self._expand_possible_file_value(value)
    return stable_option_fingerprint(expanded)
Any value which is a file path which exists on disk will be fingerprinted by that file's
contents rather than by its path.
This assumes the files are small enough to be read into memory.
NB: The keys of the dict are assumed to be strings -- if they are not, the dict should be
converted to encode its keys with `stable_option_fingerprint()`, as is done in the `fingerprint()`
method. |
def _db_filename_from_dataframe(base_filename, df):
    """
    Generate database filename for a sqlite3 database we're going to
    fill with the contents of a DataFrame, using the DataFrame's
    column names and types.
    """
    # Encode the row count, then one ".<name>_<sqltype>" segment per column.
    parts = [base_filename + ("_nrows%d" % len(df))]
    for column_name in df.columns:
        column_db_type = db_type(df[column_name].dtype)
        parts.append(".%s_%s" % (column_name.replace(" ", "_"), column_db_type))
    return "".join(parts) + ".db"
fill with the contents of a DataFrame, using the DataFrame's
column names and types. |
def blend(self, other, percent=0.5):
    """blend this color with the other one.

    Args:
      :other:
        the grapefruit.Color to blend with this one.
      :percent:
        weight of this color in the mix (the other gets 1 - percent).

    Returns:
      A grapefruit.Color instance which is the result of blending
      this color on the other one.

    >>> c1 = Color.from_rgb(1, 0.5, 0, 0.2)
    >>> c2 = Color.from_rgb(1, 1, 1, 0.6)
    >>> c3 = c1.blend(c2)
    >>> c3
    Color(1.0, 0.75, 0.5, 0.4)
    """
    remainder = 1.0 - percent
    mixed_rgb = tuple(
        (mine * percent) + (theirs * remainder)
        for mine, theirs in zip(self.__rgb, other.__rgb))
    mixed_alpha = (self.__a * percent) + (other.__a * remainder)
    return Color(mixed_rgb, 'rgb', mixed_alpha, self.__wref)
Args:
:other:
the grapefruit.Color to blend with this one.
Returns:
A grapefruit.Color instance which is the result of blending
this color on the other one.
>>> c1 = Color.from_rgb(1, 0.5, 0, 0.2)
>>> c2 = Color.from_rgb(1, 1, 1, 0.6)
>>> c3 = c1.blend(c2)
>>> c3
Color(1.0, 0.75, 0.5, 0.4) |
def register_doi(self, submission_id, request_xml):
    """
    Register a new DOI number in Crossref, or update the metadata of an
    existing DOI.

    submission_id: Will be used as the submission file name. The file name
    could be used in future requests to retrieve the submission status.

    request_xml: The XML with the document metadata. It must be under
    compliance with the Crossref Submission Schema.
    """
    endpoint = self.get_endpoint('deposit')
    files = {'mdFile': ('%s.xml' % submission_id, request_xml)}
    params = {
        'operation': 'doMDUpload',
        'login_id': self.api_user,
        'login_passwd': self.api_key,
    }
    # Multipart POST of the metadata file with credentials as form fields.
    return self.do_http_request(
        'post',
        endpoint,
        data=params,
        files=files,
        timeout=10,
        custom_header=str(self.etiquette),
    )
metadata.
submission_id: Will be used as the submission file name. The file name
could be used in future requests to retrieve the submission status.
request_xml: The XML with the document metadata. It must be under
compliance with the Crossref Submission Schema. |
def column_reflection_fallback(self):
    """If we can't reflect the table, use a query to at least get column names."""
    query = sa.select([sa.text("*")]).select_from(self._table)
    # The result proxy's keys() yields the column names of the SELECT *.
    return [{'name': column_name} for column_name in self.engine.execute(query).keys()]
def replace_body_vars(self, body):
    """Given a multiline string that is the body of the job script, replace
    the placeholders for environment variables with backend-specific
    realizations, and return the modified body. See the `job_vars`
    attribute for the mappings that are performed.
    """
    result = body
    for placeholder, realization in self.job_vars.items():
        result = result.replace(placeholder, realization)
    return result
the placeholders for environment variables with backend-specific
realizations, and return the modified body. See the `job_vars`
attribute for the mappings that are performed. |
def flatten_and_write(dotenv_path, dotenv_as_dict, quote_mode='always'):
    """
    Writes dotenv_as_dict to dotenv_path, flattening the values

    :param dotenv_path: .env path
    :param dotenv_as_dict: dict
    :param quote_mode: quoting strategy passed to ``_get_format``
    :return: True on completion
    """
    with open(dotenv_path, 'w') as out:
        for key, value in dotenv_as_dict.items():
            # _get_format chooses the "KEY=VALUE" template (quoted or not).
            out.write(_get_format(value, quote_mode).format(key=key, value=value))
    return True
:param dotenv_path: .env path
:param dotenv_as_dict: dict
:param quote_mode:
:return: |
def _get_id_format(self):
    """Return the id format string (a %-style format, e.g. ``"f%06d"``)
    from the parameters, falling back to ``DEFAULT_ID_FORMAT``.

    :raises ValueError: (via ``log_exc``) if the configured format cannot
        interpolate an integer.
    """
    id_format = gf.safe_get(
        self.parameters,
        gc.PPN_TASK_OS_FILE_ID_REGEX,
        self.DEFAULT_ID_FORMAT,
        can_return_none=False
    )
    try:
        # Validate the format by interpolating a dummy integer;
        # the result itself is not needed (removed unused local).
        id_format % 1
    except (TypeError, ValueError) as exc:
        self.log_exc(u"String '%s' is not a valid id format" % (id_format), exc, True, ValueError)
    return id_format
def create(self, create_missing=None):
    """Do extra work to fetch a complete set of attributes for this entity.

    For more information, see `Bugzilla #1223540
    <https://bugzilla.redhat.com/show_bug.cgi?id=1223540>`_.

    :param create_missing: Forwarded to ``create_json`` -- presumably
        controls generation of missing required fields; TODO confirm.
    :returns: A fully-read DockerComputeResource for the created entity.
    """
    # create_json() performs the creation and returns the server payload;
    # a fresh entity is then built from the returned id and read() back so
    # all server-side attributes are populated.
    return DockerComputeResource(
        self._server_config,
        id=self.create_json(create_missing)['id'],
    ).read()
For more information, see `Bugzilla #1223540
<https://bugzilla.redhat.com/show_bug.cgi?id=1223540>`_. |
def pairwise_point_combinations(xs, ys, anchors):
    """
    Does an in-place addition of the four points that can be composed by
    combining coordinates from the two lists to the given list of anchors
    """
    top, bottom = max(ys), min(ys)
    right, left = max(xs), min(xs)
    # Each x is paired with the extreme y values...
    for x in xs:
        anchors.extend([(x, top), (x, bottom)])
    # ...and each y with the extreme x values.
    for y in ys:
        anchors.extend([(right, y), (left, y)])
def check_missing_atoms(self, template=None, ha_only=True):
    """
    Checks for missing atoms based on a template.

    Default: Searches for missing heavy atoms (not Hydrogen) based on
    Bio.Struct.protein_residues.

    Arguments:
     - template, dictionary, keys are residue names, values list of atom names.
     - ha_only, boolean, default True, restrict check to heavy atoms.

    Returns a dictionary of tuples with the missing atoms per residue.
    """
    missing_atoms = {}
    if not template:
        import protein_residues
        template = protein_residues.normal  # Don't care for terminal residues here..
    for residue in self.get_residues():
        # BUGFIX: dict.has_key() was removed in Python 3; use "in".
        if residue.resname not in template:
            # Maybe add this as a warning instead of exception?
            raise ValueError('Residue name (%s) not in the template' % residue.resname)
        if ha_only:
            # Heavy atoms only: skip names starting with 'H' or with a
            # leading digit followed by 'H' (e.g. "1HB").
            heavy_atoms = [atom for atom in template[residue.resname]['atoms'].keys()
                           if atom[0] != 'H' and not (atom[0].isdigit() and atom[1] == 'H')]
            reference_set = set(heavy_atoms)
        else:
            reference_set = set(template[residue.resname]['atoms'].keys())
        structure_set = set(residue.child_dict.keys())
        diff = reference_set.difference(structure_set)
        if diff:
            # (Chain id, residue name, residue number) uniquely identifies the residue.
            residue_uniq_id = (residue.parent.id, residue.resname, residue.get_id()[1])
            missing_atoms[residue_uniq_id] = list(diff)
    return missing_atoms
Default: Searches for missing heavy atoms (not Hydrogen) based on Bio.Struct.protein_residues
Arguments:
- template, dictionary, keys are residue names, values list of atom names.
- ha_only, boolean, default True, restrict check to heavy atoms.
Returns a dictionary of tuples with the missing atoms per residue. |
def parse_instance(self, tup_tree):
    """
    Return a CIMInstance.

    The instance contains the properties, qualifiers and classname for
    the instance.

    ::

        <!ELEMENT INSTANCE (QUALIFIER*, (PROPERTY | PROPERTY.ARRAY |
                            PROPERTY.REFERENCE)*)>
        <!ATTLIST INSTANCE
            %ClassName;
            xml:lang NMTOKEN #IMPLIED>
    """
    # Validate element name, required/optional attributes, and the set of
    # allowed child elements.
    self.check_node(tup_tree, 'INSTANCE', ('CLASSNAME',), ('xml:lang',),
                    ('QUALIFIER', 'PROPERTY', 'PROPERTY.ARRAY',
                     'PROPERTY.REFERENCE'))
    # The 'xml:lang' attribute is tolerated but ignored.
    # Note: The check above does not enforce the ordering constraint in the
    # DTD that QUALIFIER elements must appear before PROPERTY* elements.
    qualifiers = self.list_of_matching(tup_tree, ('QUALIFIER',))
    props = self.list_of_matching(tup_tree,
                                  ('PROPERTY.REFERENCE', 'PROPERTY',
                                   'PROPERTY.ARRAY'))
    obj = CIMInstance(attrs(tup_tree)['CLASSNAME'], qualifiers=qualifiers)
    for prop in props:
        # Store the parsed CIMProperty object itself (not just its value).
        obj.__setitem__(prop.name, prop)
    return obj
The instance contains the properties, qualifiers and classname for
the instance.
::
<!ELEMENT INSTANCE (QUALIFIER*, (PROPERTY | PROPERTY.ARRAY |
PROPERTY.REFERENCE)*)>
<!ATTLIST INSTANCE
%ClassName;
xml:lang NMTOKEN #IMPLIED> |
def get_config_value(name, path_to_file='config.txt'):
    """
    Gets the value for "name" from the "path_to_file" config file.

    Args:
        name: name of the variable in the config file (with or without a
            trailing ':')
        path_to_file: path to the config file

    Returns: the value associated with name if it exists in the file;
        otherwise, returns None
    """
    # If the function is called from the gui, the file has to be located
    # with respect to the gui folder.
    if not os.path.isfile(path_to_file):
        path_to_file = os.path.join('../instruments/', path_to_file)
    path_to_file = os.path.abspath(path_to_file)
    if not os.path.isfile(path_to_file):
        print(('path_to_file', path_to_file))
        return None
    # BUGFIX: use a context manager so the file handle is always closed
    # (the original left it open).
    with open(path_to_file, 'r') as f:
        string_of_file_contents = f.read()
    # BUGFIX: the original compared with "is not ':'" -- an identity test
    # against a str literal, which only worked due to CPython interning.
    if not name.endswith(':'):
        name += ':'
    if name not in string_of_file_contents:
        return None
    config_value = [line.split(name)[1] for line in string_of_file_contents.split('\n')
                    if len(line.split(name)) > 1][0].strip()
    return config_value
Args:
name: name of the variable in the config file
path_to_file: path to config file
Returns: path to dll if name exists in the file; otherwise, returns None |
def area_top_orifice(self):
    """Estimate the orifice area corresponding to the top row of orifices.

    Another solution method is to use integration to solve this problem.
    Here we use the width of the stout weir in the center of the top row
    to estimate the area of the top orifice.
    """
    # Height at the center of the top row of orifices.
    center_height = self.hl - 0.5 * self.b_rows
    # Stout weir width at that height, scaled by flow and one row height.
    width_per_flow = self.stout_w_per_flow(center_height)
    return width_per_flow * self.q * self.b_rows
Another solution method is to use integration to solve this problem.
Here we use the width of the stout weir in the center of the top row
to estimate the area of the top orifice |
def _construct_options(options_bootstrapper, build_configuration):
    """Parse and register options.

    :returns: An Options object representing the full set of runtime options.
    """
    # Now that plugins and backends are loaded we can gather the known
    # scopes. Every known scope is reachable via the known_scope_infos()
    # methods of the optionables that are not scoped to any other.
    top_level_optionables = set()
    top_level_optionables.add(GlobalOptionsRegistrar)
    top_level_optionables.update(GlobalSubsystems.get())
    top_level_optionables.update(build_configuration.optionables())
    top_level_optionables.update(Goal.get_optionables())

    # `get_full_options` sorts and de-duplicates the scope infos for us.
    known_scope_infos = []
    for optionable in top_level_optionables:
        known_scope_infos.extend(optionable.known_scope_infos())
    return options_bootstrapper.get_full_options(known_scope_infos)
:returns: An Options object representing the full set of runtime options. |
def collect(self):
    """
    Collect s3 bucket stats
    """
    # boto is an optional dependency; bail out if the import failed.
    if boto is None:
        self.log.error("Unable to import boto python module")
        return {}
    for s3instance in self.config['s3']:
        self.log.info("S3: byte_unit: %s" % self.config['byte_unit'])
        instance_cfg = self.config['s3'][s3instance]
        aws_access = instance_cfg['aws_access_key']
        aws_secret = instance_cfg['aws_secret_key']
        for bucket_name in instance_cfg['buckets']:
            bucket = self.getBucket(aws_access, aws_secret, bucket_name)
            # Collect the bucket size once, then publish it in every
            # configured byte unit.
            total_size = self.getBucketSize(bucket)
            for byte_unit in self.config['byte_unit']:
                converted = diamond.convertor.binary.convert(
                    value=total_size,
                    oldUnit='byte',
                    newUnit=byte_unit
                )
                self.publish("%s.size.%s" % (bucket_name, byte_unit),
                             converted)
def close(self):
    """Disconnects uWSGI from the client."""
    uwsgi.disconnect()
    if self._req_ctx is not None:
        return
    # wait() may not be called again, so reap the select greenlet here
    # and release any waiter.
    self._select_greenlet.kill()
    self._event.set()
def disable_paging(self, command="terminal length 999", delay_factor=1):
    """Disable paging default to a Cisco CLI method."""
    effective_delay = self.select_delay_factor(delay_factor)
    time.sleep(0.1 * effective_delay)
    self.clear_buffer()
    normalized = self.normalize_cmd(command)
    log.debug("In disable_paging")
    log.debug("Command: {0}".format(normalized))
    self.write_channel(normalized)
    output = self.read_until_prompt()
    # Strip ANSI escape sequences when the device emits them.
    if self.ansi_escape_codes:
        output = self.strip_ansi_escape_codes(output)
    log.debug("{0}".format(output))
    log.debug("Exiting disable_paging")
    return output
def compute(self, x, yerr):
    """
    Compute and factorize the covariance matrix.

    Args:
        x (ndarray[nsamples, ndim]): The independent coordinates of the
            data points.
        yerr (ndarray[nsamples] or float): The Gaussian uncertainties on
            the data points at coordinates ``x``. These values will be
            added in quadrature to the diagonal of the covariance matrix.
    """
    # Build the kernel matrix and fold the measurement noise into its
    # diagonal (added in quadrature).
    cov = self.kernel.get_value(x)
    diag = np.diag_indices_from(cov)
    cov[diag] += yerr ** 2
    # Upper-triangular Cholesky factor; the second tuple element records
    # lower=False for later triangular solves.
    upper = cholesky(cov, overwrite_a=True, lower=False)
    self._factor = (upper, False)
    # log|K| = 2 * sum(log(diagonal of the Cholesky factor)).
    self.log_determinant = 2 * np.sum(np.log(np.diag(upper)))
    self.computed = True
Args:
x (ndarray[nsamples, ndim]): The independent coordinates of the
data points.
yerr (ndarray[nsamples] or float): The Gaussian uncertainties on
the data points at coordinates ``x``. These values will be
added in quadrature to the diagonal of the covariance matrix. |
def write(self, page, data):
    """Send a WRITE command to store data on the tag.

    The *page* argument specifies the offset in multiples of 4
    bytes. The *data* argument must be a string or bytearray of
    length 4.

    Command execution errors raise :exc:`Type2TagCommandError`.
    """
    if len(data) != 4:
        raise ValueError("data must be a four byte string or array")

    log.debug("write {0} to page {1}".format(hexlify(data), page))
    rsp = self.transceive("\xA2" + chr(page % 256) + data)

    if len(rsp) != 1:
        # Bug fix: log the response actually received, not the payload
        # that was sent (the original logged hexlify(data) here).
        log.debug("invalid response " + hexlify(rsp))
        raise Type2TagCommandError(INVALID_RESPONSE_ERROR)

    if rsp[0] != 0x0A:  # anything other than ACK (0x0A) is a NAK
        log.debug("invalid page, received nak")
        raise Type2TagCommandError(INVALID_PAGE_ERROR)

    return True
The *page* argument specifies the offset in multiples of 4
bytes. The *data* argument must be a string or bytearray of
length 4.
Command execution errors raise :exc:`Type2TagCommandError`. |
def patch(self, resource_endpoint, data=None):
    """Send a PATCH request to *resource_endpoint*. Don't use it.

    Args:
        resource_endpoint: endpoint path appended to the base request URL.
        data: optional JSON-serializable payload; defaults to an empty dict.

    Returns:
        The response object from ``req.patch``.
    """
    # A mutable default argument ({}) is shared across calls; use None as
    # the sentinel and build a fresh dict per call instead.
    if data is None:
        data = {}
    url = self._create_request_url(resource_endpoint)
    return req.patch(url, headers=self.auth_header, json=data)
def intersect(obj1, obj2):
    """
    intersect two Vector objects

    Parameters
    ----------
    obj1: Vector
        the first vector object; this object is reprojected to the CRS of obj2 if necessary
    obj2: Vector
        the second vector object

    Returns
    -------
    Vector
        the intersect of obj1 and obj2
    """
    if not isinstance(obj1, Vector) or not isinstance(obj2, Vector):
        raise RuntimeError('both objects must be of type Vector')
    # work on clones so the caller's objects are not modified, then bring
    # both layers into the same CRS (that of obj2)
    obj1 = obj1.clone()
    obj2 = obj2.clone()
    obj1.reproject(obj2.srs)
    #######################################################
    # create basic overlap
    union1 = ogr.Geometry(ogr.wkbMultiPolygon)
    # union all the geometrical features of layer 1
    for feat in obj1.layer:
        union1.AddGeometry(feat.GetGeometryRef())
    obj1.layer.ResetReading()
    # NOTE(review): OGR's Geometry.Simplify returns a NEW geometry rather
    # than modifying in place; the return value is discarded here, so this
    # call appears to have no effect — confirm whether
    # `union1 = union1.Simplify(0)` was intended.
    union1.Simplify(0)
    # same for layer2
    union2 = ogr.Geometry(ogr.wkbMultiPolygon)
    for feat in obj2.layer:
        union2.AddGeometry(feat.GetGeometryRef())
    obj2.layer.ResetReading()
    # NOTE(review): return value discarded here as well (see note above).
    union2.Simplify(0)
    # intersection
    intersect_base = union1.Intersection(union2)
    union1 = None
    union2 = None
    #######################################################
    # compute detailed per-geometry overlaps
    if intersect_base.GetArea() > 0:
        intersection = Vector(driver='Memory')
        intersection.addlayer('intersect', obj1.srs, ogr.wkbPolygon)
        # build a list of (source index, source field name, output field
        # name) tuples; output names are disambiguated with a numeric
        # suffix when the two inputs share a field name
        fieldmap = []
        for index, fielddef in enumerate([obj1.fieldDefs, obj2.fieldDefs]):
            for field in fielddef:
                name = field.GetName()
                i = 2
                while name in intersection.fieldnames:
                    name = '{}_{}'.format(field.GetName(), i)
                    i += 1
                fieldmap.append((index, field.GetName(), name))
                intersection.addfield(name, type=field.GetType(), width=field.GetWidth())
        # pairwise intersection of all features that touch the base overlap
        for feature1 in obj1.layer:
            geom1 = feature1.GetGeometryRef()
            if geom1.Intersects(intersect_base):
                for feature2 in obj2.layer:
                    geom2 = feature2.GetGeometryRef()
                    # select only the intersections
                    if geom2.Intersects(intersect_base):
                        intersect = geom2.Intersection(geom1)
                        # carry the attributes of both source features over
                        # to the new intersection feature via the field map
                        fields = {}
                        for item in fieldmap:
                            if item[0] == 0:
                                fields[item[2]] = feature1.GetField(item[1])
                            else:
                                fields[item[2]] = feature2.GetField(item[1])
                        intersection.addfeature(intersect, fields)
        intersect_base = None
        return intersection
Parameters
----------
obj1: Vector
the first vector object; this object is reprojected to the CRS of obj2 if necessary
obj2: Vector
the second vector object
Returns
-------
Vector
the intersect of obj1 and obj2 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.