code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def _filter_fields(self, filter_function):
"""
Utility to iterate through all fields (super types first) of a type.
:param filter: A function that takes in a Field object. If it returns
True, the field is part of the generated output. If False, it is
omitted.
"""
fields = []
if self.parent_type:
fields.extend(self.parent_type._filter_fields(filter_function))
fields.extend(filter(filter_function, self.fields))
return fields | Utility to iterate through all fields (super types first) of a type.
:param filter: A function that takes in a Field object. If it returns
True, the field is part of the generated output. If False, it is
omitted. |
def submit(self, stanza):
    """Adds keys to the current configuration stanza as a
    dictionary of key-value pairs.

    :param stanza: A dictionary of key-value pairs for the stanza.
    :type stanza: ``dict``

    :return: The :class:`Stanza` object.
    """
    # Encode the key/value pairs and POST them to this stanza's path.
    self.service.post(self.path, body=_encode(**stanza))
    return self
def remove_blocked_work_units(self, work_spec_name, work_unit_names):
    '''Remove some work units in the blocked list.

    If `work_unit_names` is :const:`None` (which must be passed
    explicitly), all pending work units in `work_spec_name` are
    removed; otherwise only the specific named work units will be.

    Note that none of the "remove" functions will restart blocked
    work units, so if you have called
    e.g. :meth:`remove_available_work_units` for a predecessor
    job, you may need to also call this method for its successor.

    :param str work_spec_name: name of the work spec
    :param list work_unit_names: names of the work units, or
      :const:`None` for all in `work_spec_name`
    :return: number of work units removed
    '''
    # Delegate to the shared removal helper, targeting the blocked queue.
    return self._remove_some_work_units(
        work_spec_name, work_unit_names, suffix=_BLOCKED)
def _deposit_withdraw(self, type, amount, coinbase_account_id):
"""`<https://docs.exchange.coinbase.com/#depositwithdraw>`_"""
data = {
'type':type,
'amount':amount,
'coinbase_account_id':coinbase_account_id
}
return self._post('transfers', data=data) | `<https://docs.exchange.coinbase.com/#depositwithdraw>`_ |
def render_view(view_name, **args):
    '''Process view and return root Node.

    :param view_name: name of the view to load and render.
    :param args: keyword arguments forwarded to :func:`render`.
    :raises CoreError: re-raised with this view's info attached.
    :raises ViewError: wrapping any other unexpected rendering failure.
    '''
    try:
        root_xml = get_view_root(view_name)
        return render(root_xml, **args)
    except CoreError as error:
        error.add_view_info(ViewInfo(view_name, None))
        raise
    except Exception as cause:
        # Wrap unexpected errors so the failing view is identifiable.
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # propagate unchanged instead of being swallowed into a ViewError.
        error = ViewError('Unknown error occurred during rendering',
                          ViewInfo(view_name, None))
        error.add_cause(cause)
        raise error from cause
def restrict(self, point):
    """Apply the ``restrict`` method to all functions.

    Returns a new farray.
    """
    restricted = [func.restrict(point) for func in self._items]
    return self.__class__(restricted, self.shape, self.ftype)
def stop(self, stop_I):
    """
    Get the data for an individual stop as a pandas DataFrame.

    Parameters
    ----------
    stop_I : int
        stop index

    Returns
    -------
    stop: pandas.DataFrame
    """
    # Parameterized query instead of string formatting: avoids SQL
    # injection and quoting problems if stop_I is not a plain int.
    # (qmark placeholder — assumes a sqlite3 connection; confirm.)
    return pd.read_sql_query("SELECT * FROM stops WHERE stop_I=?",
                             self.conn, params=(stop_I,))
def _check_markers(task_ids, offset=10):
    """Returns a flag for markers being found for the task_ids. If all task ids
    have markers True will be returned. Otherwise it will return False as soon
    as a None result is hit.

    :param task_ids: list of task ids to look up markers for (shuffled
        in place to spread datastore load).
    :param offset: batch size for each ndb.get_multi call.
    :return: ``(all_markers_found, has_errors)`` tuple. ``has_errors`` is
        None when not all markers were found yet.
    """
    shuffle(task_ids)

    has_errors = False

    for index in xrange(0, len(task_ids), offset):
        keys = [ndb.Key(FuriousAsyncMarker, id)
                for id in task_ids[index:index + offset]]
        markers = ndb.get_multi(keys)

        if not all(markers):
            logging.debug("Not all Async's complete")
            return False, None

        # Did any of the async's fail? Check the success property on the
        # AsyncResult. Accumulate across batches: previously only the
        # last batch's result was kept, hiding failures in earlier
        # batches.
        has_errors = has_errors or not all(
            marker.success for marker in markers)

    return True, has_errors
def get_differentially_expressed_genes(self, diff_type: str) -> "VertexSeq":
    """Get the differentially expressed genes based on diff_type.

    :param str diff_type: Differential expression type chosen by the user;
        all, down, or up. Any value other than "up"/"down" selects all
        differentially expressed genes.
    :return VertexSeq: The selected vertices (not a plain list — the
        previous docstring said list, but ``vs.select`` returns a
        VertexSeq).
    """
    # Map the requested direction to the vertex attribute to filter on;
    # anything else falls back to "any differential expression".
    selector = {
        "up": "up_regulated_eq",
        "down": "down_regulated_eq",
    }.get(diff_type, "diff_expressed_eq")
    return self.graph.vs.select(**{selector: True})
def stats(self):
    """Shortcut to pull out useful info for interactive use."""
    for template, collection in (("Classes.....: %d", self.classes),
                                 ("Properties..: %d", self.properties)):
        printDebug(template % len(collection))
def create_doc_jar(self, target, open_jar, version):
    """Returns a doc jar if either scala or java docs are available for the given target."""
    javadoc = self._java_doc(target)
    scaladoc = self._scala_doc(target)
    if not (javadoc or scaladoc):
        # No docs of either kind: nothing to package.
        return None
    jar_path = self.artifact_path(open_jar, version, suffix='-javadoc')
    with self.open_jar(jar_path, overwrite=True, compressed=True) as jar:
        def write_docs(doc_map):
            # doc_map maps a base directory to the doc files beneath it;
            # each file is stored in the jar under its relative path.
            if doc_map:
                for base_dir, doc_files in doc_map.items():
                    for doc_file in doc_files:
                        jar.write(os.path.join(base_dir, doc_file), doc_file)
        write_docs(javadoc)
        write_docs(scaladoc)
    return jar_path
def find_last(fileobj, serial):
    """Find the last page of the stream 'serial'.

    If the file is not multiplexed this function is fast. If it is,
    it must read the whole the stream.

    This finds the last page in the actual file object, or the last
    page in the stream (with eos set), whichever comes first.
    """
    # For non-muxed streams, look at the last page.
    # Seek to 64 KiB before EOF: a single Ogg page cannot exceed that, so
    # the final page header must start inside this tail window.
    try:
        fileobj.seek(-256*256, 2)
    except IOError:
        # The file is less than 64k in length.
        fileobj.seek(0)
    data = fileobj.read()
    try:
        # b"OggS" is the capture pattern beginning every Ogg page header;
        # rindex locates the start of the file's last page.
        index = data.rindex(b"OggS")
    except ValueError:
        raise error("unable to find final Ogg header")
    bytesobj = cBytesIO(data[index:])
    best_page = None
    try:
        page = OggPage(bytesobj)
    except error:
        # The trailing bytes did not parse as a page; fall through to the
        # slow full-file scan below.
        pass
    else:
        if page.serial == serial:
            if page.last:
                # Fast path: the file's final page is also this stream's
                # final (eos) page.
                return page
            else:
                best_page = page
        else:
            best_page = None
    # The stream is muxed, so use the slow way.
    fileobj.seek(0)
    try:
        page = OggPage(fileobj)
        while not page.last:
            page = OggPage(fileobj)
            # Skip pages belonging to other interleaved streams.
            while page.serial != serial:
                page = OggPage(fileobj)
            best_page = page
        # NOTE(review): the outer condition is re-checked on the page left
        # by the inner loop (which matches `serial`), except for the very
        # first page read before the loop — confirm against upstream.
        return page
    except error:
        return best_page
    except EOFError:
        return best_page
def _verify_subnet_association(route_table_desc, subnet_id):
'''
Helper function verify a subnet's route table association
route_table_desc
the description of a route table, as returned from boto_vpc.describe_route_table
subnet_id
the subnet id to verify
.. versionadded:: 2016.11.0
'''
if route_table_desc:
if 'associations' in route_table_desc:
for association in route_table_desc['associations']:
if association['subnet_id'] == subnet_id:
return True
return False | Helper function verify a subnet's route table association
route_table_desc
the description of a route table, as returned from boto_vpc.describe_route_table
subnet_id
the subnet id to verify
.. versionadded:: 2016.11.0 |
def update_fluent_cached_urls(item, dry_run=False):
    """
    Regenerate the cached URLs for an item's translations. This is a fiddly
    business: we use "hidden" methods instead of the public ones to avoid
    unnecessary and unwanted slug changes to ensure uniqueness, the logic for
    which doesn't work with our publishing.

    :param item: page-like object with ``translations`` and ``children``.
    :param dry_run: when True, compute and report changes without saving.
    :return: list of ``(translation, '_cached_url', old, new)`` tuples for
        this item's translations.
    """
    change_report = []
    if hasattr(item, 'translations'):
        for translation in item.translations.all():
            old_url = translation._cached_url
            # "Hidden" updater: recomputes _cached_url in place without the
            # slug-uniqueness side effects of the public API (see docstring).
            item._update_cached_url(translation)
            change_report.append(
                (translation, '_cached_url', old_url, translation._cached_url))
            if not dry_run:
                translation.save()
    if not dry_run:
        item._expire_url_caches()
    # Also process all the item's children, in case changes to this item
    # affect the URL that should be cached for the children. We process
    # only draft-or-published children, according to the item's status.
    if item.is_draft:
        children = [child for child in item.children.all()
                    if child.is_draft]
    else:
        children = [child for child in item.get_draft().children.all()
                    if child.is_published]
    # NOTE(review): the recursive calls' change reports are discarded; only
    # this item's changes are returned — confirm that is intended.
    for child in children:
        update_fluent_cached_urls(child, dry_run=dry_run)
    return change_report
def get_all(self, name, failobj=None):
    """Return a list of all the values for the named field.

    These will be sorted in the order they appeared in the original
    message, and may contain duplicates. Any fields deleted and
    re-inserted are always appended to the header list.

    If no such fields exist, failobj is returned (defaults to None).
    """
    # Header names compare case-insensitively.
    wanted = name.lower()
    values = [self.policy.header_fetch_parse(k, v)
              for k, v in self._headers
              if k.lower() == wanted]
    return values if values else failobj
def iteration(self):
    """
    Runs the ipfn algorithm. Automatically detects whether it is working
    with a numpy ndarray or a pandas DataFrame.

    :return: depends on ``self.verbose``:
        0 -> the fitted matrix/DataFrame only;
        1 -> (result, converged flag);
        2 -> (result, converged flag, per-iteration convergence DataFrame).
    """
    i = 0
    conv = np.inf
    old_conv = -np.inf
    conv_list = []
    m = self.original

    # If the original data input is in pandas DataFrame format
    if isinstance(self.original, pd.DataFrame):
        ipfn_method = self.ipfn_df
    elif isinstance(self.original, np.ndarray):
        ipfn_method = self.ipfn_np
        # Ensure floating-point arithmetic for the scaling steps.
        self.original = self.original.astype('float64')
    else:
        print('Data input instance not recognized')
        sys.exit(0)

    # Iterate until max iterations, the convergence threshold, or the
    # rate-of-change tolerance stops us.
    # NOTE(review): `i <= self.max_itr` appears in both halves of the
    # conjunction; the second occurrence is redundant — confirm upstream.
    while ((i <= self.max_itr and conv > self.conv_rate) and
            (i <= self.max_itr and abs(conv - old_conv) > self.rate_tolerance)):
        old_conv = conv
        m, conv = ipfn_method(m, self.aggregates, self.dimensions, self.weight_col)
        conv_list.append(conv)
        i += 1

    converged = 1
    if i <= self.max_itr:
        if not conv > self.conv_rate:
            print('ipfn converged: convergence_rate below threshold')
        elif not abs(conv - old_conv) > self.rate_tolerance:
            print('ipfn converged: convergence_rate not updating or below rate_tolerance')
    else:
        print('Maximum iterations reached')
        converged = 0

    # Handle the verbose
    if self.verbose == 0:
        return m
    elif self.verbose == 1:
        return m, converged
    elif self.verbose == 2:
        return m, converged, pd.DataFrame({'iteration': range(i), 'conv': conv_list}).set_index('iteration')
    else:
        print('wrong verbose input, return None')
        # NOTE(review): exits with status 0 despite being an error path.
        sys.exit(0)
def content():
    """Helper method that returns just the content.

    This method was added so that the text could be reused in the
    dock_help module.

    .. versionadded:: 4.0.0

    :returns: A message object without brand element.
    :rtype: safe.messaging.message.Message
    """
    # We will store a contents section at the top for easy navigation
    table_of_contents = m.Message()
    # and this will be the main message that we create
    message = m.Message()

    _create_section_header(
        message,
        table_of_contents,
        'overview',
        tr('Overview'),
        heading_level=1)

    ##
    # Credits and disclaimers ...
    ##
    _create_section_header(
        message,
        table_of_contents,
        'disclaimer',
        tr('Disclaimer'),
        heading_level=2)
    message.add(m.Paragraph(definitions.messages.disclaimer()))

    _create_section_header(
        message,
        table_of_contents,
        'limitations',
        tr('Limitations and License'),
        heading_level=2)
    bullets = m.BulletedList()
    for item in definitions.limitations():
        bullets.add(item)
    message.add(bullets)

    ##
    # Basic concepts ...
    ##
    ##
    # Help dialog contents ...
    ##
    _create_section_header(
        message,
        table_of_contents,
        'glossary',
        tr('Glossary of terms'),
        heading_level=1)

    # Group the glossary terms: each time the group changes we flush the
    # previous group's table and start a new one.
    last_group = None
    table = None
    for key, value in list(definitions.concepts.items()):
        current_group = value['group']
        if current_group != last_group:
            if last_group is not None:
                message.add(table)
            _create_section_header(
                message,
                table_of_contents,
                current_group.replace(' ', '-'),
                current_group,
                heading_level=2)
            table = _start_glossary_table(current_group)
            last_group = current_group
        row = m.Row()
        term = value['key'].replace('_', ' ').title()
        description = m.Message(value['description'])
        for citation in value['citations']:
            if citation['text'] in [None, '']:
                continue
            if citation['link'] in [None, '']:
                description.add(m.Paragraph(citation['text']))
            else:
                description.add(m.Paragraph(
                    m.Link(citation['link'], citation['text'])))
        row.add(m.Cell(term))
        row.add(m.Cell(description))
        url = _definition_icon_url(value)
        if url:
            row.add(m.Cell(m.Image(url, **MEDIUM_ICON_STYLE)))
        else:
            row.add(m.Cell(''))
        table.add(row)
    # ensure the last group's table is added
    message.add(table)

    ##
    # Help dialog contents ...
    ##
    _create_section_header(
        message,
        table_of_contents,
        'core-functionality',
        tr('Core functionality and tools'),
        heading_level=1)
    _create_section_header(
        message,
        table_of_contents,
        'dock',
        tr('The InaSAFE Dock'),
        heading_level=2)
    message.add(dock_help())
    _create_section_header(
        message,
        table_of_contents,
        'reports',
        tr('InaSAFE Reports'),
        heading_level=2)
    message.add(report_help())
    _create_section_header(
        message,
        table_of_contents,
        'extents',
        tr('Managing analysis extents with the extents selector'),
        heading_level=2)
    message.add(extent_help())
    _create_section_header(
        message,
        table_of_contents,
        'options',
        tr('InaSAFE Options'),
        heading_level=2)
    message.add(options_help())
    _create_section_header(
        message,
        table_of_contents,
        'batch-runner',
        tr('The Batch Runner'),
        heading_level=2)
    message.add(batch_help())
    _create_section_header(
        message,
        table_of_contents,
        'osm-downloader',
        tr('The OpenStreetmap Downloader'),
        heading_level=2)
    message.add(osm_help())
    _create_section_header(
        message,
        table_of_contents,
        'petabencana-downloader',
        tr('The PetaBencana Downloader'),
        heading_level=2)
    message.add(petabencana_help())
    _create_section_header(
        message,
        table_of_contents,
        'shakemap-converter',
        tr('The Shakemap Converter'),
        heading_level=2)
    message.add(shakemap_help())
    _create_section_header(
        message,
        table_of_contents,
        'multi-buffer-tool',
        tr('The Multi Buffer Tool'),
        heading_level=2)
    message.add(multi_buffer_help())

    # Field mapping tool has a few added bits to enumerate the groups
    _create_section_header(
        message,
        table_of_contents,
        'field-mapping-tool',
        tr('The Field Mapping Tool'),
        heading_level=2)
    message.add(field_mapping_tool_help())
    _create_section_header(
        message,
        table_of_contents,
        'exposure-groups',
        tr('Exposure Groups'),
        heading_level=3)
    message.add(m.Paragraph(
        'The following demographic groups apply only to vector population '
        'exposure layers:'
    ))
    for group in population_field_groups:
        definition_to_message(
            group, message, table_of_contents, heading_level=4)
    _create_section_header(
        message,
        table_of_contents,
        'aggregation-groups',
        tr('Aggregation Groups'),
        heading_level=3)
    message.add(m.Paragraph(
        'The following demographic groups apply only to aggregation layers:'
    ))
    for group in aggregation_field_groups:
        definition_to_message(
            group, message, table_of_contents, heading_level=4)
    # End of field mapping tool help

    # Keep this last in the tool section please as it has subsections
    # and so uses the top level section style
    _create_section_header(
        message,
        table_of_contents,
        'minimum-needs',
        tr('Minimum Needs'),
        heading_level=2)
    _create_section_header(
        message,
        table_of_contents,
        'minimum-needs-tool',
        tr('The minimum needs tool'),
        heading_level=3)
    message.add(needs_help())
    _create_section_header(
        message,
        table_of_contents,
        'minimum-manager',
        tr('The minimum needs manager'),
        heading_level=3)
    message.add(needs_manager_help())

    ##
    # Analysis workflow
    ##
    _create_section_header(
        message,
        table_of_contents,
        'analysis-steps',
        tr('Analysis steps'),
        heading_level=1)
    _create_section_header(
        message,
        table_of_contents,
        'analysis-internal-process',
        tr('Analysis internal process'),
        heading_level=2)
    analysis = definitions.concepts['analysis']
    message.add(analysis['description'])
    url = _definition_screenshot_url(analysis)
    if url:
        message.add(m.Paragraph(m.Image(url), style_class='text-center'))
    _create_section_header(
        message,
        table_of_contents,
        'analysis-progress-reporting',
        tr('Progress reporting steps'),
        heading_level=2)
    steps = list(definitions.analysis_steps.values())
    for step in steps:
        definition_to_message(
            step, message, table_of_contents, heading_level=3)

    ##
    # Hazard definitions
    ##
    _create_section_header(
        message,
        table_of_contents,
        'hazards',
        tr('Hazard Concepts'),
        heading_level=1)
    hazard_category = definitions.hazard_category
    definition_to_message(
        hazard_category,
        message,
        table_of_contents,
        heading_level=2)
    hazards = definitions.hazards
    definition_to_message(
        hazards,
        message,
        table_of_contents,
        heading_level=2)

    ##
    # Exposure definitions
    ##
    _create_section_header(
        message,
        table_of_contents,
        'exposures',
        tr('Exposure Concepts'),
        heading_level=1)
    exposures = definitions.exposures
    definition_to_message(
        exposures,
        message,
        table_of_contents,
        heading_level=2)

    ##
    # Defaults
    ##
    _create_section_header(
        message,
        table_of_contents,
        'defaults',
        tr('InaSAFE Defaults'),
        heading_level=1)
    table = m.Table(style_class='table table-condensed table-striped')
    row = m.Row()
    row.add(m.Cell(tr('Name'), header=True))
    row.add(m.Cell(tr('Default value'), header=True))
    row.add(m.Cell(tr('Default min'), header=True))
    row.add(m.Cell(tr('Default max'), header=True))
    row.add(m.Cell(tr('Description'), header=True))
    table.add(row)
    defaults = [
        definitions.youth_ratio_default_value,
        definitions.adult_ratio_default_value,
        definitions.elderly_ratio_default_value,
        definitions.female_ratio_default_value,
        definitions.feature_rate_default_value
    ]
    for default in defaults:
        row = m.Row()
        row.add(m.Cell(default['name']))
        row.add(m.Cell(default['default_value']))
        row.add(m.Cell(default['min_value']))
        row.add(m.Cell(default['max_value']))
        row.add(m.Cell(default['description']))
        table.add(row)
    message.add(table)

    ##
    # All Fields
    ##
    _create_section_header(
        message,
        table_of_contents,
        'all-fields',
        tr('Fields'),
        heading_level=1)
    _create_section_header(
        message,
        table_of_contents,
        'input-fields',
        tr('Input dataset fields'),
        heading_level=2)
    _create_fields_section(
        message,
        table_of_contents,
        tr('Exposure fields'),
        definitions.exposure_fields)
    _create_fields_section(
        message,
        table_of_contents,
        tr('Hazard fields'),
        definitions.hazard_fields)
    _create_fields_section(
        message,
        table_of_contents,
        tr('Aggregation fields'),
        definitions.aggregation_fields)
    _create_section_header(
        message,
        table_of_contents,
        'output-fields',
        tr('Output dataset fields'),
        heading_level=2)
    _create_fields_section(
        message,
        table_of_contents,
        tr('Impact fields'),
        definitions.impact_fields)
    _create_fields_section(
        message,
        table_of_contents,
        tr('Aggregate hazard fields'),
        definitions.aggregate_hazard_fields)
    _create_fields_section(
        message,
        table_of_contents,
        tr('Aggregation summary fields'),
        definitions.aggregation_summary_fields)
    _create_fields_section(
        message,
        table_of_contents,
        tr('Exposure summary table fields'),
        definitions.exposure_summary_table_fields)
    _create_fields_section(
        message,
        table_of_contents,
        tr('Analysis fields'),
        definitions.analysis_fields)

    ##
    # Geometries
    ##
    _create_section_header(
        message,
        table_of_contents,
        'geometries',
        tr('Layer Geometry Types'),
        heading_level=1)
    _create_section_header(
        message,
        table_of_contents,
        'vector-geometries',
        tr('Vector'),
        heading_level=2)
    definition_to_message(
        definitions.layer_geometry_point,
        message,
        table_of_contents,
        heading_level=3)
    definition_to_message(
        definitions.layer_geometry_line,
        message,
        table_of_contents,
        heading_level=3)
    definition_to_message(
        definitions.layer_geometry_polygon,
        message,
        table_of_contents,
        heading_level=3)
    _create_section_header(
        message,
        table_of_contents,
        'raster-geometries',
        tr('Raster'),
        heading_level=2)
    definition_to_message(
        definitions.layer_geometry_raster,
        message,
        table_of_contents,
        heading_level=3)

    ##
    # Layer Modes
    ##
    _create_section_header(
        message,
        table_of_contents,
        'layer-modes',
        tr('Layer Modes'),
        heading_level=1)
    definition_to_message(
        definitions.layer_mode,
        message,
        table_of_contents,
        heading_level=2)

    ##
    # Layer Purposes
    ##
    _create_section_header(
        message,
        table_of_contents,
        'layer-purposes',
        tr('Layer Purposes'),
        heading_level=1)
    definition_to_message(
        definitions.layer_purpose_hazard,
        message,
        table_of_contents,
        heading_level=2)
    definition_to_message(
        definitions.layer_purpose_exposure,
        message,
        table_of_contents,
        heading_level=2)
    definition_to_message(
        definitions.layer_purpose_aggregation,
        message,
        table_of_contents,
        heading_level=2)
    definition_to_message(
        definitions.layer_purpose_exposure_summary,
        message,
        table_of_contents,
        heading_level=2)
    definition_to_message(
        definitions.layer_purpose_aggregate_hazard_impacted,
        message,
        table_of_contents,
        heading_level=2)
    definition_to_message(
        definitions.layer_purpose_aggregation_summary,
        message,
        table_of_contents,
        heading_level=2)
    definition_to_message(
        definitions.layer_purpose_exposure_summary_table,
        message,
        table_of_contents,
        heading_level=2)
    definition_to_message(
        definitions.layer_purpose_profiling,
        message,
        table_of_contents,
        heading_level=2)

    ##
    # All units
    ##
    _create_section_header(
        message,
        table_of_contents,
        'all-units',
        tr('All Units'),
        heading_level=1)
    table = m.Table(style_class='table table-condensed table-striped')
    row = m.Row()
    row.add(m.Cell(tr('Name'), header=True))
    row.add(m.Cell(tr('Plural'), header=True))
    row.add(m.Cell(tr('Abbreviation'), header=True))
    row.add(m.Cell(tr('Details'), header=True))
    table.add(row)
    for unit in definitions.units_all:
        row = m.Row()
        row.add(m.Cell(unit['name']))
        row.add(m.Cell(unit['plural_name']))
        row.add(m.Cell(unit['abbreviation']))
        row.add(m.Cell(unit['description']))
        table.add(row)
    message.add(table)

    ##
    # Post processors
    ##
    _create_section_header(
        message,
        table_of_contents,
        'post-processors',
        tr('Post Processors'),
        heading_level=1)
    _create_section_header(
        message,
        table_of_contents,
        'post-processor-input-types',
        tr('Post Processor Input Types'),
        heading_level=2)
    table = _create_post_processor_subtable(
        post_processor_input_types
    )
    message.add(table)
    _create_section_header(
        message,
        table_of_contents,
        'post-processor-input-values',
        tr('Post Processor Input Values'),
        heading_level=2)
    table = _create_post_processor_subtable(
        post_processor_input_values
    )
    message.add(table)
    _create_section_header(
        message,
        table_of_contents,
        'post-processor-process-values',
        tr('Post Processor Process Types'),
        heading_level=2)
    table = _create_post_processor_subtable(
        safe.processors.post_processor_process_types
    )
    message.add(table)
    # NOTE(review): this element id duplicates the level-1 section above
    # ('post-processors'), producing two identical anchors in the page —
    # left unchanged here to avoid breaking existing links; confirm and
    # rename upstream (e.g. 'post-processors-details').
    _create_section_header(
        message,
        table_of_contents,
        'post-processors',
        tr('Post Processors'),
        heading_level=2)
    post_processors = safe.processors.post_processors
    table = m.Table(style_class='table table-condensed table-striped')
    row = m.Row()
    row.add(m.Cell(tr('Name'), header=True))
    row.add(m.Cell(tr('Input Fields'), header=True))
    row.add(m.Cell(tr('Output Fields'), header=True))
    table.add(row)
    for post_processor in post_processors:
        row = m.Row()
        row.add(m.Cell(post_processor['name']))
        # Input fields
        bullets = m.BulletedList()
        for key, value in sorted(post_processor['input'].items()):
            bullets.add(key)
        row.add(m.Cell(bullets))
        # Output fields
        bullets = m.BulletedList()
        for key, value in sorted(post_processor['output'].items()):
            name = value['value']['name']
            formula_type = value['type']['key']
            if formula_type == 'formula':
                formula = value['formula']
            else:
                # We use python introspection because the processor
                # uses a python function for calculations
                formula = value['function'].__name__
                formula += ' ('
                formula += value['function'].__doc__
                formula += ')'
            bullets.add('%s %s. : %s' % (
                name, formula_type, formula))
        row.add(m.Cell(bullets))
        table.add(row)
        # Add the descriptions
        row = m.Row()
        row.add(m.Cell(''))
        row.add(m.Cell(post_processor['description'], span=2))
        table.add(row)
    message.add(table)

    ##
    # Reporting
    ##
    _create_section_header(
        message,
        table_of_contents,
        'reporting',
        tr('Reporting'),
        heading_level=1)
    paragraph = m.Paragraph(
        m.ImportantText(tr('Note: ')),
        m.Text(tr(
            'This section of the help documentation is intended for advanced '
            'users who want to modify reports which are produced by InaSAFE.'
        )))
    message.add(paragraph)
    _create_section_header(
        message,
        table_of_contents,
        'reporting-overview',
        tr('Overview'),
        heading_level=2)
    message.add(m.Paragraph(tr(
        'Whenever InaSAFE completes an analysis, it will automatically '
        'generate a number of reports. Some of these reports are based on '
        'templates that are shipped with InaSAFE, and can be customised or '
        'over-ridden by creating your own templates. The following '
        'reports are produced in InaSAFE:'
    )))
    table = m.Table(style_class='table table-condensed table-striped')
    row = m.Row()
    row.add(m.Cell(tr('Name'), header=True))
    row.add(m.Cell(tr('Customisable?'), header=True))
    row.add(m.Cell(tr('Example'), header=True))
    row.add(m.Cell(tr('Description'), header=True))
    table.add(row)
    for report in all_reports:
        row = m.Row()
        row.add(m.Cell(report['name']))
        if report['customisable']:
            row.add(m.Cell(tr('Yes')))
        else:
            row.add(m.Cell(tr('No')))
        png_image_path = resources_path(
            'img', 'screenshots', report['thumbnail'])
        row.add(m.Image(png_image_path, style_class='text-center'))
        row.add(m.Cell(report['description']))
        table.add(row)
    message.add(table)
    message.add(m.Paragraph(tr(
        'In the sections that follow, we provide more technical information '
        'about the custom QGIS Expressions and special template elements '
        'that can be used to customise your templates.'
    )))
    _create_section_header(
        message,
        table_of_contents,
        'reporting-expressions',
        tr('QGIS Expressions'),
        heading_level=2)
    message.add(m.Paragraph(tr(
        'InaSAFE adds a number of expressions that can be used to '
        'conveniently obtain provenance data to the active analysis results. '
        'The expressions can also be used elsewhere in QGIS as needed.'
        '.'
    )))
    table = m.Table(style_class='table table-condensed table-striped')
    row = m.Row()
    row.add(m.Cell(tr('Name'), header=True))
    row.add(m.Cell(tr('Description'), header=True))
    table.add(row)
    for expression_name, expression in sorted(qgis_expressions().items()):
        row = m.Row()
        row.add(m.Cell(expression_name))
        help = expression.helptext()
        # This pattern comes from python/qgis/core/__init__.py ≈ L79
        pattern = r'<h3>(.*) function</h3><br>'
        help = re.sub(pattern, '', help)
        help = re.sub(r'\n', '<br>', help)
        row.add(m.Cell(help))
        table.add(row)
    message.add(table)
    _create_section_header(
        message,
        table_of_contents,
        'reporting-composer-elements',
        tr('Composer Elements'),
        heading_level=2)
    message.add(m.Paragraph(tr(
        'InaSAFE looks for elements with specific id\'s on the composer '
        'page and replaces them with InaSAFE specific content.'
    )))
    table = m.Table(style_class='table table-condensed table-striped')
    row = m.Row()
    row.add(m.Cell(tr('ID'), header=True))
    row.add(m.Cell(tr('Description'), header=True))
    table.add(row)
    for item in html_frame_elements:
        row = m.Row()
        row.add(m.Cell(item['id']))
        row.add(m.Cell(item['description']))
        table.add(row)
    message.add(table)

    ##
    # Developer documentation
    ##
    _create_section_header(
        message,
        table_of_contents,
        'developer-guide',
        tr('Developer Guide'),
        heading_level=1)
    message.add(developer_help())

    # Finally we add the table of contents at the top
    full_message = m.Message()
    # Contents is not a link so reset style. Copy SECTION_STYLE first:
    # clearing 'element_id' on the shared dict in place (as before) would
    # leak the empty id into every subsequent use of SECTION_STYLE.
    style = dict(SECTION_STYLE)
    style['element_id'] = ''
    header = m.Heading(tr('Contents'), **style)
    full_message.add(header)
    full_message.add(table_of_contents)
    full_message.add(message)
    return full_message
def patched(attrs, updates):
    """A context in which some attributes temporarily have a modified value."""
    previous = patch(attrs, updates.items())
    try:
        yield previous
    finally:
        # Restore the original values even if the body raised.
        patch(attrs, previous.items())
def add_cnt_64bit(self, oid, value, label=None):
    """Short helper to add a 64 bit counter value to the MIB subtree.

    :param oid: OID string to register the value under.
    :param value: counter value; truncated into the Counter64 range.
    :param label: optional label forwarded to add_oid_entry.
    """
    # Truncate integer to 64 bits max. Reduce modulo 2**64
    # (18446744073709551616): Counter64 spans 0..2**64-1, and the previous
    # modulus of 2**64 - 1 wrongly mapped the maximum value to 0.
    self.add_oid_entry(oid, 'Counter64', int(value) % 18446744073709551616, label=label)
def _write_jpy_config(target_dir=None, install_dir=None):
    """
    Write out a well-formed jpyconfig.properties file for easier Java
    integration in a given location.
    """
    if not target_dir:
        target_dir = _build_dir()
    # Invoke jpyutil.py from the target directory with the configured
    # JVM/JDK locations.
    args = [
        sys.executable,
        os.path.join(target_dir, 'jpyutil.py'),
        '--jvm_dll', jvm_dll_file,
        '--java_home', jdk_home_dir,
        '--log_level', 'DEBUG',
        '--req_java',
        '--req_py',
    ]
    if install_dir:
        args.extend(['--install_dir', install_dir])
    log.info('Writing jpy configuration to %s using install_dir %s' % (target_dir, install_dir))
    return subprocess.call(args)
def _new_output_char(self, char):
""" insert in text field """
self.text.config(state=tkinter.NORMAL)
self.text.insert("end", char)
self.text.see("end")
self.text.config(state=tkinter.DISABLED) | insert in text field |
def process_upload(photo_list, form, parent_object, user, status=''):
    """
    Helper function that actually processes and saves the upload(s).
    Segregated out for readability.

    :param photo_list: uploaded file objects to process.
    :param form: the submitting form (not referenced in this body).
    :param parent_object: the Article the images belong to.
    :param user: the uploading user (not referenced in this body).
    :param status: optional initial status text; progress is appended as HTML.
    :return: accumulated HTML status string describing what happened.
    """
    status += "beginning upload processing. Gathering and normalizing fields....<br>"
    for upload_file in photo_list:
        # lowercase and replace spaces in filename
        upload_file.name = upload_file.name.lower().replace(' ', '_')
        upload_name = upload_file.name
        status += """
        File is {}.
        Checking for single file upload or bulk upload... <br>
        """.format(upload_name)
        if upload_name.endswith('.jpg') or upload_name.endswith('.jpeg'):
            status += "Found jpg. Attempting to save... <br>"
            # Duplicate check: an image already attached to this article
            # whose stored photo path contains this filename.
            try:
                dupe = ArticleImage.objects.get(photo__contains=upload_name, article=parent_object)
            except ObjectDoesNotExist:
                dupe = None
            if not dupe:
                try:
                    upload = ArticleImage(
                        article=parent_object,
                        photo=upload_file
                    )
                    upload.save()
                    status += "Saved and uploaded jpg."
                except Exception as error:
                    # Best-effort: report the failure in the status text
                    # rather than aborting the remaining uploads.
                    status += "Error saving image: {}".format(error)
            # NOTE(review): the purpose of this pause is not evident from
            # this file — presumably throttling storage writes; confirm.
            time.sleep(1)
    return status
def get_urls(self):
    """
    Content of field ``856u42``. Typically URL pointing to producer's
    homepage.

    Returns:
        list: List of URLs defined by producer.
    """
    urls = self.get_subfields("856", "u", i1="4", i2="2")
    # Bug fix: on Python 3 ``map`` returns a lazy iterator, not the list
    # the docstring promises (and it can only be consumed once).
    # NOTE(review): the replace() below looks like an HTML-unescape whose
    # '&amp;' literal was mangled in transit - confirm against upstream.
    return [url.replace("&", "&") for url in urls]
def set_console(self, console):
    """
    Sets the TCP console port.

    :param console: console port (integer)
    """
    self.console = console
    command = 'vm set_con_tcp_port "{name}" {console}'.format(
        name=self._name, console=self.console)
    yield from self._hypervisor.send(command)
def send_message(
    self, request: str, response_expected: bool, **kwargs: Any
) -> Response:
    """
    Transport the message to the server and return the response.

    Args:
        request: The JSON-RPC request string.
        response_expected: Whether the request expects a response.

    Returns:
        A Response object.
    """
    payload = (str(request) + self.delimiter).encode(self.encoding)
    self.socket.send(payload)
    buffered = bytes()
    text = None
    # Keep reading until the configured delimiter terminates the response.
    # TODO Do not wait for a response if the message sent is a notification.
    while True:
        buffered += self.socket.recv(1024)
        text = buffered.decode(self.encoding)
        if len(text) < self.delimiter_length:
            continue
        # TODO Check that we're not in the middle of the response.
        if text[-self.delimiter_length:] == self.delimiter:
            break
    assert text is not None
    return Response(text[:-self.delimiter_length])
def switch_off(self, *args):
    """Set the switch state to False when ``off_check(*args)`` passes.

    :param args: positional arguments forwarded to ``off_check``.
    :return: result of ``self._switch.switch(False)`` when the check
        passes, otherwise ``False``.
    """
    if not self.off_check(*args):
        return False
    return self._switch.switch(False)
def run_command(command):
    """Run ``command`` and return its decoded stdout.

    :raises AirflowConfigException: when the command exits non-zero.
    """
    proc = subprocess.Popen(
        shlex.split(command),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True)
    stdout, stderr = (
        stream.decode(sys.getdefaultencoding(), 'ignore')
        for stream in proc.communicate())
    if proc.returncode != 0:
        raise AirflowConfigException(
            "Cannot execute {}. Error code is: {}. Output: {}, Stderr: {}"
            .format(command, proc.returncode, stdout, stderr)
        )
    return stdout
def eventFilter(self, object, event):
    """
    Filters the object for particular events.

    :param      object | <QObject>
                event  | <QEvent>

    :return     <bool> | consumed
    """
    if event.type() != event.KeyPress:
        return False
    if event.key() in (Qt.Key_Return, Qt.Key_Enter):
        # Enter/Return submits a new query.
        self.addQuery()
        return True
    return False
def show_hydrophobic(self):
    """Visualizes hydrophobic contacts."""
    contacts = self.plcomplex.hydrophobic_contacts
    if len(contacts.bs_ids) == 0:
        # No contacts: create an empty named selection so later code can
        # still refer to it.
        cmd.select('Hydrophobic-P', 'None')
        return
    self.select_by_ids('Hydrophobic-P', contacts.bs_ids, restrict=self.protname)
    self.select_by_ids('Hydrophobic-L', contacts.lig_ids, restrict=self.ligname)
    for pair in contacts.pairs_ids:
        # Draw a distance object between each protein/ligand atom pair.
        cmd.select('tmp_bs', 'id %i & %s' % (pair[0], self.protname))
        cmd.select('tmp_lig', 'id %i & %s' % (pair[1], self.ligname))
        cmd.distance('Hydrophobic', 'tmp_bs', 'tmp_lig')
    if self.object_exists('Hydrophobic'):
        cmd.set('dash_gap', 0.5, 'Hydrophobic')
        cmd.set('dash_color', 'grey50', 'Hydrophobic')
def changed(self, message=None, *args):
    """Mark the object as changed.

    If a ``parent`` attribute is set, ``changed()`` is called on the
    parent, propagating the notification up the chain.  The message (if
    provided) is %-formatted with ``args`` and debug-logged.
    """
    if message is not None:
        self.logger.debug('%s: %s', self._repr(), message % args)
    self.logger.debug('%s: changed', self._repr())
    if self.parent is not None:
        self.parent.changed()
    elif isinstance(self, Mutable):
        # Root of the chain: notify SQLAlchemy's Mutable machinery.
        super(TrackedObject, self).changed()
def allreduce(self, x, mesh_axes, reduction_fn_string):
    """Grouped allreduce (reduced across the given mesh dimensions).

    Args:
        x: a LaidOutTensor
        mesh_axes: a list of integers
        reduction_fn_string: "SUM"
    Returns:
        a LaidOutTensor
    Raises:
        ValueError: if the reduction is not yet implemented.
    """
    if not mesh_axes:
        return x
    x = x.to_laid_out_tensor()
    if reduction_fn_string != "SUM":
        # Generic path: gather along each axis, then reduce locally.
        for axis in mesh_axes:
            x = self.allconcat(x, axis, 0, stack=True)
        x = self.LaidOutTensor(
            [mtf.reduction_fn(reduction_fn_string)(x.one_slice, 0)])
        return x
    group_assignment = self._create_group_assignment(mesh_axes)
    group_size = len(group_assignment[0])
    slice_in = x.one_slice
    original_dtype = slice_in.dtype
    if original_dtype == tf.float32:
        needs_cast = False
    elif original_dtype == tf.bfloat16:
        # bfloat16 sums lose precision in large groups; cast only when
        # the group exceeds the configured threshold.
        needs_cast = group_size > self._allreduce_in_bfloat16_max_group_size
    else:
        tf.logging.info("Casting %s to float32 for allreduce" % slice_in.dtype)
        needs_cast = True
    if needs_cast:
        slice_in = tf.cast(slice_in, tf.float32)
    summed = tpu_ops.cross_replica_sum(slice_in, group_assignment)
    if needs_cast:
        summed = tf.cast(summed, original_dtype)
    return self.LaidOutTensor([summed])
def intersection(self, *args):
    """
    Produce an array that contains every item shared between all the
    passed-in arrays.
    """
    # NOTE(review): when the first element is not an int, only
    # ``self.obj[0]`` seeds the set - presumably obj wraps a list of
    # arrays in that case; confirm against callers.
    if type(self.obj[0]) is int:
        base = self.obj
    else:
        base = tuple(self.obj[0])
    shared = set(base)
    for other in args:
        shared &= set(other)
    return self._wrap(list(shared))
def sample_storage_size(self):
    """Get the storage size (in MB) of the samples storage collection,
    or 0 when the stats command fails."""
    try:
        stats = self.database.command('collStats', 'fs.chunks')
    except pymongo.errors.OperationFailure:
        return 0
    # Bytes -> megabytes.
    return stats['size'] / 1024.0 / 1024.0
def place_oceans_at_map_borders(world):
    """
    Lower the elevation near the border of the map
    """
    # Border width: a fifth of the larger dimension, capped at 30 cells.
    border = int(min(30, max(world.width / 5, world.height / 5)))

    def lower(x, y, i):
        # Scale elevation down linearly towards the map edge (i == 0 at
        # the very edge, so edge cells drop to zero).
        world.layers['elevation'].data[y, x] = \
            (world.layers['elevation'].data[y, x] * i) / border

    for x in range(world.width):
        for i in range(border):
            lower(x, i, i)
            lower(x, world.height - i - 1, i)
    for y in range(world.height):
        for i in range(border):
            lower(i, y, i)
            lower(world.width - i - 1, y, i)
def move(self, source_path, destination_path):
    """
    Rename/move an object from one GCS location to another.

    Implemented as copy-then-delete, so the operation is not atomic.
    """
    self.copy(source_path, destination_path)
    self.remove(source_path)
def _get_dS2S(self, imt_per):
"""
Table 4 of 2013 report
"""
if imt_per == 0:
dS2S = 0.05
elif 0 < imt_per < 0.15:
dS2S = self._interp_function(-0.15, 0.05, 0.15, 0, imt_per)
elif 0.15 <= imt_per < 0.45:
dS2S = self._interp_function(0.4, -0.15, 0.45, 0.15, imt_per)
elif 0.45 <= imt_per < 3.2:
dS2S = 0.4
elif 3.2 <= imt_per < 5:
dS2S = self._interp_function(0.08, 0.4, 5, 3.2, imt_per)
elif 5 <= imt_per <= 10:
dS2S = 0.08
else:
dS2S = 0
return dS2S | Table 4 of 2013 report |
def init_environment():
    """Load ``KEY=VALUE`` lines from a ``.env`` file next to this module
    into ``os.environ`` so they are available via ``os.environ.get``.

    Lines without ``=`` (or with an empty key) are ignored.  Values may
    themselves contain ``=`` characters (URLs, base64 tokens, ...).
    """
    base_path = os.path.abspath(os.path.dirname(__file__))
    env_path = '{0}/.env'.format(base_path)
    if os.path.exists(env_path):
        with open(env_path) as f:
            for line in f:
                # Bug fix: the previous split('=') + len==2 check silently
                # skipped any line whose value contained '=' (e.g. URLs).
                key, sep, value = line.strip().partition('=')
                if sep and key:
                    os.environ[key] = value
def one_thread_per_process():
    """Context-manager generator limiting MKL to one thread per process.

    Intended to be used (wrapped as a context manager) in a with
    statement::

        >>> with one_thread_per_process():
        ...     do_something()  # one thread per process

    Notes:
        Only effective when MKL (Intel Math Kernel Library) is installed
        and used by e.g. NumPy/SciPy; otherwise this is a no-op.
    """
    try:
        import mkl
    except ImportError:
        mkl = None
    if mkl is None:
        # MKL unavailable: nothing to limit or restore.
        yield
        return
    saved_threads = mkl.get_max_threads()
    mkl.set_num_threads(1)
    try:
        # Body of the with-statement runs here.
        yield
    finally:
        # Always restore the original thread count.
        mkl.set_num_threads(saved_threads)
def b_operator(self, P):
    r"""
    The B operator, mapping P into

    .. math::

        B(P) := R - \beta^2 A'PB(Q + \beta B'PB)^{-1}B'PA + \beta A'PA

    and also returning

    .. math::

        F := (Q + \beta B'PB)^{-1} \beta B'PA

    Parameters
    ----------
    P : array_like(float, ndim=2)
        A matrix that should be n x n

    Returns
    -------
    F : array_like(float, ndim=2)
        The F matrix defined above (all zeros when pure forecasting)
    new_P : array_like(float, ndim=2)
        The matrix P after applying the B operator
    """
    A, B, Q, R, beta = self.A, self.B, self.Q, self.R, self.beta
    gram = Q + beta * dot(B.T, dot(P, B))
    cross = beta * dot(B.T, dot(P, A))
    drift = beta * dot(A.T, dot(P, A))
    if self.pure_forecasting:
        # No control in the pure-forecasting case.
        F = np.zeros((self.k, self.n))
    else:
        F = solve(gram, cross)
    new_P = R - dot(cross.T, F) + drift
    return F, new_P
def is_ext_pack_usable(self, name):
    """Check if the given extension pack is loaded and usable.

    in name of type str
        The name of the extension pack to check for.

    return usable of type bool
        Is the given extension pack loaded and usable.
    """
    # NOTE: ``basestring`` implies a Python 2 compatibility shim is
    # expected to be in scope at module level.
    if not isinstance(name, basestring):
        raise TypeError("name can only be an instance of type basestring")
    return self._call("isExtPackUsable",
                      in_p=[name])
def register_widgets():
    """
    Register all collected widgets from settings.

    WIDGETS = [('mymodule.models.MyWidget', {'mykwargs': 'mykwarg'})]
    WIDGETS = ['mymodule.models.MyWidget', MyClass]
    """
    # special case: register external apps
    Page.create_content_type(
        ApplicationWidget, APPLICATIONS=settings.APPLICATION_CHOICES)
    for _optgroup, _widgets in six.iteritems(settings.WIDGETS):
        optgroup = _optgroup if _optgroup != 'ungrouped' else None
        for widget in _widgets:
            kwargs = {'optgroup': optgroup}
            # load class from strings
            if isinstance(widget, six.string_types):
                # The original wrapped this in a bare except that only
                # re-raised the same exception - a no-op, removed.
                WidgetCls = get_class_from_string(widget)
            elif isinstance(widget, tuple):
                try:
                    WidgetCls = get_class_from_string(widget[0])
                    if len(widget) > 1:
                        kwargs.update(widget[1])
                except Exception as e:
                    # Bug fix: the original raised with the undefined name
                    # ``mod`` here, masking the real error with a NameError.
                    raise Exception('%s: %s' % (widget[0], e))
            else:
                WidgetCls = widget
            Page.create_content_type(
                WidgetCls, **kwargs)
def mkdir(self, req, parent, name, mode):
    """Create a directory.

    Valid replies:
        reply_entry
        reply_err

    This filesystem is read-only, so every mkdir request is refused
    with EROFS.
    """
    self.reply_err(req, errno.EROFS)
def addLayer(self, layer, z=-1):
    """
    Adds a new layer to the stack, optionally at the specified z-value.

    ``layer`` must be an instance of Layer or subclasses.

    ``z`` is the index to insert the layer at; ``-1`` (the default)
    appends to the end of the stack.
    """
    if not isinstance(layer, Layer):
        raise TypeError("layer must be an instance of Layer!")
    if z == -1:
        self.layers.append(layer)
        return
    self.layers.insert(z, layer)
def future(self, rev=None):
    """Return a Mapping of items after the given revision.

    Default revision is the last one looked up.
    """
    if rev is not None:
        # Reposition before building the view.
        self.seek(rev)
    return WindowDictFutureView(self._future)
def get_design_run_results(self, data_view_id, run_uuid):
    """
    Retrieves the results of an existing design run.

    :param data_view_id: The ID number of the data view to which the
        run belongs, as a string
    :type data_view_id: str
    :param run_uuid: The UUID of the design run to retrieve results from
    :type run_uuid: str
    :return: A :class:`DesignResults` object
    """
    url = routes.get_data_view_design_results(data_view_id, run_uuid)
    result = self._get(url).json()["data"]
    return DesignResults(
        best_materials=result.get("best_material_results"),
        next_experiments=result.get("next_experiment_results"),
    )
def user_parse(data):
    """Parse user information from the provider response, yielding
    normalized ``(field, value)`` pairs."""
    user_info = data.get('user', {})
    yield 'id', data.get('user_nsid') or user_info.get('id')
    yield 'username', user_info.get('username', {}).get('_content')
    # First token is the first name; everything after the first space is
    # treated as the last name.
    full_name = data.get('fullname', {}).get('_content', '')
    first_name, _, last_name = full_name.partition(' ')
    yield 'first_name', first_name
    yield 'last_name', last_name
def save_as_plt(self, fname, pixel_array=None, vmin=None, vmax=None,
                cmap=None, format=None, origin=None):
    """Save the image from a numpy array using matplotlib.

    :param fname: Location and name of the image file to be saved.
    :param pixel_array: Numpy pixel array, i.e. ``numpy()`` return value
    :param vmin: matplotlib vmin
    :param vmax: matplotlib vmax
    :param cmap: matplotlib color map (defaults to ``cm.bone``)
    :param format: matplotlib format
    :param origin: matplotlib origin

    :return: True if successful
    """
    from matplotlib.backends.backend_agg \
        import FigureCanvasAgg as FigureCanvas
    from matplotlib.figure import Figure
    from pylab import cm

    if pixel_array is None:
        pixel_array = self.numpy
    if cmap is None:
        cmap = cm.bone
    # dpi=1 with figsize equal to the transposed array shape yields one
    # figure pixel per array element.
    fig = Figure(figsize=pixel_array.shape[::-1], dpi=1, frameon=False)
    FigureCanvas(fig)  # attaches an Agg canvas to the figure
    fig.figimage(pixel_array, cmap=cmap, vmin=vmin,
                 vmax=vmax, origin=origin)
    fig.savefig(fname, dpi=1, format=format)
    return True
def tell(self):
    """
    :return: number of records processed from the original file
    """
    if not self._shifts:
        raise self._implement_error
    pos = self._file.tell()
    if pos == self._shifts[0]:
        return 0
    if pos == self._shifts[-1]:
        return len(self._shifts) - 1
    index = bisect_left(self._shifts, pos)
    # Exact record boundary maps directly; otherwise we are inside the
    # record that starts at the previous shift.
    if pos in self._shifts:
        return index
    return index - 1
def identify_phase(T, P, Tm=None, Tb=None, Tc=None, Psat=None):
    r'''Determine the phase of a one-species chemical system according to
    basic rules, using whatever information is available. Considers only
    the phases liquid, solid, and gas; does not consider two-phase
    scenarios, as should occur between phase boundaries.

    Rules, applied in order:

    * T at or below a known melting temperature: solid.
    * T at or above a known critical temperature: gas.
    * With a known vapor pressure at `T`: gas when P <= Psat, liquid
      otherwise.
    * Otherwise fall back on the boiling point: near-atmospheric
      pressure (90-110 kPa) compares T against Tb directly; above
      110 kPa only the liquid side (T <= Tb) can be decided.
    * None when the phase cannot be determined.

    Parameters
    ----------
    T : float
        Temperature, [K]
    P : float
        Pressure, [Pa]
    Tm : float, optional
        Normal melting temperature, [K]
    Tb : float, optional
        Normal boiling point, [K]
    Tc : float, optional
        Critical temperature, [K]
    Psat : float, optional
        Vapor pressure of the fluid at `T`, [Pa]

    Returns
    -------
    phase : str
        Either 's', 'l', 'g', or None if the phase cannot be determined

    Notes
    -----
    No special attention is paid to any phase transition. When the
    melting point is not provided, the possibility of the fluid being
    solid is simply ignored.

    Examples
    --------
    >>> identify_phase(T=280, P=101325, Tm=273.15, Psat=991)
    'l'
    '''
    if Tm and T <= Tm:
        return 's'
    if Tc and T >= Tc:
        # The critical point itself gets no special return value.
        return 'g'
    if Psat:
        # Co-existence of phases is not modelled; transition straight
        # from gas to liquid at P == Psat.
        return 'g' if P <= Psat else 'l'
    if Tb:
        # Crude attempt to model phases without Psat: treat Tb as
        # holding from 90 kPa to 110 kPa.
        if 9E4 < P < 1.1E5:
            return 'l' if T < Tb else 'g'
        if P > 1.1E5 and T <= Tb:
            # At elevated pressure it is definitely liquid below Tb;
            # above Tb nothing can be said.
            return 'l'
        return None
    return None
def string(self):
    r"""The sole string argument of this command, when applicable.

    This is valid if and only if

    1. the expression is a :class:`.TexCmd` AND
    2. the command has only one argument,

    in which case that argument's value is returned; otherwise ``None``.

    :rtype: Union[None,str]

    >>> from TexSoup import TexSoup
    >>> soup = TexSoup(r'''\textbf{Hello}''')
    >>> soup.textbf.string
    'Hello'
    """
    expr = self.expr
    if isinstance(expr, TexCmd) and len(expr.args) == 1:
        return expr.args[0].value
def reference_index(self, ref_id):
    """Return the index of the first reference with this ID.

    Raises ReferenceNotFoundError when no reference matches.
    """
    matches = (i for i in range(self.reference_count())
               if self.reference_id(i) == ref_id)
    try:
        return next(matches)
    except StopIteration as e:
        raise ReferenceNotFoundError("ID: " + ref_id) from e
def untrace_class(cls):
    """
    Untraces given class: its methods, plain functions and properties.

    :param cls: Class to untrace.
    :type cls: object
    :return: Definition success.
    :rtype: bool
    """
    for _name, member in inspect.getmembers(cls, inspect.ismethod):
        untrace_method(cls, member)
    for _name, member in inspect.getmembers(cls, inspect.isfunction):
        untrace_method(cls, member)
    for _name, member in inspect.getmembers(
            cls, lambda attribute: type(attribute) is property):
        untrace_property(cls, member)
    set_untraced(cls)
    return True
def _get_color(self, age):
"""Get the fill color depending on age.
Args:
age (int): The age of the branch/es
Returns:
tuple: (r, g, b)
"""
if age == self.tree.age:
return self.leaf_color
color = self.stem_color
tree = self.tree
if len(color) == 3:
return color
diff = [color[i+3]-color[i] for i in range(3)]
per_age = [diff[i]/(tree.age-1) for i in range(3)]
return tuple([int(color[i]+per_age[i]*age) for i in range(3)]) | Get the fill color depending on age.
Args:
age (int): The age of the branch/es
Returns:
tuple: (r, g, b) |
def find_ge(self, dt):
    '''Building block of all searches. Find the index corresponding to
    the leftmost value greater than or equal to *dt*.

    If *dt* is greater than :func:`dynts.TimeSeries.end`, a
    :class:`dynts.exceptions.RightOutOfBound` exception is raised.
    *dt* must be a python datetime.date instance.'''
    index = bisect_left(self.dates, dt)
    if index == len(self.dates):
        # Past the end of the series.
        raise RightOutOfBound
    return index
def get_refinement_options(self):
    """Yield possible specializations for the upper values in the taxonomy."""
    domain = self.get_domain()
    for value in self.upper:
        yield from domain.successors(value)
def data(self):
    """
    Returns signed data if present in the message, or ``None`` for a
    detached signature.
    """
    if self.detached:
        # Detached signature: the payload lives outside the CMS structure.
        return None
    bio = Membio()
    ok = libcrypto.CMS_verify(self.ptr, None, None, None, bio.bio,
                              Flags.NO_VERIFY)
    if not ok:
        raise CMSError("extract data")
    return str(bio)
def set_cpus(self, cpus=0):
    """
    Add a --cpus option to specify how many threads to use.

    Values outside the open interval (0, cpu_count) fall back to all
    available CPUs.
    """
    from multiprocessing import cpu_count
    available = cpu_count()
    if not 0 < cpus < available:
        cpus = available
    self.add_option(
        "--cpus", default=cpus, type="int",
        help="Number of CPUs to use, 0=unlimited [default: %default]")
def xinfo_help(self):
    """Retrieve help regarding the ``XINFO`` sub-commands."""
    fut = self.execute(b'XINFO', b'HELP')
    # Join the reply lines into a single bytes blob.
    return wait_convert(fut, lambda lines: b'\n'.join(lines))
def lists(self, uid=0, **kwargs):
    """
    Returns a list of :class:`List` objects (lists which Contact belongs
    to) and a pager dict.

    :Example:

    lists, pager = client.contacts.lists(uid=1901010)

    :param int uid: The unique id of the Contact to update. Required.
    :param int page: Fetch specified results page. Default=1
    :param int limit: How many results on page. Default=10
    """
    subresource = Lists(self.base_uri, self.auth)
    return self.get_subresource_instances(
        uid, instance=subresource, resource="lists", params=kwargs)
def device_from_request(request):
    """
    Determine the device name from the request by first looking for an
    overriding cookie, and if not found then matching the user agent.

    Used at both the template level for choosing the template to load and
    also at the cache level as a cache key prefix.
    """
    from yacms.conf import settings
    try:
        # If a device was set via cookie, match available devices.
        for device, _ in settings.DEVICE_USER_AGENTS:
            if device == request.COOKIES["yacms-device"]:
                return device
    except KeyError:
        # If a device wasn't set via cookie, match user agent.
        try:
            user_agent = request.META["HTTP_USER_AGENT"]
        except KeyError:
            pass
        else:
            # Bug fix: on Python 3 the header is already ``str``, and the
            # old unconditional ``.decode("utf-8")`` raised AttributeError,
            # which the broad except silently swallowed - disabling
            # user-agent matching entirely. Only decode actual bytes.
            if isinstance(user_agent, bytes):
                try:
                    user_agent = user_agent.decode("utf-8")
                except (UnicodeDecodeError, UnicodeEncodeError):
                    return ""
            user_agent = user_agent.lower()
            for device, ua_strings in settings.DEVICE_USER_AGENTS:
                for ua_string in ua_strings:
                    if ua_string.lower() in user_agent:
                        return device
    return ""
def options_response(env):
    """Construct the WbResponse for an OPTIONS request.

    :param dict env: The WSGI environment dictionary
    :return: an empty 200 response carrying access-control headers
    :rtype: WbResponse
    """
    headers = StatusAndHeaders('200 Ok', [
        ('Content-Type', 'text/plain'),
        ('Content-Length', '0'),
    ])
    response = WbResponse(headers)
    response.add_access_control_headers(env=env)
    return response
def _read_mode_acopt(self, size, kind):
    """Read Alternate Checksum Request option.

    Positional arguments:
        size - int, length of option
        kind - int, 14 (Alt-Chksum Request)

    Returns:
        * dict -- extracted Alternate Checksum Request (CHKSUM-REQ) option

    Structure of TCP CHKSUM-REQ [RFC 1146][RFC 6247]:

        +----------+----------+----------+
        | Kind=14  | Length=3 |  chksum  |
        +----------+----------+----------+

        Octets  Bits  Name                  Description
        0       0     tcp.chksumreq.kind    Kind (14)
        1       8     tcp.chksumreq.length  Length (3)
        2       16    tcp.chksumreq.ac      Checksum Algorithm
    """
    code = self._read_unpack(size)
    # Translate the raw algorithm code via the module-level lookup table.
    algorithm = chksum_opt.get(code)
    return dict(
        kind=kind,
        length=size,
        ac=algorithm,
    )
def is_known_type(self, type_name):
    """Check if a type is known to the type system.

    Returns:
        bool: True if the type is a known instantiated simple type,
        False otherwise
    """
    # The membership test already yields the boolean; no need for the
    # explicit if/return-True/return-False dance.
    return str(type_name) in self.known_types
def build_verified_certificate_chain(self, received_certificate_chain: List[Certificate]) -> List[Certificate]:
    """Try to figure out the verified chain by finding the anchor/root CA
    the received chain chains up to in the trust store.

    This will not clean the certificate chain if additional/invalid
    certificates were sent, and the signatures and fields (notBefore,
    etc.) are not verified.
    """
    # The certificates must have been sent in the correct order or we give up.
    if not self._is_certificate_chain_order_valid(received_certificate_chain):
        raise InvalidCertificateChainOrderError()

    # TODO: OpenSSL 1.1.0 has SSL_get0_verified_chain() to do this directly
    verified_chain = []
    trust_anchor = None
    for certificate in received_certificate_chain:
        verified_chain.append(certificate)
        trust_anchor = self._get_certificate_with_subject(certificate.issuer)
        if trust_anchor:
            # Found the anchor in the trust store - chain is complete.
            verified_chain.append(trust_anchor)
            break
    if trust_anchor is None:
        # Could not build the verified chain.
        raise AnchorCertificateNotInTrustStoreError()
    return verified_chain
def _multi_call(function, contentkey, *args, **kwargs):
'''
Retrieve full list of values for the contentkey from a boto3 ApiGateway
client function that may be paged via 'position'
'''
ret = function(*args, **kwargs)
position = ret.get('position')
while position:
more = function(*args, position=position, **kwargs)
ret[contentkey].extend(more[contentkey])
position = more.get('position')
return ret.get(contentkey) | Retrieve full list of values for the contentkey from a boto3 ApiGateway
client function that may be paged via 'position' |
def render_search(self, ctx, data):
    """
    Render some UI for performing searches, if we know about a search
    aggregator; otherwise render nothing.
    """
    if self.username is None:
        return ''
    translator = self._getViewerPrivateApplication()
    aggregator = translator.getPageComponents().searchAggregator
    if aggregator is None or not aggregator.providers():
        # No aggregator (or no providers) - nothing to render.
        return ''
    return ctx.tag.fillSlots(
        'form-action', translator.linkTo(aggregator.storeID))
def event_transition(self, event_cls, event_type,
                     ion_type=None, value=None, annotations=None, depth=None, whence=None):
    """Returns an ion event event_transition that yields to another co-routine.
    If ``annotations`` is not specified, then the ``annotations`` are the annotations of this
    context.
    If ``depth`` is not specified, then the ``depth`` is depth of this context.
    If ``whence`` is not specified, then ``whence`` is the whence of this context.
    """
    # Fall back to this context's annotations; normalize a missing value to ().
    if annotations is None:
        annotations = self.annotations
    if annotations is None:
        annotations = ()
    if not (event_type is IonEventType.CONTAINER_START) and \
            annotations and (self.limit - self.queue.position) != 0:
        # This value is contained in an annotation wrapper, from which its limit was inherited. It must have
        # reached, but not surpassed, that limit.
        raise IonException('Incorrect annotation wrapper length.')
    # Fall back to this context's depth and whence when not overridden.
    if depth is None:
        depth = self.depth
    if whence is None:
        whence = self.whence
    return Transition(
        event_cls(event_type, ion_type, value, self.field_name, annotations, depth),
        whence
    ) | Returns an ion event event_transition that yields to another co-routine.
If ``annotations`` is not specified, then the ``annotations`` are the annotations of this
context.
If ``depth`` is not specified, then the ``depth`` is depth of this context.
If ``whence`` is not specified, then ``whence`` is the whence of this context. |
def p_members(self, p):
    # NOTE: in PLY parsers the docstring below *is* the grammar rule for this
    # production -- it is consumed by yacc and must not be reworded.
    """members :
             | members member VALUE_SEPARATOR
             | members member"""
    if len(p) == 1:
        # Empty production: start a fresh, empty member list.
        p[0] = list()
    else:
        # Append the newly parsed member and propagate the accumulated list.
        p[1].append(p[2])
        p[0] = p[1] | members :
| members member VALUE_SEPARATOR
| members member |
def top_charts(self):
    """Get a listing of the default top charts."""
    # The interesting payload is the response body itself.
    return self._call(mc_calls.BrowseTopChart).body
def get_db_mutations(mut_db_path, gene_list, res_stop_codons):
    """
    This function opens the file resistenss-overview.txt, and reads the
    content into a dict of dicts. The dict will contain information about
    all known mutations given in the database. This dict is returned.

    :param mut_db_path: path to the resistens-overview database file.
    :param gene_list: gene IDs of interest; rows for other genes are skipped.
    :param res_stop_codons: if 'specified', stop-codon resistance is only
        recorded for rows inside the database's "stop codon" section;
        otherwise any '*' alternative amino acid counts.
    :return: tuple (known_mutations, drug_genes, known_stop_codon).
    """
    # Open resistens-overview.txt
    try:
        drugfile = open(mut_db_path, "r")
    except:
        sys.exit("Wrong path: %s"%(mut_db_path))
    # Initiate variables
    known_mutations = dict()
    drug_genes = dict()
    known_stop_codon = dict()
    indelflag = False
    stopcodonflag = False
    # Go throug mutation file line by line
    for line in drugfile:
        # Ignore headers and check where the indel section starts
        if line.startswith("#"):
            if "indel" in line.lower():
                indelflag = True
            elif "stop codon" in line.lower():
                stopcodonflag = True
            else:
                stopcodonflag = False
            continue
        # Ignore empty lines
        if line.strip() == "":
            continue
        # Assert that all lines have the correct set of columns
        mutation = [data.strip() for data in line.strip().split("\t")]
        assert len(mutation) == 9, "mutation overview file (%s) must have 9 columns, %s"%(mut_db_path, mutation)
        # Extract all info on the line (even though it is not all used)
        gene_ID = mutation[0]
        # Only consider mutations in genes found in the gene list
        if gene_ID in gene_list:
            gene_name = mutation[1]
            no_of_mut = int(mutation[2])
            mut_pos = int(mutation[3])
            ref_codon = mutation[4]
            ref_aa = mutation[5]
            alt_aa = mutation[6].split(",")
            res_drug = mutation[7].replace("\t", " ")
            pmid = mutation[8].split(",")
            # Check if resistance is known to be caused by a stop codon in the gene
            if ("*" in alt_aa and res_stop_codons != 'specified') or (res_stop_codons == 'specified' and stopcodonflag == True):
                if gene_ID not in known_stop_codon:
                    known_stop_codon[gene_ID] = {"pos": [], "drug": res_drug}
                known_stop_codon[gene_ID]["pos"].append(mut_pos)
            # Add genes associated with drug resistance to drug_genes dict
            drug_lst = res_drug.split(",")
            for drug in drug_lst:
                drug = drug.upper()
                if drug not in drug_genes:
                    drug_genes[drug] = []
                if gene_ID not in drug_genes[drug]:
                    drug_genes[drug].append(gene_ID)
            # Initiate empty dict to store relevant mutation information
            mut_info = dict()
            # Save need mutation info with pmid cooresponding to the amino acid change
            # (rows may list fewer PMIDs than alternative amino acids; pad with "-")
            for i in range(len(alt_aa)):
                try:
                    mut_info[alt_aa[i]] = {"gene_name": gene_name, "drug": res_drug, "pmid": pmid[i]}
                except IndexError:
                    mut_info[alt_aa[i]] = {"gene_name": gene_name, "drug": res_drug, "pmid": "-"}
            # Check if more than one mutations is needed for resistance
            if no_of_mut != 1:
                print("More than one mutation is needed, this is not implemented", mutation)
            # Add all possible types of mutations to the dict
            if gene_ID not in known_mutations:
                known_mutations[gene_ID] = {"sub" : dict(), "ins" : dict(), "del" : dict()}
            # Check for the type of mutation
            # (in the indel section, column 6 holds "ins"/"del" rather than a residue)
            if indelflag == False:
                mutation_type = "sub"
            else:
                mutation_type = ref_aa
            # Save mutations positions with required information given in mut_info
            if mut_pos not in known_mutations[gene_ID][mutation_type]:
                known_mutations[gene_ID][mutation_type][mut_pos] = dict()
            for aa in alt_aa:
                known_mutations[gene_ID][mutation_type][mut_pos][aa] = mut_info[aa]
    drugfile.close()
    # Check that all genes in the gene list has known mutations
    for gene in gene_list:
        if gene not in known_mutations:
            known_mutations[gene] = {"sub" : dict(), "ins" : dict(), "del" : dict()}
    return known_mutations, drug_genes, known_stop_codon | This function opens the file resistenss-overview.txt, and reads the
content into a dict of dicts. The dict will contain information about
all known mutations given in the database. This dict is returned. |
def com_google_fonts_check_metadata_canonical_filename(font_metadata,
                                                       canonical_filename,
                                                       is_variable_font):
  """METADATA.pb: Filename is set canonically?"""
  if is_variable_font:
    # Variable fonts carry a "-VF" suffix in place of the style name.
    for vf_suffix, style_name in (("Roman-VF", "Regular"),
                                  ("Italic-VF", "Italic")):
      if style_name in canonical_filename:
        canonical_filename = vf_suffix.join(
            canonical_filename.split(style_name))
  if canonical_filename == font_metadata.filename:
    yield PASS, "Filename in METADATA.pb is set canonically."
  else:
    yield FAIL, ("METADATA.pb: filename field (\"{}\")"
                 " does not match "
                 "canonical name \"{}\".".format(font_metadata.filename,
                                                 canonical_filename))
def un_camel_case(text):
    r"""
    Split apart words that are written in CamelCase.

    Examples::

        un_camel_case('1984ZXSpectrumGames')  ->  '1984 ZX Spectrum Games'
        un_camel_case('MSWindows3.11ForWorkgroups')
            ->  'MS Windows 3.11 For Workgroups'
        un_camel_case('MotörHead')  ->  'Mot\xf6r Head'

    Text that is not camel-cased ('Hindi-Urdu',
    'ACM_Computing_Classification_System', ...) is left mostly intact.

    Known limitation: non-ASCII characters are treated as lowercase
    letters, even if they are actually capital letters.
    """
    # Work on the reversed string so CAMEL_RE can anchor on word endings.
    remaining = text[::-1]
    segments = []
    while remaining:
        match = CAMEL_RE.match(remaining)
        if not match:
            segments.append(remaining)
            break
        segments.append(match.group(1))
        remaining = remaining[match.end():]
    # Strip separator characters, drop empty pieces, and rejoin.
    stripped = (segment.strip(' _') for segment in segments)
    reversed_result = ' '.join(segment for segment in stripped if segment)
    # Un-reverse and re-attach hyphenated pairs the split broke apart.
    return reversed_result[::-1].replace('- ', '-')
Splits apart words that are written in CamelCase.
Bugs:
- Non-ASCII characters are treated as lowercase letters, even if they are
actually capital letters.
Examples:
>>> un_camel_case('1984ZXSpectrumGames')
'1984 ZX Spectrum Games'
>>> un_camel_case('aaAa aaAaA 0aA AAAa!AAA')
'aa Aa aa Aa A 0a A AA Aa! AAA'
>>> un_camel_case('MotörHead')
'Mot\xf6r Head'
>>> un_camel_case('MSWindows3.11ForWorkgroups')
'MS Windows 3.11 For Workgroups'
This should not significantly affect text that is not camel-cased:
>>> un_camel_case('ACM_Computing_Classification_System')
'ACM Computing Classification System'
>>> un_camel_case('Anne_Blunt,_15th_Baroness_Wentworth')
'Anne Blunt, 15th Baroness Wentworth'
>>> un_camel_case('Hindi-Urdu')
'Hindi-Urdu' |
def get_gravityspy_triggers(tablename, engine=None, **kwargs):
    """Fetch data into an `GravitySpyTable`
    Parameters
    ----------
    table : `str`,
        The name of table you are attempting to receive triggers
        from.
    engine : `sqlalchemy.engine.Engine`, optional
        an existing database engine; if omitted one is created from the
        ``db``/``host``/``user``/``passwd`` keyword arguments.
    selection
        other filters you would like to supply
        underlying reader method for the given format
    .. note::
        For now it will attempt to automatically connect you
        to a specific DB. In the future, this may be an input
        argument.
    Returns
    -------
    table : `GravitySpyTable`
    """
    from sqlalchemy.engine import create_engine
    from sqlalchemy.exc import ProgrammingError
    # connect if needed
    if engine is None:
        conn_kw = {}
        # Pop connection-related kwargs so they are not passed to fetch().
        for key in ('db', 'host', 'user', 'passwd'):
            try:
                conn_kw[key] = kwargs.pop(key)
            except KeyError:
                pass
        engine = create_engine(get_connection_str(**conn_kw))
    try:
        return GravitySpyTable(fetch(engine, tablename, **kwargs))
    except ProgrammingError as exc:
        # Unknown table: augment the error with the list of valid tables.
        if 'relation "%s" does not exist' % tablename in str(exc):
            msg = exc.args[0]
            msg = msg.replace(
                'does not exist',
                'does not exist, the following tablenames are '
                'acceptable:\n    %s\n' % '\n    '.join(engine.table_names()))
            exc.args = (msg,)
        raise | Fetch data into an `GravitySpyTable`
Parameters
----------
table : `str`,
The name of table you are attempting to receive triggers
from.
selection
other filters you would like to supply
underlying reader method for the given format
.. note::
For now it will attempt to automatically connect you
to a specific DB. In the future, this may be an input
argument.
Returns
-------
table : `GravitySpyTable` |
def count_genomic_region_plot(self):
    """ Generate the SnpEff Counts by Genomic Region plot """
    totals = self.snpeff_section_totals['# Count by genomic region']
    # Order the categories by descending total count.
    ordered_keys = sorted(totals, reverse=True, key=totals.get)
    # Prettify labels: underscores to spaces, title case, keep "UTR" upper.
    categories = OrderedDict()
    for key in ordered_keys:
        categories[key] = {
            'name': key.replace('_', ' ').title().replace('Utr', 'UTR')}
    plot_config = {
        'id': 'snpeff_variant_effects_region',
        'title': 'SnpEff: Counts by Genomic Region',
        'ylab': '# Reads',
        'logswitch': True
    }
    return bargraph.plot(self.snpeff_data, categories, plot_config)
def sync(remote='origin', branch='master'):
    """Perform a full git sync: pull the branch, then push it."""
    # Bring in remote changes first so the subsequent push can fast-forward.
    pull(branch, remote)
    push(branch, remote)
    print(cyan("Git Synced!"))
def save_files(self, selections) -> None:
    """Save the |Selection| objects contained in the given |Selections|
    instance to separate network files."""
    try:
        currentpath = self.currentpath
        selections = selectiontools.Selections(selections)
        for selection in selections:
            # The "complete" selection is skipped and never written to disk.
            if selection.name == 'complete':
                continue
            # One network file per selection, named after the selection.
            path = os.path.join(currentpath, selection.name+'.py')
            selection.save_networkfile(filepath=path)
    except BaseException:
        # Presumably augments and re-raises the active exception with
        # context -- see objecttools.augment_excmessage.
        objecttools.augment_excmessage(
            'While trying to save selections `%s` into network files'
            % selections) | Save the |Selection| objects contained in the given |Selections|
instance to separate network files. |
def get_apps_menu(self):
    """Temporal code, will change to apps.get_app_configs() for django 1.7
    Generate a initial menu list using the AppsConfig registered
    """
    menu = {}
    for model, admin_entry in self.admin_site._registry.items():
        # Only admin entries that expose an app_config participate.
        if not hasattr(admin_entry, 'app_config'):
            continue
        app_config = admin_entry.app_config
        if not app_config.has_menu_permission(obj=self.user):
            continue
        menu['app:' + app_config.name] = {
            'title': app_config.verbose_name,
            'menus': app_config.init_menu(),
            'first_icon': app_config.icon,
        }
    return menu
Generate a initial menu list using the AppsConfig registered |
def network_interfaces_list(resource_group, **kwargs):
    '''
    .. versionadded:: 2019.2.0
    List all network interfaces within a resource group.
    :param resource_group: The resource group name to list network
        interfaces within.
    :return: dict mapping interface name to its properties, or a dict with
        an ``error`` key on failure.
    CLI Example:
    .. code-block:: bash
        salt-call azurearm_network.network_interfaces_list testgroup
    '''
    result = {}
    netconn = __utils__['azurearm.get_client']('network', **kwargs)
    try:
        # Flatten the Azure paged response into a plain list of dicts.
        nics = __utils__['azurearm.paged_object_to_list'](
            netconn.network_interfaces.list(
                resource_group_name=resource_group
            )
        )
        # Key the results by interface name for convenient lookup.
        for nic in nics:
            result[nic['name']] = nic
    except CloudError as exc:
        # Log and report the cloud error instead of raising.
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
        result = {'error': str(exc)}
    return result | .. versionadded:: 2019.2.0
List all network interfaces within a resource group.
:param resource_group: The resource group name to list network
interfaces within.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.network_interfaces_list testgroup |
def _build_crawlid_info(self, master, dict):
    '''
    Builds the crawlid info object
    @param master: the master dict
    @param dict: the dict object received
    @return: the crawlid info object
    '''
    # NOTE: the parameter name `dict` shadows the builtin -- kept as-is
    # for interface compatibility with existing callers.
    master['total_pending'] = 0
    master['total_domains'] = 0
    master['appid'] = dict['appid']
    master['crawlid'] = dict['crawlid']
    master['spiderid'] = dict['spiderid']
    master['domains'] = {}
    timeout_key = 'timeout:{sid}:{aid}:{cid}'.format(sid=dict['spiderid'],
                                                aid=dict['appid'],
                                                cid=dict['crawlid'])
    # Expose the crawl's expiration if a timeout key exists in Redis.
    if self.redis_conn.exists(timeout_key):
        master['expires'] = self.redis_conn.get(timeout_key)
    # get all domain queues
    match_string = '{sid}:*:queue'.format(sid=dict['spiderid'])
    for key in self.redis_conn.scan_iter(match=match_string):
        # Queue keys look like '<spiderid>:<domain>:queue'.
        domain = key.split(":")[1]
        sortedDict = self._get_bin(key)
        # now iterate through binned dict
        for score in sortedDict:
            for item in sortedDict[score]:
                if 'meta' in item:
                    item = item['meta']
                # Only count items belonging to this appid/crawlid pair.
                if item['appid'] == dict['appid'] and item['crawlid'] == dict['crawlid']:
                    if domain not in master['domains']:
                        master['domains'][domain] = {}
                        master['domains'][domain]['total'] = 0
                        # Sentinel extremes so the first real priority wins.
                        master['domains'][domain]['high_priority'] = -9999
                        master['domains'][domain]['low_priority'] = 9999
                        master['total_domains'] = master['total_domains'] + 1
                    master['domains'][domain]['total'] = master['domains'][domain]['total'] + 1
                    if item['priority'] > master['domains'][domain]['high_priority']:
                        master['domains'][domain]['high_priority'] = item['priority']
                    if item['priority'] < master['domains'][domain]['low_priority']:
                        master['domains'][domain]['low_priority'] = item['priority']
                    master['total_pending'] = master['total_pending'] + 1
    return master | Builds the crawlid info object
@param master: the master dict
@param dict: the dict object received
@return: the crawlid info object |
def write_message(self, status=messages.INFO, message=None):
    """
    Write a message to django's messaging framework and return the
    written message.

    :param status: The message status level. Defaults to messages.INFO.
    :param message: The message to write. If not given, defaults to
        appending 'saved' to the unicode representation of `self.object`.
    """
    text = message if message else u"%s saved" % self.object
    messages.add_message(self.request, status, text)
    return text
returns the written message.
:param status: The message status level. Defaults to \
messages.INFO.
:param message: The message to write. If not given, \
defaults to appending 'saved' to the unicode representation \
of `self.object`. |
def bitwise_or(self, t):
    """
    Binary operation: logical or
    :param b: The other operand
    :return: self | b
    """
    """
    This implementation combines the approaches used by 'WYSINWYX: what you see is not what you execute'
    paper and 'Signedness-Agnostic Program Analysis: Precise Integer Bounds for Low-Level Code'. The
    first paper provides an sound way to approximate the stride, whereas the second provides a way
    to calculate the or operation using wrapping intervals.
    Note that, even though according Warren's work 'Hacker's delight', one should follow different
    approaches to calculate the minimun/maximum values of an or operations according on the type
    of the operands (signed/unsigned). On the other other hand, by splitting the wrapping-intervals
    at the south pole, we can safely and soundly only use the Warren's functions for unsigned
    integers.
    """
    s = self
    result_interval = list()
    # Combine every south-pole-split piece of self with every piece of t.
    for u in s._ssplit():
        for v in t._ssplit():
            w = u.bits
            # u |w v
            # Stride approximation: number of trailing zeros of the strides.
            if u.is_integer:
                s_t = StridedInterval._ntz(v.stride)
            elif v.is_integer:
                s_t = StridedInterval._ntz(u.stride)
            else:
                s_t = min(StridedInterval._ntz(u.stride), StridedInterval._ntz(v.stride))
            if u.is_integer and u.lower_bound == 0:
                new_stride = v.stride
            elif v.is_integer and v.lower_bound == 0:
                new_stride = u.stride
            else:
                new_stride = 2 ** s_t
            # The low s_t bits are OR'd exactly; the rest use Warren's bounds.
            mask = (1 << s_t) - 1
            r = (u.lower_bound & mask) | (v.lower_bound & mask)
            m = (2 ** w) - 1
            low_bound = WarrenMethods.min_or(u.lower_bound & (~mask & m), u.upper_bound & (~mask & m), v.lower_bound & (~mask & m), v.upper_bound & (~mask & m), w)
            upper_bound = WarrenMethods.max_or(u.lower_bound & (~mask & m), u.upper_bound & (~mask & m), v.lower_bound & (~mask & m), v.upper_bound & (~mask & m), w)
            # A collapsed interval represents a single value: stride zero.
            if low_bound == upper_bound:
                new_stride = 0
            new_interval = StridedInterval(lower_bound=((low_bound & (~mask & m)) | r), upper_bound=((upper_bound & (~mask & m)) | r), bits=w, stride=new_stride)
            result_interval.append(new_interval)
    return StridedInterval.least_upper_bound(*result_interval).normalize() | Binary operation: logical or
:param b: The other operand
:return: self | b |
def format_latex(self, strng):
    """Format a string for latex inclusion."""
    # Characters that LaTeX requires to be escaped.
    specials_re = re.compile(r'(%|_|\$|#|&)', re.MULTILINE)
    # Magic command names used as section headers.
    magic_header_re = re.compile(r'^(%s.*?):' % ESC_MAGIC, re.MULTILINE)
    # Magic commands appearing in running text.
    magic_cmd_re = re.compile(r'(?P<cmd>%s.+?\b)(?!\}\}:)' % ESC_MAGIC,
                              re.MULTILINE)
    # Trailing-backslash line continuations.
    continuation_re = re.compile(r'\\$', re.MULTILINE)
    # Literal "\n" sequences.
    newline_re = re.compile(r'\\n')
    # Substitution order matters: magic markup first, then escaping,
    # then the literal-\n rewrite.
    out = magic_header_re.sub(r'\n\\bigskip\n\\texttt{\\textbf{ \1}}:', strng)
    out = magic_cmd_re.sub(r'\\texttt{\g<cmd>}', out)
    out = continuation_re.sub(r'\\\\', out)
    out = specials_re.sub(r'\\\1', out)
    out = newline_re.sub(r'\\textbackslash{}n', out)
    return out
def chain_sub_regexes(phrase, *regex_sub_pairs):
    '''
    Apply a series of regex substitutions to ``phrase``, in order.

    chain_sub_regexes('test ok', (' ', '_'), ('k$', 'oo'))
    # => 'test_ooo'

    :param phrase: the input string.
    :param regex_sub_pairs: (pattern, replacement) tuples; each pattern may
        be a pre-compiled regex object or a pattern string to compile.
    :return: the phrase after all substitutions have been applied.
    '''
    for pattern, replacement in regex_sub_pairs:
        # BUGFIX: the original tested isinstance(regex, basestring), which
        # raises NameError on Python 3; test for str instead.
        if isinstance(pattern, str):
            pattern = re.compile(pattern)
        phrase = pattern.sub(replacement, phrase)
    return phrase
chain_sub_regexes('test ok', (' ', '_'), ('k$', 'oo'))
# => 'test_ooo' |
def do_write(self):
    """
    Flushes as much pending data from the internal write buffer as possible.

    Returns the number of bytes accepted by the kernel on success; errors
    whose errno is in Stream.ERRNO_RECOVERABLE are retried in a loop, any
    other EnvironmentError propagates to the caller.
    """
    while True:
        try:
            written = 0
            # Sockets expose send(); anything else falls back to os.write().
            if hasattr(self.fd, 'send'):
                written = self.fd.send(self.buffer)
            else:
                written = os.write(self.fd.fileno(), self.buffer)
            # Drop the bytes that were actually written; a partial write
            # leaves the remainder for the next call.
            self.buffer = self.buffer[written:]
            # try to close after writes if a close was requested
            if self.close_requested and len(self.buffer) == 0:
                self.close()
            return written
        except EnvironmentError as e:
            # Recoverable errnos loop and retry; everything else re-raises.
            if e.errno not in Stream.ERRNO_RECOVERABLE:
                raise e | Flushes as much pending data from the internal write buffer as possible.
def from_cli(opt, length, delta_f, low_frequency_cutoff,
             strain=None, dyn_range_factor=1, precision=None):
    """Parses the CLI options related to the noise PSD and returns a
    FrequencySeries with the corresponding PSD. If necessary, the PSD is
    linearly interpolated to achieve the resolution specified in the CLI.
    Parameters
    ----------
    opt : object
        Result of parsing the CLI with OptionParser, or any object with the
        required attributes (psd_model, psd_file, asd_file, psd_estimation,
        psd_segment_length, psd_segment_stride, psd_inverse_length,
        psd_output).
    length : int
        The length in samples of the output PSD.
    delta_f : float
        The frequency step of the output PSD.
    low_frequency_cutoff: float
        The low frequncy cutoff to use when calculating the PSD.
    strain : {None, TimeSeries}
        Time series containing the data from which the PSD should be measured,
        when psd_estimation is in use.
    dyn_range_factor : {1, float}
        For PSDs taken from models or text files, if `dyn_range_factor` is
        not None, then the PSD is multiplied by `dyn_range_factor` ** 2.
    precision : str, choices (None,'single','double')
        If not specified, or specified as None, the precision of the returned
        PSD will match the precision of the data, if measuring a PSD, or will
        match the default precision of the model if using an analytical PSD.
        If 'single' the PSD will be converted to float32, if not already in
        that precision. If 'double' the PSD will be converted to float64, if
        not already in that precision.
    Returns
    -------
    psd : FrequencySeries
        The frequency series containing the PSD.
    """
    f_low = low_frequency_cutoff
    sample_rate = int((length -1) * 2 * delta_f)
    try:
        psd_estimation = opt.psd_estimation is not None
    except AttributeError:
        psd_estimation = False
    # Exactly one PSD source must be selected on the command line.
    exclusive_opts = [opt.psd_model, opt.psd_file, opt.asd_file,
                      psd_estimation]
    if sum(map(bool, exclusive_opts)) != 1:
        err_msg = "You must specify exactly one of '--psd-file', "
        err_msg += "'--psd-model', '--asd-file', '--psd-estimation'"
        raise ValueError(err_msg)
    if (opt.psd_model or opt.psd_file or opt.asd_file):
        # PSD from lalsimulation or file
        if opt.psd_model:
            psd = from_string(opt.psd_model, length, delta_f, f_low)
        elif opt.psd_file or opt.asd_file:
            if opt.asd_file:
                psd_file_name = opt.asd_file
            else:
                psd_file_name = opt.psd_file
            if psd_file_name.endswith(('.dat', '.txt')):
                is_asd_file = bool(opt.asd_file)
                psd = from_txt(psd_file_name, length,
                               delta_f, f_low, is_asd_file=is_asd_file)
            elif opt.asd_file:
                err_msg = "ASD files are only valid as ASCII files (.dat or "
                err_msg += ".txt). Supplied {}.".format(psd_file_name)
                # BUGFIX: the error message above was built but never raised,
                # so execution fell through with `psd` undefined and later
                # failed with an unrelated NameError. Raise it explicitly.
                raise ValueError(err_msg)
            elif psd_file_name.endswith(('.xml', '.xml.gz')):
                psd = from_xml(psd_file_name, length, delta_f, f_low,
                               ifo_string=opt.psd_file_xml_ifo_string,
                               root_name=opt.psd_file_xml_root_name)
            # Set values < flow to the value at flow
            kmin = int(low_frequency_cutoff / psd.delta_f)
            psd[0:kmin] = psd[kmin]
        psd *= dyn_range_factor ** 2
    elif psd_estimation:
        # estimate PSD from data
        psd = welch(strain, avg_method=opt.psd_estimation,
                    seg_len=int(opt.psd_segment_length * sample_rate),
                    seg_stride=int(opt.psd_segment_stride * sample_rate),
                    num_segments=opt.psd_num_segments,
                    require_exact_data_fit=False)
        if delta_f != psd.delta_f:
            psd = interpolate(psd, delta_f)
    else:
        # Shouldn't be possible to get here
        raise ValueError("Shouldn't be possible to raise this!")
    if opt.psd_inverse_length:
        psd = inverse_spectrum_truncation(psd,
            int(opt.psd_inverse_length * sample_rate),
            low_frequency_cutoff=f_low)
    if hasattr(opt, 'psd_output') and opt.psd_output:
        # Undo the dynamic-range scaling before writing the PSD to disk.
        (psd.astype(float64) / (dyn_range_factor ** 2)).save(opt.psd_output)
    if precision is None:
        return psd
    elif precision == 'single':
        return psd.astype(float32)
    elif precision == 'double':
        return psd.astype(float64)
    else:
        err_msg = "If provided the precision kwarg must be either 'single' "
        err_msg += "or 'double'. You provided %s." %(precision)
        raise ValueError(err_msg)
FrequencySeries with the corresponding PSD. If necessary, the PSD is
linearly interpolated to achieve the resolution specified in the CLI.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length,
psd_output).
length : int
The length in samples of the output PSD.
delta_f : float
The frequency step of the output PSD.
low_frequency_cutoff: float
The low frequncy cutoff to use when calculating the PSD.
strain : {None, TimeSeries}
Time series containing the data from which the PSD should be measured,
when psd_estimation is in use.
dyn_range_factor : {1, float}
For PSDs taken from models or text files, if `dyn_range_factor` is
not None, then the PSD is multiplied by `dyn_range_factor` ** 2.
precision : str, choices (None,'single','double')
If not specified, or specified as None, the precision of the returned
PSD will match the precision of the data, if measuring a PSD, or will
match the default precision of the model if using an analytical PSD.
If 'single' the PSD will be converted to float32, if not already in
that precision. If 'double' the PSD will be converted to float64, if
not already in that precision.
Returns
-------
psd : FrequencySeries
The frequency series containing the PSD. |
def reboot_adb_server():
    """ execute 'adb devices' to start adb server """
    # Shared retry counter captured by the closure below.
    _reboot_count = 0
    _max_retry = 1
    def _reboot():
        # Guard: refuse to run more than _max_retry times in total.
        nonlocal _reboot_count
        if _reboot_count >= _max_retry:
            raise RuntimeError('fail after retry {} times'.format(_max_retry))
        _reboot_count += 1
        return_code = subprocess.call(['adb', 'devices'], stdout=subprocess.DEVNULL)
        # Non-zero exit code means the adb binary is unavailable or broken.
        if bool(return_code):
            warnings.warn('return not zero, execute "adb version" failed')
            raise EnvironmentError('adb did not work :(')
    # NOTE(review): this returns the _reboot callable without invoking it,
    # so adb is only started when the caller calls the returned function --
    # confirm this is intentional given the docstring's wording.
    return _reboot | execute 'adb devices' to start adb server
def _square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file,
                                  todo="square"):
    """Run squaring or merging analysis using bcbio.variation.recall.

    :param data: bcbio sample data dictionary.
    :param region: genomic region to process.
    :param bam_files: BAM inputs (only written to the input list when
        ``todo == "square"``).
    :param vrn_files: variant (VCF) inputs.
    :param out_file: path for the combined output file.
    :param todo: bcbio-variation-recall subcommand ("square" or a merge mode).
    :return: the output file path.
    """
    ref_file = tz.get_in(("reference", "fasta", "base"), data)
    cores = tz.get_in(("config", "algorithm", "num_cores"), data, 1)
    resources = config_utils.get_resources("bcbio-variation-recall", data["config"])
    # adjust memory by cores but leave room for run program memory
    memcores = int(math.ceil(float(cores) / 5.0))
    jvm_opts = config_utils.adjust_opts(resources.get("jvm_opts", ["-Xms250m", "-Xmx2g"]),
                                        {"algorithm": {"memory_adjust": {"direction": "increase",
                                                                         "magnitude": memcores}}})
    # Write unique VCFs and BAMs to input file
    input_file = "%s-inputs.txt" % os.path.splitext(out_file)[0]
    with open(input_file, "w") as out_handle:
        out_handle.write("\n".join(sorted(list(set(vrn_files)))) + "\n")
        if todo == "square":
            out_handle.write("\n".join(sorted(list(set(bam_files)))) + "\n")
    variantcaller = tz.get_in(("config", "algorithm", "jointcaller"), data).replace("-joint", "")
    cmd = ["bcbio-variation-recall", todo] + jvm_opts + broad.get_default_jvm_opts() + \
          ["-c", cores, "-r", bamprep.region_to_gatk(region)]
    if todo == "square":
        cmd += ["--caller", variantcaller]
    cmd += [out_file, ref_file, input_file]
    bcbio_env = utils.get_bcbio_env()
    cmd = " ".join(str(x) for x in cmd)
    do.run(cmd, "%s in region: %s" % (cmd, bamprep.region_to_gatk(region)), env=bcbio_env)
    return out_file | Run squaring or merging analysis using bcbio.variation.recall.
def paste(self):
    '''
    Insert text from the clipboard at the cursor.

    :return: True if clipboard text was inserted, False if the clipboard
        was unavailable; falls through (None) when the clipboard is empty.
    '''
    try:
        t = pygame.scrap.get(SCRAP_TEXT)
        if t:
            self.insert(t)
            return True
    except Exception:
        # pygame.scrap is experimental, allow for changes.
        # BUGFIX: a bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; Exception keeps the best-effort behavior without that.
        return False
def inall_cmd(argv):
    """Run a command in each virtualenv."""
    failed = False
    for env_name in lsenvs():
        print("\n%s:" % env_name)
        try:
            inve(env_name, *argv)
        except CalledProcessError as exc:
            # Record the failure but keep going through remaining envs.
            failed = True
            err(exc)
    # Exit status reflects whether any env failed (True -> 1).
    sys.exit(failed)
def log_error(self, msg, *args):
    """Log an error or print in stdout if no logger."""
    logger = self._logger
    if logger is None:
        # No logger configured: fall back to plain stdout formatting.
        print(msg % args)
    else:
        logger.error(msg, *args)
def program_rtr_nwk_next_hop(self, rout_id, next_hop, cidr):
    """Program the next hop for all networks of a tenant. """
    ns = self.find_rtr_namespace(rout_id)
    # Without a namespace there is nothing to program.
    if ns is None:
        LOG.error("Unable to find namespace for router %s", rout_id)
        return False
    route_args = ['route', 'add', '-net', cidr, 'gw', next_hop]
    if not self.program_rtr(route_args, rout_id, namespace=ns):
        LOG.error("Program router returned error for %s", rout_id)
        return False
    return True
def to_html(sample, stats_object):
"""Generate a HTML report from summary statistics and a given sample.
Parameters
----------
sample : DataFrame
the sample you want to print
stats_object : dict
Summary statistics. Should be generated with an appropriate describe() function
Returns
-------
str
containing profile report in HTML format
Notes
-----
* This function as to be refactored since it's huge and it contains inner functions
"""
n_obs = stats_object['table']['n']
value_formatters = formatters.value_formatters
row_formatters = formatters.row_formatters
if not isinstance(sample, pd.DataFrame):
raise TypeError("sample must be of type pandas.DataFrame")
if not isinstance(stats_object, dict):
raise TypeError("stats_object must be of type dict. Did you generate this using the pandas_profiling.describe() function?")
if not set({'table', 'variables', 'freq', 'correlations'}).issubset(set(stats_object.keys())):
raise TypeError(
"stats_object badly formatted. Did you generate this using the pandas_profiling.describe() function?")
def fmt(value, name):
if pd.isnull(value):
return ""
if name in value_formatters:
return value_formatters[name](value)
elif isinstance(value, float):
return value_formatters[formatters.DEFAULT_FLOAT_FORMATTER](value)
else:
try:
return unicode(value) # Python 2
except NameError:
return str(value) # Python 3
def _format_row(freq, label, max_freq, row_template, n, extra_class=''):
if max_freq != 0:
width = int(freq / max_freq * 99) + 1
else:
width = 1
if width > 20:
label_in_bar = freq
label_after_bar = ""
else:
label_in_bar = " "
label_after_bar = freq
return row_template.render(label=label,
width=width,
count=freq,
percentage='{:2.1f}'.format(freq / n * 100),
extra_class=extra_class,
label_in_bar=label_in_bar,
label_after_bar=label_after_bar)
def freq_table(freqtable, n, table_template, row_template, max_number_to_print, nb_col=6):
freq_rows_html = u''
if max_number_to_print > n:
max_number_to_print=n
if max_number_to_print < len(freqtable):
freq_other = sum(freqtable.iloc[max_number_to_print:])
min_freq = freqtable.values[max_number_to_print]
else:
freq_other = 0
min_freq = 0
freq_missing = n - sum(freqtable)
max_freq = max(freqtable.values[0], freq_other, freq_missing)
# TODO: Correctly sort missing and other
for label, freq in six.iteritems(freqtable.iloc[0:max_number_to_print]):
freq_rows_html += _format_row(freq, label, max_freq, row_template, n)
if freq_other > min_freq:
freq_rows_html += _format_row(freq_other,
"Other values (%s)" % (freqtable.count() - max_number_to_print), max_freq, row_template, n,
extra_class='other')
if freq_missing > min_freq:
freq_rows_html += _format_row(freq_missing, "(Missing)", max_freq, row_template, n, extra_class='missing')
return table_template.render(rows=freq_rows_html, varid=hash(idx), nb_col=nb_col)
def extreme_obs_table(freqtable, table_template, row_template, number_to_print, n, ascending = True):
# If it's mixed between base types (str, int) convert to str. Pure "mixed" types are filtered during type discovery
if "mixed" in freqtable.index.inferred_type:
freqtable.index = freqtable.index.astype(str)
sorted_freqTable = freqtable.sort_index()
if ascending:
obs_to_print = sorted_freqTable.iloc[:number_to_print]
else:
obs_to_print = sorted_freqTable.iloc[-number_to_print:]
freq_rows_html = ''
max_freq = max(obs_to_print.values)
for label, freq in six.iteritems(obs_to_print):
freq_rows_html += _format_row(freq, label, max_freq, row_template, n)
return table_template.render(rows=freq_rows_html)
# Variables
rows_html = u""
messages = []
render_htmls = {}
for idx, row in stats_object['variables'].iterrows():
formatted_values = {'varname': idx, 'varid': hash(idx)}
row_classes = {}
for col, value in six.iteritems(row):
formatted_values[col] = fmt(value, col)
for col in set(row.index) & six.viewkeys(row_formatters):
row_classes[col] = row_formatters[col](row[col])
if row_classes[col] == "alert" and col in templates.messages:
messages.append(templates.messages[col].format(formatted_values, varname = idx))
if row['type'] in {'CAT', 'BOOL'}:
formatted_values['minifreqtable'] = freq_table(stats_object['freq'][idx], n_obs,
templates.template('mini_freq_table'),
templates.template('mini_freq_table_row'),
3,
templates.mini_freq_table_nb_col[row['type']])
if row['distinct_count'] > 50:
messages.append(templates.messages['HIGH_CARDINALITY'].format(formatted_values, varname = idx))
row_classes['distinct_count'] = "alert"
else:
row_classes['distinct_count'] = ""
if row['type'] == 'UNIQUE':
obs = stats_object['freq'][idx].index
formatted_values['firstn'] = pd.DataFrame(obs[0:3], columns=["First 3 values"]).to_html(classes="example_values", index=False)
formatted_values['lastn'] = pd.DataFrame(obs[-3:], columns=["Last 3 values"]).to_html(classes="example_values", index=False)
if row['type'] == 'UNSUPPORTED':
formatted_values['varname'] = idx
messages.append(templates.messages[row['type']].format(formatted_values))
elif row['type'] in {'CORR', 'CONST', 'RECODED'}:
formatted_values['varname'] = idx
messages.append(templates.messages[row['type']].format(formatted_values))
else:
formatted_values['freqtable'] = freq_table(stats_object['freq'][idx], n_obs,
templates.template('freq_table'), templates.template('freq_table_row'), 10)
formatted_values['firstn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = True)
formatted_values['lastn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = False)
rows_html += templates.row_templates_dict[row['type']].render(values=formatted_values, row_classes=row_classes)
render_htmls['rows_html'] = rows_html
# Overview
formatted_values = {k: fmt(v, k) for k, v in six.iteritems(stats_object['table'])}
row_classes={}
for col in six.viewkeys(stats_object['table']) & six.viewkeys(row_formatters):
row_classes[col] = row_formatters[col](stats_object['table'][col])
if row_classes[col] == "alert" and col in templates.messages:
messages.append(templates.messages[col].format(formatted_values, varname = idx))
messages_html = u''
for msg in messages:
messages_html += templates.message_row.format(message=msg)
overview_html = templates.template('overview').render(values=formatted_values, row_classes = row_classes, messages=messages_html)
render_htmls['overview_html'] = overview_html
# Add plot of matrix correlation if the dataframe is not empty
if len(stats_object['correlations']['pearson']) > 0:
pearson_matrix = plot.correlation_matrix(stats_object['correlations']['pearson'], 'Pearson')
spearman_matrix = plot.correlation_matrix(stats_object['correlations']['spearman'], 'Spearman')
correlations_html = templates.template('correlations').render(
values={'pearson_matrix': pearson_matrix, 'spearman_matrix': spearman_matrix})
render_htmls['correlations_html'] = correlations_html
# Add sample
sample_html = templates.template('sample').render(sample_table_html=sample.to_html(classes="sample"))
render_htmls['sample_html'] = sample_html
# TODO: should be done in the template
return templates.template('base').render(render_htmls) | Generate a HTML report from summary statistics and a given sample.
Parameters
----------
sample : DataFrame
the sample you want to print
stats_object : dict
Summary statistics. Should be generated with an appropriate describe() function
Returns
-------
str
containing profile report in HTML format
Notes
-----
* This function has to be refactored since it's huge and contains inner functions |
def get_maya_envpath(self):
    """Return the PYTHONPATH necessary for running mayapy

    If you start native mayapy, it will setup these paths.
    You might want to prepend this to your path if running from
    an external interpreter.

    :returns: the PYTHONPATH that is used for running mayapy
    :rtype: str
    :raises: None
    """
    opj = os.path.join
    ml = self.get_maya_location()         # Maya installation root
    mb = self.get_maya_bin()              # Maya bin directory
    msp = self.get_maya_sitepackage_dir()
    # Mirror the sys.path entries native mayapy sets up
    # (python27.zip: Maya presumably ships Python 2.7 — TODO confirm version).
    pyzip = opj(mb, "python27.zip")
    pydir = opj(ml, "Python")
    pydll = opj(pydir, "DLLs")
    pylib = opj(pydir, "lib")
    pyplat = opj(pylib, "plat-win")       # NOTE(review): Windows-style stdlib dirs
    pytk = opj(pylib, "lib-tk")
    path = os.pathsep.join((pyzip, pydll, pylib, pyplat, pytk, mb, pydir, msp))
    return path | Return the PYTHONPATH neccessary for running mayapy
If you start native mayapy, it will setup these paths.
You might want to prepend this to your path if running from
an external interpreter.
:returns: the PYTHONPATH that is used for running mayapy
:rtype: str
:raises: None |
def reciprocal_space(space, axes=None, halfcomplex=False, shift=True,
                     **kwargs):
    """Return the range of the Fourier transform on ``space``.

    Parameters
    ----------
    space : `DiscreteLp`
        Real space whose reciprocal is calculated. It must be
        uniformly discretized.
    axes : sequence of ints, optional
        Dimensions along which the Fourier transform is taken.
        Default: all axes
    halfcomplex : bool, optional
        If ``True``, take only the negative frequency part along the last
        axis. For ``False``, use the full frequency space.
        This option can only be used if ``space`` is a space of
        real-valued functions.
    shift : bool or sequence of bools, optional
        If ``True``, the reciprocal grid is shifted by half a stride in
        the negative direction. With a boolean sequence, this option
        is applied separately to each axis.
        If a sequence is provided, it must have the same length as
        ``axes`` if supplied. Note that this must be set to ``True``
        in the halved axis in half-complex transforms.
        Default: ``True``
    impl : string, optional
        Implementation back-end for the created space.
        Default: ``'numpy'``
    exponent : float, optional
        Create a space with this exponent. By default, the conjugate
        exponent ``q = p / (p - 1)`` of the exponent of ``space`` is
        used, where ``q = inf`` for ``p = 1`` and vice versa.
    dtype : optional
        Complex data type of the created space. By default, the
        complex counterpart of ``space.dtype`` is used.

    Returns
    -------
    rspace : `DiscreteLp`
        Reciprocal of the input ``space``. If ``halfcomplex=True``, the
        upper end of the domain (where the half space ends) is chosen to
        coincide with the grid node.
    """
    if not isinstance(space, DiscreteLp):
        raise TypeError('`space` {!r} is not a `DiscreteLp` instance'
                        ''.format(space))
    if axes is None:
        axes = tuple(range(space.ndim))
    axes = normalized_axes_tuple(axes, space.ndim)
    if not all(space.is_uniform_byaxis[axis] for axis in axes):
        raise ValueError('`space` is not uniformly discretized in the '
                         '`axes` of the transform')
    # Half-complex storage exploits conjugate symmetry, which only holds
    # for real-valued input.
    if halfcomplex and space.field != RealNumbers():
        raise ValueError('`halfcomplex` option can only be used with real '
                         'spaces')
    exponent = kwargs.pop('exponent', None)
    if exponent is None:
        # Conjugate exponent q = p / (p - 1), see docstring.
        exponent = conj_exponent(space.exponent)
    dtype = kwargs.pop('dtype', None)
    if dtype is None:
        dtype = complex_dtype(space.dtype)
    else:
        if not is_complex_floating_dtype(dtype):
            raise ValueError('{} is not a complex data type'
                             ''.format(dtype_repr(dtype)))
    impl = kwargs.pop('impl', 'numpy')
    # Calculate range
    recip_grid = reciprocal_grid(space.grid, shift=shift,
                                 halfcomplex=halfcomplex, axes=axes)
    # Need to do this for axes of length 1 that are not transformed:
    # keep the original extent in every non-transformed axis.
    non_axes = [i for i in range(space.ndim) if i not in axes]
    min_pt = {i: space.min_pt[i] for i in non_axes}
    max_pt = {i: space.max_pt[i] for i in non_axes}
    # Make a partition with nodes on the boundary in the last transform axis
    # if `halfcomplex == True`, otherwise a standard partition.
    if halfcomplex:
        max_pt[axes[-1]] = recip_grid.max_pt[axes[-1]]
    part = uniform_partition_fromgrid(recip_grid, min_pt, max_pt)
    # Use convention of adding a hat to represent fourier transform of variable
    axis_labels = list(space.axis_labels)
    for i in axes:
        # Avoid double math: strip existing '$' delimiters before re-wrapping.
        label = axis_labels[i].replace('$', '')
        axis_labels[i] = '$\\^{{{}}}$'.format(label)
    recip_spc = uniform_discr_frompartition(part, exponent=exponent,
                                            dtype=dtype, impl=impl,
                                            axis_labels=axis_labels)
    return recip_spc | Return the range of the Fourier transform on ``space``.
Parameters
----------
space : `DiscreteLp`
Real space whose reciprocal is calculated. It must be
uniformly discretized.
axes : sequence of ints, optional
Dimensions along which the Fourier transform is taken.
Default: all axes
halfcomplex : bool, optional
If ``True``, take only the negative frequency part along the last
axis. For ``False``, use the full frequency space.
This option can only be used if ``space`` is a space of
real-valued functions.
shift : bool or sequence of bools, optional
If ``True``, the reciprocal grid is shifted by half a stride in
the negative direction. With a boolean sequence, this option
is applied separately to each axis.
If a sequence is provided, it must have the same length as
``axes`` if supplied. Note that this must be set to ``True``
in the halved axis in half-complex transforms.
Default: ``True``
impl : string, optional
Implementation back-end for the created space.
Default: ``'numpy'``
exponent : float, optional
Create a space with this exponent. By default, the conjugate
exponent ``q = p / (p - 1)`` of the exponent of ``space`` is
used, where ``q = inf`` for ``p = 1`` and vice versa.
dtype : optional
Complex data type of the created space. By default, the
complex counterpart of ``space.dtype`` is used.
Returns
-------
rspace : `DiscreteLp`
Reciprocal of the input ``space``. If ``halfcomplex=True``, the
upper end of the domain (where the half space ends) is chosen to
coincide with the grid node. |
def handle_get_reseller(self, req):
    """Handles the GET v2 call for getting general reseller information
    (currently just a list of accounts). Can only be called by a
    .reseller_admin.

    On success, a JSON dictionary will be returned with a single `accounts`
    key whose value is list of dicts. Each dict represents an account and
    currently only contains the single key `name`. For example::

        {"accounts": [{"name": "reseller"}, {"name": "test"},
                      {"name": "test2"}]}

    :param req: The swob.Request to process.
    :returns: swob.Response, 2xx on success with a JSON dictionary as
              explained above.
    """
    if not self.is_reseller_admin(req):
        return self.denied_response(req)
    listing = []
    marker = ''
    # Page through the auth account's container listing until an empty page.
    while True:
        path = '/v1/%s?format=json&marker=%s' % (quote(self.auth_account),
                                                 quote(marker))
        resp = self.make_pre_authed_request(
            req.environ, 'GET', path).get_response(self.app)
        if resp.status_int // 100 != 2:
            raise Exception('Could not list main auth account: %s %s' %
                            (path, resp.status))
        sublisting = json.loads(resp.body)
        if not sublisting:
            break
        for container in sublisting:
            # Names starting with '.' are skipped — presumably reserved/internal
            # containers rather than accounts (verify against auth conventions).
            if container['name'][0] != '.':
                listing.append({'name': container['name']})
        # Continue listing after the last name seen on this page.
        marker = sublisting[-1]['name'].encode('utf-8')
    return Response(body=json.dumps({'accounts': listing}),
                    content_type=CONTENT_TYPE_JSON) | Handles the GET v2 call for getting general reseller information
(currently just a list of accounts). Can only be called by a
.reseller_admin.
On success, a JSON dictionary will be returned with a single `accounts`
key whose value is list of dicts. Each dict represents an account and
currently only contains the single key `name`. For example::
{"accounts": [{"name": "reseller"}, {"name": "test"},
{"name": "test2"}]}
:param req: The swob.Request to process.
:returns: swob.Response, 2xx on success with a JSON dictionary as
explained above. |
def post_load(fn=None, pass_many=False, pass_original=False):
    """Register a method to invoke after deserializing an object. The method
    receives the deserialized data and returns the processed data.

    By default, receives a single datum at a time, transparently handling the ``many``
    argument passed to the Schema. If ``pass_many=True``, the raw data
    (which may be a collection) and the value for ``many`` is passed.

    If ``pass_original=True``, the original data (before deserializing) will be passed as
    an additional argument to the method.
    """
    # Tag ``fn`` as a (POST_LOAD, pass_many) hook; set_hook does the bookkeeping.
    return set_hook(fn, (POST_LOAD, pass_many), pass_original=pass_original) | Register a method to invoke after deserializing an object. The method
receives the deserialized data and returns the processed data.
By default, receives a single datum at a time, transparently handling the ``many``
argument passed to the Schema. If ``pass_many=True``, the raw data
(which may be a collection) and the value for ``many`` is passed.
If ``pass_original=True``, the original data (before deserializing) will be passed as
an additional argument to the method. |
def get_commands():
    """
    Returns a dictionary mapping command names to their callback applications.

    This works by looking for a management.commands package in django.core, and
    in each installed application -- if a commands package exists, all commands
    in that package are registered.

    Core commands are always included. If a settings module has been
    specified, user-defined commands will also be included.

    The dictionary is in the format {command_name: app_name}. Key-value
    pairs from this dictionary can then be used in calls to
    load_command_class(app_name, command_name)

    If a specific version of a command must be loaded (e.g., with the
    startapp command), the instantiated module can be placed in the
    dictionary in place of the application name.

    The dictionary is cached on the first call and reused on subsequent
    calls.
    """
    # Built-in commands next to this package are attributed to 'pug.crawlnmine'.
    commands = dict((name, 'pug.crawlnmine') for name in find_commands(__path__[0]))
    # Without configured settings, only the core commands are available.
    if not settings.configured:
        return commands
    # Reversed iteration so apps listed earlier win on name collisions
    # (later dict.update calls overwrite earlier entries).
    for app_config in reversed(list(apps.get_app_configs())):
        path = os.path.join(app_config.path, 'management')
        commands.update(dict((name, app_config.name) for name in find_commands(path)))
    return commands | Returns a dictionary mapping command names to their callback applications.
This works by looking for a management.commands package in django.core, and
in each installed application -- if a commands package exists, all commands
in that package are registered.
Core commands are always included. If a settings module has been
specified, user-defined commands will also be included.
The dictionary is in the format {command_name: app_name}. Key-value
pairs from this dictionary can then be used in calls to
load_command_class(app_name, command_name)
If a specific version of a command must be loaded (e.g., with the
startapp command), the instantiated module can be placed in the
dictionary in place of the application name.
The dictionary is cached on the first call and reused on subsequent
calls. |
def read_transport_message(self, origin, message_type, timeout=15):
    """
    Blocking read of a transport message that does not indicate a message from the Pebble.
    Will block until a message is received, or it times out.

    .. warning::
       Avoid calling this method from an endpoint callback; doing so is likely to lead to deadlock.

    :param origin: The type of :class:`.MessageTarget` that triggers the message.
    :param message_type: The class of the message to read from the transport.
    :param timeout: The maximum time to wait before raising :exc:`.TimeoutError`.
    :return: The object read from the transport; of the same type as passed to ``message_type``.
    """
    # Block until the event handler observes a (Transport, origin, message_type) event.
    return self.event_handler.wait_for_event((_EventType.Transport, origin, message_type), timeout=timeout) | Blocking read of a transport message that does not indicate a message from the Pebble.
Will block until a message is received, or it times out.
.. warning::
Avoid calling this method from an endpoint callback; doing so is likely to lead to deadlock.
:param origin: The type of :class:`.MessageTarget` that triggers the message.
:param message_type: The class of the message to read from the transport.
:param timeout: The maximum time to wait before raising :exc:`.TimeoutError`.
:return: The object read from the transport; of the same type as passed to ``message_type``. |
def add_dependency(self, depend):
    """Adds dependencies.

    A non-Node ``depend`` makes ``_add_child`` raise ``TypeError``, which is
    re-raised here as a user-facing ``SCons.Errors.UserError``.
    """
    try:
        self._add_child(self.depends, self.depends_set, depend)
    except TypeError as e:
        # _add_child reports the offending value(s) as the first exception arg;
        # it may be a single object or a list (hence the is_List branch).
        e = e.args[0]
        if SCons.Util.is_List(e):
            s = list(map(str, e))
        else:
            s = str(e)
        raise SCons.Errors.UserError("attempted to add a non-Node dependency to %s:\n\t%s is a %s, not a Node" % (str(self), s, type(e))) | Adds dependencies.
def detach(self, force=False):
    """
    Detach this EBS volume from an EC2 instance.

    :type force: bool
    :param force: Forces detachment if the previous detachment attempt did
                  not occur cleanly. This option can lead to data loss or
                  a corrupted file system. Use this option only as a last
                  resort to detach a volume from a failed instance. The
                  instance will not have an opportunity to flush file system
                  caches nor file system meta data. If you use this option,
                  you must perform file system check and repair procedures.

    :rtype: bool
    :return: True if successful
    """
    # attach_data may be falsy when the volume is not attached; fall back
    # to None for both instance_id and device in that case.
    instance_id = None
    if self.attach_data:
        instance_id = self.attach_data.instance_id
    device = None
    if self.attach_data:
        device = self.attach_data.device
    return self.connection.detach_volume(self.id, instance_id, device, force) | Detach this EBS volume from an EC2 instance.
:type force: bool
:param force: Forces detachment if the previous detachment attempt did
not occur cleanly. This option can lead to data loss or
a corrupted file system. Use this option only as a last
resort to detach a volume from a failed instance. The
instance will not have an opportunity to flush file system
caches nor file system meta data. If you use this option,
you must perform file system check and repair procedures.
:rtype: bool
:return: True if successful |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.