code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def printBlastRecord(record):
    """
    Print a BLAST record.

    @param record: A BioPython C{Bio.Blast.Record.Blast} instance.
    """
    # Print the simple top-level attributes first; alignments and their
    # matching descriptions are printed in detail below ('reference' is
    # skipped as uninteresting).
    for key in sorted(record.__dict__.keys()):
        if key not in ['alignments', 'descriptions', 'reference']:
            print('%s: %r' % (key, record.__dict__[key]))
    print('alignments: (%d in total):' % len(record.alignments))
    # NOTE(review): indexing record.descriptions[i] assumes descriptions
    # parallels alignments (one per alignment, same order) — confirm with
    # the BioPython record contract.
    for i, alignment in enumerate(record.alignments):
        print('  description %d:' % (i + 1))
        for attr in ['accession', 'bits', 'e', 'num_alignments', 'score']:
            print('    %s: %s' % (attr, getattr(record.descriptions[i], attr)))
        print('  alignment %d:' % (i + 1))
        for attr in 'accession', 'hit_def', 'hit_id', 'length', 'title':
            print('    %s: %s' % (attr, getattr(alignment, attr)))
        print('  HSPs (%d in total):' % len(alignment.hsps))
        for hspIndex, hsp in enumerate(alignment.hsps, start=1):
            print('    hsp %d:' % hspIndex)
            printHSP(hsp, '      ')
@param record: A BioPython C{Bio.Blast.Record.Blast} instance. |
def postpro_fisher(data, report=None):
    """
    Perform the Fisher (arctanh) transform on everything in ``data``.

    :param data: numpy array of correlation values in [-1, 1]. Values just
        outside the clamp thresholds are snapped in place to exactly +/-1
        (note: this mutates the caller's array).
    :param report: optional dict; an entry describing this step is added to
        it. If omitted, a fresh dict is created.
    :return: tuple ``(fisher_data, report)``.
    """
    # ``is None`` instead of truthiness: the old ``if not report`` silently
    # replaced a caller-supplied *empty* dict, so the caller never received
    # the 'fisher' entry the docstring promises.
    if report is None:
        report = {}
    # Due to rounding errors
    data[data < -0.99999999999999] = -1
    data[data > 0.99999999999999] = 1
    fisher_data = 0.5 * np.log((1 + data) / (1 - data))
    report['fisher'] = {}
    report['fisher']['performed'] = 'yes'
    return fisher_data, report
If report variable is passed, this is added to the report. |
def scale_aphi(self, scale_parameter):
    """Scale the spectra by multiplying by linear scaling factor

    :param scale_parameter: Linear scaling factor
    """
    lg.info('Scaling a_phi by :: ' + str(scale_parameter))
    try:
        self.a_phi = self.a_phi * scale_parameter
    except (AttributeError, TypeError, ValueError):
        # Narrowed from a bare ``except``: a bare clause also swallows
        # KeyboardInterrupt/SystemExit and masks unrelated bugs. These three
        # cover a_phi being undefined or the multiplication failing.
        lg.exception("Can't scale a_phi, check that it has been defined ")
:param scale_parameter: Linear scaling factor |
def get_leaf_certificates(certs):
    """
    Extracts the leaf certificates from a list of certificates. Leaf
    certificates are ones whose subject does not appear as issuer among the
    others.
    """
    cn_oid = x509.NameOID.COMMON_NAME
    # Common names of every issuer seen in the input list.
    issuer_names = [cert.issuer.get_attributes_for_oid(cn_oid)
                    for cert in certs]
    # A leaf's subject never shows up as anyone's issuer.
    return [cert for cert in certs
            if cert.subject.get_attributes_for_oid(cn_oid)
            not in issuer_names]
certificates are ones whose subject does not appear as issuer among the
others. |
def missing(self, field, last=True):
    '''
    Numeric fields support specific handling for missing fields in a doc.
    The missing value can be _last, _first, or a custom value
    (that will be used for missing docs as the sort value).
        missing('price')
        > {"price" : {"missing": "_last" } }
        missing('price',False)
        > {"price" : {"missing": "_first"} }
    '''
    # Truthy ``last`` sorts missing docs at the end, otherwise at the start.
    position = '_last' if last else '_first'
    self.append({field: {'missing': position}})
    return self
The missing value can be _last, _first, or a custom value
(that will be used for missing docs as the sort value).
missing('price')
> {"price" : {"missing": "_last" } }
missing('price',False)
> {"price" : {"missing": "_first"} } |
def n1ql_query(self, query, *args, **kwargs):
    """
    Execute a N1QL query.

    This method is mainly a wrapper around the :class:`~.N1QLQuery`
    and :class:`~.N1QLRequest` objects, which contain the inputs
    and outputs of the query.

    Using an explicit :class:`~.N1QLQuery`::

        query = N1QLQuery(
            'SELECT airportname FROM `travel-sample` WHERE city=$1', "Reno")
        # Use this option for often-repeated queries
        query.adhoc = False
        for row in cb.n1ql_query(query):
            print('Name: {0}'.format(row['airportname']))

    Using an implicit :class:`~.N1QLQuery`::

        for row in cb.n1ql_query(
                'SELECT airportname FROM `travel-sample` WHERE city="Reno"'):
            print('Name: {0}'.format(row['airportname']))

    With the latter form, *args and **kwargs are forwarded to the
    N1QL Request constructor, optionally selected in kwargs['itercls'],
    otherwise defaulting to :class:`~.N1QLRequest`.

    :param query: The query to execute. This may either be a
        :class:`.N1QLQuery` object, or a string (which will be
        implicitly converted to one).
    :param kwargs: Arguments for :class:`.N1QLRequest`.
    :return: An iterator which yields rows. Each row is a dictionary
        representing a single result
    """
    # Coerce plain strings to a query object; callers may pass either.
    if not isinstance(query, N1QLQuery):
        query = N1QLQuery(query)
    # 'itercls' (note the exact spelling) selects the iterator class used to
    # drive the request; it is removed from kwargs before forwarding.
    itercls = kwargs.pop('itercls', N1QLRequest)
    return itercls(query, self, *args, **kwargs)
This method is mainly a wrapper around the :class:`~.N1QLQuery`
and :class:`~.N1QLRequest` objects, which contain the inputs
and outputs of the query.
Using an explicit :class:`~.N1QLQuery`::
query = N1QLQuery(
'SELECT airportname FROM `travel-sample` WHERE city=$1', "Reno")
# Use this option for often-repeated queries
query.adhoc = False
for row in cb.n1ql_query(query):
print 'Name: {0}'.format(row['airportname'])
Using an implicit :class:`~.N1QLQuery`::
for row in cb.n1ql_query(
'SELECT airportname, FROM `travel-sample` WHERE city="Reno"'):
print 'Name: {0}'.format(row['airportname'])
With the latter form, *args and **kwargs are forwarded to the
N1QL Request constructor, optionally selected in kwargs['iterclass'],
otherwise defaulting to :class:`~.N1QLRequest`.
:param query: The query to execute. This may either be a
:class:`.N1QLQuery` object, or a string (which will be
implicitly converted to one).
:param kwargs: Arguments for :class:`.N1QLRequest`.
:return: An iterator which yields rows. Each row is a dictionary
representing a single result |
def set_assessments(self, assessment_ids=None):
    """Sets the assessments.

    arg: assessmentIds (osid.id.Id): the assessment Ids
    raise: INVALID_ARGUMENT - assessmentIds is invalid
    raise: NullArgument - assessmentIds is null
    raise: NoAccess - metadata.is_read_only() is true
    compliance: mandatory - This method must be implemented.
    """
    # Guard clauses: null input, read-only metadata, then invalid input.
    if assessment_ids is None:
        raise NullArgument()
    metadata = Metadata(**settings.METADATA['assessment_ids'])
    if metadata.is_read_only():
        raise NoAccess()
    if not self._is_valid_input(assessment_ids, metadata, array=True):
        raise InvalidArgument
    # All checks passed: store the ids as strings on the underlying map.
    for assessment_id in assessment_ids:
        self._my_map['assessmentIds'].append(str(assessment_id))
arg: assessmentIds (osid.id.Id): the assessment Ids
raise: INVALID_ARGUMENT - assessmentIds is invalid
raise: NullArgument - assessmentIds is null
raise: NoAccess - metadata.is_read_only() is true
compliance: mandatory - This method must be implemented. |
def make_class(name, attrs, bases=(object,), **attributes_arguments):
    """
    A quick way to create a new class called *name* with *attrs*.

    :param name: The name for the new class.
    :type name: str
    :param attrs: A list of names or a dictionary of mappings of names to
        attributes. If *attrs* is a list or an ordered dict (:class:`dict`
        on Python 3.6+, :class:`collections.OrderedDict` otherwise), the
        order is deduced from the order of the names or attributes inside
        *attrs*. Otherwise the order of the definition of the attributes is
        used.
    :type attrs: :class:`list` or :class:`dict`
    :param tuple bases: Classes that the new class will subclass.
    :param attributes_arguments: Passed unmodified to :func:`attr.s`.
    :return: A new class with *attrs*.
    :rtype: type

    .. versionadded:: 17.1.0 *bases*
    .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained.
    """
    if isinstance(attrs, dict):
        cls_dict = attrs
    elif isinstance(attrs, (list, tuple)):
        cls_dict = {a: attrib() for a in attrs}
    else:
        raise TypeError("attrs argument must be a dict or a list.")

    # __attrs_post_init__ must live on the class body itself, not be
    # collected as an attribute, so pull it out before building the type.
    post_init = cls_dict.pop("__attrs_post_init__", None)
    body = {} if post_init is None else {"__attrs_post_init__": post_init}
    type_ = type(name, bases, body)
    # For pickling to work, the __module__ variable needs to be set to the
    # frame where the class is created. Bypass this step in environments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython).
    try:
        type_.__module__ = sys._getframe(1).f_globals.get(
            "__name__", "__main__"
        )
    except (AttributeError, ValueError):
        pass
    return _attrs(these=cls_dict, **attributes_arguments)(type_)
:param name: The name for the new class.
:type name: str
:param attrs: A list of names or a dictionary of mappings of names to
attributes.
If *attrs* is a list or an ordered dict (:class:`dict` on Python 3.6+,
:class:`collections.OrderedDict` otherwise), the order is deduced from
the order of the names or attributes inside *attrs*. Otherwise the
order of the definition of the attributes is used.
:type attrs: :class:`list` or :class:`dict`
:param tuple bases: Classes that the new class will subclass.
:param attributes_arguments: Passed unmodified to :func:`attr.s`.
:return: A new class with *attrs*.
:rtype: type
.. versionadded:: 17.1.0 *bases*
.. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. |
def del_edges(self, edges, *args, **kwargs):
    """
    Removes edges from the graph. Takes optional arguments for
    ``DictGraph.del_edge``.

    Arguments:
      - edges(iterable) Sequence of edges to be removed from the
        ``DictGraph``.
    """
    # Delegate each removal to del_edge, forwarding any extra options.
    for single_edge in edges:
        self.del_edge(single_edge, *args, **kwargs)
``DictGraph.del_edge``.
Arguments:
- edges(iterable) Sequence of edges to be removed from the
``DictGraph``. |
def preview(src_path):
    ''' Generates a preview of src_path in the requested format.

    :returns: A list of preview paths, one for each page.
    '''
    # Sketch files get their own renderer; anything else (or a sketch
    # render that produced nothing) falls back to quicklook.
    if sketch.is_sketchfile(src_path):
        pages = sketch.preview(src_path)
    else:
        pages = []
    if not pages:
        pages = quicklook.preview(src_path)
    return [safely_decode(page) for page in pages]
:returns: A list of preview paths, one for each page. |
def write_info_file(tensorboard_info):
    """Write TensorBoardInfo to the current process's info file.

    This should be called by `main` once the server is ready. When the
    server shuts down, `remove_info_file` should be called.

    Args:
      tensorboard_info: A valid `TensorBoardInfo` object.

    Raises:
      ValueError: If any field on `info` is not of the correct type.
    """
    # Serialize first (which also validates), then write with a trailing
    # newline so the file is a complete line.
    contents = "%s\n" % _info_to_string(tensorboard_info)
    info_path = _get_info_file_path()
    with open(info_path, "w") as outfile:
        outfile.write(contents)
This should be called by `main` once the server is ready. When the
server shuts down, `remove_info_file` should be called.
Args:
tensorboard_info: A valid `TensorBoardInfo` object.
Raises:
ValueError: If any field on `info` is not of the correct type. |
def fraction(self, value: float) -> 'Size':
    """Set the fraction of free space to use."""
    raise_not_number(value)
    # NOTE(review): stores the '<n>fr' string on ``maximum`` — confirm that
    # is the intended target attribute for fractional sizing.
    self.maximum = f'{value}fr'
    return self
def get_app_prefs(app=None):
    """Returns a dictionary with preferences for a certain app/module.

    :param str|unicode app: application name; when omitted it is derived
        from the caller's module name.
    :rtype: dict
    """
    if app is None:
        # Derive the app name from the calling frame's top-level package.
        with Frame(stepback=1) as frame:
            app = frame.f_globals['__name__'].split('.')[0]
    # Unknown apps get an empty preferences dict.
    return get_prefs().get(app, {})
:param str|unicode app:
:rtype: dict |
def _narrow_states(node, old_state, new_state, previously_widened_state):  # pylint:disable=unused-argument,no-self-use
    """
    Try to narrow the state!

    :param old_state:
    :param new_state:
    :param previously_widened_state:
    :returns: The narrowed state, and whether a narrowing has occurred
    """
    l.debug('Narrowing state at IP %s', previously_widened_state.ip)
    narrowed_state = previously_widened_state.copy()
    # TODO: Finish the narrowing logic
    # Until the logic exists, report that no narrowing happened.
    return narrowed_state, False
:param old_state:
:param new_state:
:param previously_widened_state:
:returns: The narrowed state, and whether a narrowing has occurred |
def get_pastml_marginal_prob_file(method, model, column):
    """
    Get the filename where the PastML marginal probabilities of node states
    are saved (will be None for non-marginal methods). This file is inside
    the work_dir that can be specified for the pastml_pipeline method.

    :param method: str, the ancestral state prediction method used by PASTML.
    :param model: str, the state evolution model used by PASTML.
    :param column: str, the column for which ancestral states are
        reconstructed with PASTML.
    :return: str, filename or None if the method is not marginal.
    """
    # Only marginal methods produce a probability table.
    if is_marginal(method):
        column, method = get_column_method(column, method)
        return PASTML_MARGINAL_PROBS_TAB.format(state=column, model=model)
    return None
This file is inside the work_dir that can be specified for the pastml_pipeline method.
:param method: str, the ancestral state prediction method used by PASTML.
:param model: str, the state evolution model used by PASTML.
:param column: str, the column for which ancestral states are reconstructed with PASTML.
:return: str, filename or None if the method is not marginal. |
def pad_positive_wrapper(fmtfct):
    """Ensure that numbers are aligned in a table by appending a blank space
    to positive values if 'parenthesis' are used to denote negative numbers.

    :param fmtfct: formatting callable carrying a boolean ``parens``
        attribute indicating whether negatives are rendered as ``(n)``.
    :return: wrapped formatter that pads non-parenthesised results.
    """
    from functools import wraps

    @wraps(fmtfct)  # preserve the wrapped formatter's name and docstring
    def check_and_append(*args, **kwargs):
        result = fmtfct(*args, **kwargs)
        # Negative values end with ')'; pad everything else by one space so
        # digit columns line up.
        if fmtfct.parens and not result.endswith(')'):
            result += ' '
        return result
    return check_and_append
used to denote negative numbers |
def poly_energies(samples_like, poly):
    """Calculates energy of samples from a higher order polynomial.

    Args:
        sample (samples_like):
            A collection of raw samples. `samples_like` is an extension of
            NumPy's array_like structure. See :func:`.as_samples`.

        poly (dict):
            Polynomial as a dict of form {term: bias, ...}, where `term` is a
            tuple of variables and `bias` the associated bias. Variable
            labeling/indexing of terms in poly dict must match that of the
            sample(s).

    Returns:
        list/:obj:`numpy.ndarray`: The energy of the sample(s).
    """
    warnings.warn(
        "poly_energies is deprecated and will be removed in dimod 0.9.0."
        "In the future, use BinaryPolynomial.energies",
        DeprecationWarning)
    # dev note the vartype is not used in the energy calculation and this will
    # be deprecated in the future
    return BinaryPolynomial(poly, 'SPIN').energies(samples_like)
Args:
sample (samples_like):
A collection of raw samples. `samples_like` is an extension of
NumPy's array_like structure. See :func:`.as_samples`.
poly (dict):
Polynomial as a dict of form {term: bias, ...}, where `term` is a
tuple of variables and `bias` the associated bias. Variable
labeling/indexing of terms in poly dict must match that of the
sample(s).
Returns:
list/:obj:`numpy.ndarray`: The energy of the sample(s). |
def register(email):
    '''
    Register a new user account

    CLI Example:

    .. code-block:: bash

        salt-run venafi.register email@example.com
    '''
    data = __utils__['http.query'](
        '{0}/useraccounts'.format(_base_url()),
        method='POST',
        data=salt.utils.json.dumps({
            'username': email,
            'userAccountType': 'API',
        }),
        status=True,
        decode=True,
        decode_type='json',
        header_dict={
            'Content-Type': 'application/json',
        },
    )
    status = data['status']
    # Any 4xx/5xx HTTP status is an API error. ``startswith`` accepts a
    # tuple, replacing the previous chained ``or`` of two separate calls.
    if six.text_type(status).startswith(('4', '5')):
        raise CommandExecutionError(
            'There was an API error: {0}'.format(data['error'])
        )
    return data.get('dict', {})
CLI Example:
.. code-block:: bash
salt-run venafi.register email@example.com |
def _check_location_part(cls, val, regexp):
    """Deprecated. See CourseLocator._check_location_part.

    Emits a deprecation warning, then delegates unchanged to
    CourseLocator's implementation.
    """
    cls._deprecation_warning()
    return CourseLocator._check_location_part(val, regexp)
def choose_form(self, number=None, xpath=None, name=None, **kwargs):
    """
    Set the default form.

    :param number: number of form (starting from zero)
    :param id: value of "id" attribute
    :param name: value of "name" attribute
    :param xpath: XPath query
    :raises: :class:`DataNotFound` if form not found
    :raises: :class:`GrabMisuseError` if method is called without parameters

    The selected form becomes available via the `form` attribute of the
    `Grab` instance; all form methods operate on the default form.

    Examples::

        # Select second form
        g.choose_form(1)

        # Select by id
        g.choose_form(id="register")

        # Select by name
        g.choose_form(name="signup")

        # Select by xpath
        g.choose_form(xpath='//form[contains(@action, "/submit")]')
    """
    form_id = kwargs.pop('id', None)
    # Selectors are tried in a fixed priority order: id, name, number, xpath.
    if form_id is not None:
        try:
            self._lxml_form = self.select(
                '//form[@id="%s"]' % form_id).node()
        except IndexError:
            raise DataNotFound("There is no form with id: %s" % form_id)
    elif name is not None:
        try:
            self._lxml_form = self.select(
                '//form[@name="%s"]' % name).node()
        except IndexError:
            raise DataNotFound('There is no form with name: %s' % name)
    elif number is not None:
        try:
            self._lxml_form = self.tree.forms[number]
        except IndexError:
            raise DataNotFound('There is no form with number: %s' % number)
    elif xpath is not None:
        try:
            self._lxml_form = self.select(xpath).node()
        except IndexError:
            raise DataNotFound(
                'Could not find form with xpath: %s' % xpath)
    else:
        raise GrabMisuseError('choose_form methods requires one of '
                              '[number, id, name, xpath] arguments')
:param number: number of form (starting from zero)
:param id: value of "id" attribute
:param name: value of "name" attribute
:param xpath: XPath query
:raises: :class:`DataNotFound` if form not found
:raises: :class:`GrabMisuseError`
if method is called without parameters
Selected form will be available via `form` attribute of `Grab`
instance. All form methods will work with default form.
Examples::
# Select second form
g.choose_form(1)
# Select by id
g.choose_form(id="register")
# Select by name
g.choose_form(name="signup")
# Select by xpath
g.choose_form(xpath='//form[contains(@action, "/submit")]') |
def authorize(*args, **kwargs):
    """View for rendering authorization request."""
    if request.method == 'GET':
        # Look up the requesting OAuth client; unknown clients are a 404.
        client = Client.query.filter_by(
            client_id=kwargs.get('client_id')
        ).first()
        if not client:
            abort(404)
        scope_registry = current_oauth2server.scopes
        return render_template(
            'invenio_oauth2server/authorize.html',
            client=client,
            oauth_request=kwargs.get('request'),
            scopes=[scope_registry[s] for s in kwargs.get('scopes', [])],
        )
    # POST: the user either confirmed or denied the authorization.
    return request.form.get('confirm', 'no') == 'yes'
def validate_tzinfo(dummy, value):
    """Validate the tzinfo option
    """
    # None passes through untouched; anything else must be a tzinfo.
    if value is None or isinstance(value, datetime.tzinfo):
        return value
    raise TypeError("%s must be an instance of datetime.tzinfo" % value)
def cli():
    """ Command line interface """
    # Attach a console handler with millisecond timestamps to the module
    # logger so -l/--log output is readable.
    ch = logging.StreamHandler()
    ch.setFormatter(logging.Formatter(
        '%(asctime)s.%(msecs)03d %(levelname)s: %(message)s',
        datefmt="%Y-%m-%d %H:%M:%S"
    ))
    logger.addHandler(ch)
    import argparse
    parser = argparse.ArgumentParser(description="Search 'network' for hosts with a \
    response to 'path' that matches 'filter'")
    parser.add_argument('network', help='IP address with optional mask, e.g. 192.168.0.0/24')
    parser.add_argument('-p', '--path', help='URL path at host, e.g. index.html',
                        default='')
    parser.add_argument('-f', '--filter', help='Regular expression pattern for filter',
                        dest='pattern', default='')
    parser.add_argument('-l', '--log', help='Enable logging', action='store_true')
    args = parser.parse_args()
    print('Scanning, please wait ...')
    # Forward all parsed CLI options to survey() as keyword arguments.
    result = survey(**vars(args))
    # Pluralise 'match' and mention the pattern only when one was supplied.
    print('Found {} match{}{}{} on {}'.format(len(result), 'es' if len(result)!=1 else '',
        ' for ' if args.pattern else '', args.pattern, args.network))
    for x in result:
        print(x.hostname)
def _refresh_authentication_token(self):
    """
    POST to ArcGIS requesting a new token.

    Raises GeocoderAuthenticationFailure after too many retries or when the
    response contains no token; on success stores the token and resets the
    retry counter.
    """
    if self.retry == self._MAX_RETRIES:
        raise GeocoderAuthenticationFailure(
            'Too many retries for auth: %s' % self.retry
        )
    token_request_arguments = {
        'username': self.username,
        'password': self.password,
        'referer': self.referer,
        'expiration': self.token_lifetime,
        'f': 'json'
    }
    # NOTE(review): credentials travel as URL query parameters through
    # _base_call_geocoder — confirm the transport actually POSTs as the
    # summary claims.
    url = "?".join((self.auth_api, urlencode(token_request_arguments)))
    logger.debug(
        "%s._refresh_authentication_token: %s",
        self.__class__.__name__, url
    )
    # Record the expiry before the call so a slow request errs on the side
    # of refreshing early.
    self.token_expiry = int(time()) + self.token_lifetime
    response = self._base_call_geocoder(url)
    if 'token' not in response:
        # Fixed a missing space between the two concatenated message parts
        # (previously rendered as '...auth request.Request URL: ...').
        raise GeocoderAuthenticationFailure(
            'Missing token in auth request. '
            'Request URL: %s; response JSON: %s' %
            (url, json.dumps(response))
        )
    self.retry = 0
    self.token = response['token']
def Reload(self):
    """Call `Reload` on every `EventAccumulator`.

    Accumulators whose backing directory has been deleted are removed from
    the multiplexer. Returns self to allow chaining.
    """
    logger.info('Beginning EventMultiplexer.Reload()')
    self._reload_called = True
    # Build a list so we're safe even if the list of accumulators is modified
    # even while we're reloading.
    with self._accumulators_mutex:
        items = list(self._accumulators.items())
    names_to_delete = set()
    for name, accumulator in items:
        try:
            accumulator.Reload()
        except (OSError, IOError) as e:
            logger.error("Unable to reload accumulator '%s': %s", name, e)
        except directory_watcher.DirectoryDeletedError:
            names_to_delete.add(name)
    with self._accumulators_mutex:
        for name in names_to_delete:
            # logger.warn is deprecated; warning() is the supported spelling.
            logger.warning("Deleting accumulator '%s'", name)
            del self._accumulators[name]
    logger.info('Finished with EventMultiplexer.Reload()')
    return self
def update(self, params):
    """Update the dev_info data from a dictionary.

    Only keys that already exist in the device's ``deviceInfo`` mapping are
    updated; unknown keys in ``params`` are ignored.

    :param params: dict of new values, keyed like ``deviceInfo``.
    """
    dev_info = self.json_state.get('deviceInfo')
    # Membership test (``k in dev_info``) rather than ``dev_info.get(k)``:
    # the old truthiness check skipped keys whose *existing* value was falsy
    # (0, '', False, None), so those entries could never be updated.
    dev_info.update({k: params[k] for k in params if k in dev_info})
Only updates if it already exists in the device. |
def query_by_grader(self, grader_id, end_time=None, start_time=None):
    """
    Query by grader.

    List grade change events for a given grader.

    :param grader_id: (required, path) grader ID.
    :param start_time: (optional) beginning of the time range of interest.
    :param end_time: (optional) end of the time range of interest.
    """
    # Required path parameter.
    path = {"grader_id": grader_id}
    data = {}
    # Optional query parameters: only send time bounds that were supplied.
    params = {}
    if start_time is not None:
        params["start_time"] = start_time
    if end_time is not None:
        params["end_time"] = end_time
    self.logger.debug("GET /api/v1/audit/grade_change/graders/{grader_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/audit/grade_change/graders/{grader_id}".format(**path), data=data, params=params, all_pages=True)
List grade change events for a given grader. |
def visit_module(self, node):
    """
    An interface that will be called when visiting a module.

    Checks comment style: W9401 (comments should begin with '# ') and
    W9402 (the first letter of a comment should be capitalized). Only the
    first line of each comment run is checked.

    @param node: node of current module
    """
    if not node.file_stream:
        # Failed to open the module
        return
    isFirstLineOfComment = True
    isDocString = False
    lines = node.stream().readlines()
    for linenum, line in enumerate(lines):
        if line.strip().startswith(b'"""'):
            # This is a simple assumption than docstring are delimited
            # with triple double quotes on a single line.
            # Should do the job for Twisted code.
            isDocString = not isDocString
        if isDocString:
            # We ignore comments in docstrings.
            continue
        matchedComment = COMMENT_RGX.search(line)
        if matchedComment:
            if isFirstLineOfComment:
                # Check for W9401. The previous condition
                # (startswith or not startswith) was a tautology that
                # flagged every single comment; only flag comments that
                # do not begin with '# '.
                comment = matchedComment.group()
                if not comment.startswith(b"# "):
                    self.add_message('W9401', line=linenum + 1, node=node)
                # Check for W9402
                strippedComment = comment.lstrip(b"#").lstrip()
                if strippedComment:
                    firstLetter = strippedComment[0:1]
                    if (firstLetter.isalpha() and
                            not firstLetter.isupper()):
                        self.add_message('W9402', line=linenum + 1, node=node)
                isFirstLineOfComment = False
        else:
            isFirstLineOfComment = True
@param node: node of current module |
def reporter(metadata, analysistype, reportpath):
    """
    Create the core genome report
    :param metadata: type LIST: List of metadata objects
    :param analysistype: type STR: Current analysis type
    :param reportpath: type STR: Absolute path to folder in which the reports are to be created
    :return:
    """
    header = 'Strain,ClosestRef,GenesPresent/Total,\n'
    data = str()
    # NOTE(review): only samples whose closest RefSeq genus is Listeria get
    # a row in the report; everything else is marked 'NA'.
    for sample in metadata:
        try:
            if sample[analysistype].blastresults != 'NA':
                if sample.general.closestrefseqgenus == 'Listeria':
                    # Write the sample name, closest ref genome, and the # of genes found / total # of genes
                    closestref = list(sample[analysistype].blastresults.items())[0][0]
                    coregenes = list(sample[analysistype].blastresults.items())[0][1][0]
                    # Find the closest reference file
                    try:
                        ref = glob(os.path.join(sample[analysistype].targetpath, '{fasta}*'
                                                .format(fasta=closestref)))[0]
                    except IndexError:
                        # Replace underscores with dashes to find files
                        closestref = closestref.replace('_', '-')
                        ref = glob(os.path.join(sample[analysistype].targetpath, '{fasta}*'
                                                .format(fasta=closestref)))[0]
                    # Determine the number of core genes present in the closest reference file
                    totalcore = 0
                    for _ in SeqIO.parse(ref, 'fasta'):
                        totalcore += 1
                    # Add the data to the object
                    sample[analysistype].targetspresent = coregenes
                    sample[analysistype].totaltargets = totalcore
                    sample[analysistype].coreresults = '{cg}/{tc}'.format(cg=coregenes,
                                                                          tc=totalcore)
                    row = '{sn},{cr},{cg}/{tc}\n'.format(sn=sample.name,
                                                         cr=closestref,
                                                         cg=coregenes,
                                                         tc=totalcore)
                    # Open the report
                    with open(os.path.join(sample[analysistype].reportdir,
                                           '{sn}_{at}.csv'.format(sn=sample.name,
                                                                  at=analysistype)), 'w') as report:
                        # Write the row to the report
                        report.write(header)
                        report.write(row)
                    data += row
                else:
                    sample[analysistype].targetspresent = 'NA'
                    sample[analysistype].totaltargets = 'NA'
                    sample[analysistype].coreresults = 'NA'
        except KeyError:
            # Sample lacks results for this analysis type entirely.
            sample[analysistype].targetspresent = 'NA'
            sample[analysistype].totaltargets = 'NA'
            sample[analysistype].coreresults = 'NA'
    # The combined report aggregates every per-sample row written above.
    with open(os.path.join(reportpath, 'coregenome.csv'), 'w') as report:
        # Write the data to the report
        report.write(header)
        report.write(data)
:param metadata: type LIST: List of metadata objects
:param analysistype: type STR: Current analysis type
:param reportpath: type STR: Absolute path to folder in which the reports are to be created
:return: |
def create(gandi):
    """ Create a new contact.

    Interactively prompts for each contact field plus a password, retries
    any fields the dry-run API rejects, creates the contact, then walks the
    user through generating and saving an API key.
    """
    contact = {}
    for field, label, checks in FIELDS:
        ask_field(gandi, contact, field, label, checks)
    default_pwd = randomstring(16)
    contact['password'] = click.prompt('Please enter your password',
                                       hide_input=True,
                                       confirmation_prompt=True,
                                       default=default_pwd)
    # Dry-run until the API stops reporting field errors; each rejected
    # field is re-prompted.
    result = True
    while result:
        result = gandi.contact.create_dry_run(contact)
        # display errors
        for err in result:
            gandi.echo(err['reason'])
            field = err['field']
            if field not in FIELDS_POSITION:
                # Unknown field in the error: we cannot re-prompt for it.
                return
            desc = FIELDS[FIELDS_POSITION.get(field)]
            ask_field(gandi, contact, *desc)
    result = gandi.contact.create(contact)
    handle = result['handle']
    gandi.echo('Please activate you public api access from gandi website, and '
               'get the apikey.')
    gandi.echo('Your handle is %s, and the password is the one you defined.' %
               handle)
    # open new browser window
    webbrowser.open('https://www.gandi.net/admin/api_key')
    # just to avoid missing the next question in webbrowser stderr
    time.sleep(1)
    # get the apikey from shell
    apikey = None
    while not apikey:
        apikey = click.prompt('What is your production apikey')
    caller = gandi.get('api.key')
    # save apikey in the conf if none defined else display an help on how to
    # use it
    if caller:
        gandi.echo('You already have an apikey defined, if you want to use the'
                   ' newly created contact, use the env var : ')
        gandi.echo('export API_KEY=%s' % apikey)
    else:
        gandi.echo('Will save your apikey into the config file.')
        gandi.configure(True, 'api.key', apikey)
    return handle
def create_sparse_mapping(id_array, unique_ids=None):
    """
    Will create a scipy.sparse compressed-sparse-row matrix that maps
    each row represented by an element in id_array to the corresponding
    value of the unique ids in id_array.

    Parameters
    ----------
    id_array : 1D ndarray of ints.
        Each element should represent some id related to the corresponding
        row.
    unique_ids : 1D ndarray of ints, or None, optional.
        If not None, each element should be present in `id_array`. The
        elements in `unique_ids` should be present in the order in which one
        wishes them to appear in the columns of the resulting sparse array.
        For the `row_to_obs` and `row_to_mixers` mappings, this should be
        the order of appearance in `id_array`. If None, then the unique_ids
        will be created from `id_array`, in the order of their appearance
        in `id_array`.

    Returns
    -------
    mapping : 2D scipy.sparse CSR matrix.
        Will contain only zeros and ones. `mapping[i, j] == 1` where
        `id_array[i] == unique_ids[j]`. The id's corresponding to each
        column are given by `unique_ids`. The rows correspond to the
        elements of `id_array`.
    """
    if unique_ids is None:
        unique_ids = get_original_order_unique_ids(id_array)
    # Basic argument validation.
    assert isinstance(unique_ids, np.ndarray)
    assert isinstance(id_array, np.ndarray)
    assert unique_ids.ndim == 1
    assert id_array.ndim == 1
    # Boolean mask of rows whose id appears among the unique ids.
    in_unique = np.in1d(id_array, unique_ids)
    num_rows = id_array.size
    num_cols = unique_ids.size
    # One non-zero entry (a 1) per represented row.
    data = np.ones(in_unique.sum(), dtype=int)
    row_indices = np.arange(num_rows)[in_unique]
    # Map each unique id to its destination column, looking up only ids we
    # know are represented (avoids KeyError).
    column_of_id = dict(zip(unique_ids, np.arange(num_cols)))
    col_indices =\
        np.array([column_of_id[i] for i in id_array[in_unique]])
    return csr_matrix((data, (row_indices, col_indices)),
                      shape=(num_rows, num_cols))
each row represented by an element in id_array to the corresponding
value of the unique ids in id_array.
Parameters
----------
id_array : 1D ndarray of ints.
Each element should represent some id related to the corresponding row.
unique_ids : 1D ndarray of ints, or None, optional.
If not None, each element should be present in `id_array`. The elements
in `unique_ids` should be present in the order in which one wishes them
to appear in the columns of the resulting sparse array. For the
`row_to_obs` and `row_to_mixers` mappings, this should be the order of
appearance in `id_array`. If None, then the unique_ids will be created
from `id_array`, in the order of their appearance in `id_array`.
Returns
-------
mapping : 2D scipy.sparse CSR matrix.
Will contain only zeros and ones. `mapping[i, j] == 1` where
`id_array[i] == unique_ids[j]`. The id's corresponding to each column
are given by `unique_ids`. The rows correspond to the elements of
`id_array`. |
def main(argv=None):
    """ Script execution.

    The project repo will be cloned to a temporary directory, and the desired
    branch, tag, or commit will be checked out. Then, the application will be
    installed into a self-contained virtualenv environment.
    """
    @contextmanager
    def tmpdir():
        """ Create a self-deleting temporary directory. """
        path = mkdtemp()
        try:
            yield path
        finally:
            rmtree(path)
        return

    def test():
        """ Execute the test suite. """
        # NOTE: relies on the closure variables ``pip`` and ``path``, which
        # are assigned later in main() before test() is ever called.
        install = "{:s} install -r requirements-test.txt"
        check_call(install.format(pip).split())
        pytest = join(path, "bin", "py.test")
        test = "{:s} test/".format(pytest)
        check_call(test.split())
        uninstall = "{:s} uninstall -y -r requirements-test.txt"
        check_call(uninstall.format(pip).split())
        return

    args = _cmdline(argv)
    path = join(abspath(args.root), args.name)
    with tmpdir() as tmp:
        # Clone the repo into the temp dir and check out the requested ref.
        clone = "git clone {:s} {:s}".format(args.repo, tmp)
        check_call(clone.split())
        chdir(tmp)
        checkout = "git checkout {:s}".format(args.checkout)
        check_call(checkout.split())
        # Build the self-contained virtualenv and install the application.
        virtualenv = "virtualenv {:s}".format(path)
        check_call(virtualenv.split())
        pip = join(path, "bin", "pip")
        install = "{:s} install -U -r requirements.txt .".format(pip)
        check_call(install.split())
        if args.test:
            test()
    return 0
The project repo will be cloned to a temporary directory, and the desired
branch, tag, or commit will be checked out. Then, the application will be
installed into a self-contained virtualenv environment. |
def list_projects(self, dataset_name):
    """
    Lists a set of projects related to a dataset.

    Arguments:
        dataset_name (str): Dataset name to search projects for

    Returns:
        dict: Projects found based on dataset query

    Raises:
        RemoteDataNotFoundError: If the server does not answer with HTTP 200.
    """
    url = self.url() + "/nd/resource/dataset/{}".format(dataset_name)\
        + "/project/"
    req = self.remote_utils.get_url(url)
    # BUG FIX: the original used ``is not 200``, an *identity* comparison
    # whose result for ints is implementation-dependent; compare by value.
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
    return req.json()
Arguments:
dataset_name (str): Dataset name to search projects for
Returns:
dict: Projects found based on dataset query |
def chunk(iterable, size):
    """Split *iterable* into successive tuples of at most *size* elements.

    chunk('ABCDEFG', 3) --> ABC DEF G

    :param iterable: any iterable to slice up
    :param size: maximum chunk length; for ``size <= 0`` nothing is yielded
        (the original implementation looped forever yielding empty tuples
        when ``size`` was negative)
    :return: generator of tuples
    """
    # TODO: only used in gui.mainwindow(deprecated)
    if size <= 0:
        # Guard against the infinite empty-tuple loop of the original code.
        return
    iterator = iter(iterable)
    while True:
        piece = []
        try:
            for _ in range(size):
                piece.append(next(iterator))
        except StopIteration:
            # Iterator exhausted: emit the final partial chunk, if any.
            if piece:
                yield tuple(piece)
            return
        yield tuple(piece)
def _validate_relations(relations, services, add_error):
    """Validate relations, ensuring that the endpoints exist.

    Receive the relations and services bundle sections.
    Use the given add_error callable to register each validation error.
    """
    if not relations:
        return
    for rel in relations:
        if not islist(rel):
            add_error('relation {} is malformed'.format(rel))
            continue
        label = ' -> '.join('{}'.format(part) for part in rel)
        for ep in rel:
            if not isstring(ep):
                add_error(
                    'relation {} has malformed endpoint {}'
                    ''.format(label, ep))
                continue
            # "service:interface" -> service; any other colon count means
            # the whole endpoint string is treated as the service name.
            pieces = ep.split(':')
            svc = pieces[0] if len(pieces) == 2 else ep
            if svc not in services:
                add_error(
                    'relation {} endpoint {} refers to a non-existent service '
                    '{}'.format(label, ep, svc))
Receive the relations and services bundle sections.
Use the given add_error callable to register validation error. |
def sina_xml_to_url_list(xml_data):
    """str->list
    Convert XML to URL List.
    From Biligrab.
    """
    document = parseString(xml_data)
    # Each <durl> element carries exactly one <url> child whose text node
    # is the download location.
    return [
        durl.getElementsByTagName('url')[0].childNodes[0].data
        for durl in document.getElementsByTagName('durl')
    ]
Convert XML to URL List.
From Biligrab. |
def find_carbon_sources(model):
    """
    Find all active carbon source reactions.

    Parameters
    ----------
    model : Model
        A genome-scale metabolic model.

    Returns
    -------
    list
        The medium reactions with carbon input flux.
    """
    try:
        model.slim_optimize(error_value=None)
    except OptimizationError:
        # No feasible flux solution means no active sources to report.
        return []
    carbon_sources = []
    for rxn in model.reactions.get_by_any(list(model.medium)):
        influx = total_components_flux(rxn.flux, reaction_elements(rxn),
                                       consumption=True)
        if influx > 0:
            carbon_sources.append(rxn)
    return carbon_sources
Parameters
----------
model : Model
A genome-scale metabolic model.
Returns
-------
list
The medium reactions with carbon input flux. |
def set_property(self, key, value):
    """
    Update only one property in the dict

    :param key: name of the property to set
    :param value: new value for the property
    """
    self.properties[key] = value
    # Persist immediately so the stored copy stays in sync with memory.
    self.sync_properties()
def canonical_url(app, pagename, templatename, context, doctree):
    """Build the canonical URL for a page.

    Appends the path for the page to the base URL configured in
    ``html_context["canonical_url"]`` and stores the result in
    ``html_context["page_canonical_url"]``.  Does nothing when no base
    URL is configured.
    """
    base_url = context.get("canonical_url")
    if base_url:
        page_uri = app.builder.get_target_uri(pagename)
        context["page_canonical_url"] = base_url + page_uri
page to the base URL specified by the
``html_context["canonical_url"]`` config and stores it in
``html_context["page_canonical_url"]``. |
def download(self, path, retry=5, timeout=10,
             chunk_size=PartSize.DOWNLOAD_MINIMUM_PART_SIZE, wait=True,
             overwrite=False):
    """
    Downloads the file and returns a download handle.
    Download will not start until .start() method is invoked.

    :param path: Full path to the new file.
    :param retry: Number of retries if error occurs during download.
    :param timeout: Timeout for http requests.
    :param chunk_size: Chunk size in bytes.
    :param wait: If true will wait for download to complete.
    :param overwrite: If True will silently overwrite existing file.
    :return: Download handle.
    :raises LocalFileAlreadyExists: If the target file exists and
        ``overwrite`` is False.
    """
    if not overwrite and os.path.exists(path):
        raise LocalFileAlreadyExists(message=path)
    extra = {'resource': self.__class__.__name__, 'query': {
        'id': self.id,
        'path': path,
        'overwrite': overwrite,
        'retry': retry,
        'timeout': timeout,
        'chunk_size': chunk_size,
        'wait': wait,
    }}
    logger.info('Downloading file', extra=extra)
    info = self.download_info()
    download = Download(
        url=info.url, file_path=path, retry_count=retry, timeout=timeout,
        part_size=chunk_size, api=self._api
    )
    if wait:
        download.start()
        download.wait()
    # BUG FIX: the handle was previously returned only when wait=False,
    # although the docstring promises a download handle in all cases.
    # Returning it unconditionally is backward compatible (callers with
    # wait=True previously received None and ignored the result).
    return download
Download will not start until .start() method is invoked.
:param path: Full path to the new file.
:param retry: Number of retries if error occurs during download.
:param timeout: Timeout for http requests.
:param chunk_size: Chunk size in bytes.
:param wait: If true will wait for download to complete.
:param overwrite: If True will silently overwrite existing file.
:return: Download handle. |
def bsp_resize(node: tcod.bsp.BSP, x: int, y: int, w: int, h: int) -> None:
    """
    .. deprecated:: 2.0
        Assign directly to :any:`BSP` attributes instead.
    """
    # Thin deprecated shim: copy the geometry onto the node's attributes.
    node.x, node.y = x, y
    node.width, node.height = w, h
Assign directly to :any:`BSP` attributes instead. |
def send_error(self, code, message=None):
    """Send and log an error reply.

    Arguments are the error code, and a detailed message.
    The detailed message defaults to the short entry matching the
    response code.

    This sends an error response (so it must be called before any
    output has been generated), logs the error, and finally sends
    a piece of HTML explaining the error to the user.
    """
    try:
        short, long = self.responses[code]
    except KeyError:
        # Unknown status code: fall back to placeholder text.
        short, long = '???', '???'
    if message is None:
        message = short
    explain = long
    self.log_error("code %d, message %s", code, message)
    # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
    content = (self.error_message_format %
               {'code': code, 'message': _quote_html(message), 'explain': explain})
    self.send_response(code, message)
    self.send_header("Content-Type", self.error_content_type)
    self.send_header('Connection', 'close')
    self.end_headers()
    # Bodies are omitted for HEAD requests and for status codes that must
    # not carry one (1xx, 204 No Content, 304 Not Modified).
    # NOTE(review): writes a str to wfile; on Python 3 this would need
    # bytes -- presumably this handler targets Python 2, confirm.
    if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
        self.wfile.write(content)
Arguments are the error code, and a detailed message.
The detailed message defaults to the short entry matching the
response code.
This sends an error response (so it must be called before any
output has been generated), logs the error, and finally sends
a piece of HTML explaining the error to the user. |
def get_insertion(cls) -> str:
    """Return the complete string to be inserted into the string of the
    template file.

    >>> from hydpy.auxs.xmltools import XSDWriter
    >>> print(XSDWriter.get_insertion())    # doctest: +ELLIPSIS
        <element name="arma_v1"
                 substitutionGroup="hpcb:sequenceGroup"
                 type="hpcb:arma_v1Type"/>
    <BLANKLINE>
        <complexType name="arma_v1Type">
            <complexContent>
                <extension base="hpcb:sequenceGroupType">
                    <sequence>
                        <element name="fluxes"
                                 minOccurs="0">
                            <complexType>
                                <sequence>
                                    <element
                                        name="qin"
                                        minOccurs="0"/>
    ...
                            </complexType>
                        </element>
                    </sequence>
                </extension>
            </complexContent>
        </complexType>
    <BLANKLINE>
    """
    indent = 1
    blanks = ' ' * (indent+4)
    subs = []
    # Emit one <element>/<complexType> pair per available model; the
    # per-model sequence content is delegated to get_modelinsertion.
    for name in cls.get_modelnames():
        subs.extend([
            f'{blanks}<element name="{name}"',
            f'{blanks} substitutionGroup="hpcb:sequenceGroup"',
            f'{blanks} type="hpcb:{name}Type"/>',
            f'',
            f'{blanks}<complexType name="{name}Type">',
            f'{blanks} <complexContent>',
            f'{blanks} <extension base="hpcb:sequenceGroupType">',
            f'{blanks} <sequence>'])
        # Instantiating the model is required to enumerate its sequences.
        model = importtools.prepare_model(name)
        subs.append(cls.get_modelinsertion(model, indent + 4))
        subs.extend([
            f'{blanks} </sequence>',
            f'{blanks} </extension>',
            f'{blanks} </complexContent>',
            f'{blanks}</complexType>',
            f''
        ])
    return '\n'.join(subs)
template file.
>>> from hydpy.auxs.xmltools import XSDWriter
>>> print(XSDWriter.get_insertion()) # doctest: +ELLIPSIS
<element name="arma_v1"
substitutionGroup="hpcb:sequenceGroup"
type="hpcb:arma_v1Type"/>
<BLANKLINE>
<complexType name="arma_v1Type">
<complexContent>
<extension base="hpcb:sequenceGroupType">
<sequence>
<element name="fluxes"
minOccurs="0">
<complexType>
<sequence>
<element
name="qin"
minOccurs="0"/>
...
</complexType>
</element>
</sequence>
</extension>
</complexContent>
</complexType>
<BLANKLINE> |
def set_run_on_node_mask(nodemask):
    """
    Runs the current thread and its children only on nodes specified in nodemask.

    They will not migrate to CPUs of other nodes until the node affinity is
    reset with a new call to L{set_run_on_node_mask}.

    @param nodemask: node mask
    @type nodemask: C{set}
    @raise RuntimeError: if the underlying libnuma call reports failure
    """
    # Convert the Python set into libnuma's nodemask_t representation.
    mask = set_to_numa_nodemask(nodemask)
    tmp = bitmask_t()
    # Wrap the nodemask in a generic bitmask_t; ``size`` is counted in bits.
    tmp.maskp = cast(byref(mask), POINTER(c_ulong))
    tmp.size = sizeof(nodemask_t) * 8
    if libnuma.numa_run_on_node_mask(byref(tmp)) < 0:
        raise RuntimeError()
They will not migrate to CPUs of other nodes until the node affinity is
reset with a new call to L{set_run_on_node_mask}.
@param nodemask: node mask
@type nodemask: C{set} |
def get(self, resource_id=None):
    """Return an HTTP response object resulting from an HTTP GET call.

    If *resource_id* is provided, return just the single resource.
    Otherwise, return the full collection.

    :param resource_id: The value of the resource's primary key
    :raises BadRequestException: if GET is not a valid method for the model
        or for the requested resource
    """
    # A trailing /meta path requests the resource description instead.
    if request.path.endswith('meta'):
        return self._meta()
    if resource_id is None:
        error_message = is_valid_method(self.__model__)
        if error_message:
            raise BadRequestException(error_message)
        # ?export serializes the whole collection for download.
        if 'export' in request.args:
            return self._export(self._all_resources())
        # NOTE(review): this branch calls flask.jsonify while the
        # single-resource branch below calls a bare jsonify -- presumably a
        # local helper with different serialization; confirm both resolve.
        return flask.jsonify({
            self.__json_collection_name__: self._all_resources()
        })
    else:
        resource = self._resource(resource_id)
        error_message = is_valid_method(self.__model__, resource)
        if error_message:
            raise BadRequestException(error_message)
        return jsonify(resource)
If *resource_id* is provided, return just the single resource.
Otherwise, return the full collection.
:param resource_id: The value of the resource's primary key |
def mangleIR(data, ignore_errors=False):
    """Mangle a raw Kira data packet into shorthand.

    :param data: raw packet as ``bytes`` or ``str`` of hex timing tokens.
    :param ignore_errors: if True, return None on unparseable packets
        instead of raising.
    :return: string of 'S'/'L' characters marking each timing as shorter or
        longer than the packet's median pulse width, or None.
    """
    try:
        # Packet mangling algorithm inspired by Rex Becket's kirarx vera plugin
        # Determine a median value for the timing packets and categorize each
        # timing as longer or shorter than that. This will always work for signals
        # that use pulse width modulation (since varying by long-short is basically
        # the definition of what PWM is). By lucky coincidence this also works with
        # the RC-5/RC-6 encodings used by Phillips (manchester encoding)
        # because time variations of opposite-phase/same-phase are either N or 2*N
        if isinstance(data, bytes):
            data = data.decode('ascii')
        data = data.strip()
        times = [int(x, 16) for x in data.split()[2:]]
        minTime = min(times[2:-1])
        maxTime = max(times[2:-1])
        margin = (maxTime - minTime) / 2 + minTime
        # Idiom fix: conditional expression instead of the fragile and/or trick.
        return ''.join('S' if x < margin else 'L' for x in times)
    except Exception:
        # Probably a mangled packet.  BUG FIX: a bare ``except:`` would also
        # swallow KeyboardInterrupt/SystemExit; catch Exception instead.
        if not ignore_errors:
            raise
def initialize_path(self, path_num=None):
    """
    initialize consumer for next path

    :param path_num: index of the path about to be simulated; unused here,
        presumably consumed by subclass overrides -- TODO confirm.
    :return: the freshly reset state object.
    """
    # Reset from a copy of the pristine initial state so mutations made
    # during one path never leak into the next.
    self.state = copy(self.initial_state)
    return self.state
def create(obj: PersistedObject, obj_type: Type[Any], arg_name: str):
    """
    Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
    https://github.com/nose-devs/nose/issues/725

    :param obj: the persisted object that could not be built
    :param obj_type: the constructor's declared type
    :param arg_name: the mandatory constructor argument that was missing
    :return: a ready-to-raise MissingMandatoryAttributeFiles instance
    """
    # BUG FIX: the original message rendered as "...'<arg>'was not found"
    # (missing space after the closing quote of the argument name).
    return MissingMandatoryAttributeFiles('Multifile object ' + str(obj) + ' cannot be built from constructor of '
                                          'type ' + get_pretty_type_str(obj_type) +
                                          ', mandatory constructor argument \'' + arg_name + '\' was not found on '
                                          'filesystem')
https://github.com/nose-devs/nose/issues/725
:param obj:
:param obj_type:
:param arg_name:
:return: |
def write(
    contents: str,
    path: Union[str, pathlib.Path],
    verbose: bool = False,
    logger_func=None,
) -> bool:
    """
    Writes ``contents`` to ``path``.

    Checks if ``path`` already exists and only writes out new contents if
    the old contents do not match.  Creates any intermediate missing
    directories.

    :param contents: the file contents to write
    :param path: the path to write to
    :param verbose: whether to print output
    :param logger_func: optional callable used instead of ``print``
    :return: True if the file was (re)written, False if it was preserved
    """
    print_func = logger_func or print
    path = pathlib.Path(path)
    existed = path.exists()
    if existed:
        if path.read_text() == contents:
            if verbose:
                print_func("preserved {}".format(path))
            return False
    else:
        # Create missing parent directories for brand-new files.
        # exist_ok avoids a race if another process created them meanwhile.
        path.parent.mkdir(parents=True, exist_ok=True)
    # Single write path replaces the original's duplicated branches
    # (the old ``elif not path.exists()`` was an unreachable-guard ``else``).
    path.write_text(contents)
    if verbose:
        print_func(("rewrote {}" if existed else "wrote {}").format(path))
    return True
Checks if ``path`` already exists and only write out new contents if the
old contents do not match.
Creates any intermediate missing directories.
:param contents: the file contents to write
:param path: the path to write to
:param verbose: whether to print output |
def init_gl(self):
    """
    Perform the magic incantations to create an
    OpenGL scene using pyglet.
    """
    # default background color is white-ish
    background = [.99, .99, .99, 1.0]
    # if user passed a background color use it
    if 'background' in self.kwargs:
        try:
            # convert to (4,) uint8 RGBA
            background = to_rgba(self.kwargs['background'])
            # convert to 0.0 - 1.0 float
            background = background.astype(np.float64) / 255.0
        except BaseException:
            # Fall back to the default background rather than failing the
            # whole window setup on a malformed color value.
            log.error('background color set but wrong!',
                      exc_info=True)
    self._gl_set_background(background)
    # Enable the fixed-function pipeline features needed by the scene.
    self._gl_enable_depth(self.scene)
    self._gl_enable_color_material()
    self._gl_enable_blending()
    self._gl_enable_smooth_lines()
    self._gl_enable_lighting(self.scene)
OpenGL scene using pyglet. |
def doubleprox_dc(x, y, f, phi, g, K, niter, gamma, mu, callback=None):
    r"""Double-proximal gradient d.c. algorithm of Banert and Bot.

    This algorithm solves a problem of the form ::

        min_x f(x) + phi(x) - g(Kx).

    Parameters
    ----------
    x : `LinearSpaceElement`
        Initial primal guess, updated in-place.
    y : `LinearSpaceElement`
        Initial dual guess, updated in-place.
    f : `Functional`
        Convex functional. Needs to implement ``g.proximal``.
    phi : `Functional`
        Convex functional. Needs to implement ``phi.gradient``.
        Convergence can be guaranteed if the gradient is Lipschitz continuous.
    g : `Functional`
        Convex functional. Needs to implement ``h.convex_conj.proximal``.
    K : `Operator`
        Linear operator. Needs to implement ``K.adjoint``
    niter : int
        Number of iterations.
    gamma : positive float
        Stepsize in the primal updates.
    mu : positive float
        Stepsize in the dual updates.
    callback : callable, optional
        Function called with the current iterate after each iteration.

    Notes
    -----
    This algorithm is proposed in `[BB2016]
    <https://arxiv.org/abs/1610.06538>`_ and solves the d.c. problem

    .. math ::
        \min_x f(x) + \varphi(x) - g(Kx)

    together with its Toland dual

    .. math ::
        \min_y g^*(y) - (f + \varphi)^*(K^* y).

    The iterations are given by

    .. math ::
        x_{n+1} &= \mathrm{Prox}_{\gamma f} (x_n + \gamma (K^* y_n
                   - \nabla \varphi(x_n))), \\
        y_{n+1} &= \mathrm{Prox}_{\mu g^*} (y_n + \mu K x_{n+1}).

    To guarantee convergence, the parameter :math:`\gamma` must satisfy
    :math:`0 < \gamma < 2/L` where :math:`L` is the Lipschitz constant of
    :math:`\nabla \varphi`.

    References
    ----------
    [BB2016] Banert, S, and Bot, R I. *A general double-proximal gradient
    algorithm for d.c. programming*. arXiv:1610.06538 [math.OC] (2016).

    See also
    --------
    dca :
        Solver with subgradient steps for all the functionals.
    prox_dca :
        Solver with a proximal step for ``f`` and a subgradient step for ``g``.
    """
    primal_space = f.domain
    dual_space = g.domain
    # Domain/range consistency checks before any iteration work is done.
    if phi.domain != primal_space:
        raise ValueError('`f.domain` and `phi.domain` need to be equal, but '
                         '{} != {}'.format(primal_space, phi.domain))
    if K.domain != primal_space:
        raise ValueError('`f.domain` and `K.domain` need to be equal, but '
                         '{} != {}'.format(primal_space, K.domain))
    if K.range != dual_space:
        raise ValueError('`g.domain` and `K.range` need to be equal, but '
                         '{} != {}'.format(dual_space, K.range))
    # Loop-invariant: hoist the convex conjugate out of the iteration.
    g_convex_conj = g.convex_conj
    for _ in range(niter):
        # Primal step: x <- prox_{gamma f}(x + gamma (K^* y - grad phi(x))).
        f.proximal(gamma)(x.lincomb(1, x,
                          gamma, K.adjoint(y) - phi.gradient(x)),
                          out=x)
        # Dual step: y <- prox_{mu g^*}(y + mu K x).
        g_convex_conj.proximal(mu)(y.lincomb(1, y, mu, K(x)), out=y)
        if callback is not None:
            callback(x)
This algorithm solves a problem of the form ::
min_x f(x) + phi(x) - g(Kx).
Parameters
----------
x : `LinearSpaceElement`
Initial primal guess, updated in-place.
y : `LinearSpaceElement`
Initial dual guess, updated in-place.
f : `Functional`
Convex functional. Needs to implement ``g.proximal``.
phi : `Functional`
Convex functional. Needs to implement ``phi.gradient``.
Convergence can be guaranteed if the gradient is Lipschitz continuous.
g : `Functional`
Convex functional. Needs to implement ``h.convex_conj.proximal``.
K : `Operator`
Linear operator. Needs to implement ``K.adjoint``
niter : int
Number of iterations.
gamma : positive float
Stepsize in the primal updates.
mu : positive float
Stepsize in the dual updates.
callback : callable, optional
Function called with the current iterate after each iteration.
Notes
-----
This algorithm is proposed in `[BB2016]
<https://arxiv.org/abs/1610.06538>`_ and solves the d.c. problem
.. math ::
\min_x f(x) + \varphi(x) - g(Kx)
together with its Toland dual
.. math ::
\min_y g^*(y) - (f + \varphi)^*(K^* y).
The iterations are given by
.. math ::
x_{n+1} &= \mathrm{Prox}_{\gamma f} (x_n + \gamma (K^* y_n
- \nabla \varphi(x_n))), \\
y_{n+1} &= \mathrm{Prox}_{\mu g^*} (y_n + \mu K x_{n+1}).
To guarantee convergence, the parameter :math:`\gamma` must satisfy
:math:`0 < \gamma < 2/L` where :math:`L` is the Lipschitz constant of
:math:`\nabla \varphi`.
References
----------
[BB2016] Banert, S, and Bot, R I. *A general double-proximal gradient
algorithm for d.c. programming*. arXiv:1610.06538 [math.OC] (2016).
See also
--------
dca :
Solver with subgradient steps for all the functionals.
prox_dca :
Solver with a proximal step for ``f`` and a subgradient step for ``g``. |
def write_to(self, group, append=False):
    """Writes the properties to a `group`, or append it

    :param group: h5py-style group supporting ``group['properties']``.
    :param append: if True, read the properties already stored in the
        group and prepend them to this object's data before writing.
    """
    data = self.data
    if append is True:
        try:
            # concatenate original and new properties in a single list
            original = read_properties(group)
            data = original + data
        except EOFError:
            pass  # no former data to append on
    # h5py does not support embedded NULLs in strings ('\x00'), so escape
    # them out of the pickled byte stream before storing.
    # NOTE(review): pickle is executable on load -- only read back groups
    # written by trusted code.
    data = pickle.dumps(data).replace(b'\x00', b'__NULL__')
    group['properties'][...] = np.void(data)
def horizontal_headers(self, value):
    """
    Setter for **self.__horizontal_headers** attribute.

    :param value: Attribute value.
    :type value: OrderedDict
    """
    if value is not None:
        # isinstance also accepts OrderedDict subclasses (backward
        # compatible generalization of the original ``type(...) is`` check).
        # NOTE: this assert is stripped when Python runs with -O.
        assert isinstance(value, OrderedDict), "'{0}' attribute: '{1}' type is not 'OrderedDict'!".format(
            "horizontal_headers", value)
    self.__horizontal_headers = value
:param value: Attribute value.
:type value: OrderedDict |
def getLocalTime(date, time, *args, **kwargs):
    """
    Get the time in the local timezone from date and time

    Returns None when no time is given.
    """
    if time is None:
        return None
    # getLocalDateAndTime returns (date, time); keep only the time part.
    return getLocalDateAndTime(date, time, *args, **kwargs)[1]
def activate(self):
    """Activate a Vera scene.

    This will call the Vera api to activate a scene.
    """
    request_payload = {
        'id': 'lu_action',
        'action': 'RunScene',
        'serviceId': self.scene_service,
    }
    response = self.vera_request(**request_payload)
    logger.debug("activate: "
                 "result of vera_request with payload %s: %s",
                 request_payload, response.text)
    # Optimistically mark the scene active once the request went through.
    self._active = True
This will call the Vera api to activate a scene. |
def start(self, name):
    '''
    End the current behaviour and run a named behaviour.

    :param name: the name of the behaviour to run
    :type name: str
    '''
    response = self.boatd.post({'active': name}, endpoint='/behaviours')
    active = response.get('active')
    if active is None:
        return 'no behaviour running'
    return 'started {}'.format(active)
:param name: the name of the behaviour to run
:type name: str |
def getCSVReader(data, reader_type=csv.DictReader):
    """Take a Rave CSV output ending with a line with just EOF on it and return a DictReader"""
    # Strip the trailing "\nEOF" sentinel (4 characters) before handing the
    # buffer to the csv machinery.
    trimmed = StringIO(data[:-4])
    return reader_type(trimmed)
def frames(self, most_recent=False):
    """Retrieve a new frame from the webcam and convert it to a ColorImage.

    Parameters
    ----------
    most_recent: bool
        If true, the OpenCV buffer is emptied for the webcam before reading
        the most recent frame.

    Returns
    -------
    :obj:`tuple` of :obj:`ColorImage`, None, None
        The ColorImage of the current frame; the depth and IR slots are
        always None for this sensor.
    """
    if most_recent:
        # BUG FIX: the original used Python 2's ``xrange``, which raises
        # NameError on Python 3 (``range`` is already used elsewhere here).
        # Grab several frames to drain OpenCV's internal buffer.
        for i in range(4):
            self._cap.grab()
    if self._adjust_exposure:
        # Best effort: configure exposure/saturation/gain via v4l2-ctl;
        # failures (missing binary, no permission) are deliberately ignored.
        try:
            command = 'v4l2-ctl -d /dev/video{} -c exposure_auto=1 -c exposure_auto_priority=0 -c exposure_absolute=100 -c saturation=60 -c gain=140'.format(self._device_id)
            FNULL = open(os.devnull, 'w')
            subprocess.call(shlex.split(command), stdout=FNULL, stderr=subprocess.STDOUT)
        except Exception:
            pass
    ret, frame = self._cap.read()
    rgb_data = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    return ColorImage(rgb_data, frame=self._frame), None, None
a DepthImage, and an IrImage.
Parameters
----------
most_recent: bool
If true, the OpenCV buffer is emptied for the webcam before reading the most recent frame.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame. |
def add(self, si):
    '''puts `si` into the currently open chunk, which it creates if
    necessary. If this item causes the chunk to cross chunk_max,
    then the chunk closed after adding.
    '''
    chunk_obj = self.o_chunk
    if chunk_obj is None:
        # Lazily open a fresh chunk file, replacing any stale temp file.
        if os.path.exists(self.t_path):
            os.remove(self.t_path)
        chunk_obj = streamcorpus.Chunk(self.t_path, mode='wb')
        self.o_chunk = chunk_obj
    chunk_obj.add(si)
    logger.debug('added %d-th item to chunk', len(chunk_obj))
    if len(chunk_obj) == self.chunk_max:
        self.close()
necessary. If this item causes the chunk to cross chunk_max,
then the chunk closed after adding. |
def incr(self, key, value):
    """
    Increment a key, if it exists, returns it's actual value, if it don't, return 0.

    The increment is applied on every configured server; the value reported
    by the first server is returned.

    :param key: Key's name
    :type key: six.string_types
    :param value: Number to be incremented
    :type value: int
    :return: Actual value of the key on server
    :rtype: int
    """
    results = [server.incr(key, value) for server in self.servers]
    return results[0]
def all_time(self):
    """
    Access the all_time

    :returns: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeList
    :rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeList
    """
    # Lazily construct and cache the AllTimeList on first access.
    if self._all_time is None:
        self._all_time = AllTimeList(self._version, account_sid=self._solution['account_sid'], )
    return self._all_time
:returns: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeList
:rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeList |
def add_to_pythonpath(self, path):
    """Add path to project's PYTHONPATH

    Return True if path was added, False if it was already there"""
    current = self.get_pythonpath()
    if path in current:
        return False
    # Prepend so the new entry takes precedence during imports.
    current.insert(0, path)
    self.set_pythonpath(current)
    return True
Return True if path was added, False if it was already there |
def extract_ast_species(ast):
    """Extract species from ast.species set of tuples (id, label)"""
    species_id = "None"
    species_label = "None"
    # Keep only the pairs whose id is truthy.
    species = [pair for pair in ast.species if pair[0]]
    # Only an unambiguous single match is accepted.
    if len(species) == 1:
        (species_id, species_label) = species[0]
        if not species_id:
            species_id = "None"
            species_label = "None"
    log.debug(f"AST Species: {ast.species} Species: {species} SpeciesID: {species_id}")
    return (species_id, species_label)
def swapColors(self):
    """
    Swaps the current :py:class:`Color` with the secondary :py:class:`Color`.

    :rtype: Nothing.
    """
    # Capture the primary color's 0-255 RGBA before it is overwritten.
    primary_rgba = self.color.get_0_255()
    self.color, self.secondColor = self.secondColor, Color(primary_rgba, '0-255')
:rtype: Nothing. |
def all_dataset_ids(self, reader_name=None, composites=False):
    """Get names of all datasets from loaded readers or `reader_name` if
    specified.

    :return: list of all dataset names
    """
    try:
        if reader_name:
            selected_readers = [self.readers[reader_name]]
        else:
            selected_readers = self.readers.values()
    except (AttributeError, KeyError):
        raise KeyError("No reader '%s' found in scene" % reader_name)
    dataset_ids = []
    for reader in selected_readers:
        dataset_ids.extend(reader.all_dataset_ids)
    if composites:
        dataset_ids += self.all_composite_ids()
    return dataset_ids
def init(deb1, deb2=False):
    """Initialize DEBUG and DEBUGALL.

    Allows other modules to set DEBUG and DEBUGALL, so their
    call to dprint or dprintx generate output.

    Args:
        deb1 (bool): value of DEBUG to set
        deb2 (bool): optional - value of DEBUGALL to set,
            defaults to False.
    """
    global DEBUG, DEBUGALL  # pylint: disable=global-statement
    DEBUG = deb1
    DEBUGALL = deb2
Allows other modules to set DEBUG and DEBUGALL, so their
call to dprint or dprintx generate output.
Args:
deb1 (bool): value of DEBUG to set
deb2 (bool): optional - value of DEBUGALL to set,
defaults to False. |
def is_result_edition_allowed(self, analysis_brain):
    """Checks if the edition of the result field is allowed

    :param analysis_brain: Brain that represents an analysis
    :return: True if the user can edit the result field, otherwise False
    """
    # General edition must be granted before anything else.
    if not self.is_analysis_edition_allowed(analysis_brain):
        return False
    analysis = api.get_object(analysis_brain)
    if not analysis.getDetectionLimitOperand():
        # Regular result (not a detection limit): editable.
        return True
    # Result is a detection limit: editable unless the service exposes a
    # detection-limit selector while disallowing manual entry.
    if analysis.getDetectionLimitSelector() and \
            not analysis.getAllowManualDetectionLimit():
        return False
    return True
:param analysis_brain: Brain that represents an analysis
:return: True if the user can edit the result field, otherwise False |
def update_option_set_by_id(cls, option_set_id, option_set, **kwargs):
    """Update OptionSet

    Update attributes of OptionSet.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.update_option_set_by_id(option_set_id, option_set, async=True)
    >>> result = thread.get()

    :param async bool
    :param str option_set_id: ID of optionSet to update. (required)
    :param OptionSet option_set: Attributes of optionSet to update. (required)
    :return: OptionSet
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async cases return the helper's result directly;
    # the helper itself inspects kwargs['async'] to decide how to run.
    return cls._update_option_set_by_id_with_http_info(
        option_set_id, option_set, **kwargs)
Update attributes of OptionSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_option_set_by_id(option_set_id, option_set, async=True)
>>> result = thread.get()
:param async bool
:param str option_set_id: ID of optionSet to update. (required)
:param OptionSet option_set: Attributes of optionSet to update. (required)
:return: OptionSet
If the method is called asynchronously,
returns the request thread. |
def My_TreeTable(self, table, heads, heads2=None):
    ''' Define and display a table
    in which the values in first column form one or more trees.

    :param table: row data to display; the first column carries the tree values
    :param heads: primary column headings
    :param heads2: optional secondary headings row
    '''
    # Two-phase API: define the columns first, then render the rows.
    self.Define_TreeTable(heads, heads2)
    self.Display_TreeTable(table)
in which the values in first column form one or more trees. |
def _set_rules(self, rules: dict, overwrite=True):
    """Created a new Rules object based on the provided dict of rules."""
    if not isinstance(rules, dict):
        raise TypeError('rules must be an instance of dict or Rules,'
                        'got %r instead' % type(rules))
    if not overwrite:
        # Merge into the existing Rules object.
        self.rules.update(rules)
    else:
        self.rules = Rules(rules, self.default_rule)
def start(self, timeout=None):
    """Start the client in a new thread.

    Parameters
    ----------
    timeout : float in seconds
        Seconds to wait for client thread to start. Do not specify a
        timeout if start() is being called from the same ioloop that this
        client will be installed on, since it will block the ioloop without
        progressing.

    Raises
    ------
    RuntimeError
        If the client has already been started.
    """
    if self._running.isSet():
        raise RuntimeError("Device client already started.")
    # Make sure we have an ioloop
    self.ioloop = self._ioloop_manager.get_ioloop()
    if timeout:
        t0 = self.ioloop.time()
    self._ioloop_manager.start(timeout)
    self.ioloop.add_callback(self._install)
    if timeout:
        # Subtract the time already spent starting the ioloop manager so
        # the caller's total wait stays within the requested timeout.
        remaining_timeout = timeout - (self.ioloop.time() - t0)
        self.wait_running(remaining_timeout)
Parameters
----------
timeout : float in seconds
Seconds to wait for client thread to start. Do not specify a
timeout if start() is being called from the same ioloop that this
client will be installed on, since it will block the ioloop without
progressing. |
def p_expression_land(self, p):
    # NOTE: the string below is a PLY (yacc) grammar production read via
    # __doc__ by the parser generator -- it must not be reworded.
    'expression : expression LAND expression'
    # Build a logical-AND node from the two operand expressions; propagate
    # the left operand's source line to the result.
    p[0] = Land(p[1], p[3], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1)) | expression : expression LAND expression
def recognized_release(self):
    """
    Check if this Release value is something we can parse.
    :rtype: bool
    """
    _, _, rest = self.get_release_parts()
    # A recognized Release has either nothing after the parsed parts, or a
    # %{dist}/%{?dist} macro; anything else is a pattern we cannot parse.
    return rest == '' or re.match(r'%{\??dist}', rest) is not None
:rtype: bool |
def __set_authoring_nodes(self, source, target):
    """
    Sets given editor authoring nodes.

    :param source: Source file.
    :type source: unicode
    :param target: Target file.
    :type target: unicode
    """
    # Look up the editor bound to the source file, repoint it at the target
    # file, then refresh the model so its authoring nodes track the editor.
    editor = self.__script_editor.get_editor(source)
    editor.set_file(target)
    self.__script_editor.model.update_authoring_nodes(editor) | Sets given editor authoring nodes.
:param source: Source file.
:type source: unicode
:param target: Target file.
:type target: unicode |
def getWindowByPID(self, pid, order=0):
    """ Returns a handle for the first window that matches the provided PID

    ``order`` skips that many visible matching windows before picking one
    (0 returns the first). Returns None for non-positive PIDs or when no
    visible window belongs to the process. Windows-only (ctypes/user32).
    """
    if pid <= 0:
        return None
    # Callback prototype for EnumWindows: BOOL(HWND, LPARAM-as-py_object).
    EnumWindowsProc = ctypes.WINFUNCTYPE(
        ctypes.c_bool,
        ctypes.POINTER(ctypes.c_int),
        ctypes.py_object)
    def callback(hwnd, context):
        if ctypes.windll.user32.IsWindowVisible(hwnd):
            pid = ctypes.c_ulong()  # deliberately shadows the outer ``pid``
            ctypes.windll.user32.GetWindowThreadProcessId(hwnd, ctypes.byref(pid))
            if context["pid"] == int(pid.value) and not context["handle"]:
                if context["order"] > 0:
                    # Skip this match and keep enumerating.
                    context["order"] -= 1
                else:
                    context["handle"] = hwnd
        return True
    # ``data`` is mutated by the callback; "handle" holds the result.
    data = {"pid": pid, "handle": None, "order": order}
    ctypes.windll.user32.EnumWindows(EnumWindowsProc(callback), ctypes.py_object(data))
    return data["handle"] | Returns a handle for the first window that matches the provided PID
def get(self, url, headers=None, kwargs=None):
    """Make a GET request.

    To make a GET request pass, ``url``

    :param url: ``str``
    :param headers: ``dict``
    :param kwargs: ``dict``
    """
    # Delegate to the shared request helper with the HTTP verb fixed.
    return self._request(method='get', url=url, headers=headers, kwargs=kwargs)
To make a GET request pass, ``url``
:param url: ``str``
:param headers: ``dict``
:param kwargs: ``dict`` |
def acl_remove_draft(self, id_vlan, type_acl):
    """
    Remove Acl draft by type

    :param id_vlan: Identity of Vlan
    :param type_acl: Acl type v4 or v6
    :return: None
    :raise VlanDoesNotExistException: Vlan Does Not Exist.
    :raise InvalidIdVlanException: Invalid id for Vlan.
    :raise NetworkAPIException: Failed to access the data source.
    """
    parameters = dict(id_vlan=id_vlan, type_acl=type_acl)
    # NOTE(review): the removal is issued as a GET against the "remove"
    # endpoint -- confirm this matches the server-side API contract.
    uri = 'api/vlan/acl/remove/draft/%(id_vlan)s/%(type_acl)s/' % parameters
    return super(ApiVlan, self).get(uri) | Remove Acl draft by type
:param id_vlan: Identity of Vlan
:param type_acl: Acl type v4 or v6
:return: None
:raise VlanDoesNotExistException: Vlan Does Not Exist.
:raise InvalidIdVlanException: Invalid id for Vlan.
:raise NetworkAPIException: Failed to access the data source. |
def parse_port_pin(name_str):
    """Parses a string and returns a (port, gpio_bit) tuple.

    Pin names look like ``GP17``: the digits are the GPIO number, which is
    split into a port index (gpio // 8) and a single-bit mask within that
    port (1 << (gpio % 8)).

    :param name_str: pin name such as ``'GP0'`` or ``'GP17'``.
    :return: ``(port, gpio_bit)`` tuple.
    :raises ValueError: if the name is too short, does not start with
        ``GP``, or the suffix is not numeric.
    """
    if len(name_str) < 3:
        raise ValueError("Expecting pin name to be at least 3 characters")
    if name_str[:2] != 'GP':
        raise ValueError("Expecting pin name to start with GP")
    if not name_str[2:].isdigit():
        raise ValueError("Expecting numeric GPIO number")
    # Parse once; divmod avoids the float round-trip of int(int(x)/8).
    gpio_num = int(name_str[2:])
    port, bit_index = divmod(gpio_num, 8)
    return (port, 1 << bit_index)
def get_member_class(resource):
    """
    Returns the registered member class for the given resource.

    :param resource: registered resource
    :type resource: class implementing or instance providing or subclass of
        a registered resource interface.
    """
    reg = get_current_registry()
    if IInterface in provided_by(resource):
        # ``resource`` is itself an interface: the member class was
        # registered directly as a named utility for it.
        member_class = reg.getUtility(resource, name='member-class')
    else:
        # Concrete resource: adapt it to IMemberResource by name.
        member_class = reg.getAdapter(resource, IMemberResource,
                                      name='member-class')
    return member_class | Returns the registered member class for the given resource.
:param resource: registered resource
:type resource: class implementing or instance providing or subclass of
a registered resource interface. |
def position(self):
    """
    Returns the current position of the motor in pulses of the rotary
    encoder. When the motor rotates clockwise, the position will increase.
    Likewise, rotating counter-clockwise causes the position to decrease.
    Writing will set the position to that value.
    """
    # get_attr_int returns (attribute handle, value); cache the handle so
    # subsequent reads can reuse it.
    handle, current = self.get_attr_int(self._position, 'position')
    self._position = handle
    return current
encoder. When the motor rotates clockwise, the position will increase.
Likewise, rotating counter-clockwise causes the position to decrease.
Writing will set the position to that value. |
def merge_request(self, request_id):
    """
    Merge a pull request.

    :param request_id: the id of the request
    :return: the API response from the merge call
    """
    request_url = "{}pull-request/{}/merge".format(self.create_basic_url(),
                                                   request_id)
    return_value = self._call_api(request_url, method='POST')
    LOG.debug(return_value)
    # Fix: the response was previously computed and logged but never
    # returned, even though the docstring advertises a return value.
    return return_value
:param request_id: the id of the request
:return: |
def info(environment, opts):
    """Display information about environment and running containers
    Usage:
    datacats info [-qr] [ENVIRONMENT]
    Options:
    -q --quiet Echo only the web URL or nothing if not running
    ENVIRONMENT may be an environment name or a path to an environment directory.
    Default: '.'
    """
    # NOTE: Python 2 module -- uses print statements.
    # An empty sites list means the data directory is damaged/incomplete.
    damaged = False
    sites = environment.sites
    if not environment.sites:
        sites = []
        damaged = True
    if opts['--quiet']:
        # Quiet mode: emit only "<site>: <url>" lines, then stop.
        if damaged:
            raise DatacatsError('Damaged datadir: cannot get address.')
        for site in sites:
            environment.site_name = site
            print '{}: {}'.format(site, environment.web_address())
        return
    datadir = environment.datadir
    if not environment.data_exists():
        datadir = ''
    elif damaged:
        datadir += ' (damaged)'
    print 'Environment name: ' + environment.name
    print ' Environment dir: ' + environment.target
    print ' Data dir: ' + datadir
    print ' Sites: ' + ' '.join(environment.sites)
    # Per-site details: containers, data dir state and web address.
    for site in environment.sites:
        print
        environment.site_name = site
        print ' Site: ' + site
        print ' Containers: ' + ' '.join(environment.containers_running())
        sitedir = environment.sitedir + (' (damaged)' if not environment.data_complete() else '')
        print ' Site dir: ' + sitedir
        addr = environment.web_address()
        if addr:
            print ' Available at: ' + addr | Display information about environment and running containers
Usage:
datacats info [-qr] [ENVIRONMENT]
Options:
-q --quiet Echo only the web URL or nothing if not running
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.' |
def get_steps_branch_len(self, length):
    """Get, how much steps will needed for a given branch length.

    Returns:
        float: The age the tree must achieve to reach the given branch length.
    """
    # Branch lengths shrink geometrically; solve base**steps == ratio by
    # taking a logarithm with the smallest branch factor as the base.
    base = min(self.branches[0][0])
    ratio = length / self.length
    return log(ratio, base)
Returns:
float: The age the tree must achieve to reach the given branch length. |
def list_archive(archive, verbosity=1, program=None, interactive=True):
    """List given archive.

    :param archive: path to an existing archive file (validated via
        ``util.check_existing_filename``).
    :param verbosity: verbosity level; a negative value suppresses the
        informational log line.
    :param program: optional explicit program to use for listing.
    :param interactive: whether the handler may interact with the user.
    """
    # Set default verbosity to 1 since the listing output should be visible.
    util.check_existing_filename(archive)
    if verbosity >= 0:
        util.log_info("Listing %s ..." % archive)
    return _handle_archive(archive, 'list', verbosity=verbosity,
                           interactive=interactive, program=program) | List given archive.
def chebyshev_neg(h1, h2): # 12 us @array, 36 us @list \w 100 bins
    r"""
    Chebyshev negative distance.
    Also Tchebychev distance, Minimum or :math:`L_{-\infty}` metric; equal to Minowski
    distance with :math:`p=-\infty`. For the case of :math:`p=+\infty`, use `chebyshev`.
    The Chebyshev distance between two histograms :math:`H` and :math:`H'` of size :math:`m` is
    defined as:
    .. math::
        d_{-\infty}(H, H') = \min_{m=1}^M|H_m-H'_m|
    *Attributes:*
    - semimetric (triangle equation satisfied?)
    *Attributes for normalized histograms:*
    - :math:`d(H, H')\in[0, 1]`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`
    *Attributes for not-normalized histograms:*
    - :math:`d(H, H')\in[0, \infty)`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`
    *Attributes for not-equal histograms:*
    - not applicable
    Parameters
    ----------
    h1 : sequence
        The first histogram.
    h2 : sequence
        The second histogram.
    Returns
    -------
    chebyshev_neg : float
        Chebyshev negative distance.
    See also
    --------
    minowski, chebyshev
    """
    h1, h2 = __prepare_histogram(h1, h2)
    # NOTE(review): ``scipy.absolute`` is a deprecated alias of
    # ``numpy.absolute`` and has been removed from recent SciPy releases;
    # consider switching to numpy here.
    return min(scipy.absolute(h1 - h2)) | r"""
Chebyshev negative distance.
Also Tchebychev distance, Minimum or :math:`L_{-\infty}` metric; equal to Minowski
distance with :math:`p=-\infty`. For the case of :math:`p=+\infty`, use `chebyshev`.
The Chebyshev distance between two histograms :math:`H` and :math:`H'` of size :math:`m` is
defined as:
.. math::
d_{-\infty}(H, H') = \min_{m=1}^M|H_m-H'_m|
*Attributes:*
- semimetric (triangle equation satisfied?)
*Attributes for normalized histograms:*
- :math:`d(H, H')\in[0, 1]`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-normalized histograms:*
- :math:`d(H, H')\in[0, \infty)`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-equal histograms:*
- not applicable
Parameters
----------
h1 : sequence
The first histogram.
h2 : sequence
The second histogram.
Returns
-------
chebyshev_neg : float
Chebyshev negative distance.
See also
--------
minowski, chebyshev |
def get_node(self, name):
    r"""
    Get a tree node structure.
    The structure is a dictionary with the following keys:
     * **parent** (*NodeName*) Parent node name, :code:`''` if the
       node is the root node
     * **children** (*list of NodeName*) Children node names, an
       empty list if node is a leaf
     * **data** (*list*) Node data, an empty list if node contains no data
    :param name: Node name
    :type name: string
    :rtype: dictionary
    :raises:
     * RuntimeError (Argument \`name\` is not valid)
     * RuntimeError (Node *[name]* not in tree)
    """
    # A truthy result from _validate_node_name signals an invalid name
    # (the error branch below depends on that convention).
    if self._validate_node_name(name):
        raise RuntimeError("Argument `name` is not valid")
    self._node_in_tree(name)  # raises if the node is absent from the tree
    return self._db[name] | r"""
Get a tree node structure.
The structure is a dictionary with the following keys:
* **parent** (*NodeName*) Parent node name, :code:`''` if the
node is the root node
* **children** (*list of NodeName*) Children node names, an
empty list if node is a leaf
* **data** (*list*) Node data, an empty list if node contains no data
:param name: Node name
:type name: string
:rtype: dictionary
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree) |
def generate_transit_lightcurve(
        times,
        mags=None,
        errs=None,
        paramdists={'transitperiod':sps.uniform(loc=0.1,scale=49.9),
                    'transitdepth':sps.uniform(loc=1.0e-4,scale=2.0e-2),
                    'transitduration':sps.uniform(loc=0.01,scale=0.29)},
        magsarefluxes=False,
):
    '''This generates fake planet transit light curves.

    Parameters
    ----------
    times : np.array
        This is an array of time values that will be used as the time base.
    mags,errs : np.array
        These arrays will have the model added to them. If either is
        None, `np.full_like(times, 0.0)` will used as a substitute and the model
        light curve will be centered around 0.0.
    paramdists : dict
        This is a dict containing parameter distributions to use for the
        model params, containing the following keys ::
            {'transitperiod', 'transitdepth', 'transitduration'}
        The values of these keys should all be 'frozen' scipy.stats distribution
        objects, e.g.:
        https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions
        The variability epoch will be automatically chosen from a uniform
        distribution between `times.min()` and `times.max()`.
        The ingress duration will be automatically chosen from a uniform
        distribution ranging from 0.05 to 0.5 of the transitduration.
        The transitdepth will be flipped automatically as appropriate if
        `magsarefluxes=True`.
    magsarefluxes : bool
        If the generated time series is meant to be a flux time-series, set this
        to True to get the correct sign of variability amplitude.

    Returns
    -------
    dict
        A dict of the form below is returned::
            {'vartype': 'planet',
             'params': {'transitperiod': generated value of period,
                        'transitepoch': generated value of epoch,
                        'transitdepth': generated value of transit depth,
                        'transitduration': generated value of transit duration,
                        'ingressduration': generated value of transit ingress
                                           duration},
             'times': the model times,
             'mags': the model mags,
             'errs': the model errs,
             'varperiod': the generated period of variability == 'transitperiod'
             'varamplitude': the generated amplitude of
                             variability == 'transitdepth'}
    '''
    if mags is None:
        mags = np.full_like(times, 0.0)
    if errs is None:
        errs = np.full_like(times, 0.0)
    # choose the epoch
    epoch = npr.random()*(times.max() - times.min()) + times.min()
    # choose the period, depth, duration
    period = paramdists['transitperiod'].rvs(size=1)
    depth = paramdists['transitdepth'].rvs(size=1)
    duration = paramdists['transitduration'].rvs(size=1)
    # figure out the ingress duration
    ingduration = npr.random()*(0.5*duration - 0.05*duration) + 0.05*duration
    # fix the transit depth if it needs to be flipped
    if magsarefluxes and depth < 0.0:
        depth = -depth
    elif not magsarefluxes and depth > 0.0:
        depth = -depth
    # generate the model
    modelmags, phase, ptimes, pmags, perrs = (
        transits.trapezoid_transit_func([period, epoch, depth,
                                         duration, ingduration],
                                        times,
                                        mags,
                                        errs)
    )
    # resort in original time order
    timeind = np.argsort(ptimes)
    mtimes = ptimes[timeind]
    mmags = modelmags[timeind]
    merrs = perrs[timeind]
    # return a dict with everything
    modeldict = {
        'vartype':'planet',
        # fix: np.asscalar was deprecated and removed in modern NumPy;
        # ndarray.item() is its exact drop-in replacement.
        'params':{x:y.item() for x,y in zip(['transitperiod',
                                             'transitepoch',
                                             'transitdepth',
                                             'transitduration',
                                             'ingressduration'],
                                            [period,
                                             epoch,
                                             depth,
                                             duration,
                                             ingduration])},
        'times':mtimes,
        'mags':mmags,
        'errs':merrs,
        # these are standard keys that help with later characterization of
        # variability as a function period, variability amplitude, object mag,
        # ndet, etc.
        'varperiod':period,
        'varamplitude':depth
    }
    return modeldict
Parameters
----------
times : np.array
This is an array of time values that will be used as the time base.
mags,errs : np.array
These arrays will have the model added to them. If either is
None, `np.full_like(times, 0.0)` will used as a substitute and the model
light curve will be centered around 0.0.
paramdists : dict
This is a dict containing parameter distributions to use for the
model params, containing the following keys ::
{'transitperiod', 'transitdepth', 'transitduration'}
The values of these keys should all be 'frozen' scipy.stats distribution
objects, e.g.:
https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions
The variability epoch will be automatically chosen from a uniform
distribution between `times.min()` and `times.max()`.
The ingress duration will be automatically chosen from a uniform
distribution ranging from 0.05 to 0.5 of the transitduration.
The transitdepth will be flipped automatically as appropriate if
`magsarefluxes=True`.
magsarefluxes : bool
If the generated time series is meant to be a flux time-series, set this
to True to get the correct sign of variability amplitude.
Returns
-------
dict
A dict of the form below is returned::
{'vartype': 'planet',
'params': {'transitperiod': generated value of period,
'transitepoch': generated value of epoch,
'transitdepth': generated value of transit depth,
'transitduration': generated value of transit duration,
'ingressduration': generated value of transit ingress
duration},
'times': the model times,
'mags': the model mags,
'errs': the model errs,
'varperiod': the generated period of variability == 'transitperiod'
'varamplitude': the generated amplitude of
variability == 'transitdepth'} |
def _force(self,z,t=0.):
    """
    NAME:
       _force
    PURPOSE:
       evaluate the force
    INPUT:
       z
       t
    OUTPUT:
       F_z(z,t;R)
    HISTORY:
       2010-07-13 - Written - Bovy (NYU)
    """
    # Vertical force at (R, z) minus the midplane (z=0) value, so the
    # returned force vanishes at z=0 by construction.
    return self._Pot.zforce(self._R,z,phi=self._phi,t=t,use_physical=False)\
        -self._Pot.zforce(self._R,0.,phi=self._phi,t=t,use_physical=False) | NAME:
_force
PURPOSE:
evaluate the force
INPUT:
z
t
OUTPUT:
F_z(z,t;R)
HISTORY:
2010-07-13 - Written - Bovy (NYU) |
def _generate_docstring(self, doc_type, quote):
    """Generate docstring.

    :param doc_type: output style; 'Numpydoc' or 'Googledoc'.
    :param quote: quote character (' or ") to use for the docstring.
    :return: the generated docstring text, or None when no parsable
        function definition is found below the current line.
    """
    docstring = None
    self.quote3 = quote * 3
    # Remember the opposite triple-quote style as well; presumably used to
    # avoid terminating the generated docstring early -- confirm.
    if quote == '"':
        self.quote3_other = "'''"
    else:
        self.quote3_other = '"""'
    result = self.get_function_definition_from_below_last_line()
    if result:
        func_def, __ = result
        func_info = FunctionInfo()
        func_info.parse_def(func_def)
        if func_info.has_info:
            # Parse the body too (when available) so argument/return info
            # can be filled into the generated template.
            func_body = self.get_function_body(func_info.func_indent)
            if func_body:
                func_info.parse_body(func_body)
            if doc_type == 'Numpydoc':
                docstring = self._generate_numpy_doc(func_info)
            elif doc_type == 'Googledoc':
                docstring = self._generate_google_doc(func_info)
    return docstring | Generate docstring.
def substitute_vars(template, replacements):
    """Replace ``${key}`` placeholders with respective values in a string.

    @param template: the string in which replacements should be made
    @param replacements: a dict or an iterable of (key, value) pairs
    @return: the template with all known placeholders substituted; a
        warning is logged if any ``${`` remains afterwards.
    """
    # Fix: the docstring promised dict support, but iterating a dict yields
    # only keys, which broke the (key, value) unpacking below.
    if isinstance(replacements, dict):
        replacements = replacements.items()
    result = template
    for (key, value) in replacements:
        result = result.replace('${' + key + '}', value)
    if '${' in result:
        logging.warning("A variable was not replaced in '%s'.", result)
    return result
@param template: the string in which replacements should be made
@param replacements: a dict or a list of pairs of keys and values |
def fetch_live(self, formatter=TableFormat):
    """
    Fetch a live stream query. This is the equivalent of selecting
    the "Play" option for monitoring fields within the SMC UI. Data will
    be streamed back in real time.

    :param formatter: Formatter type for data representation. Any type
        in :py:mod:`smc_monitoring.models.formatters`.
    :return: generator yielding results in specified format
    """
    fmt = formatter(self)
    # Stream result batches as they arrive; only newly *added* records are
    # formatted and yielded -- batches without additions are skipped.
    for results in self.execute():
        if 'records' in results and results['records'].get('added'):
            yield fmt.formatted(results['records']['added']) | Fetch a live stream query. This is the equivalent of selecting
the "Play" option for monitoring fields within the SMC UI. Data will
be streamed back in real time.
:param formatter: Formatter type for data representation. Any type
in :py:mod:`smc_monitoring.models.formatters`.
:return: generator yielding results in specified format |
def search_datasets(self, search_phrase, limit=None):
    """Search for datasets.

    Delegates to the backend's dataset index; ``limit`` optionally caps
    the number of results.
    """
    index = self.backend.dataset_index
    return index.search(search_phrase, limit=limit)
def generate(env):
    """Add Builders and construction variables for WiX to an Environment."""
    if not exists(env):
        return
    # candle (compiler) settings: .wxs sources -> .wixobj objects.
    env['WIXCANDLEFLAGS'] = ['-nologo']
    env['WIXCANDLEINCLUDE'] = []
    env['WIXCANDLECOM'] = '$WIXCANDLE $WIXCANDLEFLAGS -I $WIXCANDLEINCLUDE -o ${TARGET} ${SOURCE}'
    # NOTE(review): assumes WIXLIGHTFLAGS already exists in env (presumably
    # initialised by exists()); appending would raise KeyError otherwise.
    env['WIXLIGHTFLAGS'].append( '-nologo' )
    env['WIXLIGHTCOM'] = "$WIXLIGHT $WIXLIGHTFLAGS -out ${TARGET} ${SOURCES}"
    env['WIXSRCSUF'] = '.wxs'
    env['WIXOBJSUF'] = '.wixobj'
    object_builder = SCons.Builder.Builder(
        action = '$WIXCANDLECOM',
        suffix = '$WIXOBJSUF',
        src_suffix = '$WIXSRCSUF')
    # light (linker) consumes the candle objects to produce the installer;
    # chaining via src_builder lets .wxs sources be passed directly to WiX.
    linker_builder = SCons.Builder.Builder(
        action = '$WIXLIGHTCOM',
        src_suffix = '$WIXOBJSUF',
        src_builder = object_builder)
    env['BUILDERS']['WiX'] = linker_builder | Add Builders and construction variables for WiX to an Environment.
def agg_iter(self, lower_limit=None, upper_limit=None):
    """Aggregate and return dictionary to be indexed in ES.

    Yields one bulk-indexing action dict per (time interval, term) bucket
    between ``lower_limit`` (default: the stored bookmark) and
    ``upper_limit`` (default: now, UTC, second precision).
    """
    lower_limit = lower_limit or self.get_bookmark().isoformat()
    upper_limit = upper_limit or (
        datetime.datetime.utcnow().replace(microsecond=0).isoformat())
    aggregation_data = {}
    # Restrict the event search to the requested timestamp window.
    self.agg_query = Search(using=self.client,
                            index=self.event_index).\
        filter('range', timestamp={
            'gte': self._format_range_dt(lower_limit),
            'lte': self._format_range_dt(upper_limit)})
    # apply query modifiers
    for modifier in self.query_modifiers:
        self.agg_query = modifier(self.agg_query)
    # Bucket events by time interval, then by the aggregation field.
    hist = self.agg_query.aggs.bucket(
        'histogram',
        'date_histogram',
        field='timestamp',
        interval=self.aggregation_interval
    )
    terms = hist.bucket(
        'terms', 'terms', field=self.aggregation_field, size=0
    )
    # Keep the most recent hit per bucket so its fields can be copied.
    # (Registering the metric mutates ``terms``; the name is unused.)
    top = terms.metric(
        'top_hit', 'top_hits', size=1, sort={'timestamp': 'desc'}
    )
    for dst, (metric, src, opts) in self.metric_aggregation_fields.items():
        terms.metric(dst, metric, field=src, **opts)
    results = self.agg_query.execute()
    index_name = None
    for interval in results.aggregations['histogram'].buckets:
        interval_date = datetime.datetime.strptime(
            interval['key_as_string'], '%Y-%m-%dT%H:%M:%S')
        for aggregation in interval['terms'].buckets:
            aggregation_data['timestamp'] = interval_date.isoformat()
            aggregation_data[self.aggregation_field] = aggregation['key']
            aggregation_data['count'] = aggregation['doc_count']
            if self.metric_aggregation_fields:
                for f in self.metric_aggregation_fields:
                    aggregation_data[f] = aggregation[f]['value']
            doc = aggregation.top_hit.hits.hits[0]['_source']
            for destination, source in self.copy_fields.items():
                if isinstance(source, six.string_types):
                    aggregation_data[destination] = doc[source]
                else:
                    # ``source`` may be a callable computing the value
                    # from the source doc and the data built so far.
                    aggregation_data[destination] = source(
                        doc,
                        aggregation_data
                    )
            index_name = 'stats-{0}-{1}'.\
                format(self.event,
                       interval_date.strftime(
                           self.index_name_suffix))
            self.indices.add(index_name)
            yield dict(_id='{0}-{1}'.
                       format(aggregation['key'],
                              interval_date.strftime(
                                  self.doc_id_suffix)),
                       _index=index_name,
                       _type=self.aggregation_doc_type,
                       _source=aggregation_data)
    self.last_index_written = index_name | Aggregate and return dictionary to be indexed in ES.
def next_block(self):
    """
    This could probably be improved; at the moment it starts by trying to
    overshoot the desired compressed block size, then it reduces the input
    bytes one by one until it has met the required block size.

    Returns a (crc32, input_size, compressed_data) tuple, or None when the
    input is exhausted (or when shrinking reached zero input bytes).
    """
    assert self.pos <= self.input_len
    if self.pos == self.input_len:
        # All input consumed.
        return None
    # Overshoot
    i = self.START_OVERSHOOT
    while True:
        try_size = int(self.bs * i)
        size = self.check_request_size(try_size)
        c, d = self.compress_next_chunk(size)
        if size != try_size:
            # The request was clamped (e.g. end of input): stop growing.
            break
        if len(d) < self.bs:
            i += self.OVERSHOOT_INCREASE
        else:
            break
    # Reduce by one byte until we hit the target
    while True:
        if len(d) <= self.bs:
            # Commit: keep the compressor state, account for the consumed
            # input and the produced compressed bytes.
            self.c = c
            # self.c = self.factory()
            crc32 = zlib.crc32(self.get_input(size), 0xffffffff) & 0xffffffff
            self.pos += size
            self.compressed_bytes += len(d)
            return crc32, size, d
        size -= 1
        if size == 0:
            return None
        c, d = self.compress_next_chunk(size) | This could probably be improved; at the moment it starts by trying to overshoot the
desired compressed block size, then it reduces the input bytes one by one until it
has met the required block size |
def F_to_K(self, F, method='doubling'):
    """
    Compute agent 2's best cost-minimizing response K, given F.

    Parameters
    ----------
    F : array_like(float, ndim=2)
        A k x n array
    method : str, optional(default='doubling')
        Solution method used in solving the associated Riccati
        equation, str in {'doubling', 'qz'}.

    Returns
    -------
    K : array_like(float, ndim=2)
        Agent's best cost minimizing response for a given F
    P : array_like(float, ndim=2)
        The value function for a given F
    """
    # With agent 1's policy F fixed, agent 2 faces an ordinary LQ problem
    # with transformed matrices (signs flipped because agent 2 minimizes
    # the negative of agent 1's objective).
    Q2 = self.beta * self.theta
    R2 = - self.R - dot(F.T, dot(self.Q, F))
    A2 = self.A - dot(self.B, F)
    B2 = self.C
    lq = LQ(Q2, R2, A2, B2, beta=self.beta)
    # Undo the sign flip on the solver's outputs.
    neg_P, neg_K, d = lq.stationary_values(method=method)
    return -neg_K, -neg_P | Compute agent 2's best cost-minimizing response K, given F.
Parameters
----------
F : array_like(float, ndim=2)
A k x n array
method : str, optional(default='doubling')
Solution method used in solving the associated Riccati
equation, str in {'doubling', 'qz'}.
Returns
-------
K : array_like(float, ndim=2)
Agent's best cost minimizing response for a given F
P : array_like(float, ndim=2)
The value function for a given F |
def copy(self):
    '''
    makes a clone copy of the mapper. It won't clone the serializers or deserializers and it won't copy the events
    '''
    try:
        # Prefer the no-argument constructor; fall back to passing the
        # parameter dict for mapper classes that require it.
        tmp = self.__class__()
    except Exception:
        tmp = self.__class__(self._pdict)
    # Registries are shared with the clone, not copied (see docstring).
    # NOTE(review): the single- vs double-underscore mix (_serializers /
    # __deserializers) looks inconsistent; name mangling applies to the
    # latter inside a class body -- confirm this is intentional.
    tmp._serializers = self._serializers
    tmp.__deserializers = self.__deserializers
    return tmp | makes a clone copy of the mapper. It won't clone the serializers or deserializers and it won't copy the events
def Setup(self):
    """
    Initialize the local node.

    Resets the peer/address bookkeeping lists and assigns a random NodeId.

    Returns:
        None
    """
    self.Peers = []  # active nodes that we're connected to
    self.KNOWN_ADDRS = []  # node addresses that we've learned about from other nodes
    self.DEAD_ADDRS = []  # addresses that were performing poorly or we could not establish a connection to
    self.MissionsGlobal = []
    # NOTE(review): the lower bound 1294967200 may be a typo (cf. the upper
    # bound 4294967200) -- confirm the intended id range.
    self.NodeId = random.randint(1294967200, 4294967200) | Initialize the local node.
Returns: |
def _truthtable(inputs, pcdata):
    """Return a truth table.

    Degenerate cases collapse to the constant tables (no inputs) or to the
    single input variable (identity); everything else builds a TruthTable.
    """
    if not inputs and pcdata[0] in {PC_ZERO, PC_ONE}:
        return TTZERO if pcdata[0] == PC_ZERO else TTONE
    if len(inputs) == 1 and pcdata[0] == PC_ZERO and pcdata[1] == PC_ONE:
        return inputs[0]
    return TruthTable(inputs, pcdata)
def stats(self, key=None):
    """
    Return server stats.

    :param key: Optional if you want status from a key.
    :type key: six.string_types
    :return: A dict with server stats
    :rtype: dict
    """
    # TODO: Stats with key is not working.
    # Map each server's identifier to the stats it reports for ``key``.
    return {server.server: server.stats(key) for server in self.servers}
:param key: Optional if you want status from a key.
:type key: six.string_types
:return: A dict with server stats
:rtype: dict |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.