code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def configureLogger(self):
"""
Configures the python logging system to log to a debug file and to stdout for warn and above.
:return: the base logger.
"""
baseLogLevel = logging.DEBUG if self.isDebugLogging() else logging.INFO
# create recorder app root logger
logger = logging.getLogger(self._name)
logger.setLevel(baseLogLevel)
# file handler
fh = handlers.RotatingFileHandler(path.join(self._getConfigPath(), self._name + '.log'),
maxBytes=10 * 1024 * 1024, backupCount=10)
fh.setLevel(baseLogLevel)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.WARN)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(funcName)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
return logger | Configures the python logging system to log to a debug file and to stdout for warn and above.
:return: the base logger. |
def process_form(self, instance, field, form,
empty_marker=None, emptyReturnsMarker=False):
"""Return a list of dictionaries fit for ReferenceResultsField
consumption. Only services which have float()able entries in result,min
and max field will be included. If any of min, max, or result fields
are blank, the row value is ignored here.
"""
values = {}
# Process settings from the reference definition first
ref_def = form.get("ReferenceDefinition")
ref_def_uid = ref_def and ref_def[0]
if ref_def_uid:
ref_def_obj = api.get_object_by_uid(ref_def_uid)
ref_results = ref_def_obj.getReferenceResults()
# store reference results by UID to avoid duplicates
rr_by_uid = dict(map(lambda r: (r.get("uid"), r), ref_results))
values.update(rr_by_uid)
# selected services
service_uids = form.get("uids", [])
for uid in service_uids:
result = self._get_spec_value(form, uid, "result")
if not result:
# User has to set a value for result subfield at least
continue
# If neither min nor max have been set, assume we only accept a
# discrete result (like if % of error was 0).
s_min = self._get_spec_value(form, uid, "min", result)
s_max = self._get_spec_value(form, uid, "max", result)
service = api.get_object_by_uid(uid)
values[uid] = {
"keyword": service.getKeyword(),
"uid": uid,
"result": result,
"min": s_min,
"max": s_max
}
return values.values(), {} | Return a list of dictionaries fit for ReferenceResultsField
consumption. Only services which have float()able entries in result,min
and max field will be included. If any of min, max, or result fields
are blank, the row value is ignored here. |
def serve_forever(self, poll_interval=0.5):
"""
Handle one request at a time until shutdown.
Polls for shutdown every poll_interval seconds. Ignores
self.timeout. If you need to do periodic tasks, do them in
another thread.
"""
self.__is_shut_down.clear()
try:
while not self.__shutdown_request:
r, w, e = _eintr_retry(select.select, [self], [], [], poll_interval)
if self in r:
self._handle_request_noblock()
finally:
self.__shutdown_request = False
self.__is_shut_down.set() | Handle one request at a time until shutdown.
Polls for shutdown every poll_interval seconds. Ignores
self.timeout. If you need to do periodic tasks, do them in
another thread. |
def redirect(self):
""" This is the HTTP-redirect endpoint """
logger.info("--- In SSO Redirect ---")
saml_msg = self.unpack_redirect()
try:
_key = saml_msg["key"]
saml_msg = IDP.ticket[_key]
self.req_info = saml_msg["req_info"]
del IDP.ticket[_key]
except KeyError:
try:
self.req_info = IDP.parse_authn_request(saml_msg["SAMLRequest"],
BINDING_HTTP_REDIRECT)
except KeyError:
resp = BadRequest("Message signature verification failure")
return resp(self.environ, self.start_response)
_req = self.req_info.message
if "SigAlg" in saml_msg and "Signature" in saml_msg: # Signed
# request
issuer = _req.issuer.text
_certs = IDP.metadata.certs(issuer, "any", "signing")
verified_ok = False
for cert in _certs:
if verify_redirect_signature(saml_msg, IDP.sec.sec_backend,
cert):
verified_ok = True
break
if not verified_ok:
resp = BadRequest("Message signature verification failure")
return resp(self.environ, self.start_response)
if self.user:
if _req.force_authn:
saml_msg["req_info"] = self.req_info
key = self._store_request(saml_msg)
return self.not_authn(key, _req.requested_authn_context)
else:
return self.operation(saml_msg, BINDING_HTTP_REDIRECT)
else:
saml_msg["req_info"] = self.req_info
key = self._store_request(saml_msg)
return self.not_authn(key, _req.requested_authn_context)
else:
return self.operation(saml_msg, BINDING_HTTP_REDIRECT) | This is the HTTP-redirect endpoint |
def touch(fname, times=None):
"""Creates an empty file at fname, creating path if necessary
Answer taken from Stack Overflow http://stackoverflow.com/a/1160227
User: ephemient http://stackoverflow.com/users/20713
License: CC-BY-SA 3.0 https://creativecommons.org/licenses/by-sa/3.0/
"""
fpath, f = os.path.split(fname)
if not os.path.exists(fpath):
os.makedirs(fpath)
with open(fname, 'a'):
os.utime(fname, times) | Creates an empty file at fname, creating path if necessary
Answer taken from Stack Overflow http://stackoverflow.com/a/1160227
User: ephemient http://stackoverflow.com/users/20713
License: CC-BY-SA 3.0 https://creativecommons.org/licenses/by-sa/3.0/ |
def getTypeByPosition(self, idx):
"""Return ASN.1 type object by its position in fields set.
Parameters
----------
idx: :py:class:`int`
Field index
Returns
-------
:
ASN.1 type
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
If given position is out of fields range
"""
try:
return self.__namedTypes[idx].asn1Object
except IndexError:
raise error.PyAsn1Error('Type position out of range') | Return ASN.1 type object by its position in fields set.
Parameters
----------
idx: :py:class:`int`
Field index
Returns
-------
:
ASN.1 type
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
If given position is out of fields range |
def _format_output(kernel_restart, packages, verbose, restartable, nonrestartable, restartservicecommands,
restartinitcommands):
'''
Formats the output of the restartcheck module.
Returns:
String - formatted output.
Args:
kernel_restart: indicates that newer kernel is instaled
packages: list of packages that should be restarted
verbose: enables extensive output
restartable: list of restartable packages
nonrestartable: list of non-restartable packages
restartservicecommands: list of commands to restart services
restartinitcommands: list of commands to restart init.d scripts
'''
if not verbose:
packages = restartable + nonrestartable
if kernel_restart:
packages.append('System restart required.')
return packages
else:
ret = ''
if kernel_restart:
ret = 'System restart required.\n\n'
if packages:
ret += "Found {0} processes using old versions of upgraded files.\n".format(len(packages))
ret += "These are the packages:\n"
if restartable:
ret += "Of these, {0} seem to contain systemd service definitions or init scripts " \
"which can be used to restart them:\n".format(len(restartable))
for package in restartable:
ret += package + ':\n'
for program in packages[package]['processes']:
ret += program + '\n'
if restartservicecommands:
ret += "\n\nThese are the systemd services:\n"
ret += '\n'.join(restartservicecommands)
if restartinitcommands:
ret += "\n\nThese are the initd scripts:\n"
ret += '\n'.join(restartinitcommands)
if nonrestartable:
ret += "\n\nThese processes {0} do not seem to have an associated init script " \
"to restart them:\n".format(len(nonrestartable))
for package in nonrestartable:
ret += package + ':\n'
for program in packages[package]['processes']:
ret += program + '\n'
return ret | Formats the output of the restartcheck module.
Returns:
String - formatted output.
Args:
kernel_restart: indicates that newer kernel is instaled
packages: list of packages that should be restarted
verbose: enables extensive output
restartable: list of restartable packages
nonrestartable: list of non-restartable packages
restartservicecommands: list of commands to restart services
restartinitcommands: list of commands to restart init.d scripts |
def parse_unknown_args(args):
"""
Parse arguments not consumed by arg parser into a dicitonary
"""
retval = {}
preceded_by_key = False
for arg in args:
if arg.startswith('--'):
if '=' in arg:
key = arg.split('=')[0][2:]
value = arg.split('=')[1]
retval[key] = value
else:
key = arg[2:]
preceded_by_key = True
elif preceded_by_key:
retval[key] = arg
preceded_by_key = False
return retval | Parse arguments not consumed by arg parser into a dicitonary |
def chmod(self, path, mode):
"""
Change the mode (permissions) of a file. The permissions are
unix-style and identical to those used by python's C{os.chmod}
function.
@param path: path of the file to change the permissions of
@type path: str
@param mode: new permissions
@type mode: int
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'chmod(%r, %r)' % (path, mode))
attr = SFTPAttributes()
attr.st_mode = mode
self._request(CMD_SETSTAT, path, attr) | Change the mode (permissions) of a file. The permissions are
unix-style and identical to those used by python's C{os.chmod}
function.
@param path: path of the file to change the permissions of
@type path: str
@param mode: new permissions
@type mode: int |
def _validate_place_types(self, types):
"""Validate place types and return a mapping for use in requests."""
for pt in types:
if pt not in self.place_types:
raise InvalidPlaceTypeError(pt)
return {'types': ",".join(types)} | Validate place types and return a mapping for use in requests. |
def manhattan_distant(vector1, vector2):
"""曼哈顿距离"""
vector1 = np.mat(vector1)
vector2 = np.mat(vector2)
return np.sum(np.abs(vector1 - vector2)) | 曼哈顿距离 |
def key_value_to_tree(data):
'''
Convert key/value to tree
'''
tree = {}
for flatkey, value in six.iteritems(data):
t = tree
keys = flatkey.split(__opts__['pepa_delimiter'])
for i, key in enumerate(keys, 1):
if i == len(keys):
t[key] = value
else:
t = t.setdefault(key, {})
return tree | Convert key/value to tree |
def args_range(min_value, max_value, *args):
"""
检查参数范围
"""
not_null(*args)
if not all(map(lambda v: min_value <= v <= max_value, args)):
raise ValueError("Argument must be between {0} and {1}!".format(min_value, max_value)) | 检查参数范围 |
def _hammer_function_precompute(self,x0, L, Min, model):
"""
Pre-computes the parameters of a penalizer centered at x0.
"""
if x0 is None: return None, None
if len(x0.shape)==1: x0 = x0[None,:]
m = model.predict(x0)[0]
pred = model.predict(x0)[1].copy()
pred[pred<1e-16] = 1e-16
s = np.sqrt(pred)
r_x0 = (m-Min)/L
s_x0 = s/L
r_x0 = r_x0.flatten()
s_x0 = s_x0.flatten()
return r_x0, s_x0 | Pre-computes the parameters of a penalizer centered at x0. |
def apply_bbox(sf,ax):
"""
Use bbox as xlim and ylim in ax
"""
limits = sf.bbox
xlim = limits[0],limits[2]
ylim = limits[1],limits[3]
ax.set_xlim(xlim)
ax.set_ylim(ylim) | Use bbox as xlim and ylim in ax |
def run_gblocks(align_fasta_file, **kwargs):
"""
remove poorly aligned positions and divergent regions with Gblocks
"""
cl = GblocksCommandline(aln_file=align_fasta_file, **kwargs)
r, e = cl.run()
print("Gblocks:", cl, file=sys.stderr)
if e:
print("***Gblocks could not run", file=sys.stderr)
return None
else:
print(r, file=sys.stderr)
alignp = re.sub(r'.*Gblocks alignment:.*\(([0-9]{1,3}) %\).*', \
r'\1', r, flags=re.DOTALL)
alignp = int(alignp)
if alignp <= 10:
print("** WARNING ** Only %s %% positions retained by Gblocks. " \
"Results aborted. Using original alignment instead.\n" % alignp, file=sys.stderr)
return None
else:
return align_fasta_file+"-gb" | remove poorly aligned positions and divergent regions with Gblocks |
def get_file_courses(self, id, course_id, include=None):
"""
Get file.
Returns the standard attachment json object
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - include
"""Array of additional information to include.
"user":: the user who uploaded the file or last edited its content
"usage_rights":: copyright and license information for the file (see UsageRights)"""
if include is not None:
self._validate_enum(include, ["user"])
params["include"] = include
self.logger.debug("GET /api/v1/courses/{course_id}/files/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/files/{id}".format(**path), data=data, params=params, single_item=True) | Get file.
Returns the standard attachment json object |
def tables_get(self, table_name):
"""Issues a request to retrieve information about a table.
Args:
table_name: a tuple representing the full name of the table.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
return datalab.utils.Http.request(url, credentials=self._credentials) | Issues a request to retrieve information about a table.
Args:
table_name: a tuple representing the full name of the table.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. |
def set_opt(self, name, value):
""" Set option.
"""
self.cache['opts'][name] = value
if name == 'compress':
self.cache['delims'] = self.def_delims if not value else (
'',
'',
'') | Set option. |
def watch(self, path, recursive=False):
"""Watch for files in a directory and apply normalizations.
Watch for new or changed files in a directory and apply
normalizations over them.
Args:
path: Path to the directory.
recursive: Whether to find files recursively or not.
"""
self._logger.info('Initializing watcher for path "%s"', path)
handler = FileHandler(self)
self._observer = Observer()
self._observer.schedule(handler, path, recursive)
self._logger.info('Starting watcher')
self._observer.start()
self._watch = True
try:
self._logger.info('Waiting for file events')
while self._watch:
time.sleep(1)
except KeyboardInterrupt: # pragma: no cover
self.stop_watching()
self._observer.join() | Watch for files in a directory and apply normalizations.
Watch for new or changed files in a directory and apply
normalizations over them.
Args:
path: Path to the directory.
recursive: Whether to find files recursively or not. |
def wait_script(name,
source=None,
template=None,
onlyif=None,
unless=None,
cwd=None,
runas=None,
shell=None,
env=None,
stateful=False,
umask=None,
use_vt=False,
output_loglevel='debug',
hide_output=False,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs):
'''
Download a script from a remote source and execute it only if a watch
statement calls it.
source
The source script being downloaded to the minion, this source script is
hosted on the salt master server. If the file is located on the master
in the directory named spam, and is called eggs, the source string is
salt://spam/eggs
template
If this setting is applied then the named templating engine will be
used to render the downloaded file, currently jinja, mako, and wempy
are supported
name
The command to execute, remember that the command will execute with the
path and permissions of the salt-minion.
onlyif
A command to run as a check, run the named command only if the command
passed to the ``onlyif`` option returns true
unless
A command to run as a check, only run the named command if the command
passed to the ``unless`` option returns false
cwd
The current working directory to execute the command in, defaults to
/root
runas
The user name to run the command as
shell
The shell to use for execution, defaults to the shell grain
env
A list of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.wait_script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.wait_script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: jinja
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
umask
The umask (in octal) to use when running the command.
stateful
The command being executed is expected to return data about executing
a state. For more information, see the :ref:`stateful-argument` section.
use_vt
Use VT utils (saltstack) to stream the command output more
interactively to the console and the logs.
This is experimental.
output_loglevel : debug
Control the loglevel at which the output from the command is logged to
the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
hide_output : False
Suppress stdout and stderr in the state's results.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
success_retcodes: This parameter will be allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
success_stdout: This parameter will be allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: Neon
success_stderr: This parameter will be allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: Neon
'''
# Ignoring our arguments is intentional.
return {'name': name,
'changes': {},
'result': True,
'comment': ''} | Download a script from a remote source and execute it only if a watch
statement calls it.
source
The source script being downloaded to the minion, this source script is
hosted on the salt master server. If the file is located on the master
in the directory named spam, and is called eggs, the source string is
salt://spam/eggs
template
If this setting is applied then the named templating engine will be
used to render the downloaded file, currently jinja, mako, and wempy
are supported
name
The command to execute, remember that the command will execute with the
path and permissions of the salt-minion.
onlyif
A command to run as a check, run the named command only if the command
passed to the ``onlyif`` option returns true
unless
A command to run as a check, only run the named command if the command
passed to the ``unless`` option returns false
cwd
The current working directory to execute the command in, defaults to
/root
runas
The user name to run the command as
shell
The shell to use for execution, defaults to the shell grain
env
A list of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.wait_script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.wait_script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: jinja
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
umask
The umask (in octal) to use when running the command.
stateful
The command being executed is expected to return data about executing
a state. For more information, see the :ref:`stateful-argument` section.
use_vt
Use VT utils (saltstack) to stream the command output more
interactively to the console and the logs.
This is experimental.
output_loglevel : debug
Control the loglevel at which the output from the command is logged to
the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
hide_output : False
Suppress stdout and stderr in the state's results.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
success_retcodes: This parameter will be allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
success_stdout: This parameter will be allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: Neon
success_stderr: This parameter will be allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: Neon |
def list_view_on_selected(self, widget, selected_item_key):
""" The selection event of the listView, returns a key of the clicked event.
You can retrieve the item rapidly
"""
self.lbl.set_text('List selection: ' + self.listView.children[selected_item_key].get_text()) | The selection event of the listView, returns a key of the clicked event.
You can retrieve the item rapidly |
def _lab_to_rgb(labs):
"""Convert Nx3 or Nx4 lab to rgb"""
# adapted from BSD-licensed work in MATLAB by Mark Ruzon
# Based on ITU-R Recommendation BT.709 using the D65
labs, n_dim = _check_color_dim(labs)
# Convert Lab->XYZ (silly indexing used to preserve dimensionality)
y = (labs[:, 0] + 16.) / 116.
x = (labs[:, 1] / 500.) + y
z = y - (labs[:, 2] / 200.)
xyz = np.concatenate(([x], [y], [z])) # 3xN
over = xyz > 0.2068966
xyz[over] = xyz[over] ** 3.
xyz[~over] = (xyz[~over] - 0.13793103448275862) / 7.787
# Convert XYZ->LAB
rgbs = np.dot(_xyz2rgb_norm, xyz).T
over = rgbs > 0.0031308
rgbs[over] = 1.055 * (rgbs[over] ** (1. / 2.4)) - 0.055
rgbs[~over] *= 12.92
if n_dim == 4:
rgbs = np.concatenate((rgbs, labs[:, 3]), axis=1)
rgbs = np.clip(rgbs, 0., 1.)
return rgbs | Convert Nx3 or Nx4 lab to rgb |
def gen_df_state(
list_table: list,
set_initcond: set,
set_runcontrol: set,
set_input_runcontrol: set)->pd.DataFrame:
'''generate dataframe of all state variables used by supy
Parameters
----------
list_table : list
csv files for site info: `SUEWS_xx.csv` on github SUEWS-docs repo
set_initcond : set
initial condition related variables
set_runcontrol : set
runcontrol related variables
set_input_runcontrol : set
runcontrol related variables used as supy input
Returns
-------
pd.DataFrame
Description of all state variables used by supy
'''
# generate a base df for site characteristics related variables
df_var_site = gen_df_site(list_table)
# generate a base df for runcontrol related variables
df_var_runcontrol = gen_df_runcontrol(
set_initcond, set_runcontrol, set_input_runcontrol)
# generate a base df for initial condition related variables
df_var_initcond = gen_df_initcond(set_initcond, set_runcontrol)
# further processing by modifying several entries
df_var_state = proc_df_state(
df_var_site, df_var_runcontrol, df_var_initcond)
# reorganising the result:
df_var_state = df_var_state.sort_index()
# delete duplicates while considering the variable name (stored as index)
df_var_state = df_var_state.reset_index()
df_var_state = df_var_state.drop_duplicates()
# convert index back
df_var_state = df_var_state.set_index('variable')
return df_var_state | generate dataframe of all state variables used by supy
Parameters
----------
list_table : list
csv files for site info: `SUEWS_xx.csv` on github SUEWS-docs repo
set_initcond : set
initial condition related variables
set_runcontrol : set
runcontrol related variables
set_input_runcontrol : set
runcontrol related variables used as supy input
Returns
-------
pd.DataFrame
Description of all state variables used by supy |
def parse_reports(self):
""" Find RSeQC junction_saturation frequency reports and parse their data """
# Set up vars
self.junction_saturation_all = dict()
self.junction_saturation_known = dict()
self.junction_saturation_novel = dict()
# Go through files and parse data
for f in self.find_log_files('rseqc/junction_saturation'):
parsed = dict()
for l in f['f'].splitlines():
r = re.search(r"^([xyzw])=c\(([\d,]+)\)$", l)
if r:
parsed[r.group(1)] = [float(i) for i in r.group(2).split(',')]
if len(parsed) == 4:
if parsed['z'][-1] == 0:
log.warn("Junction saturation data all zeroes, skipping: '{}'".format(f['s_name']))
else:
if f['s_name'] in self.junction_saturation_all:
log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
self.add_data_source(f, section='junction_saturation')
self.junction_saturation_all[f['s_name']] = OrderedDict()
self.junction_saturation_known[f['s_name']] = OrderedDict()
self.junction_saturation_novel[f['s_name']] = OrderedDict()
for k, v in enumerate(parsed['x']):
self.junction_saturation_all[f['s_name']][v] = parsed['z'][k]
self.junction_saturation_known[f['s_name']][v] = parsed['y'][k]
self.junction_saturation_novel[f['s_name']][v] = parsed['w'][k]
# Filter to strip out ignored sample names
self.junction_saturation_all = self.ignore_samples(self.junction_saturation_all)
self.junction_saturation_known = self.ignore_samples(self.junction_saturation_known)
self.junction_saturation_novel = self.ignore_samples(self.junction_saturation_novel)
if len(self.junction_saturation_all) > 0:
# Add line graph to section
pconfig = {
'id': 'rseqc_junction_saturation_plot',
'title': 'RSeQC: Junction Saturation',
'ylab': 'Number of Junctions',
'ymin': 0,
'xlab': "Percent of reads",
'xmin': 0,
'xmax': 100,
'tt_label': "<strong>{point.x}% of reads</strong>: {point.y:.2f}",
'data_labels': [
{'name': 'Known Junctions'},
{'name': 'Novel Junctions'},
{'name': 'All Junctions'}
],
'cursor': 'pointer',
'click_func': plot_single()
}
self.add_section (
name = 'Junction Saturation',
anchor = 'rseqc-junction_saturation',
description = '''<a href="http://rseqc.sourceforge.net/#junction-saturation-py" target="_blank">Junction Saturation</a>
counts the number of known splicing junctions that are observed
in each dataset. If sequencing depth is sufficient, all (annotated) splice junctions should
be rediscovered, resulting in a curve that reaches a plateau. Missing low abundance splice
junctions can affect downstream analysis.</p>
<div class="alert alert-info" id="rseqc-junction_sat_single_hint">
<span class="glyphicon glyphicon-hand-up"></span>
Click a line to see the data side by side (as in the original RSeQC plot).
</div><p>''',
plot = linegraph.plot([
self.junction_saturation_known,
self.junction_saturation_novel,
self.junction_saturation_all
], pconfig)
)
# Return number of samples found
return len(self.junction_saturation_all) | Find RSeQC junction_saturation frequency reports and parse their data |
def most_even(number, group):
"""Divide a number into a list of numbers as even as possible."""
count, rest = divmod(number, group)
counts = zip_longest([count] * group, [1] * rest, fillvalue=0)
chunks = [sum(one) for one in counts]
logging.debug('chunks: %s', chunks)
return chunks | Divide a number into a list of numbers as even as possible. |
def unique(seen, *iterables):
"""
Get the unique items in iterables while preserving order. Note that this
mutates the seen set provided only when the returned generator is used.
Args:
seen (set): either an empty set, or the set of things already seen
*iterables: one or more iterable lists to chain together
Returns:
generator:
"""
_add = seen.add
# return a generator of the unique items and the set of the seen items
# the seen set will mutate when the generator is iterated over
return (i for i in chain(*iterables) if i not in seen and not _add(i)) | Get the unique items in iterables while preserving order. Note that this
mutates the seen set provided only when the returned generator is used.
Args:
seen (set): either an empty set, or the set of things already seen
*iterables: one or more iterable lists to chain together
Returns:
generator: |
def dump(doc, output_stream=None):
"""
Dump a :class:`.Doc` object into a JSON-encoded text string.
The output will be sent to :data:`sys.stdout` unless an alternative
text stream is given.
To dump to :data:`sys.stdout` just do:
>>> import panflute as pf
>>> doc = pf.Doc(Para(Str('a'))) # Create sample document
>>> pf.dump(doc)
To dump to file:
>>> with open('some-document.json', 'w'. encoding='utf-8') as f:
>>> pf.dump(doc, f)
To dump to a string:
>>> import io
>>> with io.StringIO() as f:
>>> pf.dump(doc, f)
>>> contents = f.getvalue()
:param doc: document, usually created with :func:`.load`
:type doc: :class:`.Doc`
:param output_stream: text stream used as output
(default is :data:`sys.stdout`)
"""
assert type(doc) == Doc, "panflute.dump needs input of type panflute.Doc"
if output_stream is None:
sys.stdout = codecs.getwriter("utf-8")(sys.stdout) if py2 else codecs.getwriter("utf-8")(sys.stdout.detach())
output_stream = sys.stdout
# Switch to legacy JSON output; eg: {'t': 'Space', 'c': []}
if doc.api_version is None:
# Switch .to_json() to legacy
Citation.backup = Citation.to_json
Citation.to_json = Citation.to_json_legacy
# Switch ._slots_to_json() to legacy
for E in [Table, OrderedList, Quoted, Math]:
E.backup = E._slots_to_json
E._slots_to_json = E._slots_to_json_legacy
# Switch .to_json() to method of base class
for E in EMPTY_ELEMENTS:
E.backup = E.to_json
E.to_json = Element.to_json
json_serializer = lambda elem: elem.to_json()
output_stream.write(json.dumps(
obj=doc,
default=json_serializer, # Serializer
check_circular=False,
separators=(',', ':'), # Compact separators, like Pandoc
ensure_ascii=False # For Pandoc compat
))
# Undo legacy changes
if doc.api_version is None:
Citation.to_json = Citation.backup
for E in [Table, OrderedList, Quoted, Math]:
E._slots_to_json = E.backup
for E in EMPTY_ELEMENTS:
E.to_json = E.backup | Dump a :class:`.Doc` object into a JSON-encoded text string.
The output will be sent to :data:`sys.stdout` unless an alternative
text stream is given.
To dump to :data:`sys.stdout` just do:
>>> import panflute as pf
>>> doc = pf.Doc(Para(Str('a'))) # Create sample document
>>> pf.dump(doc)
To dump to file:
>>> with open('some-document.json', 'w'. encoding='utf-8') as f:
>>> pf.dump(doc, f)
To dump to a string:
>>> import io
>>> with io.StringIO() as f:
>>> pf.dump(doc, f)
>>> contents = f.getvalue()
:param doc: document, usually created with :func:`.load`
:type doc: :class:`.Doc`
:param output_stream: text stream used as output
(default is :data:`sys.stdout`) |
def v1_highlights_get(response, kvlclient, file_id_str, max_elapsed = 300):
'''Obtain highlights for a document POSTed previously to this end
point. See documentation for v1_highlights_post for further
details. If the `state` is still `pending` for more than
`max_elapsed` after the start of the `WorkUnit`, then this reports
an error, although the `WorkUnit` may continue in the background.
'''
file_id = make_file_id(file_id_str)
kvlclient.setup_namespace(highlights_kvlayer_tables)
payload_strs = list(kvlclient.get('highlights', file_id))
if not (payload_strs and payload_strs[0][1]):
response.status = 500
payload = {
'state': ERROR,
'error': {
'code': 8,
'message': 'unknown error'}}
logger.critical('got bogus info for %r: %r', file_id, payload_strs)
else:
payload_str = payload_strs[0][1]
try:
payload = json.loads(payload_str)
if payload['state'] == HIGHLIGHTS_PENDING:
elapsed = time.time() - payload.get('start', 0)
if elapsed > max_elapsed:
response.status = 500
payload = {
'state': ERROR,
'error': {
'code': 8,
'message': 'hit timeout'}}
logger.critical('hit timeout on %r', file_id)
kvlclient.put('highlights', (file_id, json.dumps(payload)))
else:
payload['elapsed'] = elapsed
logger.info('returning stored payload for %r', file_id)
except Exception, exc:
logger.critical('failed to decode out of %r',
payload_str, exc_info=True)
response.status = 400
payload = {
'state': ERROR,
'error': {
'code': 9,
'message': 'nothing known about file_id=%r' % file_id}
}
# only place where payload is returned
return payload | Obtain highlights for a document POSTed previously to this end
point. See documentation for v1_highlights_post for further
details. If the `state` is still `pending` for more than
`max_elapsed` after the start of the `WorkUnit`, then this reports
an error, although the `WorkUnit` may continue in the background. |
def page(self, course):
""" Get all data and display the page """
if not self.webdav_host:
raise web.notfound()
url = self.webdav_host + "/" + course.get_id()
username = self.user_manager.session_username()
apikey = self.user_manager.session_api_key()
return self.template_helper.get_renderer().course_admin.webdav(course, url, username, apikey) | Get all data and display the page |
def public_key_to_connection_id(self, public_key):
"""
Get stored connection id for a public key.
"""
with self._connections_lock:
for connection_id, connection_info in self._connections.items():
if connection_info.public_key == public_key:
return connection_id
return None | Get stored connection id for a public key. |
def get_effective_domain_id(request):
"""Gets the id of the default domain.
If the requests default domain is the same as DEFAULT_DOMAIN,
return None.
"""
default_domain = get_default_domain(request)
domain_id = default_domain.get('id')
domain_name = default_domain.get('name')
return None if domain_name == DEFAULT_DOMAIN else domain_id | Gets the id of the default domain.
If the requests default domain is the same as DEFAULT_DOMAIN,
return None. |
def get_url_endpoint(self):
"""
Returns the Hypermap endpoint for a layer.
This endpoint will be the WMTS MapProxy endpoint, only for WM we use the original endpoint.
"""
endpoint = self.url
if self.type not in ('Hypermap:WorldMap',):
endpoint = 'registry/%s/layer/%s/map/wmts/1.0.0/WMTSCapabilities.xml' % (
self.catalog.slug,
self.id
)
return endpoint | Returns the Hypermap endpoint for a layer.
This endpoint will be the WMTS MapProxy endpoint, only for WM we use the original endpoint. |
def kick_job(self, job: JobOrID) -> None:
"""Moves a delayed or buried job into the ready queue.
:param job: The job or job ID to kick.
"""
self._send_cmd(b'kick-job %d' % _to_id(job), b'KICKED') | Moves a delayed or buried job into the ready queue.
:param job: The job or job ID to kick. |
def experiments_predictions_list(self, listing_url, offset=0, limit=-1, properties=None):
"""Get list of experiment resources from a SCO-API.
Parameters
----------
listing_url : string
url for experiments run listing.
offset : int, optional
Starting offset for returned list items
limit : int, optional
Limit the number of items in the result
properties : List(string)
List of additional object properties to be included for items in
the result
Returns
-------
List(scoserv.ModelRunDescriptor)
List of model run descriptors
"""
return sco.get_run_listing(
listing_url,
offset=offset,
limit=limit,
properties=properties
) | Get list of experiment resources from a SCO-API.
Parameters
----------
listing_url : string
url for experiments run listing.
offset : int, optional
Starting offset for returned list items
limit : int, optional
Limit the number of items in the result
properties : List(string)
List of additional object properties to be included for items in
the result
Returns
-------
List(scoserv.ModelRunDescriptor)
List of model run descriptors |
def res_to_str(res):
"""
:param res: :class:`requests.Response` object
Parse the given request and generate an informative string from it
"""
if 'Authorization' in res.request.headers:
res.request.headers['Authorization'] = "*****"
return """
####################################
url = %s
headers = %s
-------- data sent -----------------
%s
------------------------------------
@@@@@ response @@@@@@@@@@@@@@@@
headers = %s
code = %d
reason = %s
--------- data received ------------
%s
------------------------------------
####################################
""" % (res.url,
str(res.request.headers),
OLD_REQ and res.request.data or res.request.body,
res.headers,
res.status_code,
res.reason,
res.text) | :param res: :class:`requests.Response` object
Parse the given request and generate an informative string from it |
def plot(feature, mp=None, style_function=None, **map_kwargs):
"""Plots a GeoVector in an ipyleaflet map.
Parameters
----------
feature : telluric.vectors.GeoVector, telluric.features.GeoFeature, telluric.collections.BaseCollection
Data to plot.
mp : ipyleaflet.Map, optional
Map in which to plot, default to None (creates a new one).
style_function : func
Function that returns an style dictionary for
map_kwargs : kwargs, optional
Extra parameters to send to ipyleaflet.Map.
"""
map_kwargs.setdefault('basemap', basemaps.Stamen.Terrain)
if feature.is_empty:
warnings.warn("The geometry is empty.")
mp = Map(**map_kwargs) if mp is None else mp
else:
if mp is None:
center = feature.envelope.centroid.reproject(WGS84_CRS)
zoom = zoom_level_from_geometry(feature.envelope)
mp = Map(center=(center.y, center.x), zoom=zoom, **map_kwargs)
mp.add_layer(layer_from_element(feature, style_function))
return mp | Plots a GeoVector in an ipyleaflet map.
Parameters
----------
feature : telluric.vectors.GeoVector, telluric.features.GeoFeature, telluric.collections.BaseCollection
Data to plot.
mp : ipyleaflet.Map, optional
Map in which to plot, default to None (creates a new one).
style_function : func
Function that returns an style dictionary for
map_kwargs : kwargs, optional
Extra parameters to send to ipyleaflet.Map. |
def authenticate(name, remote_addr, password, cert, key, verify_cert=True):
'''
Authenticate with a remote peer.
.. notes:
This function makes every time you run this a connection
to remote_addr, you better call this only once.
remote_addr :
An URL to a remote Server, you also have to give cert and key if you
provide remote_addr!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
password :
The PaSsW0rD
cert :
PEM Formatted SSL Zertifikate.
Examples:
/root/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
/root/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
name:
Ignore this. This is just here for salt.
'''
ret = {
'name': name,
'remote_addr': remote_addr,
'cert': cert,
'key': key,
'verify_cert': verify_cert
}
try:
client = __salt__['lxd.pylxd_client_get'](
remote_addr, cert, key, verify_cert
)
except SaltInvocationError as e:
return _error(ret, six.text_type(e))
except CommandExecutionError as e:
return _error(ret, six.text_type(e))
if client.trusted:
return _success(ret, "Already authenticated.")
try:
result = __salt__['lxd.authenticate'](
remote_addr, password, cert, key, verify_cert
)
except CommandExecutionError as e:
return _error(ret, six.text_type(e))
if result is not True:
return _error(
ret,
"Failed to authenticate with peer: {0}".format(remote_addr)
)
msg = "Successfully authenticated with peer: {0}".format(remote_addr)
ret['changes'] = msg
return _success(
ret,
msg
) | Authenticate with a remote peer.
.. notes:
This function makes every time you run this a connection
to remote_addr, you better call this only once.
remote_addr :
An URL to a remote Server, you also have to give cert and key if you
provide remote_addr!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
password :
The PaSsW0rD
cert :
PEM Formatted SSL Zertifikate.
Examples:
/root/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
/root/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
name:
Ignore this. This is just here for salt. |
def relevant(symbol, token='', version=''):
'''Same as peers
https://iexcloud.io/docs/api/#relevant
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result
'''
_raiseIfNotStr(symbol)
return _getJson('stock/' + symbol + '/relevant', token, version) | Same as peers
https://iexcloud.io/docs/api/#relevant
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result |
def could_be(self, other):
"""Return True if the other PersonName is not explicitly inconsistent."""
# TODO: Some suffix and title differences should be allowed
if type(other) is not type(self):
return NotImplemented
if self == other:
return True
for attr in ['title', 'firstname', 'middlename', 'nickname', 'prefix', 'lastname', 'suffix']:
if attr not in self or attr not in other:
continue
puncmap = dict((ord(char), None) for char in string.punctuation)
s = self[attr].lower().translate(puncmap)
o = other[attr].lower().translate(puncmap)
if s == o:
continue
if attr in {'firstname', 'middlename', 'lastname'}:
if (({len(comp) for comp in s.split()} == {1} and [el[0] for el in o.split()] == s.split()) or
({len(comp) for comp in o.split()} == {1} and [el[0] for el in s.split()] == o.split())):
continue
return False
return True | Return True if the other PersonName is not explicitly inconsistent. |
def _coulomb(n1, n2, k, r):
"""Calculates Coulomb forces and updates node data."""
# Get relevant positional data
delta = [x2 - x1 for x1, x2 in zip(n1['velocity'], n2['velocity'])]
distance = sqrt(sum(d ** 2 for d in delta))
# If the deltas are too small, use random values to keep things moving
if distance < 0.1:
delta = [uniform(0.1, 0.2) for _ in repeat(None, 3)]
distance = sqrt(sum(d ** 2 for d in delta))
# If the distance isn't huge (ie. Coulomb is negligible), calculate
if distance < r:
force = (k / distance) ** 2
n1['force'] = [f - force * d for f, d in zip(n1['force'], delta)]
n2['force'] = [f + force * d for f, d in zip(n2['force'], delta)] | Calculates Coulomb forces and updates node data. |
def to_e164(name, origin=public_enum_domain, want_plus_prefix=True):
"""Convert an ENUM domain name into an E.164 number.
@param name: the ENUM domain name.
@type name: dns.name.Name object.
@param origin: A domain containing the ENUM domain name. The
name is relativized to this domain before being converted to text.
@type: dns.name.Name object or None
@param want_plus_prefix: if True, add a '+' to the beginning of the
returned number.
@rtype: str
"""
if not origin is None:
name = name.relativize(origin)
dlabels = [d for d in name.labels if (d.isdigit() and len(d) == 1)]
if len(dlabels) != len(name.labels):
raise dns.exception.SyntaxError('non-digit labels in ENUM domain name')
dlabels.reverse()
text = ''.join(dlabels)
if want_plus_prefix:
text = '+' + text
return text | Convert an ENUM domain name into an E.164 number.
@param name: the ENUM domain name.
@type name: dns.name.Name object.
@param origin: A domain containing the ENUM domain name. The
name is relativized to this domain before being converted to text.
@type: dns.name.Name object or None
@param want_plus_prefix: if True, add a '+' to the beginning of the
returned number.
@rtype: str |
def until_condition(self, condition, condition_description):
"""
Waits until conditions is True or returns a non-None value.
If any of the trait is still not present after timeout, raises a TimeoutException.
"""
end_time = time.time() + self._timeout
count = 1
while True:
try:
if not hasattr(condition, '__call__'):
raise TypeError("condition is not callable")
value = condition()
if type(value) is bool and value is not False:
return value
elif type(value) is not bool and value is not None:
return value
else:
logger.debug("#" + str(count) + " - wait until " + condition_description) # pragma: no cover
except self._ignored_exceptions as ex:
logger.debug("Captured {0} : {1}".format(str(ex.__class__).replace("<type '", "").replace("'>", ""),
str(ex))) # pragma: no cover
time.sleep(self._poll)
count += 1
if time.time() > end_time: # pragma: no cover
break
raise TimeoutException(
msg="condition <" + condition_description + "> was not true after " + str(self._timeout) + " seconds.") | Waits until conditions is True or returns a non-None value.
If any of the trait is still not present after timeout, raises a TimeoutException. |
def userParamFromDict(attributes):
"""Python representation of a mzML userParam = tuple(name, value,
unitAccession, type)
:param attributes: #TODO: docstring
:returns: #TODO: docstring
"""
keys = ['name', 'value', 'unitAccession', 'type']
return tuple(attributes[key] if key in attributes else None for key in keys) | Python representation of a mzML userParam = tuple(name, value,
unitAccession, type)
:param attributes: #TODO: docstring
:returns: #TODO: docstring |
def get_gc_property(value, is_bytes=False):
"""Get `GC` property."""
obj = unidata.ascii_properties if is_bytes else unidata.unicode_properties
if value.startswith('^'):
negate = True
value = value[1:]
else:
negate = False
value = unidata.unicode_alias['generalcategory'].get(value, value)
assert 1 <= len(value) <= 2, 'Invalid property!'
if not negate:
p1, p2 = (value[0], value[1]) if len(value) > 1 else (value[0], None)
value = ''.join(
[v for k, v in obj.get(p1, {}).items() if not k.startswith('^')]
) if p2 is None else obj.get(p1, {}).get(p2, '')
else:
p1, p2 = (value[0], value[1]) if len(value) > 1 else (value[0], '')
value = obj.get(p1, {}).get('^' + p2, '')
assert value, 'Invalid property!'
return value | Get `GC` property. |
def name(self):
"""
Algo name.
"""
if self._name is None:
self._name = self.__class__.__name__
return self._name | Algo name. |
def on_state_execution_status_changed_after(self, model, prop_name, info):
""" Show current execution status in the widget
This function specifies what happens if the state machine execution status of a state changes
:param model: the model of the state that has changed (most likely its execution status)
:param prop_name: property name that has been changed
:param info: notification info dictionary
:return:
"""
from rafcon.gui.utils.notification_overview import NotificationOverview
from rafcon.core.states.state import State
def name_and_next_state(state):
assert isinstance(state, State)
if state.is_root_state_of_library:
return state.parent.parent, state.parent.name
else:
return state.parent, state.name
def create_path(state, n=3, separator='/'):
next_parent, name = name_and_next_state(state)
path = separator + name
n -= 1
while n > 0 and isinstance(next_parent, State):
next_parent, name = name_and_next_state(next_parent)
path = separator + name + path
n -= 1
if isinstance(next_parent, State):
path = separator + '..' + path
return path
if 'kwargs' in info and 'method_name' in info['kwargs']:
overview = NotificationOverview(info)
if overview['method_name'][-1] == 'state_execution_status':
active_state = overview['model'][-1].state
assert isinstance(active_state, State)
path_depth = rafcon.gui.singleton.global_gui_config.get_config_value("EXECUTION_TICKER_PATH_DEPTH", 3)
message = self._fix_text_of_label + create_path(active_state, path_depth)
if rafcon.gui.singleton.main_window_controller.view is not None:
self.ticker_text_label.set_text(message)
else:
logger.warn("Not initialized yet") | Show current execution status in the widget
This function specifies what happens if the state machine execution status of a state changes
:param model: the model of the state that has changed (most likely its execution status)
:param prop_name: property name that has been changed
:param info: notification info dictionary
:return: |
def __initialize_node(self, attributes_flags=int(Qt.ItemIsSelectable | Qt.ItemIsEnabled)):
"""
Initializes the node.
:param attributes_flags: Attributes flags.
:type attributes_flags: int
"""
self["traced"] = umbra.ui.nodes.GraphModelAttribute(name="traced",
value=foundations.trace.is_traced(self.__module),
flags=attributes_flags)
self.update_node_attributes() | Initializes the node.
:param attributes_flags: Attributes flags.
:type attributes_flags: int |
def redo(self, channel, image):
"""This method is called when an image is set in a channel."""
imname = image.get('name', 'none')
chname = channel.name
# is image in contents tree yet?
in_contents = self.is_in_contents(chname, imname)
# get old highlighted entries for this channel -- will be
# an empty set or one key
old_highlight = channel.extdata.contents_old_highlight
# calculate new highlight keys -- again, an empty set or one key
if image is not None:
key = self._get_hl_key(chname, image)
new_highlight = set([key])
else:
# no image has the focus
new_highlight = set([])
# Only highlights active image in the current channel
if self.highlight_tracks_keyboard_focus:
if in_contents:
self.update_highlights(self._hl_path, new_highlight)
self._hl_path = new_highlight
# Highlight all active images in all channels
else:
if in_contents:
self.update_highlights(old_highlight, new_highlight)
channel.extdata.contents_old_highlight = new_highlight
return True | This method is called when an image is set in a channel. |
def GetMessage(self, log_source, lcid, message_identifier):
"""Retrieves a specific message for a specific Event Log source.
Args:
log_source (str): Event Log source.
lcid (int): language code identifier (LCID).
message_identifier (int): message identifier.
Returns:
str: message string or None if not available.
"""
event_log_provider_key = self._GetEventLogProviderKey(log_source)
if not event_log_provider_key:
return None
generator = self._GetMessageFileKeys(event_log_provider_key)
if not generator:
return None
# TODO: cache a number of message strings.
message_string = None
for message_file_key in generator:
message_string = self._GetMessage(
message_file_key, lcid, message_identifier)
if message_string:
break
if self._string_format == 'wrc':
message_string = self._ReformatMessageString(message_string)
return message_string | Retrieves a specific message for a specific Event Log source.
Args:
log_source (str): Event Log source.
lcid (int): language code identifier (LCID).
message_identifier (int): message identifier.
Returns:
str: message string or None if not available. |
def quit(self):
"""
Quit the player, blocking until the process has died
"""
if self._process is None:
logger.debug('Quit was called after self._process had already been released')
return
try:
logger.debug('Quitting OMXPlayer')
process_group_id = os.getpgid(self._process.pid)
os.killpg(process_group_id, signal.SIGTERM)
logger.debug('SIGTERM Sent to pid: %s' % process_group_id)
self._process_monitor.join()
except OSError:
logger.error('Could not find the process to kill')
self._process = None | Quit the player, blocking until the process has died |
def sum_dicts(dicts, normalize=False):
"""Sums the given dicts into a single dict mapping each numberic-valued
key to the sum of its mappings in all given dicts. Keys mapping to
non-numeric values retain the last value (by the given order).
Parameters
----------
dicts : list
A list of dict objects mapping each key to an numeric value.
normalize : bool, default False
Indicated whether to normalize all values by value sum.
Returns
-------
dict
A dict where each key is mapped to the sum of its mappings in all
given dicts.
"""
sum_dict = {}
for dicti in dicts:
for key in dicti:
val = dicti[key]
if isinstance(val, numbers.Number):
sum_dict[key] = sum_dict.get(key, 0) + val
else:
sum_dict[key] = val
if normalize:
return norm_int_dict(sum_dict)
return sum_dict | Sums the given dicts into a single dict mapping each numberic-valued
key to the sum of its mappings in all given dicts. Keys mapping to
non-numeric values retain the last value (by the given order).
Parameters
----------
dicts : list
A list of dict objects mapping each key to an numeric value.
normalize : bool, default False
Indicated whether to normalize all values by value sum.
Returns
-------
dict
A dict where each key is mapped to the sum of its mappings in all
given dicts. |
def set_fraction(self, value):
"""Set the meter indicator. Value should be between 0 and 1."""
if value < 0:
value *= -1
value = min(value, 1)
if self.horizontal:
width = int(self.width * value)
height = self.height
else:
width = self.width
height = int(self.height * value)
self.canvas.coords(self.meter, self.xpos, self.ypos,
self.xpos + width, self.ypos + height) | Set the meter indicator. Value should be between 0 and 1. |
def ticket_delete(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/tickets#delete-ticket"
api_path = "/api/v2/tickets/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, method="DELETE", **kwargs) | https://developer.zendesk.com/rest_api/docs/core/tickets#delete-ticket |
def conversion_rate(self):
"""
The percentage of participants that have converted for this variant.
Returns a > 0 float representing a percentage rate.
"""
participants = self.participant_count
if participants == 0:
return 0.0
return self.experiment.conversions_for(self.name) / float(participants) | The percentage of participants that have converted for this variant.
Returns a > 0 float representing a percentage rate. |
def get_savename_from_varname(
varname, varname_prefix=None,
savename_prefix=None):
"""
Args:
varname(str): a variable name in the graph
varname_prefix(str): an optional prefix that may need to be removed in varname
savename_prefix(str): an optional prefix to append to all savename
Returns:
str: the name used to save the variable
"""
name = varname
if varname_prefix is not None \
and name.startswith(varname_prefix):
name = name[len(varname_prefix) + 1:]
if savename_prefix is not None:
name = savename_prefix + '/' + name
return name | Args:
varname(str): a variable name in the graph
varname_prefix(str): an optional prefix that may need to be removed in varname
savename_prefix(str): an optional prefix to append to all savename
Returns:
str: the name used to save the variable |
def _update_rs_with_primary_from_member(
sds,
replica_set_name,
server_description):
"""RS with known primary. Process a response from a non-primary.
Pass in a dict of ServerDescriptions, current replica set name, and the
ServerDescription we are processing.
Returns new topology type.
"""
assert replica_set_name is not None
if replica_set_name != server_description.replica_set_name:
sds.pop(server_description.address)
elif (server_description.me and
server_description.address != server_description.me):
sds.pop(server_description.address)
# Had this member been the primary?
return _check_has_primary(sds) | RS with known primary. Process a response from a non-primary.
Pass in a dict of ServerDescriptions, current replica set name, and the
ServerDescription we are processing.
Returns new topology type. |
def fromProfileName(cls, name):
"""Return a `SessionAPI` from a given configuration profile name.
:see: `ProfileStore`.
"""
with profiles.ProfileStore.open() as config:
return cls.fromProfile(config.load(name)) | Return a `SessionAPI` from a given configuration profile name.
:see: `ProfileStore`. |
def as_ordered_dict(self, preference_orders: List[List[str]] = None) -> OrderedDict:
"""
Returns Ordered Dict of Params from list of partial order preferences.
Parameters
----------
preference_orders: List[List[str]], optional
``preference_orders`` is list of partial preference orders. ["A", "B", "C"] means
"A" > "B" > "C". For multiple preference_orders first will be considered first.
Keys not found, will have last but alphabetical preference. Default Preferences:
``[["dataset_reader", "iterator", "model", "train_data_path", "validation_data_path",
"test_data_path", "trainer", "vocabulary"], ["type"]]``
"""
params_dict = self.as_dict(quiet=True)
if not preference_orders:
preference_orders = []
preference_orders.append(["dataset_reader", "iterator", "model",
"train_data_path", "validation_data_path", "test_data_path",
"trainer", "vocabulary"])
preference_orders.append(["type"])
def order_func(key):
# Makes a tuple to use for ordering. The tuple is an index into each of the `preference_orders`,
# followed by the key itself. This gives us integer sorting if you have a key in one of the
# `preference_orders`, followed by alphabetical ordering if not.
order_tuple = [order.index(key) if key in order else len(order) for order in preference_orders]
return order_tuple + [key]
def order_dict(dictionary, order_func):
# Recursively orders dictionary according to scoring order_func
result = OrderedDict()
for key, val in sorted(dictionary.items(), key=lambda item: order_func(item[0])):
result[key] = order_dict(val, order_func) if isinstance(val, dict) else val
return result
return order_dict(params_dict, order_func) | Returns Ordered Dict of Params from list of partial order preferences.
Parameters
----------
preference_orders: List[List[str]], optional
``preference_orders`` is list of partial preference orders. ["A", "B", "C"] means
"A" > "B" > "C". For multiple preference_orders first will be considered first.
Keys not found, will have last but alphabetical preference. Default Preferences:
``[["dataset_reader", "iterator", "model", "train_data_path", "validation_data_path",
"test_data_path", "trainer", "vocabulary"], ["type"]]`` |
def manipulate(self, stored_instance, component_instance):
"""
Stores the given StoredInstance bean.
:param stored_instance: The iPOPO component StoredInstance
:param component_instance: The component instance
"""
# Store the stored instance...
self._ipopo_instance = stored_instance
# ... and the bundle context
self._context = stored_instance.bundle_context | Stores the given StoredInstance bean.
:param stored_instance: The iPOPO component StoredInstance
:param component_instance: The component instance |
def hgetall(self, key):
"""
Returns all fields and values of the has stored at `key`.
The underlying redis `HGETALL`_ command returns an array of
pairs. This method converts that to a Python :class:`dict`.
It will return an empty :class:`dict` when the key is not
found.
.. note::
**Time complexity**: ``O(N)`` where ``N`` is the size
of the hash.
:param key: The key of the hash
:type key: :class:`str`, :class:`bytes`
:returns: a :class:`dict` of key to value mappings for all
fields in the hash
.. _HGETALL: http://redis.io/commands/hgetall
"""
def format_response(value):
return dict(zip(value[::2], value[1::2]))
return self._execute(
[b'HGETALL', key], format_callback=format_response) | Returns all fields and values of the has stored at `key`.
The underlying redis `HGETALL`_ command returns an array of
pairs. This method converts that to a Python :class:`dict`.
It will return an empty :class:`dict` when the key is not
found.
.. note::
**Time complexity**: ``O(N)`` where ``N`` is the size
of the hash.
:param key: The key of the hash
:type key: :class:`str`, :class:`bytes`
:returns: a :class:`dict` of key to value mappings for all
fields in the hash
.. _HGETALL: http://redis.io/commands/hgetall |
def is_from_parent(cls, attribute_name, value=None):
# type: (type, str, bool) -> bool
"""
Tests if the current attribute value is shared by a parent of the given
class.
Returns None if the attribute value is None.
:param cls: Child class with the requested attribute
:param attribute_name: Name of the attribute to be tested
:param value: The exact value in the child class (optional)
:return: True if the attribute value is shared with a parent class
"""
if value is None:
try:
# Get the current value
value = getattr(cls, attribute_name)
except AttributeError:
# No need to go further: the attribute does not exist
return False
for base in cls.__bases__:
# Look for the value in each parent class
try:
return getattr(base, attribute_name) is value
except AttributeError:
pass
# Attribute value not found in parent classes
return False | Tests if the current attribute value is shared by a parent of the given
class.
Returns None if the attribute value is None.
:param cls: Child class with the requested attribute
:param attribute_name: Name of the attribute to be tested
:param value: The exact value in the child class (optional)
:return: True if the attribute value is shared with a parent class |
def _append_record(test_data, results, test_path):
"""Adds data of single testcase results to results database."""
statuses = test_data.get("statuses")
jenkins_data = test_data.get("jenkins") or {}
data = [
("title", test_data.get("test_name") or _get_testname(test_path)),
("verdict", statuses.get("overall")),
("source", test_data.get("source")),
("job_name", jenkins_data.get("job_name")),
("run", jenkins_data.get("build_number")),
("params", _filter_parameters(test_data.get("params"))),
(
"time",
_calculate_duration(test_data.get("start_time"), test_data.get("finish_time")) or 0,
),
]
test_id = test_data.get("polarion")
if test_id:
if isinstance(test_id, list):
test_id = test_id[0]
data.append(("test_id", test_id))
results.append(OrderedDict(data)) | Adds data of single testcase results to results database. |
def delete_ipv6(self, ipv6_id):
"""
Delete ipv6
"""
uri = 'api/ipv6/%s/' % (ipv6_id)
return super(ApiNetworkIPv6, self).delete(uri) | Delete ipv6 |
def sg_argmin(tensor, opt):
r"""Returns the indices of the minimum values along the specified axis.
See `tf.argin()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis: Target axis. Default is the last one.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
opt += tf.sg_opt(axis=tensor.get_shape().ndims - 1)
return tf.argmin(tensor, opt.axis, opt.name) | r"""Returns the indices of the minimum values along the specified axis.
See `tf.argin()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis: Target axis. Default is the last one.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`. |
def translate_rgb_to_ansi_code(red, green, blue, offset, colormode):
"""
Translate the given RGB color into the appropriate ANSI escape code
for the given color mode.
The offset is used for the base color which is used.
The ``colormode`` has to be one of:
* 0: no colors / disabled
* 8: use ANSI 8 colors
* 16: use ANSI 16 colors (same as 8 but with brightness)
* 256: use ANSI 256 colors
* 0xFFFFFF / 16777215: use 16 Million true colors
:param int red: the red channel value
:param int green: the green channel value
:param int blue: the blue channel value
:param int offset: the offset to use for the base color
:param int colormode: the color mode to use. See explanation above
"""
if colormode == terminal.NO_COLORS: # colors are disabled, thus return empty string
return '', ''
if colormode == terminal.ANSI_8_COLORS or colormode == terminal.ANSI_16_COLORS:
color_code = ansi.rgb_to_ansi16(red, green, blue)
start_code = ansi.ANSI_ESCAPE_CODE.format(
code=color_code + offset - ansi.FOREGROUND_COLOR_OFFSET)
end_code = ansi.ANSI_ESCAPE_CODE.format(code=offset + ansi.COLOR_CLOSE_OFFSET)
return start_code, end_code
if colormode == terminal.ANSI_256_COLORS:
color_code = ansi.rgb_to_ansi256(red, green, blue)
start_code = ansi.ANSI_ESCAPE_CODE.format(code='{base};5;{code}'.format(
base=8 + offset, code=color_code))
end_code = ansi.ANSI_ESCAPE_CODE.format(code=offset + ansi.COLOR_CLOSE_OFFSET)
return start_code, end_code
if colormode == terminal.TRUE_COLORS:
start_code = ansi.ANSI_ESCAPE_CODE.format(code='{base};2;{red};{green};{blue}'.format(
base=8 + offset, red=red, green=green, blue=blue))
end_code = ansi.ANSI_ESCAPE_CODE.format(code=offset + ansi.COLOR_CLOSE_OFFSET)
return start_code, end_code
raise ColorfulError('invalid color mode "{0}"'.format(colormode)) | Translate the given RGB color into the appropriate ANSI escape code
for the given color mode.
The offset is used for the base color which is used.
The ``colormode`` has to be one of:
* 0: no colors / disabled
* 8: use ANSI 8 colors
* 16: use ANSI 16 colors (same as 8 but with brightness)
* 256: use ANSI 256 colors
* 0xFFFFFF / 16777215: use 16 Million true colors
:param int red: the red channel value
:param int green: the green channel value
:param int blue: the blue channel value
:param int offset: the offset to use for the base color
:param int colormode: the color mode to use. See explanation above |
def wait_until_alert_is_present(self, timeout=None):
"""
Waits for an alert to be present
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
timeout = timeout if timeout is not None else self.timeout
locator = None
def wait():
'''
Wait function passed to executor
'''
return WebDriverWait(self.driver, timeout).until(EC.alert_is_present())
return self.execute_and_handle_webdriver_exceptions(
wait, timeout, locator, 'Timeout waiting for alert to be present') | Waits for an alert to be present
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found |
def execute_all_rules(self, matches, context):
"""
Execute all rules from this rules list. All when condition with same priority will be performed before
calling then actions.
:param matches:
:type matches:
:param context:
:type context:
:return:
:rtype:
"""
ret = []
for priority, priority_rules in groupby(sorted(self), lambda rule: rule.priority):
sorted_rules = toposort_rules(list(priority_rules)) # Group by dependency graph toposort
for rules_group in sorted_rules:
rules_group = list(sorted(rules_group, key=self.index)) # Sort rules group based on initial ordering.
group_log_level = None
for rule in rules_group:
if group_log_level is None or group_log_level < rule.log_level:
group_log_level = rule.log_level
log(group_log_level, "%s independent rule(s) at priority %s.", len(rules_group), priority)
for rule in rules_group:
when_response = execute_rule(rule, matches, context)
if when_response is not None:
ret.append((rule, when_response))
return ret | Execute all rules from this rules list. All when condition with same priority will be performed before
calling then actions.
:param matches:
:type matches:
:param context:
:type context:
:return:
:rtype: |
def _set_gre_ttl(self, v, load=False):
"""
Setter method for gre_ttl, mapped from YANG variable /interface/tunnel/gre_ttl (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_gre_ttl is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_gre_ttl() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 255']}), is_leaf=True, yang_name="gre-ttl", rest_name="ttl", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Tunnel ttl range 1 to 255', u'alt-name': u'ttl', u'cli-full-no': None, u'cli-break-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-gre-vxlan', defining_module='brocade-gre-vxlan', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """gre_ttl must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 255']}), is_leaf=True, yang_name="gre-ttl", rest_name="ttl", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Tunnel ttl range 1 to 255', u'alt-name': u'ttl', u'cli-full-no': None, u'cli-break-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-gre-vxlan', defining_module='brocade-gre-vxlan', yang_type='uint32', is_config=True)""",
})
self.__gre_ttl = t
if hasattr(self, '_set'):
self._set() | Setter method for gre_ttl, mapped from YANG variable /interface/tunnel/gre_ttl (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_gre_ttl is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_gre_ttl() directly. |
def _top_element(self):
"""Returns top XML element."""
attrs = {"project-id": self.config["polarion-project-id"]}
document_relative_path = self.config.get("requirements-document-relative-path")
if document_relative_path:
attrs["document-relative-path"] = document_relative_path
top = etree.Element("requirements", attrs)
return top | Returns top XML element. |
def namedb_state_mutation_sanity_check( opcode, op_data ):
"""
Make sure all mutate fields for this operation are present.
Return True if so
Raise exception if not
"""
# sanity check: each mutate field in the operation must be defined in op_data, even if it's null.
missing = []
mutate_fields = op_get_mutate_fields( opcode )
for field in mutate_fields:
if field not in op_data.keys():
missing.append( field )
assert len(missing) == 0, ("BUG: operation '%s' is missing the following fields: %s" % (opcode, ",".join(missing)))
return True | Make sure all mutate fields for this operation are present.
Return True if so
Raise exception if not |
def to_links_df(regressor_type,
regressor_kwargs,
trained_regressor,
tf_matrix_gene_names,
target_gene_name):
"""
:param regressor_type: string. Case insensitive.
:param regressor_kwargs: dict of key-value pairs that configures the regressor.
:param trained_regressor: the trained model from which to extract the feature importances.
:param tf_matrix_gene_names: the list of names corresponding to the columns of the tf_matrix used to train the model.
:param target_gene_name: the name of the target gene.
:return: a Pandas DataFrame['TF', 'target', 'importance'] representing inferred regulatory links and their
connection strength.
"""
def pythonic():
# feature_importances = trained_regressor.feature_importances_
feature_importances = to_feature_importances(regressor_type, regressor_kwargs, trained_regressor)
links_df = pd.DataFrame({'TF': tf_matrix_gene_names, 'importance': feature_importances})
links_df['target'] = target_gene_name
clean_links_df = links_df[links_df.importance > 0].sort_values(by='importance', ascending=False)
return clean_links_df[['TF', 'target', 'importance']]
if is_sklearn_regressor(regressor_type):
return pythonic()
elif is_xgboost_regressor(regressor_type):
raise ValueError('XGB regressor not yet supported')
else:
raise ValueError('Unsupported regressor type: ' + regressor_type) | :param regressor_type: string. Case insensitive.
:param regressor_kwargs: dict of key-value pairs that configures the regressor.
:param trained_regressor: the trained model from which to extract the feature importances.
:param tf_matrix_gene_names: the list of names corresponding to the columns of the tf_matrix used to train the model.
:param target_gene_name: the name of the target gene.
:return: a Pandas DataFrame['TF', 'target', 'importance'] representing inferred regulatory links and their
connection strength. |
def plot_variability_thresholds(varthreshpkl,
xmin_lcmad_stdev=5.0,
xmin_stetj_stdev=2.0,
xmin_iqr_stdev=2.0,
xmin_inveta_stdev=2.0,
lcformat='hat-sql',
lcformatdir=None,
magcols=None):
'''This makes plots for the variability threshold distributions.
Parameters
----------
varthreshpkl : str
The pickle produced by the function above.
xmin_lcmad_stdev,xmin_stetj_stdev,xmin_iqr_stdev,xmin_inveta_stdev : float or np.array
Values of the threshold values to override the ones in the
`vartresholdpkl`. If provided, will plot the thresholds accordingly
instead of using the ones in the input pickle directly.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
magcols : list of str or None
The magcol keys to use from the lcdict.
Returns
-------
str
The file name of the threshold plot generated.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
if magcols is None:
magcols = dmagcols
with open(varthreshpkl,'rb') as infd:
allobjects = pickle.load(infd)
magbins = allobjects['magbins']
for magcol in magcols:
min_lcmad_stdev = (
xmin_lcmad_stdev or allobjects[magcol]['min_lcmad_stdev']
)
min_stetj_stdev = (
xmin_stetj_stdev or allobjects[magcol]['min_stetj_stdev']
)
min_iqr_stdev = (
xmin_iqr_stdev or allobjects[magcol]['min_iqr_stdev']
)
min_inveta_stdev = (
xmin_inveta_stdev or allobjects[magcol]['min_inveta_stdev']
)
fig = plt.figure(figsize=(20,16))
# the mag vs lcmad
plt.subplot(221)
plt.plot(allobjects[magcol]['sdssr'],
allobjects[magcol]['lcmad']*1.483,
marker='.',ms=1.0, linestyle='none',
rasterized=True)
plt.plot(allobjects[magcol]['binned_sdssr_median'],
np.array(allobjects[magcol]['binned_lcmad_median'])*1.483,
linewidth=3.0)
plt.plot(
allobjects[magcol]['binned_sdssr_median'],
np.array(allobjects[magcol]['binned_lcmad_median'])*1.483 +
min_lcmad_stdev*np.array(
allobjects[magcol]['binned_lcmad_stdev']
),
linewidth=3.0, linestyle='dashed'
)
plt.xlim((magbins.min()-0.25, magbins.max()))
plt.xlabel('SDSS r')
plt.ylabel(r'lightcurve RMS (MAD $\times$ 1.483)')
plt.title('%s - SDSS r vs. light curve RMS' % magcol)
plt.yscale('log')
plt.tight_layout()
# the mag vs stetsonj
plt.subplot(222)
plt.plot(allobjects[magcol]['sdssr'],
allobjects[magcol]['stetsonj'],
marker='.',ms=1.0, linestyle='none',
rasterized=True)
plt.plot(allobjects[magcol]['binned_sdssr_median'],
allobjects[magcol]['binned_stetsonj_median'],
linewidth=3.0)
plt.plot(
allobjects[magcol]['binned_sdssr_median'],
np.array(allobjects[magcol]['binned_stetsonj_median']) +
min_stetj_stdev*np.array(
allobjects[magcol]['binned_stetsonj_stdev']
),
linewidth=3.0, linestyle='dashed'
)
plt.xlim((magbins.min()-0.25, magbins.max()))
plt.xlabel('SDSS r')
plt.ylabel('Stetson J index')
plt.title('%s - SDSS r vs. Stetson J index' % magcol)
plt.yscale('log')
plt.tight_layout()
# the mag vs IQR
plt.subplot(223)
plt.plot(allobjects[magcol]['sdssr'],
allobjects[magcol]['iqr'],
marker='.',ms=1.0, linestyle='none',
rasterized=True)
plt.plot(allobjects[magcol]['binned_sdssr_median'],
allobjects[magcol]['binned_iqr_median'],
linewidth=3.0)
plt.plot(
allobjects[magcol]['binned_sdssr_median'],
np.array(allobjects[magcol]['binned_iqr_median']) +
min_iqr_stdev*np.array(
allobjects[magcol]['binned_iqr_stdev']
),
linewidth=3.0, linestyle='dashed'
)
plt.xlabel('SDSS r')
plt.ylabel('IQR')
plt.title('%s - SDSS r vs. IQR' % magcol)
plt.xlim((magbins.min()-0.25, magbins.max()))
plt.yscale('log')
plt.tight_layout()
# the mag vs IQR
plt.subplot(224)
plt.plot(allobjects[magcol]['sdssr'],
allobjects[magcol]['inveta'],
marker='.',ms=1.0, linestyle='none',
rasterized=True)
plt.plot(allobjects[magcol]['binned_sdssr_median'],
allobjects[magcol]['binned_inveta_median'],
linewidth=3.0)
plt.plot(
allobjects[magcol]['binned_sdssr_median'],
np.array(allobjects[magcol]['binned_inveta_median']) +
min_inveta_stdev*np.array(
allobjects[magcol]['binned_inveta_stdev']
),
linewidth=3.0, linestyle='dashed'
)
plt.xlabel('SDSS r')
plt.ylabel(r'$1/\eta$')
plt.title(r'%s - SDSS r vs. $1/\eta$' % magcol)
plt.xlim((magbins.min()-0.25, magbins.max()))
plt.yscale('log')
plt.tight_layout()
plt.savefig('varfeatures-%s-%s-distributions.png' % (varthreshpkl,
magcol),
bbox_inches='tight')
plt.close('all') | This makes plots for the variability threshold distributions.
Parameters
----------
varthreshpkl : str
The pickle produced by the function above.
xmin_lcmad_stdev,xmin_stetj_stdev,xmin_iqr_stdev,xmin_inveta_stdev : float or np.array
Values of the threshold values to override the ones in the
`vartresholdpkl`. If provided, will plot the thresholds accordingly
instead of using the ones in the input pickle directly.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
magcols : list of str or None
The magcol keys to use from the lcdict.
Returns
-------
str
The file name of the threshold plot generated. |
def start( self ):
"""
Starts the thread in its own event loop if the local and global thread
options are true, otherwise runs the thread logic in the main event
loop.
"""
if ( self.localThreadingEnabled() and self.globalThreadingEnabled() ):
super(XThread, self).start()
else:
self.run()
self.finished.emit() | Starts the thread in its own event loop if the local and global thread
options are true, otherwise runs the thread logic in the main event
loop. |
def estimator_spec_train(self, loss, num_async_replicas=1, use_tpu=False):
"""Constructs `tf.estimator.EstimatorSpec` for TRAIN (training) mode."""
train_op = self.optimize(loss, num_async_replicas=num_async_replicas,
use_tpu=use_tpu)
if use_tpu:
if self._hparams.warm_start_from:
def scaffold_fn():
self.initialize_from_ckpt(self._hparams.warm_start_from)
return tf.train.Scaffold()
else:
scaffold_fn = None
# Note: important to call this before remove_summaries()
if self.hparams.tpu_enable_host_call:
host_call = self.create_train_host_call()
else:
host_call = None
remove_summaries()
return tf.contrib.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.TRAIN,
loss=loss,
train_op=train_op,
host_call=host_call,
scaffold_fn=scaffold_fn)
else:
if self._hparams.warm_start_from:
self.initialize_from_ckpt(self._hparams.warm_start_from)
# When loading weights from a pre-trained model, you want to be able to
# load separate weights into the encoder and decoder.
if self._hparams.warm_start_from_second:
self.initialize_from_ckpt(self._hparams.warm_start_from_second)
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op) | Constructs `tf.estimator.EstimatorSpec` for TRAIN (training) mode. |
def get_snpeff_info(snpeff_string, snpeff_header):
"""Make the vep annotations into a dictionaries
A snpeff dictionary will have the snpeff column names as keys and
the vep annotations as values.
The dictionaries are stored in a list.
One dictionary for each transcript.
Args:
snpeff_string (string): A string with the ANN annotation
snpeff_header (list): A list with the vep header
Return:
snpeff_annotations (list): A list of vep dicts
"""
snpeff_annotations = [
dict(zip(snpeff_header, snpeff_annotation.split('|')))
for snpeff_annotation in snpeff_string.split(',')
]
return snpeff_annotations | Make the vep annotations into a dictionaries
A snpeff dictionary will have the snpeff column names as keys and
the vep annotations as values.
The dictionaries are stored in a list.
One dictionary for each transcript.
Args:
snpeff_string (string): A string with the ANN annotation
snpeff_header (list): A list with the vep header
Return:
snpeff_annotations (list): A list of vep dicts |
def udp_messenger(domain_name, UDP_IP, UDP_PORT, sock_timeout, message):
"""Send UDP messages to usage tracker asynchronously
This multiprocessing based messenger was written to overcome the limitations
of signalling/terminating a thread that is blocked on a system call. This
messenger is created as a separate process, and initialized with 2 queues,
to_send to receive messages to be sent to the internet.
Args:
- domain_name (str) : Domain name string
- UDP_IP (str) : IP address YYY.YYY.YYY.YYY
- UDP_PORT (int) : UDP port to send out on
- sock_timeout (int) : Socket timeout
- to_send (multiprocessing.Queue) : Queue of outgoing messages to internet
"""
try:
if message is None:
raise ValueError("message was none")
encoded_message = bytes(message, "utf-8")
if encoded_message is None:
raise ValueError("utf-8 encoding of message failed")
if domain_name:
try:
UDP_IP = socket.gethostbyname(domain_name)
except Exception:
# (False, "Domain lookup failed, defaulting to {0}".format(UDP_IP))
pass
if UDP_IP is None:
raise Exception("UDP_IP is None")
if UDP_PORT is None:
raise Exception("UDP_PORT is None")
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
sock.settimeout(sock_timeout)
sock.sendto(bytes(message, "utf-8"), (UDP_IP, UDP_PORT))
sock.close()
except socket.timeout:
logger.debug("Failed to send usage tracking data: socket timeout")
except OSError as e:
logger.debug("Failed to send usage tracking data: OSError: {}".format(e))
except Exception as e:
logger.debug("Failed to send usage tracking data: Exception: {}".format(e)) | Send UDP messages to usage tracker asynchronously
This multiprocessing based messenger was written to overcome the limitations
of signalling/terminating a thread that is blocked on a system call. This
messenger is created as a separate process, and initialized with 2 queues,
to_send to receive messages to be sent to the internet.
Args:
- domain_name (str) : Domain name string
- UDP_IP (str) : IP address YYY.YYY.YYY.YYY
- UDP_PORT (int) : UDP port to send out on
- sock_timeout (int) : Socket timeout
- to_send (multiprocessing.Queue) : Queue of outgoing messages to internet |
def _add_prefix(self, split_names, start_node, group_type_name):
"""Adds the correct sub branch prefix to a given name.
Usually the prefix is the full name of the parent node. In case items are added
directly to the trajectory the prefixes are chosen according to the matching subbranch.
For example, this could be 'parameters' for parameters or 'results.run_00000004' for
results added to the fifth single run.
:param split_names:
List of names of the new node (e.g. ``['mynewgroupA', 'mynewgroupB', 'myresult']``).
:param start_node:
Parent node under which the new node should be added.
:param group_type_name:
Type name of subbranch the item belongs to
(e.g. 'PARAMETER_GROUP', 'RESULT_GROUP' etc).
:return: The name with the added prefix.
"""
root = self._root_instance
# If the start node of our insertion is root or one below root
# we might need to add prefixes.
# In case of derived parameters and results we also need to add prefixes containing the
# subbranch and the current run in case of a single run.
# For instance, a prefix could be 'results.runs.run_00000007'.
prepend = []
if start_node.v_depth < 3 and not group_type_name == GROUP:
if start_node.v_depth == 0:
if group_type_name == DERIVED_PARAMETER_GROUP:
if split_names[0] == 'derived_parameters':
return split_names
else:
prepend += ['derived_parameters']
elif group_type_name == RESULT_GROUP:
if split_names[0] == 'results':
return split_names
else:
prepend += ['results']
elif group_type_name == CONFIG_GROUP:
if split_names[0] == 'config':
return split_names
else:
prepend += ['config']
elif group_type_name == PARAMETER_GROUP:
if split_names[0] == 'parameters':
return split_names[0]
else:
prepend += ['parameters']
else:
raise RuntimeError('Why are you here?')
# Check if we have to add a prefix containing the current run
if root._is_run and root._auto_run_prepend:
dummy = root.f_wildcard('$', -1)
crun = root.f_wildcard('$')
if any(name in root._run_information for name in split_names):
pass
elif any(name == dummy for name in split_names):
pass
elif (group_type_name == RESULT_GROUP or
group_type_name == DERIVED_PARAMETER_GROUP):
if start_node.v_depth == 0:
prepend += ['runs', crun]
elif start_node.v_depth == 1:
if len(split_names) == 1 and split_names[0] == 'runs':
return split_names
else:
prepend += ['runs', crun]
elif start_node.v_depth == 2 and start_node.v_name == 'runs':
prepend += [crun]
if prepend:
split_names = prepend + split_names
return split_names | Adds the correct sub branch prefix to a given name.
Usually the prefix is the full name of the parent node. In case items are added
directly to the trajectory the prefixes are chosen according to the matching subbranch.
For example, this could be 'parameters' for parameters or 'results.run_00000004' for
results added to the fifth single run.
:param split_names:
List of names of the new node (e.g. ``['mynewgroupA', 'mynewgroupB', 'myresult']``).
:param start_node:
Parent node under which the new node should be added.
:param group_type_name:
Type name of subbranch the item belongs to
(e.g. 'PARAMETER_GROUP', 'RESULT_GROUP' etc).
:return: The name with the added prefix. |
def get_element_dt(self, el_name, tz=None, el_idx=0):
"""Return the text of the selected element as a ``datetime.datetime`` object.
The element text must be a ISO8601 formatted datetime
Args:
el_name : str
Name of element to use.
tz : datetime.tzinfo
Timezone in which to return the datetime.
- Without a timezone, other contextual information is required in order to
determine the exact represented time.
- If dt has timezone: The ``tz`` parameter is ignored.
- If dt is naive (without timezone): The timezone is set to ``tz``.
- ``tz=None``: Prevent naive dt from being set to a timezone. Without a
timezone, other contextual information is required in order to determine
the exact represented time.
- ``tz=d1_common.date_time.UTC()``: Set naive dt to UTC.
el_idx : int
Index of element to use in the event that there are multiple sibling
elements with the same name.
Returns:
datetime.datetime
"""
return iso8601.parse_date(self.get_element_by_name(el_name, el_idx).text, tz) | Return the text of the selected element as a ``datetime.datetime`` object.
The element text must be a ISO8601 formatted datetime
Args:
el_name : str
Name of element to use.
tz : datetime.tzinfo
Timezone in which to return the datetime.
- Without a timezone, other contextual information is required in order to
determine the exact represented time.
- If dt has timezone: The ``tz`` parameter is ignored.
- If dt is naive (without timezone): The timezone is set to ``tz``.
- ``tz=None``: Prevent naive dt from being set to a timezone. Without a
timezone, other contextual information is required in order to determine
the exact represented time.
- ``tz=d1_common.date_time.UTC()``: Set naive dt to UTC.
el_idx : int
Index of element to use in the event that there are multiple sibling
elements with the same name.
Returns:
datetime.datetime |
def cctop_submit(seq_str):
"""Submit a protein sequence string to CCTOP and return the job ID.
Args:
seq_str (str): Protein sequence as a string
Returns:
dict: Job ID on the CCTOP server
"""
url = 'http://cctop.enzim.ttk.mta.hu/php/submit.php?sequence={}&tmFilter&signalPred'.format(seq_str)
r = requests.post(url)
jobid = r.text.split('ID: ')[1]
return jobid | Submit a protein sequence string to CCTOP and return the job ID.
Args:
seq_str (str): Protein sequence as a string
Returns:
dict: Job ID on the CCTOP server |
def normalize_per_cell_weinreb16_deprecated(
X,
max_fraction=1,
mult_with_mean=False,
) -> np.ndarray:
"""Normalize each cell [Weinreb17]_.
This is a deprecated version. See `normalize_per_cell` instead.
Normalize each cell by UMI count, so that every cell has the same total
count.
Parameters
----------
X : np.ndarray
Expression matrix. Rows correspond to cells and columns to genes.
max_fraction : float, optional
Only use genes that make up more than max_fraction of the total
reads in every cell.
mult_with_mean: bool, optional
Multiply the result with the mean of total counts.
Returns
-------
Normalized version of the original expression matrix.
"""
if max_fraction < 0 or max_fraction > 1:
raise ValueError('Choose max_fraction between 0 and 1.')
counts_per_cell = X.sum(1).A1 if issparse(X) else X.sum(1)
gene_subset = np.all(X <= counts_per_cell[:, None] * max_fraction, axis=0)
if issparse(X): gene_subset = gene_subset.A1
tc_include = X[:, gene_subset].sum(1).A1 if issparse(X) else X[:, gene_subset].sum(1)
X_norm = X.multiply(csr_matrix(1/tc_include[:, None])) if issparse(X) else X / tc_include[:, None]
if mult_with_mean:
X_norm *= np.mean(counts_per_cell)
return X_norm | Normalize each cell [Weinreb17]_.
This is a deprecated version. See `normalize_per_cell` instead.
Normalize each cell by UMI count, so that every cell has the same total
count.
Parameters
----------
X : np.ndarray
Expression matrix. Rows correspond to cells and columns to genes.
max_fraction : float, optional
Only use genes that make up more than max_fraction of the total
reads in every cell.
mult_with_mean: bool, optional
Multiply the result with the mean of total counts.
Returns
-------
Normalized version of the original expression matrix. |
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values."""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'") | Do basic checks on matrix covariance sizes and values. |
def migrateFileFields(portal):
"""
This function walks over all attachment types and migrates their FileField
fields.
"""
portal_types = [
"Attachment",
"ARImport",
"Instrument",
"InstrumentCertification",
"Method",
"Multifile",
"Report",
"ARReport",
"SamplePoint"]
for portal_type in portal_types:
# Do the migration
migrate_to_blob(
portal,
portal_type=portal_type,
remove_old_value=True) | This function walks over all attachment types and migrates their FileField
fields. |
def get_delta(D, k):
'''Calculate the k-th order trend filtering matrix given the oriented edge
incidence matrix and the value of k.'''
if k < 0:
raise Exception('k must be at least 0th order.')
result = D
for i in range(k):
result = D.T.dot(result) if i % 2 == 0 else D.dot(result)
return result | Calculate the k-th order trend filtering matrix given the oriented edge
incidence matrix and the value of k. |
def get_element_ids(self, prefix_id):
"""
Returns a single or a list of element ids, one for each input widget of this field
"""
if isinstance(self.widget, widgets.MultiWidget):
ids = ['{0}_{1}_{2}'.format(prefix_id, self.name, field_name) for field_name in self.widget]
elif isinstance(self.widget, (widgets.SelectMultiple, widgets.RadioSelect)):
ids = ['{0}_{1}_{2}'.format(prefix_id, self.name, k) for k in range(len(self.widget.choices))]
else:
ids = ['{0}_{1}'.format(prefix_id, self.name)]
return ids | Returns a single or a list of element ids, one for each input widget of this field |
def set_cookie( # type: ignore
self,
key: str,
value: AnyStr='',
max_age: Optional[Union[int, timedelta]]=None,
expires: Optional[datetime]=None,
path: str='/',
domain: Optional[str]=None,
secure: bool=False,
httponly: bool=False,
) -> None:
"""Set a cookie in the response headers.
The arguments are the standard cookie morsels and this is a
wrapper around the stdlib SimpleCookie code.
"""
if isinstance(value, bytes):
value = value.decode() # type: ignore
cookie = create_cookie(key, value, max_age, expires, path, domain, secure, httponly) # type: ignore # noqa: E501
self.headers.add('Set-Cookie', cookie.output(header='')) | Set a cookie in the response headers.
The arguments are the standard cookie morsels and this is a
wrapper around the stdlib SimpleCookie code. |
def _parse_notes_dict(sbase):
""" Creates dictionary of COBRA notes.
Parameters
----------
sbase : libsbml.SBase
Returns
-------
dict of notes
"""
notes = sbase.getNotesString()
if notes and len(notes) > 0:
pattern = r"<p>\s*(\w+\s*\w*)\s*:\s*([\w|\s]+)<"
matches = re.findall(pattern, notes)
d = {k.strip(): v.strip() for (k, v) in matches}
return {k: v for k, v in d.items() if len(v) > 0}
else:
return {} | Creates dictionary of COBRA notes.
Parameters
----------
sbase : libsbml.SBase
Returns
-------
dict of notes |
def to_dict(self):
""" to_dict: puts channel data into the format that Kolibri Studio expects
Args: None
Returns: dict of channel data
"""
return {
"id": self.get_node_id().hex,
"name": self.title,
"thumbnail": self.thumbnail.filename if self.thumbnail else None,
"language" : self.language,
"description": self.description or "",
"license": self.license,
"source_domain": self.source_domain,
"source_id": self.source_id,
"ricecooker_version": __version__,
} | to_dict: puts channel data into the format that Kolibri Studio expects
Args: None
Returns: dict of channel data |
def edit_release_notes():
"""Use the default text $EDITOR to write release notes.
If $EDITOR is not set, use 'nano'."""
from tempfile import mkstemp
import os
import shlex
import subprocess
text_editor = shlex.split(os.environ.get('EDITOR', 'nano'))
fd, tmp = mkstemp(prefix='bumpversion-')
try:
os.close(fd)
with open(tmp, 'w') as f:
f.write(u"\n\n# Write release notes.\n"
u"# Lines starting with '#' will be ignored.")
subprocess.check_call(text_editor + [tmp])
with open(tmp, 'r') as f:
changes = "".join(
l for l in f.readlines() if not l.startswith('#'))
finally:
os.remove(tmp)
return changes | Use the default text $EDITOR to write release notes.
If $EDITOR is not set, use 'nano'. |
def canintersect(self, other):
'''
Intersection is not well-defined for all pairs of multipliers.
For example:
{2,3} & {3,4} = {3}
{2,} & {1,7} = {2,7}
{2} & {5} = ERROR
'''
return not (self.max < other.min or other.max < self.min) | Intersection is not well-defined for all pairs of multipliers.
For example:
{2,3} & {3,4} = {3}
{2,} & {1,7} = {2,7}
{2} & {5} = ERROR |
def get_failed_instruments(self):
"""Find invalid instruments
- instruments who have failed QC tests
- instruments whose certificate is out of date
- instruments which are disposed until next calibration test
Return a dictionary with all info about expired/invalid instruments
"""
bsc = api.get_tool("bika_setup_catalog")
insts = bsc(portal_type="Instrument", is_active=True)
for i in insts:
i = i.getObject()
instr = {
'uid': i.UID(),
'title': i.Title(),
}
if i.isValidationInProgress():
instr['link'] = '<a href="%s/validations">%s</a>' % (
i.absolute_url(), i.Title()
)
self.nr_failed += 1
self.failed['validation'].append(instr)
elif i.isCalibrationInProgress():
instr['link'] = '<a href="%s/calibrations">%s</a>' % (
i.absolute_url(), i.Title()
)
self.nr_failed += 1
self.failed['calibration'].append(instr)
elif i.isOutOfDate():
instr['link'] = '<a href="%s/certifications">%s</a>' % (
i.absolute_url(), i.Title()
)
self.nr_failed += 1
self.failed['out-of-date'].append(instr)
elif not i.isQCValid():
instr['link'] = '<a href="%s/referenceanalyses">%s</a>' % (
i.absolute_url(), i.Title()
)
self.nr_failed += 1
self.failed['qc-fail'].append(instr)
elif i.getDisposeUntilNextCalibrationTest():
instr['link'] = '<a href="%s/referenceanalyses">%s</a>' % (
i.absolute_url(), i.Title()
)
self.nr_failed += 1
self.failed['next-test'].append(instr) | Find invalid instruments
- instruments who have failed QC tests
- instruments whose certificate is out of date
- instruments which are disposed until next calibration test
Return a dictionary with all info about expired/invalid instruments |
def _make_intermediate_dirs(sftp_client, remote_directory):
"""
Create all the intermediate directories in a remote host
:param sftp_client: A Paramiko SFTP client.
:param remote_directory: Absolute Path of the directory containing the file
:return:
"""
if remote_directory == '/':
sftp_client.chdir('/')
return
if remote_directory == '':
return
try:
sftp_client.chdir(remote_directory)
except IOError:
dirname, basename = os.path.split(remote_directory.rstrip('/'))
_make_intermediate_dirs(sftp_client, dirname)
sftp_client.mkdir(basename)
sftp_client.chdir(basename)
return | Create all the intermediate directories in a remote host
:param sftp_client: A Paramiko SFTP client.
:param remote_directory: Absolute Path of the directory containing the file
:return: |
def digests_are_equal(digest1, digest2):
"""
<Purpose>
While protecting against timing attacks, compare the hexadecimal arguments
and determine if they are equal.
<Arguments>
digest1:
The first hexadecimal string value to compare.
digest2:
The second hexadecimal string value to compare.
<Exceptions>
securesystemslib.exceptions.FormatError: If the arguments are improperly
formatted.
<Side Effects>
None.
<Return>
Return True if 'digest1' is equal to 'digest2', False otherwise.
"""
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.HEX_SCHEMA.check_match(digest1)
securesystemslib.formats.HEX_SCHEMA.check_match(digest2)
if len(digest1) != len(digest2):
return False
are_equal = True
for element in range(len(digest1)):
if digest1[element] != digest2[element]:
are_equal = False
return are_equal | <Purpose>
While protecting against timing attacks, compare the hexadecimal arguments
and determine if they are equal.
<Arguments>
digest1:
The first hexadecimal string value to compare.
digest2:
The second hexadecimal string value to compare.
<Exceptions>
securesystemslib.exceptions.FormatError: If the arguments are improperly
formatted.
<Side Effects>
None.
<Return>
Return True if 'digest1' is equal to 'digest2', False otherwise. |
def trim_wav_pydub(in_path: Path, out_path: Path,
start_time: int, end_time: int) -> None:
""" Crops the wav file. """
logger.info(
"Using pydub/ffmpeg to create {} from {}".format(out_path, in_path) +
" using a start_time of {} and an end_time of {}".format(start_time,
end_time))
if out_path.is_file():
return
# TODO add logging here
#print("in_fn: {}".format(in_fn))
#print("out_fn: {}".format(out_fn))
in_ext = in_path.suffix[1:]
out_ext = out_path.suffix[1:]
audio = AudioSegment.from_file(str(in_path), in_ext)
trimmed = audio[start_time:end_time]
# pydub evidently doesn't actually use the parameters when outputting wavs,
# since it doesn't use FFMPEG to deal with outputtting WAVs. This is a bit
# of a leaky abstraction. No warning is given, so normalization to 16Khz
# mono wavs has to happen later. Leaving the parameters here in case it
# changes
trimmed.export(str(out_path), format=out_ext,
parameters=["-ac", "1", "-ar", "16000"]) | Crops the wav file. |
def _buildDict(self):
'''
Builds the isle textfile into a dictionary for fast searching
'''
lexDict = {}
with io.open(self.islePath, "r", encoding='utf-8') as fd:
wordList = [line.rstrip('\n') for line in fd]
for row in wordList:
word, pronunciation = row.split(" ", 1)
word, extraInfo = word.split("(", 1)
extraInfo = extraInfo.replace(")", "")
extraInfoList = [segment for segment in extraInfo.split(",")
if ("_" not in segment and "+" not in segment and
':' not in segment and segment != '')]
lexDict.setdefault(word, [])
lexDict[word].append((pronunciation, extraInfoList))
return lexDict | Builds the isle textfile into a dictionary for fast searching |
def ac_viz(acdata):
'''
Adapted from Gerry Harp at SETI.
Slightly massages the autocorrelated calculation result for better visualization.
In particular, the natural log of the data are calculated and the
values along the subband edges are set to the maximum value of the data,
and the t=0 delay of the autocorrelation result are set to the value of the t=-1 delay.
This is allowed because the t=0, and subband edges do not carry any information.
To avoid log(0), a value of 0.000001 is added to all array elements before being logged.
'''
acdata = np.log(acdata+0.000001) # log to reduce darkening on sides of spectrum, due to AC triangling
acdata[:, :, acdata.shape[2]/2] = acdata[:, :, acdata.shape[2]/2 - 1] # vals at zero delay set to symmetric neighbor vals
acdata[:, :, acdata.shape[2] - 1] = np.max(acdata) # visualize subband edges
return acdata | Adapted from Gerry Harp at SETI.
Slightly massages the autocorrelated calculation result for better visualization.
In particular, the natural log of the data are calculated and the
values along the subband edges are set to the maximum value of the data,
and the t=0 delay of the autocorrelation result are set to the value of the t=-1 delay.
This is allowed because the t=0, and subband edges do not carry any information.
To avoid log(0), a value of 0.000001 is added to all array elements before being logged. |
def ensure_parent_id(self):
"""If current trace_parent has no span_id, generate one, then return it
This is used to generate a span ID which the RUM agent will use to correlate
the RUM transaction with the backend transaction.
"""
if self.trace_parent.span_id == self.id:
self.trace_parent.span_id = "%016x" % random.getrandbits(64)
logger.debug("Set parent id to generated %s", self.trace_parent.span_id)
return self.trace_parent.span_id | If current trace_parent has no span_id, generate one, then return it
This is used to generate a span ID which the RUM agent will use to correlate
the RUM transaction with the backend transaction. |
def expected_error_messages(*error_messages):
"""
Decorator expecting defined error messages at the end of test method. As
param use what
:py:meth:`~.WebdriverWrapperErrorMixin.get_error_messages`
returns.
.. versionadded:: 2.0
Before this decorator was called ``ShouldBeError``.
"""
def wrapper(func):
setattr(func, EXPECTED_ERROR_MESSAGES, error_messages)
return func
return wrapper | Decorator expecting defined error messages at the end of test method. As
param use what
:py:meth:`~.WebdriverWrapperErrorMixin.get_error_messages`
returns.
.. versionadded:: 2.0
Before this decorator was called ``ShouldBeError``. |
def run_from_argv(self, argv):
"""Overriden in order to access the command line arguments."""
self.argv_string = ' '.join(argv)
super(EmailNotificationCommand, self).run_from_argv(argv) | Overriden in order to access the command line arguments. |
def install(editable=True):
"""Install this component (or remove and reinstall)"""
try:
__import__(package['name'])
except ImportError:
pass
else:
run("pip uninstall --quiet -y %s" % package['name'], warn=True)
cmd = "pip install --quiet "
cmd += "-e ." if editable else "."
run(cmd, warn=True) | Install this component (or remove and reinstall) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.