code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def _iterate_fields_cond(self, pkt, val, use_val):
"""Internal function used by _find_fld_pkt & _find_fld_pkt_val"""
# Iterate through the fields
for fld, cond in self.flds:
if isinstance(cond, tuple):
if use_val:
if cond[1](pkt, val):
return fld
continue
else:
cond = cond[0]
if cond(pkt):
return fld
return self.dflt | Internal function used by _find_fld_pkt & _find_fld_pkt_val |
def step_command_output_should_not_contain_log_records_from_categories(context):
    """
    Verifies that the command output does not contain log records from
    the provided log categories (in any order).

    .. code-block: gherkin

        Given I define the log record schema:
            | category | level | message |
            | root     | ERROR | __LOG_MESSAGE__ |
        Then the command output should not contain log records from categories:
            | category |
            | bar |
    """
    assert context.table, "REQUIRE: context.table"
    context.table.require_column("category")

    # Fill in level/message from the schema for each category row, then
    # delegate to the generic "should not contain log records" step.
    schema = context.log_record_row_schema
    LogRecordTable.annotate_with_row_schema(context.table, schema)
    step_command_output_should_not_contain_log_records(context)
    # Restore the table to category-only form for subsequent steps.
    context.table.remove_columns(["level", "message"])
def get_assignments_by_sis_course_id(self, sis_course_id):
    """
    Return assignment analytics data for the given SIS course id.

    https://canvas.instructure.com/doc/api/analytics.html#method.analytics_api.course_assignments
    """
    sis_id = self._sis_id(sis_course_id, sis_field="course")
    url = "/api/v1/courses/%s/analytics/assignments.json" % (sis_id,)
    return self._get_resource(url)
def change_bgcolor_enable(self, state):
    """
    Keep the column min/max option enabled only while background
    coloring is active (and the data is not a Series).
    """
    self.dataModel.bgcolor(state)
    enable_minmax = not self.is_series and state > 0
    self.bgcolor_global.setEnabled(enable_minmax)
def is_valid_callsign(self, callsign, timestamp=timestamp_now):
    """ Checks if a callsign is valid

    Args:
        callsign (str): Amateur Radio callsign
        timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)

    Returns:
        bool: True / False

    Example:
        The following checks if "DH1TW" is a valid callsign

        >>> from pyhamtools import LookupLib, Callinfo
        >>> my_lookuplib = LookupLib(lookuptype="countryfile")
        >>> cic = Callinfo(my_lookuplib)
        >>> cic.is_valid_callsign("DH1TW")
        True
    """
    try:
        # get_all() raises KeyError for unknown callsigns; a truthy
        # result means the callsign could be resolved.
        if self.get_all(callsign, timestamp):
            return True
    except KeyError:
        pass
    # BUG FIX: previously the function fell off the end (returning None)
    # when get_all() returned a falsy value; always return a bool now,
    # matching the documented True/False contract.
    return False
def _ending_consonants_only(self, letters: List[str]) -> List[int]:
"""Return a list of positions for ending consonants."""
reversed_letters = list(reversed(letters))
length = len(letters)
for idx, letter in enumerate(reversed_letters):
if not self._contains_vowels(letter) and self._contains_consonants(letter):
return [(length - idx) - 1]
if self._contains_vowels(letter):
return []
if self._contains_vowels(letter) and self._contains_consonants(letter):
return []
return [] | Return a list of positions for ending consonants. |
def is_active(self):
    """
    Return True if the logical volume is active, False otherwise.
    """
    # Open the handle, query activity, then release it again.
    self.open()
    state = lvm_lv_is_active(self.__lvh)
    self.close()
    return bool(state)
def fetch_items(self, category, **kwargs):
    """Fetch the messages.

    :param category: the category of items to fetch
    :param kwargs: backend arguments

    :returns: a generator of items
    """
    from_date = kwargs['from_date']

    logger.info("Looking for messages from '%s' since %s",
                self.url, str(from_date))

    # Mirror the remote archive locally, then parse what was fetched.
    mailing_list = HyperKittyList(self.url, self.dirpath)
    mailing_list.fetch(from_date=from_date)

    for message in self._fetch_and_parse_messages(mailing_list, from_date):
        yield message

    logger.info("Fetch process completed")
def auto_delete_files_on_instance_change(
        instance: Any,
        fieldnames: Iterable[str],
        model_class) -> None:
    """
    Deletes files from filesystem when object is changed.

    model_class: ``Type[Model]``
    ... only the type checker in Py3.5 is broken; v.s.
    """
    if not instance.pk:
        # Instance not yet saved in the database; nothing to compare.
        return
    try:
        # noinspection PyUnresolvedReferences
        previous = model_class.objects.get(pk=instance.pk)
    except model_class.DoesNotExist:
        # Old version gone from the database entirely.
        return
    for name in fieldnames:
        old_field = getattr(previous, name, None)
        if not old_field:
            continue
        new_field = getattr(instance, name, None)
        if old_field == new_field:
            continue
        # File was replaced or cleared; remove the stale file on disk.
        if os.path.isfile(old_field.path):
            os.remove(old_field.path)
def resync(self):
    """Make sure we can ping the head and assigned node.
    Possibly after an env.exit()."""
    reached = 0
    for head in (True, False):
        # Retry for up to 30 attempts, 10 s apart, per endpoint.
        for _attempt in range(30):
            try:
                self.status(head)
            except Exception as exc:
                self._log_error(exc)
                time.sleep(10)
            else:
                reached += 1
                break
    if reached != 2:
        raise EnvException("Failed to contact service" + (" head" if reached == 0 else ""))
def _initialize(self):
    """Initialize transfer."""
    cookies = self.session.cookies
    payload = {
        'apikey': cookies.get('apikey'),
        'source': cookies.get('source'),
    }
    if self.fm_user.logged_in:
        payload['logintoken'] = cookies.get('logintoken')
    payload.update(self.transfer_info)

    method, url = get_URL('init')
    res = getattr(self.session, method)(url, params=payload)

    if res.status_code != 200:
        # Delegate error reporting/raising on non-OK responses.
        hellraiser(res)
        return

    for key in ('transferid', 'transferkey', 'transferurl'):
        self.transfer_info[key] = res.json().get(key)
def show(self):
    """Display menus and connect event signals."""
    self.menuCount = self.menuCount + 1
    self.parent.addLayout(self._logSelectLayout)
    self._connectSlots()
def update_data_run(self, event_to_wait_on):
    """Thread body that waits on an event set by the comm process and
    runs the update_data_func callback in the main process's context.
    """
    # With daemon=True this thread is reaped automatically at exit.
    while True:
        if not event_to_wait_on.wait():
            break
        event_to_wait_on.clear()
        if self.update_data_callback_kill_event.is_set():
            return
        self.update_data_func()
def parse_series(s):
    """
    Parses things like '1n+2', or 'an+b' generally, returning (a, b)
    """
    if isinstance(s, Element):
        s = s._format_element()
    if not s or s == '*':
        # Happens when there's nothing, which the CSS parser thinks of as *
        return (0, 0)
    if isinstance(s, int):
        # Happens when you just get a number
        return (0, s)
    keywords = {'odd': (2, 1), 'even': (2, 0), 'n': (1, 0)}
    if s in keywords:
        return keywords[s]
    if 'n' not in s:
        # Just a b
        return (0, int(s))
    a_text, b_text = s.split('n', 1)

    def _coefficient(text, default):
        # '' -> default, bare sign -> signed 1, otherwise parse the int.
        if not text:
            return default
        if text in ('-', '+'):
            return int(text + '1')
        return int(text)

    return (_coefficient(a_text, 1), _coefficient(b_text, 0))
def _send_consumer_aware_request(self, group, payloads, encoder_fn, decoder_fn):
    """
    Send a list of requests to the consumer coordinator for the group
    specified using the supplied encode/decode functions. As the payloads
    that use consumer-aware requests do not contain the group (e.g.
    OffsetFetchRequest), all payloads must be for a single group.

    Arguments:
        group: the name of the consumer group (str) the payloads are for
        payloads: list of object-like entities with topic (str) and
            partition (int) attributes; payloads with duplicate
            topic+partition are not supported.
        encoder_fn: a method to encode the list of payloads to a request
            body, must accept client_id, correlation_id, and payloads as
            keyword arguments
        decoder_fn: a method to decode a response body into response
            objects. The response objects must be object-like and have
            topic and partition attributes. Pass None when no response
            is expected (the function then returns an empty list).

    Returns:
        List of response objects in the same order as the supplied
        payloads; entries are FailedPayloadsError instances on
        connection errors, or None when decoder_fn is None.
    """
    # encoders / decoders do not maintain ordering currently
    # so we need to keep this so we can rebuild order before returning
    original_ordering = [(p.topic, p.partition) for p in payloads]

    broker = self._get_coordinator_for_group(group)

    # Send the list of request payloads and collect the responses and
    # errors
    responses = {}
    requestId = self._next_id()
    log.debug('Request %s to %s: %s', requestId, broker, payloads)
    request = encoder_fn(client_id=self.client_id,
                         correlation_id=requestId, payloads=payloads)

    # Send the request, recv the response
    try:
        conn = self._get_conn(broker.host.decode('utf-8'), broker.port)
        conn.send(requestId, request)
    except ConnectionError as e:
        log.warning('ConnectionError attempting to send request %s '
                    'to server %s: %s', requestId, broker, e)
        # Mark every payload failed so callers see per-payload errors
        # rather than a raised exception.
        for payload in payloads:
            topic_partition = (payload.topic, payload.partition)
            responses[topic_partition] = FailedPayloadsError(payload)

    # No exception, try to get response
    else:

        # decoder_fn=None signal that the server is expected to not
        # send a response. This probably only applies to
        # ProduceRequest w/ acks = 0
        if decoder_fn is None:
            log.debug('Request %s does not expect a response '
                      '(skipping conn.recv)', requestId)
            for payload in payloads:
                topic_partition = (payload.topic, payload.partition)
                responses[topic_partition] = None
            # NOTE: intentionally returns [] here, not the ordered
            # responses -- there is nothing meaningful to return.
            return []

        try:
            response = conn.recv(requestId)
        except ConnectionError as e:
            log.warning('ConnectionError attempting to receive a '
                        'response to request %s from server %s: %s',
                        requestId, broker, e)
            for payload in payloads:
                topic_partition = (payload.topic, payload.partition)
                responses[topic_partition] = FailedPayloadsError(payload)

        else:
            _resps = []
            for payload_response in decoder_fn(response):
                topic_partition = (payload_response.topic,
                                   payload_response.partition)
                responses[topic_partition] = payload_response
                _resps.append(payload_response)
            log.debug('Response %s: %s', requestId, _resps)

    # Return responses in the same order as provided
    return [responses[tp] for tp in original_ordering]
def load_json(path, quiet=False, cli=False):
    """
    Load a json serialized object and ensure it matches to the current
    Assembly object format.

    NOTE(review): this function relies on Python 2 semantics
    (dict.iteritems(), indexing into dict.keys()); it will not run
    under Python 3 -- confirm before reuse.
    """
    ## load the JSON string and try with name+.json
    checkfor = [path+".json", path]
    for inpath in checkfor:
        inpath = inpath.replace("~", os.path.expanduser("~"))
        try:
            with open(inpath, 'r') as infile:
                ## uses _tup_and_byte to ensure ascii and tuples are correct
                fullj = json.loads(infile.read(), object_hook=_tup_and_byte)
        except IOError:
            ## if neither candidate path opened, `fullj` stays unbound and
            ## the UnboundLocalError handler below reports it
            pass

    ## create a new empty Assembly
    try:
        oldname = fullj["assembly"].pop("name")
        olddir = fullj["assembly"]["dirs"]["project"]
        oldpath = os.path.join(olddir, os.path.splitext(oldname)[0]+".json")
        null = ip.Assembly(oldname, quiet=True, cli=cli)
    except (UnboundLocalError, AttributeError) as inst:
        raise IPyradWarningExit("""
    Could not find saved Assembly file (.json) in expected location.
    Checks in: [project_dir]/[assembly_name].json
    Checked: {}
    """.format(inpath))

    ## print msg with shortpath
    if not quiet:
        oldpath = oldpath.replace(os.path.expanduser("~"), "~")
        print("{}loading Assembly: {}".format(null._spacer, oldname))
        print("{}from saved path: {}".format(null._spacer, oldpath))

    ## First get the samples. Create empty sample dict of correct length
    samplekeys = fullj["assembly"].pop("samples")
    null.samples = {name: "" for name in samplekeys}

    ## Next get paramsdict and use set_params to convert values back to
    ## the correct dtypes. Allow set_params to fail because the object will
    ## be subsequently updated by the params from the params file, which may
    ## correct any errors/incompatibilities in the old params file
    oldparams = fullj["assembly"].pop("paramsdict")
    for param, val in oldparams.iteritems():
        ## a fix for backward compatibility with deprecated options
        if param not in ["assembly_name", "excludes", "outgroups"]:
            try:
                null.set_params(param, val)
            except IPyradWarningExit as inst:
                #null.set_params(param, "")
                LOGGER.warning("""
    Load assembly error setting params. Not critical b/c new params file may
    correct the problem. Recorded here for debugging:
    {}
    """.format(inst))

    ## Import the hackersonly dict. In this case we don't have the nice
    ## set_params so we're shooting from the hip to reset the values
    try:
        oldhackersonly = fullj["assembly"].pop("_hackersonly")
        for param, val in oldhackersonly.iteritems():
            if val == None:
                null._hackersonly[param] = None
            else:
                null._hackersonly[param] = val
    except Exception as inst:
        LOGGER.warning("""
    Load assembly error resetting hackersonly dict element. We will just use
    the default value in the current assembly.""")

    ## Check remaining attributes of Assembly and Raise warning if attributes
    ## do not match up between old and new objects
    newkeys = null.__dict__.keys()
    oldkeys = fullj["assembly"].keys()

    ## find shared keys and deprecated keys
    sharedkeys = set(oldkeys).intersection(set(newkeys))
    lostkeys = set(oldkeys).difference(set(newkeys))

    ## raise warning if there are lost/deprecated keys
    if lostkeys:
        LOGGER.warning("""
    load_json found {a} keys that are unique to the older Assembly.
    - assembly [{b}] v.[{c}] has: {d}
    - current assembly is v.[{e}]
    """.format(a=len(lostkeys),
               b=oldname,
               c=fullj["assembly"]["_version"],
               d=lostkeys,
               e=null._version))

    ## load in remaining shared Assembly attributes to null
    for key in sharedkeys:
        null.__setattr__(key, fullj["assembly"][key])

    ## load in svd results if they exist
    try:
        if fullj["assembly"]["svd"]:
            null.__setattr__("svd", fullj["assembly"]["svd"])
            null.svd = ObjDict(null.svd)
    except Exception:
        LOGGER.debug("skipping: no svd results present in old assembly")

    ## Now, load in the Sample objects json dicts
    sample_names = fullj["samples"].keys()
    if not sample_names:
        raise IPyradWarningExit("""
    No samples found in saved assembly. If you are just starting a new
    assembly the file probably got saved erroneously, so it's safe to try
    removing the assembly file (e.g., rm {}.json) and restarting.
    If you fully completed step 1 and you see this message you should probably
    contact the developers.
    """.format(inpath))

    sample_keys = fullj["samples"][sample_names[0]].keys()
    stats_keys = fullj["samples"][sample_names[0]]["stats"].keys()
    stats_dfs_keys = fullj["samples"][sample_names[0]]["stats_dfs"].keys()
    ind_statkeys = \
        [fullj["samples"][sample_names[0]]["stats_dfs"][i].keys() \
        for i in stats_dfs_keys]
    ind_statkeys = list(itertools.chain(*ind_statkeys))

    ## check against a null sample
    nsamp = ip.Sample()
    newkeys = nsamp.__dict__.keys()
    newstats = nsamp.__dict__["stats"].keys()
    newstatdfs = nsamp.__dict__["stats_dfs"].keys()
    newindstats = [nsamp.__dict__["stats_dfs"][i].keys() for i in newstatdfs]
    newindstats = list(itertools.chain(*[i.values for i in newindstats]))

    ## different in attributes?
    diffattr = set(sample_keys).difference(newkeys)
    diffstats = set(stats_keys).difference(newstats)
    diffindstats = set(ind_statkeys).difference(newindstats)

    ## Raise warning if any oldstats were lost or deprecated
    alldiffs = diffattr.union(diffstats).union(diffindstats)
    if any(alldiffs):
        LOGGER.warning("""
    load_json found {a} keys that are unique to the older Samples.
    - assembly [{b}] v.[{c}] has: {d}
    - current assembly is v.[{e}]
    """.format(a=len(alldiffs),
               b=oldname,
               c=fullj["assembly"]["_version"],
               d=alldiffs,
               e=null._version))

    ## save stats and statsfiles to Samples
    for sample in null.samples:
        ## create a null Sample
        null.samples[sample] = ip.Sample()

        ## save stats
        sdat = fullj["samples"][sample]['stats']
        ## Reorder the keys so they ascend by step, only include
        ## stats that are actually in the sample. newstats is a
        ## list of the new sample stat names, and stats_keys
        ## are the names of the stats from the json file.
        newstats = [x for x in newstats if x in stats_keys]
        null.samples[sample].stats = pd.Series(sdat).reindex(newstats)

        ## save stats_dfs
        for statskey in stats_dfs_keys:
            null.samples[sample].stats_dfs[statskey] = \
                pd.Series(fullj["samples"][sample]["stats_dfs"][statskey])\
                .reindex(nsamp.__dict__["stats_dfs"][statskey].keys())

        ## save Sample files
        for filehandle in fullj["samples"][sample]["files"].keys():
            null.samples[sample].files[filehandle] = \
                fullj["samples"][sample]["files"][filehandle]

    ## build the Assembly object stats_dfs
    for statskey in stats_dfs_keys:
        indstat = null._build_stat(statskey)
        if not indstat.empty:
            null.stats_dfs[statskey] = indstat

    ## add remaning attributes to null Samples
    shared_keys = set(sample_keys).intersection(newkeys)
    shared_keys.discard("stats")
    shared_keys.discard("files")
    shared_keys.discard("stats_files")
    shared_keys.discard("stats_dfs")

    for sample in null.samples:
        ## set the others
        for key in shared_keys:
            null.samples[sample].__setattr__(key, fullj["samples"][sample][key])

    ## ensure objects are object dicts
    null.dirs = ObjDict(null.dirs)
    null.stats_files = ObjDict(null.stats_files)
    null.stats_dfs = ObjDict(null.stats_dfs)
    null.populations = ObjDict(null.populations)
    null.outfiles = ObjDict(null.outfiles)

    return null
def heatmap(adata, var_names, groups=None, groupby=None, annotations=None, use_raw=False, layers=['X'], color_map=None,
            color_map_anno=None, colorbar=True, row_width=None, xlabel=None, title=None, figsize=None, dpi=None,
            show=True, save=None, ax=None, **kwargs):
    """\
    Plot pseudotimeseries for genes as heatmap.

    Arguments
    ---------
    adata: :class:`~anndata.AnnData`
        Annotated data matrix.
    var_names: `str`, list of `str`
        Names of variables to use for the plot.
    groups: `str`, list of `str` or `None` (default: `None`)
        Groups selected to plot. Must be an element of adata.obs[groupby].
    groupby: `str` or `None` (default: `None`)
        Key in adata.obs. Indicates how to group the plot.
    annotations: `str`, list of `str` or `None` (default: `None`)
        Key in adata.obs. Annotations are plotted in the last row.
    use_raw: `bool` (default: `False`)
        If true, moments are used instead of raw data.
    layers: `str`, list of `str` or `None` (default: `['X']`)
        Selected layers.
    color_map: `str`, list of `str` or `None` (default: `None`)
        String denoting matplotlib color map for the heat map.
        There must be one list entry for each layer.
    color_map_anno: `str`, list of `str` or `None` (default: `None`)
        String denoting matplotlib color map for the annotations.
        There must be one list entry for each annotation.
    colorbar: `bool` (default: `True`)
        If True, a colormap for each layer is added on the right bottom corner.
    row_width: `float` (default: `None`)
        Constant width of all rows.
    xlabel:
        Label for the x-axis.
    title: `str` or `None` (default: `None`)
        Main plot title.
    figsize: tuple (default: `(7,5)`)
        Figure size.
    dpi: `int` (default: 80)
        Figure dpi.
    show: `bool`, optional (default: `None`)
        Show the plot, do not return axis.
    save: `bool` or `str`, optional (default: `None`)
        If `True` or a `str`, save the figure. A string is appended to the default filename.
        Infer the filetype if ending on {'.pdf', '.png', '.svg'}.
    ax: `matplotlib.Axes`, optional (default: `None`)
        A matplotlib axes object. Only works if plotting a single component.

    Returns
    -------
    If `show==False` a `matplotlib.Axis`
    """
    # catch
    if 'velocity_pseudotime' not in adata.obs.keys():
        raise ValueError(
            'A function requires computation of the pseudotime'
            'for ordering at single-cell resolution')
    if annotations is None:
        annotations = []
    if isinstance(var_names, str):
        var_names = [var_names]
    if len(var_names) == 0:
        var_names = np.arange(adata.X.shape[1])
    # BUG FIX: `.ndim` only exists on arrays; plain lists of names used
    # to crash here. Only squeeze 2D arrays down to their first column.
    if isinstance(var_names, np.ndarray) and var_names.ndim == 2:
        var_names = var_names[:, 0]
    var_names = [name for name in var_names if name in adata.var_names]
    if len(var_names) == 0:
        raise ValueError(
            'The specified var_names are all not'
            'contained in the adata.var_names.')
    if layers is None:
        layers = ['X']
    if isinstance(layers, str):
        layers = [layers]
    layers = [layer for layer in layers if layer in adata.layers.keys() or layer == 'X']
    if len(layers) == 0:
        raise ValueError(
            'The selected layers are not contained'
            'in adata.layers.keys().')
    if not use_raw:
        # Map raw-data layer names to their moment ('Ms') counterparts.
        layers = np.array(layers)
        if 'X' in layers: layers[np.array([layer == 'X' for layer in layers])] = 'Ms'
        if 'spliced' in layers: layers[np.array([layer == 'spliced' for layer in layers])] = 'Ms'
        if 'unspliced' in layers: layers[np.array([layer == 'unspliced' for layer in layers])] = 'Ms'
        layers = list(layers)
    if 'Ms' in layers and 'Ms' not in adata.layers.keys():
        raise ValueError(
            'Moments have to be computed before'
            'using this plot function.')
    if 'Mu' in layers and 'Mu' not in adata.layers.keys():
        raise ValueError(
            'Moments have to be computed before'
            'using this plot function.')
    layers = unique(layers)

    # Number of rows to plot
    tot_len = len(var_names) * len(layers) + len(annotations)

    # init main figure
    # BUG FIX: copy the figsize before mutating it below -- assigning to
    # figsize[1] used to modify rcParams['figure.figsize'] in place (and
    # crashed on tuple input).
    figsize = list(rcParams['figure.figsize'] if figsize is None else figsize)
    if row_width is not None: figsize[1] = row_width * tot_len
    ax = pl.figure(figsize=figsize, dpi=dpi).gca() if ax is None else ax
    ax.set_yticks([])
    ax.set_xticks([])

    # groups bar
    ax_bounds = ax.get_position().bounds
    if groupby is not None:
        # catch
        if groupby not in adata.obs_keys():
            raise ValueError(
                'The selected groupby is not contained'
                'in adata.obs_keys().')
        if groups is None:  # Then use everything of that obs
            # NOTE(review): reads adata.obs.clusters rather than
            # adata.obs[groupby] -- looks like a latent bug; confirm.
            groups = unique(adata.obs.clusters.values)
        imlist = []
        for igroup, group in enumerate(groups):
            for ivar, var in enumerate(var_names):
                for ilayer, layer in enumerate(layers):
                    groups_axis = pl.axes([ax_bounds[0] + igroup * ax_bounds[2] / len(groups),
                                           ax_bounds[1] + ax_bounds[3] * (
                                               tot_len - ivar * len(layers) - ilayer - 1) / tot_len,
                                           ax_bounds[2] / len(groups),
                                           (ax_bounds[3] - ax_bounds[3] / tot_len * len(annotations)) / (
                                               len(var_names) * len(layers))])
                    # Get data to fill and reshape
                    dat = adata[:, var]
                    idx_group = [adata.obs[groupby] == group]
                    idx_group = np.array(idx_group[0].tolist())
                    idx_var = [vn in var_names for vn in adata.var_names]
                    idx_pt = np.array(adata.obs.velocity_pseudotime).argsort()
                    idx_pt = idx_pt[np.array(np.isnan(np.array(dat.obs.velocity_pseudotime)[idx_pt]) == False)]
                    if layer == 'X':
                        laydat = dat.X
                    else:
                        laydat = dat.layers[layer]
                    t1, t2, t3 = idx_group, idx_var, idx_pt
                    t1 = t1[t3]
                    # laydat = laydat[:, t2]  # select vars
                    laydat = laydat[t3]
                    laydat = laydat[t1]  # select ordered groups
                    if issparse(laydat):
                        laydat = laydat.A
                    # transpose X for ordering in direction var_names: up->downwards
                    laydat = laydat.T[::-1]
                    laydat = laydat.reshape((1, len(laydat)))  # ensure 1dimty
                    # plot
                    im = groups_axis.imshow(laydat, aspect='auto', interpolation="nearest", cmap=color_map[ilayer])
                    # Frames
                    if ilayer == 0:
                        groups_axis.spines['bottom'].set_visible(False)
                    # BUG FIX: was `len(layer) - 1` (length of the layer
                    # *name* string); the spine test must compare against
                    # the number of layers.
                    elif ilayer == len(layers) - 1:
                        groups_axis.spines['top'].set_visible(False)
                    else:
                        groups_axis.spines['top'].set_visible(False)
                        groups_axis.spines['bottom'].set_visible(False)
                    # Further visuals
                    if igroup == 0:
                        if colorbar:
                            if len(layers) % 2 == 0:
                                if ilayer == len(layers) / 2 - 1:
                                    pl.yticks([0.5], [var])
                                else:
                                    groups_axis.set_yticks([])
                            else:
                                if ilayer == (len(layers) - 1) / 2:
                                    pl.yticks([0], [var])
                                else:
                                    groups_axis.set_yticks([])
                        else:
                            pl.yticks([0], [layer + ' ' + var])
                    else:
                        groups_axis.set_yticks([])
                    groups_axis.set_xticks([])
                    if ilayer == 0 and ivar == 0:
                        groups_axis.set_title(str(group))
                    groups_axis.grid(False)
                    # handle needed as mappable for colorbar
                    if igroup == len(groups) - 1:
                        imlist.append(im)
            # further annotations for each group
            # (relies on t1/t3 from the last var/layer iteration)
            if annotations is not None:
                for ianno, anno in enumerate(annotations):
                    anno_axis = pl.axes([ax_bounds[0] + igroup * ax_bounds[2] / len(groups),
                                         ax_bounds[1] + ax_bounds[3] / tot_len * (len(annotations) - ianno - 1),
                                         ax_bounds[2] / len(groups),
                                         ax_bounds[3] / tot_len])
                    if is_categorical(adata, anno):
                        colo = interpret_colorkey(adata, anno)[t3][t1]
                        colo.reshape(1, len(colo))
                        mapper = np.vectorize(ColorConverter.to_rgb)
                        a = mapper(colo)
                        a = np.array(a).T
                        Y = a.reshape(1, len(colo), 3)
                    else:
                        Y = np.array(interpret_colorkey(adata, anno))[t3][t1]
                        Y = Y.reshape(1, len(Y))
                    img = anno_axis.imshow(Y, aspect='auto',
                                           interpolation='nearest', cmap=color_map_anno)
                    if igroup == 0:
                        anno_axis.set_yticklabels(['', anno, ''])
                        anno_axis.tick_params(axis='both', which='both', length=0)
                    else:
                        anno_axis.set_yticklabels([])
                        anno_axis.set_yticks([])
                    anno_axis.set_xticks([])
                    anno_axis.set_xticklabels([])
                    anno_axis.grid(False)
                    pl.ylim([.5, -.5])  # center ticks
    else:  # groupby is False
        imlist = []
        for ivar, var in enumerate(var_names):
            for ilayer, layer in enumerate(layers):
                ax_bounds = ax.get_position().bounds
                groups_axis = pl.axes([ax_bounds[0],
                                       ax_bounds[1] + ax_bounds[3] * (
                                           tot_len - ivar * len(layers) - ilayer - 1) / tot_len,
                                       ax_bounds[2],
                                       (ax_bounds[3] - ax_bounds[3] / tot_len * len(annotations)) / (
                                           len(var_names) * len(layers))])
                # Get data to fill
                dat = adata[:, var]
                idx = np.array(dat.obs.velocity_pseudotime).argsort()
                idx = idx[np.array(np.isnan(np.array(dat.obs.velocity_pseudotime)[idx]) == False)]
                if layer == 'X':
                    laydat = dat.X
                else:
                    laydat = dat.layers[layer]
                laydat = laydat[idx]
                if issparse(laydat):
                    laydat = laydat.A
                # transpose X for ordering in direction var_names: up->downwards
                laydat = laydat.T[::-1]
                laydat = laydat.reshape((1, len(laydat)))
                # plot
                im = groups_axis.imshow(laydat, aspect='auto', interpolation="nearest", cmap=color_map[ilayer])
                imlist.append(im)
                # Frames
                if ilayer == 0:
                    groups_axis.spines['bottom'].set_visible(False)
                # BUG FIX: `len(layer)` -> `len(layers)` (see above).
                elif ilayer == len(layers) - 1:
                    groups_axis.spines['top'].set_visible(False)
                else:
                    groups_axis.spines['top'].set_visible(False)
                    groups_axis.spines['bottom'].set_visible(False)
                # Further visuals
                groups_axis.set_xticks([])
                groups_axis.grid(False)
                pl.ylim([.5, -.5])  # center
                if colorbar:
                    if len(layers) % 2 == 0:
                        if ilayer == len(layers) / 2 - 1:
                            pl.yticks([0.5], [var])
                        else:
                            groups_axis.set_yticks([])
                    else:
                        if ilayer == (len(layers) - 1) / 2:
                            pl.yticks([0], [var])
                        else:
                            groups_axis.set_yticks([])
                else:
                    pl.yticks([0], [layer + ' ' + var])
        # further annotations bars
        # (relies on `idx` leaking from the loops above)
        if annotations is not None:
            for ianno, anno in enumerate(annotations):
                anno_axis = pl.axes([ax_bounds[0],
                                     ax_bounds[1] + ax_bounds[3] / tot_len * (len(annotations) - ianno - 1),
                                     ax_bounds[2],
                                     ax_bounds[3] / tot_len])
                dat = adata[:, var_names]
                if is_categorical(dat, anno):
                    colo = interpret_colorkey(dat, anno)[idx]
                    colo.reshape(1, len(colo))
                    mapper = np.vectorize(ColorConverter.to_rgb)
                    a = mapper(colo)
                    a = np.array(a).T
                    Y = a.reshape(1, len(idx), 3)
                else:
                    Y = np.array(interpret_colorkey(dat, anno)[idx]).reshape(1, len(idx))
                img = anno_axis.imshow(Y, aspect='auto', interpolation='nearest', cmap=color_map_anno)
                anno_axis.set_yticklabels(['', anno, ''])
                anno_axis.tick_params(axis='both', which='both', length=0)
                anno_axis.grid(False)
                anno_axis.set_xticks([])
                anno_axis.set_xticklabels([])
                pl.ylim([-.5, +.5])

    # Colorbar
    if colorbar:
        if len(layers) > 1:
            # One small colorbar per layer, stacked at the right edge.
            for ilayer, layer in enumerate(layers):
                w = 0.015 * 10 / figsize[0]
                x = ax_bounds[0] + ax_bounds[2] * 0.99 + 1.5 * w + w * 1.2 * ilayer
                y = ax_bounds[1]
                h = ax_bounds[3] * .3
                cbaxes = pl.axes([x, y, w, h])
                cb = pl.colorbar(mappable=imlist[ilayer], cax=cbaxes)
                pl.text(x - 40 * w, y + h * 4, layer, rotation=45, horizontalalignment='left',
                        verticalalignment='bottom')
                if ilayer == len(layers) - 1:
                    ext = abs(cb.vmin - cb.vmax)
                    cb.set_ticks([cb.vmin + 0.07 * ext, cb.vmax - 0.07 * ext])
                    cb.ax.set_yticklabels(['Low', 'High'])  # vertical colorbar
                else:
                    cb.set_ticks([])
        else:
            cbaxes = pl.axes([ax_bounds[0] + ax_bounds[2] + .01,
                              ax_bounds[1],
                              0.02,
                              ax_bounds[3] * .3])
            cb = pl.colorbar(mappable=im, cax=cbaxes)
            cb.set_ticks([cb.vmin, cb.vmax])
            cb.ax.set_yticklabels(['Low', 'High'])

    if xlabel is None: xlabel = 'velocity' + ' ' + 'pseudotime'
    if title is not None: ax.set_title(title, pad=30)
    if len(annotations) == 0:
        ax.set_xlabel(xlabel)
        ax.xaxis.labelpad = 20
    savefig_or_show('heatmap', dpi=dpi, save=save, show=show)
    if not show: return ax
def run(
    cmd,
    env=None,
    return_object=False,
    block=True,
    cwd=None,
    verbose=False,
    nospin=False,
    spinner_name=None,
    combine_stderr=True,
    display_limit=200,
    write_to_stdout=True,
):
    """Use `subprocess.Popen` to get the output of a command and decode it.
    :param list cmd: A list representing the command you want to run.
    :param dict env: Additional environment settings to pass through to the subprocess.
    :param bool return_object: When True, returns the whole subprocess instance
    :param bool block: When False, returns a potentially still-running :class:`subprocess.Popen` instance
    :param str cwd: Current working directory context to use for spawning the subprocess.
    :param bool verbose: Whether to print stdout in real time when non-blocking.
    :param bool nospin: Whether to disable the cli spinner.
    :param str spinner_name: The name of the spinner to use if enabled, defaults to bouncingBar
    :param bool combine_stderr: Optionally merge stdout and stderr in the subprocess, false if nonblocking.
    :param int display_limit: The max width of output lines to display when using a spinner.
    :param bool write_to_stdout: Whether to write to stdout when using a spinner, default True.
    :returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.
    .. Warning:: Merging standard out and standard error in a nonblocking subprocess
        can cause errors in some cases and may not be ideal. Consider disabling
        this functionality.
    """
    # Start from a copy of the current environment so caller overrides are additive.
    _env = os.environ.copy()
    if env:
        _env.update(env)
    if six.PY2:
        # Python 2 wants filesystem-encoded byte strings in the environment.
        fs_encode = partial(to_bytes, encoding=locale_encoding)
        _env = {fs_encode(k): fs_encode(v) for k, v in _env.items()}
    else:
        _env = {k: fs_str(v) for k, v in _env.items()}
    if not spinner_name:
        spinner_name = "bouncingBar"
    if six.PY2:
        if isinstance(cmd, six.string_types):
            cmd = cmd.encode("utf-8")
        elif isinstance(cmd, (list, tuple)):
            cmd = [c.encode("utf-8") for c in cmd]
    if not isinstance(cmd, Script):
        cmd = Script.parse(cmd)
    # NOTE(review): this also disables stderr merging for blocking runs, while
    # the docstring only promises "false if nonblocking" -- confirm intent.
    if block or not return_object:
        combine_stderr = False
    start_text = ""
    with spinner(
        spinner_name=spinner_name,
        start_text=start_text,
        nospin=nospin,
        write_to_stdout=write_to_stdout,
    ) as sp:
        # NOTE(review): write_to_stdout is hardcoded to True below, ignoring the
        # function parameter -- verify whether it should be passed through.
        return _create_subprocess(
            cmd,
            env=_env,
            return_object=return_object,
            block=block,
            cwd=cwd,
            verbose=verbose,
            spinner=sp,
            combine_stderr=combine_stderr,
            start_text=start_text,
            write_to_stdout=True,
        ) | Use `subprocess.Popen` to get the output of a command and decode it.
:param list cmd: A list representing the command you want to run.
:param dict env: Additional environment settings to pass through to the subprocess.
:param bool return_object: When True, returns the whole subprocess instance
:param bool block: When False, returns a potentially still-running :class:`subprocess.Popen` instance
    :param str cwd: Current working directory context to use for spawning the subprocess.
:param bool verbose: Whether to print stdout in real time when non-blocking.
:param bool nospin: Whether to disable the cli spinner.
:param str spinner_name: The name of the spinner to use if enabled, defaults to bouncingBar
:param bool combine_stderr: Optionally merge stdout and stderr in the subprocess, false if nonblocking.
    :param int display_limit: The max width of output lines to display when using a spinner.
:param bool write_to_stdout: Whether to write to stdout when using a spinner, default True.
:returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.
    .. Warning:: Merging standard out and standard error in a nonblocking subprocess
can cause errors in some cases and may not be ideal. Consider disabling
this functionality. |
def linearize_data_types(self):
        # type: () -> typing.List[UserDefined]
        """
        Returns a list of all data types used in the namespace. Because the
        inheritance of data types can be modeled as a DAG, the list will be a
        linearization of the DAG. It's ideal to generate data types in this
        order so that composite types that reference other composite types are
        defined in the correct order.
        """
        linearized_data_types = []
        seen_data_types = set()  # type: typing.Set[UserDefined]
        def add_data_type(data_type):
            # type: (UserDefined) -> None
            # Depth-first insertion: a type's parent is appended before the
            # type itself, yielding the required topological order.
            if data_type in seen_data_types:
                return
            elif data_type.namespace != self:
                # We're only concerned with types defined in this namespace.
                return
            if is_composite_type(data_type) and data_type.parent_type:
                add_data_type(data_type.parent_type)
            linearized_data_types.append(data_type)
            seen_data_types.add(data_type)
        for data_type in self.data_types:
            add_data_type(data_type)
        return linearized_data_types | Returns a list of all data types used in the namespace. Because the
inheritance of data types can be modeled as a DAG, the list will be a
linearization of the DAG. It's ideal to generate data types in this
order so that composite types that reference other composite types are
defined in the correct order. |
def _create(self):
        """Creates a new and empty database."""
        # Function-scope import -- presumably to avoid a circular import at
        # module load time; confirm before moving to module level.
        from .tools import makedirs_safe
        # create directory for sql database
        makedirs_safe(os.path.dirname(self._database))
        # create all the tables
        Base.metadata.create_all(self._engine)
        logger.debug("Created new empty database '%s'" % self._database) | Creates a new and empty database.
def no(self, text, count=None):
        """
        If count is 0, no, zero or nil, return 'no' followed by the plural
        of text.
        If count is one of:
            1, a, an, one, each, every, this, that
        return count followed by text.
        Otherwise return count followed by the plural of text.
        In the return value count is always followed by a space.
        Whitespace at the start and end is preserved.
        """
        # Fall back to the instance-level persistent count when none is given.
        if count is None and self.persistent_count is not None:
            count = self.persistent_count
        if count is None:
            count = 0
        # Split into leading whitespace, the word itself, and trailing
        # whitespace so surrounding whitespace can be preserved in the result.
        mo = re.search(r"\A(\s*)(.+?)(\s*)\Z", text)
        pre = mo.group(1)
        word = mo.group(2)
        post = mo.group(3)
        if str(count).lower() in pl_count_zero:
            return "{}no {}{}".format(pre, self.plural(word, 0), post)
        else:
            return "{}{} {}{}".format(pre, count, self.plural(word, count), post) | If count is 0, no, zero or nil, return 'no' followed by the plural
of text.
If count is one of:
1, a, an, one, each, every, this, that
return count followed by text.
Otherwise return count follow by the plural of text.
In the return value count is always followed by a space.
Whitespace at the start and end is preserved. |
def is_file(cls, file):
        '''Return whether the file is likely to be HTML.

        Peeks at the start of ``file`` without consuming it, lowercases the
        printable bytes, and scans for common HTML markers.

        :returns: ``True`` if a marker is found, ``False`` otherwise.  (The
            previous implementation implicitly returned ``None`` in the
            negative case, contradicting its "return whether" contract.)
        '''
        peeked_data = wpull.string.printable_bytes(
            wpull.util.peek_file(file)).lower()
        # Any one of these byte sequences is a strong hint of an HTML document.
        markers = (b'<!doctype html', b'<head', b'<title', b'<html',
                   b'<script', b'<table', b'<a href')
        return any(marker in peeked_data for marker in markers)
def get_parent(self, level=1):
        '''
        get parent dir as a `DirectoryInfo`.
        return `None` if self is top.
        '''
        # Walk `level` directories up; the underlying path object raises
        # ValueError when there is no parent left to walk to.
        try:
            parent_path = self.path.get_parent(level)
        except ValueError: # abspath cannot get parent
            return None
        assert parent_path
        return DirectoryInfo(parent_path) | get parent dir as a `DirectoryInfo`.
return `None` if self is top. |
def list(self):
        """List Reserved Capacities"""
        # Object mask restricting which fields the API call returns.
        mask = """mask[availableInstanceCount, occupiedInstanceCount,
        instances[id, billingItem[description, hourlyRecurringFee]], instanceCount, backendRouter[datacenter]]"""
        results = self.client.call('Account', 'getReservedCapacityGroups', mask=mask)
        return results | List Reserved Capacities
def get_arthur_params_from_url(cls, url):
        """ Get the arthur params given a URL for the data source """
        params = {}
        # Reuse the command's own argument parser so URL parsing stays
        # consistent with the backend's CLI handling.
        args = cls.get_perceval_params_from_url(url)
        parser = GitLabCommand.setup_cmd_parser()
        parsed_args = parser.parse(*args)
        params['owner'] = parsed_args.owner
        params['repository'] = parsed_args.repository
        # include only blacklist ids information
        params['blacklist_ids'] = parsed_args.blacklist_ids
        return params | Get the arthur params given a URL for the data source
def get_triplets_at_q(grid_point,
                      mesh,
                      point_group,  # real space point group of space group
                      reciprocal_lattice,  # column vectors
                      is_time_reversal=True,
                      swappable=True,
                      stores_triplets_map=False):
    """Parameters
    ----------
    grid_point : int
        A grid point
    mesh : array_like
        Mesh numbers
        dtype='intc'
        shape=(3,)
    point_group : array_like
        Rotation matrices in real space. Note that those in reciprocal space
        mean these matrices transposed (local terminology).
        dtype='intc'
        shape=(n_rot, 3, 3)
    reciprocal_lattice : array_like
        Reciprocal primitive basis vectors given as column vectors
        dtype='double'
        shape=(3, 3)
    is_time_reversal : bool
        Inversion symmetry is added if it doesn't exist.
    swappable : bool
        q1 and q2 can be swapped. By this number of triplets decreases.
    Returns
    -------
    triplets_at_q : ndarray
        Symmetry reduced number of triplets are stored as grid point
        integer numbers.
        dtype='uintp'
        shape=(n_triplets, 3)
    weights : ndarray
        Weights of triplets in Brillouin zone
        dtype='intc'
        shape=(n_triplets,)
    bz_grid_address : ndarray
        Integer grid address of the points in Brillouin zone including
        surface. The first prod(mesh) numbers of points are
        independent. But the rest of points are
        translational-symmetrically equivalent to some other points.
        dtype='intc'
        shape=(n_grid_points, 3)
    bz_map : ndarray
        Grid point mapping table containing BZ surface. See more
        detail in spglib docstring.
        dtype='uintp'
        shape=(prod(mesh*2),)
    map_triplets : ndarray or None
        Returns when stores_triplets_map=True, otherwise None is
        returned. Mapping table of all triplets to symmetrically
        independent triplets. More precisely, this gives a list of
        index mapping from all q-points to independent q' of
        q+q'+q''=G. Considering q' is enough because q is fixed and
        q''=G-q-q' where G is automatically determined to choose
        smallest |G|.
        dtype='uintp'
        shape=(prod(mesh),)
    map_q : ndarray or None
        Returns when stores_triplets_map=True, otherwise None is
        returned. Irreducible q-points stabilized by q-point of
        specified grid_point.
        dtype='uintp'
        shape=(prod(mesh),)
    """
    # Symmetry-reduce the triplets on the reciprocal mesh first.
    map_triplets, map_q, grid_address = _get_triplets_reciprocal_mesh_at_q(
        grid_point,
        mesh,
        point_group,
        is_time_reversal=is_time_reversal,
        swappable=swappable)
    # Relocate grid addresses so they live inside the Brillouin zone.
    bz_grid_address, bz_map = spg.relocate_BZ_grid_address(grid_address,
                                                           mesh,
                                                           reciprocal_lattice,
                                                           is_dense=True)
    triplets_at_q, weights = _get_BZ_triplets_at_q(
        grid_point,
        bz_grid_address,
        bz_map,
        map_triplets,
        mesh)
    # Sanity check: triplet weights must account for every mesh grid point.
    assert np.prod(mesh) == weights.sum(), \
        "Num grid points %d, sum of weight %d" % (
            np.prod(mesh), weights.sum())
    # These maps are required for collision matrix calculation.
    if not stores_triplets_map:
        map_triplets = None
        map_q = None
    return triplets_at_q, weights, bz_grid_address, bz_map, map_triplets, map_q | Parameters
----------
grid_point : int
A grid point
mesh : array_like
Mesh numbers
dtype='intc'
shape=(3,)
point_group : array_like
Rotation matrices in real space. Note that those in reciprocal space
mean these matrices transposed (local terminology).
dtype='intc'
shape=(n_rot, 3, 3)
reciprocal_lattice : array_like
Reciprocal primitive basis vectors given as column vectors
dtype='double'
shape=(3, 3)
is_time_reversal : bool
        Inversion symmetry is added if it doesn't exist.
swappable : bool
q1 and q2 can be swapped. By this number of triplets decreases.
Returns
-------
triplets_at_q : ndarray
Symmetry reduced number of triplets are stored as grid point
integer numbers.
dtype='uintp'
shape=(n_triplets, 3)
weights : ndarray
Weights of triplets in Brillouin zone
dtype='intc'
shape=(n_triplets,)
bz_grid_address : ndarray
Integer grid address of the points in Brillouin zone including
surface. The first prod(mesh) numbers of points are
independent. But the rest of points are
translational-symmetrically equivalent to some other points.
dtype='intc'
shape=(n_grid_points, 3)
bz_map : ndarray
Grid point mapping table containing BZ surface. See more
detail in spglib docstring.
dtype='uintp'
shape=(prod(mesh*2),)
map_tripelts : ndarray or None
Returns when stores_triplets_map=True, otherwise None is
returned. Mapping table of all triplets to symmetrically
independent tripelts. More precisely, this gives a list of
index mapping from all q-points to independent q' of
q+q'+q''=G. Considering q' is enough because q is fixed and
q''=G-q-q' where G is automatically determined to choose
smallest |G|.
dtype='uintp'
shape=(prod(mesh),)
map_q : ndarray or None
Returns when stores_triplets_map=True, otherwise None is
returned. Irreducible q-points stabilized by q-point of
specified grid_point.
dtype='uintp'
shape=(prod(mesh),) |
def filter(self, chamber, congress=CURRENT_CONGRESS, **kwargs):
        """
        Takes a chamber and Congress,
        OR state and district, returning a list of members
        """
        check_chamber(chamber)
        # Fold the positional arguments into kwargs so a single format()
        # call can fill whichever path template is selected below.
        kwargs.update(chamber=chamber, congress=congress)
        if 'state' in kwargs and 'district' in kwargs:
            # Most specific: current members for a single district.
            path = ("members/{chamber}/{state}/{district}/"
                    "current.json").format(**kwargs)
        elif 'state' in kwargs:
            path = ("members/{chamber}/{state}/"
                    "current.json").format(**kwargs)
        else:
            path = ("{congress}/{chamber}/"
                    "members.json").format(**kwargs)
        return self.fetch(path, parse=lambda r: r['results']) | Takes a chamber and Congress,
OR state and district, returning a list of members |
def is_older_than_metadata(self):
        """
        Return True if the package save file is older than the metadata. If it is, it should be rebuilt. Returns
        False if the time of either can't be determined
        :param path: Optional extra save path, used in save_path()
        """
        # doc_file may be an object exposing .path, or a plain path string.
        try:
            path = self.doc_file.path
        except AttributeError:
            path = self.doc_file
        source_ref = self._doc.ref.path
        try:
            # Positive difference => the metadata source is newer than the save file.
            age_diff = getmtime(source_ref) - getmtime(path)
            return age_diff > 0
        except (FileNotFoundError, OSError):
            # Either file may be missing/unreadable; treat as "not older".
            return False | Return True if the package save file is older than the metadata. If it is, it should be rebuilt. Returns
False if the time of either can't be determined
:param path: Optional extra save path, used in save_path() |
def mcc(x, axis=0, autocorrect=False):
    """Matthews correlation
    Parameters
    ----------
    x : ndarray
        dataset of binary [0,1] values
    axis : int, optional
        Variables as columns is the default (axis=0). If variables
        are in the rows use axis=1
    autocorrect : bool, optional
        If all predictions are True or all are False, then MCC
        returns np.NaN
        Set autocorrect=True to return a 0.0 correlation instead.
    Returns
    -------
    r : ndarray
        Matthews correlation
    p : ndarray
        p-values of the Chi^2 test statistics
    Raises
    ------
    ValueError
        If fewer than two variables are provided.
    Notes:
    ------
    (1) We cannot directly transform the Chi^2 test statistics to
        the Matthews correlation because the relationship is
            |r| = sqrt(chi2 / n)
            chi2 = r * r * n
    (2) The sign would be missing. Therefore, as a rule of thumb,
        If you want to optimize ABS(r_mcc) then just use the Chi2/n
        directly (Divide Chi^2 by the number of observations)
    Examples:
    ---------
        import korr
        r, pval = korr.mcc(X)
    Alternatives:
    -------------
        from sklearn.metrics import matthews_corrcoef
        r = matthews_corrcoef(y_true, y_pred)
    """
    # transpose if axis<>0.  (Fixed: `axis is not 0` compared integers by
    # object identity, which is unreliable and a SyntaxWarning on 3.8+.)
    if axis != 0:
        x = x.T
    # read dimensions and
    n, c = x.shape
    # check if enough variables provided.  ValueError subclasses Exception,
    # so callers catching the previous bare Exception still work.
    if c < 2:
        raise ValueError(
            "Only " + str(c) + " variables provided. Min. 2 required.")
    # allocate variables (diagonal correlation is 1, p-value 0 by definition)
    r = np.ones((c, c))
    p = np.zeros((c, c))
    # compute each (i,j)-th correlation; the matrices are symmetric, so only
    # the upper triangle is computed and then mirrored.
    for i in range(0, c):
        for j in range(i + 1, c):
            cm = confusion(x[:, i], x[:, j])
            r[i, j] = confusion_to_mcc(cm)
            r[j, i] = r[i, j]
            # p-value from the Chi^2 statistic chi2 = r^2 * n with 1 dof.
            p[i, j] = 1 - scipy.stats.chi2.cdf(r[i, j] * r[i, j] * n, 1)
            p[j, i] = p[i, j]
    # replace NaN with 0.0
    if autocorrect:
        r = np.nan_to_num(r)
    # done
    return r, p
Parameters
----------
x : ndarray
dataset of binary [0,1] values
axis : int, optional
Variables as columns is the default (axis=0). If variables
are in the rows use axis=1
autocorrect : bool, optional
If all predictions are True or all are False, then MCC
returns np.NaN
Set autocorrect=True to return a 0.0 correlation instead.
Returns
-------
r : ndarray
Matthews correlation
p : ndarray
p-values of the Chi^2 test statistics
Notes:
------
(1) We cannot directly transform the Chi^2 test statistics to
the Matthews correlation because the relationship is
|r| = sqrt(chi2 / n)
chi2 = r * r * n
(2) The sign would be missing. Therefore, as a rule of thumbs,
If you want to optimize ABS(r_mcc) then just use the Chi2/n
directly (Divide Chi^2 by the number of observations)
Examples:
---------
import korr
r, pval = korr.mcc(X)
Alternatives:
-------------
from sklearn.metrics import matthews_corrcoef
r = matthews_corrcoef(y_true, y_pred) |
def filter_device_by_class(vid, pid, device_class):
    """! @brief Test whether the device should be ignored by comparing bDeviceClass.
    This function checks the device's bDeviceClass to determine whether it is likely to be
    a CMSIS-DAP device. It uses the vid and pid for device-specific quirks.
    @retval True Skip the device.
    @retval False The device is valid.
    """
    # Check valid classes for CMSIS-DAP firmware.
    if device_class in CMSIS_DAP_USB_CLASSES:
        return False
    # Old "Mbed CMSIS-DAP" firmware has an incorrect bDeviceClass.
    if ((vid, pid) == ARM_DAPLINK_ID) and (device_class == USB_CLASS_COMMUNICATIONS):
        return False
    # Any other class indicates the device is not CMSIS-DAP.
    return True | ! @brief Test whether the device should be ignored by comparing bDeviceClass.
This function checks the device's bDeviceClass to determine whether the it is likely to be
a CMSIS-DAP device. It uses the vid and pid for device-specific quirks.
@retval True Skip the device.
@retval False The device is valid. |
def svg_data_uri(self, xmldecl=False, encode_minimal=False,
                 omit_charset=False, nl=False, **kw):
        """\
        Converts the QR Code into a SVG data URI.
        The XML declaration is omitted by default (set ``xmldecl`` to ``True``
        to enable it), further the newline is omitted by default (set ``nl`` to
        ``True`` to enable it).
        Aside from the missing ``out`` parameter and the different ``xmldecl``
        and ``nl`` default values and the additional parameter ``encode_minimal``
        and ``omit_charset`` this method uses the same parameters as the
        usual SVG serializer.
        :param bool xmldecl: Indicates if the XML declaration should be
                serialized (default: ``False``)
        :param bool encode_minimal: Indicates if the resulting data URI should
                use minimal percent encoding (disabled by default).
        :param bool omit_charset: Indicates if the ``;charset=...`` should be omitted
                (disabled by default)
        :rtype: str
        """
        # Delegates to the writers module; all remaining kwargs pass through
        # to the usual SVG serializer.
        return writers.as_svg_data_uri(self.matrix, self._version,
                                       xmldecl=xmldecl, nl=nl,
                                       encode_minimal=encode_minimal,
                                       omit_charset=omit_charset, **kw) | \
Converts the QR Code into a SVG data URI.
The XML declaration is omitted by default (set ``xmldecl`` to ``True``
to enable it), further the newline is omitted by default (set ``nl`` to
``True`` to enable it).
Aside from the missing ``out`` parameter and the different ``xmldecl``
and ``nl`` default values and the additional parameter ``encode_minimal``
and ``omit_charset`` this method uses the same parameters as the
usual SVG serializer.
:param bool xmldecl: Indicates if the XML declaration should be
serialized (default: ``False``)
:param bool encode_minimal: Indicates if the resulting data URI should
use minimal percent encoding (disabled by default).
:param bool omit_charset: Indicates if the ``;charset=...`` should be omitted
(disabled by default)
:rtype: str |
def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        model = cls._load_java(sc, path)
        # Wrap the JVM-side model so its methods are callable from Python.
        wrapper =\
            sc._jvm.org.apache.spark.mllib.api.python.PowerIterationClusteringModelWrapper(model)
        return PowerIterationClusteringModel(wrapper) | Load a model from the given path.
def _parse_table_name(self, table_id):
        """Parse a table name in the form of appid_YYYY_MM or
        YYYY_MM_appid and return a tuple consisting of YYYY-MM and the app id.
        Returns (None, None) in the event of a name like <desc>_YYYYMMDD_<int>
        Parameters
        ----------
        table_id : str
            The table id as listed by BigQuery
        Returns
        -------
        tuple
            (year/month, app id), or (None, None) if the table id cannot be
            parsed.
        """
        def _is_year_month(candidate):
            # A valid date reads "YYYY-MM": one dash, all digits, 7 chars.
            # The previous prefix-date branch skipped the length check and
            # so accepted malformed dates such as "2017-1".
            return (candidate.count("-") == 1 and
                    len(candidate) == 7 and
                    all(num.isdigit() for num in candidate.split('-')))

        attributes = table_id.split('_')
        # Prefix date: YYYY_MM_appid
        year_month = "-".join(attributes[:2])
        app_id = "-".join(attributes[2:])
        if _is_year_month(year_month):
            return year_month, app_id
        # Postfix date: appid_YYYY_MM
        year_month = "-".join(attributes[-2:])
        app_id = "-".join(attributes[:-2])
        if _is_year_month(year_month):
            return year_month, app_id
        return None, None
return None, None | Parse a table name in the form of appid_YYYY_MM or
YYYY_MM_appid and return a tuple consisting of YYYY-MM and the app id.
Returns (None, None) in the event of a name like <desc>_YYYYMMDD_<int>
Parameters
----------
table_id : str
The table id as listed by BigQuery
Returns
-------
tuple
(year/month, app id), or (None, None) if the table id cannot be
parsed. |
def newline(self):
        """Write eol, then start new line."""
        self.write_str(self.eol)
        # Reset the remaining room on the current line to the full width.
        self.room = self.maxlinelen
def iter_followers(self, login=None, number=-1, etag=None):
        """If login is provided, iterate over a generator of followers of that
        login name; otherwise return a generator of followers of the
        authenticated user.
        :param str login: (optional), login of the user to check
        :param int number: (optional), number of followers to return. Default:
            -1 returns all followers
        :param str etag: (optional), ETag from a previous request to the same
            endpoint
        :returns: generator of :class:`User <github3.users.User>`\ s
        """
        # Delegate to the named user's follower iterator when a login is given.
        if login:
            return self.user(login).iter_followers()
        return self._iter_follow('followers', int(number), etag=etag) | If login is provided, iterate over a generator of followers of that
login name; otherwise return a generator of followers of the
authenticated user.
:param str login: (optional), login of the user to check
:param int number: (optional), number of followers to return. Default:
-1 returns all followers
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`User <github3.users.User>`\ s |
def get_oauth_access_token(url, client_id, client_secret, token_type='jwt', grant_type='client_credentials',
                           refresh_token=None):
    """ Retrieves OAuth 2.0 access token using the given grant type.
    Args:
        url (str): Oauth2 access token endpoint
        client_id (str): client ID
        client_secret (str): client secret
    Kwargs:
        token_type (str): Type of token to return. Options include bearer and jwt.
        grant_type (str): One of 'client_credentials' or 'refresh_token'
        refresh_token (str): The previous access token (for grant_type=refresh_token)
    Returns:
        tuple: Tuple containing access token string and expiration datetime.
    Raises:
        ValueError: If grant_type is 'refresh_token' but no refresh_token was given.
        requests.RequestException: If the response contains no access token.
    """
    now = datetime.datetime.utcnow()
    data = {
        'grant_type': grant_type,
        'client_id': client_id,
        'client_secret': client_secret,
        'token_type': token_type,
    }
    if refresh_token:
        data['refresh_token'] = refresh_token
    elif grant_type == 'refresh_token':
        # Explicit error instead of `assert`: assertions are stripped when
        # Python runs with -O, which silently skipped this validation.
        raise ValueError("refresh_token parameter required")
    response = requests.post(
        url,
        data=data,
        headers={
            'User-Agent': USER_AGENT,
        },
    )
    data = response.json()
    try:
        access_token = data['access_token']
        expires_in = data['expires_in']
    except KeyError:
        # Surface token-endpoint errors with the full response attached.
        raise requests.RequestException(response=response)
    expires_at = now + datetime.timedelta(seconds=expires_in)
    return access_token, expires_at
Args:
url (str): Oauth2 access token endpoint
client_id (str): client ID
client_secret (str): client secret
Kwargs:
token_type (str): Type of token to return. Options include bearer and jwt.
grant_type (str): One of 'client_credentials' or 'refresh_token'
refresh_token (str): The previous access token (for grant_type=refresh_token)
Returns:
tuple: Tuple containing access token string and expiration datetime. |
def commit(self, message=None, amend=False, stage=True):
        """Commit any changes, optionally staging all changes beforehand."""
        # Thin wrapper over the module-level git_commit helper.
        return git_commit(self.repo_dir, message=message,
                          amend=amend, stage=stage) | Commit any changes, optionally staging all changes beforehand.
def cli(env, keyword, package_type):
    """List packages that can be ordered via the placeOrder API.
    ::
        # List out all packages for ordering
        slcli order package-list
        # List out all packages with "server" in the name
        slcli order package-list --keyword server
        # Select only specific package types
        slcli order package-list --package_type BARE_METAL_CPU
    """
    manager = ordering.OrderingManager(env.client)
    table = formatting.Table(COLUMNS)
    # Always exclude BLUEMIX_SERVICE packages from the listing.
    _filter = {'type': {'keyName': {'operation': '!= BLUEMIX_SERVICE'}}}
    if keyword:
        # Substring match ('*=') on the package name.
        _filter['name'] = {'operation': '*= %s' % keyword}
    if package_type:
        _filter['type'] = {'keyName': {'operation': package_type}}
    packages = manager.list_packages(filter=_filter)
    for package in packages:
        table.add_row([
            package['id'],
            package['name'],
            package['keyName'],
            package['type']['keyName']
        ])
    env.fout(table) | List packages that can be ordered via the placeOrder API.
::
# List out all packages for ordering
slcli order package-list
# List out all packages with "server" in the name
slcli order package-list --keyword server
# Select only specifict package types
slcli order package-list --package_type BARE_METAL_CPU |
def objects(self, cls=None):
        """
        Return an iterator over all objects in this directory which are
        instances of `cls`. By default, iterate over all objects (`cls=None`).
        Parameters
        ----------
        cls : a class, optional (default=None)
            If a class is specified, only iterate over objects that are
            instances of this class.
        Returns
        -------
        A generator over the objects in this directory.
        Examples
        --------
        $ rootpy browse myfile.root
        In [1]: list(f1.objects(R.Directory))
        Out[1]: [Directory('mydirectory')]
        """
        # Lazily read each key's object; nothing is materialized up front.
        objs = (asrootpy(x.ReadObj(), warn=False)
                for x in self.GetListOfKeys())
        if cls is not None:
            objs = (obj for obj in objs if isinstance(obj, cls))
        return objs | Return an iterater over all objects in this directory which are
instances of `cls`. By default, iterate over all objects (`cls=None`).
Parameters
----------
cls : a class, optional (default=None)
If a class is specified, only iterate over objects that are
instances of this class.
Returns
-------
A generator over the objects in this directory.
Examples
--------
$ rootpy browse myfile.root
In [1]: list(f1.objects(R.Directory))
Out[1]: [Directory('mydirectory')] |
def _keys_to_camel_case(self, obj):
        """
        Make a copy of a dictionary with all keys converted to camel case. This just calls to_camel_case on each of the keys in the dictionary and returns a new dictionary.
        :param obj: Dictionary to convert keys to camel case.
        :return: Dictionary with the input values and all keys in camel case
        """
        # Values are carried over untouched; only the keys are transformed.
        return dict((to_camel_case(key), value) for (key, value) in obj.items()) | Make a copy of a dictionary with all keys converted to camel case. This is just calls to_camel_case on each of the keys in the dictionary and returns a new dictionary.
:param obj: Dictionary to convert keys to camel case.
:return: Dictionary with the input values and all keys in camel case |
def dispatch(self, method, url, auth=None, params=None, **kwargs):
        """ Send HTTP request, with given method,
        credentials and data to the given URL,
        and return the success and the result on success.
        """
        r = Request(
            method=method,
            url=url,
            auth=auth,
            params=params,
            data=kwargs)
        s = Session()
        resp = s.send(r.prepare())
        status = resp.status_code
        text = resp.text
        error = resp.reason
        if status >= 200 and status < 300:
            if text:
                # Prefer decoded JSON; fall back to the raw body text.
                try:
                    return (True, json.loads(text))
                except TypeError:
                    pass
                except ValueError:
                    pass
            return (True, text)
        elif status >= 300 and status < 400:
            # NOTE(review): 3xx status codes are redirects, but the message
            # below talks about authorization -- confirm this is intentional.
            return (
                False,
                'Unauthorized access, '
                'please check your credentials.')
        elif status >= 400 and status < 500:
            return (False, 'Service not found.')
        elif status >= 500 and status < 600:
            return (False, 'Server error.')
        else:
            return (False, error) | Send HTTP request, with given method,
credentials and data to the given URL,
and return the success and the result on success. |
def ppo_atari_base():
  """Pong base parameters."""
  hparams = ppo_discrete_action_base()
  hparams.learning_rate_constant = 1e-4
  hparams.epoch_length = 200
  # Discounting settings for generalized advantage estimation (GAE).
  hparams.gae_gamma = 0.985
  hparams.gae_lambda = 0.985
  hparams.entropy_loss_coef = 0.003
  hparams.value_loss_coef = 1
  hparams.optimization_epochs = 3
  hparams.epochs_num = 1000
  hparams.policy_network = "feed_forward_cnn_small_categorical_policy"
  # PPO clipped-objective epsilon.
  hparams.clipping_coef = 0.2
  hparams.optimization_batch_size = 20
  hparams.clip_grad_norm = 0.5
  return hparams | Pong base parameters.
def textbetween(variable,
                firstnum=None,
                secondnum=None,
                locationoftext='regular'):
    """
    Get The Text Between Two Parts

    :param variable: The sequence (typically a string) to slice.
    :param firstnum: Start index of the slice (or None).
    :param secondnum: End index of the slice (or None).
    :param locationoftext: One of 'regular' (slice [firstnum:secondnum]),
        'toend' (slice [firstnum:]) or 'tostart' (slice [:secondnum]).
    :raises ValueError: If locationoftext is not a recognized mode.
    """
    if locationoftext == 'regular':
        return variable[firstnum:secondnum]
    if locationoftext == 'toend':
        return variable[firstnum:]
    if locationoftext == 'tostart':
        return variable[:secondnum]
    # An unknown mode used to fall through and silently return None.
    raise ValueError("unknown locationoftext: %r" % (locationoftext,))
def _kernel(kernel_spec):
  """Expands the kernel spec into a length 2 list.
  Args:
    kernel_spec: An integer or a length 1 or 2 sequence that is expanded to a
      list.
  Returns:
    A length 2 list.
  """
  if isinstance(kernel_spec, tf.compat.integral_types):
    # A bare integer means a square kernel.
    return [kernel_spec, kernel_spec]
  elif len(kernel_spec) == 1:
    return [kernel_spec[0], kernel_spec[0]]
  else:
    assert len(kernel_spec) == 2
    return kernel_spec | Expands the kernel spec into a length 2 list.
Args:
kernel_spec: An integer or a length 1 or 2 sequence that is expanded to a
list.
Returns:
A length 2 list. |
def create_question(self, question, type=None, **kwargs):
        """
        Returns a Question of specified type.
        """
        # NOTE(review): an unrecognized `type` falls through and returns
        # None; confirm whether that is intended or should raise instead.
        if not type:
            return Question(question, **kwargs)
        if type == "choice":
            return ChoiceQuestion(question, **kwargs)
        if type == "confirmation":
            return ConfirmationQuestion(question, **kwargs) | Returns a Question of specified type.
def moveToXY(self, vehID, edgeID, lane, x, y, angle=tc.INVALID_DOUBLE_VALUE, keepRoute=1):
        '''Place vehicle at the given x,y coordinates and force it's angle to
        the given value (for drawing).
        If the angle is set to INVALID_DOUBLE_VALUE, the vehicle assumes the
        natural angle of the edge on which it is driving.
        If keepRoute is set to 1, the closest position
        within the existing route is taken. If keepRoute is set to 0, the vehicle may move to
        any edge in the network but it's route then only consists of that edge.
        If keepRoute is set to 2 the vehicle has all the freedom of keepRoute=0
        but in addition to that may even move outside the road network.
        edgeID and lane are optional placement hints to resolve ambiguities'''
        # Byte budget: compound header (1+4) + string edgeID (1+4+len) +
        # int lane (1+4) + three doubles x/y/angle (1+8 each) + byte keepRoute
        # (1+1), matching the packs below.
        self._connection._beginMessage(tc.CMD_SET_VEHICLE_VARIABLE, tc.MOVE_TO_XY,
                                       vehID, 1 + 4 + 1 + 4 + len(edgeID) + 1 + 4 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 1)
        self._connection._string += struct.pack("!Bi", tc.TYPE_COMPOUND, 6)
        self._connection._packString(edgeID)
        self._connection._string += struct.pack("!Bi", tc.TYPE_INTEGER, lane)
        self._connection._string += struct.pack("!Bd", tc.TYPE_DOUBLE, x)
        self._connection._string += struct.pack("!Bd", tc.TYPE_DOUBLE, y)
        self._connection._string += struct.pack("!Bd", tc.TYPE_DOUBLE, angle)
        self._connection._string += struct.pack("!BB", tc.TYPE_BYTE, keepRoute)
        self._connection._sendExact() | Place vehicle at the given x,y coordinates and force it's angle to
the given value (for drawing).
If the angle is set to INVALID_DOUBLE_VALUE, the vehicle assumes the
natural angle of the edge on which it is driving.
If keepRoute is set to 1, the closest position
within the existing route is taken. If keepRoute is set to 0, the vehicle may move to
any edge in the network but it's route then only consists of that edge.
If keepRoute is set to 2 the vehicle has all the freedom of keepRoute=0
but in addition to that may even move outside the road network.
        edgeID and lane are optional placement hints to resolve ambiguities
def write(*args):
    """Like print(), but recognizes tensors and arrays and show
    more details about them.
    Example:
        hl.write("My Tensor", my_tensor)
    Prints:
        My Tensor float32 (10, 3, 224, 224) min: 0.0 max: 1.0
    """
    s = ""
    for a in args:
        # Convert tensors to Numpy arrays
        a = to_data(a)
        if isinstance(a, np.ndarray):
            # Numpy Array
            s += ("\t" if s else "") + "Tensor {} {} min: {:.3f} max: {:.3f}".format(
                a.dtype, a.shape, a.min(), a.max())
            # Flush after each array so its summary ends the current line.
            print(s)
            s = ""
        elif isinstance(a, list):
            s += ("\t" if s else "") + "list len: {} {}".format(len(a), a[:10])
        else:
            s += (" " if s else "") + str(a)
    if s:
        print(s) | Like print(), but recognizes tensors and arrays and show
more details about them.
Example:
hl.write("My Tensor", my_tensor)
Prints:
My Tensor float32 (10, 3, 224, 224) min: 0.0 max: 1.0 |
def clear(self, asset_manager_id, book_ids=None):
    """ Delete all transaction and position data for an asset_manager_id,
    optionally restricted to the given book_ids.

    Use with extreme caution. In production it is almost always better
    to Inactivate rather than delete. """
    self.logger.info('Clear Transactions & Positions - Asset Manager: %s', asset_manager_id)
    url = '%s/clear/%s' % (self.endpoint, asset_manager_id)
    params = {}
    if book_ids:
        params['asset_manager_ids'] = ','.join(book_ids)
    response = self.session.delete(url, params=params)
    if not response.ok:
        self.logger.error(response.text)
        response.raise_for_status()
    payload = response.json()
    self.logger.info('Deleted %s Transactions.', payload.get('transaction_count', 'Unknown'))
    self.logger.info('Deleted %s Positions.', payload.get('position_count', 'Unknown'))
    return payload
and option book_ids.
It should be used with extreme caution. In production it
is almost always better to Inactivate rather than delete. |
def pick_key(keys, use, alg='', key_type='', kid=''):
    """
    Based on given criteria pick out the keys that fulfill them from a
    given set of keys.

    :param keys: List of keys. These are :py:class:`cryptojwt.jwk.JWK`
        instances.
    :param use: What the key is going to be used for: 'sig'/'enc'
    :param alg: Crypto algorithm (e.g. a JWS/JWE algorithm identifier)
    :param key_type: Type of key (as returned by the alg->keytype maps)
    :param kid: Key ID
    :return: A list of keys that match the pattern
    """
    res = []
    # If no explicit key type was given, derive it from the algorithm
    # using the signature or encryption mapping, depending on 'use'.
    if not key_type:
        if use == 'sig':
            key_type = jws_alg2keytype(alg)
        else:
            key_type = jwe_alg2keytype(alg)
    for key in keys:
        # A key bound to a different use is never picked.
        if key.use and key.use != use:
            continue
        if key.kty == key_type:
            # When both the key and the caller carry a key ID they must
            # match; a mismatch skips this key entirely.
            if key.kid and kid:
                if key.kid == kid:
                    res.append(key)
                else:
                    continue
            # NOTE(review): a key appended on the kid match above can
            # also satisfy one of the branches below and be appended a
            # second time -- confirm whether duplicates are intended.
            if key.alg == '':
                if alg:
                    if key_type == 'EC':
                        # For EC keys the curve must correspond to the
                        # algorithm, e.g. ES256 -> P-256.
                        if key.crv == 'P-{}'.format(alg[2:]):
                            res.append(key)
                        continue
                res.append(key)
            elif alg and key.alg == alg:
                res.append(key)
            else:
                res.append(key)
    return res
given set of keys.
:param keys: List of keys. These are :py:class:`cryptojwt.jwk.JWK`
instances.
:param use: What the key is going to be used for 'sig'/'enc'
:param alg: crypto algorithm
:param key_type: Type of key 'rsa'/'ec'/'oct'
:param kid: Key ID
:return: A list of keys that match the pattern |
def select(self, Class, set=None, recursive=True, ignore=True, node=None):
    """See :meth:`AbstractElement.select`"""
    if not self.include:
        return iter([])
    # Delegate to the text node of the sub-document.
    return self.subdoc.data[0].select(Class, set, recursive, ignore, node)
def dict_filter_nones(dict_):
    r"""
    Return a copy of ``dict_`` with all ``None`` values removed.

    Args:
        dict_ (dict): a dictionary

    Returns:
        dict: the filtered dictionary

    CommandLine:
        python -m utool.util_dict --exec-dict_filter_nones

    Example:
        >>> # DISABLE_DOCTEST
        >>> # UNSTABLE_DOCTEST
        >>> # fails on python 3 because of dict None order
        >>> from utool.util_dict import *  # NOQA
        >>> import utool as ut
        >>> dict_ = {1: None, 2: 'blue', 3: 'four', None: 'fun'}
        >>> dict2_ = dict_filter_nones(dict_)
        >>> result = ut.repr4(dict2_, nl=False)
        >>> print(result)
        {None: 'fun', 2: 'blue', 3: 'four'}
    """
    # Plain dict comprehension over .items(); six.iteritems is
    # unnecessary since items() works on both Python 2 and 3.
    return {key: val for key, val in dict_.items() if val is not None}
Removes None values
Args:
dict_ (dict): a dictionary
Returns:
dict:
CommandLine:
python -m utool.util_dict --exec-dict_filter_nones
Example:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> # fails on python 3 because of dict None order
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {1: None, 2: 'blue', 3: 'four', None: 'fun'}
>>> dict2_ = dict_filter_nones(dict_)
>>> result = ut.repr4(dict2_, nl=False)
>>> print(result)
{None: 'fun', 2: 'blue', 3: 'four'} |
def realtime_observations(cls, buoy, data_type='txt'):
    """Retrieve and parse the realtime buoy data from NDBC.

    Parameters
    ----------
    buoy : str
        Name of buoy
    data_type : str
        Type of data requested, must be one of
        'txt' standard meteorological data
        'drift' meteorological data from drifting buoys and limited moored buoy data
        mainly from international partners
        'cwind' continuous winds data (10 minute average)
        'spec' spectral wave summaries
        'ocean' oceanographic data
        'srad' solar radiation data
        'dart' water column height
        'supl' supplemental measurements data
        'rain' hourly rain data

    Returns
    -------
    Parsed data as produced by the matching ``_parse_*`` method

    Raises
    ------
    KeyError
        If ``data_type`` is not one of the supported types.
    """
    # Validate the requested type before constructing the endpoint so
    # bad input fails fast without any network/session setup.
    valid_types = ('txt', 'drift', 'cwind', 'spec', 'ocean', 'srad',
                   'dart', 'supl', 'rain')
    if data_type not in valid_types:
        # BUGFIX: the original message concatenated 'dart,' and 'supl'
        # without a separating space.
        raise KeyError('Data type must be txt, drift, cwind, spec, ocean, srad, dart, '
                       'supl, or rain for parsed realtime data.')
    endpoint = cls()
    parsers = {'txt': endpoint._parse_met,
               'drift': endpoint._parse_drift,
               'cwind': endpoint._parse_cwind,
               'spec': endpoint._parse_spec,
               'ocean': endpoint._parse_ocean,
               'srad': endpoint._parse_srad,
               'dart': endpoint._parse_dart,
               'supl': endpoint._parse_supl,
               'rain': endpoint._parse_rain}
    raw_data = endpoint.raw_buoy_data(buoy, data_type=data_type)
    return parsers[data_type](raw_data)
Parameters
----------
buoy : str
Name of buoy
data_type : str
Type of data requested, must be one of
'txt' standard meteorological data
'drift' meteorological data from drifting buoys and limited moored buoy data
mainly from international partners
'cwind' continuous winds data (10 minute average)
'spec' spectral wave summaries
'ocean' oceanographic data
'srad' solar radiation data
'dart' water column height
'supl' supplemental measurements data
'rain' hourly rain data
Returns
-------
Raw data string |
def get_ips(self, instance_id):
    """Retrieves the ip addresses (public) from the cloud
    provider by the given instance id.

    :param str instance_id: id of the instance
    :return: list (ips); ``[None]`` when the instance is stopping or
        terminated and therefore has no IP address
    :raises: InstanceError if the ip could not be retrieved.
    """
    if not instance_id:
        raise InstanceError("could not retrieve the ip address for node: "
                            "no associated instance id")
    gce = self._connect()
    instances = gce.instances()
    try:
        request = instances.get(instance=instance_id,
                                project=self._project_id, zone=self._zone)
        response = self._execute_request(request)
        ip_public = None
        # If the instance is in status TERMINATED, then there will be
        # no IP addresses.
        if response and response['status'] in ('STOPPING', 'TERMINATED'):
            log.info("node '%s' state is '%s'; no IP address(es)" %
                     (instance_id, response['status']))
            return [None]
        # Pull the NAT'ed public IP off the first network interface,
        # if one is configured.
        if response and "networkInterfaces" in response:
            interfaces = response['networkInterfaces']
            if interfaces:
                if "accessConfigs" in interfaces[0]:
                    ip_public = interfaces[0]['accessConfigs'][0]['natIP']
        # Any path that failed to find a public IP ends up here and
        # raises; note this InstanceError is NOT caught by the except
        # clause below, so it propagates to the caller.
        if ip_public:
            return [ip_public]
        else:
            raise InstanceError("could not retrieve the ip address for "
                                "node `%s`, please check the node "
                                "through the cloud provider interface"
                                % instance_id)
    except (HttpError, CloudProviderError) as e:
        raise InstanceError('could not retrieve the ip address of `%s`: '
                            '`%s`' % (instance_id, e))
provider by the given instance id.
:param str instance_id: id of the instance
:return: list (ips)
:raises: InstanceError if the ip could not be retrieved. |
def isRef(self, doc, attr):
    """Determine whether an attribute is of type Ref. In case we
    have DTD(s) then this is simple, otherwise we use an
    heuristic: name Ref (upper or lowercase). """
    # Unwrap the Python wrapper objects to their underlying C handles.
    doc__o = None if doc is None else doc._o
    attr__o = None if attr is None else attr._o
    return libxml2mod.xmlIsRef(doc__o, self._o, attr__o)
have DTD(s) then this is simple, otherwise we use an
heuristic: name Ref (upper or lowercase). |
def _ReadStructureFromFileObject(
    self, file_object, file_offset, data_type_map):
  """Reads a structure from a file-like object.

  If the data type map has a fixed size this method will read the predefined
  number of bytes from the file-like object. If the data type map has a
  variable size, depending on values in the byte stream, this method will
  continue to read from the file-like object until the data type map can be
  successfully mapped onto the byte stream or until an error occurs.

  Args:
    file_object (dfvfs.FileIO): a file-like object to parse.
    file_offset (int): offset of the structure data relative to the start
        of the file-like object.
    data_type_map (dtfabric.DataTypeMap): data type map of the structure.

  Returns:
    tuple[object, int]: structure values object and data size of
        the structure.

  Raises:
    ParseError: if the structure cannot be read.
    ValueError: if file-like object or data type map is missing.
  """
  context = None
  data = b''
  last_data_size = 0
  # Fixed-size maps report their size directly; variable-size maps only
  # give a hint that grows as more of the byte stream becomes available.
  data_size = data_type_map.GetByteSize()
  if not data_size:
    data_size = data_type_map.GetSizeHint()
  # Keep reading until the size hint stops growing: each pass reads only
  # the additional bytes requested since the previous attempt.
  while data_size != last_data_size:
    read_offset = file_offset + last_data_size
    read_size = data_size - last_data_size
    data_segment = self._ReadData(file_object, read_offset, read_size)
    data = b''.join([data, data_segment])
    try:
      context = dtfabric_data_maps.DataTypeMapContext()
      structure_values_object = data_type_map.MapByteStream(
          data, context=context)
      # Successful mapping terminates the loop.
      return structure_values_object, data_size
    except dtfabric_errors.ByteStreamTooSmallError:
      # Not enough data yet; fall through and ask for a new size hint.
      pass
    except dtfabric_errors.MappingError as exception:
      raise errors.ParseError((
          'Unable to map {0:s} data at offset: 0x{1:08x} with error: '
          '{2!s}').format(data_type_map.name, file_offset, exception))
    last_data_size = data_size
    data_size = data_type_map.GetSizeHint(context=context)
  # The size hint converged without a successful mapping.
  raise errors.ParseError(
      'Unable to read {0:s} at offset: 0x{1:08x}'.format(
          data_type_map.name, file_offset))
If the data type map has a fixed size this method will read the predefined
number of bytes from the file-like object. If the data type map has a
variable size, depending on values in the byte stream, this method will
continue to read from the file-like object until the data type map can be
successfully mapped onto the byte stream or until an error occurs.
Args:
file_object (dfvfs.FileIO): a file-like object to parse.
file_offset (int): offset of the structure data relative to the start
of the file-like object.
data_type_map (dtfabric.DataTypeMap): data type map of the structure.
Returns:
tuple[object, int]: structure values object and data size of
the structure.
Raises:
ParseError: if the structure cannot be read.
ValueError: if file-like object or data type map is missing. |
async def async_oauth_dance(consumer_key, consumer_secret, callback_uri="oob"):
    """
    OAuth dance to get the user's access token

    Parameters
    ----------
    consumer_key : str
        Your consumer key
    consumer_secret : str
        Your consumer secret
    callback_uri : str
        Callback uri, defaults to 'oob'

    Returns
    -------
    dict
        Access tokens
    """
    # Step 1: obtain a request token.
    request_token = await get_oauth_token(consumer_key, consumer_secret,
                                          callback_uri)
    # Step 2: have the user authorize and supply the verifier.
    verifier = await get_oauth_verifier(request_token['oauth_token'])
    # Step 3: trade the verified request token for an access token.
    access = await get_access_token(
        consumer_key,
        consumer_secret,
        oauth_verifier=verifier,
        **request_token
    )
    return dict(
        consumer_key=consumer_key,
        consumer_secret=consumer_secret,
        access_token=access['oauth_token'],
        access_token_secret=access['oauth_token_secret']
    )
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
callback_uri : str
Callback uri, defaults to 'oob'
Returns
-------
dict
Access tokens |
def _make_pcaps(self):
    '''
    Internal method.  Create a libpcap capture device for every
    network interface we care about; loopback interfaces additionally
    get a raw UDP socket for sending.
    '''
    self._pcaps = {}
    for name, interface in self._devinfo.items():
        if interface.iftype == InterfaceType.Loopback:
            self._localsend[name] = _RawSocket(name, protocol=IPProtocol.UDP)
        self._pcaps[name] = PcapLiveDevice(name)
for every network interface we care about and
set them in non-blocking mode. |
def get_buffer(self, format, output=None):
    """Return this image as a buffer encoded in *format*
    ('jpeg', 'png', etc.).
    """
    data = self._get_data()
    header = self.get_header()
    return self.io.get_buffer(data, header, format, output=output)
Format should be 'jpeg', 'png', etc. |
def save_existing(self, form, instance, commit=True):
    """
    NOTE: this completely overrides the default implementation; there
    is no other way to prevent a double save.  Translated data is
    simply assigned to the object before saving.
    """
    self._prepare_multilingual_object(instance, form)
    excluded = [self._pk_field.name]
    return forms.save_instance(form, instance, exclude=excluded, commit=commit)
other way to pretend double save otherwise. Just assign translated data
to object |
def check_response(self, resp):
    """
    Checks response after request was made.

    Raises for HTTP errors, validates the ``status`` field of the JSON
    body and invokes the optional ``response_checker``.

    :param resp: a ``requests``-style response object
    :return: the populated :class:`ResponseHolder`
    :raises InvalidResponse: if the body has no ``status`` field
    :raises InvalidStatus: if the status is not ``STATUS_OK``
    """
    if not resp.ok:
        # If response code is not ok (200), surface the resulting http
        # error code with description.  resp.ok is False exactly when
        # raise_for_status() raises, so this always propagates.
        resp.raise_for_status()
        return None
    # For successful API call, response code will be 200 (OK).
    # Named ``body`` to avoid shadowing the stdlib ``json`` module.
    body = resp.json()
    self.response = ResponseHolder()
    self.response.response = body
    # Check the code
    if 'status' not in body:
        raise InvalidResponse('No status field')
    self.response.status = self.field_to_long(body['status'])
    if self.response.status != EBConsts.STATUS_OK:
        txt_status = self.get_text_status(body)
        raise InvalidStatus('Status is %s (%04X)'
                           % (txt_status if txt_status is not None else "", self.response.status))
    if self.response_checker is not None:
        self.response_checker(self.response)
    return self.response
pass | Checks response after request was made.
Checks status of the response, mainly
:param resp:
:return: |
def _configure_formatting(self):
'''
Configures output formatting, and fitting output to the current terminal width.
Returns None.
'''
self.format_strings(self.DEFAULT_FORMAT, self.DEFAULT_FORMAT)
if self.fit_to_screen:
try:
import fcntl
import struct
import termios
# Get the terminal window width
hw = struct.unpack('hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '1234'))
self.SCREEN_WIDTH = self.HEADER_WIDTH = hw[1]
except KeyboardInterrupt as e:
raise e
except Exception:
pass | Configures output formatting, and fitting output to the current terminal width.
Returns None. |
def graph(self, fnm=None, size=None, fntsz=None, fntfm=None, clrgen=None,
          rmsz=False, prog='dot'):
    """
    Construct call graph

    Parameters
    ----------
    fnm : None or string, optional (default None)
        Filename of graph file to be written. File type is determined by
        the file extensions (e.g. dot for 'graph.dot' and SVG for
        'graph.svg'). If None, a file is not written.
    size : string or None, optional (default None)
        Graph image size specification string.
    fntsz : int or None, optional (default None)
        Font size for text.
    fntfm : string or None, optional (default None)
        Font family specification string.
    clrgen : function or None, optional (default None)
        Function to call to generate the group colours. This function
        should take an integer specifying the number of groups as an
        argument and return a list of graphviz-compatible colour
        specification strings.
    rmsz : bool, optional (default False)
        If True, remove the width and height specifications from an
        SVG format output file so that the size scales properly when
        viewed in a web browser
    prog : string, optional (default 'dot')
        Name of graphviz layout program to use.

    Returns
    -------
    pgr : pygraphviz.AGraph
        Call graph of traced function calls
    """
    # Default colour generation function
    if clrgen is None:
        clrgen = lambda n: self._clrgen(n, 0.330, 0.825)
    # Generate color list, one colour per group
    clrlst = clrgen(len(self.group))
    # Initialise a pygraphviz graph
    g = pgv.AGraph(strict=False, directed=True, landscape=False,
                   rankdir='LR', newrank=True, fontsize=fntsz,
                   fontname=fntfm, size=size, ratio='compress',
                   color='black', bgcolor='#ffffff00')
    # Set graph attributes
    g.node_attr.update(penwidth=0.25, shape='box', style='rounded,filled')
    # Iterate over functions adding them as graph nodes
    for k in self.fncts:
        g.add_node(k, fontsize=fntsz, fontname=fntfm)
        # If lnksub regex pair is provided, compute an href link
        # target from the node name and add it as an attribute to
        # the node
        if self.lnksub is not None:
            lnktgt = re.sub(self.lnksub[0], self.lnksub[1], k)
            g.get_node(k).attr.update(href=lnktgt, target="_top")
        # If function has no calls to it, set its rank to "source"
        if self.fncts[k][1] == 0:
            g.get_node(k).attr.update(rank='source')
    # If groups defined, construct a subgraph for each and add the
    # nodes in each group to the corresponding subgraph
    if self.group:
        # Map from function name to its group number, used below to
        # colour edges by the calling function's group
        fngrpnm = {}
        # Iterate over group number/group name pairs
        for k in zip(range(len(self.group)), sorted(self.group)):
            g.add_subgraph(self.group[k[1]], name='cluster_' + k[1],
                           label=k[1], penwidth=2, style='dotted',
                           pencolor=clrlst[k[0]])
            # Iterate over nodes in current group
            for l in self.group[k[1]]:
                # Create record of function group number
                fngrpnm[l] = k[0]
                # Set common group colour for current node
                g.get_node(l).attr.update(fillcolor=clrlst[k[0]])
    # Iterate over function calls, adding each as an edge
    for k in self.calls:
        # If groups defined, set edge colour according to group of
        # calling function, otherwise set a standard colour
        if self.group:
            g.add_edge(k[0], k[1], penwidth=2, color=clrlst[fngrpnm[k[0]]])
        else:
            g.add_edge(k[0], k[1], color='grey')
    # Call layout program
    g.layout(prog=prog)
    # Write graph file if filename provided
    if fnm is not None:
        ext = os.path.splitext(fnm)[1]
        if ext == '.dot':
            g.write(fnm)
        else:
            if ext == '.svg' and rmsz:
                # Render to SVG in memory and strip the fixed width/
                # height attributes so the image scales in browsers
                img = g.draw(format='svg').decode('utf-8')
                cp = re.compile(r'\n<svg width=\"[^\"]*\" '
                                'height=\"[^\"]*\"')
                img = cp.sub(r'\n<svg', img, count=1)
                with open(fnm, 'w') as fd:
                    fd.write(img)
            else:
                g.draw(fnm)
    # Return graph object
    return g
return g | Construct call graph
Parameters
----------
fnm : None or string, optional (default None)
Filename of graph file to be written. File type is determined by
the file extensions (e.g. dot for 'graph.dot' and SVG for
'graph.svg'). If None, a file is not written.
size : string or None, optional (default None)
Graph image size specification string.
fntsz : int or None, optional (default None)
Font size for text.
fntnm : string or None, optional (default None)
Font family specification string.
clrgen : function or None, optional (default None)
Function to call to generate the group colours. This function
should take an integer specifying the number of groups as an
argument and return a list of graphviz-compatible colour
specification strings.
rmsz : bool, optional (default False)
If True, remove the width and height specifications from an
SVG format output file so that the size scales properly when
viewed in a web browser
prog : string, optional (default 'dot')
Name of graphviz layout program to use.
Returns
-------
pgr : pygraphviz.AGraph
Call graph of traced function calls |
def process(self, document):
    """Log versions of the tools required by the given pipeline document.

    :param document: parsed pipeline document (serialized to JSON to
        search for tool references)
    :return: dict mapping tool name to its detected Version
    """
    content = json.dumps(document)
    versions = {'Spline': Version(VERSION)}
    # Bash is always required.
    versions.update(self.get_version("Bash", self.BASH_VERSION))
    # Only query tools that the document actually references.
    # CONSISTENCY: use self.get_version throughout instead of mixing
    # instance and class access, and use 'in' instead of find() >= 0.
    if '"docker(container)":' in content or '"docker(image)":' in content:
        versions.update(self.get_version("Docker", self.DOCKER_VERSION))
    if '"packer":' in content:
        versions.update(self.get_version("Packer", self.PACKER_VERSION))
    if '"ansible(simple)":' in content:
        versions.update(self.get_version('Ansible', self.ANSIBLE_VERSION))
    return versions
def _dbdir():
    """Returns the absolute path to the directory where acorn DBs are
    stored, creating the directory if it does not exist.

    Raises:
        ValueError: if the DB folder is not configured in 'acorn.cfg'.
    """
    global dbdir
    # Removed unused imports (getcwd, chdir) from the original.
    from os import mkdir, path
    if dbdir is None:
        from acorn.config import settings
        config = settings("acorn")
        if (config.has_section("database") and
            config.has_option("database", "folder")):
            dbdir = config.get("database", "folder")
        else: # pragma: no cover
            raise ValueError("The folder to save DBs in must be configured"
                             " in 'acorn.cfg'")
    #It is possible to specify the database path relative to the repository
    #root. path.abspath will map it correctly if we are in the root directory.
    from acorn.utility import abspath
    if not path.isabs(dbdir):
        #We want absolute paths to make it easier to port this to other OS.
        dbdir = abspath(dbdir)
    if not path.isdir(dbdir): # pragma: no cover
        mkdir(dbdir)
    return dbdir
def open(self, file, mode='r', perm=0o0644):
    """
    Opens a file on the node

    :param file: file path to open
    :param mode: open mode
    :param perm: file permission in octet form
    mode:
        'r' read only
        'w' write only (truncate)
        '+' read/write
        'x' create if not exist
        'a' append
    :return: a file descriptor
    """
    return self._client.json('filesystem.open', {
        'file': file,
        'mode': mode,
        'perm': perm,
    })
:param file: file path to open
:param mode: open mode
:param perm: file permission in octet form
mode:
'r' read only
'w' write only (truncate)
'+' read/write
'x' create if not exist
'a' append
:return: a file descriptor |
def _post_process_yaml_data(self,
fixture_data: Dict[str, Dict[str, Any]],
relationship_columns: Set[str],
) -> Tuple[Dict[str, Dict[str, Any]], List[str]]:
"""
Convert and normalize identifier strings to Identifiers, as well as determine
class relationships.
"""
rv = {}
relationships = set()
if not fixture_data:
return rv, relationships
for identifier_id, data in fixture_data.items():
new_data = {}
for col_name, value in data.items():
if col_name not in relationship_columns:
new_data[col_name] = value
continue
identifiers = normalize_identifiers(value)
if identifiers:
relationships.add(identifiers[0].class_name)
if isinstance(value, str) and len(identifiers) <= 1:
new_data[col_name] = identifiers[0] if identifiers else None
else:
new_data[col_name] = identifiers
rv[identifier_id] = new_data
return rv, list(relationships) | Convert and normalize identifier strings to Identifiers, as well as determine
class relationships. |
def _readsie(self, pos):
"""Return interpretation of next bits as a signed interleaved exponential-Golomb code.
Advances position to after the read code.
Raises ReadError if the end of the bitstring is encountered while
reading the code.
"""
codenum, pos = self._readuie(pos)
if not codenum:
return 0, pos
try:
if self[pos]:
return -codenum, pos + 1
else:
return codenum, pos + 1
except IndexError:
raise ReadError("Read off end of bitstring trying to read code.") | Return interpretation of next bits as a signed interleaved exponential-Golomb code.
Advances position to after the read code.
Raises ReadError if the end of the bitstring is encountered while
reading the code. |
def _bond_percolation(network, tmask):
    r"""
    Private helper called by 'find_clusters': label pore and throat
    clusters given a boolean mask of invaded throats.  Non-invaded
    pores and throats are labelled -1.
    """
    # Cluster the invaded-throat graph using scipy.csgraph.
    adjacency = network.create_adjacency_matrix(weights=tmask, fmt='csr',
                                                drop_zeros=True)
    labels = sprs.csgraph.connected_components(csgraph=adjacency,
                                               directed=False)[1]
    # Pores attached to any invaded throat.
    invaded_pores = network.find_connected_pores(throats=tmask, flatten=True)
    # Shift cluster numbers so that non-invaded pores become -1.
    pore_mask = network.tomask(pores=invaded_pores).astype(int)
    p_clusters = (labels + 1) * pore_mask - 1
    # Throats inherit the label of their first neighboring pore ...
    t_clusters = labels[network['throat.conns']][:, 0]
    # ... except non-invaded throats, which are labelled -1.
    t_clusters[~tmask] = -1
    return (p_clusters, t_clusters)
This private method is called by 'find_clusters' |
def _subdivide_nodes(nodes):
    """Subdivide a curve into two sub-curves.

    Does so by taking the unit interval (i.e. the domain of the surface) and
    splitting it into two sub-intervals by splitting down the middle.

    .. note::
        There is also a Fortran implementation of this function, which
        will be used if it can be built.

    Args:
        nodes (numpy.ndarray): The nodes defining a B |eacute| zier curve.

    Returns:
        Tuple[numpy.ndarray, numpy.ndarray]: The nodes for the two sub-curves.
    """
    _, num_nodes = np.shape(nodes)
    # Pick the precomputed subdivision matrices for low degrees;
    # build them on demand for anything higher.
    if num_nodes == 2:
        left_mat = _LINEAR_SUBDIVIDE_LEFT
        right_mat = _LINEAR_SUBDIVIDE_RIGHT
    elif num_nodes == 3:
        left_mat = _QUADRATIC_SUBDIVIDE_LEFT
        right_mat = _QUADRATIC_SUBDIVIDE_RIGHT
    elif num_nodes == 4:
        left_mat = _CUBIC_SUBDIVIDE_LEFT
        right_mat = _CUBIC_SUBDIVIDE_RIGHT
    else:
        left_mat, right_mat = make_subdivision_matrices(num_nodes - 1)
    return (
        _helpers.matrix_product(nodes, left_mat),
        _helpers.matrix_product(nodes, right_mat),
    )
Does so by taking the unit interval (i.e. the domain of the surface) and
splitting it into two sub-intervals by splitting down the middle.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): The nodes defining a B |eacute| zier curve.
Returns:
Tuple[numpy.ndarray, numpy.ndarray]: The nodes for the two sub-curves. |
def magic_memit(self, line=''):
    """Measure memory usage of a Python statement

    Usage, in line mode:
        %memit [-ir<R>t<T>] statement

    Options:
    -r<R>: repeat the loop iteration <R> times and take the best result.
    Default: 1
    -i: run the code in the current environment, without forking a new process.
    This is required on some MacOS versions of Accelerate if your line contains
    a call to `np.dot`.
    -t<T>: timeout after <T> seconds. Unused if `-i` is active. Default: None

    Examples
    --------
    ::
        In [1]: import numpy as np
        In [2]: %memit np.zeros(1e7)
        maximum of 1: 76.402344 MB per loop
        In [3]: %memit np.ones(1e6)
        maximum of 1: 7.820312 MB per loop
        In [4]: %memit -r 10 np.empty(1e8)
        maximum of 10: 0.101562 MB per loop
        In [5]: memit -t 3 while True: pass;
        Subprocess timed out.
        Subprocess timed out.
        Subprocess timed out.
        ERROR: all subprocesses exited unsuccessfully. Try again with the `-i`
        option.
        maximum of 1: -inf MB per loop
    """
    opts, stmt = self.parse_options(line, 'r:t:i', posix=False, strict=False)
    repeat = int(getattr(opts, 'r', 1))
    if repeat < 1:
        # BUGFIX: the original used ``repeat == 1`` (a no-op comparison)
        # instead of assignment, so invalid repeat counts were kept.
        repeat = 1
    timeout = int(getattr(opts, 't', 0))
    if timeout <= 0:
        timeout = None
    run_in_place = hasattr(opts, 'i')
    mem_usage = memory_usage((_func_exec, (stmt, self.shell.user_ns)), timeout=timeout,
                             run_in_place=run_in_place)
    if mem_usage:
        print('maximum of %d: %f MB per loop' % (repeat, max(mem_usage)))
    else:
        print('ERROR: could not read memory usage, try with a lower interval or more iterations')
Usage, in line mode:
%memit [-ir<R>t<T>] statement
Options:
-r<R>: repeat the loop iteration <R> times and take the best result.
Default: 1
-i: run the code in the current environment, without forking a new process.
This is required on some MacOS versions of Accelerate if your line contains
a call to `np.dot`.
-t<T>: timeout after <T> seconds. Unused if `-i` is active. Default: None
Examples
--------
::
In [1]: import numpy as np
In [2]: %memit np.zeros(1e7)
maximum of 1: 76.402344 MB per loop
In [3]: %memit np.ones(1e6)
maximum of 1: 7.820312 MB per loop
In [4]: %memit -r 10 np.empty(1e8)
maximum of 10: 0.101562 MB per loop
In [5]: memit -t 3 while True: pass;
Subprocess timed out.
Subprocess timed out.
Subprocess timed out.
ERROR: all subprocesses exited unsuccessfully. Try again with the `-i`
option.
maximum of 1: -inf MB per loop |
def get_values():
    """
    Get dictionary of values from the backend

    :return: mapping of config name to value, with declared defaults
        overridden by any value stored in the backend
    """
    # Start from the defaults declared in settings.CONFIG
    # (name -> (default, ...)).
    values = {name: options[0] for name, options in settings.CONFIG.items()}
    # Overlay the actual values currently held by the backend.
    values.update(dict(config._backend.mget(settings.CONFIG)))
    return values
:return: |
def disable_digital_reporting(self, pin):
    """
    Disables digital reporting for the port containing ``pin``.

    Turning reporting off affects all 8 bits of that digital "port".

    :param pin: Pin and all pins for this port
    :return: No return value
    """
    port = pin // 8
    self._command_handler.send_command(
        [self._command_handler.REPORT_DIGITAL + port, self.REPORTING_DISABLE])
is disabled for all 8 bits in the "port" -
:param pin: Pin and all pins for this port
:return: No return value |
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
    """Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy.  Operators may be unary or
    binary, left- or right-associative.  Parse actions can also be attached
    to operator expressions.

    Parameters:
     - baseExpr - expression representing the most basic element for the nested
     - opList - list of tuples, one for each operator precedence level in the
       expression grammar; each tuple is of the form
       (opExpr, numTerms, rightLeftAssoc, parseAction), where:
        - opExpr is the pyparsing expression for the operator;
          may also be a string, which will be converted to a Literal;
          if numTerms is 3, opExpr is a tuple of two expressions, for the
          two operators separating the 3 terms
        - numTerms is the number of terms for this operator (must
          be 1, 2, or 3)
        - rightLeftAssoc is the indicator whether the operator is
          right or left associative, using the pyparsing-defined
          constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
        - parseAction is the parse action to be associated with
          expressions matching this operator expression (the
          parse action tuple member may be omitted)
     - lpar - expression for matching left-parentheses (default=Suppress('('))
     - rpar - expression for matching right-parentheses (default=Suppress(')'))
    """
    ret = Forward()
    # The most tightly-binding element: the base expression or a
    # parenthesized sub-expression of the whole grammar.
    lastExpr = baseExpr | ( lpar + ret + rpar )
    # Build precedence levels from tightest- to loosest-binding; each
    # level's expression is defined in terms of the previous level.
    for i,operDef in enumerate(opList):
        # Pad the tuple so a missing parse action becomes None.
        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
        termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
        if arity == 3:
            if opExpr is None or len(opExpr) != 2:
                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
            opExpr1, opExpr2 = opExpr
        # Forward declaration so right-associative forms can recurse
        # into their own level.
        thisExpr = Forward().setName(termName)
        if rightLeftAssoc == opAssoc.LEFT:
            if arity == 1:
                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
                else:
                    # No explicit operator: plain juxtaposition.
                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        elif rightLeftAssoc == opAssoc.RIGHT:
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Optional):
                    opExpr = Optional(opExpr)
                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
                else:
                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        else:
            raise ValueError("operator must indicate right or left associativity")
        if pa:
            matchExpr.setParseAction( pa )
        # This level matches either an operator expression at this
        # precedence, or falls through to the tighter-binding level.
        thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
        lastExpr = thisExpr
    ret <<= lastExpr
    return ret
operators working in a precedence hierarchy. Operators may be unary or
binary, left- or right-associative. Parse actions can also be attached
to operator expressions.
Parameters:
- baseExpr - expression representing the most basic element for the nested
- opList - list of tuples, one for each operator precedence level in the
expression grammar; each tuple is of the form
(opExpr, numTerms, rightLeftAssoc, parseAction), where:
- opExpr is the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal;
if numTerms is 3, opExpr is a tuple of two expressions, for the
two operators separating the 3 terms
- numTerms is the number of terms for this operator (must
be 1, 2, or 3)
- rightLeftAssoc is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
parse action tuple member may be omitted)
- lpar - expression for matching left-parentheses (default=Suppress('('))
- rpar - expression for matching right-parentheses (default=Suppress(')')) |
def inner(self, x1, x2):
    """Return the inner product of ``x1`` and ``x2``.

    Parameters
    ----------
    x1, x2 : `LinearSpaceElement`
        Elements whose inner product to compute.

    Returns
    -------
    inner : `LinearSpace.field` element
        Inner product of ``x1`` and ``x2``.

    Raises
    ------
    LinearSpaceTypeError
        If ``x1`` or ``x2`` is not an element of this space.
    """
    if x1 not in self:
        raise LinearSpaceTypeError('`x1` {!r} is not an element of '
                                   '{!r}'.format(x1, self))
    if x2 not in self:
        raise LinearSpaceTypeError('`x2` {!r} is not an element of '
                                   '{!r}'.format(x2, self))
    inner = self._inner(x1, x2)
    if self.field is None:
        return inner
    else:
        # Reuse the already-computed value; the original called
        # ``self._inner(x1, x2)`` a second time here, doubling the work
        # (and any side effects) of the backend inner product.
        return self.field.element(inner)
Parameters
----------
x1, x2 : `LinearSpaceElement`
Elements whose inner product to compute.
Returns
-------
inner : `LinearSpace.field` element
Inner product of ``x1`` and ``x2``. |
def delete(self, removealien=True):
    """Delete the current entity.
    This will also call :meth:`RefobjInterface.get_children_to_delete` and
    delete these children first by calling :meth:`Reftrack.delete`.
    To delete the content it will call :meth:`RefobjInterface.delete`.
    Then the refobject will be set to None. If the :class:`Reftrack` object is an alien to
    the current scene, because it is not linked in the database, it will also remove itself
    from the root and from the treemodel.
    If it is not an alien, it will just empty all of tis children and update its status.
    :param removealien: If True, remove this reftrack, if it is an alien
    :type removealien: :class:`bool`
    :returns: None
    :rtype: None
    :raises: None
    """
    if self.status() is None:
        # Nothing is loaded for this entity: just detach it from its
        # parent, the tree model and the root, then stop.
        parent = self.get_parent()
        if parent:
            parent.remove_child(self)
        self._treeitem.parent().remove_child(self._treeitem)
        self.get_root().remove_reftrack(self)
        return
    # Delete dependent children first. NOTE(review): ``reversed`` suggests
    # the list is ordered parent-first so children go first -- confirm
    # against get_children_to_delete's ordering contract.
    todelete = self.get_children_to_delete()
    allchildren = self.get_all_children()
    for c in reversed(todelete):
        c._delete()
    # Unregister every descendant from the root before deleting ourselves.
    for c in allchildren:
        self.get_root().remove_reftrack(c)
    self._delete()
    if self.alien() and removealien:
        # Not linked in the database: remove this reftrack entirely.
        self.get_root().remove_reftrack(self)
    self.update_restrictions()
self.emit_data_changed() | Delete the current entity.
This will also call :meth:`RefobjInterface.get_children_to_delete` and
delete these children first by calling :meth:`Reftrack.delete`.
To delete the content it will call :meth:`RefobjInterface.delete`.
Then the refobject will be set to None. If the :class:`Reftrack` object is an alien to
the current scene, because it is not linked in the database, it will also remove itself
from the root and from the treemodel.
If it is not an alien, it will just empty all of tis children and update its status.
:param removealien: If True, remove this reftrack, if it is an alien
:type removealien: :class:`bool`
:returns: None
:rtype: None
:raises: None |
def reduce_tree(node, parent=None):
    """
    Internal function. Reduces a compiled pattern tree to an
    intermediate representation suitable for feeding the
    automaton. This also trims off any optional pattern elements(like
    [a], a*).
    """
    # Returns a MinNode tree (or None when the subtree is optional/empty).
    new_node = None
    #switch on the node type
    if node.type == syms.Matcher:
        #skip
        node = node.children[0]
    if node.type == syms.Alternatives :
        #2 cases
        if len(node.children) <= 2:
            #just a single 'Alternative', skip this node
            new_node = reduce_tree(node.children[0], parent)
        else:
            #real alternatives
            new_node = MinNode(type=TYPE_ALTERNATIVES)
            #skip odd children('|' tokens)
            for child in node.children:
                if node.children.index(child)%2:
                    continue
                reduced = reduce_tree(child, new_node)
                if reduced is not None:
                    new_node.children.append(reduced)
    elif node.type == syms.Alternative:
        if len(node.children) > 1:
            new_node = MinNode(type=TYPE_GROUP)
            for child in node.children:
                reduced = reduce_tree(child, new_node)
                if reduced:
                    new_node.children.append(reduced)
            if not new_node.children:
                # delete the group if all of the children were reduced to None
                new_node = None
        else:
            new_node = reduce_tree(node.children[0], parent)
    elif node.type == syms.Unit:
        if (isinstance(node.children[0], pytree.Leaf) and
            node.children[0].value == '('):
            #skip parentheses
            return reduce_tree(node.children[1], parent)
        if ((isinstance(node.children[0], pytree.Leaf) and
               node.children[0].value == '[')
               or
               (len(node.children)>1 and
               hasattr(node.children[1], "value") and
               node.children[1].value == '[')):
            #skip whole unit if its optional
            return None
        # Scan the unit's children to classify its parts before building
        # the MinNode.
        leaf = True
        details_node = None
        alternatives_node = None
        has_repeater = False
        repeater_node = None
        has_variable_name = False
        for child in node.children:
            if child.type == syms.Details:
                leaf = False
                details_node = child
            elif child.type == syms.Repeater:
                has_repeater = True
                repeater_node = child
            elif child.type == syms.Alternatives:
                alternatives_node = child
            if hasattr(child, 'value') and child.value == '=': # variable name
                has_variable_name = True
        #skip variable name
        if has_variable_name:
            #skip variable name, '='
            name_leaf = node.children[2]
            if hasattr(name_leaf, 'value') and name_leaf.value == '(':
                # skip parenthesis
                name_leaf = node.children[3]
        else:
            name_leaf = node.children[0]
        #set node type
        if name_leaf.type == token_labels.NAME:
            #(python) non-name or wildcard
            if name_leaf.value == 'any':
                new_node = MinNode(type=TYPE_ANY)
            else:
                if hasattr(token_labels, name_leaf.value):
                    new_node = MinNode(type=getattr(token_labels, name_leaf.value))
                else:
                    new_node = MinNode(type=getattr(pysyms, name_leaf.value))
        elif name_leaf.type == token_labels.STRING:
            #(python) name or character; remove the apostrophes from
            #the string value
            name = name_leaf.value.strip("'")
            if name in tokens:
                new_node = MinNode(type=tokens[name])
            else:
                new_node = MinNode(type=token_labels.NAME, name=name)
        elif name_leaf.type == syms.Alternatives:
            new_node = reduce_tree(alternatives_node, parent)
        #handle repeaters
        if has_repeater:
            if repeater_node.children[0].value == '*':
                #reduce to None
                new_node = None
            elif repeater_node.children[0].value == '+':
                #reduce to a single occurence i.e. do nothing
                pass
            else:
                #TODO: handle {min, max} repeaters
                raise NotImplementedError
                pass
        #add children
        if details_node and new_node is not None:
            for child in details_node.children[1:-1]:
                #skip '<', '>' markers
                reduced = reduce_tree(child, new_node)
                if reduced is not None:
                    new_node.children.append(reduced)
    if new_node:
        new_node.parent = parent
return new_node | Internal function. Reduces a compiled pattern tree to an
intermediate representation suitable for feeding the
automaton. This also trims off any optional pattern elements(like
[a], a*). |
def add(self, resource, replace=False):
    """Add a single resource to the set.

    :param resource: object with a ``uri`` attribute, stored under that URI
    :param replace: when True, silently overwrite an existing entry
    :raises ResourceSetDupeError: if the URI is already present and
        ``replace`` is False
    """
    uri = resource.uri
    if not replace and uri in self:
        raise ResourceSetDupeError(
            "Attempt to add resource already in this set")
    self[uri] = resource
def difference(self, second_iterable, selector=identity):
    '''Returns those elements which are in the source sequence which are not
    in the second_iterable.
    This method is equivalent to the Except() LINQ operator, renamed to a
    valid Python identifier.
    Note: This method uses deferred execution, but as soon as execution
    commences the entirety of the second_iterable is consumed;
    therefore, although the source sequence may be infinite the
    second_iterable must be finite.
    Args:
        second_iterable: Elements from this sequence are excluded from the
            returned sequence. This sequence will be consumed in its
            entirety, so must be finite.
        selector: A optional single argument function with selects from the
            elements of both sequences the values which will be
            compared for equality. If omitted the identity function will
            be used.
    Returns:
        A sequence containing all elements in the source sequence except
        those which are also members of the second sequence.
    Raises:
        ValueError: If the Queryable has been closed.
        TypeError: If the second_iterable is not in fact iterable.
        TypeError: If the selector is not callable.
    '''
    if self.closed():
        raise ValueError("Attempt to call difference() on a "
                         "closed Queryable.")
    if not is_iterable(second_iterable):
        # Fixed: the original adjacent string literals were missing a
        # separating space, producing "second_iterableof non-iterable".
        raise TypeError("Cannot compute difference() with second_iterable "
                        "of non-iterable {0}".format(str(type(second_iterable))[7: -2]))
    if not is_callable(selector):
        raise TypeError("difference() parameter selector={0} is "
                        "not callable".format(repr(selector)))
    # Deferred: the actual set difference happens lazily in the generator.
    return self._create(self._generate_difference_result(second_iterable,
                                                         selector))
in the second_iterable.
This method is equivalent to the Except() LINQ operator, renamed to a
valid Python identifier.
Note: This method uses deferred execution, but as soon as execution
commences the entirety of the second_iterable is consumed;
therefore, although the source sequence may be infinite the
second_iterable must be finite.
Args:
second_iterable: Elements from this sequence are excluded from the
returned sequence. This sequence will be consumed in its
entirety, so must be finite.
selector: A optional single argument function with selects from the
elements of both sequences the values which will be
compared for equality. If omitted the identity function will
be used.
Returns:
A sequence containing all elements in the source sequence except
those which are also members of the second sequence.
Raises:
ValueError: If the Queryable has been closed.
TypeError: If the second_iterable is not in fact iterable.
TypeError: If the selector is not callable. |
def _register_update(self, fmt={}, replot=False, force=False,
todefault=False):
"""
Register formatoptions for the update
Parameters
----------
fmt: dict
Keys can be any valid formatoptions with the corresponding values
(see the :attr:`formatoptions` attribute)
replot: bool
Boolean that determines whether the data specific formatoptions
shall be updated in any case or not.
%(InteractiveBase._register_update.parameters.force|todefault)s"""
if self.disabled:
return
self.replot = self.replot or replot
self._todefault = self._todefault or todefault
if force is True:
force = list(fmt)
self._force.update(
[ret[0] for ret in map(self.check_key, force or [])])
# check the keys
list(map(self.check_key, fmt))
self._registered_updates.update(fmt) | Register formatoptions for the update
Parameters
----------
fmt: dict
Keys can be any valid formatoptions with the corresponding values
(see the :attr:`formatoptions` attribute)
replot: bool
Boolean that determines whether the data specific formatoptions
shall be updated in any case or not.
%(InteractiveBase._register_update.parameters.force|todefault)s |
def listItem(node):
    """
    An item in a list
    """
    item = nodes.list_item()
    for rendered in MarkDown(node):
        item += rendered
    return item
def chunks(self):
    """Block dimensions for this dataset's data or None if it's not a dask
    array.
    """
    dim_sizes = {}
    for variable in self.variables.values():
        if variable.chunks is None:
            continue
        for dim, sizes in zip(variable.dims, variable.chunks):
            # Every variable sharing a dimension must agree on its chunking.
            if dim_sizes.get(dim, sizes) != sizes:
                raise ValueError('inconsistent chunks')
            dim_sizes[dim] = sizes
    return Frozen(SortedKeysDict(dim_sizes))
array. |
def update(self, eid, data, token):
    """
    Update a given Library Entry.

    :param eid str: Entry ID
    :param data dict: Attributes
    :param token str: OAuth token
    :return: True on success
    :rtype: Bool
    :raises ConnectionError: if the API responds with a non-200 status
    """
    final_dict = {"data": {"id": eid, "type": "libraryEntries", "attributes": data}}
    # Copy the default headers instead of aliasing them: the original
    # ``final_headers = self.header`` mutated the shared header dict,
    # leaking this call's Authorization token into every later request.
    final_headers = dict(self.header)
    final_headers['Authorization'] = "Bearer {}".format(token)
    r = requests.patch(self.apiurl + "/library-entries/{}".format(eid), json=final_dict, headers=final_headers)
    if r.status_code != 200:
        raise ConnectionError(r.text)
    return True
:param eid str: Entry ID
:param data dict: Attributes
:param token str: OAuth token
:return: True or ServerError
:rtype: Bool or Exception |
def _setup_time_axis(self, t_start=None, t_stop=None):
""" Setup time axis. """
# now check to see how many integrations requested
ii_start, ii_stop = 0, self.n_ints_in_file
if t_start:
ii_start = t_start
if t_stop:
ii_stop = t_stop
n_ints = ii_stop - ii_start
## Setup time axis
t0 = self.header[b'tstart']
t_delt = self.header[b'tsamp']
self.timestamps = np.arange(0, n_ints) * t_delt / 24./60./60 + t0
return ii_start, ii_stop, n_ints | Setup time axis. |
def _get_windows_console_width() -> int:
    """
    A small utility function for getting the current console window's width, in Windows.
    :return: The current console window's width.
    """
    from ctypes import byref, windll
    import pyreadline

    # -11 is STD_OUTPUT_HANDLE in the Win32 API.
    stdout_handle = windll.kernel32.GetStdHandle(-11)
    buffer_info = pyreadline.console.CONSOLE_SCREEN_BUFFER_INFO()
    windll.kernel32.GetConsoleScreenBufferInfo(stdout_handle, byref(buffer_info))
    return buffer_info.dwSize.X
:return: The current console window's width. |
def get_instance(self, payload):
    """
    Build an instance of UsageInstance
    :param dict payload: Payload response from the API
    :returns: twilio.rest.api.v2010.account.usage.UsageInstance
    :rtype: twilio.rest.api.v2010.account.usage.UsageInstance
    """
    account_sid = self._solution['account_sid']
    return UsageInstance(self._version, payload, account_sid=account_sid)
def list_price(self):
    """List Price.
    :return:
        A tuple containing:
        1. Float representation of price.
        2. ISO Currency code (string).
    """
    amount = self._safe_get_element_text('ItemAttributes.ListPrice.Amount')
    currency = self._safe_get_element_text(
        'ItemAttributes.ListPrice.CurrencyCode')
    if not amount:
        return None, None
    # The API reports the amount in minor units (e.g. cents).
    return float(amount) / 100, currency
:return:
A tuple containing:
1. Float representation of price.
2. ISO Currency code (string). |
def get_host(self):
    """
    Gets the host name or IP address.

    Falls back to the "ip" parameter when "host" is not set.

    :return: the host name or IP address, or None if neither is set.
    """
    host = self.get_as_nullable_string("host")
    # Identity check instead of ``!= None``: comparing with ``!=`` invokes
    # the object's __eq__ and is non-idiomatic (PEP 8).
    if host is None:
        host = self.get_as_nullable_string("ip")
    return host
:return: the host name or IP address. |
def ndim(n, *args, **kwargs):
    """
    Makes a multi-dimensional array of random floats. (Replaces RandomArray).
    """
    thunk = kwargs.get("thunk", lambda: random.random())
    if not args:
        # Innermost dimension: fill with values produced by the thunk.
        return [thunk() for _ in range(n)]
    # Recurse for the remaining dimensions.
    return [ndim(*args, thunk=thunk) for _ in range(n)]
def open_resource(name):
    """Open a resource from the zoneinfo subdir for reading.
    Uses the pkg_resources module if available and no standard file
    found at the calculated location.
    """
    segments = name.lstrip('/').split('/')
    for segment in segments:
        # Refuse path traversal and embedded separators.
        if segment == os.path.pardir or os.path.sep in segment:
            raise ValueError('Bad path segment: %r' % segment)
    filename = os.path.join(os.path.dirname(__file__),
                            'zoneinfo', *segments)
    if not os.path.exists(filename):
        # http://bugs.launchpad.net/bugs/383171 - we avoid using this
        # unless absolutely necessary to help when a broken version of
        # pkg_resources is installed.
        try:
            from pkg_resources import resource_stream
        except ImportError:
            pass
        else:
            return resource_stream(__name__, 'zoneinfo/' + name)
    return open(filename, 'rb')
Uses the pkg_resources module if available and no standard file
found at the calculated location. |
def _init_data_with_tdms(self, tdms_filename):
    """Initializes the current RT-DC dataset with a tdms file.

    Populates ``self._events`` from the TDMS channels and builds
    ``self.config`` from the accompanying ``*_para.ini`` / ``*_camera.ini``
    files.
    """
    tdms_file = TdmsFile(str(tdms_filename))
    # time is always there
    table = "Cell Track"
    # Edit naming.dclab2tdms to add features
    for arg in naming.tdms2dclab:
        try:
            data = tdms_file.object(table, arg).data
        except KeyError:
            # Channel not present in this file; simply skip the feature.
            pass
        else:
            if data is None or len(data) == 0:
                # Ignore empty features. npTDMS treats empty
                # features in the following way:
                # - in nptdms 0.8.2, `data` is `None`
                # - in nptdms 0.9.0, `data` is an array of length 0
                continue
            self._events[naming.tdms2dclab[arg]] = data
    # Set up configuration
    tdms_config = Configuration(
        files=[self.path.with_name(self._mid + "_para.ini"),
               self.path.with_name(self._mid + "_camera.ini")],
        )
    dclab_config = Configuration()
    # Map (section, parameter) entries from the tdms config into the dclab
    # config, converting with the declared type function. Tuple metadata
    # points at a (section, option) in the tdms config; anything else is
    # used as a literal default.
    for section in naming.configmap:
        for pname in naming.configmap[section]:
            meta = naming.configmap[section][pname]
            typ = dfn.config_funcs[section][pname]
            if isinstance(meta, tuple):
                osec, opar = meta
                if osec in tdms_config and opar in tdms_config[osec]:
                    # ``pop`` removes consumed options from tdms_config so
                    # only leftovers reach _complete_config_tdms below.
                    val = tdms_config[osec].pop(opar)
                    dclab_config[section][pname] = typ(val)
            else:
                dclab_config[section][pname] = typ(meta)
    self.config = dclab_config
    self._complete_config_tdms(tdms_config)
self._init_filters() | Initializes the current RT-DC dataset with a tdms file. |
def _piped_bamprep_region_gatk(data, region, prep_params, out_file, tmp_dir):
    """Perform semi-piped BAM preparation using Picard/GATK tools.

    Builds the piped input command for ``region``, optionally inserts a
    GATK realignment stage, and runs the combined command writing to
    ``out_file`` inside a file transaction.
    """
    # NOTE(review): broad_runner is created but never used in this
    # function -- possibly leftover; confirm before removing.
    broad_runner = broad.runner_from_config(data["config"])
    cur_bam, cl = _piped_input_cl(data, region, tmp_dir, out_file, prep_params)
    if not prep_params["realign"]:
        prerecal_bam = None
    elif prep_params["realign"] == "gatk":
        prerecal_bam, cl = _piped_realign_gatk(data, region, cl, out_file, tmp_dir,
                                               prep_params)
    else:
        raise NotImplementedError("Realignment method: %s" % prep_params["realign"])
    with file_transaction(data, out_file) as tx_out_file:
        # GATK writes via "-o"; other pipelines stream to stdout (">").
        out_flag = ("-o" if (prep_params["realign"] == "gatk"
                             or not prep_params["realign"])
                    else ">")
        cmd = "{cl} {out_flag} {tx_out_file}".format(**locals())
        do.run(cmd, "GATK: realign {0}".format(region), data)
_cleanup_tempfiles(data, [cur_bam, prerecal_bam]) | Perform semi-piped BAM preparation using Picard/GATK tools. |
def cluster_uniform_time(data=None, k=None, stride=1, metric='euclidean',
                         n_jobs=None, chunksize=None, skip=0, **kwargs):
    r"""Uniform time clustering
    If given data, performs a clustering that selects data points uniformly in
    time and then assigns the data using a Voronoi discretization. Returns a
    :class:`UniformTimeClustering <pyemma.coordinates.clustering.UniformTimeClustering>` object
    that can be used to extract the discretized data sequences, or to assign
    other data points to the same partition. If data is not given, an empty
    :class:`UniformTimeClustering <pyemma.coordinates.clustering.UniformTimeClustering>` will be created that
    still needs to be parametrized, e.g. in a :func:`pipeline`.
    Parameters
    ----------
    data : ndarray (T, d) or list of ndarray (T_i, d) or a reader created
        by source function input data, if available in memory
    k : int
        the number of cluster centers. When not specified (None), min(sqrt(N), 5000) is chosen as default value,
        where N denotes the number of data points
    stride : int, optional, default = 1
        If set to 1, all input data will be used for estimation. Note that this
        could cause this calculation to be very slow for large data sets. Since
        molecular dynamics data is usually correlated at short timescales, it is
        often sufficient to estimate transformations at a longer stride.
        Note that the stride option in the get_output() function of the returned
        object is independent, so you can parametrize at a long stride, and
        still map all frames through the transformer.
    metric : str
        metric to use during clustering ('euclidean', 'minRMSD')
    n_jobs : int or None, default None
        Number of threads to use during assignment of the data.
        If None, all available CPUs will be used.
    chunksize: int, default=None
        Number of data frames to process at once. Choose a higher value here,
        to optimize thread usage and gain processing speed. If None is passed,
        use the default value of the underlying reader/data source. Choose zero to
        disable chunking at all.
    skip : int, default=0
        skip the first initial n frames per trajectory.
    Returns
    -------
    uniformTime : a :class:`UniformTimeClustering <pyemma.coordinates.clustering.UniformTimeClustering>` clustering object
        Object for uniform time clustering.
        It holds discrete trajectories and cluster center information.
    .. autoclass:: pyemma.coordinates.clustering.uniform_time.UniformTimeClustering
        :members:
        :undoc-members:
        .. rubric:: Methods
        .. autoautosummary:: pyemma.coordinates.clustering.uniform_time.UniformTimeClustering
            :methods:
        .. rubric:: Attributes
        .. autoautosummary:: pyemma.coordinates.clustering.uniform_time.UniformTimeClustering
            :attributes:
    """
    # Imports are local to keep module import light.
    from pyemma.coordinates.clustering.uniform_time import UniformTimeClustering
    res = UniformTimeClustering(k, metric=metric, n_jobs=n_jobs, skip=skip, stride=stride)
    from pyemma.util.reflection import get_default_args
    # Resolve deprecated chunksize arguments against this function's default.
    cs = _check_old_chunksize_arg(chunksize, get_default_args(cluster_uniform_time)['chunksize'], **kwargs)
    if data is not None:
        res.estimate(data, chunksize=cs)
    else:
        # No data yet: defer estimation (e.g. for use inside a pipeline).
        res.chunksize = cs
return res | r"""Uniform time clustering
If given data, performs a clustering that selects data points uniformly in
time and then assigns the data using a Voronoi discretization. Returns a
:class:`UniformTimeClustering <pyemma.coordinates.clustering.UniformTimeClustering>` object
that can be used to extract the discretized data sequences, or to assign
other data points to the same partition. If data is not given, an empty
:class:`UniformTimeClustering <pyemma.coordinates.clustering.UniformTimeClustering>` will be created that
still needs to be parametrized, e.g. in a :func:`pipeline`.
Parameters
----------
data : ndarray (T, d) or list of ndarray (T_i, d) or a reader created
by source function input data, if available in memory
k : int
the number of cluster centers. When not specified (None), min(sqrt(N), 5000) is chosen as default value,
where N denotes the number of data points
stride : int, optional, default = 1
If set to 1, all input data will be used for estimation. Note that this
could cause this calculation to be very slow for large data sets. Since
molecular dynamics data is usually correlated at short timescales, it is
often sufficient to estimate transformations at a longer stride.
Note that the stride option in the get_output() function of the returned
object is independent, so you can parametrize at a long stride, and
still map all frames through the transformer.
metric : str
metric to use during clustering ('euclidean', 'minRMSD')
n_jobs : int or None, default None
Number of threads to use during assignment of the data.
If None, all available CPUs will be used.
chunksize: int, default=None
Number of data frames to process at once. Choose a higher value here,
to optimize thread usage and gain processing speed. If None is passed,
use the default value of the underlying reader/data source. Choose zero to
disable chunking at all.
skip : int, default=0
skip the first initial n frames per trajectory.
Returns
-------
uniformTime : a :class:`UniformTimeClustering <pyemma.coordinates.clustering.UniformTimeClustering>` clustering object
Object for uniform time clustering.
It holds discrete trajectories and cluster center information.
.. autoclass:: pyemma.coordinates.clustering.uniform_time.UniformTimeClustering
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.clustering.uniform_time.UniformTimeClustering
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.clustering.uniform_time.UniformTimeClustering
:attributes: |
def except_keyword(source, start, keyword):
    """Return the position just past *keyword* if it occurs at *start*
    (after skipping whitespace); otherwise return None."""
    start = pass_white(source, start)
    end = start + len(keyword)
    if end > len(source):
        return None
    if source[start:end] != keyword:
        return None
    # Reject a longer identifier that merely begins with the keyword.
    if end < len(source) and source[end] in IDENTIFIER_PART:
        return None
    return end
Note: skips white space |
def eval_objfn(self):
    """Compute components of objective function as well as total
    contribution to objective function.
    """
    f_term = self.obfn_f()
    g_term = self.obfn_g(self.obfn_gvar())
    return (f_term + g_term, f_term, g_term)
contribution to objective function. |
def ecdsa_private_key(privkey_str=None, compressed=None):
    """
    Make a private key, but enforce the following rule:
    * unless the key's hex encoding specifically ends in '01', treat it as uncompressed.
    """
    if compressed is None:
        # Only a 66-char hex key explicitly ending in '01' is compressed.
        compressed = (privkey_str is not None
                      and len(privkey_str) == 66
                      and privkey_str[-2:] == '01')
    return _ECPrivateKey(privkey_str, compressed=compressed)
* unless the key's hex encoding specifically ends in '01', treat it as uncompressed. |
def generate_one_of(self):
    """
    Means that value have to be valid by only one of those definitions. It can't be valid
    by two or more of them.
    .. code-block:: python
        {
            'oneOf': [
                {'type': 'number', 'multipleOf': 3},
                {'type': 'number', 'multipleOf': 5},
            ],
        }
    Valid values for this definition are 3, 5, 6, ... but not 15 for example.
    """
    # NOTE(review): ``self.l`` emits one line of generated validator code;
    # used as a context manager it appears to open an indented block in
    # the output -- confirm against the code generator's contract.
    self.l('{variable}_one_of_count = 0')
    for definition_item in self._definition['oneOf']:
        # When we know it's failing (one of means exactly once), we do not need to do another expensive try-except.
        with self.l('if {variable}_one_of_count < 2:'):
            with self.l('try:'):
                self.generate_func_code_block(definition_item, self._variable, self._variable_name, clear_variables=True)
                self.l('{variable}_one_of_count += 1')
            self.l('except JsonSchemaException: pass')
with self.l('if {variable}_one_of_count != 1:'):
self.l('raise JsonSchemaException("{name} must be valid exactly by one of oneOf definition")') | Means that value have to be valid by only one of those definitions. It can't be valid
by two or more of them.
.. code-block:: python
{
'oneOf': [
{'type': 'number', 'multipleOf': 3},
{'type': 'number', 'multipleOf': 5},
],
}
Valid values for this definition are 3, 5, 6, ... but not 15 for example. |
def zone_compare(timezone):
    '''
    Compares the given timezone with the machine timezone. Mostly useful for
    running state checks.
    Args:
        timezone (str):
            The timezone to compare. This can be in Windows or Unix format. Can
            be any of the values returned by the ``timezone.list`` function
    Returns:
        bool: ``True`` if they match, otherwise ``False``
    Example:
    .. code-block:: bash
        salt '*' timezone.zone_compare 'America/Denver'
    '''
    lowered = timezone.lower()
    if lowered in mapper.win_to_unix:
        # Already a Windows-style zone name; use it directly.
        check_zone = timezone
    elif lowered in mapper.unix_to_win:
        # Unix-style name; translate to its Windows counterpart.
        check_zone = mapper.get_win(timezone)
    else:
        # Neither a known Windows nor Unix zone name.
        raise CommandExecutionError(
            'Invalid timezone passed: {0}'.format(timezone))
    return get_zone() == mapper.get_unix(check_zone, 'Unknown')
running state checks.
Args:
timezone (str):
The timezone to compare. This can be in Windows or Unix format. Can
be any of the values returned by the ``timezone.list`` function
Returns:
bool: ``True`` if they match, otherwise ``False``
Example:
.. code-block:: bash
salt '*' timezone.zone_compare 'America/Denver' |
def _run_ext_wsgiutils(app, config, mode):
    """Run WsgiDAV using ext_wsgiutils_server from the wsgidav package.

    ``mode`` is unused here but kept for the common runner signature --
    presumably all server runners share it; confirm against the dispatcher.
    """
    from wsgidav.server import ext_wsgiutils_server

    banner = "Running WsgiDAV {} on wsgidav.ext_wsgiutils_server...".format(__version__)
    _logger.info(banner)
    _logger.warning(
        "WARNING: This single threaded server (ext-wsgiutils) is not meant for production."
    )
    try:
        ext_wsgiutils_server.serve(config, app)
    except KeyboardInterrupt:
        # Graceful shutdown on Ctrl-C.
        _logger.warning("Caught Ctrl-C, shutting down...")
def apply_corrections(self):
    """
    Apply the first applicable correction for the recorded errors.

    Iterates over ``self.errors`` and, for each proposed ``(function, arg)``
    solution, looks up the solution function by name on the scheduler
    adapter (then on the application adapter) and calls it with the
    solution's argument.

    :return: True as soon as one adapter accepts a solution, else False.
    """
    for error in self.errors:
        for solution in error.scheduler_adapter_solutions:
            if self.scheduler_adapter is not None:
                # The original used ``__getattribut__`` (sic) -- a typo for
                # ``__getattribute__`` that always raised AttributeError.
                # ``getattr`` is the idiomatic dynamic lookup.
                if getattr(self.scheduler_adapter, solution[0].__name__)(solution[1]):
                    return True
        for solution in error.application_adapter_solutions:
            if self.application_adapter is not None:
                if getattr(self.application_adapter, solution[0].__name__)(solution[1]):
                    return True
    return False
def endpoint_from_name(endpoint_name):
    """The object used for interacting with the named relations, or None.
    """
    if endpoint_name is None:
        return None
    factory = relation_factory(endpoint_name)
    # No factory registered for this endpoint -> None (implicit in the
    # original; made explicit here).
    return factory.from_name(endpoint_name) if factory else None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.