code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def required_get_and_update(self, name, default=None):
    '''Look up a setting via ``_get_and_update_setting`` and exit if unset.

    Parameters
    ----------
    name : str
        Name of the setting (typically an environment variable) to read.
    default : optional
        Fallback value forwarded to ``_get_and_update_setting``.

    Returns
    -------
    The resolved setting value; never ``None`` or ``""`` (those exit).
    '''
    # Bug fix: the caller-supplied default was previously discarded —
    # the call hard-coded default=None.
    setting = self._get_and_update_setting(name, default=default)
    if setting in [None, ""]:
        bot.exit('You must export %s' % name)
    return setting
def parse_rest_doc(doc):
    """ Extract the headers, delimiters, and text from reST-formatted docstrings.

    Parameters
    ----------
    doc: Union[str, None]
        The docstring to parse; ``None``/empty yields a single blank section.

    Returns
    -------
    Dict[str, Section]
        Ordered mapping of stripped header text -> Section. The '' key holds
        any text that precedes the first section header. """
    class Section(object):
        def __init__(self, header=None, body=None):
            self.header = header  # str: header text including its delimiter line(s)
            self.body = body  # str: section body, right-stripped
    # Leading unnamed section collects text before the first header.
    doc_sections = OrderedDict([('', Section(header=''))])
    if not doc:
        return doc_sections
    doc = cleandoc(doc)
    lines = iter(doc.splitlines())
    header = ''
    body = []
    section = Section(header=header)
    line = ''
    while True:
        try:
            prev_line = line
            line = next(lines)
            # section header encountered: a delimiter line at least as long
            # as the non-empty line directly above it (the header title)
            if is_delimiter(line) and 0 < len(prev_line) <= len(line):
                # prev-prev-line is overline: the header uses the
                # over/underlined style, so two trailing lines belong to it
                if len(body) >= 2 and len(body[-2]) == len(line) \
                        and body[-2][0] == line[0] and is_delimiter(body[-2]):
                    lim = -2
                else:
                    lim = -1
                # Close out the previous section, then start a new one whose
                # header is the title line(s) plus this delimiter line.
                section.body = "\n".join(body[:lim]).rstrip()
                doc_sections.update([(header.strip(), section)])
                section = Section(header="\n".join(body[lim:] + [line]))
                header = prev_line
                body = []
                line = ''
            else:
                body.append(line)
        except StopIteration:
            # End of docstring: flush remaining text into the final section.
            section.body = "\n".join(body).rstrip()
            doc_sections.update([(header.strip(), section)])
            break
    return doc_sections
def get_translation_objects(self, request, language_code, obj=None, inlines=True):
    """
    Yield every object that should be deleted when a translation is deleted.

    Items are yielded as lists or QuerySets, not as single instances.
    """
    if obj is not None:
        # A single shared model may be backed by several TranslatedFieldsModel
        # classes; collect the translation row from each of them.
        for translations_model in obj._parler_meta.get_all_models():
            try:
                row = translations_model.objects.get(
                    master=obj, language_code=language_code)
            except translations_model.DoesNotExist:
                continue
            yield [row]
    if inlines:
        for _inline, queryset in self._get_inline_translations(
                request, language_code, obj=obj):
            yield queryset
def convert_http_request(request, referrer_host=None):
    '''Convert a HTTP request.

    Args:
        request: An instance of :class:`.http.request.Request`.
        referrer_host (str): The referring hostname or IP address.

    Returns:
        Request: An instance of :class:`urllib.request.Request`
    '''
    converted = urllib.request.Request(
        request.url_info.url,
        origin_req_host=referrer_host,
    )
    # Carry every header field over onto the urllib request.
    for field_name, field_value in request.fields.get_all():
        converted.add_header(field_name, field_value)
    return converted
def create_scratch_org(self, org_name, config_name, days=None, set_password=True):
    """ Adds/Updates a scratch org config to the keychain from a named config """
    config = getattr(
        self.project_config, "orgs__scratch__{}".format(config_name)
    )
    # A caller-supplied `days` overrides the config; otherwise honour the
    # config's own value, defaulting to 1 day.
    if days is None:
        config.setdefault("days", 1)
    else:
        config["days"] = days
    config["set_password"] = bool(set_password)
    config["scratch"] = True
    config.setdefault("namespaced", False)
    config["config_name"] = config_name
    config["sfdx_alias"] = "{}__{}".format(
        self.project_config.project__name, org_name
    )
    self.set_org(ScratchOrgConfig(config, org_name))
def cli(
    paths,
    dbname,
    separator,
    quoting,
    skip_errors,
    replace_tables,
    table,
    extract_column,
    date,
    datetime,
    datetime_format,
    primary_key,
    fts,
    index,
    shape,
    filename_column,
    no_index_fks,
    no_fulltext_fks,
):
    """
    PATHS: paths to individual .csv files or to directories containing .csvs
    DBNAME: name of the SQLite database file to create
    """
    # make plural for more readable code:
    extract_columns = extract_column
    del extract_column
    if extract_columns:
        click.echo("extract_columns={}".format(extract_columns))
    if dbname.endswith(".csv"):
        raise click.BadParameter("dbname must not end with .csv")
    if "." not in dbname:
        dbname += ".db"
    db_existed = os.path.exists(dbname)
    conn = sqlite3.connect(dbname)
    dataframes = []
    csvs = csvs_from_paths(paths)
    sql_type_overrides = None
    for name, path in csvs.items():
        try:
            df = load_csv(path, separator, skip_errors, quoting, shape)
            df.table_name = table or name
            # Bug fix: operate on a per-file copy of `shape`. The previous
            # code mutated `shape` itself inside the loop, so with multiple
            # CSVs the filename column was appended once per file and later
            # load_csv() calls received a corrupted shape string.
            df_shape = shape
            if filename_column:
                df[filename_column] = name
                if df_shape:
                    df_shape += ",{}".format(filename_column)
            # NOTE(review): only the overrides from the last successfully
            # loaded CSV are kept here, but they are applied to every table
            # below — confirm all CSVs share a shape.
            sql_type_overrides = apply_shape(df, df_shape)
            apply_dates_and_datetimes(df, date, datetime, datetime_format)
            dataframes.append(df)
        except LoadCsvError as e:
            click.echo("Could not load {}: {}".format(path, e), err=True)
    click.echo("Loaded {} dataframes".format(len(dataframes)))
    # Use extract_columns to build a column:(table,label) dictionary
    foreign_keys = {}
    for col in extract_columns:
        bits = col.split(":")
        if len(bits) == 3:
            foreign_keys[bits[0]] = (bits[1], bits[2])
        elif len(bits) == 2:
            foreign_keys[bits[0]] = (bits[1], "value")
        else:
            foreign_keys[bits[0]] = (bits[0], "value")
    # Now we have loaded the dataframes, we can refactor them
    created_tables = {}
    refactored = refactor_dataframes(
        conn, dataframes, foreign_keys, not no_fulltext_fks
    )
    for df in refactored:
        # This is a bit trickier because we need to
        # create the table with extra SQL for foreign keys
        if replace_tables and table_exists(conn, df.table_name):
            drop_table(conn, df.table_name)
        if table_exists(conn, df.table_name):
            df.to_sql(df.table_name, conn, if_exists="append", index=False)
        else:
            to_sql_with_foreign_keys(
                conn,
                df,
                df.table_name,
                foreign_keys,
                sql_type_overrides,
                primary_keys=primary_key,
                index_fks=not no_index_fks,
            )
        created_tables[df.table_name] = df
        if index:
            for index_defn in index:
                add_index(conn, df.table_name, index_defn)
    # Create FTS tables
    if fts:
        fts_version = best_fts_version()
        if not fts_version:
            conn.close()
            raise click.BadParameter(
                "Your SQLite version does not support any variant of FTS"
            )
        # Check that columns make sense (renamed loop variable so the
        # `table` CLI option is no longer shadowed)
        for created_name, created_df in created_tables.items():
            for fts_column in fts:
                if fts_column not in created_df.columns:
                    raise click.BadParameter(
                        'FTS column "{}" does not exist'.format(fts_column)
                    )
        generate_and_populate_fts(conn, created_tables.keys(), fts, foreign_keys)
    conn.close()
    if db_existed:
        click.echo(
            "Added {} CSV file{} to {}".format(
                len(csvs), "" if len(csvs) == 1 else "s", dbname
            )
        )
    else:
        click.echo(
            "Created {} from {} CSV file{}".format(
                dbname, len(csvs), "" if len(csvs) == 1 else "s"
            )
        )
def get_xritdecompress_outfile(stdout):
    """Analyse the output of the xRITDecompress command call and return the file."""
    for raw_line in stdout:
        parts = raw_line.split(b':', 1)
        if len(parts) != 2:
            # A line without a key/value separator ends the useful output.
            break
        key, value = (part.strip() for part in parts)
        if key == b'Decompressed file':
            return value
    return b''
def profile_detail(
    request, username,
    template_name=accounts_settings.ACCOUNTS_PROFILE_DETAIL_TEMPLATE,
    extra_context=None, **kwargs):
    """
    Detailed view of an user.

    :param username:
        String of the username of which the profile should be viewed.

    :param template_name:
        String representing the template name that should be used to display
        the profile.

    :param extra_context:
        Dictionary of variables which should be supplied to the template. The
        ``profile`` key is always the current profile.

    **Context**

    ``profile``
        Instance of the currently viewed ``Profile``.
    """
    user = get_object_or_404(get_user_model(),
                             username__iexact=username)
    profile_model = get_profile_model()
    try:
        profile = user.get_profile()
    except profile_model.DoesNotExist:
        # Lazily create a profile for users that do not have one yet.
        profile = profile_model(user=user)
        profile.save()
    if not profile.can_view_profile(request.user):
        return HttpResponseForbidden(_("You don't have permission to view this profile."))
    if not extra_context:
        extra_context = dict()
    # Fix: reuse the profile fetched (or just created) above instead of
    # issuing a redundant second `user.get_profile()` lookup.
    extra_context['profile'] = profile
    return ExtraContextTemplateView.as_view(template_name=template_name,
                                            extra_context=extra_context)(request)
def get_datetime(self, timestamp: str, unix=True):
    """Convert a ``%Y%m%dT%H%M%S.%fZ`` timestamp.

    Parameters
    ----------
    timestamp: str
        A timestamp in the ``%Y%m%dT%H%M%S.%fZ`` format, as returned by the
        API (e.g. in ``created_time``: ``20180718T145906.000Z``).
    unix: Optional[bool] = True
        Whether to return a POSIX timestamp (seconds since epoch) instead of
        a :class:`datetime.datetime`.

    Returns
    -------
    int or datetime.datetime
    """
    parsed = datetime.strptime(timestamp, '%Y%m%dT%H%M%S.%fZ')
    if not unix:
        return parsed
    return int(parsed.timestamp())
def generate_index(self, schemas):
    '''Render the HTML for the schema index page.'''
    ordered = sorted(schemas, key=lambda schema: schema.object_name)
    context = {
        'schemas': ordered,
        'project': self.project_name,
        'title': '{}: Database schema documentation'.format(self.project_name),
    }
    # The Mako TemplateLookup is configured on the instance.
    return self.lookup.get_template('index.mako').render_unicode(**context)
def trigger(self, events, *args, **kwargs):
    """
    Fires the given *events* (string or list of strings). All callbacks
    associated with these *events* will be called and if their respective
    objects have a *times* value set it will be used to determine when to
    remove the associated callback from the event.

    If given, callbacks associated with the given *events* will be called
    with *args* and *kwargs*.
    """
    # Make sure our _on_off_events dict is present (if first invokation)
    if not hasattr(self, '_on_off_events'):
        self._on_off_events = {}
    if not hasattr(self, 'exc_info'):
        self.exc_info = None
    logging.debug("OnOffMixin triggering event(s): %s" % events)
    # NOTE: `unicode` implies this module targets Python 2 (or defines a
    # compatibility alias elsewhere in the file).
    if isinstance(events, (str, unicode)):
        events = [events]
    for event in events:
        if event in self._on_off_events:
            # NOTE(review): `off()` presumably mutates the callback list
            # being iterated here — confirm it operates on a copy.
            for callback_obj in self._on_off_events[event]:
                callback_obj['callback'](*args, **kwargs)
                callback_obj['calls'] += 1
                # If `times` is unset (e.g. None) this never matches and the
                # callback stays registered indefinitely.
                if callback_obj['calls'] == callback_obj['times']:
                    self.off(event, callback_obj['callback'])
def Bankoff(m, x, rhol, rhog, mul, mug, D, roughness=0, L=1):
    r'''Calculates two-phase pressure drop with the Bankoff (1960) correlation,
    as shown in [2]_, [3]_, and [4]_.

    .. math::
        \Delta P_{tp} = \phi_{l}^{7/4} \Delta P_{l}

    .. math::
        \phi_l = \frac{1}{1-x}\left[1 - \gamma\left(1 - \frac{\rho_g}{\rho_l}
        \right)\right]^{3/7}\left[1 + x\left(\frac{\rho_l}{\rho_g} - 1\right)
        \right]

    .. math::
        \gamma = \frac{0.71 + 2.35\left(\frac{\rho_g}{\rho_l}\right)}
        {1 + \frac{1-x}{x} \cdot \frac{\rho_g}{\rho_l}}

    Parameters
    ----------
    m : float
        Mass flow rate of fluid, [kg/s]
    x : float
        Quality of fluid, [-]
    rhol : float
        Liquid density, [kg/m^3]
    rhog : float
        Gas density, [kg/m^3]
    mul : float
        Viscosity of liquid, [Pa*s]
    mug : float
        Viscosity of gas, [Pa*s]
    D : float
        Diameter of pipe, [m]
    roughness : float, optional
        Roughness of pipe for use in calculating friction factor, [m]
    L : float, optional
        Length of pipe, [m]

    Returns
    -------
    dP : float
        Pressure drop of the two-phase flow, [Pa]

    Notes
    -----
    This correlation is not actually shown in [1]_. Its origin is unknown.
    The author recommends against using this.

    Examples
    --------
    >>> Bankoff(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6, mug=14E-6,
    ... D=0.05, roughness=0, L=1)
    4746.059442453399

    References
    ----------
    .. [1] Bankoff, S. G. "A Variable Density Single-Fluid Model for Two-Phase
       Flow With Particular Reference to Steam-Water Flow." Journal of Heat
       Transfer 82, no. 4 (November 1, 1960): 265-72. doi:10.1115/1.3679930.
    .. [2] Thome, John R. "Engineering Data Book III." Wolverine Tube Inc
       (2004). http://www.wlv.com/heat-transfer-databook/
    .. [3] Moreno Quibén, Jesús. "Experimental and Analytical Study of Two-
       Phase Pressure Drops during Evaporation in Horizontal Tubes," 2005.
       doi:10.5075/epfl-thesis-3337.
    .. [4] Mekisso, Henock Mateos. "Comparison of Frictional Pressure Drop
       Correlations for Isothermal Two-Phase Horizontal Flow." Thesis, Oklahoma
       State University, 2013. https://shareok.org/handle/11244/11109.
    '''
    # Liquid-only properties, for calculation of dP_lo: superficial velocity
    # as if the whole mass flow were liquid through the full pipe area.
    v_lo = m/rhol/(pi/4*D**2)
    Re_lo = Reynolds(V=v_lo, rho=rhol, mu=mul, D=D)
    fd_lo = friction_factor(Re=Re_lo, eD=roughness/D)
    # Darcy-Weisbach single-phase (liquid-only) pressure drop.
    dP_lo = fd_lo*L/D*(0.5*rhol*v_lo**2)
    # gamma and phi_l per the docstring equations above.
    gamma = (0.71 + 2.35*rhog/rhol)/(1. + (1.-x)/x*rhog/rhol)
    phi_Bf = 1./(1.-x)*(1 - gamma*(1 - rhog/rhol))**(3/7.)*(1. + x*(rhol/rhog -1.))
    # Two-phase drop: dP_tp = phi_l^(7/4) * dP_l.
    return dP_lo*phi_Bf**(7/4.)
def get_file_uuid(fpath, hasher=None, stride=1):
    """ Creates a uuid from the hash of a file
    """
    if hasher is None:
        # sha1 yields 20 bytes of output; a UUID only needs 16.
        hasher = hashlib.sha1()
        #hasher = hashlib.sha256() # 32 bytes of output
    digest = get_file_hash(fpath, hasher=hasher, stride=stride)
    # Truncate the digest to the 16 bytes a UUID requires.
    return uuid.UUID(bytes=digest[:16])
def transform(self, X, perplexity=5, initialization="median", k=25,
              learning_rate=1, n_iter=100, exaggeration=2, momentum=0):
    """Embed new points into the existing embedding.

    Each new point is optimized only with respect to the existing embedding,
    ignoring any interactions between the points in ``X`` among themselves.
    Please see the :ref:`parameter-guide` for more information.

    Parameters
    ----------
    X: np.ndarray
        The data matrix to be added to the existing embedding.
    perplexity: float
        Continuous analogue of the number of nearest neighbours whose
        distances t-SNE attempts to preserve; only neighbours in the
        existing embedding are considered.
    initialization: Union[np.ndarray, str]
        Initial point positions: a precomputed array, ``median``,
        ``weighted`` or ``random`` (``median`` or ``weighted`` preferred).
    k: int
        Number of nearest neighbours used for the initial placement only
        (does not affect optimization, unlike ``perplexity``).
    learning_rate: float
        t-SNE learning rate; too low or too high may collapse points into
        a "ball" (the crowding problem).
    n_iter: int
        Number of normal-regime optimization iterations; typically far
        fewer are needed than for a full optimization.
    exaggeration: float
        Exaggeration factor for the normal optimization phase; useful for
        denser clusters on large data sets.
    momentum: float
        Momentum used during the optimization phase.

    Returns
    -------
    PartialTSNEEmbedding
        The positions of the new points in the embedding space.
    """
    # Non-standard affinity classes (e.g. multiscale) take `perplexities`
    # rather than `perplexity`, so their `to_new` would fail; raise an
    # informative error up front instead.
    to_new_params = inspect.signature(self.affinities.to_new).parameters
    if "perplexity" not in to_new_params:
        raise TypeError(
            "`transform` currently does not support non `%s` type affinity "
            "classes. Please use `prepare_partial` and `optimize` to add "
            "points to the embedding." % PerplexityBasedNN.__name__
        )
    new_embedding = self.prepare_partial(
        X, perplexity=perplexity, initialization=initialization, k=k
    )
    try:
        new_embedding.optimize(
            n_iter=n_iter,
            learning_rate=learning_rate,
            exaggeration=exaggeration,
            momentum=momentum,
            inplace=True,
            propagate_exception=True,
        )
    except OptimizationInterrupt as interrupt:
        # A callback stopped the run early; use whatever was reached.
        log.info("Optimization was interrupted with callback.")
        new_embedding = interrupt.final_embedding
    return new_embedding
def draw_beam(ax, p1, p2, width=0, beta1=None, beta2=None,
              format=None, **kwds):
    r"""Draw a laser beam on *ax* from point *p1* to *p2*.

    With ``width == 0`` a single line is drawn between the two points.
    With a nonzero *width*, the two edges of the beam are drawn as separate
    lines; *beta1* and *beta2* are then required and set the angles of the
    beam's end faces at *p1* and *p2* respectively (the closed-form edge
    endpoints below were presumably derived with a CAS — do not edit by
    hand).

    ``format`` is a matplotlib format string (default ``'k-'``); extra
    keyword arguments are forwarded to ``ax.plot``.
    """
    if format is None: format = 'k-'
    if width == 0:
        # Zero-width beam: just a straight segment p1 -> p2.
        x0 = [p1[0], p2[0]]
        y0 = [p1[1], p2[1]]
        ax.plot(x0, y0, format, **kwds)
    else:
        # Half-width of the beam; the edge lines lie a distance `a` from
        # the p1-p2 axis.
        a = width/2
        x1, y1 = p1
        x2, y2 = p2
        # First beam edge: endpoint near p1 (x11, y11) tilted by beta1 and
        # endpoint near p2 (x21, y21) tilted by beta2.
        x11 = (a*x1**2*cos(beta1) - 2*a*x1*x2*cos(beta1) + a*x2**2*cos(beta1) + a*y1**2*cos(beta1) + a*y2**2*cos(beta1) - (2*a*y1*cos(beta1) - sqrt((x1 - x2)**2 + (y1 - y2)**2)*x1*cos(beta1))*y2 - (x1*y1*cos(beta1) - x1**2*sin(beta1) + x1*x2*sin(beta1))*sqrt((x1 - x2)**2 + (y1 - y2)**2))/(sqrt((x1 - x2)**2 + (y1 - y2)**2)*y2*cos(beta1) - sqrt((x1 - x2)**2 + (y1 - y2)**2)*(y1*cos(beta1) - x1*sin(beta1) + x2*sin(beta1)))
        y11 = (a*x1**2*sin(beta1) - 2*a*x1*x2*sin(beta1) + a*x2**2*sin(beta1) + a*y1**2*sin(beta1) + a*y2**2*sin(beta1) - (2*a*y1*sin(beta1) - sqrt((x1 - x2)**2 + (y1 - y2)**2)*y1*cos(beta1))*y2 - (y1**2*cos(beta1) - (x1*sin(beta1) - x2*sin(beta1))*y1)*sqrt((x1 - x2)**2 + (y1 - y2)**2))/(sqrt((x1 - x2)**2 + (y1 - y2)**2)*y2*cos(beta1) - sqrt((x1 - x2)**2 + (y1 - y2)**2)*(y1*cos(beta1) - x1*sin(beta1) + x2*sin(beta1)))
        x21 = (a*x1**2*cos(beta2) - 2*a*x1*x2*cos(beta2) + a*x2**2*cos(beta2) + a*y1**2*cos(beta2) + a*y2**2*cos(beta2) - (2*a*y1*cos(beta2) - sqrt((x1 - x2)**2 + (y1 - y2)**2)*x2*cos(beta2))*y2 - (x2*y1*cos(beta2) - x1*x2*sin(beta2) + x2**2*sin(beta2))*sqrt((x1 - x2)**2 + (y1 - y2)**2))/(sqrt((x1 - x2)**2 + (y1 - y2)**2)*y2*cos(beta2) - sqrt((x1 - x2)**2 + (y1 - y2)**2)*(y1*cos(beta2) - x1*sin(beta2) + x2*sin(beta2)))
        y21 = (a*x1**2*sin(beta2) - 2*a*x1*x2*sin(beta2) + a*x2**2*sin(beta2) + a*y1**2*sin(beta2) + (a*sin(beta2) + sqrt((x1 - x2)**2 + (y1 - y2)**2)*cos(beta2))*y2**2 - (2*a*y1*sin(beta2) + sqrt((x1 - x2)**2 + (y1 - y2)**2)*(y1*cos(beta2) - x1*sin(beta2) + x2*sin(beta2)))*y2)/(sqrt((x1 - x2)**2 + (y1 - y2)**2)*y2*cos(beta2) - sqrt((x1 - x2)**2 + (y1 - y2)**2)*(y1*cos(beta2) - x1*sin(beta2) + x2*sin(beta2)))
        ax.plot([x11, x21], [y11, y21], format, **kwds)
        # Second beam edge: mirror image on the other side of the axis.
        x12 = -(a*x1**2*cos(beta2) - 2*a*x1*x2*cos(beta2) + a*x2**2*cos(beta2) + a*y1**2*cos(beta2) + a*y2**2*cos(beta2) - (2*a*y1*cos(beta2) + sqrt((x1 - x2)**2 + (y1 - y2)**2)*x2*cos(beta2))*y2 + (x2*y1*cos(beta2) - x1*x2*sin(beta2) + x2**2*sin(beta2))*sqrt((x1 - x2)**2 + (y1 - y2)**2))/(sqrt((x1 - x2)**2 + (y1 - y2)**2)*y2*cos(beta2) - sqrt((x1 - x2)**2 + (y1 - y2)**2)*(y1*cos(beta2) - x1*sin(beta2) + x2*sin(beta2)))
        y12 = -(a*x1**2*sin(beta2) - 2*a*x1*x2*sin(beta2) + a*x2**2*sin(beta2) + a*y1**2*sin(beta2) + (a*sin(beta2) - sqrt((x1 - x2)**2 + (y1 - y2)**2)*cos(beta2))*y2**2 - (2*a*y1*sin(beta2) - sqrt((x1 - x2)**2 + (y1 - y2)**2)*(y1*cos(beta2) - x1*sin(beta2) + x2*sin(beta2)))*y2)/(sqrt((x1 - x2)**2 + (y1 - y2)**2)*y2*cos(beta2) - sqrt((x1 - x2)**2 + (y1 - y2)**2)*(y1*cos(beta2) - x1*sin(beta2) + x2*sin(beta2)))
        x22 = -(a*x1**2*cos(beta1) - 2*a*x1*x2*cos(beta1) + a*x2**2*cos(beta1) + a*y1**2*cos(beta1) + a*y2**2*cos(beta1) - (2*a*y1*cos(beta1) + sqrt((x1 - x2)**2 + (y1 - y2)**2)*x1*cos(beta1))*y2 + (x1*y1*cos(beta1) - x1**2*sin(beta1) + x1*x2*sin(beta1))*sqrt((x1 - x2)**2 + (y1 - y2)**2))/(sqrt((x1 - x2)**2 + (y1 - y2)**2)*y2*cos(beta1) - sqrt((x1 - x2)**2 + (y1 - y2)**2)*(y1*cos(beta1) - x1*sin(beta1) + x2*sin(beta1)))
        y22 = -(a*x1**2*sin(beta1) - 2*a*x1*x2*sin(beta1) + a*x2**2*sin(beta1) + a*y1**2*sin(beta1) + a*y2**2*sin(beta1) - (2*a*y1*sin(beta1) + sqrt((x1 - x2)**2 + (y1 - y2)**2)*y1*cos(beta1))*y2 + (y1**2*cos(beta1) - (x1*sin(beta1) - x2*sin(beta1))*y1)*sqrt((x1 - x2)**2 + (y1 - y2)**2))/(sqrt((x1 - x2)**2 + (y1 - y2)**2)*y2*cos(beta1) - sqrt((x1 - x2)**2 + (y1 - y2)**2)*(y1*cos(beta1) - x1*sin(beta1) + x2*sin(beta1)))
        ax.plot([x12, x22], [y12, y22], format, **kwds)
def use_comparative_sequence_rule_enabler_rule_view(self):
    """Pass through to provider SequenceRuleEnablerRuleLookupSession.use_comparative_sequence_rule_enabler_rule_view"""
    self._object_views['sequence_rule_enabler_rule'] = COMPARATIVE
    # self._get_provider_session('sequence_rule_enabler_rule_lookup_session') # To make sure the session is tracked
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_comparative_sequence_rule_enabler_rule_view()
        except AttributeError:
            # Not every provider session supports this view; skip those.
            pass
def from_gps(cls, gps, Name = None):
    """
    Instantiate a Time element initialized to the value of the
    given GPS time. The Name attribute will be set to the
    value of the Name parameter if given.

    Note: the new Time element holds a reference to the GPS
    time, not a copy of it. Subsequent modification of the GPS
    time object will be reflected in what gets written to disk.
    """
    elem = cls(AttributesImpl({u"Type": u"GPS"}))
    if Name is not None:
        elem.Name = Name
    # Store a reference, not a copy, so later mutation of `gps` is
    # reflected at serialization time.
    elem.pcdata = gps
    return elem
def heightmap_get_interpolated_value(
    hm: np.ndarray, x: float, y: float
) -> float:
    """Return the interpolated height at non integer coordinates.

    Args:
        hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
        x (float): A floating point x coordinate.
        y (float): A floating point y coordinate.

    Returns:
        float: The value at ``x``, ``y``.
    """
    # Wrap the array in the C heightmap struct, then delegate to libtcod.
    c_heightmap = _heightmap_cdata(hm)
    return float(lib.TCOD_heightmap_get_interpolated_value(c_heightmap, x, y))
def wait_for_keys_modified(self, *keys, modifiers_to_check=_mod_keys,
                           timeout=0):
    """The same as wait_for_keys, but returns a frozen_set which contains
    the pressed key, and the modifier keys.

    :param modifiers_to_check: iterable of modifiers for which the function
        will check whether they are pressed
    :type modifiers: Iterable[int]"""
    # NOTE(review): the modifier state is sampled *before* waiting for the
    # key press — confirm that ordering is intentional.
    mods_state = pygame.key.get_mods()
    pressed = self.wait_for_keys(*keys, timeout=timeout)
    active_mods = EventListener._contained_modifiers(
        mods_state, modifiers_to_check)
    return frozenset([pressed]).union(active_mods)
def basename(path):
    """Rightmost part of path after separator."""
    trimmed = path.strip(SEP)
    last_sep = trimmed.rfind(SEP)
    # No separator left after trimming: the whole path is the basename.
    return path if last_sep < 0 else trimmed[last_sep + 1:]
async def api_bikes(request):
    """
    Gets stolen bikes within a radius of a given postcode.

    :param request: The aiohttp request.
    :return: The bikes stolen with the given range from a postcode.
    """
    postcode: Optional[str] = request.match_info.get('postcode', None)
    try:
        radius = int(request.match_info.get('radius', 10))
    except ValueError:
        raise web.HTTPBadRequest(text="Invalid Radius")
    try:
        # "random" is a special postcode that picks one at random.
        if postcode == "random":
            postcode = await get_postcode_random()
        bikes = await get_bikes(postcode, radius)
    except CachingError as e:
        raise web.HTTPInternalServerError(text=e.status)
    if bikes is None:
        raise web.HTTPNotFound(text="Post code does not exist.")
    return str_json_response([bike.serialize() for bike in bikes])
def purchase_time(self):
    """Date and time of app purchase.

    :rtype: datetime
    """
    # The interface reports seconds since the Unix epoch.
    epoch_seconds = self._iface.get_purchase_time(self.app_id)
    return datetime.utcfromtimestamp(epoch_seconds)
def get_participants(self, namespace, room):
    """Return an iterable with the active participants in a room."""
    # Iterate a snapshot so concurrent joins/leaves don't break iteration.
    room_members = self.rooms[namespace][room].copy()
    for sid, active in six.iteritems(room_members):
        # NOTE(review): `active` is ignored — every sid is yielded whether
        # active or not; confirm that is intended.
        yield sid
def monitor(self):
    """
    Access the Monitor Twilio Domain

    :returns: Monitor Twilio Domain
    :rtype: twilio.rest.monitor.Monitor
    """
    if self._monitor is not None:
        return self._monitor
    # Imported lazily so the domain module is only loaded on first use.
    from twilio.rest.monitor import Monitor
    self._monitor = Monitor(self)
    return self._monitor
def async_make_reply(msgname, types, arguments_future, major):
    """Wrap future that will resolve with arguments needed by make_reply().

    A tornado ``gen`` generator-coroutine: waits for ``arguments_future``,
    then "returns" the built reply via ``gen.Return``.
    """
    arguments = yield arguments_future
    # raise gen.Return(...) is the pre-PEP-380 way of returning a value
    # from a generator-based coroutine.
    raise gen.Return(make_reply(msgname, types, arguments, major))
def delete(self):
    """Reverts all files in this changelist then deletes the changelist from perforce"""
    try:
        self.revert()
    except errors.ChangelistError:
        # Nothing to revert (or revert not possible); deletion proceeds anyway.
        pass
    # p4 change -d <number>: delete the (now empty) pending changelist.
    self._connection.run(['change', '-d', str(self._change)])
def visit_For(self, node):
    """ Handle iterate variable in for loops.

    >>> import gast as ast
    >>> from pythran import passmanager, backend
    >>> node = ast.parse('''
    ... def foo():
    ...     a = b = c = 2
    ...     for i in __builtin__.range(1):
    ...         a -= 1
    ...         b += 1''')
    >>> pm = passmanager.PassManager("test")
    >>> res = pm.gather(RangeValues, node)
    >>> res['a']
    Interval(low=-inf, high=2)
    >>> res['b']
    Interval(low=2, high=inf)
    >>> res['c']
    Interval(low=2, high=2)
    """
    assert isinstance(node.target, ast.Name), "For apply on variables."
    self.visit(node.iter)
    # When iterating over a call (e.g. range(...)), ask each aliased
    # intrinsic for the range of the *elements* it yields and bind that
    # interval to the loop variable.
    if isinstance(node.iter, ast.Call):
        for alias in self.aliases[node.iter.func]:
            if isinstance(alias, Intrinsic):
                self.add(node.target.id,
                         alias.return_range_content(
                             [self.visit(n) for n in node.iter.args]))
    # Delegate fixed-point iteration over the loop body.
    self.visit_loop(node)
def _split(string, splitters):
"""Splits a string into parts at multiple characters"""
part = ''
for character in string:
if character in splitters:
yield part
part = ''
else:
part += character
yield part | Splits a string into parts at multiple characters |
def noise_op(latents, hparams):
    """Adds isotropic gaussian-noise to each latent.

    Args:
      latents: 4-D or 5-D tensor, shape=(NTHWC) or (NHWC).
      hparams: HParams.
    Returns:
      latents: latents with isotropic gaussian noise appended.
    """
    is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
    # Noise is applied only while training and only if a nonzero stddev is set.
    if hparams.latent_noise == 0 or not is_training:
        return latents
    shape = common_layers.shape_list(latents)
    noise = tf.random_normal(shape, stddev=hparams.latent_noise)
    return latents + noise
Args:
latents: 4-D or 5-D tensor, shape=(NTHWC) or (NHWC).
hparams: HParams.
Returns:
latents: latents with isotropic gaussian noise appended. |
def parallel_assimilate(self, rootpath):
    """
    Assimilate the entire subdirectory structure in rootpath.

    Walks *rootpath*, collects every path the drone considers valid, then
    processes them in a multiprocessing ``Pool`` of ``self._num_drones``
    workers.  Each worker appends a JSON string to a shared list, which is
    decoded with ``MontyDecoder`` into ``self._data`` at the end.
    """
    logger.info('Scanning for valid paths...')
    valid_paths = []
    for (parent, subdirs, files) in os.walk(rootpath):
        valid_paths.extend(self._drone.get_valid_paths((parent, subdirs,
                                                        files)))
    # Manager-backed list/dict so the worker processes can share
    # results and progress counters.
    manager = Manager()
    data = manager.list()
    status = manager.dict()
    status['count'] = 0
    status['total'] = len(valid_paths)
    logger.info('{} valid paths found.'.format(len(valid_paths)))
    p = Pool(self._num_drones)
    p.map(order_assimilation, ((path, self._drone, data, status)
                               for path in valid_paths))
    # Deserialize each worker's JSON payload back into objects.
    for d in data:
        self._data.append(json.loads(d, cls=MontyDecoder))
def start(self):
    """
    Start every ``Piper`` in input -> output order (see ``Piper.start``).

    Starting happens in two rounds -- stages ``(0, 1)`` for all pipers
    first, then stage ``(2,)`` -- which allows the ``Pipers`` to share
    ``NuMaps``.
    """
    ordered = self.postorder()  # top -> bottom of the pipeline
    for p in ordered:
        p.start(stages=(0, 1))
    for p in ordered:
        p.start(stages=(2,))
output. See ``Piper.start``. ``Pipers`` instances are started in two
stages, which allows them to share ``NuMaps``. |
def patch(self, overrides):
    """
    Recursively merge *overrides* into this config.

    Entries whose incoming value and current value are both dicts are
    patched in place (recursing); every other entry is replaced outright.
    For example, patching ``{a: 1, b: {c: 3, d: 4}}`` with
    ``{b: {d: 2, e: 4}, c: 5}`` yields ``{a: 1, b: {c: 3, d: 2, e: 4}, c: 5}``.
    A falsy *overrides* (e.g. ``None``) is treated as an empty mapping.
    """
    for key, value in iteritems(overrides or {}):
        existing = self.get(key)
        if isinstance(value, dict) and isinstance(existing, dict):
            existing.patch(value)
        else:
            self[key] = value
Example:
If the current dictionary looks like this:
a: 1,
b: {
c: 3,
d: 4
}
and `patch` is called with the following overrides:
b: {
d: 2,
e: 4
},
c: 5
then, the following will be the resulting dictionary:
a: 1,
b: {
c: 3,
d: 2,
e: 4
},
c: 5 |
def main():
    """
    Main entry point.

    Parses command-line arguments, validates each playbook file against
    the bundled JSON schema, reports problems, and exits with status 0
    (all valid) or 1 (issues found).
    NOTE(review): Python 2 only -- uses ``except X, e`` syntax and the
    removed ``version=`` argparse constructor argument.
    """
    import jsonschema
    parser = argparse.ArgumentParser(version=__version__)
    parser.add_argument(
        '--verbose', action='store_true', default=False,
        help='Turn on verbose output.')
    parser.add_argument(
        '--header', action='store_true', default=False,
        help='Turn on header.')
    parser.add_argument(
        'playbooks', metavar='PLAYBOOKS', type=str, nargs='+',
        help='The playbook(s) to lint.')
    args = parser.parse_args()
    validation_results = {}
    all_valid = True
    # Load the playbook schema
    with open(resource_filename(
            'relent', 'schemas/playbook_schema.json'), 'r') as schema_fp:
        schema = json.load(schema_fp)
    # Do checks against playbooks
    for playbook in args.playbooks:
        try:
            with open(playbook, 'r') as pb:
                # Schema check
                jsonschema.validate(json.load(pb), schema)
        except jsonschema.ValidationError, e:
            # Structurally valid JSON that fails the schema.
            all_valid = False
            validation_results[playbook] = (
                False, e.message, list(e.schema_path), str(e))
        except ValueError, e:
            # json.load failed: the file isn't valid JSON at all.
            all_valid = False
            validation_results[playbook] = (
                False, 'JSON is invalid.', str(e), str(e))
    # If all_valid is True then return back happy results
    if all_valid is True:
        if args.verbose or args.header:
            parser._print_message('Found 0 issues.\n')
        raise SystemExit(0)
    else:
        if args.verbose or args.header:
            parser._print_message('Found %s issues.\n\n' % len(
                validation_results))
        for problem_playbook in validation_results.keys():
            parser._print_message('%s: E: %s %s\n' % (
                problem_playbook,
                validation_results[problem_playbook][1],
                validation_results[problem_playbook][2]))
            if args.verbose:
                parser._print_message(
                    validation_results[problem_playbook][3] + '\n--\n')
        raise SystemExit(1)
def join_syllables_spaces(syllables: List[str], spaces: List[int]) -> str:
    """
    Given a list of syllables, and a list of integers indicating the position of spaces, return
    a string that has a space inserted at the designated points.

    :param syllables: syllables to concatenate
    :param spaces: insertion indices, applied in order against the growing character list
    :return: the joined string with spaces inserted

    >>> join_syllables_spaces(["won", "to", "tree", "dun"], [3, 6, 11])
    'won to tree dun'
    """
    # Work on a list of single characters so insert() positions are simple.
    chars = list("".join(syllables))
    for space in spaces:
        chars.insert(space, " ")
    # The list holds only single-character strings, so a plain join suffices;
    # the previous flatten() call was a no-op on this already-flat list.
    return "".join(chars)
a string that has a space inserted at the designated points.
:param syllables:
:param spaces:
:return:
>>> join_syllables_spaces(["won", "to", "tree", "dun"], [3, 6, 11])
'won to tree dun' |
def lookup(self, domain_name, validate=True):
    """
    Lookup an existing SimpleDB domain. This differs from
    :py:meth:`get_domain` in that ``None`` is returned if ``validate`` is
    ``True`` and no match was found (instead of raising an exception).

    :param str domain_name: The name of the domain to retrieve
    :param bool validate: If ``True``, a ``None`` value will be returned
        if the specified domain can't be found. If ``False``, a
        :py:class:`Domain <boto.sdb.domain.Domain>` object will be dumbly
        returned, regardless of whether it actually exists.
    :rtype: :class:`boto.sdb.domain.Domain` object or ``None``
    :return: The Domain object or ``None`` if the domain does not exist.
    """
    try:
        domain = self.get_domain(domain_name, validate)
    except Exception:
        # get_domain raises when the domain is missing; translate that to
        # None.  Catch Exception (not a bare ``except:``) so SystemExit
        # and KeyboardInterrupt still propagate.
        domain = None
    return domain
:py:meth:`get_domain` in that ``None`` is returned if ``validate`` is
``True`` and no match was found (instead of raising an exception).
:param str domain_name: The name of the domain to retrieve
:param bool validate: If ``True``, a ``None`` value will be returned
if the specified domain can't be found. If ``False``, a
:py:class:`Domain <boto.sdb.domain.Domain>` object will be dumbly
returned, regardless of whether it actually exists.
:rtype: :class:`boto.sdb.domain.Domain` object or ``None``
:return: The Domain object or ``None`` if the domain does not exist. |
def load_all_csvs_to_model(path, model, field_names=None, delimiter=None, batch_len=10000,
                           dialect=None, num_header_rows=1, mode='rUb',
                           strip=True, clear=False, dry_run=True, ignore_errors=True,
                           sort_files=True, recursive=False, ext='',
                           min_mod_date=None, max_mod_date=None,
                           verbosity=2):
    """Bulk create database records from all csv files found within a directory.

    Files are discovered under *path* (optionally *recursive*), filtered by
    extension and a modification-date window, then loaded one at a time via
    ``load_csv_to_model``.  Returns the total number of records loaded.
    Progress is reported in bytes of input processed.
    NOTE(review): Python 2 only -- uses ``print`` statements, ``basestring``
    and bare ``reduce``.
    """
    # Normalize the date filters: accept strings or dates, compare as datetimes.
    if min_mod_date and isinstance(min_mod_date, basestring):
        min_mod_date = parse_date(min_mod_date)
    if isinstance(min_mod_date, datetime.date):
        min_mod_date = datetime.datetime.combine(min_mod_date, datetime.datetime.min.time())
    if max_mod_date and isinstance(max_mod_date, basestring):
        max_mod_date = parse_date(max_mod_date)
    if isinstance(max_mod_date, datetime.date):
        max_mod_date = datetime.datetime.combine(max_mod_date, datetime.datetime.min.time())
    path = path or './'
    batch_len = batch_len or 1000
    if verbosity > 0:
        if dry_run:
            print 'DRY_RUN: actions will not modify the database.'
        else:
            print 'THIS IS NOT A DRY RUN, THESE ACTIONS WILL MODIFY THE DATABASE!!!!!!!!!'
    if clear:
        clear_model(model, dry_run=dry_run, verbosity=verbosity)
    file_dicts = util.find_files(path, ext=ext, level=None if recursive else 0, verbosity=verbosity)
    # Total byte count drives the progress bar.
    file_bytes = reduce(lambda a,b: a+b['size'], file_dicts, 0)
    if sort_files:
        file_dicts = sorted(file_dicts, key=itemgetter('path'))
    if verbosity > 1:
        print file_dicts
    if verbosity > 0:
        widgets = [pb.Counter(), '/%d bytes for all files: ' % file_bytes, pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()]
        i, pbar = 0, pb.ProgressBar(widgets=widgets, maxval=file_bytes)
        pbar.start()
    N = 0
    file_bytes_done = 0
    for meta in file_dicts:
        # Skip files whose mtime falls outside the requested window.
        if (min_mod_date and meta['modified'] < min_mod_date) or (max_mod_date and meta['modified'] > max_mod_date):
            if verbosity > 1:
                print("Skipping {0} because it's mdate is not between {1} and {2}".format(meta['path'], min_mod_date, max_mod_date))
            continue
        if verbosity > 0:
            print
            print 'Loading "%s"...' % meta['path']
        N += load_csv_to_model(path=meta['path'], model=model, field_names=field_names, delimiter=delimiter, batch_len=batch_len,
                               dialect=dialect, num_header_rows=num_header_rows, mode=mode,
                               strip=strip, clear=False, dry_run=dry_run,
                               ignore_errors=ignore_errors, verbosity=verbosity)
        if verbosity > 0:
            file_bytes_done += meta['size']
            pbar.update(file_bytes_done)
    if verbosity > 0:
        pbar.finish()
    return N
def create_cells(self, blocks):
    """Turn the list of blocks into a list of notebook cells.

    Input code blocks become code cells; an output block immediately
    following a code cell is attached to that cell's ``outputs``;
    markdown blocks become markdown cells.  Any other block type raises
    NotImplementedError.
    """
    cells = []
    for block in blocks:
        if (block['type'] == self.code) and (block['IO'] == 'input'):
            cells.append(self.create_code_cell(block))
        elif (block['type'] == self.code and
                block['IO'] == 'output' and
                cells[-1].cell_type == 'code'):
            # Attach the output to the code cell created just before it.
            cells[-1].outputs = self.create_outputs(block)
        elif block['type'] == self.markdown:
            cells.append(self.create_markdown_cell(block))
        else:
            # BUG FIX: the two adjacent string literals previously joined
            # as "celltype" (missing space between "cell" and "type").
            raise NotImplementedError("{} is not supported as a cell "
                                      "type".format(block['type']))
    return cells
def _elapsed(self):
""" Returns elapsed time at update. """
self.last_time = time.time()
return self.last_time - self.start | Returns elapsed time at update. |
def add_done_callback(self, fn):
    """Add a callback to be executed when the operation is complete.

    If the operation is not already complete, this starts a helper thread
    (at most one) to poll for the operation status in the background.

    Args:
        fn (Callable[Future]): The callback to execute when the operation
            is complete.
    """
    if self._result_set:
        # Already finished -- invoke immediately instead of queueing.
        _helpers.safe_invoke_callback(fn, self)
        return
    self._done_callbacks.append(fn)
    if self._polling_thread is not None:
        return
    # First callback: spin up the background poller.  The thread exits on
    # its own as soon as the operation is done.
    self._polling_thread = _helpers.start_daemon_thread(
        target=self._blocking_poll
    )
If the operation is not already complete, this will start a helper
thread to poll for the status of the operation in the background.
Args:
fn (Callable[Future]): The callback to execute when the operation
is complete. |
def blast_representative_sequence_to_pdb(self, seq_ident_cutoff=0, evalue=0.0001, display_link=False,
                                         outdir=None, force_rerun=False):
    """BLAST the representative protein sequence to the PDB. Saves a raw BLAST result file (XML file).

    Args:
        seq_ident_cutoff (float, optional): Cutoff results based on percent coverage (in decimal form)
        evalue (float, optional): Cutoff for the E-value - filters for significant hits. 0.001 is liberal,
            0.0001 is stringent (default).
        display_link (bool, optional): Set to True if links to the HTML results should be displayed
        outdir (str): Path to output directory of downloaded XML files, must be set if protein directory
            was not initialized
        force_rerun (bool, optional): If existing BLAST results should not be used, set to True. Default is False.

    Returns:
        list: List of new ``PDBProp`` objects added to the ``structures`` attribute
        (empty if no new hits), or ``None`` when no representative sequence is available.
    """
    # Check if a representative sequence was set
    if not self.representative_sequence:
        log.warning('{}: no representative sequence set, cannot BLAST'.format(self.id))
        return None
    # Also need to check if a sequence has been stored
    if not self.representative_sequence.seq:
        log.warning('{}: no representative sequence loaded, cannot BLAST'.format(self.id))
        return None
    # BLAST the sequence to the PDB
    blast_results = self.representative_sequence.blast_pdb(seq_ident_cutoff=seq_ident_cutoff,
                                                           evalue=evalue,
                                                           display_link=display_link,
                                                           outdir=outdir,
                                                           force_rerun=force_rerun)
    new_pdbs = []
    # Add BLAST results to the list of structures
    if blast_results:
        # Filter for new BLASTed PDBs; PDB IDs are normalized to lowercase.
        pdbs = [x['hit_pdb'].lower() for x in blast_results]
        new_pdbs = [y for y in pdbs if not self.structures.has_id(y)]
        if new_pdbs:
            log.debug('{}: adding {} PDBs from BLAST results'.format(self.id, len(new_pdbs)))
        else:
            already_have = [y for y in pdbs if self.structures.has_id(y)]
            log.debug('{}: PDBs already contained in structures list'.format(';'.join(already_have)))
        # Keep only the hits that are not yet tracked.
        blast_results = [z for z in blast_results if z['hit_pdb'].lower() in new_pdbs]
        for blast_result in blast_results:
            pdb = blast_result['hit_pdb'].lower()
            chains = blast_result['hit_pdb_chains']
            for chain in chains:
                # load_pdb will append this protein to the list of structures
                new_pdb = self.load_pdb(pdb_id=pdb, mapped_chains=chain)
                new_pdb.add_chain_ids(chain)
                new_chain = new_pdb.chains.get_by_id(chain)
                # Store BLAST results within the chain
                new_chain.blast_results = blast_result
    return new_pdbs
Args:
seq_ident_cutoff (float, optional): Cutoff results based on percent coverage (in decimal form)
evalue (float, optional): Cutoff for the E-value - filters for significant hits. 0.001 is liberal,
0.0001 is stringent (default).
display_link (bool, optional): Set to True if links to the HTML results should be displayed
outdir (str): Path to output directory of downloaded XML files, must be set if protein directory
was not initialized
force_rerun (bool, optional): If existing BLAST results should not be used, set to True. Default is False.
Returns:
list: List of new ``PDBProp`` objects added to the ``structures`` attribute |
def is_installable_file(path):
    # type: (PipfileType) -> bool
    """Determine if a path can potentially be installed.

    Accepts a path string or a Pipfile-style mapping (converted via
    ``convert_entry_to_path``).  Returns False for strings that parse as
    version-specifier sets, for local paths that do not exist, and for
    anything that is neither an archive file nor an installable directory.
    """
    from packaging import specifiers
    if isinstance(path, Mapping):
        path = convert_entry_to_path(path)
    # If the string starts with a valid specifier operator, test if it is a valid
    # specifier set before making a path object (to avoid breaking windows)
    if any(path.startswith(spec) for spec in "!=<>~"):
        try:
            specifiers.SpecifierSet(path)
        # If this is not a valid specifier, just move on and try it as a path
        except specifiers.InvalidSpecifier:
            pass
        else:
            return False
    parsed = urlparse(path)
    # A one-letter scheme on Windows is actually a drive letter (e.g. "C:").
    is_local = (
        not parsed.scheme
        or parsed.scheme == "file"
        or (len(parsed.scheme) == 1 and os.name == "nt")
    )
    if parsed.scheme and parsed.scheme == "file":
        path = vistir.compat.fs_decode(vistir.path.url_to_path(path))
    normalized_path = vistir.path.normalize_path(path)
    if is_local and not os.path.exists(normalized_path):
        return False
    is_archive = pip_shims.shims.is_archive_file(normalized_path)
    is_local_project = os.path.isdir(normalized_path) and is_installable_dir(
        normalized_path
    )
    if is_local and is_local_project or is_archive:
        return True
    # Remote URLs are installable only if they point at an archive file.
    if not is_local and pip_shims.shims.is_archive_file(parsed.path):
        return True
    return False
def _send_command(self, cmd, expect=None):
    """Send a command to MPlayer.

    cmd: the command string
    expect: expect the output starts with a certain string
    The result, if any, is returned as a string.

    Raises NotPlayingError when the player subprocess is not alive.
    """
    if not self.is_alive:
        raise NotPlayingError()
    logger.debug("Send command to mplayer: " + cmd)
    cmd = cmd + "\n"
    # In Py3k, TypeErrors will be raised because cmd is a string but stdin
    # expects bytes. In Python 2.x on the other hand, UnicodeEncodeErrors
    # will be raised if cmd is unicode. In both cases, encoding the string
    # will fix the problem.
    try:
        self.sub_proc.stdin.write(cmd)
    except (TypeError, UnicodeEncodeError):
        self.sub_proc.stdin.write(cmd.encode('utf-8', 'ignore'))
    time.sleep(0.1)  # wait for mplayer (better idea?)
    # Expect a response for 'get_property' only
    if not expect:
        return
    # Poll stdout until a line of the form "<expect>=<value>" appears;
    # returns None if reading fails with IOError.
    while True:
        try:
            output = self.sub_proc.stdout.readline().rstrip()
            output = output.decode('utf-8')
        except IOError:
            return None
        split_output = output.split('=')
        if len(split_output) == 2 and split_output[0].strip() == expect:
            # We found it
            value = split_output[1]
            return value.strip()
cmd: the command string
expect: expect the output starts with a certain string
The result, if any, is returned as a string. |
def assign_value(rs, data_type, val,
                 unit_id, name, metadata={}, data_hash=None, user_id=None, source=None):
    """
    Insert or update a piece of data in a scenario.

    If the dataset is being shared by other resource scenarios, a new dataset is inserted.
    If the dataset is ONLY being used by the resource scenario in question, the dataset
    is updated to avoid unnecessary duplication.

    Raises PermissionError when the scenario is locked.
    NOTE(review): ``metadata={}`` is a mutable default argument -- safe only
    as long as no caller mutates it; confirm before relying on it.
    """
    log.debug("Assigning value %s to rs %s in scenario %s",
              name, rs.resource_attr_id, rs.scenario_id)
    if rs.scenario.locked == 'Y':
        raise PermissionError("Cannot assign value. Scenario %s is locked"
                              %(rs.scenario_id))
    #Check if this RS is the only RS in the DB connected to this dataset.
    #If no results is found, the RS isn't in the DB yet, so the condition is false.
    update_dataset = False # Default behaviour is to create a new dataset.
    if rs.dataset is not None:
        #Has this dataset changed?
        if rs.dataset.hash == data_hash:
            log.debug("Dataset has not changed. Returning.")
            return
        connected_rs = db.DBSession.query(ResourceScenario).filter(ResourceScenario.dataset_id==rs.dataset.id).all()
        #If there's no RS found, then the incoming rs is new, so the dataset can be altered
        #without fear of affecting something else.
        if len(connected_rs) == 0:
            #If it's 1, the RS exists in the DB, but it's the only one using this dataset or
            #The RS isn't in the DB yet and the datset is being used by 1 other RS.
            update_dataset = True
        if len(connected_rs) == 1 :
            # Only safe to update in place when the single user is this exact RS.
            if connected_rs[0].scenario_id == rs.scenario_id and connected_rs[0].resource_attr_id==rs.resource_attr_id:
                update_dataset = True
        else:
            update_dataset=False
    if update_dataset is True:
        log.info("Updating dataset '%s'", name)
        dataset = data.update_dataset(rs.dataset.id, name, data_type, val, unit_id, metadata, flush=False, **dict(user_id=user_id))
        rs.dataset = dataset
        rs.dataset_id = dataset.id
        log.info("Set RS dataset id to %s"%dataset.id)
    else:
        log.info("Creating new dataset %s in scenario %s", name, rs.scenario_id)
        dataset = data.add_dataset(data_type,
                                   val,
                                   unit_id,
                                   metadata=metadata,
                                   name=name,
                                   **dict(user_id=user_id))
        rs.dataset = dataset
        rs.source = source
    db.DBSession.flush()
If the dataset is being shared by other resource scenarios, a new dataset is inserted.
If the dataset is ONLY being used by the resource scenario in question, the dataset
is updated to avoid unnecessary duplication. |
def get_supplier_properties_per_page(self, per_page=1000, page=1, params=None):
    """
    Get supplier properties per page

    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :param params: Search parameters. Default: {}
    :return: list
    """
    return self._get_resource_per_page(
        resource=SUPPLIER_PROPERTIES,
        per_page=per_page,
        page=page,
        params=params,
    )
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list |
def ekf_ok(self):
    """
    ``True`` if the EKF status is considered acceptable, ``False`` otherwise (``boolean``).
    """
    # legacy check for dronekit-python for solo; mirrors the
    # ArduCopter::system.pde::position_ok() check.
    if not self.armed:
        return self._ekf_poshorizabs or self._ekf_predposhorizabs
    return self._ekf_poshorizabs and not self._ekf_constposmode
def send(self, s):
    """
    Send data to the channel. Returns the number of bytes sent, or 0 if
    the channel stream is closed. Applications are responsible for
    checking that all data has been sent: if only some of the data was
    transmitted, the application needs to attempt delivery of the remaining
    data.

    :param str s: data to send
    :return: number of bytes actually sent, as an `int`
    :raises socket.timeout: if no data could be sent before the timeout set
        by `settimeout`.
    """
    # Build a CHANNEL_DATA packet header addressed to the remote channel.
    msg = Message()
    msg.add_byte(cMSG_CHANNEL_DATA)
    msg.add_int(self.remote_chanid)
    return self._send(s, msg)
the channel stream is closed. Applications are responsible for
checking that all data has been sent: if only some of the data was
transmitted, the application needs to attempt delivery of the remaining
data.
:param str s: data to send
:return: number of bytes actually sent, as an `int`
:raises socket.timeout: if no data could be sent before the timeout set
by `settimeout`. |
def resort(self, attributeID, isAscending=None):
    """Sort by one of my specified columns, identified by attributeID.

    Raises Unsortable when the chosen column has no sort attribute.
    """
    if isAscending is None:
        isAscending = self.defaultSortAscending
    column = self.columns[attributeID]
    if column.sortAttribute() is None:
        raise Unsortable('column %r has no sort attribute' % (attributeID,))
    # Re-sorting the same column in the opposite direction anchors on the
    # last row of the current results; every other case anchors on the first.
    sameColumn = self.currentSortColumn == column
    flipped = sameColumn and self.isAscending != isAscending
    offset = -1 if flipped else 0
    self.currentSortColumn = column
    self.isAscending = isAscending
    self._updateResults(self._sortAttributeValue(offset), True)
def replace_namespaced_limit_range(self, name, namespace, body, **kwargs):
    """
    replace the specified LimitRange

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_limit_range(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the LimitRange (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1LimitRange body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted.
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes.
    :return: V1LimitRange
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    delegate = self.replace_namespaced_limit_range_with_http_info
    if kwargs.get('async_req'):
        # Asynchronous: the delegate returns the request thread directly.
        return delegate(name, namespace, body, **kwargs)
    (data) = delegate(name, namespace, body, **kwargs)
    return data
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_limit_range(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the LimitRange (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1LimitRange body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1LimitRange
If the method is called asynchronously,
returns the request thread. |
def jira_connection(config):
    """
    Gets a JIRA API connection. If a connection has already been created the existing connection
    will be returned.

    Tries to reuse saved session cookies first; on failure falls back to
    basic auth using the base64-decoded password from *config*, then
    persists the new session cookies for next time.
    NOTE(review): Python 2 only (``dict.iteritems``); module-level
    ``_jira_connection`` caches the connection across calls.
    """
    global _jira_connection
    if _jira_connection:
        return _jira_connection
    else:
        jira_options = {'server': config.get('jira').get('url')}
        cookies = configuration._get_cookies_as_dict()
        jira_connection = jira_ext.JIRA(options=jira_options)
        session = jira_connection._session
        reused_session = False
        if cookies:
            # Try the saved cookies; a successful session() call means
            # they are still valid and no credentials are needed.
            requests.utils.add_dict_to_cookiejar(session.cookies, cookies)
            try:
                jira_connection.session()
                reused_session = True
            except Exception:
                pass
        if not reused_session:
            # Authenticate once with basic auth, then drop the credentials
            # from the session so only the cookie is kept around.
            session.auth = (config['jira']['username'], base64.b64decode(config['jira']['password']))
            jira_connection.session()
            session.auth = None
            cookie_jar_hash = requests.utils.dict_from_cookiejar(session.cookies)
            for key, value in cookie_jar_hash.iteritems():
                configuration._save_cookie(key, value)
        _jira_connection = jira_connection
        return _jira_connection
will be returned. |
def is_empty(self):
    """
    Test interval emptiness.

    :return: True if interval is empty, False otherwise.
    """
    if self._lower > self._upper:
        return True
    if self._lower == self._upper:
        # A degenerate interval is empty unless both bounds are closed.
        return self._left == OPEN or self._right == OPEN
    return False
:return: True if interval is empty, False otherwise. |
def split(self, text):
    """Split text into a list of cells.

    Cells are separated by blank lines ("\\n\\n", or "\\r\\n\\r\\n" for
    CRLF files).  If neither separator is present the file is not a
    valid srt subtitle file: log an error and exit with status 1.
    """
    # Plain substring membership replaces the previous re.search calls --
    # both separators are literal strings, so no regex is needed.
    if '\n\n' in text:
        return text.split('\n\n')
    if '\r\n\r\n' in text:
        return text.split('\r\n\r\n')
    LOGGER.error("'%s' does not appear to be a 'srt' subtitle file",
                 self.filename)
    sys.exit(1)
def reporter(self):
    """
    Create a .csv file with the strain name, and the number of core genes present/the total number of core genes.

    Also strips bulky intermediate attributes (blast results, core-gene
    sets) from each sample in ``self.metadata`` once the report is written.
    """
    with open(os.path.join(self.reportpath, 'Escherichia_core.csv'), 'w') as report:
        data = 'Strain,Genes Present/Total\n'
        for sample in self.runmetadata.samples:
            # Convert the set to a list for JSON serialization
            sample[self.analysistype].coreset = list(sample[self.analysistype].coreset)
            sample[self.analysistype].coreresults = '{cs}/{cg}'.format(cs=len(sample[self.analysistype].coreset),
                                                                       cg=len(self.coregenomes))
            # Add strain name, the number of core genes present, and the number of total core genes to the string
            data += '{sn},{cr}\n'.format(sn=sample.name,
                                         cr=sample[self.analysistype].coreresults)
        report.write(data)
    for sample in self.metadata:
        # Remove the messy blast results and set/list of core genes from the object
        try:
            delattr(sample[self.analysistype], "blastresults")
        except AttributeError:
            pass
        try:
            delattr(sample[self.analysistype], 'coreset')
        except AttributeError:
            pass
def value(source, key, ext=COMPLETE):
    """Extracts value for the specified metadata key from the given extension set.

    Keyword arguments:
    source -- string containing MultiMarkdown text
    ext -- extension bitfield for processing text
    key -- key to extract
    """
    fn = _MMD_LIB.extract_metadata_value
    fn.restype = ctypes.c_char_p
    fn.argtypes = [ctypes.c_char_p, ctypes.c_ulong, ctypes.c_char_p]
    raw = fn(source.encode('utf-8'), ext, key.encode('utf-8'))
    if raw:
        return raw.decode('utf-8')
    return ''
Keyword arguments:
source -- string containing MultiMarkdown text
ext -- extension bitfield for processing text
key -- key to extract |
def wysiwyg_setup(protocol="http", editor_override=None):
    """
    Create the <style> and <script> tags needed to initialize the rich text editor.

    Create a local django_wysiwyg/includes.html template if you don't want to use Yahoo's CDN
    """
    context = {"protocol": protocol}
    context.update(get_settings(editor_override=editor_override))
    template_name = "django_wysiwyg/%s/includes.html" % context['DJANGO_WYSIWYG_FLAVOR']
    return render_to_string(template_name, context)
Create a local django_wysiwyg/includes.html template if you don't want to use Yahoo's CDN |
def add_update_resources(self, resources, ignore_datasetid=False):
    # type: (List[Union[hdx.data.resource.Resource,Dict,str]], bool) -> None
    """Add new or update existing resources with new metadata to the dataset.

    Args:
        resources (List[Union[hdx.data.resource.Resource,Dict,str]]): Resource ids, Resource objects or metadata dictionaries
        ignore_datasetid (bool): Whether to ignore dataset id in each resource. Defaults to False.

    Returns:
        None
    """
    if not isinstance(resources, list):
        raise HDXError('Resources should be a list!')
    for res in resources:
        self.add_update_resource(res, ignore_datasetid)
Args:
resources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries
ignore_datasetid (bool): Whether to ignore dataset id in the resource. Defaults to False.
Returns:
None |
def _init_objgoea(self, pop, assoc):
    """Build the gene ontology enrichment analysis (GOEA) study object."""
    return GOEnrichmentStudy(
        pop, assoc, self.godag,
        # Propagation is on unless explicitly disabled on the CLI.
        propagate_counts=not self.args.no_propagate_counts,
        relationships=False,
        alpha=self.args.alpha,
        pvalcalc=self.args.pvalcalc,
        methods=self.methods)
def evaluate(self, dataset, metric="auto",
             missing_value_action='auto', with_predictions=False, options=None, **kwargs):
    """
    Evaluate the model by making predictions of target values and comparing
    these to actual values.

    Parameters
    ----------
    dataset : SFrame
        Dataset in the same format used for training. The columns names and
        types of the dataset must be the same as that used in training.

    metric : str, list[str]
        Evaluation metric(s) to be computed.

    missing_value_action : str, optional
        Action to perform when missing values are encountered. This can be
        one of:
        - 'auto': Choose a model dependent missing value policy.
        - 'impute': Proceed with evaluation by filling in the missing
          values with the mean of the training data. Missing
          values are also imputed if an entire column of data is
          missing during evaluation.
        - 'none': Treat missing value as is. Model must be able to handle missing value.
        - 'error': Do not proceed with prediction and terminate with
          an error message.

    options : dict, optional
        additional options to be passed in to prediction

    kwargs : dict
        additional options to be passed into prediction
    """
    # NOTE: `options` previously defaulted to a mutable `{}`.  The parameter
    # is never used in this method, so a `None` sentinel avoids the shared
    # mutable-default pitfall with no behavior change.
    if missing_value_action == 'auto':
        missing_value_action = select_default_missing_value_policy(
            self, 'evaluate')
    _raise_error_if_not_sframe(dataset, "dataset")
    results = self.__proxy__.evaluate(
        dataset, missing_value_action, metric, with_predictions=with_predictions)
    return results
these to actual values.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, list[str]
Evaluation metric(s) to be computed.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose a model dependent missing value policy.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'none': Treat missing value as is. Model must be able to handle missing value.
- 'error' : Do not proceed with prediction and terminate with
an error message.
options : dict
additional options to be passed in to prediction
kwargs : dict
additional options to be passed into prediction |
def _clean_features(struct):
    """Cleans up the features collected in parse_play_details.

    :struct: Pandas Series of features parsed from details string.
    :returns: the same data as a Series, but with cleaner features (e.g.,
        convert bools, ints, etc.)
    """
    struct = dict(struct)
    # First, clean up play type bools
    ptypes = ['isKickoff', 'isTimeout', 'isFieldGoal', 'isPunt', 'isKneel',
              'isSpike', 'isXP', 'isTwoPoint', 'isPresnapPenalty', 'isPass',
              'isRun']
    for pt in ptypes:
        struct[pt] = struct[pt] if pd.notnull(struct.get(pt)) else False
    # Second, clean up other existing variables on a one-off basis
    struct['callUpheld'] = struct.get('callUpheld') == 'upheld'
    struct['fgGood'] = struct.get('fgGood') == 'good'
    struct['isBlocked'] = struct.get('isBlocked') == 'blocked'
    struct['isComplete'] = struct.get('isComplete') == 'complete'
    struct['isFairCatch'] = struct.get('isFairCatch') == 'fair catch'
    struct['isMuffedCatch'] = pd.notnull(struct.get('isMuffedCatch'))
    struct['isNoPlay'] = (
        ' (no play)' in struct['detail'] and
        'penalty enforced in end zone' not in struct['detail']
        if struct.get('detail') else False)
    struct['isOnside'] = struct.get('isOnside') == 'onside'
    struct['isSack'] = pd.notnull(struct.get('sackYds'))
    struct['isSafety'] = (struct.get('isSafety') == ', safety' or
                          (struct.get('detail') and
                           'enforced in end zone, safety' in struct['detail']))
    struct['isTD'] = struct.get('isTD') == ', touchdown'
    struct['isTouchback'] = struct.get('isTouchback') == ', touchback'
    struct['oob'] = pd.notnull(struct.get('oob'))
    struct['passLoc'] = PASS_OPTS.get(struct.get('passLoc'), np.nan)
    if struct['isPass']:
        pyds = struct['passYds']
        struct['passYds'] = pyds if pd.notnull(pyds) else 0
    if pd.notnull(struct['penalty']):
        struct['penalty'] = struct['penalty'].strip()
    struct['penDeclined'] = struct.get('penDeclined') == 'Declined'
    if struct['quarter'] == 'OT':
        struct['quarter'] = 5
    struct['rushDir'] = RUSH_OPTS.get(struct.get('rushDir'), np.nan)
    if struct['isRun']:
        ryds = struct['rushYds']
        struct['rushYds'] = ryds if pd.notnull(ryds) else 0
    year = struct.get('season', np.nan)
    struct['timeoutTeam'] = sportsref.nfl.teams.team_ids(year).get(
        struct.get('timeoutTeam'), np.nan
    )
    struct['twoPointSuccess'] = struct.get('twoPointSuccess') == 'succeeds'
    struct['xpGood'] = struct.get('xpGood') == 'good'
    # Third, ensure types are correct
    bool_vars = [
        'fgGood', 'isBlocked', 'isChallenge', 'isComplete', 'isFairCatch',
        'isFieldGoal', 'isKickoff', 'isKneel', 'isLateral', 'isNoPlay',
        'isPass', 'isPresnapPenalty', 'isPunt', 'isRun', 'isSack', 'isSafety',
        'isSpike', 'isTD', 'isTimeout', 'isTouchback', 'isTwoPoint', 'isXP',
        'isMuffedCatch', 'oob', 'penDeclined', 'twoPointSuccess', 'xpGood'
    ]
    int_vars = [
        'down', 'fgBlockRetYds', 'fgDist', 'fumbRecYdLine', 'fumbRetYds',
        'intRetYds', 'intYdLine', 'koRetYds', 'koYds', 'muffRetYds',
        'pbp_score_aw', 'pbp_score_hm', 'passYds', 'penYds', 'puntBlockRetYds',
        'puntRetYds', 'puntYds', 'quarter', 'rushYds', 'sackYds', 'timeoutNum',
        'ydLine', 'yds_to_go'
    ]
    float_vars = [
        'exp_pts_after', 'exp_pts_before', 'home_wp'
    ]
    string_vars = [
        'challenger', 'detail', 'fairCatcher', 'fgBlockRecoverer',
        'fgBlocker', 'fgKicker', 'fieldSide', 'fumbForcer',
        'fumbRecFieldSide', 'fumbRecoverer', 'fumbler', 'intFieldSide',
        'interceptor', 'kneelQB', 'koKicker', 'koReturner', 'muffRecoverer',
        'muffedBy', 'passLoc', 'passer', 'penOn', 'penalty',
        'puntBlockRecoverer', 'puntBlocker', 'puntReturner', 'punter',
        'qtr_time_remain', 'rushDir', 'rusher', 'sacker1', 'sacker2',
        'spikeQB', 'tackler1', 'tackler2', 'target', 'timeoutTeam',
        'xpKicker'
    ]
    for var in bool_vars:
        struct[var] = struct.get(var) is True
    for var in int_vars:
        try:
            struct[var] = int(struct.get(var))
        except (ValueError, TypeError):
            struct[var] = np.nan
    for var in float_vars:
        try:
            struct[var] = float(struct.get(var))
        except (ValueError, TypeError):
            struct[var] = np.nan
    for var in string_vars:
        # BUG FIX: original tested `var == ''` (the *name*, never empty);
        # compare the value so empty strings are normalized to NaN.
        if var not in struct or pd.isnull(struct[var]) or struct[var] == '':
            struct[var] = np.nan
    # Fourth, create new helper variables based on parsed variables
    # creating fieldSide and ydline from location
    if struct['isXP']:
        struct['fieldSide'] = struct['ydLine'] = np.nan
    else:
        fieldSide, ydline = _loc_to_features(struct.get('location'))
        struct['fieldSide'] = fieldSide
        struct['ydLine'] = ydline
    # creating secsElapsed (in entire game) from qtr_time_remain and quarter
    if pd.notnull(struct.get('qtr_time_remain')):
        qtr = struct['quarter']
        mins, secs = map(int, struct['qtr_time_remain'].split(':'))
        struct['secsElapsed'] = qtr * 900 - mins * 60 - secs
    # creating columns for turnovers
    struct['isInt'] = pd.notnull(struct.get('interceptor'))
    struct['isFumble'] = pd.notnull(struct.get('fumbler'))
    # create column for isPenalty
    struct['isPenalty'] = pd.notnull(struct.get('penalty'))
    # create columns for EPA
    struct['team_epa'] = struct['exp_pts_after'] - struct['exp_pts_before']
    struct['opp_epa'] = struct['exp_pts_before'] - struct['exp_pts_after']
    return pd.Series(struct)
def retry(n, errors, wait=0.0, logger_name=None):
    """This is a decorator that retries a function.

    Tries `n` times and catches a given tuple of `errors`.
    If the `n` retries are not enough, the error is reraised.
    If desired `wait`s some seconds between attempts.
    Optionally takes a `logger_name` of a given logger to print the caught error.

    :param n: number of retries *after* the initial attempt
    :param errors: exception class or tuple of classes to catch and retry on
    :param wait: seconds to sleep between attempts (0.0 disables sleeping)
    :param logger_name: name of the logger used to report failures
    """
    def wrapper(func):
        @functools.wraps(func)
        def new_func(*args, **kwargs):
            retries = 0
            while True:
                try:
                    result = func(*args, **kwargs)
                    if retries and logger_name:
                        logger = logging.getLogger(logger_name)
                        # Lazy %-args: only formatted if debug is enabled.
                        logger.debug('Retry of `%s` successful', func.__name__)
                    return result
                except errors:
                    if retries >= n:
                        if logger_name:
                            logger = logging.getLogger(logger_name)
                            # BUG FIX: this is the give-up path; the original
                            # message claimed "starting next try" here.
                            logger.exception(
                                'I could not execute `%s` with args %s and '
                                'kwargs %s, no retries left, reraising. ',
                                func.__name__, args, kwargs)
                        raise
                    elif logger_name:
                        logger = logging.getLogger(logger_name)
                        logger.debug(
                            'I could not execute `%s` with args %s and '
                            'kwargs %s, starting next try. ',
                            func.__name__, args, kwargs)
                    retries += 1
                    if wait:
                        time.sleep(wait)
        return new_func
    return wrapper
def deploy_func_between_two_axis_partitions(
    cls, axis, func, num_splits, len_of_left, kwargs, *partitions
):
    """Deploy a function along a full axis between two data sets in Ray.

    Args:
        axis: The axis to perform the function along.
        func: The function to perform.
        num_splits: The number of splits to return
            (see `split_result_of_axis_func_pandas`).
        len_of_left: The number of values in `partitions` that belong to the
            left data set.
        kwargs: A dictionary of keyword arguments.
        partitions: All partitions that make up the full axis (row or column)
            for both data sets.

    Returns:
        A list of Pandas DataFrames.
    """
    # Reassemble each side from its partitions.
    left_parts = list(partitions[:len_of_left])
    right_parts = list(partitions[len_of_left:])
    left = pandas.concat(left_parts, axis=axis, copy=False)
    right = pandas.concat(right_parts, axis=axis, copy=False)
    # Apply the binary function and split the result back into partitions.
    combined = func(left, right, **kwargs)
    return split_result_of_axis_func_pandas(axis, num_splits, combined)
def _realToVisibleColumn(self, text, realColumn):
    """If \t is used, real position of symbol in block and visible position differs
    This function converts real to visible
    """
    # The generator yields the visible column of each successive real
    # character position in ``text``.
    generator = self._visibleCharPositionGenerator(text)
    # Skip past the first ``realColumn`` characters...
    for i in range(realColumn):
        val = next(generator)
    # ...then take one more value: the visible column at ``realColumn``.
    val = next(generator)
    return val
def flushTable(self, login, tableName, startRow, endRow, wait):
    """
    Parameters:
     - login
     - tableName
     - startRow
     - endRow
     - wait
    """
    # Thrift-style RPC stub: send the request, then block on the reply
    # (recv_* also surfaces any server-side exception).
    self.send_flushTable(login, tableName, startRow, endRow, wait)
    self.recv_flushTable()
def xml2dict(root):
    """Use functions instead of Class and remove namespace based on:
    http://stackoverflow.com/questions/2148119
    """
    result = {}
    if root.items():
        result.update(dict(root.items()))
    for child in root:
        tag = ns(child.tag)
        if len(child):
            # Repeated identical child tags become a list; otherwise recurse.
            repeated = len(child) > 1 and child[0].tag == child[1].tag
            if repeated:
                value = {ns(child[0].tag): xml2list(child)}
            else:
                value = xml2dict(child)
            if child.items():
                value.update(dict(child.items()))
            result[tag] = value
        elif child.items():
            result[tag] = dict(child.items())
        else:
            result[tag] = child.text
    return result
def run_script(scriptfile):
    '''run a script file'''
    # A missing/unreadable script is silently a no-op (best-effort behavior,
    # matching the original).
    try:
        f = open(scriptfile, mode='r')
    except Exception:
        return
    # BUG FIX: use a context manager so the file is closed even when a
    # command raises; the original leaked the handle on exceptions.
    with f:
        mpstate.console.writeln("Running script %s" % scriptfile)
        sub = mp_substitute.MAVSubstitute()
        for line in f:
            line = line.strip()
            # Skip blank lines and comments.
            if line == "" or line.startswith('#'):
                continue
            try:
                # Expand environment-variable substitutions.
                line = sub.substitute(line, os.environ)
            except mp_substitute.MAVSubstituteError as ex:
                print("Bad variable: %s" % str(ex))
                if mpstate.settings.script_fatal:
                    sys.exit(1)
                continue
            if line.startswith('@'):
                # '@' prefix suppresses echoing the command.
                line = line[1:]
            else:
                mpstate.console.writeln("-> %s" % line)
            process_stdin(line)
def _bss_image_crit(s_true, e_spat, e_interf, e_artif):
    """Measurement of the separation quality for a given image in terms of
    filtered true source, spatial error, interference and artifacts.
    """
    def energy(x):
        return np.sum(x ** 2)

    # Energy ratios, expressed in dB via _safe_db.
    sdr = _safe_db(energy(s_true), energy(e_spat + e_interf + e_artif))
    isr = _safe_db(energy(s_true), energy(e_spat))
    sir = _safe_db(energy(s_true + e_spat), energy(e_interf))
    sar = _safe_db(energy(s_true + e_spat + e_interf), energy(e_artif))
    return (sdr, isr, sir, sar)
def pending_batch_info(self):
    """Returns a tuple of the current size of the pending batch queue
    and the current queue limit.
    """
    # Out-parameters populated by the native call.
    length_out = ctypes.c_int(0)
    limit_out = ctypes.c_int(0)
    self._call(
        'pending_batch_info',
        ctypes.byref(length_out),
        ctypes.byref(limit_out))
    return (length_out.value, limit_out.value)
def Cpu():
    """ Get number of available CPUs """
    try:
        return str(multiprocessing.cpu_count())
    except Exception as e:  # pragma: no cover
        logger.error("Can't access CPU count' " + str(e))
        return 'Unknown'
def _mudraw(buffer, fmt):
    """Use mupdf draw to rasterize the PDF in the memory buffer"""
    with NamedTemporaryFile(suffix='.pdf') as tmp_in:
        # Persist the in-memory PDF so the external tool can read it.
        tmp_in.write(buffer)
        tmp_in.seek(0)
        tmp_in.flush()
        completed = run(
            ['mudraw', '-F', fmt, '-o', '-', tmp_in.name],
            stdout=PIPE, stderr=PIPE,
        )
        # Any stderr output is treated as a rasterization failure.
        if completed.stderr:
            raise RuntimeError(completed.stderr.decode())
        return completed.stdout
def encode_varint(value, write):
    """ Encode an integer to a varint presentation. See
    https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
    on how those can be produced.

    The value is zig-zag encoded first, so small negative numbers also get
    short encodings.

    Arguments:
        value (int): Value to encode
        write (function): Called per byte that needs to be writen

    Returns:
        int: Number of bytes written
    """
    # Zig-zag encode (64-bit): maps ... -2,-1,0,1,2 ... to 3,1,0,2,4 ...
    value = (value << 1) ^ (value >> 63)
    # Unrolled fast paths for 1-5 byte encodings.
    if value <= 0x7f:  # 1 byte
        write(value)
        return 1
    if value <= 0x3fff:  # 2 bytes
        write(0x80 | (value & 0x7f))
        write(value >> 7)
        return 2
    if value <= 0x1fffff:  # 3 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(value >> 14)
        return 3
    if value <= 0xfffffff:  # 4 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(0x80 | ((value >> 14) & 0x7f))
        write(value >> 21)
        return 4
    if value <= 0x7ffffffff:  # 5 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(0x80 | ((value >> 14) & 0x7f))
        write(0x80 | ((value >> 21) & 0x7f))
        write(value >> 28)
        return 5
    else:
        # General algorithm for encodings of 6+ bytes.
        bits = value & 0x7f
        value >>= 7
        i = 0
        while value:
            write(0x80 | bits)
            bits = value & 0x7f
            value >>= 7
            i += 1
        write(bits)
        # BUG FIX: the loop writes `i` continuation bytes and the final
        # write(bits) adds one more; the original returned only `i`.
        return i + 1
def load_site_config(name):
    """Load and return site configuration as a dict."""
    # Site configs live under CONFIG_PATH/CONFIG_SITES_PATH/<name><ext>.
    config_file = os.path.join(CONFIG_PATH, CONFIG_SITES_PATH, name + CONFIG_EXT)
    return _load_config_json(config_file)
def _add_scope_decorations(self, block, start, end):
    """
    Show a scope decoration on the editor widget

    :param block: text block that may act as a fold trigger
    :param start: Start line
    :param end: End line
    """
    try:
        parent = FoldScope(block).parent()
    except ValueError:
        # Block has no fold scope.
        parent = None
    if TextBlockHelper.is_fold_trigger(block):
        base_color = self._get_scope_highlight_color()
        # Dark themes get a larger lightness step between nested scopes.
        factor_step = 5
        if base_color.lightness() < 128:
            factor_step = 10
            factor = 70
        else:
            factor = 100
        while parent:
            # highlight parent scope
            parent_start, parent_end = parent.get_range()
            self._add_scope_deco(
                start, end + 1, parent_start, parent_end,
                base_color, factor)
            # next parent scope
            start = parent_start
            end = parent_end
            parent = parent.parent()
            factor += factor_step
        # global scope
        parent_start = 0
        parent_end = self.editor.document().blockCount()
        self._add_scope_deco(
            start, end + 1, parent_start, parent_end, base_color,
            factor + factor_step)
    else:
        # Not a fold trigger: remove any stale scope decorations.
        self._clear_scope_decos()
def importpath(path, error_text=None):
    """
    Import value by specified ``path``.

    Value can represent module, class, object, attribute or method.
    If ``error_text`` is not None, a failed attribute lookup will
    raise ImproperlyConfigured with user friendly text.
    """
    result = None
    attrs = []
    parts = path.split('.')
    exception = None
    # Peel trailing components off until an importable module prefix is
    # found; the peeled components are resolved via getattr below.
    while parts:
        try:
            result = __import__('.'.join(parts), {}, {}, [''])
        except ImportError as e:
            # Remember the first (most specific) import failure.
            if exception is None:
                exception = e
            attrs = parts[-1:] + attrs
            parts = parts[:-1]
        else:
            break
    for attr in attrs:
        try:
            result = getattr(result, attr)
        except (AttributeError, ValueError) as e:
            if error_text is not None:
                # BUG FIX: the message used to read "can import", which said
                # the opposite of what happened.
                raise ImproperlyConfigured('Error: %s cannot import "%s"' % (
                    error_text, path))
            else:
                raise exception
    return result
def _show_prompt(self, prompt=None, html=False, newline=True):
    """ Writes a new prompt at the end of the buffer.

    Parameters
    ----------
    prompt : str, optional
        The prompt to show. If not specified, the previous prompt is used.

    html : bool, optional (default False)
        Only relevant when a prompt is specified. If set, the prompt will
        be inserted as formatted HTML. Otherwise, the prompt will be treated
        as plain text, though ANSI color codes will be handled.

    newline : bool, optional (default True)
        If set, a new line will be written before showing the prompt if
        there is not already a newline at the end of the buffer.
    """
    # Save the current end position to support _append*(before_prompt=True).
    cursor = self._get_end_cursor()
    self._append_before_prompt_pos = cursor.position()

    # Insert a preliminary newline, if necessary.
    if newline and cursor.position() > 0:
        # Peek at the last character; only append '\n' if it isn't one.
        cursor.movePosition(QtGui.QTextCursor.Left,
                            QtGui.QTextCursor.KeepAnchor)
        if cursor.selection().toPlainText() != '\n':
            self._append_plain_text('\n')

    # Write the prompt.
    self._append_plain_text(self._prompt_sep)
    if prompt is None:
        # Re-show the previous prompt in whichever form it was stored.
        if self._prompt_html is None:
            self._append_plain_text(self._prompt)
        else:
            self._append_html(self._prompt_html)
    else:
        if html:
            # Keep the plain-text rendering for prompt bookkeeping.
            self._prompt = self._append_html_fetching_plain_text(prompt)
            self._prompt_html = prompt
        else:
            self._append_plain_text(prompt)
            self._prompt = prompt
            self._prompt_html = None

    # Remember where user input begins.
    self._prompt_pos = self._get_end_cursor().position()
    self._prompt_started()
def send_frame(self, frame):
    '''
    Queue a frame for sending. Will send immediately if there are no
    pending synchronous transactions on this connection.

    :param frame: the frame to send
    :raises ChannelClosed: if this channel has been closed
    '''
    if self.closed:
        if self.close_info and len(self.close_info['reply_text']) > 0:
            # NOTE(review): printf-style args are passed to ChannelClosed;
            # assumes the exception class formats them - confirm.
            raise ChannelClosed(
                "channel %d is closed: %s : %s",
                self.channel_id,
                self.close_info['reply_code'],
                self.close_info['reply_text'])
        raise ChannelClosed()

    # If there's any pending event at all, then it means that when the
    # current dispatch loop started, all possible frames were flushed
    # and the remaining item(s) starts with a sync callback. After careful
    # consideration, it seems that it's safe to assume the len>0 means to
    # buffer the frame. The other advantage here is
    if not len(self._pending_events):
        if not self._active and \
                isinstance(frame, (ContentFrame, HeaderFrame)):
            # Flow control is active: content frames may not be sent now.
            raise Channel.Inactive(
                "Channel %d flow control activated", self.channel_id)
        self._connection.send_frame(frame)
    else:
        self._pending_events.append(frame)
def vertex_colors(self, values):
    """
    Set the colors for each vertex of a mesh

    This will apply these colors and delete any previously specified
    color information.

    Parameters
    ------------
    values : (len(mesh.vertices), 3), set each vertex to the color
             (len(mesh.vertices), 4), set each vertex to the color
             (3,) int, set the whole mesh this color
             (4,) int, set the whole mesh this color
             None, clear any stored vertex colors
    """
    if values is None:
        # Clearing: drop stored vertex colors (if any) and bail out.
        if 'vertex_colors' in self._data:
            self._data.data.pop('vertex_colors')
        return

    # make sure passed values are numpy array
    values = np.asanyarray(values)

    # Ensure the color shape is sane; silently ignore bad shapes.
    if (self.mesh is not None and not
            (values.shape == (len(self.mesh.vertices), 3) or
             values.shape == (len(self.mesh.vertices), 4) or
             values.shape == (3,) or
             values.shape == (4,))):
        return

    colors = to_rgba(values)
    if (self.mesh is not None and
            colors.shape == (4,)):
        # A single RGBA color: broadcast it to every vertex.
        count = len(self.mesh.vertices)
        colors = np.tile(colors, (count, 1))

    # if we set any color information, clear the others
    self._data.clear()
    self._data['vertex_colors'] = colors
    self._cache.verify()
def _irc_upper(self, in_string):
    """Convert us to our upper-case equivalent, given our std."""
    conv_string = self._translate(in_string)
    # NOTE(review): when _upper_trans is set, the _translate() result above
    # is discarded and the table is applied to the raw input instead -
    # confirm this is intentional.
    if self._upper_trans is not None:
        conv_string = in_string.translate(self._upper_trans)
    return str.upper(conv_string)
def inject_closure_values(func, **kwargs):
    """
    Returns a new function identical to the previous one except that it acts as
    though global variables named in `kwargs` have been closed over with the
    values specified in the `kwargs` dictionary.

    Works on properties, class/static methods and functions.

    This can be useful for mocking and other nefarious activities.
    """
    wrapped_by = None
    if isinstance(func, property):
        # Rewrite each accessor separately and rebuild a property (or
        # property subclass) of the same type.
        # NOTE(review): this branch calls `fix_func` while the main path
        # uses `_inject_closure_values` - confirm both helpers exist.
        fget, fset, fdel = func.fget, func.fset, func.fdel
        if fget: fget = fix_func(fget, **kwargs)
        if fset: fset = fix_func(fset, **kwargs)
        if fdel: fdel = fix_func(fdel, **kwargs)
        wrapped_by = type(func)
        return wrapped_by(fget, fset, fdel)
    elif isinstance(func, (staticmethod, classmethod)):
        # NOTE(review): wrapped_by is taken *after* unwrapping __func__, so
        # it is the plain function type rather than staticmethod/classmethod
        # - looks like a bug; confirm intent before relying on re-wrapping.
        func = func.__func__
        wrapped_by = type(func)
    newfunc = _inject_closure_values(func, **kwargs)
    if wrapped_by:
        newfunc = wrapped_by(newfunc)
    return newfunc
async def on_raw_329(self, message):
    """ Channel creation time. """
    target, channel, timestamp = message.params
    # Ignore replies for channels we are not in.
    if not self.in_channel(channel):
        return
    created_at = datetime.datetime.fromtimestamp(int(timestamp))
    self.channels[channel]['created'] = created_at
def fast_int(
    x,
    key=lambda x: x,
    _uni=unicodedata.digit,
    _first_char=POTENTIAL_FIRST_CHAR,
):
    """
    Convert a string to a int quickly, return input as-is if not possible.

    We don't need to accept all input that the real fast_int accepts because
    natsort is controlling what is passed to this function.

    Parameters
    ----------
    x : str
        String to attempt to convert to an int.
    key : callable
        Single-argument function to apply to *x* if conversion fails.

    Returns
    -------
    *str* or *int*
    """
    # Only attempt integer conversion when the first character could
    # plausibly start a number.
    if x[0] in _first_char:
        try:
            return long(x)
        except ValueError:
            pass
    # Fallback: single characters may still be unicode digits; everything
    # else is passed through `key` unchanged.
    try:
        return _uni(x, key(x)) if len(x) == 1 else key(x)
    except TypeError:  # pragma: no cover
        return key(x)
def max_freq(self, tech_in_nm=130, ffoverhead=None):
    """ Estimates the max frequency of a block in MHz.

    :param tech_in_nm: the size of the circuit technology to be estimated
        (for example, 65 is 65nm and 250 is 0.25um)
    :param ffoverhead: setup and ff propagation delay in picoseconds
    :return: a number representing an estimate of the max frequency in Mhz

    If a timing_map has already been generated by timing_analysis, it will be used
    to generate the estimate (and `gate_delay_funcs` will be ignored). Regardless,
    all params are optional and have reasonable default values. Estimation is based
    on Dennard Scaling assumption and does not include wiring effect -- as a result
    the estimates may be optimistic (especially below 65nm).
    """
    critical_path = self.max_length()
    # Delays are scaled relative to the 130nm reference technology.
    scaling = 130.0 / tech_in_nm
    if ffoverhead is None:
        # Default flip-flop overhead (189ps + 194ps at 130nm) scales too.
        period_ps = scaling * (critical_path + 189 + 194)
    else:
        period_ps = (scaling * critical_path) + ffoverhead
    # Convert the picosecond clock period to MHz.
    return 1e6 * 1.0 / period_ps
def get_subject_with_file_validation(jwt_bu64, cert_path):
    """Same as get_subject_with_local_validation() except that the signing certificate
    is read from a local PEM file."""
    # Load the signing certificate from disk, then delegate.
    certificate = d1_common.cert.x509.deserialize_pem_file(cert_path)
    return get_subject_with_local_validation(jwt_bu64, certificate)
def decode_index_value(self, index, value):
    """
    Decodes a secondary index value into the correct Python type.

    :param index: the name of the index
    :type index: str
    :param value: the value of the index entry
    :type value: str
    :rtype str or int
    """
    # Integer indexes are named with an "_int" suffix by convention.
    is_int_index = index.endswith("_int")
    return int(value) if is_int_index else bytes_to_str(value)
def _add(self, hostport):
    """Creates a peer from the hostport and adds it to the peer heap

    :param hostport: "host:port" address of the new peer
    """
    peer = self.peer_class(
        tchannel=self.tchannel,
        hostport=hostport,
        # Keep heap ordering current as connections change state.
        on_conn_change=self._update_heap,
    )
    # Initial rank determines the peer's position in the selection heap.
    peer.rank = self.rank_calculator.get_rank(peer)
    self._peers[peer.hostport] = peer
    # add_and_shuffle presumably randomizes placement among equal-rank
    # peers - confirm against the heap implementation.
    self.peer_heap.add_and_shuffle(peer)
def getSkeletalBoneDataCompressed(self, action, eMotionRange, pvCompressedData, unCompressedSize):
    """
    Reads the state of the skeletal bone data in a compressed form that is suitable for
    sending over the network. The required buffer size will never exceed ( sizeof(VR_BoneTransform_t)*boneCount + 2).
    Usually the size will be much smaller.
    """
    fn = self.function_table.getSkeletalBoneDataCompressed
    # Out-parameter: the API reports how many bytes it actually needs.
    punRequiredCompressedSize = c_uint32()
    result = fn(action, eMotionRange, pvCompressedData, unCompressedSize, byref(punRequiredCompressedSize))
    # Returns the API result code plus the required compressed size.
    return result, punRequiredCompressedSize.value
def search(self, q, **kw):
    """Search Gnip for given query, returning deserialized response."""
    endpoint = '{base_url}/search/{stream}'.format(**vars(self))
    # Query string first, then instance defaults, then explicit overrides.
    query = {'q': q}
    query.update(self.params)
    query.update(kw)
    response = self.session.get(endpoint, params=query)
    response.raise_for_status()
    return response.json()
def http_post(url, data=None, opt=opt_default):
    """
    Shortcut for urlopen (POST) + read. We'll probably want to add a
    nice timeout here later too.
    """
    payload = _marshalled(data)
    return _http_request(url, method='POST', data=payload, opt=opt)
def _get_error_values(self, startingPercentage, endPercentage, startDate, endDate):
"""Gets the defined subset of self._errorValues.
Both parameters will be correct at this time.
:param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0].
It represents the value, where the error calculation should be started.
25.0 for example means that the first 25% of all calculated errors will be ignored.
:param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0].
It represents the value, after which all error values will be ignored. 90.0 for example means that
the last 10% of all local errors will be ignored.
:param float startDate: Epoch representing the start date used for error calculation.
:param float endDate: Epoch representing the end date used in the error calculation.
:return: Returns a list with the defined error values.
:rtype: list
:raise: Raises a ValueError if startDate or endDate do not represent correct boundaries for error calculation.
"""
if startDate is not None:
possibleDates = filter(lambda date: date >= startDate, self._errorDates)
if 0 == len(possibleDates):
raise ValueError("%s does not represent a valid startDate." % startDate)
startIdx = self._errorDates.index(min(possibleDates))
else:
startIdx = int((startingPercentage * len(self._errorValues)) / 100.0)
if endDate is not None:
possibleDates = filter(lambda date: date <= endDate, self._errorDates)
if 0 == len(possibleDates):
raise ValueError("%s does not represent a valid endDate." % endDate)
endIdx = self._errorDates.index(max(possibleDates)) + 1
else:
endIdx = int((endPercentage * len(self._errorValues)) / 100.0)
return self._errorValues[startIdx:endIdx] | Gets the defined subset of self._errorValues.
Both parameters will be correct at this time.
:param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0].
It represents the value, where the error calculation should be started.
25.0 for example means that the first 25% of all calculated errors will be ignored.
:param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0].
It represents the value, after which all error values will be ignored. 90.0 for example means that
the last 10% of all local errors will be ignored.
:param float startDate: Epoch representing the start date used for error calculation.
:param float endDate: Epoch representing the end date used in the error calculation.
:return: Returns a list with the defined error values.
:rtype: list
:raise: Raises a ValueError if startDate or endDate do not represent correct boundaries for error calculation. |
def utc_datetime(dt=None, local_value=True):
    """ Convert local datetime and/or datetime without timezone information to UTC datetime with timezone
    information.

    :param dt: local datetime to convert. If is None, then system datetime value is used
    :param local_value: whether dt is a datetime in system timezone or UTC datetime without timezone information

    :return: datetime in UTC with tz set
    """
    # TODO: rename local_value to local_tz or in_local_tz
    if dt is None:
        # No input: take "now" directly in UTC.
        return datetime.now(tz=timezone.utc)
    if dt.utcoffset() is not None:
        # Already timezone-aware: just convert.
        return dt.astimezone(timezone.utc)
    if local_value is False:
        # Naive value that is already UTC: only attach the tzinfo.
        return dt.replace(tzinfo=timezone.utc)
    # Naive value in the system timezone: attach it, then convert.
    return dt.replace(tzinfo=local_tz()).astimezone(timezone.utc)
def _preflight_check(desired, fromrepo, **kwargs):
    '''
    Perform platform-specific checks on desired packages
    '''
    # Nothing to check on platforms without a pkg.check_db implementation.
    if 'pkg.check_db' not in __salt__:
        return {}
    ret = {'suggest': {}, 'no_suggest': []}
    pkginfo = __salt__['pkg.check_db'](
        *list(desired.keys()), fromrepo=fromrepo, **kwargs
    )
    for name, info in pkginfo.items():
        if info['found'] is False:
            if info['suggestions']:
                ret['suggest'][name] = info['suggestions']
            else:
                ret['no_suggest'].append(name)
    return ret
def bulk_upsert(self, docs, namespace, timestamp):
    """Update or insert multiple documents into Solr

    docs may be any iterable

    :param docs: iterable of raw documents to clean and add
    :param namespace: namespace the documents belong to
    :param timestamp: timestamp attached to the cleaned documents
    """
    from itertools import islice

    if self.auto_commit_interval is not None:
        add_kwargs = {
            "commit": (self.auto_commit_interval == 0),
            "commitWithin": str(self.auto_commit_interval)
        }
    else:
        add_kwargs = {"commit": False}

    cleaned = (self._clean_doc(d, namespace, timestamp) for d in docs)
    if self.chunk_size > 0:
        # BUG FIX: the original built batches with
        # `list(next(cleaned) for i in range(...))`, which raises
        # RuntimeError on Python 3.7+ (PEP 479) when the input is
        # exhausted mid-batch. islice handles exhaustion cleanly.
        batch = list(islice(cleaned, self.chunk_size))
        while batch:
            self.solr.add(batch, **add_kwargs)
            batch = list(islice(cleaned, self.chunk_size))
    else:
        self.solr.add(cleaned, **add_kwargs)
def get_processors(processor_cat, prop_defs, data_attr=None):
    """ reads the prop defs and adds applicable processors for the property

    Args:
        processor_cat(str): The category of processors to retreive
        prop_defs: property defintions as defined by the rdf defintions
        data_attr: the attr to manipulate during processing.

    Returns:
        list: a list of processors
    """
    processors = []
    # Each matching definition is looked up by its first rdf_type and
    # instantiated with its kds_params (defaulting to a single empty dict).
    for proc_def in prop_defs.get(processor_cat, []):
        proc_class = PropertyProcessor[proc_def['rdf_type'][0]]
        params = proc_def.get('kds_params', [{}])
        processors.append(proc_class(params, data_attr))
    return processors
def delete_table_rate_rule_by_id(cls, table_rate_rule_id, **kwargs):
    """Delete TableRateRule

    Delete an instance of TableRateRule by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.delete_table_rate_rule_by_id(table_rate_rule_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str table_rate_rule_id: ID of tableRateRule to delete. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the synchronous and the asynchronous paths delegate to the
    # *_with_http_info helper; with 'async' set it returns the request
    # thread, otherwise the response data.
    return cls._delete_table_rate_rule_by_id_with_http_info(
        table_rate_rule_id, **kwargs)
def load_dsdl(*paths, **args):
    """
    Loads the DSDL files under the given directory/directories, and creates
    types for each of them in the current module's namespace.

    If the exclude_dist argument is not present, or False, the DSDL
    definitions installed with this package will be loaded first.

    Also adds entries for all datatype (ID, kind)s to the DATATYPES
    dictionary, which maps datatype (ID, kind)s to their respective type
    classes.
    """
    global DATATYPES, TYPENAMES

    paths = list(paths)

    # Try to prepend the built-in DSDL files
    # TODO: why do we need try/except here?
    # noinspection PyBroadException
    try:
        if not args.get("exclude_dist", None):
            dsdl_path = pkg_resources.resource_filename(__name__, "dsdl_files")  # @UndefinedVariable
            paths = [os.path.join(dsdl_path, "uavcan")] + paths
            # Vendor-specific definitions are looked for in the user's home
            # directory; only immediate subdirectories are taken as
            # namespace roots.
            custom_path = os.path.join(os.path.expanduser("~"), "uavcan_vendor_specific_types")
            if os.path.isdir(custom_path):
                paths += [f for f in [os.path.join(custom_path, f) for f in os.listdir(custom_path)]
                          if os.path.isdir(f)]
    except Exception:
        pass

    root_namespace = Namespace()
    dtypes = dsdl.parse_namespaces(paths)

    for dtype in dtypes:
        # Register the type under its dotted namespace path and in the
        # global name table.
        namespace, _, typename = dtype.full_name.rpartition(".")
        root_namespace._path(namespace).__dict__[typename] = dtype
        TYPENAMES[dtype.full_name] = dtype

        if dtype.default_dtid:
            DATATYPES[(dtype.default_dtid, dtype.kind)] = dtype
            # Add the base CRC to each data type capable of being transmitted
            dtype.base_crc = dsdl.crc16_from_bytes(struct.pack("<Q", dtype.get_data_type_signature()))
            logger.debug("DSDL Load {: >30} DTID: {: >4} base_crc:{: >8}"
                         .format(typename, dtype.default_dtid, hex(dtype.base_crc)))

        def create_instance_closure(closure_type, _mode=None):
            # noinspection PyShadowingNames
            def create_instance(*args, **kwargs):
                if _mode:
                    assert '_mode' not in kwargs, 'Mode cannot be supplied to service type instantiation helper'
                    kwargs['_mode'] = _mode
                return transport.CompoundValue(closure_type, *args, **kwargs)
            return create_instance

        # _mode is passed as an argument (not captured from the loop) so each
        # closure keeps its own value — avoids the late-binding pitfall.
        dtype._instantiate = create_instance_closure(dtype)

        if dtype.kind == dtype.KIND_SERVICE:
            # Service types get dedicated request/response constructors.
            dtype.Request = create_instance_closure(dtype, _mode='request')
            dtype.Response = create_instance_closure(dtype, _mode='response')

    # Expose the standard "uavcan" sub-namespaces at module level, and
    # everything else under MODULE.thirdparty.
    namespace = root_namespace._path("uavcan")
    for top_namespace in namespace._namespaces():
        MODULE.__dict__[str(top_namespace)] = namespace.__dict__[top_namespace]

    MODULE.__dict__["thirdparty"] = Namespace()
    for ext_namespace in root_namespace._namespaces():
        if str(ext_namespace) != "uavcan":
            # noinspection PyUnresolvedReferences
            MODULE.thirdparty.__dict__[str(ext_namespace)] = root_namespace.__dict__[ext_namespace]
def acquireConnection(self):
    """ Get a Connection instance.

    Parameters:
    ----------------------------------------------------------------
    retval:       A ConnectionWrapper instance. NOTE: Caller
                    is responsible for calling the ConnectionWrapper
                    instance's release() method or use it in a context manager
                    expression (with ... as:) to release resources.
    """
    self._logger.debug("Acquiring connection")

    # Re-establish the connection if it died since last use (this mirrors
    # what PooledDB does).
    self._conn._ping_check()

    return ConnectionWrapper(
        dbConn=self._conn,
        cursor=self._conn.cursor(),
        releaser=self._releaseConnection,
        logger=self._logger)
def run_example(example_coroutine, *extra_args):
    """Run a hangups example coroutine.

    Args:
        example_coroutine (coroutine): Coroutine to run with a connected
            hangups client and arguments namespace as arguments.
        extra_args (str): Any extra command line arguments required by the
            example.
    """
    args = _get_parser(extra_args).parse_args()
    log_level = logging.DEBUG if args.debug else logging.WARNING
    logging.basicConfig(level=log_level)

    # Obtain hangups authentication cookies, prompting for credentials from
    # standard input if necessary.
    cookies = hangups.auth.get_auth_stdin(args.token_path)
    client = hangups.Client(cookies)

    loop = asyncio.get_event_loop()
    task = asyncio.ensure_future(
        _async_main(example_coroutine, client, args), loop=loop)
    try:
        loop.run_until_complete(task)
    except KeyboardInterrupt:
        # Give the task a chance to observe the cancellation before the
        # loop is torn down.
        task.cancel()
        loop.run_until_complete(task)
    finally:
        loop.close()
def end(self):
    """Get or set the end of the event.

    |  Will return an :class:`Arrow` object.
    |  May be set to anything that :func:`Arrow.get` understands.
    |  If set to a non null value, removes any already
       existing duration.
    |  Setting to None will have unexpected behavior if
       begin is not None.
    |  Must not be set to an inferior value than self.begin.
    """
    if self._duration:
        # End is duration-defined: beginning plus the stored duration.
        return self.begin + self._duration
    elif self._end_time:
        # End is explicitly time-defined.  (The previous code branched on
        # self.all_day here, but both branches returned the same value —
        # the redundant conditional has been removed.)
        return self._end_time
    elif self._begin:
        if self.all_day:
            # An all-day event with no explicit end lasts exactly one day.
            return self._begin + timedelta(days=1)
        # Instant event: it ends the moment it begins.
        return self._begin
    else:
        return None
def _rectangles_to_polygons(df):
"""
Convert rect data to polygons
Paramters
---------
df : dataframe
Dataframe with *xmin*, *xmax*, *ymin* and *ymax* columns,
plus others for aesthetics ...
Returns
-------
data : dataframe
Dataframe with *x* and *y* columns, plus others for
aesthetics ...
"""
n = len(df)
# Helper indexing arrays
xmin_idx = np.tile([True, True, False, False], n)
xmax_idx = ~xmin_idx
ymin_idx = np.tile([True, False, False, True], n)
ymax_idx = ~ymin_idx
# There are 2 x and 2 y values for each of xmin, xmax, ymin & ymax
# The positions are as layed out in the indexing arrays
# x and y values
x = np.empty(n*4)
y = np.empty(n*4)
x[xmin_idx] = df['xmin'].repeat(2)
x[xmax_idx] = df['xmax'].repeat(2)
y[ymin_idx] = df['ymin'].repeat(2)
y[ymax_idx] = df['ymax'].repeat(2)
# Aesthetic columns and others
other_cols = df.columns.difference(
['x', 'y', 'xmin', 'xmax', 'ymin', 'ymax'])
d = {col: np.repeat(df[col].values, 4) for col in other_cols}
data = pd.DataFrame({
'x': x,
'y': y,
**d
})
return data | Convert rect data to polygons
Paramters
---------
df : dataframe
Dataframe with *xmin*, *xmax*, *ymin* and *ymax* columns,
plus others for aesthetics ...
Returns
-------
data : dataframe
Dataframe with *x* and *y* columns, plus others for
aesthetics ... |
def _hook_xfer_mem(self, uc, access, address, size, value, data):
    """
    Handle memory operations from unicorn.

    NOTE(review): returning False here appears to abort the current
    emulation run so it can be restarted after the pending writes have
    been applied — confirm against the Unicorn hook contract.
    """
    assert access in (UC_MEM_WRITE, UC_MEM_READ, UC_MEM_FETCH)

    if access == UC_MEM_WRITE:
        # Mirror the emulated write into Manticore's CPU state
        # (size is in bytes; write_int takes a bit width).
        self._cpu.write_int(address, value, size * 8)

    # If client code is attempting to read a value, we need to bring it
    # in from Manticore state. If we try to mem_write it here, Unicorn
    # will segfault. We add the value to a list of things that need to
    # be written, and ask to restart the emulation.
    elif access == UC_MEM_READ:
        value = self._cpu.read_bytes(address, size)

        if address in self._should_be_written:
            # This address is already queued for writing; no need to
            # restart again.
            return True

        self._should_be_written[address] = value

        self._should_try_again = True
        return False

    return True
def delete_collection_namespaced_service_account(self, namespace, **kwargs):
    """
    delete collection of ServiceAccount
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_collection_namespaced_service_account(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
    :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both paths delegate to the *_with_http_info variant; when async_req
    # is set the call yields the request thread, otherwise the response
    # data is returned directly.
    return self.delete_collection_namespaced_service_account_with_http_info(
        namespace, **kwargs)
def do_random(context, seq):
    """Return a random item from the sequence."""
    # EAFP: random.choice raises IndexError on an empty sequence, in which
    # case we hand back an Undefined object from the template environment.
    try:
        picked = random.choice(seq)
    except IndexError:
        return context.environment.undefined(
            'No random item, sequence was empty.')
    return picked
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.