| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
def create_export_settings_window():
"""
    This function contains all the logic of the export settings window and runs the window on its own.
:return: None
"""
window = sg.Window("Export Settings", generate_export_settings_layout(), modal=True, finalize=True,
keep_on_top=True)
while True:
n_event, _ = window.read()
if n_event in ["Exit", sg.WIN_CLOSED]:
window.close()
return None
if n_event == "-PROGRAM_CODE-":
export(window)
if n_event == "-OVERWATCH_CODE-":
export_as_overwatch_code(window)
|
9552cfb269cb3e67cf3332783b9a43a674bc9e3d
| 6,900
|
def get_vertex_list(session, node_id, part_info):
"""Wrapper for HAPI_GetVertexList
Args:
session (int): The session of Houdini you are interacting with.
        node_id (int): The node to query.
        part_info (PartInfo): Part info of the part being queried.
Returns:
np.ndarray: Array of vertices
"""
data_buffer = (c_int * part_info.vertexCount)()
result = HAPI_LIB.HAPI_GetVertexList(
byref(session), node_id, part_info.id, byref(data_buffer),
0, part_info.vertexCount)
assert result == HDATA.Result.SUCCESS,\
"GetVertexList Failed with {0}".format(HDATA.Result(result).name)
data_np = np.frombuffer(data_buffer, np.int32)
return data_np
|
dd5a37e248347dc9e9b5f8fba07d202008626ea5
| 6,901
|
def lamb1(u,alpha=.5):
"""Approximate the Lambert W function.
Approximate the Lambert W function from its upper and lower bounds.
The parameter alpha (between 0 and 1) determines how close the
approximation is to the lower bound instead of the upper bound.
:arg float u: Modified argument of the function.
:arg float alpha: Bound parameter (default 0.5).
:returns: (-z)-value of the Lambert function.
:raises ValueError: If u is negative.
:raises ValueError: If alpha is not between 0 and 1.
"""
if u < 0:
        errmsg = 'Argument u must be non-negative'
raise ValueError(errmsg)
if alpha < 0 or alpha > 1:
errmsg = 'Parameter alpha must be between 0 and 1'
raise ValueError(errmsg)
beta = (2 + alpha)/3
negz = 1 + (2*u)**.5 + beta*u
return negz
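# Usage sketch for lamb1 above: alpha interpolates between the bounds through
# beta = (2 + alpha) / 3, e.g. lamb1(2.0, 0.5) = 1 + (2 * 2) ** 0.5 + (2.5 / 3) * 2.
print(lamb1(2.0, alpha=0.0), lamb1(2.0, alpha=0.5), lamb1(2.0, alpha=1.0))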
|
1d769ccb74334eef55aa1bc0697328b34ba067bc
| 6,902
|
def loglikelihood(time_steps: list) -> float:
"""Calculate the log-likelihood of the time steps from the estimation
Parameters
----------
time_steps : list
estimation time steps
Returns
-------
float
log-likelihood
"""
loglikelihood = 0
for time_step in time_steps:
loglikelihood += _loglikelihood(time_step)
return loglikelihood
|
6761ced2947d9ac382d53eef390bd827ceb51203
| 6,903
|
def get_r0_rm_rp(s, i_delta):
""" compute 3 points r0, r_minus and r_plus to determine apsis
compute these at s.i-i_delta and s.i-2*i_delta
"""
xp = s.Xlast[:, s.i % s.save_last]
x0 = s.Xlast[:, (s.i - i_delta) % s.save_last]
xm = s.Xlast[:, (s.i - 2 * i_delta) % s.save_last]
rp = norm(xp[0:3] - xp[3:6])
r0 = norm(x0[0:3] - x0[3:6])
rm = norm(xm[0:3] - xm[3:6])
return r0, rm, rp
|
83595b9b15eb9c9373aa4e8f75d2ffc39c8ba248
| 6,904
|
import os
def create_tf_example(image,
image_dir,
seg,
seg_dir):
"""Converts image and annotations to a tf.Example proto.
Args:
image: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
u'width', u'date_captured', u'flickr_url', u'id']
image_dir: directory containing the image files.
seg: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
u'width', u'date_captured', u'flickr_url', u'id']
seg_dir: directory containing the image files.
Returns:
example: The converted tf.Example
num_annotations_skipped: Number of (invalid) annotations that were ignored.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
filename = image['file_name']
img_format = os.path.splitext(filename)[-1]
full_path = os.path.join(image_dir, filename)
with tf.io.gfile.GFile(full_path, 'rb') as fid:
encoded_img = fid.read()
feature_dict = tfrecord_lib.image_info_to_feature_dict(
image['height'], image['width'], filename, image['id'], encoded_img, img_format)
seg_full_path = os.path.join(seg_dir, seg['file_name'])
with tf.io.gfile.GFile(seg_full_path, 'rb') as fid:
seg_encoded_img = fid.read()
feature_dict['image/segmentation/class/encoded'] = tfrecord_lib.convert_to_feature(seg_encoded_img)
num_annotations_skipped = 0 # data checks
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return example, num_annotations_skipped
|
23afe328d5a5436904cf1700b344d5f7d2c0f722
| 6,905
|
def build_rfb_lite(base, feature_layer, mbox, num_classes):
"""Receptive Field Block Net for Accurate and Fast Object Detection for embeded system
See: https://arxiv.org/pdf/1711.07767.pdf for more details.
"""
base_, extras_, norm_, head_ = add_extras(base(), feature_layer, mbox, num_classes, version='rfb_lite')
return RFB(base_, extras_, norm_, head_, feature_layer, num_classes)
|
c8b1810d088f816d4e3be587cb1085bacde08076
| 6,906
|
def bfunsmat(u, p, U):
"""Computes a matrix of the form :math:`B_{ij}`, where
:math:`i=0\\ldots p` and for each :math:`j` th column the
row :math:`i` of the matrix corresponds to the value of
:math:`(\\mathrm{span}(u_j)-p+i)` th bspline basis function at
:math:`u_j`.
Parameters:
u (np.array(float)) : evaluation point(s)
p (int) : basis function degree
U (np.array(float)) : knot vector
Returns:
np.array(float) : matrix :math:`B_{ij}`
"""
nkts = U.size
nbfuns = nkts - p - 1
npts = u.size
Bij = np.zeros((nbfuns, npts))
for j in range(0, npts):
span = fspan(u[j], p, U)
B_i = bfuns(span, u[j], p, U)
for i in range(0, p+1):
Bij[i,j] = B_i[i]
return Bij
|
6dc260a165c5ae25ac9914ff0b96c1fd8f05b93c
| 6,907
|
def getFourgram(words, join_string):
"""
Input: a list of words, e.g., ['I', 'am', 'Denny', 'boy']
    Output: a list of four-grams, e.g., ['I_am_Denny_boy']
I use _ as join_string for this example.
"""
assert type(words) == list
L = len(words)
if L > 3:
lst = []
        for i in range(L - 3):
lst.append( join_string.join([words[i], words[i+1], words[i+2], words[i+3]]) )
else:
        # fall back to trigrams
lst = getTrigram(words, join_string)
return lst
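# Usage sketch (getTrigram is assumed to be defined elsewhere for the short-input
# fallback): with exactly four words the result is a single four-gram.
example_fourgrams = getFourgram(['I', 'am', 'Denny', 'boy'], '_')  # ['I_am_Denny_boy']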
|
17717bb608a7ef5eff1ac9e1f49d2606b7113360
| 6,908
|
import math
def get_age_carbon_14_dating(carbon_14_ratio):
"""Returns the estimated age of the sample in year.
carbon_14_ratio: the percent (0 < percent < 1) of carbon-14
in the sample conpared to the amount in living
tissue (unitless). """
if isinstance(carbon_14_ratio, str):
raise TypeError("Please provide an integer")
elif carbon_14_ratio <= 0:
raise ValueError("Not acceptable, must be greater than 0 but less than 1")
elif carbon_14_ratio > 1:
raise ValueError("Too large, must be between 0 and 1")
calculation = math.log(carbon_14_ratio) / DECAY_CONSTANT * T_HALF
age = "{:.2f}".format(calculation) # rounds to 2 decimal places
return age
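# Usage sketch for get_age_carbon_14_dating above. DECAY_CONSTANT and T_HALF are
# module-level constants not shown in this snippet; the values below are assumptions
# chosen so that log(ratio) / DECAY_CONSTANT * T_HALF yields a positive age, not the
# original module's definitions.
DECAY_CONSTANT = -0.693  # assumed: -ln(2), so ages come out positive
T_HALF = 5730            # assumed: carbon-14 half-life in years
print(get_age_carbon_14_dating(0.35))  # roughly 8680, returned as a 2-decimal string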
|
8b0ab86e3c45a97065fefb6c4f02ab87c3e82d23
| 6,909
|
def get_input_definition() -> InputDefinition:
"""
Query ReconAll's input file definition (*t1_files*) to check for existing
runs.
Returns
-------
InputDefinition
ReconAll's *t1_files* input definition
"""
node = get_node()
return node.analysis_version.input_definitions.get(key=T1_FILES_KEY)
|
1575bc2521b6f041c4151be6405ac1d458333d62
| 6,910
|
def create_ou_process(action_spec, ou_stddev, ou_damping):
"""Create nested zero-mean Ornstein-Uhlenbeck processes.
The temporal update equation is:
.. code-block:: python
x_next = (1 - damping) * x + N(0, std_dev)
    Note: if ``action_spec`` is nested, the returned nested OUProcess will not be
checkpointed.
Args:
        action_spec (nested BoundedTensorSpec): action spec
ou_damping (float): Damping rate in the above equation. We must have
:math:`0 <= damping <= 1`.
ou_stddev (float): Standard deviation of the Gaussian component.
Returns:
nested ``OUProcess`` with the same structure as ``action_spec``.
"""
def _create_ou_process(action_spec):
return dist_utils.OUProcess(action_spec.zeros(), ou_damping, ou_stddev)
ou_process = alf.nest.map_structure(_create_ou_process, action_spec)
return ou_process
|
292b235863e57b49e531e5e5b091f55688357122
| 6,911
|
def clean_data(df):
"""
    Remove duplicate rows from a dataframe.
    Parameters:
        df (DataFrame): data frame
"""
df=df.drop_duplicates()
return df
|
7072885f7233c5407060344e6858f89108d61ee8
| 6,912
|
def IssueFactory(data, journal_id, issue_order):
"""
    Registers the issue (fascículo) using the opac schema.
    This function may raise the `models.Journal.DoesNotExist` exception.
"""
mongo_connect()
metadata = data["metadata"]
issue = models.Issue()
issue._id = issue.iid = data.get("id")
issue.type = metadata.get("type", "regular")
issue.spe_text = metadata.get("spe_text", "")
issue.start_month = metadata.get("publication_month", 0)
issue.end_month = metadata.get("publication_season", [0])[-1]
issue.year = metadata.get("publication_year")
issue.volume = metadata.get("volume", "")
issue.number = metadata.get("number", "")
issue.label = metadata.get(
"label", "%s%s" % ("v" + issue.volume, "n" + issue.number)
)
issue.order = metadata.get("order", 0)
issue.pid = metadata.get("pid", "")
issue.journal = models.Journal.objects.get(_id=journal_id)
issue.order = issue_order
return issue
|
49ef57cb1c628c05e30a35e10680d34140066182
| 6,913
|
def _is_permission_in_db(permission_name: str):
"""To check whether the given permission is in the DB
Parameters
----------
permission_name: str
A permission name we use internally.
E.g., hazard, hazard:hazard, project...
"""
return bool(
models.Auth0Permission.query.filter_by(permission_name=permission_name).first()
)
|
6e0e672d5c73e0740b695f29d3459a3b80c86831
| 6,914
|
import sys
import pyflakes
def check(source):
"""Return messages from pyflakes."""
if sys.version_info[0] == 2 and isinstance(source, unicode):
# Convert back to original byte string encoding, otherwise pyflakes
# call to compile() will complain. See PEP 263. This only affects
# Python 2.
try:
source = source.encode('utf-8')
except UnicodeError: # pragma: no cover
return []
reporter = ListReporter()
try:
pyflakes.api.check(source, filename='<string>', reporter=reporter)
except (AttributeError, RecursionError, UnicodeDecodeError):
pass
return reporter.messages
|
2b07fc9e7522ca8d356ce6509d93d8d9db04b204
| 6,915
|
from typing import Optional
def get_dataset(dataset_id: Optional[str] = None,
location: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatasetResult:
"""
Gets any metadata associated with a dataset.
"""
__args__ = dict()
__args__['datasetId'] = dataset_id
__args__['location'] = location
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:healthcare/v1:getDataset', __args__, opts=opts, typ=GetDatasetResult).value
return AwaitableGetDatasetResult(
name=__ret__.name,
time_zone=__ret__.time_zone)
|
985a7e9b7b124c0dba37455426889683e5769aaf
| 6,916
|
def is_email_available() -> bool:
"""
Returns whether email services are available on this instance (i.e. settings are in place).
"""
return bool(settings.EMAIL_HOST)
|
c8b8362aed7f2af5dd49070dce7f522fd0c2088a
| 6,917
|
def sql2label(sql, num_cols):
"""encode sql"""
# because of classification task, label is from 0
# so sel_num and cond_num should -1,and label should +1 in prediction phrase
cond_conn_op_label = sql.cond_conn_op
sel_num_label = sql.sel_num - 1
    # the new dataset can have cond_num = 0, so do not subtract 1
cond_num_label = len(sql.conds) + len(sql.having)
sel_label = np.zeros(num_cols, dtype='int32')
sel_agg_label = np.zeros((num_cols, SQL.num_agg_ops), dtype='int32')
for col_id, agg_op in zip(sql.sel, sql.agg):
assert col_id < num_cols, f"select col_id({col_id}) >= num_cols({num_cols}): {sql}"
sel_agg_label[col_id][agg_op] = 1
sel_label[col_id] = 1
    # len(SQL.op_sql_dict) is outside the valid op ID range, which means it defaults to no OP
cond_op_label = np.ones(num_cols, dtype='int32') * len(SQL.op_sql_dict)
having_agg_label = np.zeros((num_cols, SQL.num_agg_ops), dtype='int32')
for col_id, cond_op, _ in sql.conds:
assert col_id < num_cols, f"where col_id({col_id}) >= num_cols({num_cols}): {sql}"
cond_op_label[col_id] = cond_op
for agg, col_id, cond_op, _ in sql.having:
assert col_id < num_cols, f"having col_id({col_id}) >= num_cols({num_cols}): {sql}"
cond_op_label[col_id] = cond_op
having_agg_label[col_id][agg] = 1
order_col_label = np.zeros(num_cols, dtype='int32')
order_agg_label = np.zeros((num_cols, SQL.num_agg_ops), dtype='int32')
order_direction_label = sql.order_direction
for agg, order_col in sql.order_by:
order_col_label[order_col] = 1
order_agg_label[order_col][agg] = 1
group_num_label = sql.group_num
having_num_label = len(sql.having)
group_col_label = np.zeros(num_cols, dtype='int32')
for col_id in sql.group_by:
assert col_id < num_cols, f"group_by col_id({col_id}) >= num_cols({num_cols}): {sql}"
group_col_label[col_id] = 1
return sel_num_label, cond_num_label, cond_conn_op_label, \
sel_agg_label, sel_label, cond_op_label, \
order_col_label, order_agg_label, order_direction_label, \
group_num_label, having_num_label, group_col_label, having_agg_label
|
b25c819e4645c07216970877ac95d20b0f8baab6
| 6,918
|
import time
def retrieveToken(verbose: bool = False, save: bool = False, **kwargs)->str:
"""
LEGACY retrieve token directly following the importConfigFile or Configure method.
"""
token_with_expiry = token_provider.get_token_and_expiry_for_config(config.config_object,**kwargs)
token = token_with_expiry['token']
config.config_object['token'] = token
config.config_object['date_limit'] = time.time() + token_with_expiry['expiry'] / 1000 - 500
config.header.update({'Authorization': f'Bearer {token}'})
if verbose:
print(f"token valid till : {time.ctime(time.time() + token_with_expiry['expiry'] / 1000)}")
return token
|
b419934bf2725b46d23abc506c5b5a2828de1d0c
| 6,919
|
def format_str_for_write(input_str: str) -> bytes:
"""Format a string for writing to SteamVR's stream."""
if len(input_str) < 1:
return "".encode("utf-8")
if input_str[-1] != "\n":
return (input_str + "\n").encode("utf-8")
return input_str.encode("utf-8")
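# Usage sketch: the helper always returns newline-terminated UTF-8 bytes,
# except for empty input, which stays empty.
assert format_str_for_write("hello") == b"hello\n"
assert format_str_for_write("hello\n") == b"hello\n"
assert format_str_for_write("") == b""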
|
1b83a2c75118b03b7af06350e069775c0b877816
| 6,920
|
from functools import wraps
def reverse_result(func):
"""The recursive function `get_path` returns results in order reversed
from desired. This decorator just reverses those results before returning
them to caller.
"""
@wraps(func)
def inner(*args, **kwargs):
result = func(*args, **kwargs)
if result is not None:
return result[::-1]
return inner
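# Usage sketch for reverse_result above, using a hypothetical helper (not part of
# the original code) that walks predecessor links and therefore builds its path
# end-first; the decorator flips the result to start-first.
@reverse_result
def walk_predecessors(predecessors, node):
    path = []
    while node is not None:
        path.append(node)
        node = predecessors.get(node)
    return path

# walk_predecessors({'c': 'b', 'b': 'a', 'a': None}, 'c') -> ['a', 'b', 'c']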
|
c13d28550e77a8fba149c50673252012c712961f
| 6,921
|
def convert_from_opencorpora_tag(to_ud, tag: str, text: str):
"""
    Convert tags from the OpenCorpora format to Universal Dependencies.
    :param to_ud: converter.
    :param tag: tag in the OpenCorpora format.
    :param text: token.
    :return: tag in UD.
"""
ud_tag = to_ud(str(tag), text)
pos = ud_tag.split()[0]
gram = ud_tag.split()[1]
return pos, gram
|
0e650cc4976d408ed88ef9280fe3a74261353561
| 6,922
|
import struct
def reg_to_float(reg):
"""convert reg value to Python float"""
st = struct.pack(">L", reg)
return struct.unpack(">f", st)[0]
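# Usage sketch: interpret a 32-bit register value as an IEEE 754 single-precision
# float, e.g. 0x3F800000 is the bit pattern of 1.0 and 0x40490FDB is roughly pi.
assert reg_to_float(0x3F800000) == 1.0
assert abs(reg_to_float(0x40490FDB) - 3.14159265) < 1e-6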
|
f4a2d416e880807503f3c0ba0b042fbbecc09064
| 6,923
|
def wvelocity(grid, u, v, zeta=0):
"""
Compute "true" vertical velocity
Parameters
----------
grid : seapy.model.grid,
The grid to use for the calculations
u : ndarray,
The u-field in time
v : ndarray,
The v-field in time
zeta : ndarray, optional,
The zeta-field in time
Returns
-------
w : ndarray,
Vertical Velocity
"""
    grid = seapy.model.asgrid(grid)
    u = np.ma.array(u)
    v = np.ma.array(v)
    zeta = np.ma.array(zeta)
    # Check the sizes
    while u.ndim < 4:
        u = u[np.newaxis, ...]
    while v.ndim < 4:
        v = v[np.newaxis, ...]
    while zeta.ndim < 3:
        zeta = zeta[np.newaxis, ...]
    # Get omega
    W, z_r, z_w, thick_u, thick_v = omega(grid, u, v, zeta, scale=True,
                                          work=True)
    # Compute quasi-horizontal motions (Ui + Vj)*GRAD s(z)
    vert = z_r * 0
    # U-contribution
    wrk = u * (z_r[:, :, :, 1:] - z_r[:, :, :, :-1]) * \
        (grid.pm[:, 1:] - grid.pm[:, :-1])
    vert[:, :, :, 1:-1] = 0.25 * (wrk[:, :, :, :-1] + wrk[:, :, :, 1:])
# V-contribution
wrk = v * (z_r[:, :, 1:, :] - z_r[:, :, :-1, :]) * \
(grid.pn[1:, :] - grid.pn[:-1, :])
vert[:, :, 1:-1, :] += 0.25 * (wrk[:, :, :-1, :] + wrk[:, :, 1:, :])
# Compute barotropic velocity [ERROR IN FORMULATION RIGHT NOW]
wrk = np.zeros((vert.shape[0], vert.shape[2], vert.shape[3]))
ubar = np.sum(u * thick_u, axis=1) / np.sum(thick_u, axis=1)
vbar = np.sum(v * thick_v, axis=1) / np.sum(thick_v, axis=1)
# wrk[:, 1:-1, 1:-1] = (ubar[:, 1:-1, :-1] - ubar[:, 1:-1, 1:] +
# vbar[:, :-1, 1:-1] - vbar[:, 1:, 1:-1])
# Shift vert from rho to w
wvel = z_w * 0
# First two layers
slope = (z_r[:, 0, :, :] - z_w[:, 0, :, :]) / \
(z_r[:, 1, :, :] - z_r[:, 0, :, :])
wvel[:, 0, :, :] = 0.375 * (vert[:, 0, :, :] - slope *
(vert[:, 1, :, :] - vert[:, 0, :, :])) + \
0.75 * vert[:, 0, :, :] - \
0.125 * vert[:, 1, :, :]
wvel[:, 1, :, :] = W[:, 1, :, :] + wrk + \
0.375 * vert[:, 0, :, :] + \
0.75 * vert[:, 1, :, :] - 0.125 * vert[:, 2, :, :]
# Middle of the grid
wvel[:, 2:-2, :, :] = W[:, 2:-2, :, :] + \
wrk[:, np.newaxis, :, :] + \
0.5625 * (vert[:, 1:-2, :, :] + vert[:, 2:-1, :, :]) - \
0.0625 * (vert[:, :-3, :, :] + vert[:, 3:, :, :])
# Upper two layers
slope = (z_w[:, -1, :, :] - z_r[:, -1, :, :]) / \
(z_r[:, -1, :, :] - z_r[:, -2, :, :])
wvel[:, -1, :, :] = wrk + 0.375 * (vert[:, -1, :, :] + slope *
(vert[:, -1, :, :] - vert[:, -2, :, :])) + \
0.75 * vert[:, -1, :, :] - \
0.0625 * vert[:, -2, :, :]
wvel[:, -2, :, :] = W[:, -2, :, :] + 0.375 * vert[:, -1, :, :] + \
wrk + 0.75 * vert[:, -2, :, :] - \
0.125 * vert[:, -3, :, :]
# No gradient at the boundaries
wvel[:, :, 0, :] = wvel[:, :, 1, :]
wvel[:, :, -2:, :] = wvel[:, :, -3:-2, :]
wvel[:, :, :, 0] = wvel[:, :, :, 1]
wvel[:, :, :, -2:] = wvel[:, :, :, -3:-2]
return wvel
|
452e84b334b42b9099ed888319a3cc88e7191e9b
| 6,924
|
def _as_nested_lists(vertices):
""" Convert a nested structure such as an ndarray into a list of lists. """
out = []
for part in vertices:
if hasattr(part[0], "__iter__"):
verts = _as_nested_lists(part)
out.append(verts)
else:
out.append(list(part))
return out
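import numpy as np

# Usage sketch: a small ndarray of polygon parts becomes equivalent nested Python
# lists (leaf values keep their NumPy scalar types), which is convenient for
# JSON-style serialization.
parts = np.array([[[0, 0], [1, 0], [1, 1]],
                  [[2, 2], [3, 2], [3, 3]]])
nested = _as_nested_lists(parts)  # values equal to [[[0, 0], [1, 0], [1, 1]], [[2, 2], [3, 2], [3, 3]]]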
|
c69bd2084aa8e76a53adf3e25286a8dd7ae23176
| 6,925
|
def markdown(code: str) -> str:
"""Convert markdown to HTML using markdown2."""
return markdown2.markdown(code, extras=markdown_extensions)
|
09f463aa28f9289d05b44244e6ac60ce7905af83
| 6,926
|
import json
import urllib
async def post_notification(request):
"""
Create a new notification to run a specific plugin
:Example:
curl -X POST http://localhost:8081/fledge/notification -d '{"name": "Test Notification", "description":"Test Notification", "rule": "threshold", "channel": "email", "notification_type": "one shot", "enabled": false}'
curl -X POST http://localhost:8081/fledge/notification -d '{"name": "Test Notification", "description":"Test Notification", "rule": "threshold", "channel": "email", "notification_type": "one shot", "enabled": false, "rule_config": {}, "delivery_config": {}}'
"""
try:
notification_service = ServiceRegistry.get(s_type=ServiceRecord.Type.Notification.name)
_address, _port = notification_service[0]._address, notification_service[0]._port
except service_registry_exceptions.DoesNotExist:
raise web.HTTPNotFound(reason="No Notification service available.")
try:
data = await request.json()
if not isinstance(data, dict):
raise ValueError('Data payload must be a valid JSON')
name = data.get('name', None)
description = data.get('description', None)
rule = data.get('rule', None)
channel = data.get('channel', None)
notification_type = data.get('notification_type', None)
enabled = data.get('enabled', None)
rule_config = data.get('rule_config', {})
delivery_config = data.get('delivery_config', {})
retrigger_time = data.get('retrigger_time', None)
try:
if retrigger_time:
if float(retrigger_time) > 0 and float(retrigger_time).is_integer():
pass
else:
raise ValueError
except ValueError:
raise ValueError('Invalid retrigger_time property in payload.')
if name is None or name.strip() == "":
raise ValueError('Missing name property in payload.')
if description is None:
raise ValueError('Missing description property in payload.')
if rule is None:
raise ValueError('Missing rule property in payload.')
if channel is None:
raise ValueError('Missing channel property in payload.')
if notification_type is None:
raise ValueError('Missing notification_type property in payload.')
if utils.check_reserved(name) is False:
raise ValueError('Invalid name property in payload.')
if utils.check_reserved(rule) is False:
raise ValueError('Invalid rule property in payload.')
if utils.check_reserved(channel) is False:
raise ValueError('Invalid channel property in payload.')
if notification_type not in NOTIFICATION_TYPE:
raise ValueError('Invalid notification_type property in payload.')
if enabled is not None:
if enabled not in ['true', 'false', True, False]:
raise ValueError('Only "true", "false", true, false are allowed for value of enabled.')
is_enabled = "true" if ((type(enabled) is str and enabled.lower() in ['true']) or (
(type(enabled) is bool and enabled is True))) else "false"
storage = connect.get_storage_async()
config_mgr = ConfigurationManager(storage)
curr_config = await config_mgr.get_category_all_items(name)
if curr_config is not None:
raise ValueError("A Category with name {} already exists.".format(name))
try:
# Get default config for rule and channel plugins
url = '{}/plugin'.format(request.url)
try:
# When authentication is mandatory we need to pass token in request header
auth_token = request.token
except AttributeError:
auth_token = None
list_plugins = json.loads(await _hit_get_url(url, auth_token))
r = list(filter(lambda rules: rules['name'] == rule, list_plugins['rules']))
c = list(filter(lambda channels: channels['name'] == channel, list_plugins['delivery']))
if len(r) == 0 or len(c) == 0: raise KeyError
rule_plugin_config = r[0]['config']
delivery_plugin_config = c[0]['config']
except KeyError:
raise ValueError("Invalid rule plugin {} and/or delivery plugin {} supplied.".format(rule, channel))
# Verify if rule_config contains valid keys
if rule_config != {}:
for k, v in rule_config.items():
if k not in rule_plugin_config:
raise ValueError("Invalid key {} in rule_config {} supplied for plugin {}.".format(k, rule_config, rule))
# Verify if delivery_config contains valid keys
if delivery_config != {}:
for k, v in delivery_config.items():
if k not in delivery_plugin_config:
raise ValueError(
"Invalid key {} in delivery_config {} supplied for plugin {}.".format(k, delivery_config, channel))
# First create templates for notification and rule, channel plugins
post_url = 'http://{}:{}/notification/{}'.format(_address, _port, urllib.parse.quote(name))
await _hit_post_url(post_url) # Create Notification template
post_url = 'http://{}:{}/notification/{}/rule/{}'.format(_address, _port, urllib.parse.quote(name),
urllib.parse.quote(rule))
await _hit_post_url(post_url) # Create Notification rule template
post_url = 'http://{}:{}/notification/{}/delivery/{}'.format(_address, _port, urllib.parse.quote(name),
urllib.parse.quote(channel))
await _hit_post_url(post_url) # Create Notification delivery template
# Create configurations
notification_config = {
"description": description,
"rule": rule,
"channel": channel,
"notification_type": notification_type,
"enable": is_enabled,
}
if retrigger_time:
notification_config["retrigger_time"] = retrigger_time
await _update_configurations(config_mgr, name, notification_config, rule_config, delivery_config)
audit = AuditLogger(storage)
await audit.information('NTFAD', {"name": name})
except ValueError as ex:
raise web.HTTPBadRequest(reason=str(ex))
except Exception as e:
raise web.HTTPInternalServerError(reason=str(e))
else:
return web.json_response({'result': "Notification {} created successfully".format(name)})
|
bdc85dd3d93f51352776a3e63b34a18961014058
| 6,927
|
def test_send_file_to_router(monkeypatch, capsys):
"""
.
"""
# pylint: disable=unused-argument
@counter_wrapper
def get_commands(*args, **kwargs):
"""
.
"""
return "commands"
@counter_wrapper
def add_log(log: Log, cursor=None):
"""
.
"""
assert (
log.message == "Adding command set /tmp/foo.sh to router"
), "Log has correct file name"
monkeypatch.setattr(deploy_helper, "generate_bash_commands", get_commands)
monkeypatch.setattr(db, "add_deployment_log", add_log)
monkeypatch.setattr(
deploy_helper, "write_data_to_router_file", lambda *args, **kwargs: False
)
with pytest.raises(ValueError):
deployment.send_file_to_router(
"before", "after", None, ["commands"], {}, "/tmp/foo.sh"
)
assert get_commands.counter == 1, "Commands generated"
assert add_log.counter == 1, "Log added"
printed = capsys.readouterr()
assert printed.out == "Failed to write /tmp/foo.sh to router\n", "Error printed"
monkeypatch.setattr(
deploy_helper, "write_data_to_router_file", lambda *args, **kwargs: True
)
deployment.send_file_to_router(
"before", "after", None, ["commands"], {}, "/tmp/foo.sh"
)
assert get_commands.counter == 2, "Commands generated"
assert add_log.counter == 2, "Log added"
|
739e9d2dbb9adc40b386566b4e73dae98381ed4c
| 6,928
|
def smiles2mol(smiles):
"""Convert SMILES string into rdkit.Chem.rdchem.Mol.
Args:
smiles: str, a SMILES string.
Returns:
mol: rdkit.Chem.rdchem.Mol
"""
smiles = canonicalize(smiles)
mol = Chem.MolFromSmiles(smiles)
if mol is None:
return None
Chem.Kekulize(mol)
return mol
|
56a8e0b28f98b1dd920cf03977eb6086a134fd8f
| 6,929
|
import sys
def parallel_execute(objects, func, get_name, msg, get_deps=None):
"""Runs func on objects in parallel while ensuring that func is
ran on object only after it is ran on all its dependencies.
get_deps called on object must return a collection with its dependencies.
get_name called on object must return its name.
"""
objects = list(objects)
stream = get_output_stream(sys.stderr)
writer = ParallelStreamWriter(stream, msg)
for obj in objects:
writer.initialize(get_name(obj))
q = setup_queue(objects, func, get_deps, get_name)
done = 0
errors = {}
error_to_reraise = None
returned = [None] * len(objects)
while done < len(objects):
try:
obj, result, exception = q.get(timeout=1)
except Empty:
continue
# See https://github.com/docker/compose/issues/189
except thread.error:
raise ShutdownException()
if exception is None:
writer.write(get_name(obj), 'done')
returned[objects.index(obj)] = result
elif isinstance(exception, APIError):
errors[get_name(obj)] = exception.explanation
writer.write(get_name(obj), 'error')
else:
errors[get_name(obj)] = exception
error_to_reraise = exception
done += 1
for obj_name, error in errors.items():
stream.write("\nERROR: for {} {}\n".format(obj_name, error))
if error_to_reraise:
raise error_to_reraise
return returned
|
e774ceee6a9289bdbf70e2f7c5055a2f39c9804b
| 6,930
|
def build_term_map(deg, blocklen):
"""
Builds term map (degree, index) -> term
:param deg:
:param blocklen:
:return:
"""
term_map = [[0] * comb(blocklen, x, True) for x in range(deg + 1)]
for dg in range(1, deg + 1):
for idx, x in enumerate(term_generator(dg, blocklen - 1)):
term_map[dg][idx] = x
return term_map
|
3e70cb38314189ff33da3eeb43ca0c68d13904cd
| 6,931
|
def gen_sets():
"""
List of names of all available problem generators
"""
return registered_gens.keys()
|
f5aefd9d480115013ef8423ce6fd173d5acf0045
| 6,932
|
def is_valid_currency(currency_: str) -> bool:
"""
    is_valid_currency: check whether the given currency is valid
    @currency_(str): currency code
    return(bool): True if currency_ has a record in both FROM_CNY and TO_CNY
"""
return currency_ in FROM_CNY and currency_ in TO_CNY
|
5b95b0d0a76e5d979e7a560ee14f6adf2c79e140
| 6,933
|
from typing import List
from typing import Tuple
def load_gene_prefixes() -> List[Tuple[str, str, str]]:
"""Returns FamPlex gene prefixes as a list of rows
Returns
-------
list
List of lists corresponding to rows in gene_prefixes.csv. Each row has
three columns [Pattern, Category, Notes].
"""
return _load_csv(GENE_PREFIXES_PATH)
|
9fc450636a4b517a79350b9b6131dccfe860c58e
| 6,934
|
def create_page_panels_base(num_panels=0,
layout_type=None,
type_choice=None,
page_name=None):
"""
This function creates the base panels for one page
    it specifies how a page should be laid out and
how many panels should be in it
:param num_panels: how many panels should be on a page
if 0 then the function chooses, defaults to 0
:type num_panels: int, optional
:param layout_type: whether the page should consist of
vertical, horizontal or both types of panels, defaults to None
:type layout_type: str, optional
:param type_choice: If having selected vh panels select a type
of layout specifically, defaults to None
:type type_choice: str, optional
:param page_name: A specific name for the page
:type page_name: str, optional
    :return: A Page object with the panels initialized
:rtype: Page
"""
# TODO: Skew panel number distribution
# Page dimensions turned to coordinates
topleft = (0.0, 0.0)
topright = (cfg.page_width, 0.0)
bottomleft = (0.0, cfg.page_height)
bottomright = cfg.page_size
coords = [
topleft,
topright,
bottomright,
bottomleft
]
if layout_type is None:
layout_type = np.random.choice(["v", "h", "vh"])
# Panels encapsulated and returned within page
if page_name is None:
page = Page(coords, layout_type, num_panels)
else:
page = Page(coords, layout_type, num_panels, name=page_name)
# If you want only vertical panels
if layout_type == "v":
max_num_panels = 4
if num_panels < 1:
num_panels = np.random.choice([3, 4])
page.num_panels = num_panels
else:
page.num_panels = num_panels
draw_n_shifted(num_panels, page, "v")
# If you want only horizontal panels
elif layout_type == "h":
max_num_panels = 5
if num_panels < 1:
num_panels = np.random.randint(3, max_num_panels+1)
page.num_panels = num_panels
else:
page.num_panels = num_panels
draw_n_shifted(num_panels, page, "h")
# If you want both horizontal and vertical panels
elif layout_type == "vh":
max_num_panels = 8
if num_panels < 1:
num_panels = np.random.randint(2, max_num_panels+1)
page.num_panels = num_panels
else:
page.num_panels = num_panels
if num_panels == 2:
# Draw 2 rectangles
# vertically or horizontally
horizontal_vertical = np.random.choice(["h", "v"])
draw_two_shifted(page, horizontal_vertical)
if num_panels == 3:
# Draw 2 rectangles
# Vertically or Horizontally
horizontal_vertical = np.random.choice(["h", "v"])
draw_two_shifted(page, horizontal_vertical)
next_div = invert_for_next(horizontal_vertical)
# Pick one and divide it into 2 rectangles
choice_idx = choose(page)
choice = page.get_child(choice_idx)
draw_two_shifted(choice, next_div)
if num_panels == 4:
horizontal_vertical = np.random.choice(["h", "v"])
# Possible layouts with 4 panels
if type_choice is None:
type_choice = np.random.choice(["eq", "uneq", "div",
"trip", "twoonethree"])
# Draw two rectangles
if type_choice == "eq":
draw_two_shifted(page, horizontal_vertical, shift=0.5)
next_div = invert_for_next(horizontal_vertical)
# Divide each into 2 rectangles equally
shift_min = 25
shift_max = 75
shift = np.random.randint(shift_min, shift_max)
shift = shift/100
draw_two_shifted(page.get_child(0), next_div, shift)
draw_two_shifted(page.get_child(1), next_div, shift)
# Draw two rectangles
elif type_choice == "uneq":
draw_two_shifted(page, horizontal_vertical, shift=0.5)
next_div = invert_for_next(horizontal_vertical)
# Divide each into 2 rectangles unequally
draw_two_shifted(page.get_child(0), next_div)
draw_two_shifted(page.get_child(1), next_div)
elif type_choice == "div":
draw_two_shifted(page, horizontal_vertical, shift=0.5)
next_div = invert_for_next(horizontal_vertical)
# Pick one and divide into 2 rectangles
choice1_idx = choose(page)
choice1 = page.get_child(choice1_idx)
draw_two_shifted(choice1, next_div)
# Pick one of these two and divide that into 2 rectangles
choice2_idx = choose(choice1)
choice2 = choice1.get_child(choice2_idx)
next_div = invert_for_next(next_div)
draw_two_shifted(choice2, next_div)
# Draw three rectangles
elif type_choice == "trip":
draw_n(3, page, horizontal_vertical)
# Pick one and divide it into two
choice_idx = choose(page)
choice = page.get_child(choice_idx)
next_div = invert_for_next(horizontal_vertical)
draw_two_shifted(choice, next_div)
# Draw two rectangles
elif type_choice == "twoonethree":
draw_two_shifted(page, horizontal_vertical)
# Pick one and divide it into 3 rectangles
choice_idx = choose(page)
choice = page.get_child(choice_idx)
next_div = invert_for_next(horizontal_vertical)
draw_n_shifted(3, choice, next_div)
if num_panels == 5:
# Draw two rectangles
horizontal_vertical = np.random.choice(["h", "v"])
# Possible layouts with 5 panels
if type_choice is None:
type_choice = np.random.choice(["eq", "uneq", "div",
"twotwothree", "threetwotwo",
"fourtwoone"])
if type_choice == "eq" or type_choice == "uneq":
draw_two_shifted(page, horizontal_vertical, shift=0.5)
next_div = invert_for_next(horizontal_vertical)
# Pick one and divide it into two then
choice_idx = choose(page)
choice = page.get_child(choice_idx)
draw_two_shifted(choice, next_div)
# Divide each into 2 rectangles equally
if type_choice == "eq":
shift_min = 25
shift_max = 75
shift = np.random.randint(shift_min, shift_max)
set_shift = shift / 100
else:
# Divide each into 2 rectangles unequally
set_shift = None
next_div = invert_for_next(next_div)
draw_two_shifted(choice.get_child(0),
next_div,
shift=set_shift)
draw_two_shifted(choice.get_child(1),
next_div,
shift=set_shift)
# Draw two rectangles
elif type_choice == "div":
draw_two_shifted(page, horizontal_vertical, shift=0.5)
next_div = invert_for_next(horizontal_vertical)
# Divide both equally
draw_two_shifted(page.get_child(0), next_div)
draw_two_shifted(page.get_child(1), next_div)
# Pick one of all of them and divide into two
page_child_chosen = np.random.choice(page.children)
choice_idx, left_choices = choose_and_return_other(
page_child_chosen
)
choice = page_child_chosen.get_child(choice_idx)
next_div = invert_for_next(next_div)
draw_two_shifted(choice,
horizontal_vertical=next_div,
shift=0.5
)
# Draw two rectangles
elif type_choice == "twotwothree":
draw_two_shifted(page, horizontal_vertical, shift=0.5)
next_div = invert_for_next(horizontal_vertical)
# Pick which one gets 2 and which gets 3
choice_idx, left_choices = choose_and_return_other(page)
choice = page.get_child(choice_idx)
other = page.get_child(left_choices[0])
# Divide one into 2
next_div = invert_for_next(horizontal_vertical)
draw_two_shifted(choice, next_div)
# Divide other into 3
draw_n(3, other, next_div)
# Draw 3 rectangles (horizontally or vertically)
elif type_choice == "threetwotwo":
draw_n(3, page, horizontal_vertical)
next_div = invert_for_next(horizontal_vertical)
choice1_idx, left_choices = choose_and_return_other(page)
choice2_idx = np.random.choice(left_choices)
choice1 = page.get_child(choice1_idx)
choice2 = page.get_child(choice2_idx)
# Pick two and divide each into two
draw_two_shifted(choice1, next_div)
draw_two_shifted(choice2, next_div)
# Draw 4 rectangles vertically
elif type_choice == "fourtwoone":
draw_n(4, page, horizontal_vertical)
# Pick one and divide into two
choice_idx = choose(page)
choice = page.get_child(choice_idx)
next_div = invert_for_next(horizontal_vertical)
draw_two_shifted(choice, next_div)
if num_panels == 6:
# Possible layouts with 6 panels
if type_choice is None:
type_choice = np.random.choice(["tripeq", "tripuneq",
"twofourtwo", "twothreethree",
"fourtwotwo"])
horizontal_vertical = np.random.choice(["v", "h"])
# Draw 3 rectangles (V OR H)
if type_choice == "tripeq" or type_choice == "tripuneq":
draw_n_shifted(3, page, horizontal_vertical)
# Split each equally
if type_choice == "tripeq":
shift = np.random.randint(25, 75)
shift = shift/100
# Split each unequally
else:
shift = None
next_div = invert_for_next(horizontal_vertical)
for panel in page.children:
draw_two_shifted(panel, next_div, shift=shift)
# Draw 2 rectangles
elif type_choice == "twofourtwo":
draw_two_shifted(page, horizontal_vertical)
                # Split one half into 4 panels and the other into 2
next_div = invert_for_next(horizontal_vertical)
draw_n_shifted(4, page.get_child(0), next_div)
draw_two_shifted(page.get_child(1), next_div)
# Draw 2 rectangles
elif type_choice == "twothreethree":
# Split 3 in each
draw_two_shifted(page, horizontal_vertical)
next_div = invert_for_next(horizontal_vertical)
for panel in page.children:
                    # Allow each initial panel to grow up to 75% of 100/n
n = 3
shifts = []
choice_max = round((100/n)*1.5)
choice_min = round((100/n)*0.5)
for i in range(0, n):
shift_choice = np.random.randint(
choice_min,
choice_max
)
choice_max = choice_max + ((100/n) - shift_choice)
shifts.append(shift_choice)
to_add_or_remove = (100 - sum(shifts))/len(shifts)
normalized_shifts = []
for shift in shifts:
new_shift = shift + to_add_or_remove
normalized_shifts.append(new_shift/100)
draw_n_shifted(3,
panel,
next_div,
shifts=normalized_shifts
)
# Draw 4 rectangles
elif type_choice == "fourtwotwo":
draw_n_shifted(4, page, horizontal_vertical)
# Split two of them
choice1_idx, left_choices = choose_and_return_other(page)
choice2_idx = np.random.choice(left_choices)
choice1 = page.get_child(choice1_idx)
choice2 = page.get_child(choice2_idx)
next_div = invert_for_next(horizontal_vertical)
draw_two_shifted(choice1, next_div)
draw_two_shifted(choice2, next_div)
if num_panels == 7:
# Possible layouts with 7 panels
types = ["twothreefour", "threethreetwotwo", "threefourtwoone",
"threethreextwoone", "fourthreextwo"]
if type_choice is None:
type_choice = np.random.choice(types)
# Draw two split 3-4 - HV
# Draw two rectangles
if type_choice == "twothreefour":
horizontal_vertical = np.random.choice(["h", "v"])
draw_two_shifted(page, horizontal_vertical, shift=0.5)
# Pick one and split one into 4 rectangles
choice_idx, left_choices = choose_and_return_other(page)
choice = page.get_child(choice_idx)
other = page.get_child(left_choices[0])
next_div = invert_for_next(horizontal_vertical)
draw_n_shifted(4, choice, next_div)
# Some issue with the function calls and seeding
n = 3
shifts = []
choice_max = round((100/n)*1.5)
choice_min = round((100/n)*0.5)
for i in range(0, n):
shift_choice = np.random.randint(choice_min, choice_max)
choice_max = choice_max + ((100/n) - shift_choice)
shifts.append(shift_choice)
to_add_or_remove = (100 - sum(shifts))/len(shifts)
normalized_shifts = []
for shift in shifts:
new_shift = shift + to_add_or_remove
normalized_shifts.append(new_shift/100)
# Pick another and split into 3 rectangles
draw_n_shifted(3, other, next_div, shifts=normalized_shifts)
# Draw three rectangles
elif type_choice == "threethreetwotwo":
draw_n(3, page, "h")
# Pick one and split it into 3 rectangles
choice_idx, left_choices = choose_and_return_other(page)
choice = page.get_child(choice_idx)
draw_n_shifted(3, choice, "v")
# Split the other two into 2 rectangles
draw_two_shifted(page.get_child(left_choices[0]), "v")
draw_two_shifted(page.get_child(left_choices[1]), "v")
# Draw 3 rectangles
elif type_choice == "threefourtwoone":
draw_n(3, page, "h")
# Pick two of three rectangles and let one be
choice_idx, left_choices = choose_and_return_other(page)
choice = page.get_child(choice_idx)
other_idx = np.random.choice(left_choices)
other = page.get_child(other_idx)
# Of the picked split one into 4 rectangles
draw_n_shifted(4, choice, "v")
# Split the other into 2 rectangles
draw_two_shifted(other, "v")
# Draw 3 rectangles
elif type_choice == "threethreextwoone":
draw_n(3, page, "h")
# Pick two and leave one
choice_idx, left_choices = choose_and_return_other(page)
choice = page.get_child(choice_idx)
other = page.get_child(left_choices[0])
# Of the picked split one into 3
draw_n_shifted(3, choice, "v")
# Some issue with the function calls and seeding
n = 3
shifts = []
choice_max = round((100/n)*1.5)
choice_min = round((100/n)*0.5)
for i in range(0, n):
shift_choice = np.random.randint(choice_min, choice_max)
choice_max = choice_max + ((100/n) - shift_choice)
shifts.append(shift_choice)
to_add_or_remove = (100 - sum(shifts))/len(shifts)
normalized_shifts = []
for shift in shifts:
new_shift = shift + to_add_or_remove
normalized_shifts.append(new_shift/100)
# Split the other into 3 as well
draw_n_shifted(3, other, "v", shifts=normalized_shifts)
# Draw 4 split 3x2 - HV
# Draw 4 rectangles
elif type_choice == "fourthreextwo":
horizontal_vertical = np.random.choice(["h", "v"])
draw_n(4, page, horizontal_vertical)
# Choose one and leave as is
choice_idx, left_choices = choose_and_return_other(page)
# Divide the rest into two
next_div = invert_for_next(horizontal_vertical)
for panel in left_choices:
draw_two_shifted(page.get_child(panel), next_div)
if num_panels == 8:
# Possible layouts for 8 panels
types = ["fourfourxtwoeq", "fourfourxtwouneq",
"threethreethreetwo", "threefourtwotwo",
"threethreefourone"]
if type_choice is None:
type_choice = np.random.choice(types)
# Draw 4 rectangles
# equal or uneqal 4-4x2
if type_choice == types[0] or type_choice == types[1]:
# panels = draw_n_shifted(4, *coords, "h")
draw_n(4, page, "h")
# Equal
if type_choice == "fourfourxtwoeq":
shift_min = 25
shift_max = 75
shift = np.random.randint(shift_min, shift_max)
set_shift = shift/100
# Unequal
else:
set_shift = None
                # Divide each into two
for panel in page.children:
draw_two_shifted(panel, "v", shift=set_shift)
# Where three rectangles need to be drawn
if type_choice in types[2:]:
draw_n(3, page, "h")
# Draw 3 rectangles then
if type_choice == "threethreethreetwo":
# Choose one and divide it into two
choice_idx, left_choices = choose_and_return_other(page)
choice = page.get_child(choice_idx)
draw_two_shifted(choice, "v")
# Divide the rest into 3
for panel in left_choices:
# Some issue with the function calls and seeding
n = 3
shifts = []
choice_max = round((100/n)*1.5)
choice_min = round((100/n)*0.5)
for i in range(0, n):
shift_choice = np.random.randint(
choice_min,
choice_max
)
choice_max = choice_max + ((100/n) - shift_choice)
shifts.append(shift_choice)
to_add_or_remove = (100 - sum(shifts))/len(shifts)
normalized_shifts = []
for shift in shifts:
new_shift = shift + to_add_or_remove
normalized_shifts.append(new_shift/100)
draw_n_shifted(3,
page.get_child(panel),
"v",
shifts=normalized_shifts
)
# Draw 3 rectangles then
elif type_choice == "threefourtwotwo":
                # Choose one and divide it into 4
choice_idx, left_choices = choose_and_return_other(page)
choice = page.get_child(choice_idx)
draw_n_shifted(4, choice, "v")
for panel in left_choices:
draw_two_shifted(page.get_child(panel), "v")
# Draw 3 3-4-1 - H
# Draw three rectangles then
elif type_choice == "threethreefourone":
# Choose two and leave one as is
choice_idx, left_choices = choose_and_return_other(page)
choice = page.get_child(choice_idx)
other_idx = np.random.choice(left_choices)
other = page.get_child(other_idx)
# Divide one into 3 rectangles
draw_n_shifted(3, choice, "v")
# Some issue with the function calls and seeding
n = 4
shifts = []
choice_max = round((100/n)*1.5)
choice_min = round((100/n)*0.5)
for i in range(0, n):
shift_choice = np.random.randint(
choice_min,
choice_max
)
choice_max = choice_max + ((100/n) - shift_choice)
shifts.append(shift_choice)
to_add_or_remove = (100 - sum(shifts))/len(shifts)
normalized_shifts = []
for shift in shifts:
new_shift = shift + to_add_or_remove
normalized_shifts.append(new_shift/100)
# Divide the other into 4 rectangles
draw_n_shifted(4, other, "v", shifts=normalized_shifts)
return page
|
2503a2e911f877b357c408665f49a385026721f4
| 6,935
|
def uri2dict(uri):
"""Take a license uri and convert it into a dictionary of values."""
if uri.startswith(LICENSES_BASE) and uri.endswith('/'):
base = LICENSES_BASE
license_info = {}
raw_info = uri[len(base):]
raw_info = raw_info.rstrip('/')
info_list = raw_info.split('/')
        if len(info_list) not in (1, 2, 3):
            raise InvalidURIError("Invalid Creative Commons URI: <%s>" % uri)
retval = dict( code=info_list[0] )
if len(info_list) > 1:
retval['version'] = info_list[1]
if len(info_list) > 2:
retval['jurisdiction'] = info_list[2]
# XXX perform any validation on the dict produced?
return retval
elif uri.startswith(CC0_BASE) and uri.endswith('/'):
base = CC0_BASE
retval = {'code': 'CC0', 'jurisdiction': None}
retval['version'] = uri.rstrip('/').split('/')[-1]
return retval
elif uri.startswith(PUBLICDOMAIN_MARK_BASE) and uri.endswith('/'):
base = PUBLICDOMAIN_MARK_BASE
retval = {'code': 'mark', 'jurisdiction': None}
retval['version'] = uri.rstrip('/').split('/')[-1]
return retval
else:
        raise InvalidURIError("Invalid Creative Commons URI: <%s>" % uri)
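# Usage sketch, assuming (this is an assumption; the constants are not shown here)
# LICENSES_BASE = "http://creativecommons.org/licenses/":
#   uri2dict("http://creativecommons.org/licenses/by-sa/3.0/us/")
#   -> {'code': 'by-sa', 'version': '3.0', 'jurisdiction': 'us'}
#   uri2dict("http://creativecommons.org/licenses/by/4.0/")
#   -> {'code': 'by', 'version': '4.0'}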
|
1f2ccdc52b1dc3424b7554857a87f85a02ea1dbd
| 6,936
|
import re
def test_clean_str(text, language='english'):
"""
    Method to pre-process a text for training word embeddings.
    This follows the preprocessing script posted by Sebastian Ruder: https://s3.amazonaws.com/aylien-main/data/multilingual-embeddings/preprocess.py
    and is used in this paper: https://arxiv.org/pdf/1609.02745.pdf
    Cleans an input string and prepares it for tokenization.
    :type text: unicode
    :param text: input text
    :return: the cleaned input string
    """
text = text.lower()
# replace all numbers with 0
text = re.sub(r"[-+]?[-/.\d]*[\d]+[:,.\d]*", ' 0 ', text)
# English-specific pre-processing
if language == 'english':
text = re.sub(r"\'s", " \'s", text)
text = re.sub(r"\'ve", " \'ve", text)
text = re.sub(r"n\'t", " n\'t", text)
text = re.sub(r"\'re", " \'re", text)
text = re.sub(r"\'d", " \'d", text)
text = re.sub(r"\'ll", " \'ll", text)
elif language == 'french':
# French-specific pre-processing
text = re.sub(r"c\'", " c\' ", text)
text = re.sub(r"l\'", " l\' ", text)
text = re.sub(r"j\'", " j\' ", text)
text = re.sub(r"d\'", " d\' ", text)
text = re.sub(r"s\'", " s\' ", text)
text = re.sub(r"n\'", " n\' ", text)
text = re.sub(r"m\'", " m\' ", text)
text = re.sub(r"qu\'", " qu\' ", text)
elif language == 'spanish':
# Spanish-specific pre-processing
text = re.sub(r"¡", " ", text)
elif language == 'chinese':
pass
text = re.sub(r'[,:;\.\(\)-/"<>]', " ", text)
# separate exclamation marks and question marks
text = re.sub(r"!+", " ! ", text)
text = re.sub(r"\?+", " ? ", text)
text = re.sub(r"\s+", " ", text)
return text.strip()
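# Usage sketch: numbers collapse to " 0 ", English clitics are split off, and
# punctuation is stripped.
print(test_clean_str("I can't do it, 2023!"))  # expected, roughly: "i ca n't do it 0 !"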
|
683f6d27e7486990d0b2a11dd5aeb78f2c1bab07
| 6,937
|
from ..loghelper import run_cmd
from ..loghelper import noLOG
import sys
import os
def run_venv_script(venv, script, fLOG=None,
file=False, is_cmd=False,
skip_err_if=None, platform=None,
**kwargs): # pragma: no cover
"""
    Runs a script on a virtual environment (the script should be simple).
@param venv virtual environment
@param script script as a string (not a file)
@param fLOG logging function
@param file is script a file or a string to execute
@param is_cmd if True, script is a command line to run (as a list) for python executable
@param skip_err_if do not pay attention to standard error if this string was found in standard output
@param platform platform (``sys.platform`` by default)
    @param      kwargs          other arguments for function @see fn run_cmd.
@return output
The function does not work from a virtual environment.
"""
if fLOG is None:
fLOG = noLOG
def filter_err(err):
lis = err.split("\n")
lines = []
for li in lis:
if "missing dependencies" in li:
continue
if "' misses '" in li:
continue
lines.append(li)
return "\n".join(lines).strip(" \r\n\t")
if is_virtual_environment():
raise NotImplementedErrorFromVirtualEnvironment()
if platform is None:
platform = sys.platform
if platform.startswith("win"):
exe = os.path.join(venv, "Scripts", "python")
else:
exe = os.path.join(venv, "bin", "python")
if is_cmd:
cmd = " ".join([exe] + script)
out, err = run_cmd(cmd, wait=True, fLOG=fLOG, **kwargs)
err = filter_err(err)
if len(err) > 0 and (skip_err_if is None or skip_err_if not in out):
raise VirtualEnvError(
"unable to run cmd at {2}\n--CMD--\n{3}\n--OUT--\n{0}\n[pyqerror]"
"\n{1}".format(out, err, venv, cmd))
return out
else:
script = ";".join(script.split("\n"))
if file:
if not os.path.exists(script):
raise FileNotFoundError(script)
cmd = " ".join([exe, "-u", '"{0}"'.format(script)])
else:
cmd = " ".join([exe, "-u", "-c", '"{0}"'.format(script)])
out, err = run_cmd(cmd, wait=True, fLOG=fLOG, **kwargs)
err = filter_err(err)
if len(err) > 0:
raise VirtualEnvError(
"Unable to run script at {2}\n--CMD--\n{3}\n--OUT--\n{0}\n"
"[pyqerror]\n{1}".format(out, err, venv, cmd))
return out
|
5f0ae63a2f9ee4a5c666e679141c8fb68e63896b
| 6,938
|
def calc_iou(boxes1, boxes2, scope='iou'):
"""calculate ious
Args:
boxes1: 5-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4] ====> (x_center, y_center, w, h)
boxes2: 5-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4] ===> (x_center, y_center, w, h)
Return:
iou: 4-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
"""
with tf.variable_scope(scope):
# transform (x_center, y_center, w, h) to (x1, y1, x2, y2)
boxes1_t = tf.stack([boxes1[..., 0] - boxes1[..., 2] / 2.0,
boxes1[..., 1] - boxes1[..., 3] / 2.0,
boxes1[..., 0] + boxes1[..., 2] / 2.0,
boxes1[..., 1] + boxes1[..., 3] / 2.0],
axis=-1)
boxes2_t = tf.stack([boxes2[..., 0] - boxes2[..., 2] / 2.0,
boxes2[..., 1] - boxes2[..., 3] / 2.0,
boxes2[..., 0] + boxes2[..., 2] / 2.0,
boxes2[..., 1] + boxes2[..., 3] / 2.0],
axis=-1)
# calculate the left up point & right down point
lu = tf.maximum(boxes1_t[..., :2], boxes2_t[..., :2])
rd = tf.minimum(boxes1_t[..., 2:], boxes2_t[..., 2:])
# intersection
intersection = tf.maximum(0.0, rd - lu)
inter_square = intersection[..., 0] * intersection[..., 1]
# calculate the boxs1 square and boxs2 square
square1 = boxes1[..., 2] * boxes1[..., 3]
square2 = boxes2[..., 2] * boxes2[..., 3]
union_square = tf.maximum(square1 + square2 - inter_square, 1e-10)
return tf.clip_by_value(inter_square / union_square, 0.0, 1.0)
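import numpy as np

# A NumPy restatement of the same IoU arithmetic for a single pair of
# (x_center, y_center, w, h) boxes; a sanity-check sketch, not the TF graph code above.
def iou_single_pair(box1, box2):
    b1 = np.array([box1[0] - box1[2] / 2.0, box1[1] - box1[3] / 2.0,
                   box1[0] + box1[2] / 2.0, box1[1] + box1[3] / 2.0])
    b2 = np.array([box2[0] - box2[2] / 2.0, box2[1] - box2[3] / 2.0,
                   box2[0] + box2[2] / 2.0, box2[1] + box2[3] / 2.0])
    lu = np.maximum(b1[:2], b2[:2])   # upper-left corner of the intersection
    rd = np.minimum(b1[2:], b2[2:])   # lower-right corner of the intersection
    inter = np.prod(np.maximum(0.0, rd - lu))
    union = box1[2] * box1[3] + box2[2] * box2[3] - inter
    return float(np.clip(inter / max(union, 1e-10), 0.0, 1.0))

# Two unit boxes whose centres are half a width apart overlap by 0.5,
# so IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
# iou_single_pair((0.5, 0.5, 1.0, 1.0), (1.0, 0.5, 1.0, 1.0)) -> 0.333...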
|
e5714cf74be851b6b6003458c44e3308308907a3
| 6,939
|
def not_before(cert):
"""
Gets the naive datetime of the certificates 'not_before' field.
This field denotes the first date in time which the given certificate
is valid.
:param cert:
:return: Datetime
"""
return cert.not_valid_before
|
e5e269e67de3059fe0ddfa9a35fb13e7f124d798
| 6,940
|
def get_data_from_dict_for_2pttype(type1,type2,datadict):
"""
Given strings identifying the type of 2pt data in a fits file
and a dictionary of 2pt data (i.e. the blinding factors),
returns the data from the dictionary matching those types.
"""
#spectra type codes in fits file, under hdutable.header['quant1'] and quant2
galaxy_position_fourier = "GPF"
galaxy_shear_emode_fourier = "GEF"
galaxy_shear_bmode_fourier = "GBF"
galaxy_position_real = "GPR"
galaxy_shear_plus_real = "G+R"
galaxy_shear_minus_real = "G-R"
if type1==galaxy_position_fourier and type2 == galaxy_position_fourier:
yfromdict=datadict['gal_gal_cl']
xfromdict=datadict['gal_gal_l']
    elif (type1==galaxy_shear_emode_fourier and type2 == galaxy_position_fourier) or (type2==galaxy_shear_emode_fourier and type1 == galaxy_position_fourier):
yfromdict=datadict['gal_shear_cl']
xfromdict=datadict['gal_shear_l']
elif (type1==galaxy_shear_emode_fourier and type2 == galaxy_shear_emode_fourier):
yfromdict=datadict['shear_shear_cl']
xfromdict=datadict['shear_shear_l']
elif type1==galaxy_position_real and type2 == galaxy_position_real:
yfromdict=datadict['gal_gal_xi']
xfromdict=datadict['gal_gal_theta']
elif (type1==galaxy_shear_plus_real and type2 == galaxy_position_real) or (type2==galaxy_shear_plus_real and type1 == galaxy_position_real):
yfromdict=datadict['gal_shear_xi']
xfromdict=datadict['gal_shear_theta']
elif (type1==galaxy_shear_plus_real and type2 == galaxy_shear_plus_real):
yfromdict=datadict['shear_shear_xip']
xfromdict=datadict['shear_shear_theta']
elif (type1==galaxy_shear_minus_real and type2 == galaxy_shear_minus_real):
yfromdict=datadict['shear_shear_xim']
xfromdict=datadict['shear_shear_theta']
else:
print "Spectra type {0:s} - {1:s} not recognized.".format(type1,type2)
return xfromdict,yfromdict
|
d8656e6274dd8fb4001d477572220f2c51c08e01
| 6,941
|
def simple_unweighted_distance(g, source, return_as_dicts=True):
"""Returns the unweighted shortest path length between nodes and source."""
dist_dict = nx.shortest_path_length(g, source)
if return_as_dicts:
return dist_dict
else:
return np.fromiter((dist_dict[ni] for ni in g), dtype=int)
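import networkx as nx

# Usage sketch: on a 4-node path graph, hop distances from node 0 are 0..3.
g = nx.path_graph(4)
print(simple_unweighted_distance(g, 0))                         # {0: 0, 1: 1, 2: 2, 3: 3}
print(simple_unweighted_distance(g, 0, return_as_dicts=False))  # [0 1 2 3] as an int array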
|
d82742ac88f26db8296dec9d28794d3e6d60eec7
| 6,942
|
def A070939(i: int = 0) -> int:
"""Length of binary representation of n."""
return len(f"{i:b}")
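# Usage sketch: OEIS A070939 is the bit length of n (with a(0) = 1).
bit_lengths = [A070939(n) for n in range(9)]  # [1, 1, 2, 2, 3, 3, 3, 3, 4]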
|
31b12e493645c3bdf7e636a48ceccff5d9ecc492
| 6,943
|
import time
def feed_pump(pin: int, water_supply_time: int=FEED_PUMP_DEFAULT_TIME) -> bool:
"""
feed water
Parameters
----------
pin : int
target gpio (BCM)
water_supply_time : int
water feeding time
Returns
-------
bool
Was water feeding successful ?
"""
is_running = gpio_read(pin)
if is_running:
return False
# pump on
gpio_write(pin, 1)
try:
publish_device_state()
    except Exception:
gpio_write(pin, 0)
return False
time.sleep(water_supply_time)
# pump off
gpio_write(pin, 0)
publish_device_state()
return True
|
c45b1775991a4914116468961ae979dae71f6caf
| 6,944
|
def app_nav(context):
"""Renders the main nav, topnav on desktop, sidenav on mobile"""
url_name = get_url_name(context)
namespace = get_namespace(context)
cache_id = "{}:{}x".format(context['request'].user.username, context.request.path)
cache_key = make_template_fragment_key('app_nav', [cache_id])
context['app_nav_cache_id'] = cache_id
# Only bother doing this work if we don't have a cached template render
if not cache.get(cache_key):
# Build an app list for the page and user
app_list = []
for app in APP_LIST:
# Check we have access
if app['access'](context.request.user):
# Set active flag if namespace matches
app['active'] = (app['app'] == namespace)
# Add to returned list
app_list.append(app)
context['app_list'] = app_list
context['app'] = namespace
if namespace:
context['page_title'] = get_page_title(get_module_nav_list(namespace, url_name, context.request.user), context)
return context
|
8e9cc5428b9af22bad13c6454f462d585a04c005
| 6,945
|
def centre_to_zeroes(cartesian_point, centre_point):
"""Converts centre-based coordinates to be in relation to the (0,0) point.
PIL likes to do things based on (0,0), and in this project I'd like to keep
the origin at the centre point.
Parameters
----------
cartesian_point : (numeric)
x, y coordinates in terms of the centre
centre_point : (numeric)
x, y coordinates of the centre
"""
x = cartesian_point[0] + centre_point[0]
y = centre_point[1] - cartesian_point[1]
return x, y
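# Usage sketch: with the centre at (100, 100), the centre-based point (10, 5)
# maps to PIL-style coordinates (110, 95) because the y axis is flipped.
assert centre_to_zeroes((10, 5), (100, 100)) == (110, 95)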
|
f0ddd632650127e3bb1ed766191950ccf7f06d87
| 6,946
|
def get_all_stack_names(cf_client=boto3.client("cloudformation")):
"""
Get all stack names
Args:
cf_client: boto3 CF client
Returns: list of StackName
"""
LOGGER.info("Attempting to retrieve stack information")
response = cf_client.describe_stacks()
LOGGER.info("Retrieved stack information: %s", response)
return [stack["StackName"] for stack in response["Stacks"]]
|
47a36e15651495cc0b5c80e642bb5154640d6b7d
| 6,947
|
import calendar
def match_date(date, date_pattern):
"""
Match a specific date, a four-tuple with no special values, with a date
pattern, four-tuple possibly having special values.
"""
# unpack the date and pattern
year, month, day, day_of_week = date
year_p, month_p, day_p, day_of_week_p = date_pattern
# check the year
if year_p == 255:
# any year
pass
elif year != year_p:
# specific year
return False
# check the month
if month_p == 255:
# any month
pass
elif month_p == 13:
# odd months
if (month % 2) == 0:
return False
elif month_p == 14:
# even months
if (month % 2) == 1:
return False
elif month != month_p:
# specific month
return False
# check the day
if day_p == 255:
# any day
pass
elif day_p == 32:
# last day of the month
last_day = calendar.monthrange(year + 1900, month)[1]
if day != last_day:
return False
elif day_p == 33:
# odd days of the month
if (day % 2) == 0:
return False
elif day_p == 34:
# even days of the month
if (day % 2) == 1:
return False
elif day != day_p:
# specific day
return False
# check the day of week
if day_of_week_p == 255:
# any day of the week
pass
elif day_of_week != day_of_week_p:
# specific day of the week
return False
# all tests pass
return True
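# Usage sketch: dates are (year, month, day, day_of_week) tuples with the year
# stored as (calendar year - 1900); 255 in a pattern field means "any", and a
# pattern day of 32 matches only the last day of the month.
last_day_of_month = (255, 255, 32, 255)
print(match_date((121, 6, 30, 3), last_day_of_month))  # True  (30 June 2021)
print(match_date((121, 6, 29, 2), last_day_of_month))  # False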
|
d794cf211589840697007ecec7cd9e3ba0655b0f
| 6,948
|
def get_heating_features(df, fine_grained_HP_types=False):
"""Get heating type category based on HEATING_TYPE category.
heating_system: heat pump, boiler, community scheme etc.
    heating_source: oil, gas, LPG, electric.
Parameters
----------
df : pandas.DataFrame
Dataframe that is updated with heating features.
fine_grained_HP_types : bool, default=False
If True, get different heat pump types (air sourced, ground sourced etc.).
If False, return "heat pump" as heating type category.
Return
---------
df : pandas.DataFrame
Updated dataframe with heating system and source."""
# Collections
heating_system_types = []
heating_source_types = []
# Get heating types
heating_types = df["MAINHEAT_DESCRIPTION"]
# Get specific and general heating category for each entry
for heating in heating_types:
# Set default value
system_type = "unknown"
source_type = "unknown"
# If heating value exists
if not (pd.isnull(heating) and isinstance(heating, float)):
# Lowercase
heating = heating.lower()
other_heating_system = [
("boiler and radiator" in heating),
("boiler & radiator" in heating),
("boiler and underfloor" in heating),
("boiler & underfloor" in heating),
("community scheme" in heating),
("heater" in heating), # not specified heater
]
# Different heat pump types
# --------------------------
if "ground source heat pump" in heating:
system_type = "ground source heat pump"
source_type = "electric"
elif "air source heat pump" in heating:
system_type = "air source heat pump"
source_type = "electric"
elif "water source heat pump" in heating:
system_type = "water source heat pump"
source_type = "electric"
elif "heat pump" in heating:
system_type = "heat pump"
source_type = "electric"
# Electric heaters
# --------------------------
elif "electric storage heaters" in heating:
system_type = "storage heater"
source_type = "electric"
elif "electric underfloor heating" in heating:
system_type = "underfloor heating"
source_type = "electric"
# Warm air
# --------------------------
elif "warm air" in heating:
system_type = "warm air"
source_type = "electric"
# Boiler and radiator / Boiler and underfloor / Community scheme / Heater (unspecified)
# --------------------------
elif any(other_heating_system):
# Set heating system dict
heating_system_dict = {
"boiler and radiator": "boiler and radiator",
"boiler & radiator": "boiler and radiator",
"boiler and underfloor": "boiler and underfloor",
"boiler & underfloor": "boiler and underfloor",
"community scheme": "community scheme",
"heater": "heater", # not specified heater (otherwise handeld above)
}
# Set heating source dict
heating_source_dict = {
"gas": "gas",
", oil": "oil", # with preceeding comma (!= "boiler")
"lpg": "LPG",
"electric": "electric",
}
# If heating system word is found, save respective system type
for word, system in heating_system_dict.items():
if word in heating:
system_type = system
# If heating source word is found, save respective source type
for word, source in heating_source_dict.items():
if word in heating:
source_type = source
# Don't differentiate between heat pump types
if not fine_grained_HP_types:
if "heat pump" in system_type:
system_type = "heat pump"
# Save heating system type and source type
heating_system_types.append(system_type)
heating_source_types.append(source_type)
# Add heating system and source to df
df["HEATING_SYSTEM"] = heating_system_types
df["HEATING_SOURCE"] = heating_source_types
return df
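# Hedged example on a tiny frame; the MAINHEAT_DESCRIPTION strings are made up
# but follow the EPC-style wording the parser looks for (uses the pandas import above).
_demo = pd.DataFrame(
    {"MAINHEAT_DESCRIPTION": ["Boiler and radiators, mains gas", "Air source heat pump"]}
)
_demo = get_heating_features(_demo)
# _demo["HEATING_SYSTEM"] -> ["boiler and radiator", "heat pump"]
# _demo["HEATING_SOURCE"] -> ["gas", "electric"]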
|
5707975a63aca4778e8dbdd70670e317c777c998
| 6,949
|
from scipy.integrate import odeint
def integrate_eom(initial_conditions, t_span, design_params, SRM1, SRM2):
"""Numerically integrates the zero gravity equations of motion.
Args:
initial_conditions (np.array()): Array of initial conditions. Typically set
to an array of zeros.
t_span (np.array()): Time vector (s) over which to integrate the equations
of motions.
design_params (np.array()): Array of design parameters.
[r1, r2, d1, d2, Ixx, Iyy, Izz] where r1 and r2 are the radial locations of
the solid rocket motors (m), d1 and d2 are the longitudinal locations of the two
        motors (m), and Ixx, Iyy, and Izz are the inertia values (kg-m^2).
SRM1 (SolidRocketMotor()): First solid rocket motor organized into a class.
SRM2 (SolidRocketMotor()): Second solid rocket motor organized into a class.
Returns:
(np.array()): Numerical solutions for wx, wy, wz, psi, theta, and phi.
"""
return odeint(euler_eom, initial_conditions, t_span, args=(design_params, SRM1, SRM2))
|
07574c775268798371425b837b20706ac9af5f52
| 6,950
|
def activation_sparse(net, transformer, images_files):
"""
Activation bottom/top blob sparse analyze
Args:
net: the instance of Caffe inference
transformer:
images_files: sparse dataset
Returns:
none
"""
print("\nAnalyze the sparse info of the Activation:")
# run float32 inference on sparse dataset to analyze activations
    for i, image in enumerate(images_files):
net_forward(net, image, transformer)
# analyze bottom/top blob
for layer in sparse_layer_lists:
blob = net.blobs[layer.bottom_blob_name].data[0].flatten()
layer.analyze_bottom_blob(blob)
blob = net.blobs[layer.top_blob_name].data[0].flatten()
layer.analyze_top_blob(blob)
# calculate top blob and flag the sparse channels in every layers
for layer in sparse_layer_lists:
layer.sparse_bottom_blob()
layer.sparse_top_blob()
return None
|
da138764d002e84bdee306e15b6c8524b223bcbc
| 6,951
|
from omegaconf import OmegaConf
def cfg_load(filename):
"""Load a config yaml file."""
return omegaconf2namespace(OmegaConf.load(filename))
|
2aa5f808f89d1f654cd95cd6a1c8f903d4baade6
| 6,952
|
def char_to_num(x: str) -> int:
"""Converts a character to a number
:param x: Character
:type x: str
:return: Corresponding number
:rtype: int
"""
total = 0
for i in range(len(x)):
total += (ord(x[::-1][i]) - 64) * (26 ** i)
return total
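# Works like spreadsheet column labels (assumes uppercase A-Z input):
assert char_to_num("A") == 1
assert char_to_num("AA") == 27
assert char_to_num("AZ") == 52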
|
f66ee13d696ec1872fbc2a9960362456a5c4cbe9
| 6,953
|
from typing import Callable
import time
def time_it(f: Callable):
"""
Timer decorator: shows how long execution of function took.
:param f: function to measure
    :return: the wrapped function, which logs the runtime of each call
"""
def timed(*args, **kwargs):
t1 = time.time()
res = f(*args, **kwargs)
t2 = time.time()
log("\'", f.__name__, "\' took ", round(t2 - t1, 3), " seconds to complete.", sep="")
return res
return timed
|
bc7321721afe9dc9b4a2861b2c849e6a5d2c309a
| 6,954
|
def has_prefix(sub_s, dictionary):
    """
    :param sub_s: (str) A substring that is constructed by neighboring letters on a 4x4 square grid
    :param dictionary: (iterable) Collection of valid words
    :return: (bool) Whether any word in the dictionary starts with sub_s
    """
    s = ''.join(sub_s)
    for word in dictionary:
        if word.startswith(s):
            return True
    return False
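# Small usage sketch with an in-memory word list standing in for the real dictionary:
_words = ["apple", "apply", "banana"]
assert has_prefix("app", _words)
assert not has_prefix("zzz", _words)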
|
b45f3bf7ed699bc215d1670f35ebc0f15b7ec0ff
| 6,955
|
import os
def search_paths_for_executables(*path_hints):
"""Given a list of path hints returns a list of paths where
to search for an executable.
Args:
*path_hints (list of paths): list of paths taken into
consideration for a search
Returns:
A list containing the real path of every existing directory
in `path_hints` and its `bin` subdirectory if it exists.
"""
executable_paths = []
for path in path_hints:
if not os.path.isdir(path):
continue
path = os.path.abspath(path)
executable_paths.append(path)
bin_dir = os.path.join(path, 'bin')
if os.path.isdir(bin_dir):
executable_paths.append(bin_dir)
return executable_paths
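# Hedged example: on most Unix systems /usr exists and contains bin, so both are
# returned; hints that are not directories are silently dropped.
paths = search_paths_for_executables('/usr', '/no/such/dir')
# e.g. ['/usr', '/usr/bin']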
|
f6546fba4c3ac89b975d2c0757064edae3dca340
| 6,956
|
import tensorflow as tf
def tf_center_crop(images, sides):
"""Crops central region"""
images_shape = tf.shape(images)
top = (images_shape[1] - sides[0]) // 2
left = (images_shape[2] - sides[1]) // 2
return tf.image.crop_to_bounding_box(images, top, left, sides[0], sides[1])
|
1b1c8bcab55164a04b0ac6109a7b91d084f55b7b
| 6,957
|
from datetime import datetime
import pytz
def convert_timezone(time_in: datetime) -> datetime:
    """
    Convert a system-generated UTC datetime into local time.
    :param time_in: UTC time as a datetime.datetime object
    :return: the same moment converted to the local time zone (settings.TIME_ZONE), still a datetime.datetime object
    """
time_utc = time_in.replace(tzinfo=pytz.timezone("UTC"))
time_local = time_utc.astimezone(pytz.timezone(settings.TIME_ZONE))
return time_local
|
3843aa62a5ff29fd629776e69c52cd95c51fac5d
| 6,958
|
from typing import Any
def convert_bool(
key: str, val: bool, attr_type: bool, attr: dict[str, Any] = {}, cdata: bool = False
) -> str:
"""Converts a boolean into an XML element"""
if DEBUGMODE: # pragma: no cover
LOG.info(
f'Inside convert_bool(): key="{str(key)}", val="{str(val)}", type(val) is: "{type(val).__name__}"'
)
key, attr = make_valid_xml_name(key, attr)
if attr_type:
attr["type"] = get_xml_type(val)
attrstring = make_attrstring(attr)
return f"<{key}{attrstring}>{str(val).lower()}</{key}>"
|
2ed2a92506189803cb2854bff9041c492ff479dc
| 6,959
|
from IPython import display
import os
def plot_model(model,
to_file='model.png',
show_shapes=False,
show_dtype=False,
show_layer_names=True,
rankdir='TB',
expand_nested=False,
dpi=96,
layer_range=None,
show_layer_activations=False):
"""Converts a Keras model to dot format and save to a file.
Example:
```python
input = tf.keras.Input(shape=(100,), dtype='int32', name='input')
x = tf.keras.layers.Embedding(
output_dim=512, input_dim=10000, input_length=100)(input)
x = tf.keras.layers.LSTM(32)(x)
x = tf.keras.layers.Dense(64, activation='relu')(x)
x = tf.keras.layers.Dense(64, activation='relu')(x)
x = tf.keras.layers.Dense(64, activation='relu')(x)
output = tf.keras.layers.Dense(1, activation='sigmoid', name='output')(x)
model = tf.keras.Model(inputs=[input], outputs=[output])
dot_img_file = '/tmp/model_1.png'
tf.keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
```
Args:
model: A Keras model instance
to_file: File name of the plot image.
show_shapes: whether to display shape information.
show_dtype: whether to display layer dtypes.
show_layer_names: whether to display layer names.
rankdir: `rankdir` argument passed to PyDot,
a string specifying the format of the plot: 'TB' creates a vertical
plot; 'LR' creates a horizontal plot.
expand_nested: Whether to expand nested models into clusters.
dpi: Dots per inch.
layer_range: input of `list` containing two `str` items, which is the
starting layer name and ending layer name (both inclusive) indicating the
range of layers for which the plot will be generated. It also accepts
regex patterns instead of exact name. In such case, start predicate will
be the first element it matches to `layer_range[0]` and the end predicate
will be the last element it matches to `layer_range[1]`. By default `None`
which considers all layers of model. Note that you must pass range such
that the resultant subgraph must be complete.
show_layer_activations: Display layer activations (only for layers that
have an `activation` property).
Returns:
A Jupyter notebook Image object if Jupyter is installed.
This enables in-line display of the model plots in notebooks.
"""
dot = model_to_dot(
model,
show_shapes=show_shapes,
show_dtype=show_dtype,
show_layer_names=show_layer_names,
rankdir=rankdir,
expand_nested=expand_nested,
dpi=dpi,
layer_range=layer_range,
show_layer_activations=show_layer_activations)
to_file = path_to_string(to_file)
if dot is None:
return
_, extension = os.path.splitext(to_file)
if not extension:
extension = 'png'
else:
extension = extension[1:]
# Save image to disk.
dot.write(to_file, format=extension)
# Return the image as a Jupyter Image object, to be displayed in-line.
# Note that we cannot easily detect whether the code is running in a
# notebook, and thus we always return the Image if Jupyter is available.
if extension != 'pdf':
try:
return display.Image(filename=to_file)
except ImportError:
pass
|
772032a8e3117ae6128b5ce957b1e59bea79866b
| 6,960
|
import numpy
from os import path
import warnings
def catalog_info(EPIC_ID=None, TIC_ID=None, KIC_ID=None):
"""Takes EPIC ID, returns limb darkening parameters u (linear) and
a,b (quadratic), and stellar parameters. Values are pulled for minimum
absolute deviation between given/catalog Teff and logg. Data are from:
- K2 Ecliptic Plane Input Catalog, Huber+ 2016, 2016ApJS..224....2H
- New limb-darkening coefficients, Claret+ 2012, 2013,
2012A&A...546A..14C, 2013A&A...552A..16C"""
if (EPIC_ID is None) and (TIC_ID is None) and (KIC_ID is None):
raise ValueError("No ID was given")
if (EPIC_ID is not None) and (TIC_ID is not None):
raise ValueError("Only one ID allowed")
if (EPIC_ID is not None) and (KIC_ID is not None):
raise ValueError("Only one ID allowed")
if (TIC_ID is not None) and (KIC_ID is not None):
raise ValueError("Only one ID allowed")
# KOI CASE (Kepler K1)
if KIC_ID is not None:
Teff, logg, radius, radius_min, radius_max, mass, mass_min, mass_max = catalog_info_KIC(
KIC_ID
)
# EPIC CASE (Kepler K2)
if EPIC_ID is not None:
Teff, logg, radius, radius_min, radius_max, mass, mass_min, mass_max = catalog_info_EPIC(
EPIC_ID
)
# TESS CASE
if TIC_ID is not None:
Teff, logg, radius, radius_min, radius_max, mass, mass_min, mass_max = catalog_info_TIC(
TIC_ID
)
ld = numpy.genfromtxt(
path.join(tls_constants.resources_dir, "ld_claret_tess.csv"),
skip_header=1,
delimiter=",",
dtype="f8, int32, f8, f8",
names=["logg", "Teff", "a", "b"],
)
else: # Limb darkening is the same for K1 (KIC) and K2 (EPIC)
ld = numpy.genfromtxt(
path.join(tls_constants.resources_dir, "JAA546A14limb1-4.csv"),
skip_header=1,
delimiter=",",
dtype="f8, int32, f8, f8, f8",
names=["logg", "Teff", "u", "a", "b"],
)
if logg is None:
logg = 4
warnings.warn("No logg in catalog. Proceeding with logg=4")
if Teff is None:
Teff = 6000
warnings.warn("No Teff in catalog. Proceeding with Teff=6000")
"""From here on, K2 and TESS catalogs work the same:
- Take Teff from star catalog and find nearest entry in LD catalog
- Same for logg, but only for the Teff values returned before
- Return stellar parameters and best-match LD
"""
nearest_Teff = ld["Teff"][(numpy.abs(ld["Teff"] - Teff)).argmin()]
idx_all_Teffs = numpy.where(ld["Teff"] == nearest_Teff)
relevant_lds = numpy.copy(ld[idx_all_Teffs])
idx_nearest = numpy.abs(relevant_lds["logg"] - logg).argmin()
a = relevant_lds["a"][idx_nearest]
b = relevant_lds["b"][idx_nearest]
mass = numpy.array(mass)
mass_min = numpy.array(mass_min)
mass_max = numpy.array(mass_max)
radius = numpy.array(radius)
radius_min = numpy.array(radius_min)
radius_max = numpy.array(radius_max)
if mass == 0.0:
mass = numpy.nan
if mass_min == 0.0:
mass_min = numpy.nan
if mass_max == 0.0:
mass_max = numpy.nan
if radius == 0.0:
radius = numpy.nan
if radius_min == 0.0:
radius_min = numpy.nan
if radius_max == 0.0:
radius_max = numpy.nan
return ((a, b), mass, mass_min, mass_max, radius, radius_min, radius_max)
|
86583524d074fd93bf0a124a64bf26cb1e7e5d83
| 6,961
|
import six
import tensorflow as tf
def classifier_fn_from_tfhub(output_fields, inception_model, return_tensor=False):
"""Returns a function that can be as a classifier function.
Copied from tfgan but avoid loading the model each time calling _classifier_fn
Args:
output_fields: A string, list, or `None`. If present, assume the module
outputs a dictionary, and select this field.
inception_model: A model loaded from TFHub.
return_tensor: If `True`, return a single tensor instead of a dictionary.
Returns:
A one-argument function that takes an image Tensor and returns outputs.
"""
if isinstance(output_fields, six.string_types):
output_fields = [output_fields]
def _classifier_fn(images):
output = inception_model(images)
if output_fields is not None:
output = {x: output[x] for x in output_fields}
if return_tensor:
assert len(output) == 1
output = list(output.values())[0]
return tf.nest.map_structure(tf.compat.v1.layers.flatten, output)
return _classifier_fn
|
e7f54a4c46519465460cc0e97b0f6f12f91a98d4
| 6,962
|
import json
def get_rate_limit(client):
"""
Get the Github API rate limit current state for the used token
"""
query = '''query {
rateLimit {
limit
remaining
resetAt
}
}'''
response = client.execute(query)
json_response = json.loads(response)
return json_response['data']['rateLimit']
|
ec5f853014f25c841e71047da62ca41907b02e13
| 6,963
|
import functools
import pprint
def pret(f):
"""
Decorator which prints the result returned by `f`.
>>> @pret
... def f(x, y): return {'sum': x + y, 'prod': x * y}
>>> res = f(2, 3)
==> @pret(f) -- {'prod': 6, 'sum': 5}
"""
@functools.wraps(f)
def g(*args, **kwargs):
ret = f(*args, **kwargs)
_pdeco("pret", f.__name__, "{retstr}".format(
retstr=tstr(pprint.pformat(ret), 120, "<... truncated>"),
))
return ret
return g
|
fedb8cf19913042d0defef676db6b22715e8c572
| 6,964
|
from argparse import ArgumentParser, RawDescriptionHelpFormatter
def parse_arguments() -> tuple[str, str, bool]:
"""Return the command line arguments."""
current_version = get_version()
description = f"Release Quality-time. Current version is {current_version}."
epilog = """preconditions for release:
- the current folder is the release folder
- the current branch is master
- the workspace has no uncommitted changes
- the generated data model documentation is up-to-date
- the change log has an '[Unreleased]' header
- the change log contains no release candidates"""
parser = ArgumentParser(description=description, epilog=epilog, formatter_class=RawDescriptionHelpFormatter)
allowed_bumps_in_rc_mode = ["rc", "rc-major", "rc-minor", "rc-patch", "drop-rc"] # rc = release candidate
allowed_bumps = ["rc-patch", "rc-minor", "rc-major", "patch", "minor", "major"]
bumps = allowed_bumps_in_rc_mode if "rc" in current_version else allowed_bumps
parser.add_argument("bump", choices=bumps)
parser.add_argument(
"-c", "--check-preconditions-only", action="store_true", help="only check the preconditions and then exit"
)
arguments = parser.parse_args()
return current_version, arguments.bump, arguments.check_preconditions_only
|
7b58b2b3c99a4297bb12b714b289336cdbc75a5e
| 6,965
|
import os
def process_submission(problem_id: str, participant_id: str, file_type: str,
submission_file: InMemoryUploadedFile,
timestamp: str) -> STATUS_AND_OPT_ERROR_T:
"""
Function to process a new :class:`~judge.models.Submission` for a problem by a participant.
:param problem_id: Problem ID for the problem corresponding to the submission
:param participant_id: Participant ID
:param file_type: Submission file type
:param submission_file: Submission file
:param timestamp: Time at submission
:returns: A 2-tuple - 1st element indicating whether the processing has succeeded, and
2nd element providing a ``ValidationError`` if processing is unsuccessful.
"""
problem = models.Problem.objects.filter(code=problem_id)
if not problem.exists():
return (False,
ValidationError('Problem with code = {} not found'
.format(problem_id)))
problem = problem[0]
if file_type not in problem.file_exts.split(','):
return (False,
ValidationError({'file_type':
['Accepted file types: \"{}\"'
.format(', '.join(problem.file_exts.split(',')))]}))
participant = models.Person.objects.filter(email=participant_id.lower())
if not participant.exists():
return (False,
ValidationError('Person with email = {} not found'
.format(participant_id.lower())))
participant = participant[0]
try:
sub = problem.submission_set.create(participant=participant, file_type=file_type,
submission_file=submission_file, timestamp=timestamp)
sub.save()
# Catch any weird errors that might pop up during the creation
except Exception as other_err:
print_exc()
return (False, ValidationError(str(other_err)))
testcases = models.TestCase.objects.filter(problem=problem)
if not os.path.exists(os.path.join('content', 'tmp')):
os.makedirs(os.path.join('content', 'tmp'))
# NB: File structure here
# PROBLEM_ID
# SUBMISSION_ID
# FILE_FORMAT
# TIME_LIMIT
# MEMORY_LIMIT
# TESTCASE_1
# TESTCASE_2
# ....
with open(os.path.join('content', 'tmp', 'sub_run_' + str(sub.pk) + '.txt'), 'w') as f:
f.write('{}\n'.format(problem.pk))
f.write('{}\n'.format(sub.pk))
f.write('{}\n'.format(file_type))
f.write('{}\n'.format(int(problem.time_limit.total_seconds())))
f.write('{}\n'.format(problem.memory_limit))
for testcase in testcases:
f.write('{}\n'.format(testcase.pk))
try:
for testcase in testcases:
models.SubmissionTestCase.objects.create(submission=sub, testcase=testcase,
verdict='R', memory_taken=0,
time_taken=timedelta(seconds=0))
# Catch any weird errors that might pop up during the creation
except Exception as other_err:
print_exc()
return (False, ValidationError(other_err))
else:
return (True, None)
|
6104eef6f32ba68df05e64cd2cc869bde0fcb318
| 6,966
|
import sys
def eq_text_partially_marked(
ann_objs,
restrict_types=None,
ignore_types=None,
nested_types=None):
"""Searches for spans that match in string content but are not all
marked."""
# treat None and empty list uniformly
restrict_types = [] if restrict_types is None else restrict_types
ignore_types = [] if ignore_types is None else ignore_types
nested_types = [] if nested_types is None else nested_types
# TODO: check that constraints are properly applied
matches = SearchMatchSet("Text marked partially")
text_type_ann_map = _get_text_type_ann_map(
ann_objs, restrict_types, ignore_types, nested_types)
max_length_tagged = max([len(s) for s in text_type_ann_map] + [0])
# TODO: faster and less hacky way to detect missing annotations
text_untagged_map = {}
for ann_obj in ann_objs:
doctext = ann_obj.get_document_text()
# TODO: proper tokenization.
# NOTE: this will include space.
#tokens = re.split(r'(\s+)', doctext)
try:
tokens = _split_and_tokenize(doctext)
tokens = _split_tokens_more(tokens)
except BaseException:
# TODO: proper error handling
print("ERROR: failed tokenization in %s, skipping" % ann_obj._input_files[
0], file=sys.stderr)
continue
# document-specific map
offset_ann_map = _get_offset_ann_map([ann_obj])
# this one too
sentence_num = _get_offset_sentence_map(doctext)
start_offset = 0
for start in range(len(tokens)):
for end in range(start, len(tokens)):
s = "".join(tokens[start:end])
end_offset = start_offset + len(s)
if len(s) > max_length_tagged:
# can't hit longer strings, none tagged
break
if s not in text_type_ann_map:
# consistently untagged
continue
# Some matching is tagged; this is considered
# inconsistent (for this check) if the current span
# has no fully covering tagging. Note that type
# matching is not considered here.
start_spanning = offset_ann_map.get(start_offset, set())
# NOTE: -1 needed, see _get_offset_ann_map()
end_spanning = offset_ann_map.get(end_offset - 1, set())
if len(start_spanning & end_spanning) == 0:
if s not in text_untagged_map:
text_untagged_map[s] = []
text_untagged_map[s].append(
(ann_obj, start_offset, end_offset, s, sentence_num[start_offset]))
start_offset += len(tokens[start])
# form match objects, grouping by text
for text in text_untagged_map:
assert text in text_type_ann_map, "INTERNAL ERROR"
# collect tagged and untagged cases for "compressing" output
# in cases where one is much more common than the other
tagged = []
untagged = []
for type_ in text_type_ann_map[text]:
for ann_obj, ann in text_type_ann_map[text][type_]:
#matches.add_match(ann_obj, ann)
tagged.append((ann_obj, ann))
for ann_obj, start, end, s, snum in text_untagged_map[text]:
# TODO: need a clean, standard way of identifying a text span
# that does not involve an annotation; this is a bit of a hack
tm = TextMatch(start, end, s, snum)
#matches.add_match(ann_obj, tm)
untagged.append((ann_obj, tm))
# decide how to output depending on relative frequency
freq_ratio_cutoff = 3
cutoff_limit = 5
if (len(tagged) > freq_ratio_cutoff * len(untagged) and
len(tagged) > cutoff_limit):
# cut off all but cutoff_limit from tagged
for ann_obj, m in tagged[:cutoff_limit]:
matches.add_match(ann_obj, m)
for ann_obj, m in untagged:
matches.add_match(ann_obj, m)
print("(note: omitting %d instances of tagged '%s')" % (len(tagged) - cutoff_limit, text))
elif (len(untagged) > freq_ratio_cutoff * len(tagged) and
len(untagged) > cutoff_limit):
# cut off all but cutoff_limit from tagged
for ann_obj, m in tagged:
matches.add_match(ann_obj, m)
for ann_obj, m in untagged[:cutoff_limit]:
matches.add_match(ann_obj, m)
print("(note: omitting %d instances of untagged '%s')" % (len(untagged) - cutoff_limit, text))
else:
# include all
for ann_obj, m in tagged + untagged:
matches.add_match(ann_obj, m)
return matches
|
cf7a3b272f7e9de7812981642c20dbef5f31e895
| 6,967
|
def wait_for_status(status_key, status, get_client, object_id,
interval: tobiko.Seconds = None,
timeout: tobiko.Seconds = None,
error_ok=False, **kwargs):
"""Waits for an object to reach a specific status.
:param status_key: The key of the status field in the response.
Ex. provisioning_status
:param status: The status to wait for. Ex. "ACTIVE"
:param get_client: The tobiko client get method.
Ex. _client.get_loadbalancer
:param object_id: The id of the object to query.
:param interval: How often to check the status, in seconds.
:param timeout: The maximum time, in seconds, to check the status.
:param error_ok: When true, ERROR status will not raise an exception.
:raises TimeoutException: The object did not achieve the status or ERROR in
the check_timeout period.
:raises UnexpectedStatusException: The request returned an unexpected
response code.
"""
for attempt in tobiko.retry(timeout=timeout,
interval=interval,
default_timeout=(
CONF.tobiko.octavia.check_timeout),
default_interval=(
CONF.tobiko.octavia.check_interval)):
response = get_client(object_id, **kwargs)
if response[status_key] == status:
return response
if response[status_key] == octavia.ERROR and not error_ok:
message = ('{name} {field} was updated to an invalid state of '
'ERROR'.format(name=get_client.__name__,
field=status_key))
raise octavia.RequestException(message)
# it will raise tobiko.RetryTimeLimitError in case of timeout
attempt.check_limits()
LOG.debug(f"Waiting for {get_client.__name__} {status_key} to get "
f"from '{response[status_key]}' to '{status}'...")
|
5384dec0c4a078c5d810f366927d868692ae6bf3
| 6,968
|
def can_hold_bags(rule: str, bag_rules: dict) -> dict:
"""
Returns a dict of all bags that can be held by given bag color
:param rule: Color of a given bag
:param bag_rules: Dictionary of rules
:type rule: str
:type bag_rules: dict
    :return: (dict) Bags that the given bag color can directly hold, per bag_rules
"""
return bag_rules[rule]
|
b7554c32bd91f9a05cd84c9249d92cc6354458a9
| 6,969
|
import numpy as np
def fix_levers_on_same_level(same_level, above_level):
"""
Input: 3D numpy array with malmo_object_to_index mapping
Returns:
3D numpy array where 3 channels represent
object index, color index, state index
for minigrid
"""
lever_idx = malmo_object_to_index['lever']
condition = above_level == lever_idx
minimap_array = np.where(condition, above_level, same_level)
return minimap_array
|
d1727e188f9a5935a660d806f69f9b472db94217
| 6,970
|
def iv_plot(df, var_name=None, suffix='_dev'):
"""Returns an IV plot for a specified variable"""
p_suffix = suffix.replace('_','').upper()
sub_df = df if var_name is None else df.loc[df.var_name==var_name, ['var_cuts_string'+suffix, 'ln_odds'+suffix, 'resp_rate'+suffix, 'iv'+suffix]]
sub_df['resp_rate_trend'+suffix] = _trend(sub_df['resp_rate'+suffix])
iv_val = round(sub_df['iv'+suffix].sum(), 4)
f, ax = plt.subplots()
ax2 = ax.twinx()
sns.lineplot(x='var_cuts_string'+suffix, y='resp_rate'+suffix, data=sub_df, color='red', ax=ax)
sns.lineplot(x='var_cuts_string'+suffix, y='resp_rate_trend'+suffix, data=sub_df, color='red', linestyle='--', ax=ax)
sns.lineplot(x='var_cuts_string'+suffix, y='ln_odds'+suffix, data=sub_df, color='darkgreen', ax=ax2)
ax.set_xticklabels(list(sub_df['var_cuts_string'+suffix]), rotation=45, ha='right')
ax.set(xlabel='Variable Bins', ylabel=f'Resp Rate ({p_suffix})', title=f'IV of {var_name} ({iv_val})')
ax2.set(ylabel=f'Log Odds ({p_suffix})')
ax.legend(handles=[l for a in [ax, ax2] for l in a.lines], labels=[f'Resp Rate ({p_suffix})', f'Resp Rate Trend ({p_suffix})', f'Log Odds ({p_suffix})'], loc=0)
return f
|
dd35329b5b91a19babdfa943c2f7688bb013c680
| 6,971
|
from py._path.local import LocalPath
def is_alive(pid):
"""Return whether a process is running with the given PID."""
return LocalPath('/proc').join(str(pid)).isdir()
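# Quick sanity check (Linux-only, since it relies on /proc):
import os
assert is_alive(os.getpid())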
|
e6086b79aa648dc4483085e15f096152185aa780
| 6,972
|
from pyspark import SparkContext
from typing import Callable
import functools
from typing import Any
def inheritable_thread_target(f: Callable) -> Callable:
"""
Return thread target wrapper which is recommended to be used in PySpark when the
pinned thread mode is enabled. The wrapper function, before calling original
thread target, it inherits the inheritable properties specific
to JVM thread such as ``InheritableThreadLocal``.
Also, note that pinned thread mode does not close the connection from Python
to JVM when the thread is finished in the Python side. With this wrapper, Python
garbage-collects the Python thread instance and also closes the connection
which finishes JVM thread correctly.
    When the pinned thread mode is off, it returns the original ``f``.
.. versionadded:: 3.2.0
Parameters
----------
f : function
the original thread target.
Notes
-----
This API is experimental.
It is important to know that it captures the local properties when you decorate it
whereas :class:`InheritableThread` captures when the thread is started.
Therefore, it is encouraged to decorate it when you want to capture the local
properties.
For example, the local properties from the current Spark context is captured
when you define a function here instead of the invocation:
>>> @inheritable_thread_target
... def target_func():
... pass # your codes.
If you have any updates on local properties afterwards, it would not be reflected to
the Spark context in ``target_func()``.
The example below mimics the behavior of JVM threads as close as possible:
>>> Thread(target=inheritable_thread_target(target_func)).start() # doctest: +SKIP
"""
if isinstance(SparkContext._gateway, ClientServer): # type: ignore[attr-defined]
# Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on.
# NOTICE the internal difference vs `InheritableThread`. `InheritableThread`
# copies local properties when the thread starts but `inheritable_thread_target`
# copies when the function is wrapped.
properties = (
SparkContext._active_spark_context._jsc.sc() # type: ignore[attr-defined]
.getLocalProperties()
.clone()
)
@functools.wraps(f)
def wrapped(*args: Any, **kwargs: Any) -> Any:
try:
# Set local properties in child thread.
SparkContext._active_spark_context._jsc.sc().setLocalProperties( # type: ignore[attr-defined]
properties
)
return f(*args, **kwargs)
finally:
InheritableThread._clean_py4j_conn_for_current_thread()
return wrapped
else:
return f
|
02d2e58449c736bf8ef19354bfd8f7a21066615b
| 6,973
|
import numpy as np
import scipy.sparse
def build_grad(verts, edges, edge_tangent_vectors):
"""
Build a (V, V) complex sparse matrix grad operator. Given real inputs at vertices, produces a complex (vector value) at vertices giving the gradient. All values pointwise.
- edges: (2, E)
"""
edges_np = toNP(edges)
edge_tangent_vectors_np = toNP(edge_tangent_vectors)
# TODO find a way to do this in pure numpy?
# Build outgoing neighbor lists
N = verts.shape[0]
vert_edge_outgoing = [[] for i in range(N)]
for iE in range(edges_np.shape[1]):
tail_ind = edges_np[0, iE]
tip_ind = edges_np[1, iE]
if tip_ind != tail_ind:
vert_edge_outgoing[tail_ind].append(iE)
# Build local inversion matrix for each vertex
row_inds = []
col_inds = []
data_vals = []
eps_reg = 1e-5
for iV in range(N):
n_neigh = len(vert_edge_outgoing[iV])
lhs_mat = np.zeros((n_neigh, 2))
rhs_mat = np.zeros((n_neigh, n_neigh + 1))
ind_lookup = [iV]
for i_neigh in range(n_neigh):
iE = vert_edge_outgoing[iV][i_neigh]
jV = edges_np[1, iE]
ind_lookup.append(jV)
edge_vec = edge_tangent_vectors[iE][:]
w_e = 1.
lhs_mat[i_neigh][:] = w_e * edge_vec
rhs_mat[i_neigh][0] = w_e * (-1)
rhs_mat[i_neigh][i_neigh + 1] = w_e * 1
lhs_T = lhs_mat.T
lhs_inv = np.linalg.inv(lhs_T @ lhs_mat + eps_reg * np.identity(2)) @ lhs_T
sol_mat = lhs_inv @ rhs_mat
sol_coefs = (sol_mat[0, :] + 1j * sol_mat[1, :]).T
for i_neigh in range(n_neigh + 1):
i_glob = ind_lookup[i_neigh]
row_inds.append(iV)
col_inds.append(i_glob)
data_vals.append(sol_coefs[i_neigh])
# build the sparse matrix
row_inds = np.array(row_inds)
col_inds = np.array(col_inds)
data_vals = np.array(data_vals)
mat = scipy.sparse.coo_matrix(
(data_vals, (row_inds, col_inds)), shape=(
N, N)).tocsc()
return mat
|
8faeea92e132afcf1f612cd17d48ef488fc907bb
| 6,974
|
from collections import OrderedDict
def join_label_groups(grouped_issues, grouped_prs, issue_label_groups,
                      pr_label_groups):
    """Combine issue and PR groups into one dictionary.
PR-only groups are added after all issue groups. Any groups that are
shared between issues and PRs are added according to the order in the
issues list of groups. This results in "label-groups" remaining in the
same order originally specified even if a group does not have issues
in it. Otherwise, a shared group may end up at the end of the combined
dictionary and not in the order originally specified by the user.
"""
issue_group_names = [x['name'] for x in issue_label_groups]
pr_group_names = [x['name'] for x in pr_label_groups]
shared_groups = []
for idx, group_name in enumerate(issue_group_names):
if len(pr_group_names) > idx and group_name == pr_group_names[idx]:
shared_groups.append(group_name)
else:
break
label_groups = OrderedDict()
# add shared groups first
for group_name in shared_groups:
# make sure to copy the issue group in case it is added to
label_groups[group_name] = grouped_issues.get(group_name, [])[:]
# add any remaining issue groups
for group_name, group in grouped_issues.items():
if group_name in shared_groups:
continue
label_groups[group_name] = group[:]
# add any remaining PR groups (extending any existing groups)
for group_name, group in grouped_prs.items():
label_groups.setdefault(group_name, []).extend(group)
return label_groups
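# Minimal sketch of the merge order: shared leading groups keep the issue-side
# order, then remaining issue groups, then PR-only groups. The dicts are made up.
_issues = {"bugs": ["i1"], "features": ["i2"]}
_prs = {"bugs": ["p1"], "docs": ["p2"]}
_combined = join_label_groups(_issues, _prs, [{"name": "bugs"}, {"name": "features"}],
                              [{"name": "bugs"}, {"name": "docs"}])
# OrderedDict([('bugs', ['i1', 'p1']), ('features', ['i2']), ('docs', ['p2'])])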
|
b51a70a60bde3580326816eaf0d3b76cb51062ac
| 6,975
|
import numpy as np
def healpix_ijs_neighbours(istar, jstar, nside):
"""Gets the healpix i, jstar neighbours for a single healpix pixel.
Parameters
----------
istar : array
Healpix integer i star index.
jstar : array
        Healpix integer j star index.
nside : int
Healpix nside.
Returns
-------
istar_neigh : array
Neighbour healpix integer i star index.
jstar_neigh : array
Neighbour healpix integer j star index.
"""
if jstar - istar + 1 == 2*nside:
istar_neigh = [istar, istar + 1, istar + 1, istar + nside, istar + nside, istar - nside, istar + 1 - nside, istar+2*nside]
jstar_neigh = [jstar - 1, jstar - 1, jstar, jstar - 1 + nside, jstar + nside, jstar - nside, jstar - nside, jstar+2*nside]
elif istar - jstar + 1 == 2*nside:
istar_neigh = [istar, istar - 1, istar - 1, istar - nside, istar - nside, istar + nside, istar - 1 + nside, istar-2*nside]
jstar_neigh = [jstar + 1, jstar + 1, jstar, jstar + 1 - nside, jstar - nside, jstar + nside, jstar + nside, jstar-2*nside]
elif jstar - istar + 1 == nside and istar % nside == 0:
istar_neigh = [istar - 1, istar, istar + 1, istar - 1, istar + 1, istar, istar + 1]
jstar_neigh = [jstar - 1, jstar - 1, jstar - 1, jstar, jstar, jstar + 1, jstar + 1]
elif istar - jstar + 1 == nside and jstar % nside == 0:
istar_neigh = [istar - 1, istar, istar - 1, istar + 1, istar - 1, istar, istar + 1]
jstar_neigh = [jstar - 1, jstar - 1, jstar, jstar, jstar + 1, jstar + 1, jstar + 1]
elif istar % nside == 0 and jstar + 1 - nside*(np.floor(istar/nside) + 1) > 0:
istar_neigh = [istar, istar + 1, istar + 1, istar, istar + 1,
istar - ((jstar+1)-nside*np.floor(jstar/nside)),
istar - ((jstar)-nside*np.floor(jstar/nside)),
istar - ((jstar-1)-nside*np.floor(jstar/nside))]
jstar_neigh = [jstar - 1, jstar - 1, jstar, jstar + 1, jstar + 1,
nside*np.floor(jstar/nside)-1,
nside*np.floor(jstar/nside)-1,
nside*np.floor(jstar/nside)-1]
elif jstar % nside == 0 and istar + 1 - nside*(np.floor(jstar/nside) + 1) > 0:
jstar_neigh = [jstar, jstar + 1, jstar + 1, jstar, jstar + 1,
jstar - ((istar+2)-nside*np.floor(istar/nside)),
jstar - ((istar+1)-nside*np.floor(istar/nside)),
jstar - ((istar)-nside*np.floor(istar/nside))]
istar_neigh = [istar - 1, istar - 1, istar, istar + 1, istar + 1,
nside*np.floor(istar/nside)-1,
nside*np.floor(istar/nside)-1,
nside*np.floor(istar/nside)-1]
elif (jstar + 1 - nside) % nside == 0 and jstar + 1 - nside*(np.floor(istar/nside) + 1) > 0:
jstar_neigh = [jstar, jstar - 1, jstar - 1, jstar, jstar - 1,
jstar + nside*(np.floor(istar/nside)+1)-istar,
jstar + nside*(np.floor(istar/nside)+1)-istar-1,
jstar + nside*(np.floor(istar/nside)+1)-istar+1]
istar_neigh = [istar - 1, istar - 1, istar, istar + 1, istar + 1,
nside*(np.floor(istar/nside)+1),
nside*(np.floor(istar/nside)+1),
nside*(np.floor(istar/nside)+1)]
elif (istar + 1 - nside) % nside == 0 and istar + 1 - nside*(np.floor(jstar/nside) + 1) > 0:
istar_neigh = [istar, istar - 1, istar - 1, istar, istar - 1,
istar + nside*(np.floor(jstar/nside)+1)-jstar,
istar + nside*(np.floor(jstar/nside)+1)-jstar-1,
istar + nside*(np.floor(jstar/nside)+1)-jstar+1]
jstar_neigh = [jstar - 1, jstar - 1, jstar, jstar + 1, jstar + 1,
nside*(np.floor(jstar/nside)+1),
nside*(np.floor(jstar/nside)+1),
nside*(np.floor(jstar/nside)+1)]
else:
istar_neigh = [istar - 1, istar, istar + 1, istar - 1, istar + 1, istar - 1, istar, istar + 1]
jstar_neigh = [jstar - 1, jstar - 1, jstar - 1, jstar, jstar, jstar + 1, jstar + 1, jstar + 1]
istar_neigh = np.array(istar_neigh)
jstar_neigh = np.array(jstar_neigh)
cond = np.where(istar_neigh + jstar_neigh > 9*nside-1)[0]
istar_neigh[cond] = istar_neigh[cond] - 4*nside
jstar_neigh[cond] = jstar_neigh[cond] - 4*nside
cond = np.where(istar_neigh + jstar_neigh < nside-1)[0]
istar_neigh[cond] = istar_neigh[cond] + 4*nside
jstar_neigh[cond] = jstar_neigh[cond] + 4*nside
istar_neigh = np.unique(istar_neigh)
jstar_neigh = np.unique(jstar_neigh)
return istar_neigh, jstar_neigh
|
48cae5cd13101529c7d03f9c08ed0f2c2d77a7b8
| 6,976
|
def micropub_blog_endpoint_POST(blog_name: str):
"""The POST verb for the micropub blog route
Used by clients to change content (CRUD operations on posts)
If this is a multipart/form-data request,
note that the multiple media items can be uploaded in one request,
and they should be sent with a `name` of either `photo`, `video`, or `audio`.
(multipart/form-data POST requests can send more than one attachment with the same `name`.)
This is in contrast to the media endpoint,
which expects a single item with a `name` of simply `file`.
"""
blog: HugoBase = current_app.config["APPCONFIG"].blog(blog_name)
content_type = request.headers.get("Content-type")
if not content_type:
raise MicropubInvalidRequestError("No 'Content-type' header")
request_body, request_files = process_POST_body(request, content_type)
current_app.logger.debug(
f"/{blog_name}: all headers before calling authentiate_POST: {request.headers}"
)
verified = authenticate_POST(request.headers, request_body, blog)
auth_test = request.headers.get("X-Interpersonal-Auth-Test")
# Check for the header we use in testing, and return a success message
if auth_test:
return jsonify({"interpersonal_test_result": "authentication_success"})
contype_test = request_body.get("interpersonal_content-type_test")
# Check for the value we use in testing, and return a success message
if contype_test:
return jsonify(
{
"interpersonal_test_result": contype_test,
"content_type": content_type,
"uploaded_file_count": len(listflatten(request_files.values())),
}
)
# Per spec, missing 'action' should imply create
action = request_body.get("action", "create")
# Ahh yes, the famous CUUD.
# These are all actions supported by the spec:
# supported_actions = ["delete", "undelete", "update", "create"]
# But I don't support them all right now.
# TODO: Support delete, undelete, and update actions
supported_actions = ["create"]
if action not in verified["scopes"]:
raise MicropubInsufficientScopeError(action)
if action not in supported_actions:
raise MicropubInvalidRequestError(f"'{action}' action not supported")
actest = request_body.get("interpersonal_action_test")
if actest:
return jsonify({"interpersonal_test_result": actest, "action": action})
if action == "create":
if content_type == "application/json":
mf2obj = request_body
elif content_type == "application/x-www-form-urlencoded":
mf2obj = form_body_to_mf2_json(request_body)
elif content_type.startswith("multipart/form-data"):
mf2obj = form_body_to_mf2_json(request_body)
# Multipart forms contain attachments.
# Upload the attachments, then append the URIs to the mf2 object.
# We want to append, not replace, the attachments -
# if the post includes a photo URI and also some photo uploads,
# we need to keep both.
# (Not sure if that actually happens out in the wild, but maybe?)
# mtype will be one of 'photo', 'video', 'audio'.
for mtype in request_files:
mitems = request_files[mtype]
added = blog.add_media(mitems)
if mtype not in mf2obj["properties"]:
mf2obj["properties"][mtype] = []
mf2obj["properties"][mtype] += [a.uri for a in added]
else:
raise MicropubInvalidRequestError(
f"Unhandled 'Content-type': '{content_type}'"
)
new_post_location = blog.add_post_mf2(mf2obj)
resp = Response("")
resp.headers["Location"] = new_post_location
resp.status_code = 201
return resp
else:
return json_error(500, f"Unhandled action '{action}'")
|
3d8f4b80099ca77d2f7ad2ec13f4a45f4102dc8c
| 6,977
|
def create_parser(config: YAMLConfig) -> ArgumentParser:
"""
Automatically creates a parser from all of the values specified in a config
file. Will use the dot syntax for nested dictionaries.
Parameters
----------
config: YAMLConfig
Config object
Returns
-------
ArgumentParser
Parser loaded up with all of the values specified in the config
"""
key_pairs = config.keys()
parser = ArgumentParser(
description=f"""
This argument parser was autogenerated from the config file. This allows you to
overwrite specific YAML values on the fly. The options listed here do not
entail an exhaustive list of the things that you can configure. For more
information on possible kwargs, refer to the class definition of the object in
question.
"""
)
parser.add_argument(f"config_file", help="YAML config file")
for k in key_pairs:
current = config.access(k)
parser.add_argument(f"--{k}", type=type(current))
return parser
|
8fcf886448061b7f520d133bbf9bb66047e9f516
| 6,978
|
from sqlalchemy import exc, text
def detect_version(conn):
"""
Detect the version of the database. This is typically done by reading the
contents of the ``configuration`` table, but before that was added we can
guess a couple of versions based on what tables exist (or don't). Returns
``None`` if the database appears uninitialized, and raises
:exc:`RuntimeError` is the version is so ancient we can't do anything with
it.
"""
try:
with conn.begin():
db_version = conn.scalar(text(
"SELECT version FROM configuration"))
except exc.ProgrammingError:
with conn.begin():
packages_exists = bool(conn.scalar(text(
"SELECT 1 FROM pg_catalog.pg_tables "
"WHERE schemaname = 'public' AND tablename = 'packages'")))
with conn.begin():
statistics_exists = bool(conn.scalar(text(
"SELECT 1 FROM pg_catalog.pg_views "
"WHERE schemaname = 'public' AND viewname = 'statistics'")))
with conn.begin():
files_exists = bool(conn.scalar(text(
"SELECT 1 FROM pg_catalog.pg_tables "
"WHERE schemaname = 'public' AND tablename = 'files'")))
if not packages_exists:
# Database is uninitialized
return None
elif not files_exists:
# Database is too ancient to upgrade
raise RuntimeError("Database version older than 0.4; cannot upgrade")
elif not statistics_exists:
return "0.4"
else:
return "0.5"
else:
return db_version
|
6429dbb1e1767cf6fd93c3fd240ce095f1b50ef7
| 6,979
|
def nIonDotBHmodel2(z):
"""Ionization model 2 from BH2007: constant above z=6.
"""
return ((z < 6) * nIonDotLowz(z) +
(z >= 6) * nIonDotLowz(6))
|
438cdd69a229e445f8e313145e84ed11618ee2cb
| 6,980
|
def answer(input):
"""
>>> answer("1234")
1234
"""
lines = input.split('\n')
for line in lines:
return int(line)
|
b9ce42d88a09976444563493a01741475dce67c5
| 6,981
|
def get_leading_states(contributions):
"""
Return state contributions, names as lists in descending order of contribution amount
:param contributions:
:return:
"""
contributions['state'] = contributions['clean_fips'].apply(get_state)
states = contributions.groupby('state')
state_sums = states.sum()
    ordered_sums = state_sums.sort_values('clean_contribution', ascending=False)['clean_contribution']
names = list(ordered_sums.index)
values = list(ordered_sums)
unwanted = ['NO_STATE_NAME', 'american samoa',
'northern mariana islands', 'guam', 'virgin islands', 'puerto rico']
state_contributions = []
state_names = []
for i in range(0, len(values)):
amount = values[i]
name = names[i]
if name not in unwanted:
state_contributions.append(amount)
state_names.append(name)
return state_contributions, state_names
|
7028f87ad7b106e267104dddebc2fe42546d3cfd
| 6,982
|
def contacts_per_person_normal_self_20():
"""
Real Name: b'contacts per person normal self 20'
Original Eqn: b'30'
Units: b'contact/Day'
Limits: (None, None)
Type: constant
b''
"""
return 30
|
4a240066b2aefd8af2e19f174632e1bf854bf7d3
| 6,983
|
import numpy as np
def __compute_partition_gradient(data, fit_intercept=True):
"""
Compute hetero regression gradient for:
gradient = ∑d*x, where d is fore_gradient which differ from different algorithm
Parameters
----------
data: DTable, include fore_gradient and features
fit_intercept: bool, if model has interception or not. Default True
Returns
----------
numpy.ndarray
hetero regression model gradient
"""
feature = []
fore_gradient = []
for key, value in data:
feature.append(value[0])
fore_gradient.append(value[1])
feature = np.array(feature)
fore_gradient = np.array(fore_gradient)
gradient = []
if feature.shape[0] <= 0:
return 0
for j in range(feature.shape[1]):
feature_col = feature[:, j]
gradient_j = fate_operator.dot(feature_col, fore_gradient)
gradient.append(gradient_j)
if fit_intercept:
bias_grad = np.sum(fore_gradient)
gradient.append(bias_grad)
return np.array(gradient)
|
e987fc53b1f1ee8cc7a0ddbe83de23b1623b532e
| 6,984
|
import numpy as np
def calc_nsd(x, n=21):
"""
Estimate Noise Standard Deviation of Data.
Parameters
----------
x : 1d-ndarray
Input data.
n : int
Size of segment.
Returns
-------
result : float
Value of noise standard deviation.
"""
x_diff = np.diff(x, n=2)
x_frag = np.array_split(x_diff, len(x_diff) // n)
cursor = np.argmin([np.std(i, ddof=1) for i in x_frag])
for i in range(n * (cursor + 1), len(x_diff)):
i_frag = x_diff[i-n:i-1]
i_frag_avg = np.mean(i_frag)
i_frag_std = np.std(i_frag, ddof=1)
if np.abs(x_diff[i] - i_frag_avg) > 3 * i_frag_std:
x_diff[i] = i_frag_avg
for i in range(0, n * cursor - 1)[::-1]:
if n * cursor - 1 < 0:
break
i_frag = x_diff[i+1:i+n]
i_frag_avg = np.mean(i_frag)
i_frag_std = np.std(i_frag, ddof=1)
if np.abs(x_diff[i] - i_frag_avg) > 3 * i_frag_std:
x_diff[i] = i_frag_avg
return np.std(x_diff, ddof=1) / 6 ** 0.5
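# Hedged check on synthetic data: a smooth signal plus Gaussian noise with a known
# standard deviation of 0.1; the estimate should come out close to that value.
_rng = np.random.default_rng(0)
_t = np.linspace(0, 10, 2000)
_noisy = np.sin(_t) + _rng.normal(0, 0.1, _t.size)
print(calc_nsd(_noisy))  # roughly 0.1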
|
23b0041fc1a9bde364828a0a94b12fc7292a391a
| 6,985
|
def deflection_from_kappa_grid_adaptive(kappa_high_res, grid_spacing, low_res_factor, high_res_kernel_size):
"""
deflection angles on the convergence grid with adaptive FFT
the computation is performed as a convolution of the Green's function with the convergence map using FFT
The grid is returned in the lower resolution grid
:param kappa_high_res: convergence values for each pixel (2-d array)
:param grid_spacing: pixel size of high resolution grid
:param low_res_factor: lower resolution factor of larger scale kernel.
:param high_res_kernel_size: int, size of high resolution kernel in units of degraded pixels
:return: numerical deflection angles in x- and y- direction
"""
kappa_low_res = image_util.re_size(kappa_high_res, factor=low_res_factor)
num_pix = len(kappa_high_res) * 2
if num_pix % 2 == 0:
num_pix += 1
#if high_res_kernel_size % low_res_factor != 0:
# assert ValueError('fine grid kernel size needs to be a multiplicative factor of low_res_factor! Settings used: '
# 'fine_grid_kernel_size=%s, low_res_factor=%s' % (high_res_kernel_size, low_res_factor))
kernel_x, kernel_y = deflection_kernel(num_pix, grid_spacing)
grid_spacing_low_res = grid_spacing * low_res_factor
kernel_low_res_x, kernel_high_res_x = kernel_util.split_kernel(kernel_x, high_res_kernel_size, low_res_factor,
normalized=False)
f_x_high_res = scp.fftconvolve(kappa_high_res, kernel_high_res_x, mode='same') / np.pi * grid_spacing ** 2
f_x_high_res = image_util.re_size(f_x_high_res, low_res_factor)
f_x_low_res = scp.fftconvolve(kappa_low_res, kernel_low_res_x, mode='same') / np.pi * grid_spacing_low_res ** 2
f_x = f_x_high_res + f_x_low_res
kernel_low_res_y, kernel_high_res_y = kernel_util.split_kernel(kernel_y, high_res_kernel_size, low_res_factor,
normalized=False)
f_y_high_res = scp.fftconvolve(kappa_high_res, kernel_high_res_y, mode='same') / np.pi * grid_spacing ** 2
f_y_high_res = image_util.re_size(f_y_high_res, low_res_factor)
f_y_low_res = scp.fftconvolve(kappa_low_res, kernel_low_res_y, mode='same') / np.pi * grid_spacing_low_res ** 2
f_y = f_y_high_res + f_y_low_res
return f_x, f_y
|
cc71b9bd35c5e09e45815cf578870c481a03b8ed
| 6,986
|
import numpy as np
import pandas as pd
from params import pop_sizes
def remove_sus_from_Reff(strain, data_date):
"""
This removes the inferred susceptibility depletion from the Reff estimates out of EpyReff.
The inferred Reff = S(t) * Reff_1 where S(t) is the effect of susceptible depletion (i.e. a
factor between 0 and 1) and Reff_1 is the Reff without the effect of a reducing susceptibility
pool.
"""
data_date = pd.to_datetime(data_date)
# read in Reff samples
df_Reff = pd.read_csv(
"results/EpyReff/Reff_"
+ strain
+ "_samples"
+ data_date.strftime("%Y-%m-%d")
+ "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
# read in assumed CA
CA = pd.read_csv(
"results/"
+ "CA_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# read in cases by infection dates
cases = pd.read_csv(
"results/"
+ "cases_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["date_inferred"]
)
# scale the local cases by the assumed CA
cases["local_scaled"] = cases["local"]
cases.loc[cases.date_inferred <= pd.to_datetime("2021-12-09"), "local_scaled"] *= 1 / 0.75
cases.loc[cases.date_inferred > pd.to_datetime("2021-12-09"), "local_scaled"] *= 1 / 0.50
# read in the inferred susceptibility depletion factor and convert to a simple array
samples = pd.read_csv(
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/posterior_sample_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
sus_dep_factor = samples["phi"][:2000]
sus_dep_factor.to_csv(
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "sampled_susceptible_depletion_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
sus_dep_factor = sus_dep_factor.to_numpy()
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
# init a dataframe to hold the Reff samples without susceptible depletion
df_Reff_adjusted = pd.DataFrame()
df_cases_adjusted = pd.DataFrame()
for state in states:
# filter cases by the state and after March 2020
cases_state = cases.loc[cases.STATE == state]
dates_complete = pd.DataFrame(
pd.date_range(
start=df_Reff.INFECTION_DATES.min(),
end=max(df_Reff.INFECTION_DATES)
),
columns=["date_inferred"],
)
# merging on date_inferred forces missing dates to be added into cases_state
cases_state = dates_complete.merge(right=cases_state, how='left', on='date_inferred')
cases_state.fillna(0, inplace=True)
cases_state.loc[cases_state.date_inferred <= "2021-06-25", "local_scaled"] = 0
cases_state["cum_local_scaled"] = cases_state["local_scaled"].cumsum()
df_cases_adjusted = pd.concat((df_cases_adjusted, cases_state), axis=0)
cases_state = cases_state.cum_local_scaled.to_numpy()
cases_state = np.tile(cases_state, (2000, 1)).T
# invert the susceptible depletion factor for the model
scaling_factor = 1 / (1 - sus_dep_factor * cases_state / pop_sizes[state])
df_Reff_state = df_Reff.loc[df_Reff.STATE == state]
df_Reff_state.iloc[:, :-2] = df_Reff_state.iloc[:, :-2] * scaling_factor
df_Reff_adjusted = pd.concat((df_Reff_adjusted, df_Reff_state), axis=0)
# save the unscaled Reff
df_Reff_adjusted.to_csv(
"results/EpyReff/Reff_"
+ strain
+ "_samples"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
index=False,
)
df_cases_adjusted.to_csv(
"results/EpyReff/cases_adjusted_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
index=False,
)
return None
|
9342896ff84507ecbe93b96a81b781ed6f8c336e
| 6,987
|
def word2bytes(word, big_endian=False):
""" Converts a 32-bit word into a list of 4 byte values.
"""
return unpack_bytes(pack_word(word, big_endian))
|
9c208efc87bb830692771f3dacb1618a1d8d7da4
| 6,988
|
def statfcn(status, _id, _ret):
"""
Callback for libngspice to report simulation status like 'tran 5%'
"""
    logger.warning(status.decode('ascii'))
return 0
|
344210160227ae76470f53eecd43c913b9dec495
| 6,989
|
def decode_eventdata(sensor_type, offset, eventdata, sdr):
"""Decode extra event data from an alert or log
Provide a textual summary of eventdata per descriptions in
Table 42-3 of the specification. This is for sensor specific
offset events only.
:param sensor_type: The sensor type number from the event
:param offset: Sensor specific offset
:param eventdata: The three bytes from the log or alert
"""
if sensor_type == 5 and offset == 4: # link loss, indicates which port
return 'Port {0}'.format(eventdata[1])
elif sensor_type == 8 and offset == 6: # PSU cfg error
errtype = eventdata[2] & 0b1111
return psucfg_errors.get(errtype, 'Unknown')
elif sensor_type == 0xc and offset == 8: # Memory spare
return 'Module {0}'.format(eventdata[2])
elif sensor_type == 0xf:
if offset == 0: # firmware error
return firmware_errors.get(eventdata[1], 'Unknown')
elif offset in (1, 2):
return firmware_progress.get(eventdata[1], 'Unknown')
elif sensor_type == 0x10:
if offset == 0: # Correctable error logging on a specific memory part
return 'Module {0}'.format(eventdata[1])
elif offset == 1:
return 'Reading type {0:02X}h, offset {1:02X}h'.format(
eventdata[1], eventdata[2] & 0b1111)
elif offset == 5:
return '{0}%'.format(eventdata[2])
elif offset == 6:
return 'Processor {0}'.format(eventdata[1])
elif sensor_type == 0x12:
if offset == 3:
action = (eventdata[1] & 0b1111000) >> 4
return auxlog_actions.get(action, 'Unknown')
elif offset == 4:
sysactions = []
if eventdata[1] & 0b1 << 5:
sysactions.append('NMI')
if eventdata[1] & 0b1 << 4:
sysactions.append('OEM action')
if eventdata[1] & 0b1 << 3:
sysactions.append('Power Cycle')
if eventdata[1] & 0b1 << 2:
sysactions.append('Reset')
if eventdata[1] & 0b1 << 1:
sysactions.append('Power Down')
if eventdata[1] & 0b1:
sysactions.append('Alert')
return ','.join(sysactions)
elif offset == 5: # Clock change event, either before or after
if eventdata[1] & 0b10000000:
return 'After'
else:
return 'Before'
elif sensor_type == 0x19 and offset == 0:
        return 'Requested {0} while {1}'.format(eventdata[1], eventdata[2])
elif sensor_type == 0x1d and offset == 7:
return restart_causes.get(eventdata[1], 'Unknown')
elif sensor_type == 0x21:
return '{0} {1}'.format(slot_types.get(eventdata[1], 'Unknown'),
eventdata[2])
elif sensor_type == 0x23:
phase = eventdata[1] & 0b1111
return watchdog_boot_phases.get(phase, 'Unknown')
elif sensor_type == 0x28:
if offset == 4:
return 'Sensor {0}'.format(eventdata[1])
elif offset == 5:
islogical = (eventdata[1] & 0b10000000)
if islogical:
if eventdata[2] in sdr.fru:
return sdr.fru[eventdata[2]].fru_name
else:
return 'FRU {0}'.format(eventdata[2])
elif sensor_type == 0x2a and offset == 3:
return 'User {0}'.format(eventdata[1])
elif sensor_type == 0x2b:
return version_changes.get(eventdata[1], 'Unknown')
elif sensor_type == 0x2c:
cause = (eventdata[1] & 0b11110000) >> 4
cause = fru_states.get(cause, 'Unknown')
oldstate = eventdata[1] & 0b1111
if oldstate != offset:
try:
cause += '(change from {0})'.format(
ipmiconst.sensor_type_offsets[0x2c][oldstate]['desc'])
except KeyError:
pass
|
7a90810657edd017b42f7f70a7a0c617435cb14f
| 6,990
|
def get_log_path():
"""
Requests the logging path to the external python library (that calls
the bindings-common).
:return: The path where to store the logs.
"""
if __debug__:
logger.debug("Requesting log path")
log_path = compss.get_logging_path()
if __debug__:
logger.debug("Log path received: %s" % log_path)
return log_path
|
ccb7adf37df06de721f53253a86cc2ecdff962b9
| 6,991
|
import numpy as np
import pandas as pd
def about_incumbent(branch_df):
"""
number of incumbent updates
incumbent throughput: num_updates / num_nodes
max_improvement, min_improvement, avg_improvement
avg incumbent improvement / first incumbent value
max, min, avg distance between past incumbent updates
distance between last update and last node explored
"""
abs_improvement = pd.Series(abs(branch_df['best_integer'].diff(1)))
bool_updates = pd.Series((abs_improvement != 0))
avg_improvement = abs_improvement.sum() / bool_updates.sum() if bool_updates.sum() != 0 else None
    nnz_idx = branch_df['best_integer'].to_numpy().nonzero()
first_incumbent = branch_df['best_integer'].iloc[nnz_idx[0][0]] if len(nnz_idx[0]) != 0 else None
num_updates = bool_updates.sum() # real number of updates (could be 0)
second = float(num_updates) / branch_df['num_nodes'].iloc[-1] if branch_df['num_nodes'].iloc[-1] != 0 else None
sixth = avg_improvement / first_incumbent if avg_improvement and first_incumbent else None
# add dummy 1 (update) at the end of bool_updates
bool_updates[bool_updates.shape[0]] = 1.
non_zeros = bool_updates.values == 1
zeros = ~non_zeros
zero_counts = np.cumsum(zeros)[non_zeros]
zero_counts[1:] -= zero_counts[:-1].copy() # distance between two successive incumbent updates
zeros_to_last = zero_counts[-1]
zero_counts = zero_counts[:-1] # removes last count (to the end) to compute max, min, avg
try:
zeros_stat = [zero_counts.max(), zero_counts.min(), zero_counts.mean(), zeros_to_last]
except ValueError:
zeros_stat = [None]*4
incumbent_list = [
num_updates,
second,
abs_improvement.max(),
abs_improvement.min(),
abs_improvement.mean(),
sixth
]
incumbent_list.extend(zeros_stat)
if len(incumbent_list) != 10:
print("***len(incumbent_list): {}".format(len(incumbent_list)))
return incumbent_list, len(incumbent_list)
|
309dd09a6fcad58064e98c79536ca73256fe3ac2
| 6,992
|
from typing import List
def unique_chars(texts: List[str]) -> List[str]:
"""
Get a list of unique characters from list of text.
Args:
texts: List of sentences
Returns:
A sorted list of unique characters
"""
return sorted(set("".join(texts)))
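# Tiny example: characters are pooled over all sentences, deduplicated, and sorted.
assert unique_chars(["ab", "ba", "cab"]) == ["a", "b", "c"]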
|
02bc9ce28498bd129fdb68c2f797d138ca584490
| 6,993
|
def adaptive_max_pool1d(input, output_size):
"""Apply the 1d adaptive max pooling to input.
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
output_size : Union[int, Sequence[int]]
The target output size.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
See Also
--------
`torch.nn.AdaptiveMaxPool1d(...)`_
"""
args = utils._get_adaptive_pool_args(
input.size()[-1:], utils._single(output_size))
return _pool('MAX', utils._single, input, **args)
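# For shape intuition only (assumption: the Dragon call above mirrors the
# standard torch.nn.functional.adaptive_max_pool1d semantics).
import torch
import torch.nn.functional as F
x = torch.randn(2, 3, 10)        # (batch, channels, length)
y = F.adaptive_max_pool1d(x, 5)  # output length forced to 5 -> shape (2, 3, 5)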
|
06556ea06ebe282bf24739d56ff016924a730c8b
| 6,994
|
import numpy as np
def get_return_nb(input_value, output_value):
"""Get return from input and output value."""
if input_value == 0:
if output_value == 0:
return 0.
return np.inf * np.sign(output_value)
return_value = (output_value - input_value) / input_value
if input_value < 0:
return_value *= -1
return return_value
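# Hedged worked examples (plain floats in, float returned):
print(get_return_nb(100.0, 110.0))   # 0.1  (+10% on a long position)
print(get_return_nb(-100.0, -90.0))  # 0.1  (sign flipped because the input value is negative)
print(get_return_nb(0.0, 5.0))       # inf  (no capital in, positive value out)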
|
fe9ef59feb7b4e9797a74258ecbf890171f6df59
| 6,995
|
import numpy as np
from sklearn.metrics import f1_score, precision_score, recall_score, roc_auc_score
def get_rocauc(val,num_iterations):
    """ Trains a logistic regression and returns the mean recall, precision,
    f1 and roc auc over num_iterations runs for classifying products as >=4 stars """
recalls = np.zeros(num_iterations)
precisions = np.zeros(num_iterations)
f1s = np.zeros(num_iterations)
roc_aucs = np.zeros(num_iterations)
factory = lr_wrapper(val,feature_columns=['sim_score_db','sim_score_dm','rating_mean'],y_column='class')
for z in range(num_iterations):
# Slightly annoying thing here that each call to factory uses its own
# train_test_split, so y_test used for recalls will be different than
# y_test used in roc aucs
y_test,y_preds = factory.fit_and_return_preds()
recalls[z] = recall_score(y_test,y_preds)
precisions[z] = precision_score(y_test,y_preds)
f1s[z] = f1_score(y_test,y_preds)
y_test,y_probas = factory.fit_and_return_probas()
roc_aucs[z] = roc_auc_score(y_test, y_probas)
# print(roc_aucs)
return np.mean(recalls),np.mean(precisions),np.mean(f1s),np.mean(roc_aucs)
|
d2b2ceae240db6c3ce474d74aea1ebd4d1ed9830
| 6,996
|
def split_expList(expList, max_nr_of_instr: int=8000,
verbose: bool=True):
"""
Splits a pygsti expList into sub lists to facilitate running on the CCL
and not running into the instruction limit.
Assumptions made:
- there is a fixed instruction overhead per program
- there is a fixed instruction overhead per kernel (measurement + init)
- every gate (in the gatestring) consists of a single instruction
"""
# FIXME: platform dependency on CClight
    fixed_program_overhead = 12 + 3  # declare registers + infinite loop
kernel_overhead = 4 # prepz wait and measure
instr_cnt = 0
    instr_cnt += fixed_program_overhead
# Determine where to split the expLists
cutting_indices = [0]
for i, gatestring in enumerate(expList):
instr_cnt += kernel_overhead
instr_cnt += len(gatestring)
if instr_cnt > max_nr_of_instr:
cutting_indices.append(i)
            instr_cnt = fixed_program_overhead
    # Create the expSubLists, a list containing an expList object for each part
expSubLists = []
if len(cutting_indices) == 1:
expSubLists.append(expList)
else:
for exp_num, start_idx in enumerate(cutting_indices[:-1]):
stop_idx = cutting_indices[exp_num+1]
expSubLists.append(expList[start_idx:stop_idx])
# Final slice is not by default included in the experiment list
expSubLists.append(expList[cutting_indices[-1]:])
if verbose:
print("Splitted expList into {} sub lists".format(len(expSubLists)))
return expSubLists
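# Hedged sketch (assumption: each expList entry is a sequence of gates whose
# len() is its instruction count, e.g. plain lists of gate labels).
_fake_explist = [['Gx', 'Gy'] * 50 for _ in range(300)]   # 100 "gates" each
_parts = split_expList(_fake_explist, max_nr_of_instr=2000)
assert sum(len(p) for p in _parts) == len(_fake_explist)  # nothing lost in the split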
|
0c006b3026bfd11f28a3f1ecfc40e6e87801759e
| 6,997
|
def __make_node_aliases(data: list[list[str]]):
    """Alias gene IDs to their families and complexes
    in order to build edges between them."""
famcom = {}
elems = [tokens for tokens in data if tokens[2] in ["FAMILY", "COMPLEX"]]
# Add all (gene) containers first
for tokens in elems:
famcom[tokens[1]] = AliasItem(tokens[3], [])
log.debug(famcom)
elems = [tokens for tokens in data if tokens[2] == "GENE"]
for tokens in elems:
# Add gene to its parent
famcom[tokens[3]].genes.append(GeneElem(tokens[0], tokens[1], tokens[3]))
return famcom
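# Hedged, self-contained sketch: AliasItem, GeneElem and log are defined
# elsewhere in the real module; lightweight stand-ins are assumed here purely
# to illustrate the expected token layout (row = [id, node_id, kind, parent_or_name]).
import logging
from collections import namedtuple
log = logging.getLogger(__name__)                               # assumption
AliasItem = namedtuple('AliasItem', ['name', 'genes'])          # assumption
GeneElem = namedtuple('GeneElem', ['id', 'node_id', 'parent'])  # assumption
_rows = [
    ['1', 'FAM1', 'FAMILY', 'FamilyA'],
    ['2', 'G1', 'GENE', 'FAM1'],
]
# expected: {'FAM1': AliasItem(name='FamilyA', genes=[GeneElem('2', 'G1', 'FAM1')])}
print(__make_node_aliases(_rows))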
|
3372d41de2b8a4caf5cf599ff09a0491af7740f9
| 6,998
|
import torch
def poly_edges_min_length(P, T, distFcn=norm):
"""
Returns the per polygon min edge length
Parameters
----------
P : Tensor
a (N, D,) points set tensor
T : LongTensor
a (M, T,) topology tensor
Returns
-------
Tensor
        the (M, 1,) per polygon min edge length tensor
"""
return torch.min(poly_edges_length(P, T, distFcn=distFcn), dim=1, keepdim=True)[0]
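# Hedged, standalone illustration (assumption: norm and poly_edges_length are
# provided by the surrounding geometry library); the snippet below reproduces
# the intended computation directly so the (M, 1) shape contract is visible.
_P = torch.tensor([[0., 0., 0.], [3., 0., 0.], [0., 4., 0.]])  # (N, D) points
_T = torch.tensor([[0, 1, 2]])                                  # (M, T) triangle
_V = _P[_T]                                    # (M, T, D) polygon vertices
_L = (_V.roll(-1, dims=1) - _V).norm(dim=-1)   # (M, T) edge lengths: 3, 5, 4
print(torch.min(_L, dim=1, keepdim=True)[0])   # tensor([[3.]]) -> shape (M, 1)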
|
efa68aa752d0f3c1efc29a846f06e006bd8bceb9
| 6,999
|