content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def shuffle_data(data):
    """Shuffle every array in ``data`` in unison.

    The same RNG state is restored before each in-place shuffle, so every
    entry ends up permuted by the identical index order.

    :param data: mapping of name -> array-like, shuffled in place
    :return: the same mapping, with each entry shuffled consistently
    """
    saved_state = np.random.get_state()
    for key in data:
        np.random.set_state(saved_state)
        np.random.shuffle(data[key])
    return data
def index(current_user=None):
    """ Display home page """
    # A 'True' cookie flags that the previous login attempt failed.
    had_error = request.cookies.get('last_attempt_error') == 'True'
    return render_template(
        'homepage.html',
        username=current_user['name'],
        logged_in=current_user['is_authenticated'],
        display_error=had_error,
        login_banner=APP.config['LOGIN_BANNER'],
    )
def save_as_jpeg(img, filename):
    """
    Save a numpy array as a jpeg image
    Attributes
    ----------
    :param img: Image to be saved
    :type img: Numpy array
    """
    # NOTE(review): despite the function name and docstring saying JPEG,
    # this writes '<filename>.png' in PNG format. Confirm which format is
    # actually intended before changing either the name or the extension.
    imsave(filename + '.png', img, 'png')
def _read_data_file(data_path):
    """
    Reads a data file into a :class:`pandas:pandas.DataFrame` object.
    Parameters
    ----------
    data_path : str
        Path of the data file with extension. Supports ``.csv``, ``.xlsx``/``.xls``, ``.json``, and ``.xml``.
    Returns
    -------
    :class:`pandas:pandas.DataFrame`
        A dataframe object of the file data.
    Raises
    ------
    ValueError
        If the file extension is not one of the supported formats.
        (Previously an unsupported extension crashed with an opaque
        ``UnboundLocalError`` because ``out`` was never assigned.)
    Author
    ------
    Richard Wen <rrwen.dev@gmail.com>
    Example
    -------
    .. code::
        from msdss_data_api.cli import _read_data_file
        data = _read_data_file('path/to/data.json')
    """
    data_ext = os.path.splitext(data_path)[1].lower()
    # Dispatch table keeps extension -> reader mapping in one place.
    readers = {
        '.csv': pandas.read_csv,
        '.xlsx': pandas.read_excel,
        '.xls': pandas.read_excel,
        '.json': pandas.read_json,
        '.xml': pandas.read_xml,
    }
    if data_ext not in readers:
        raise ValueError(
            f'Unsupported file extension "{data_ext}" - expected one of '
            '.csv, .xlsx, .xls, .json, .xml')
    return readers[data_ext](data_path)
def disable_admin_access(session, return_type=None, **kwargs):
    """
    Disable admin access via the Zadara API.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.
    :type return_type: str
    :param return_type: If set to the string 'json', a JSON string is
        returned.  Otherwise a Python dictionary is returned (the
        default).
    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    return session.post_api(
        path="/api/users/admin_access/disable.json",
        return_type=return_type,
        **kwargs)
def _get_bbox_indices(x, y, bbox):
    """
    Convert bbox values to array slice bounds.

    :param x, y: arrays with the X, Y coordinates
    :param bbox: (minx, miny, maxx, maxy) values
    :return: (x_start, x_stop, y_start, y_stop) indices covering the bbox
    """
    minx, miny, maxx, maxy = bbox
    # Indices of all coordinates falling inside the bounding box.
    in_x = np.nonzero((x >= minx) & (x <= maxx))[0]
    in_y = np.nonzero((y >= miny) & (y <= maxy))[0]
    # Stop bounds are exclusive, hence the +1.
    return in_x[0], in_x[-1] + 1, in_y[0], in_y[-1] + 1
def skill_competencies():
    """
    Called by S3OptionsFilter to provide the competency options for a
    particular Skill Type
    """
    # request.args[0] is expected to carry the id of the selected skill.
    table = s3db.hrm_skill
    ttable = s3db.hrm_skill_type
    rtable = s3db.hrm_competency_rating
    # Join: skill -> its skill type -> competency ratings of that type.
    query = (table.id == request.args[0]) & \
            (table.skill_type_id == ttable.id) & \
            (rtable.skill_type_id == table.skill_type_id)
    records = db(query).select(rtable.id,
                               rtable.name,
                               orderby = ~rtable.priority,  # highest priority first
                               )
    response.headers["Content-Type"] = "application/json"
    return records.json()
def test_add_request_names(client, jwt, app):
    """
    Setup: create and persist a draft NR holding a single name.
    Test: map two additional names onto the NR via NameRequestService.
    Validate: the reloaded NR contains the expected mapped names.
    :param client:
    :param jwt:
    :param app:
    :return:
    """
    do_test_cleanup()
    # Initialize the service
    nr_svc = NameRequestService()
    """
    Test adding two new names
    """
    # We will need a base NR
    nr = build_nr(State.DRAFT, {}, [test_names_no_id[0]])
    # We can't save the NR without an NR Num
    nr.nrNum = 'NR L000001'
    # Save to DB so PK sequences are updated
    nr.save_to_db()
    db.session.flush()
    nr = Request.find_by_nr(nr.nrNum)
    # Set data to the service, all we need to test is names so just provide what's necessary
    nr_svc.request_data = {
        'names': [
            # Two fresh names (distinct from the one saved above)
            test_names_no_id[1],
            test_names_no_id[2]
        ]
    }
    # Build the names
    nr = nr_svc.map_request_names(nr)
    nr.save_to_db()
    nr = Request.find_by_nr(nr.nrNum)
    # Convert to dict
    nr = nr.json()
    assert nr is not None
    # Test the names
    assert_names_are_mapped_correctly(nr_svc.request_data.get('names'), nr.get('names'))
    # Clean up
    do_test_cleanup()
def calc_field_changes(element, np_id):
    """
    Walk up the tree of geo-locations, finding the new parents.

    These will be set onto all the museumobjects.

    :param element: geo-location model instance to start the walk from
    :param np_id: id of ``element`` (passed recursively by callers)
    :return: dict mapping related field name -> element id for the chain
    """
    related_field = element._meta.concrete_model.museumobject_set.\
        related.field.name
    changes = {related_field: element.id}
    # Recurse upward while a parent link exists.
    if hasattr(element, 'parent'):
        changes.update(
            calc_field_changes(element.parent, element.parent.id))
    return changes
def index(request, _):
    """
    Route an incoming request by HTTP method.

    ``request``: the Django request object.

    GET renders the index page, POST forwards to the WeChat message
    sender; any other method receives a JSON error response.
    """
    if request.method in ('GET', 'get'):
        return index_page(request)
    elif request.method in ('POST', 'post'):
        return send_wxmsg(request)
    else:
        rsp = JsonResponse({'code': -1, 'errorMsg': '请求方式错误'},
                           json_dumps_params={'ensure_ascii': False})
        logger.info('response result: {}'.format(rsp.content.decode('utf-8')))
        # BUG FIX: the JSON error response was previously built and logged
        # but then discarded in favour of render(request, 'index.html');
        # return the error response the branch constructs.
        return rsp
def get_daisy_client():
    """Get Daisy client instance."""
    # Endpoint comes from the [discoverd] section of the config.
    return daisy_client.Client(
        version=1, endpoint=conf.get('discoverd', 'daisy_url'))
def intermediate_statistics(
    scores, ground_truth, audio_durations, *,
    segment_length=1., time_decimals=6, num_jobs=1,
):
    """
    Args:
        scores (dict, str, pathlib.Path): dict of SED score DataFrames
            (cf. sed_scores_eval.utils.scores.create_score_dataframe)
            or a directory path (as str or pathlib.Path) from where the SED
            scores can be loaded.
        ground_truth (dict, str or pathlib.Path): dict of lists of ground truth
            event tuples (onset, offset, event label) for each audio clip or a
            file path from where the ground truth can be loaded.
        audio_durations: The duration of each audio file in the evaluation set.
        segment_length: the segment length of the segments that are to be
            evaluated.
        time_decimals (int): the decimal precision used for evaluation. If
            chosen to high detected or ground truth events that have
            onsets or offsets right on a segment boundary may swap over to the
            adjacent segment because of small deviations due to limited
            floating point precision.
        num_jobs (int): the number of processes to use. Default is 1 in which
            case no multiprocessing is used.
    Returns:
        dict mapping each event class to a tuple of
        (unique sorted segment scores, {'tps', 'fps', 'tns', 'n_ref'}).
    """
    if not isinstance(num_jobs, int) or num_jobs < 1:
        raise ValueError(
            f'num_jobs has to be an integer greater or equal to 1 but '
            f'{num_jobs} was given.'
        )
    scores, ground_truth, audio_ids = parse_inputs(scores, ground_truth)
    # Durations may be given as a file path; load them if so.
    if isinstance(audio_durations, (str, Path)):
        audio_durations = Path(audio_durations)
        assert audio_durations.is_file(), audio_durations
        audio_durations = read_audio_durations(audio_durations)
    if audio_durations is not None and not audio_durations.keys() == set(audio_ids):
        raise ValueError(
            f'audio_durations audio ids do not match audio ids in scores. '
            f'Missing ids: {set(audio_ids) - audio_durations.keys()}. '
            f'Additional ids: {audio_durations.keys() - set(audio_ids)}.'
        )
    _, event_classes = validate_score_dataframe(scores[audio_ids[0]])
    single_label_ground_truths = multi_label_to_single_label_ground_truths(
        ground_truth, event_classes)
    def worker(audio_ids, output_queue=None):
        # Accumulates per-class segment scores/targets for a shard of clips.
        segment_scores = None
        segment_targets = None
        for audio_id in audio_ids:
            scores_k = scores[audio_id]
            timestamps, _ = validate_score_dataframe(
                scores_k, event_classes=event_classes)
            timestamps = np.round(timestamps, time_decimals)
            if segment_scores is None:
                segment_scores = {class_name: [] for class_name in event_classes}
                segment_targets = {class_name: [] for class_name in event_classes}
            scores_k = scores_k[event_classes].to_numpy()
            if audio_durations is None:
                # Fall back to last timestamp / latest ground-truth offset.
                duration = max(
                    [timestamps[-1]] + [t_off for t_on, t_off, _ in ground_truth[audio_id]]
                )
            else:
                duration = audio_durations[audio_id]
            n_segments = int(np.ceil(duration / segment_length))
            segment_boundaries = np.round(
                np.arange(n_segments+1) * segment_length,
                time_decimals
            )
            segment_onsets = segment_boundaries[:-1]
            segment_offsets = segment_boundaries[1:]
            for class_name in event_classes:
                gt = single_label_ground_truths[class_name][audio_id]
                if len(gt) == 0:
                    segment_targets[class_name].append(
                        np.zeros(n_segments, dtype=np.bool_))
                else:
                    # A segment is positive if it overlaps any gt event.
                    segment_targets[class_name].append(
                        np.any([
                            (segment_onsets < gt_offset)
                            * (segment_offsets > gt_onset)
                            * (segment_offsets > segment_onsets)
                            for gt_onset, gt_offset in
                            single_label_ground_truths[class_name][audio_id]
                        ], axis=0)
                    )
            for i in range(n_segments):
                idx_on = get_first_index_where(
                    timestamps, "gt", segment_onsets[i]) - 1
                idx_on = max(idx_on, 0)
                idx_off = get_first_index_where(
                    timestamps, "geq", segment_offsets[i])
                idx_off = min(idx_off, len(timestamps)-1)
                if idx_off <= idx_on:
                    scores_ki = np.zeros(scores_k.shape[-1])
                else:
                    # Segment score = max frame score within the segment.
                    scores_ki = np.max(scores_k[idx_on:idx_off], axis=0)
                for c, class_name in enumerate(event_classes):
                    segment_scores[class_name].append(scores_ki[c])
        if output_queue is not None:
            output_queue.put((segment_scores, segment_targets))
        return segment_scores, segment_targets
    if num_jobs == 1:
        segment_scores, segment_targets = worker(audio_ids)
    else:
        # Shard the clip ids over worker processes; results come back
        # through a queue and are merged in arrival order.
        queue = multiprocessing.Queue()
        shard_size = int(np.ceil(len(audio_ids) / num_jobs))
        shards = [
            audio_ids[i*shard_size:(i+1)*shard_size] for i in range(num_jobs)
            if i*shard_size < len(audio_ids)
        ]
        processes = [
            multiprocessing.Process(
                target=worker, args=(shard, queue), daemon=True,
            )
            for shard in shards
        ]
        try:
            for p in processes:
                p.start()
            segment_scores, segment_targets = None, None
            count = 0
            while count < len(shards):
                seg_scores_i, seg_targets_i = queue.get()
                if segment_scores is None:
                    segment_scores = seg_scores_i
                    segment_targets = seg_targets_i
                else:
                    for class_name in segment_scores:
                        segment_scores[class_name].extend(seg_scores_i[class_name])
                        segment_targets[class_name].extend(seg_targets_i[class_name])
                count += 1
        finally:
            for p in processes:
                p.terminate()
    stats = {}
    for class_name in event_classes:
        # Append inf so the highest threshold detects nothing.
        segment_scores[class_name] = np.array(segment_scores[class_name]+[np.inf])
        sort_idx = np.argsort(segment_scores[class_name])
        segment_scores[class_name] = segment_scores[class_name][sort_idx]
        segment_targets[class_name] = np.concatenate(
            segment_targets[class_name]+[np.zeros(1)])[sort_idx]
        # tps[i] = positives with score >= segment_scores[i].
        tps = np.cumsum(segment_targets[class_name][::-1])[::-1]
        n_sys = np.arange(len(tps))[::-1]
        segment_scores[class_name], unique_idx = np.unique(segment_scores[class_name], return_index=True)
        n_ref = tps[0]
        fns = n_ref - tps
        tns = n_sys[0] - n_sys - fns
        # NOTE(review): 'tns' is not reduced by unique_idx, unlike
        # 'tps'/'fps' — confirm whether that asymmetry is intended.
        stats[class_name] = {
            'tps': tps[unique_idx],
            'fps': n_sys[unique_idx] - tps[unique_idx],
            'tns': tns,
            'n_ref': n_ref,
        }
    return {
        class_name: (segment_scores[class_name], stats[class_name])
        for class_name in event_classes
    }
def test_get_default_db_uri_from_env(monkeypatch):
    """Default db uri can be set from the env var."""
    monkeypatch.setenv("POKEDEX_DB_ENGINE", "/path/to/db")
    uri, origin = default.db_uri_with_origin()
    assert uri == "/path/to/db"
    assert origin == "environment"
def vnorm(v1):
    """vnorm(ConstSpiceDouble [3] v1) -> SpiceDouble

    Thin wrapper delegating to the compiled _cspyce0 extension module.
    """
    return _cspyce0.vnorm(v1)
def mean_iou(y_true, y_pred):
    """Mean IoU metric averaged over thresholds 0.5 to 0.95 (step 0.05).

    NOTE(review): the original docstring said "F2 loss", which does not
    match the computation below (tf.metrics.mean_iou).
    """
    prec = []
    for t in np.arange(0.5, 1.0, 0.05):
        # Binarize predictions at threshold t before scoring.
        y_pred_ = tf.to_int32(y_pred > t)
        score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
        # mean_iou creates local variables that must be initialized.
        K.get_session().run(tf.local_variables_initializer())
        with tf.control_dependencies([up_opt]):
            score = tf.identity(score)
        prec.append(score)
    return K.mean(K.stack(prec), axis=0)
def _encode_string_parts(value, encodings):
    """Convert a unicode string into a byte string using the given
    list of encodings.
    This is invoked if `encode_string` failed to encode `value` with a single
    encoding. We try instead to use different encodings for different parts
    of the string, using the encoding that can encode the longest part of
    the rest of the string as we go along.
    Parameters
    ----------
    value : text type
        The unicode string as presented to the user.
    encodings : list
        The encodings needed to encode the string as a list of Python
        encodings, converted from the encodings in Specific Character Set.
    Returns
    -------
    byte string
        The encoded string, including the escape sequences needed to switch
        between different encodings.
    Raises
    ------
    ValueError
        If `value` could not be encoded with the given encodings.
    """
    encoded = bytearray()
    unencoded_part = value
    while unencoded_part:
        # find the encoding that can encode the longest part of the rest
        # of the string still to be encoded
        max_index = 0
        best_encoding = None
        for encoding in encodings:
            try:
                unencoded_part.encode(encoding)
                # if we get here, the whole rest of the value can be encoded
                best_encoding = encoding
                max_index = len(unencoded_part)
                break
            except UnicodeError as e:
                if e.start > max_index:
                    # e.start is the index of first character failed to encode
                    max_index = e.start
                    best_encoding = encoding
        # none of the given encodings can encode the first character - give up
        if best_encoding is None:
            raise ValueError()
        # encode the part that can be encoded with the found encoding
        encoded_part = unencoded_part[:max_index].encode(best_encoding)
        if best_encoding not in handled_encodings:
            # prepend the escape sequence that switches the decoder to
            # best_encoding (empty bytes if no code is registered for it)
            encoded += ENCODINGS_TO_CODES.get(best_encoding, b'')
        encoded += encoded_part
        # set remaining unencoded part of the string and handle that
        unencoded_part = unencoded_part[max_index:]
    # unencoded_part is empty - we are done, return the encoded string
    return encoded
def get_files(path, pattern):
    """
    Recursively find all files rooted in <path> that match the regexp <pattern>

    :param path: file or directory path to search
    :param pattern: regular expression matched (via re.search) against
        each file's basename
    :return: list of matching file paths
    """
    matches = []
    # Base case: path is just a file. Check BEFORE appending '/' -
    # previously the trailing slash was added first, so isfile() was
    # always False and this branch was unreachable.
    if os.path.isfile(path):
        if re.search(pattern, os.path.basename(path)) is not None:
            matches.append(path)
        return matches
    # General case: anything that is not a directory yields no files.
    if not os.path.isdir(path):
        return matches
    if not path.endswith('/'):
        path += '/'
    for entry in os.listdir(path):
        entry = path + entry
        if os.path.isfile(entry):
            if re.search(pattern, os.path.basename(entry)) is not None:
                matches.append(entry)
        elif os.path.isdir(entry):  # BUG FIX: was os.path.isdir(path)
            matches.extend(get_files(entry + '/', pattern))
    return matches
def SocketHandler(qt):
    """ `SocketHandler` wraps a websocket connection.
    HTTP GET /ws

    Returns a WebSocketHandler subclass bound to ``qt``: sockets are
    registered with ``qt`` on open, removed on close, and incoming
    messages are forwarded to ``qt.command``.
    """
    class _handler(websocket.WebSocketHandler):
        def check_origin(self, origin):
            # Accept connections from any origin.
            return True
        def open(self):
            qt.log("new socket open ...")
            qt.register_socket(self)
        def on_close(self):
            qt.remove_socket(self)
        def on_message(self, msg):
            qt.log("Got socket command: %s" % (msg))
            qt.command(msg)
    return _handler
def var_swap(asset: Asset, tenor: str, forward_start_date: Optional[str] = None,
             *, source: str = None, real_time: bool = False) -> Series:
    """
    Strike such that the price of an uncapped variance swap on the underlying index is zero at inception. If
    forward start date is provided, then the result is a forward starting variance swap.
    :param asset: asset object loaded from security master
    :param tenor: relative date representation of expiration date e.g. 1m
    :param forward_start_date: forward start date e.g. 2m, 1y; defaults to none
    :param source: name of function caller
    :param real_time: whether to retrieve intraday data instead of EOD
    :return: variance swap strike curve (spot or forward starting)
    """
    if forward_start_date is None:
        # Spot-starting swap: direct dataset lookup for the tenor.
        _logger.debug('where tenor=%s', tenor)
        where = dict(tenor=[tenor])
        df = _get_var_swap_df(asset, where, source, real_time)
        series = ExtendedSeries() if df.empty else ExtendedSeries(df[Fields.VAR_SWAP.value])
        series.dataset_ids = getattr(df, 'dataset_ids', ())
        return series
    else:
        if not isinstance(forward_start_date, str):
            raise MqTypeError('forward_start_date must be a relative date')
        # Forward swap over [y, y+x] is derived from spot swaps with
        # tenors y and z = x + y (all in months).
        x = _tenor_to_month(tenor)
        y = _tenor_to_month(forward_start_date)
        z = x + y
        yt = _month_to_tenor(y)
        zt = _month_to_tenor(z)
        tenors = _var_swap_tenors(asset)
        if yt not in tenors or zt not in tenors:
            # Required tenors unavailable: return an empty series.
            series = ExtendedSeries()
            series.dataset_ids = ()
            return series
        _logger.debug('where tenor=%s', f'{yt},{zt}')
        where = dict(tenor=[yt, zt])
        df = _get_var_swap_df(asset, where, source, real_time)
        dataset_ids = getattr(df, 'dataset_ids', ())
        if df.empty:
            series = ExtendedSeries()
        else:
            grouped = df.groupby(Fields.TENOR.value)
            try:
                yg = grouped.get_group(yt)[Fields.VAR_SWAP.value]
                zg = grouped.get_group(zt)[Fields.VAR_SWAP.value]
            except KeyError:
                _logger.debug('no data for one or more tenors')
                series = ExtendedSeries()
                series.dataset_ids = ()
                return series
            # Additivity of variance: x*fwd^2 = z*zg^2 - y*yg^2.
            series = ExtendedSeries(sqrt((z * zg ** 2 - y * yg ** 2) / x))
        series.dataset_ids = dataset_ids
        return series
def extract_flow_global_roi(flow_x, flow_y, box):
    """
    Create global-ROI-cropped flow images (for numpy images).

    :param flow_x: numpy array, horizontal flow component
    :param flow_y: numpy array, vertical flow component
    :param box: list of [xmin, ymin, xmax, ymax]
    :return: tuple (x_roi, y_roi), or None if either crop failed
    """
    rois = tuple(extract_global_roi(component, box)
                 for component in (flow_x, flow_y))
    if any(roi is None for roi in rois):
        return None
    return rois
def patch_shell(response=None, error=False):
    """Mock the `AdbDeviceTcpFake.shell` and `DeviceFake.shell` methods.

    Returns a dict with 'python' and 'server' patchers: successful mocks
    returning ``response`` when ``error`` is False, otherwise mocks that
    raise (AttributeError for python, ConnectionResetError for server).
    """
    def shell_success(self, cmd):
        """Mock the `AdbDeviceTcpFake.shell` and `DeviceFake.shell` methods when they are successful."""
        self.shell_cmd = cmd
        return response
    def shell_fail_python(self, cmd):
        """Mock the `AdbDeviceTcpFake.shell` method when it fails."""
        self.shell_cmd = cmd
        raise AttributeError
    def shell_fail_server(self, cmd):
        """Mock the `DeviceFake.shell` method when it fails."""
        self.shell_cmd = cmd
        raise ConnectionResetError
    if not error:
        return {"python": patch("{}.AdbDeviceTcpFake.shell".format(__name__), shell_success), "server": patch("{}.DeviceFake.shell".format(__name__), shell_success)}
    return {"python": patch("{}.AdbDeviceTcpFake.shell".format(__name__), shell_fail_python), "server": patch("{}.DeviceFake.shell".format(__name__), shell_fail_server)}
def __setAdjacent_square__(self, pos):
    """
    Sets all adjacencies in the map for a map with square tiles.
    """
    self.__checkIndices__(pos)
    i, j = pos
    # A neighbour is kept only if it lies on the board and is enabled.
    def exists(cell):
        r, c = cell
        in_bounds = 0 <= r < self.__numrows__ and 0 <= c < self.__numcols__
        return in_bounds and not self.__isdisabled__[r][c]
    candidates = ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1))
    self.__adjacent__[i][j] = [cell for cell in candidates if exists(cell)]
def model_definition_nested_events():
    """Test model for state- and parameter-dependent heavisides.
    ODEs
    ----
    d/dt x_1:
        inflow_1 - decay_1 * x1
    d/dt x_2:
        - decay_2 * x_2
    Events:
    -------
    event_1:
        trigger: x_1 > inflow_1 / decay_2
        bolus: [[ 0],
                [ -1 / time]]
    event_2:
        trigger: x_2 < -0.5
        bolus: [[ bolus],
                [ bolus]]
    """
    # Model components
    species = ['x_1', 'x_2']
    initial_assignments = {
        'x_1': 'k1',
        'x_2': 'k2',
    }
    rate_rules = {
        'x_1': 'inflow_1 - decay_1 * x_1',
        'x_2': '- decay_2 * x_2',
    }
    parameters = {
        'k1': 0,
        'k2': 0,
        'inflow_1': 4,
        'decay_1': 2,
        'decay_2': 5,
        'bolus': 0,  # for bolus != 0, nested event sensitivities are off!
    }
    events = {
        'event_1': {
            'trigger': 'x_1 > inflow_1 / decay_2',
            'target': 'x_2',
            'assignment': 'x_2 - 1 / time'
        },
        'event_2': {
            'trigger': 'x_2 < - 0.5',
            'target': ['x_1', 'x_2'],
            'assignment': ['x_1 + bolus', 'x_2 + bolus'],
        }
    }
    timepoints = np.linspace(0, 1, 101)
    # Analytical solution
    def x_pected(t, k1, k2, inflow_1, decay_1, decay_2, bolus):
        # gather temporary variables
        # event_time = x_1 > inflow_1 / decay_2
        equil = inflow_1 / decay_1
        tmp1 = inflow_1 / decay_2 - inflow_1 / decay_1
        tmp2 = k1 - inflow_1 / decay_1
        event_time = (- 1 / decay_1) * np.log( tmp1 / tmp2)
        def get_early_x(t):
            # compute dynamics before event
            x_1 = equil * (1 - np.exp(-decay_1 * t)) + k1*np.exp(-decay_1 * t)
            x_2 = k2 * np.exp(-decay_2 * t)
            return np.array([[x_1], [x_2]])
        if t < event_time:
            x = get_early_x(t).flatten()
        else:
            # compute state after event
            x_tau = get_early_x(event_time)
            tau_x1 = x_tau[0] + bolus
            tau_x2 = x_tau[1] - 1 / event_time + bolus
            # compute dynamics after event
            inhom = np.exp(decay_1 * event_time) * tau_x1
            x_1 = equil * (1 - np.exp(decay_1 * (event_time - t))) + \
                inhom * np.exp(- decay_1 * t)
            x_2 = tau_x2 * np.exp(decay_2 * event_time) * np.exp(-decay_2 * t)
            x = np.array([[x_1], [x_2]])
        return x.flatten()
    def sx_pected(t, parameters):
        # get sx, w.r.t. parameters, via central finite differences
        sx = []
        for ip in parameters:
            eps = 1e-6
            perturbed_params = deepcopy(parameters)
            perturbed_params[ip] += eps
            sx_p = x_pected(t, **perturbed_params)
            perturbed_params[ip] -= 2*eps
            sx_m = x_pected(t, **perturbed_params)
            sx.append((sx_p - sx_m) / (2 * eps))
        return np.array(sx)
    return (
        initial_assignments,
        parameters,
        rate_rules,
        species,
        events,
        timepoints,
        x_pected,
        sx_pected
    )
def status():
    """Show kdump status"""
    # Report each facet of the kdump configuration in turn.
    for option in ("--status", "--memory", "--num_dumps", "--files"):
        clicommon.run_command("sonic-kdump-config " + option)
async def address_balance_history(
    request: Request,
    address: Address,
    token_id: TokenID = Query(None, description="Optional token id"),
    timestamps: bool = Query(
        False, description="Include timestamps in addition to block heights"
    ),
    flat: bool | None = Query(True, description="Return data as flat arrays."),
    limit: int | None = Query(50, gt=0, le=10000),
    offset: int | None = Query(0, ge=0),
    desc: bool | None = Query(True, description="Most recent first"),
):
    """
    ERG or token balance history of an address.

    Returns flat arrays of heights (and optionally timestamps) and running
    balances when ``flat`` is true, otherwise the raw rows. 404 if the
    address has no history.
    """
    # Only static SQL fragments are interpolated here; all user-supplied
    # values go through parameters $1..$4.
    query = f"""
        select d.height
            {', h.timestamp' if timestamps else ''}
            , sum(d.value) over (order by d.height) as balance
        from bal.{'erg' if token_id is None else 'tokens'}_diffs d
        join core.headers h on h.height = d.height
        where d.address = $1
            {'' if token_id is None else 'and token_id = $4'}
        order by 1 {'desc' if desc else ''}
        limit $2 offset $3;
    """
    # $4 (token id) is only appended when the token branch is active.
    opt_args = [] if token_id is None else [token_id]
    async with request.app.state.db.acquire() as conn:
        rows = await conn.fetch(query, address, limit, offset, *opt_args)
    if not rows:
        raise HTTPException(status_code=404, detail=DETAIL_404)
    if flat:
        if timestamps:
            return {
                "heights": [r["height"] for r in rows],
                "timestamps": [r["timestamp"] for r in rows],
                "balances": [r["balance"] for r in rows],
            }
        else:
            return {
                "heights": [r["height"] for r in rows],
                "balances": [r["balance"] for r in rows],
            }
    else:
        return rows
def load(as_pandas=None):
    """
    Loads the Grunfeld data and returns a Dataset class.
    Parameters
    ----------
    as_pandas : bool
        Flag indicating whether to return pandas DataFrames and Series
        or numpy recarrays and arrays.  If True, returns pandas.
    Returns
    -------
    Dataset
        See DATASET_PROPOSAL.txt for more information.
    Notes
    -----
    raw_data has the firm variable expanded to dummy variables for each
    firm (ie., there is no reference dummy)
    """
    # Delegate: load as pandas, then optionally convert to numpy.
    return du.as_numpy_dataset(load_pandas(), as_pandas=as_pandas)
def rule_if_system(system_rule, non_system_rule, context):
    """Helper function to pick a rule based on system-ness of context.
    This can be used (with functools.partial) to choose between two
    rule names, based on whether or not the context has system
    scope. Specifically if we will fail the parent of a nested policy
    check based on scope_types=['project'], this can be used to choose
    the parent rule name for the error message check in
    common_policy_check().
    """
    return system_rule if context.system_scope else non_system_rule
def find_inactive_ranges(note_sequence):
    """Returns ranges where no notes are active in the note_sequence."""
    # Both stacks are sorted descending so the earliest event sits at the
    # end and can be removed cheaply with pop().
    onsets = sorted(
        note_sequence.notes, key=lambda note: note.start_time, reverse=True)
    offsets = sorted(
        note_sequence.notes, key=lambda note: note.end_time, reverse=True)
    active_count = 0
    boundaries = []
    first_onset = onsets[-1].start_time
    if first_onset > 0:
        # Leading silence before the first note.
        boundaries.extend([0., first_onset])
    onsets.pop()
    active_count += 1
    # Sweep through the remaining note-on / note-off events in time order.
    while onsets or offsets:
        if onsets and (onsets[-1].start_time < offsets[-1].end_time):
            if active_count == 0:
                # A silence ends where this note starts.
                boundaries.append(onsets[-1].start_time)
            active_count += 1
            onsets.pop()
        else:
            active_count -= 1
            if active_count == 0:
                # A silence begins where the last active note ends.
                boundaries.append(offsets[-1].end_time)
            offsets.pop()
    # If the final silence has zero length, drop its opening boundary
    # instead of emitting an empty range.
    if boundaries[-1] < note_sequence.total_time:
        boundaries.append(note_sequence.total_time)
    else:
        boundaries.pop()
    assert len(boundaries) % 2 == 0
    return [(boundaries[2 * i], boundaries[2 * i + 1])
            for i in range(len(boundaries) // 2)]
def is_connected(G):
    """Returns True if the graph is connected, False otherwise.
    Parameters
    ----------
    G : NetworkX Graph
        An undirected graph.
    Returns
    -------
    connected : bool
        True if the graph is connected, false otherwise.
    Raises
    ------
    NetworkXNotImplemented:
        If G is directed.
    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> print(nx.is_connected(G))
    True
    See Also
    --------
    is_strongly_connected
    is_weakly_connected
    is_semiconnected
    is_biconnected
    connected_components
    Notes
    -----
    For undirected graphs only.
    """
    if len(G) == 0:
        # BUG FIX: the message was previously passed as two separate
        # exception args, producing a tuple instead of a single string.
        raise nx.NetworkXPointlessConcept(
            'Connectivity is undefined for the null graph.')
    # Connected iff a BFS from an arbitrary node reaches every node.
    return sum(1 for node in _plain_bfs(G, arbitrary_element(G))) == len(G)
def remove_collaborations():
    """ Main function that governs the deletion of metadata in a collaborations
    within a specified Synergos network
    """
    st.title("Orchestrator - Remove existing collaboration(s)")
    ##################################################
    # Step 1: Connect to your specified orchestrator #
    ##################################################
    st.header("Step 1: Connect to an Orchestrator")
    collab_driver = render_orchestrator_inputs()
    ######################################################################
    # Step 2: Pull collaboration information from specified orchestrator #
    ######################################################################
    st.header("Step 2: Target your collaboration of interest")
    # Without a connected driver there is nothing to list.
    if collab_driver:
        collab_data = collab_driver.collaborations.read_all().get('data', [])
        collab_ids = [collab['key']['collab_id'] for collab in collab_data]
    else:
        collab_ids = []
    with st.beta_container():
        selected_collab_id = st.selectbox(
            label="Collaboration ID:",
            options=collab_ids,
            help="""Select a collaboration to peruse."""
        )
        if collab_driver:
            selected_collab_data = collab_driver.collaborations.read(
                collab_id=selected_collab_id
            ).get('data', {})
        else:
            selected_collab_data = {}
        if selected_collab_data:
            selected_collab_data.pop('relations')   # no relations rendered!
        with st.beta_expander("Collaboration Details"):
            updated_collab = collab_renderer.display(selected_collab_data)
    ################################
    # Step 3: Remove collaboration #
    ################################
    st.header("Step 3: Submit removal request for collaboration ")
    is_confirmed = render_confirmation_form(
        data=updated_collab,
        r_type=R_TYPE,
        r_action="removal",
        use_warnings=False
    )
    if is_confirmed:
        collab_driver.collaborations.delete(collab_id=selected_collab_id)
        # NOTE(review): st.echo is a context manager for displaying code;
        # st.success/st.write is probably intended here — confirm.
        st.echo(f"Collaboration '{selected_collab_id}' has been deleted.")
def DeleteObjects(objects_to_delete, num_threads=DEFAULT_NUM_THREADS,
                  show_progress_bar=False):
    """Delete the given Cloud Storage objects.
    Uses the appropriate parallelism (multi-process, multi-thread, both, or
    synchronous).
    Args:
      objects_to_delete: list of ObjectDeleteTask
      num_threads: int (optional), the number of threads to use.
      show_progress_bar: bool. If true, show a progress bar to the users when
        deleting files.
    """
    label = None
    if show_progress_bar:
        num_objects = len(objects_to_delete)
        label = 'Deleting {} {} from Google Cloud Storage'.format(
            num_objects, text.Pluralize(num_objects, 'object'))
    ExecuteTasks(objects_to_delete, num_threads, label)
def setup_routing(app):
    """
    Configure url routing for application
    :param app: The bottle application object
    """
    routes = (
        ('/api/access', 'POST', access.login),
        ('/api/access', 'DELETE', access.logout),
        ('/api/users', 'GET', users.get_personal_info),
        ('/api/users', 'POST', users.create_new_user),
        ('/api/users', 'PUT', users.change_personal_info),
        ('/api/users', 'DELETE', users.delete_user),
        ('/api/users/<user_id:int>', 'GET', users.get_user_info),
        ('/api/users/nearby', 'GET', users.get_nearby_users),
        ('/api/devices', 'GET', devices.get_devices),
        ('/api/devices', 'POST', devices.add_new_device),
        ('/api/observations', 'POST', observations.record_observation),
    )
    for path, method, handler in routes:
        app.route(path, method, handler)
def show_result(img,
                result,
                skeleton=None,
                kpt_score_thr=0.3,
                bbox_color=None,
                pose_kpt_color=None,
                pose_limb_color=None,
                radius=4,
                thickness=1,
                font_scale=0.5,
                win_name='',
                show=False,
                show_keypoint_weight=False,
                wait_time=0,
                out_file=None):
    """Draw `result` over `img`.
    Args:
        img (str or Tensor): The image to be displayed.
        result (list[dict]): The results to draw over `img`
            (bbox_result, pose_result).
        skeleton (list[list]): The connection of keypoints.
        kpt_score_thr (float, optional): Minimum score of keypoints
            to be shown. Default: 0.3.
        pose_kpt_color (np.array[Nx3]`): Color of N keypoints.
            If None, do not draw keypoints.
        pose_limb_color (np.array[Mx3]): Color of M limbs.
            If None, do not draw limbs.
        radius (int): Radius of circles.
        thickness (int): Thickness of lines.
        font_scale (float): Font scales of texts.
        win_name (str): The window name.
        show (bool): Whether to show the image. Default: False.
        show_keypoint_weight (bool): Whether to change the transparency
            using the predicted confidence scores of keypoints.
        wait_time (int): Value of waitKey param.
            Default: 0.
        out_file (str or None): The filename to write the image.
            Default: None.
    Returns:
        Tensor: Visualized image only if not `show` or `out_file`
    """
    # NOTE(review): cv2.imread requires a file path, so despite the
    # docstring saying "str or Tensor", only a path works here — confirm.
    img = cv2.imread(img)
    # Reverse the channel order (cv2 loads BGR) and copy so we can draw.
    img = img[:,:,::-1]
    img = img.copy()
    img_h, img_w, _ = img.shape
    pose_result = []
    for res in result:
        pose_result.append(res['keypoints'])
    for _, kpts in enumerate(pose_result):
        # draw each point on image
        if pose_kpt_color is not None:
            assert len(pose_kpt_color) == len(kpts)
            for kid, kpt in enumerate(kpts):
                x_coord, y_coord, kpt_score = int(kpt[0]), int(
                    kpt[1]), kpt[2]
                if kpt_score > kpt_score_thr:
                    if show_keypoint_weight:
                        # Blend the drawn circle with transparency
                        # proportional to the keypoint confidence.
                        img_copy = img.copy()
                        r, g, b = pose_kpt_color[kid]
                        cv2.circle(img_copy, (int(x_coord), int(y_coord)),
                                   radius, (int(r), int(g), int(b)), -1)
                        transparency = max(0, min(1, kpt_score))
                        cv2.addWeighted(
                            img_copy,
                            transparency,
                            img,
                            1 - transparency,
                            0,
                            dst=img)
                    else:
                        r, g, b = pose_kpt_color[kid]
                        cv2.circle(img, (int(x_coord), int(y_coord)),
                                   radius, (int(r), int(g), int(b)), -1)
        # draw limbs
        if skeleton is not None and pose_limb_color is not None:
            assert len(pose_limb_color) == len(skeleton)
            for sk_id, sk in enumerate(skeleton):
                # Skeleton indices are 1-based, hence the -1 offsets.
                pos1 = (int(kpts[sk[0] - 1, 0]), int(kpts[sk[0] - 1, 1]))
                pos2 = (int(kpts[sk[1] - 1, 0]), int(kpts[sk[1] - 1, 1]))
                # Draw only if both endpoints lie inside the image and
                # both keypoints clear the score threshold.
                if (pos1[0] > 0 and pos1[0] < img_w and pos1[1] > 0
                        and pos1[1] < img_h and pos2[0] > 0
                        and pos2[0] < img_w and pos2[1] > 0
                        and pos2[1] < img_h
                        and kpts[sk[0] - 1, 2] > kpt_score_thr
                        and kpts[sk[1] - 1, 2] > kpt_score_thr):
                    r, g, b = pose_limb_color[sk_id]
                    if show_keypoint_weight:
                        # Draw the limb as a filled ellipse and blend it
                        # with transparency from the mean endpoint score.
                        img_copy = img.copy()
                        X = (pos1[0], pos2[0])
                        Y = (pos1[1], pos2[1])
                        mX = np.mean(X)
                        mY = np.mean(Y)
                        length = ((Y[0] - Y[1])**2 + (X[0] - X[1])**2)**0.5
                        angle = math.degrees(
                            math.atan2(Y[0] - Y[1], X[0] - X[1]))
                        stickwidth = 2
                        polygon = cv2.ellipse2Poly(
                            (int(mX), int(mY)),
                            (int(length / 2), int(stickwidth)), int(angle),
                            0, 360, 1)
                        cv2.fillConvexPoly(img_copy, polygon,
                                           (int(r), int(g), int(b)))
                        transparency = max(
                            0,
                            min(
                                1, 0.5 *
                                (kpts[sk[0] - 1, 2] + kpts[sk[1] - 1, 2])))
                        cv2.addWeighted(
                            img_copy,
                            transparency,
                            img,
                            1 - transparency,
                            0,
                            dst=img)
                    else:
                        cv2.line(
                            img,
                            pos1,
                            pos2, (int(r), int(g), int(b)),
                            thickness=thickness)
    if show:
        imshow(img, win_name, wait_time)
    if out_file is not None:
        imwrite(img, out_file)
    return img
def D2(X, Y, Y2=None, YT=None):
    """Compute the pointwise (squared) Euclidean distances.

    Arguments:
        X: tensor of shape (n_sample, n_feature).
        Y: tensor of shape (n_center, n_feature).
        Y2: optional precomputed squared norms of Y, shape (1, n_center).
        YT: optional precomputed transpose of Y, shape (n_feature, n_center).
    Returns:
        Tensor of pairwise squared distances, shape (n_sample, n_center).
    """
    # ||x||^2 per row of X.
    sq_x = K.sum(K.square(X), axis=1, keepdims=True)
    if Y2 is None:
        # Reuse X's norms when X and Y are the same tensor object.
        sq_y = sq_x if X is Y else K.sum(K.square(Y), axis=1, keepdims=True)
        Y2 = K.reshape(sq_y, (1, K.shape(Y)[0]))
    if YT is None:
        YT = K.transpose(Y)
    # x^2 + y^2 - 2xy
    return K.reshape(sq_x, (K.shape(X)[0], 1)) + Y2 - 2 * K.dot(X, YT)
def read_table(path):
    """Read a tabular file (CSV or XLSX) into a list of dictionaries.

    The file extension must be ".csv" or ".xlsx"; it determines which
    reader is used. If a list is passed instead of a path, it is checked
    for tabular shape (all dicts share the same keys) and, if valid,
    returned unchanged; otherwise a ValueError is raised.

    Args:
        path (str or list): As 'str', path to a CSV or XLSX file.
            As 'list', an already-parsed table (list of dicts).

    Returns:
        list: List of dictionaries with identical keys representing the
        original file.

    Raises:
        AssertionError: If `path` is neither a string nor a list.
        ValueError: If a list's dicts have mismatched keys, or the file
            extension is not recognized.
    """
    assert isinstance(path, string_types + (list, )), """
{} no es un `path` valido""".format(path)
    # If `path` is a list, return it unchanged when it has tabular shape;
    # otherwise raise.
    if isinstance(path, list):
        if helpers.is_list_of_matching_dicts(path):
            return path
        else:
            raise ValueError("""
La lista ingresada no esta formada por diccionarios con las mismas claves.""")
    # Infer the file format from the extension and dispatch accordingly.
    suffix = path.split(".")[-1]
    if suffix == "csv":
        return _read_csv_table(path)
    elif suffix == "xlsx":
        return _read_xlsx_table(path)
    else:
        raise ValueError("""
{} no es un sufijo reconocido. Pruebe con .csv o .xlsx""".format(suffix))
def spec_from_json_dict(
        json_dict: Dict[str, Any]
) -> FieldSpec:
    """Turn a dictionary into the appropriate FieldSpec object.

    :param dict json_dict: A dictionary with properties.
    :raises InvalidSchemaError: When a required key is missing.
    :returns: An initialised instance of the appropriate FieldSpec
        subclass.
    """
    try:
        # Ignored features short-circuit to an Ignore spec.
        if json_dict.get('ignored', False):
            return Ignore(json_dict['identifier'])
        format_type = json_dict['format']['type']
        spec_type = cast(FieldSpec, FIELD_TYPE_MAP[format_type])
    except KeyError as e:
        # Any missing key (identifier, format, type, or unknown type name)
        # means the schema entry is malformed.
        raise InvalidSchemaError("the feature definition {} is incomplete. Must contain: {}".format(json_dict, e))
    return spec_type.from_json_dict(json_dict)
def hyp_pfq(A, B, x, out=None, n=0):
    """
    This function is decorated weirdly because its extra params are lists.
    """
    # Evaluate pFq with every parameter shifted up by n ...
    shifted_a = [a + n for a in A]
    shifted_b = [b + n for b in B]
    out = np_hyp_pfq(shifted_a, shifted_b, x, out)
    # ... then rescale by the ratio of Pochhammer symbols.
    with np.errstate(invalid='ignore'):
        numer = np.prod([scipy.special.poch(a, n) for a in A])
        denom = np.prod([scipy.special.poch(b, n) for b in B])
        out *= numer
        out /= denom
    return out
def create_new_token(
        data: dict,
        expires_delta: Optional[timedelta] = None,
        page_only: bool = False):
    """Create a signed JWT with the given claims, scope, and expiry.

    :param data: Claims to embed in the token (copied, not mutated).
    :param expires_delta: Token lifetime. Accepts a ``timedelta`` as the
        annotation declares; an int/float is treated as minutes for
        backward compatibility with existing callers.
    :param page_only: When True, issue a never-expiring, page-scoped token.
    :return: The encoded JWT string.
    """
    to_encode = data.copy()
    if page_only:
        # Page-only tokens never expire.
        expires = datetime.max
    elif expires_delta:
        # Bug fix: the parameter is annotated as Optional[timedelta], but the
        # old code passed it to timedelta(minutes=...), which raises TypeError
        # for an actual timedelta. Accept both forms.
        if not isinstance(expires_delta, timedelta):
            expires_delta = timedelta(minutes=expires_delta)
        expires = datetime.utcnow() + expires_delta
    else:
        expires = datetime.utcnow() + timedelta(minutes=TOKEN_EXPIRATION_TIME)
    to_encode.update({"exp": expires})
    to_encode.update({"scope": "userauth:none" if page_only else "userauth:full"})
    return jwt.encode(to_encode, SECRET, ALGORITHM)
def parse_json_year_date(year: Number, fullpath: Path) -> Union[Path, None]:
    """Filter a JSON file path by year.

    :param year: Year to look for in the file name (matched as ``_{year}.json``).
    :param fullpath: Candidate file path.
    :return: ``fullpath`` when its name ends with ``_{year}.json``, else ``None``.
    :raises TypeError: If ``fullpath`` is not a ``pathlib.Path``.
    """
    if not isinstance(fullpath, Path):
        raise TypeError("O parâmetro path deve do tipo Path.")
    # Raw f-string: the previous non-raw "\." was an invalid escape sequence
    # (DeprecationWarning, and a SyntaxError in future Python versions).
    pattern_finder = re.search(rf"_{year}\.json", fullpath.name)
    return fullpath if pattern_finder else None
def available_fastspeech2():
    """
    List available FastSpeech2, Text to Mel models.
    """
    # Imported lazily so importing this module stays cheap.
    from malaya_speech.utils import describe_availability

    return describe_availability(
        _fastspeech2_availability,
        text='`husein` and `haqkiem` combined loss from training set',
    )
def solve(A, b, method='gauss', verbose=0, eps=1e-6, max_itration_times=100000, omega=1.9375):
    """
    Solve equations in specified method.

    Valid methods: 'gauss', 'lu', 'chase', 'square_root', 'jacobi',
    'gauss_seidel', 'sor', 'cg', 'qr'.

    :param A: coefficient matrix of the equations
    :param b: vector
    :param method: the way to solve equations
    :param verbose: whether show the running information (1 prints timing)
    :param eps: *epsilon*, convergence tolerance for iterative solvers
    :param max_itration_times: the maximum *rounds* of iteration
    :param omega: *relaxation factor* for SOR method.
    :return: the solution x or 'None' if error occurs
    """
    # _show_equations(A, b)  # only when dim <= 10
    start = dt.now()
    # Solver configuration is shared with the individual solvers through
    # module-level globals — NOTE(review): not thread-safe.
    global _verbose, _eps, _max_itration_times, _omega
    _verbose = verbose
    _eps = eps
    _max_itration_times = max_itration_times
    _omega = omega
    # Dispatch table mapping method name -> solver function; the string
    # 'other_method' is used as a sentinel for unknown names.
    func = {
        'gauss': gauss,
        'lu': lu,
        'chase': chase,
        'square_root': square_root,
        'jacobi': jacobi,
        'gauss_seidel': gauss_seidel,
        'sor': sor,
        'cg': cg,
        'qr': qr
    }.get(method, 'other_method')
    if func == 'other_method':
        _raise_equasolerror_no_method(method)
    # Work on copies so the caller's A and b are never modified.
    A0 = np.copy(A)
    b0 = np.copy(b)
    answer = func(A0, b0)
    if _verbose == 1:
        print('[%s] time cost: %.4f s.' % (method, (dt.now() - start).total_seconds()))
    return answer
def returns_unknown():
    """Return the constant tuple (1, 2, 3); tuples are a not-supported type."""
    return (1, 2, 3)
def get_user(
    cmd,
    app_id: str,
    token: str,
    assignee: str,
    api_version: str,
    central_dns_suffix=CENTRAL_ENDPOINT,
) -> User:
    """
    Get information for the specified user.

    Args:
        cmd: command passed into az
        app_id: name of app (used for forming request URL)
        token: (OPTIONAL) authorization token to fetch device details from IoTC.
            MUST INCLUDE type (e.g. 'SharedAccessToken ...', 'Bearer ...')
        assignee: unique ID of the user
        api_version: API version to use for the call
        central_dns_suffix: {centralDnsSuffixInPath} as found in docs

    Returns:
        User object for the requested assignee.
    """
    response = _make_call(
        cmd,
        app_id=app_id,
        method="get",
        path=assignee,
        payload=None,
        token=token,
        central_dns_suffix=central_dns_suffix,
        api_version=api_version,
    )
    # Convert the raw service response into the typed User model.
    return _utility.get_object(response, MODEL, api_version)
def test_gumbel_softmax_transform():
    """Smoke-test the gumbel-softmax transform function.

    Runs gumbel_softmax_transform on random input and prints the returned
    per-sample vectors; there are no assertions, so this only verifies the
    call does not raise.
    """
    # Define variables
    numActions = 3
    batch_size = 3
    gumbelTemp = -1                # NOTE(review): negative temperature — confirm intended
    gumbelReturnHard = True
    # 9 features per sample; presumably 3 actions x 3 distributions — TODO confirm
    x = torch.randn(batch_size, 9)
    # Perform transform
    (batch_samples_stacked, all_samples_gumbel_softmax_vectors) = gumbel_softmax_transform(
        x,
        numActions,
        gumbelTemp,
        gumbelReturnHard)
    print("all_samples_gumbel_softmax_vectors: ")
    for h in all_samples_gumbel_softmax_vectors:
        print("h: ", h)
def run_test(
    bess_addr,
    ptfdir,
    trex_server_addr=None,
    extra_args=(),
):
    """
    Runs PTF tests included in provided directory.

    :param bess_addr: address of the BESS UPF passed to the tests.
    :param ptfdir: directory containing the PTF test cases.
    :param trex_server_addr: optional TRex server address forwarded as a
        test parameter.
    :param extra_args: extra command-line arguments appended to the ptf call.
    :return: True when the PTF process exits with code 0, False otherwise.
    """
    # create a dummy interface for PTF
    if not create_dummy_interface() or not set_up_interfaces([DUMMY_IFACE_NAME]):
        return False
    pypath = "/upf-tests/lib"
    # build the ptf command to be run
    cmd = ["ptf"]
    cmd.extend(["--test-dir", ptfdir])
    cmd.extend(["--pypath", pypath])
    # Port 296 bound to the dummy interface.
    cmd.extend(["-i", f"296@{DUMMY_IFACE_NAME}"])
    test_params = "bess_upf_addr='{}'".format(bess_addr)
    if trex_server_addr is not None:
        test_params += ";trex_server_addr='{}'".format(trex_server_addr)
    cmd.append("--test-params={}".format(test_params))
    cmd.extend(extra_args)
    info("Executing PTF command: {}".format(" ".join(cmd)))
    try:
        # run ptf and send output to stdout
        p = subprocess.Popen(cmd)
        p.wait()
    except Exception:
        error("Error when running PTF tests")
        # Returning from except still runs the finally-block cleanup below.
        return False
    finally:
        # always clean up the dummy interface
        remove_dummy_interface()
    return p.returncode == 0
def get(fg, bg=None, attribute=0):
    """
    Return string with ANSI escape code for set text colors.

    fg: html hex code (str) or color index (int, 0-255) for text color
    bg: optional background color in the same form as fg
    attribute: use Attribute class variables
    """
    # Exact type checks (not isinstance) are kept deliberately: subclasses
    # such as bool fall through to the TypeError, matching prior behavior.
    if type(fg) is str:
        return by_hex(fg, bg if bg else "#000000", attribute=attribute)
    if type(fg) is int and 0 <= fg <= 255:
        return by_index(fg, bg if bg else 0, attribute=attribute)
    raise TypeError("You can use only string or int.")
def get_Qi(Q,i,const_ij,m):
    """
    Aim:
    ----
    Equalising two polynomials where one is obtained by a SOS
    decomposition in the canonical basis and the other one is expressed
    in the Laguerre basis.

    Parameters
    ----------
    Q : matrix for the SOS decomposition
    i : integer
        degree at which we compute the coefficients.
    const_ij : list
        contains indices of Q at which coefficients i+j = const.
    m : integer
        maximum degree included in the outer sum.

    Returns
    -------
    Real that is a sum of coefficients
    """
    # Outer sum over degrees l = i..m; inner sum gathers the Q entries whose
    # index pair sums to 2*l, normalised by sqrt of factorials.
    # NOTE(review): assumes const_ij[2*l] holds 2-tuples (j0, j1) — confirm.
    return sum(factorial(l)*binom(l,i)*\
           sum(Q[j]/sqrt(factorial(j[0])*factorial(j[1])) \
               for j in const_ij[2*l]) for l in np.arange(i,m+1))
def run_openlego(analyze_mdao_definitions, cmdows_dir=None, initial_file_path=None,
                 data_folder=None, run_type='test', approx_totals=False, driver_debug_print=False):
    # type: (Union[int, list, str], Optional[str], Optional[str], Optional[str], Optional[str], Optional[bool], Optional[bool]) -> Union[tuple, LEGOProblem]
    """Run OpenLEGO for a list of MDAO definitions.

    Parameters
    ----------
    analyze_mdao_definitions : list
        List of MDAO definitions to be analyzed.
    cmdows_dir : str
        Path to directory with CMDOWS files (defaults to 'cmdows_files'
        next to this module).
    initial_file_path : str
        Path to file containing initial values (defaults to 'SSBJ-base.xml'
        next to this module).
    data_folder : str
        Path to directory where results will be stored.
    run_type : str
        Option to indicate the type of run, as this changes the return
        statement used. NOTE: for 'test' and 'validation' only the FIRST
        definition in the loop is executed — the function returns from
        inside the loop.
    approx_totals : bool
        Setting on whether to use approx_totals on the model.
    driver_debug_print : bool
        Setting on whether to print debug information in the log.

    Returns
    -------
    Union[Tuple[float], LEGOProblem]
    """
    # Check and analyze inputs
    mdao_defs_loop = get_loop_items(analyze_mdao_definitions)
    file_dir = os.path.dirname(__file__)
    if not cmdows_dir:
        cmdows_dir = os.path.join(file_dir, 'cmdows_files')
    if not initial_file_path:
        initial_file_path = os.path.join(file_dir, 'SSBJ-base.xml')
    if not data_folder:
        data_folder = ''
    # Run the MDAO definitions one by one.
    for mdao_def in mdao_defs_loop:
        print('\n-----------------------------------------------')
        print('Running the OpenLEGO of Mdao_{}.xml...'.format(mdao_def))
        print('------------------------------------------------')
        """Solve the SSBJ problem using the given CMDOWS file."""
        # 1. Create Problem
        prob = LEGOProblem(cmdows_path=os.path.join(cmdows_dir, 'Mdao_{}.xml'.format(mdao_def)),
                           kb_path=os.path.join(file_dir, 'kb'),  # Knowledge base path
                           data_folder=data_folder,  # Output directory
                           base_xml_file=os.path.join(data_folder,
                                                      'ssbj-output-{}.xml'.format(mdao_def)))
        if driver_debug_print:
            prob.driver.options['debug_print'] = ['desvars', 'nl_cons', 'ln_cons', 'objs']
        prob.set_solver_print(0)  # Set printing of solver information
        if approx_totals:
            prob.model.approx_totals()
        # 2. Initialize the Problem and export N2 chart
        prob.store_model_view()
        prob.initialize_from_xml(initial_file_path)  # Set the initial values from an XML file
        # 3. Run the Problem
        # Distributed architectures are only run (not driven) in test mode.
        test_distributed = mdao_def in ['CO', 'BLISS-2000'] and run_type == 'test'
        if test_distributed:
            prob.run_model()
        else:
            prob.run_driver()  # Run the driver (optimization, DOE, or convergence)
        # 4. Read out the case reader
        if not test_distributed:
            prob.collect_results()
        if run_type == 'test':
            # 5. Collect test results for test assertions
            tc = prob['/dataSchema/aircraft/geometry/tc'][0]
            h = prob['/dataSchema/reference/h'][0]
            M = prob['/dataSchema/reference/M'][0]
            AR = prob['/dataSchema/aircraft/geometry/AR'][0]
            Lambda = prob['/dataSchema/aircraft/geometry/Lambda'][0]
            Sref = prob['/dataSchema/aircraft/geometry/Sref'][0]
            if mdao_def not in ['CO', 'BLISS-2000']:
                lambda_ = prob['/dataSchema/aircraft/geometry/lambda'][0]
                section = prob['/dataSchema/aircraft/geometry/section'][0]
                Cf = prob['/dataSchema/aircraft/other/Cf'][0]
                T = prob['/dataSchema/aircraft/other/T'][0]
                R = prob['/dataSchema/scaledData/R/value'][0]
                extra = prob['/dataSchema/aircraft/weight/WT'][0]
            elif mdao_def == 'CO':
                # CO: some values live inside the sub-optimizer problems.
                lambda_ = prob.model.SubOptimizer0.prob['/dataSchema/aircraft/geometry/lambda'][0]
                section = prob.model.SubOptimizer0.prob['/dataSchema/aircraft/geometry/section'][0]
                Cf = prob.model.SubOptimizer1.prob['/dataSchema/aircraft/other/Cf'][0]
                T = prob.model.SubOptimizer2.prob['/dataSchema/aircraft/other/T'][0]
                R = prob['/dataSchema/scaledData/R/value'][0]
                extra = (prob['/dataSchema/distributedArchitectures/group0/objective'],
                         prob['/dataSchema/distributedArchitectures/group1/objective'],
                         prob['/dataSchema/distributedArchitectures/group2/objective'])
            else:
                lambda_, section, Cf, T, R, extra = None, None, None, None, None, None
            # 6. Cleanup and invalidate the Problem afterwards
            prob.invalidate()
            return tc, h, M, AR, Lambda, Sref, lambda_, section, Cf, T, R, extra
        elif run_type == 'validation':
            return prob
        else:
            prob.invalidate()
def unzipWorker(filename, forceDownload = False):
    """
    Perform decompression of downloaded files.

    Extracts every member of the given zip archive into the working
    directory (creating one level of subdirectory if needed), then
    re-compresses the extracted hourly file with gzip. Progress messages
    are printed and appended to the module-level MSG_BODY.
    NOTE: Python 2 code (print statements).

    :param filename: path of the zip archive to decompress.
    :param forceDownload: A flag indicating that the download should override
        the default behavior of the script.
    """
    global MSG_BODY
    originalName = filename
    hourlyGzName = retriever.weatherUtil.datePart(
        filename = filename) + "hourly.txt.gz"
    # Skip work if the gzipped hourly file already exists (unless forced).
    if fileExists(hourlyGzName) and not forceDownload:
        msg = "%s already exists." % hourlyGzName
        print msg
        MSG_BODY += '%s\n' % msg
        return
    if (fileExists(filename)):
        msg = "Unzipping %s." % filename
        print msg
        MSG_BODY += '%s\n' % msg
        try:
            zfile = zipfile.ZipFile(filename)
            for name in zfile.namelist():
                # `filename` is rebound here to the member's basename.
                (dirname, filename) = os.path.split(name)
                if not dirname == '':
                    msg = "Decompressing " + filename + " into " + dirname + "."
                    print msg
                    MSG_BODY += '%s\n' % msg
                else:
                    msg = "Decompressing " + filename + "."
                    print msg
                    MSG_BODY += '%s\n' % msg
                # NOTE(review): os.mkdir creates only one level — nested member
                # paths would fail; also the member is written in text mode,
                # which assumes text content — confirm.
                if not os.path.exists(dirname) and not dirname == '':
                    os.mkdir(dirname)
                fd = open(name, "w")
                fd.write(zfile.read(name))
                fd.close()
            hourlyName = retriever.weatherUtil.datePart(
                filename = originalName) + "hourly.txt"
            if fileExists(hourlyName):
                msg = "Hourly file exists. Compressing the hourly file with " \
                      "gzip."
                print msg
                MSG_BODY += '%s\n' % msg
                gzipCompressFile(hourlyName)
            else:
                msg = "Hourly file not found."
                print msg
                MSG_BODY += '%s\n' % msg
        except zipfile.BadZipfile:
            msg = "Bad zipfile %s." % originalName
            print msg
            MSG_BODY += '%s\n' % msg
def strToBool(s):
    """
    Converts string s to a boolean.

    Accepts (case-insensitively) 'true'/'yes' -> True and
    'false'/'no' -> False; any other value raises KeyError.
    """
    assert type(s) == str or type(s) == unicode
    mapping = {'true': True, 'yes': True, 'false': False, 'no': False}
    return mapping[s.lower()]
def publish_to_sns(topic_name, message, region=None):
    """
    Post a message to an SNS topic.

    :param topic_name: name of the SNS topic (ARN is built from it).
    :param message: JSON-serializable payload; wrapped in the SNS
        "default" message structure.
    :param region: AWS region of the topic; defaults to us-east-1 in the
        'aws' partition when omitted.
    :return: the SNS MessageId, or the string 'error' if missing.
    """
    AWS = AWSCachedClient(region) # cached client object
    partition = None
    if region:
        partition = partition_from_region(region)
    else:
        # No region given: assume the standard partition and default region.
        partition = 'aws'
        region = 'us-east-1'
    # Build the topic ARN for the caller's own account.
    topic_arn = 'arn:' + partition + ':sns:' + region + ':' + AWS.account + ':' + topic_name
    json_message = json.dumps({"default":json.dumps(message)})
    message_id = AWS.get_connection('sns', region).publish(
        TopicArn=topic_arn,
        Message=json_message,
        MessageStructure='json'
    ).get('MessageId', 'error')
    return message_id
def bandpass_voxels(realigned_file, bandpass_freqs, sample_period=None):
    """
    Performs ideal bandpass filtering on each voxel time-series.

    Parameters
    ----------
    realigned_file : string
        Path of a realigned nifti file.
    bandpass_freqs : tuple
        Tuple containing the bandpass frequencies. (LowCutoff_HighPass HighCutoff_LowPass)
        Either entry may be None to disable that side of the filter.
    sample_period : float, optional
        Length of sampling period in seconds. If not specified,
        this value is read from the nifti file provided.

    Returns
    -------
    bandpassed_file : string
        Path of filtered output (nifti file).
    """
    def ideal_bandpass(data, sample_period, bandpass_freqs):
        # Derived from YAN Chao-Gan 120504 based on REST.
        sample_freq = 1. / sample_period
        sample_length = data.shape[0]
        # Zero-pad to the next power of two for the FFT.
        data_p = np.zeros(int(2**np.ceil(np.log2(sample_length))))
        data_p[:sample_length] = data
        LowCutoff, HighCutoff = bandpass_freqs
        if (LowCutoff is None):  # No lower cutoff (low-pass filter)
            low_cutoff_i = 0
        elif (LowCutoff > sample_freq / 2.):
            # Cutoff beyond fs/2 (all-stop filter)
            low_cutoff_i = int(data_p.shape[0] / 2)
        else:
            low_cutoff_i = np.ceil(
                LowCutoff * data_p.shape[0] * sample_period).astype('int')
        # Bug fix: check for None BEFORE comparing — `None > x` raises
        # TypeError on Python 3, so the original order crashed when
        # HighCutoff was None.
        if (HighCutoff is None or HighCutoff > sample_freq / 2.):
            # Cutoff beyond fs/2 or unspecified (become a highpass filter)
            high_cutoff_i = int(data_p.shape[0] / 2)
        else:
            high_cutoff_i = np.fix(
                HighCutoff * data_p.shape[0] * sample_period).astype('int')
        # Symmetric frequency mask (keep positive and mirrored negative bins).
        freq_mask = np.zeros_like(data_p, dtype='bool')
        freq_mask[low_cutoff_i:high_cutoff_i + 1] = True
        freq_mask[
            data_p.shape[0] -
            high_cutoff_i:data_p.shape[0] + 1 - low_cutoff_i
        ] = True
        f_data = fft(data_p)
        f_data[freq_mask != True] = 0.
        data_bp = np.real_if_close(ifft(f_data)[:sample_length])
        return data_bp

    nii = nb.load(realigned_file)
    data = nii.get_data().astype('float64')
    # Voxels whose whole time-series is zero are outside the brain mask.
    mask = (data != 0).sum(-1) != 0
    Y = data[mask].T
    # Demean each voxel time-series before filtering.
    Yc = Y - np.tile(Y.mean(0), (Y.shape[0], 1))
    if not sample_period:
        hdr = nii.get_header()
        sample_period = float(hdr.get_zooms()[3])
        # Sketchy check to convert TRs in millisecond units
        if sample_period > 20.0:
            sample_period /= 1000.0
    Y_bp = np.zeros_like(Y)
    for j in range(Y.shape[1]):
        Y_bp[:, j] = ideal_bandpass(Yc[:, j], sample_period, bandpass_freqs)
    data[mask] = Y_bp.T
    img = nb.Nifti1Image(data, header=nii.get_header(),
                         affine=nii.get_affine())
    bandpassed_file = os.path.join(os.getcwd(),
                                   'bandpassed_demeaned_filtered.nii.gz')
    img.to_filename(bandpassed_file)
    return bandpassed_file
def get_subset(
    classes: List,
    train_data,
    train_labels,
    val_data,
    val_labels,
    test_data,
    test_labels,
    ) -> Tuple:
    """
    Create a binary subset of the training, validation, and testing sets
    using the specified list of classes.

    :param classes: list of classes in the labels that are to be selected in the subset (only specify two)
    :param train_data: list or numpy array containing training data
    :param train_labels: list or numpy array containing training labels
    :param val_data: list or numpy array containing validation/training phase 2 data
    :param val_labels: list or numpy array containing validation/training phase 2 labels
    :param test_data: list or numpy array containing testing data
    :param test_labels: list or numpy array containing testing labels
    :return: tuple of training sub-set, validation/training phase 2 sub-set, testing sub-set.
        "sub-set" here is a tuple of (data, binary labels) numpy arrays, where a
        label is True when the original label equals classes[0].
    """
    def _binary_split(data, labels):
        # Keep only samples of the selected classes; binarize against classes[0].
        keep = np.isin(labels, classes)
        return data[keep], labels[keep] == classes[0]

    return (
        _binary_split(train_data, train_labels),
        _binary_split(val_data, val_labels),
        _binary_split(test_data, test_labels),
    )
def init_vgg16(model_folder):
    """Load the VGG16 model features, converting from the Lua/torch7 weights.

    If `<model_folder>/vgg16.weight` does not exist yet, downloads the
    torch7 checkpoint (via wget) if needed, copies its parameters into a
    fresh Vgg16 instance, and saves the state dict as `vgg16.weight`.
    NOTE: `torch.utils.serialization.load_lua` is only available in
    torch < 1.0, and the download uses --no-check-certificate.
    """
    if not os.path.exists(os.path.join(model_folder, 'vgg16.weight')):
        from torch.utils.serialization import load_lua #only available in torch < 1.0
        if not os.path.exists(os.path.join(model_folder, 'vgg16.t7')):
            # Fetch the original torch7 checkpoint from the fast-neural-style release.
            os.system('wget --no-check-certificate http://cs.stanford.edu/people/jcjohns/fast-neural-style/models/vgg16.t7 -O ' + os.path.join(model_folder, 'vgg16.t7'))
        vgglua = load_lua(os.path.join(model_folder, 'vgg16.t7'))
        vgg = Vgg16()
        # Copy parameters positionally from the Lua model into the PyTorch model.
        for (src, dst) in zip(vgglua.parameters()[0], vgg.parameters()):
            dst.data[:] = src
        torch.save(vgg.state_dict(), os.path.join(model_folder, 'vgg16.weight'))
def to_xyzw(matrix):
    """Convenience/readibility function to bring spatial (trailing) axis to start.

    Args:
        matrix (...x4 array): Input matrix.
    Returns:
        4x... array (a view when possible).
    """
    # Equivalent to np.rollaxis(matrix, -1): move the last axis to the front.
    return np.moveaxis(matrix, -1, 0)
def do_3d_pooling(feature_matrix, stride_length_px=2,
                  pooling_type_string=MAX_POOLING_TYPE_STRING):
    """Pools 3-D feature maps.

    :param feature_matrix: Input feature maps (numpy array). Dimensions must be
        M x N x H x C or 1 x M x N x H x C.
    :param stride_length_px: See doc for `do_2d_pooling`. Also used as the
        pooling-window edge length, so windows never overlap.
    :param pooling_type_string: Pooling type (must be accepted by
        `_check_pooling_type`).
    :return: feature_matrix: Output feature maps (numpy array). Dimensions will
        be 1 x m x n x h x C.
    """
    error_checking.assert_is_numpy_array_without_nan(feature_matrix)
    error_checking.assert_is_integer(stride_length_px)
    error_checking.assert_is_geq(stride_length_px, 2)
    _check_pooling_type(pooling_type_string)

    # Add a leading batch axis if the caller passed an unbatched map.
    if len(feature_matrix.shape) == 4:
        feature_matrix = numpy.expand_dims(feature_matrix, axis=0)

    error_checking.assert_is_numpy_array(feature_matrix, num_dimensions=5)

    # Delegate to the Keras backend; pool size equals stride, 'valid' padding.
    feature_tensor = K.pool3d(
        x=K.variable(feature_matrix), pool_mode=pooling_type_string,
        pool_size=(stride_length_px, stride_length_px, stride_length_px),
        strides=(stride_length_px, stride_length_px, stride_length_px),
        padding='valid', data_format='channels_last'
    )

    return feature_tensor.numpy()
def logoutUser(request):
    """Log the user out using Django's logout() and redirect to the login page.

    Args:
        request: The incoming HTTP request.
    Returns:
        An HTTP redirect response to the login template.
    """
    logout(request)
    return redirect('/accounts/login/')
def assert_allclose(actual: float, desired: numpy.float64, rtol: float):
    """
    Auto-generated API-usage stub; body intentionally empty. The counts
    below record observed call sites per downstream library:

    usage.scipy: 2
    usage.sklearn: 7
    usage.statsmodels: 12
    """
    ...
def sin_wave(freq, duration=1, offset=0):
    """Makes a sine wave with the given parameters.

    freq: float cycles per second
    duration: float seconds
    offset: float radians
    returns: Wave
    """
    # Build the signal and immediately sample it into a Wave.
    return SinSignal(freq, offset=offset).make_wave(duration)
def assign_lpvs(lat):
    """Given lattice type return 3 lattice primitive vectors.

    Supported types: 'FCC', 'SC', 'SH'. Any other value yields the
    all-zero 3x3 matrix (matching prior behavior).
    """
    lpv = zeros((3, 3))
    if lat == 'FCC':
        # All off-diagonal entries are 1/sqrt(2).
        inv_rt2 = 1. / sqrt(2)
        for row in range(3):
            for col in range(3):
                if row != col:
                    lpv[row, col] = inv_rt2
    elif lat == 'SC':
        # Identity: simple cubic axes.
        for k in range(3):
            lpv[k, k] = 1
    elif lat == 'SH':
        half = 1. / 2
        rt3_half = sqrt(3) / 2
        lpv[0, 0] = half
        lpv[0, 1] = -rt3_half
        lpv[1, 0] = half
        lpv[1, 1] = rt3_half
        lpv[2, 2] = 1.
    return lpv
def flatten_and_batch_shift_indices(indices: torch.LongTensor,
                                    sequence_length: int) -> torch.Tensor:
    """``indices`` of size ``(batch_size, d_1, ..., d_n)`` indexes into dimension 2 of a target tensor,
    which has size ``(batch_size, sequence_length, embedding_size)``. This function returns a vector
    that correctly indexes into the flattened target. The sequence length of the target must be provided
    to compute the appropriate offset.

    Args:
        indices (torch.LongTensor): per-batch indices into the sequence dimension.
        sequence_length (int): length of the sequence dimension being indexed.
    Returns:
        torch.Tensor: 1-D tensor of size ``batch_size * d_1 * ... * d_n``.
    Raises:
        ValueError: if any index falls outside ``[0, sequence_length)``.
    """
    min_index = torch.min(indices)
    max_index = torch.max(indices)
    if max_index >= sequence_length or min_index < 0:
        raise ValueError("All the elements should be in range (0, {}), but found ({}, {})".format(
            sequence_length - 1, min_index.item(), max_index.item()))
    # Each batch element b gets a constant offset b * sequence_length.
    offsets = get_range_vector(indices.size(0), indices.device) * sequence_length
    for _ in range(indices.dim() - 1):
        offsets = offsets.unsqueeze(1)
    # Broadcast-add the offsets, then flatten to a single index vector.
    return (indices + offsets).view(-1)
def raveled_affinity_watershed(
        image_raveled, marker_coords, offsets, mask, output
        ):
    """Compute affinity watershed on raveled arrays.

    Parameters
    ----------
    image_raveled : 2D array of float(32), shape (npixels, ndim)
        The z, y, and x affinities around each pixel.
    marker_coords : 1D array of int
        The location of each marker along the pixels dimension of
        ``image_raveled``.
    offsets : 1D array of int
        The signed offsets to each neighboring pixel.
    mask : 1D array of bool, shape (npixels,)
        True for pixels to which the watershed should spread.
    output : 1D array of int
        The output array for markers; mutated in place and also returned.
    """
    n_neighbors = offsets.shape[0]
    # `age` breaks priority ties in FIFO order.
    age = 0
    marker_coords = marker_coords.astype(np.intp)
    offsets = offsets.astype(np.intp)
    # First half of the offsets index the affinity axis with 0 — presumably
    # distinguishing "negative" vs "positive" direction edges; TODO confirm.
    aff_offsets = offsets.copy().astype(np.intp)
    aff_offsets[:int(len(offsets) / 2), 1] = 0
    # Seed-then-pop trick: create a heap with one throwaway element so the
    # element type is established — NOTE(review): looks like a numba typing
    # workaround; confirm.
    heap = [
        Element(
            image_raveled[0, 0], age, marker_coords[0],
            marker_coords[0]
            )
        ]
    _ = heappop(heap)
    # add each seed to the stack
    for i in range(marker_coords.shape[0]):
        index = marker_coords[i]
        value = np.float32(0.)
        source = index
        index = index
        elem = Element(value, age, index, source)
        heappush(heap, elem)
    # remove from stack until empty
    while len(heap) > 0:
        elem = heappop(heap)
        for i in range(n_neighbors):
            # get the flattened address of the neighbor
            # offsets are 2d (size, 2) with columns 0 and 1 corresponding to
            # affinities (ie axis) and image neighbour indices respectively
            neighbor_index = elem.index + offsets[i, 1]
            if not mask[neighbor_index]:
                # neighbor is not in mask, move on to next neighbor
                continue
            if output[neighbor_index]:
                # if there is a non-zero value in output, move on to next
                # neighbor
                continue
            # if the neighbor is in the mask and not already labeled,
            # label it then add it to the queue
            output[neighbor_index] = output[elem.index]
            value = image_raveled[aff_offsets[i, 0],
                                  aff_offsets[i, 1] + elem.index]
            age += 1
            new_elem = Element(value, age, neighbor_index, elem.source)
            heappush(heap, new_elem)
    return output
def test_run_job_with_job_requirements_mixed(ee2_port, ws_controller, mongo_client):
    """
    Tests running a job where requirements are specified on input, from the catalog, and from
    the deploy.cfg file.

    Input supplies request_cpus=9 (winning over the catalog's 8), the
    catalog supplies request_memory=5, and the remaining values (disk=30,
    clientgroup 'njs') fall through to the deployment defaults. Runs as an
    admin user via the truthy `as_admin` flag.
    """
    _run_job(
        ee2_port,
        ws_controller,
        mongo_client,
        job_reqs={"request_cpus": 9},
        clientgroup="njs",
        cpu=9,
        mem=5,
        disk=30,
        catalog_return=[{"client_groups": ['{"request_cpus":8,"request_memory":5}']}],
        as_admin="wheee",  # truthy
        user=USER_WRITE_ADMIN,
        token=TOKEN_WRITE_ADMIN,
    )
def get_spike_times(units: pynwb.misc.Units, index, in_interval):
    """Use bisect methods to efficiently retrieve spikes from a given unit in a given interval.

    Parameters
    ----------
    units: pynwb.misc.Units
        Units table holding the ragged 'spike_times' column.
    index: int
        Row (unit) index within the units table.
    in_interval: tuple of (start, stop) times

    Returns
    -------
    numpy.ndarray
        Spike times of the unit that fall within [start, stop].
    """
    st = units['spike_times']
    # Ragged-array bounds: st.data[index-1] / st.data[index] delimit this
    # unit's slice of the flat st.target vector.
    unit_start = 0 if index == 0 else st.data[index - 1]
    unit_stop = st.data[index]
    start_time, stop_time = in_interval
    # Binary-search within the unit's slice — assumes spike times are sorted
    # per unit (TODO confirm against the writer).
    ind_start = bisect_left(st.target, start_time, unit_start, unit_stop)
    ind_stop = bisect_right(st.target, stop_time, ind_start, unit_stop)
    return np.asarray(st.target[ind_start:ind_stop])
def _make_hours(store_hours):
"""Store hours is a dictionary that maps a DOW to different open/close times
Since it's easy to represent disjoing hours, we'll do this by default
Such as, if a store is open from 11am-2pm and then 5pm-10pm
We'll slice the times in to a list of floats representing 30 minute intevals
So for monday, let's assume we have the store hours from 10am - 3pm
We represent this as
monday = [10.0, 10.5, 11.0, 11.5, 12.0, 12.5, 13.0, 13.5, 14.0, 14.5]
"""
week_hrs = {}
for dow in store_hours.keys():
dow_hours = []
for hour_set in store_hours[dow]:
if len(hour_set) < 2:
open_hr = 0.0
close_hr = 24.0
else:
open_hr = float(hour_set[0])
close_hr = float(hour_set[1])
if close_hr < open_hr:
tmp = close_hr
close_hr = open_hr
open_hr = tmp
current_hr_it = open_hr
while((close_hr - current_hr_it) >= .5):
dow_hours.append(current_hr_it)
current_hr_it += .5
week_hrs[dow] = dow_hours
return week_hrs | 25,764 |
def mul_time(t1, factor):
    """Get the product of the original Time and the number.

    time: Time
    factor: number
    returns: Time
    """
    assert valid_time(t1)
    # Scale in integer-seconds space, then convert back to a Time.
    total_seconds = time_to_int(t1) * factor
    return int_to_time(total_seconds)
def create_pmf_from_samples(
        t_samples_list, t_trunc=None, bin_width=None, num_bins=None):
    """
    Compute the probability distribution of the waiting time from sampled data.

    Parameters
    ----------
    t_samples_list : array-like 1-D
        Samples of the waiting time.
    t_trunc: int
        The truncation time; defaults to the largest sample.
    bin_width: int
        The width of the bins for the histogram.
    num_bins: int
        The number of bins for the histogram.
        If num_bins and bin_width are both given, bin_width has priority.

    Returns
    -------
    pmf: array-like 1-D
        The probability distribution, i.e. the normalized histogram of
        waiting time.
    bin_edges: array-like 1-D
        The edge of each bin from ``numpy.histogram``.
    """
    if t_trunc is None:
        t_trunc = max(t_samples_list)
    if bin_width is None:
        # Default to ~200 bins when neither width nor count is given.
        divisor = 200 if num_bins is None else num_bins
        bin_width = int(np.ceil(t_trunc / divisor))
    lower = np.min(t_samples_list)
    edges = np.arange(lower, t_trunc + 1, bin_width)
    counts, bin_edges = np.histogram(t_samples_list, bins=edges)
    return counts / len(t_samples_list), bin_edges
def __add_click_for_argument(db_user, db_argument):
    """
    Add click for a specific argument.

    Records the user's vote on the argument itself, its premise group, and
    its conclusion statement, then marks them as seen.

    :param db_user: User
    :param db_argument: Argument
    :return: None
    """
    db_conclusion = DBDiscussionSession.query(Statement).get(db_argument.conclusion_uid)
    # set vote for the argument (relation), its premisegroup and conclusion
    __click_argument(db_argument, db_user, True)
    __vote_premisesgroup(db_argument.premisegroup_uid, db_user, True)
    # The conclusion vote direction follows the argument's supportiveness.
    __click_statement(db_conclusion, db_user, db_argument.is_supportive)
    # add seen values
    __argument_seen_by_user(db_user, db_argument.uid)
def box_corner_to_center(boxes):
    """Convert boxes from (upper-left, lower-right) corners to (center, width, height)."""
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    center_x = (x1 + x2) / 2
    center_y = (y1 + y2) / 2
    width = x2 - x1
    height = y2 - y1
    return paddle.stack((center_x, center_y, width, height), axis=-1)
def test_iterate_with_multiple_selected_items(setup_analysis_iterator):
    """ Test iterating over analysis objects with multiple selections.

    Selecting a = "a1" and b = "b2" should leave exactly one matching
    object ("obj2"); the iterator must yield it and then be exhausted.
    """
    # Setup
    KeyIndex, _, test_dict = setup_analysis_iterator
    # Create the iterator
    object_iter = generic_config.iterate_with_selected_objects(
        analysis_objects = test_dict,
        a = "a1",
        b = "b2",
    )
    # Iterate over it.
    assert next(object_iter) == (KeyIndex(a = "a1", b = "b2", c = "c"), "obj2")
    # It should be exhausted now.
    with pytest.raises(StopIteration):
        next(object_iter)
def irrelevant(condition=None, library=None, weblog_variant=None, reason=None):
    """Decorator: allow marking a test function/class as not relevant."""
    should_skip = _should_skip(library=library, weblog_variant=weblog_variant, condition=condition)

    def decorator(item):
        # Pass the item through untouched when it is still relevant.
        if not should_skip:
            return item
        full_reason = "not relevant" if reason is None else f"not relevant: {reason}"
        return _get_skipped_item(item, full_reason)

    return decorator
def test_localdockerinterface_get_info_disconnected(caplog, responses):
    """No daemon to talk to (see responses used as fixture but no listening).

    get_image_info must return None and log exactly one message telling
    the user to ensure dockerd is running.
    """
    caplog.set_level(logging.DEBUG, logger="charmcraft")

    ldi = LocalDockerdInterface()
    resp = ldi.get_image_info("test-digest")
    assert resp is None

    expected = [
        "Cannot connect to /var/run/docker.sock , please ensure dockerd is running.",
    ]
    assert expected == [rec.message for rec in caplog.records]
def ec_test():
    """
    Tests the ec-multiply sections. Cases taken from the bip38 document.

    First set: passphrases without lot/sequence; second set: with
    lot/sequence and confirmation codes. Each case round-trips an
    intermediate point and checks the address generated from the
    confirmation code matches. Prints progress; raises AssertionError on
    mismatch.
    """
    # Deterministic seeds so the derived keys are reproducible.
    seed = "dh3409sjgh3g48"
    seed2 = "asdas8729akjbmn"
    # (crypto, passphrase, intermediate point, encrypted key, address, decrypted key)
    cases = [[
        'btc',
        'TestingOneTwoThree',
        'passphrasepxFy57B9v8HtUsszJYKReoNDV6VHjUSGt8EVJmux9n1J3Ltf1gRxyDGXqnf9qm',
        '6PfQu77ygVyJLZjfvMLyhLMQbYnu5uguoJJ4kMCLqWwPEdfpwANVS76gTX',
        '1PE6TQi6HTVNz5DLwB1LcpMBALubfuN2z2',
        '5K4caxezwjGCGfnoPTZ8tMcJBLB7Jvyjv4xxeacadhq8nLisLR2',
    ],[
        'btc',
        'Satoshi',
        'passphraseoRDGAXTWzbp72eVbtUDdn1rwpgPUGjNZEc6CGBo8i5EC1FPW8wcnLdq4ThKzAS',
        '6PfLGnQs6VZnrNpmVKfjotbnQuaJK4KZoPFrAjx1JMJUa1Ft8gnf5WxfKd',
        '1CqzrtZC6mXSAhoxtFwVjz8LtwLJjDYU3V',
        '5KJ51SgxWaAYR13zd9ReMhJpwrcX47xTJh2D3fGPG9CM8vkv5sH',
    ]]
    i = 1
    # NOTE(review): the published inter_point/encrypted_pk/decrypted_pk values
    # are unpacked but not compared here — only the confirm-code address is.
    for crypto, passphrase, inter_point, encrypted_pk, address, decrypted_pk in cases:
        test_inter_point = Bip38IntermediatePoint.create(passphrase, seed)
        test_generated_address, test_encrypted_pk, test_cfm = Bip38EncryptedPrivateKey.create_from_intermediate(crypto, test_inter_point, seed2)
        assert test_generated_address == test_cfm.generate_address(passphrase), "Generate address from confirm code failed."
        print("EC multiply test #%s passed!" % i)
        i+= 1
    # (crypto, passphrase, intermediate, encrypted key, address, decrypted key,
    #  confirmation code, lot, sequence)
    cases2 = [[
        'btc',
        'MOLON LABE',
        'passphraseaB8feaLQDENqCgr4gKZpmf4VoaT6qdjJNJiv7fsKvjqavcJxvuR1hy25aTu5sX',
        '6PgNBNNzDkKdhkT6uJntUXwwzQV8Rr2tZcbkDcuC9DZRsS6AtHts4Ypo1j',
        '1Jscj8ALrYu2y9TD8NrpvDBugPedmbj4Yh',
        '5JLdxTtcTHcfYcmJsNVy1v2PMDx432JPoYcBTVVRHpPaxUrdtf8',
        'cfrm38V8aXBn7JWA1ESmFMUn6erxeBGZGAxJPY4e36S9QWkzZKtaVqLNMgnifETYw7BPwWC9aPD',
        263183,
        1
    ],[
        'btc',
        u'ΜΟΛΩΝ ΛΑΒΕ',
        'passphrased3z9rQJHSyBkNBwTRPkUGNVEVrUAcfAXDyRU1V28ie6hNFbqDwbFBvsTK7yWVK',
        '6PgGWtx25kUg8QWvwuJAgorN6k9FbE25rv5dMRwu5SKMnfpfVe5mar2ngH',
        '1Lurmih3KruL4xDB5FmHof38yawNtP9oGf',
        '5KMKKuUmAkiNbA3DazMQiLfDq47qs8MAEThm4yL8R2PhV1ov33D',
        'cfrm38V8G4qq2ywYEFfWLD5Cc6msj9UwsG2Mj4Z6QdGJAFQpdatZLavkgRd1i4iBMdRngDqDs51',
        806938,
        1
    ]]
    i = 3
    for crypto, passphrase, inter_point, encrypted_pk, address, decrypted_pk, confirm, lot, sequence in cases2:
        test_inter_point = Bip38IntermediatePoint.create(passphrase, seed)
        test_generated_address, test_encrypted_pk, test_cfm = Bip38EncryptedPrivateKey.create_from_intermediate(crypto, inter_point, seed2)
        # NOTE(review): cfrm is constructed but never asserted against — confirm
        # whether a comparison was intended here.
        cfrm = Bip38ConfirmationCode(confirm)
        print("EC multiply test #%s passed!" % i)
        i += 1
def get_all_approved(self) -> list:
    """Get all appliances currently approved.

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - appliance
          - GET
          - /appliance/approved

    :return: Returns approved appliances
    :rtype: list
    """
    # BUG FIX: this previously requested "/appliance/discovered", which
    # returned *discovered* appliances, contradicting both the method name
    # and the documented endpoint above.
    return self._get("/appliance/approved")
def get_project_root() -> str:
    """Get the path to the project root.

    Returns:
        str: absolute path of the directory two levels above this file.
    """
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, os.pardir))
def bokeh_hover_tooltip(
    label=False,
    text=False,
    image=False,
    audio=False,
    coords=True,
    index=True,
    custom=None,
):
    """
    ???+ note "Create a Bokeh hover tooltip from a template."
        Builds an HTML snippet: a tooltip div assembled from optional
        segments, followed by an (empty) script element.

        - param label: whether to expect and show a "label" field.
        - param text: whether to expect and show a "text" field.
        - param image: whether to expect and show an "image" (url/path) field.
        - param audio: whether to expect and show an "audio" (url/path) field.
        - param coords: whether to show xy-coordinates.
        - param index: whether to show indices in the dataset.
        - param custom: {display: column} mapping of additional (text) tooltips.
    """
    # guard against the mutable-default pitfall
    custom = custom or dict()

    # grow the tooltip div segment by segment
    html = """<div class="out tooltip">\n"""
    if label:
        html += """
        <div>
            <span style="font-size: 16px; color: #966;">
                Label: @label
            </span>
        </div>
        """
    if text:
        html += """
        <div style="word-wrap: break-word; width: 95%; text-overflow: ellipsis; line-height: 90%">
            <span style="font-size: 11px;">
                Text: @text
            </span>
        </div>
        """
    if image:
        html += """
        <div>
            <span style="font-size: 10px;">
                Image: @image
            </span>
            <img
                src="@image" height="60" alt="@image" width="60"
                style="float: left; margin: 0px 0px 0px 0px;"
                border="2"
            ></img>
        </div>
        """
    if audio:
        html += """
        <div>
            <span style="font-size: 10px;">
                Audio: @audio
            </span>
            <audio autoplay preload="auto" src="@audio">
            </audio>
        </div>
        """
    if coords:
        html += """
        <div>
            <span style="font-size: 12px; color: #060;">
                Coordinates: ($x, $y)
            </span>
        </div>
        """
    if index:
        html += """
        <div>
            <span style="font-size: 12px; color: #066;">
                Index: [$index]
            </span>
        </div>
        """
    for _display, _column in custom.items():
        html += f"""
        <div>
            <span style="font-size: 12px; color: #606;">
                {_display}: @{_column}
            </span>
        </div>
        """
    html += """</div>\n"""
    # the script element carries no content in the current template
    html += """<script>\n"""
    html += """</script>\n"""
    return html
def plot_tuning_curves(direction_rates, title):
    """
    Plot a tuning curve as both a histogram and a polar diagram.

    direction_rates is a 2-column array: column 0 holds directions in
    degrees, column 1 holds firing rates in spikes/s. The given title is
    applied to both subplots.
    """
    angles = direction_rates[:, 0]
    rates = direction_rates[:, 1]

    plt.figure()

    # histogram panel (top-left)
    plt.subplot(2, 2, 1)
    plt.bar(angles, rates, width=45, align='center')
    plt.xlim(-22.5, 337.5)
    plt.xticks(angles)
    plt.xlabel('Direction of Motion [degrees]')
    plt.ylabel('Firing rate [spikes/s]')
    plt.title(title)

    # polar panel (top-right); the second curve closes the loop by
    # rolling the arrays one step
    plt.subplot(2, 2, 2, polar=True)
    plt.polar(np.deg2rad(angles), rates,
              label='Firing rate [spikes/s]', color='blue')
    plt.polar(np.deg2rad(np.roll(angles, 1)), np.roll(rates, 1), color='blue')
    plt.legend(loc=8)
    plt.title(title)
def constant_xavier_initializer(shape, dtype=tf.float32, uniform=True):
    """Xavier/Glorot-style initializer returning a tensor of `shape`.

    Fan-in/fan-out are estimated from the shape (treating leading dims as
    the receptive field, as for conv kernels); the draw is either uniform
    in [-limit, limit] or truncated normal, to match stddev sqrt(1/n).
    """
    if not dtype.is_floating:
        raise TypeError('Cannot create initializer for non-floating point type.')

    # Estimating fan_in and fan_out is not possible to do perfectly, but we
    # try. This is the right thing for matrix multiply and convolutions.
    if shape:
        # product of the receptive-field dimensions (all but the last two)
        receptive_field = 1.0
        for dim in shape[:-2]:
            receptive_field *= float(dim)
        fan_in = receptive_field * (
            float(shape[-2]) if len(shape) > 1 else float(shape[-1]))
        fan_out = receptive_field * float(shape[-1])
    else:
        fan_in = fan_out = 1.0

    # Average number of inputs and output connections.
    n = (fan_in + fan_out) / 2.0
    if uniform:
        # To get stddev = math.sqrt(factor / n) need to adjust for uniform.
        limit = math.sqrt(3.0 / n)
        return tf.random_uniform(shape, -limit, limit, dtype, seed=None)
    # To get stddev = math.sqrt(factor / n) need to adjust for truncated.
    trunc_stddev = math.sqrt(1.3 / n)
    return tf.truncated_normal(shape, 0.0, trunc_stddev, dtype, seed=None)
def upload_script(name: str, permission_type: str, content: str, entry_id: str) -> Dict:
    """
    Uploads a script by either given content or file
    :param name: Script name to upload
    :param permission_type: Permissions type of script to upload
    :param content: PowerShell script content
    :param entry_id: Script file to upload
    :return: Response JSON which contains errors (if exist) and how many resources were affected
    """
    endpoint_url = '/real-time-response/entities/scripts/v1'
    # multipart/form-data body: each value is a (filename, payload) tuple
    payload: Dict[str, Tuple[Any, Any]] = {
        'name': (None, name),
        'permission_type': (None, permission_type)
    }
    script_file = None
    try:
        if content:
            payload['content'] = (None, content)
        else:  # entry_id was provided
            file_info = demisto.getFilePath(entry_id)
            script_file = open(file_info.get('path'), 'rb')  # pylint: disable=E1101
            payload['file'] = (file_info.get('name'), script_file)  # pylint: disable=E1101
        headers = {
            'Authorization': HEADERS['Authorization'],
            'Accept': 'application/json'
        }
        return http_request('POST', endpoint_url, files=payload, headers=headers)
    finally:
        # the file handle (if opened) must be released even on request failure
        if script_file:
            script_file.close()
def create_orthogonal(left, right, bottom, top, znear, zfar):
    """Create a Mat4 orthographic projection matrix.

    Maps the axis-aligned box [left, right] x [bottom, top] x [znear, zfar]
    to clip space; the matrix is laid out row-major with the translation in
    the last row.
    """
    dx = right - left
    dy = top - bottom
    dz = zfar - znear
    return Mat4((
        2.0 / dx, 0.0, 0.0, 0.0,
        0.0, 2.0 / dy, 0.0, 0.0,
        0.0, 0.0, 2.0 / -dz, 0.0,
        -(right + left) / dx, -(top + bottom) / dy, -(zfar + znear) / dz, 1.0,
    ))
def update_image_version(name: str, new_version: str):
    """Return *name* with its version tag replaced by *new_version*.

    Only the text after the last colon is treated as the version; if the
    name carries no tag, the version is simply appended.
    """
    base = name.rsplit(':', 1)[0]
    return f'{base}:{new_version}'
def test_argument_conversion_error():
    """
    If a conversion does not work, we pass the unconverted string (like in python2)
    """
    first_instance = ClassA.from_string("ClassD[12.4]")[0]
    assert isinstance(first_instance.arg1, str)
def compute_entanglement(theta):
    """Computes the second Renyi entropy of circuits with and without a tardigrade present.

    Args:
        - theta (float): the angle that defines the state psi_ABT

    Returns:
        - (float): The entanglement entropy of qubit B with no tardigrade
            initially present
        - (float): The entanglement entropy of qubit B where the tardigrade
            was initially present
    """
    dev = qml.device("default.qubit", wires=3)

    # QHACK #
    @qml.qnode(dev)
    def circuits(theta, tartigrade):
        # NOTE(review): `theta` is unused inside the circuit; when
        # tartigrade is falsy the gates below prepare the state on wires
        # 0-1 -- confirm this asymmetry is intended.
        if not tartigrade:
            qml.Hadamard(wires=0)
            qml.CNOT(wires=[0, 1])
            qml.PauliX(wires=0)
        # Reduced density matrix of wire 0 (qubit B) only.
        return qml.density_matrix(wires=[0])

    def partial_trace(rho, qubit_2_keep):  # Credits: GitHub @neversakura.
        # Trace out every qubit of `rho` except those listed in
        # `qubit_2_keep`, by reshaping into per-qubit axes and tracing
        # paired (row, column) axes.
        num_qubit = int(np.log2(rho.shape[0]))
        qubit_axis = [(i, num_qubit + i) for i in range(num_qubit)
                      if i not in qubit_2_keep]
        # each np.trace removes two axes, so later axis indices shift down
        minus_factor = [(i, 2 * i) for i in range(len(qubit_axis))]
        minus_qubit_axis = [(q[0] - m[0], q[1] - m[1])
                            for q, m in zip(qubit_axis, minus_factor)]
        rho_res = np.reshape(rho, [2, 2] * num_qubit)
        qubit_left = num_qubit - len(qubit_axis)
        for i, j in minus_qubit_axis:
            rho_res = np.trace(rho_res, axis1=i, axis2=j)
        if qubit_left > 1:
            rho_res = np.reshape(rho_res, [2 ** qubit_left] * 2)
        return rho_res

    # computational basis states |0> and |1>
    psi_0 = np.array([1, 0])
    psi_1 = np.array([0, 1])
    # two-qubit ground state |00> and the theta-parameterized excited state
    g_bt = np.kron(psi_0, psi_0)
    e_bt = np.cos(theta / 2) * np.kron(psi_1, psi_0) + np.sin(theta / 2) * np.kron(psi_0, psi_1)
    # joint three-qubit state psi_ABT and its density matrix
    psi_abt = 1 / np.sqrt(2) * (np.kron(psi_0, e_bt) + np.kron(psi_1, g_bt))
    rho_abt = np.outer(psi_abt, np.conj(psi_abt))
    # reduced state of qubit B with the tardigrade present
    rho_b = partial_trace(rho_abt, [1])
    # reduced state of qubit B from the circuit (tartigrade flag = 0)
    mu_b = circuits(theta, 0)
    # NOTE(review): `second_renyi_entropy` is defined elsewhere in this module.
    s_mub = second_renyi_entropy(mu_b)
    s_rhob = second_renyi_entropy(rho_b)
    return s_mub, s_rhob
    # QHACK #
def parse_CDS_info(CDS_info):
    """
    Parse one CDS feature from a genome object into a gene-table row.

    Args:
        CDS_info (python d):
            'aliases' (list<alias_list (multiple)>):
                alias_list
                    list<'locus_tag', str> AND/OR
                    list<'old_locus_tag', str> AND/OR
                    list<'protein_id', str>
            'dna_sequence' (str): The actual DNA sequence
            'functions' (list<str>): First object of list is the function
            'location' (list<scaffold (str), bp (int), strand ("+/-"), length (nt)>)
            'protein_translation' (str): amino-acid sequence of the gene
    Returns:
        gene_table_list_d (dict):
            "locusId": str
            "sysName": str
            "type": 1
            "scaffoldId": str
            "begin": int
            "end": int
            "strand": str ("+"/"-")
            "name": str (always "unknown" in this case)
            "desc": str
            "GC": float
            "nTA": int
            "AA_seq": Amino Acid sequence of gene
    Raises:
        Exception: if no 'locus_tag' alias is present in CDS_info['aliases'].
    """
    gene_table_list_d = {}

    # Locate the locus_tag alias. It is usually the first entry; otherwise
    # scan the rest of the alias list for it.
    aliases_l = CDS_info["aliases"]
    locusId_obj = aliases_l[0]
    locus_tag_found = locusId_obj[0] == "locus_tag"
    if not locus_tag_found:
        for i in range(1, len(aliases_l)):
            if aliases_l[i][0] == "locus_tag":
                locus_tag_found = True
                locusId_obj = aliases_l[i]
                # BUG FIX: this log line used to sit outside the `if`, so it
                # fired once per NON-matching alias. It should fire only when
                # the tag is found later in the list.
                logging.critical(f"Found locus_tag at different loc of list: {i}")
                break
    if not locus_tag_found:
        raise Exception("Expecting locus_tag from genome object, did not find it.")
    gene_table_list_d["locusId"] = locusId_obj[1]
    gene_table_list_d["sysName"] = locusId_obj[1]

    # Scaffold, location and strand come from the first location entry.
    scaffold, bp_loc, strand, nt_len = get_location_info(CDS_info["location"][0])
    gene_table_list_d["scaffoldId"] = scaffold
    gene_table_list_d["begin"] = bp_loc
    gene_table_list_d["end"] = bp_loc + nt_len
    gene_table_list_d["strand"] = strand

    # Description: the first listed function.
    gene_table_list_d["desc"] = CDS_info["functions"][0]

    # GC fraction and TA-dinucleotide count of the DNA sequence.
    DNA_seq = CDS_info["dna_sequence"].upper()
    gene_table_list_d["GC"] = (DNA_seq.count("G") + DNA_seq.count("C")) / float(len(DNA_seq))
    gene_table_list_d["nTA"] = DNA_seq.count("TA")

    # Fields not derivable from the data object.
    gene_table_list_d["type"] = 1
    gene_table_list_d["name"] = "unknown"

    # Amino-acid sequence of the gene.
    gene_table_list_d["AA_seq"] = CDS_info["protein_translation"].upper()
    return gene_table_list_d
def send_sessions():
    """
    Delivers all currently undelivered sessions to Bugsnag
    """
    tracker = default_client.session_tracker
    tracker.send_sessions()
def recover_original_schema_name(sql: str, schema_name: str) -> str:
    """Postgres truncates identifiers to 63 characters at parse time and, as pglast
    uses bits of PG to parse queries, image names like noaa/climate:64_chars_of_hash
    get truncated which can cause ambiguities and issues in provenance. We can't
    get pglast to give us back the full identifier, but we can try and figure out
    what it used to be and patch the AST to have it again.
    """
    # Short names were never truncated -- nothing to recover.
    if len(schema_name) < POSTGRES_MAX_IDENTIFIER:
        return schema_name
    # Find the truncated name followed by more identifier characters, ending
    # at a dot or closing quote, anywhere in the original SQL text.
    pattern = r"(" + re.escape(schema_name) + r"[^.\"]*)[.\"]"
    candidates = list(set(re.findall(pattern, sql)))
    # Us finding more than one candidate schema is pretty unlikely to happen:
    # we'd have to have a truncated schema name that's 63 characters long
    # (of kind some_namespace/some_repo:abcdef1234567890....)
    # which also somehow features in this query as a non-identifier. Raise an
    # error here if this does happen.
    assert len(candidates) == 1
    return str(candidates[0])
def on_method_not_allowed(error):
    """Override the HTML 405 default."""
    body = {"msg": "Method not allowed"}
    return jsonify(body), 405
def dir_name(dir_path):
    """
    Build the temporary, input and result folder paths under a base directory.

    :param dir_path: base directory path (expected to end with a path separator)
    :return: (tmp_dir, input_dir, res_dir)
    """
    return (
        f"{dir_path}tmp\\",
        f"{dir_path}input\\",
        f"{dir_path}result\\",
    )
def test_can_combine_sgr_codes():
    """Multiple SGR escape codes can be combined in one SGR code."""
    expected_pair = code.CSI + code.BLACK + code.DELIMITER + code.BG_RED + 'm'
    assert sgr.create(code.BLACK, code.BG_RED) == expected_pair

    expected_triple = (code.CSI + code.UNDERLINE + code.DELIMITER + code.BLACK
                       + code.DELIMITER + code.BG_RED + 'm')
    assert sgr.create(code.UNDERLINE, code.BLACK, code.BG_RED) == expected_triple
def profile_binning(
    r,
    z,
    bins,
    z_name="pm",
    z_clip=None,
    z_quantile=None,
    return_bin=True,
    plot=True,
):
    """Bin the given quantity z in r.

    Parameters
    ----------
    r: 1d array, binned x values
    z: 1d array, binned y values
    bins: 1d array, bins
    z_name: str, key under which the clipped z values are stored per bin
    z_clip: (low, high) pair; keep only z strictly inside this range
    z_quantile: quantile spec forwarded to clip_quantile_1d when z_clip is None
    return_bin: bool, whether to return the per-bin dict (None otherwise)
    plot: bool, whether to draw a KDE of z for each bin

    Returns
    --------
    r_rbin : 1d array, mean r in bins
    z_rbin : 1d array, mean z in bins
    z_bins : dict, numbers for bins
    """
    # Choose the clipping strategy: a quantile-based function when no
    # explicit (low, high) range is given, otherwise a strict range mask.
    if z_clip is None:
        clip = clip_quantile_1d(z, z_quantile, return_func=True)
    else:
        clip = lambda z_: (z_ > z_clip[0]) & (z_ < z_clip[1])

    z_bins = {}
    if plot:
        fig, ax = plt.subplots(1, 1, figsize=(8, 6))

    # Clip by bins: select points in [bins[k], bins[k+1]) and then clip
    # their z values within the bin.
    for k, b in enumerate(bins[:-1]):
        in_bin = (bins[k] <= r) & (r < bins[k + 1])
        clipped = clip(z[in_bin])
        z_in_bin = z[in_bin][clipped]
        r_in_bin = r[in_bin][clipped]

        z_bin = {z_name: z_in_bin, "r": r_in_bin}
        z_bins[k] = z_bin

        if plot:
            lab = "{0:.2f}<r<{1:.2f}".format(bins[k], bins[k + 1])
            # NOTE(review): sns.distplot is deprecated in recent seaborn;
            # kdeplot/histplot are the replacements.
            sns.distplot(
                z_in_bin,
                hist=False,
                kde_kws={"lw": 2, "alpha": 0.9},
                label=lab,
            )

    # Per-bin means come from a helper defined elsewhere in this module.
    r_rbin, z_rbin = get_mean_rbins(z_bins, z_name=z_name)
    z_bins = z_bins if return_bin else None
    return r_rbin, z_rbin, z_bins
def sequence_to_synergy_sims(inputs, params):
    """Same as sequence_to_synergy, but prepare some other tensors first."""
    # keep a copy of the original sequence features
    inputs[DataKeys.ORIG_SEQ] = inputs[DataKeys.FEATURES]

    # zeroed thresholds tensor of shape (batch, num_tasks, 1, 1)
    batch_size = inputs[DataKeys.FEATURES].get_shape().as_list()[0]
    num_tasks = len(params["importance_task_indices"])
    inputs[DataKeys.WEIGHTED_SEQ_THRESHOLDS] = tf.zeros(
        [batch_size, num_tasks, 1, 1])

    # copy inputs to outputs, then delegate to sequence_to_synergy
    outputs = dict(inputs)
    return sequence_to_synergy(outputs, params)
def create_invalid_points_feature_class(access_feature_class, invalid_reach_table, invalid_points_feature_class):
    """
    Create a feature class of centroid points for the invalid reaches.
    :param access_feature_class: Point feature class of all accesses.
    :param invalid_reach_table: Table of reaches not passing validation.
    :param invalid_points_feature_class: Full path of the point feature class to create.
    :return: Path to invalid points feature class.
    """
    # get the reach coordinates for the putin and takeout if they exist
    def get_reach_centroid(access_feature_class, reach_id):
        # initialize variables to store coordinate pairs as nonetype
        putin_coords = None
        takeout_coords = None

        # attempt to get the coordinates for the putin and takeout
        # (first matching row wins for each access type)
        for row in arcpy.da.SearchCursor(access_feature_class, ('reach_id', 'SHAPE@XY'), "type = 'putin'"):
            if row[0] == reach_id:
                putin_coords = row[1]
                break
        for row in arcpy.da.SearchCursor(access_feature_class, ('reach_id', 'SHAPE@XY'), "type = 'takeout'"):
            if row[0] == reach_id:
                takeout_coords = row[1]
                break

        # return coordinates for the best location for the reach available:
        # the midpoint when both accesses exist, the single known access when
        # only one exists, or None when neither is found
        if putin_coords is None and takeout_coords is None:
            return None
        elif putin_coords is None:
            return takeout_coords
        elif takeout_coords is None:
            return putin_coords
        else:
            return (
                min([putin_coords[0], takeout_coords[0]]) + abs(putin_coords[0] - takeout_coords[0]) / 2,
                min([putin_coords[1], takeout_coords[1]]) + abs(putin_coords[1] - takeout_coords[1]) / 2
            )

    # create the output feature class (same spatial reference as the accesses)
    out_fc = arcpy.CreateFeatureclass_management(
        out_path=os.path.dirname(invalid_points_feature_class),
        out_name=os.path.basename(invalid_points_feature_class),
        geometry_type='POINT',
        spatial_reference=arcpy.Describe(access_feature_class).spatialReference
    )[0]

    # add the fields
    arcpy.AddField_management(
        in_table=out_fc,
        field_name='reach_id',
        field_type='TEXT',
        field_length=10,
        field_alias='Reach ID'
    )
    arcpy.AddField_management(
        in_table=out_fc,
        field_name='reason',
        field_type='TEXT',
        field_length=200,
        field_alias='Reason'
    )

    # create a list of invalid reach id's and invalid reasons
    invalid_list = [(row[0], row[1]) for row in arcpy.da.SearchCursor(invalid_reach_table, ('reach_id', 'reason'))]

    # use an insert cursor to add records to the feature class
    with arcpy.da.InsertCursor(out_fc, ('reach_id', 'reason', 'SHAPE@XY')) as cursor:
        # for every invalid reach
        for invalid_reach in invalid_list:
            # get the reach centroid (may be None when no access was found)
            centroid = get_reach_centroid(access_feature_class, invalid_reach[0])
            # insert a new record
            cursor.insertRow([invalid_reach[0], invalid_reach[1], centroid])

    # return the path to the output feature class
    return out_fc
def _gen_roi_func_constant(constant_roi):
"""
Return a RoI function which returns a constant radius.
See :py:func:`map_to_grid` for a description of the parameters.
"""
def roi(zg, yg, xg):
""" constant radius of influence function. """
return constant_roi
return roi | 25,792 |
def less_important_function(num: int) -> str:
    """
    Example which is documented in the module documentation but not highlighted on the main page.

    :param num: A thing to pass
    :return: A return value
    """
    return str(num)
def main():
    """
    Entry point: set up configuration, logging, theming and signal handlers,
    then start the poezio core and run the asyncio event loop forever.
    """
    # Refuse to run as root: poezio is a user-facing client.
    if os.geteuid() == 0:
        sys.stdout.write("Please do not run poezio as root.\n")
        sys.exit(0)
    # Set the terminal title (xterm escape sequence).
    sys.stdout.write("\x1b]0;poezio\x07")
    sys.stdout.flush()
    from poezio import config
    config.run_cmdline_args()
    config.create_global_config()
    config.setup_logging()
    config.post_logging_setup()

    import logging
    # Silence "exception during logging" noise from the logging module itself.
    logging.raiseExceptions = False

    from poezio.config import options
    if options.check_config:
        # --check-config mode: validate the configuration and exit.
        config.check_config()
        sys.exit(0)

    from poezio.asyncio import monkey_patch_asyncio_slixmpp
    monkey_patch_asyncio_slixmpp()

    from poezio import theming
    theming.update_themes_dir()

    from poezio import logger
    logger.create_logger()

    from poezio import roster
    roster.roster.reset()

    from poezio.core.core import Core

    signal.signal(signal.SIGINT, signal.SIG_IGN)  # ignore ctrl-c
    cocore = Core()
    signal.signal(signal.SIGUSR1, cocore.sigusr_handler)  # reload the config
    # SIGHUP/SIGTERM trigger a clean shutdown through the core.
    signal.signal(signal.SIGHUP, cocore.exit_from_signal)
    signal.signal(signal.SIGTERM, cocore.exit_from_signal)
    if options.debug:
        cocore.debug = True
    cocore.start()

    from slixmpp.exceptions import IqError, IqTimeout

    def swallow_iqerrors(loop, context):
        """Do not log unhandled iq errors and timeouts"""
        if not isinstance(context['exception'], (IqError, IqTimeout)):
            loop.default_exception_handler(context)

    # Warning: asyncio must always be imported after the config. Otherwise
    # the asyncio logger will not follow our configuration and won't write
    # the tracebacks in the correct file, etc
    import asyncio
    loop = asyncio.get_event_loop()
    loop.set_exception_handler(swallow_iqerrors)
    loop.add_reader(sys.stdin, cocore.on_input_readable)
    loop.add_signal_handler(signal.SIGWINCH, cocore.sigwinch_handler)
    cocore.xmpp.start()
    loop.run_forever()
    # We reach this point only when loop.stop() is called
    try:
        cocore.reset_curses()
    except:
        pass
def weth_asset_data():  # pylint: disable=redefined-outer-name
    """Get 0x asset data for Wrapped Ether (WETH) token."""
    ether_token_address = NETWORK_TO_ADDRESSES[NetworkId.GANACHE].ether_token
    return asset_data_utils.encode_erc20(ether_token_address)
def test_access_cli_list(script_info_cli_list):
    """Test of list cli command."""
    result = CliRunner().invoke(access, ['list'], obj=script_info_cli_list)
    assert result.exit_code == 0
    assert "open" in result.output
def GetAudioFiles(config):
    """Print a list of audio files across all models.

    The output is one line for the source file and one line for the install
    file, e.g.:

    ucm-config/bxtda7219max.reef.BASKING/bxtda7219max.reef.BASKING.conf
    /usr/share/alsa/ucm/bxtda7219max.basking/bxtda7219max.basking.conf

    Args:
        config: A CrosConfig instance
    """
    for audio_file in config.GetAudioFiles():
        print(audio_file.source)
        print(audio_file.dest)
def print_depth_recursive(data, depth=1):
    """
    Print every dictionary key with its nesting depth, depth-first
    (recursive), starting from *depth* for the top level.
    """
    if not isinstance(data, Mapping):
        raise TypeError("Input should be a dictionary.")
    for key, value in data.items():
        print(f"{key} {depth}")
        if isinstance(value, Mapping):
            print_depth_recursive(value, depth + 1)
def match_known_module_name(pattern):
    """
    Matching with known module name.

    Args:
        pattern (Pattern): To be replaced pattern.

    Returns:
        str, matched module name, return None if not matched.
    """
    matched_result = []
    # A built-in candidate must agree with `pattern` on in/out degree and on
    # the head/tail items before its pattern items are fuzzy-matched.
    for ptn, module_name in BUILT_IN_MODULE_NAME.items():
        if pattern.in_degree == ptn.in_degree and pattern.out_degree == ptn.out_degree and \
                ptn.head == pattern.head and ptn.tail == pattern.tail:
            is_matched, score = pattern_fuzzy_matching(pattern.ptn_items, ptn.ptn_items)
            if is_matched:
                matched_result.append((module_name, score))
    if matched_result:
        # Pick the highest-scoring candidate (skip the sort when unique).
        module_name = (matched_result if len(matched_result) == 1 else
                       sorted(matched_result, key=lambda x: x[1], reverse=True))[0][0]
        # NOTE(review): `used_module_name` is module-level state mutated here
        # to de-duplicate generated names -- the first use of a pattern keeps
        # the bare name, subsequent uses get a numeric suffix.
        if pattern.pattern not in used_module_name:
            used_module_name[pattern.pattern] = 1
        else:
            module_name = f"{module_name}{used_module_name[pattern.pattern]}"
            used_module_name[pattern.pattern] += 1
        return module_name
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.