| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
import requests
from bs4 import BeautifulSoup
import dateutil.parser
from urllib.parse import urljoin
def fetch_events_art_history(base_url='https://www.sas.upenn.edu'):
"""
Fetch events from Art History Department
"""
page = requests.get(urljoin(base_url, '/arthistory/events'))
page_soup = BeautifulSoup(page.content, 'html.parser')
range_pages = max([int(n_page.text) for n_page in page_soup.find('div',
attrs={'class': 'pagination pagination-centered'}).find_all('li') if n_page.text.isdigit()])
events = []
for n_page in range(1, range_pages):
page = requests.get(
(urljoin(base_url, '/arthistory/events?&page={}')).format(n_page))
page_soup = BeautifulSoup(page.content, 'html.parser')
all_events = page_soup.find(
'div', attrs={'class': 'item-list'}).find_all('li')
for event in all_events:
event_url = urljoin(base_url, event.find('a')['href'])
title = event.find('h3').text if event.find(
'h3') is not None else ''
# event_type = event.find('strong').text if event.find('strong') is not None else ''
date = event.find('span', attrs={'class': 'date-display-single'})
if date is not None:
date, event_time = date.attrs.get('content').split('T')
if '-' in event_time:
starttime, endtime = event_time.split('-')
try:
starttime, endtime = dateutil.parser.parse(starttime).strftime(
"%I:%M %p"), dateutil.parser.parse(endtime).strftime("%I:%M %p")
                    except (ValueError, OverflowError):
pass
else:
starttime, endtime = event_time, ''
else:
date, starttime, endtime = '', '', ''
location = event.find('div', attrs={'class': 'location'})
location = location.text.strip() if location is not None else ''
event_soup = BeautifulSoup(requests.get(
event_url).content, 'html.parser')
description = event_soup.find('div', attrs={'class': 'field-body'})
description = description.text.strip() if description is not None else ''
events.append({
'title': title,
'speaker': '',
'date': date,
'location': location,
'description': description,
'starttime': starttime,
'endtime': endtime,
'url': event_url,
'owner': 'Art History'
})
return events
|
2c17219cbbdd94251db43f52459c196dada014fc
| 3,644,500
|
def calc_Q(nu=0.0,delta=0.0,lam=1.0,ret_k=False):
"""
Calculate psic Q in the cartesian lab frame.
nu and delta are in degrees, lam is in angstroms
if ret_k == True return tuple -> (Q,ki,kr)
"""
(ki,kr) = calc_kvecs(nu=nu,delta=delta,lam=lam)
Q = kr - ki
if ret_k == True:
return (Q,ki,kr)
else:
return Q
|
9c5a9e885b1f78bab7a1de2bbf6a3de2d5723e18
| 3,644,501
|
def build_data(args):
"""
build test data
"""
task_name = args.task_name.lower()
processor = reader.MatchProcessor(data_dir=args.data_dir,
task_name=task_name,
vocab_path=args.vocab_path,
max_seq_len=args.max_seq_len,
do_lower_case=args.do_lower_case)
test_data_generator = processor.data_generator(
batch_size=args.batch_size,
phase='test',
epoch=1,
shuffle=False,
device=args.gpu)
num_test_examples = processor.get_num_examples(phase='test')
test_data = [test_data_generator, num_test_examples]
return processor, test_data
|
ca52d035d34a83e1de0b3cad261f03ce53fd2f0c
| 3,644,502
|
def format_date(d):
"""Date format used in the report."""
if type(d) == str:
d = dateutil_parse(d)
return d.isoformat()
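# Usage sketch; the dateutil_parse used above is assumed to be dateutil.parser.parse:
import datetime
from dateutil.parser import parse as dateutil_parse

print(format_date("2021-03-01 14:30"))          # 2021-03-01T14:30:00
print(format_date(datetime.date(2021, 3, 1)))   # 2021-03-01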
|
de999992e16fe52f42f4b79bbb0a78668d3fa109
| 3,644,503
|
import torch
def pytorch_argmax(op):
"""Implementation of argmax for pytorch."""
def _impl(x, dim):
dim = tuple(sorted(dim))
n = ()
for _s in range(len(x.shape)):
if _s not in dim:
n = n + (_s,)
n = n + dim
x = x.permute(n)
ns = x.shape[0 : -len(dim)] + (-1,)
r = torch.argmax(x.reshape(ns), -1, keepdim=False)
rl = list(r.shape)
for _sd in dim:
rl.insert(_sd, 1)
rf = tuple(rl)
return (torch.reshape(r, rf),)
return _impl, op.inputs[1:]
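# Self-contained sketch of the same multi-axis argmax trick outside the `op` wrapper
# (the shapes below are made up for illustration):
x = torch.randn(2, 3, 4)
dim = (1, 2)
keep = tuple(d for d in range(x.dim()) if d not in dim)
# move the reduced axes to the end, flatten them, then take a single argmax
flat = x.permute(keep + dim).reshape(x.shape[:len(keep)] + (-1,))
idx = torch.argmax(flat, dim=-1)   # flat index into each 3x4 block
print(idx.shape)                   # torch.Size([2])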
|
cc466b41c0dd4bb9730dcdf50816b9d0cf66cfaa
| 3,644,504
|
import os
import pandas as pd
from urllib.request import urlretrieve
def get_wine_quality(num_rows=None):
"""
Wine Quality dataset from UCI repository (
https://archive.ics.uci.edu/ml/datasets/Wine+Quality
Using the white wine data set, not the red.
- Dimensions: 4898 rows, 12 columns.
- Task: Regression
:param num_rows:
:return: X,y
"""
filename = 'winequality-white.csv'
if not os.path.isfile(filename):
urlretrieve(get_wine_quality_url, filename)
wine = pd.read_csv(filename, header=0, nrows=num_rows, delimiter=";")
X = wine.iloc[:, :-1].values
y = wine.iloc[:, -1].values
return X, y
|
2302b8502486261037ed8be92e8c6988dd46e8ea
| 3,644,505
|
def parse_eos(eos):
"""Function to interpret input as an EOS"""
if hasattr(eos, 'asq_of_rho_p'):
return eos # already is EOS class
if eos == 'H' or eos == 'h':
return SimpleHydrogen()
try:
return Ideal(float(eos)) # try parsing as a gamma value
except ValueError:
raise ValueError('Cannot parse EOS "{0:}".'.format(eos))
|
2303a9028b89647fae4b9a4fca0363826310b730
| 3,644,506
|
def get_2D_hse_kpoints(struct_for_path, ibzkpth):
"""
Args:
struct_for_path: Structure from which linemode k-points will
be generated.
ibzkpth:
Returns:
the Kpoints file object in the form of a string
ready for execution by MPInterfaces
calibrate objects
"""
# Read IBZKPT from prep step
ibz_lines = open(ibzkpth).readlines()
n_ibz_kpts = int(ibz_lines[1].split()[0])
# Read linemode KPOINTs from the dict (makes sure it is Kpoints
# file with only 20 per atom for the optimized settings
# Kpoints.from_dict(kpoint_dict).write_file('linemode_KPOINTS')
kpath = HighSymmKpath(struct_for_path)
Kpoints.automatic_linemode(20, kpath).write_file('KPOINTS_linemode')
remove_z_kpoints_linemode()
linemode_lines = open('KPOINTS_linemode').readlines()
# put them together
abs_path = []
for i in range(4, len(linemode_lines), 3):
start_kpt = linemode_lines[i].split()
end_kpt = linemode_lines[i+1].split()
increments = [
(float(end_kpt[0]) - float(start_kpt[0])) / 20,
(float(end_kpt[1]) - float(start_kpt[1])) / 20,
(float(end_kpt[2]) - float(start_kpt[2])) / 20
]
abs_path.append(start_kpt[:3] + ['0', start_kpt[4]])
for n in range(1, 20):
abs_path.append(
[str(float(start_kpt[0]) + increments[0] * n),
str(float(start_kpt[1]) + increments[1] * n),
str(float(start_kpt[2]) + increments[2] * n), '0']
)
abs_path.append(end_kpt[:3] + ['0', end_kpt[4]])
n_linemode_kpts = len(abs_path)
# write out the kpoints file and return the object
Kpoints_hse_file = '\n'.join(
['Automatically generated mesh',
'{}'.format(n_ibz_kpts + n_linemode_kpts),
'Reciprocal Lattice',
'{}'.format(str(''.join([line for line in ibz_lines[3:]])))]) + \
'{}'.format(str('\n'.join(
[' '.join(point) for point in abs_path])))
## can be used for test print out
# with open('KPOINTS_HSE', 'w') as kpts:
# kpts.write('Automatically generated mesh\n')
# kpts.write('{}\n'.format(n_ibz_kpts + n_linemode_kpts))
# kpts.write('Reciprocal Lattice\n')
# for line in ibz_lines[3:]:
# kpts.write(line)
# for point in abs_path:
# kpts.write('{}\n'.format(' '.join(point)))
return Kpoints_hse_file
|
e4ad65df4f4fc41c0af48e84dfd9b9bbddea9e20
| 3,644,507
|
import logging
import os
import pickle
import traceback
def read_cache(logger: logging.Logger, cache_file: str) -> CachedData:
"""Read file with Py pickle in it."""
if not cache_file:
return CachedData()
if not os.path.exists(cache_file):
logger.warning("Cache file '%s' doesn't exist.", cache_file)
return CachedData()
with open(cache_file, 'rb') as fhandle:
try:
cache = pickle.load(fhandle)
except EOFError:
# Note: occurred with empty file.
cache = CachedData()
logger.debug(
'Cache file is probably empty: %s', traceback.format_exc()
)
logger.debug(cache)
return cache
|
83f309e760f3d63a73fcb9a3f378f535605d7c94
| 3,644,508
|
import numpy as np
def neutralize(word, g, word_to_vec_map):
"""
Removes the bias of "word" by projecting it on the space orthogonal to the bias axis.
This function ensures that gender neutral words are zero in the gender subspace.
Arguments:
word -- string indicating the word to debias
g -- numpy-array of shape (50,), corresponding to the bias axis (such as gender)
word_to_vec_map -- dictionary mapping words to their corresponding vectors.
Returns:
e_debiased -- neutralized word vector representation of the input "word"
"""
# Select word vector representation of "word". Use word_to_vec_map. (≈ 1 line)
e = word_to_vec_map[word]
    # Compute e_biascomponent using the formula given above. (≈ 1 line)
e_biascomponent = (np.dot(e,g) / np.square(np.linalg.norm(g))) * g
# Neutralize e by substracting e_biascomponent from it
# e_debiased should be equal to its orthogonal projection. (≈ 1 line)
e_debiased = e - e_biascomponent
return e_debiased
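# Minimal self-contained check with made-up 3-d vectors standing in for 50-d embeddings:
g_toy = np.array([1.0, 0.0, 0.0])
vec_map_toy = {"word": np.array([2.0, 3.0, -1.0])}
debiased = neutralize("word", g_toy, vec_map_toy)
print(np.dot(debiased, g_toy))   # ~0.0: nothing left along the bias axis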
|
a732050ef214fe29c6e234cea2f0a7d63b784829
| 3,644,509
|
def loadRowCluster(ndPage,algo):
"""
load cluster algo = aglo
"""
xpCluster = f".//Cluster[@algo='{algo}']"
lClusters= ndPage.xpath(xpCluster)
return lClusters
|
dcb75214e58d6656f58bee78b904562c05fd36d8
| 3,644,510
|
def _elementwise(f):
""" Enables elementwise operations
The wrapper implements two different modes of argument evaluation
for given p_1,..., p_k that represent the predicted distributions
and and x_1,...,x_m that represent the values to evaluate them on.
"elementwise" (default): Repeat the sequence of p_i until there are m,
i.e., p_1,...,p_k,p_1,p_2,...,p_k,p_1,...,p_m'
where m' is the remainder of dividing m by k.
"batch": x_1, ..., x_m is evaluated on every distribution p_i
resulting in a matrix m columns and k rows.
Parameters
----------
f: The function to decorate
Returns
-------
Decorated function
"""
def wrapper(self, x, *args, **kwargs):
if len(np.array(x).shape) > 1:
x = x.flatten()
# cache index
index_ = self.index
self.index = slice(None)
# disable elementwise mode if x is scalar
elementwise = (self.mode == 'elementwise' and len(np.array(x).shape) != 0)
if elementwise:
evaluations = len(x)
else:
evaluations = len(self.X)
# compose result
result = []
number_of_points = len(self.X)
for index in range(evaluations):
# set evaluation index and point
if elementwise:
self.index = index % number_of_points
at = x[index]
else:
self.index = index
at = x
# evaluate the function at this point
result.append(f(self, at, *args, **kwargs))
# rollback index
self.index = index_
if len(result) > 1:
return np.array(result)
else:
return result[0]
return _forward_meta(wrapper, f)
|
7cb9a17c648384e07bde3b57415244efd7e34e8a
| 3,644,511
|
def is_valid_task_id(task_id):
"""
Return False if task ID is not valid.
"""
parts = task_id.split('-')
if len(parts) == 5 and [len(i) for i in parts[1:]] == [8, 4, 4, 4]:
tp = RE_TASK_PREFIX.split(parts[0])
return (len(tp) == 5 and
all(i.isdigit() for i in tp[::2]) and
tp[1] in TT and
tp[3] in TG)
return False
|
d39e26ae52d96f9c6ed0bf5fea2ac317d5b9e8af
| 3,644,512
|
def figure_14_9():
"""Return the unweighted, undirected graph from Figure 14.9 of DSAP.
This is the same graph as in Figure 14.10.
"""
E = (
('A', 'B'), ('A', 'E'), ('A', 'F'), ('B', 'C'), ('B', 'F'),
('C', 'D'), ('C', 'G'), ('D', 'G'), ('D', 'H'), ('E', 'F'),
        ('E', 'I'), ('F', 'I'), ('G', 'J'), ('G', 'K'), ('G', 'L'),
('H', 'L'), ('I', 'J'), ('I', 'M'), ('I', 'N'), ('J', 'K'),
('K', 'N'), ('K', 'O'), ('L', 'P'), ('M', 'N'),
)
return graph_from_edgelist(E, False)
|
d81a11aa46bd62942c880dfa8f0a724801979449
| 3,644,513
|
def audit_umbrelladns(networks_fwrules):
"""Accepts a list of firewall rules for a client
Checks for rules to allow DNS lookups to Umbrella and
deny all other DNS lookups.
Returns a list of clients and a boolean of whether Umbrella DNS
is configured properly"""
umbrelladns_audit = []
host1 = '208.67.222.222/32'
host2 = '208.67.220.220/32'
for customer in networks_fwrules:
customer_result = {
'organizationId': customer['organizationId'],
'organizationName': customer['organizationName']
}
for network in customer['networks']:
umbrella_allow, dns_deny = 'False', 'False'
if 'l3FirewallRules' in network:
for rule in network['l3FirewallRules']:
destcidr = rule['destCidr'].split(",")
if rule['policy'] == 'allow' \
and rule['protocol'] == 'tcp' \
and rule['destPort'] == '53' \
and (host1 in destcidr and host2 in destcidr):
umbrella_allow = 'True'
if rule['policy'] == 'allow' \
and rule['protocol'] == 'udp' \
and rule['destPort'] == '53' \
and (host1 in destcidr and host2 in destcidr):
umbrella_allow = 'True'
if rule['policy'] == 'deny' \
and rule['protocol'] == 'tcp' \
and rule['destPort'] == '53' \
and rule['destCidr'] == 'Any':
dns_deny = 'True'
if rule['policy'] == 'deny' \
and rule['protocol'] == 'udp' \
and rule['destPort'] == '53' \
and rule['destCidr'] == 'Any':
dns_deny = 'True'
            if umbrella_allow == 'True' and dns_deny == 'True':
customer_result['umbrellaDns'] = 'True'
else:
customer_result['umbrellaDns'] = 'False'
umbrelladns_audit.append(customer_result)
return umbrelladns_audit
|
26c01011dee998ba398db03603c61c00845055ea
| 3,644,514
|
from typing import Tuple
import itertools
import xml.etree.ElementTree as ET
def parse_element_container(elem: ET.Element) -> Tuple[Types.FlexElement, ...]:
"""Parse XML element container into FlexElement subclass instances.
"""
tag = elem.tag
if tag == "FxPositions":
# <FxPositions> contains an <FxLots> wrapper per currency.
# Element structure here is:
# <FxPositions><FxLots><FxLot /></FxLots></FxPositions>
# Flatten the nesting to create FxPositions as a tuple of FxLots
fxlots = (parse_element_container(child) for child in elem)
return tuple(itertools.chain.from_iterable(fxlots))
instances = tuple(parse_data_element(child) for child in elem)
return instances
|
477776ff49e47fb0ca45767c5a74ff6941d0abb0
| 3,644,515
|
def _is_smooth_across_dateline(mid_lat, transform, rtransform, eps):
"""
test whether the CRS is smooth over the dateline
idea borrowed from IsAntimeridianProjToWGS84 with minor mods...
"""
left_of_dt_x, left_of_dt_y, _ = rtransform.TransformPoint(180-eps, mid_lat)
right_of_dt_x, right_of_dt_y, _ = rtransform.TransformPoint(-180+eps, mid_lat)
if _dist(right_of_dt_x-left_of_dt_x, right_of_dt_y-left_of_dt_y) > 1:
return False
left_of_dt_lon, left_of_dt_lat, _ = transform.TransformPoint(left_of_dt_x, left_of_dt_y)
right_of_dt_lon, right_of_dt_lat, _ = transform.TransformPoint(right_of_dt_x, right_of_dt_y)
if (_dist(left_of_dt_lon - 180 + eps, left_of_dt_lat - mid_lat) > 2 * eps or
_dist(right_of_dt_lon + 180 - eps, right_of_dt_lat - mid_lat) > 2 * eps):
return False
return True
|
c1058bb24f254ce7158ec69872cfec1081a3027c
| 3,644,516
|
def reverse_args(func: Func) -> fn:
"""
Creates a function that invokes func with the positional arguments order
reversed.
Examples:
>>> concat = sk.reverse_args(lambda x, y, z: x + y + z)
>>> concat("a", "b", "c")
'cba'
"""
func = to_callable(func)
return fn(lambda *args, **kwargs: func(*args[::-1], **kwargs))
|
e1e734f767fb187f9563f51d1f106ebfc17ebbfb
| 3,644,517
|
import numpy as np
def ar(x, y, z):
"""Offset arange by z/2."""
return z / 2 + np.arange(x, y, z, dtype='float')
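# Quick check with arbitrary bounds: bin centers offset by half a step.
print(ar(0, 1, 0.25))   # [0.125 0.375 0.625 0.875]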
|
0aca14778dd4ba814d9303146d3226f6645f2366
| 3,644,518
|
def dot(inputs, axes, normalize=False, **kwargs):
"""Functional interface to the `Dot` layer.
Args:
inputs: A list of input tensors (at least 2).
axes: Integer or tuple of integers,
axis or axes along which to take the dot product.
normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the dot product of the samples from the inputs.
"""
return Dot(axes=axes, normalize=normalize, **kwargs)(inputs)
|
2e5d83aad376e82b7938ebfec8cef3074bec3c58
| 3,644,519
|
def delete_queue(name, region, opts=None, user=None):
"""
Deletes a queue in the region.
name
        Name of the SQS queue to delete
region
Name of the region to delete the queue from
opts : None
Any additional options to add to the command line
user : None
        Run aws as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.delete_queue <sqs queue> <region>
"""
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
log.debug("map %s", url_map)
if name in url_map:
delete = {"queue-url": url_map[name]}
rtn = _run_aws("delete-queue", region=region, opts=opts, user=user, **delete)
success = True
err = ""
out = "{} deleted".format(name)
else:
out = ""
err = "Delete failed"
success = False
ret = {
"retcode": 0 if success else 1,
"stdout": out,
"stderr": err,
}
return ret
|
b17f54eefab61e682697fe09d26f6a55658b4171
| 3,644,520
|
import inspect
def get_all_methods(klass):
"""Get all method members (regular, static, class method).
"""
if not inspect.isclass(klass):
raise ValueError
pairs = list()
for attr, value in inspect.getmembers(
klass, lambda x: inspect.isroutine(x)):
if not (attr.startswith("__") or attr.endswith("__")):
pairs.append((attr, value))
return pairs
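# Usage sketch with a throwaway class; dunder methods are filtered out and results
# come back sorted by name:
class Demo:
    def regular(self):
        return 1

    @staticmethod
    def static_m():
        return 2

    @classmethod
    def class_m(cls):
        return 3

print([name for name, _ in get_all_methods(Demo)])   # ['class_m', 'regular', 'static_m']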
|
ada4f47c750455ddd1300f26eb3e296b046acefe
| 3,644,521
|
import pathlib
def _suffix_directory(key: pathlib.Path):
"""Converts '/folder/.../folder/folder/folder' into 'folder/folder'"""
key = pathlib.Path(key)
shapenet_folder = key.parent.parent
key = key.relative_to(shapenet_folder)
return key
|
147539065c3d21ee351b23f2d563c662fe55f04a
| 3,644,522
|
def setDesktop( studyID ):
"""This method sets and returns TRUST_PLOT2D desktop"""
global moduleDesktop, desktop
if studyID not in moduleDesktop:
moduleDesktop[studyID] = DynamicDesktop( sgPyQt )
moduleDesktop[studyID].initialize()
desktop = moduleDesktop[studyID]
return desktop
|
fd7ad5b57a832e4d6d4adbf2b5fbf973cc1b9e3e
| 3,644,523
|
def load(file_path: str):
"""Used for loading dataset files that have been downloaded.
Args:
file_path: Path to file to be loaded.
Returns:
x: Data used to train models.
y: Dataset labels.
Example:
>>> data,labels = load("model/mnist.npz")
>>> # Print first dataset example and first label
>>> print(data[0])
        >>> print(labels[0])
[0 200 ... 15 0]
5
"""
with np.load(file_path) as data:
return data['x'], \
data['y']
|
47e045d343509322cf9f845f454a99bf6f34cde7
| 3,644,524
|
def xrefchar(*args):
"""
xrefchar(xrtype) -> char
Get character describing the xref type.
@param xrtype: combination of Cross-Reference type flags and a
        cref_t or dref_t value (C++: char)
"""
return _ida_xref.xrefchar(*args)
|
a6991e0a56710359804d21b79b86ed3ead852769
| 3,644,525
|
def problem_5_14_8(scalars, vectors):
"""
>>> u = list2vec([1,1,0,0])
>>> v = list2vec([0,1,1,0])
>>> w = list2vec([0,0,1,1])
>>> x = list2vec([1,0,0,1])
>>> problem_5_14_8([1, -1, 1], [u, v, w]) == x
True
>>> problem_5_14_8([-1, 1, 1], [u, v, x]) == w
True
>>> problem_5_14_8([1, 1, -1], [u, w, x]) == v
True
>>> problem_5_14_8([1, -1, 1], [v, w, x]) == u
True
"""
return lin_comb_sum(scalars, vectors)
|
e8456cbf7a0e47519003c3b3a414560c1d1ee5ac
| 3,644,526
|
def atomic(fn, self, *args, **kwargs):
"""
Atomic method.
"""
return self._atom(fn, args, kwargs)
|
96fdd8451bb534deefb2ffbe101526838d75fa6e
| 3,644,527
|
def text_to_string(filename, useEncoding):
"""Read a text file and return a string."""
with open(filename, encoding=useEncoding, errors='ignore') as infile:
return infile.read()
|
f879bb747699496204820b74944fd563658a7117
| 3,644,528
|
def forward_propagation(x, paras, bn_paras, decay=0.9):
""" forward propagation function
Paras
------------------------------------
x: input dataset, of shape (input size, number of examples)
W: weight matrix of shape (size of current layer, size of previous layer)
b: bias vector of shape (size of current layer,1)
gamma: scale vector of shape (size of current layer ,1)
beta: offset vector of shape (size of current layer ,1)
decay: the parameter of exponential weight average
moving_mean: decay * moving_mean + (1 - decay) * current_mean
        moving_var: decay * moving_var + (1 - decay) * current_var
Returns
------------------------------------
y: the output of the last Layer(y_predict)
caches: list, every element is a tuple:(W,b,z,A_pre)
"""
L = len(paras) // 4 # number of layer
caches = []
# calculate from 1 to L-1 layer
for l in range(1, L):
W = paras["W" + str(l)]
b = paras["b" + str(l)]
gamma = paras["gamma" + str(l)]
beta = paras["beta" + str(l)]
# linear forward -> relu forward ->linear forward....
z = linear(x, W, b)
mean, var, sqrt_var, normalized, out = batch_norm(z, gamma, beta)
caches.append((x, W, b, gamma, sqrt_var, normalized, out))
x = relu(out)
bn_paras["moving_mean" + str(l)] = decay * bn_paras["moving_mean" + str(l)] + (1 - decay) * mean
bn_paras["moving_var" + str(l)] = decay * bn_paras["moving_var" + str(l)] + (1 - decay) * var
# calculate Lth layer
W = paras["W" + str(L)]
b = paras["b" + str(L)]
z = linear(x, W, b)
caches.append((x, W, b, None, None, None, None))
y = sigmoid(z)
return y, caches, bn_paras
|
eb2955f9bff056ad1639d2b63a39a6ff40293400
| 3,644,529
|
def sentence_indexes_for_fragment(fragment: Fragment, sentences: list) -> list:
"""Get the start and end indexes in the whole article for the sentences encompassing a fragment."""
start_sentence_index = sentence_index_for_fragment_index(fragment.start, sentences)
end_sentence_index = sentence_index_for_fragment_index(fragment.end, sentences)
    return list(range(start_sentence_index, end_sentence_index + 1))
|
08ec8df9c9e7e06f20dd6554a9da3a0ca89e4f53
| 3,644,530
|
import struct
def _watchos_extension_impl(ctx):
"""Implementation of watchos_extension."""
top_level_attrs = [
"app_icons",
"strings",
"resources",
]
# Xcode 11 requires this flag to be passed to the linker, but it is not accepted by earlier
# versions.
# TODO(min(Xcode) >= 11): Make this unconditional when the minimum supported Xcode is Xcode 11.
xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig]
if xcode_support.is_xcode_at_least_version(xcode_config, "11"):
extra_linkopts = ["-e", "_WKExtensionMain"]
# This is required when building with watchOS SDK 6.0 or higher but with a minimum
# deployment version lower than 6.0. See
# https://developer.apple.com/documentation/xcode_release_notes/xcode_11_release_notes.
minimum_os = apple_common.dotted_version(ctx.attr.minimum_os_version)
if minimum_os < apple_common.dotted_version("6.0"):
extra_linkopts.append(
# The linker will search for this library relative to sysroot, which will already
# be the watchOS SDK directory.
#
# This is a force-load (unlike Xcode, which uses a standard `-l`) because we can't
# easily control where it appears in the link order relative to WatchKit.framework
# (where this symbol also lives, in watchOS 6+), so we need to guarantee that the
# linker doesn't skip the static library's implementation of `WKExtensionMain` if
# it already resolved the symbol from the framework.
"-Wl,-force_load,/usr/lib/libWKExtensionMainLegacy.a",
)
else:
extra_linkopts = []
link_result = linking_support.register_linking_action(
ctx,
extra_linkopts = extra_linkopts,
stamp = ctx.attr.stamp,
)
binary_artifact = link_result.binary_provider.binary
debug_outputs_provider = link_result.debug_outputs_provider
actions = ctx.actions
apple_toolchain_info = ctx.attr._toolchain[AppleSupportToolchainInfo]
bin_root_path = ctx.bin_dir.path
bundle_id = ctx.attr.bundle_id
bundle_name, bundle_extension = bundling_support.bundle_full_name_from_rule_ctx(ctx)
executable_name = bundling_support.executable_name(ctx)
entitlements = entitlements_support.entitlements(
entitlements_attr = getattr(ctx.attr, "entitlements", None),
entitlements_file = getattr(ctx.file, "entitlements", None),
)
features = features_support.compute_enabled_features(
requested_features = ctx.features,
unsupported_features = ctx.disabled_features,
)
label = ctx.label
platform_prerequisites = platform_support.platform_prerequisites_from_rule_ctx(ctx)
predeclared_outputs = ctx.outputs
rule_descriptor = rule_support.rule_descriptor(ctx)
archive = outputs.archive(
actions = actions,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
platform_prerequisites = platform_prerequisites,
predeclared_outputs = predeclared_outputs,
)
bundle_verification_targets = [struct(target = ext) for ext in ctx.attr.extensions]
processor_partials = [
partials.apple_bundle_info_partial(
actions = actions,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
executable_name = executable_name,
bundle_id = bundle_id,
entitlements = entitlements,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
predeclared_outputs = predeclared_outputs,
product_type = rule_descriptor.product_type,
),
partials.binary_partial(
actions = actions,
binary_artifact = binary_artifact,
executable_name = executable_name,
label_name = ctx.label.name,
),
partials.bitcode_symbols_partial(
actions = actions,
binary_artifact = binary_artifact,
debug_outputs_provider = debug_outputs_provider,
dependency_targets = ctx.attr.extensions,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
),
partials.clang_rt_dylibs_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
binary_artifact = binary_artifact,
features = features,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
),
partials.debug_symbols_partial(
actions = actions,
bin_root_path = bin_root_path,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
debug_dependencies = ctx.attr.extensions,
debug_outputs_provider = debug_outputs_provider,
dsym_info_plist_template = apple_toolchain_info.dsym_info_plist_template,
executable_name = executable_name,
platform_prerequisites = platform_prerequisites,
rule_label = label,
),
partials.embedded_bundles_partial(
bundle_embedded_bundles = True,
platform_prerequisites = platform_prerequisites,
embeddable_targets = ctx.attr.extensions,
plugins = [archive],
),
# Following guidance of the watchOS 2 migration guide's recommendations for placement of a
# framework, scoping dynamic frameworks only to the watch extension bundles:
# https://developer.apple.com/library/archive/documentation/General/Conceptual/AppleWatch2TransitionGuide/ConfiguretheXcodeProject.html
partials.framework_import_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
provisioning_profile = getattr(ctx.file, "provisioning_profile", None),
rule_descriptor = rule_descriptor,
targets = ctx.attr.deps,
),
partials.resources_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
bundle_extension = bundle_extension,
bundle_verification_targets = bundle_verification_targets,
bundle_id = bundle_id,
bundle_name = bundle_name,
environment_plist = ctx.file._environment_plist,
executable_name = executable_name,
launch_storyboard = None,
platform_prerequisites = platform_prerequisites,
plist_attrs = ["infoplists"],
rule_attrs = ctx.attr,
rule_descriptor = rule_descriptor,
rule_label = label,
top_level_attrs = top_level_attrs,
),
partials.swift_dylibs_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
binary_artifact = binary_artifact,
label_name = label.name,
dependency_targets = ctx.attr.extensions,
platform_prerequisites = platform_prerequisites,
),
]
if platform_prerequisites.platform.is_device:
processor_partials.append(
partials.provisioning_profile_partial(
actions = actions,
profile_artifact = ctx.file.provisioning_profile,
rule_label = label,
),
)
processor_result = processor.process(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
codesignopts = codesigning_support.codesignopts_from_rule_ctx(ctx),
entitlements = entitlements,
executable_name = executable_name,
ipa_post_processor = ctx.executable.ipa_post_processor,
partials = processor_partials,
platform_prerequisites = platform_prerequisites,
predeclared_outputs = predeclared_outputs,
process_and_sign_template = apple_toolchain_info.process_and_sign_template,
provisioning_profile = getattr(ctx.file, "provisioning_profile", None),
rule_descriptor = rule_descriptor,
rule_label = label,
)
return [
DefaultInfo(
files = processor_result.output_files,
),
OutputGroupInfo(
**outputs.merge_output_groups(
link_result.output_groups,
processor_result.output_groups,
)
),
WatchosExtensionBundleInfo(),
] + processor_result.providers
|
0e79df610e751e6322888ee68a15a7ff4ceda970
| 3,644,531
|
from typing import Optional
def train_and_eval(trial: optuna.Trial, ex_dir: str, seed: Optional[int]):
"""
Objective function for the Optuna `Study` to maximize.
.. note::
Optuna expects only the `trial` argument, thus we use `functools.partial` to sneak in custom arguments.
:param trial: Optuna Trial object for hyper-parameter optimization
:param ex_dir: experiment's directory, i.e. the parent directory for all trials in this study
:param seed: seed value for the random number generators, pass `None` for no seeding
:return: objective function value
"""
# Synchronize seeds between Optuna trials
pyrado.set_seed(seed)
# Environment
env_hparams = dict(dt=1/250., max_steps=1500)
env = QQubeSim(**env_hparams)
env = ActNormWrapper(env)
# Policy
policy_hparam = dict(
feats=FeatureStack([identity_feat, sign_feat, abs_feat, squared_feat, bell_feat, MultFeat([4, 5])])
)
policy = LinearPolicy(spec=env.spec, **policy_hparam)
# Algorithm
algo_hparam = dict(
num_sampler_envs=1, # parallelize via optuna n_jobs
max_iter=150,
pop_size=trial.suggest_categorical('pop_size', [100, 150, 200, 250]),
num_rollouts=trial.suggest_categorical('num_rollouts', [4, 6, 8, 10, 12]),
num_is_samples=trial.suggest_categorical('num_is_samples', [50, 100, 150, 200]),
expl_std_init=trial.suggest_uniform('expl_std_init', 0.2, 1.5),
expl_std_min=0.02,
symm_sampling=trial.suggest_categorical('symm_sampling', [True, False]),
)
csv_logger = create_csv_step_logger(osp.join(ex_dir, f'trial_{trial.number}'))
algo = PoWER(osp.join(ex_dir, f'trial_{trial.number}'), env, policy, **algo_hparam, logger=csv_logger)
# Train without saving the results
algo.train(snapshot_mode='latest', seed=seed)
# Evaluate
min_rollouts = 1000
sampler = ParallelSampler(env, policy, num_envs=1, min_rollouts=min_rollouts) # parallelize via optuna n_jobs
ros = sampler.sample()
mean_ret = sum([r.undiscounted_return() for r in ros])/min_rollouts
return mean_ret
|
128b94452d3a398efe5b754e4e3dacf25bd5e165
| 3,644,532
|
def alert_query(alert, authz):
"""Construct a search query to find new matching entities and documents
for a particular alert. Update handling is done via a timestamp of the
latest known result."""
# Many users have bookmarked complex queries, otherwise we'd use a
# precise match query.
query = {
'simple_query_string': {
'query': alert.query,
'fields': ['text'],
'default_operator': 'AND',
'minimum_should_match': '90%'
}
}
filter_since = {
'range': {
'created_at': {'gt': alert.notified_at}
}
}
return {
'size': MAX_PAGE,
'query': {
'bool': {
'should': [query],
'filter': [filter_since, authz_query(authz)],
'minimum_should_match': 1
}
}
}
|
c7181e174613ea61fe67d6165f1022a10ab5862e
| 3,644,533
|
def iscomment(s):
"""
Define what we call a comment in MontePython chain files
"""
return s.startswith('#')
|
ab3a9d240e423c562c9e83cdd9599fddf144b7c3
| 3,644,534
|
import sys
def hxlvalidate_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlvalidate with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Validate a HXL dataset.')
parser.add_argument(
'-s',
'--schema',
help='Schema file for validating the HXL dataset (if omitted, use the default core schema).',
metavar='schema',
default=None
)
parser.add_argument(
'-a',
'--all',
help='Include all rows in the output, including those without errors',
action='store_const',
const=True,
default=False
)
parser.add_argument(
'-e',
'--error-level',
help='Minimum error level to show (defaults to "info") ',
choices=['info', 'warning', 'error'],
metavar='info|warning|error',
default='info'
)
args = parser.parse_args(args)
do_common_args(args)
with make_input(args, stdin) as input, make_output(args, stdout) as output:
class Counter:
infos = 0
warnings = 0
errors = 0
def callback(e):
"""Show a validation error message."""
if e.rule.severity == 'info':
if args.error_level != 'info':
return
Counter.infos += 1
elif e.rule.severity == 'warning':
if args.error_level == 'error':
return
Counter.warnings += 1
else:
Counter.errors += 1
message = '[{}] '.format(e.rule.severity)
if e.row:
if e.rule:
message += "{},{}: ".format(e.row.row_number + 1, e.rule.tag_pattern)
else:
message += "{}: ".format(e.row.row_number + 1)
elif e.rule:
message += "<dataset>,{}: ".format(e.rule.tag_pattern)
else:
message += "<dataset>: "
if e.value:
message += '"{}" '.format(e.value)
if e.message:
message += e.message
message += "\n"
output.write(message)
output.write("Validating {} with schema {} ...\n".format(args.infile or "<standard input>", args.schema or "<default>"))
source = hxl.io.data(input)
if args.schema:
with make_input(args, None, args.schema) as schema_input:
schema = hxl.schema(schema_input, callback=callback)
else:
schema = hxl.schema(callback=callback)
schema.validate(source)
if args.error_level == 'info':
output.write("{:,} error(s), {:,} warnings, {:,} suggestions\n".format(Counter.errors, Counter.warnings, Counter.infos))
elif args.error_level == 'warning':
output.write("{:,} error(s), {:,} warnings\n".format(Counter.errors, Counter.warnings))
else:
output.write("{:,} error(s)\n".format(Counter.errors))
if Counter.errors > 0:
output.write("Validation failed.\n")
return EXIT_ERROR
else:
output.write("Validation succeeded.\n")
return EXIT_OK
|
613b7157f23e9c6234ae3428d18e0f22789131ea
| 3,644,535
|
def fix_bayes_factor(bayes_factor):
"""
If one of the bayes factors is 'inf' we get a string instead of a
tuple back. This is hacky but fixes that.
"""
# Maximum cut off for Bayes factor value
max_bf = 1e12
if type(bayes_factor) == str:
bayes_factor = bayes_factor.split(",")
bayes_factor = [min(float(x), max_bf) for x in bayes_factor]
bayes_factor = tuple(bayes_factor)
bayes_factor = bayes_factor[0]
return bayes_factor
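# Quick check with made-up values: the 'inf' string case is parsed and capped.
print(fix_bayes_factor("inf, 3.2"))   # 1000000000000.0 (first element, capped at max_bf)
print(fix_bayes_factor((2.5, 3.2)))   # (2.5, 3.2) is returned unchanged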
|
7e7912ea9b0c90f0945f486aa397a2df2d13d5cc
| 3,644,536
|
import numpy as np
def fiebelkorn_binning(x_trial, t_trial):
"""
Given accuracy and time-points, find the time-smoothed average accuracy
Parameters
----------
x_trial : np.ndarray
Accuracy (Hit: 1, Miss: 0) of each trial
t_trial : np.ndarray
The time-stamp of each trial
Returns
-------
x_bin : np.ndarray
The average accuracy within each time bin
t_bin : np.ndarray
The centers of each time bin
"""
details = behav_details['fiebelkorn']
# Time-stamps of the center of each bin
t_bin = np.arange(details['t_start'],
details['t_end'] + 1e-10,
details['bin_step'])
# Accuracy within each bin
x_bin = []
for i_bin in range(len(t_bin)):
bin_center = t_bin[i_bin]
bin_start = bin_center - (details['bin_width'] / 2)
bin_end = bin_center + (details['bin_width'] / 2)
bin_sel = (bin_start <= t_trial) & (t_trial <= bin_end)
x_bin_avg = np.mean(x_trial[bin_sel])
x_bin.append(x_bin_avg)
x_bin = np.array(x_bin)
return x_bin, t_bin
|
29651c03dba351475c881d77a08da618ba89aa6a
| 3,644,537
|
def get_fastest_while_jump(condition:str, jump_tag:str, verdicts: list) -> list:
"""Verdicts like ["while", "a", "<", "10"] """
result = []
jumpables = ("===", ) + tuple(INVERT_TABLE.keys())
if len(verdicts) == 2:
result.append(F"jump-if {jump_tag} {verdicts[1]} != false")
elif verdicts[2] in jumpables:
result.append(F"jump-if {jump_tag} " + (" ".join(verdicts[1:]) ) )
else:
result.append(create_temporary_xlet(condition, verdicts[1:]))
result.append(F"jump-if {jump_tag} {condition} != false")
return result
|
16f4b8ba1e180dbad22e93f6bf08ab52eecb0086
| 3,644,538
|
import sys
import os
from sys import path
def ChangeUserPath(args):
"""Function to change or create the user repository path. This is where all the user's data is
stored."""
global user_datapath
if user_datapath:
sys.stdout.write("Current user_datapath is: %s\n" % user_datapath)
elif savedpath:
sys.stdout.write("Saved user_datapath: %s was not found\n" % savedpath)
p = input("Please provide a path to place a user repository (s to skip):\n")
if p.lower() == "s":
return False
newpath = os.path.abspath(os.path.join(p, "ABangleData/"))
# Handle some potential errors - this may not be completely robust.
if not os.path.exists(newpath):
try:
os.mkdir(newpath)
except OSError as exe:
if str(exe).startswith("[Errno 13]"):
sys.stderr.write("No write privelages for %s.\n" % os.path.abspath(p))
else:
sys.stderr.write(
"Path %s does not exist. Please provide an existing path to create a repository\n"
% os.path.abspath(p)
)
return False
elif not (os.access(newpath, os.R_OK) and os.access(newpath, os.W_OK)):
sys.stderr.write("No read/write privelages for %s.\n" % newpath)
return False
if not os.path.exists(os.path.join(newpath, "user_fvs")):
try:
os.mkdir(os.path.join(newpath, "user_fvs"))
except OSError as exe:
if str(exe).startswith("[Errno 13]"):
sys.stderr.write("No write privelages for %s.\n" % os.path.abspath(p))
return False
elif not (os.access(newpath, os.R_OK) and os.access(newpath, os.W_OK)):
sys.stderr.write("No read/write privelages for %s.\n" % newpath)
return False
user_datapath = newpath
ufname = open(os.path.join(path, "config/userdatapath.txt"), "w")
ufname.write(user_datapath)
ufname.close()
# Create the data store files.
CreateStore()
return True
|
1d206e6c1c3bda4b6508a2f5b2f0a60cc81cc2cd
| 3,644,539
|
import os
def absolute_path_without_git(directory):
"""
return the absolute path of local git repo
"""
return os.path.abspath(directory + "/..")
|
1547bdebd8e6375f6725dbf36ae99a93eec5053b
| 3,644,540
|
import os
def find_template(fname):
"""Find absolute path to template.
"""
for dirname in tuple(settings.TEMPLATE_DIRS) + get_app_template_dirs('templates'):
tmpl_path = os.path.join(dirname, fname)
# print "TRYING:", tmpl_path
if os.path.exists(tmpl_path):
return tmpl_path
raise IOError(fname + " not found.")
|
ae77d4a72cd8eaf34a9a84d05631ccbb96466ad0
| 3,644,541
|
import torch
def create_hcp_sets(skeleton, side, directory, batch_size, handedness=0):
"""
Creates datasets from HCP data
IN: skeleton: boolean, True if input is skeleton, False otherwise,
side: str, 'right' or 'left'
handedness: int, 0 if mixed ind, 1 if right handed, 2 if left handed
directory: str, folder in which save the results
batch_size: int, size of training batches
weights: list, list of weights to apply to skeleton values
OUT: root_dir: created directory where results will be stored
dataset_train_loader, dataset_val_loader, dataset_test_loader: loaders
that will be used for training and testing
"""
print(torch.cuda.current_device())
date_exp = date.today().strftime("%d%m%y")
if skeleton == True:
skel = 'skeleton'
loss_type = 'CrossEnt'
root_dir = directory + side + '_hemi_' + skel + '_' + date_exp + '_' +loss_type + '_' + str(handedness) + '_2classes/'
else:
skel = 'norm_spm'
loss_type = 'L2'
root_dir = directory + side + '_hemi_' + skel + '_' + date_exp + '_' +loss_type + '_' + str(handedness) +'/'
#print("Parameters : skeleton: {}, side: {}, weights: {}, loss_type: {}".format(skeleton, side, weights, loss_type))
print(root_dir)
save_results.create_folder(root_dir)
if skeleton:
data_dir = '/neurospin/dico/lguillon/skeleton/sts_crop/'
#data_dir = '/home_local/lg261972/data/'
if handedness == 0:
input_data = 'sts_crop_skeleton_' + side
tmp = pd.read_pickle(data_dir + input_data +'.pkl')
filenames = list(tmp.columns)
tmp = torch.from_numpy(np.array([tmp.loc[0].values[k] for k in range(len(tmp))]))
else:
if handedness == 1:
input_data = side + '_hemi_rightH_sts_crop_skeleton'
else:
input_data = side + '_hemi_leftH_sts_crop_skeleton'
print(input_data)
tmp = pd.read_pickle(data_dir + input_data +'.pkl')
filenames = tmp.Subject.values
print(len(filenames))
tmp = torch.from_numpy(np.array([tmp.loc[k].values[0] for k in range(len(tmp))]))
else:
data_dir = '/neurospin/dico/lguillon/hcp_cs_crop/sts_crop/'+ side + '_hemi/'
data_dir = '/home_local/lg261972/data/'
if handedness == 0:
input_data = 'sts_crop_' + side
tmp = pd.read_pickle(data_dir + input_data +'.pkl')
filenames = list(tmp.columns)
tmp = torch.from_numpy(np.array([tmp.loc[0].values[k] for k in range(len(tmp))]))
else:
if handedness == 1:
input_data = side + '_hemi_rightH_sts_crop'
else:
input_data = side + '_hemi_leftH_sts_crop'
print(input_data)
tmp = pd.read_pickle(data_dir + input_data +'.pkl')
filenames = tmp.Subject.values
print(len(filenames))
tmp = torch.from_numpy(np.array([tmp.loc[k].values[0] for k in range(len(tmp))]))
tmp = tmp.to('cuda')
hcp_dataset = TensorDataset(filenames=filenames, data_tensor=tmp,
skeleton=skeleton, vae=False)
# Split training set into train, val and test
partition = [0.7, 0.2, 0.1]
print([round(i*(len(hcp_dataset))) for i in partition])
train_set, val_set, test_set = torch.utils.data.random_split(hcp_dataset,
[round(i*(len(hcp_dataset))) for i in partition])
#train_set = AugDatasetTransformer(train_set)
#val_set = AugDatasetTransformer(val_set)
#test_set = AugDatasetTransformer(test_set)
dataset_train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size,
shuffle=True, num_workers=0)
dataset_val_loader = torch.utils.data.DataLoader(val_set, shuffle=True,
num_workers=0)
dataset_test_loader = torch.utils.data.DataLoader(test_set, shuffle=True,
num_workers=0)
print("Dataset generated \n Size of training dataset :", len(dataset_train_loader))
return root_dir, dataset_train_loader, dataset_val_loader, dataset_test_loader
|
32615f19b70b8d78240adc1c2f60c5191f4c93fb
| 3,644,542
|
def rtrim(n):
"""Returns a transform that removes the rightmost n points
"""
def t(xarr, yarr, *args):
return (xarr[:-n], yarr[:-n]) + args
    t.__name__ = 'rtrim({})'.format(n)
return t
|
583e4e2b9eef8281002760ccb1d336b9fdff36af
| 3,644,543
|
def analyze_avg_prof_quality_by_department(dict_cursor, departmentID, campus):
"""
>>> analyze_avg_prof_quality_by_department(dict_cursor, 'CSC', 'St. George')
CSC
enthusiasm 3.95
course_atmosphere 3.90
...
(This is not complete)
"""
return __analyze_data_by_DB_GETMETHOD_WITH_TWO_ARGS(DEPARTMENT_QUALITY_BY_DID, dict_cursor, departmentID, campus)
|
c44b74181e223c4a543575dd42f1db73b57e48b9
| 3,644,544
|
from os import linesep, pathsep
def audit(environ):
"""Check a wmt-exe environment.
Parameters
----------
environ : dict
Environment variables.
Returns
-------
str
Warnings/errors.
"""
messages = []
for command in ['TAIL', 'CURL', 'BASH']:
messages.append(check_is_executable(environ[command]))
for path_var in ['PYTHONPATH', 'LD_LIBRARY_PATH', 'PATH', 'CLASSPATH']:
for item in environ[path_var].split(pathsep):
messages.append(check_is_dir(item))
for path_var in ['SIDL_DLL_PATH']:
for item in environ[path_var].split(';'):
messages.append(check_is_dir(item))
for module in ['csdms', 'csdms.model']:
messages.append(check_is_module(module, env=environ))
for component in find_components(env=environ):
module = '.'.join(['csdms.model', component])
messages.append(check_is_module(module, env=environ))
for component in find_components(env=environ):
module = '.'.join(['csdms.model', component])
messages.append(check_is_component(module, component,
env=environ))
return linesep.join(messages)
|
e65afb413501d5aceaf715af5bf409e5e9aa50c5
| 3,644,545
|
import re
import json
def parse_to_json(data_str):
"""
    Convert a delimited string into a list of JSON object strings
"""
json_obj_list = []
obj = data_str.split('%')
for record in obj:
attributes = re.split(',', record)
data = json.dumps(attributes)
data = re.sub(r':', '":"', data)
data = re.sub(r'\[', '{', data)
data = re.sub(r']', '}', data)
json_obj_list.append(data)
return json_obj_list
|
288911694548fd603a3a261ac9c51c5c971599e0
| 3,644,546
|
import numpy as np
def calculate_elbo(model, X, recon_X):
    """
    Compute the ELBO of the model from the reconstruction error and KL divergence.
"""
rec_loss = - np.sum(X * np.log(1e-8 + recon_X)
+ (1 - X) * np.log(1e-8 + 1 - recon_X), 1)
mu, logvar = model.transform(X)
kl = -0.5 * np.sum(1 + logvar - mu ** 2 - np.exp(logvar), 1)
elbo = np.mean(rec_loss + kl)
return elbo
|
aa3f2123bcc8ed0ee62b0b28a4fb3aeb0c1c886c
| 3,644,547
|
def dice_loss(pred, target):
"""
Dice Loss based on Dice Similarity Coefficient (DSC)
@param pred: torch.tensor, model prediction
@param target: torch.tensor, ground truth label
"""
return 1 - dice_coeff(pred, target)
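# dice_coeff is defined elsewhere in the project; a minimal soft-Dice sketch of what it
# could look like (an assumed formulation, not necessarily the project's own):
import torch

def dice_coeff(pred, target, eps=1e-7):
    """Soft Dice similarity coefficient over flattened tensors."""
    pred = pred.reshape(-1).float()
    target = target.reshape(-1).float()
    intersection = (pred * target).sum()
    return (2.0 * intersection + eps) / (pred.sum() + target.sum() + eps)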
|
9f940c09c4dac7477c6f77f2ecf632b95107f04f
| 3,644,548
|
import struct
def parse(transaction):
""" Parses Bitcoin Transaction into it's component parts"""
byteStringLength = 2
# Version
version = struct.unpack('<L', transaction[0:4*byteStringLength].decode("hex"))[0]
offset = 4*byteStringLength
# print "Version is: " + str(version)
# Inputs
varLength, inputCount = translationUtil.unformatVarInt(transaction[offset:offset+9*byteStringLength].decode("hex"))
# print "Input Count is: " + str(inputCount)
offset += varLength*byteStringLength
inputs = []
for i in range(0, inputCount):
# Hash of input (previous output) transaction
inHash = (transaction[offset:offset+64].decode("hex"))[::-1].encode("hex")
offset += 64
# Index of reference within input (previous output) transaction
inIndex = struct.unpack('<L', transaction[offset:offset+4*byteStringLength].decode("hex"))[0]
offset += 4*byteStringLength
# Script signature length
varLength, scriptLen = translationUtil.unformatVarInt(transaction[offset:offset+9*byteStringLength].decode("hex"))
offset += varLength*byteStringLength
# Script
script = transaction[offset:offset+scriptLen*byteStringLength].decode("hex")
offset += scriptLen*byteStringLength
# Sequence
sequence = struct.unpack('<L', transaction[offset:offset+4*byteStringLength].decode("hex"))[0]
offset += 4*byteStringLength
# Append
# print "Input {0} is: {1}, {2}, {3}, {4}".format(i, inHash, inIndex, script, sequence)
inputs.append([inHash, inIndex, script, sequence])
# Outputs
varLength, outputCount = translationUtil.unformatVarInt(transaction[offset:offset+9*byteStringLength].decode("hex"))
# print "Output Count is: {0}".format(outputCount)
offset += varLength*byteStringLength
outputs = []
for i in range(0, outputCount):
# Index of reference within input (previous output) transaction
value = struct.unpack('<Q', transaction[offset:offset+8*byteStringLength].decode("hex"))[0]
offset += 8*byteStringLength
# Script signature length
varLength, scriptLen = translationUtil.unformatVarInt(transaction[offset:offset+9*byteStringLength].decode("hex"))
offset += varLength*2
# Script
script = transaction[offset:offset+scriptLen*byteStringLength].decode("hex")
offset += scriptLen*byteStringLength
# Append
# print "Output {0} is: {1}, {2}".format(i, value, script)
outputs.append([value, script])
# Block Lock Time
blockLockTime = struct.unpack('<L', transaction[offset:offset+4*byteStringLength].decode("hex"))[0]
# print "Block Lock Time is: " + str(blockLockTime)
return (version, inputs, outputs, blockLockTime)
|
fcf9eede33b3dda8026a00a8a3a57ab2cd84ef22
| 3,644,549
|
import os
import re
def get_version():
"""Get project version
"""
version_file_path = os.path.join(
os.path.dirname(__file__),
'spowtd',
'VERSION.txt')
with open(version_file_path) as version_file:
version_string = version_file.read().strip()
version_string_re = re.compile('[0-9.]+')
match = version_string_re.match(version_string)
if match is None:
raise ValueError(
'version string "{}" does not match regexp "{}"'
.format(version_string, version_string_re.pattern))
return match.group(0)
|
4a11ae4efd6269b29acdef0835461ede33cf6e5c
| 3,644,550
|
def get_related_items_by_type(parser, token):
"""Gets list of relations from object identified by a content type.
Syntax::
{% get_related_items_by_type [content_type_app_label.content_type_model] for [object] as [varname] [direction] %}
"""
tokens = token.contents.split()
if len(tokens) not in (6, 7):
raise template.TemplateSyntaxError(
"%r tag requires 6 arguments" % tokens[0]
)
if tokens[2] != 'for':
raise template.TemplateSyntaxError(
"Third argument in %r tag must be 'for'" % tokens[0]
)
if tokens[4] != 'as':
raise template.TemplateSyntaxError(
"Fifth argument in %r tag must be 'as'" % tokens[0]
)
direction = 'forward'
if len(tokens) == 7:
direction = tokens[6]
return GetRelatedItemsByTypeNode(
name=tokens[1], obj=tokens[3], as_var=tokens[5], direction=direction
)
|
a774830f92b6e2abc1df9d172c6b696b87dc83d0
| 3,644,551
|
def stitch_valleys(valley_list):
"""Returns a stitched list of valleys to extract seq from."""
valley_collection = utils.LocusCollection(valley_list, 1)
stitched_valley_collection = valley_collection.stitch_collection()
loci = []
regions = []
for valley in stitched_valley_collection.get_loci():
if [valley.chr, valley.start, valley.end] not in regions:
loci.append(valley)
regions.append([valley.chr, valley.start, valley.end])
return loci
|
d5b4e35d66c9c5ff05a027569454d2ec1b612e45
| 3,644,552
|
def no_gcab_namespace(name, *args):
"""
Mock gi.require_version() to raise an ValueError to
simulate that GCab bindings are not available.
We mock importing the whole 'gi', so that this test
can be run even when the 'gi' package is not available.
"""
if name.startswith("gi"):
m = mock.Mock()
m.require_version.side_effect = ValueError
return m
return orig_import(name, *args)
|
7952d944aa1fb512874874870a2d9bfaa31c5834
| 3,644,553
|
import os
import shutil
def _start_beamtime(
PI_last, saf_num, experimenters=[], wavelength=None, test=False
):
"""function for start a beamtime"""
# check status first
active_beamtime = glbl.get('_active_beamtime')
if active_beamtime is False:
raise xpdAcqError("It appears that end_beamtime may have been "
"run.\nIf you wish to start a new beamtime, "
"please open a new terminal and proceed "
"with the standard starting sequence.")
# check directory
home_dir = glbl_dict['home']
if not os.path.exists(home_dir):
raise RuntimeError(
"WARNING: fundamental directory {} does not "
"exist.\nPlease contact beamline staff immediately".format(
home_dir
)
)
f_list = os.listdir(home_dir)
if len(f_list) != 0:
raise FileExistsError(
"There are more than one files/directories:\n"
"{}\n"
"under {}.\n"
"have you run '_end_beamtime()' yet?".format(f_list, home_dir)
)
elif len(f_list) == 0:
_make_clean_env()
print("INFO: initiated requried directories for experiment")
bt = Beamtime(PI_last, saf_num, experimenters, wavelength=wavelength)
os.chdir(home_dir)
print(
"INFO: to link newly created beamtime object to xrun, "
"please do\n"
">>> xrun.beamtime = bt"
)
# copy default Ni24.D to xpdUser/user_analysis
src = os.path.join(DATA_DIR, "Ni24.D")
dst = os.path.join(glbl_dict["usrAnalysis_dir"], "Ni24.D")
shutil.copy(src, dst)
_load_beamline_config(
glbl["blconfig_path"], test=test
)
# pre-populated scan plan
for expo in EXPO_LIST:
ScanPlan(bt, ct, expo)
# inject beamtime state
glbl['_active_beamtime'] = True
return bt
|
c1daff80fa43e21bca90fba6ac3d6c66f6f62b6e
| 3,644,554
|
import logging
def logger(module_name: str):
"""Инициализация и конфигурирования логгера"""
logging.basicConfig(
level=logging.INFO,
format='[%(levelname)s][%(asctime)s] %(name)s: %(message)s'
)
return logging.getLogger(module_name)
|
0a436b50d16c752404d31e3f34b38239391236d5
| 3,644,555
|
from typing import List
def cast(op_name: str, expr: Expr, in_xlayers: List[XLayer]) -> XLayer:
"""
Conversion of Relay 'clip' layer
Relay
-----
Type: tvm.relay.op.clip
Ref: https://docs.tvm.ai/langref/relay_op.html
Parameters:
- a (relay.Expr)
The input tensor.
- a_min (float)
The clip minimum.
- a_max (float)
The clip maximum.
"""
a_min = float(expr.attrs.a_min)
a_max = float(expr.attrs.a_max)
logger.debug("clip: {}".format(op_name))
X = px.ops.clip(op_name, in_xlayers[0], a_min, a_max, relay_id=[hash(expr)])
logger.debug("-- outshape: {}".format(list(X.shapes)))
return X
|
e6a7699f64054e3196a512929e1cfbc390ba214e
| 3,644,556
|
import numpy as np
def __generation_dec(n: int, m: int, x_min: np.ndarray, x_max: np.ndarray) -> np.ndarray:
"""
:param n: num rows in returned matrix
:param m: num cols in returned matrix
:param x_min: float array, min possible nums in cols of returned matrix
:param x_max: float array, max possible nums in cols of returned matrix
:return: n times m float matrix with nums in col number i in [x_min[i], x_max[i])
"""
assert n > 0, "n should be positive"
assert m > 0, "m should be positive"
assert x_min.shape == (m, ), "x_min should be of shape (m, )"
assert x_max.shape == (m, ), "x_max should be of shape (m, )"
return np.random.uniform(low=x_min, high=x_max, size=(n, m))
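# Example with arbitrary bounds: 4 rows, column 0 drawn from [0, 1), column 1 from [10, 20).
pts = __generation_dec(4, 2, np.array([0.0, 10.0]), np.array([1.0, 20.0]))
print(pts.shape)   # (4, 2)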
|
d76970858faacc8757c0bfa5b8840f4b5ab200d0
| 3,644,557
|
def apply_tariff(kwh, hour):
"""Calculates cost of electricity for given hour."""
if 0 <= hour < 7:
rate = 12
elif 7 <= hour < 17:
rate = 20
elif 17 <= hour < 24:
rate = 28
else:
raise ValueError(f'Invalid hour: {hour}')
return rate * kwh
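# Example: 3.5 kWh drawn at 18:00 falls into the 28-per-kWh peak band.
print(apply_tariff(3.5, 18))   # 98.0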
|
fb2c5b458c13456a39612720b6e80e0cd707391e
| 3,644,558
|
def _compound_smiles(compound: reaction_pb2.Compound) -> str:
"""Returns the compound SMILES, if defined."""
for identifier in compound.identifiers:
if identifier.type == identifier.SMILES:
return identifier.value
return ""
|
44c9f8169442b9a116a4d77ea6be74ec4cc27a31
| 3,644,559
|
import subprocess
def convert_pdf_to_txt(pdf, startpage=None):
"""Convert a pdf file to text and return the text.
This method requires pdftotext to be installed.
Parameters
----------
pdf : str
path to pdf file
startpage : int, optional
the first page we try to convert
Returns
-------
str
the converted text
"""
if startpage is not None:
startpageargs = ['-f', str(startpage)]
else:
startpageargs = []
stdout = subprocess.Popen(["pdftotext", "-q"] + startpageargs + [pdf, "-"],
stdout=subprocess.PIPE).communicate()[0]
# python2 and 3
if not isinstance(stdout, str):
stdout = stdout.decode()
return stdout
|
469fe2439e4ca44e396df07d5e8edb2db5e90e87
| 3,644,560
|
import numpy as np
def cross_correlizer(sample_rate, max_itd, max_frequency):
"""
Convenience function for creating a CrossCorrelizer with appropriate
parameters.
sample_rate : the sample rate of the wav files to expect.
max_itd : the maximum interaural time difference to test.
max_frequency : the highest frequency to test.
"""
shift_max = int(np.ceil(max_itd * sample_rate))
shift_steps = int(float(sample_rate) / max_frequency / 2.)
return CrossCorrelizer(sample_rate, shift_max, shift_steps)
|
747c42c3db2ad1f7642e575a35e3ce6d3c84b4b2
| 3,644,561
|
import altair as alt
def plot_precision_recall_at_k(
predicate_df, idx_flip, max_k=100, give_random=True, give_ensemble=True
):
"""
Plots precision/recall at `k` values for flipped label experiments.
Returns an interactive altair visualisation. Make sure it is installed beforehand.
Arguments:
predicate_df: the dataframe with predicates from `ensemble.get_predicates`
idx_flip: array that indicates if labels are wrong
max_k: the maximum value for `k` to consider
give_random: plot the "at k" statistics for the randomly selected lower bound
give_ensemble: plot the "at k" statistics from the reason ensemble
"""
alt.data_transformers.disable_max_rows()
# We combine the results in dataframes
plot_df = calculate_precision_recall_at_k(
predicate_df=predicate_df,
idx_flip=idx_flip,
max_k=max_k,
give_random=give_random,
give_ensemble=give_ensemble,
)
# So that we may plot it.
return (
alt.Chart(plot_df)
.mark_line()
.encode(x="k", y="value", color="variable", strokeDash="setting")
.interactive()
)
|
e2edc16d8648f9dd913df41dfc39e0b48140cfe7
| 3,644,562
|
from typing import List
from typing import Union
def has_permissions(
permissions: int, required: List[Union[int, BasePermission]]
) -> bool:
"""Returns `True` if `permissions` has all required permissions"""
if permissions & Administrator().value:
return True
all_perms = 0
for perm in required:
if isinstance(perm, int):
all_perms |= perm
else:
all_perms |= perm.value
return permissions & all_perms == all_perms
|
db32fe9d1a53cd5b14b71522d08901172bbad8f7
| 3,644,563
|
import numpy as np
import numpy.typing as npt
from scipy.spatial.transform import Rotation

NDArrayFloat = npt.NDArray[np.float64]  # assumed alias for the project's float-array type

def mat_to_xyz(mat: NDArrayFloat) -> NDArrayFloat:
"""Convert a 3D rotation matrix to a sequence of _extrinsic_ rotations.
In other words, 3D rotation matrix and returns a sequence of Tait-Bryan angles
representing the transformation.
Reference: https://en.wikipedia.org/wiki/Euler_angles#Rotation_matrix
Reference: https://en.wikipedia.org/wiki/Euler_angles#Tait%E2%80%93Bryan_angles_2
Args:
mat: (...,3,3) Rotation matrix.
Returns:
(...,3) Tait-Bryan angles (in radians) formulated for a sequence of extrinsic rotations.
"""
xyz_rad: NDArrayFloat = Rotation.from_matrix(mat).as_euler("xyz", degrees=False)
return xyz_rad
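# Round-trip sanity check with arbitrary angles; assumes numpy and scipy's Rotation
# are available, as the function itself requires:
rpy = np.array([0.1, -0.2, 0.3])
mat = Rotation.from_euler("xyz", rpy, degrees=False).as_matrix()
print(np.allclose(mat_to_xyz(mat), rpy))   # True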
|
1f27e503b28f9a932bc4aa703de8a210968f64f6
| 3,644,564
|
import hashlib
def get_user_gravatar(user_id):
"""
Gets link to user's gravatar from serializer.
Usage::
{% get_user_gravatar user_id %}
Examples::
{% get_user_gravatar 1 %}
{% get_user_gravatar user.id %}
"""
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
return static('img/anonymous.png')
if not user.email:
return static('img/anonymous.png')
url_base = 'https://www.gravatar.com/avatar/{}?d=mp'
user_hash = hashlib.md5(user.email.lower().encode('utf-8')).hexdigest()
return url_base.format(user_hash)
|
b8cd883c3ca76a3dc45253457715ac011c04785d
| 3,644,565
|
def natural_key(s):
"""Converts string ``s`` into a tuple that will sort "naturally" (i.e.,
``name5`` will come before ``name10`` and ``1`` will come before ``A``).
This function is designed to be used as the ``key`` argument to sorting
functions.
:param s: the str/unicode string to convert.
:rtype: tuple
"""
# Use _nkre to split the input string into a sequence of
# digit runs and non-digit runs. Then use _nkconv() to convert
# the digit runs into ints and the non-digit runs to lowercase.
return tuple(_nkconv(m) for m in _nkre.findall(s))
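# Hedged usage sketch (not from the original source): the real _nkre and _nkconv
# are module-level helpers defined elsewhere; the stand-ins below follow the
# comment above (split into digit / non-digit runs, digit runs sort before text).
import re

_nkre = re.compile(r'\d+|\D+')                       # hypothetical stand-in
def _nkconv(run):                                    # hypothetical stand-in
    return (0, int(run)) if run.isdigit() else (1, run.lower())

print(sorted(["name10", "name5", "name1"], key=natural_key))
# expected: ['name1', 'name5', 'name10']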
|
7eda87824ac9ad952c911d8b1946cc0af43fa4aa
| 3,644,566
|
import pandas as pd

# data_dir is a module-level constant defined elsewhere in the package
def read_ValidationSets_Sources():
"""Read and return ValidationSets_Sources.csv file"""
df = pd.read_csv(data_dir + 'ValidationSets_Sources.csv',header=0,
dtype={"Year":"str"})
return df
|
ea653e91ab37abd91297783caf8ea1fa6bd14545
| 3,644,567
|
from sys import int_info
def igcd_lehmer(a, b):
"""Computes greatest common divisor of two integers.
Euclid's algorithm for the computation of the greatest
common divisor gcd(a, b) of two (positive) integers
a and b is based on the division identity
a = q*b + r,
where the quotient q and the remainder r are integers
and 0 <= r < b. Then each common divisor of a and b
divides r, and it follows that gcd(a, b) == gcd(b, r).
The algorithm works by constructing the sequence
r0, r1, r2, ..., where r0 = a, r1 = b, and each rn
is the remainder from the division of the two preceding
elements.
In Python, q = a // b and r = a % b are obtained by the
floor division and the remainder operations, respectively.
These are the most expensive arithmetic operations, especially
for large a and b.
Lehmer's algorithm is based on the observation that the quotients
qn = r(n-1) // rn are in general small integers even
when a and b are very large. Hence the quotients can be
usually determined from a relatively small number of most
significant bits.
The efficiency of the algorithm is further enhanced by not
computing each long remainder in Euclid's sequence. The remainders
are linear combinations of a and b with integer coefficients
derived from the quotients. The coefficients can be computed
as far as the quotients can be determined from the chosen
most significant parts of a and b. Only then a new pair of
consecutive remainders is computed and the algorithm starts
anew with this pair.
References
==========
.. [1] https://en.wikipedia.org/wiki/Lehmer%27s_GCD_algorithm
"""
a, b = abs(as_int(a)), abs(as_int(b))
if a < b:
a, b = b, a
# The algorithm works by using one or two digit division
# whenever possible. The outer loop will replace the
# pair (a, b) with a pair of shorter consecutive elements
# of the Euclidean gcd sequence until a and b
# fit into two Python (long) int digits.
nbits = 2*int_info.bits_per_digit
while a.bit_length() > nbits and b != 0:
# Quotients are mostly small integers that can
# be determined from most significant bits.
n = a.bit_length() - nbits
x, y = int(a >> n), int(b >> n) # most significant bits
# Elements of the Euclidean gcd sequence are linear
# combinations of a and b with integer coefficients.
# Compute the coefficients of consecutive pairs
# a' = A*a + B*b, b' = C*a + D*b
# using small integer arithmetic as far as possible.
A, B, C, D = 1, 0, 0, 1 # initial values
while True:
# The coefficients alternate in sign while looping.
# The inner loop combines two steps to keep track
# of the signs.
# At this point we have
# A > 0, B <= 0, C <= 0, D > 0,
# x' = x + B <= x < x" = x + A,
# y' = y + C <= y < y" = y + D,
# and
# x'*N <= a' < x"*N, y'*N <= b' < y"*N,
# where N = 2**n.
# Now, if y' > 0, and x"//y' and x'//y" agree,
# then their common value is equal to q = a'//b'.
# In addition,
# x'%y" = x' - q*y" < x" - q*y' = x"%y',
# and
# (x'%y")*N < a'%b' < (x"%y')*N.
# On the other hand, we also have x//y == q,
# and therefore
# x'%y" = x + B - q*(y + D) = x%y + B',
# x"%y' = x + A - q*(y + C) = x%y + A',
# where
# B' = B - q*D < 0, A' = A - q*C > 0.
if y + C <= 0:
break
q = (x + A) // (y + C)
# Now x'//y" <= q, and equality holds if
# x' - q*y" = (x - q*y) + (B - q*D) >= 0.
# This is a minor optimization to avoid division.
x_qy, B_qD = x - q*y, B - q*D
if x_qy + B_qD < 0:
break
# Next step in the Euclidean sequence.
x, y = y, x_qy
A, B, C, D = C, D, A - q*C, B_qD
# At this point the signs of the coefficients
# change and their roles are interchanged.
# A <= 0, B > 0, C > 0, D < 0,
# x' = x + A <= x < x" = x + B,
# y' = y + D < y < y" = y + C.
if y + D <= 0:
break
q = (x + B) // (y + D)
x_qy, A_qC = x - q*y, A - q*C
if x_qy + A_qC < 0:
break
x, y = y, x_qy
A, B, C, D = C, D, A_qC, B - q*D
# Now the conditions on top of the loop
# are again satisfied.
# A > 0, B < 0, C < 0, D > 0.
if B == 0:
# This can only happen when y == 0 in the beginning
# and the inner loop does nothing.
# Long division is forced.
a, b = b, a % b
continue
# Compute new long arguments using the coefficients.
a, b = A*a + B*b, C*a + D*b
# Small divisors. Finish with the standard algorithm.
while b:
a, b = b, a % b
return a
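# Hedged sanity check (not from the original source): `as_int` above is sympy's
# integer-validation helper; the stand-in below does only the minimal check this
# demo needs. The result should agree with math.gcd on large operands.
import math

def as_int(n):                      # hypothetical stand-in for sympy's as_int
    if not isinstance(n, int):
        raise ValueError("expected an integer")
    return n

a, b = 2**300 + 157, 3**200 + 91
print(igcd_lehmer(a, b) == math.gcd(a, b))  # expected: True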
|
609db770670f718121fb045f3786aad5f0ff8e5f
| 3,644,568
|
import numpy as np
def regular_polygon_area_equivalent_radius(n, radius=1.0):
    """ Compute the equivalent circumradius so the regular polygon has the same area as the circle.
    \theta = \frac{2 \pi}{n}
    r_{eqs} = \sqrt{\frac{\theta r^2}{\sin{\theta}}}
    :param radius: circle radius
    :param n: number of regular polygon segments
    :return: equivalent regular polygon circumradius
    """
theta = 2 * np.pi / n
r = np.sqrt((theta * radius ** 2) / np.sin(theta))
return r
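# Hedged check (not from the original source): a regular hexagon with the
# equivalent circumradius should enclose the same area as the unit circle.
import numpy as np

_n, _radius = 6, 1.0
_r_eq = regular_polygon_area_equivalent_radius(_n, _radius)
_polygon_area = 0.5 * _n * _r_eq**2 * np.sin(2 * np.pi / _n)
print(np.isclose(_polygon_area, np.pi * _radius**2))  # expected: True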
|
4aacc8c2ab57516bef15167e5a22485c9f55bc2d
| 3,644,569
|
def get_dashboard_list(project_id=None, page=1, page_size=25, token_info=None, user=None):
"""Get a list of dashboards
:param project_id: Filter dashboards by project ID
:type project_id: str
:param user_id: Filter dashboards by user ID
:type user_id: str
:param limit: Limit the dashboards
:type limit: int
:param offset: Offset the dashboards
:type offset: int
:rtype: DashboardList
"""
query = Dashboard.query
project = None
if "project_id" in connexion.request.args:
project = Project.query.get(connexion.request.args["project_id"])
if project:
if not project_has_user(project, user):
return "Forbidden", 403
query = query.filter(Dashboard.project_id == project_id)
offset = (page * page_size) - page_size
total_items = query.count()
total_pages = (total_items // page_size) + (1 if total_items % page_size > 0 else 0)
dashboards = query.offset(offset).limit(page_size).all()
return {
"dashboards": [dashboard.to_dict() for dashboard in dashboards],
"pagination": {
"page": page,
"pageSize": page_size,
"totalItems": total_items,
"totalPages": total_pages,
},
}
|
9a15c87b081dcdb87e1a5c4778b0114309365a2b
| 3,644,570
|
import torch
def _ssim(X, Y, filter, K=(0.01, 0.03)):
""" Calculate ssim index for X and Y"""
K1, K2 = K
# batch, channel, [depth,] height, width = X.shape
C1 = K1 ** 2
C2 = K2 ** 2
filter = filter.to(X.device, dtype=X.dtype)
mu_x = gaussian_filter(X, filter)
mu_y = gaussian_filter(Y, filter)
mu_x_sq = mu_x.pow(2)
mu_y_sq = mu_y.pow(2)
mu_x_mu_y = mu_x * mu_y
sigma_x_sq = (gaussian_filter(X * X, filter) - mu_x_sq)
sigma_y_sq = (gaussian_filter(Y * Y, filter) - mu_y_sq)
sigma_xy = (gaussian_filter(X * Y, filter) - mu_x_mu_y)
cs_map = (2 * sigma_xy + C2) / (sigma_x_sq + sigma_y_sq + C2) # set alpha=beta=gamma=1
ssim_map = ((2 * mu_x_mu_y + C1) / (mu_x_sq + mu_y_sq + C1))
ssim_map *= cs_map
ssim_per_channel = torch.flatten(ssim_map, 2).mean(-1)
cs = torch.flatten(cs_map, 2).mean(-1)
return ssim_per_channel, cs
|
49deca478e06c35f06436f16ad34fb8154ba0cfd
| 3,644,571
|
def get_all_messages(notification_queue, **kwargs):
"""
Get all messages on the specified notification queue
Variables:
complete_queue => Queue to get the message from
Arguments:
None
Data Block:
None
Result example:
[] # List of messages
"""
resp_list = []
u = NamedQueue("nq-%s" % notification_queue,
host=config.core.redis.persistent.host,
port=config.core.redis.persistent.port,
db=config.core.redis.persistent.db)
while True:
msg = u.pop(blocking=False)
if msg is None:
break
resp_list.append(msg)
return make_api_response(resp_list)
|
c0a61d50cc3e6373bc007f8978278d49f66544e9
| 3,644,572
|
import numpy as np
def ssa_reconstruct(pc, v, k):
"""
from Vimal
Series reconstruction for given SSA decomposition using vector of components
:param pc: matrix with the principal components from SSA
:param v: matrix of the singular vectors from SSA
:param k: vector with the indices of the components to be reconstructed
:return: the reconstructed time series
"""
if np.isscalar(k):
k = [k]
if pc.ndim != 2:
raise ValueError('pc must be a 2-dimensional matrix')
if v.ndim != 2:
raise ValueError('v must be a 2-dimensional matrix')
t, dim = pc.shape
n_points = t + (dim - 1)
if any(filter(lambda x: dim < x or x < 0, k)):
raise ValueError('k must be vector of indexes from range 0..%d' % dim)
pc_comp = np.asarray(np.matrix(pc[:, k]) * np.matrix(v[:, k]).T)
xr = np.zeros(n_points)
times = np.zeros(n_points)
# reconstruction loop
for i in range(dim):
xr[i: t + i] = xr[i: t + i] + pc_comp[:, i]
times[i: t + i] = times[i: t + i] + 1
xr = (xr / times) * np.sqrt(t)
return xr
|
1ac054f2d31ab6f883a369e682a33235305df604
| 3,644,573
|
import os
import sys
def loadImtoolrc(imtoolrc=None):
"""
Locates, then reads in IMTOOLRC configuration file from
system or user-specified location, and returns the
dictionary for reference.
"""
# Find the IMTOOLRC file. Except as noted below, this order
# matches what ximtool and ds9 use.
_home = os.getenv("HOME")
# Look for path to directory where this module is installed
# This will be last-resort location for IMTOOLRC that was
# distributed with this module.
_module_path = os.path.split(__file__)[0]
####
# list of file names to look for; ok to have None to skip an entry
_name_list = []
# There are two environment variables that might set the location
# of imtoolrc:
# getenv('imtoolrc')
_name_list.append(os.getenv(_default_imtoolrc_env[0]))
# getenv('IMTOOLRC')
_name_list.append(os.getenv(_default_imtoolrc_env[1]))
# ~/.imtoolrc
if 'HOME' in os.environ :
_name_list.append( os.path.join(os.environ['HOME'], ".imtoolrc") )
_name_list.append(sys.prefix+os.sep+_default_local_imtoolrc)
# /usr/local/lib/imtoolrc
_name_list.append(_default_system_imtoolrc)
# $iraf/dev/imtoolrc - this is not in ds9 or NOAO's ximtool,
# but it is in the AURA Unified Release ximtool. This is the
# one place on your system where you can be certain that
# imtoolrc is really there. Eventually, we will make a patch
# to add this to ds9 and to IRAF.
if 'iraf' in os.environ :
_name_list.append( os.path.join( os.environ['iraf'], 'dev', 'imtoolrc') )
# special to numdisplay: use imtoolrc that is in the package directory.
# Basically, this is our way of having a built-in default table.
_name_list.append(_module_path+os.sep+'imtoolrc')
####
    # Search all possible IMTOOLRC names in list
    # and open the first one found...
    _fdin = None
    for name in _name_list:
        try:
            if name:
                _fdin = open(name)
                break
        except OSError:
            pass
    if _fdin is None:
        raise OSError('no imtoolrc file could be found in any of the default locations')
    # Parse the file, line by line and populate the dictionary
    _lines = _fdin.readlines()
    _fdin.close()
# Build a dictionary for the entire IMTOOL table
# It will be indexed by configno.
fbdict = {}
for line in _lines:
# Strip out any blanks/tabs, Python 3 compat
line = line.strip()
# Ignore empty lines
if len(line) > 1:
_lsp = line.split()
# Also, ignore comment lines starting with '#'
if _lsp[0] != '#':
configno = int(_lsp[0])
_dict = {'nframes':int(_lsp[1]),'width':int(_lsp[2]),'height':int(_lsp[3]),'name':_lsp[5]}
fbdict[configno] = _dict
return fbdict
|
68aa55443baaa5c0eb57d80af3f7a989cba1c2d9
| 3,644,574
|
import numpy as np
def get_theo_joints_pm(W, b, beta):
"""calculate the theoretical state distribution for a Boltzmann
machine
"""
N = len(b)
joints = []
states = get_states(N)
for s in states:
joints.append(np.exp(-1. * get_energy(W, b, (2. * s - 1.), beta)))
joints /= np.sum(joints)
return joints
|
c84c518fac47d139f951d4973907dceec1d9c825
| 3,644,575
|
from typing import BinaryIO
def tail(the_file: BinaryIO, lines_2find: int = 20) -> list[bytes]:
"""
From http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail
"""
lines_found: int = 0
total_bytes_scanned: int = 0
the_file.seek(0, 2)
bytes_in_file: int = the_file.tell()
    # Read at least lines_2find + 1 line breaks from the bottom, block by block
    # for speed; the extra one ensures we don't return a half line.
    while lines_2find + 1 > lines_found and bytes_in_file > total_bytes_scanned:
byte_block: int = min(1024, bytes_in_file - total_bytes_scanned)
the_file.seek(-(byte_block + total_bytes_scanned), 2)
total_bytes_scanned += byte_block
lines_found += the_file.read(1024).count(b"\n")
the_file.seek(-total_bytes_scanned, 2)
line_list: list[bytes] = list(the_file.readlines())
    return line_list[-lines_2find:]
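# Hedged usage sketch (not from the original source): write a throwaway file and
# read back its last three lines in binary mode, as the function above expects.
import os
import tempfile

_fd, _path = tempfile.mkstemp()
with os.fdopen(_fd, "wb") as _f:
    _f.write(b"\n".join(b"line %d" % _i for _i in range(100)) + b"\n")
with open(_path, "rb") as _f:
    print(tail(_f, lines_2find=3))  # [b'line 97\n', b'line 98\n', b'line 99\n']
os.remove(_path)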
|
094917839d4b26e284244715452982eaf6e8c08a
| 3,644,576
|
def add_device_tag_command(client, args):
""" Command to add tag to an existing admin devices entry """
site, concentrator, map = get_site_params()
transmitter_id = args.get('transmitter_id')
tag = args.get('tag')
result = client.add_device_tag(site=site, concentrator=concentrator, map=map,
transmitter_id=transmitter_id, tag=tag)
if 'status' not in result:
return_error('Failed to add device tag')
return result['status'], {}, result
|
9aeaff1110515215bb7f2d3aa1a6ab5123cd31b2
| 3,644,577
|
def CommaSeparatedFloats(sFloatsCSV):
"""Read comma-separated floats from string.
[sFloatsCSV]: string, contains comma-separated floats.
<retval>: list, floats parsed from string.
"""
return [float(sFloat) for sFloat in sFloatsCSV.replace(" ","").split(",")]
|
1aa12ca7297aa3bd809f6d2ffaf155233a826b49
| 3,644,578
|
def merge_channels(image_list):
"""
Merge channels of multiple scalar ANTsImage types into one
multi-channel ANTsImage
ANTsR function: `mergeChannels`
Arguments
---------
image_list : list/tuple of ANTsImage types
scalar images to merge
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> image = ants.image_read(ants.get_ants_data('r16'), 'float')
>>> image2 = ants.image_read(ants.get_ants_data('r16'), 'float')
>>> image3 = ants.merge_channels([image,image2])
>>> image3.components == 2
"""
inpixeltype = image_list[0].pixeltype
dimension = image_list[0].dimension
components = len(image_list)
for image in image_list:
if not isinstance(image, iio.ANTsImage):
raise ValueError('list may only contain ANTsImage objects')
if image.pixeltype != inpixeltype:
raise ValueError('all images must have the same pixeltype')
libfn = utils.get_lib_fn('mergeChannels%s' % image_list[0]._libsuffix)
image_ptr = libfn([image.pointer for image in image_list])
return iio.ANTsImage(pixeltype=inpixeltype,
dimension=dimension,
components=components,
pointer=image_ptr)
|
33b5588d6ad4d128ed6206652919408e32520c80
| 3,644,579
|
def get_var(name: str, options: dict) -> str:
"""
Returns the value from the given dict with key 'INPUT_$key',
or if this does not exist, key 'key'.
"""
return options.get('INPUT_{}'.format(name)) or options.get(name)
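# Hedged usage sketch (not from the original source): inputs in the style of
# GitHub Actions are looked up under the 'INPUT_' prefix first, then the bare name.
_opts = {"INPUT_token": "abc123", "branch": "main"}
print(get_var("token", _opts))   # 'abc123' (prefixed key wins)
print(get_var("branch", _opts))  # 'main' (falls back to the bare key)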
|
9df0e3ec92af83b5719b88ca34f323bdfc7d1d84
| 3,644,580
|
import os
def get_filenames(is_training, data_dir, num_files=1014):
"""Return filenames for dataset."""
if is_training:
return [
os.path.join(data_dir, "train-%05d-of-01014" % i) for i in range(num_files)
]
else:
return [
os.path.join(data_dir, "validation-%05d-of-00128" % i) for i in range(128)
]
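# Hedged usage sketch (not from the original source): the data_dir below is
# illustrative only; no files need to exist for the path list to be built.
print(get_filenames(is_training=False, data_dir="/tmp/dataset")[:2])
# ['/tmp/dataset/validation-00000-of-00128', '/tmp/dataset/validation-00001-of-00128']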
|
4381513fce78d7d491866db4f67a57496530d67c
| 3,644,581
|
def create_txt_response(name, txt_records):
"""
Returns an RRSet containing the 'txt_records' as the result of a DNS
query for 'name'.
This takes advantage of the fact that an Answer object mostly behaves
like an RRset.
"""
return dns.rrset.from_text_list(name, 60, "IN", "TXT", txt_records)
|
1f649719576b810a40ed7042b9b254653fe1364a
| 3,644,582
|
import ast
def bit_xor(*arguments):
"""
Bitwise XOR function.
"""
return ast.BitXor(*arguments)
|
07af3232a18796b4122e3ac6a4279ec00032c31d
| 3,644,583
|
import os
def get_chromiumdir(platform, release):
"""
    Args:
        platform (str): a sys.platform str
        release (str): the platform release (e.g. 'XP' on Windows)
    Returns:
str: path to Chromium User Data Directory
http://www.chromium.org/user-experience/user-data-directory
"""
if platform == 'darwin':
chromedir = os.path.expanduser(
'~/Library/Application Support/Chromium')
elif platform.startswith('linux'):
chromedir = os.path.expanduser(
'~/.config/chromium')
    elif platform == 'win32':
        if release == 'XP':
            chromedir = os.path.expanduser(
                r'~\Local Settings\Application Data\Chromium\User Data')
        else:
            chromedir = os.path.expanduser(
                r'~\AppData\Local\Chromium\User Data')
else:
raise NotImplementedError("Unknown platform: %r" % platform)
return [chromedir]
|
4ed1a9d70dfd3430911d26ac47322e9612bfdb06
| 3,644,584
|
def make_ts_scorer(
score_func, greater_is_better=True, needs_proba=False, needs_threshold=False, **kwargs,
):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in `~sklearn.model_selection.GridSearchCV`
and `~sklearn.model_selection.cross_validate`. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean
Not yet implemented, kept only to be compatible with the scikit-learn API
needs_threshold : boolean
Not yet implemented, kept only to be compatible with the scikit-learn API
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
callable
scorer object that returns a scalar score
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True," " but not both.")
if needs_proba:
raise NotImplementedError("Usage/evaluation of prediction probabilities are not yet implemented.")
elif needs_threshold:
raise NotImplementedError("Evaluation of decision function output is not yet implemented.")
else:
cls = _TSPredictScorer
return cls(score_func, sign, kwargs)
|
9003f82b52a1e915111bd46002fda8f61f6c9b9e
| 3,644,585
|
def pfas(x):
"""Parse a JSON array of PFA expressions as a PFA abstract syntax trees.
:type x: open JSON file, JSON string, or Pythonized JSON
:param x: PFA expressions in a JSON array
:rtype: list of titus.pfaast.Expression
:return: parsed expressions as a list of abstract syntax trees
"""
return jsonToAst.exprs(x)
|
e3544a89f16c908752a55ea58bfc3360abbe4121
| 3,644,586
|
def overlap(x, y, a, b):
"""Finds the overlap of (x, y) and (a, b).
Assumes an overlap exists, i.e. y >= a and b >= x.
"""
c = clamp(x, a, b)
d = clamp(y, a, b)
return c, d
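# Hedged usage sketch (not from the original source): `clamp` is defined elsewhere
# in the original module; the stand-in below restricts a value to [lo, hi].
def clamp(value, lo, hi):           # hypothetical stand-in
    return max(lo, min(value, hi))

print(overlap(1, 5, 3, 8))  # expected: (3, 5)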
|
c26b2f32ba9c12f72108c756ca4c1b4993fe8d55
| 3,644,587
|
def topological_sort(g):
"""
Returns a list of vertices in directed acyclic graph g in topological
order.
"""
ready = []
topo = []
in_count = {}
for v in g.vertices():
in_count[v] = g.degree(v, outgoing=False)
if in_count[v] == 0: # v has no constraints, i.e no incoming edges
ready.append(v)
while len(ready) > 0:
u = ready.pop()
topo.append(u)
for e in g.incident_edges(u):
v = e.opposite(u)
in_count[v] -= 1 # v now no longer has u as a constraint
if in_count[v] == 0:
ready.append(v)
return topo
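# Hedged usage sketch (not from the original source): a minimal stand-in graph
# exposing only the interface topological_sort() relies on — vertices(),
# degree(v, outgoing=False) for in-degree, incident_edges(u) for outgoing
# edges, and edge.opposite(u).
class _Edge:                         # hypothetical stand-in
    def __init__(self, u, v):
        self._u, self._v = u, v
    def opposite(self, x):
        return self._v if x == self._u else self._u

class _Digraph:                      # hypothetical stand-in
    def __init__(self, edges):
        self._out, self._in = {}, {}
        for u, v in edges:
            self._out.setdefault(u, []).append(_Edge(u, v))
            self._out.setdefault(v, [])
            self._in[v] = self._in.get(v, 0) + 1
            self._in.setdefault(u, 0)
    def vertices(self):
        return list(self._out)
    def degree(self, v, outgoing=True):
        return len(self._out[v]) if outgoing else self._in[v]
    def incident_edges(self, u, outgoing=True):
        return self._out[u] if outgoing else []

_g = _Digraph([("a", "b"), ("a", "c"), ("b", "d"), ("c", "d")])
print(topological_sort(_g))  # one valid order, e.g. ['a', 'c', 'b', 'd']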
|
5ac6261bf1b6fa92280abdc3fc95679ad9294e80
| 3,644,588
|
def probability_of_failure_in_any_period(p, n):
"""
Returns the probability that a failure (of probability p in one period)
happens once or more in n periods.
The probability of failure in one period is p, so the probability
of not failing is (1 - p). So the probability of not
failing over n periods is (1 - p) ** n, and the probability
of one or more failures in n periods is:
1 - (1 - p) ** n
    Doing the math without losing precision is tricky.
    After the binomial expansion, you get (for even n):
    a = 1 - (1 - choose(n, 1) * p + choose(n, 2) * p**2 - choose(n, 3) * p**3 + ... + choose(n, n) * p**n)
    For odd n, the last term is negative.
    To avoid precision loss, we don't want to compute (1 - p) if p is
    really tiny, so we cancel out the 1 and get:
    a = choose(n, 1) * p - choose(n, 2) * p**2 ...
    """
if p < 0.01:
# For tiny numbers, (1 - p) can lose precision.
# First, compute the result for the integer part
n_int = int(n)
result = 0.0
sign = 1
for i in range(1, n_int + 1):
p_exp_i = p ** i
if p_exp_i != 0:
                result += sign * choose(n_int, i) * p_exp_i
sign = -sign
# Adjust the result to include the fractional part
# What we want is: 1.0 - (1.0 - result) * ((1.0 - p) ** (n - n_int))
# Which gives this when refactored:
result = 1.0 - ((1.0 - p) ** (n - n_int)) + result * ((1.0 - p) ** (n - n_int))
return result
else:
# For high probabilities of loss, the powers of p don't
# get small faster than the coefficients get big, and weird
# things happen
return 1.0 - (1.0 - p) ** n
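# Hedged check (not from the original source): `choose` above is a project helper;
# math.comb stands in for it here. For a tiny p the series branch is taken and
# should agree with the direct formula to well within floating-point error.
from math import comb as choose    # hypothetical stand-in for the project's choose()

_p, _n = 1e-6, 10
_series = probability_of_failure_in_any_period(_p, _n)
_direct = 1.0 - (1.0 - _p) ** _n
print(abs(_series - _direct) < 1e-12)  # expected: True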
|
92439161b6b1e3288fc665c72c145282c6c09bb2
| 3,644,589
|
from ast import literal_eval

import pandas as pd

# v (paths/column config) and print_to_file are defined elsewhere in the project
def stage_1(transformed_token_list):
"""Checks tokens against ngram to unigram dictionary"""
dict_data = pd.read_excel(v.stage_1_input_path, sheet_name=v.input_file_sheet_name)
selected_correct_token_data = pd.DataFrame(dict_data, columns=v.stage_1_input_file_columns)
transformed_state_1 = []
for sentence in transformed_token_list:
for row in selected_correct_token_data.itertuples():
b = list(literal_eval(row.ngram))
ngram = ''
for word in b: ngram += (' ' + word)
split_bigram = ngram.strip().split(' ')
split_sentence = sentence.strip().split(' ')
if ngram.strip() in sentence and split_bigram[0] in split_sentence and split_bigram[1] in split_sentence:
sentence = sentence.replace(ngram.strip(), row.unigram)
transformed_state_1.append(sentence)
print_to_file(v.stage_1_output_path, transformed_state_1, v.input_file_columns)
return transformed_state_1
|
6dea5bb1e1e04d183ade142f50c36aea00933ff1
| 3,644,590
|
def _perform_sanity_checks(config, extra_metadata):
"""
Method to perform sanity checks on current classification run.
:param config: dirbs config instance
:param extra_metadata: job extra metadata dict obj
:return: bool (true/false)
"""
curr_conditions = [c.as_dict() for c in config.conditions]
curr_operators = [op.as_dict() for op in config.region_config.operators]
curr_amnesty = config.amnesty_config.as_dict()
if curr_conditions == extra_metadata['conditions'] and \
curr_operators == extra_metadata['operators'] and \
curr_amnesty == extra_metadata['amnesty']:
return True
return False
|
fa5fa39bae91393c4f91ab6aa3b595f8a0db2e4f
| 3,644,591
|
def get_key_from_id(id : str) -> str:
"""
Gets the key from an id.
:param id:
:return:
"""
assert id in KEYMAP, "ID not found"
return KEYMAP[id]
|
7fbf00bbd905382888b993bbee5564c42edf4e73
| 3,644,592
|
import string
def CreateFromDict(registration_dict):
"""Returns the content of the header file."""
template = string.Template("""\
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is autogenerated by
// base/android/jni_generator/jni_registration_generator.py
// Please do not change its content.
#ifndef HEADER_GUARD
#define HEADER_GUARD
#include <jni.h>
#include "base/android/jni_generator/jni_generator_helper.h"
#include "base/android/jni_int_wrapper.h"
// Step 1: Forward declaration.
${FORWARD_DECLARATIONS}
// Step 2: Main dex and non-main dex registration functions.
bool RegisterMainDexNatives(JNIEnv* env) {
${REGISTER_MAIN_DEX_NATIVES}
return true;
}
bool RegisterNonMainDexNatives(JNIEnv* env) {
${REGISTER_NON_MAIN_DEX_NATIVES}
return true;
}
#endif // HEADER_GUARD
""")
if len(registration_dict['FORWARD_DECLARATIONS']) == 0:
return ''
return jni_generator.WrapOutput(template.substitute(registration_dict))
|
08d49b8cbb1275104b4498b98aed00747163e874
| 3,644,593
|
from datetime import datetime
def controller_add_raw_commands_not_privileged():
"""
This view allows a client to send a raw command to a CISCO device in not privileged EXEC mode
:return: <dict> result of the operation. check documentation for details
"""
    # TODO: convert print statements to app.logger.debug("message")
print("OUTPUT - Entering function: controller_add_raw_commands_not_privileged")
# START COUNTING TIME FOR LOGGING PURPOSES
start_time = datetime.now()
# GETTING CLIENT INFO, FOR LOGGING
client_info = get_http_request_info(request)
# OUTPUT MESSAGES IN DEBUG MODE- ( WE CAN CREATE A DEBUG MODE FOR LOGGING )
message = "OUTPUT - WEBSERVICE URI: \t'{}'".format(client_info["REQUEST_URI"])
print(message)
message = ("OUTPUT - REQUEST_INFORMATION " + str(client_info))
# ----- --- Below line is just to remember us that we could create a debug mode log with messages like these one.
# logger_engine.debug(message)
print("OUTPUT - starting time: {}".format(start_time))
print(message)
print("OUTPUT - Let´s request data from client - CHECK IF DATA IS VALID")
data = request_data(client_info)
print("OUTPUT - data: ", data)
if isinstance(data[0], dict):
if data[0]["STATUS"] == "Failure":
print("OUTPUT - WE HAVE FOUND AN ERROR......")
end_time = datetime.now()
total_time = end_time - start_time
if data[0]["ERROR"] == "1":
print("OUTPUT - ERROR 1. LETS RAISE INVALID_USAGE function amd inform client ")
print("OUTPUT - data", data)
# CREATE ERROR OBJECT
logger_obj = NetworkAutomateLogs(start_time, end_time, total_time, client_info, current_app.logger,
user_data_for_logging= None)
# CALL METHOD FOR ERROR 1 ( CHECK ERROR-CATALOG.txt for details )
logger_obj.error_1_json_data(data[0])
# CREATE A JSON LOG
logger_obj.create_json_log()
raise InvalidUsage("Bad request!", status_code=400, payload=logger_obj.error_dict)
if data[0]["ERROR"] == "2":
print("OUTPUT - ERROR 2. LETS RAISE INVALID_USAGE function amd inform client ")
print("OUTPUT - data", data)
# CREATE ERROR OBJECT
logger_obj = NetworkAutomateLogs(start_time, end_time, total_time, client_info, current_app.logger,
user_data_for_logging=data[1])
# CALL METHOD FOR ERROR 2 ( CHECK ERROR-CATALOG.txt for details
logger_obj.error_2_fundamental_data_required(data[0])
# CREATE A JSON LOG
logger_obj.create_json_log()
raise InvalidUsage("Bad request!", status_code=400, payload=logger_obj.error_dict)
if data[0]["ERROR"] == "3":
print("OUTPUT - ERROR 3. LETS RAISE INVALID_USAGE function amd inform client ")
print("OUTPUT - data", data)
# CREATE ERROR OBJECT
#EXAMPLE HOW DATA SHOULD BE :OUTPUT - \
# data ({'STATUS': 'Failure', 'ERROR': '3', 'TYPE': 'WEBSERVICE DATA FAILURE', 'MESSAGE':
# 'Please, send an ip key in your dictionary'}, {'ips': '192.168.7.1'}) ------ ------- is a tuple
logger_obj = NetworkAutomateLogs(start_time, end_time, total_time, client_info, current_app.logger,
user_data_for_logging=data[1])
# CALL METHOD FOR ERROR 3 ( CHECK ERROR-CATALOG.txt for details
logger_obj.error_webservice_data_failure(data[0])
# CREATE A JSON LOG
logger_obj.create_json_log()
raise InvalidUsage("Bad request!", status_code=400, payload=logger_obj.error_dict)
print("=" * 79)
print("OUTPUT - data is OK! ")
print("OUTPUT - data[0]", data[0])
print("OUTPUT - data[1]", data[1])
print("=" * 79)
# CHECK IF IS PORT
# MAKE A COPY OF CLIENT DATA for logging purposes
user_data_for_logging = dict((k, v) for k, v in data[2].items())
print("OUTPUT - user_data_for_logging:", user_data_for_logging)
    # LETS REMOVE THE KEY "command_to_send" AND STORE ITS VALUE IN A NEW VARIABLE, so that the
    # remaining dictionary only holds the connection parameters for the device.
command_to_send = data[1].pop("command_to_send")
print("OUTPUT - command_list_to_send: {}".format(command_to_send))
# CHECK IF CLIENT WANTS A SPECIFIC PORT FOR CONNECTION to device. SET TO NONE if not
if isinstance(data[1], dict):
if 'port' in data[1].keys():
port = data[1].pop("port")
else:
port = None
if data[0]["CONNECTIONS"] == "both":
# ------- first try a telnet connection ---------------
connection = ConnectToDevice(data[1], connection_type="TELNET", port=port)
# LETS START CONFIGURING
result = connection.configure_add_raw_commands_not_privileged(command_to_send=command_to_send)
# result = connection.get_show_run()
print("OUTPUT - result of telnet connection: ", result)
print("OUTPUT - type(result): ", type(result))
print("OUTPUT - result is a list with one dictionary unstructured data")
result_telnet = result
end_time = datetime.now()
total_time = end_time - start_time
        # - ------- At this point we should check if telnet was successful ---------------------
if isinstance(result_telnet, dict):
if result_telnet["STATUS"] == "Failure":
del connection
print("OUTPUT - Perform a ssh connection because telnet failed ")
# -- ------ Perform a ssh connection because telnet failed ----------
connection_new = ConnectToDevice(data[1], connection_type="SSH", port=port)
# LETS START CONFIGURING
result = connection_new .configure_add_raw_commands_not_privileged(command_to_send=command_to_send)
print("OUTPUT - result of ssh connection: ", result)
print("OUTPUT - type(result): ", type(result))
print("OUTPUT - result is a list with one dictionary unstructured data")
result_ssh = result
if isinstance(result, dict):
# ---- Check if ssh connection was successful. if not, inform client of both fails and log
if result["STATUS"] == "Failure":
# Expecting here to appear error 4 -------- HANDLE ERROR
# first handle error 4
if result["ERROR"] == "4":
# CREATE ERROR OBJECT
logger_obj = NetworkAutomateLogs(start_time, end_time, total_time, client_info,
current_app.logger,
user_data_for_logging=user_data_for_logging)
# CALL METHOD FOR BOTH CONNECTION ERROR
logger_obj.error_both_connection_fails("Failed connection to device", result_ssh, result_telnet)
# CREATE A JSON LOG
logger_obj.create_json_log()
print("OUTPUT - result: {}".format(result))
raise InvalidUsage("Bad request!", status_code=513, payload=logger_obj.error_dict)
                        # Error 8: THIS ERROR DOESN'T EXIST YET - WE MAY NEED IT LATER
# if result["ERROR"] == "8":
# CREATE ERROR OBJECT
# logger_obj = NetworkAutomateLogs(start_time, end_time, total_time, client_info, logger_engine,
# user_data_for_logging=user_data_for_logging)
# CALL METHOD FOR ERROR
# logger_obj.error_operation_error(result)
# CREATE A JSON LOG
# logger_obj.create_json_log()
# raise InvalidUsage("Bad request!", status_code=513, payload=logger_obj.error_dict)
        # ----- Connection to device successful. OPERATION SUCCESSFUL ------ Build log and return the command output to the client ----
# --------- CREATE success LOG -------
logger_obj = NetworkAutomateLogs(start_time, end_time, total_time, client_info, current_app.logger,
user_data_for_logging=user_data_for_logging)
success_dict = {"STATUS": "Success",
"MESSAGE": "Check results. Whenever possible, we will try to output Structured and Unstructured "
"data "
"captured from de Network Device CLI. We have to be careful on what commands we are "
"trying to send, because we are not in privileged mode, and sometimes we are not "
"authorized to run them on the device and the output will be something like: e.g.( "
" ^\n% Invalid input detected at '^' marker.\n )",
"STRUCTURED_RESULT": result[0],
"UNSTRUCTURED_RESULT": result[1]
}
final_dict = {"NETWORK AUTOMATE RESPONSE": success_dict}
# CALL METHOD FOR success messages
logger_obj.sucess_add_raw_commands(success_dict)
# CREATE A JSON LOG
logger_obj.create_json_log()
# GIVE RESPONSE TO VIEW, for client
return final_dict
    # What differs between an SSH and a Telnet connection is only the device driver used by netmiko, so the first
    # thing we should do is determine which connection type the client wants us to perform.
    # ------ we will pass this choice to the class "ConnectToDevice". ---------------
if data[0]["CONNECTIONS"] == "telnet":
connection = ConnectToDevice(data=data[1], connection_type="TELNET", port=port)
if data[0]["CONNECTIONS"] == "ssh":
connection = ConnectToDevice(data=data[1], connection_type="SSH", port=port)
# LETS START CONFIGURING
result = connection.configure_add_raw_commands_not_privileged(command_to_send=command_to_send)
print("OUTPUT - configure_add_raw_commands ended ....")
print("="*79)
print("OUTPUT - result: ", result)
print("OUTPUT - type(result): ", type(result))
# "OUTPUT - result[0] is a list with one dictionary with structured data and a dictionary with unstructured data")
# TIME FOR LOGGING PURPOSES
end_time = datetime.now()
total_time = end_time - start_time
    # ---- At this point, if the connection object returned an error (like a connection error) we should
    # report it and inform the client
if isinstance(result, dict):
if result["STATUS"] == "Failure":
# if status is failure , we are EXPECTING HERE ERROR 4, 3 or 6
# first Error 4
if result["ERROR"] == "4":
# CREATE ERROR OBJECT
logger_obj = NetworkAutomateLogs(start_time, end_time, total_time, client_info, current_app.logger,
user_data_for_logging=user_data_for_logging)
# CALL METHOD FOR ERROR
logger_obj. error_netmiko(result)
# CREATE A JSON LOG
logger_obj.create_json_log()
raise InvalidUsage("Bad request!", status_code=512, payload=logger_obj.error_dict)
# Connection to device: Successful. OPERATION SUCCESSFUL ------ Build log and return info to client ----
# --------- CREATE success LOG -------
logger_obj = NetworkAutomateLogs(start_time, end_time, total_time, client_info, current_app.logger,
user_data_for_logging=user_data_for_logging)
success_dict = {"STATUS": "Success",
"MESSAGE": "Check results. Whenever possible, we will try to output Structured and Unstructured data"
" captured from de Network Device CLI. We have to be careful on what commands we are "
"trying to send, because we are not in privileged mode, and sometimes we are not "
"authorized to run them on the device and the output will be something like: e.g.( "
" ^\n% Invalid input detected at '^' marker.\n )",
"STRUCTURED_RESULT": result[0],
"UNSTRUCTURED_RESULT": result[1]
}
final_dict = {"NETWORK AUTOMATE RESPONSE": success_dict}
# CALL METHOD FOR success messages
logger_obj.sucess_add_raw_commands(success_dict)
# CREATE A JSON LOG
logger_obj.create_json_log()
# GIVE RESPONSE TO Client
return final_dict
|
86ba530e77646d506f1b9961908b30c5d00d4ecf
| 3,644,594
|
def static_message_fixture(tmpdir_factory, prefix, message, suffix):
"""A fixture which provides a static message."""
filename = tmpdir_factory.mktemp('data').join('static_message.txt').strpath
file_contents = "{0}{1}{2}".format(prefix, message, suffix)
with open(filename, 'w') as f:
f.write(file_contents)
return filename
|
a9a11508eb10760452cad557e792df30b068e8bc
| 3,644,595
|
import cv2
import numpy as np
def entropy_image(filename,bins=30):
"""
extracts the renyi entropy of image stored under filename.
"""
img = cv2.imread(filename,0)/255.0 # gray images
p,_ = np.histogram( img, range=[0.0,1.0],bins=bins )
return -np.log(np.dot(p,p)/(np.sum(p)**2.0))
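# Hedged usage sketch (not from the original source): write a small synthetic
# grayscale image and compute its Renyi (collision) entropy; for a near-uniform
# histogram the value is roughly log(bins) ~= 3.4 here.
import cv2
import numpy as np

_img = (np.random.rand(64, 64) * 255).astype(np.uint8)
cv2.imwrite("entropy_example.png", _img)
print(entropy_image("entropy_example.png", bins=30))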
|
b9686647601cb8850a6b03a1c52f4ad0a4218553
| 3,644,596
|
def satisfies_constraint(kel: dict, constraint: dict) -> bool:
"""Determine whether knowledge graph element satisfies constraint.
If the constrained attribute is missing, returns False.
"""
try:
attribute = next(
attribute
for attribute in kel.get("attributes", None) or []
if attribute["attribute_type_id"] == constraint["id"]
)
except StopIteration:
return False
return constraint.get("not", False) != operator_map[constraint["operator"]](
attribute["value"],
constraint["value"],
)
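# Hedged usage sketch (not from the original source): `operator_map` maps operator
# strings to Python comparisons elsewhere in the module; the stand-in below covers
# only '=='.
import operator

operator_map = {"==": operator.eq}   # hypothetical minimal stand-in

_node = {"attributes": [{"attribute_type_id": "biolink:category",
                         "value": "biolink:Disease"}]}
_constraint = {"id": "biolink:category", "operator": "==",
               "value": "biolink:Disease", "not": False}
print(satisfies_constraint(_node, _constraint))  # expected: True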
|
39ae764e03c77dcb0145b9091d21df092894850d
| 3,644,597
|
import jax
import jax.numpy as jnp
def static_unroll(core, input_sequence, initial_state, time_major=True):
"""Performs a static unroll of an RNN.
An *unroll* corresponds to calling the core on each element of the
input sequence in a loop, carrying the state through::
state = initial_state
for t in range(len(input_sequence)):
outputs, state = core(input_sequence[t], state)
A *static* unroll replaces a loop with its body repeated multiple
times when executed inside :func:`jax.jit`::
state = initial_state
outputs0, state = core(input_sequence[0], state)
outputs1, state = core(input_sequence[1], state)
outputs2, state = core(input_sequence[2], state)
...
See :func:`dynamic_unroll` for a loop-preserving unroll function.
Args:
core: An :class:`RNNCore` to unroll.
input_sequence: An arbitrarily nested structure of tensors of shape
``[T, ...]`` if time-major=True, or ``[B, T, ...]`` if time_major=False,
where ``T`` is the number of time steps.
initial_state: An initial state of the given core.
time_major: If True, inputs are expected time-major, otherwise they are
expected batch-major.
Returns:
A tuple with two elements:
* **output_sequence** - An arbitrarily nested structure of tensors
of shape ``[T, ...]`` if time-major, otherwise ``[B, T, ...]``.
* **final_state** - Core state at time step ``T``.
"""
output_sequence = []
time_axis = 0 if time_major else 1
num_steps = jax.tree_leaves(input_sequence)[0].shape[time_axis]
state = initial_state
for t in range(num_steps):
if time_major:
inputs = jax.tree_map(lambda x, _t=t: x[_t], input_sequence)
else:
inputs = jax.tree_map(lambda x, _t=t: x[:, _t], input_sequence)
outputs, state = core(inputs, state)
output_sequence.append(outputs)
# Stack outputs along the time axis.
output_sequence = jax.tree_multimap(
lambda *args: jnp.stack(args, axis=time_axis),
*output_sequence)
return output_sequence, state
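# Hedged usage sketch (not from the original source): any callable with the
# (inputs, state) -> (outputs, new_state) signature works as the core; the toy
# core below keeps a running sum over time. Assumes a jax version that still
# exposes jax.tree_multimap, matching the function above.
import jax.numpy as jnp

def _running_sum_core(inputs, state):
    new_state = state + inputs
    return new_state, new_state

_xs = jnp.ones([4, 2, 3])            # [T, B, F] with time_major=True
_init = jnp.zeros([2, 3])
_outs, _final = static_unroll(_running_sum_core, _xs, _init)
print(_outs.shape, float(_final[0, 0]))  # (4, 2, 3) 4.0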
|
f61c9de5b90a0757617f9db588ab54e69918bc4b
| 3,644,598
|
from typing import List
from typing import Tuple
def getElementByClass(className: str, fileName: str) -> List[Tuple[int, str]]:
"""Returns first matching tag from an HTML/XML document"""
nonN: List[str] = []
with open(fileName, "r+") as f:
html: List[str] = f.readlines()
for line in html:
nonN.append(line.replace("\n", ""))
pattern: str = f'class="{className}"'
patternAlt: str = f"class='{className}'"
matches: List[Tuple[int, str]] = []
for line in nonN:
if pattern in line or patternAlt in line:
lineNo = nonN.index(line) + 1
matches.append((int(lineNo), line))
break
return matches
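# Hedged usage sketch (not from the original source): write a tiny HTML file and
# look up the first line carrying the class attribute.
_html = '<div>\n  <p class="intro">hello</p>\n</div>\n'
with open("example.html", "w") as _f:
    _f.write(_html)
print(getElementByClass("intro", "example.html"))
# expected: [(2, '  <p class="intro">hello</p>')]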
|
969e4070e16dec2e10e26e97cbaaab9d95e7b904
| 3,644,599
|