content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def optimal_kernel_bandwidth(spiketimes, times=None, bandwidth=None,
                             bootstrap=False):
    """
    Calculates optimal fixed kernel bandwidth, given as the standard deviation
    sigma.

    Parameters
    ----------
    spiketimes : np.ndarray
        Sequence of spike times (sorted to be ascending).
    times : np.ndarray, optional
        Time points at which the kernel bandwidth is to be estimated.
        If None, `spiketimes` is used.
        Default: None.
    bandwidth : np.ndarray, optional
        Vector of kernel bandwidths (standard deviation sigma).
        If specified, optimal bandwidth is selected from this.
        If None, `bandwidth` is obtained through a golden-section search on a
        log-exp scale.
        Default: None.
    bootstrap : bool, optional
        If True, calculates the 95% confidence interval using Bootstrap.
        Default: False.

    Returns
    -------
    dict
        'y' : np.ndarray
            Estimated density.
        't' : np.ndarray
            Points at which estimation was computed.
        'optw' : float
            Optimal kernel bandwidth given as standard deviation sigma
        'w' : np.ndarray
            Kernel bandwidths examined (standard deviation sigma).
        'C' : np.ndarray
            Cost functions of `bandwidth`.
        'confb95' : tuple of np.ndarray
            Bootstrap 95% confidence interval: (lower level, upper level).
            If `bootstrap` is False, `confb95` is None.
        'yb' : np.ndarray
            Bootstrap samples.
            If `bootstrap` is False, `yb` is None.
    If no optimal kernel could be found, all entries of the dictionary are
    set to None.

    References
    ----------
    .. [1] H. Shimazaki, & S. Shinomoto, "Kernel bandwidth optimization in
       spike rate estimation," Journal of Computational Neuroscience,
       vol. 29, no. 1-2, pp. 171-82, 2010. doi:10.1007/s10827-009-0180-4.
    """
    if times is None:
        # Build an evaluation grid from the data itself: resolution is the
        # smallest positive inter-spike interval, capped at 1000 points.
        time = np.max(spiketimes) - np.min(spiketimes)
        isi = np.diff(spiketimes)
        isi = isi[isi > 0].copy()
        dt = np.min(isi)
        times = np.linspace(np.min(spiketimes),
                            np.max(spiketimes),
                            min(int(time / dt + 0.5),
                                1000))  # The 1000 seems somewhat arbitrary
        t = times
    else:
        # Restrict spikes to the requested window, then pick an internal grid
        # `t` no finer than the spike resolution allows.
        time = np.max(times) - np.min(times)
        spiketimes = spiketimes[(spiketimes >= np.min(times)) &
                                (spiketimes <= np.max(times))].copy()
        isi = np.diff(spiketimes)
        isi = isi[isi > 0].copy()
        dt = np.min(isi)
        if dt > np.min(np.diff(times)):
            t = np.linspace(np.min(times), np.max(times),
                            min(int(time / dt + 0.5), 1000))
        else:
            t = times
            dt = np.min(np.diff(times))
    # Histogram of spikes centered on the grid points, normalized to a density.
    yhist, bins = np.histogram(spiketimes, np.r_[t - dt / 2, t[-1] + dt / 2])
    N = np.sum(yhist)
    yhist = yhist / (N * dt)  # density
    optw = None
    y = None
    if bandwidth is not None:
        # Exhaustive search over the user-supplied bandwidth candidates.
        C = np.zeros(len(bandwidth))
        Cmin = np.inf
        for k, w_ in enumerate(bandwidth):
            C[k], yh = cost_function(yhist, N, w_, dt)
            if C[k] < Cmin:
                Cmin = C[k]
                optw = w_
                y = yh
    else:
        # Golden section search on a log-exp scale
        wmin = 2 * dt
        wmax = max(spiketimes) - min(spiketimes)
        imax = 20  # max iterations
        bandwidth = np.zeros(imax)
        C = np.zeros(imax)
        tolerance = 1e-5
        phi = 0.5 * (np.sqrt(5) + 1)  # The Golden ratio
        a = ilogexp(wmin)
        b = ilogexp(wmax)
        c1 = (phi - 1) * a + (2 - phi) * b
        c2 = (2 - phi) * a + (phi - 1) * b
        f1, y1 = cost_function(yhist, N, logexp(c1), dt)
        f2, y2 = cost_function(yhist, N, logexp(c2), dt)
        k = 0
        while (np.abs(b - a) > (tolerance * (np.abs(c1) + np.abs(c2)))) \
                and (k < imax):
            if f1 < f2:
                # Minimum lies in [a, c2]: shrink from the right.
                b = c2
                c2 = c1
                c1 = (phi - 1) * a + (2 - phi) * b
                f2 = f1
                f1, y1 = cost_function(yhist, N, logexp(c1), dt)
                bandwidth[k] = logexp(c1)
                C[k] = f1
                optw = logexp(c1)
                y = y1 / (np.sum(y1 * dt))
            else:
                # Minimum lies in [c1, b]: shrink from the left.
                a = c1
                c1 = c2
                c2 = (2 - phi) * a + (phi - 1) * b
                f1 = f2
                f2, y2 = cost_function(yhist, N, logexp(c2), dt)
                bandwidth[k] = logexp(c2)
                C[k] = f2
                optw = logexp(c2)
                y = y2 / np.sum(y2 * dt)
            k = k + 1
    # Bootstrap confidence intervals
    confb95 = None
    yb = None
    # If bootstrap is requested, and an optimal kernel was found
    if bootstrap and optw is not None:
        nbs = 1000
        yb = np.zeros((nbs, len(times)))
        for ii in range(nbs):
            # Resample spikes with replacement.
            idx = np.floor(np.random.rand(N) * N).astype(int)
            xb = spiketimes[idx]
            # BUG FIX: np.histogram returns a (counts, bin_edges) tuple; the
            # original code divided that tuple by dt and N, which raises a
            # TypeError. Unpack first, then normalize the counts to a density.
            y_histb, bins = np.histogram(
                xb, np.r_[t - dt / 2, t[-1] + dt / 2])
            y_histb = y_histb / dt / N
            yb_buf = fftkernel(y_histb, optw / dt).real
            yb_buf = yb_buf / np.sum(yb_buf * dt)
            yb[ii, :] = np.interp(times, t, yb_buf)
        ybsort = np.sort(yb, axis=0)
        # 5th and 95th percentile rows of the sorted bootstrap estimates.
        y95b = ybsort[np.floor(0.05 * nbs).astype(int), :]
        y95u = ybsort[np.floor(0.95 * nbs).astype(int), :]
        confb95 = (y95b, y95u)
    # Only perform interpolation if y could be calculated
    if y is not None:
        y = np.interp(times, t, y)
    return {'y': y,
            't': times,
            'optw': optw,
            'w': bandwidth,
            'C': C,
            'confb95': confb95,
            'yb': yb}
def run_visualization():
    """Main visualization method.

    Draws an animated "random walk" picture with OpenCV: for each chunk, two
    walkers (one for the foreground shape, one for the background) trace line
    segments onto a shared canvas, which is displayed incrementally.
    """
    s = Settings()
    # Black BGR canvas sized from the settings.
    img = np.zeros((s.window_height, s.window_width, 3), np.uint8)
    # Shape walk starts at the window center; background walk at the origin.
    # NOTE(review): `/ 2` yields floats in Python 3 — presumably RandomWalk
    # accepts float start coordinates; confirm.
    shape_start_x = s.window_width / 2
    shape_start_y = s.window_height / 2
    bg_start_x = 0
    bg_start_y = 0
    for chunk_index in range(s.chunks_count):
        # Generate the next chunk of both walks.
        shape_walker = RandomWalk(s, shape_start_x, shape_start_y)
        shape_walker.fill(s, True)
        bg_walker = RandomWalk(s, bg_start_x, bg_start_y)
        bg_walker.fill(s, False)
        # Per-chunk random colors: magenta-ish shape, dark blue-ish background.
        shape_red = np.random.randint(150, 200)
        shape_green = np.random.randint(0, 64)
        shape_blue = np.random.randint(150, 200)
        bg_red = np.random.randint(0, 1)
        bg_green = np.random.randint(0, 1)
        bg_blue = int(np.random.choice([0, 60, 70, 80, 90, 100], p=[.4, .3, .1, .1, .05, .05]))
        for i, val in enumerate(shape_walker.xv):
            if len(shape_walker.xv) == i + 1:
                break
            # cv[i + 1] flags whether this segment of the shape walk is drawn.
            # NOTE(review): bg_walker is indexed with the shape walker's loop
            # counter — assumes both walks have the same length; confirm.
            if shape_walker.cv[i + 1]:
                cv2.line(img, (shape_walker.xv[i], shape_walker.yv[i]), (shape_walker.xv[i+1], shape_walker.yv[i+1]), (shape_blue, shape_green, shape_red), 1)
            cv2.line(img, (bg_walker.xv[i], bg_walker.yv[i]), (bg_walker.xv[i+1], bg_walker.yv[i+1]), (bg_blue, bg_green, bg_red), 1)
            cv2.imshow("Frame", img)
            # Throttle the animation every `drawing_speed` segments.
            if i % s.drawing_speed == 0:
                cv2.waitKey(int(1000 / s.drawing_fps))
        # Next chunk resumes where this one ended.
        shape_start_x = shape_walker.xv[-1]
        shape_start_y = shape_walker.yv[-1]
        bg_start_x = bg_walker.xv[-1]
        bg_start_y = bg_walker.yv[-1]
    print("Complete!")
    # Block until a key press, then tear down the OpenCV window.
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def run_lsh_omp_coder(data, dictionary, sparsity, num_buckets=1):
    """Solve the orthogonal matching pursuit problem with LSH bucketing.
    Use sklearn.linear_model.orthogonal_mp to solve the following optimization
    program:
        argmin ||y - X*gamma||^2,
        subject to ||gamma||_0 <= n_{nonzero coefs},
    where
        y is 'data', size = (n_samples, n_targets),
        X is 'dictionary', size = (n_samples, n_features). Columns are assumed
        to have unit norm,
        gamma: sparse coding, size = (n_features, n_targets).
    Args:
        data: The matrix y in the above program,
        dictionary: The matrix X in the above program,
        sparsity: n_{nonzero coefs} in the above program.
        num_buckets: number of LSH buckets to use, int.
    Returns:
        gamma, as a scipy.sparse.lil_matrix of shape
        (data rows, dictionary rows).
    """
    logging.info("running LSH based sklearn.linear_model.orthogonal_mp ...")
    # Hash data rows and dictionary rows jointly so that similar rows land in
    # the same bucket; indices[i][0] is the bucket id of row i.
    indices = lsh_knn_map(
        np.transpose(np.vstack((data, dictionary))), num_buckets, 1)
    logging.info("indices shape is %s", indices.shape)
    # Per-bucket row collections plus the original row positions, so results
    # can be scattered back into the full coding matrix afterwards.
    data_buckets = [[] for i in range(num_buckets)]
    data_index = [[] for i in range(num_buckets)]
    dict_buckets = [[] for i in range(num_buckets)]
    dict_index = [[] for i in range(num_buckets)]
    for i in range(data.shape[0]):
        data_buckets[indices[i][0]].append(data[i, :])
        data_index[indices[i][0]].append(i)
    for i in range(dictionary.shape[0]):
        # Dictionary rows were stacked after the data rows in the hash input.
        dict_buckets[indices[data.shape[0] + i][0]].append(dictionary[i, :])
        dict_index[indices[data.shape[0] + i][0]].append(i)
    # lil_matrix supports efficient incremental assignment of sub-blocks.
    code = sparse.lil_matrix((data.shape[0], dictionary.shape[0]))
    for i in range(num_buckets):
        start_time = time.time()
        if len(data_buckets[i]) > 0:  # pylint: disable=g-explicit-length-test
            if len(dict_buckets[i]) == 0:  # pylint: disable=g-explicit-length-test
                # Nothing to code against in this bucket: rows are left zero.
                logging.error(
                    "lsh bucketing failed...empty bucket with no dictionary elements")
            else:
                # Solve OMP restricted to this bucket's dictionary atoms.
                small_code = sklearn.linear_model.orthogonal_mp(
                    np.transpose(np.vstack(dict_buckets[i])),
                    np.transpose(np.vstack(data_buckets[i])),
                    n_nonzero_coefs=sparsity)
                small_code = np.transpose(small_code)
                # Scatter the bucket-local solution into the global matrix.
                row_idx = np.asarray(data_index[i])
                col_idx = np.asarray(dict_index[i])
                code[row_idx[:, None], col_idx] = small_code
        logging.info("running time of OMP for bucket %d = %d seconds",
                     i, time.time() - start_time)
    return code
def hamming_options(seq1, seq2):
    """Calculate Hamming distance between two sequences.
    Interpret ambiguity as options.

    Two aligned segments count as matching when they share at least one
    allowed choice; positions with disjoint choice sets add 1 to the distance.
    """
    segments_a = convert_to_nosegment(seq1).segments
    segments_b = convert_to_nosegment(seq2).segments
    mismatches = 0
    for position, seg_a in enumerate(segments_a):
        seg_b = segments_b[position]
        # Disjoint option sets -> no possible agreement at this position.
        if not (set(seg_a.choices) & set(seg_b.choices)):
            mismatches += 1
    return mismatches
def __getgyro_decoder__(port: serial.Serial, *args, **kwargs) -> Tuple[int, int, int]:
    """
    Reads the gyro state from the serial port and decodes it as a (x, y, z) tuple.

    Expects an 8-byte frame: three big-endian unsigned 16-bit values followed
    by padding and a trailing 0xAA marker byte.
    """
    frame = port.read(8)
    # Last byte acts as a frame-validity marker.
    if frame[7] != 0xAA:
        raise Exception("Updating configuration data failed.")
    x, y, z = (struct.unpack(">H", frame[offset:offset + 2])[0]
               for offset in (0, 2, 4))
    return (x, y, z)
def spatial_knn(coords, expression, n_neighbors=14, n_sp_neighbors=7, radius=None,
                which_exprs_dims=None, sample_id=None):
    """
    A variant on the standard knn neighbor graph inference procedure that also includes the spatial neighbors of each spot.
    With help from Krzysztof Polanski.
    :param coords: numpy.ndarray with x,y positions of spots.
    :param expression: numpy.ndarray with expression of programmes / cluster expression (cols) of spots (rows).
    :param n_neighbors: how many non-spatially-adjacent neighbors to report for each spot
    :param n_sp_neighbors: how many spatially-adjacent neighbors to report for each spot. Use 7 for hexagonal grid.
    :param radius: Supercedes `n_sp_neighbors` - radius within which to report spatially-adjacent neighbors for each spot. Pick radius based on spot size.
    :param which_exprs_dims: which expression dimensions to use (cols)?
    :param sample_id: pandas.Series-like assigning each spot to a sample;
        spatial neighbors are only searched within the same sample.
        NOTE(review): despite the None default, this is dereferenced
        unconditionally below — callers must supply it; confirm intent.
    :return: dict with 'distances', 'connectivities' (sparse matrices) and
        'params', as expected by scanpy-style .uns['neighbors'].
    """
    # create and query spatial proximity tree within each sample
    if radius is None:
        coord_ind = np.zeros((coords.shape[0], n_sp_neighbors))
    else:
        coord_ind = np.zeros(coords.shape[0])
    for sam in sample_id.unique():
        coord_tree = KDTree(coords[sample_id.isin([sam]), :])
        if radius is None:
            coord_ind[sample_id.isin([sam]), :] = coord_tree.query(coords[sample_id.isin([sam]), :],
                                                                   k=n_sp_neighbors, return_distance=False)
        else:
            coord_ind[sample_id.isin([sam])] = coord_tree.query_radius(coords[sample_id.isin([sam]), :],
                                                                       radius, count_only=False)
    # if selected dimensions not provided choose all
    if which_exprs_dims is None:
        which_exprs_dims = np.arange(expression.shape[1])
    # extract and index the appropriate bit of the PCA
    pca = expression[:, which_exprs_dims]
    ckd = cKDTree(pca)
    # the actual number of neighbours - you'll get seven extra spatial neighbours in the thing
    knn = n_neighbors + n_sp_neighbors
    # identify the knn for each spot. this is guaranteed to contain at least n_neighbors non-adjacent spots
    ckdout = ckd.query(x=pca, k=knn, n_jobs=-1)
    # create numeric vectors for subsetting later
    numtemp = np.arange(expression.shape[0])
    rowtemp = np.arange(knn)
    # rejigger the neighbour pool by including the spatially adjacent ones
    for i in np.arange(expression.shape[0]):
        # identify the spatial neighbours for the spot and compute their distance
        mask = np.isin(numtemp, coord_ind[i])
        # filter spatial neighbours by sample
        if sample_id is not None:
            mask = mask & sample_id.isin([sample_id[i]])
        neigh = numtemp[mask]
        ndist_temp = pca[mask, :] - pca[i, :]
        ndist_temp = ndist_temp.reshape((mask.sum(), pca.shape[1]))
        ndist = np.linalg.norm(ndist_temp, axis=1)
        # how many non-adjacent neighbours will we get to keep?
        # (this fluctuates as e.g. edge spots will have fewer hex neighbours)
        kpoint = knn - len(neigh)
        # the indices of the top kpoint number of non-adjacent neighbours
        # (by excluding adjacent ones from the set).
        # BUG FIX: the original filtered row 0 of the knn index matrix
        # (ckdout[1][0, :]) for every spot; each spot must filter its OWN
        # row i of expression-space neighbours.
        inds = rowtemp[[j not in neigh for j in ckdout[1][i, :]]][:kpoint]
        # keep the identified top non-adjacent neighbours
        ckdout[0][i, :kpoint] = ckdout[0][i, inds]
        ckdout[1][i, :kpoint] = ckdout[1][i, inds]
        # add the spatial neighbours in the remaining spots of the knn graph
        ckdout[0][i, kpoint:] = ndist
        ckdout[1][i, kpoint:] = neigh
    # sort each row of the graph in ascending distance order
    # (sometimes spatially adjacent neighbours are some of the top ones)
    knn_distances, knn_indices = ckdout
    newidx = np.argsort(knn_distances, axis=1)
    knn_indices = knn_indices[np.arange(np.shape(knn_indices)[0])[:, np.newaxis], newidx]
    knn_distances = knn_distances[np.arange(np.shape(knn_distances)[0])[:, np.newaxis], newidx]
    # compute connectivities and export as a dictionary
    dist, cnts = compute_connectivities_umap(knn_indices, knn_distances, knn_indices.shape[0], knn_indices.shape[1])
    neighbors = {'distances': dist,
                 'connectivities': cnts,
                 'params': {'n_neighbors': n_neighbors + n_sp_neighbors,
                            'method': 'spot_factors2knn', 'metric': 'euclidean'}}
    return neighbors
def parse_q(s):
    """Parse the value of query string q (?q=) into a search sub-term.

    When the whole query contains no '=', every whitespace-separated token is
    treated as a name filter and the filters are joined with '/'. Otherwise,
    tokens lacking '=' get the 'n.name=' prefix and all tokens are joined
    with '&'.
    """
    tokens = s.split()
    if '=' not in s:
        # Pure name search: OR-style join with '/'.
        return '/'.join('n.name=' + tok for tok in tokens)
    # Mixed query: keep explicit key=value tokens, prefix the bare names,
    # and AND-join everything with '&'.
    pieces = [tok if '=' in tok else 'n.name=' + tok for tok in tokens]
    return '&'.join(pieces)
def getCompleteData(client, response, comp):
    """
    Receive the remainder of a fixed-size TCP packet.

    TCP is a stream protocol, so a single recv may return fewer bytes than the
    packet size defined by `comp`; keep reading until the packet is complete.

    Input:
        client: TCP socket-like object (must provide recv(n)).
        response: bytes already received from the host end.
        comp: struct describing the packet; only `comp.size` is used.
    Output:
        The complete packet as bytes (original `response` plus missing bytes).
    Raises:
        ConnectionError: if the peer closes the connection before the full
        packet arrives (recv returning b'' would otherwise spin forever —
        the original loop never terminated in that case).
    """
    remaining = comp.size - len(response)
    while remaining > 0:
        read = client.recv(remaining)
        if not read:
            # recv() returns b'' exactly when the peer has closed the stream.
            raise ConnectionError(
                "connection closed before the full packet was received")
        response += read
        remaining -= len(read)
    return response
def two_squares(n):
    """
    Write the integer `n` as a sum of two integer squares if possible;
    otherwise raise a ``ValueError``.
    INPUT:
    - ``n`` -- an integer
    OUTPUT: a tuple `(a,b)` of non-negative integers such that
    `n = a^2 + b^2` with `a <= b`.
    EXAMPLES::
        sage: two_squares(389)
        (10, 17)
        sage: two_squares(21)
        Traceback (most recent call last):
        ...
        ValueError: 21 is not a sum of 2 squares
        sage: two_squares(21^2)
        (0, 21)
        sage: a,b = two_squares(100000000000000000129); a,b
        (4418521500, 8970878873)
        sage: a^2 + b^2
        100000000000000000129
        sage: two_squares(2^222+1)
        (253801659504708621991421712450521, 2583712713213354898490304645018692)
        sage: two_squares(0)
        (0, 0)
        sage: two_squares(-1)
        Traceback (most recent call last):
        ...
        ValueError: -1 is not a sum of 2 squares
    TESTS::
        sage: for _ in range(100):
        ....:     a = ZZ.random_element(2**16, 2**20)
        ....:     b = ZZ.random_element(2**16, 2**20)
        ....:     n = a**2 + b**2
        ....:     aa,bb = two_squares(n)
        ....:     assert aa**2 + bb**2 == n
    ALGORITHM:
    See http://www.schorn.ch/howto.html
    """
    n = ZZ(n)
    if n <= 0:
        # 0 = 0^2 + 0^2 is the only representable non-positive integer.
        if n == 0:
            z = ZZ.zero()
            return (z, z)
        raise ValueError("%s is not a sum of 2 squares"%n)
    if n.nbits() <= 32:
        # Small inputs: dispatch to the fast Cython implementation.
        from sage.rings import sum_of_squares
        return sum_of_squares.two_squares_pyx(n)
    # Start by factoring n (which seems to be unavoidable)
    F = n.factor(proof=False)
    # First check whether it is possible to write n as a sum of two
    # squares: all prime powers p^e must have p = 2 or p = 1 mod 4
    # or e even.  (This is Fermat's two-squares criterion.)
    for (p,e) in F:
        if e % 2 == 1 and p % 4 == 3:
            raise ValueError("%s is not a sum of 2 squares"%n)
    # We run over all factors of n, write each factor p^e as
    # a sum of 2 squares and accumulate the product
    # (using multiplication in Z[I]) in a^2 + b^2.
    from sage.rings.finite_rings.integer_mod import Mod
    a = ZZ.one()
    b = ZZ.zero()
    for (p,e) in F:
        if e >= 2:
            # Even part of the exponent contributes a plain square factor.
            m = p ** (e//2)
            a *= m
            b *= m
        if e % 2 == 1:
            if p == 2:
                # (a + bi) *= (1 + I)
                a,b = a - b, a + b
            else:  # p = 1 mod 4
                # Find a square root of -1 mod p.
                # If y is a non-square, then y^((p-1)/4) is a square root of -1.
                y = Mod(2,p)
                while True:
                    s = y**((p-1)/4)
                    if not s*s + 1:
                        s = s.lift()
                        break
                    y += 1
                # Apply Cornacchia's algorithm to write p as r^2 + s^2.
                r = p
                while s*s > p:
                    r,s = s, r % s
                r %= s
                # Multiply (a + bI) by (r + sI)
                a,b = a*r - b*s, b*r + a*s
    # Signs are irrelevant for squares; normalize to non-negative.
    a = a.abs()
    b = b.abs()
    assert a*a + b*b == n
    # Return the pair ordered so that a <= b, as documented.
    if a <= b:
        return (a,b)
    else:
        return (b,a)
def getattr_by_path(obj, attr, *default):
    """Like getattr(), but can go down a hierarchy like 'attr.subattr'.

    Each dotted component is resolved in turn; if a component resolves to a
    callable it is invoked (no arguments) and its result is used instead.
    An optional single default is returned as soon as a component is missing.
    """
    current = obj
    for name in attr.split('.'):
        if default and not hasattr(current, name):
            return default[0]
        current = getattr(current, name)
        if callable(current):
            current = current()
    return current
def make_shirt(size, message):
    """Display information regarding the size and message of a shirt."""
    details = ("The shirt size is " + size +
               " and the message will read: " + message + ".")
    print(details)
def assert_array_almost_equal(
    x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["Size 15 failed"]
):
    """
    usage.scipy: 8
    """
    # Auto-generated API-usage stub (note the `...` body): it records the
    # signature and the literal argument values observed in scipy's usage of
    # numpy.testing.assert_array_almost_equal; it is not executable logic.
    ...
def entries_to_files(entry_ids):
    """
    Format file details (retrieved using the files' entry IDs) to API expectations to include files in API call.
    parameter: (list) entry_ids
        List of entry ID strings for files uploaded to the warroom
    returns:
        List of ('attachments[]', (file_name, file_handle)) tuples formatted
        according to API expectations (multipart form-data field tuples).
    """
    attachments = []
    for entry_id in entry_ids:
        # Resolve the war-room entry to an on-disk path via the Demisto API.
        execute_results = demisto.getFilePath(entry_id)
        file_path = execute_results['path']
        file_name = execute_results['name']
        # NOTE(review): the handle is opened here and left open so the HTTP
        # client can stream it; presumably the request library closes it after
        # upload — confirm upstream, otherwise these handles leak.
        attachments.append(('attachments[]', (file_name, open(file_path, 'rb'))))
    return attachments
def g_mult(a, b, p):
    """Multiply two polynomials given the irreducible polynomial of a GF.

    The raw product's coefficients are reduced mod 2 (GF(2) arithmetic),
    aligned against the modulus, then reduced by polynomial division.
    """
    raw_product = mult(a, b)
    reduced = [coefficient % 2 for coefficient in raw_product]
    reduced, p = lenshift(reduced, p)
    return div(reduced, p)
def compute_common_alphanumeric_tokens(column, feature, k):
    """
    compute top k frequent alphanumerical tokens and their counts.
    tokens only contain alphabets and/or numbers, decimals with points not included
    """
    # Split every cell on whitespace and flatten to a 1-D array of tokens.
    flattened = column.str.split(expand=True).unstack().dropna().values
    # Keep only purely alphanumeric tokens (str.isalnum rejects '.', '-', ...).
    alnum_tokens = np.array([tok for tok in flattened if tok.isalnum()])
    if alnum_tokens.size:
        feature["frequent-entries"]["most_common_alphanumeric_tokens"] = ordered_dict2(alnum_tokens, k)
def data_augmentation_fn(input_image: tf.Tensor, label_image: tf.Tensor, flip_lr: bool=True,
                         flip_ud: bool=True, color: bool=True) -> (tf.Tensor, tf.Tensor):
    """Randomly augments a batch of images together with their label images.

    Available transformations: horizontal flip, vertical flip and colour
    jitter (contrast; for 3-channel inputs also hue and saturation).
    :param input_image: images to be augmented [B, H, W, C]
    :param label_image: corresponding label images [B, H, W, C]
    :param flip_lr: option to flip image in left-right direction
    :param flip_ud: option to flip image in up-down direction
    :param color: option to change color of images
    :return: the tuple (augmented images, augmented label images) [B, H, W, C]
    """
    with tf.name_scope('DataAugmentation'):
        if flip_lr:
            with tf.name_scope('random_flip_lr'):
                coin = tf.random_uniform([], 0, 1)
                do_flip = coin > 0.5
                # Image and label must flip together to stay aligned.
                label_image = tf.cond(do_flip, lambda: tf.image.flip_left_right(label_image), lambda: label_image)
                input_image = tf.cond(do_flip, lambda: tf.image.flip_left_right(input_image), lambda: input_image)
        if flip_ud:
            with tf.name_scope('random_flip_ud'):
                coin = tf.random_uniform([], 0, 1)
                do_flip = coin > 0.5
                label_image = tf.cond(do_flip, lambda: tf.image.flip_up_down(label_image), lambda: label_image)
                input_image = tf.cond(do_flip, lambda: tf.image.flip_up_down(input_image), lambda: input_image)
        num_channels = input_image.get_shape()[-1]
        if color:
            # Colour jitter applies to the input only, never the labels.
            input_image = tf.image.random_contrast(input_image, lower=0.8, upper=1.0)
            if num_channels == 3:
                input_image = tf.image.random_hue(input_image, max_delta=0.1)
                input_image = tf.image.random_saturation(input_image, lower=0.8, upper=1.2)
    return input_image, label_image
def get_stack_value(stack, key):
    """Get metadata value from a cloudformation stack.

    Returns the OutputValue of the first stack output whose OutputKey matches
    `key`, or None when no output matches.
    """
    matches = (entry['OutputValue']
               for entry in stack.outputs
               if entry['OutputKey'] == key)
    return next(matches, None)
def add_command_line_sys_path():
    """Function that adds sys.path of the command line to Blender's sys.path"""
    # Extra search paths supplied on the command line (parsed by a helper).
    additional_system_paths = get_additional_command_line_sys_path()
    for additional_sys_path in additional_system_paths:
        sys.path.append(additional_sys_path)
    if len(additional_system_paths) > 0:
        # Persist the extra paths in the add-on preferences (JSON-encoded)
        # so they can be restored in later Blender sessions.
        addon_name = _get_addon_name()
        prefs = bpy.context.preferences.addons[addon_name].preferences
        prefs.sys_path_list_str = json.dumps(additional_system_paths)
def test_dnn_tag():
    """
    We test that if cudnn isn't avail we crash and that if it is avail, we use it.
    """
    x = T.ftensor4()
    old = theano.config.on_opt_error
    theano.config.on_opt_error = "raise"
    sio = StringIO()
    handler = logging.StreamHandler(sio)
    logging.getLogger('theano.compile.tests.test_dnn').addHandler(handler)
    # Silence original handler when intentionally generating warning messages
    logging.getLogger('theano').removeHandler(theano.logging_default_handler)
    raised = False
    try:
        f = theano.function(
            [x],
            max_pool_2d(x, ds=(2, 2), ignore_border=True),
            mode=mode_with_gpu.including("cudnn"))
    # FIX: the original used the Python-2-only ``except (...), e:`` form,
    # which is a SyntaxError on Python 3; the bound exception was unused.
    except (AssertionError, RuntimeError):
        # Compilation may only fail when cuDNN is genuinely unavailable.
        assert not cuda.dnn.dnn_available()
        raised = True
    finally:
        # Restore global config and logging handlers regardless of outcome.
        theano.config.on_opt_error = old
        logging.getLogger('theano.compile.tests.test_dnn').removeHandler(handler)
        logging.getLogger('theano').addHandler(theano.logging_default_handler)
    if not raised:
        # cuDNN available: the compiled graph must actually use the cuDNN op.
        assert cuda.dnn.dnn_available()
        assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)
                    for n in f.maker.fgraph.toposort()])
def cmd_example_cmd_as_module(mixcli: MixCli, **kwargs):
    """
    This function would be called by the processing of ArgumentParser. The contract is the positional argument
    would be a MixCli instance and the rest are passed as keyword arguments.
    We recommend to name this function as cmd_<name_of_group>_<name_of_command>
    :param mixcli: a MixCli instance
    :param kwargs: keyword arguments (unused here; accepted to satisfy the
        ArgumentParser dispatch contract)
    :return: True, signalling the command completed successfully
    """
    mixcli.info("example command group cmd_as_module ArgumentParser support function")
    # Delegate the actual work, passing along MixCli's HTTP request handler.
    example_cmd_as_module(mixcli.httpreq_handler)
    return True
def tee(iterable, n=2):  # real signature unknown; restored from __doc__
    """ tee(iterable, n=2) --> tuple of n independent iterators. """
    # Auto-generated stub of itertools.tee (restored from its __doc__, e.g.
    # by an IDE); the real implementation lives in the C extension module.
    pass
def display_credentials(user_name):
    """
    Function to display saved account credentials

    :param user_name: name of the user whose credentials should be shown
    :return: whatever Credentials.display_credentials returns for that user
    """
    # Thin wrapper delegating to the Credentials class method.
    return Credentials.display_credentials(user_name)
def add_args(parser):
    """
    parser : argparse.ArgumentParser
    return a parser added with args required by fit

    NOTE(review): despite the docstring, this function calls parse_args()
    and returns the parsed Namespace (with isTrain forced to True), not the
    parser — callers appear to rely on that; confirm before changing.
    """
    # Training settings
    # --- model / backbone selection ---
    parser.add_argument('--model', type=str, default='dadgan', metavar='N',
                        help='neural network used in training')
    parser.add_argument('--backbone', type=str, default='resnet',
                        help='employ with backbone (default: xception)')
    parser.add_argument('--backbone_pretrained', type=str2bool, default=False,
                        help='pretrained backbone (default: False)')
    parser.add_argument('--backbone_freezed', type=str2bool, default=False,
                        help='Freeze backbone to extract features only once (default: False)')
    # --- dataset / data location ---
    parser.add_argument('--dataset', type=str, default='brats', metavar='N',
                        choices=['brats', 'brats_t2', 'brats_t1', 'brats_flair'],
                        help='dataset used for training')
    parser.add_argument('--data_dir', type=str, default='./../../../data/brats',
                        help='data directory (default = ./../../../data/brats)')
    parser.add_argument('--checkname', type=str, default='dadgan', help='set the checkpoint name')
    # --- federated-learning setup (data partitioning, clients) ---
    parser.add_argument('--partition_method', type=str, default='hetero', metavar='N',
                        help='how to partition the dataset on local workers')
    parser.add_argument('--partition_alpha', type=float, default=0.5, metavar='PA',
                        help='partition alpha (default: 0.5)')
    parser.add_argument('--client_num_in_total', type=int, default=3, metavar='NN',
                        help='number of workers in a distributed cluster')
    parser.add_argument('--client_num_per_round', type=int, default=3, metavar='NN',
                        help='number of workers')
    parser.add_argument('--save_client_model', type=str2bool, default=True,
                        help='whether to save locally trained model by clients (default: True')
    # --- optimization hyper-parameters ---
    parser.add_argument('--batch_size', type=int, default=10, metavar='N',
                        help='input batch size for training (default: 32)')
    parser.add_argument('--sync_bn', type=str2bool, default=False,
                        help='whether to use sync bn (default: auto)')
    parser.add_argument('--freeze_bn', type=str2bool, default=False,
                        help='whether to freeze bn parameters (default: False)')
    parser.add_argument('--client_optimizer', type=str, default='adam',
                        help='adam or sgd')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    # parser.add_argument('--lr_scheduler', type=str, default='poly',
    #                     choices=['poly', 'step', 'cos'],
    #                     help='lr scheduler mode: (default: poly)')
    parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
    parser.add_argument('--momentum', type=float, default=0.9,
                        metavar='M', help='momentum (default: 0.9)')
    parser.add_argument('--weight_decay', type=float, default=5e-4,
                        metavar='M', help='w-decay (default: 5e-4)')
    parser.add_argument('--nesterov', action='store_true', default=False,
                        help='whether use nesterov (default: False)')
    # --- training schedule / communication rounds ---
    parser.add_argument('--epochs', type=int, default=2, metavar='EP',
                        help='how many epochs will be trained locally')
    parser.add_argument('--comm_round', type=int, default=200,
                        help='how many round of communications we shoud use')
    parser.add_argument('--is_mobile', type=int, default=0,
                        help='whether the program is running on the FedML-Mobile server side')
    parser.add_argument('--evaluation_frequency', type=int, default=5,
                        help='Frequency of model evaluation on training dataset (Default: every 5th round)')
    # --- GPU mapping configuration ---
    parser.add_argument('--gpu_mapping_file', type=str, default="gpu_mapping.yaml",
                        help='the gpu utilization file for servers and clients. If there is no \
                        gpu_util_file, gpu will not be used.')
    parser.add_argument('--gpu_mapping_key', type=str, default="mapping_default",
                        help='the key in gpu utilization file')
    # --- GAN architecture options ---
    parser.add_argument('--input_nc', type=int, default=1, help='# of input image channels: 3 for RGB and 1 for grayscale')
    parser.add_argument('--output_nc', type=int, default=1, help='# of output image channels: 3 for RGB and 1 for grayscale')
    parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
    parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
    parser.add_argument('--gan_mode', type=str, default='vanilla',
                        help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
    parser.add_argument('--netD', type=str, default='basic',
                        help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
    parser.add_argument('--netG', type=str, default='unet_256',
                        help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
    parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
    parser.add_argument('--norm', type=str, default='batch', help='instance normalization or batch normalization [instance | batch | none]')
    parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
    parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
    parser.add_argument('--no_dropout', default=False, action='store_true', help='no dropout for the generator')
    # --- loss weights ---
    parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
    parser.add_argument('--lambda_perceptual', type=float, default=1, help='weight for perceptual loss')
    parser.add_argument('--lambda_G', type=float, default=0.1, help='weight for dadgan G')
    parser.add_argument('--lambda_D', type=float, default=0.05, help='weight for dadgan D')
    parser.add_argument('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images')
    parser.add_argument('--continue_train', default=False, action='store_true', help='continue training: load the latest model')
    # --- learning-rate schedule ---
    parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
    parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, for lr_policy == linear')
    parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate for linear policy, or T_max for cos policy')
    parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero, for lr_policy == linear')
    parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations, for lr_policy == step')
    parser.add_argument('--lr_decay_gamma', type=float, default=0.1, help='gamma for lr_policy == step')
    parser.add_argument('--verbose', default=False, action='store_true', help='if specified, print more debugging information')
    args = parser.parse_args()
    args.isTrain = True
    return args
    ### Args to add ###
    # lr_scheduler
    # outstride
    # freeze_bn
    # sync_bn
    # categories
    # backbone
    # backbone-pretrained
def sk_learn_bootstrap(x, y, z, design_matrix, kf_reg, N_bs=100,
                       test_percent=0.4, print_results=True):
    """Sci-kit learn bootstrap method.

    Splits (x, y) -> z into train/test once, then resamples the training
    design matrix N_bs times, refits `kf_reg` on each resample and predicts
    on the fixed test set. Reports the R^2 score plus the
    MSE = bias^2 + variance decomposition of the bootstrap predictions.

    Parameters
    ----------
    x, y, z : np.ndarray
        Input grids (x, y) and response z; all are raveled before splitting.
    design_matrix : callable
        Maps an (N, 2) array of (x, y) pairs to the regression design matrix.
    kf_reg : sklearn-style regressor
        Must provide fit(), predict() and a coef_ attribute.
    N_bs : int, optional
        Number of bootstrap resamples. Default 100.
    test_percent : float, optional
        Fraction of data held out for testing. Default 0.4.
    print_results : bool, optional
        If True, print the summary statistics. Default True.

    Returns
    -------
    dict
        Mean/variance of predictions, mse, r2, var, bias, beta coefficient
        means/variances, 95% beta interval half-widths and |MSE - bias - var|.
    """
    x_train, x_test, y_train, y_test = sk_modsel.train_test_split(
        np.c_[x.ravel(), y.ravel()], z.ravel(),
        test_size=test_percent, shuffle=False)
    # Ensures we are on axis shape (N_observations, N_predictors)
    y_test = y_test.reshape(-1, 1)
    y_train = y_train.reshape(-1, 1)
    y_pred = np.empty((y_test.shape[0], N_bs))
    X_test = design_matrix(x_test)
    X_train = design_matrix(x_train)
    beta_coefs = []
    for i_bs in tqdm(range(N_bs), desc="SciKit-Learn bootstrap"):
        # Resample design-matrix rows together with their responses.
        # BUG FIX: the original resampled into `x_boot` but then fitted on
        # the undefined name `X_boot` (its definition was commented out),
        # raising NameError; resample the design matrix directly instead.
        X_boot, y_boot = sk_utils.resample(X_train, y_train)
        kf_reg.fit(X_boot, y_boot)
        y_pred[:, i_bs] = kf_reg.predict(X_test).ravel()
        beta_coefs.append(kf_reg.coef_)
    # R^2 averaged over the bootstrap ensemble.
    R2 = np.mean(metrics.R2(y_test, y_pred, axis=0))
    # Mean Square Error, mean((y - y_approx)**2)
    _mse = (y_test - y_pred) ** 2
    MSE = np.mean(np.mean(_mse, axis=1, keepdims=True))
    # Bias, (y - mean(y_approx))^2
    _mean_pred = np.mean(y_pred, axis=1, keepdims=True)
    bias = np.mean((y_test - _mean_pred) ** 2)
    # Variance, var(y_predictions)
    var = np.mean(np.var(y_pred, axis=1, keepdims=True))
    beta_coefs_var = np.asarray(beta_coefs).var(axis=0)
    beta_coefs = np.asarray(beta_coefs).mean(axis=0)
    if print_results:
        print("R2: {:-20.16f}".format(R2))
        print("MSE: {:-20.16f}".format(MSE))
        print("Bias^2:{:-20.16f}".format(bias))
        print("Var(y):{:-20.16f}".format(var))
        print("Beta coefs: {}".format(beta_coefs))
        print("Beta coefs variances: {}".format(beta_coefs_var))
        print("Diff: {}".format(abs(MSE - bias - var)))
    results = {
        "y_pred": np.mean(y_pred, axis=1),
        "y_pred_var": np.var(y_pred, axis=1),
        "mse": MSE,
        "r2": R2,
        "var": var,
        "bias": bias,
        "beta_coefs": beta_coefs,
        "beta_coefs_var": beta_coefs_var,
        "beta_95c": np.sqrt(beta_coefs_var)*2,
        "diff": abs(MSE - bias - var),
    }
    return results
def vAdd(v, w):
    """Return a new Vector that is the component-wise sum of v and w.

    Only the first three components (indices 0..2) of each operand are used.
    """
    x, y, z = (v[i] + w[i] for i in range(3))
    return Vector(x, y, z)
def dynamic_vm_values(trace, code_start=BADADDR, code_end=BADADDR, silent=False):
    """
    Find the virtual machine context necessary for an automated static analysis.
    code_start = the bytecode start -> often the param for vm_func and usually starts right after vm_func
    code_end = the bytecode end -> bytecode usually a big chunk, so if we identify several x86/x64 inst in a row we reached the end
    base_addr = startaddr of the jmp table -> most often used offset in the vm_trace
    vm_addr = startaddr of the vm function -> biggest function in .vmp segment,
    :param trace: instruction trace
    :param code_start: optional known bytecode start address (BADADDR = auto-detect)
    :param code_end: optional known bytecode end address (BADADDR = auto-detect)
    :param silent: if True, skip the interactive AskAddr confirmation dialog
    :return: vm_ctx -> [code_start, code_end, base_addr, vm_func_addr, vm_funcs]
    """
    # NOTE: Python 2 / IDAPython script — uses IDA API calls (GetFunctionAttr,
    # NextHead, isCode, AskAddr, GetMnem, PrevHead) and py2 except/print syntax.
    # Histogram of offsets seen in the trace; the most frequent one is assumed
    # to be the VM handler jump table.
    base_addr = defaultdict(lambda: 0)
    vm_addr = find_vm_addr(deepcopy(trace))
    trace, vm_seg_start, vm_seg_end = extract_vm_segment(trace)
    code_addrs = []
    # try finding code_start
    if code_start == BADADDR:
        # Heuristic: bytecode usually starts right after the VM function body.
        code_start = GetFunctionAttr(vm_addr, FUNCATTR_END)#NextHead(GetFunctionAttr(vm_addr, FUNCATTR_END), vm_seg_end)
        code_start = NextHead(code_start, BADADDR)
        while isCode(code_start):
            code_start = NextHead(code_start, BADADDR)
    for line in trace:
        # construct base addr dict of offsets -> jmp table should be the one most used
        if len(line.disasm) == 2:
            try:
                offset = re.findall(r'.*:off_([0123456789abcdefABCDEF]*)\[.*\]', line.disasm[1])[0]
                base_addr[offset] += 1
            except:
                pass
        # code_start additional search of vm_func params
        if line.addr == vm_addr:
            # Collect addresses pushed right before the VM entry call — one of
            # them is presumably the bytecode start passed as a parameter.
            for l in trace[:trace.index(line)]:
                if l.disasm[0] == 'push':
                    try:
                        arg = re.findall(r'.*_([0123456789ABCDEFabcdef]*)', l.disasm[1])
                        if len(arg) == 1:
                            code_addrs.append(int(arg[0], 16))
                    except Exception, e:
                        print e.message
    # finalize base_addr
    max_addr = int(max(base_addr, key=base_addr.get), 16) # now we have the base_addr used for offset computation - this will probably be the top of the table but to be sure we need to take its relative position into account
    base_addr = max_addr
    # Walk backwards over undefined (non-disassembled) heads to reach the real
    # top of the jump table.
    while GetMnem(PrevHead(base_addr)) == '':
        base_addr = PrevHead(base_addr)
    # finalize code_start
    if not silent:
        if code_start not in code_addrs:
            code_start = AskAddr(code_start, "Start of bytecode mismatch! Found %x but parameter for vm seem to be %s" % (code_start, [hex(c) for c in code_addrs]))
    # code_end -> follow code_start until data becomes code again
    if code_end == BADADDR:
        # Fall back to the end of the VM segment when no better bound is known.
        code_end = vm_seg_end
        # while code_end < vm_seg_end:
        #     code_end = NextHead(code_end, vm_seg_end)
        #     if isCode(code_end):
        #         break
    vm_ctx = VMContext()
    vm_ctx.code_start = code_start
    vm_ctx.code_end = code_end
    vm_ctx.base_addr = base_addr
    vm_ctx.vm_addr = vm_addr
    print code_start, code_end, base_addr, vm_addr
    return vm_ctx
def getAddresses(pkt):
    """
    Map the raw 802.11 address slots (addr1..addr4) of *pkt* to semantic
    attribute names, based on the to-DS/from-DS bits of the frame control
    field:

      0: ('dst', 'src', 'bssid', None), from sta to sta
      1: ('dst', 'bssid', 'src', None), out of ds
      2: ('bssid', 'src', 'dst', None), in ds
      3: ('recv', 'transl', 'dst', 'src') between dss

    Returns an object exposing the mapped names as attributes, plus ``f``
    holding the two DS bits.
    """
    ds_bits = pkt.FCfield & 3  # bit0 = to-DS, bit1 = from-DS
    name_table = {
        0: ('destination', 'source', 'bssid', None),
        1: ('bssid', 'source', 'destination', None),
        2: ('destination', 'bssid', 'source', None),
    }
    # For the WDS case (both DS bits set) addr1 (receiver) is dropped.
    adrs = name_table.get(ds_bits, (None, 'bssid', 'destination', 'source'))

    raw_addrs = (pkt.addr1, pkt.addr2, pkt.addr3, pkt.addr4)
    mapping = dict(zip(adrs, raw_addrs))
    mapping.pop(None)  # discard the slot that has no semantic name

    class _AddrBag:
        pass

    result = _AddrBag()
    for name, value in mapping.items():
        setattr(result, name, value)
    result.f = ds_bits
    return result
def test_keep_alive_client_timeout():
    """If the server keep-alive timeout is longer than the client
    keep-alive timeout, client will try to create a new connection here.

    The second request after the client-side keep-alive window has
    elapsed is expected to raise a ValueError mentioning a new
    connection; the test asserts exactly that.
    """
    try:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        client = ReuseableSanicTestClient(keep_alive_app_client_timeout, loop)
        headers = {"Connection": "keep-alive"}
        try:
            request, response = client.get(
                "/1", headers=headers, request_keepalive=1
            )
            assert response.status == 200
            assert response.text == "OK"
            # Sleep past the client keep-alive timeout so the pooled
            # connection is considered stale.
            loop.run_until_complete(aio_sleep(2))
            exception = None
            request, response = client.get("/1", request_keepalive=1)
        except ValueError as e:
            exception = e
        assert exception is not None
        assert isinstance(exception, ValueError)
        assert "got a new connection" in exception.args[0]
    finally:
        client.kill_server()
        # Fix: the explicitly created event loop was never closed, leaking
        # the loop (and its selector fd) across test runs.
        loop.close()
def _ldmodule_soversion(target, source, env, for_signature):
"""Function to determine what to use for SOVERSION"""
if 'SOVERSION' in env:
return '.$SOVERSION'
elif 'LDMODULEVERSION' in env:
ldmod_version = env.subst('$LDMODULEVERSION')
# We use only the most significant digit of LDMODULEVERSION
return '.' + ldmod_version.split('.')[0]
else:
return '' | 35,428 |
def jitter(
    grid: Grid,
    min_variance: int = None,
    max_variance: int = None,
    size: int = None,
    clamp: bool = False,
    variance_list: list[int] = None,
) -> Grid:
    """Randomly jitter all points in a grid
    Jitter will apply to both the x and y axises of the grid
    If a variance list is given, each point will be jittered by a random value from the jitter list
    If one of min_variance or max_variance is specified, points will be jittered from -v to v
    If both min_variance and max_variance are specified, points will be jittered from -max to -min or min to max
    Args:
        grid (Grid): Grid points to jitter
        min_variance (int, optional): Minimum jitter amount. Defaults to None.
        max_variance (int, optional): Maximum jitter amount. Defaults to None.
        size (int, optional): Grid size - useful for clamping. Defaults to None.
        clamp (bool, optional): Whether to stop points leaving the bounds. Defaults to False.
        variance_list (list[int], optional): List of possible jitter amounts. Defaults to None.
    Returns:
        Grid: Transformed grid, with each point 'jittered'
    """
    # If no size is specified, grab the largest point we have
    # if jittering a grid twice this could go badly...
    if size is None:
        size = max(grid[0], key=lambda x: x[0])[0]
    # Argument handling - there's a few cases
    # This jit function is then applied to each point to spice em up
    if variance_list is not None and len(variance_list) > 0:
        def jit(val):
            return val + choice(variance_list)
    elif min_variance is None and max_variance is None:
        def jit(val):
            return val
    elif min_variance is None and max_variance is not None:
        def jit(val):
            return val + choice([-1, 1]) * randrange(0, max_variance)
    elif max_variance is None and min_variance is not None:
        def jit(val):
            return val + choice([-1, 1]) * randrange(0, min_variance)
    elif min_variance >= max_variance:
        def jit(val):
            return val + choice([-1, 1]) * min_variance
    else:
        # BUG FIX: previously there was no branch for min_variance < max_variance
        # (both specified), so `jit` was never defined and calling it raised
        # NameError. Jitter in [min, max) with a random sign, as documented.
        def jit(val):
            return val + choice([-1, 1]) * randrange(min_variance, max_variance)
    def clampf(x):
        # Clamp a point 0 <= x <= size *only* if the clamp flag is enabled
        if clamp:
            return max(0, min(x, size))
        else:
            return x
    # Jit (and optionally clamp) all points in the grid
    return [[(clampf(jit(xx)), clampf(jit(yy))) for (xx, yy) in row] for row in grid]
def survey_post_save(sender, instance, created, **kwargs):
    """Ensure that a table exists for this logger.

    Django ``post_save`` signal handler for Survey instances.

    :param sender: model class that sent the signal (unused here).
    :param instance: the saved survey; must provide
        ``get_survey_response_model``.
    :param created: True when the row was newly inserted (unused here).
    """
    # Force our response model to regenerate
    Response = instance.get_survey_response_model(regenerate=True, notify_changes=False)
    # Create a new table if it's missing
    utils.create_db_table(Response)
    # Reregister the model in the admin
    utils.reregister_in_admin(admin.site, Response)
    # Tell other process to regenerate their models
    utils.notify_model_change(Response)
def testAgentInstanceSettingsTo_whenProtoHasNumberField_returnsBytes():
    """Test supported serializing int number."""
    # Build settings with a single numeric argument; the proto round-trip
    # should serialize without error.
    instance_settings = definitions.AgentSettings(
        key='agent/ostorlab/BigFuzzer',
        bus_url='mq',
        bus_exchange_topic='topic',
        bus_management_url='mq_managment',
        bus_vhost='vhost',
        args=[utils_definitions.Arg(name='speed', type='number', value=1)]
    )
    proto = instance_settings.to_raw_proto()
    # Only checks the serialized form is bytes, not its content.
    assert isinstance(proto, bytes)
def loadFasta(fa, sep=None, term=None, nfilter=None):
    """Returns a kapow.Array() with the contents of the file interpreted as a FASTA file, using sep if given.

    :param fa: path to the FASTA file; may carry a slice suffix like
        ``file.fa[10K:2M]`` (suffixes K/M/G/T are decimal multipliers).
    :param sep: optional separator passed to kapow.fa_copy_cont_bind.
    :param term: optional terminator value appended as the last element.
    :param nfilter: if truthy, strip long runs of Ns from the sequence.
    """
    def _boundary2int(num, n, start=False):
        # Convert one side of a "[left:right]" boundary into an index.
        # Empty means "start of array" / "end of array" respectively.
        if num == '':
            return 0 if start else n
        mult = 1
        if num[-1] == 'K': mult = 1000
        elif num[-1] == 'M': mult = 1000000
        elif num[-1] == 'G': mult = 1000000000
        elif num[-1] == 'T': mult = 1000000000000
        else: num = num + '_'  # pad so the uniform num[:-1] below works
        num = int(num[:-1]) * mult
        if num < 0:
            return n + num  # negative boundaries count from the end
        return num
    # Split an optional "[left:right]" slice suffix off the filename.
    m = re.match(r'(.*)\[([0-9]*[TGMK]?):([0-9]*[TGMK]?)\]', fa)
    if m:
        fa, left, right = m.groups()
    res = loadFile(fa)
    ores = kapow.fa_copy_cont_bind(res, sep)
    # Filters the long lists of Ns
    if nfilter:
        ores, _ = kapow.fa_strip_n_bind(ores)  # second value (lengths) unused
    # Binds the desired portion
    if m:
        ores = ores[_boundary2int(left, len(ores), True):_boundary2int(right, len(ores), False)]
    if term is not None:  # idiom fix: was "term != None"
        if ores.size < res.size:
            ores = res[0:ores.size+1]
        else:
            # Need one extra slot for the terminator; copy into a new array.
            ores = kapow.Array(res.size+1,1)
            ores.memcpy(res)
            del res
        ores[ores.size-1] = term
    return ores
def reverse_bits(counter) -> int:
    """Return *counter* with its lowest 8 bits mirrored (bit 0 <-> bit 7, etc.).

    Mirrors the Elephant reference code
    (elephant160v2 > spongent.c > retnuoCl). Bits above bit 7 are ignored.

    :param counter: the value whose low byte is reversed
    :return: the bit-reversed byte as an int
    """
    mirrored = 0
    for bit_index in range(8):
        mirrored |= ((counter >> bit_index) & 1) << (7 - bit_index)
    return mirrored
def check_quota():
    """
    Check quota for the RANDOM.ORG API
    :return: True if the request is successful AND there is remaining quota available
    """
    response = requests.request('GET', 'https://www.random.org/quota/?format=plain')
    # Only parse the body when the request succeeded (short-circuit).
    return response.status_code == 200 and int(response.text) > 0
def test_unauthorized_source(
    clirunner,
    caplog: LogCaptureFixture,
    docker_registry_secure: DockerRegistrySecure,
    gpgsigner: GPGSigner,
    known_good_image: TypingKnownGoodImage,
):
    """Test docker-sign can handle incorrect credentials."""
    caplog.clear()
    caplog.set_level(logging.DEBUG)
    # Using local registry credentials when connecting to dockehub ...
    # (deliberately wrong credentials so DockerHub rejects the pull)
    with registry_credentials(docker_registry_secure):
        result = clirunner.invoke(
            cli,
            args=[
                "registry",
                "--keyid",
                gpgsigner.keyid,
                f"{Indices.DOCKERHUB}/dummy:dummy",
                str(known_good_image["image_name"]),
            ],
            env={"DSV_GPG_DATASTORE": str(gpgsigner.homedir)},
            input="\n",
        )
    # The CLI must surface the registry's HTTP 401 rather than succeed.
    assert result.exception
    assert "401" in caplog.text
    assert "Unauthorized" in caplog.text
def safe_exit(navigator, err):
    """Safe exit of the Scan The Code mission.

    Resets the three reported colors to the default RED/GREEN/BLUE sequence.
    ``err`` is accepted for the handler signature but not used.
    """
    defaults = (
        ("scan_the_code_color1", "RED"),
        ("scan_the_code_color2", "GREEN"),
        ("scan_the_code_color3", "BLUE"),
    )
    for param_name, color in defaults:
        yield navigator.mission_params[param_name].set(color)
def idwt2d(input_node, wavelet, levels=1):
    """
    Constructs a TF graph that computes the 2D inverse DWT for a given
    wavelet, applied independently to every channel.
    Args:
        input_node (tf.placeholder): Input signal. A 3D tensor with dimensions
                                     as [rows, cols, channels]
        wavelet (tfwavelets.dwtcoeffs.Wavelet): Wavelet object.
        levels (int): Number of levels.
    Returns:
        Output node of IDWT graph (channels concatenated back together).
    """
    num_channels = int(input_node.shape[2])
    per_channel = [
        idwt2d_singlechannel(input_node[:, :, i:i + 1], wavelet, levels=levels)
        for i in range(num_channels)
    ]
    return tf.concat(per_channel, axis=-1)
def readin_rho(filename, rhofile=True, aniso=False):
    """Read in the values of the resistivity in Ohmm.

    The format is variable: rho-file (plain resistivities) or mag-file
    (log10 magnitudes, converted back via 10**value).
    """
    # Column selection: first entry for rho-files, second for mag-files.
    if aniso:
        cols = [[0, 1, 2], [2, 3, 4]]
    else:
        cols = [0, 2]
    if rhofile:
        path = 'rho/rho.dat' if filename is None else filename
        with open(path, 'r') as fid:
            mag = np.loadtxt(fid, skiprows=1, usecols=(cols[0]))
    else:
        path = read_iter() if filename is None else filename
        with open(path, 'r') as fid:
            mag = np.power(10, np.loadtxt(fid, skiprows=1, usecols=(cols[1])))
    return mag
def cofense_report_image_download_command(client: Client, args: Dict[str, str]) -> dict:
    """
    Downloads the image for a specific report.
    :type client: ``Client``
    :param client: Client object to be used.
    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.
        Expects "id" (required report id) and optional "type" (image format).
    :return: File Result
    :rtype: ``dict``
    :raises ValueError: when "id" is missing or "type" is not a valid format.
    """
    report_id = args.get("id")
    if not report_id:
        raise ValueError(MESSAGES["REQUIRED_ARGUMENT"].format("id"))
    # Image type falls back to the default both when absent and when empty.
    image_type = args.get("type", DEFAULT_REPORT_IMAGE_TYPE).lower()
    if not image_type:
        image_type = DEFAULT_REPORT_IMAGE_TYPE
    if image_type not in VALID_IMAGE_TYPE:
        raise ValueError(MESSAGES["INVALID_IMAGE_TYPE"])
    # Appending the id and type to the url_suffix
    url_suffix = URL_SUFFIX["REPORT_IMAGE_DOWNLOAD"].format(report_id, image_type)
    # Non-default types are requested as JPEG (server-side naming convention).
    headers = {
        "Accept": f"image/{image_type if image_type == DEFAULT_REPORT_IMAGE_TYPE else 'jpeg'}"
    }
    # Sending http request
    raw_response = client.http_request(url_suffix, resp_type="content", headers=headers)
    filename = f"Report ID - {report_id}.{image_type}"
    return fileResult(filename, data=raw_response, file_type=entryTypes["image"])
def import_data_line(line_num, tokens, samples, organism):
    """
    Function that imports numerical values in input tokens into the database.
    An exception will be raised if any of the following errors are detected:
      * The number of columns on this line is not equal to the number of
        samples plus 1.
      * The gene's "systematic_name" field (column #1) is blank;
      * Data field (from column #2 to the end) can not be converted into a
        float type.

    :param line_num: 1-based line number, used only for error messages.
    :param tokens: the split fields of the line (gene name + one value per sample).
    :param samples: sequence of Sample objects (or None placeholders to skip
        a column) aligned with the value columns.
    :param organism: organism used to disambiguate the gene lookup.
    """
    if len(tokens) != len(samples) + 1:
        raise Exception("Input file line #%d: Number of columns is not %d" %
                        (line_num, len(samples) + 1))
    gene_name = tokens[0]
    if not gene_name or gene_name.isspace():
        raise Exception("Input file line #%d: gene name (column #1)"
                        " is blank" % line_num)
    try:
        gene = Gene.objects.get(systematic_name=gene_name, organism=organism)
    except Gene.MultipleObjectsReturned:
        raise Exception("Input file line #%d: gene name %s (column #1) matches"
                        " multiple genes in the database" %
                        (line_num, gene_name))
    except Gene.DoesNotExist:
        # If a gene is not found in database, generate a warning message
        # and skip this line.
        logger.warning(
            "Input file line #%d: gene name %s (column #1) not found in "
            "database", line_num, gene_name)
        return
    values = tokens[1:]
    # To speed up the importing process, all expression values on current data
    # line will be saved in "records" and created in bulk at the end.
    records = []
    col_num = 2  # Expression values start from column #2.
    for sample, value in zip(samples, values):
        try:
            float_val = float(value)
        except ValueError:
            raise Exception("Input file line #%d column #%d: expression value "
                            "%s not numeric" % (line_num, col_num, value))
        # A None sample marks a column that should be validated but not stored.
        if sample is not None:
            records.append(
                ExpressionValue(sample=sample, gene=gene, value=float_val))
        col_num += 1
    ExpressionValue.objects.bulk_create(records)
def random_indices(batch_size, num_samples):
    """\
    Generate a random sequence of indices for a batch.
    :param batch_size: length of the random sequence to generate
    :param num_samples: number of samples available, i.e., maximum value to
      include in the random sequence + 1
    :return: list of integers
    """
    # Thin wrapper over the pyeddl native binding; all work happens in C++.
    return _eddl.random_indices(batch_size, num_samples)
def diff_first_last(L, *opArg):
    """
    (list) -> boolean
    Precondition: len(L) >= 2
    Returns True if the first item of the list is different from the last; else returns False.
    When extra positional arguments are given, compares L itself against the
    last extra argument instead. Returns None for inputs that cannot be
    compared (strings or sequences shorter than 2, with no extra arguments).
    >>> diff_first_last([3, 4, 2, 8, 3])
    False
    >>> diff_first_last(['apple', 'banana', 'pear'])
    True
    >>> diff_first_last([4.0, 4.5])
    True
    --- Additional Test Cases ---
    >>> diff_first_last(3, 4, 2, 8, 3)
    False
    >>> diff_first_last('apple', 'banana', 'pear')
    True
    >>> diff_first_last([5, 4], 4, 5, 4)
    True
    >>> diff_first_last([5, 4], 4, [5, 4])
    False
    >>> diff_first_last('eeee')
    Invalid length. Nothing to compare input to.
    >>> diff_first_last([5])
    Invalid length. Nothing to compare input to.
    Additional test cases show that the function can handle non-list inputs
    of various kinds. Function can also handle invalid inputs of various kinds
    """
    # NOTE: docstring examples previously showed "Nothing to compare to.",
    # which did not match the actual printed message below.
    print()
    print('---Checking if first and last values are unequal---')
    print('Input is: ', L, *opArg)
    if not opArg:
        # Single-argument mode: compare first and last element of L.
        if type(L) == str:
            print('Invalid length. Nothing to compare input to.')
            return None
        elif len(L) >= 2:
            print(L[0] != L[-1])
            return (L[0] != L[-1])
        else:
            print('Invalid length. Nothing to compare input to.')
            return None
    else:
        # Varargs mode: compare L against the final extra argument.
        print(L != opArg[-1])
        return (L != opArg[-1])
def update_on_table(df: pd.DataFrame, keys: update_key_type, values: update_key_type, table_name: str,
                    engine: sa.engine.base.Engine, schema: str) -> int:
    """
    :param df: a dataframe with data tha needs to be updated. Must have columns to be used as key and some for values
    :param keys: the set of columns to use as key, i.e. update when matched
    :param values: the set of columns to update, i.e. set when matched
    :param table_name: a table name as in util_function
    :param engine: the sqlalchemy engine for the database
    :param schema: a schema of interest - None if default schema of database is ok
    :return: the number of records updated
    :raises BadArgumentType: when keys/values are not both tuples or both dicts
    :raises OperationalError: when a chunk fails twice in a row
    """
    # get table
    tbl = util_function(table_name, engine, schema)
    # change nan to None, make sure columns are modified so that we can easily bindparam
    # (the "_updt" suffix avoids colliding with the real column names in the
    # WHERE/SET clauses below)
    df_ = df.copy()
    df_.columns = [f"{el.lower()}_updt" for el in df_.columns]
    groups = toolz.partition_all(CHUNK_SIZE, df_.where(pd.notnull(df_), None).to_dict(orient='records'))
    if not isinstance(keys, tuple) and not isinstance(keys, dict):
        raise BadArgumentType("keys and values must either be both tuples or both dicts", None)
    # create where clause, and update statement
    update_statement: dml.Update
    if isinstance(keys, tuple):
        if not isinstance(values, tuple):
            raise BadArgumentType("keys and values must either be both tuples or both dicts", None)
        # tuple form: dataframe column names match table column names
        where = [tbl.c[el] == sa.bindparam(f"{el.lower()}_updt") for el in keys]
        update_statement = tbl.update().where(sa.and_(*where)).values(
            dict((a, sa.bindparam(f"{a.lower()}_updt")) for a in values)
        )
    if isinstance(keys, dict):
        if not isinstance(values, dict):
            raise BadArgumentType("keys and values must either be both tuples or both dicts", None)
        # dict form: maps table column name -> dataframe column name
        where = [tbl.c[k] == sa.bindparam(f"{v.lower()}_updt") for k, v in keys.items()]
        update_statement = tbl.update().where(sa.and_(*where)).values(
            dict((k, sa.bindparam(f"{v.lower()}_updt")) for k, v in values.items())
        )
    # update chunk by chunk; each chunk is retried once after a short pause
    # before giving up (transient connection errors)
    count, last_successful_update = 0, None
    with engine.connect() as connection:
        for group in groups:
            try:
                result = connection.execute(update_statement, group)
                last_successful_update = group[-1]
                count += result.rowcount
            except exc.OperationalError as _:
                # try again
                time.sleep(2)
                try:
                    result = connection.execute(update_statement, group)
                    last_successful_update = group[-1]
                    count += result.rowcount
                except exc.OperationalError as e:
                    raise OperationalError(
                        f"Failed to update records. Last successful update: {last_successful_update}", e
                    )
    return count
def administrar_investigaciones(request,tipo):
    """Administer the selected investigations, marking them finished
    (correctly or incorrectly).

    The owner of an investigation flags it as finished; a non-owner is
    simply removed from its user list. Redirects back to the
    'investigaciones' view.
    """
    if request.method == 'POST':
        ids = request.POST.getlist('checks[]')
        if tipo == "incorrecto":
            # Investigations selected to be finished as *incorrect*
            for id in ids:
                investigacion = Investigacion.objects.filter(id=id)
                for c in investigacion:
                    if c.propietario == request.user:
                        c.finalizado_incorrecto = True
                        c.save()
                    else:
                        # Not the owner: just detach this user
                        c.usuario.remove(request.user.id)
        else:
            # Investigations selected to be finished as *correct*
            for id in ids:
                investigacion = Investigacion.objects.filter(id=id)
                for c in investigacion:
                    if c.propietario == request.user:
                        c.finalizado_correcto = True
                        c.save()
                    else:
                        c.usuario.remove(request.user.id)
    return HttpResponseRedirect(reverse('investigaciones'))
def container_images_prepare_defaults():
    """Return default dict for prepare substitutions
    This can be used as the mapping_args argument to the
    container_images_prepare function to get the same result as not specifying
    any mapping_args.

    :return: dict of default template inputs from KollaImageBuilder.
    """
    return KollaImageBuilder.container_images_template_inputs()
def logjsonfiles(cloudvolume: cv.CloudVolume,
                 filenames: list[str],
                 filecontents: list[str]
                 ) -> None:
    """Stores extra JSON files alongside a provenance file.
    Args:
        cloudvolume: A CloudVolume.
        filenames: A list of filenames to log.
        filecontents: The contents of each file to log (parallel to
            ``filenames``).
    """
    for name, content in zip(filenames, filecontents):
        utils.sendjsonfile(cloudvolume, name, content)
def discriminantcontrast(x, y, con, w):
    """return discriminant contrast (LDC, crossnobis, CV-Mahalanobis, whatever)."""
    contrast_estimates = con @ lsbetas(x, y)
    return np.sum(contrast_estimates * w, axis=1)
def calculateCosine(point, origin):
    """
    calculate the polar angle of the given point to the origin point

    Returns the *cosine* of that angle (not the angle itself), rounded to
    ROUND decimal places.
    """
    x1, y1 = point
    x0, y0 = origin
    if y1 == y0:
        # NOTE(review): returns 1.0 for any horizontally aligned point, even
        # when x1 < x0 (where the true cosine would be -1.0). Confirm callers
        # (e.g. a Graham-scan style sort) rely on this before changing it.
        return 1.0
    return round((x1 - x0) / calculateDistance(point, origin), ROUND)
def dirty(grid, times=1, all_pattern=all_pattern):
    """add random patterns n times at random position in a matrix object

    The default for ``all_pattern`` is captured from the module-level name of
    the same spelling at definition time.
    """
    # NOTE(review): a negative `times` would loop (nearly) forever, and
    # randint is inclusive of both bounds, so the chosen x/y may equal
    # grid.size_x / grid.size_y — confirm `at` accepts those indices.
    while times:
        times-=1
        at( grid,
            randint(0, grid.size_x) ,
            randint(0,grid.size_y),
            choice(all_pattern)
            )
def ValidClassWmi(class_name):
    """
    Tells if this class for our ontology is in a given WMI server, whatever the namespace is.
    This is used to display or not, the WMI url associated to a Survol object.
    This is not an absolute rule.
    """
    wmi_prefixes = ("CIM_", "Win32_", "WMI_")
    return any(class_name.startswith(prefix) for prefix in wmi_prefixes)
def autolabel(rects, ax):
    """Attach an (empty) text label centered above each bar in *rects*."""
    for bar in rects:
        x_center = bar.get_x() + bar.get_width() / 2.
        ax.text(x_center, 1.05 * bar.get_height(), '', ha='center', va='bottom')
def results():
    """Calculate results and route to results page

    Reads the query-string parameters: "algo" selects the recommender
    (NMF or CoSim); every other parameter is treated as a movie title.
    Presumably registered as a Flask view elsewhere in the app.
    """
    # get user input
    user_input = dict(request.args)
    user_titles = []
    for x in user_input.keys():
        if x == "algo":
            algo_choice = user_input[x]
        else:
            user_titles.append(user_input[x])
    # construct array
    input_array = rec.create_array(user_titles, matrix)
    # provide recommendations
    if algo_choice == "NMF":
        recommendations = rec.recommend_nmf(input_array, user_titles, matrix)
    elif algo_choice == "CoSim":
        recommendations = rec.recommend_cosim(input_array, user_titles, matrix)
    # NOTE(review): if "algo" is missing or not NMF/CoSim, algo_choice or
    # recommendations will be unbound here — confirm the form always sends it.
    return render_template("results.html", movies_html=recommendations)
def schema():
    """
    Print the JSON Schema used by FIXtodict.
    """
    # Emits the module-level JSON_SCHEMA_V1 constant to stdout.
    print(JSON_SCHEMA_V1)
def populate_transformation_details(workflow_stats , workflow_info):
    """
    populates the transformation details of the workflow
    @param workflow_stats the StampedeStatistics object reference
    @param workflow_info the WorkflowInfo object reference

    Side effects: mutates the module-level globals `color_count` and
    `global_transformtion_color_map` so that the same transformation gets
    the same color across workflows.
    """
    transformation_stats_dict ={}
    wf_transformation_color_map ={}
    global color_count
    transformation_stats_list= workflow_stats.get_transformation_statistics()
    for trans_stats in transformation_stats_list:
        # Transformations listed in `exclude_transformations` are skipped.
        if trans_stats.transformation.strip() in exclude_transformations:
            continue
        # Copy the per-transformation statistics into our own value object.
        trans_info = TransformationInfo()
        trans_info.name = trans_stats.transformation
        trans_info.count = trans_stats.count
        trans_info.succeeded_count = trans_stats.success
        trans_info.failed_count = trans_stats.failure
        trans_info.min = trans_stats.min
        trans_info.max = trans_stats.max
        trans_info.avg = trans_stats.avg
        trans_info.total_runtime = trans_stats.sum
        transformation_stats_dict[trans_stats.transformation] = trans_info
        # Assign a stable color per transformation, cycling the predefined palette.
        if trans_stats.transformation not in global_transformtion_color_map:
            global_transformtion_color_map[trans_stats.transformation]= predefined_colors[color_count%len(predefined_colors)]
            color_count +=1
        # Assigning the mapping to the workflow map
        wf_transformation_color_map[trans_stats.transformation] =global_transformtion_color_map[trans_stats.transformation]
    workflow_info.transformation_statistics_dict = transformation_stats_dict
    workflow_info.transformation_color_map = wf_transformation_color_map
def steamspy_tag_data_rename_cols():
    """
    Update the steamspy_tag_data.csv file to have column names from mapping.json
    """
    tag_data = pd.read_csv("sets/steamspy_tag_data.csv")
    with open("mapping.json", "r") as mapping_file:
        column_mapping = json.load(mapping_file)
    renamed = tag_data.rename(column_mapping, axis="columns")
    renamed.to_csv("sets/steamspy_tag_data_updated.csv", index=False)
def flatten(d, separator='_', parent_key=None):
    """
    Converts a nested hierarchy of key/value object (e.g. a dict of dicts) into a flat (i.e. non-nested) dict.
    :param d: the dict (or any other instance of collections.MutableMapping) to be flattened.
    :param separator: the separator to use when concatenating nested key names into flattened key names.
    :param parent_key: used internally for recursion.
    :return: a flattened dict of the same mapping type as *d* (i.e. containing no nested dicts as values).
    """
    separator = '_' if separator is None else separator
    prefix = '' if parent_key is None else parent_key
    result_type = dict if d is None else type(d)
    flat_items = []
    for key, value in d.items():
        full_key = prefix + separator + key if prefix else key
        if isinstance(value, MutableMapping):
            # Recurse into nested mappings, carrying the accumulated key.
            flat_items.extend(
                flatten(value, separator=separator, parent_key=full_key).items()
            )
        else:
            flat_items.append((full_key, value))
    return result_type(flat_items)
def _rendered_size(text, point_size, font_file):
    """
    Return a (width, height) pair representing the size of *text* in English
    Metric Units (EMU) when rendered at *point_size* in the font defined in
    *font_file*.
    """
    emu_per_inch = 914400
    # NOTE(review): 72.0 is points-per-inch (typographic), although the name
    # says pixels — the conversion treats font pixels as points. Confirm this
    # matches how _Fonts sizes its fonts before renaming.
    px_per_inch = 72.0
    font = _Fonts.font(font_file, point_size)
    # getsize returns (width, height) in rendered pixels.
    px_width, px_height = font.getsize(text)
    emu_width = int(px_width / px_per_inch * emu_per_inch)
    emu_height = int(px_height / px_per_inch * emu_per_inch)
    return emu_width, emu_height
def parse_args(args: str) -> Tuple[UFDLType]:
    """
    Parses the string representation of a list of type arguments.
    :param args:
        The type arguments to parse.
    :return:
        The parsed types (empty tuple for an empty string).
    """
    if args == "":
        return ()
    return tuple(map(parse_type, split_args(args)))
def pca_biplot(
    predictor: Iterable, response: Iterable, labels: Iterable[str] = None
) -> pyplot.Figure:
    """
    produces a pca projection and plot the 2 most significant component score and the component coefficients.
    :param predictor: feature matrix to project (standardized internally).
    :param response: per-sample values used to color/annotate the scores.
    :param labels: optional feature names for the coefficient arrows.
    :return: the matplotlib Figure produced by `biplot`.
    """
    # Standardize features so PCA components are not dominated by scale.
    scaler = StandardScaler()
    scaler.fit(predictor)
    pca = PCA()
    # First two component scores and (transposed) loadings go to `biplot`.
    return biplot(
        pca.fit_transform(scaler.transform(predictor))[:, 0:2],
        numpy.transpose(pca.components_[0:2, :]),
        response,
        labels,
    )
def betweenness_centrality(G, k=None, normalized=True,
                           weight=None, endpoints=False,
                           seed=None, result_dtype=np.float64):
    """
    Compute the betweenness centrality for all nodes of the graph G from a
    sample of 'k' sources.
    CuGraph does not currently support the 'endpoints' and 'weight' parameters
    as seen in the corresponding networkX call.
    Parameters
    ----------
    G : cuGraph.Graph
        cuGraph graph descriptor with connectivity information. The graph can
        be either directed (DiGraph) or undirected (Graph).
        Weights in the graph are ignored, the current implementation uses
        BFS traversals. Use weight parameter if weights need to be considered
        (currently not supported)
    k : int or list or None, optional, default=None
        If k is not None, use k node samples to estimate betweenness.  Higher
        values give better approximation
        If k is a list, use the content of the list for estimation: the list
        should contain vertices identifiers.
        Vertices obtained through sampling or defined as a list will be used as
        sources for traversals inside the algorithm.
    normalized : bool, optional
        Default is True.
        If true, the betweenness values are normalized by
        2 / ((n - 1) * (n - 2)) for Graphs (undirected), and
        1 / ((n - 1) * (n - 2)) for DiGraphs (directed graphs)
        where n is the number of nodes in G.
        Normalization will ensure that the values in [0, 1],
        this normalization scales fo the highest possible value where one
        node is crossed by every single shortest path.
    weight : cudf.DataFrame, optional, default=None
        Specifies the weights to be used for each edge.
        Should contain a mapping between
        edges and weights.
        (Not Supported)
    endpoints : bool, optional, default=False
        If true, include the endpoints in the shortest path counts.
        (Not Supported)
    seed : optional
        if k is specified and k is an integer, use seed to initialize the
        random number generator.
        Using None as seed relies on random.seed() behavior: using current
        system time
        If k is either None or list: seed parameter is ignored
    result_dtype : np.float32 or np.float64, optional, default=np.float64
        Indicate the data type of the betweenness centrality scores
    Returns
    -------
    df : cudf.DataFrame
        GPU data frame containing two cudf.Series of size V: the vertex
        identifiers and the corresponding betweenness centrality values.
        Please note that the resulting the 'vertex' column might not be
        in ascending order.
        df['vertex'] : cudf.Series
            Contains the vertex identifiers
        df['betweenness_centrality'] : cudf.Series
            Contains the betweenness centrality of vertices
    Examples
    --------
    >>> M = cudf.read_csv('datasets/karate.csv', delimiter=' ',
    >>>                   dtype=['int32', 'int32', 'float32'], header=None)
    >>> G = cugraph.Graph()
    >>> G.from_cudf_edgelist(M, source='0', destination='1')
    >>> bc = cugraph.betweenness_centrality(G)
    """
    # vertices is intended to be a cuDF series that contains a sampling of
    # k vertices out of the graph.
    #
    # NOTE: cuDF doesn't currently support sampling, but there is a python
    # workaround.
    #
    vertices = None
    if k is not None:
        # In order to compare with pre-set sources,
        # k can either be a list or an integer or None
        # int: Generate an random sample with k elements
        # list: k become the length of the list and vertices become the content
        # None: All the vertices are considered
        # NOTE: We do not renumber in case k is an int, the sampling is
        #       not operating on the valid vertices identifiers but their
        #       indices:
        # Example:
        # - vertex '2' is missing
        # - vertices '0' '1' '3' '4' exist
        # - There is a vertex at index 2 (there is not guarantee that it is
        #   vertice '3' )
        if isinstance(k, int):
            random.seed(seed)  # seed is only honored on this int-sampling path
            vertices = random.sample(range(G.number_of_vertices()), k)
        # Using k as a list allows to have an easier way to compare against
        # other implementations on
        elif isinstance(k, list):
            vertices = k
            k = len(vertices)
            # We assume that the list that was provided is not the indices
            # in the graph structure but the vertices identifiers in the graph
            # hence: [1, 2, 10] should proceed to sampling on vertices that
            # have 1, 2 and 10 as their identifiers
            # FIXME: There might be a cleaner way to obtain the inverse mapping
            if G.renumbered:
                vertices = [G.edgelist.renumber_map[G.edgelist.renumber_map ==
                            vert].index[0] for vert in
                            vertices]
    # Unsupported networkx-compatible options are rejected explicitly.
    if endpoints is True:
        raise NotImplementedError("endpoints accumulation for betweenness "
                                  "centrality not currently supported")
    if weight is not None:
        raise NotImplementedError("weighted implementation of betweenness "
                                  "centrality not currently supported")
    if result_dtype not in [np.float32, np.float64]:
        raise TypeError("result type can only be np.float32 or np.float64")
    # All heavy lifting happens in the C++/CUDA wrapper.
    df = betweenness_centrality_wrapper.betweenness_centrality(G, normalized,
                                                               endpoints,
                                                               weight,
                                                               k, vertices,
                                                               result_dtype)
    return df
def kb_spmat_interp_adjoint(
    data: Tensor, interp_mats: Tuple[Tensor, Tensor], grid_size: Tensor
) -> Tensor:
    """Kaiser-Bessel sparse matrix interpolation adjoint.
    See :py:class:`~torchkbnufft.KbInterpAdjoint` for an overall description of
    adjoint interpolation.
    To calculate the sparse matrix tuple, see
    :py:meth:`~torchkbnufft.calc_tensor_spmatrix`.
    Args:
        data: Scattered data to be interpolated to gridded data. Real inputs
            must carry a trailing dimension of size 2 (real/imag parts).
        interp_mats: 2-tuple of real, imaginary sparse matrices to use for
            sparse matrix KB interpolation.
        grid_size: Size of the output grid.
    Returns:
        ``data`` calculated at gridded locations, in the same (real or
        complex) representation as the input.
    """
    real_input = not data.is_complex()
    if real_input:
        if data.shape[-1] != 2:
            raise ValueError("For real inputs, last dimension must be size 2.")
        # Work internally in the complex representation.
        data = torch.view_as_complex(data)
    image = KbSpmatInterpAdjoint.apply(data, interp_mats, grid_size)
    # Convert back so the caller gets the representation they passed in.
    return torch.view_as_real(image) if real_input else image
def help_send(command: str, help_string_call: Callable[[], str]):
    """Send help information.

    Registers (and returns) a help action for *command* that, when triggered,
    forwards the text produced by *help_string_call* to the send action
    registered in HelpActionManager.

    :param command: the command name the help action belongs to.
    :param help_string_call: zero-argument callable producing the help text.
    :return: the _HELP action instance stored in HelpActionManager.helpers.
    """
    class _HELP(ArgAction):
        def __init__(self):
            super().__init__(HelpActionManager.send_action)

        def handle(self, option_dict, varargs, kwargs, is_raise_exception):
            # Resolve the send action lazily so late registration still works.
            action = require_help_send_action(command=command)
            if action:
                return action(help_string_call())

        async def handle_async(self, option_dict, varargs, kwargs, is_raise_exception):
            action = require_help_send_action(command=command)
            if action:
                return await action(help_string_call())

    HelpActionManager.helpers.setdefault(command, _HELP())
    # If a send action was cached before this helper existed, adopt it now
    # and drop it from the cache.
    if command in HelpActionManager.cache:
        HelpActionManager.helpers[command].action = HelpActionManager.cache[command]
        HelpActionManager.helpers[command].awaitable = inspect.iscoroutinefunction(HelpActionManager.cache[command])
        del HelpActionManager.cache[command]
    return HelpActionManager.helpers[command]
def divide_list(array, number):
    """Split ``array`` into consecutive sub-lists of length ``number``.

    Args:
        array: The sequence to split.
        number: Length of each chunk; must be positive and evenly divide
            ``len(array)``.

    Returns:
        A list of sub-lists, each of length ``number``, preserving order.

    Raises:
        ValueError: If ``number`` is not a positive divisor of the length of
            ``array``.
    """
    # A non-positive chunk size previously produced ZeroDivisionError (0) or
    # a silent empty result (negative); reject both explicitly.
    if number <= 0 or len(array) % number != 0:
        raise ValueError(
            "number must be a positive divisor of len(array) "
            "(got len(array)=%d, number=%d)" % (len(array), number)
        )
    return [array[x:x + number] for x in range(0, len(array), number)]
def _sympify(a):
    """Short version of sympify for internal usage for __add__ and __eq__
    methods where it is ok to allow some things (like Python integers
    and floats) in the expression. This excludes things (like strings)
    that are unwise to allow into such an expression.
    >>> from sympy import Integer
    >>> Integer(1) == 1
    True
    >>> Integer(1) == '1'
    False
    >>> from sympy import Symbol
    >>> from sympy.abc import x
    >>> x + 1
    x + 1
    >>> x + '1'
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand type(s) for +: 'Symbol' and 'str'
    see: sympify
    """
    # strict=True rejects objects (e.g. strings) that cannot be converted
    # unambiguously, instead of attempting a best-effort conversion.
    return sympify(a, strict=True) | 35,464
def remove_category(directory: str, img_dir: str, annotation_file_path: str, category: int = 1):
    """Delete image files containing annotations of the given COCO category.

    Args:
        directory: Dataset root containing an ``annotations`` sub-directory.
        img_dir: Name of the image sub-directory inside ``directory``.
        annotation_file_path: File name of the COCO annotation JSON located
            inside ``directory/annotations``.
        category: COCO category id whose images should be removed
            (category 1 == humans).

    Raises:
        FileNotFoundError: If the annotation file does not exist.
    """
    annotation_file_path = os.path.join(directory, "annotations", annotation_file_path)
    # Raise explicitly instead of using assert (asserts vanish under -O).
    if not os.path.isfile(annotation_file_path):
        raise FileNotFoundError(annotation_file_path)
    with open(annotation_file_path, 'r') as annotation_file:
        posts = json.load(annotation_file)
    # Index images by id for O(1) lookup per annotation.
    img_dict = {image["id"]: image for image in posts["images"]}
    for annotation in tqdm(posts["annotations"]):
        if annotation["category_id"] == category:
            img = img_dict[annotation["image_id"]]
            img_file = os.path.join(directory, img_dir, img["file_name"])
            if os.path.isfile(img_file):
                os.remove(img_file)
def process_value(setting_info, color):
    """Called by the :class:`rivalcfg.mouse.Mouse` class when processing a
    "reactive_rgbcolor" type setting.

    :param dict setting_info: The information dict of the setting from the
                              device profile.
    :param str,tuple,list,None color: The reactive color.
    :rtype: [int, int, int]

    :raises ValueError: If ``color`` is not ``None``, "off"/"disable", a
        3-item RGB tuple/list of ints in 0-255, or a parsable color string.
    """
    # Disable the reactive color
    if color is None or str(color).lower() in ["off", "disable"]:
        return [0x00, 0x00, 0x00, 0x00, 0x00]
    # Color tuple: must be exactly three int channels, each within 0-255.
    if type(color) in (tuple, list):
        if len(color) != 3:
            raise ValueError("Not a valid color %s" % str(color))
        for channel in color:
            if type(channel) != int or channel < 0 or channel > 255:
                raise ValueError("Not a valid color %s" % str(color))
        return [0x01, 0x00] + list(color)
    # Color string (name or hex code).
    if is_color(color):
        return [0x01, 0x00] + list(parse_color_string(color))
    raise ValueError("Not a valid color %s" % str(color))
def generate_ones(num_bits):
    """Return a 1-D integer numpy array containing ``num_bits`` ones.

    Args:
        num_bits: Number of elements in the returned array.

    Returns:
        np.ndarray: Array of shape ``(num_bits,)`` filled with ones.
    """
    # np.int was removed in NumPy 1.24 (deprecated since 1.20); the builtin
    # int maps to NumPy's default integer dtype.
    return np.ones(num_bits, dtype=int)
def get_exception_message(exception: Exception) -> str:
    """Return the message part of an exception as a stripped string.

    Args:
        exception: The exception whose message should be extracted.

    Returns:
        ``str(exception)`` with leading/trailing whitespace removed.
    """
    return str(exception).strip()
def handle_remove_readonly(
    func: Callable, path: str, exc: Tuple[BaseException, OSError, TracebackType]
) -> None:
    """Handle errors when trying to remove read-only files through `shutil.rmtree`.

    This handler makes sure the given file is writable, then re-executes the
    given removal function.

    Arguments:
        func: An OS-dependant function used to remove a file.
        path: The path to the file to remove.
        exc: A `sys.exc_info()` object.

    Raises:
        The active exception, unchanged, when the failure is not an EACCES
        error from one of the removal functions.
    """
    excvalue = exc[1]
    if func in (os.rmdir, os.remove, os.unlink) and excvalue.errno == errno.EACCES:
        # Grant rwx to user/group/other so the retry can succeed.
        os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # 0777
        func(path)
    else:
        # Not a permission problem we can fix: re-raise the active exception.
        raise
def GetAvailableKernelProfiles():
    """Get available profiles on specified gsurl.

    Returns:
        a dictionary that maps kernel version, e.g. "4_4" to a sorted list of
        [milestone, major, minor, timestamp]. E.g,
        [62, 9901, 21, 1506581147]
    """
    gs_context = gs.GSContext()
    gs_ls_url = os.path.join(KERNEL_PROFILE_URL, KERNEL_PROFILE_LS_PATTERN)
    gs_match_url = os.path.join(KERNEL_PROFILE_URL, KERNEL_PROFILE_NAME_PATTERN)
    try:
        res = gs_context.List(gs_ls_url)
    except gs.GSNoSuchKey:
        logging.info('gs files not found: %s', gs_ls_url)
        return {}
    matches = filter(None, [re.match(gs_match_url, p.url) for p in res])
    versions = {}
    for m in matches:
        # Materialize as a list: on Python 3 map() returns an iterator, and a
        # list of iterators cannot be sorted below (TypeError).
        versions.setdefault(m.group(1), []).append([int(x) for x in m.groups()[1:]])
    for v in versions:
        versions[v].sort()
    return versions
def scatterplot(data, x, label=None):
    """Scatterplot matrix for array data.

    Diagonal panels show a histogram of each variable with a fitted normal
    density; off-diagonal panels show pairwise scatter plots.

    data have all the data (inlcuding missing data)
    x is a list of arrays without the missing data (for histogram and fitting)
    label is a sequence of axis labels, one per column of data.
    """
    n_vars = data.shape[1]
    fig, ax = plt.subplots(n_vars, n_vars, figsize=(8, 8))
    fig.suptitle('Scatterplot Matrix', fontsize=12)
    fig.subplots_adjust(hspace=0.04, wspace=0.04)
    nbins2 = 4 if n_vars > 3 else 5
    for i in range(n_vars):
        for j in range(n_vars):
            if i == j:
                # Bin count heuristic; hist() requires an integer bin count.
                nbins = int(2 * np.sqrt(x[i].size)
                            if x[i].size < 100 else np.sqrt(x[i].size))
                # `normed` was removed in matplotlib 3.x; `density` is the
                # equivalent normalization flag.
                n, bins, patches = ax[i, j].hist(x[i], nbins, density=True,
                                                 facecolor='blue', alpha=0.75)
                bincenters = np.linspace((bins[0] + bins[1]) / 2,
                                         (bins[-2] + bins[-1]) / 2, 100)
                y = stats.norm.pdf(bincenters, x[i].mean(), scale=x[i].std())
                ax[i, j].plot(bincenters, y, 'r-', linewidth=2)
            else:
                ax[i, j].plot(data[:, i], data[:, j], 'bo', alpha=0.75)
            ax[i, j].grid(True, linestyle='-', which='major',
                          color='lightgrey', alpha=0.5)
            ax[i, j].xaxis.set_visible(False)
            ax[i, j].yaxis.set_visible(False)
            ax[i, j].xaxis.set_major_locator(ticker.MaxNLocator(nbins=nbins2,
                                                                prune='both'))
            ax[i, j].yaxis.set_major_locator(ticker.MaxNLocator(nbins=nbins2,
                                                                prune='both'))
            # Axes.is_first_col() and friends were removed in matplotlib 3.6;
            # the grid comes from plt.subplots, so index checks are exact.
            if j == 0:
                ax[i, j].yaxis.set_ticks_position('left')
                ax[i, j].yaxis.set_visible(True)
                ax[i, j].set_ylabel(label[i])
            if j == n_vars - 1:
                ax[i, j].yaxis.set_ticks_position('right')
                ax[i, j].yaxis.set_visible(True)
            if i == 0:
                ax[i, j].xaxis.set_ticks_position('top')
                ax[i, j].xaxis.set_visible(True)
            if i == n_vars - 1:
                ax[i, j].xaxis.set_ticks_position('bottom')
                ax[i, j].xaxis.set_visible(True)
                ax[i, j].set_xlabel(label[j])
    plt.show()
def closeForm(f):
    """Write the closing ``</form>`` tag for the GUI form.

    @param f: File to write to
    """
    # print with end="" emits exactly the tag, no trailing newline.
    print("</form>", end="", file=f)
def main() -> None:
    """Main function: parse CLI arguments, connect, and run the SVM demo."""
    # Extra CLI flags specific to this sample; parse_args supplies the rest
    # (including the API user/password used below).
    arguments = [
        Argument("-c", "--cluster", "API server IP:port details")]
    args = parse_args(
        "Demonstrates Volume Operations using REST API Python Client Library.",
        arguments,
    )
    setup_logging()
    # Build the request headers once from the user/password CLI args and
    # reuse them for all REST calls.
    headers = setup_connection(args.api_user, args.api_pass)
    svm_ops(args.cluster, headers) | 35,473
def _get_dbnd_run_relative_cmd():
"""returns command without 'dbnd run' prefix"""
argv = list(sys.argv)
while argv:
current = argv.pop(0)
if current == "run":
return argv
raise DatabandRunError(
"Can't calculate run command from '%s'",
help_msg="Check that it has a format of '..executable.. run ...'",
) | 35,474 |
def should_commit(kwargs: Kwargs) -> Tuple[bool, Kwargs]:
    """Decide whether a schema class should create a document on instantiation.

    Pops the ``create`` flag out of ``kwargs`` (mutating it in place) so the
    flag is not forwarded to the underlying constructor.

    Args:
        kwargs: Keyword arguments passed to the schema class.

    Returns:
        A 2-tuple of (create flag, remaining kwargs). The flag defaults to
        True when no ``create`` key is present.
    """
    # pop() with a default replaces the membership-test-plus-pop dance
    # (single lookup, same semantics).
    return kwargs.pop('create', True), kwargs
def create_view_files_widget(ws_names2id: Dict[str, str], ws_paths: Dict[str, WorkspacePaths], output):
    """Create an ipywidget UI to view HTML snapshots and their associated comment files.

    Builds a chain of dropdowns (workspace -> user -> date -> time -> file)
    where each selection populates the options of the next, plus two buttons
    that render either the snapshot's comment or the snapshot itself into
    ``output``. Returns the assembled VBox widget.
    """
    # --- dropdown chain: each observer below fills the next dropdown ---
    workspace_chooser = widgets.Dropdown(
        options=ws_names2id,
        value=None,
        description='<b>Choose the workspace</b>:',
        style={'description_width': 'initial'},
        layout=widgets.Layout(width='900px')
    )
    user_chooser = widgets.Dropdown(
        options=[],
        value=None,
        description='<b>Choose the user</b>:',
        style={'description_width': 'initial'},
        layout=widgets.Layout(width='900px')
    )
    date_chooser = widgets.Dropdown(
        options=[],
        value=None,
        description='<b>Choose the date</b>:',
        style={'description_width': 'initial'},
        layout=widgets.Layout(width='900px')
    )
    time_chooser = widgets.Dropdown(
        options=[],
        value=None,
        description='<b>Choose the time</b>:',
        style={'description_width': 'initial'},
        layout=widgets.Layout(width='900px')
    )
    file_chooser = widgets.Dropdown(
        options=[],
        value=None,
        description='<b>Choose the file</b>:',
        style={'description_width': 'initial'},
        layout=widgets.Layout(width='900px')
    )
    view_comment_button = widgets.Button(
        description='View the comment for the HTML snapshot',
        disabled=False,
        button_style='success',
        layout=widgets.Layout(width='300px'),
        tooltip='Click the button to view the comment associated with the HTML snapshot of the notebook.'
    )
    view_html_button = widgets.Button(
        description='View the HTML snapshot',
        disabled=False,
        button_style='success',
        layout=widgets.Layout(width='250px'),
        tooltip='Click the button to view the HTML snapshot of the notebook.'
    )
    # Show the sidecar comment file for the selected snapshot via `gsutil cat`.
    def on_view_comment_button_clicked(_):
        with output:
            output.clear_output()
            if not file_chooser.value:
                display(HTML('''<div class="alert alert-block alert-warning">
                No comment files found for HTML snapshots in this workspace.</div>'''))
                return
            comment_file = file_chooser.value.replace('.html', WorkspacePaths.COMMENT_FILE_SUFFIX)
            comment = get_ipython().getoutput(f"gsutil cat '{comment_file}'")
            display(HTML(f'''<div class="alert alert-block alert-info">{comment}</div>'''))
    view_comment_button.on_click(on_view_comment_button_clicked)
    # Copy the snapshot locally, then render it in an IFrame.
    def on_view_html_button_clicked(_):
        with output:
            output.clear_output()
            if not file_chooser.value:
                display(HTML('''<div class="alert alert-block alert-warning">
                No HTML snapshots found in this workspace.</div>'''))
                return
            source = file_chooser.value
            dest = TEMP_HTML.name
            get_ipython().system(f"set -o xtrace ; gsutil cp '{source}' '{dest}'")
            display(IFrame(os.path.join('.', os.path.basename(TEMP_HTML.name)), width='100%', height=800))
    view_html_button.on_click(on_view_html_button_clicked)
    # Workspace selected -> list the user directories within it.
    def on_choose_workspace(changed):
        output.clear_output()
        user_chooser.options = []
        if changed['new']:
            workspace_paths = ws_paths[changed['new']]
            items = tf.io.gfile.glob(pattern=workspace_paths.get_user_glob())
            if items:
                user_chooser.options = {os.path.basename(item): item for item in items}
    workspace_chooser.observe(on_choose_workspace, names='value')
    # User selected -> list snapshot dates, newest first.
    def on_choose_user(changed):
        date_chooser.options = []
        if changed['new']:
            workspace_paths = ws_paths[workspace_chooser.value]
            items = tf.io.gfile.glob(pattern=workspace_paths.add_date_glob_to_path(path=changed['new']))
            if items:
                date_chooser.options = collections.OrderedDict(sorted(
                    {os.path.basename(item): item for item in items}.items(), reverse=True))
    user_chooser.observe(on_choose_user, names='value')
    # Date selected -> list snapshot times, newest first.
    def on_choose_date(changed):
        time_chooser.options = []
        if changed['new']:
            workspace_paths = ws_paths[workspace_chooser.value]
            items = tf.io.gfile.glob(pattern=workspace_paths.add_time_glob_to_path(path=changed['new']))
            if items:
                time_chooser.options = collections.OrderedDict(sorted(
                    {os.path.basename(item): item for item in items}.items(), reverse=True))
    date_chooser.observe(on_choose_date, names='value')
    # Time selected -> list the HTML snapshot files.
    def on_choose_time(changed):
        file_chooser.options = []
        if changed['new']:
            workspace_paths = ws_paths[workspace_chooser.value]
            items = tf.io.gfile.glob(pattern=workspace_paths.add_html_glob_to_path(path=changed['new']))
            if items:
                file_chooser.options = {os.path.basename(item): item for item in items}
    time_chooser.observe(on_choose_time, names='value')
    return widgets.VBox(
        [widgets.HTML('''
        <h3>View an HTML snapshot of a notebook</h3>
        <p>Use the dropdowns to select the workspace, user, date, time, and particular HTML snapshot.
        <br>Then click on the 'view' buttons to see either the comment for the snapshot or the actual snapshot.
        </p><hr>'''),
         workspace_chooser, user_chooser, date_chooser, time_chooser, file_chooser,
         widgets.HBox([view_comment_button, view_html_button])],
        layout=widgets.Layout(width='auto', border='solid 1px grey')) | 35,476
def test_hypotest_default(tmpdir, hypotest_args):
    """
    Check that the default return structure of pyhf.utils.hypotest is as expected
    """
    tb = pyhf.tensorlib
    kwargs = {}
    result = pyhf.utils.hypotest(*hypotest_args, **kwargs)
    # CLs_obs
    # With no optional kwargs, hypotest returns a single value (the observed
    # CLs) as a tensor of the active backend.
    assert len(list(result)) == 1
    assert isinstance(result, type(tb.astensor(result))) | 35,477
def _get_frame_time(time_steps):
""" Compute average frame time.
:param time_steps: 1D array with cumulative frame times.
:type time_steps: numpy.ndarray
:return: The average length of each frame in seconds.
:rtype: float
"""
if len(time_steps.shape) != 1:
raise ValueError("ERROR: Time series must be a 1D array.")
frame_time = time_steps[-1]/(len(time_steps) - 1) # Need to ignore the first frame (0).
return frame_time | 35,478 |
def next_symbol_to_learn(ls):
    """Returns the next symbol to learn. This always returns characters from the
    training set, within those, gives higher probability to symbols the user
    doesn't know very well yet. `ls` is the learn state. Returns a tuple like
    ("V", "...-")
    """
    # Weight each symbol inversely to its proficiency score so that
    # poorly-known symbols are drawn more often.
    total = 0.0
    candidates = []
    for k in ls["learning_set"]:
        weight = 1.0 / ls[k]
        total += weight
        candidates.append((k, weight))
    # Weighted random draw: walk the cumulative weights until we pass r.
    r = random.uniform(0.0, total)
    acc = 0.0  # renamed from `sum`, which shadowed the builtin
    for symbol, weight in candidates:
        acc += weight
        if r <= acc:
            return (symbol, morse.to_morse[symbol])
    # Floating-point rounding can leave r marginally above the final
    # cumulative sum; fall back to the last candidate instead of silently
    # returning None (the original printed a warning and fell through).
    symbol = candidates[-1][0]
    return (symbol, morse.to_morse[symbol])
def query_table3(song):
    """Return the SQL to fetch all users who listened to the given song.

    Args:
        song: Song name to look up. Single quotes are doubled so the value
            cannot terminate the SQL string literal early.

    Returns:
        A SQL query string selecting ``user_name`` from ``WHERE_SONG``.
    """
    # NOTE: building SQL by string interpolation is inherently risky; prefer
    # a parameterized query (cursor.execute(sql, (song,))) when the execution
    # API is available. Doubling quotes limits injection via the song name.
    escaped = song.replace("'", "''")
    return "select user_name from WHERE_SONG where song_name = '{}';".format(escaped)
def get_available_user_FIELD_transitions(instance, user, field):
    """
    List of transitions available in current model state
    with all conditions met and user have rights on it
    """
    # Lazily filter the state-based transitions down to those the given user
    # is permitted to trigger.
    for transition in get_available_FIELD_transitions(instance, field):
        if transition.has_perm(instance, user):
            yield transition | 35,481
def test_check_input_dtw(params, err_msg):
    """Test parameter validation."""
    # Each parametrized case must raise ValueError with exactly the expected
    # message (escaped because pytest.raises matches it as a regex).
    with pytest.raises(ValueError, match=re.escape(err_msg)):
        _check_input_dtw(**params) | 35,482
def create_background(
    bg_type,
    fafile,
    outfile,
    genome="hg18",
    size=200,
    nr_times=10,
    custom_background=None,
):
    """Create background of a specific type.

    Parameters
    ----------
    bg_type : str
        Name of background type ("random", "genomic", "gc", "promoter" or
        "custom").
    fafile : str
        Name of input FASTA file.
    outfile : str
        Name of output FASTA file.
    genome : str, optional
        Genome name.
    size : int, optional
        Size of regions.
    nr_times : int, optional
        Generate this times as many background sequences as compared to
        input file.
    custom_background : str, optional
        FASTA file with custom background sequences; only used when
        ``bg_type`` is "custom".

    Returns
    -------
    nr_seqs : int
        Number of sequences created.
    """
    size = int(size)
    config = MotifConfig()
    fg = Fasta(fafile)
    # Only these background types need a reference genome.
    if bg_type in ["genomic", "gc"]:
        if not genome:
            logger.error("Need a genome to create background")
            sys.exit(1)
    if bg_type == "random":
        f = MarkovFasta(fg, k=1, n=nr_times * len(fg))
        logger.debug("Random background: %s", outfile)
    elif bg_type == "genomic":
        logger.debug("Creating genomic background")
        f = RandomGenomicFasta(genome, size, nr_times * len(fg))
    elif bg_type == "gc":
        logger.debug("Creating GC matched background")
        f = MatchedGcFasta(fafile, genome, nr_times * len(fg))
        logger.debug("GC matched background: %s", outfile)
    elif bg_type == "promoter":
        fname = Genome(genome).filename
        gene_file = fname.replace(".fa", ".annotation.bed.gz")
        if not gene_file:
            gene_file = os.path.join(config.get_gene_dir(), "%s.bed" % genome)
        if not os.path.exists(gene_file):
            # Fixed: the genome name was missing from the original message
            # (the "{}" placeholder was never formatted).
            print("Could not find a gene file for genome {}".format(genome))
            print("Did you use the --annotation flag for genomepy?")
            print(
                "Alternatively make sure there is a file called {}.bed in {}".format(
                    genome, config.get_gene_dir()
                )
            )
            raise ValueError("gene file for genome {} not found".format(genome))
        logger.info(
            "Creating random promoter background (%s, using genes in %s)",
            genome,
            gene_file,
        )
        f = PromoterFasta(gene_file, genome, size, nr_times * len(fg))
        logger.debug("Random promoter background: %s", outfile)
    elif bg_type == "custom":
        bg_file = custom_background
        if not bg_file:
            raise IOError("Background file not specified!")
        if not os.path.exists(bg_file):
            # Fixed: the file name was passed as an extra args element
            # instead of being interpolated into the message.
            raise IOError("Custom background file %s does not exist!" % bg_file)
        else:
            logger.info("Copying custom background file %s to %s.", bg_file, outfile)
            f = Fasta(bg_file)
            median_length = np.median([len(seq) for seq in f.seqs])
            # Warn when the custom sequences differ markedly (>5%) in size
            # from the requested region size.
            if median_length < (size * 0.95) or median_length > (size * 1.05):
                # logger.warn is deprecated; warning() is the supported name.
                logger.warning(
                    "The custom background file %s contains sequences with a "
                    "median size of %s, while GimmeMotifs predicts motifs in sequences "
                    "of size %s. This will influence the statistics! It is recommended "
                    "to use background sequences of the same size.",
                    bg_file,
                    median_length,
                    size,
                )
    f.writefasta(outfile)
    return len(f)
def train_test_data(x_, y_, z_, i):
    """Split x, y and z arrays into learning and test sets.

    Takes in x, y and z arrays plus an array of random indices ``i``.
    Returns learning arrays for x, y and z with (N - len(i)) elements
    and test arrays of length len(i).

    :param x_: 1D data array.
    :param y_: 1D data array.
    :param z_: 1D data array.
    :param i: Indices selecting the test samples.
    :return: (x_learn, y_learn, z_learn, x_test, y_test, z_test)
    """
    # np.delete drops the test indices; np.take gathers them.
    x_learn = np.delete(x_, i)
    y_learn = np.delete(y_, i)
    z_learn = np.delete(z_, i)
    x_test = np.take(x_, i)
    y_test = np.take(y_, i)
    z_test = np.take(z_, i)
    return x_learn, y_learn, z_learn, x_test, y_test, z_test
def construct_Tba(leads, tleads, Tba_=None):
    """
    Constructs many-body tunneling amplitude matrix Tba from single particle
    tunneling amplitudes.

    Parameters
    ----------
    leads : LeadsTunneling
        LeadsTunneling object.
    tleads : dict
        Dictionary containing single particle tunneling amplitudes.
        tleads[(lead, state)] = tunneling amplitude.
    Tba_ : None or ndarray
        nbaths by nmany by nmany numpy array containing old values of Tba.
        The values in tleads are added to Tba_ (the array is mutated in place).

    Returns
    -------
    Tba : ndarray
        nleads by nmany by nmany numpy array containing many-body tunneling amplitudes.
        The returned Tba corresponds to Fock basis.
    """
    si, mtype = leads.si, leads.mtype
    if Tba_ is None:
        Tba = np.zeros((si.nleads, si.nmany, si.nmany), dtype=mtype)
    else:
        # Accumulate into the caller-provided array.
        Tba = Tba_
    # Iterate over many-body states
    for j1 in range(si.nmany):
        state = si.get_state(j1)
        # Iterate over single particle states
        for j0 in tleads:
            (j3, j2), tamp = j0, tleads[j0]
            # Calculate fermion sign for added/removed electron in a given state
            # (sign is (-1)**n, with n the number of occupied modes below j2).
            fsign = np.power(-1, sum(state[0:j2]))
            if state[j2] == 0:
                # Orbital j2 is empty: this term adds a particle.
                statep = list(state)
                statep[j2] = 1
                ind = si.get_ind(statep)
                if ind is None:
                    continue
                Tba[j3, ind, j1] += fsign*tamp
            else:
                # Orbital j2 is occupied: removal term uses the conjugated
                # amplitude.
                statep = list(state)
                statep[j2] = 0
                ind = si.get_ind(statep)
                if ind is None:
                    continue
                Tba[j3, ind, j1] += fsign*np.conj(tamp)
    return Tba | 35,485
def inplace_update_i(tensor_BxL, updates_B, i):
    """Inplace update a tensor. B: batch_size, L: tensor length.

    Writes ``updates_B[b]`` into column ``i`` of every batch row ``b``.
    NOTE(review): despite the name, tf.tensor_scatter_nd_update returns a new
    tensor rather than mutating its input — confirm callers use the return
    value.
    """
    batch_size = tensor_BxL.shape[0]
    # Build the (b, i) index pair for every row of the batch.
    indices_Bx2 = tf.stack([
        tf.range(batch_size, dtype=tf.int64),
        tf.fill([batch_size], tf.cast(i, tf.int64))
    ],
                           axis=-1)
    return tf.tensor_scatter_nd_update(tensor_BxL, indices_Bx2, updates_B) | 35,486
def bert(model = 'base', validate = True):
    """
    Load bert model.

    Parameters
    ----------
    model : str, optional (default='base')
        Model architecture supported. Allowed values:
        * ``'multilanguage'`` - bert multilanguage released by Google.
        * ``'base'`` - base bert-bahasa released by Malaya.
        * ``'small'`` - small bert-bahasa released by Malaya.
    validate: bool, optional (default=True)
        if True, malaya will check model availability and download if not available.

    Returns
    -------
    BERT_MODEL: malaya.bert._Model class
    """
    if not isinstance(model, str):
        raise ValueError('model must be a string')
    if not isinstance(validate, bool):
        raise ValueError('validate must be a boolean')
    model = model.lower()
    if model not in available_bert_model():
        raise Exception(
            'model not supported, please check supported models from malaya.bert.available_bert_model()'
        )
    # Download/verify the model archive; without validation only check local
    # availability.
    if validate:
        check_file(PATH_BERT[model]['model'], S3_PATH_BERT[model])
    else:
        if not check_available(PATH_BERT[model]['model']):
            raise Exception(
                'bert-model/%s is not available, please `validate = True`'
                % (model)
            )
    if model == 'multilanguage':
        # Google's multilingual BERT ships as a zip with a WordPiece vocab.
        if not os.path.exists(PATH_BERT[model]['directory']):
            from zipfile import ZipFile
            with ZipFile(PATH_BERT[model]['model']['model'], 'r') as zip:
                zip.extractall(PATH_BERT[model]['path'])
        from bert import tokenization
        bert_vocab = PATH_BERT[model]['directory'] + 'vocab.txt'
        bert_checkpoint = PATH_BERT[model]['directory'] + 'bert_model.ckpt'
        tokenizer = tokenization.FullTokenizer(
            vocab_file = bert_vocab, do_lower_case = False
        )
        cls = '[CLS]'
        sep = '[SEP]'
    else:
        # Malaya's bert-bahasa models ship as tarballs with a SentencePiece
        # vocab.
        # NOTE(review): extractall trusts member paths of the downloaded
        # archive — consider validating members before extraction.
        if not os.path.exists(PATH_BERT[model]['directory']):
            import tarfile
            with tarfile.open(PATH_BERT[model]['model']['model']) as tar:
                tar.extractall(path = PATH_BERT[model]['path'])
        import sentencepiece as spm
        from .texts._text_functions import SentencePieceTokenizer
        bert_checkpoint = PATH_BERT[model]['directory'] + 'model.ckpt'
        sp_model = spm.SentencePieceProcessor()
        sp_model.Load(PATH_BERT[model]['directory'] + 'sp10m.cased.v4.model')
        # Vocab file is tab-separated: token -> score/id string.
        with open(
            PATH_BERT[model]['directory'] + 'sp10m.cased.v4.vocab'
        ) as fopen:
            v = fopen.read().split('\n')[:-1]
        v = [i.split('\t') for i in v]
        v = {i[0]: i[1] for i in v}
        tokenizer = SentencePieceTokenizer(v, sp_model)
        cls = '<cls>'
        sep = '<sep>'
    # Build the graph and restore the pretrained checkpoint weights.
    bert_config = PATH_BERT[model]['directory'] + 'bert_config.json'
    bert_config = modeling.BertConfig.from_json_file(bert_config)
    model = _Model(bert_config, tokenizer, cls = cls, sep = sep)
    model._saver.restore(model._sess, bert_checkpoint)
    return model | 35,487
def surface(
        x_grid, y_grid, z_grid,
        cmap="Blues", angle=(25, 300), alpha=1.,
        fontsize=14, labelpad=10,
        title="", x_label="", y_label="", z_label="log-likelihood"):
    """
    Creates a 3d surface plot given a grid for each axis.

    Arguments:

    ``x_grid``
        An NxN grid of values.
    ``y_grid``
        An NxN grid of values.
    ``z_grid``
        An NxN grid of values. z_grid determines colour.
    ``cmap``
        (Optional) Colour map used in the plot
    ``angle``
        (Optional) tuple specifying the viewing angle of the graph
    ``alpha``
        (Optional) alpha parameter of the surface
    ``fontsize``
        (Optional) the fontsize used for labels
    ``labelpad``
        (Optional) distance of axis labels from the labels
    ``title``
        (Optional) The figure title
    ``x_label``
        (Optional) The label of the x-axis
    ``y_label``
        (Optional) The label of the y-axis
    ``z_label``
        (Optional) The label of the z-axis

    Returns a ``matplotlib`` 3d axes handle.
    """
    import matplotlib.pyplot as plt
    # Importing Axes3D registers the '3d' projection on older matplotlib;
    # it is intentionally "unused".
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401

    ax = plt.axes(projection='3d')
    ax.plot_surface(x_grid, y_grid, z_grid, cmap=cmap, alpha=alpha)
    ax.view_init(*angle)
    if title:
        plt.title(title, fontsize=fontsize)
    if x_label:
        ax.set_xlabel(x_label, fontsize=fontsize, labelpad=labelpad)
    if y_label:
        ax.set_ylabel(y_label, fontsize=fontsize, labelpad=labelpad)
    if z_label:
        ax.set_zlabel(z_label, fontsize=fontsize, labelpad=labelpad)
    return ax
async def record_trade_volume() -> RecordTradeVolumeResponse:
    """
    This api exists for demonstration purposes so you don't have to wait until the job runs again to pick up new data
    """
    # Trigger the same trade-volume refresh the scheduled job performs.
    await deps.currency_trade_service.update_trade_volumes()
    return RecordTradeVolumeResponse(success=True) | 35,489
def dataload_preprocessing(data_path, dataset, long_sent=800):
    """
    :param data_path: base directory
    :param dataset: select dataset {'20news', 'mr', 'trec', 'mpqa'}
    :param long_sent: if dataset has long sentences, set to be constant length value
    :return: seq_length, num_classes, vocab_size, x_train, y_train, x_test, y_test, pre-train_word (GloVe 840b),
     word_idx
    """
    assert os.path.exists(data_path) is True
    x = load_pickle_data(data_path, dataset)
    data_frame, pretrain_word, len_train, n_exist_word, vocab, word_idx = x
    max_l = int(np.max(pd.DataFrame(data_frame)["num_words"]))
    # Sequence length passed to make_idx_data: the fixed `long_sent` for the
    # long-sentence corpora, otherwise the longest observed sentence.
    if dataset in ["reuters", "20news", "imdb", 'mr']:
        train, test = make_idx_data(data_frame, word_idx, len_train, long_sent)
    else:
        train, test = make_idx_data(data_frame, word_idx, len_train, max_l)
    # train[:, :-1] = word idx
    # train[:, -1] = true label
    x_train = train[:, :-1]
    y_train = train[:, -1]
    x_test = test[:, :-1]
    y_test = test[:, -1]
    sequence_length = len(x_train[0])
    # make one-hot: map each label to a row of the identity matrix.
    labels = sorted(list(set(y_train)))
    one_hot = np.zeros((len(labels), len(labels)), int)
    np.fill_diagonal(one_hot, 1)
    label_dict = dict(zip(labels, one_hot))
    y_train = np.eye(len(label_dict))[y_train]
    num_class = y_train.shape[1]
    y_test = np.eye(len(label_dict))[y_test]
    vocab_size = pretrain_word.shape[0]
    print("sequence length :", sequence_length)
    print("vocab size :", vocab_size)
    print("num classes :", num_class)
    return sequence_length, num_class, vocab_size, x_train, y_train, x_test, y_test, pretrain_word, word_idx | 35,490
def logged_in():
    """
    Method called by Strava (redirect) that includes parameters.
    - state
    - code
    - error
    """
    error = request.args.get('error')
    # NOTE(review): `state` is read but never validated against the value we
    # sent — confirm whether a CSRF state check is intended here.
    state = request.args.get('state')
    if error:
        # Strava reported an OAuth error (e.g. the user denied access).
        return render_template('login_error.html',
                               error=error,
                               competition_title=config.COMPETITION_TITLE)
    else:
        code = request.args.get('code')
        client = Client()
        # Exchange the one-time authorization code for an access token.
        token_dict = client.exchange_code_for_token(client_id=config.STRAVA_CLIENT_ID,
                                                    client_secret=config.STRAVA_CLIENT_SECRET,
                                                    code=code)
        # Use the now-authenticated client to get the current athlete
        strava_athlete = client.get_athlete()
        athlete_model = data.update_athlete_auth(strava_athlete, token_dict)
        if not athlete_model:
            return render_template('login_error.html',
                                   error="ATHLETE_NOT_FOUND",
                                   competition_title=config.COMPETITION_TITLE)
        multiple_teams = None
        no_teams = False
        team = None
        message = None
        # Team registration may fail with either "ambiguous" (multiple
        # teams) or "none found"; both are reported on the results page.
        try:
            team = data.register_athlete_team(
                strava_athlete=strava_athlete,
                athlete_model=athlete_model,
            )
        except MultipleTeamsError as multx:
            multiple_teams = multx.teams
            message = multx
        except NoTeamsError as noteamsx:
            no_teams = True
            message = noteamsx
        # Only log the athlete in when at least one team could be resolved.
        if not no_teams:
            auth.login_athlete(strava_athlete)
            return redirect(url_for('user.rides'))
        else:
            return render_template(
                'login_results.html',
                athlete=strava_athlete,
                team=team,
                multiple_teams=multiple_teams,
                no_teams=no_teams,
                message=message,
                competition_title=config.COMPETITION_TITLE,
            ) | 35,491
def test_time_dependent_to_qutip():
    """Test conversion of a time-dependent Hamiltonian"""
    Hil = LocalSpace(hs_name(), dimension=5)
    ad = Create(hs=Hil)
    a = Create(hs=Hil).adjoint()
    w, g, t = symbols('w, g, t', real=True)
    # A time-independent operator converts exactly like convert_to_qutip.
    H = ad*a + (a + ad)
    assert _time_dependent_to_qutip(H) == convert_to_qutip(H)
    # A single time-dependent term yields (operator, coefficient-callable).
    H = g * t * a
    res = _time_dependent_to_qutip(H, time_symbol=t)
    assert res[0] == convert_to_qutip(a)
    assert res[1](1, {}) == g
    assert res[1](1, {g: 2}) == 2
    # Mixed constant + time-dependent parts -> [H0, (H1, f1), (H2, f2)].
    H = ad*a + g * t * (a + ad)
    res = _time_dependent_to_qutip(H, time_symbol=t)
    assert len(res) == 3
    assert res[0] == convert_to_qutip(ad*a)
    assert res[1][0] == convert_to_qutip(ad)
    assert res[1][1](1, {}) == g
    assert res[2][0] == convert_to_qutip(a)
    assert res[2][1](1, {}) == g
    # convert_as='str' yields string coefficients instead of callables.
    res = _time_dependent_to_qutip(H, time_symbol=t, convert_as='str')
    terms = [term for H, term in res[1:]]
    assert terms == ['g*t', 'g*t']
    # Expanding a squared Hamiltonian: each scalar-times-operator term's
    # coefficient must appear as a string term.
    H = (ad*a + t * (a + ad))**2
    res = _time_dependent_to_qutip(H, time_symbol=t, convert_as='str')
    assert len(res) == 9
    terms = sorted([term for H, term in res[1:]])
    expected = sorted([str(op.coeff.val) for op in H.expand().operands
                       if isinstance(op, ScalarTimesOperator)])
    assert terms == expected | 35,492
def unit_string_to_cgs(string: str) -> float:
    """
    Convert a unit string to cgs.

    Parameters
    ----------
    string
        The string to convert. Recognized (case-insensitive):
        'au' (distance), 'solarm'/'msun' (mass),
        'year'/'years'/'yr'/'yrs' (time).

    Returns
    -------
    float
        The value in cgs.

    Raises
    ------
    ValueError
        If the unit string is not recognized.
    """
    # Normalize once instead of lowercasing per comparison.
    unit = string.lower()
    # distance
    if unit == 'au':
        return constants.au
    # mass
    if unit in ('solarm', 'msun'):
        return constants.solarm
    # time
    if unit in ('year', 'years', 'yr', 'yrs'):
        return constants.year
    raise ValueError('Cannot convert unit')
def _make_passphrase(length=None, save=False, file=None):
    """Create a passphrase and write it to a file that only the user can read.

    This is not very secure, and should not be relied upon for actual key
    passphrases.

    :param int length: The length in bytes of the string to generate
        (defaults to 40).
    :param bool save: If True, append the passphrase to ``file`` and restrict
        its permissions to the owner.
    :param file file: The file to save the generated passphrase in. If not
        given, defaults to 'passphrase-<the real user id>-<seconds since
        epoch>' in the top-level directory.
    """
    if not length:
        length = 40
    passphrase = _make_random_string(length)
    if save:
        ruid, euid, suid = os.getresuid()
        gid = os.getgid()
        now = mktime(localtime())
        if not file:
            # Fixed: the original used str('passphrase-%s-%s' % uid, now),
            # which referenced an undefined `uid` and mis-placed the tuple.
            filename = 'passphrase-%s-%s' % (ruid, now)
            file = os.path.join(_repo, filename)
        with open(file, 'a') as fh:
            fh.write(passphrase)
            fh.flush()
        # Owner-only read/write; owned by the real uid.
        os.chmod(file, stat.S_IRUSR | stat.S_IWUSR)
        os.chown(file, ruid, gid)
        log.warn("Generated passphrase saved to %s" % file)
    return passphrase
def Mul(x, x_shape, y, y_shape, data_format=None):
    """mul

    Elementwise multiply of ``x`` and ``y``. When ``data_format`` (a pair of
    per-operand formats) is given, each operand is first broadcast against
    the other operand's shape via broadcast_by_format.
    """
    if data_format:
        x_new = broadcast_by_format(x, x_shape, data_format[0], y_shape)
        y_new = broadcast_by_format(y, y_shape, data_format[1], x_shape)
    else:
        # Shapes assumed compatible as-is; no broadcasting applied.
        x_new = x
        y_new = y
    return mul.mul(x_new, y_new) | 35,495
def new_authentication_challenge(usr: User) -> str:
    """
    Initiates an authentication challenge. The challenge proceeds as follows:
    1. A user (:class:`sni.user`) asks to start a challenge by calling
    this method.
    2. This methods returns a UUID, and the user has 60 seconds to change its
    teamspeak nickname to that UUID.
    3. The user notifies SNI that (s)he has done so.
    4. The server checks (see
    :meth:`sni.teamspeak.complete_authentication_challenge`), and if
    sucessful, the corresponding teamspeak client is registered in the
    database and bound to that user. The nickname is also automatically
    assigned.
    """
    logging.info(
        "Starting authentication challenge for %s", usr.character_name
    )
    # NOTE(review): despite the docstring, the challenge string is a 20-char
    # random code from utils.random_code, not a UUID.
    challenge_nickname = utils.random_code(20)
    # Upsert so a user restarting the challenge refreshes the same document
    # instead of creating duplicates.
    TeamspeakAuthenticationChallenge.objects(user=usr).update(
        set__challenge_nickname=challenge_nickname,
        set__created_on=utils.now(),
        set__user=usr,
        upsert=True,
    )
    return challenge_nickname | 35,496
def _clear_cache(context: CallbackContext, keep_save_pref: Optional[bool] = True) -> None:
    """Helper function to clear the user data.

    :param context: The context containing the user data to clear.
    :param keep_save_pref: Flag to indicate whether to keep user preference for global answer save.
    """
    # Save the global preference before wiping user data
    save_pref = None
    if keep_save_pref and _GLOBAL_SAVE_PREF in context.user_data.get(_SAVE_PREFS, {}).keys():
        save_pref = context.user_data.get(_SAVE_PREFS).get(_GLOBAL_SAVE_PREF)
    # Clear cache and stop all jobs
    if _PROCESSOR in context.user_data.keys() and isinstance(context.user_data.get(_PROCESSOR), FormProcessor):
        context.user_data.get(_PROCESSOR).reset()
    context.user_data.clear()
    for job in context.job_queue.jobs():
        job.schedule_removal()
    # Restore global save preference
    # NOTE(review): a stored falsy preference (e.g. False) is dropped by this
    # truthiness test — confirm whether that is intended.
    if save_pref:
        context.user_data[_SAVE_PREFS] = {_GLOBAL_SAVE_PREF: save_pref} | 35,497
def update_data(val):
    """Radio-button callback: switch the active data series and redraw.

    :param val: Label passed by the matplotlib widget (unused; the selection
        is read from ``rad_data.value_selected``).
    """
    # str_data_key / ser_data live at module scope. Without the global
    # declaration, the first print below raised UnboundLocalError because
    # str_data_key is assigned later in this function (making it local).
    global str_data_key, ser_data
    print('\n\nupdate_data()\nstr_data_key = '+str(str_data_key)+'\n')
    str_data_key = rad_data.value_selected
    ser_data = dic_data[str_data_key]
    print('str_data_key = '+str(str_data_key)+'\n')
    update(1)
def create_channel(application_key):
    """Create a channel.

    Args:
        application_key: A key to identify this channel on the server side.

    Returns:
        A string id that the client can use to connect to the channel.

    Raises:
        InvalidChannelTimeoutError: if the specified timeout is invalid.
        Other errors returned by _ToChannelError
    """
    request = channel_service_pb.CreateChannelRequest()
    response = channel_service_pb.CreateChannelResponse()
    request.set_application_key(application_key)

    try:
        apiproxy_stub_map.MakeSyncCall(_GetService(),
                                       'CreateChannel',
                                       request,
                                       response)
    # "except X as e" replaces the Python-2-only "except X, e" spelling and
    # is valid on both Python 2.6+ and Python 3.
    except apiproxy_errors.ApplicationError as e:
        raise _ToChannelError(e)

    return response.client_id()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.