content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def get_completed_exploration_ids(user_id, collection_id):
    """Returns a list of explorations the user has completed within the context
    of the provided collection.

    Args:
        user_id: str. ID of the given user.
        collection_id: str. ID of the collection.

    Returns:
        list(str). A list of exploration ids that the user with the given
        user id has completed within the context of the provided collection with
        the given collection id. The list is empty if the user has not yet
        completed any explorations within the collection, or if either the
        collection and/or user do not exist.

        A progress model isn't added until the first exploration of a collection
        is completed, so, if a model is missing, there isn't enough information
        to infer whether that means the collection doesn't exist, the user
        doesn't exist, or if they just haven't made any progress in that
        collection yet. Thus, we just assume the user and collection exist for
        the sake of this call, so it returns an empty list, indicating that no
        progress has yet been made.
    """
    # May legitimately be None when no progress has been recorded yet; see
    # the note in the docstring above.
    progress_model = user_models.CollectionProgressModel.get(
        user_id, collection_id)
    return progress_model.completed_explorations if progress_model else []
import re
def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.

    Keeps alphanumerics and ()!?,'` characters; everything else becomes a
    space, question marks are padded with spaces, runs of whitespace are
    collapsed, and the result is stripped and lowercased.
    """
    cleaned = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    cleaned = re.sub(r"\?", " ? ", cleaned)
    cleaned = re.sub(r"\s{2,}", " ", cleaned)
    return cleaned.strip().lower()
def transitive_closure(graph):
    """
    Compute the transitive closure of the graph.

    :param graph: a graph (list of directed pairs)
    :return: the transitive closure of the graph, as a list of pairs with
        self-loops removed
    """
    relations = set(graph)
    changed = True
    while changed:
        # Join every (a, b) with every (b, c) to derive (a, c).
        derived = {(a, c) for a, b in relations for mid, c in relations if mid == b}
        expanded = relations | derived
        changed = expanded != relations
        relations = expanded
    # Drop reflexive pairs before returning.
    return [(a, b) for (a, b) in relations if a != b]
def call_function(func_name, func_args, params, system):
    """
    Build a call to ``func_name`` with ``func_args``.

    func_args : list of values (int or string)
    return str or None if fail
    return ROPChain if success
    """
    linux = Systems.TargetSystem.Linux
    if system == linux and curr_arch_type() == ArchType.ARCH_X86:
        return call_function_linux_x86(func_name, func_args, params)
    if system == linux and curr_arch_type() == ArchType.ARCH_X64:
        return call_function_linux_x64(func_name, func_args, params)
    if system == linux and curr_arch_type() == ArchType.ARCH_ARM32:
        return call_function_linux_arm32(func_name, func_args, params)
    return "Not implemented yet for this system/arch"
import traceback
def error_handler(update, context):
    """Log Errors caused by Updates and end the conversation.

    Args:
        update: Incoming telegram update that triggered the error. Its
            ``effective_user`` may be None (e.g. channel posts).
        context: Callback context carrying the raised exception in
            ``context.error``.

    Returns:
        ConversationHandler.END, so the conversation is terminated cleanly.
    """
    # Guard against updates with no associated user: the previous code did
    # update.effective_user.id unconditionally, which made the error handler
    # itself raise AttributeError for user-less updates.
    user = getattr(update, 'effective_user', None)
    log.error(
        'with user: "%s (%s)"\nmessage: "%s"\ntraceback: %s',
        user,
        user.id if user is not None else None,
        context.error,
        traceback.format_exc()
    )
    return ConversationHandler.END
def jac(w, centred_img_patches, F, NUM_MODES):
    """
    The Jacobian of the numerical search procedure.

    Computed column-by-column with one-sided finite differences: each weight
    is perturbed in turn and the resulting change in the error vector is
    divided by the perturbation size.

    Parameters
    ----------
    w : numpy array (floats)
        Column vector of model weights, used to construct mapping.
    centred_img_patches : numpy array (floats)
        The mean-centred {p x NUM_PATCHES} array of p-elements image patches.
    F : numpy array (floats)
        Column vector of all errors.
    NUM_MODES : int
        Number of independent modes into which the image will be decomposed.

    Returns
    -------
    J : numpy array (floats)
        The Jacobian for the current error vector and set of weights.
    """
    # Initialise numerical perturbation and Jacobian array
    # NOTE(review): PERT = 1e-15 is close to double-precision machine epsilon
    # (~2.2e-16); for weights of order 1 the finite-difference quotient may be
    # dominated by rounding noise. Confirm this step size is intentional.
    PERT = 1e-15
    num_var = w.size
    num_err = F.size
    J = np.zeros([num_err, num_var])
    # Iterate over all weights and populate Jacobian
    for i in range(num_var):
        # Perturb only the i-th weight; all others stay at their current value.
        w_pert = w.copy()
        w_pert[i] = w[i] + PERT
        inverse_mapping_pert = generate_inverse_mapping(w_pert, centred_img_patches, NUM_MODES)
        sources_pert = map_patches_to_sources(inverse_mapping_pert, centred_img_patches)
        source_cov_pert = cov(sources_pert)
        # Finite-difference column: change in error vector per unit perturbation.
        dF = err(sources_pert, source_cov_pert) - F
        J[:,[i]] = dF/PERT
    return J
def get_session():
    """Return a new Session bound to the module-level engine, for DB access."""
    return Session(engine)
def sdm_ecart(f):
    """
    Compute the ecart of ``f``.

    This is defined to be the difference of the total degree of `f` and the
    total degree of the leading monomial of `f` [SCA, defn 2.3.7].

    Invalid if f is zero.

    Examples
    ========

    >>> from sympy.polys.distributedmodules import sdm_ecart
    >>> sdm_ecart([((1, 2, 3), 1), ((1, 0, 1), 1)])
    0
    >>> sdm_ecart([((2, 2, 1), 1), ((1, 5, 1), 1)])
    3
    """
    # Ecart = (total degree of f) - (total degree of its leading monomial).
    return sdm_deg(f) - sdm_monomial_deg(sdm_LM(f))
def draw_gif_frame(image, bbox, frame_no):
    """Draw one labelled bounding box plus the frame number on a frame.

    Input:
        - image: frame to draw on
        - bbox: rectangle info in the order: frame id, object id, x, y, w, h
        - frame_no: frame number rendered onto the image
    Output: frame that has been drawn on"""
    track_id = bbox[1]
    x_min = int(bbox[2])
    y_min = int(bbox[3])
    x_max = x_min + int(bbox[4])
    y_max = y_min + int(bbox[5])
    # Drawing parameters: red box/label, id drawn just above the box.
    box_color = (0, 0, 255)
    cv2.rectangle(image, (x_min, y_min), (x_max, y_max), box_color, 8)
    cv2.putText(image, str(track_id), (x_min, y_min - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 1, box_color, 3, cv2.LINE_4)
    put_text(image, str(frame_no))
    return image
def build_response(session_attributes, speechlet_response):
    """Assemble the top-level Alexa response envelope."""
    # Trace entry for debugging.
    debug_print("build_response")
    response = {'version': '1.0'}
    response['sessionAttributes'] = session_attributes
    response['response'] = speechlet_response
    return response
def create_host(api_client, orig_host_name, orig_host_uid, cloned_host_name, cloned_host_ip):
    """
    Create a new host object with 'cloned_host_name' as its name and 'cloned_host_ip' as its IP-address.
    The new host's color and comments will be copied from the original host object.
    :param api_client: Api client of the domain
    :param orig_host_name: original host name (used for logging only)
    :param orig_host_uid: original host uid
    :param cloned_host_name: cloned host name
    :param cloned_host_ip: cloned host IP
    :return: the cloned host uid on success, otherwise None
    """
    # get details of the original host object
    log("\n\tGathering information for host {}".format(orig_host_name))
    res = api_client.api_call("show-host", {"uid": orig_host_uid})
    if res.success is False:
        discard_write_to_log_file(api_client, "Failed to open existing host: {}. Aborting.".format(res.error_message))
        return None
    # copy the color and comments from the original host
    color = res.data["color"]
    comments = res.data["comments"]
    # create a new host object
    log("\n\tCreating a new host {}".format(cloned_host_name))
    res = api_client.api_call("add-host", {"name": cloned_host_name, "ip-address": cloned_host_ip,
                                           "color": color, "comments": comments})
    if res.success is False:
        discard_write_to_log_file(api_client, "Failed to create the new host: {}. Aborting.".format(res.error_message))
        return None
    return res.data["uid"]
def parenthesize(x):
    """Return a copy of x surrounded by open and close parentheses"""
    kind = type(x)
    if kind is deque:
        # deque does not support concatenation with +, so rebuild explicitly.
        return deque(['('] + list(x) + [')'])
    return kind('(') + x + kind(')')
def get_logo_color():
    """Return color of logo used in application main menu.

    RGB format (0-255, 0-255, 0-255). Orange applied.
    """
    red, green, blue = 255, 128, 0
    return (red, green, blue)
def DG(p,t,Ep=10):
    """
    Training by Gradient Descent (online delta rule with a logistic unit).

    Args (assumed -- confirm against callers):
        p: (m, n) array of training patterns, one row per example.
        t: length-m array/sequence of target values.
        Ep: number of training epochs.

    Returns:
        (w, b): learned weight vector and bias.
    """
    # m is the number of training patterns (examples) and n the number of
    # elements of the feature vector.
    m,n = p.shape
    a = 0.5  # learning rate
    #--- Initial weights ---
    # NOTE(review): the weight vector is hard-coded to length 2, so this
    # implicitly assumes n == 2 -- confirm with callers.
    w = np.random.uniform(-0.25,0.25,2)
    b = np.random.uniform(-0.25,0.25)
    # ----------------------
    for N in range(Ep): # iterate over number of epochs
        for ti in range(m): # iterate over number of patterns
            #---- Output ----
            net = np.dot(w,p[ti])+b
            y = logsig(net)
            #-----------------
            #--- Delta rule ---
            err = t[ti]- y
            Delta = 2*err*df(net)*p[ti]
            w = w + a*Delta
            b = b + a*2*err*df(net)
            #-----------------
    return w,b
import matplotlib.patheffects as pe
def plot_tc_errors(rec, legend=True, ax=None, per_stim=False,
                   ylim=(0, 200)):
    """
    Plot tuning curve (TC) sMAPE.

    .. WARNING:: Untested!
    .. TODO:: Test or remove `plot_tc_errors`.

    Parameters
    ----------
    rec : `.GANRecords`
        Record whose ``TC_mean`` table has 'epoch', 'gen' and 'data' columns.
    legend : bool
        Whether to draw a legend.
    ax : matplotlib axes or None
        Axes to draw on; a new figure/axes pair is created when None.
    per_stim : bool
        Also plot the per-stimulus error curves.
    ylim : tuple or None
        Y-axis limits (sMAPE is bounded to [0, 200]).

    Returns
    -------
    Namespace holding the axes, error arrays and line artists.
    """
    if ax is None:
        _, ax = pyplot.subplots()
    epoch = rec.TC_mean['epoch']
    # ``.values`` replaces ``.as_matrix()``: as_matrix was deprecated in
    # pandas 0.23 and removed in 1.0, which made this function crash there.
    model = rec.TC_mean['gen'].values
    true = rec.TC_mean['data'].values
    total_error = smape(model, true)
    total_error_lines = ax.plot(
        epoch,
        total_error,
        # White outline keeps the mean curve readable over per-stim lines.
        path_effects=[pe.Stroke(linewidth=5, foreground='white'),
                      pe.Normal()])
    if per_stim:
        # Symmetric MAPE per stimulus, in percent (range [0, 200]).
        per_stim_error = 200 * abs((model - true) / (model + true))
        per_stim_lines = ax.plot(epoch, per_stim_error, alpha=0.4)
    else:
        per_stim_error = per_stim_lines = None
    if legend:
        if per_stim:
            leg = ax.legend(
                total_error_lines + per_stim_lines,
                ['TC sMAPE'] + list(range(len(per_stim_lines))),
                loc='center left')
        else:
            leg = ax.legend(
                total_error_lines,
                ['TC sMAPE'],
                loc='upper left')
        leg.set_frame_on(True)
        leg.get_frame().set_facecolor('white')
    if ylim:
        ax.set_ylim(ylim)
    return Namespace(
        ax=ax,
        per_stim_error=per_stim_error,
        per_stim_lines=per_stim_lines,
        total_error=total_error,
        total_error_lines=total_error_lines,
    )
def delete_video_db(video_id):
    """Delete a video reference from the database.

    Args:
        video_id: Content ID of the row to remove.

    Returns:
        True once the deletion has been committed.
    """
    connection = connect_db()
    try:
        # Parameterized query keeps video_id safely escaped.
        connection.cursor().execute('DELETE FROM Content WHERE contentID=%s',
                                    (video_id,))
        connection.commit()
    finally:
        # Previously the connection leaked if execute/commit raised;
        # always release it.
        close_db(connection)
    return True
import numba
def int_to_float_fn(inputs, out_dtype):
    """Create a Numba function that converts integer and boolean ``ndarray``s to floats.

    If any input is of integer or boolean kind, the cast targets a float with
    the same itemsize as ``out_dtype``; otherwise it targets a float wide
    enough for the largest input itemsize.
    """
    # Compute the target dtype once; the original duplicated an identical
    # njit-compiled closure in both branches.
    if any(i.type.numpy_dtype.kind in "ib" for i in inputs):
        args_dtype = np.dtype(f"f{out_dtype.itemsize}")
    else:
        largest_itemsize = max(arg.type.numpy_dtype.itemsize for arg in inputs)
        args_dtype = np.dtype(f"f{largest_itemsize}")

    @numba.njit(inline="always")
    def inputs_cast(x):
        return x.astype(args_dtype)

    return inputs_cast
from numpy import sqrt
def vel_gradient(**kwargs):
    """
    Calculates velocity gradient across surface object in supersonic
    flow (from stagnation point) based upon either of two input variable
    sets.

    First method:
    vel_gradient(R_n = Object radius (or equivalent radius, for
                       shapes that are not axisymmetric),
                 p_0 = flow stagnation pressure,
                 p_inf = flow freestream static pressure,
                 rho = flow density)

    Second method:
    vel_gradient(R_n = Object radius (or equivalent radius, for
                       shapes that are not axisymmetric),
                 delta = Shock stand-off distance (from object
                         stagnation point),
                 U_s = Flow velocity immediately behind shock)

    Raises:
        KeyError: if neither complete set of keyword arguments is supplied.
    """
    if {'R_n', 'p_0', 'p_inf', 'rho'} <= kwargs.keys():
        # Stagnation-point estimate from the pressure difference.
        gradient = (1 / kwargs['R_n']) * sqrt(
            (2 * (kwargs['p_0'] - kwargs['p_inf'])) / kwargs['rho'])
    elif {'R_n', 'U_s', 'delta'} <= kwargs.keys():
        # Estimate from shock stand-off distance and post-shock velocity.
        b = kwargs['delta'] + kwargs['R_n']
        ratio = (b ** 3) / (kwargs['R_n'] ** 3)
        gradient = (kwargs['U_s'] / kwargs['R_n']) * (
            1 + ((2 + ratio) / (2 * (ratio - 1))))
    else:
        raise KeyError('Incorrect variable assignment')
    return gradient
from functools import reduce
def lcm(numbers):
    """ Get the least common multiple of a list of numbers
    ------------------------------------------------------------------------------------
    input: numbers [1,2,6] list of integers
    output: 6 integer

    Uses lcm(x, y) = x * y // gcd(x, y) folded over the input;
    returns 1 for an empty list.
    """
    from math import gcd  # local import: gcd is only needed here

    # Floor division keeps the arithmetic exact for arbitrarily large ints;
    # the previous int((x * y) / gcd(...)) went through a float and lost
    # precision beyond 2**53.
    return reduce(lambda x, y: x * y // gcd(x, y), numbers, 1)
def parseBracketed(idxst, pos):
    """parse an identifier in curly brackets.

    Here are some examples:
    >>> def test(st,pos):
    ...     idxst= IndexedString(st)
    ...     (a,b)= parseBracketed(idxst,pos)
    ...     print(st[a:b])
    ...
    >>> test(r'{abc}',0)
    {abc}
    >>> test(r'{ab8c}',0)
    {ab8c}
    >>> test(r'{c}',0)
    {c}
    >>> test(r'{}',0)
    Traceback (most recent call last):
        ...
    ParseException: command enclosed in curly brackets at line 1, col 1
    >>> test(r'{abc',0)
    Traceback (most recent call last):
        ...
    ParseException: command enclosed in curly brackets at line 1, col 1
    >>> test(r'x{ab8c}',1)
    {ab8c}
    """
    if not isinstance(idxst, IndexedString):
        raise TypeError("idxst par wrong: %s" % repr(idxst))
    match = rx_bracketed.match(idxst.st(), pos)
    if match is None:
        raise ParseException("command enclosed in curly brackets at",
                             rowcol= idxst.rowcol(pos))
    # Return the (start, end) span of the bracketed identifier.
    return (pos, match.end())
def compute_agg_tiv(tiv_df, agg_key, bi_tiv_col, loc_num):
    """ compute the agg tiv depending on the agg_key

    When 'is_bi_coverage' is part of the key, BI and non-BI coverages are
    aggregated separately; otherwise the summed TIV is used directly.
    """
    value_cols = list(set(agg_key + ['tiv', 'tiv_sum', bi_tiv_col]))
    deduped = tiv_df.drop_duplicates(agg_key + [loc_num], keep='first')[value_cols]
    agg_tiv_df = deduped.groupby(agg_key, observed=True).sum().reset_index()
    if 'is_bi_coverage' in agg_key:
        # we need to separate bi coverage from the other tiv
        non_bi_rows = agg_tiv_df['is_bi_coverage'] == False
        bi_rows = agg_tiv_df['is_bi_coverage'] == True
        agg_tiv_df.loc[non_bi_rows, 'agg_tiv'] = agg_tiv_df['tiv_sum'] - agg_tiv_df[bi_tiv_col]
        agg_tiv_df.loc[bi_rows, 'agg_tiv'] = agg_tiv_df[bi_tiv_col]
    else:
        agg_tiv_df['agg_tiv'] = agg_tiv_df['tiv_sum']
    return agg_tiv_df[agg_key + ['agg_tiv']]
from typing import List
def convert_configurations_to_array(configs: List[Configuration]) -> np.ndarray:
    """Impute inactive hyperparameters in configurations with their default.

    Necessary to apply an EPM to the data.

    Parameters
    ----------
    configs : List[Configuration]
        List of configuration objects.

    Returns
    -------
    np.ndarray
        Array with configuration hyperparameters. Inactive values are imputed
        with their default value.
    """
    configs_array = np.array([config.get_array() for config in configs],
                             dtype=np.float64)
    configuration_space = configs[0].configuration_space
    for hp in configuration_space.get_hyperparameters():
        default = hp._inverse_transform(hp.default)
        idx = configuration_space.get_idx_by_hyperparameter_name(hp.name)
        # Create a mask which is True for all non-finite entries in column idx!
        # Uses the builtin ``bool`` dtype: the ``np.bool`` alias was removed
        # in NumPy 1.24 and raises AttributeError there.
        column_mask = np.zeros(configs_array.shape, dtype=bool)
        column_mask[:, idx] = True
        nonfinite_mask = ~np.isfinite(configs_array)
        mask = column_mask & nonfinite_mask
        configs_array[mask] = default
    return configs_array
from typing import Optional
from datetime import datetime
import requests
def annual_mean(
    start: Optional[datetime] = None,
    end: Optional[datetime] = None
) -> dict:
    """Get the annual mean data
    ----------------------------
    Data from March 1958 through April 1974 have been obtained by C. David Keeling
    of the Scripps Institution of Oceanography (SIO) and were obtained from the
    Scripps website (scrippsco2.ucsd.edu).

    The estimated uncertainty in the annual mean is the standard deviation
    of the differences of annual mean values determined independently by
    NOAA/ESRL and the Scripps Institution of Oceanography.

    NOTE: In general, the data presented for the last year are subject to change,
    depending on recalibration of the reference gas mixtures used, and other quality
    control procedures. Occasionally, earlier years may also be changed for the same
    reasons. Usually these changes are minor.

    CO2 expressed as a mole fraction in dry air, micromol/mol, abbreviated as ppm

    Args:
        start: earliest year to include (inclusive); None means no lower bound.
        end: latest year to include (inclusive); None means no upper bound.

    Returns:
        dict with the raw response text, its license/description/header
        sections, and a "data" mapping of parallel per-year string lists.

    Raises:
        TypeError: if start or end is given but is not a datetime.
    """
    if start and not isinstance(start, datetime):
        raise TypeError("Start must be a datetime object")
    if end and not isinstance(end, datetime):
        raise TypeError("End must be a datetime object")
    url = 'https://www.esrl.noaa.gov/gmd/webdata/ccgg/trends/co2/co2_annmean_mlo.txt'
    res = requests.get(url)
    raw = res.content.decode("utf-8")
    lines = raw.splitlines()
    # NOTE(review): fixed offsets mirror the current layout of the NOAA file
    # (41 license lines, 15 description lines, 1 header line); this silently
    # breaks if the upstream format changes -- verify on failure.
    _license = "\n".join(lines[:41])
    description = "\n".join(lines[41:56])
    headers = lines[56]
    mean = {
        "url": url,
        "license": _license,
        "description": description,
        "headers": headers,
        "raw": raw,
        "data": {
            "yr": [],
            "mean (ppm)": [],
            "unc": [],
        },
    }
    # Parse data
    for row in lines[57:]:
        yr, ppm, unc = row.split()
        date = datetime(year=int(yr), month=1, day=1)
        if start and start > date:
            continue
        # Rows are assumed to be in ascending year order, so stop at the
        # first year past ``end``.
        if end and end < date:
            break
        mean["data"]["yr"].append(yr)
        mean["data"]["mean (ppm)"].append(ppm)
        mean["data"]["unc"].append(unc)
    return mean
def exception(logger, extraLog=None):
    """
    A decorator that wraps the passed in function and logs
    exceptions should one occur.

    @param logger: The logging object
    @param extraLog: optional dict passed to the logger as ``extra`` context
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception:
                # Log the exception (with traceback) under the decorated
                # function's name, then re-raise so callers still see the
                # original error.
                err = "There was an exception in "
                err += func.__name__
                logger.exception(err, extra=extraLog)
                raise
        return wrapper
    # NOTE: the original contained Python-2 ``print`` statements used for
    # debugging; they were a SyntaxError on Python 3 and have been removed.
    return decorator
def read_external_sources(service_name):
    """
    Try to get config from external sources, with the following priority:
    1. Credentials file(ibm-credentials.env)
    2. Environment variables
    3. VCAP Services(Cloud Foundry)
    :param service_name: The service name
    :return: dict
    """
    # Try each source in priority order; stop at the first non-empty result.
    for reader in (read_from_credential_file,
                   read_from_env_variables,
                   read_from_vcap_services):
        config = reader(service_name)
        if config:
            return config
    return config
import torch
def intersect(box_a, box_b):
    """ Compute pairwise intersection areas between two sets of boxes.

    Both inputs hold corner-format boxes (x1, y1, x2, y2). Each tensor is
    broadcast to [A,B,2] (without new malloc) so every box in box_a is
    compared against every box in box_b.

    Args:
        box_a: (tensor) bounding boxes, Shape: [A,4].
        box_b: (tensor) bounding boxes, Shape: [B,4].
    Return:
        (tensor) intersection area, Shape: [A,B].
    """
    num_a = box_a.size(0)
    num_b = box_b.size(0)
    # Lower-right corner of the overlap: elementwise min of the max corners.
    lower_right = torch.min(box_a[:, 2:].unsqueeze(1).expand(num_a, num_b, 2),
                            box_b[:, 2:].unsqueeze(0).expand(num_a, num_b, 2))
    # Upper-left corner of the overlap: elementwise max of the min corners.
    upper_left = torch.max(box_a[:, :2].unsqueeze(1).expand(num_a, num_b, 2),
                           box_b[:, :2].unsqueeze(0).expand(num_a, num_b, 2))
    # Negative extents mean no overlap, so clamp to zero before multiplying.
    extents = torch.clamp(lower_right - upper_left, min=0)
    return extents[:, :, 0] * extents[:, :, 1]
import re
def tokenize(text):
    """ tokenize text messages

    Replaces URLs with a placeholder, strips punctuation, lowercases,
    tokenizes, removes English stop words and lemmatizes.

    Input: text messages
    Output: list of tokens
    """
    # find urls and replace them with 'urlplaceholder'
    url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    text = re.sub(url_regex, 'urlplaceholder', text)
    # normalize case and remove punctuation
    text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
    raw_tokens = word_tokenize(text)
    # lemmatize and remove stop words
    lemmatizer = WordNetLemmatizer()
    stop_words = stopwords.words("english")
    return [lemmatizer.lemmatize(token)
            for token in raw_tokens
            if token not in stop_words]
import scipy
import copy
def peakFindBottom(x, y, peaks, fig=None, verbose=1):
    """ Find the left bottom of a detected peak

    Args:
        x (array): independent variable data
        y (array): signal data
        peaks (list): list of detected peaks
        fig (None or int): if integer, then plot results
        verbose (int): verbosity level

    Returns:
        list: deep copy of ``peaks``; each peak with a findable bottom gains
        the keys 'pbottomlow', 'pbottom', 'pbottoml', 'xbottom', 'xbottoml',
        'vbottom' and 'ybottoml'; otherwise the peak is marked invalid with
        validreason 'peakFindBottom'.
    """
    # Smooth the signal with a 3-point moving average before differentiating.
    kk = np.ones(3) / 3.
    ys = scipy.ndimage.filters.correlate1d(y, kk, mode='nearest')
    peaks = copy.deepcopy(peaks)
    # First derivative, zero-padded at the end to keep the original length.
    dy = np.diff(ys, n=1)
    dy = np.hstack((dy, [0]))
    # Boxcar-filter the derivative; the kernel scales with the signal length.
    kernel_size= [int(np.max([2, dy.size / 100])), ]
    dy = qtt.algorithms.generic.boxcar_filter(dy, kernel_size = kernel_size)
    for ii, peak in enumerate(peaks):
        if verbose:
            print('peakFindBottom: peak %d' % ii)
        if not peak['valid']:
            continue
        # Only search to the left of the peak's half-height index.
        ind = range(peak['phalf0'])
        left_of_peak = 0 * y.copy()
        left_of_peak[ind] = 1
        # NOTE(review): r is a plain range(); the comparison ``r >= ...``
        # below raises TypeError on Python 3 -- this may need np.arange.
        r = range(y.size)
        left_of_peak_and_decreasing = left_of_peak * (dy < 0)  # set w to zero where the scan is increasing
        left_of_peak_and_decreasing[0] = 1  # make sure to stop at the left end of the scan...
        ww = left_of_peak_and_decreasing.nonzero()[0]
        if verbose >= 2:
            print(' peakFindBottom: size of decreasing area %d' % ww.size)
        if ww.size == 0:
            # No decreasing region to the left: no bottom can be located.
            if peak['valid']:
                peak['valid'] = 0
                peak['validreason'] = 'peakFindBottom'
            if verbose >= 2:
                print('peakFindBottom: invalid peak')
                print(ind)
                print(dy)
            continue
        bidx = ww[-1]
        # Rightmost decreasing point: the low estimate of the bottom.
        peak['pbottomlow'] = bidx
        w = left_of_peak * (dy > 0)  # we need to be rising
        # we need to be above 10% of absolute low value
        w = w * (ys < ys[bidx] + .1 * (ys[peak['p']] - ys[bidx]))
        w = w * (r >= peak['pbottomlow'])
        ww = w.nonzero()[0]
        if ww.size == 0:
            if peak['valid']:
                peak['valid'] = 0
                peak['validreason'] = 'peakFindBottom'
            if verbose >= 2:
                print('peakFindBottom: invalid peak (%s)' % ('rising part ww.size == 0',))
                print(w)
                print(ys)
            continue
        bidx = ww[-1]
        # Record the bottom point under several (partly legacy) keys.
        peak['pbottom'] = bidx
        peak['pbottoml'] = bidx
        peak['xbottom'] = x[bidx]
        peak['xbottoml'] = x[bidx]
        peak['vbottom'] = y[bidx]  # legacy
        peak['ybottoml'] = y[bidx]
        if verbose >= 3:
            # Diagnostic plot of the masks used in the bottom search.
            plt.figure(53)
            plt.clf()
            plt.plot(x[ind], 0 * np.array(ind) + 1, '.b', label='ind')
            plt.plot(x[range(y.size)], w, 'or', label='w')
            plt.plot(x[range(y.size)], dy < 0, 'dg',
                     markersize=12, label='dy<0')
            pgeometry.enlargelims()
            pgeometry.plot2Dline([-1, 0, peak['x']], '--c', label='x')
            pgeometry.plot2Dline([-1, 0, x[peak['phalf0']]],
                                 '--y', label='phalf0')
            pgeometry.plot2Dline([-1, 0, x[peak['pbottomlow']]],
                                 ':k', label='pbottomlow')
            pgeometry.plot2Dline([-1, 0, peak['xbottoml']],
                                 '--y', label='xbottoml')
            plt.legend(loc=0)
    return peaks
def replacelast(string, old, new, count=1):
    """Replace the last ``count`` occurrences of ``old`` in ``string`` with ``new``."""
    pieces = string.rsplit(old, count)
    return new.join(pieces)
import os
def parse_config(config):
    """Parse the config dictionary for common objects.

    Currently only parses the following:

    * `directories` for relative path names (made absolute under $PANDIR).

    Args:
        config (dict): Config items.

    Returns:
        dict: Config items but with objects.
    """
    # Prepend the base directory to relative dirs
    if 'directories' in config:
        base_dir = os.getenv('PANDIR')
        directories = config['directories']
        for dir_name, rel_dir in directories.items():
            abs_dir = os.path.normpath(os.path.join(base_dir, rel_dir))
            # Leave entries alone that are already absolute/normalized.
            if abs_dir != rel_dir:
                directories[dir_name] = abs_dir
    return config
def _safe_read_img(img):
"""
Read in tiff image if a path is given instead of np object.
"""
img = imread(img) if isinstance(img, str) else np.array(img)
return np.nan_to_num(img) | a9a50b5ad76a6ed5833c2c149b1366b318814d6a | 22,030 |
def max_version(*modules: Module) -> str:
    """Maximum version number of a sequence of modules/version strings

    See `get_version` for how version numbers are extracted. They are compared
    as `packaging.version.Version` objects.
    """
    versions = (get_version(module) for module in modules)
    return str(max(versions))
def load_secret(name, default=None):
    """Check for and load a secret value mounted by Docker in /run/secrets.

    Args:
        name: Secret file name under /run/secrets.
        default: Value returned when the secret cannot be read.

    Returns:
        The stripped secret contents, or ``default`` if the file is missing
        or unreadable.
    """
    try:
        with open(f"/run/secrets/{name}", encoding="utf-8") as f:
            return f.read().strip()
    except (OSError, UnicodeDecodeError):
        # Narrowed from a bare ``except Exception`` so that programming
        # errors are no longer silently swallowed; missing or unreadable
        # secret files still fall back to the default.
        return default
from re import sub


def snake_case(x):
    """
    Converts a string to snake case.

    Runs of whitespace become underscores; an uppercase letter that starts a
    new word gets an underscore before it and is lowercased, while runs of
    consecutive capitals (acronyms such as XML, JSON, HTML) are kept
    uppercase. Duplicate underscores are collapsed and a trailing underscore
    is stripped.
    """
    # NOTE: this previously did ``from operator import sub`` -- operator.sub
    # is two-argument subtraction, so the first call below raised TypeError.
    # The code clearly intends re.sub.
    x = sub(r'\s+', '_', x)  # replace consecutive whitespace with underscores
    out = ''
    for i, char in enumerate(x):
        if char.isupper():
            # Guard the lookahead: the original indexed x[i + 1] directly and
            # raised IndexError whenever the string ended in a capital.
            next_char = x[i + 1] if i + 1 < len(x) else ''
            previous_char = x[i - 1]
            if not i == 0:
                if previous_char.islower():
                    out += '_'
                    # End-of-string counts as a word boundary, so a trailing
                    # single capital is lowercased like any word start.
                    if next_char.islower() or next_char == '_' or not next_char:
                        out += char.lower()
                        continue
                elif previous_char == '_':
                    if next_char.islower() or next_char == '_' or not next_char:
                        out += char.lower()
                        continue
                elif next_char.isupper():
                    # Previous and next are both capitals: part of an acronym.
                    out += char
                    continue
                else:
                    out += char.lower()
                    continue
        elif not char == '_' and x[i - 1].isupper() and x[i - 2].isupper():
            # A lowercased word may directly follow an acronym with no space;
            # insert an underscore to break it into its own word.
            out += '_'
        elif char == '_' and x[i - 1] == '_':
            continue  # collapse duplicate underscores
        out += char
    if out.endswith('_'):
        out = out[:len(out) - 1]
    return out
async def process_manga(data_list: list[dict], image_path: str) -> str:
    """Whiten-out and re-letter a single manga page image.

    For every OCR-detected text region, the original text area is painted
    white and the replacement text is drawn back in, vertically or
    horizontally depending on the region's ``is_vertical`` flag. The image
    file is overwritten in place.

    Args:
        data_list (list[dict]): OCR-recognized text regions (re-wrapped).
        image_path (str): Path the image was downloaded to (also used as the
            final save/overwrite path).

    Returns:
        str: The path the processed image was saved to.
    """
    image = Image.open(image_path).convert("RGB")
    for i in data_list:
        # Paint the original text area white before drawing replacement text.
        image = await draw_white(image, i)
        if i["is_vertical"]:
            image = await add_text_for_manga(image, i)
        else:
            image = await add_text(image, i)
    image.save(image_path)
    return image_path
def padded_nd_indices(is_valid, shuffle=False, seed=None):
  """Pads the invalid entries by valid ones and returns the nd_indices.

  For example, when we have a batch_size = 1 and list_size = 3. Only the first 2
  entries are valid. We have:
  ```
  is_valid = [[True, True, False]]
  nd_indices, mask = padded_nd_indices(is_valid)
  ```
  nd_indices has a shape [1, 3, 2] and mask has a shape [1, 3].

  ```
  nd_indices = [[[0, 0], [0, 1], [0, 0]]]
  mask = [[True, True, False]]
  ```
  nd_indices can be used by gather_nd on a Tensor t
  ```
  padded_t = tf.gather_nd(t, nd_indices)
  ```
  and get the following Tensor with first 2 dims are [1, 3]:
  ```
  padded_t = [[t(0, 0), t(0, 1), t(0, 0)]]
  ```

  Args:
    is_valid: A boolean `Tensor` for entry validity with shape [batch_size,
      list_size].
    shuffle: A boolean that indicates whether valid indices should be shuffled.
    seed: Random seed for shuffle.

  Returns:
    A tuple of Tensors (nd_indices, mask). The first has shape [batch_size,
    list_size, 2] and it can be used in gather_nd or scatter_nd. The second has
    the shape of [batch_size, list_size] with value True for valid indices.
  """
  with tf.compat.v1.name_scope(name='nd_indices_with_padding'):
    is_valid = tf.convert_to_tensor(value=is_valid)
    list_size = tf.shape(input=is_valid)[1]
    num_valid_entries = tf.reduce_sum(
        input_tensor=tf.cast(is_valid, dtype=tf.int32), axis=1)
    # Indices wrap around the valid prefix, so invalid slots reuse valid
    # positions (the padding behavior documented above).
    indices, mask = _circular_indices(list_size, num_valid_entries)
    # Valid indices of the tensor are shuffled and put on the top.
    # [batch_size, list_size, 2].
    shuffled_indices = organize_valid_indices(
        is_valid, shuffle=shuffle, seed=seed)
    # Construct indices for gather_nd [batch_size, list_size, 2].
    nd_indices = _to_nd_indices(indices)
    nd_indices = tf.gather_nd(shuffled_indices, nd_indices)
    return nd_indices, mask
def part_two(data: str) -> int:
    """The smallest number leading to an md5 hash with six leading zeros for data.

    Delegates to `smallest_number_satisfying` with the six-zero predicate.
    """
    return smallest_number_satisfying(data, starts_with_six_zeros)
def showlatesttag(context, mapping):
    """List of strings. The global tags on the most recent globally
    tagged ancestor of this changeset. If no such tags exist, the list
    consists of the single string "null".
    """
    # Delegate to showlatesttags with no pattern, i.e. match all global tags.
    return showlatesttags(context, mapping, None)
def list_filters():
    """
    List all filters
    """
    serialized = list(map(_serialize_filter, FILTERS.values()))
    return response_list(serialized)
def len_adecuada(palabra, desde, hasta):
    """
    (str, int, int) -> str

    Report (in Spanish) whether the length of `palabra` falls within the
    inclusive range [desde, hasta].

    >>> len_adecuada('hola', 0, 100)
    'La longitud de hola, está entre 0 y 100'
    >>> len_adecuada('hola', 1, 2)
    'La longitud de hola, no está entre 1 y 2'

    :param palabra:
    :param desde:
    :param hasta:
    :return:
    """
    en_rango = desde <= len(palabra) <= hasta
    negacion = "" if en_rango else "no "
    return 'La longitud de {0}, {1}está entre {2} y {3}'.format(
        palabra, negacion, desde, hasta)
import warnings
def ECEF_from_ENU(enu, latitude, longitude, altitude):
    """
    Calculate ECEF coordinates from local ENU (east, north, up) coordinates.

    Args:
        enu: numpy array, shape (Npts, 3), with local ENU coordinates
        latitude: latitude of center of ENU coordinates in radians
        longitude: longitude of center of ENU coordinates in radians
        altitude: altitude of center of ENU coordinates (same units as the
            ECEF frame; presumably meters -- confirm against XYZ_from_LatLonAlt)

    Returns:
        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates
    """
    enu = np.array(enu)
    if enu.ndim > 1 and enu.shape[1] != 3:
        if enu.shape[0] == 3:
            # Legacy (3, Npts) layout: accept with a deprecation warning.
            warnings.warn('The expected shape of the ENU array is (Npts, 3). '
                          'Support for arrays shaped (3, Npts) will go away in a '
                          'future version.', PendingDeprecationWarning)
            enu_use = enu.T
            transpose = True
        else:
            raise ValueError('The expected shape of the ENU array array is (Npts, 3).')
    else:
        enu_use = enu
        transpose = False
    if enu.shape == (3, 3):
        # A 3x3 input is ambiguous between (Npts, 3) and (3, Npts).
        warnings.warn('The enu array in ECEF_from_ENU is being '
                      'interpreted as (Npts, 3). Historically this function '
                      'has supported (3, Npts) arrays, please verify that '
                      'array ordering is as expected.', PendingDeprecationWarning)
    if enu_use.ndim == 1:
        # Promote a single (3,) point to a (1, 3) array.
        enu_use = enu_use[np.newaxis, :]
    # Rotate the ENU components into the ECEF axes.
    xyz = np.zeros_like(enu_use)
    xyz[:, 0] = (-np.sin(latitude) * np.cos(longitude) * enu_use[:, 1]
                 - np.sin(longitude) * enu_use[:, 0]
                 + np.cos(latitude) * np.cos(longitude) * enu_use[:, 2])
    xyz[:, 1] = (-np.sin(latitude) * np.sin(longitude) * enu_use[:, 1]
                 + np.cos(longitude) * enu_use[:, 0]
                 + np.cos(latitude) * np.sin(longitude) * enu_use[:, 2])
    xyz[:, 2] = (np.cos(latitude) * enu_use[:, 1]
                 + np.sin(latitude) * enu_use[:, 2])
    # Translate by the ECEF position of the ENU origin.
    xyz_center = XYZ_from_LatLonAlt(latitude, longitude, altitude)
    xyz[:, 0] = xyz[:, 0] + xyz_center[0]
    xyz[:, 1] = xyz[:, 1] + xyz_center[1]
    xyz[:, 2] = xyz[:, 2] + xyz_center[2]
    # Restore the caller's original layout: 1-D in, 1-D out; (3, Npts) in,
    # (3, Npts) out.
    if len(enu.shape) == 1:
        xyz = np.squeeze(xyz)
    elif transpose:
        return xyz.T
    return xyz
def showresults(options=''):
    """
    Generate and plot results from a kima run.

    The argument `options` should be a string with the same options as for
    the kima-showresults script.
    """
    # force correct CLI arguments
    args = _parse_args(options)

    # Map each CLI flag onto the plot numbers it enables.
    flag_plots = (
        ('rv', ['6']),
        ('planets', ['1']),
        ('orbital', ['2', '3']),
        ('gp', ['4', '5']),
        ('extra', ['7']),
    )
    plots = []
    for flag, numbers in flag_plots:
        if getattr(args, flag):
            plots.extend(numbers)
    plots.extend(args.plot_number)

    try:
        evidence, H, logx_samples = postprocess(plot=args.diagnostic)
    except IOError as e:
        print(e)
        sys.exit(1)

    res = KimaResults(list(set(plots)))
    show()  # render the plots

    # __main__.__file__ doesn't exist in the interactive interpreter,
    # where returning the results object is actually useful.
    if not hasattr(__main__, '__file__'):
        return res
import re
def convert_as_number(symbol: str) -> float:
    """
    Parse a human-formatted numeric string into a float.

    Handled cases:
        ' ' or ''       -> 0
        '10.95%'        -> 10.95
        '$404,691,250'  -> 404691250
        '$8105.52'      -> 8105.52

    :param symbol: string
    :return: float
    """
    stripped = symbol.strip()
    if not stripped:
        return 0
    # Drop currency/percent symbols, thousands separators and stray
    # spaces/asterisks before the numeric conversion.
    cleaned = re.sub('[%$, *]', '', stripped)
    return float(cleaned)
import pandas as pd
import os
def mgus(path):
    """Monoclonal gammapothy data.

    Natural history of 241 subjects with monoclonal gammapothy of
    undetermined significance (MGUS). Columns:

      id      subject id
      age     age in years at the detection of MGUS
      sex     `male` or `female`
      dxyr    year of diagnosis
      pcdx    for subjects who progress to a plasma cell malignancy, the
              subtype of malignancy: multiple myeloma (MM, most common),
              amyloidosis (AM), macroglobulinemia (MA), or other
              lymphprolifative disorders (LP)
      pctime  days from MGUS until diagnosis of a plasma cell malignancy
      futime  days from diagnosis to last follow-up
      death   1 = follow-up is until death
      alb     albumin level at MGUS diagnosis
      creat   creatinine at MGUS diagnosis
      hgb     hemoglobin at MGUS diagnosis
      mspike  size of the monoclonal protein spike at diagnosis

    (A companion `mgus1` table exists in (start, stop) counting-process
    format with status/event/enum columns; it is not returned here.)

    Mayo Clinic data courtesy of Dr. Robert Kyle.

    Args:
      path: str.
        Path to directory which either stores `mgus.csv` or into which the
        file will be downloaded and extracted.

    Returns:
      Tuple of np.ndarray `x_train` with 241 rows and 12 columns and
      dictionary `metadata` of column headers (feature names).
    """
    path = os.path.expanduser(path)
    filename = 'mgus.csv'
    filepath = os.path.join(path, filename)
    if not os.path.exists(filepath):
        url = 'http://dustintran.com/data/r/survival/mgus.csv'
        maybe_download_and_extract(path, url,
                                   save_file_name=filename,
                                   resume=False)
    data = pd.read_csv(filepath, index_col=0, parse_dates=True)
    return data.values, {'columns': data.columns}
def smiles_dict():
    """Store SMILES for compounds used in test cases here."""
    compounds = {
        "ATP": (
            "Nc1ncnc2c1ncn2[C@@H]1O[C@H](COP(=O)(O)OP(=O)(O)OP(=O)(O)O)"
            "[C@@H](O)[C@H]1O"
        ),
        "ADP": (
            "Nc1ncnc2c1ncn2[C@@H]1O[C@H](COP(=O)(O)OP(=O)(O)O)"
            "[C@@H](O)[C@H]1O"
        ),
        "meh": "CCC(=O)C(=O)O",
        "l_ala": "C[C@H](N)C(=O)O",
        "d_ala": "C[C@@H](N)C(=O)O",
        "FADH": (
            "Cc1cc2c(cc1C)N(CC(O)C(O)C(O)COP(=O)(O)OP(=O)(O)OCC1OC"
            "(n3cnc4c(N)ncnc43)C(O)C1O)c1nc(O)nc(O)c1N2"
        ),
        "S-Adenosylmethionine": (
            "C[S+](CC[C@H](N)C(=O)O)C[C@H]1O[C@@H](n2cnc3c(N)ncnc32)"
            "[C@H](O)[C@@H]1O"
        ),
    }
    return compounds
def prepare_parser() -> ArgumentParser:
    """Create all CLI parsers/subparsers."""
    # Top-level ("core") parser.
    root_parser = ArgumentParser(
        description="Learning (Hopefully) Safe Agents in Gridworlds"
    )
    handle_parser_args({"core": root_parser}, "core", core_parser_configs)

    # One required subcommand per gridworld environment.
    env_subparsers = root_parser.add_subparsers(
        help="Types of gridworld environments", dest="env_alias"
    )
    env_subparsers.required = True
    env_parsers = {}
    for env_name in ENV_MAP:
        env_parsers[env_name] = env_subparsers.add_parser(env_name)
        handle_parser_args(env_parsers, env_name, env_parser_configs)

    # Nested under every environment: one required subcommand per agent type.
    for env_name, env_parser in env_subparsers.choices.items():
        # Work on a private copy so that handling args for one environment
        # does not mutate the stashed configs used by the next one.
        agent_parser_configs = deepcopy(stashed_agent_parser_configs)
        agent_subparsers = env_parser.add_subparsers(
            help="Types of agents", dest="agent_alias"
        )
        agent_subparsers.required = True
        agent_parsers = {}
        for agent_name in AGENT_MAP:
            agent_parsers[agent_name] = agent_subparsers.add_parser(agent_name)
            handle_parser_args(agent_parsers, agent_name, agent_parser_configs)

    return root_parser
def import_google(authsub_token, user):
    """
    Uses the given AuthSub token to retrieve Google Contacts and
    import the entries with an email address into the contacts of the
    given user.

    Entries without an email address contribute nothing; entries with
    several addresses create one contact per address. Addresses the user
    already has are counted but not re-imported.

    Returns a tuple of (number imported, total number of entries).
    """
    contacts_service = gdata.contacts.service.ContactsService()
    contacts_service.auth_token = authsub_token
    # Exchange the single-use AuthSub token for a reusable session token.
    contacts_service.UpgradeToSessionToken()
    entries = []
    # Page through the contacts feed until there is no "next" link.
    feed = contacts_service.GetContactsFeed()
    entries.extend(feed.entry)
    next_link = feed.GetNextLink()
    while next_link:
        feed = contacts_service.GetContactsFeed(uri=next_link.href)
        entries.extend(feed.entry)
        next_link = feed.GetNextLink()
    total = 0
    imported = 0
    for entry in entries:
        name = entry.title.text
        for e in entry.email:
            email = e.address
            # NOTE: `total` counts email addresses, not feed entries.
            total += 1
            try:
                # Only create the contact if this user doesn't have it yet.
                Contact.objects.get(user=user, email=email)
            except Contact.DoesNotExist:
                Contact(user=user, name=name, email=email).save()
                imported += 1
    return imported, total
def getGpsTime(dt):
    """Return GPS time (seconds since midnight Sat/Sun) for a datetime.

    GPS weeks start at Sunday 00:00; sub-second precision is ignored.
    """
    # Re-index the weekday so Sunday = 0, Monday = 1, ..., Saturday = 6.
    day_index = (dt.weekday() + 1) % 7
    seconds_into_day = dt.hour * 3600 + dt.minute * 60 + dt.second
    return day_index * 24 * 3600 + seconds_into_day
def class_logger(module_logger, attribute="logger"):
    """
    Class decorator that attaches a child Logger object as a class
    attribute, named after the decorated class. This allows control of
    debugging messages at the class level rather than just the module
    level.

    This decorator takes the module logger as an argument.
    """
    def decorator(cls):
        child_logger = module_logger.getChild(cls.__name__)
        setattr(cls, attribute, child_logger)
        return cls
    return decorator
from datetime import datetime
def validate_date(date, flash_errors=True):
    """
    Validates date string. Format should be YYYY-MM-DD. Flashes errors if
    flash_errors is True.

    :param date: date string to validate
    :param flash_errors: if True, flash a user-facing error message on failure
    :return: True if `date` parses as YYYY-MM-DD, else False
    """
    try:
        # Fix: `datetime` is the class (from `from datetime import datetime`),
        # so the previous `datetime.datetime.strptime` raised AttributeError.
        datetime.strptime(date, '%Y-%m-%d')
    except ValueError:
        if flash_errors:
            flask.flash('Invalid date provided. Make sure dates are in YYYY-MM-DD format.')
        return False
    return True
def generate_error_map(image, losses, box_lenght):
    """
    Function to overlap an error map to an image

    Args:
        image: input image (assumed H x W x 3 RGB -- converted to BGR for
            OpenCV; TODO confirm expected dtype is uint8)
        losses: list of losses, one for each masked part of the flow.
        box_lenght: side length in pixels of each square loss box
    Returs:
        error_map: overlapped error_heatmap and image.
    """
    box_lenght = int(box_lenght)
    # Assert that everything is correct
    num_boxes = int(image.shape[0] / box_lenght) * int(image.shape[1] / box_lenght)
    assert(num_boxes ==len(losses))
    # Crop so both sides are exact multiples of box_lenght.
    img_width = int(np.floor(image.shape[1] / box_lenght) * box_lenght)
    img_height = int(np.floor(image.shape[0] / box_lenght) * box_lenght)
    image = image[:img_height, :img_width]
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    heatmap = np.ones_like(image[:,:,0])
    # NOTE(review): this reshape only matches total element count; the
    # (box, box, num_boxes) layout is not spatial tiling. Since the source
    # is all-ones, only the broadcast with `losses` (one value per box along
    # the last axis) matters here.
    res_heatmap = np.reshape(heatmap, (box_lenght, box_lenght, num_boxes))
    res_heatmap = res_heatmap * np.array(losses)
    heatmap = np.zeros((img_height, img_width))
    # ugly for loop, unable to solve atm
    # Paint box i (row-major over the grid) with its loss value.
    i = 0
    for y in np.arange(0, img_height, step=box_lenght):
        for x in np.arange(0, img_width, step=box_lenght):
            # convert to x,y coordinates
            heatmap[y: y+box_lenght, x: x+box_lenght] = res_heatmap[:,:,i]
            i+=1
    # Normalize to 8-bit, colorize with a jet colormap, and alpha-blend
    # 50/50 onto the (postprocessed) image.
    heatmap = np.asarray(heatmap / np.max(heatmap) * 255, dtype=np.uint8)
    heatmap_img = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
    final = cv2.addWeighted(heatmap_img, 0.5, postprocess_image(image), 0.5, 0)
    return final
def get_order(oid):  # noqa: E501
    """Gets an existing order by order id.

    :param oid: id of the order to retrieve
    :type oid: str

    :return: (payload, HTTP status) -- a dict with the order on 200,
        an error string on 404
    :rtype: tuple
    """
    oid = int(oid)
    # The previous placeholder assignments (msg/ret_code before the branch)
    # were dead code: every branch overwrites both.
    if oid in orders:
        msg = {"status": "order retrieved", "order": orders[oid], "oid": oid}
        ret_code = 200
    else:
        msg = f"Order: {oid} could not be found"
        ret_code = 404
    return msg, ret_code
import operator
def most_recent_assembly(assembly_list):
    """Return the assembly summary submitted most recently (None if empty)."""
    if not assembly_list:
        return None
    # Stable sort by submission date; the last element is the newest
    # (ties resolved in favor of the later list position).
    by_date = sorted(assembly_list, key=lambda summary: summary['submissiondate'])
    return by_date[-1]
import re
def dict_from_xml_text(xml_text, fix_ampersands=False):
    """
    Convert an xml string to a dictionary of values.

    :param xml_text: valid xml string
    :param fix_ampersands: additionally replace & with &amp; before parsing to etree
    :return: dictionary of data
    """
    if fix_ampersands:
        # Escape every ampersand so the parser accepts sloppy input.
        xml_text = xml_text.replace('&', '&amp;')
    return dict_from_etree(Etree.fromstring(xml_text))
def find_encryption_key(loop_size, subject_number):
    """Derive the encryption key by applying the transform `loop_size` times."""
    key = 1
    remaining = loop_size
    while remaining > 0:
        key = transform_value(key, subject_number)
        remaining -= 1
    return key
import math
def carla_rotation_to_RPY(carla_rotation):
    """
    Convert a carla rotation to a roll, pitch, yaw tuple.

    Accounts for the handedness flip between unreal's left-handed system
    and ROS' right-handed system (hence the sign negation), and converts
    degrees (carla) to radians (ROS).

    :param carla_rotation: the carla rotation
    :type carla_rotation: carla.Rotation
    :return: a tuple with 3 elements (roll, pitch, yaw)
    :rtype: tuple
    """
    to_rad = math.radians
    return (-to_rad(carla_rotation.roll),
            -to_rad(carla_rotation.pitch),
            -to_rad(carla_rotation.yaw))
from bempp.api.integration.triangle_gauss import rule
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import aslinearoperator
def compute_p1_curl_transformation(space, quadrature_order):
    """
    Compute the transformation of P1 space coefficients to surface curl values.

    Returns two lists, curl_transforms and curl_transforms_transpose. The jth matrix
    in curl_transforms is the map from P1 function space coefficients (or extended space
    built upon P1 type spaces) to the jth component of the surface curl evaluated at the
    quadrature points, multiplied with the quadrature weights and integration element. The
    list curl_transforms_transpose contains the transpose of these matrices.

    Args:
        space: a bempp function space built on P1-type elements
        quadrature_order: order of the triangle Gauss rule used for evaluation

    Returns:
        (curl_transforms, curl_transforms_transpose): two lists of three
        LinearOperators each, one per spatial component.
    """
    grid_data = space.grid.data("double")
    number_of_elements = space.grid.number_of_elements
    quad_points, weights = rule(quadrature_order)
    npoints = len(weights)
    dof_count = space.localised_space.grid_dof_count
    # The helper returns COO triplets (values, rows, cols) of the
    # per-component evaluation maps; data has shape (3, nnz).
    data, iind, jind = compute_p1_curl_transformation_impl(
        grid_data,
        space.support_elements,
        space.normal_multipliers,
        quad_points,
        weights,
    )
    curl_transforms = []
    curl_transforms_transpose = []
    for index in range(3):
        # Component map composed with the localisation and dof-transformation
        # maps; the transpose list composes the same factors in reverse order
        # with transposed matrices.
        curl_transforms.append(
            aslinearoperator(
                coo_matrix(
                    (data[index, :], (iind, jind)),
                    shape=(npoints * number_of_elements, dof_count),
                ).tocsr()
            )
            @ aslinearoperator(space.map_to_localised_space)
            @ aslinearoperator(space.dof_transformation)
        )
        curl_transforms_transpose.append(
            aslinearoperator(space.dof_transformation.T)
            @ aslinearoperator(space.map_to_localised_space.T)
            @ aslinearoperator(
                coo_matrix(
                    (data[index, :], (jind, iind)),
                    shape=(dof_count, npoints * number_of_elements),
                ).tocsr()
            )
        )
    return curl_transforms, curl_transforms_transpose
def _get_individual_id(individual) -> str:
"""
Returns a unique identifier as string for the given individual.
:param individual: The individual to get the ID for.
:return: A string representing the ID.
"""
if hasattr(individual, "identifier") and (isinstance(individual.identifier, list) and
len(individual.identifier) > 0 and
type(individual.identifier[0]) in [int, str]) or (
type(individual.identifier) in [int, str]):
return str(individual.identifier[0])
else:
return str(individual) | e606d5eef7bfbcd0d76113c20f450be3c1e6b2ab | 22,057 |
def get_self_url(d):
    """Returns the URL of a Stash resource"""
    # Pull requests expose the URL directly; raw resource dicts carry it
    # under their HAL-style "links" structure.
    if isinstance(d, PullRequest):
        return d.html_url
    return d["links"]["self"][0]["href"]
def shift(arr, *args):
    """
    **WARNING**

    The ``Si`` arguments can be either a single array containing the shift
    parameters for each dimension, or a sequence of up to eight scalar shift
    values. For arrays of more than one dimension, the parameter ``Sn`` specifies
    the shift applied to the n-th dimension

    while this implementation supports lists as ``arr`` argument, to match the
    style of IDL, the IDLpy bridge does *not* support lists, and returns it
    *unchanged*!

    If ``SHIFT`` is used in combination with ``FFT``, maybe you should look at
    ``np.fft.fftshift``.
    """
    arr = np.asarray(arr) # accept list (see note above)
    if arr.ndim==1:
        if len(args)==1:
            return np.roll(arr, _int_list(args))
    elif arr.ndim==2:
        if len(args)==1:
            return np.roll(arr, _int_list(args))
        if len(args)==2:
            # IDL dimension order is reversed with respect to numpy axes.
            return np.roll(arr, _int_list(args)[::-1], axis=(0,1))
    elif arr.ndim==3:
        if len(args)==1:
            # NOTE(review): unlike the 1D/2D cases this passes `args` without
            # _int_list() -- presumably an oversight; confirm intended behavior.
            return np.roll(arr, args)
        elif len(args)==1:
            # NOTE(review): unreachable -- duplicates the previous condition;
            # probably meant a different arity (e.g. 2), so a 2-arg 3-D call
            # currently falls through to NotImplementedError below.
            raise IDLException("Incorrect number of arguments.")
        elif len(args)==3:
            return np.roll(arr, args[::-1], axis=(0,1,2))
    raise NotImplementedError("shift does only work for 1D, 2D and 3D arrays.")
def ConvertToMeaningfulConstant(pset):
    """ Gets the flux constant, and quotes it above some energy minimum Emin """
    # Units: IF TOBS were in yr, it would be smaller, and raw const greater.
    # Converts per Mpc^3 into per Gpc^3 and per day into per year.
    units = 1e9 * 365.25
    const = (10 ** pset[7]) * units
    Eref = 1e40  # erg per Hz
    Emin = 10 ** pset[0]
    Emax = 10 ** pset[1]
    gamma = pset[3]
    # Rescale from the reference energy down to the quoted minimum energy.
    rescale = (Eref / Emin) ** gamma - (Emax / Emin) ** gamma
    return const * rescale
def hw_uint(value):
    """return HW (Hamming weight) of 16-bit unsigned integer in two's complement"""
    # Count set bits in the binary representation; the '0b' prefix
    # contributes no '1' characters.
    return sum(1 for ch in bin(value) if ch == "1")
import copy
def clip(x, xmin, xmax):
    """ clip input array so that x<xmin becomes xmin, x>xmax becomes xmax, return clipped array

    The input array is left unmodified; a clipped copy is returned.
    """
    new = copy.copy(x)
    # Boolean masks (instead of np.where(...)[0], which only yielded the
    # first-axis indices and therefore broke for arrays with ndim > 1)
    # work for any dimensionality and preserve the array's dtype-casting
    # behavior on assignment.
    new[x < xmin] = xmin
    new[x > xmax] = xmax
    return new
def profile_from_creds(creds, keychain, cache):
    """Create a profile from an AWS credentials file."""
    # Read the key pair, persist it securely, then build the profile
    # from the resulting ARN.
    keys = get_keys_from_file(creds)
    stored_arn = security_store(keys[0], keys[1], keychain, cache)
    return profile_from_arn(stored_arn)
import traceback
import traceback
from typing import Tuple
from pathlib import Path
from typing import List
from typing import Iterable
import warnings
def get_files(pp: Paths, glob: str=DEFAULT_GLOB, sort: bool=True) -> Tuple[Path, ...]:
    """
    Helper function to avoid boilerplate.

    Resolves `pp` (a single path/string or a sequence of them) into a tuple
    of concrete files: directories are globbed with `glob`, strings
    containing '*' are treated as glob patterns themselves, and '~' is
    expanded.

    Tuple as return type is a bit friendlier for hashing/caching, so hopefully makes sense
    """
    # TODO FIXME mm, some wrapper to assert iterator isn't empty?
    sources: List[Path]
    if isinstance(pp, Path):
        sources = [pp]
    elif isinstance(pp, str):
        if pp == '':
            # special case -- makes sense for optional data sources, etc
            return () # early return to prevent warnings etc
        sources = [Path(pp)]
    else:
        sources = [Path(p) for p in pp]

    def caller() -> str:
        # TODO ugh. very flaky... -3 because [<this function>, get_files(), <actual caller>]
        return traceback.extract_stack()[-3].filename

    paths: List[Path] = []
    for src in sources:
        if src.parts[0] == '~':
            src = src.expanduser()
        if src.is_dir():
            gp: Iterable[Path] = src.glob(glob)
            paths.extend(gp)
        else:
            ss = str(src)
            if '*' in ss:
                if glob != DEFAULT_GLOB:
                    warnings.warn(f"{caller()}: treating {ss} as glob path. Explicit glob={glob} argument is ignored!")
                paths.extend(map(Path, do_glob(ss)))
            else:
                if not src.is_file():
                    raise RuntimeError(f"Expected '{src}' to exist")
                # todo assert matches glob??
                paths.append(src)
    if sort:
        paths = list(sorted(paths))
    if len(paths) == 0:
        # todo make it conditionally defensive based on some global settings
        # TODO not sure about using warnings module for this
        # Fix: report the *requested* sources; `paths` is always empty here,
        # so the old message interpolated an empty list.
        warnings.warn(f'{caller()}: no paths were matched against {sources}. This might result in missing data.')
        traceback.print_stack()
    return tuple(paths)
import torch
def test(model, test_loader, dynamics, fast_init):
    """
    Evaluate prediction accuracy of an energy-based model on a given test set.

    Args:
        model: EnergyBasedModel
        test_loader: Dataloader containing the test dataset
        dynamics: Dictionary containing the keyword arguments
            for the relaxation dynamics on u
        fast_init: Boolean to specify if fast feedforward initilization
            is used for the prediction

    Returns:
        Test accuracy
        Mean energy of the model per sample (both values are divided by the
        total number of samples seen, not the number of batches)
    """
    test_E, correct, total = 0.0, 0.0, 0.0
    for x_batch, y_batch in test_loader:
        # Prepare the new batch
        x_batch, y_batch = x_batch.to(config.device), y_batch.to(config.device)

        # Extract prediction as the output unit with the strongest activity
        output = predict_batch(model, x_batch, dynamics, fast_init)
        prediction = torch.argmax(output, 1)

        with torch.no_grad():
            # Compute test batch accuracy, energy and store number of seen batches
            # (labels are one-hot along dim 1; argmax recovers the class index)
            correct += float(torch.sum(prediction == y_batch.argmax(dim=1)))
            test_E += float(torch.sum(model.E))
            # `total` accumulates samples, not batches.
            total += x_batch.size(0)

    return correct / total, test_E / total
def get_cluster_activite(cluster_path_csv, test, train=None):
    """Get cluster activite csv from path cluster_path_csv.
    Merge cluster with station_id.

    Parameters
    ----------
    cluster_path_csv : String :
        Path to export df_labels DataFrame
    test : pandas.DataFrame
    train : pandas.DataFrame, optional

    Returns
    -------
    If train is a non-empty DataFrame:
        Return 2 pandas.DataFrame train, test
    Else (train is None or empty):
        Return 1 pandas.DataFrame test
    """
    cluster_activite = read_cluster_activite(cluster_path_csv=cluster_path_csv)
    test = test.merge(cluster_activite, left_on='station_id', right_on='id_station', how='left')
    test.drop('id_station', axis=1, inplace=True)
    # Fix: guard against the default train=None before calling len(), which
    # previously raised TypeError whenever `train` was omitted.
    if train is not None and len(train) > 0:
        train = train.merge(cluster_activite, left_on='station_id', right_on='id_station', how='left')
        train.drop('id_station', axis=1, inplace=True)
        return train, test
    return test
from typing import Optional
def get_ssl_policy(name: Optional[str] = None,
                   project: Optional[str] = None,
                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSSLPolicyResult:
    """
    Gets an SSL Policy within GCE from its name, for use with Target HTTPS and Target SSL Proxies.
    For more information see [the official documentation](https://cloud.google.com/compute/docs/load-balancing/ssl-policies).

    ## Example Usage

    ```python
    import pulumi
    import pulumi_gcp as gcp

    my_ssl_policy = gcp.compute.get_ssl_policy(name="production-ssl-policy")
    ```

    :param str name: The name of the SSL Policy.
    :param str project: The ID of the project in which the resource belongs. If it
           is not provided, the provider project is used.
    :param opts: invoke options; a default-version InvokeOptions is created
           when omitted.
    """
    # Standard pulumi provider-SDK invoke boilerplate: marshal arguments,
    # default the SDK version, call the provider, unpack the typed result.
    __args__ = dict()
    __args__['name'] = name
    __args__['project'] = project
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('gcp:compute/getSSLPolicy:getSSLPolicy', __args__, opts=opts, typ=GetSSLPolicyResult).value

    return AwaitableGetSSLPolicyResult(
        creation_timestamp=__ret__.creation_timestamp,
        custom_features=__ret__.custom_features,
        description=__ret__.description,
        enabled_features=__ret__.enabled_features,
        fingerprint=__ret__.fingerprint,
        id=__ret__.id,
        min_tls_version=__ret__.min_tls_version,
        name=__ret__.name,
        profile=__ret__.profile,
        project=__ret__.project,
        self_link=__ret__.self_link)
def getitem(self, item):
    """Select elements at the specific index.

    Parameters
    ----------
    item : Union[slice, int, dragon.Tensor]
        The index.

    Returns
    -------
    dragon.Tensor
        The output tensor.

    """
    gather_args = []
    if isinstance(item, Tensor):
        # Boolean/uint8 tensors select by mask; int64 tensors gather
        # along axis 0.
        if item.dtype == 'bool' or item.dtype == 'uint8':
            if context.executing_eagerly():
                return OpLib.execute('BooleanMask', [self, item])
            return OpLib.add('BooleanMask', [self, item])
        elif item.dtype == 'int64':
            gather_args.append((0, item))
        else:
            raise TypeError('Unsupported index type: ' + item.dtype)
    if isinstance(item, tuple):
        # Collect (axis, index-tensor) pairs from a mixed tuple index.
        for i, elem in enumerate(item):
            if isinstance(elem, Tensor):
                if elem.dtype == 'int64':
                    gather_args.append((i, elem))
                else:
                    raise TypeError('Unsupported index type: ' + elem.dtype)
    if len(gather_args) == 1:
        axis, index = gather_args[0]
        if context.executing_eagerly():
            return OpLib.execute(
                'Gather', [self, index], axis=axis, end_axis=None)
        return OpLib.add('Gather', [self, index], axis=axis)
    elif len(gather_args) > 1:
        # Multiple advanced (tensor) indices are not supported yet.
        raise NotImplementedError
    # Fallback: plain int/slice indexing lowers to a Slice op.
    starts, sizes = _process_index(item)
    if context.executing_eagerly():
        return OpLib.execute(
            'Slice', [self], ndim=len(starts), starts=starts, sizes=sizes)
    return OpLib.add('Slice', [self], starts=starts, sizes=sizes)
import torch
def zdot_batch(x1, x2):
    """Complex-valued dot product of two multidimensional Tensors, batchwise.

    Computes sum(conj(x1) * x2) over all non-batch dimensions, preserving
    the leading batch dimension.

    Args:
        x1 (Tensor): The first multidimensional Tensor.
        x2 (Tensor): The second multidimensional Tensor.

    Returns:
        A 1-D Tensor of length batch with the per-sample dot products.
    """
    n_batch = x1.shape[0]
    products = torch.conj(x1) * x2
    return products.reshape(n_batch, -1).sum(1)
from datetime import datetime
def add(request):
    """
    Case of UPDATE REQUEST '/server/add/'

    Create a new Server from POSTed `servername` and `comment`, then
    redirect back to the server's page. Responds only to POST requests;
    GET raises 404, OPTION/HEAD get a plain "OK".
    """
    request_type = request.method
    logger.debug(request_type)
    if request_type == 'GET':
        raise Http404
    elif request_type == 'OPTION' or request_type == 'HEAD':
        return HttpResponse("OK")
    elif request_type == 'POST':
        servername = request.POST['servername']
        comment = request.POST['comment']
        server = Server()
        server.name = servername
        server.comment = comment
        server.save()
        target_uuid = server.uuid
        # Redirect back with a cache-busting query parameter so the browser
        # does not show a stale cached page after the update.
        # Fix: `datetime` is the class here (`from datetime import datetime`),
        # so the previous `datetime.datetime.now()` raised AttributeError.
        return HttpResponseRedirect("/server/%s/?update=%d" % (target_uuid, datetime.now().microsecond))
    else:
        raise Http404
def gradient_check_numpy_expr(func, x, output_gradient, h=1e-5):
    """
    Numerically estimate the gradient of `func` at `x` via central differences.

    For each element of `x` the function is evaluated at x+h and x-h, and
    the difference is contracted with `output_gradient` (the upstream
    gradient), yielding d(sum(func(x) * output_gradient)) / dx.

    :param func: callable taking `x` and returning an array-like
    :param x: input array; perturbed in place during evaluation, restored after
    :param output_gradient: upstream gradient, same shape as func(x)
    :param h: central-difference step size
    :return: float32 array with the same shape as `x`
    """
    grad = np.zeros_like(x).astype(np.float32)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        saved = x[idx]
        # Evaluate at x + h and x - h; np.array(...) takes a concrete copy,
        # which also handles func returning a memoryview (e.g. from Cython).
        x[idx] = saved + h
        f_plus = np.array(func(x))
        x[idx] = saved - h
        f_minus = np.array(func(x))
        x[idx] = saved  # restore the original value
        grad[idx] = np.sum((f_plus - f_minus) * output_gradient) / (2 * h)
        it.iternext()
    return grad
def serve_values(name, func, args, kwargs, serving_values, fallback_func, backend_name=None, implemented_funcs=None, supported_kwargs=None,):  #249 (line num in coconut source)
    """Determines the parameter value to serve for the given parameter
    name and kwargs. First checks for unsupported funcs or kwargs, then
    uses the following algorithm:
    1. if name in serving_values, use serving_values[name], else
    2. if guess in kwargs, use the guess, else
    3. call fallback_func(name, func, *args, **kwargs)."""  #265 (line num in coconut source)
    # NOTE: this function is Coconut-compiled output; the _coconut_* variables
    # below are the machinery of compiled `match` statements. Edit the .coco
    # source rather than this file.
    # validate arguments
    if implemented_funcs is not None:  #267 (line num in coconut source)
        assert backend_name is not None, "serve_values expects a backend_name argument when doing func validation"  #268 (line num in coconut source)
        if func not in implemented_funcs:  #269 (line num in coconut source)
            raise ValueError("the {_coconut_format_0} backend does not implement the {_coconut_format_1} function".format(_coconut_format_0=(backend_name), _coconut_format_1=(func)))  #270 (line num in coconut source)
    if supported_kwargs is not None:  #271 (line num in coconut source)
        assert backend_name is not None, "serve_values expects a backend_name argument when doing kwargs validation"  #272 (line num in coconut source)
        unsupported_kwargs = set(kwargs) - set(supported_kwargs)  #273 (line num in coconut source)
        if unsupported_kwargs:  #274 (line num in coconut source)
            raise ValueError("the {_coconut_format_0} backend does not support {_coconut_format_1} option(s)".format(_coconut_format_0=(backend_name), _coconut_format_1=(unsupported_kwargs)))  #275 (line num in coconut source)

    # determine value
    # Compiled pattern-match, case 1: serving_values is a mapping that
    # contains `name` -> serve that stored value.
    _coconut_match_to_4 = serving_values  #278 (line num in coconut source)
    _coconut_match_check_6 = False  #278 (line num in coconut source)
    _coconut_match_set_name_value = _coconut_sentinel  #278 (line num in coconut source)
    if _coconut.isinstance(_coconut_match_to_4, _coconut.abc.Mapping):  #278 (line num in coconut source)
        _coconut_match_temp_19 = _coconut_match_to_4.get(name, _coconut_sentinel)  #278 (line num in coconut source)
        if _coconut_match_temp_19 is not _coconut_sentinel:  #278 (line num in coconut source)
            _coconut_match_set_name_value = _coconut_match_temp_19  #278 (line num in coconut source)
            _coconut_match_check_6 = True  #278 (line num in coconut source)
    if _coconut_match_check_6:  #278 (line num in coconut source)
        if _coconut_match_set_name_value is not _coconut_sentinel:  #278 (line num in coconut source)
            value = _coconut_match_set_name_value  #278 (line num in coconut source)
    if _coconut_match_check_6:  #278 (line num in coconut source)
        return value  #279 (line num in coconut source)
    else:  #280 (line num in coconut source)
        # Case 2: kwargs carries a "guess" entry -> serve the guess.
        # Case 3 (no match at all): delegate to fallback_func.
        _coconut_match_to_3 = kwargs  #280 (line num in coconut source)
        _coconut_match_check_5 = False  #280 (line num in coconut source)
        _coconut_match_set_name_guess = _coconut_sentinel  #280 (line num in coconut source)
        if _coconut.isinstance(_coconut_match_to_3, _coconut.abc.Mapping):  #280 (line num in coconut source)
            _coconut_match_temp_18 = _coconut_match_to_3.get("guess", _coconut_sentinel)  #280 (line num in coconut source)
            if _coconut_match_temp_18 is not _coconut_sentinel:  #280 (line num in coconut source)
                _coconut_match_set_name_guess = _coconut_match_temp_18  #280 (line num in coconut source)
                _coconut_match_check_5 = True  #280 (line num in coconut source)
        if _coconut_match_check_5:  #280 (line num in coconut source)
            if _coconut_match_set_name_guess is not _coconut_sentinel:  #280 (line num in coconut source)
                guess = _coconut_match_set_name_guess  #280 (line num in coconut source)
        if _coconut_match_check_5:  #280 (line num in coconut source)
            return guess  #281 (line num in coconut source)
        else:  #282 (line num in coconut source)
            return fallback_func(name, func, *args, **kwargs)
def find_peaks(sig):
    r"""
    Find hard peaks and soft peaks in a signal, defined as follows:

    - Hard peak: a peak that is either /\ or \/.
    - Soft peak: a peak that is either /-*\ or \-*/.
      In this case we define the middle as the peak.

    Parameters
    ----------
    sig : np array
        The 1d signal array.

    Returns
    -------
    hard_peaks : ndarray
        Array containing the indices of the hard peaks.
    soft_peaks : ndarray
        Array containing the indices of the soft peaks.

    """
    if len(sig) == 0:
        return np.empty([0]), np.empty([0])

    # Sign of the backward difference: slope[i] = sign(sig[i] - sig[i+1]),
    # with the last sample compared against itself (slope 0).
    shifted = np.append(sig[1:], [sig[-1]])
    slope = np.sign(sig - shifted)
    # Change of slope between consecutive samples; +/-2 marks an immediate
    # reversal (hard peak), +/-1 marks entering or leaving a plateau.
    slope_next = np.append(slope[1:], [0])
    change = slope - slope_next

    hard_peaks = np.where(np.logical_or(change == -2, change == +2))[0] + 1

    soft = []
    for start in np.where(np.logical_or(change == -1, change == +1))[0]:
        direction = change[start]
        pos = start + 1
        while True:
            # Stop scanning at the end, at an opposite transition, or at a
            # hard reversal -- no soft peak in those cases.
            if pos == len(change) or change[pos] == -direction or abs(change[pos]) == 2:
                break
            if change[pos] == direction:
                # Plateau closed in the same direction: its middle is the peak.
                soft.append(int(start + (pos - start) / 2))
                break
            pos += 1
    soft_peaks = np.array(soft, dtype='int') + 1

    return hard_peaks, soft_peaks
def evenly_divides(x, y):
    """Returns if [x] evenly divides [y].

    Uses modulo instead of ``int(y / x) == y / x``: float division loses
    precision for integers beyond 2**53 and could report false positives,
    whereas integer modulo is exact.
    """
    return y % x == 0
def proxmap_sort(arr: list, key: Function = lambda x: x, reverse: bool = False) -> list:
    """Proxmap sort is a sorting algorithm that works by partitioning an array of data items, or keys, into a number of
    "subarrays" (termed buckets, in similar sorts). The name is short for computing a "proximity map," which indicates
    for each key K the beginning of a subarray where K will reside in the final sorted order. Keys are placed into each
    subarray using insertion sort."""
    # Time complexity:
    # Worst: O(n^2)
    # Average: Theta(n)
    # Best: Omega(n)
    # Stable, Not in place
    _check_key_arr(arr, key, IntFloatList)
    if not arr:
        return []
    _min = key(min(arr, key=key))
    _max = key(max(arr, key=key))
    # One bucket per integer key value between floor(_min) and floor(_max);
    # hit_counts[b] = number of items falling into bucket b.
    hit_counts = [0 for _ in range(int(_min), int(_max + 1))]
    for item in arr:
        hit_counts[int(key(item)) - int(_min)] += 1
    # proxmaps[b] = start offset of bucket b in the output
    # (None for empty buckets).
    proxmaps = []
    last_hit_count = 0
    for hc in hit_counts:
        if hc == 0:
            proxmaps.append(None)
        else:
            proxmaps.append(last_hit_count)
            last_hit_count += hc
    # locations[i] = start offset of the bucket that arr[i] belongs to.
    locations = []
    for item in arr:
        locations.append(proxmaps[int(key(item)) - int(_min)])
    final = [None for _ in range(len(locations))]
    for idx, item in enumerate(arr):
        loc = locations[idx]
        if final[loc] is None:
            final[loc] = item
        else:
            # Insertion sort within the bucket: find the first free slot,
            # then shift larger items right until `item`'s slot opens up.
            # NOTE(review): the comparison below uses the raw items
            # (final[ptr] > item) rather than key(...); with a non-identity
            # `key` the intra-bucket order may not follow the key -- confirm.
            none_ptr = loc
            while final[none_ptr] is not None:
                none_ptr += 1
            for ptr in range(none_ptr - 1, loc - 1, -1):
                if final[ptr] > item:
                    final[ptr], final[ptr + 1] = final[ptr + 1], final[ptr]
                else:
                    final[ptr + 1] = item
                    break
            else:
                final[loc] = item
    if reverse:
        final = final[::-1]
    return final
import pathlib
def load_footings_file(file: str):
    """Load footings generated file.

    :param str file: The path to the file.
    :return: A dict representing the respective file type.
    :rtype: dict

    .. seealso::

        :obj:`footings.testing.load_footings_json_file`
        :obj:`footings.testing.load_footings_xlsx_file`
    """
    # Dispatch on the file extension (e.g. ".json" / ".xlsx").
    suffix = pathlib.Path(file).suffix
    return _load_footings_file(file_ext=suffix, file=file)
def coupler(*, coupling: float = 0.5) -> SDict:
    """a simple coupler model"""
    # Field amplitudes: the cross-coupled paths carry a 90-degree (1j)
    # phase shift relative to the through paths.
    kappa = coupling ** 0.5
    tau = (1 - coupling) ** 0.5
    cross = 1j * kappa
    return reciprocal(
        {
            ("in0", "out0"): tau,
            ("in0", "out1"): cross,
            ("in1", "out0"): cross,
            ("in1", "out1"): tau,
        }
    )
def new_client(request):
    """
    Allow a new client to register itself.

    :param request: Who has made the request.
    :return: Response 400 if any required field is missing or registration
        fails; Response 200 with state and message on success.
    """
    required_fields = ("email", "first_name", "last_name", "password")
    if any(field not in request.data for field in required_fields):
        return Response({"state": "Error", "message": "Missing parameters"}, status=HTTP_400_BAD_REQUEST)
    ok, message, _username = queries.add_client(request.data)
    if ok:
        return Response({"state": "Success", "message": message}, status=HTTP_200_OK)
    return Response({"state": "Error", "message": message}, status=HTTP_400_BAD_REQUEST)
from datetime import datetime
def parse_time_interval_seconds(time_str):
    """Convert a time interval string (e.g. ``'5m'``) into seconds.

    :param time_str: the string to parse
    :returns: the number of seconds in the interval
    :raises ValueError: if the string could not be parsed
    """
    calendar = parsedatetime.Calendar()
    parsed, status = calendar.parseDT(time_str, sourceTime=datetime.min)
    # parsedatetime reports a status of 0 when nothing was understood.
    if status == 0:
        raise ValueError("Could not understand time {time}".format(time=time_str))
    delta = parsed - datetime.min
    return delta.total_seconds()
from typing import List
from typing import Dict
def _map_class_names_to_probabilities(probabilities: List[float]) -> Dict[str, float]:
    """Pair readable class names with their predicted probabilities.

    Args:
        probabilities (List[float]): Probabilities for the best predicted classes,
            in the same order as the loaded class names.

    Returns:
        Dict[str, float]: Mapping from class name to its probability.
    """
    class_names = load_classes()
    # zip truncates to the shorter sequence, matching the original pairing.
    return dict(zip(class_names, probabilities))
import collections
def order_items(records):
    """Return the records ordered by ascending SHA256 key."""
    ordered = collections.OrderedDict()
    for sha in sorted(records):
        ordered[sha] = records[sha]
    return ordered
def G2DListMutatorRealGaussianGradient(genome, **args):
    """ A gaussian gradient mutator for G2DList of Real
    Accepts the *rangemin* and *rangemax* genome parameters, both optional.
    The difference is that this multiplies the gene by gauss(1.0, 0.0333), allowing
    for a smooth gradient drift about the value.

    :param genome: the 2D real-valued genome, mutated in place
    :param args: must contain "pmut", the per-gene mutation probability
    :rtype: int -- number of mutations applied (the rounded expectation
        when the fixed-count branch is taken)
    """
    # NOTE(review): `xrange` plus the prng/constants/utils helpers indicate
    # Python 2 era library code (pyevolve-style) -- confirm target runtime.
    if args["pmut"] <= 0.0:
        return 0
    height, width = genome.getSize()
    elements = height * width
    # Expected number of mutations across the whole 2D genome.
    mutations = args["pmut"] * elements
    mu = constants.CDefGaussianGradientMU
    sigma = constants.CDefGaussianGradientSIGMA
    if mutations < 1.0:
        # Expectation below one mutation: flip a biased coin for every gene
        # and count how many actually mutated.
        mutations = 0
        for i in xrange(genome.getHeight()):
            for j in xrange(genome.getWidth()):
                if utils.randomFlipCoin(args["pmut"]):
                    # Scale the gene by a positive gaussian factor (smooth drift).
                    final_value = genome[i][j] * abs(prng.normal(mu, sigma))
                    # Clamp into the genome's configured [rangemin, rangemax].
                    final_value = min(final_value, genome.getParam("rangemax", constants.CDefRangeMax))
                    final_value = max(final_value, genome.getParam("rangemin", constants.CDefRangeMin))
                    genome.setItem(i, j, final_value)
                    mutations += 1
    else:
        # Expectation >= 1: mutate a fixed number of randomly chosen cells
        # (the same cell may be picked more than once).
        for it in xrange(int(round(mutations))):
            which_x = prng.randint(0, genome.getWidth())
            which_y = prng.randint(0, genome.getHeight())
            final_value = genome[which_y][which_x] * abs(prng.normal(mu, sigma))
            final_value = min(final_value, genome.getParam("rangemax", constants.CDefRangeMax))
            final_value = max(final_value, genome.getParam("rangemin", constants.CDefRangeMin))
            genome.setItem(which_y, which_x, final_value)
    return int(mutations)
def convert_group_by(response, field):
    """
    Convert an aggregation stored on the first hit into a list of
    ``{"key": ..., "doc_count": ...}`` buckets, sorted by count descending.
    """
    hits = response.hits.hits
    if not hits:
        return []
    source = hits[0]._source.to_dict()
    stats = source.get(field)
    buckets = [{"key": name, "doc_count": count} for name, count in stats.items()]
    # Highest counts first.
    return sorted(buckets, key=lambda bucket: bucket["doc_count"], reverse=True)
import sys
def get_different_columns(
    meta_subset1: pd.DataFrame,
    meta_subset2: pd.DataFrame,
    common_cols: list) -> list:
    """Find which metadata columns have the
    same name but their content differ.

    Parameters
    ----------
    meta_subset1 : pd.DataFrame
        A metadata table
    meta_subset2 : pd.DataFrame
        Another metadata table
    common_cols : list
        Metadata columns that are in common
        between the two metadata tables

    Returns
    -------
    diff_cols : list
        Metadata columns that are
        different in contents.
    """
    diff_cols = []
    for c in common_cols:
        try:
            meta_col1 = meta_subset1[c].tolist()
            meta_col2 = meta_subset2[c].tolist()
        except KeyError:
            # Fix: the original bare `except:` also swallowed SystemExit /
            # KeyboardInterrupt, and its handler re-read meta_subset1[c],
            # which re-raised whenever subset1 was the table missing the
            # column. Report the offending column instead, then exit as before.
            print("Column %r missing from one of the metadata tables" % c)
            sys.exit(1)
        if meta_col1 != meta_col2:
            diff_cols.append(c)
    return diff_cols
def _parse_single(argv, args_array, opt_def_dict, opt_val):
    """Function: _parse_single

    Description:  Processes a single-value argument from the command line,
        adding a key (and possibly a value) to args_array.

    NOTE:  Used by arg_parse2() to reduce the complexity rating.

    Arguments:
        (input) argv -> Arguments from the command line.
        (input) args_array -> Array of command line options and values.
        (input) opt_def_dict -> Dict with options and default values.
        (input) opt_val -> List of options allow None or 1 value for option.
        (output) argv -> Arguments from the command line.
        (output) args_array -> Array of command line options and values.

    """
    argv = list(argv)
    args_array = dict(args_array)
    opt_def_dict = dict(opt_def_dict)
    opt_val = list(opt_val)
    option = argv[0]
    # The next token counts as this option's value unless it looks like
    # another option ("-" prefix) and is not a negative integer.
    value_follows = len(argv) >= 2 and (
        argv[1][0] != "-" or gen_libs.chk_int(argv[1]))
    if value_follows:
        args_array[option] = argv[1]
        argv = argv[1:]
    elif option in opt_val:
        args_array[option] = None
    else:
        # No value given: fall back to the option's default, if any.
        args_array = arg_default(option, args_array, opt_def_dict)
    return argv, args_array
def compOverValueTwoSets(setA=frozenset({1, 2, 3, 4}), setB=frozenset({3, 4, 5, 6})):
    """
    task 0.5.9
    comprehension whose value is the intersection of setA and setB
    without using the '&' operator

    Fixes: defaults are now frozensets so no mutable object is shared
    across calls, and the redundant union pass was removed -- iterating
    one set and filtering on membership in the other is the intersection.
    """
    return {x for x in setA if x in setB}
def d_beta():
    """
    Real Name: b'D BETA'
    Original Eqn: b'0.05'
    Units: b''
    Limits: (None, None)
    Type: constant

    Model constant extracted from the original equation; always 0.05.
    """
    D_BETA_VALUE = 0.05
    return D_BETA_VALUE
import csv
def _get_data(filename):
"""
:param filename: name of a comma-separated data file with two columns: eccentricity and some other
quantity x
:return: eccentricities, x
"""
eccentricities = []
x = []
with open(filename) as file:
r = csv.reader(file)
for row in r:
eccentricities.append(float(row[0]))
x.append(float(row[1]))
return np.array(eccentricities), np.array(x) | f8c86f0f1c9bf2ee91108382cd6da6b98445bf1f | 22,088 |
def longestCommonPrefix(strs):
    """Return the longest common prefix of all strings in *strs*.

    :type strs: List[str]
    :rtype: str -- '' for an empty list.

    Fixes: guard clause instead of wrapping the whole body in if/else,
    and the loop variable no longer shadows the builtin ``str``.
    """
    if not strs:
        return ''
    prefix = strs[0]
    for s in strs[1:]:
        # Shrink the candidate until it prefixes s; bottoms out at ''.
        while not s.startswith(prefix):
            prefix = prefix[:-1]
    return prefix
def analytics_dashboard(request):
    """Render the main page for analytics related things."""
    return render(request, 'analytics/analyzer/dashboard.html')
def insert(shape, axis=-1):
    """Return *shape* with a length-1 axis inserted at index ``axis``."""
    head, tail = shape[:axis], shape[axis:]
    return head + (1,) + tail
import re
def simplify_text(text):
    """Lower-case *text* after removing HTML tags and all non-letter characters.

    :param text: value coerced to ``str`` before cleaning
    :return: cleaned lower-case string containing only a-z
    """
    as_str = str(text)
    without_tags = re.sub('<[^<]+?>', '', as_str)
    letters_only = re.sub(r"[^a-zA-Z]+", "", without_tags)
    return letters_only.lower()
def is_numpy_convertable(v):
    """
    Return whether a value is meaningfully convertable to a numpy array
    via 'numpy.array' (i.e. exposes either numpy array protocol attribute).
    """
    return any(hasattr(v, attr) for attr in ("__array__", "__array_interface__"))
def grower(array):
    """Grow masked (nonzero) regions of a 2D array by one pixel.

    Returns a boolean array where any cell 4-adjacent to (or part of)
    a nonzero region is True.
    """
    # Plus-shaped (4-connected) structuring element.
    kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    spread = convolve2d(array, kernel, mode="same")
    return spread != 0
def SMAPELossFlat(*args, axis=-1, floatify=True, **kwargs):
    """Same as `smape`, but flattens input and target.
    DOES not work yet
    """
    # Wraps the smape metric in a flattening BaseLoss; extra positional and
    # keyword arguments are forwarded to BaseLoss unchanged.
    # NOTE(review): the author flags this as not working yet -- verify the
    # BaseLoss/smape integration before relying on it.
    return BaseLoss(smape, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)
from datetime import datetime
def get_fake_datetime(now: datetime):
    """Build a monkey-patch stand-in for ``datetime.datetime`` whose
    ``now()`` and ``utcnow()`` classmethods always return *now*."""
    fixed = now

    class _FrozenDatetime:
        """Fake datetime.datetime class pinned to one instant."""
        now = classmethod(lambda cls: fixed)
        utcnow = classmethod(lambda cls: fixed)

    return _FrozenDatetime
from typing import List
def generate_order_by(fields: List[str], sort_orders: List[str], table_pre: str = '') -> str:
    """Build an SQL ORDER BY clause.

    Args:
        fields: column names to sort by
        sort_orders: matching list of asc/desc directions (one per field)
        table_pre: optional table prefix prepended to each column

    Returns:
        The ``ORDER BY`` clause, or ``''`` when *fields* is empty.

    Raises:
        PGsqlOrderByExcept: if a direction is not ASC or DESC.
    """
    if not fields:
        return ''

    def render(field: str, sort_order: str) -> str:
        """Render one 'FIELD ASC' fragment, validating the direction."""
        if sort_order.upper() not in ['ASC', 'DESC']:
            raise PGsqlOrderByExcept(f'sort_order value should consist of ASC or DESC but he {sort_order}')
        prefix = f"{table_pre}." if table_pre else ""
        return f"{prefix}{field} {sort_order.upper()}"

    fragments = [render(field, sort_orders[i]) for i, field in enumerate(fields)]
    return "ORDER BY " + ", ".join(fragments)
def toUnicode(glyph, isZapfDingbats=False):
    """Convert glyph names to Unicode, such as 'longs_t.oldstyle' --> u'ſt'
    If isZapfDingbats is True, the implementation recognizes additional
    glyph names (as required by the AGL specification).
    """
    # https://github.com/adobe-type-tools/agl-specification#2-the-mapping
    # Step 1: everything from the first FULL STOP onward is a suffix; drop it.
    base_name = glyph.split(".", 1)[0]
    # Steps 2-3: split on LOW LINE, map each component, and concatenate.
    pieces = []
    for component in base_name.split("_"):
        pieces.append(_glyphComponentToUnicode(component, isZapfDingbats))
    return "".join(pieces)
def list_watchlist_items_command(client, args):
    """
    Get a specific watchlist item or the list of watchlist items.
    :param client: (AzureSentinelClient) The Azure Sentinel client to work with.
    :param args: (dict) arguments for this command.
    """
    alias = args.get('watchlist_alias', '')
    item_id = args.get('watchlist_item_id')
    # A concrete item id narrows the request to a single watchlist item.
    url_suffix = f'watchlists/{alias}/watchlistItems'
    if item_id:
        url_suffix = f'{url_suffix}/{item_id}'

    result = client.http_request('GET', url_suffix)

    raw_items = [result] if item_id else result.get('value')
    items = []
    for raw_item in raw_items:
        entry = {'WatchlistAlias': alias}
        entry.update(watchlist_item_data_to_xsoar_format(raw_item))
        items.append(entry)

    readable_output = tableToMarkdown('Watchlist items results', items,
                                      headers=['ID', 'ItemsKeyValue'],
                                      headerTransform=pascalToSpace,
                                      removeNull=True)
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='AzureSentinel.WatchlistItem',
        outputs=items,
        outputs_key_field='ID',
        raw_response=result
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.