content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def vectorized_range(start, end):
    """Return an NxD integer array of indices ramping from start towards end.

    One row per element of ``start``/``end``: each row increases linearly
    from its start value towards its end value over N steps, where N is one
    more than the largest span ``max(end - start)``.
    """
    span = end - start
    n_steps = int(np.max(span)) + 1
    ramp = np.arange(n_steps) * span[:, None] / n_steps
    return np.floor(ramp + start[:, None]).astype('int')
import random
def stratified(W, M):
    """Stratified resampling.

    Draws one uniform variate in each of the M strata [m/M, (m+1)/M) and
    maps them through the inverse CDF of the weight vector.

    :param W: normalized weight vector
    :param M: number of resampled points
    :return: array of M ancestor indices (as produced by ``inverse_cdf``)
    """
    # BUG FIX: the stdlib ``random`` module has no ``rand``; use numpy's
    # vectorized uniform sampler to draw one point per stratum.
    su = (np.random.rand(M) + np.arange(M)) / M
    return inverse_cdf(su, W)
def is_configured():
    """Return if Azure account is configured."""
    # No Azure credentials are ever wired up here, so this is always False.
    return False
def box_strings(*strings: str, width: int = 80) -> str:
    """Centre-align and visually box some strings.

    Args:
        *strings (str): Strings to box. Each string is printed on its own
            line; keep each one at most ``width - 6`` characters long or the
            box edges will not line up.
        width (int, optional): Width of the box. Defaults to 80.

    Returns:
        str: The strings, centred and surrounded by a border box.
    """
    border = "+" + "-" * (width - 2) + "+"
    spacer = "|" + " " * (width - 2) + "|"
    body = [f'| {text.center(width-6)} |' for text in strings]
    return "\n".join([border, spacer, *body, spacer, border])
import traceback
def exceptions(e):
    """This exceptions handler manages Flask/Werkzeug exceptions.

    For Renku exception handlers check ``service/decorators.py``

    :param e: the exception raised while handling a request
    :return: an ``error_response`` payload for the client
    """
    # NOTE: Capture werkzeug exceptions and propagate them to sentry.
    capture_exception(e)
    # NOTE: Capture traceback for dumping it to the log.
    tb = traceback.format_exc()
    if hasattr(e, "code") and e.code == 404:
        service_log.error(
            "{} {} {} {} 404 NOT FOUND\n{}".format(
                request.remote_addr, request.method, request.scheme, request.full_path, tb
            )
        )
        # NOTE(review): ``HTTP_SERVER_ERROR - e.code`` appears to fold the HTTP
        # status into a service-specific error code — confirm against
        # error_response's contract.
        return error_response(HTTP_SERVER_ERROR - e.code, e.name)
    if hasattr(e, "code") and e.code >= 500:
        service_log.error(
            "{} {} {} {} 5xx INTERNAL SERVER ERROR\n{}".format(
                request.remote_addr, request.method, request.scheme, request.full_path, tb
            )
        )
        return error_response(HTTP_SERVER_ERROR - e.code, e.name)
    # NOTE: Werkzeug exceptions should be covered above, following line is for unexpected HTTP server errors.
    return error_response(HTTP_SERVER_ERROR, str(e))
import torch
def top_k_top_p_filtering(logits, top_k, top_p, filter_value=-float("Inf")):
    """
    Apply top-k and/or nucleus (top-p) filtering to a batch of logits.

    Only the top_k highest-scoring tokens and/or the smallest set of tokens
    whose cumulative probability reaches top_p are kept; every other logit
    is overwritten with filter_value (typically -inf) so those tokens cannot
    be selected during the subsequent sampling step.

    Args:
        logits: prediction scores over the vocabulary, shape [batch_size, vocab_size]
        top_k: keep only the top_k highest-probability tokens (0 disables)
        top_p: keep only tokens whose cumulative probability reaches top_p (0.0 disables)
        filter_value: value assigned to filtered-out logits
    Returns:
        the filtered logits tensor (modified in place)
    """
    # logits must be 2-D: [batch_size, vocab_size]
    assert logits.dim() == 2
    # Clamp top_k to the vocabulary size, in case top_k exceeds it.
    top_k = min(top_k, logits[0].size(-1))
    # Top-k filtering: per batch row, keep only the top_k tokens.
    if top_k > 0:
        # Iterate over the batch, filtering each row independently.
        for logit in logits:
            indices_to_remove = logit < torch.topk(logit, top_k)[0][..., -1, None]
            logit[indices_to_remove] = filter_value
    # Nucleus (top-p) filtering: keep tokens until cumulative prob reaches top_p.
    if top_p > 0.0:
        # Sort logits in descending order along the vocabulary axis.
        sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
        # Softmax-normalize the sorted logits and take the running sum,
        # e.g. [0.1, 0.2, 0.3, 0.4] -> [0.1, 0.3, 0.6, 1.0].
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # Mark tokens whose cumulative probability already exceeds top_p.
        sorted_indices_to_remove = cumulative_probs > top_p
        # Shift the mask right so the first token crossing the threshold is kept.
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        for index, logit in enumerate(logits):
            # Map the sorted-space mask back to vocabulary indices per row.
            indices_to_remove = sorted_indices[index][sorted_indices_to_remove[index]]
            logit[indices_to_remove] = filter_value
    return logits
def get_alb(alb_name, aws_auth_cred):
    """
    Find and return loadbalancers of mentioned name

    Args:
        alb_name (str): Load balancer name
        aws_auth_cred (dict): Dict containing AWS credentials

    Returns:
        alb (dict): Loadbalancer details, or None when no load balancer
        matches or the lookup fails.
    """
    client = get_elbv2_client(aws_auth_cred)
    try:
        response = client.describe_load_balancers(Names=[alb_name])
        albs = response['LoadBalancers']
        # The name filter yields at most one match; return it, or None.
        return albs.pop() if albs else None
    except Exception:
        # FIX: narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit). A missing load balancer raises a
        # botocore ClientError; treat any lookup failure as "not found".
        return None
def align_left_position(anchor, size, alignment, margin):
    """Find the position of a rectangle to the left of a given anchor.

    :param anchor: A :py:class:`~skald.geometry.Rectangle` to anchor the
        rectangle to.
    :param size: The :py:class:`~skald.geometry.Size` of the rectangle.
    :param alignment: The :py:class:`~skald.definitions.Alignment` of the
        rectangle.
    :param margin: The margin, in pixels, the rectangle must have from the
        anchor.
    """
    # Place the rectangle's right edge ``margin`` pixels left of the anchor;
    # the vertical coordinate follows the requested alignment.
    return Point(
        x=anchor.left - size.width - margin,
        y=vertical_align(anchor, size, alignment),
    )
def quickdraw_to_linestring(qd_image):
    """Returns a Shapely MultiLineString for the provided quickdraw image.

    This MultiLineString can be passed to vsketch
    """
    # Each stroke is stored as parallel x/y coordinate lists; pair them up
    # into (x, y) point tuples, one tuple-of-points per stroke.
    strokes = [
        tuple(zip(stroke[0], stroke[1]))
        for stroke in qd_image["image"]
    ]
    return MultiLineString(strokes)
def get_device_details():
    """Query the first connected PTP (camera) device and return its info.

    Opens a PTP-over-USB session, reads the device info dictionary plus the
    transport's device name, then tears the session down.

    :return: dict of device info fields with an added "Model" entry, or
        None if the info could not be read before an exception was raised.
    :raises PtpException: when the device responds with a PTP error code.
    """
    ptpTransport = PtpUsbTransport(
        PtpUsbTransport.findptps(PtpUsbTransport.USB_CLASS_PTP))
    # NOTE(review): these endpoints are never used below — confirm whether
    # retrieve_device_endpoints has required side effects or can be dropped.
    bulk_in, bulk_out, interrupt_in = \
        PtpUsbTransport.retrieve_device_endpoints(
            PtpUsbTransport.findptps(PtpUsbTransport.USB_CLASS_PTP))
    ptpSession = PtpSession(ptpTransport)
    vendorId = PtpValues.Vendors.STANDARD
    device_info = None
    try:
        # Open device session
        ptpSession.OpenSession()
        device_info = ptpSession.GetDeviceInfoDict()
        device_info["Model"] = ptpTransport.device_name
    except PtpException as e:
        # Translate the raw response code into a readable name.
        raise PtpException(
            "PTP Exception: %s" %
            PtpValues.ResponseNameById(
                e.responsecode,
                vendorId),
            ptpSession, ptpTransport)
    except Exception as e:
        # NOTE(review): re-wrapping in a bare Exception loses the original
        # traceback; consider a plain ``raise`` instead.
        raise Exception(e)
    # Close the session
    del ptpSession
    del ptpTransport
    return device_info
import functools
def build(image_resizer_config):
    """Builds callable for image resizing operations.

    Args:
        image_resizer_config: image_resizer.proto object containing parameters
            for an image resizing operation.

    Returns:
        image_resizer_fn: Callable for image resizing. This callable always
            takes a rank-3 image tensor (corresponding to a single image) and
            returns a rank-3 image tensor, possibly with new spatial
            dimensions.

    Raises:
        ValueError: if `image_resizer_config` is of incorrect type.
        ValueError: if `image_resizer_config.image_resizer_oneof` is of
            unexpected type.
        ValueError: if min_dimension > max_dimension when
            keep_aspect_ratio_resizer is used.
    """
    if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer):
        raise ValueError('image_resizer_config not of type '
                         'image_resizer_pb2.ImageResizer.')
    # Resolve the oneof once instead of querying it per branch.
    resizer_oneof = image_resizer_config.WhichOneof('image_resizer_oneof')
    if resizer_oneof == 'keep_aspect_ratio_resizer':
        config = image_resizer_config.keep_aspect_ratio_resizer
        if config.min_dimension > config.max_dimension:
            raise ValueError('min_dimension > max_dimension')
        return functools.partial(
            preprocessor.resize_to_range,
            min_dimension=config.min_dimension,
            max_dimension=config.max_dimension)
    if resizer_oneof == 'fixed_shape_resizer':
        config = image_resizer_config.fixed_shape_resizer
        return functools.partial(
            preprocessor.resize_image,
            new_height=config.height,
            new_width=config.width)
    raise ValueError('Invalid image resizer option.')
from typing import Optional
from typing import Tuple
def plotly_protein_structure_graph(
    G: nx.Graph,
    plot_title: Optional[str] = None,
    figsize: Tuple[int, int] = (620, 650),
    node_alpha: float = 0.7,
    node_size_min: float = 20.0,
    node_size_multiplier: float = 20.0,
    label_node_ids: bool = True,
    node_colour_map=plt.cm.plasma,
    edge_color_map=plt.cm.plasma,
    colour_nodes_by: str = "degree",
    colour_edges_by: str = "kind",
) -> go.Figure:
    """
    Plots protein structure graph using plotly.

    :param G: nx.Graph Protein Structure graph to plot
    :type G: nx.Graph
    :param plot_title: Title of plot, defaults to None
    :type plot_title: str, optional
    :param figsize: Size of figure, defaults to (620, 650)
    :type figsize: Tuple[int, int]
    :param node_alpha: Controls node transparency, defaults to 0.7
    :type node_alpha: float
    :param node_size_min: Specifies node minimum size
    :type node_size_min: float
    :param node_size_multiplier: Scales node size by a constant. Node sizes reflect degree.
    :type node_size_multiplier: float
    :param label_node_ids: bool indicating whether or not to plot node_id labels
    :type label_node_ids: bool
    :param node_colour_map: colour map to use for nodes
    :type node_colour_map: plt.cm
    :param edge_color_map: colour map to use for edges
    :type edge_color_map: plt.cm
    :param colour_nodes_by: Specifies how to colour nodes. "degree", "seq_position" or a node feature
    :type colour_nodes_by: str
    :param colour_edges_by: Specifies how to colour edges. Currently only "kind" is supported
    :type colour_edges_by: str
    :returns: Plotly Graph Objects plot
    :rtype: go.Figure
    """
    # Get Node Attributes (3D coordinates stored under the "coords" key)
    pos = nx.get_node_attributes(G, "coords")
    # Get node colours
    node_colors = colour_nodes(
        G, colour_map=node_colour_map, colour_by=colour_nodes_by
    )
    edge_colors = colour_edges(
        G, colour_map=edge_color_map, colour_by=colour_edges_by
    )
    # 3D network plot
    x_nodes = []
    y_nodes = []
    z_nodes = []
    node_sizes = []
    node_labels = []
    # Loop on the pos dictionary to extract the x,y,z coordinates of each node
    for i, (key, value) in enumerate(pos.items()):
        x_nodes.append(value[0])
        y_nodes.append(value[1])
        z_nodes.append(value[2])
        # Node size grows linearly with degree, floored at node_size_min.
        node_sizes.append(node_size_min + node_size_multiplier * G.degree[key])
        if label_node_ids:
            node_labels.append(list(G.nodes())[i])
    nodes = go.Scatter3d(
        x=x_nodes,
        y=y_nodes,
        z=z_nodes,
        mode="markers",
        marker={
            "symbol": "circle",
            "color": node_colors,
            "size": node_sizes,
            "opacity": node_alpha,
        },
        text=list(G.nodes()),
        hoverinfo="text+x+y+z",
    )
    # Loop on the list of edges to get the x,y,z, coordinates of the connected nodes
    # Those two points are the extrema of the line to be plotted
    # (None entries break the line between consecutive edges).
    x_edges = []
    y_edges = []
    z_edges = []
    for node_a, node_b in G.edges(data=False):
        x_edges.extend([pos[node_a][0], pos[node_b][0], None])
        y_edges.extend([pos[node_a][1], pos[node_b][1], None])
        z_edges.extend([pos[node_a][2], pos[node_b][2], None])
    # Hide all axis decorations — only the 3D structure should be visible.
    axis = dict(
        showbackground=False,
        showline=False,
        zeroline=False,
        showgrid=False,
        showticklabels=False,
        title="",
    )
    edges = go.Scatter3d(
        x=x_edges,
        y=y_edges,
        z=z_edges,
        mode="lines",
        line={"color": edge_colors, "width": 10},
        text=[
            " / ".join(list(edge_type))
            for edge_type in nx.get_edge_attributes(G, "kind").values()
        ],
        hoverinfo="text",
    )
    fig = go.Figure(
        data=[nodes, edges],
        layout=go.Layout(
            title=plot_title,
            width=figsize[0],
            height=figsize[1],
            showlegend=False,
            scene=dict(
                xaxis=dict(axis),
                yaxis=dict(axis),
                zaxis=dict(axis),
            ),
            margin=dict(t=100),
        ),
    )
    return fig
import os
def get_configuration_route(model_name: str) -> str:
    """Gets the prediction configuration file of a model.

    Args:
        model_name (str): Name of the model

    Raises:
        ModelToLoadNotFoundError: The model to load could not be found or
            opened.

    Returns:
        str: Evaluation file
    """
    try:
        full_filename = Files.MODEL_PREDICTION_CONFIGURATION_FMT.format(
            model_name)
        if os.path.isfile(full_filename):
            # Serve the file relative to its directory so the framework can
            # guard against path traversal.
            return send_from_directory(os.path.dirname(full_filename),
                                       os.path.basename(full_filename))
        raise ModelToLoadNotFoundError()
    except errors.Error as error:
        # Known service errors become structured error responses.
        return create_error_response(error)
def mag_scale_rel_to_hazardlib(mag_scale_rel, use_default=False):
    """
    Returns the magnitude scaling relation in a format readable by
    openquake.hazardlib

    :param mag_scale_rel: an instance of BaseMSR, or the name (str) of a
        supported scaling relation
    :param use_default: when the input is neither, fall back to the
        Wells & Coppersmith (1994) relation instead of raising
    :raises ValueError: for an unsupported relation name, or when the
        relation is undefined and ``use_default`` is False
    """
    if isinstance(mag_scale_rel, BaseMSR):
        return mag_scale_rel
    if isinstance(mag_scale_rel, str):
        # Membership test on the mapping itself (no need for .keys()).
        if mag_scale_rel not in SCALE_RELS:
            raise ValueError('Magnitude scaling relation %s not supported!'
                             % mag_scale_rel)
        return SCALE_RELS[mag_scale_rel]()
    if use_default:
        # Returns the Wells and Coppersmith relation
        return WC1994()
    raise ValueError('Magnitude Scaling Relation Not Defined!')
import toml
def write_config(conf):
    """
    write_config(conf) function dumps app configuration to TOML file $NOTESDIR/config

    :param conf: Dictionary containing configuration
        (see_default_config as a sample structure)
    :return bool: returns True on successful write of configfile
    """
    # Write config to the file path reported by get_config_file().
    # TODO: add error handling for an unwritable or missing config path.
    with open(get_config_file(), "w") as configf:
        toml.dump(conf, configf)
    return True
import regex
import tokenize
def __get_words(text, by_spaces):
    """
    Helper function which splits the given text string into words. If by_spaces is false, then text like
    '01-02-2014' will be split into 3 separate words. For backwards compatibility, this is the default for all
    expression functions.

    :param text: the text to split
    :param by_spaces: whether words should be split only by spaces or by punctuation like '-', '.' etc
    """
    if by_spaces:
        # V0 keeps legacy regex-module splitting semantics.
        splits = regex.split(r'\s+', text, flags=regex.MULTILINE | regex.UNICODE | regex.V0)
        return [split for split in splits if split]  # return only non-empty
    else:
        # NOTE(review): the stdlib ``tokenize`` module imported above is not
        # callable — this presumably relies on a project-level ``tokenize``
        # function shadowing that import; confirm the intended import.
        return tokenize(text)
from typing import Any
def gera_paragrafo(data: pd.DataFrame) -> pd.DataFrame:
    """Aggregate lemmas into one row per paragraph.

    Splits ``location`` into a top-level division and a paragraph id, drops
    rows with missing values (mutating ``data`` in place), then joins the
    lemmas of each (author, text, file, division, paragraph, genero) group
    into a single space-separated string.
    """
    data[["div_sup", "par"]] = data.location.str.split(".", n=1, expand=True)
    data.dropna(inplace=True)
    grouped: Any = (
        data
        .groupby(["author", "text", "file", "div_sup", "par", "genero"])
        .agg({"lemma": " ".join})
    )
    flat: Any = grouped.reset_index()
    return flat.loc[:, :]
def _parse_boolean(xml_boolean):
"""Converts strings "true" and "false" from XML files to Python bool"""
if xml_boolean is not None:
assert xml_boolean in ["true", "false"], \
"The boolean string must be \"true\" or \"false\""
return {"true": True, "false": False}[xml_boolean] | 6d9d1b617f8935d1684bd24bbea06d00ca2a5b4a | 32,217 |
def to_heterogeneous(G, ntypes, etypes, ntype_field=NTYPE,
                     etype_field=ETYPE, metagraph=None):
    """Convert a homogeneous graph to a heterogeneous graph and return.

    The input graph should have only one type of nodes and edges. Each node and edge
    stores an integer feature as its type ID
    (specified by :attr:`ntype_field` and :attr:`etype_field`).
    DGL uses it to retrieve the type names stored in the given
    :attr:`ntypes` and :attr:`etypes` arguments.

    The function will automatically distinguish edge types that have the same given
    type IDs but different src and dst type IDs. For example, it allows both edges A and B
    to have the same type ID 0, but one has (0, 1) and the other as (2, 3) as the
    (src, dst) type IDs. In this case, the function will "split" edge type 0 into two types:
    (0, ty_A, 1) and (2, ty_B, 3). In another word, these two edges share the same edge
    type name, but can be distinguished by an edge type triplet.

    The function stores the node and edge IDs in the input graph using the ``dgl.NID``
    and ``dgl.EID`` names in the ``ndata`` and ``edata`` of the resulting graph.
    It also copies any node/edge features from :attr:`G` to the returned heterogeneous
    graph, except for reserved fields for storing type IDs (``dgl.NTYPE`` and ``dgl.ETYPE``)
    and node/edge IDs (``dgl.NID`` and ``dgl.EID``).

    Parameters
    ----------
    G : DGLGraph
        The homogeneous graph.
    ntypes : list[str]
        The node type names.
    etypes : list[str]
        The edge type names.
    ntype_field : str, optional
        The feature field used to store node type. (Default: ``dgl.NTYPE``)
    etype_field : str, optional
        The feature field used to store edge type. (Default: ``dgl.ETYPE``)
    metagraph : networkx MultiDiGraph, optional
        Metagraph of the returned heterograph.
        If provided, DGL assumes that G can indeed be described with the given metagraph.
        If None, DGL will infer the metagraph from the given inputs, which could be
        costly for large graphs.

    Returns
    -------
    DGLGraph
        A heterogeneous graph.

    Notes
    -----
    The returned node and edge types may not necessarily be in the same order as
    ``ntypes`` and ``etypes``.

    Examples
    --------
    The following example uses PyTorch backend.

    >>> import dgl
    >>> import torch
    >>> hg = dgl.heterograph({
    ...     ('user', 'develops', 'activity'): (torch.tensor([0, 1]), torch.tensor([1, 2])),
    ...     ('developer', 'develops', 'game'): (torch.tensor([0, 1]), torch.tensor([0, 1]))
    ...     })
    >>> print(hg)
    Graph(num_nodes={'activity': 3, 'developer': 2, 'game': 2, 'user': 2},
          num_edges={('developer', 'develops', 'game'): 2, ('user', 'develops', 'activity'): 2},
          metagraph=[('developer', 'game', 'develops'), ('user', 'activity', 'develops')])

    We first convert the heterogeneous graph to a homogeneous graph.

    >>> g = dgl.to_homogeneous(hg)
    >>> print(g)
    Graph(num_nodes=9, num_edges=4,
          ndata_schemes={'_TYPE': Scheme(shape=(), dtype=torch.int64),
                         '_ID': Scheme(shape=(), dtype=torch.int64)}
          edata_schemes={'_TYPE': Scheme(shape=(), dtype=torch.int64),
                         '_ID': Scheme(shape=(), dtype=torch.int64)})
    >>> g.ndata
    {'_TYPE': tensor([0, 0, 0, 1, 1, 2, 2, 3, 3]), '_ID': tensor([0, 1, 2, 0, 1, 0, 1, 0, 1])}
    Nodes 0, 1, 2 for 'activity', 3, 4 for 'developer', 5, 6 for 'game', 7, 8 for 'user'
    >>> g.edata
    {'_TYPE': tensor([0, 0, 1, 1]), '_ID': tensor([0, 1, 0, 1])}
    Edges 0, 1 for ('developer', 'develops', 'game'), 2, 3 for ('user', 'develops', 'activity')

    Now convert the homogeneous graph back to a heterogeneous graph.

    >>> hg_2 = dgl.to_heterogeneous(g, hg.ntypes, hg.etypes)
    >>> print(hg_2)
    Graph(num_nodes={'activity': 3, 'developer': 2, 'game': 2, 'user': 2},
          num_edges={('developer', 'develops', 'game'): 2, ('user', 'develops', 'activity'): 2},
          metagraph=[('developer', 'game', 'develops'), ('user', 'activity', 'develops')])

    Retrieve the original node/edge IDs.

    >>> hg_2.ndata[dgl.NID]
    {'activity': tensor([0, 1, 2]),
     'developer': tensor([3, 4]),
     'game': tensor([5, 6]),
     'user': tensor([7, 8])}
    >>> hg_2.edata[dgl.EID]
    {('developer', 'develops', 'game'): tensor([0, 1]),
     ('user', 'develops', 'activity'): tensor([2, 3])}

    See Also
    --------
    to_homogeneous
    """
    if (hasattr(G, 'ntypes') and len(G.ntypes) > 1
            or hasattr(G, 'etypes') and len(G.etypes) > 1):
        raise DGLError('The input graph should be homogeneous and have only one '
                       ' type of nodes and edges.')
    num_ntypes = len(ntypes)
    idtype = G.idtype
    device = G.device
    ntype_ids = F.asnumpy(G.ndata[ntype_field])
    etype_ids = F.asnumpy(G.edata[etype_field])
    # relabel nodes to per-type local IDs
    ntype_count = np.bincount(ntype_ids, minlength=num_ntypes)
    ntype_offset = np.insert(np.cumsum(ntype_count), 0, 0)
    ntype_ids_sortidx = np.argsort(ntype_ids)
    ntype_local_ids = np.zeros_like(ntype_ids)
    node_groups = []
    # Nodes of the same type become contiguous in the sort order; assign
    # each one a 0-based local ID within its type.
    for i in range(num_ntypes):
        node_group = ntype_ids_sortidx[ntype_offset[i]:ntype_offset[i+1]]
        node_groups.append(node_group)
        ntype_local_ids[node_group] = np.arange(ntype_count[i])
    src, dst = G.all_edges(order='eid')
    src = F.asnumpy(src)
    dst = F.asnumpy(dst)
    src_local = ntype_local_ids[src]
    dst_local = ntype_local_ids[dst]
    # a 2D tensor of shape (E, 3). Each row represents the (stid, etid, dtid) tuple.
    edge_ctids = np.stack([ntype_ids[src], etype_ids, ntype_ids[dst]], 1)
    # infer metagraph and canonical edge types
    # No matter which branch it takes, the code will generate a 2D tensor of shape (E_m, 3),
    # E_m is the set of all possible canonical edge tuples. Each row represents the
    # (stid, etid, dtid) tuple. We then compute a 2D tensor of shape (E, E_m) using the
    # above ``edge_ctids`` matrix. Each element i,j indicates whether the edge i is of the
    # canonical edge type j. We can then group the edges of the same type together.
    if metagraph is None:
        canonical_etids, _, etype_remapped = \
            utils.make_invmap(list(tuple(_) for _ in edge_ctids), False)
        etype_mask = (etype_remapped[None, :] == np.arange(len(canonical_etids))[:, None])
    else:
        ntypes_invmap = {nt: i for i, nt in enumerate(ntypes)}
        etypes_invmap = {et: i for i, et in enumerate(etypes)}
        canonical_etids = []
        for i, (srctype, dsttype, etype) in enumerate(metagraph.edges(keys=True)):
            srctype_id = ntypes_invmap[srctype]
            etype_id = etypes_invmap[etype]
            dsttype_id = ntypes_invmap[dsttype]
            canonical_etids.append((srctype_id, etype_id, dsttype_id))
        canonical_etids = np.asarray(canonical_etids)
        etype_mask = (edge_ctids[None, :] == canonical_etids[:, None]).all(2)
    edge_groups = [etype_mask[i].nonzero()[0] for i in range(len(canonical_etids))]
    data_dict = dict()
    canonical_etypes = []
    # Build the per-canonical-type (src, dst) edge lists in local IDs.
    for i, (stid, etid, dtid) in enumerate(canonical_etids):
        src_of_etype = src_local[edge_groups[i]]
        dst_of_etype = dst_local[edge_groups[i]]
        canonical_etypes.append((ntypes[stid], etypes[etid], ntypes[dtid]))
        data_dict[canonical_etypes[-1]] = \
            (src_of_etype, dst_of_etype)
    hg = heterograph(data_dict,
                     {ntype: count for ntype, count in zip(ntypes, ntype_count)},
                     idtype=idtype, device=device)
    ntype2ngrp = {ntype : node_groups[ntid] for ntid, ntype in enumerate(ntypes)}
    # features
    # Copy every node/edge feature over, skipping the reserved type/ID fields.
    for key, data in G.ndata.items():
        if key in [ntype_field, NID]:
            continue
        for ntid, ntype in enumerate(hg.ntypes):
            rows = F.copy_to(F.tensor(ntype2ngrp[ntype]), F.context(data))
            hg._node_frames[ntid][key] = F.gather_row(data, rows)
    for key, data in G.edata.items():
        if key in [etype_field, EID]:
            continue
        for etid in range(len(hg.canonical_etypes)):
            rows = F.copy_to(F.tensor(edge_groups[etid]), F.context(data))
            hg._edge_frames[hg.get_etype_id(canonical_etypes[etid])][key] = \
                F.gather_row(data, rows)
    # Record the original IDs of the nodes/edges
    for ntid, ntype in enumerate(hg.ntypes):
        hg._node_frames[ntid][NID] = F.copy_to(F.tensor(ntype2ngrp[ntype]), device)
    for etid in range(len(hg.canonical_etypes)):
        hg._edge_frames[hg.get_etype_id(canonical_etypes[etid])][EID] = \
            F.copy_to(F.tensor(edge_groups[etid]), device)
    return hg
def Solution(image):
    """
    input: same size (256*256) rgb image
    output: the label of the image
        "l" -> left
        "m" -> middle
        "r" -> right
        "o" -> other(NO target)
    if no target detected, return "o", which is the initial value
    """
    # Initialise the two corner points that will locate the target area.
    topLeft = [0,0]
    bottomRight = [0,0]
    pred_label = "o" # initial recognition label ("other"/no target)
    # Convert to grayscale and binarise with a fixed threshold.
    thresh = 200
    image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    image = cv2.threshold(image, thresh, 255, cv2.THRESH_BINARY)[1]
    # Find external contours in the binary image.
    C,h= cv2.findContours(image, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    if len(C) > 0:
        for i in range(len(C)):
            c = C[i]
            area = cv2.contourArea(c)
            rect = cv2.minAreaRect(c)
            box = cv2.boxPoints(rect)
            # convert all coordinates floating point values to int
            box = np.int0(box)
            # Ignore small contours — only areas above 500 px are candidates.
            if area>500:
                topLeft = [box[1][0],box[1][1]]
                bottomRight = [box[3][0],box[3][1]]
                # Cut out the traffic sign with slight remnants around it.
                cut = image[topLeft[1]:bottomRight[1],topLeft[0]:bottomRight[0]]
                Ishape = cut.shape
                # Reject crops that are too small to match the templates.
                if Ishape[0] <30 or Ishape[1] < 40:
                    continue
                else:
                    # Match against two different templates; each call returns
                    # the top-left and bottom-right of the matched region.
                    # t1.jpg is the x-like character.
                    topleft_1,bottomright_1 = Matcher(cut,"./template/t2.jpg")
                    topleft_2,bottomright_2= Matcher(cut,"./template/t1.jpg")
                    # Only classify when both templates matched.
                    if topleft_1 and topleft_2 and bottomright_1 and bottomright_2:
                        pred_label = helper(topleft_1,bottomright_1,topleft_2,bottomright_2,Ishape=Ishape)
    return pred_label
def index_to_tag(v, index_tag):
    """Map the nonzero positions of a vector to their tag names.

    :param v: vector whose nonzero entries mark active tags
    :param index_tag: mapping from index to tag string
    :return: space-separated string of the active tags
    """
    active = np.nonzero(v)[0]
    return ' '.join(index_tag[i] for i in active)
import time
import torch
import sys
def train(train_loader, model, criterion, optimizer, epoch, opt):
    """Run one epoch of training and return the average loss.

    :param train_loader: iterable of (inputs, labels) batches
    :param model: network being optimized (switched to train mode here)
    :param criterion: loss applied to the duplicated model outputs
    :param optimizer: optimizer stepped once per batch
    :param epoch: current epoch index (used for periodic logging)
    :param opt: options namespace; only ``print_freq`` is read here
    :return: average loss over the epoch
    """
    model.train()
    losses = AverageMeter()
    end = time.time()
    for idx, (train_x, labels) in enumerate(train_loader):
        train_x = train_x.cuda()
        labels = labels.cuda()
        bsz = labels.shape[0]
        #compute loss
        output = model(train_x)
        output = output.reshape(output.shape[0],1,-1)
        # Duplicate the output along dim 1 — presumably the criterion expects
        # a [bsz, n_views, ...] multi-view layout; TODO confirm.
        output_2x = torch.cat((output, output), dim=1)
        loss = criterion(output_2x, labels)
        #update metric
        losses.update(loss.item(), bsz)
        # SGD
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # print info
        # NOTE(review): the gate uses the epoch index, not the batch index, so
        # matching epochs log every batch — confirm this is intended.
        if (epoch + 1) % opt.print_freq == 0:
            print('Train: [{0}][{1}/{2}]\t'
                  'training loss {loss.val:.3f} (average: {loss.avg:.3f})'.format(
                epoch, idx + 1, len(train_loader), loss=losses))
            sys.stdout.flush()
    return losses.avg
def first_phrase_span(utterance, phrases):
    """Returns the span (start, end+1) of the first phrase from the given list
    that is found in the utterance. Returns (-1, -1) if no phrase is found.

    :param utterance: The utterance to search in
    :param phrases: a list of phrases to be tried (in the given order)
    :rtype: tuple
    """
    # Phrases are tried in order; the first hit wins.
    for candidate in phrases:
        start = phrase_pos(utterance, candidate)
        if start != -1:
            return start, start + len(candidate)
    return -1, -1
from datetime import datetime
def parse_date(value):
    """Parse a string and return a datetime.date.

    Raise ValueError if the input is well formatted but not a valid date.
    Return None if the input isn't well formatted.
    """
    match = date_re.match(value)
    if match:
        kw = {k: int(v) for k, v in match.groupdict().items()}
        # BUG FIX: ``datetime`` here is the class (``from datetime import
        # datetime``), so ``datetime.date(**kw)`` is an unbound-method call
        # and fails. Build a datetime from the year/month/day groups and
        # take its date part instead.
        return datetime(**kw).date()
import scipy
def construct_Dfunc(delays, plot=False):
    """Return interpolation functions fD(t) and fdD(t).

    fD(t) is the delay between infection and reporting at reporting time t.
    fdD(t) is its derivative.

    Parameter:

    - delays: tuples (time_report, delay_days)
    - plot: whether to generate a plot.

    Return:

    - fD: interpolation function for D(t) with t in nanoseconds.
    - fdD: interpolation function for dD/dt.
      (taking time in ns but returning dD per day.)
    - delay_str: delay string e.g. '7' or '7-9'
    """
    # Timestamps as float nanoseconds since epoch; delays as float days.
    ts0 = [float(pd.to_datetime(x[0]).to_datetime64()) for x in delays]
    Ds0 = [float(x[1]) for x in delays]
    if len(delays) == 1:
        # prevent interp1d complaining.
        ts0 = [ts0[0], ts0[0]+1e9]
        Ds0 = np.concatenate([Ds0, Ds0])
    # delay function as linear interpolation;
    # nanosecond timestamps as t value.
    fD0 = scipy.interpolate.interp1d(
        ts0, Ds0, kind='linear', bounds_error=False,
        fill_value=(Ds0[0], Ds0[-1])
    )
    # construct derivative dD/dt, smoothen out
    day = 1e9*86400 # one day in nanoseconds
    ts = np.arange(ts0[0]-3*day, ts0[-1]+3.01*day, day)
    # Central difference over a 6-day window smooths step changes in D(t).
    dDs = (fD0(ts+3*day) - fD0(ts-3*day))/6
    fdD = scipy.interpolate.interp1d(
        ts, dDs, 'linear', bounds_error=False,
        fill_value=(dDs[0], dDs[-1]))
    # reconstruct D(t) to be consistent with the smoothened derivative.
    Ds = scipy.integrate.cumtrapz(dDs, ts/day, initial=0) + Ds0[0]
    fD = scipy.interpolate.interp1d(
        ts, Ds, 'linear', bounds_error=False,
        fill_value=(Ds[0], Ds[-1]))
    Dmin, Dmax = np.min(Ds0), np.max(Ds0)
    if Dmin == Dmax:
        delay_str = f'{Dmin:.0f}'
    else:
        delay_str = f'{Dmin:.0f}-{Dmax:.0f}'
    if plot:
        # Diagnostic plot of the delay curve up to "now" (Dutch axis labels).
        fig, ax = plt.subplots(1, 1, figsize=(7, 3), tight_layout=True)
        tsx = np.linspace(
            ts[0],
            int(pd.to_datetime('now').to_datetime64())
        )
        ax.plot(pd.to_datetime(tsx.astype(int)), fD(tsx))
        ax.set_ylabel('Vertraging (dagen)')
        tools.set_xaxis_dateformat(ax, 'Rapportagedatum')
        fig.canvas.set_window_title('Vertraging infectiedatum - rapportage')
        fig.show()
    return fD, fdD, delay_str
def deimmunization_rate_80():
    """
    Real Name: b'deimmunization rate 80'
    Original Eqn: b'Recovered 80/immunity time 80'
    Units: b'person/Day'
    Limits: (None, None)
    Type: component

    b''
    """
    # Outflow of the 80+ recovered compartment as immunity wanes.
    return recovered_80() / immunity_time_80()
def connect_to_lightsail():
    """
    Uses Paramiko to create a connection to Brendan's instance. Relies on
    authentication information from a JSON file.

    :return SFTP_Client:
    """
    # Credentials/host details live in lightsail_server_info.json.
    return open_sftp_from_json(JSON_PRIVATE_DIR / 'lightsail_server_info.json')
def coleman_operator(c, cp):
    """
    The approximate Coleman operator.

    Iteration with this operator corresponds to time iteration on the Euler
    equation. Computes and returns the updated consumption policy
    c. The array c is replaced with a function cf that implements
    univariate linear interpolation over the asset grid for each
    possible value of z.

    Parameters
    ----------
    c : array_like(float)
        A NumPy array of dim len(cp.asset_grid) times len(cp.z_vals)
    cp : ConsumerProblem
        An instance of ConsumerProblem that stores primitives

    Returns
    -------
    array_like(float)
        The updated policy, where updating is by the Coleman
        operator.
    """
    # === simplify names, set up arrays === #
    R, Pi, beta, du, b = cp.R, cp.Pi, cp.beta, cp.du, cp.b
    asset_grid, z_vals = cp.asset_grid, cp.z_vals
    z_size = len(z_vals)
    gamma = R * beta
    vals = np.empty(z_size)
    # === linear interpolation to get consumption function === #
    def cf(a):
        """
        The call cf(a) returns an array containing the values c(a,
        z) for each z in z_vals. For each such z, the value c(a, z)
        is constructed by univariate linear approximation over asset
        space, based on the values in the array c
        """
        for i in range(z_size):
            vals[i] = np.interp(a, asset_grid, c[:, i])
        return vals
    # === solve for root to get Kc === #
    Kc = np.empty(c.shape)
    for i_a, a in enumerate(asset_grid):
        for i_z, z in enumerate(z_vals):
            def h(t):
                # Euler-equation residual at consumption level t; the max
                # enforces the borrowing constraint.
                expectation = np.dot(du(cf(R * a + z - t)), Pi[i_z, :])
                return du(t) - max(gamma * expectation, du(R * a + z + b))
            # Bracketed root-find over feasible consumption levels.
            Kc[i_a, i_z] = brentq(h, 1e-8, R * a + z + b)
    return Kc
def generate_scanset_metadata( image_set_dictionary, html_base_path, session_id ):
    """Build the per-session metadata dict for a set of NII images and their
    PNG equivalents.

    Returns {'session_name': ..., 'session_metadata': ...} where the metadata
    holds per-scan dimensions, URLs and mask info.

    NOTE(review): this is Python 2 code (print statements); html_base_path is
    unused — the module-level html_path_root is used instead. Confirm which
    is intended.
    """
    cur_subj_info = {}
    """need to think through the data structure a bit more.... but can always adjust later """
    cur_subj_info['session_id'] = session_id
    #cur_subj_info['img_id'] = counter
    # subject id is the first path component of the session id
    cur_subj_info['subject_id'] = session_id.split('/')[0]
    global counter
    nii_image_dict = image_set_dictionary['nii_images']
    png_image_dict = image_set_dictionary['png_image_set']
    scan_metadata = {}
    for scan in nii_image_dict:
        print "propcessing ", scan
        nii_img = nii_image_dict[scan]['base_image'][0]
        print nii_img
        # if 'mask' not in scan:
        # if 'mask' not in scan:
        # nii_img = nii_image_dict[scan]['base_image'][0]
        # else:
        # continue
        # print "HI DAVE!"
        if not nii_img:
            print "did not find base image for",nii_image_dict
            continue
        png_img = html_path_root+ png_image_dict[scan]
        print nii_img,"is being passed"
        # Pull voxel/slice geometry from the NII header and pixel size
        # from the rendered PNG.
        (dim_x, dim_y, dim_z, vox_size_x, vox_size_y, vox_size_z, image_orientation )= igcf.get_nii_image_info(nii_img)
        image_info = Image.open(png_img)
        width, height = image_info.size
        #print width,height,dim_x,dim_y,dim_z,vox_size_x,vox_size_y,vox_size_z
        scan_info = {}
        scan_info['slice_width'] = dim_x
        scan_info['slice_height'] = dim_y
        scan_info['num_slices'] = dim_z
        scan_info['main_image_width'] = width
        scan_info['main_image_height'] = height
        scan_info['nii_image'] = nii_img
        scan_info['base_url'] = png_img.replace(html_path_root,'')
        scan_metadata[scan] = scan_info
        ### There can be one or MORE masks for a given base image... so I will return a list of
        # dictionaries..
        mask_list = nii_image_dict[scan]['masks']
        mask_id = 0
        mask_info_list = []
        for mask in mask_list:
            cur_mask_info = {}
            ### I'll call the mask by its basename
            print mask,"was passed..."
            mask_base = os.path.basename(mask)
            nii_img = nii_image_dict[scan]['masks'][mask_id]
            print nii_image_dict,'mask_id is',mask_id
            print "nii maeg found should be",nii_img
            if not nii_img:
                print "did not find a valid mask image for ",nii_image_dict
                continue
            cur_mask_info['name'] = mask_base
            cur_mask_info['id'] = mask_id
            cur_mask_info['nii_file'] = nii_img
            ## NEED TO ADD IN THE MASK_URL
            # cur_mask_info['mask_url'] =
            print png_image_dict
            png_img = html_path_root+ png_image_dict[scan]
            print nii_img,"is being passed"
            cur_mask_info['mask_url'] = png_img.replace(html_path_root,'')
            mask_info_list.append( cur_mask_info )
            mask_id +=1
        # print cur_mask_info
        # NOTE(review): 'masks' is stored both on cur_subj_info (overwritten
        # each scan) and on scan_metadata[scan] wrapped in an extra list —
        # confirm which consumers rely on each shape.
        cur_subj_info['masks'] = mask_info_list
        scan_metadata[scan]['masks'] = [ mask_info_list]
    # print mask_info_list
    cur_subj_info['image_data'] = scan_metadata
    counter += 1
    return { 'session_name': session_id , 'session_metadata': cur_subj_info }
def read_error_codes(src_root='src/mongo'):
    """Define callback, call parse_source_files() with callback, save matches to global codes list.

    Returns:
        (codes, errors, seen): the module-level list of all assert locations,
        the locations that failed validation, and a {code: location} map.
    """
    seen = {}
    errors = []
    dups = defaultdict(list)
    # NOTE(review): nothing ever appends to `skips`, so the skip report below
    # never fires -- possibly dead code.
    skips = []
    malformed = []  # type: ignore
    # define validation callbacks
    def check_dups(assert_loc):
        """Check for duplicates."""
        # Records every location into the module-level `codes` list as a side effect.
        codes.append(assert_loc)
        code = assert_loc.code
        if not code in seen:
            seen[code] = assert_loc
        else:
            if not code in dups:
                # on first duplicate, add original to dups, errors
                dups[code].append(seen[code])
                errors.append(seen[code])
            dups[code].append(assert_loc)
            errors.append(assert_loc)
    def validate_code(assert_loc):
        """Check for malformed codes."""
        code = int(assert_loc.code)
        if code > MAXIMUM_CODE:
            malformed.append(assert_loc)
            errors.append(assert_loc)
    def callback(assert_loc):
        validate_code(assert_loc)
        check_dups(assert_loc)
    parse_source_files(callback, src_root)
    # Error code 0 is reserved; report any use of it.
    if "0" in seen:
        code = "0"
        bad = seen[code]
        errors.append(bad)
        line, col = get_line_and_column_for_position(bad)
        print("ZERO_CODE:")
        print("  %s:%d:%d:%s" % (bad.sourceFile, line, col, bad.lines))
    for loc in skips:
        line, col = get_line_and_column_for_position(loc)
        print("EXCESSIVE SKIPPING OF ERROR CODES:")
        print("  %s:%d:%d:%s" % (loc.sourceFile, line, col, loc.lines))
    for code, locations in list(dups.items()):
        print("DUPLICATE IDS: %s" % code)
        for loc in locations:
            line, col = get_line_and_column_for_position(loc)
            print("  %s:%d:%d:%s" % (loc.sourceFile, line, col, loc.lines))
    for loc in malformed:
        line, col = get_line_and_column_for_position(loc)
        print("MALFORMED ID: %s" % loc.code)
        print("  %s:%d:%d:%s" % (loc.sourceFile, line, col, loc.lines))
    return (codes, errors, seen) | 46f64798fd3e7010a96e054600557464cf99eade | 32,229
def filter_check_vlan_number(value):
"""
Function to check for a good VLAN number in a template
:param value:
:return:
"""
error = f'{value} !!!! possible error the VLAN# should be between 1 and 4096!!!!'
if not value: # pylint: disable=no-else-return
J2_FILTER_LOGGER.info('filter_check_vlan_number %s', error)
return error
else:
try:
if int(value) not in range(1, 4097): # pylint: disable=no-else-return
return error
else:
return value
except ValueError as e: # pylint: disable=invalid-name
J2_FILTER_LOGGER.info('filter_check_vlan_number %s, caught %s', error, e)
return error | 6c9e060b13f49048f056b72a6def2d1d15241a74 | 32,230 |
def _sanitize(element) -> Gst.Element:
    """
    Guard that passes *element* through unchanged, raising if it is `None`.
    """
    if element is None:
        raise Exception("Element is none!")
    return element | f07062474dcf2671cb1c3d13a7e80d9ee96b9878 | 32,231
import pytz
def mean(dt_list):
    """
    .. py:function:: mean(dt_list)
    Returns the mean datetime from an Iterable collection of datetime objects.
    Collection can be all naive datetime objects or all datatime objects with tz
    (if non-naive datetimes are provided, result will be cast to UTC).
    However, collection cannot be a mix of naive and non-naive datetimes.
    Can handle micro-second level datetime differences. Can handle Collection of
    datetime objects with different timezones. Works with lists or pandas.Series.
    :param collection.Iterable dt_list: Iterable list or Series of datetime objects
    :return: mean datetime
    :rtype: datetime.datetime
    :raises TypeError: if operand is not type Iterable or
        if operand contains naive and non-naive datetime objects or
        if result is not type datetime.datetime
    """
    # NOTE(review): relies on a module-level `dt` alias (presumably
    # "import datetime as dt") plus _OPER_ERR_MSG/_LEN_ERR_MSG/validate_dt
    # defined elsewhere in this module -- confirm.
    try:
        list_size = len(dt_list)
    except TypeError:
        raise TypeError(_OPER_ERR_MSG + str(dt_list))
    if list_size == 1:
        mean_dt = dt_list[0]
    elif (list_size == 2) and (dt_list[0] == dt_list[1]):
        # Two identical datetimes: mean is trivially either one.
        mean_dt = dt_list[0]
    else:
        try:
            # Anchor at the epoch (tz-aware iff the inputs are), average the
            # offsets in seconds, then add the mean delta back.
            if dt_list[0].tzinfo:
                base_dt = dt.datetime(1970, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
            else:
                base_dt = dt.datetime(1970, 1, 1)
            delta_total = 0
            for item in dt_list:
                delta_total += (item - base_dt).total_seconds()
            delta = delta_total / float(list_size)
            mean_dt = base_dt + dt.timedelta(seconds=delta)
        except TypeError:
            # Subtracting naive from aware (or vice versa) raises TypeError.
            raise TypeError(_OPER_ERR_MSG + str(dt_list))
        except IndexError:
            raise IndexError(_LEN_ERR_MSG)
    return validate_dt(mean_dt) | 2d56eeea44d2afbf752672abb6870d7045745a0f | 32,232
from typing import Optional
from typing import Dict
def win_get_nonblocking(name: str, src_weights: Optional[Dict[int, float]] = None,
                        require_mutex: bool = False) -> int:
    """ Passively get the tensor(s) from neighbors' shared window memory into
    local shared memory, which cannot be accessed in python directly.
    The win_update function is responsible for fetching that memory.
    This is a non-blocking function, which will return without waiting until
    the win_get operation has really finished.
    Args:
        name: The unique name to associate the window object.
        src_weights: A dictionary that maps the source ranks to the weight.
            Namely, {rank: weight} means get tensor from rank neighbor multiplying the weight.
            If not provided, src_weights will be set as all neighbor ranks defined by
            virtual topology with weight 1.0.
            Note src_weights should only contain the in-neighbors.
        require_mutex: If set to be true, out-neighbor process's window mutex will be
            acquired.
    Returns:
        A handle to the win_get operation that can be used with `win_poll()` or
        `win_wait()`.
    Raises:
        ValueError: if src_weights contains a rank that is not an in-neighbor.
    """
    function = "bluefog_torch_win_get"
    # Default: pull from every in-neighbor with weight 1.0.
    src_weights = ({rank: 1.0 for rank in in_neighbor_ranks()}
                   if src_weights is None else src_weights)
    if not set(src_weights.keys()).issubset(set(in_neighbor_ranks())):
        # Fixed message: previously rendered as "containranks ...  belong".
        raise ValueError(
            "The key of src_weights should only contain ranks that "
            "belong to in-neighbors.")
    handle = getattr(mpi_lib, function)(name, src_weights, require_mutex)
    # Remember the window name so win_poll()/win_wait() can resolve the handle.
    _win_handle_map[handle] = name
    return handle | a641f963ac3434ece7ded8a642c7833fc8a2b30c | 32,233
def parse(file, beautifulsoup=None, makeelement=None, **bsargs):
"""Parse a file into an ElemenTree using the BeautifulSoup parser.
You can pass a different BeautifulSoup parser through the
`beautifulsoup` keyword, and a diffent Element factory function
through the `makeelement` keyword. By default, the standard
``BeautifulSoup`` class and the default factory of `lxml.html` are
used.
"""
if not hasattr(file, 'read'):
file = open(file)
root = _parse(file, beautifulsoup, makeelement, **bsargs)
return etree.ElementTree(root) | 5ccf2bfc8f1d6ec4f83200b250755ab149fd60dd | 32,234 |
def get_L_dashdash_b1_d(L_dashdash_b1_d_t):
    """Aggregate the hourly load into daily totals.

    Args:
        L_dashdash_b1_d_t: hourly solar-corrected hot-water load for bathtub
            faucet filling (MJ/h), length 8760
    Returns:
        daily solar-corrected hot-water load for bathtub faucet filling
        (MJ/d), shape (365,)
    """
    hourly = L_dashdash_b1_d_t.reshape((365, 24))
    return hourly.sum(axis=1) | aa541c5f82aa94c33c65ac264f2df420020ca443 | 32,235
def split_df(df, index_range, columns, iloc=False):
"""Split a data frame by selecting from columns a particular range.
Args:
df (:class:`pd.DataFrame`): Data frame to split.
index_range (tuple): Tuple containing lower and upper limit of the
range to split the index by. If `index_range = (a, b)`, then
`[a, b)` is taken.
columns (list[object]): Columns to select.
iloc (bool, optional): The index range is the integer location instead
of the index value. Defaults to `False`.
Returns:
tuple[:class:`pd.DataFrame`]: Selected rows from selected columns
and the remainder.
"""
if iloc:
inds = np.arange(df.shape[0])
rows = (inds >= index_range[0]) & (inds < index_range[1])
else:
rows = (df.index >= index_range[0]) & (df.index < index_range[1])
selected = pd.DataFrame([df[name][rows] for name in columns]).T
remainder = pd.DataFrame(
[df[name][~rows] for name in columns]
+ [df[name] for name in set(df.columns) - set(columns)]
).T
# Fix order of columns.
selected_inds = [i for i, c in enumerate(df.columns) if c in columns]
selected = selected.reindex(df.columns[np.array(selected_inds)], axis=1)
remainder = remainder.reindex(df.columns, axis=1)
return selected, remainder | 84e77e60a0f9c73ff3147c3648310875e5b58228 | 32,236 |
def basemap_to_tiles(basemap, day=yesterday, **kwargs):
    """Turn a basemap into a TileLayer object.
    Parameters
    ----------
    basemap : class:`xyzservices.lib.TileProvider` or Dict
        Basemap description coming from ipyleaflet.basemaps.
    day: string
        If relevant for the chosen basemap, you can specify the day for
        the tiles in the "%Y-%m-%d" format. Defaults to yesterday's date.
    kwargs: key-word arguments
        Extra key-word arguments to pass to the TileLayer constructor.
    """
    if isinstance(basemap, xyzservices.lib.TileProvider):
        # TileProvider can substitute a {time} placeholder itself.
        url = basemap.build_url(time=day)
    elif isinstance(basemap, dict):
        url = basemap.get("url", "")
    else:
        raise ValueError("Invalid basemap type")
    # .get is used in both cases -- presumably TileProvider is dict-like; verify.
    return TileLayer(
        url=url,
        max_zoom=basemap.get('max_zoom', 18),
        min_zoom=basemap.get('min_zoom', 1),
        attribution=basemap.get('html_attribution', '') or basemap.get('attribution', ''),
        name=basemap.get('name', ''),
        **kwargs
    ) | ccaf3430294216e7015167dad3ef82bee8071192 | 32,237
import os
def ensure_directory_exists(path, expand_user=True, file=False):
""" Create a directory if it doesn't exists.
Expanding '~' to the user's home directory on POSIX systems.
"""
if expand_user:
path = os.path.expanduser(path)
if file:
directory = os.path.dirname(path)
else:
directory = path
if not os.path.exists(directory) and directory:
try:
os.makedirs(directory)
except OSError as e:
# A parallel process created the directory after the existence check.
pass
return(path) | 5e353ad854792e7af57af1a37700e4ffd8e83967 | 32,238 |
def sms_count(request):
    """Return count of SMSs in Inbox (empty string when there are none)."""
    count = Messaging.objects.filter(hl_status__exact='Inbox').count()
    return HttpResponse(count if count else "") | c445b7c5fd54f632fc6f7c3d0deaeca47c1dd382 | 32,239
from pathlib import Path
import yaml
def deserializer(file_name: Path) -> Deserializer:
"""Load and parse the data deserialize declaration"""
with open(file_name) as f:
return Deserializer(yaml.load(f, Loader=SafeLoader)) | 5df5de579e359e7d1658dd00cf279baacb844f1f | 32,240 |
import sys
def read_annotations(**kws):
    """Read annotations from either a GAF file or NCBI's gene2go file.

    Keyword Args:
        gaf: path to a GAF association file.
        gene2go: path to NCBI's gene2go file; requires ``taxid`` as well.
        taxid: taxon id used when reading gene2go.
    Returns:
        gene-to-GO associations, or None when neither source keyword is given.
    """
    # No recognized source supplied: silently return None.
    if 'gaf' not in kws and 'gene2go' not in kws:
        return
    gene2gos = None
    if 'gaf' in kws:
        gene2gos = read_gaf(kws['gaf'], prt=sys.stdout)
        if not gene2gos:
            raise RuntimeError("NO ASSOCIATIONS LOADED FROM {F}".format(F=kws['gaf']))
    elif 'gene2go' in kws:
        # NOTE(review): assert is stripped under `python -O`; a raise would be safer.
        assert 'taxid' in kws, 'taxid IS REQUIRED WHEN READING gene2go'
        gene2gos = read_ncbi_gene2go(kws['gene2go'], taxids=[kws['taxid']])
        if not gene2gos:
            raise RuntimeError("NO ASSOCIATIONS LOADED FROM {F} FOR TAXID({T})".format(
                F=kws['gene2go'], T=kws['taxid']))
    return gene2gos | ccf01d712e20ad0e0f55e3f7e1e32fdc845e781a | 32,241
def p2db(a):
"""Returns decibel of power ratio"""
return 10.0*np.log10(a) | 5177d9ca5ca0ec749e64ebf3e704cf496fa365db | 32,242 |
def buildDictionary(message):
"""
counts the occurrence of every symbol in the message and store it in a python dictionary
parameter:
message: input message string
return:
python dictionary, key = symbol, value = occurrence
"""
_dict = dict()
for c in message:
if c not in _dict.keys():
_dict[c] = 1
else:
_dict[c] += 1
return _dict | 71b196aaccfb47606ac12242585af4ea2554a983 | 32,243 |
import pandas as pd
import os
def boston_housing(path):
"""Load the Boston Housing data set [@harrison1978hedonic].
It contains 506 examples of housing values in suburbs of Boston,
each with 13 continuous attributes and 1 binary attribute.
The data contains the following columns:
| Feature | Description |
| --- | --- |
| CRIM | per capita crime rate by town |
| ZN | proportion of residential land zoned for lots over 25,000 sq.ft. |
| INDUS | proportion of non-retail business acres per town. |
| CHAS | Charles River dummy variable (1 if tract bounds river; 0 otherwise) |
| NOX | nitric oxides concentration (parts per 10 million) |
| RM | average number of rooms per dwelling |
| AGE | proportion of owner-occupied units built prior to 1940 |
| DIS | weighted distances to five Boston employment centres |
| RAD | index of accessibility to radial highways |
| TAX | full-value property-tax rate per $10,000 |
| PTRATIO | pupil-teacher ratio by town |
| B | 1000(Bk | 0.63)^2 where Bk is the proportion of blacks by town |
| LSTAT | % lower status of the population |
| MEDV | Median value of owner-occupied homes in $1000's |
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there. Filename is `housing.data`.
Returns:
Tuple of np.darray `x_train` and dictionary `metadata` of column
headers (feature names).
"""
path = os.path.expanduser(path)
filename = 'housing.data'
if not os.path.exists(os.path.join(path, filename)):
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/' \
'housing/housing.data'
maybe_download_and_extract(path, url)
x_train = pd.read_csv(os.path.join(path, filename),
header=None, delimiter=r"\s+").as_matrix()
columns = ['CRIM',
'ZN',
'INDUS',
'CHAS',
'NOX',
'RM',
'AGE',
'DIS',
'RAD',
'TAX',
'PTRATIO',
'B',
'LSTAT',
'MEDV']
metadata = {'columns': columns}
return x_train, metadata | 732eaeab4183fcd70930c28cdaba09da16ee3995 | 32,244 |
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
import tensorflow.keras.backend as be
def model_fit(mb_query: str, features_dict: dict, target_var: str, model_struct_fn, get_model_sample_fn,
              existing_models: dict, batch_size: int, epochs: int, patience: int, verbose: int,
              bias_query: str, model_in: str, model_out: str, out_tensorboard: str, lr: float, iter: int,
              model_save_dir: str, model_columns: list, target_values: list):
    """
    Fits a Keras model. Self-contained with the idea that it is called as a new process.
    :param mb_query: query to get the model-build data
    :param features_dict: dict of features used to build the model structure
    :param target_var: name of the field that's the dependent variable
    :param model_struct_fn: function that builds the model structure
    :param get_model_sample_fn: function that retrieves the model-build data
    :param existing_models: dict of existing models to run and add to the model-build DataFrame
    :param batch_size: batch size for model build
    :param epochs: # of epochs to run
    :param patience: patience in waiting to see if validation metric does not improve
    :param verbose: verbosity of .fit (0=quiet, 1=not)
    :param bias_query: query to calculate initial bias of output layer
    :param model_in: location of the model (for a warm start)
    :param model_out: location to store the model
    :param out_tensorboard: location of tensorboard output
    :param lr: learning rate
    :param iter: iteration we're on (for saving the model)
    :param model_save_dir: where to put the .h5 file
    :param model_columns: columns of .predict output we're interested in for plotting
    :param target_values: values of the target feature that correspond to model_columns
    :return: history dict
    """
    #from muti import tfu  commented out 5/1
    # model
    # Warm start: reload an existing model and just reset its learning rate;
    # otherwise build a fresh model with an initial output-layer bias.
    if model_in != '':
        mod = tf.keras.models.load_model(model_in)
        be.set_value(mod.optimizer.lr, lr)
    else:
        bias, p_df = dq_get_bias(bias_query)
        mod = model_struct_fn(features_dict, learning_rate=lr, output_bias=bias)
    print(mod.summary())
    # callbacks
    model_ckpt = ModelCheckpoint(model_out, monitor='val_loss', save_best_only=True)
    tensorboard = TensorBoard(
        log_dir=out_tensorboard,
        histogram_freq=1,
        write_images=True,
        embeddings_freq=100
    )
    early_stopping = tf.keras.callbacks.EarlyStopping(
        monitor='val_loss',
        verbose=1,
        patience=patience,
        mode='auto',
        restore_best_weights=True)
    print('getting data')
    data_df = get_model_sample_fn(mb_query, existing_models)
    # Split on the 'holdout' flag: 0 = training, 1 = validation.
    model_df = data_df.loc[data_df['holdout'] == 0].copy()
    valid_df = data_df.loc[data_df['holdout'] == 1].copy()
    print('modeling data set size: {0}'.format(model_df.shape[0]))
    print('validation data set size: {0}'.format(valid_df.shape[0]))
    steps_per_epoch = int(model_df.shape[0] / batch_size)
    model_ds = get_tf_dataset(features_dict, target_var, model_df, batch_size)
    valid_ds = get_tf_dataset(features_dict, target_var, valid_df, batch_size, repeats=1)
    print('starting fit')
    h = mod.fit(model_ds, epochs=epochs, steps_per_epoch=steps_per_epoch, verbose=verbose,
                callbacks=[tensorboard, model_ckpt, early_stopping], validation_data=valid_ds)
    # Persist a snapshot of the model for this iteration.
    save_file = model_save_dir + 'model' + str(iter) + '.h5'
    mod.save(save_file, overwrite=True, save_format='h5')
    # Score the validation set and plot KS / decile diagnostics.
    model_output = mod.predict(valid_ds)
    valid_df['model'] = get_pred(model_output, model_columns)
    valid_df['actual'] = valid_df[target_var].isin(target_values).astype(int)
    title = 'Validation KS<br>After {0} epochs'.format((iter + 1) * epochs)
    genu.ks_calculate(valid_df['model'], valid_df['actual'], in_browser=True, plot=True, title=title)
    title = 'Validation Decile Plot<br>After {0} epochs'.format((iter + 1) * epochs)
    genu.decile_plot(valid_df['model'], valid_df['actual'], title=title, in_browser=True)
    return h.history | add35320ef1d9f6474f3712f3222d9a5fdbb3185 | 32,245
def classify_subtrop(storm_type):
    """Return True when the system stayed purely subtropical.

    SD purely - yes
    SD then SS then TS - no
    SD then TS - no

    Fix: the old version implicitly returned None (not False) when the
    system was SD but later reached TD/TS/HU strength.
    """
    if 'SD' in storm_type:
        # Purely subtropical: never classified tropical (TD/TS/HU).
        if not np.isin(storm_type, ['TD', 'TS', 'HU']).any():
            return True
        return False
    return False | abfc8e002e798e5642e2ab4ae38fe0882259d708 | 32,246
def overridden_settings(settings):
    """Return a dict of the settings that have been overridden

    NOTE(review): despite the name, this also writes the default back for any
    unset (None) setting, so the result contains every known setting -- confirm
    whether only-overridden output was intended.
    """
    settings = Settings(settings)
    for name, dft_value in iter_default_settings():
        value = settings[name]
        # Keep the user's value when it differs from the default...
        if value != dft_value and value is not None:
            settings.update(name, value)
        elif value is None:
            # ...and fill in the default when nothing was set at all.
            settings.update(name, dft_value)
    return settings | ec76feb90dbc97012f84f9ebc75b41131dc925fe | 32,247
def ScaleImageToSize(ip, width, height):
"""Scale image to a specific size using Stephans scaler"""
smaller = ip.scale( width, height );
return smaller | 9e2ee47ab30bfca70417eafbddd84958cd582618 | 32,248 |
import types
def retrieve_parent(*, schema: types.Schema, schemas: types.Schemas) -> str:
"""
Get or check the name of the parent.
If x-inherits is True, get the name of the parent. If it is a string, check the
parent.
Raise InheritanceError if x-inherits is not defined or False.
Args:
schema: The schema to retrieve the parent for.
schemas: All the schemas.
Returns:
The parent.
"""
inherits = peek_helper.inherits(schema=schema, schemas=schemas)
if inherits is True:
return get_parent(schema=schema, schemas=schemas)
if isinstance(inherits, str):
if not check_parent(schema=schema, parent_name=inherits, schemas=schemas):
raise exceptions.InheritanceError(
f"The x-inherits value {inherits} is not a valid parent."
)
return inherits
raise exceptions.InheritanceError(
"Cannot retrieve the name of the parent if x-inherits is not defined or False."
) | 4f6fc55af7b998e02b108d1bc5fea61f2afe82f1 | 32,249 |
from .translation.vensim.vensim2py import translate_vensim
def read_vensim(mdl_file, data_files=None, initialize=True,
missing_values="warning", split_views=False,
encoding=None, **kwargs):
"""
Construct a model from Vensim `.mdl` file.
Parameters
----------
mdl_file : str
The relative path filename for a raw Vensim `.mdl` file.
initialize: bool (optional)
If False, the model will not be initialize when it is loaded.
Default is True.
data_files: list or str or None (optional)
If given the list of files where the necessary data to run the model
is given. Default is None.
missing_values : str ("warning", "error", "ignore", "keep") (optional)
What to do with missing values. If "warning" (default)
shows a warning message and interpolates the values.
If "raise" raises an error. If "ignore" interpolates
the values without showing anything. If "keep" it will keep
the missing values, this option may cause the integration to
fail, but it may be used to check the quality of the data.
split_views: bool (optional)
If True, the sketch is parsed to detect model elements in each
model view, and then translate each view in a separate python
file. Setting this argument to True is recommended for large
models split in many different views. Default is False.
encoding: str or None (optional)
Encoding of the source model file. If None, the encoding will be
read from the model, if the encoding is not defined in the model
file it will be set to 'UTF-8'. Default is None.
**kwargs: (optional)
Additional keyword arguments for translation.
subview_sep: list
Characters used to separate views and subviews (e.g. [",", "."]).
If provided, and split_views=True, each submodule will be placed
inside the directory of the parent view.
Returns
-------
model: a PySD class object
Elements from the python model are loaded into the PySD class
and ready to run
Examples
--------
>>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl')
"""
py_model_file = translate_vensim(mdl_file, split_views, encoding, **kwargs)
model = load(py_model_file, data_files, initialize, missing_values)
model.mdl_file = str(mdl_file)
return model | 28d062ebb234cf991dcef164d5151e1ab62e08f7 | 32,250 |
import typing
def get_feature_importance(
        trained_pipeline: sklearn.pipeline.Pipeline,
        numeric_features: typing.List[str]
) -> pd.Series:
    """
    Get feature importance measures from a trained model.

    Args:
        trained_pipeline (:obj:`sklearn.pipeline.Pipeline`): Fitted model pipeline
        numeric_features (list(str)): Names of numeric features

    Returns:
        :obj:`pandas.Series` mapping each feature name to its importance
    """
    # Categorical names come from the fitted one-hot encoder inside the
    # preprocessor; numeric names must be supplied by the caller.
    encoder = trained_pipeline["preprocessor"].transformers_[1][1]
    feature_names = numeric_features + list(encoder.get_feature_names())
    importances = trained_pipeline["predictor"].feature_importances_
    return pd.Series(data=importances, index=feature_names) | cd303af5a0b343a18fb42a3cd562998ecec96423 | 32,251
from typing import Any
import json
def json_loads(json_text: str) -> Any:
"""Does the same as json.loads, but with some additional validation."""
try:
json_data = json.loads(json_text)
validate_all_strings(json_data)
return json_data
except json.decoder.JSONDecodeError:
raise _jwt_error.JwtInvalidError('Failed to parse JSON string')
except RecursionError:
raise _jwt_error.JwtInvalidError(
'Failed to parse JSON string, too many recursions')
except UnicodeEncodeError:
raise _jwt_error.JwtInvalidError('invalid character') | d123054612a0a3e29f312e1506181ca3f9bed219 | 32,252 |
def weights(layer, expected_layer_name):
"""
Return the kernels/weights and bias from the VGG model for a given layer.
"""
W = vgg_layers[0][layer][0][0][2][0][0]
b = vgg_layers[0][layer][0][0][2][0][1]
layer_name = vgg_layers[0][layer][0][0][0][0]
#to check we obtained the correct layer from the vgg model
assert layer_name == expected_layer_name
return W, b | 5271f932bd9a870bd7857db50632cd51d91b60a9 | 32,253 |
import textwrap
def alert(title: str, text: str, *, level: str = "warning", ID: str = None):
    """
    Generate the HTML to display a banner that can be permanently hidden
    This is used to inform player of important changes in updates.
    Arguments:
        text: Main text of the banner
        title: Title of the banner
        level: One of "warning", "info". The aspect of the banner
        ID: optional string ID of this banner, if you need to check if it is
            open/closed somewhere. Do NOT use numbers
    """
    if not level in ("info", "warning"):
        raise ValueError("Level must be among 'info', 'warning'")
    # alert.has_disable_been_called / alert.numid / alert.strid are function
    # attributes -- presumably initialized elsewhere in this module; verify.
    if alert.has_disable_been_called:
        raise RuntimeError(
            "The function alert() is called after disable_old_alert() has generated "
            "the javascript code to handle hidding closed alerts. This breaks the "
            "system completely, make sure disable_old_alerts is called last"
        )
    if ID is None:
        # Auto-number anonymous alerts.
        alert_id = alert.numid
        alert.numid += 1
    else:
        # Remember explicit string IDs so they can be referenced later.
        alert_id = str(ID)
        alert.strid.append(alert_id)
    # 16 spaces: realign the body's continuation lines inside the dedented HTML.
    indent = " " * 4 * 4
    text = str(text).replace("\n", "\n" + indent)
    return textwrap.dedent(
        f"""\
        <input type="hidden" class="alert-hidder" name="attr_alert-{alert_id}" value="0"/>
        <div class="alert alert-{level}">
            <div>
                <h3> {level.title()} - {title}</h3>
                {text}
            </div>
            <label class="fakebutton">
                <input type="checkbox" name="attr_alert-{alert_id}" value="1" /> ×
            </label>
        </div>"""
    ) | 90ff85c228dc70318deee196bdd512e5be90a5ad | 32,254
def get_stim_data_df(sessions, analyspar, stimpar, stim_data_df=None,
                     comp_sess=[1, 3], datatype="rel_unexp_resp", rel_sess=1,
                     basepar=None, idxpar=None, abs_usi=True, parallel=False):
    """
    get_stim_data_df(sessions, analyspar, stimpar)
    Returns dataframe with relative ROI data for one session relative
    to another, for each line/plane.
    Required args:
        - sessions (list):
            session objects
        - analyspar (AnalysPar):
            named tuple containing analysis parameters
        - stimpar (StimPar):
            named tuple containing stimulus parameters
    Optional args:
        - stim_data_df (pd.DataFrame):
            dataframe with one row per line/plane, and the basic sess_df
            columns
            default: None
        - comp_sess (int):
            sessions for which to obtain absolute fractional change
            [x, y] => |(y - x) / x|
            default: [1, 3]
        - datatype (str):
            type of data to retrieve
            default: "rel_unexp_resp"
        - rel_sess (int):
            number of session relative to which data should be scaled, for each
            mouse
            default: 1
        - basepar (BasePar):
            named tuple containing baseline parameters
            (needed if datatype is "usis")
            default: None
        - idxpar (IdxPar):
            named tuple containing index parameters
            (needed if datatype is "usis")
            default: None
        - abs_usi (bool):
            if True, absolute USIs are returned (applies if datatype is "usis")
            default: True
        - parallel (bool):
            if True, some of the analysis is run in parallel across CPU cores
            default: False
    Returns:
        - stim_data_df (pd.DataFrame):
            dataframe with one row per line/plane, and the basic sess_df
            columns, as well as stimulus columns for each comp_sess:
            - {stimpar.stimtype}_s{comp_sess[0]}:
                first comp_sess data for each ROI
            - {stimpar.stimtype}_s{comp_sess[1]}:
                second comp_sess data for each ROI
    """
    # NOTE(review): comp_sess=[1, 3] is a mutable default argument; it is only
    # read here, but replacing it with None + in-body default would be safer.
    data_df = collect_base_data(
        sessions, analyspar, stimpar, datatype=datatype, rel_sess=rel_sess,
        basepar=basepar, idxpar=idxpar, abs_usi=abs_usi, parallel=parallel
        )
    stim_data_df = check_init_stim_data_df(
        data_df, sessions, stimpar, stim_data_df=stim_data_df,
        analyspar=analyspar
        )
    # populate dataframe
    group_columns = ["lines", "planes"]
    for grp_vals, grp_df in data_df.groupby(group_columns):
        grp_df = grp_df.sort_values(["sess_ns", "mouse_ns"])
        line, plane = grp_vals
        # Locate the single output row for this line/plane combination.
        row_idxs = stim_data_df.loc[
            (stim_data_df["lines"] == line) & (stim_data_df["planes"] == plane)
        ].index
        if len(row_idxs) != 1:
            raise ValueError("Expected exactly one row to match line/plane.")
        row_idx = row_idxs[0]
        sess_ns = sorted(grp_df["sess_ns"].unique())
        # Both comparison sessions must be present in this group.
        for sess_n in comp_sess:
            if int(sess_n) not in sess_ns:
                raise RuntimeError(f"Session {sess_n} missing in grp_df.")
        # obtain comparison data
        # comp_data[i] collects, per mouse, the ROI data for comp_sess[i].
        comp_data = [[], []]
        for mouse_n in sorted(grp_df["mouse_ns"].unique()):
            mouse_loc = (grp_df["mouse_ns"] == mouse_n)
            for i in range(2):
                sess_loc = (grp_df["sess_ns"] == comp_sess[i])
                data_row = grp_df.loc[mouse_loc & sess_loc]
                if len(data_row) != 1:
                    raise RuntimeError("Expected to find exactly one row")
                # retrieve ROI data
                data = data_row.loc[data_row.index[0], datatype]
                comp_data[i].append(data)
        # add data for each session to dataframe
        # ROI data across mice is concatenated into a single array per session.
        for n, data in zip(comp_sess, comp_data):
            stim_data_df.loc[row_idx, f"{stimpar.stimtype}_s{n}"] = \
                np.concatenate(data)
    return stim_data_df | 5a352a66ad06ed70b04db3ca3e26073fb412cccd | 32,255
def get_all_elems_from_json(search_json: dict, search_key: str) -> list:
"""Returns values by key in all nested dicts.
Args:
search_json: Dictionary in which one needs to find all values by specific key.
search_key: Key for search.
Returns:
List of values stored in nested structures by ``search_key``.
Examples:
>>> get_all_elems_from_json({'a':{'b': [1,2,3]}, 'b':42}, 'b')
[[1, 2, 3], 42]
"""
result = []
if isinstance(search_json, dict):
for key in search_json:
if key == search_key:
result.append(search_json[key])
else:
result.extend(get_all_elems_from_json(search_json[key], search_key))
elif isinstance(search_json, list):
for item in search_json:
result.extend(get_all_elems_from_json(item, search_key))
return result | 6ab45e33962ccb5996b50d13e57626365c4ed78b | 32,256 |
import sys
import os
import subprocess
def FindVisualStudioInstallation():
    """
    Returns appropriate values for .build_tool and .uses_msbuild fields
    of TestGypBase for Visual Studio.
    We use the value specified by GYP_MSVS_VERSION. If not specified, we
    search for likely deployment paths.

    Returns a (build_tool, uses_msbuild, msbuild_path, msvs_version) tuple,
    or exits the process when no installation can be found.
    """
    msvs_version = 'auto'
    # Precedence: msvs_version=... on the command line, then GYP_MSVS_VERSION.
    for flag in (f for f in sys.argv if f.startswith('msvs_version=')):
        msvs_version = flag.split('=')[-1]
    msvs_version = os.environ.get('GYP_MSVS_VERSION', msvs_version)
    # An explicit GYP_BUILD_TOOL short-circuits all detection.
    override_build_tool = os.environ.get('GYP_BUILD_TOOL')
    if override_build_tool:
        return override_build_tool, True, override_build_tool, msvs_version
    # String comparison: 'auto' >= '2017' holds lexicographically, so both the
    # default and 2017+ versions take the VSSetup/PowerShell discovery path.
    if msvs_version == 'auto' or msvs_version >= '2017':
        msbuild_exes = []
        top_vs_info = VSSetup_PowerShell()
        if top_vs_info:
            inst_path = top_vs_info['InstallationPath']
            # Search the installation tree for msbuild.exe.
            args2 = ['cmd.exe', '/d', '/c',
                     'cd', '/d', inst_path,
                     '&', 'dir', '/b', '/s', 'msbuild.exe']
            msbuild_exes = subprocess.check_output(args2).strip().splitlines()
        if len(msbuild_exes):
            msbuild_path = str(msbuild_exes[0].decode('utf-8'))
            os.environ['GYP_MSVS_VERSION'] = top_vs_info['CatalogVersion']
            os.environ['GYP_BUILD_TOOL'] = msbuild_path
            return msbuild_path, True, msbuild_path, msvs_version
    # Legacy (<= 2015) versions: probe the well-known devenv.com locations on
    # every drive letter, with and without " (x86)".
    possible_roots = ['%s:\\Program Files%s' % (chr(drive), suffix)
                      for drive in range(ord('C'), ord('Z') + 1)
                      for suffix in ['', ' (x86)']]
    possible_paths = {
        '2015': r'Microsoft Visual Studio 14.0\Common7\IDE\devenv.com',
        '2013': r'Microsoft Visual Studio 12.0\Common7\IDE\devenv.com',
        '2012': r'Microsoft Visual Studio 11.0\Common7\IDE\devenv.com',
        '2010': r'Microsoft Visual Studio 10.0\Common7\IDE\devenv.com',
        '2008': r'Microsoft Visual Studio 9.0\Common7\IDE\devenv.com',
        '2005': r'Microsoft Visual Studio 8\Common7\IDE\devenv.com'
    }
    # Check that the path to the specified GYP_MSVS_VERSION exists.
    if msvs_version in possible_paths:
        path = possible_paths[msvs_version]
        for r in possible_roots:
            build_tool = os.path.join(r, path)
            if os.path.exists(build_tool):
                uses_msbuild = msvs_version >= '2010'
                msbuild_path = FindMSBuildInstallation(msvs_version)
                return build_tool, uses_msbuild, msbuild_path, msvs_version
        else:
            print('Warning: Environment variable GYP_MSVS_VERSION specifies "%s" '
                  'but corresponding "%s" was not found.' % (msvs_version, path))
    print('Error: could not find MSVS version %s' % msvs_version)
    sys.exit(1) | b6969a2a87022efa3bd3164c3b53a21efecf6f15 | 32,257
def prFinalNodeName(q):
"""In : q (state : string)
Out: dot string (string)
Return dot string for generating final state (double circle)
"""
return dot_san_str(q) + '[shape=circle, peripheries=2];' | 8a4e5649ebeb0c68f2e1741fefd935c9a5f919bf | 32,258 |
import typing
from datetime import datetime
def decodeExifDateTime(value: str) -> typing.Optional[datetime]:
    """Decode an EXIF timestamp ('YYYY:MM:DD HH:MM:SS') into a datetime.

    Returns None when *value* does not match the EXIF format.
    Fix: the old code called `datetime.datetime.strptime` while only the
    `datetime` class was imported, which could never succeed.
    """
    try:
        # strptime raises ValueError on any format mismatch.
        return datetime.strptime(value, '%Y:%m:%d %H:%M:%S')
    except ValueError:
        return None | a1ce11305e8e486ad643530930368c47f1c073ef | 32,259
def parse(file: str) -> Env:
    """Parse an RLE file and build the corresponding user environment.

    Parameters
    ----------
    file: str
        Path to the RLE file.

    Returns
    -------
    user_env: `dict` [str, `Any`]
        Environment produced by ``user_env()``, carrying the ``width`` and
        ``height`` of the Game of Life matrix, the ``rule`` used to run the
        simulation, and the ``seed`` the simulation is based on.
    """
    parsed = parse_file(file)
    return user_env(parsed)
import six
def logger_has_handlers(logger):
    """
    Check if the given logger has at least one handler attached, either
    directly or through a propagating ancestor; return a boolean value.

    Uses ``Logger.hasHandlers()`` when the method exists (Python 3). On
    older logging modules without it, the logger hierarchy is walked by
    hand, which also removes the previous hard dependency on ``six`` for
    a simple version check.
    """
    if hasattr(logger, "hasHandlers"):
        return logger.hasHandlers()
    # Fallback: manual re-implementation of hasHandlers().
    rv = False
    c = logger
    while c:
        if c.handlers:
            rv = True
            break
        if not c.propagate:
            break
        c = c.parent
    return rv
import json
def query_parameters(prefix, arpnum, t_recs, keywords, redis=True):
    """Query keyword sequence from a header file, caching it in redis.

    Alternative design: replace prefix and arpnum with filepath.

    Parameters: prefix (series prefix, e.g. 'HARP' for sharp data),
    arpnum (active region patch number), t_recs (record timestamps to
    fetch), keywords (header columns to return), redis (must be True;
    the non-redis path is unimplemented).

    Returns a DataFrame indexed by t_recs with the requested keywords.
    """
    KEYWORDS = ['T_REC', 'AREA', 'USFLUXL', 'MEANGBL', 'R_VALUE']
    if redis:
        id = f'{prefix}{arpnum:06d}' # header file identifier
        if r_header.exists(id) == 0:
            # Cache miss: load the header file and store one JSON blob per
            # T_REC in a redis hash (r_header is presumably a module-level
            # redis client -- TODO confirm).
            dataset = 'sharp' if prefix == 'HARP' else 'smarp'
            header = read_header(dataset, arpnum)
            header = header[KEYWORDS]
            header = header.set_index('T_REC')
            mapping = {t_rec: header.loc[t_rec].to_json() for t_rec in header.index}
            r_header.hmset(id, mapping)
        buff = r_header.hmget(id, t_recs)
        # series = [pd.read_json(b, typ='series') if b else None for b in buff]
        # if any([s is None for s in series]):
        #     print(series)
        # Missing records decode to empty dicts (NaN cells in the frame).
        records = [json.loads(b) if b else {} for b in buff]
        df = pd.DataFrame(records, index=t_recs)[keywords] # Takes up 61% of the time
    else:
        # NOTE(review): bare `raise` with no active exception is a
        # RuntimeError; the non-redis path is effectively unimplemented.
        raise
    return df | 84c0a43d6d045e3255478175697cbb0bfaac5da8 | 32,262
def get_columns(tablename):
    """Return the simbench csv file column names for *tablename*.

    Raises ValueError when the table name is unknown; profile tables log a
    debug note because their column lists may be incomplete.
    """
    known_columns = all_columns()
    if tablename not in known_columns:
        raise ValueError('The tablename %s is unknown.' % tablename)
    if "Profile" in tablename:
        logger.debug("The returned column list of %s is given for simbench " % tablename +
                     "dataset and may be incomplete")
    return known_columns[tablename]
def x0_rand(mu3,xb,num_min):
    """
    Randomly initialise the 5 protocol parameters using the specified bounds.
    Parameters and bounds should be specified in the order {Px,pk1,pk2,mu1,mu2}.
    Parameters
    ----------
    mu3 : float
        Intensity of pulse 3 (vacuum).
    xb : float, array-like
        Upper and lower bounds for the protocol parameters. (5,2)
    num_min : float
        An arbitrarily small number.
    Returns
    -------
    x0 : float, array
        Randomly initialised protocol parameters.
    """
    # Px: uniform draw strictly inside (xb[0,0], xb[0,1]), kept num_min
    # away from both bounds.
    Px_i = np.random.rand() * (xb[0,1] - xb[0,0] - 2*num_min) + xb[0,0] + \
        num_min
    # pk1, pk2: pulse probabilities; redraw until pk1 + pk2 < 1 so that a
    # valid pk3 = 1 - pk1 - pk2 remains.
    pk1_i, pk2_i = 1.0, 1.0
    while (pk1_i+pk2_i >= 1.0):
        pk1_i = np.random.rand() * (xb[1,1] - xb[1,0] - 2*num_min) + \
            xb[1,0] + num_min
        pk2_i = np.random.rand() * (min(xb[2,1],1-pk1_i) - xb[2,0] - \
            2*num_min) + xb[2,0] + num_min
    # mu1: intensity with its lower bound lifted to at least 2*mu3.
    mu1_i = np.random.rand() * (xb[3,1] - max(xb[3,0],2*mu3) - 2*num_min) + \
        max(xb[3,0],2*mu3) + num_min
    # mu2: intensity constrained to (max(lower bound, mu3), min(upper, mu1)).
    mu2_i = np.random.rand() * (min(xb[4,1],mu1_i) - max(xb[4,0],mu3) - \
        2*num_min) + max(xb[4,0],mu3) + num_min
    return np.array([Px_i,pk1_i,pk2_i,mu1_i,mu2_i]) | fcf32cd7367e7b78e48829f72523f50855ba563e | 32,264
def render_smiles_list(smiles_list):
    """Format one or more solvent SMILES strings as an HTML snippet.

    A single SMILES is shown as-is; several are rendered as a
    comma-separated mixture. The markup is returned marked safe for
    templating.
    """
    # The string that will be returned to the template
    result = r'<h3>Solvent SMILES:</h3>' + '\n'
    result += r'<p>'
    if len(smiles_list) == 1:
        result += smiles_list[0]
    else:
        prefix = 'This is a mixture of the following solvents: '
        joined = ', '.join(smiles_list)
        # For an empty list, keep the original trailing-separator trim.
        result += (prefix + joined) if joined else prefix[:-2]
    result += r'</p>'
    return mark_safe(result)
def get_config_id(kwargs=None, call=None):
    """
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    """
    # This is a salt-cloud --function/-f entry point; reject -a/--action use.
    if call == "action":
        raise SaltCloudException(
            "The get_config_id function must be called with -f or --function."
        )
    return _get_cloud_interface().get_config_id(kwargs=kwargs) | b66eda936157d0c6794289ed90acd681a5d31c02 | 32,266
def bunq_oauth_reauthorize():
    """Endpoint that re-runs the bunq OAuth authorization flow.

    Only proceeds when the request carries the expected session cookie;
    otherwise an error page is rendered.
    """
    cookie = request.cookies.get('session')
    session_valid = cookie is not None and cookie == util.get_session_cookie()
    if not session_valid:
        return render_template(
            "message.html", msgtype="danger",
            msg="Invalid request: session cookie not set or not valid")
    return auth.bunq_oauth_reauthorize()
def get_centrality_measures(network, tol):
    """
    Calculates five centrality measures (degree, betweenness, closeness, and
    eigenvector centrality, and k-shell) for the nodes of the given network.

    Parameters
    ----------
    network: networkx.Graph()
    tol: tolerance parameter for calculating eigenvector centrality

    Returns
    --------
    [degree, betweenness, closeness, eigenvector_centrality, kshell]: list of
    numpy.arrays, each ordered like network.nodes()
    """
    # Graph.node was deprecated in networkx 2.x and removed in 2.4;
    # Graph.nodes is the supported accessor.
    nodes = list(network.nodes())
    N = len(nodes)
    # Each networkx call returns a node -> value mapping.
    degrees_temp = nx.degree(network)
    betweenness_temp = nx.betweenness_centrality(network)
    closeness_temp = nx.closeness_centrality(network)
    eigenvector_centrality_temp = nx.eigenvector_centrality(network, tol=tol)
    kshell_temp = nx.core_number(network)
    degrees = np.zeros(N)
    betweenness = np.zeros(N)
    closeness = np.zeros(N)
    eigenvector_centrality = np.zeros(N)
    kshell = np.zeros(N)
    # Re-order every measure into the node order of network.nodes().
    for i, node in enumerate(nodes):
        degrees[i] = degrees_temp[node]
        betweenness[i] = betweenness_temp[node]
        closeness[i] = closeness_temp[node]
        eigenvector_centrality[i] = eigenvector_centrality_temp[node]
        kshell[i] = kshell_temp[node]
    return [degrees, betweenness, closeness, eigenvector_centrality, kshell]
from typing import List
from typing import Tuple

def extract_mealentries(meals: List[Meal]) -> List[Tuple]:
    """
    Flatten myfitnesspal meals into one record per meal entry.

    Args:
    - meals (List[Meal]): A list with meal objects to extract data from

    Returns:
    - List[Tuple]: One tuple per entry: (username, date, meal name, entry
      short name, quantity, unit, calories, carbohydrates, fat, protein,
      sodium, sugar); missing totals are recorded as None.
    """
    records: List[Tuple] = []
    for meal in meals:
        for entry in meal.entries:
            totals = entry.totals
            records.append((
                meal.username,
                meal.date,
                meal.name,
                entry.short_name,
                entry.quantity,
                entry.unit,
                totals.get("calories", None),
                totals.get("carbohydrates", None),
                totals.get("fat", None),
                totals.get("protein", None),
                totals.get("sodium", None),
                totals.get("sugar", None),
            ))
    return records
import math
def get_45point_spiralling_sphere_with_normal_zaxis_dist( num_of_spirals = 4, num_of_vertices = 45):
    """
    A sphere of spiralling points. Each point is equally spaced on the x,y,z axes. The equal spacing is calculated by dividing the straight-line spiral distance by 45.
    Adapted from Leonsim's code here: https://github.com/leonsim/sphere/blob/9ec92d2f5411171776e1176be58468af68a93442/Sphere.cpp

    Note: sin/cos are used unqualified, so the module presumably does a
    star-import of math elsewhere -- TODO confirm; likewise
    get_normalised_normal_curve() is defined elsewhere in this module and
    is assumed to return one weight per vertex.
    """
    vertices = []
    # Degrees of xy-rotation advanced per vertex so the spiral completes
    # num_of_spirals full revolutions over num_of_vertices points.
    xy_degree_change = (num_of_spirals * 360) / num_of_vertices
    zaxis_dist = get_normalised_normal_curve()
    c = math.pi / 180.0 # Degrees to radians
    phiStart = 90.0 # Default 100
    thetaStart = 180.0 # Default 180
    theta = -thetaStart
    phi = -phiStart
    index = -1
    while phi <= (phiStart):
        index +=1
        phir = c * phi
        thetar = c * theta
        # Spherical -> Cartesian for the current (theta, phi) pair.
        x = sin(thetar) * cos(phir)
        y = cos(thetar) * cos(phir)
        z = sin(phir)
        v1 = (x, y, z)
        vertices.append(v1)
        theta += xy_degree_change
        print("Vertex:"+str(index)+", zAxis:"+str(len(zaxis_dist))+", Vertices:"+str(len(vertices)))
        # Advance phi by a normal-curve-weighted share of the rotation so
        # vertices bunch according to zaxis_dist.
        z_degree_change = (360 /num_of_vertices) * zaxis_dist[index]
        phi += z_degree_change
    return vertices | 59173bdd28b513d0f039215ea7d713cd80d81b4e | 32,270
import os
def refresh_gui_with_new_image(shared, df_files, df_model, df_landmarks, main_window, landmarks_window):
    """
    Parameters
    ----------
    shared : dictionary
        contains data shared across windows, definition is in the main function.
    df_files : pandas DataFrame
        dataframe containing the paths to images of current project.
    df_model : pandas DataFrame
        dataframe containing the names and positions of landmarks of current project.
    df_landmarks : pandas DataFrame
        dataframe containing the positions of landmarks and additional notes for
        all images of current project.
    main_window : PySimplegui window
        main window of the GUI.
    landmarks_window : PySimplegui window
        landmark selection window of the GUI.
    Returns
    -------
    shared : dictionary
        updated data shared across windows.
    landmarks_window: PySimplegui window
        resfreshed landmark selection window of the GUI.
    """
    # updated current image, raw_image and current file:
    shared['curr_image'] = open_image(df_files.loc[shared['im_index'],"full path"], normalize=shared['normalize'])
    shared['raw_image'] = shared['curr_image']
    shared['curr_file'] = df_files.loc[shared['im_index'],"file name"]
    # landmark marker size scales with image width (divisor 80 appears to
    # be an empirically chosen constant -- TODO confirm)
    shared['pt_size'] = shared['curr_image'].width / 80
    # update all the fields related to the image (image quality, notes, etc..)
    update_image_fields(shared['im_index'], shared['curr_image'], df_files, main_window, shared['graph_width'])
    # refresh the landmarks window, if present
    if landmarks_window:
        location = landmarks_window.CurrentLocation()
        temp_window = make_landmarks_window(df_model, df_landmarks, shared['curr_file'], location = location)
        landmarks_window.Close()
        landmarks_window = temp_window
    # else, create a new one:
    else:
        landmarks_window = make_landmarks_window(df_model, df_landmarks, shared['curr_file'])
    # update the preview of the landmarks:
    update_landmarks_preview(os.path.join(shared['proj_folder'], ref_image_name), main_window, 300)
    # remove selection of the current landmark
    shared['curr_landmark'] = None
    # update the progress bar
    update_progress_bar(df_files, main_window)
    # remove focus from any object
    # NOTE(review): bare except silently swallows everything (including
    # KeyboardInterrupt); narrowing to Exception would be safer.
    try:
        x = main_window.FindElementWithFocus()
        x.block_focus()
    except:
        pass
    # place the focus on the main window:
    main_window.TKroot.focus_force()
    return shared, landmarks_window | 3036e25e6b6284351c83c9ac8907170252396c31 | 32,271
def pak64(seq, debug=False):
    """
    Pack a sequence of small non-negative integers into an array of uint64.

    :param seq: numpy array of smallish non-negative integers (max < 256)
    :param debug: when True, print the intermediate packing state
    :return seq64: numpy uint64 array holding the bit-packed values

    The bit pitch (4 or 8 bits per item) is chosen from the largest value
    in *seq*; items are packed lowest-slot-first within each 64-bit word
    and the words are emitted in reverse order, matching the original
    implementation.

    Bug fix: the Python 2 ``print`` statements were syntax errors under
    Python 3; they are now print() calls with identical output.
    """
    len_ = len(seq)
    max_ = seq.max()
    if max_ < 0x10:
        itembits = 4
    elif max_ < 0x100:
        itembits = 8
    else:
        assert 0, "max value in seq %d is too big " % max_
    totbits = len_ * itembits
    n64 = 1 + totbits // 64
    nitem64 = 64 // itembits
    if debug:
        print("seq", seq)
        print("len_:%d max_:%d itembits:%d totbits:%d nitem64:%d n64:%d" % (len_, max_, itembits, totbits, nitem64, n64))
    # Zero-pad the sequence to fill whole 64-bit words.
    aseq = np.zeros( (n64*nitem64), dtype=np.uint64 )
    aseq[0:len(seq)] = seq
    bseq = aseq.reshape((n64,nitem64))
    # Shift every item into its slot within its word.
    for i in range(n64):
        for j in range(nitem64):
            bseq[i,j] <<= np.uint64(itembits*j)
    # OR the slots of each row together; rows are stored in reverse order.
    seq64 = np.zeros( (n64,), dtype=np.uint64 )
    for i in range(n64):
        seq64[n64-i-1] = np.bitwise_or.reduce(bseq[i])
    if debug:
        print("bseq", bseq)
        print("seq64", seq64)
    return seq64
import re
import json
def parse_results(line):
    """Extract password-change test events from a logcat line and print them.

    Always prints the bracketed event header; when the line also reports a
    stored-credential count, prints that count and a JSON dump of every
    PasswordStoreCredential found in the line.
    """
    header = re.search(r'cr_PasswordChangeTest: (\[[\w|:| |#]+\])', line).group(1)
    print(header)
    count_match = re.search(r'Number of stored credentials: (\d+).', line)
    if count_match is None:
        # Event does not contain any credentials information.
        # Print an empty line and continue.
        print()
        return
    print('Number of stored credentials: %s' % count_match.group(1))
    info_pattern = re.compile(
        r'PasswordStoreCredential\{url=(.*?), username=(.*?), password=(.*?)\}')
    credentials = []
    for raw_credential in re.findall(r'PasswordStoreCredential\{.*?\}', line):
        fields = info_pattern.search(raw_credential)
        credentials.append({
            'url': fields.group(1),
            'username': fields.group(2),
            'password': fields.group(3),
        })
    # Print credentials with json format.
    print(json.dumps(credentials, indent=2))
print() | 24cc928d945d2d4f16f394be68a8bb217c21b342 | 32,273 |
def bqwrapper(datai):
    """
    Wraps the kdtree ball query for concurrent tree search.

    Relies on module-level globals defined elsewhere in this file:
    ``kdtbq`` (presumably a bound KD-tree ball-point query callable) and
    ``bw`` (bandwidth sequence whose first element is the query radius)
    -- TODO confirm.
    """
    return kdtbq(datai, r=bw[0]) | 203e77e37ddb53b76366b0d376c37b63536da923 | 32,274
import re
def crawl_user_movies():
    """
    @function: scrape the details of every movie the users have watched
    @params: none
    @return: list of movie info records, one list of fields per movie
    """
    user_df = pd.read_csv('douban_users.csv')
    user_df = user_df.iloc[:, [1, 2, 3]]
    user_movies = list(user_df['movie_id'].unique())
    movies = [] # accumulates one record per movie
    count = 1 # progress counter for logging
    # NOTE(review): the loop variable `i` is reused (shadowed) by the inner
    # `for i in range(len(info))` loops below; dataID captures the movie id
    # before that happens.
    for i in user_movies:
        url = 'https://movie.douban.com/subject/{}/'.format(str(i))
        text = get_html(url) # fetch the page of this movie
        if text == None:
            count += 1 # progress counter for logging
            continue
        html = etree.HTML(text) # parse the movie page
        info = html.xpath("//div[@class='subject clearfix']/div[@id='info']//text()") # raw info text nodes of the movie
        # movie ID
        dataID = i
        # movie title
        name = html.xpath("//*[@id='content']/h1/span[1]/text()")[0]
        name = name.split(' ')[0]
        # English title
        english_name = html.xpath("//*[@id='content']/h1/span[1]/text()")[0]
        # check whether the string contains any English letters
        if bool(re.search('[A-Za-z]', english_name)):
            english_name = english_name.split(' ') # split to extract the English title
            del english_name[0] # drop the Chinese title
            english_name = ' '.join(english_name) # re-join the remaining parts with spaces
        else:
            english_name = None
        # directors
        flag = 1
        directors = []
        for i in range(len(info)):
            if info[i] == '导演':
                for j in range(i + 1, len(info)):
                    if info[j] == '编剧':
                        flag = 0
                        break
                    for ch in info[j]:
                        # check whether the string contains Chinese or English characters
                        if (u'\u4e00' <= ch <= u'\u9fff') or (bool(re.search('[a-z]', info[j]))):
                            directors.append(info[j].strip())
                            flag = 0
                            break
                    if flag == 0:
                        break
            if flag == 0:
                break
        directors = ''.join(directors) # convert to a single string
        # screenwriters
        flag = 1
        writer = []
        for i in range(len(info)):
            if info[i] == '编剧':
                for j in range(i + 1, len(info)):
                    if info[j] == '主演':
                        flag = 0
                        break
                    for ch in info[j]:
                        # check whether the string contains Chinese or English characters
                        if (u'\u4e00' <= ch <= u'\u9fff') or (bool(re.search('[a-z]', info[j]))):
                            writer.append(info[j].strip())
                            flag = 0
                            break
                    if flag == 0:
                        break
                if flag == 0:
                    break
            if flag == 0:
                break
        writer = ''.join(writer) # convert to a single string
        # leading actors
        # NOTE(review): this block is a copy of the screenwriter block above
        # (it matches '编剧' and stops at '主演'), so `actors` ends up equal
        # to the screenwriters -- looks like a copy-paste bug; verify intent.
        flag = 1
        actors = []
        for i in range(len(info)):
            if info[i] == '编剧':
                for j in range(i + 1, len(info)):
                    if info[j] == '主演':
                        flag = 0
                        break
                    for ch in info[j]:
                        # check whether the string contains Chinese or English characters
                        if (u'\u4e00' <= ch <= u'\u9fff') or (bool(re.search('[a-z]', info[j]))):
                            actors.append(info[j].strip())
                            flag = 0
                            break
                    if flag == 0:
                        break
                if flag == 0:
                    break
            if flag == 0:
                break
        actors = ''.join(actors) # convert to a single string
        # rating
        try:
            rate = html.xpath("//div[@class='rating_wrap clearbox']/div[@class='rating_self clearfix']/strong[@class='ll rating_num']/text()")[0]
        except:
            rate = None
        # genres (at most three are kept)
        flag = 1
        style = []
        for i in range(len(info)):
            if info[i] == '类型:':
                for j in range(i + 1, len(info)):
                    if (info[j] == '制片国家/地区:') or (info[j] == '官方网站:'):
                        flag = 0
                        break
                    for ch in info[j]:
                        # check whether the string contains Chinese characters
                        if u'\u4e00' <= ch <= u'\u9fff':
                            style.append(info[j])
                            if len(style) == 3:
                                flag = 0
                                break
                            break
                    if flag == 0:
                        break
                if flag == 0:
                    break
        # store up to three genres in separate fields
        if len(style) == 0:
            style1 = None
            style2 = None
            style3 = None
        if len(style) == 1:
            style1 = style[0]
            style2 = None
            style3 = None
        if len(style) == 2:
            style1 = style[0]
            style2 = style[1]
            style3 = None
        if len(style) == 3:
            style1 = style[0]
            style2 = style[1]
            style3 = style[2]
        # country of production (first entry before '/')
        flag = 1
        country = []
        for i in range(len(info)):
            if info[i] == r'制片国家/地区:':
                for j in range(i + 1, len(info)):
                    if info[j] == '语言:':
                        flag = 0
                        break
                    for ch in info[j]:
                        # check whether the string contains Chinese characters
                        if u'\u4e00' <= ch <= u'\u9fff':
                            country.append(info[j].strip())
                            flag = 0
                            break
                    if flag == 0:
                        break
                if flag == 0:
                    break
        country = country[0].split(r'/')
        country = country[0]
        # language (first entry before '/')
        flag = 1
        language = []
        for i in range(len(info)):
            if info[i] == '语言:':
                for j in range(i + 1, len(info)):
                    if info[j] == '上映日期:':
                        flag = 0
                        break
                    for ch in info[j]:
                        # check whether the string contains Chinese characters
                        if u'\u4e00' <= ch <= u'\u9fff':
                            language.append(info[j].strip())
                            flag = 0
                            break
                    if flag == 0:
                        break
                if flag == 0:
                    break
        try:
            language = language[0].split(r'/')
            language = language[0]
        except:
            language = None
        # release date (digits only)
        flag = 1
        date = []
        for i in range(len(info)):
            if info[i] == '上映日期:':
                for j in range(i + 1, len(info)):
                    if (info[j] == '片长:') or (info[j] == '又名:'):
                        flag = 0
                        break
                    for ch in info[j]:
                        # check whether the string contains Chinese or English characters
                        if (u'\u4e00' <= ch <= u'\u9fff') or (bool(re.search('[a-z]', info[j]))):
                            date.append(re.search(r'\d+', info[j]).group(0))
                            flag = 0
                            break
                    if flag == 0:
                        break
                if flag == 0:
                    break
        date = ''.join(date) # convert to a single string
        # runtime in minutes (digits only, first value before '/')
        flag = 1
        duration = []
        for i in range(len(info)):
            if info[i] == '片长:':
                for j in range(i + 1, len(info)):
                    if (info[j] == '又名:') or (info[j] == 'IMDb链接:'):
                        flag = 0
                        break
                    for ch in info[j]:
                        # check whether the string contains Chinese characters
                        if u'\u4e00' <= ch <= u'\u9fff':
                            info[j] = info[j].split('/')[0]
                            duration.append(re.search(r'\d+', info[j].strip()).group(0))
                            flag = 0
                            break
                    if flag == 0:
                        break
                if flag == 0:
                    break
        duration = ''.join(duration) # convert to a single string
        # poster image URL
        pic = html.xpath("//div[@id='mainpic']/a[@class='nbgnbg']/img/@src")[0]
        # synopsis (whitespace-normalized)
        introduction = ''.join(html.xpath("//div[@class='related-info']/div[@id='link-report']/span/text()")).strip().replace(' ', '').replace('\n', '').replace('\xa0', '').replace(u'\u3000', u' ')
        # collect this movie's fields and append them to the overall list
        each_movie = [name, english_name, directors, writer, actors, rate, style1, style2, style3,
                      country, language, date, duration, introduction, dataID, url, pic]
        movies.append(each_movie)
        print("成功解析第" + str(count) + "部电影的信息: ", each_movie)
        count += 1 # progress counter for logging
    return movies | 882fe56fc2fc5e22b6ad0ce518b7adaabd724cd2 | 32,275
import logging
def filter_by_shape(data: pd.DataFrame, geofence: Polygon) -> pd.DataFrame:
    """Drop trips whose pickup or dropoff lies outside the geofence's
    bounding box (strict inequalities on both coordinates)."""
    logging.info('Filtering by bbox')
    min_lon, min_lat, max_lon, max_lat = geofence.bounds
    pickup_inside = (
        (data.pickup_longitude > min_lon) & (data.pickup_longitude < max_lon) &
        (data.pickup_latitude > min_lat) & (data.pickup_latitude < max_lat)
    )
    dropoff_inside = (
        (data.dropoff_longitude > min_lon) & (data.dropoff_longitude < max_lon) &
        (data.dropoff_latitude > min_lat) & (data.dropoff_latitude < max_lat)
    )
    data = data[pickup_inside & dropoff_inside]
    logging.info(f"Data shape {data.shape}")
    return data
from ..core import cache as cache

def upload_collection(flask_app, filenames, runs, dataset_id, collection_id,
                      descriptions=None, cache=None):
    """ Create new Predictors from TSV files

    Args:
        flask_app (obj): Flask application (kept for interface compatibility)
        filenames list of (str): List of paths to TSVs
        runs list of (int): List of run ids to apply events to
        dataset_id (int): Dataset id.
        collection_id (int): Id of collection object
        descriptions (dict): Optional descriptions for each column
        cache (obj): Optional flask cache object; cleared after predictor
            creation (and on failure) when provided
    """
    # Bug fix: the descriptions default used to be applied only when the
    # unrelated `cache` argument was None, leaving descriptions == None
    # (and descriptions.get crashing) whenever a cache was passed.
    if descriptions is None:
        descriptions = {}
    collection_object = PredictorCollection.query.filter_by(
        id=collection_id).one()
    # Load into pandas
    try:
        events = [pd.read_csv(f, sep='\t') for f in filenames]
    except Exception as e:
        update_record(
            collection_object,
            exception=e,
            traceback='Error reading event files'
        )
        raise
    # Check columns are all the same across all files
    cols = [set(e.columns) for e in events]
    common_cols = set.intersection(*cols)
    if not len(common_cols) == len(cols[0]):
        update_record(
            collection_object,
            traceback='Event files contain distinct columns'
        )
        raise Exception('Event files contain distinct columns')
    if not set(['onset', 'duration']).issubset(common_cols):
        update_record(
            collection_object,
            traceback='Not all columns have "onset" and "duration"'
        )
        raise Exception('Not all columns have "onset" and "duration"')
    pe_objects = []
    try:
        # One Predictor per non-onset/duration column, with its events
        # attached to every requested run.
        for col in common_cols - set(['onset', 'duration']):
            predictor = Predictor(
                name=col,
                source=f'Collection: {collection_object.collection_name}',
                dataset_id=dataset_id,
                predictor_collection_id=collection_object.id,
                private=True,
                description=descriptions.get(col))
            db.session.add(predictor)
            db.session.commit()
            for ix, e in enumerate(events):
                select = e[['onset', 'duration', col]].dropna()
                for run_id in runs[ix]:
                    # Add PredictorRun
                    pr, _ = get_or_create(
                        PredictorRun, predictor_id=predictor.id, run_id=run_id)
                    for _, row in select.iterrows():
                        row = row.to_dict()
                        pe_objects.append(
                            PredictorEvent(
                                predictor_id=predictor.id,
                                run_id=run_id, onset=row['onset'],
                                duration=row['duration'], value=row[col])
                        )
            collection_object.predictors.append(predictor)
        db.session.bulk_save_objects(pe_objects)
        db.session.commit()
    except Exception as e:
        # Bug fix: cache defaults to None; guard before clearing it so the
        # error path does not mask the real failure with AttributeError.
        if cache is not None:
            cache.clear()
        db.session.rollback()
        update_record(
            collection_object,
            exception=e,
            traceback=f'Error creating predictors. Failed processing {col}'
        )
        raise
    # Bug fix: same None-guard on the success path.
    if cache is not None:
        cache.clear()
    return update_record(
        collection_object,
        status='OK'
    )
def get_filtered_metadata_list(metadata_list, strand):
    """Remove exon junctions that redundantly cover a junction.

    Parameters
    ----------
    metadata_list: List(Output_metadata)
    strand: strand of the gene

    Returns
    -------
    List of metadata objects remaining after the filter
    """
    junction_dict = _get_exon_junction_dict(metadata_list, strand)
    redundant_ids = _collect_remove_ids(junction_dict)
    return [meta for meta in metadata_list if meta.output_id not in redundant_ids]
import functools
import traceback
def handle_exceptions(database, params, constraints, start_params, general_options):
    """Handle exceptions in the criterion function.
    This decorator catches any exceptions raised inside the criterion function. If the
    exception is a :class:`KeyboardInterrupt` or a :class:`SystemExit`, the user wants
    to stop the optimization and the exception is raised
    For other exceptions, it is assumed that the optimizer proposed parameters which
    could not be handled by the criterion function. For example, the parameters formed
    an invalid covariance matrix which lead to an :class:`numpy.linalg.LinAlgError` in
    the matrix decompositions. Then, we calculate a penalty as a function of the
    criterion value at the initial parameters and some distance between the initial and
    the current parameters.

    Args: database (logging database handle, or falsy to skip logging),
    params (DataFrame with internal-parameter bookkeeping columns),
    constraints (processed constraints), start_params (internal start
    parameter vector), general_options (dict; reads
    'start_criterion_value', 'criterion_exception_penalty' and
    'criterion_exception_raise').
    """
    def decorator_handle_exceptions(func):
        @functools.wraps(func)
        def wrapper_handle_exceptions(x, *args, **kwargs):
            try:
                out = func(x, *args, **kwargs)
            except (KeyboardInterrupt, SystemExit):
                # User-initiated aborts must always propagate.
                raise
            except Exception as e:
                # Adjust the criterion value at the start.
                start_criterion_value = general_options["start_criterion_value"]
                constant, slope = general_options.get(
                    "criterion_exception_penalty", (None, None)
                )
                # Default penalty: constant = 2x and slope = 0.1x of the
                # start criterion value.
                constant = 2 * start_criterion_value if constant is None else constant
                slope = 0.1 * start_criterion_value if slope is None else slope
                raise_exc = general_options.get("criterion_exception_raise", False)
                if raise_exc:
                    raise e
                else:
                    if database:
                        # Log the traceback together with the offending
                        # external parameters.
                        exception_info = traceback.format_exc()
                        p = reparametrize_from_internal(
                            internal=x,
                            fixed_values=params["_internal_fixed_value"].to_numpy(),
                            pre_replacements=params["_pre_replacements"]
                            .to_numpy()
                            .astype(int),
                            processed_constraints=constraints,
                            post_replacements=(
                                params["_post_replacements"].to_numpy().astype(int)
                            ),
                            processed_params=params,
                        )
                        msg = (
                            exception_info
                            + "\n\n"
                            + "The parameters are\n\n"
                            + p["value"].to_csv(sep="\t", header=True)
                        )
                        append_rows(database, "exceptions", {"value": msg})
                    # Penalty grows with the distance from the start
                    # parameters, capped at MAX_CRITERION_PENALTY.
                    out = min(
                        MAX_CRITERION_PENALTY,
                        constant + slope * np.linalg.norm(x - start_params),
                    )
            return out
        return wrapper_handle_exceptions
    return decorator_handle_exceptions | 725cfc7d3c338e2a4dbd143fc558307cbb49e1cc | 32,279
def vector2angles(gaze_vector: np.ndarray):
    """
    Transforms a gaze vector into the angles yaw and elevation/pitch.

    Note: np.arctan (not arctan2) is used, so yaw is quadrant-correct only
    for vectors with gaze_vector[2] < 0, and z == 0 triggers a
    divide-by-zero warning.

    :param gaze_vector: 3D unit gaze vector
    :return: 2D gaze angles, float32 array of shape (1, 2): [[yaw, pitch]]
    """
    gaze_angles = np.empty((1, 2), dtype=np.float32)
    gaze_angles[0, 0] = np.arctan(-gaze_vector[0]/-gaze_vector[2])  # yaw = arctan(x/z); the old comment wrongly claimed arctan2
    gaze_angles[0, 1] = np.arcsin(-gaze_vector[1])  # pitch = arcsin(-y)
    return gaze_angles | b0db8e1f6cb9865e9563af5385f760699069013e | 32,280
def setup_train_test_idx(X, last_train_time_step, last_time_step, aggregated_timestamp_column='time_step'):
    """Split the row indices of X into train/test by an integer time column.

    The aggregated_time_step_column needs to be a column with integer
    values, such as year, month or day.

    Parameters
    ----------
    X : pandas.DataFrame containing *aggregated_timestamp_column*
    last_train_time_step : int, last time step included in 'train'
    last_time_step : int, last time step included in 'test'
    aggregated_timestamp_column : str, name of the integer time column

    Returns
    -------
    dict mapping 'train' and 'test' to the matching row indices of X
    """
    # Lazy ranges instead of materialized lists; isin() accepts any
    # list-like, so behavior is unchanged for integer time steps.
    split_timesteps = {
        'train': range(last_train_time_step + 1),
        'test': range(last_train_time_step + 1, last_time_step + 1),
    }
    return {
        split: X[X[aggregated_timestamp_column].isin(steps)].index
        for split, steps in split_timesteps.items()
    }
def get_targets_as_list(key_list):
    """Collect the target values for every key.

    :param key_list: Target key list
    :type key_list: `list`
    :return: Values list, one entry (from get_all_targets) per key
    :rtype: `list`
    """
    session = get_scoped_session()
    return [get_all_targets(session, key) for key in key_list]
import re

def load_jmfd(jmfd_path):
    """Loads j-MFD as Pandas DataFrame.

    Args:
        jmfd_path (str): Path of J-MFD.

    Raises:
        JMFDFormatError: J-MFD format error (the file must contain exactly
            two '%' separators: header, category table, dictionary).

    Returns:
        pandas.DataFrame: Pandas DataFrame of loaded j-MFD with word,
            existence of stem, foundation id and foundation columns; one
            row per (word, foundation id) pair.
        dict: A dict mapping ids to the corresponding Moral foundation.
    """
    # Bug fix: the J-MFD is Japanese text; read it with an explicit
    # encoding instead of the platform default (which breaks on e.g.
    # Windows cp1252).
    with open(jmfd_path, mode='r', encoding='utf-8') as f:
        text = f.read()
    splitted = text.split('%')
    if len(splitted) != 3:
        raise JMFDFormatError('Invalid JMFD format.')
    text_cat = splitted[1].strip()
    text_dic = splitted[2].strip()
    # Creates a dict mapping ids to the corresponding Moral foundation.
    foundation = {}
    for t in text_cat.splitlines():
        fid, cat = t.strip().split('\t')
        foundation[fid] = cat
    # Gets moral foundation words and ids; a word may list several ids,
    # producing one (word, id) row each. A '*' suffix marks a stem entry.
    words = []
    fids = []
    for t in text_dic.splitlines():
        text_splitted = re.split('\t+', t.strip())
        for i in range(1, len(text_splitted)):
            words.append(text_splitted[0].strip())
            fids.append(text_splitted[i].strip())
    # Creates DataFrame containing loaded J-MFD.
    df = pd.DataFrame({
        'word': [w.replace('*', '') for w in words],
        'stem': [w.endswith('*') for w in words],
        'fid': fids,
        'foundation': [foundation[i] for i in fids]
    })
    return df, foundation
import os
def binary_to_notelist(data):
    """data is a numpy array: [timestep, feature]. Timestep 0 is a midi #, timestep 1 is a duration, timestep 2 is midi, etc...

    Returns a list of Note(midi, dur_string) namedtuples; padding timesteps
    are skipped. Relies on names defined elsewhere in this module:
    TXT_TOKENIZED, PADDING_TOKEN, read_pickle, np and namedtuple.
    """
    assert len(data.shape) == 2
    # Read the duration symbol table
    symbol_to_index = read_pickle(os.path.join(TXT_TOKENIZED, 'symbol_to_index.pkl'))
    index_to_symbol = {idx: symbol for idx, symbol in enumerate(symbol_to_index)}
    notes = []
    Note = namedtuple('Note', ['midi', 'dur_string'])
    midi = None
    dur_string = None
    for timestep in range(len(data)):
        # argmax over the feature axis -- features are presumably one-hot
        # encoded, TODO confirm.
        value = np.argmax(data[timestep])
        if value == PADDING_TOKEN:
            continue
        if timestep % 2 == 0:
            # midi # case
            midi = value
        else:
            # duration case: emit the (midi, duration) pair
            dur_string = index_to_symbol[value]
            notes.append(Note(midi, dur_string))
    return notes | 535b774d789055ff99a555d5e44873ff373ac429 | 32,284
def objectId(value):
    """Field validator: ensure a non-empty value is a well-formed BSON
    ObjectId string; the value is returned unchanged."""
    if not value:
        return value
    if not ObjectId.is_valid(value):
        raise ValueError('This is not valid objectId')
    return value
def add_lists(list1, list2):
    """
    Add corresponding values of two lists together. The lists should have the same number of elements.

    Parameters
    ----------
    list1: list
        the first list to add
    list2: list
        the second list to add

    Return
    ----------
    output: list
        a new list containing the element-wise sums of the given lists
    """
    # Comprehension replaces the manual append loop; zip still truncates
    # to the shorter input, matching the original behavior.
    return [it1 + it2 for it1, it2 in zip(list1, list2)]
def get_struc_first_offset(*args):
    """
    get_struc_first_offset(sptr) -> ea_t
    Get offset of first member.
    @param sptr (C++: const struc_t *)
    @return: BADADDR if memqty == 0

    SWIG-generated wrapper; delegates to the native _ida_struct module.
    """
    return _ida_struct.get_struc_first_offset(*args) | f589dec791c3a81664b81573ea52f02d1c9a6b15 | 32,287
def export_gps_route( trip_id, trip_date, vehicle_id,
                      gtfs_error, offset_seconds,
                      gps_data ):
    """
    Writes the given entry to the "tracked_routes" table. This table is used
    to cache the results of finding and filtering only the valid routes as
    represented in the GPS dataset.
    Returns segment_id, a unique identifier for this GPS segment
    trip_id: the GTFS trip id
    trip_date: the date of the trip
    vehicle_id: as reported in the GPS data
    gtfs_error: The distance from the matched GTFS trip as measured by
                the GPSBusTrack metric
    offset_seconds: Number of seconds to subtract from GTFS trip to normalize.
    gps_data: A list of (lat, lon, reported_update_time) values, exactly as
              reported in the GPS dat. Note that reported_update_time should
              be a timestamp.
    WARNING: No effort is made to prevent duplicate entries! If you do this
    more than once for the same route then YOU MUST DELETE IT FIRST!
    """
    # Segment insert; RETURNING hands back the generated gps_segment_id.
    sql1 = """insert into gps_segments (
                trip_id, trip_date, vehicle_id,
                schedule_error, schedule_offset_seconds
              ) VALUES (
                %(trip_id)s,%(trip_date)s,%(vehicle_id)s,
                %(gtfs_error)s, %(offset)s
              ) RETURNING gps_segment_id"""
    # One row per GPS point, linked to the segment inserted above.
    sql2 = """insert into tracked_routes (
                gps_segment_id, lat, lon, reported_update_time
              ) VALUES (
                %(seg_id)s,%(lat)s,%(lon)s,%(reported_update_time)s
              )"""
    cur = get_cursor()
    SQLExec(cur,sql1,
            {'trip_id':trip_id,'trip_date':trip_date,'vehicle_id':vehicle_id,
             'gtfs_error':str(gtfs_error),'offset':offset_seconds});
    segment_id = list(cur.fetchall())[0][0];
    for lat,lon,reported_update_time in gps_data:
        SQLExec(cur,sql2,
                {'lat':lat,'lon':lon,
                 'reported_update_time':reported_update_time,
                 'seg_id':str(segment_id)});
    cur.close()
    return segment_id | fe1a4f4fb2c89c6634353748d5cdd49d82110e64 | 32,288
def optimize_solution(solution):
    """
    Collapse a cube-move solution in place.

    Moves whose turn count is a multiple of 4 are dropped (a full rotation
    has no effect on the cube), and consecutive moves in the same direction
    are merged into one move (mod 4). The list is modified in place and
    also returned.
    """
    idx = 0
    while idx < len(solution):
        direction, turns = solution[idx]
        if turns % 4 == 0:
            # A no-op rotation: drop it, then step back so the now-adjacent
            # neighbours can be re-examined for merging.
            solution.pop(idx)
            idx = max(idx - 1, 0)
        elif idx + 1 < len(solution):
            next_direction, next_turns = solution[idx + 1]
            if direction == next_direction:
                solution[idx] = (direction, (turns + next_turns) % 4)
                solution.pop(idx + 1)
            else:
                idx += 1
        else:
            break
    return solution
return solution | 4be6bf0e4200dbb629c37a9bdae8338ee32c262b | 32,289 |
from typing import Iterable
# NOTE(review): this imports the stdlib `resource` module, yet `resource`
# is used below as a class decorator -- the real decorator is presumably
# imported elsewhere (this line may be an incorrect auto-added import).
import resource
from typing import Optional
def secretsmanager_resource(
    client: Client,
    policies: Iterable[Policy] = None,
):
    """
    Create Secrets Manager resource.
    Parameters:
    • client: Secrets Manager client object
    • policies: security policies to apply to all operations
    """
    if client.service_name != "secretsmanager":
        raise TypeError("expecting Secrets Manager client")
    @resource
    class SecretsResource:
        # Write-side operations over individual secrets.
        @mutation(policies=policies)
        async def create(self, secret, secret_string: Optional[str]):
            """Add a secret to secrets manager"""
            await client.create_secret(Name=secret, SecretString=secret_string)
        @operation(policies=policies)
        async def put(self, secret, secret_string: Optional[str]):
            """Update a secret in the secrets manager"""
            await client.put_secret_value(SecretId=secret, SecretString=secret_string)
        @operation(policies=policies)
        async def delete(self, secret):
            """Delete the secret."""
            await client.delete_secret(SecretId=secret)
    @resource
    class SecretsManagerResource:
        """Amazon Secrets Manager resource."""
        @mutation(policies=policies)
        async def get_secret(self, secret_name: str) -> Secret:
            """
            Retrieve a secret from Secrets Manager.
            Parameters:
            • secret_name: The name of the secret or secret Amazon Resource Names (ARNs).
            """
            with wrap_client_error():
                get_secret_value_response = await client.get_secret_value(SecretId=secret_name)
            # String secrets and binary secrets arrive in different keys.
            if "SecretString" in get_secret_value_response:
                return Secret(
                    ARN=get_secret_value_response["ARN"],
                    Name=get_secret_value_response["Name"],
                    SecretString=get_secret_value_response["SecretString"],
                )
            else:
                return Secret(
                    ARN=get_secret_value_response["ARN"],
                    Name=get_secret_value_response["Name"],
                    SecretBinary=get_secret_value_response["SecretBinary"],
                )
    # NOTE(review): secretsresource is created but never used or returned;
    # looks like dead code or a missing attachment -- verify intent.
    secretsresource = SecretsResource()
    return SecretsManagerResource() | ee2d880944065331aba0751bdfba2f82c3d7e2ac | 32,290
def lgbm_hyperband_classifier(numeric_features, categoric_features, learning_rate=0.08):
    """
    Build a classification pipeline whose lightgbm hyper-parameters are
    tuned with hyperband.

    Parameters
    ----------
    numeric_features : list
        Names of the numeric feature columns.
    categoric_features : list
        Names of the categoric feature columns.
    learning_rate : float, optional
        Learning rate for lightgbm (default 0.08).
    """
    task = 'classification'
    return _lgbm_hyperband_model(task, numeric_features, categoric_features, learning_rate)
def GL(mu, wid, x, m=0.5):
    """
    Blended 1D Gaussian-Lorentzian ("pseudo-Voigt") peak.

    The profile is a linear mix of a Gaussian and a Lorentzian that share
    the same centre ``mu`` and the same FWHM ``wid``.

    Parameters
    ----------
    mu : float
        Peak centre.
    wid : float
        Full width at half maximum of both component peaks
        (FWHM = 2 * sqrt(2 * ln(2)) * sigma for the Gaussian part).
    x : ndarray
        Points at which the profile is evaluated.
    m : float, optional
        Blending fraction of the Gaussian component (default 0.5).

    Returns
    -------
    ndarray
        The blended Gaussian-Lorentzian peak evaluated at ``x``.

    Reference
    ---------
    Implementation of MATLAB code from
    http://terpconnect.umd.edu/~toh/spectrum/functions.html#Peak_shape_functions
    """
    gauss_part = gaussian(mu, wid, x)
    lorentz_part = lorentzian(mu, wid, x)
    return m * gauss_part + (1 - m) * lorentz_part
def jp_(var, mask):
    """Masked value at j+1/2, with no gradient across the boundary.

    Averages ``var`` with its neighbour one step ahead along axis 0,
    weighting by ``mask`` so that masked-out cells do not contribute.
    ``div0`` is a project helper — presumably it guards against division
    by zero where both mask cells vanish; TODO confirm.
    """
    masked = var * mask
    masked_ahead = np.roll(masked, -1, axis=0)
    mask_ahead = np.roll(mask, -1, axis=0)
    return div0(masked + masked_ahead, mask + mask_ahead)
import re
def _solrize_date(date, date_type=''):
"""
Takes a date string like 2018/01/01 and returns an
integer suitable for querying the date field in a solr document.
"""
solr_date = "*"
if date:
date = date.strip()
start_year, end_year = fulltext_range()
if date_type == 'start' and date == str(start_year) +'-01-01':
return '*'
elif date_type == 'end' and date == str(end_year) +'-12-31':
return '*'
# 1900-01-01 -> 19000101
match = re.match(r'(\d{4})-(\d{2})-(\d{2})', date)
if match:
y, m, d = match.groups()
if y and m and d:
solr_date = y+m+d
return solr_date | f309f784d79b46ed704ee1e631d7b4bdda7057f6 | 32,294 |
def read_file(filename):
    """
    Read *filename* and return its entire content as a string.

    Uses a context manager so the file handle is closed even if the
    read raises (the previous version leaked the handle in that case).
    """
    with open(filename) as in_fp:
        return in_fp.read()
import argparse
def get_arguments():
    """Parse the CLI parameters.

    Returns
    -------
    tuple
        ``(image_folder, image_width)`` where ``image_folder`` defaults
        to ``"images"`` and ``image_width`` is an int defaulting to 400.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--image_folder", default="images")
    parser.add_argument("--image_width", default=400, type=int)
    args = parser.parse_args()
    # Plain attribute access — getattr with a literal name was needless.
    return args.image_folder, args.image_width
def delete_job(job_id):
    """Delete my job by Id
    Upon success, marks job as 'aborted' if it must be suspended, and returns the deleted job with the appropriate status # noqa: E501
    :param job_id: Id of the job that needs to be deleted
    :type job_id: str
    :rtype: Job
    """
    # Look the job up in the module-level queue `q` (defined elsewhere in
    # this file, not visible in this chunk).
    job = q.fetch_job(job_id)
    job.cancel()
    # NOTE(review): the docstring claims a Job is returned, but the id string
    # is returned instead — confirm which one callers actually expect.
    return job_id
def service(appctx):
    """Service with files instance.

    NOTE(review): looks like a pytest fixture (the decorator, if any, is
    outside this view); `appctx` presumably supplies the application
    context required before constructing the service — confirm.
    `RecordService` and `ServiceWithFilesConfig` are project-defined.
    """
    return RecordService(ServiceWithFilesConfig)
import json
import re
def get_sci_edus(filepath):
    """
    Load the EDUs (elementary discourse units) stored as JSON in *filepath*.

    The file is expected to hold a top-level ``root`` list of dicts with
    ``id``, ``parent``, ``relation`` and ``text`` keys; the node with id 0
    is skipped.  A ``<S>`` marker inside ``text`` denotes a sentence
    boundary: it advances the running sentence number and resets the
    intra-sentence EDU counter.

    :param filepath: path of the JSON file to load
    :return: list of ``EDU`` objects (project-defined class)
    """
    # Context manager closes the handle even if parsing fails
    # (the previous version never closed the file).
    with open(filepath, 'r') as fb:
        train = json.loads(fb.read().encode('utf-8'))['root']
    EDUs = []
    sentenceNo = 1
    sentenceID = 1
    for edu_dict in train:
        # id 0 is the artificial root node, not a real EDU.
        if edu_dict['id'] == 0:
            continue
        # Strip the sentence marker and stray carriage returns from the text.
        EDUs.append(EDU([edu_dict['id'], edu_dict['parent'], edu_dict['relation'],
                         re.sub('<S>|\r', '', edu_dict['text']), sentenceNo, sentenceID],
                        [1]))
        if '<S>' in edu_dict['text']:
            sentenceNo += 1
            sentenceID = 1
        else:
            sentenceID += 1
    return EDUs
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.