| content (string, length 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def build_get_string_with_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get string dictionary value {"0": "foo", "1": null, "2": "foo2"}.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == {
"str": "str" # Optional.
}
"""
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/dictionary/prim/string/foo.null.foo2')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
| 5,335,400
|
def get_db_url(db_host, db_name, db_user, db_pass):
"""
Helper function for creating the "pyodbc" connection string.
@see /etc/freetds.conf
@see http://docs.sqlalchemy.org/en/latest/dialects/mssql.html
@see https://code.google.com/p/pyodbc/wiki/ConnectionStrings
"""
params = parse.quote(
"Driver={{FreeTDS}};Server={};Port=1433;"
"Database={};UID={};PWD={};"
.format(db_host, db_name, db_user, db_pass))
return 'mssql+pyodbc:///?odbc_connect={}'.format(params)
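# Usage sketch (illustrative values, not from the original module); the `parse`
# used above is assumed to be `urllib.parse`, normally imported at module level.
from urllib import parse

example_url = get_db_url("db.example.com", "sales", "reader", "s3cret")
# example_url looks roughly like:
# 'mssql+pyodbc:///?odbc_connect=Driver%3D%7BFreeTDS%7D%3BServer%3Ddb.example.com%3B...'
print(example_url)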
| 5,335,401
|
def do_md_tag_update(gc, args):
"""Rename a metadata definitions tag inside a namespace."""
name = args.name.strip()
if name:
fields = {'name': name}
new_tag = gc.metadefs_tag.update(args.namespace, args.tag,
**fields)
_tag_show(new_tag)
else:
utils.exit('Please supply at least one non-blank tag name.')
| 5,335,402
|
def boxes_iou3d_cpu(boxes_a, boxes_b, box_mode='wlh', rect=False, need_bev=False):
"""
Input (torch):
boxes_a: (N, 7) [x, y, z, size1, size2, size3, ry] with the three sizes ordered per box_mode, torch tensor with type float32
boxes_b: (M, 7) same layout as boxes_a
box_mode: permutation of 'wlh' giving the order of the three size columns (default 'wlh').
rect: True/False means boxes are in the camera/velodyne coordinate system.
need_bev: if True, also return the BEV IoU matrix (N, M).
Output:
iou_3d: (N, M)
"""
w_index, l_index, h_index = box_mode.index('w') + 3, box_mode.index('l') + 3, box_mode.index('h') + 3
boxes_a_bev = utils.boxes3d_to_bev_torch(boxes_a, box_mode, rect)
boxes_b_bev = utils.boxes3d_to_bev_torch(boxes_b, box_mode, rect)
overlaps_bev = torch.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_() # (N, M)
iou3d_cuda.boxes_overlap_bev_cpu(boxes_a_bev.contiguous(), boxes_b_bev.contiguous(), overlaps_bev)
# bev iou
area_a = (boxes_a[:, w_index] * boxes_a[:, l_index]).view(-1, 1) # (N, 1)
area_b = (boxes_b[:, w_index] * boxes_b[:, l_index]).view(1, -1) # (1, M) -> broadcast (N, M)
iou_bev = overlaps_bev / torch.clamp(area_a + area_b - overlaps_bev, min=1e-7)
# height overlap
if rect:
boxes_a_height_min = (boxes_a[:, 1] - boxes_a[:, h_index]).view(-1, 1) # y - h
boxes_a_height_max = boxes_a[:, 1].view(-1, 1) # y
boxes_b_height_min = (boxes_b[:, 1] - boxes_b[:, h_index]).view(1, -1)
boxes_b_height_max = boxes_b[:, 1].view(1, -1)
else:
boxes_a_height_min = (boxes_a[:, 2] - boxes_a[:, h_index]).view(-1, 1) # z - h, (N, 1)
boxes_a_height_max = boxes_a[:, 2].view(-1, 1) # z
boxes_b_height_min = (boxes_b[:, 2] - boxes_b[:, h_index]).view(1, -1) # (1, M)
boxes_b_height_max = boxes_b[:, 2].view(1, -1)
max_of_min = torch.max(boxes_a_height_min, boxes_b_height_min) # (N, M) via broadcasting
min_of_max = torch.min(boxes_a_height_max, boxes_b_height_max) # (N, M) via broadcasting
overlaps_h = torch.clamp(min_of_max - max_of_min, min=0) # (N, M)
# 3d iou
overlaps_3d = overlaps_bev * overlaps_h # broadcast: (N, M)
vol_a = (boxes_a[:, h_index] * boxes_a[:, w_index] * boxes_a[:, l_index]).view(-1, 1) # (N, 1)
vol_b = (boxes_b[:, h_index] * boxes_b[:, w_index] * boxes_b[:, l_index]).view(1, -1) # (1, M) -> broadcast (N, M)
iou3d = overlaps_3d / torch.clamp(vol_a + vol_b - overlaps_3d, min=1e-7)
if need_bev:
return iou3d, iou_bev
return iou3d
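# Hedged usage sketch: assumes the compiled `iou3d_cuda` extension and the project's
# `utils.boxes3d_to_bev_torch` helper are importable (not shown here); values are random.
import torch

boxes_a = torch.rand(4, 7)  # (N, 7): x, y, z, w, l, h (order given by box_mode), ry
boxes_b = torch.rand(6, 7)  # (M, 7)
iou3d, iou_bev = boxes_iou3d_cpu(boxes_a, boxes_b, box_mode='wlh', need_bev=True)
print(iou3d.shape, iou_bev.shape)  # torch.Size([4, 6]) torch.Size([4, 6])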
| 5,335,403
|
def make_attrstring(attr):
"""Returns an attribute string in the form key="val" """
attrstring = ' '.join(['%s="%s"' % (k, v) for k, v in attr.items()])
return '%s%s' % (' ' if attrstring != '' else '', attrstring)
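# Example with a hypothetical attribute dict: note the leading space when non-empty.
print(make_attrstring({"id": "item1", "class": "row"}))  # ' id="item1" class="row"'
print(make_attrstring({}))                               # ''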
| 5,335,404
|
def distribute(
computation_graph: ComputationGraph,
agentsdef: Iterable[AgentDef],
hints=None,
computation_memory: Callable[[ComputationNode], float] = None,
communication_load: Callable[[ComputationNode, str], float] = None,
) -> Distribution:
"""
gh-cgdp distribution method.
Heuristic distribution based on communication and hosting costs, while respecting
agents' capacities.
Parameters
----------
computation_graph
agentsdef
hints
computation_memory
communication_load
Returns
-------
Distribution:
The distribution for the computation graph.
"""
# Place computations with hosting costs == 0
# For SECP, this assigns actuator variables and factors to the right device.
fixed_mapping = {}
for comp in computation_graph.node_names():
for agent in agentsdef:
if agent.hosting_cost(comp) == 0:
fixed_mapping[comp] = (
agent.name,
computation_memory(computation_graph.computation(comp)),
)
break
# Sort computation by footprint, but add a random element to avoid sorting on names
computations = [
(computation_memory(n), n, None, random.random())
for n in computation_graph.nodes
if n.name not in fixed_mapping
]
computations = sorted(computations, key=lambda o: (o[0], o[3]), reverse=True)
computations = [t[:-1] for t in computations]
logger.info("placing computations %s", [(f, c.name) for f, c, _ in computations])
current_mapping = {} # Type: Dict[str, str]
i = 0
while len(current_mapping) != len(computations):
footprint, computation, candidates = computations[i]
logger.debug(
"Trying to place computation %s with footprint %s",
computation.name,
footprint,
)
# look for candidate agents for computation c
# TODO: keep a list of remaining capacities for agents ?
if candidates is None:
candidates = candidate_hosts(
computation,
footprint,
computations,
agentsdef,
communication_load,
current_mapping,
fixed_mapping,
)
computations[i] = footprint, computation, candidates
logger.debug("Candidates for computation %s : %s", computation.name, candidates)
if not candidates:
if i == 0:
logger.error(
f"Cannot find a distribution, no candidate for computation {computation}\n"
f" current mapping: {current_mapping}"
)
raise ImpossibleDistributionException(
f"Impossible Distribution, no candidate for {computation}"
)
# no candidate : backtrack !
i -= 1
logger.info(
"No candidate for %s, backtrack placement "
"of computation %s (was on %s",
computation.name,
computations[i][1].name,
current_mapping[computations[i][1].name],
)
current_mapping.pop(computations[i][1].name)
# FIXME : eliminate selected agent for previous computation
else:
_, selected = candidates.pop()
current_mapping[computation.name] = selected.name
computations[i] = footprint, computation, candidates
logger.debug(
"Place computation %s on agent %s", computation.name, selected.name
)
i += 1
# Build the distribution for the mapping
agt_mapping = defaultdict(lambda: [])
for c, a in current_mapping.items():
agt_mapping[a].append(c)
for c, (a, _) in fixed_mapping.items():
agt_mapping[a].append(c)
dist = Distribution(agt_mapping)
return dist
| 5,335,405
|
def LinearScaling(pop):
""" Linear Scaling scheme
.. warning :: Linear Scaling is only for positive raw scores
"""
logging.debug("Running linear scaling.")
pop.statistics()
c = Consts.CDefScaleLinearMultiplier
a = b = delta = 0.0
pop_rawAve = pop.stats["rawAve"]
pop_rawMax = pop.stats["rawMax"]
pop_rawMin = pop.stats["rawMin"]
if pop_rawAve == pop_rawMax:
a = 1.0
b = 0.0
elif pop_rawMin > (c * pop_rawAve - pop_rawMax / c - 1.0):
delta = pop_rawMax - pop_rawAve
a = (c - 1.0) * pop_rawAve / delta
b = pop_rawAve * (pop_rawMax - (c * pop_rawAve)) / delta
else:
delta = pop_rawAve - pop_rawMin
a = pop_rawAve / delta
b = -pop_rawMin * pop_rawAve / delta
for i in xrange(len(pop)):
f = pop[i].score
if f < 0.0:
Util.raiseException("Negative score, linear scaling not supported !", ValueError)
f = f * a + b
if f < 0:
f = 0.0
pop[i].fitness = f
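# Worked sketch of the scaling arithmetic only (plain numbers, not a real pyevolve
# population); the multiplier c is assumed to be about 1.2, pyevolve's usual default.
c = 1.2
raw_ave, raw_max, raw_min = 10.0, 18.0, 4.0
if raw_min > (c * raw_ave - raw_max / c - 1.0):
    delta = raw_max - raw_ave                      # 8.0
    a = (c - 1.0) * raw_ave / delta                # 0.25
    b = raw_ave * (raw_max - c * raw_ave) / delta  # 7.5
else:
    delta = raw_ave - raw_min
    a = raw_ave / delta
    b = -raw_min * raw_ave / delta
# Each raw score f is then mapped to a * f + b, so raw_max maps to c * raw_ave (12.0 here).
print(a * raw_max + b)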
| 5,335,406
|
def depreciated_get_paste(paste_tup):
"""
This takes a tuple consisting of an href from a paste link and a name that identifies a pastebin paste.
It scrapes the page for the paste's content.
:param paste_tup: (string, string)
:return: Paste if successful or False
"""
href, name = paste_tup
# Form the url from the href and perform GET request
paste_url = 'http://pastebin.com' + href
paste_page = requests.get(paste_url)
# Collect the paste details from paste page
if paste_page.status_code == 200:
text = paste_page.text
soup = BeautifulSoup(text, 'html.parser')
# soup.textarea.get_text() return the paste content
paste = Paste(url="http://www.pastebin.com"+href, name=name, content=soup.textarea.get_text(), datetime=datetime.now())
return paste
# Return False if the scrape failed
return False
| 5,335,407
|
def delete_nodes_list(
nodes: List[str],
credentials: HTTPBasicCredentials = Depends(
check_credentials
), # pylint: disable=unused-argument
) -> Dict[str, str]:
"""Deletes a list of nodes (that are discoverables with lldp) to the db.
Exple of simplest call :
curl -X DELETE --user u:p -H "Content-type: application/json" \
http://127.0.0.1/api/nodes \
-d '["node1", "node2", "node3"]'"""
for node in nodes:
delete_node(node)
return {"response": "Ok"}
| 5,335,408
|
def index():
"""
Application Home page
"""
module_name = settings.modules[c].get("name_nice")
response.title = module_name
return {"module_name": module_name,
}
| 5,335,409
|
def handle_option_error(error):
"""Raises exception if error in option command found.
In tmux 3.0, show-option and show-window-option return invalid option instead of
unknown option. See https://github.com/tmux/tmux/blob/3.0/cmd-show-options.c.
In tmux >2.4, there are 3 different types of option errors:
- unknown option
- invalid option
- ambiguous option
In tmux <2.4, unknown option was the only option.
All errors raised will have the base error of :exc:`exc.OptionError`. So to
catch any option error, use ``except exc.OptionError``.
Parameters
----------
error : str
Error response from subprocess call.
Raises
------
:exc:`exc.OptionError`, :exc:`exc.UnknownOption`, :exc:`exc.InvalidOption`,
:exc:`exc.AmbiguousOption`
"""
if 'unknown option' in error:
raise exc.UnknownOption(error)
elif 'invalid option' in error:
raise exc.InvalidOption(error)
elif 'ambiguous option' in error:
raise exc.AmbiguousOption(error)
else:
raise exc.OptionError(error)
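# Usage sketch, assuming this is libtmux's helper and `exc` is its exception
# module (libtmux.exc defines OptionError and its subclasses).
from libtmux import exc

try:
    handle_option_error("invalid option: foo")
except exc.OptionError as err:  # also catches InvalidOption, UnknownOption, AmbiguousOption
    print("caught:", err)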
| 5,335,410
|
def image_click_xshift(axes = "gca"):
"""
Takes a starting and ending point, then shifts the image x by this amount
"""
if axes == "gca": axes = _pylab.gca()
try:
p1 = ginput()
p2 = ginput()
xshift = p2[0][0]-p1[0][0]
e = axes.images[0].get_extent()
e[0] = e[0] + xshift
e[1] = e[1] + xshift
axes.images[0].set_extent(e)
_pylab.draw()
except:
print "whoops"
| 5,335,411
|
def download_abstruse_goose(program_abs_path, abstruse_page_num):
"""Downloads latest Abstruse Goose comics."""
# Create/change appropriate comic folder.
comic_folder = os.path.join(program_abs_path, "abstruse")
if os.path.exists(comic_folder):
os.chdir(comic_folder)
else:
os.mkdir(comic_folder)
os.chdir(comic_folder)
while True:
# Create the comic URL.
url = 'https://abstrusegoose.com/' + str(abstruse_page_num)
try:
# Get the comic page.
res = requests.get(url)
res.raise_for_status()
# Extract the image src.
soup = bs4.BeautifulSoup(res.text, 'html.parser')
match1 = soup.select("img[src*='/strips/']")
if not match1: # If img element is not found, no further new comics can be downloaded.
return abstruse_page_num
comic_url = match1[0].get("src")
# Get the comic image.
res = requests.get(comic_url)
res.raise_for_status()
# Download the comic image.
image_file = open('abstruse' + str(abstruse_page_num) + '.jpg', 'wb')
for chunk in res.iter_content(100000):
image_file.write(chunk)
image_file.close()
# Increment the latest comic num.
abstruse_page_num += 1
except requests.exceptions.HTTPError:
return abstruse_page_num
| 5,335,412
|
def is_CW_in_extension(G):
"""
Returns True if G is 'CW in extension', otherwise it returns False.
G: directed graph of type 'networkx.DiGraph'
EXAMPLE
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
G=nx.DiGraph()
e_list = [(0,1),(0,2),(0,3),(0,4),(1,2),(1,3),(1,4)]
G.add_edges_from(e_list)
plt.figure(1)
nx.draw_circular(G,with_labels=True)
plt.show()
print("is_CW_in_extension(G):",is_CW_in_extension(G))
G.remove_edge(0,1)
plt.figure(1)
nx.draw_circular(G,with_labels=True)
plt.show()
print("is_CW_in_extension(G):",is_CW_in_extension(G))
G.remove_edge(0,3)
plt.figure(1)
nx.draw_circular(G,with_labels=True)
plt.show()
print("is_CW_in_extension(G):",is_CW_in_extension(G))
<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
"""
assert type(G) is nx.DiGraph, "'G' has to be of type 'networkx.DiGraph'."
nodes = list(G.nodes)
m = len(nodes)
nr_beaten_list = np.zeros(m) #nr_beaten_list[i] is the number of nodes v with an edge i->v in G if i is NOT beaten by any other node. Otherwise its -1
for i in range(0,m):
for j in range(0,m):
if i != j and G.has_edge(nodes[i],nodes[j]) and nr_beaten_list[i] != -1:
nr_beaten_list[i]+=1
if i != j and G.has_edge(nodes[j],nodes[i]):
nr_beaten_list[i]=-1
#print(nr_beaten_list)
if len(np.where(nr_beaten_list==m-1)[0]) >0: #G has a CW
return(True)
buf = np.where(nr_beaten_list==m-2)[0]
if len(buf)==2:
[i0,i1] = buf
if not G.has_edge(nodes[i0],nodes[i1]) and not G.has_edge(nodes[i1],nodes[i0]): # i0, i1 are indices into `nodes`; both beat every other node and are not connected to each other
return(True)
return(False)
| 5,335,413
|
def get_index_where(condition: Callable[..., bool], iterable: Iterable) -> List[int]:
"""Return index values where `condition` is `True`."""
return [idx for idx, item in enumerate(iterable) if condition(item)]
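# Example: indices of the even numbers in a list.
print(get_index_where(lambda x: x % 2 == 0, [3, 4, 7, 10, 12]))  # [1, 3, 4]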
| 5,335,414
|
def geom_crossbar(mapping=None, *, data=None, stat=None, position=None, show_legend=None, sampling=None, tooltips=None,
fatten=None,
**other_args):
"""
Display bars with a horizontal median line.
Parameters
----------
mapping : `FeatureSpec`
Set of aesthetic mappings created by `aes()` function.
Aesthetic mappings describe the way that variables in the data are
mapped to plot "aesthetics".
data : dict or `DataFrame`
The data to be displayed in this layer. If None, the default, the data
is inherited from the plot data as specified in the call to ggplot.
stat : str, default='identity'
The statistical transformation to use on the data for this layer, as a string.
Supported transformations: 'identity' (leaves the data unchanged),
'count' (counts number of points with same x-axis coordinate),
'bin' (counts number of points with x-axis coordinate in the same bin),
'smooth' (performs smoothing - linear default),
'density' (computes and draws kernel density estimate).
position : str or `FeatureSpec`
Position adjustment, either as a string ('identity', 'stack', 'dodge', ...),
or the result of a call to a position adjustment function.
show_legend : bool, default=True
False - do not show legend for this layer.
sampling : `FeatureSpec`
Result of the call to the `sampling_xxx()` function.
Value None (or 'none') will disable sampling for this layer.
tooltips : `layer_tooltips`
Result of the call to the `layer_tooltips()` function.
Specifies appearance, style and content.
fatten : float, default=2.5
A multiplicative factor applied to size of the middle bar.
other_args
Other arguments passed on to the layer.
These are often aesthetics settings used to set an aesthetic to a fixed value,
like color='red', fill='blue', size=3 or shape=21.
They may also be parameters to the paired geom/stat.
Returns
-------
`LayerSpec`
Geom object specification.
Notes
-----
`geom_crossbar()` represents a vertical interval, defined by `x`, `ymin`, `ymax`.
The middle bar (typically a median or mean) is drawn as a horizontal line at the `middle` value.
`geom_crossbar()` understands the following aesthetics mappings:
- x : x-axis coordinates.
- ymin : lower bound for error bar.
- middle : position of median bar.
- ymax : upper bound for error bar.
- alpha : transparency level of a layer. Understands numbers between 0 and 1.
- color (colour) : color of a geometry lines. Can be continuous or discrete. For continuous value this will be a color gradient between two colors.
- fill : color of geometry filling.
- size : lines width.
- width : width of a bar.
- linetype : type of the line. Codes and names: 0 = 'blank', 1 = 'solid', 2 = 'dashed', 3 = 'dotted', 4 = 'dotdash', 5 = 'longdash', 6 = 'twodash'.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 10
from lets_plot import *
LetsPlot.setup_html()
data = {
'x': ['a', 'b', 'c', 'd'],
'ymin': [5, 7, 3, 5],
'middle': [6.5, 9, 4.5, 7],
'ymax': [8, 11, 6, 9],
}
ggplot(data, aes(x='x')) + \\
geom_crossbar(aes(ymin='ymin', middle='middle', ymax='ymax'))
|
.. jupyter-execute::
:linenos:
:emphasize-lines: 14-15
import numpy as np
import pandas as pd
from lets_plot import *
LetsPlot.setup_html()
n = 800
cat_list = {c: np.random.uniform(3) for c in 'abcdefgh'}
np.random.seed(42)
x = np.random.choice(list(cat_list.keys()), n)
y = np.array([cat_list[c] for c in x]) + np.random.normal(size=n)
df = pd.DataFrame({'x': x, 'y': y})
err_df = df.groupby('x').agg({'y': ['min', 'median', 'max']}).reset_index()
err_df.columns = ['x', 'ymin', 'ymedian', 'ymax']
ggplot() + \\
geom_crossbar(aes(x='x', ymin='ymin', middle='ymedian', ymax='ymax', fill='x'), \\
data=err_df, width=.6, fatten=5) + \\
geom_jitter(aes(x='x', y='y'), data=df, width=.3, shape=1, color='black', alpha=.5)
"""
return _geom('crossbar',
mapping=mapping,
data=data,
stat=stat,
position=position,
show_legend=show_legend,
sampling=sampling,
tooltips=tooltips,
fatten=fatten,
**other_args)
| 5,335,415
|
def _save_user_contributions(user_contributions):
"""Commits a user contributions object to the datastore."""
user_contributions.validate()
user_models.UserContributionsModel(
id=user_contributions.user_id,
created_exploration_ids=user_contributions.created_exploration_ids,
edited_exploration_ids=user_contributions.edited_exploration_ids,
).put()
| 5,335,416
|
def chess_to_coordinate(pos: str) -> Union[Coordinate, Move]:
"""
Convert algebraic notation to internal objects.
Arguments:
pos: a square like "e2" (returns a Coordinate) or a move like "e2e4",
optionally with a promotion suffix as in "e7e8q" (returns a Move).
"""
if len(pos) == 2:
return Coordinate(int(pos[1]) - 1, file_dict[pos[0]])
else:
if len(pos) == 5:
if pos[4] == 'n':
return Move(Coordinate(int(pos[1]) - 1, file_dict[pos[0]]), Coordinate(int(pos[3]) - 1, file_dict[pos[2]]), False, MoveType.N_PROMO)
elif pos[4] == 'b':
return Move(Coordinate(int(pos[1]) - 1, file_dict[pos[0]]), Coordinate(int(pos[3]) - 1, file_dict[pos[2]]), False, MoveType.B_PROMO)
elif pos[4] == 'r':
return Move(Coordinate(int(pos[1]) - 1, file_dict[pos[0]]), Coordinate(int(pos[3]) - 1, file_dict[pos[2]]), False, MoveType.R_PROMO)
elif pos[4] == 'q':
return Move(Coordinate(int(pos[1]) - 1, file_dict[pos[0]]), Coordinate(int(pos[3]) - 1, file_dict[pos[2]]), False, MoveType.Q_PROMO)
else:
return Move(Coordinate(int(pos[1]) - 1, file_dict[pos[0]]), Coordinate(int(pos[3]) - 1, file_dict[pos[2]]))
| 5,335,417
|
def main():
"""
Main entry point for module execution
:returns: the result from module invocation
"""
required_if = [
("state", "merged", ("config",)),
("state", "replaced", ("config",)),
("state", "rendered", ("config",)),
("state", "overridden", ("config",)),
("state", "parsed", ("running_config",)),
]
module = AnsibleModule(
argument_spec=InterfacesArgs.argument_spec,
required_if=required_if,
supports_check_mode=True,
)
result = Interfaces(module).execute_module()
module.exit_json(**result)
| 5,335,418
|
def get_db_session():
"""
Get the db session from g.
If it does not exist, create a new session, cache it on g and return it.
:return: the db session
"""
session = get_g_cache('_flaskz_db_session')
if session is None:
session = DBSession()
set_g_cache('_flaskz_db_session', session)
return session
| 5,335,419
|
def make_tril_scale(
loc=None,
scale_tril=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None,
validate_args=False,
assert_positive=False,
name=None):
"""Creates a LinOp representing a lower triangular matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
scale_tril: Floating-point `Tensor` representing a lower triangular matrix.
`scale_tril` has shape [N1, N2, ... k, k], which represents a k x k
lower triangular matrix.
When `None` no `scale_tril` term is added to the LinOp.
The upper triangular elements above the diagonal are ignored.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
When `None` no diagonal term is added to the LinOp.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix.
When `scale_identity_multiplier = scale_diag = scale_tril = None` then
`scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
to `scale`.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
assert_positive: Python `bool` indicating whether LinOp should be checked
for being positive definite.
name: Python `str` name given to ops managed by this object.
Returns:
`LinearOperator` representing a lower triangular matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
"""
def _maybe_attach_assertion(x):
if not validate_args:
return x
if assert_positive:
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
array_ops.matrix_diag_part(x),
message="diagonal part must be positive"),
], x)
return control_flow_ops.with_dependencies([
check_ops.assert_none_equal(
array_ops.matrix_diag_part(x),
array_ops.zeros([], x.dtype),
message="diagonal part must be non-zero"),
], x)
with ops.name_scope(name, "make_tril_scale",
values=[loc, scale_diag, scale_identity_multiplier]):
loc = _convert_to_tensor(loc, name="loc")
scale_tril = _convert_to_tensor(scale_tril, name="scale_tril")
scale_diag = _convert_to_tensor(scale_diag, name="scale_diag")
scale_identity_multiplier = _convert_to_tensor(
scale_identity_multiplier,
name="scale_identity_multiplier")
if scale_tril is not None:
scale_tril = array_ops.matrix_band_part(scale_tril, -1, 0) # Zero out TriU.
tril_diag = array_ops.matrix_diag_part(scale_tril)
if scale_diag is not None:
tril_diag += scale_diag
if scale_identity_multiplier is not None:
tril_diag += scale_identity_multiplier[..., array_ops.newaxis]
scale_tril = array_ops.matrix_set_diag(scale_tril, tril_diag)
return linalg.LinearOperatorLowerTriangular(
tril=_maybe_attach_assertion(scale_tril),
is_non_singular=True,
is_self_adjoint=False,
is_positive_definite=assert_positive)
return make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
shape_hint=shape_hint,
validate_args=validate_args,
assert_positive=assert_positive,
name=name)
| 5,335,420
|
def remove_punctuation(word):
"""Remove all punctuation from the word (unicode). Note that the `translate`
method is used, and we assume unicode inputs. The str method has a different
`translate` method, so if you end up working with strings, you may want to
revisit this method.
"""
return word.translate(TRANSLATION_TABLE)
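# Sketch of one way the module-level TRANSLATION_TABLE could be built (the real
# table may differ): map every Unicode punctuation codepoint to None.
import sys
import unicodedata

TRANSLATION_TABLE = {
    cp: None
    for cp in range(sys.maxunicode + 1)
    if unicodedata.category(chr(cp)).startswith("P")
}
print(remove_punctuation(u"Hello, world!"))  # 'Hello world'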
| 5,335,421
|
def run_no_arg():
"""
runs code without giving any arguments
"""
print("Welcome to the Pokey Setup Script Interpreter!")
print("For easier access to the interpreter, use the command line entry. See github page for details.")
print("Anyway, where is your setup script located?")
pssFileLocation = input("")
print("Now, what might the workspace location be?")
workspaceLocation = input("")
run(pssFileLocation, workspaceLocation)
| 5,335,422
|
def test_tag_propagation(pcluster_config_reader, clusters_factory, scheduler, os):
"""
Verify tags from various sources are propagated to the expected resources.
The following resources are checked for tags:
- main CFN stack
- head node
- head node's root EBS volume
- compute node (traditional schedulers)
- compute node's root EBS volume (traditional schedulers)
- shared EBS volume
"""
config_file_tags = {"ConfigFileTag": "ConfigFileTagValue"}
version_tags = {"parallelcluster:version": get_pcluster_version()}
cluster_config = pcluster_config_reader()
cluster = clusters_factory(cluster_config)
cluster_name_tags = {"parallelcluster:cluster-name": cluster.name}
test_cases = [
{
"resource": "Main CloudFormation Stack",
"tag_getter": get_main_stack_tags,
"expected_tags": (version_tags, config_file_tags),
},
{
"resource": "Head Node",
"tag_getter": get_head_node_tags,
"expected_tags": (
cluster_name_tags,
{"Name": "HeadNode", "parallelcluster:node-type": "HeadNode"},
),
},
{
"resource": "Head Node Root Volume",
"tag_getter": get_head_node_root_volume_tags,
"expected_tags": (cluster_name_tags, {"parallelcluster:node-type": "HeadNode"}),
"tag_getter_kwargs": {"cluster": cluster, "os": os},
},
{
"resource": "Compute Node",
"tag_getter": get_compute_node_tags,
"expected_tags": (
cluster_name_tags,
{"Name": "Compute", "parallelcluster:node-type": "Compute"},
config_file_tags,
),
"skip": scheduler == "awsbatch",
},
{
"resource": "Compute Node Root Volume",
"tag_getter": get_compute_node_root_volume_tags,
"expected_tags": (
cluster_name_tags,
{"parallelcluster:node-type": "Compute"},
config_file_tags if scheduler == "slurm" else {},
),
"tag_getter_kwargs": {"cluster": cluster, "os": os},
"skip": scheduler == "awsbatch",
},
{
"resource": "Shared EBS Volume",
"tag_getter": get_shared_volume_tags,
"expected_tags": (version_tags, config_file_tags),
},
]
for test_case in test_cases:
if test_case.get("skip"):
continue
logging.info("Verifying tags were propagated to %s", test_case.get("resource"))
tag_getter = test_case.get("tag_getter")
# Assume tag getters use lone cluster object arg if none explicitly given
tag_getter_args = test_case.get("tag_getter_kwargs", {"cluster": cluster})
observed_tags = tag_getter(**tag_getter_args)
expected_tags = test_case["expected_tags"]
assert_that(observed_tags).contains(*convert_tags_dicts_to_tags_list(expected_tags))
| 5,335,423
|
def run(args):
"""Construct an instance of Compare_IsoSeq_Runs and do the comparison."""
with CompareReferenceTranscripts(isoseq_output_fn=args.isoseq_output_fn,
reference_transcripts_fn=args.reference_transcripts_fn,
output_analysis_fn=args.output_analysis_fn,
min_true_positive=args.min_true_positive,
max_false_positive=args.max_false_positive,
min_seq_similarity=args.min_seq_similarity,
max_fuzzy_junction=args.max_fuzzy_junction) as runner:
runner.run()
| 5,335,424
|
def prepare_filter_weights_slice_conv_2d(weights):
"""Change dimension order of 2d filter weights to the one used in fdeep"""
assert len(weights.shape) == 4
return np.moveaxis(weights, [0, 1, 2, 3], [1, 2, 0, 3]).flatten()
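# Example: a Keras-style (H, W, in_channels, out_channels) kernel is moved to
# (in_channels, H, W, out_channels) order and flattened.
import numpy as np

weights = np.zeros((3, 3, 1, 8))
print(prepare_filter_weights_slice_conv_2d(weights).shape)  # (72,)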
| 5,335,425
|
async def hello(request):
"""Hello page containing sarafan node metadata.
`version` contains sarafan node version.
`content_service_id` — contains service_id of content node
:param request:
:return:
"""
return web.json_response(await request.app['sarafan'].hello())
| 5,335,426
|
def missing_toolchain(triplet: str) -> bool:
"""
Checks whether gcc, g++ and binutils are installed and in the path for the
current triplet
:param triplet: a triplet in the form riscv64-linux-gnu
:return: True if some part of the toolchain is missing, False otherwise
"""
toolchain_expected = {"ar", "as", "gcc", "g++", "ld", "ranlib", "strip"}
retval = False
for tool in toolchain_expected:
retval |= shutil.which(cmd=triplet + "-" + tool, mode=os.X_OK) is None
return retval
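# Example: check whether a riscv64 cross toolchain is available on this machine.
if missing_toolchain("riscv64-linux-gnu"):
    print("riscv64-linux-gnu toolchain is incomplete or not in PATH")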
| 5,335,427
|
def load_capabilities(
base: str = "docassemble.ALWeaver", minimum_version="1.5", include_playground=False
):
"""
Load and return a dictionary containing all advertised capabilities matching
the specified minimum version, and optionally include capabilities that were
advertised from a namespace matching docassemble.playground*. The local
capabilities will always be the default configuration.
"""
current_package_name = _package_name()
this_yaml = path_and_mimetype(
f"{current_package_name}:data/sources/configuration_capabilities.yml"
)[0]
weaverdata = DAStore(base=base)
published_configuration_capabilities = (
weaverdata.get("published_configuration_capabilities") or {}
)
try:
with open(this_yaml) as f:
this_yaml_contents = f.read()
first_file = list(yaml.safe_load_all(this_yaml_contents))[0]
capabilities = {"Default configuration": first_file}
except:
capabilities = {}
for key in list(published_configuration_capabilities.keys()):
# Filter configurations based on minimum published version
if isinstance(published_configuration_capabilities[key], tuple) and Version(
published_configuration_capabilities[key][1]
) < Version(minimum_version):
log(
"Skipping published weaver configuration {key}:{published_configuration_capabilities[key]} because it is below the minimum version {minimum_version}. Consider updating the {key} package."
)
del published_configuration_capabilities[key]
# Filter out capability files unless the package is installed system-wide
if not include_playground and key.startswith("docassemble.playground"):
del published_configuration_capabilities[key]
for package_name in published_configuration_capabilities:
# Don't add the current package twice
if not current_package_name == package_name:
path = path_and_mimetype(
f"{package_name}:data/sources/{published_configuration_capabilities[package_name][0]}"
)[0]
try:
with open(path) as f:
yaml_contents = f.read()
capabilities[package_name] = list(yaml.safe_load_all(yaml_contents))[0]
except:
log(f"Unable to load published Weaver configuration file {path}")
return capabilities
| 5,335,428
|
def build_graph(
config,
train_input_fn, test_input_fn, model_preprocess_fn, model):
"""Builds the training graph.
Args:
config: Training configuration.
train_input_fn: Callable returning the training data as a nest of tensors.
test_input_fn: Callable returning the test data as a nest of tensors.
model_preprocess_fn: Image pre-processing that should be combined with
the model for adversarial evaluation.
model: Callable taking (preprocessed_images, is_training, test_local_stats)
and returning logits.
Returns:
loss: 0D tensor containing the loss to be minimised.
train_measures: Dict (with string keys) of 0D tensors containing
training measurements.
test_measures: Dict (with string keys) of 0D tensors containing
test set evaluation measurements.
init_step_fn: Function taking (session, initial_step_val)
to be invoked to initialise the global training step.
"""
global_step = tf.train.get_or_create_global_step()
optimizer = _optimizer(config.optimizer, global_step)
model_with_preprocess = _model_with_preprocess_fn(
model, model_preprocess_fn)
# Training step.
loss, train_logits, train_adv_logits, train_labels = _train_step(
config.train, model_with_preprocess, global_step, optimizer,
train_input_fn())
train_measures = {
'acc': _top_k_accuracy(train_labels, train_logits),
}
if config.train.adversarial_loss_weight > 0.:
train_measures.update({
'adv_acc': _top_k_accuracy(train_labels, train_adv_logits),
})
# Test evaluation.
with tf.name_scope('test_accuracy'):
test_logits, test_adv_logits, test_labels = _test_step(
config.train, model_with_preprocess, test_input_fn())
test_measures = {
'acc': _top_k_accuracy(test_labels, test_logits),
'adv_acc': _top_k_accuracy(test_labels, test_adv_logits),
}
initial_step = tf.placeholder(shape=(), dtype=tf.int64)
init_global_step_op = tf.assign(global_step, initial_step)
def init_step_fn(session, initial_step_val):
session.run(init_global_step_op, feed_dict={initial_step: initial_step_val})
return loss, train_measures, test_measures, init_step_fn
| 5,335,429
|
def execli_deco():
""" This is a decorating function to excecute a client side Earth Engine
function and retry as many times as needed.
Parameters can be set by modifing module's variables `_execli_trace`,
`_execli_times` and `_execli_wait`
:Example:
.. code:: python
from geetools.tools import execli_deco
import ee
# TRY TO GET THE INFO OF AN IMAGE WITH DEFAULT PARAMETERS
@execli_deco()
def info():
# THIS IMAGE DOESN'T EXIST SO IT WILL THROW AN ERROR
img = ee.Image("wrongparam")
return img.getInfo()
# TRY WITH CUSTOM PARAM (2 times 5 seconds and traceback)
@execli_deco(2, 5, True)
def info():
# THIS IMAGE DOESN'T EXIST SO IT WILL THROW AN ERROR
img = ee.Image("wrongparam")
return img.getInfo()
:param times: number of times it will try to execute the function
:type times: int
:param wait: waiting time before executing the function again
:type wait: int
:param trace: print the traceback
:type trace: bool
"""
def wrap(f):
'''
if trace is None:
global trace
trace = _execli_trace
if times is None:
global times
times = _execli_times
if wait is None:
global wait
wait = _execli_wait
try:
times = int(times)
wait = int(wait)
except:
print(type(times))
print(type(wait))
raise ValueError("'times' and 'wait' parameters must be numbers")
'''
@functools.wraps(f)
def wrapper(*args, **kwargs):
trace = _execli_trace
times = _execli_times
wait = _execli_wait
r = range(times)
for i in r:
try:
result = f(*args, **kwargs)
except Exception as e:
print("try n°", i, "ERROR:", e)
if trace:
traceback.print_exc()
if i < r[-1] and wait > 0:
print("waiting {} seconds...".format(str(wait)))
time.sleep(wait)
elif i == r[-1]:
raise RuntimeError("An error occurred trying to execute"\
" the function '{0}'".format(f.__name__))
else:
return result
return wrapper
return wrap
| 5,335,430
|
def on_click_add(event):
"""Add paper to file cof-papers.csv: botton will turn red if something is missing or paper already present."""
if not (inp_paper_id.value and inp_reference.value and inp_doi.value and inp_title.value) or \
"(already present)" in inp_paper_id.value or \
"ERROR" in inp_paper_id.value:
btn_add_paper.button_type = 'danger'
print(inp_paper_id.value + " Paper not added because of some problem.")
return
btn_add_paper.button_type = 'primary'
line = '{id},"{ref}",{doi},"{title}"\n'.format(id=inp_paper_id.value, ref=inp_reference.value,
doi=inp_doi.value, title=inp_title.value)
print(line)
with open(PAPERS_FILE, 'a+') as handle:
handle.write(line)
btn_add_paper.button_type = 'success'
| 5,335,431
|
def new_canvas():
""" Creates a new canvas for user """
pass
| 5,335,432
|
def lstm_with_backend_selection(inputs, init_h, init_c, kernel,
recurrent_kernel, bias, mask, time_major,
go_backwards, sequence_lengths,
zero_output_for_mask):
"""Call the LSTM with optimized backend kernel selection.
Under the hood, this function will create two TF functions: one with the most
generic kernel that can run on all devices, and a second one with the
cuDNN-specific kernel, which can only run on GPU.
The first function will be called with normal_lstm_params, while the second
function is not called, but only registered in the graph. The Grappler will
do the proper graph rewrite and swap the optimized TF function based on the
device placement.
Args:
inputs: Input tensor of LSTM layer.
init_h: Initial state tensor for the cell output.
init_c: Initial state tensor for the cell hidden state.
kernel: Weights for cell kernel.
recurrent_kernel: Weights for cell recurrent kernel.
bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias
is used in this case.
mask: Boolean tensor for mask out the steps within sequence.
An individual `True` entry indicates that the corresponding timestep
should be utilized, while a `False` entry indicates that the corresponding
timestep should be ignored.
time_major: Boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable length
input, such as ragged tensors. If the input has a fixed timestep size,
this should be None.
zero_output_for_mask: Boolean, whether to output zero for masked timestep.
Returns:
List of output tensors, same as standard_lstm.
"""
params = {
'inputs': inputs,
'init_h': init_h,
'init_c': init_c,
'kernel': kernel,
'recurrent_kernel': recurrent_kernel,
'bias': bias,
'mask': mask,
'time_major': time_major,
'go_backwards': go_backwards,
'sequence_lengths': sequence_lengths,
'zero_output_for_mask': zero_output_for_mask,
}
def gpu_lstm_with_fallback(inputs, init_h, init_c, kernel, recurrent_kernel,
bias, mask, time_major, go_backwards,
sequence_lengths, zero_output_for_mask):
"""Use cuDNN kernel when mask is none or strictly right padded."""
if mask is None:
return gpu_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths)
def cudnn_lstm_fn():
return gpu_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths)
def standard_lstm_fn():
return standard_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths,
zero_output_for_mask=zero_output_for_mask)
return tf.cond(
gru_lstm_utils.is_cudnn_supported_inputs(mask, time_major),
true_fn=cudnn_lstm_fn,
false_fn=standard_lstm_fn)
if gru_lstm_utils.use_new_gru_lstm_impl():
# Chooses the implementation dynamically based on the running device.
(last_output, outputs, new_h, new_c,
runtime) = tf.__internal__.execute_fn_for_device(
{
gru_lstm_utils.CPU_DEVICE_NAME:
lambda: standard_lstm(**params),
gru_lstm_utils.GPU_DEVICE_NAME:
lambda: gpu_lstm_with_fallback(**params)
}, lambda: standard_lstm(**params))
else:
# Each time a `tf.function` is called, we will give it a unique
# identifiable API name, so that Grappler won't get confused when it
# sees multiple LSTM layers added into same graph, and it will be able
# to pair up the different implementations across them.
api_name = 'lstm_' + str(uuid.uuid4())
supportive_attribute = {
'time_major': time_major,
'go_backwards': go_backwards,
}
defun_standard_lstm = gru_lstm_utils.generate_defun_backend(
api_name, gru_lstm_utils.CPU_DEVICE_NAME, standard_lstm,
supportive_attribute)
defun_gpu_lstm = gru_lstm_utils.generate_defun_backend(
api_name, gru_lstm_utils.GPU_DEVICE_NAME, gpu_lstm_with_fallback,
supportive_attribute)
# Call the normal LSTM impl and register the cuDNN impl function. The
# grappler will kick in during session execution to optimize the graph.
last_output, outputs, new_h, new_c, runtime = defun_standard_lstm(**params)
gru_lstm_utils.function_register(defun_gpu_lstm, **params)
return last_output, outputs, new_h, new_c, runtime
| 5,335,433
|
def upgrade(version, verbose):
"""Upgrade the database to a version"""
_sync_migrate(version, downgrade=False, verbose=verbose)
click.echo('Upgrade successful! <3')
| 5,335,434
|
async def cname(ctx, *, nn : str):
"""Change the name of the bot"""
await bot.edit_profile(username = nn)
await bot.say('Done.')
| 5,335,435
|
def _build_stack_from_3d(recipe, input_folder, fov=0, nb_r=1, nb_c=1):
"""Load and stack 3-d tensors.
Parameters
----------
recipe : dict
Map the images according to their field of view, their round,
their channel and their spatial dimensions. Only contain the keys
'fov', 'r', 'c', 'z', 'ext' or 'opt'.
input_folder : str
Path of the folder containing the images.
fov : int
Index of the fov to build.
nb_r : int
Number of round file to stack in order to get a 5-d tensor.
nb_c : int
Number of channel file to stack in order to get a 4-d tensor.
Returns
-------
tensor_5d : np.ndarray, np.uint
Tensor with shape (r, c, z, y, x).
"""
# load and stack successively channel elements then round elements
tensors_4d = []
for r in range(nb_r):
# load and stack channel elements (3-d tensors)
tensors_3d = []
for c in range(nb_c):
path = get_path_from_recipe(recipe, input_folder, fov=fov, r=r,
c=c)
tensor_3d = read_image(path)
tensors_3d.append(tensor_3d)
# stack 3-d tensors in 4-d
tensor_4d = np.stack(tensors_3d, axis=0)
tensors_4d.append(tensor_4d)
# stack 4-d tensors in 5-d
tensor_5d = np.stack(tensors_4d, axis=0)
return tensor_5d
| 5,335,436
|
def view_menu(request):
"""Admin user view all the reservations."""
menus = Menu.objects.all()
return render(request,
"super/view_menu.html",
{'menus': menus})
| 5,335,437
|
def _get_hdfs_dirs_by_date(physical_table_name, date):
"""
Get the list of HDFS data directories for the given date.
:param physical_table_name: physical table name
:param date: date string in YYYYMMDD format
:return: list of HDFS data directories
"""
return [f"{physical_table_name}/{date[0:4]}/{date[4:6]}/{date[6:8]}/{hour}" for hour in DAY_HOURS]
| 5,335,438
|
def InstallDTBO(DTBO):
"""Install Dtbo using dd cmd
DTBO : name of DTBO
"""
# Create the overlay directory using mkdir
GetCmdReturn('sudo mkdir -p '
'/sys/kernel/config/device-tree/overlays/{}'.format(DTBO))
# Install the dtbo using dd
DTBO_PATH = DTBO + '.dtbo'
GetCmdReturn('sudo dd '
'of=/sys/kernel/config/device-tree/overlays/{}/dtbo '
'if=/lib/firmware/{}'.format(DTBO, DTBO_PATH))
| 5,335,439
|
def indeed_jobs(request, category_id):
"""
Load Indeed jobs via ajax.
"""
if request.is_ajax() and request.method == 'POST':
per_page = 10
page = 1
html = []
if category_id == '0':
all_jobs = IndeedJob.objects.all()
else:
all_jobs = IndeedJob.objects.filter(category=category_id)
paginator = Paginator(all_jobs, per_page)
page = request.GET.get('page')
try:
jobs = paginator.page(page)
except PageNotAnInteger:
jobs = paginator.page(1)
except EmptyPage:
jobs = paginator.page(paginator.num_pages)
for job in jobs:
html.append(render_to_string('indeed-job.html', {'job': job}))
context = {
'html': u''.join(html),
'page': jobs.number,
}
if jobs.has_next(): context.update({'next_page': jobs.next_page_number()})
return HttpResponse(json.dumps(context), content_type='application/json')
raise Http404
| 5,335,440
|
def iinsertion_sort(arr, order=ASCENDING):
"""Iterative implementation of insertion sort.
:param arr: input list
:param order: sorting order i.e "asc" or "desc"
:return: list sorted in the order defined
"""
operator = SORTING_OPERATORS.get(order.lower(), GREATER_THAN)
for i in range(1, len(arr)):
position = i - 1
value = arr[i]
while position >= 0 and operator(arr[position], value):
arr[position + 1] = arr[position]
position -= 1
arr[position + 1] = value
return arr
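# Usage sketch; ASCENDING, GREATER_THAN and SORTING_OPERATORS are module-level
# constants assumed to look roughly like:
#   ASCENDING, DESCENDING = "asc", "desc"
#   GREATER_THAN, LESS_THAN = operator.gt, operator.lt
#   SORTING_OPERATORS = {"asc": GREATER_THAN, "desc": LESS_THAN}
print(iinsertion_sort([5, 2, 9, 1]))                # [1, 2, 5, 9]
print(iinsertion_sort([5, 2, 9, 1], order="desc"))  # [9, 5, 2, 1]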
| 5,335,441
|
def actor_files_paths():
"""
Returns the file paths that are bundled with the actor. (Path to the content of the actor's file directory).
"""
return current_actor().actor_files_paths
| 5,335,442
|
def parse_table(data: bytes, fields: list) -> dict:
"""Return a Python dictionary created from the bytes *data* of
an ISIS cube table (presumably extracted via read_table_data()),
and described by the *fields* list.
Please be aware that this does not perform masking of the ISIS
special pixels that may be present in the table, and simply
returns them as the appropriate int or float values.
The *fields* list must be a list of dicts, each of which must
contain the following keys: 'Name', 'Type', and 'Size'. The
'Name' key can be any string (and these will end up being the
keys in the returned dict). 'Size' is the size in bytes of the
field, and 'Type' is a string that must be one of 'Integer',
'Double', 'Real', or 'Text'.
If you are using the pvl library, the get_table() function will
be easier to use.
"""
row_len = 0
for f in fields:
row_len += data_sizes[f["Type"]] * int(f["Size"])
if len(data) % row_len != 0:
raise ValueError(
f"The total sizes of each field ({row_len}) do not evenly divide "
f"into the size of the data ({len(data)}), so something is off."
)
# Parse the binary data
results = {f["Name"]: [] for f in fields}
offset = 0
while offset < len(data):
for f in fields:
if f["Type"] == "Text":
field_data = data[offset : offset + int(f["Size"])].decode(
encoding="latin_1"
)
else:
data_fmt = data_formats[f["Type"]] * int(f["Size"])
f_data = struct.unpack_from(data_fmt, data, offset)
if len(f_data) == 1:
field_data = f_data[0]
else:
field_data = list(f_data)
results[f["Name"]].append(field_data)
offset += data_sizes[f["Type"]] * int(f["Size"])
return results
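# Sketch of a call on two hand-packed 12-byte rows; assumes the module's lookup
# tables are roughly data_sizes = {'Integer': 4, 'Double': 8, ...} and
# data_formats = {'Integer': 'i', 'Double': 'd', ...}, on a little-endian machine.
import struct

fields = [
    {"Name": "Id", "Type": "Integer", "Size": 1},
    {"Name": "Value", "Type": "Double", "Size": 1},
]
data = struct.pack("<id", 1, 2.5) + struct.pack("<id", 2, 7.25)
print(parse_table(data, fields))  # {'Id': [1, 2], 'Value': [2.5, 7.25]}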
| 5,335,443
|
def gridtilts(shape, thismask, slit_cen, coeff2, func2d, spec_order, spat_order, pad_spec=30, pad_spat = 5, method='interp'):
"""
Parameters
----------
shape : tuple of int
Image shape (nspec, nspat).
thismask : ndarray, bool
Mask of the pixels belonging to the slit.
slit_cen : ndarray
Spectral trace of the slit center.
coeff2 : ndarray
2-d fit coefficients of the tilts produced by fit_tilts.
func2d : str
Name of the 2-d fitting function.
spec_order, spat_order : int
Polynomial orders for the final 2-d fit.
pad_spec, pad_spat : int
Padding (pixels) in the spectral and spatial directions.
method : str
'interp' (default, griddata interpolation) or 'hist2d' (fast nearest-grid-point histogram).
Returns
-------
coeff2_tilts : ndarray
Coefficients of the final 2-d fit to the tilts.
tilts : ndarray, float
Image indicating how spectral pixel locations move across the image. This output is used in the pipeline.
"""
# Compute the tilts image
nspec, nspat = shape
xnspecmin1 = float(nspec-1)
xnspatmin1 = float(nspat-1)
spec_vec = np.arange(nspec)
spat_vec = np.arange(nspat)
# JFH This histogram method is not preferred, since it basically does NGP. It is however super fast, so for big images
# it is useful to have it
if 'hist2d' in method:
oversamp_spec=5
oversamp_spat=3
spec_ind, spat_ind = np.where(thismask)
min_spec = spec_ind.min() - pad_spec
max_spec = spec_ind.max() + pad_spec
num_spec = max_spec - min_spec + 1
min_spat = spat_ind.min() - pad_spat
max_spat = spat_ind.max() + pad_spat
num_spat = max_spat - min_spat + 1
spec_lin = np.linspace(min_spec,max_spec,num = int(np.round(num_spec*oversamp_spec)))
spat_lin = np.linspace(min_spat,max_spat,num = int(np.round(num_spat*oversamp_spat)))
spat_img, spec_img = np.meshgrid(spat_lin, spec_lin)
# Normalized spatial offset image (from central trace)
slit_cen_lin = (scipy.interpolate.interp1d(np.arange(nspec),slit_cen,bounds_error=False,fill_value='extrapolate'))(spec_lin)
slit_cen_img = np.outer(slit_cen_lin, np.ones(spat_img.shape[1])) # center of the slit replicated spatially
dspat_img_nrm = (spat_img - slit_cen_img)/xnspatmin1
spec_img_nrm = spec_img/xnspecmin1
# normalized spec image
tracepix = spec_img + xnspecmin1*utils.func_val(coeff2, spec_img_nrm, func2d, x2=dspat_img_nrm,
minx=0.0, maxx=1.0, minx2=-1.0, maxx2=1.0)
norm_img, spec_edges, spat_edges = np.histogram2d(tracepix.flatten(), spat_img.flatten(),
bins=[np.arange(nspec+1), np.arange(nspat+1)], density=False)
weigh_img, spec_edges, spat_edges = np.histogram2d(tracepix.flatten(), spat_img.flatten(),
bins=[np.arange(nspec+1), np.arange(nspat+1)],
weights = spec_img.flatten(),density=False)
piximg =(norm_img > 0.0)*weigh_img/(norm_img + (norm_img == 0.0))
inmask = thismask & (norm_img > 0) & (piximg/xnspecmin1 > -0.2) & (piximg/xnspecmin1 < 1.2)
# This is the default method, although scipy.interpolate.griddata is a bit slow
elif 'interp' in method:
spec_vec_pad = np.arange(-pad_spec,nspec+pad_spec)
spat_vec_pad = np.arange(-pad_spat,nspat+pad_spat)
spat_img, spec_img = np.meshgrid(spat_vec, spec_vec)
spat_img_pad, spec_img_pad = np.meshgrid(np.arange(-pad_spat,nspat+pad_spat),np.arange(-pad_spec,nspec+pad_spec))
slit_cen_pad = (scipy.interpolate.interp1d(spec_vec,slit_cen,bounds_error=False,fill_value='extrapolate'))(spec_vec_pad)
thismask_pad = np.zeros_like(spec_img_pad,dtype=bool)
ind_spec, ind_spat = np.where(thismask)
slit_cen_img_pad= np.outer(slit_cen_pad, np.ones(nspat + 2*pad_spat)) # center of the slit replicated spatially
# Normalized spatial offset image (from central trace)
dspat_img_nrm = (spat_img_pad - slit_cen_img_pad)/xnspatmin1
# normalized spec image
spec_img_nrm = spec_img_pad/xnspecmin1
# Embed the old thismask in the new larger padded thismask
thismask_pad[ind_spec + pad_spec,ind_spat + pad_spat] = thismask[ind_spec,ind_spat]
# Now grow the thismask_pad
kernel = np.ones((2*pad_spec, 2*pad_spat))/float(4*pad_spec*pad_spat)
thismask_grow = scipy.ndimage.convolve(thismask_pad.astype(float), kernel, mode='nearest') > 0.0
# Evaluate the tilts on the padded image grid
tracepix = spec_img_pad[thismask_grow] + xnspecmin1*utils.func_val(coeff2, spec_img_nrm[thismask_grow], func2d, x2=dspat_img_nrm[thismask_grow],
minx=0.0, maxx=1.0, minx2=-1.0, maxx2=1.0)
## TESTING STARTS
"""
ikeep = np.isfinite(tracepix)
sigma = np.full_like(spec_img_pad[thismask_grow], 10.0)/xnspecmin1
fitxy = [spec_order, spat_order]
fitmask, coeff2_tilts = utils.robust_polyfit_djs(tracepix/xnspecmin1, spec_img_pad[thismask_grow]/xnspecmin1,
fitxy, x2=spat_img_pad[thismask_grow]/xnspatmin1,
sigma=sigma,
upper=5.0, lower=5.0, maxdev=10.0/xnspecmin1,
inmask=ikeep, function=func2d, maxiter=20,
minx=0.0, maxx=1.0, minx2=0.0, maxx2=1.0, use_mad=False)
## TESTING ENDS
# values(points) \equiv spec_pos(tilt,spat_pos) which is the piximg that we want to create via griddata interpolation
"""
ikeep = np.isfinite(tracepix)
points = np.stack((tracepix[ikeep], spat_img_pad[thismask_grow][ikeep]), axis=1)
values =spec_img_pad[thismask_grow][ikeep]
piximg = scipy.interpolate.griddata(points, values, (spec_img, spat_img), method='cubic')
inmask = thismask & np.isfinite(piximg) & (piximg/xnspecmin1 > -0.2) & (piximg/xnspecmin1 < 1.2)
# Now simply do a 2d polynomial fit with just rejection of crazy behavior, i.e. 10 pixels
fitxy = [spec_order, spat_order]
sigma = np.full_like(spec_img,10.0)/xnspecmin1
fitmask, coeff2_tilts = utils.robust_polyfit_djs(spec_img.flatten()/xnspecmin1, piximg.flatten()/xnspecmin1,
fitxy, x2=spat_img.flatten()/xnspatmin1, sigma = sigma.flatten(),
upper=5.0, lower=5.0, maxdev = 10.0/xnspecmin1,
inmask=inmask.flatten(), function=func2d, maxiter=20,
minx=0.0, maxx=1.0, minx2=0.0,maxx2=1.0,use_mad=False)
irej = np.invert(fitmask) & inmask.flatten()
msgs.info('Rejected {:d}/{:d} pixels in final tilts image after gridding'.format(np.sum(irej),np.sum(inmask)))
# normalized tilts image
tilts = utils.func_val(coeff2_tilts, spec_img/xnspecmin1, func2d, x2=spat_img/xnspatmin1,minx=0.0, maxx=1.0, minx2=0.0, maxx2=1.0)
tilts = np.fmax(np.fmin(tilts, 1.2),-0.2)
# Added this to ensure that tilts are never crazy values due to extrapolation of fits which can break
# wavelength solution fitting
return coeff2_tilts, tilts
| 5,335,444
|
def exp_t(u, t):
"""Compute exp_t for `u`."""
def _internal_exp_t(u, t):
return tf.nn.relu(1.0 + (1.0 - t) * u) ** (1.0 / (1.0 - t))
return tf.cond(
tf.math.equal(t, 1.0), lambda: tf.math.exp(u),
functools.partial(_internal_exp_t, u, t))
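# The tempered exponential computed above is
#   exp_t(u) = max(0, 1 + (1 - t) * u) ** (1 / (1 - t)),
# which reduces to exp(u) at t == 1. A quick eager-mode check of both branches
# (assumes `functools` is imported at module level, as the function requires):
import tensorflow as tf

print(exp_t(tf.constant(1.0), tf.constant(1.0)))  # ~2.7183 (= e)
print(exp_t(tf.constant(1.0), tf.constant(0.5)))  # (1 + 0.5 * 1) ** 2 = 2.25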
| 5,335,445
|
def max_matching(G, method="ilp"):
"""Return a largest matching in *G*.
Parameters
----------
G : NetworkX graph
An undirected graph.
method: string
The method to use for finding the maximum matching. Use
'ilp' for integer linear program or 'bf' for brute force.
Defaults to 'ilp'.
Returns
-------
set
A set of edges comprising a maximum matching in *G*.
See Also
--------
max_matching
"""
max_matching_func = {"bf": max_matching_bf, "ilp": max_matching_ilp}.get(method, None)
if max_matching_func:
return max_matching_func(G)
raise ValueError('Invalid `method` argument "{}"'.format(method))
| 5,335,446
|
def hasEdgeFlux(source, edgeDistance=1):
"""hasEdgeFlux
Determine whether or not a source has flux within `edgeDistance`
of the edge.
Parameters
----------
source : `scarlet.Component`
The source to check for edge flux
edgeDistance : int
The distance from the edge of the image to consider
a source an edge source. For example if `edgeDistance=3`
then any source within 3 pixels of the edge will be
considered to have edge flux.
If `edgeDistance` is `None` then the edge check is ignored.
Returns
-------
isEdge: `bool`
Whether or not the source has flux on the edge.
"""
if edgeDistance is None:
return False
assert edgeDistance > 0
# Use the first band that has a non-zero SED
flux = scarlet.measure.flux(source)
if hasattr(source, "sed"):
band = np.min(np.where(flux > 0)[0])
else:
band = np.min(np.where(flux > 0)[0])
model = source.get_model()[band]
for edge in range(edgeDistance):
if (
np.any(model[edge-1] > 0)
or np.any(model[-edge] > 0)
or np.any(model[:, edge-1] > 0)
or np.any(model[:, -edge] > 0)
):
return True
return False
| 5,335,447
|
def test_timestamp_rollback(version, intervals):
"""Test removing versions from a timestamp."""
ts = Timestamp(intervals=[[2, 3], [5, 7], 9, [11, 12]])
assert ts.rollback(version).is_equal(Timestamp(intervals=intervals))
| 5,335,448
|
def biswas_robustness(data_scikit, data_mm):
"""
summary stats on consensus peaks
"""
CV = find_CV(th=0.0001, ca=0.5, sd=1)
CV_th001 = find_CV(th=0.001, ca=0.5, sd=1)
CV_th01 = find_CV(th=0.01, ca=0.5, sd=1)
CV_th00001 = find_CV(th=0.00001, ca=0.5, sd=1)
CV_sd15 = find_CV(th=0.0001, ca=0.5, sd=1.5)
CV_sd05 = find_CV(th=0.0001, ca=0.5, sd=0.5)
CV_ca09 = find_CV(th=0.0001, ca=0.9, sd=0.5)
CV_ca01 = find_CV(th=0.0001, ca=0.1, sd=0.5)
biswas_df = pd.DataFrame(columns=['ORF', 'corr_th001', 'corr_th01', 'corr_th00001', 'corr_sd15', 'corr_sd05', 'corr_ca09', 'corr_ca01'])
list_orfs = list( data_scikit.keys() )
for ix, orf in enumerate(list_orfs):
output = np.zeros(( 7 ))
coef = 0
p = 1
current_data = data_scikit[orf]
current_mm = data_mm[orf]
if np.shape(current_data)[1] == len(current_mm):
current_data[:,~current_mm] = 0 # after, for false consensus (i.e. multimapping), set to 0
current_cons, current_peaks = run_mc(current_data, CV)
current_cons_th001, current_peaks_th001 = run_mc(current_data, CV_th001)
current_cons_th01, current_peaks_th01 = run_mc(current_data, CV_th01)
current_cons_th00001, current_peaks_th00001 = run_mc(current_data, CV_th00001)
current_cons_sd15, current_peaks_sd15 = run_mc(current_data, CV_sd15)
current_cons_sd05, current_peaks_sd05 = run_mc(current_data, CV_sd05)
current_cons_ca09, current_peaks_ca09 = run_mc(current_data, CV_ca09)
current_cons_ca01, current_peaks_ca01 = run_mc(current_data, CV_ca01)
output[0], p = stats.spearmanr(current_cons, current_cons_th001)
output[1], p = stats.spearmanr(current_cons, current_cons_th01)
output[2], p = stats.spearmanr(current_cons, current_cons_th00001)
output[3], p = stats.spearmanr(current_cons, current_cons_sd15)
output[4], p = stats.spearmanr(current_cons, current_cons_sd05)
output[5], p = stats.spearmanr(current_cons, current_cons_ca09)
output[6], p = stats.spearmanr(current_cons, current_cons_ca01)
output = np.around(output,3)
biswas_df.loc[len(biswas_df)] = ( orf, output[0], output[1], output[2], output[3], output[4], output[5], output[6] )
print(ix, orf, output[0], output[1], output[2], output[3], output[4], output[5], output[6] )
return biswas_df
| 5,335,449
|
def get_click_offset(df):
"""
Return cumulative click offsets marking where each session starts.
df[session_key].nunique() gives the number of sessions,
df.groupby(session_key).size() gives the size of each session, and
its .cumsum() turns those sizes into cumulative offsets.
"""
offsets = np.zeros(df[session_key].nunique() + 1, dtype=np.int32)
offsets[1:] = df.groupby(session_key).size().cumsum()
return offsets
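# Usage sketch; `session_key` is normally a module-level constant naming the
# session id column, assumed here to be 'SessionId', with df sorted by session.
import pandas as pd

session_key = 'SessionId'
df = pd.DataFrame({'SessionId': [1, 1, 1, 2, 2, 3], 'ItemId': [10, 11, 12, 10, 13, 11]})
print(get_click_offset(df))  # [0 3 5 6]: row index where each session starts, plus the end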
| 5,335,450
|
def store_attention_plots(attentions, targets, sources, output_prefix,
idx):
"""
Saves attention plots.
:param attentions: attention score matrices, one per example
:param targets: target token sequences
:param sources: source token sequences
:param output_prefix: prefix for the output pdf files
:param idx: indices of the examples to plot
:return:
"""
for i in idx:
plot_file = "{}.{}.pdf".format(output_prefix, i)
src = sources[i]
trg = targets[i]
attention_scores = attentions[i].T
try:
plot_heatmap(scores=attention_scores, column_labels=trg,
row_labels=src, output_path=plot_file)
except:
print("Couldn't plot example {}: src len {}, trg len {}, "
"attention scores shape {}".format(i, len(src), len(trg),
attention_scores.shape))
continue
| 5,335,451
|
def test_agent_context_ledger_apis():
"""Test that the ledger apis configurations are loaded correctly."""
private_key_pem_path = os.path.join(CUR_PATH, "data", "priv.pem")
wallet = Wallet({'default': private_key_pem_path})
connections = [DummyConnection()]
ledger_apis = LedgerApis({"fetchai": ('alpha.fetch-ai.com', 80)})
my_aea = AEA("Agent0", connections, wallet, ledger_apis, resources=Resources(str(Path(CUR_PATH, "data", "dummy_aea"))))
assert set(my_aea.context.ledger_apis.apis.keys()) == {"fetchai"}
fetchai_ledger_api_obj = my_aea.context.ledger_apis.apis["fetchai"]
assert fetchai_ledger_api_obj.tokens.host == 'alpha.fetch-ai.com'
assert fetchai_ledger_api_obj.tokens.port == 80
| 5,335,452
|
def build_crs_table(savepath):
"""
Build crs table of all equivalent format variations by scraping spatialreference.org.
Saves table as tab-delimited text file.
NOTE: Might take a while.
Arguments:
- *savepath*: The absolute or relative filepath to which to save the crs table, including the ".txt" extension.
"""
# create table
outfile = open(savepath, "wb")
# create fields
fields = ["codetype", "code", "proj4", "ogcwkt", "esriwkt"]
outfile.write("\t".join(fields) + "\n")
# make table from url requests
for codetype in ("epsg", "esri", "sr-org"):
print(codetype)
# collect existing proj list
print("fetching list of available codes")
codelist = []
page = 1
while True:
try:
link = 'https://spatialreference.org/ref/%s/?page=%s' %(codetype,page)
html = urllib2.urlopen(link).read()
codes = [match.groups()[0] for match in re.finditer(r'/ref/'+codetype+'/(\d+)', html) ]
if not codes: break
print("page",page)
codelist.extend(codes)
page += 1
except:
break
print("fetching string formats for each projection")
for i,code in enumerate(codelist):
# check if code exists
link = 'https://spatialreference.org/ref/%s/%s/' %(codetype,code)
urllib2.urlopen(link)
# collect each projection format in a table row
row = [codetype, code]
for resulttype in ("proj4", "ogcwkt", "esriwkt"):
try:
link = 'https://spatialreference.org/ref/%s/%s/%s/' %(codetype,code,resulttype)
result = urllib2.urlopen(link).read()
row.append(result)
except:
pass
print("projection %i of %i added" %(i,len(codelist)) )
outfile.write("\t".join(row) + "\n")
# close the file
outfile.close()
| 5,335,453
|
async def claptext(memereview):
""" Praise people! """
textx = await memereview.get_reply_message()
message = memereview.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await memereview.edit("`Hah, I don't clap pointlessly!`")
return
reply_text = "👏 "
reply_text += message.replace(" ", " 👏 ")
reply_text += " 👏"
await memereview.edit(reply_text)
| 5,335,454
|
def delete_enrichment():
"""
Controller to delete all existing GO enrichments
:return: Redirect to admin main screen
"""
CoexpressionCluster.delete_enrichment()
flash('Successfully removed GO enrichment for co-expression clusters', 'success')
return redirect(url_for('admin.controls.index'))
| 5,335,455
|
def get_all_permissions(context=None):
"""Get the ids of all defined permissions
"""
for id, permission in get_utilities_for(IPermission, context):
if id != 'zope.Public':
yield id
| 5,335,456
|
def evaluate_agent(agent, env, alpha, num_users=100, deterministic=False,
softmax_temperature=1.0,
scatter_plot_trajectories=False, figure_file_obj=None,
risk_score_extractor=violence_risk, plot_histogram=False,
plot_trajectories=True,
stepwise_plot=False, only_evaluate_pool=None,
reward_health_distribution_plot=False, debug_log=False):
"""Runs an agent-env simulation to evaluate average reward and safety costs.
Args:
agent: rnn_cvar_agent.SafeRNNAgent object.
env: Recsim environment that returns responses with reward and health score.
alpha: The alpha used as the level for VaR/CVaR.
num_users: Number of users to sample for the evaluation.
deterministic: Whether the agent chooses the argmax action instead of
sampling.
scatter_plot_trajectories: Whether to evaluate
figure_file_obj: File object to store the plot.
risk_score_extractor: A function which takes an observation and returns a
risk score.
Returns:
Dictionary with average reward, health score, cvar, var for num_users
sampled.
"""
results = {}
if hasattr(env._environment, 'set_active_pool'): # pylint: disable=protected-access
pools = ['train', 'eval', 'test']
if only_evaluate_pool:
pools = [only_evaluate_pool]
else:
pools = ['all']
for pool in pools:
tf.keras.backend.set_learning_phase(0)
if hasattr(env._environment._user_model._user_sampler, 'set_active_pool'): # pylint: disable=protected-access
env._environment.set_active_pool(
pool) # pylint: disable=protected-access
else:
assert pool == 'all' or only_evaluate_pool
if plot_histogram or plot_trajectories:
recs_histogram = Counter({})
recs_histogram_keys_list = {}
if debug_log:
user_rec_log = []
ratings = []
ratings_health_user_map = {}
health = []
rewards = []
max_episode_length = agent.max_episode_length
if stepwise_plot:
stepwise_ratings = [[] for _ in range(max_episode_length)]
stepwise_healths = [[] for _ in range(max_episode_length)]
agent.epsilon = 0.0 # Turn off any exploration.
env._environment._user_model._user_sampler.reset_sampler()
# Set the learning phase to 0 i.e. evaluation to not use dropout.
# Generate num_users trajectories.
for _ in range(num_users):
# TODO(): Clean the logged variables by making a data class.
curr_user_reward = 0.0
curr_user_health = 0.0
curr_user_rating = 0.0
if plot_histogram or plot_trajectories:
current_trajectory = []
reward = 0
observation = env.reset()
curr_user_vector = env.environment.user_model._user_state.topic_affinity
user_id = observation['user']['user_id']
if debug_log:
user_rec_log.append((user_id, []))
for step_number in range(max_episode_length):
slate = agent.step(reward, observation, eval_mode=True,
deterministic=deterministic, temperature=softmax_temperature)
observation, reward, _, _ = env.step(slate)
rating = observation['response'][0]['rating']
if plot_histogram or plot_trajectories:
current_trajectory.append(slate[0])
if slate[0] in recs_histogram:
recs_histogram[slate[0]] = recs_histogram[slate[0]] + 1
else:
recs_histogram[slate[0]] = 1
recs_histogram_keys_list[slate[0]] = len(
recs_histogram.keys())
if stepwise_plot:
# print(reward, risk_score_extractor(observation))
stepwise_ratings[step_number].append(rating)
stepwise_healths[step_number].append(
1-risk_score_extractor(observation))
curr_user_rating += rating
curr_user_reward += reward
curr_user_health += 1-risk_score_extractor(observation)
if debug_log:
user_rec_log[-1][1].append((slate[0], rating, 1-risk_score_extractor(observation), reward))
agent.end_episode(reward, observation, eval_mode=True)
ratings.append(curr_user_rating/float(max_episode_length))
health.append(curr_user_health/float(max_episode_length))
ratings_health_user_map[str(curr_user_vector)] = (ratings[-1], health[-1])
rewards.append(curr_user_reward/float(max_episode_length))
if plot_trajectories:
plot_current_trajectory(
current_trajectory, observation, recs_histogram_keys_list)
plt.show()
agent.empty_buffer()
health_risks = 1-np.array(health)
var = np.percentile(health_risks, 100*alpha)
cvar = compute_cvar(health_risks, var)
logging.info('Average Reward = %f, Average Health = %f, '
'Average Ratings = %f,VaR = %f, CVaR = %f',
np.mean(rewards), np.mean(health), np.mean(ratings), var, cvar)
if plot_histogram:
plot_recs_hists(recs_histogram, pool)
plt.show()
if stepwise_plot:
plot_stepwise_ratings(stepwise_ratings, stepwise_healths)
# Set the learning phase back to 1.
tf.keras.backend.set_learning_phase(1)
if scatter_plot_trajectories:
plot_trajectories(ratings, health, figure_file_obj)
results[pool] = {
'rewards': np.mean(rewards),
'health': np.mean(health),
'ratings': np.mean(ratings),
'var': var,
'cvar': cvar
}
if plot_histogram:
results[pool]['unique_recs'] = len(recs_histogram.keys())
if reward_health_distribution_plot:
results[pool]['ratings_health_user_map'] = ratings_health_user_map
plot_reward_vs_health_distribution(ratings, health)
if debug_log:
save_user_rec_log()
results[pool]['user_rec_log'] = user_rec_log
if len(results) == 1: # No train/eval/test split, just return one value.
return results[only_evaluate_pool] if only_evaluate_pool else results['all']
# Promote the eval results to the top-level dictionary.
results.update(results['eval'])
return results
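# Hedged sketch (added): compute_cvar is defined elsewhere in this codebase; a
# definition consistent with the call above would average the risk tail at or
# beyond the VaR threshold. This is an illustrative stand-in, not the original.
def compute_cvar_sketch(risks, var):
    tail = risks[risks >= var]
    return float(np.mean(tail)) if tail.size else float(var)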
| 5,335,457
|
def argmax(sequence: Sequence) -> int:
"""Find the argmax of a sequence."""
return max(range(len(sequence)), key=lambda i: sequence[i])
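# Quick usage check (added for illustration).
assert argmax([3, 9, 4]) == 1
assert argmax("abz") == 2  # works for any sequence with orderable elements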
| 5,335,458
|
def read_data_from_device(device, location):
""" Reads text data from device and returns it as output
Args:
location ('str'): Path to the text file
Raises:
FileNotFoundError: File Does not Exist
Returns:
Data ('str'): Text data read from the device
"""
# IMPORTANT
# =========
# This API does not require the device to have network connection
# copy_from_device is the other API that behaves similar to this one,
# but it requires network connection since it uses SCP
try:
return device.execute("cat {}".format(location))
    except Exception:  # Raise FileNotFoundError when a generic error is encountered
raise FileNotFoundError("File {} does not exist.".format(location))
| 5,335,459
|
def get_credentials() -> tuple:
"""Gets bot auth credentials from environment variables defined in the local .env file"""
load_dotenv()
irc_token = os.environ.get('TWITCH_OAUTH_PASS')
client_id = os.environ.get('TWITCH_CLIENT_ID')
channel = os.environ.get('TWITCH_CHANNEL')
return irc_token, client_id, channel
| 5,335,460
|
def test_config_file(tmpdir):
"""Get options from a config file."""
config = str(tmpdir.join('mapbox.ini'))
with open(config, 'w') as cfg:
cfg.write("[mapbox]\n")
cfg.write("access-token = pk.test_config_file\n")
cfg.write("verbosity = 11\n")
runner = CliRunner()
result = runner.invoke(main_group, ['-c', config, 'config'], catch_exceptions=False)
assert config in result.output
assert "access-token = pk.test_config_file" in result.output
assert "verbosity = 11" in result.output
| 5,335,461
|
def postman_parser(postman_info: dict,
environment_vars: Dict = None) -> APITest:
"""
    Take a Postman collection, in JSON input format, and parse it
:param postman_info: JSON parsed info from Postman
:type postman_info: dict
:param environment_vars: variables to replace
:type environment_vars: dict
:return: a Postman object
:rtype: APITest
:raise ApitestValueError: when an invalid Postman format was received
"""
assert isinstance(postman_info, dict)
assert len(postman_info) > 0
# Try to find Postman variables in the JSON info from Postman Project
variables_from_postman_file = extract_postman_variables(postman_info)
    # If variables were found, replace them with their values
if variables_from_postman_file:
if not environment_vars:
raise ApitestMissingDataError(
"The Postman collections need some environment variables. "
"Please specify these variables and try again: "
",".join(x for x in variables_from_postman_file))
else:
postman_info = replace_postman_variables(postman_info,
variables_from_postman_file,
environment_vars)
collections = []
try:
# Get all collections
for collection in postman_info.get("item"):
end_points = []
# Get each end-point
for endpoint in collection.get("item"):
# --------------------------------------------------------------------------
# APITestRequest info
# --------------------------------------------------------------------------
query_info = endpoint.get("request")
# APITestRequest headers
request_headers = []
for header in query_info.get("header"):
request_headers.append(APITestHeader(key=header.get("key"),
value=header.get("value")))
# APITestRequest body
request_body_content_type = from_http_content_type_get_type(request_headers, query_info.get("body").get("mode"))
request_body = APITestBody(content_type=request_body_content_type,
value=from_raw_body_get_python_object(data_type=request_body_content_type,
data=query_info.get("body").get("formdata")))
# Build request
_request_url = query_info.get("url") \
if query_info.get("url").startswith("http") \
else "http://{}".format(query_info.get("url"))
request = APITestRequest(url=_request_url,
method=query_info.get("method"),
headers=request_headers,
body=request_body)
# --------------------------------------------------------------------------
# APITestResponse info
# --------------------------------------------------------------------------
response_list = endpoint.get("response")
responses = []
if response_list:
for response_info in response_list:
# APITestResponse headers
response_headers = []
for header in response_info.get("header"):
response_headers.append(APITestHeader(key=header.get("key"),
value=header.get("value")))
# APITestResponse APITestBody
response_body_content_type = from_http_content_type_get_type(response_headers, None)
response_body = APITestBody(content_type=response_body_content_type,
value=from_raw_body_get_python_object(data_type=response_body_content_type,
data=response_info.get("body")))
# APITestResponse cookie
response_cookies = []
for cookie in response_info.get("cookie"):
response_cookies.append(APITestCookie(expires=cookie.get("expires"),
host_only=cookie.get("hostOnly"),
http_only=cookie.get("httpOnly"),
domain=cookie.get("domain"),
path=cookie.get("path"),
secure=cookie.get("secure"),
session=cookie.get("session"),
value=cookie.get("value")))
# Build response
responses.append(APITestResponse(code=response_info.get("code"),
status=response_info.get("status"),
headers=response_headers,
body=response_body,
cookies=response_cookies))
end_points.append(APITestEndPoint(name=endpoint.get("name"),
description=endpoint.get("description"),
request=request,
response=responses))
collections.append(APITestCollection(name=endpoint.get("name"),
description=endpoint.get("description"),
end_points=end_points))
except Exception as exc:
raise ApitestInvalidFormatError from exc
data = APITest(title=postman_info.get("info").get("name"),
description=postman_info.get("info").get("description"),
collections=collections)
return data
| 5,335,462
|
def registerFont(faceName, afm, pfb):
"""
Helvetica BUT AS AFM
The below section is NOT equal to::
_baseFontName ='Helvetica'
_baseFontNameB ='Helvetica-Bold'
_baseFontNameI ='Helvetica-Oblique'
_baseFontNameBI='Helvetica-BoldOblique'
    we will map afm files from matplotlib to pfb files from reportlab;
    this will give embedded Type1 face fonts
"""
    afm = os.path.join(__font_dir__, afm + ".afm")
    pfb = os.path.join(__font_dir__, pfb + ".pfb")
face = pdfmetrics.EmbeddedType1Face(afm, pfb)
pdfmetrics.registerTypeFace(face)
justFont = pdfmetrics.Font(faceName, faceName, 'WinAnsiEncoding')
pdfmetrics.registerFont(justFont)
| 5,335,463
|
def test_anti_periodic_bcs():
"""test a simulation with anti-periodic BCs"""
grid = grids.CartesianGrid([[-10, 10]], 32, periodic=True)
field = ScalarField.from_expression(grid, "0.01 * x**2")
field -= field.average
# test normal periodic BCs
eq1 = PDE({"c": "laplace(c) + c - c**3"}, bc="periodic")
res1 = eq1.solve(field, t_range=1e5, dt=1e-1)
assert np.allclose(np.abs(res1.data), 1)
assert res1.fluctuations == pytest.approx(0)
# test normal anti-periodic BCs
eq2 = PDE({"c": "laplace(c) + c - c**3"}, bc="anti-periodic")
res2 = eq2.solve(field, t_range=1e3, dt=1e-3)
assert np.all(np.abs(res2.data) <= 1)
assert res2.fluctuations > 0.1
| 5,335,464
|
def allOPT2 (routes, dists, maxtime=float("inf")):
"""
    A convenience wrapper that applies the 2-OPT optimization to all
    the provided routes.
:param routes: The routes to optimize.
:param dists: The matrix of distances.
:param maxtime: The maximum time the optimization can go on.
:return: The optimised routes and the overall respective cost.
"""
optimized_routes = [None] * len(routes)
total_cost = 0
for i, route in enumerate(routes):
oproute, cost = OPT2(route, dists, maxtime)
optimized_routes[i] = oproute
total_cost += cost
return optimized_routes, total_cost
| 5,335,465
|
def getScoreByName(name):
"""
    Search for the given name and, if found, return the
    corresponding scores.
"""
for idx, val in enumerate(names):
if val == name:
return scores[idx]
| 5,335,466
|
def validate_marginal(marg_type, marg_dict):
"""
convenience function which validates keys and values of a marginals dict with default kwargs
:param marg_type: (str) one of the keys from marginals_template defined inside function
:param marg_dict: (dict) dict representing the marginals for a single independent variable
:return:
"""
validate_marginal_keys(marg_type, marg_dict)
validate_marginal_values(marg_dict)
| 5,335,467
|
def count_dict(dict_):
"""
Count how many levels the dict has
"""
if not isinstance(dict_, dict):
raise Dict_Exception("dict_ must be a dict")
return max(count_dict(v) if isinstance(v, dict) else 0 for v in dict_.values()) + 1
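# Usage sketch (added): the recursion counts nesting depth.
assert count_dict({"a": 1}) == 1
assert count_dict({"a": {"b": {"c": 1}}, "d": 2}) == 3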
| 5,335,468
|
def __draw_tick_labels(scales, chart_height, chart_width):
"""Draws the numbers in both axes."""
axis_values = [0, 0.25, 0.5, 0.75, 1]
axis_df = pd.DataFrame({"main_axis_values": axis_values, "aux_axis_position": 0})
x_tick_labels = (
alt.Chart(axis_df)
.mark_text(
yOffset=Scatter_Axis.label_font_size * 1.5,
tooltip="",
align="center",
fontSize=Scatter_Axis.label_font_size,
color=Scatter_Axis.label_color,
fontWeight=Scatter_Axis.label_font_weight,
font=FONT,
)
.encode(
text=alt.Text("main_axis_values:Q"),
x=alt.X("main_axis_values:Q", scale=scales["x"], axis=no_axis()),
y=alt.Y("aux_axis_position:Q", scale=scales["y"], axis=no_axis()),
)
)
axis_df.drop(0, inplace=True)
y_tick_labels = (
alt.Chart(axis_df)
.mark_text(
baseline="middle",
xOffset=-Scatter_Axis.label_font_size * 1.5,
tooltip="",
align="center",
fontSize=Scatter_Axis.label_font_size,
fontWeight=Scatter_Axis.label_font_weight,
color=Scatter_Axis.label_color,
font=FONT,
)
.encode(
text=alt.Text("main_axis_values:Q"),
x=alt.X("aux_axis_position:Q", scale=scales["x"], axis=no_axis()),
y=alt.Y("main_axis_values:Q", scale=scales["y"], axis=no_axis()),
)
)
return x_tick_labels + y_tick_labels
| 5,335,469
|
def HIP_to_HD(name):
"""Convert an HIP name in *Hipparcos Catalogue* to HD name in *Henry Draper
Catalogue*.
Args:
name (str or int): Name of star in *Hipparcos Catalogue*.
"""
hip = _get_HIP_number(name)
filename = os.path.join(xindex_path, 'HIP-HD.csv')
f1 = lambda row: int(row.split(',')[0])
f2 = lambda row: 'HD '+row.split(',')[1].strip()
if hip<100:
HDname = find_sortedfile(hip, filename, f1, f2)
else:
HDname = quickfind_sortedfile(hip, filename, f1, f2)
    if HDname is None:
return None
else:
return [HDname]
| 5,335,470
|
def convert_torchscript_module_to_torch_backend_contract_mlir(program: torch.nn.Module):
"""Perform common lowering from TorchScript to Torch MLIR
Returns an MLIR module that satisfies the Torch backend contract.
"""
mb = ModuleBuilder()
scripted = torch.jit.script(program)
class_annotator = ClassAnnotator()
extract_annotations(program, scripted, class_annotator)
# TODO: Find a way to make each of these calls own its own
# "debuggable error report" situation.
try:
original_stderr = sys.stderr
sys.stderr = StringIO()
# Import the TorchScript module to MLIR
mb.import_module(scripted._c, class_annotator)
except Exception as e:
raise Exception(f"""
PyTorch TorchScript module -> torch-mlir Object Graph IR import failed with:
Exception:
{e}
Diagnostics:
{sys.stderr.getvalue()}
""") from None
finally:
sys.stderr = original_stderr
run_pipeline_with_repro_report(
mb.module,
"torchscript-module-to-torch-backend-pipeline",
"Lowering TorchScript Object Graph IR -> Torch Backend IR")
return mb.module
| 5,335,471
|
def thread_profile(D,P,inset,internal=True,base_pad=0.1):
"""ISO thread profile"""
H = P*np.sqrt(3)/2
Dm = D - 2*5*H/8
Dp = D - 2*3*H/8
if internal:
return np.array([
(-P/2,D/2+H/8+base_pad+inset),
(-P/2,D/2+H/8+inset),
(-P/8,Dm/2+inset),
(P/8,Dm/2+inset),
(P/2,D/2+H/8+inset),
(P/2,D/2+H/8+base_pad+inset),
])
else:
return np.array([
(-P/2,Dm/2-H/4-base_pad-inset),
(-P/2,Dm/2-H/4-inset),
(-P/16,D/2-inset),
(P/16,D/2-inset),
(P/2,Dm/2-H/4-inset),
(P/2,Dm/2-H/4-base_pad-inset),
])
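# Illustrative check (added): for a nominal M6x1.0 thread (D=6, P=1.0),
# H = P*sqrt(3)/2 ~= 0.866 and the internal profile's flat lies at
# Dm/2 = (D - 2*5*H/8)/2 ~= 2.459 before any inset is applied.
_pts = thread_profile(D=6.0, P=1.0, inset=0.0, internal=True)
print(_pts[2])  # ~ [-0.125  2.459]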
| 5,335,472
|
def mock_session(monkeypatch, data):
""" Mocked out sqlalchemy session """
if data:
dirname = os.path.dirname(os.path.realpath(__file__))
filename = os.path.join(dirname, data)
with open(filename) as data_file:
json_data = json.load(data_file)
predictions = json_data['predictions']
geometry = json_data['geometry']
prediction_model = PredictionModel(
id=1, name='name', abbreviation='abbrev', projection='projection')
def mock_get_session(*args):
mock_session = UnifiedAlchemyMagicMock()
return mock_session
def mock_get_most_recent_model_run(*args) -> PredictionModelRunTimestamp:
timestamp = '2020-01-22T18:00:00+00:00'
return PredictionModelRunTimestamp(id=1,
prediction_model=prediction_model,
prediction_run_timestamp=datetime.fromisoformat(timestamp))
def mock_get_model_run_predictions(*args):
shape = shapely.wkt.loads(geometry)
grid = PredictionModelGridSubset(
id=1,
prediction_model_id=prediction_model.id,
prediction_model=prediction_model,
geom=from_shape(shape)
)
result = []
for prediction in predictions:
prediction['prediction_timestamp'] = datetime.fromisoformat(
prediction['prediction_timestamp'])
result.append(
(grid, ModelRunGridSubsetPrediction(**prediction)))
return result
monkeypatch.setattr(app.db.database, 'get_session', mock_get_session)
monkeypatch.setattr(app.db.crud, 'get_most_recent_model_run',
mock_get_most_recent_model_run)
monkeypatch.setattr(app.db.crud, 'get_model_run_predictions',
mock_get_model_run_predictions)
| 5,335,473
|
def plot_eigenvectors(*, obs_names, eigenvecs, fig_size=(8, 6), font_size=12,
label_size=16, save_fig=False, write_path=None):
"""Plot eigenvectors of the covariance matrix of SV residuals.
Produces a plot of the eigenvectors corresponding to the n largest
eigenvalues of the covariance matrix obtained during PCA of SV residuals,
where n is the number of eigenvalues used as a proxy for unmodelled
external field signal. The n eigenvectors corresponding to the n largest
eigenvalue represent the directions with the largest contribution
to the residuals (i.e. the "noisiest" directions). See Wardinski & Holme
(2011, GJI, https://doi.org/10.1111/j.1365-246X.2011.04988.x)
for further details.
Args:
obs_names (list): list of observatory names given as three digit IAGA
codes.
eigenvecs (array): the eigenvalues obtained from the principal
component analysis of the SV residuals.
fig_size (array): figure size in inches. Defaults to 8 inches by 6
inches.
font_size (int): font size for axes. Defaults to 12 pt.
label_size (int): font size for axis labels. Defaults to 16 pt.
save_fig (bool): option to save figure. Defaults to False.
write_path (str): output path for figure if saved.
"""
# Loop over directions and plot each eigenvector on a separate subplot
for direction in range(eigenvecs.shape[1]):
plt.figure(figsize=fig_size)
plt.plot(np.abs(eigenvecs[::3, direction]), 'bx',
np.abs(eigenvecs[1::3, direction]), 'rx',
np.abs(eigenvecs[2::3, direction]), 'cx',
markersize=10, mew=3)
plt.ylim(0, 1)
plt.grid()
plt.yticks(fontsize=font_size)
plt.xticks(range(len(obs_names)), obs_names, fontsize=font_size)
plt.xticks(rotation=60)
plt.ylabel(r'$\mathbf{{v}}_{%03d}$' % (direction), fontsize=label_size)
plt.legend(['x direction', 'y direction', 'z direction'],
loc='upper right', frameon=False, fontsize=label_size)
plt.xlabel('Location', fontsize=label_size)
if save_fig is True:
# Create the output directory if it does not exist
if not os.path.exists(write_path):
os.makedirs(write_path)
fpath = os.path.join(write_path, 'eigendirection%03d.pdf' % direction)
plt.savefig(fpath, bbox_inches='tight')
plt.close()
| 5,335,474
|
def get_service(hass, config, discovery_info=None):
"""Get the HipChat notification service."""
return HipchatNotificationService(
config[CONF_TOKEN],
config[CONF_ROOM],
config[CONF_COLOR],
config[CONF_NOTIFY],
config[CONF_FORMAT],
config[CONF_HOST])
| 5,335,475
|
def connect_with_interior_or_edge_bulk(
polygon: Polygon, polygon_array: GeometryArray
) -> List[bool]:
"""
Return boolean array with True iff polys overlap in interior/edge, but not corner.
Args:
polygon (Polygon): A shapely Polygon
polygon_array (GeometryArray): The other shapely Polygons in a geopandas
geometry array
Returns:
List[bool]: Boolean array with value True, iff `polygon` and the polygon in
`polygon_array` at the given location overlap in their interior/edge.
"""
patterns = polygon_array.relate(polygon)
return [
de9im_match(pattern, EDGE_ONLY_PATTERN) or de9im_match(pattern, OVERLAP_PATTERN)
for pattern in patterns
]
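# Usage sketch (added): illustrative only; assumes a geopandas GeometryArray and
# the EDGE_ONLY_PATTERN / OVERLAP_PATTERN constants defined elsewhere in this module.
# import geopandas as gpd
# from shapely.geometry import box
# polys = gpd.GeoSeries([box(0, 0, 1, 1), box(1, 0, 2, 1), box(1, 1, 2, 2)]).array
# connect_with_interior_or_edge_bulk(box(0, 0, 1, 1), polys)
# # expected: [True, True, False]  (interior overlap, shared edge, corner-only touch)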
| 5,335,476
|
def get_history(kmodel=None):
"""
    Return a python dict mapping metric_id to a list of per-epoch metric values.
"""
# get kmodel object from input str if the input is a string
if isinstance(kmodel,str):
try:
kmodel = KModel.objects.get(id=kmodel)
except ObjectDoesNotExist:
# object with name doesn't exist
return None
except ValidationError:
# input string isn't a valid uuid
return None
elif isinstance(kmodel, KModel):
# awesome! proceed
pass
else:
        raise ValueError("call get_history with either a str uuid for a model or a db model instance")
# get the history object and load history
if kmodel.artifacts.filter(descriptor="history").exists():
artifact_path = kmodel.artifacts.get(descriptor="history").path
return pickle.load(open(artifact_path,"rb"))
else:
return None
| 5,335,477
|
def get_neighbor_v4_by_search(search=None):
"""Return a list of NeighborV4's by dict."""
try:
objects = NeighborV4.objects.filter()
search_dict = search if search else dict()
object_map = build_query_to_datatable_v3(objects, search_dict)
except FieldError as e:
raise api_rest_exceptions.ValidationAPIException(str(e))
except Exception as e:
raise api_rest_exceptions.NetworkAPIException(str(e))
else:
return object_map
| 5,335,478
|
def load_clean_data():
"""funcion that loads tuberculosis file and preprocesses/cleans the dataframe"""
df = pd.read_csv('tb.csv')
# drop columns 'fu' and 'mu' since they only contain missing values and would mess up the following processing steps
df = df.drop(columns = ['fu', 'mu'])
# define row and column length
initial_rows = len(df.index)
initial_col = len(df.columns)
# melt the gender-age columns of the df
df = pd.melt(df, id_vars=['country', 'year'], var_name='variable', value_name='value')
melted_row = len(df.index)
# assert that (initial col-number - id_var_no) * rows = length of rows afterwards
assert (initial_col - 2)*initial_rows == melted_row
# the column 'variable' needs to be split into two columns 'gender' and 'age', delete column 'variable'
df['gender'] = df.variable.str[0]
df['age'] = df.variable.str[1:3]
df = df.drop(columns = 'variable')
# transform age into an integer
df['age'] = pd.to_numeric(df['age'], errors='coerce')
    # transform gender into category in order to save memory
df['gender'] = df['gender'].astype('category')
return df
#print(df.info())
#print(df.head())
#print(df.loc[df['country'] == 'AD'])
# the transformation seems to be correct. The columns age and gender have no missing values (which would have been
# suspicious)
| 5,335,479
|
def is_anagram_passphrase(phrase):
"""
Checks whether a phrase contains no words that are anagrams of other words.
>>> is_anagram_passphrase(["abcde", "fghij"])
True
>>> is_anagram_passphrase(["abcde", "xyz", "ecdab"])
False
>>> is_anagram_passphrase(["a", "ab", "abc", "abd", "abf", "abj"])
True
>>> is_anagram_passphrase(["iiii", "oiii", "ooii", "oooi", "oooo"])
True
>>> is_anagram_passphrase(["oiii", "ioii", "iioi", "iiio"])
False
"""
return not any(
any(
first_word == "".join(permutated_word)
for permutated_word in permutations(second_word)
)
for first_word, second_word in combinations(phrase, 2)
)
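# Alternative sketch (added, not from the original): comparing sorted letters is an
# equivalent anagram test that avoids enumerating factorially many permutations.
def is_anagram_passphrase_sorted(phrase):
    """Equivalent check: no two words may share the same sorted-letter form."""
    canon = ["".join(sorted(word)) for word in phrase]
    return len(canon) == len(set(canon))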
| 5,335,480
|
def _client_ip(client):
"""Compatibility layer for Flask<0.12."""
return getattr(client, 'environ_base', {}).get('REMOTE_ADDR')
| 5,335,481
|
def findtrapezoidfunc(
thexvals,
theyvals,
thetoplength,
initguess=None,
debug=False,
minrise=0.0,
maxrise=200.0,
minfall=0.0,
maxfall=200.0,
minstart=-100.0,
maxstart=100.0,
refine=False,
displayplots=False,
):
"""
Parameters
----------
thexvals
theyvals
thetoplength
initguess
debug
minrise
maxrise
minfall
maxfall
minstart
maxstart
refine
displayplots
Returns
-------
"""
# guess at parameters: risestart, riseamplitude, risetime
if initguess is None:
initstart = 0.0
initamp = np.mean(theyvals[-10:-1])
initrisetime = 5.0
initfalltime = 5.0
else:
initstart = initguess[0]
initamp = initguess[1]
initrisetime = initguess[2]
initfalltime = initguess[3]
p0 = np.array([initstart, initamp, initrisetime, initfalltime])
if debug:
for i in range(0, len(theyvals)):
print(thexvals[i], theyvals[i])
plsq, dummy = sp.optimize.leastsq(
trapezoidresiduals, p0, args=(theyvals, thexvals, thetoplength), maxfev=5000
)
# except ValueError:
# return 0.0, 0.0, 0.0, 0
if (
(minrise <= plsq[2] <= maxrise)
and (minfall <= plsq[3] <= maxfall)
and (minstart <= plsq[0] <= maxstart)
):
return plsq[0], plsq[1], plsq[2], plsq[3], 1
else:
return 0.0, 0.0, 0.0, 0.0, 0
| 5,335,482
|
def extractRecords(getRecordsResponse):
"""Returns a list of etrees of the individual
records of a getRecords response"""
recs = getRecordsResponse.xpath(
'/csw:GetRecordsResponse/csw:SearchResults//csw:Record',
namespaces={'csw': ns_csw})
return recs
| 5,335,483
|
def tagify(tail=u'', head=u'', sep=u'.'):
"""
Returns namespaced event tag string.
Tag generated by joining with sep the head and tail in that order
head and tail may be a string or a list, tuple, or Set of strings
If head is a list, tuple or Set Then
join with sep all elements of head individually
Else
join in whole as string prefix
If tail is a list, tuple or Set Then
join with sep all elements of tail individually
Else
join in whole as string suffix
If either head or tail is empty then do not exhibit in tag
"""
if isinstance(head, (list, tuple, Set)): # list like so expand
parts = list(head)
else: # string like so put in list
parts = [head]
if isinstance(tail, (list, tuple, Set)): # listlike so extend parts
parts.extend(tail)
else: # string like so append
parts.append(tail)
return sep.join([part for part in parts if part])
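# Usage sketch (added): head/tail may be strings or sequences; empty parts are
# dropped from the generated tag.
assert tagify('joined', 'salt') == 'salt.joined'
assert tagify(('added', 'joined'), ['salt', 'pepper']) == 'salt.pepper.added.joined'
assert tagify('joined') == 'joined'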
| 5,335,484
|
def run_all():
""" Run all of the tests
This function is useful in case we want to run our tests outside of the pytest framework.
"""
# since we are not running through pytest, we have to grab the inputs to the tests
version = get_version()
test_version(version)
config = get_config()
test_peptide_encoder_lstm_model(config)
| 5,335,485
|
def after_all(context):
"""
Function saves modified MARC record to file system
:param context: behave context object
"""
marc_filename = open('modified-ybp-dda-for-ebl.mrc','wb')
marc_filename.write(context.marc_record.as_marc())
marc_filename.close()
| 5,335,486
|
def get_fn_data(src_db, fn_table, year=None):
"""Get the data and fields from the query in the src database for the
fish net table specified by fn_table. Returns list of
dictionaries - each element represents a single row returned by the query.
Arguments:
- `src_db`: full path the source database.
- `fn_table`: the name of the stored query that returns the data for
the specified fish net table
"""
if year:
sql = "execute get_{} @yr='{}'".format(fn_table, year)
else:
sql = "execute get_{}".format(fn_table)
constring = "DRIVER={{Microsoft Access Driver (*.mdb, *.accdb)}};DBQ={}"
with pyodbc.connect(constring.format(src_db)) as src_conn:
src_cur = src_conn.cursor()
rs = src_cur.execute(sql)
data = rs.fetchall()
flds = [x[0].lower() for x in src_cur.description]
records = []
for record in data:
records.append({k: v for k, v in zip(flds, record)})
return records
| 5,335,487
|
def convert_example_to_feature(example, tokenizer, max_seq_length=512,
doc_stride=384, max_query_length=125, is_training=True,
cls_token_at_end=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=0, pad_token_segment_id=0,
mask_padding_with_zero=True,
sequence_a_is_doc=False):
"""Convert a single QuacExample to features (model input)"""
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[-max_query_length:]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
assert max_tokens_for_doc >= 384, max_tokens_for_doc
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
# we set the doc_stride to 384, which is the max length of evidence text,
# meaning that each evidence has exactly one _DocSpan
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
assert len(doc_spans) == 1, (max_tokens_for_doc, example)
# if len(doc_spans) > 1:
# print(len(doc_spans), example)
# doc_spans = [doc_spans[0]]
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
        # p_mask: mask with 1 for tokens that cannot be in the answer (0 for tokens which can be in an answer)
# Original TF implem also keep the classification token (set to 0) (not sure why...)
p_mask = []
# CLS token at the beginning
if not cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = 0
# XLNet: P SEP Q SEP CLS
# Others: CLS Q SEP P SEP
if not sequence_a_is_doc:
# Query
tokens += query_tokens
segment_ids += [sequence_a_segment_id] * len(query_tokens)
p_mask += [1] * len(query_tokens)
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
# Paragraph
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
if not sequence_a_is_doc:
segment_ids.append(sequence_b_segment_id)
else:
segment_ids.append(sequence_a_segment_id)
p_mask.append(0)
paragraph_len = doc_span.length
if sequence_a_is_doc:
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
tokens += query_tokens
segment_ids += [sequence_b_segment_id] * len(query_tokens)
p_mask += [1] * len(query_tokens)
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_b_segment_id)
p_mask.append(1)
# CLS token at the end
if cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = len(tokens) - 1 # Index of classification token
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(pad_token)
input_mask.append(0 if mask_padding_with_zero else 1)
segment_ids.append(pad_token_segment_id)
p_mask.append(1)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
span_is_impossible = example.is_impossible
start_position = None
end_position = None
if is_training and not span_is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
span_is_impossible = True
else:
if sequence_a_is_doc:
doc_offset = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and span_is_impossible:
start_position = cls_index
end_position = cls_index
if False:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (example.example_id))
logger.info("example_id: %s" % (example.example_id))
logger.info("qid of the example: %s" % (example.qas_id))
logger.info("doc_span_index: %s" % (doc_span_index))
logger.info("tokens: %s" % " ".join(tokens))
logger.info("token_to_orig_map: %s" % " ".join([
"%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
logger.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in token_is_max_context.items()
]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and span_is_impossible:
logger.info("impossible example")
if is_training and not span_is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
logger.info("start_position: %d" % (start_position))
logger.info("end_position: %d" % (end_position))
logger.info("retrieval_label: %d" % (example.retrieval_label))
logger.info(
"answer: %s" % (answer_text))
feature = InputFeatures(
unique_id=example.example_id,
example_id=example.example_id,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
cls_index=cls_index,
p_mask=p_mask,
paragraph_len=paragraph_len,
start_position=start_position,
end_position=end_position,
is_impossible=span_is_impossible,
retrieval_label=example.retrieval_label)
return feature
| 5,335,488
|
def notify(silent=False, **kwargs):
"""Helper method to send a notification."""
notification = Notification(**kwargs)
# Validate channels
for channel_alias in notification.channels:
__validate_channel_alias(channel_alias)
# If it's a not a silent notification, save the notification
if not silent:
notification.save()
# Send the notification asynchronously with celery
send_notification.delay(notification.to_json())
| 5,335,489
|
def make_pickle(golfed=False):
"""Returns the pickle-quine.
If "golfed" is true, we return the minimized version; if false we return
the one that's easier to understand.
"""
part_1 = b''.join(PART_1)
part_2 = b''.join(GOLFED_PART_2 if golfed else PART_2)
# We tack the length onto part 1:
length = len(part_1) + 1 + len(part_2)
part_1 = part_1 + b'%c' % length
# Now glue everything together.
the_string = part_1 + part_2
return part_1 + the_string + part_2
| 5,335,490
|
def classify_design_space(action: str) -> int:
"""
The returning index corresponds to the list stored in "count":
[sketching, 3D features, mating, visualizing, browsing, other organizing]
Formulas for each design space action:
sketching = "Add or modify a sketch" + "Copy paste sketch"
3D features = "Commit add or edit of part studio feature" + "Delete part studio feature"
- "Add or modify a sketch"
mating = "Add assembly feature" + "Delete assembly feature" + "Add assembly instance"
+ "Delete assembly instance"
visualizing = "Start assembly drag" + "Animate action called"
browsing = Opening a tab + Creating a tab + Deleting a tab + Renaming a tab
other organizing = "Create version" + "Cancel Operation" + "Undo Redo Operation"
+ "Merge branch" + "Branch workspace" + "Update version"
:param action: the action to be classified
:return: the index of the action type that this action is accounted for; if the action does not
belong to any category, return -1
Note: "Add or modify a sketch" is special (+1 for sketching and -1 for 3D features),
return -10
"""
# Creating a sketch is special as it affects both the sketching and the 3D features counts
if action == "Add or modify a sketch":
return -10
# Sketching
elif action == "Copy paste sketch":
return 0
# 3D features
elif action in ["Commit add or edit of part studio feature",
"Delete part studio feature"]:
return 1
# Mating
elif action in ["Add assembly feature", "Delete assembly feature", "Add assembly instance"
"Delete assembly instance"]:
return 2
# Visualizing
elif action in ["Start assembly drag", "Animate action called"]:
return 3
# Browsing
elif "Tab" in action and ("opened" in action or "created" in action or "deleted" in action or
"renamed" in action):
return 4
# Other organizing
elif action in ["Create version", "Cancel Operation", "Undo Redo Operation", "Merge branch",
"Branch workspace", "Update version"]:
return 5
# Not classified (Optional: print out the unclassified actions)
else:
return -1
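# Tallying sketch (added, not from the source): shows how the -10 sentinel for
# "Add or modify a sketch" is meant to be consumed, per the docstring's formulas.
def tally_design_space(actions):
    counts = [0, 0, 0, 0, 0, 0]  # [sketching, 3D, mating, visualizing, browsing, organizing]
    for action in actions:
        idx = classify_design_space(action)
        if idx == -10:
            counts[0] += 1  # counts toward sketching...
            counts[1] -= 1  # ...and is subtracted from 3D features
        elif idx >= 0:
            counts[idx] += 1
    return counts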
| 5,335,491
|
def create(name, database=None, auto=False, auto_source=False, directory=None, migratetable=None, verbose=None):
"""Create a migration."""
router = get_router(directory, database, migratetable, verbose)
if auto and auto_source:
auto = auto_source
router.create(name, auto=auto)
| 5,335,492
|
def get_sha256_hash(plaintext):
"""
Hashes an object using SHA256. Usually used to generate hash of chat ID for lookup
Parameters
----------
plaintext: int or str
Item to hash
Returns
-------
str
Hash of the item
"""
hasher = hashlib.sha256()
string_to_hash = str(plaintext)
hasher.update(string_to_hash.encode('utf-8'))
hash = hasher.hexdigest()
return hash
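# Quick check (added): the input is converted with str() before hashing, so an
# int and its string form hash identically.
assert get_sha256_hash(123) == get_sha256_hash("123")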
| 5,335,493
|
def annexRepo__enable_remote(self, name, options=None, env=None):
"""Enables use of an existing special remote
Parameters
----------
name: str
name, the special remote was created with
options: list, optional
"""
# MIH thinks there should be no `env` argument at all
# https://github.com/datalad/datalad/issues/5162
# if it would not be there, this whole dance is pretty much
# obsolete
env = env or self._git_runner.env
# an enableremote can do pretty much anything, including a type change.
# in order to be able to determine whether credentials *will* be needed,
# we have to look ahead and form the special remote parameters that will
# be there at the end -- more or less
# pull info for present config
sp_remotes = {v['name']: dict(v, uuid=k) for k, v in self.get_special_remotes().items()}
remote_info = sp_remotes.get(name, {})
# TODO if remote_info is empty, we can fail right here
if options:
# and now update with given params
remote_info.update(get_specialremote_param_dict(options))
# careful here, `siblings()` also calls this for regular remotes, check
# for a known type
if 'type' in remote_info \
and needs_specialremote_credential_envpatch(remote_info['type']):
# see if we can identify any matching credentials
credprops = get_specialremote_credential_properties(remote_info)
credman = None
credspec = None
if credprops:
credman = CredentialManager(self.config)
creds = credman.query(_sortby='last-used', **credprops)
if creds:
# found one
credspec = creds[0]
# TODO manual entry could be supported here too! (also see at the end)
        if env:
            env = env.copy()
if credspec:
credpatch = get_specialremote_credential_envpatch(
remote_info['type'], credspec[1])
if credpatch:
if not env:
env = os.environ.copy()
env.update(credpatch)
try:
from unittest.mock import patch
with patch.object(self._git_runner, 'env', env):
# TODO: outputs are nohow used/displayed. Eventually convert to
# to a generator style yielding our "dict records"
self.call_annex(['enableremote', name] + ensure_list(options))
except CommandError as e:
if re.match(r'.*StatusCodeException.*statusCode = 401', e.stderr):
raise AccessDeniedError(e.stderr) from e
elif 'FailedConnectionException' in e.stderr:
raise AccessFailedError(e.stderr) from e
else:
raise e
self.config.reload()
# TODO when manual credential entry is supported,
# implement store-after-success here
| 5,335,494
|
def get_bit_coords(dtype_size):
"""Get coordinates for bits assuming float dtypes."""
if dtype_size == 16:
coords = (
["±"]
+ [f"e{int(i)}" for i in range(1, 6)]
+ [f"m{int(i-5)}" for i in range(6, 16)]
)
elif dtype_size == 32:
coords = (
["±"]
+ [f"e{int(i)}" for i in range(1, 9)]
+ [f"m{int(i-8)}" for i in range(9, 32)]
)
elif dtype_size == 64:
coords = (
["±"]
+ [f"e{int(i)}" for i in range(1, 12)]
+ [f"m{int(i-11)}" for i in range(12, 64)]
)
else:
raise ValueError(f"dtype of size {dtype_size} neither known nor implemented.")
return coords
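# Illustrative call (added): float32 has 1 sign, 8 exponent and 23 mantissa bits.
_labels = get_bit_coords(32)
assert len(_labels) == 32 and _labels[1] == "e1" and _labels[-1] == "m23"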
| 5,335,495
|
def create_record(user_inputs):
"""
    Create an ObsRecord from the information gathered from the user.
:param user_inputs: Dictionary with all the values (as strings) required
to fully populate a ObsRecord object.
:type user_inputs: dict
:rtype: ObsRecord object
"""
from klpyastro.utils import obstable
record = obstable.ObsRecord()
record.targetname = user_inputs['targetname']
record.rootname = user_inputs['rootname']
record.band = user_inputs['band']
record.grism = user_inputs['grism']
record.datatype = user_inputs['datatype']
record.applyto = user_inputs['applyto']
record.filerange = user_inputs['filerange']
record.exptime = float(user_inputs['exptime'])
record.lnrs = int(user_inputs['lnrs'])
record.rdmode = user_inputs['rdmode']
return record
| 5,335,496
|
def best_wild_hand(hand):
"""best_hand но с джокерами"""
non_jokers = list(filter(lambda x: x[0] != '?', hand))
jokers = filter(lambda x: x[0] == '?', hand)
jokers_variations = itertools.product(
*[joker_variations(joker) for joker in jokers]
)
best_hands = []
for variations in jokers_variations:
full_hand = itertools.chain(variations, non_jokers)
best_hands.append(best_hand(full_hand))
return max((hand_rank(h), h) for h in best_hands)[1]
| 5,335,497
|
def GetTestMetadata(test_metadata_file=FAAS_ROOT+"/synthetic_workload_invoker/test_metadata.out"):
"""
Returns the test start time from the output log of SWI.
"""
test_start_time = None
with open(test_metadata_file) as f:
lines = f.readlines()
test_start_time = lines[0]
config_file = lines[1]
invoked_actions = int(lines[2][:-1])
print('Invocations by Workload Invoker: ' + str(invoked_actions))
try:
return int(test_start_time[:-1]), config_file[:-1]
except:
logger.error("Error reading the test metadata!")
return None, None
| 5,335,498
|
def addsong():
"""Adds a semi-random song to the playlist"""
rand = random.uniform(-0.5, 2)
cursor.execute("SELECT file, listened, added FROM songs "
"WHERE karma>? AND time < ? "
"AND NOT duplicate ORDER BY random() LIMIT 1;",
(rand, int(time.time()-(60*(flood_delay-trigger*3)))))
songdata = cursor.fetchone()
if not songdata:
updateone()
addsong()
else:
newkarma = karma(songdata[1], songdata[2]+1)
cursor.execute(
"UPDATE songs SET added=?, karma=?, time=? WHERE file=?",
(songdata[2]+1, newkarma, int(time.time()), songdata[0],)
)
cursor.execute(
"SELECT inode, dev FROM songs WHERE file=?;",
(songdata[0],)
)
one = cursor.fetchone()
if one and one[0]:
cursor.execute(
"""UPDATE SONGS SET added=?, karma=?, time=? WHERE inode=?
AND dev=?""", (songdata[2]+1, newkarma, int(time.time()),
one[0], one[1])
)
db.commit()
try:
client.add(songdata[0].encode(enc))
log("I Added " + songdata[0].encode(enc))
log("D A:" + str(songdata[2]+1) + ", K:" +
str(newkarma))
except mpd.CommandError:
log("W Couldn't add " + songdata[0].encode(enc))
update(songdata[0])
addsong()
| 5,335,499
|