content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def get_cnt_sw(g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi, mode):
""" usalbe only when g_wc was used to find pr_wv
"""
cnt_sc = get_cnt_sc(g_sc, pr_sc)
cnt_sa = get_cnt_sa(g_sa, pr_sa)
cnt_wn = get_cnt_wn(g_wn, pr_wn)
cnt_wc = get_cnt_wc(g_wc, pr_wc)
cnt_wo = get_cnt_wo(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode)
cnt_wv = get_cnt_wv(g_wn, g_wc, g_wvi, pr_wvi, mode)
return cnt_sc, cnt_sa, cnt_wn, cnt_wc, cnt_wo, cnt_wv | 069632779f353e28f23a0687e11d00761c8dea19 | 3,635,200 |
def _get_memcache_client():
"""Return memcache client if it's enabled, otherwise return None"""
if not cache_utils.has_memcache():
return None
return cache_utils.get_cache_manager().cache_object.memcache_client | c781bf4d638fc4b094fc0d64943b9305e0ec18b8 | 3,635,201 |
def get_long_description(readme_file='README.md'):
    """Returns the long description of the package.
    @return str -- Long description
    """
    with open(readme_file, 'r') as fh:
        return "".join(fh.readlines()[2:]) | 604c57fce1f9b8c32df4b64dc9df4fe61120d680 | 3,635,202 |
import requests
def extract_dem(
bounds,
out_raster="dem.tif"
):
"""Get 25m DEM for area of interest from BC WCS, write to GeoTIFF
"""
bbox = ",".join([str(b) for b in bounds])
# build request
payload = {
"service": "WCS",
"version": "1.0.0",
"request": "GetCoverage",
"coverage": "pub:bc_elevation_25m_bcalb",
"Format": "GeoTIFF",
"bbox": bbox,
"CRS": "EPSG:3005",
"resx": "25",
"resy": "25",
}
    # request data from WCS (BC_DEM_WCS_URL is assumed to be defined at module level)
r = requests.get(BC_DEM_WCS_URL, params=payload)
# save to tiff
if r.status_code == 200:
with open(out_raster, "wb") as file:
file.write(r.content)
return out_raster | 9ff9349df9e3cd129a12111f2f45f151bd2851d0 | 3,635,203 |
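A hedged usage sketch for `extract_dem`; the endpoint constant and bounds below are illustrative assumptions (the function expects `BC_DEM_WCS_URL` at module level, and coordinates are in EPSG:3005 metres):

# Hypothetical placeholder, not the verified BC government endpoint:
BC_DEM_WCS_URL = "https://example.gov.bc.ca/wcs"

# bounds are (minx, miny, maxx, maxy) in EPSG:3005 (BC Albers)
aoi = (1.37e6, 0.47e6, 1.38e6, 0.48e6)
dem_path = extract_dem(aoi, out_raster="aoi_dem.tif")  # performs a live request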
def G1DListGetEdgesComposite(mom, dad):
""" Get the edges and the merge between the edges of two G1DList individuals
:param mom: the mom G1DList individual
:param dad: the dad G1DList individual
:rtype: a tuple (mom edges, dad edges, merge)
"""
mom_edges = G1DListGetEdges(mom)
dad_edges = G1DListGetEdges(dad)
return (mom_edges, dad_edges, G1DListMergeEdges(mom_edges, dad_edges)) | c61ffa657dfaf3daecfbc66d4166aa57efb6141b | 3,635,204 |
import numpy as np
import pandas as pd
def entropy_approximate(signal, delay=1, dimension=2, tolerance="default", corrected=False, **kwargs):
"""Approximate entropy (ApEn)
Python implementations of the approximate entropy (ApEn) and its corrected version (cApEn).
Approximate entropy is a technique used to quantify the amount of regularity and the unpredictability
of fluctuations over time-series data. The advantages of ApEn include lower computational demand
(ApEn can be designed to work for small data samples (< 50 data points) and can be applied in real
    time) and lower sensitivity to noise. However, ApEn is heavily dependent on the record length and lacks
relative consistency.
This function can be called either via ``entropy_approximate()`` or ``complexity_apen()``, and the
corrected version via ``complexity_capen()``.
Parameters
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
delay : int
Time delay (often denoted 'Tau', sometimes referred to as 'lag'). In practice, it is common
to have a fixed time lag (corresponding for instance to the sampling rate; Gautama, 2003), or
to find a suitable value using some algorithmic heuristics (see ``delay_optimal()``).
dimension : int
Embedding dimension (often denoted 'm' or 'd', sometimes referred to as 'order'). Typically
2 or 3. It corresponds to the number of compared runs of lagged data. If 2, the embedding returns
an array with two columns corresponding to the original signal and its delayed (by Tau) version.
tolerance : float
Tolerance (similarity threshold, often denoted as 'r'). It corresponds to the filtering level
- max absolute difference between segments. If 'default', will be set to 0.2 times the
standard deviation of the signal (for dimension = 2).
corrected : bool
If true, will compute corrected ApEn (cApEn), see Porta (2007).
**kwargs
Other arguments.
See Also
--------
entropy_shannon, entropy_sample, entropy_fuzzy
Returns
----------
apen : float
The approximate entropy of the single time series.
info : dict
A dictionary containing additional information regarding the parameters used
to compute approximate entropy.
Examples
----------
>>> import neurokit2 as nk
>>>
>>> signal = nk.signal_simulate(duration=2, frequency=5)
>>> entropy1, parameters = nk.entropy_approximate(signal)
>>> entropy1 #doctest: +SKIP
>>> entropy2, parameters = nk.entropy_approximate(signal, corrected=True)
>>> entropy2 #doctest: +SKIP
References
-----------
- `EntroPy` <https://github.com/raphaelvallat/entropy>`_
- Sabeti, M., Katebi, S., & Boostani, R. (2009). Entropy and complexity measures for EEG signal
classification of schizophrenic and control participants. Artificial intelligence in medicine,
47(3), 263-274.
- Shi, B., Zhang, Y., Yuan, C., Wang, S., & Li, P. (2017). Entropy analysis of short-term heartbeat
interval time series during regular walking. Entropy, 19(10), 568.
"""
# Sanity checks
if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
raise ValueError(
"Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
)
# Prepare parameters
info = {"Dimension": dimension, "Delay": delay, "Corrected": corrected}
info["Tolerance"] = _get_tolerance(signal, tolerance=tolerance, dimension=dimension)
out = _entropy_approximate(
signal,
tolerance=info["Tolerance"],
delay=delay,
dimension=dimension,
corrected=corrected,
**kwargs
)
return out, info | 5e39f5aa4e571e3e8d452c79b3742520af2644bc | 3,635,205 |
from typing import Tuple
def ir_typeref_to_type(
schema: s_schema.Schema,
typeref: irast.TypeRef,
) -> Tuple[s_schema.Schema, s_types.Type]:
"""Return a schema type for a given IR TypeRef.
This is the reverse of :func:`~type_to_typeref`.
Args:
schema:
A schema instance. The result type must exist in it.
typeref:
A :class:`ir.ast.TypeRef` instance for which to return
the corresponding schema type.
Returns:
A tuple containing the possibly modified schema and
a :class:`schema.types.Type` instance corresponding to the
given *typeref*.
"""
if is_anytuple(typeref):
return schema, s_pseudo.PseudoType.get(schema, 'anytuple')
elif is_any(typeref):
return schema, s_pseudo.PseudoType.get(schema, 'anytype')
elif is_tuple(typeref):
named = False
tuple_subtypes = {}
for si, st in enumerate(typeref.subtypes):
if st.element_name:
named = True
type_name = st.element_name
else:
type_name = str(si)
schema, st_t = ir_typeref_to_type(schema, st)
tuple_subtypes[type_name] = st_t
return s_types.Tuple.from_subtypes(
schema, tuple_subtypes, {'named': named})
elif is_array(typeref):
array_subtypes = []
for st in typeref.subtypes:
schema, st_t = ir_typeref_to_type(schema, st)
array_subtypes.append(st_t)
return s_types.Array.from_subtypes(schema, array_subtypes)
else:
t = schema.get_by_id(typeref.id)
assert isinstance(t, s_types.Type), 'expected a Type instance'
return schema, t | 71ce2deae8caa5a177e0d8ad7df60ce1ba1b1be6 | 3,635,206 |
def get_releases_query(session: db.Session, current_user: UserType, show_legacy=False):
"""Returns the query necessary to fetch a list of releases
If a user is passed, then the releases will be tagged `is_mine` if in that user's collection.
"""
if current_user.is_anonymous():
query = session.query(Release.name, Release.stub, Release.is_legacy)
else:
query = session.query(
Release.name,
Release.stub,
Release.is_legacy,
db.case(
[
(UserRelease.release_id == Release.id, True),
],
else_=False,
).label("is_mine"),
).outerjoin(
UserRelease,
db.and_(
UserRelease.release_id == Release.id,
UserRelease.user_id == current_user.id,
),
)
query = query.filter(
Release.is_legacy.is_(show_legacy),
Release.is_public.is_(True),
).order_by(Release.id.asc())
return query | 56732a67eb909f89afe01f6189c516d06bccf518 | 3,635,207 |
def get_total_supply(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
"""
Get total number of pre-mined tokens
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
    Returns
-------
int
number of pre-mined tokens
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
"""
#update to v2
method = 'hmyv2_getTotalSupply'
try:
return int(rpc_request(method, endpoint=endpoint, timeout=timeout)['result'],16)
except TypeError as e:
raise InvalidRPCReplyError(method, endpoint) from e | f235be169273a638f042ab661fc88744a0b029ae | 3,635,208 |
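A minimal sketch of the JSON-RPC exchange that `rpc_request` is assumed to perform for this method; the endpoint URL is illustrative:

import requests

def total_supply_raw(endpoint="https://api.s0.t.hmny.io", timeout=30):
    # standard JSON-RPC 2.0 envelope for the hmyv2_getTotalSupply method
    payload = {"jsonrpc": "2.0", "id": 1,
               "method": "hmyv2_getTotalSupply", "params": []}
    reply = requests.post(endpoint, json=payload, timeout=timeout).json()
    return reply["result"]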
def _is_css(filename):
"""
Checks whether a file is CSS waveform data (header) or not.
:type filename: str
:param filename: CSS file to be checked.
:rtype: bool
:return: ``True`` if a CSS waveform header file.
"""
# Fixed file format.
# Tests:
# - the length of each line (283 chars)
# - two epochal time fields
# (for position of dot and if they convert to UTCDateTime)
# - supported data type descriptor
try:
with open(filename, "rb") as fh:
lines = fh.readlines()
# check for empty file
if not lines:
return False
# check every line
for line in lines:
assert(len(line.rstrip(b"\n\r")) == 283)
assert(b"." in line[26:28])
UTCDateTime(float(line[16:33]))
assert(b"." in line[71:73])
UTCDateTime(float(line[61:78]))
assert(line[143:145] in DTYPE)
except Exception:
return False
return True | 57d7b1fdbc244a7d17c905b5f5b5a2aa653f6e13 | 3,635,209 |
def extract_versions():
"""
Extracts version values from the main matplotlib __init__.py and
returns them as a dictionary.
"""
    versions = {}
    with open('lib/matplotlib/__init__.py') as fd:
        for line in fd.readlines():
            if line.startswith('__version__numpy__'):
                # exec into an explicit dict; mutating locals() via exec
                # does not work reliably inside a function
                exec(line.strip(), {}, versions)
    return versions | b54733ffdae76206400e7203f792e3b809cf6c30 | 3,635,210 |
def check_range(coord, range):
"""Check if coordinates are within range (0,0,0) - (range)
Returns
-------
bool
Success status
"""
# TODO: optimize
if len(coord) != len(range):
raise ValueError(
"Provided coordinate %r and given range %r" % (coord, range)
+ " have different dimensionality"
)
for c, r in zip(coord, range):
if c < 0 or c >= r:
return False
return True | e6e5e5585f02d3c3c6fbec51963d2e637d8643c9 | 3,635,211 |
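A couple of quick checks illustrating the half-open bounds ([0, r) per axis):

assert check_range((1, 2, 3), (4, 4, 4)) is True
assert check_range((1, 4, 3), (4, 4, 4)) is False   # 4 is outside [0, 4)
assert check_range((-1, 0, 0), (4, 4, 4)) is False  # negative coordinate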
def make_df(raw_data, add_annotations = True):
"""
Basic preprocessing of data:
    * Turn dictionary into a pandas dataframe
* Add annotator column to DF -- stored as string
* Name columns according to body part, etc
"""
df = []
labels = []
seqs = []
for seq_id in raw_data['sequences']:
pts = raw_data['sequences'][seq_id]['keypoints']
for idx in range(len(pts)):
data = pts[idx].flatten()
df.append(list(data))
if 'annotations' in raw_data['sequences'][seq_id] and add_annotations:
            labels += list(raw_data['sequences'][seq_id]['annotations'])
seqs += [seq_id]*len(pts)
#Make this a dataframe
df_ = pd.DataFrame(df, columns = colnames)
if len(labels) > 0 and add_annotations:
df_['annotation'] = labels
df_['seq_id'] = seqs
return df_ | cfc893b3616c879e734cabfd8a2dc420aec095d7 | 3,635,212 |
import numpy as np
from scipy.linalg.lapack import dgtsv  # LAPACK tridiagonal solver (assumed source of dgtsv)
def cyclic_tdma(lower_diagonal, main_diagonal, upper_diagonal, right_hand_side):
    """The Thomas algorithm (TDMA) for tridiagonal matrix inversion, with the Sherman-Morrison formula applied
Parameters
----------
lower_diagonal: np.ndarray
The lower diagonal of the matrix length n, the first element is taken to be the top right element of the matrix
main_diagonal: np.ndarray
The main diagonal of the matrix length n
upper_diagonal: np.ndarray
The upper diagonal of the matrix length n, the last element is taken to be the bottom left element of the matrix
right_hand_side: np.ndarray
The right hand side of the equation
Returns
-------
x: np.ndarray
The solution array length n
Notes
-----
Nothing is mutated by this function
"""
    # copy so the caller's main_diagonal is not mutated (per the docstring)
    main_diagonal = main_diagonal.copy()
    # modify b
    gamma = -main_diagonal[0] if main_diagonal[0] else 1.0
    main_diagonal[0] = main_diagonal[0] - gamma
    main_diagonal[-1] = main_diagonal[-1] - lower_diagonal[0] * upper_diagonal[-1] / gamma
# find Ax=rhs
_, _, _, x, _ = dgtsv(lower_diagonal[1:], main_diagonal, upper_diagonal[:-1], right_hand_side)
# make u
u = np.zeros_like(right_hand_side)
u[0] = gamma
u[-1] = upper_diagonal[-1]
# find Az=u
_, _, _, z, _ = dgtsv(lower_diagonal[1:], main_diagonal, upper_diagonal[:-1], u)
# find the factor from the second part of SM formula
factor = (x[0] + x[-1] * lower_diagonal[0] / gamma) / (1 + z[0] + z[-1] * lower_diagonal[0] / gamma)
return x - z * factor | a1fd869d181caad075a14bae061c8ee217ef185f | 3,635,213 |
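A hedged numerical check of `cyclic_tdma`: build the corresponding dense cyclic tridiagonal matrix (corner entries follow the docstring's convention) and compare against NumPy's dense residual:

import numpy as np

n = 5
lower = np.full(n, -1.0)   # lower[0] is the top-right corner entry
main = np.full(n, 4.0)
upper = np.full(n, -1.0)   # upper[-1] is the bottom-left corner entry
rhs = np.arange(1.0, n + 1)

A = np.diag(main) + np.diag(upper[:-1], 1) + np.diag(lower[1:], -1)
A[0, -1] = lower[0]
A[-1, 0] = upper[-1]

x = cyclic_tdma(lower, main, upper, rhs)
assert np.allclose(A @ x, rhs)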
def lico2_ocp_Ramadass2004(sto):
"""
    Lithium Cobalt Oxide (LiCO2) Open Circuit Potential (OCP) as a function of the
    stoichiometry. The fit is taken from Ramadass 2004. Stretch is considered the
    overhang area negative electrode / area positive electrode, in Ramadass 2002.
References
----------
.. [1] P. Ramadass, Bala Haran, Parthasarathy M. Gomadam, Ralph White, and Branko
N. Popov. "Development of First Principles Capacity Fade Model for Li-Ion Cells."
(2004)
Parameters
----------
sto : :class:`pybamm.Symbol`
        Stoichiometry of material (li-fraction)
"""
stretch = 1.13
sto = stretch * sto
u_eq = ((- 4.656 + 88.669 * (sto ** 2)
- 401.119 * (sto ** 4) + 342.909 * (sto ** 6)
- 462.471 * (sto ** 8) + 433.434 * (sto ** 10)) / (
- 1 + 18.933 * (sto ** 2) - 79.532 * (sto ** 4)
+ 37.311 * (sto ** 6) - 73.083 * (sto ** 8)
+ 95.96 * (sto ** 10))
)
return u_eq | 2c0902e1d1cdec9ac7626038e34092933665bf84 | 3,635,214 |
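Since the fit is plain arithmetic, it also evaluates directly on floats or NumPy arrays, which gives a quick sanity check of the curve:

import numpy as np
sto = np.linspace(0.4, 0.6, 5)
print(lico2_ocp_Ramadass2004(sto))  # OCP values (V) across mid-range stoichiometry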
import doctest
def doctestobj(*args, **kwargs):
"""
Wrapper for doctest.run_docstring_examples that works in maya gui.
"""
return doctest.run_docstring_examples(*args, **kwargs) | 1efccd1a887636bbcf80e762f12934e7d03efe28 | 3,635,215 |
def _return_model_names_for_plots():
"""Returns models to be used for testing plots. Needs
- 1 model that has prediction interval ("theta")
- 1 model that does not have prediction interval ("lr_cds_dt")
- 1 model that has in-sample forecasts ("theta")
- 1 model that does not have in-sample forecasts ("lr_cds_dt")
"""
model_names = ["theta", "lr_cds_dt"]
return model_names | bd180134c5c74f4d1782384bc8e3b13abff8b125 | 3,635,216 |
import numpy as np
def _convert_input_type_range(img):
"""Convert the type and range of the input image.
It converts the input image to np.float16 type and range of [0, 1].
    It is mainly used for pre-processing the input image in color space
    conversion functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (AscendArray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float16 type with range [0, 1].
Returns:
(AscendArray): The converted image with type of np.float16 and range of [0, 1].
"""
img_type = img.dtype
if img_type == np.float16:
pass
elif img_type == np.uint8:
img = img.astype(np.float16)
        # normalize to the [0, 1] range
        img /= 255.
else:
raise TypeError('The img type should be np.float16 or np.uint8, '
f'but got {img_type}')
return img | 990516e2cb069b9afd4388c6fbb8f1f333893dc9 | 3,635,217 |
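A small hedged check with a NumPy array standing in for the `AscendArray` mentioned in the docstring (the logic only relies on `dtype`/`astype`):

import numpy as np
img = np.array([[0, 128, 255]], dtype=np.uint8)
out = _convert_input_type_range(img)
assert out.dtype == np.float16 and out.max() <= 1.0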
from collections import defaultdict
from json import loads
from pathlib import Path
from pkg_resources import resource_filename as pkgrf
from bids.layout.writing import build_path  # assumed pybids helper
def collect_derivatives(derivatives_dir, subject_id, std_spaces, freesurfer,
spec=None, patterns=None):
"""Gather existing derivatives and compose a cache."""
if spec is None or patterns is None:
_spec, _patterns = tuple(
loads(Path(pkgrf('aslprep', 'smriprep/data/io_spec.json')).read_text()).values())
if spec is None:
spec = _spec
if patterns is None:
patterns = _patterns
derivs_cache = defaultdict(list, {})
derivatives_dir = Path(derivatives_dir)
def _check_item(item):
if not item:
return None
if isinstance(item, str):
item = [item]
result = []
for i in item:
if not (derivatives_dir / i).exists():
i = i.rstrip('.gz')
if not (derivatives_dir / i).exists():
return None
result.append(str(derivatives_dir / i))
return result
for space in [None] + std_spaces:
for k, q in spec['baseline'].items():
q['subject'] = subject_id
if space is not None:
q['space'] = space
item = _check_item(build_path(q, patterns, strict=True))
if not item:
return None
if space:
derivs_cache["std_%s" % k] += item if len(item) == 1 else [item]
else:
derivs_cache["t1w_%s" % k] = item[0] if len(item) == 1 else item
for space in std_spaces:
for k, q in spec['std_xfms'].items():
q['subject'] = subject_id
q['from'] = q['from'] or space
q['to'] = q['to'] or space
item = _check_item(build_path(q, patterns))
if not item:
return None
derivs_cache[k] += item
derivs_cache = dict(derivs_cache) # Back to a standard dictionary
if freesurfer:
for k, q in spec['surfaces'].items():
q['subject'] = subject_id
item = _check_item(build_path(q, patterns))
if not item:
return None
if len(item) == 1:
item = item[0]
derivs_cache[k] = item
derivs_cache['template'] = std_spaces
return derivs_cache | f33353c4c67d847b94f4d8467c6721ecc7dc71fa | 3,635,218 |
def aten_meshgrid(mapper, graph, node):
""" 构造对每个张量做扩充操作的PaddleLayer。
TorchScript示例:
%out.39 : int = aten::mshgrid(%input.1)
参数含义:
%out.39 (Tensor): 输出,扩充后的结果。
%input.1 (Tensor): 输入。
"""
scope_name = mapper.normalize_scope_name(node)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
    # get the list of outputs of the current node
current_outputs = [output_name]
    # process input 0, i.e. %input.1
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
layer_inputs["args"] = inputs_name[0]
    # get the list of inputs of the current node
current_inputs = layer_inputs.values()
current_outputs = layer_outputs
graph.add_layer("paddle.meshgrid", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
return current_inputs, current_outputs | e260855ca6732f9d846cc492c949197e93e9551a | 3,635,219 |
import numpy as np
def bearing_example():
"""This function returns an instance of a simple bearing.
The purpose is to make available a simple model
so that doctest can be written using it.
Parameters
----------
Returns
-------
An instance of a bearing object.
Examples
--------
>>> bearing = bearing_example()
>>> bearing.frequency[0]
0.0
"""
kxx = 1e6
kyy = 0.8e6
cxx = 2e2
cyy = 1.5e2
w = np.linspace(0, 200, 11)
bearing = BearingElement(n=0, kxx=kxx, kyy=kyy, cxx=cxx, cyy=cyy, frequency=w)
return bearing | f4d8e71b0b13aa17f9ad08208e8082ef5827a70c | 3,635,220 |
import os
def parsing_check(dataset, source, attr):
"""
The annotator gets a contextualized patent citation displayed
- the patent citation is highlighted
- the title (h3 + bold + purple) is the value of the parsed attribute (e.g. orgname)
- the citation has an href linking to the patent webpage (on google patents), in case further
    inspection is needed. Note: there is no guarantee that the link actually exists.
The annotator faces a binary choice ACCEPT or REJECT.
"""
def add_html(stream):
for task in stream:
span = task["spans"][0]
root = "https://patents.google.com/patent/"
suffix = span["orgname"] + span["original"]
start, end = (span["start"], span["end"])
text = task["text"]
before = text[:start]
span_ = text[start:end]
after = text[end:]
task["html"] = (
f"<span style='background-color:#775ec2;color:white;font-size:130%;font-weight:bold;'> "
f"{str(span.get(attr))} </span><br> \
{before} <span style='background-color: #fae284'><a \
href={root + suffix}>{span_}</a></span> \
{after}"
)
yield task
fmt = os.path.splitext(source)[-1]
stream = JSONL(source) if fmt == ".jsonl" else JSON(source)
stream = add_html(stream)
# return {"view_id": "classification",
return {
"view_id": "blocks",
"dataset": dataset,
"stream": stream,
"config": {"blocks": [{"view_id": "html"}]}, # add the blocks to the config
} | 9f920f53ddd488a08f63ee7e12b558ebb90c7335 | 3,635,221 |
import numpy as np
def create_data():
"""Create some random exponential data"""
#np.random.seed(18)
pure = np.array(sorted([np.random.exponential() for i in range(10)]))
noise = np.random.normal(0,1, pure.shape)
signal = pure + noise
return signal | 613231436fe0faca177106cf80abe5d475510469 | 3,635,222 |
def fetch_accidents(data_home=None):
"""Fetch and return the accidents dataset (Frequent Itemset Mining)
Traffic accident data, anonymized.
see: http://fimi.uantwerpen.be/data/accidents.pdf
==================== ==============
Nb of items 468
Nb of transactions 340183
Avg transaction size 33.807
Density 0.072
==================== ==============
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit-mine data is stored in `~/scikit_mine_data`.
Returns
-------
pd.Series
Transactions from the accidents dataset, as an in-memory pandas Series.
Each unique transaction is represented as a Python list.
"""
return fetch_any("accidents.dat", data_home=data_home) | 612a7c67fc5b81297ec7ca37d38e0267d23fed36 | 3,635,223 |
def align_nodes(nodenet_uid, nodespace):
""" Automatically align the nodes in the given nodespace """
return runtime.align_nodes(nodenet_uid, nodespace) | aa9fd5f22d8433d0b15e9b826dba9d4c6fa1b590 | 3,635,224 |
import lifelines.utils
import numpy as np
def cindex(y_true: np.array, scores: np.array) -> float:
    """Compute the concordance index (C-Index) between observed values and predicted scores.
Args:
y_true (np.array): An array of actual values of target
scores (np.array): An array of predicted score of target
Returns:
[float]: Returns C-Index score
"""
return lifelines.utils.concordance_index(y_true, scores) | e69bc4f2e3b391c4049b6b60936a64bcfff9f27d | 3,635,225 |
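A toy example (requires `lifelines`); higher scores paired with longer event times give perfect concordance:

import numpy as np
y_true = np.array([10, 8, 6, 4])
scores = np.array([0.9, 0.7, 0.5, 0.2])
print(cindex(y_true, scores))  # 1.0: rankings are perfectly concordant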
def match_tones(
left, right, eps=2000., shift_from_right=0.,
match_col='fr',
join_type='inner'):
"""Return a table with tones matched.
    This function makes use of the ``stilts`` utility.
Parameters
----------
left: astropy.Table
The left model params table.
right: astropy.Table
The right model params table.
eps: float
The error to tolerate in Hz.
shift_from_right: float
The frequency shift to apply to the right.
match_col: str
The column to use for the match.
Default is the resonance frequency ``fr``.
join_type: str
Join type to use for the output table.
"""
# make match column
col = 'match_tones_col'
idx_col = 'idx'
_left = left.copy()
_right = right.copy()
_left[col] = _left[match_col]
_left[idx_col] = list(range(len(_left)))
_right[col] = _right[match_col] + shift_from_right
_right[idx_col] = list(range(len(_right)))
join = {
'left': 'all1',
'right': 'all2',
'inner': '1and2',
'outer': '1or2'
}[join_type]
    return stilts_match1d(_left, _right, col, eps, extra_args=[
        f'join={join}', 'find=best',
        ])
import logging
def name(ea, string, *suffix, **flags):
"""Renames the address specified by `ea` to `string`.
If `ea` is pointing to a global and is not contained by a function, then by default the label will be added to the Names list.
If `flags` is specified, then use the specified value as the flags.
If the boolean `listed` is specified, then specify whether to add the label to the Names list or not.
"""
# combine name with its suffix
res = (string,) + suffix
string = interface.tuplename(*res)
# validate the address
ea = interface.address.inside(ea)
# XXX: what's this for?
if idaapi.has_any_name(type.flags(ea)):
pass
# XXX: isolate this default flags logic into a separate closure
# since this logic can be short-circuited by the 'flags' parameter.
# some default options
fl = idaapi.SN_NON_AUTO
fl |= idaapi.SN_NOCHECK
# preserve any flags that were previously applied
fl |= 0 if idaapi.is_in_nlist(ea) else idaapi.SN_NOLIST
fl |= idaapi.SN_WEAK if idaapi.is_weak_name(ea) else idaapi.SN_NON_WEAK
fl |= idaapi.SN_PUBLIC if idaapi.is_public_name(ea) else idaapi.SN_NON_PUBLIC
# set its local flag based on whether we're in a function or not
fl = (fl | idaapi.SN_LOCAL) if function.within(ea) else (fl & ~idaapi.SN_LOCAL)
# if we're within a function and 'listed' wasn't explicitly specified
# then ensure it's not listed as it's likely to be a local label
if not function.within(ea) and 'listed' not in flags:
fl &= ~idaapi.SN_NOLIST
# if the bool `listed` is True, then ensure that it's added to the name list.
if 'listed' in flags:
fl = (fl & ~idaapi.SN_NOLIST) if flags.get('listed', False) else (fl | idaapi.SN_NOLIST)
# check to see if we're a label being applied to a switch
# that way we can make it a local label
# FIXME: figure out why this doesn't work on some switch labels
try:
# check if we're a label of some kind
f = type.flags(ea)
if idaapi.has_dummy_name(f) or idaapi.has_user_name(f):
# that is referenced by an array with a correctly sized pointer inside it
(r, sidata), = ((r, type.array(r)) for r in xref.data_up(ea))
if config.bits() == sidata.itemsize*8 and ea in sidata:
# which we check to see if its a switch_info_t
si = next(idaapi.get_switch_info_ex(r) for r in xref.data_up(r))
if si is not None:
# because its name has its local flag cleared
fl |= idaapi.SN_LOCAL
    except Exception: pass
# convert the specified string into a form that IDA can handle
ida_string = utils.string.to(string)
# validate the name
res = idaapi.validate_name2(buffer(ida_string)[:]) if idaapi.__version__ < 7.0 else idaapi.validate_name(buffer(ida_string)[:], idaapi.VNT_VISIBLE)
if ida_string and ida_string != res:
logging.info(u"{:s}.name({:#x}, \"{:s}\"{:s}) : Stripping invalid chars from specified name resulted in \"{:s}\".".format(__name__, ea, utils.string.escape(string, '"'), u", {:s}".format(utils.string.kwargs(flags)) if flags else '', utils.string.escape(utils.string.of(res), '"')))
ida_string = res
# set the name and use the value of 'flags' if it was explicit
res, ok = name(ea), idaapi.set_name(ea, ida_string or "", flags.get('flags', fl))
if not ok:
raise E.DisassemblerError(u"{:s}.name({:#x}, \"{:s}\"{:s}) : Unable to call `idaapi.set_name({:#x}, \"{:s}\", {:#x})`.".format(__name__, ea, utils.string.escape(string, '"'), u", {:s}".format(utils.string.kwargs(flags)) if flags else '', ea, utils.string.escape(string, '"'), flags.get('flags', fl)))
return res | f3f16ba223f45bd74cf4987274a3cda8c5bb0098 | 3,635,227 |
from typing import Tuple
from typing import Optional
from typing import List
from typing import cast
def verify(
symbol_table: intermediate.SymbolTable,
) -> Tuple[Optional[VerifiedIntermediateSymbolTable], Optional[List[Error]]]:
"""Verify that C# code can be generated from the ``symbol_table``."""
errors = [] # type: List[Error]
structure_name_collisions = _verify_structure_name_collisions(
symbol_table=symbol_table
)
errors.extend(structure_name_collisions)
if len(errors) > 0:
return None, errors
return cast(VerifiedIntermediateSymbolTable, symbol_table), None | c7af0f196cb59022f89f8097f17e98a913fb7615 | 3,635,228 |
import time
def find_workflow_component_figures(page):
""" Returns workflow component figure elements in `page`. """
time.sleep(0.5) # Pause for stable display.
root = page.root or page.browser
return root.find_elements_by_class_name('WorkflowComponentFigure') | 1a56a0a348803394c69478e3443cbe8c6cb0ce9c | 3,635,229 |
import numpy as np
def select_best_features(tx, selected_features, rho_exp, w, number_of_select):
"""Selects features by the highest value of the weights
Parameters
----------
tx : np.ndarray
Original features
selected_features : [(int, int)]
Best features from previous iteration
rho_exp : np.ndarray
        Trained parameters
w : np.ndarray
The weights of the model
    number_of_select : int
        Number of features to keep, chosen by largest absolute weight
Returns
-------
np.ndarray
Selected features
np.ndarray
Selected features selection
np.ndarray
Selected rho values
"""
select = np.argpartition(np.abs(w), -number_of_select)[-number_of_select:]
return tx[:, select], selected_features[:, select], rho_exp[select] | bef855b3685116cec90cfc51194e00d5cee4d3af | 3,635,230 |
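A toy run, picking the two features with the largest absolute weight:

import numpy as np
tx = np.arange(12.0).reshape(3, 4)          # 3 samples, 4 features
selected = np.arange(4).reshape(1, 4)       # feature bookkeeping
rho = np.array([0.1, 0.2, 0.3, 0.4])
w = np.array([0.5, -2.0, 0.1, 1.5])         # features 1 and 3 dominate
tx2, sel2, rho2 = select_best_features(tx, selected, rho, w, 2)
assert tx2.shape == (3, 2)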
from numpy import unique  # assumed: numpy's unique() for de-duplicating codes
def make_pol_lookup(codes):
"""
Returns a lookup table from a list of polarization codes
"""
codes = unique(codes)
codes.sort()
lookup = {}
for code in codes:
if code == 'X' or code == 'XX' or code == 'H':
lookup[code] = -5
elif code == 'Y' or code == 'YY' or code == 'V' or code == 'E':
lookup[code] = -6
elif code == 'XY':
lookup[code] = -7
elif code == 'YX':
lookup[code] = -8
elif code == 'R' or code == 'RR':
lookup[code] = -1
elif code == 'L' or code == 'LL':
lookup[code] = -2
elif code == 'RL':
lookup[code] = -3
elif code == 'LR':
lookup[code] = -4
elif code == 'I':
lookup[code] = 1
elif code == 'Q':
lookup[code] = 2
elif code == 'U':
lookup[code] = 3
elif code == 'V':
lookup[code] = 4
else:
# unknown
lookup[code] = 0
return codes, lookup, len(codes) | f5c4aece195ff436af8855a5296e1be493c92737 | 3,635,231 |
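Example mapping a mixed list of linear-feed codes (the negative integers follow the AIPS/FITS polarization convention used above):

codes, lookup, n = make_pol_lookup(['XX', 'YY', 'XX', 'XY'])
# lookup == {'XX': -5, 'XY': -7, 'YY': -6}; n == 3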
import numpy as np
import optuna
import xgboost
from sklearn.model_selection import train_test_split
def _calc_best_estimator_optuna_univariate(
X,
y,
estimator,
measure_of_accuracy,
estimator_params,
verbose,
test_size,
random_state,
eval_metric,
number_of_trials,
sampler,
pruner,
with_stratified,
):
"""Function for calculating best estimator
Parameters
----------
estimator : estimator instance
An unfitted estimator.
cv : object, cross-validation object, default=None
Attributes
----------
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying estimator exposes such an attribute when fit.
.. versionadded:: 0.24
"""
if estimator.__class__.__name__ == "XGBClassifier" and with_stratified:
train_x, valid_x, train_y, valid_y = train_test_split(
X, y, stratify=y[y.columns.to_list()[0]], test_size=test_size
)
if estimator.__class__.__name__ == "XGBClassifier" and not with_stratified:
train_x, valid_x, train_y, valid_y = train_test_split(
X, y, test_size=test_size, random_state=random_state
)
if estimator.__class__.__name__ == "XGBRegressor":
train_x, valid_x, train_y, valid_y = train_test_split(
X, y, test_size=test_size, random_state=random_state
)
def objective(trial):
dtrain = xgboost.DMatrix(train_x, label=train_y)
dvalid = xgboost.DMatrix(valid_x, label=valid_y)
if (
estimator.__class__.__name__ == "XGBClassifier"
or estimator.__class__.__name__ == "XGBRegressor"
):
param = {}
param["verbosity"] = verbose
param["eval_metric"] = eval_metric
param["booster"] = trial.suggest_categorical("booster", ["gbtree"])
if (
valid_y.iloc[:, 0].nunique() <= 2
and estimator.__class__.__name__ == "XGBClassifier"
):
param["objective"] = "binary:logistic"
if estimator.__class__.__name__ == "XGBRegressor":
param["objective"] = "reg:squarederror"
if "lambda" in estimator_params.keys():
param["lambda"] = _trail_param_retrive(
trial, estimator_params, "lambda"
)
# L1 regularization weight.
if "alpha" in estimator_params.keys():
param["alpha"] = _trail_param_retrive(trial, estimator_params, "alpha")
# sampling ratio for training data.
if "subsample" in estimator_params.keys():
param["subsample"] = _trail_param_retrive(
trial, estimator_params, "subsample"
)
# sampling according to each tree.
if "colsample_bytree" in estimator_params.keys():
param["colsample_bytree"] = _trail_param_retrive(
trial, estimator_params, "colsample_bytree"
)
if estimator.__class__.__name__ == "XGBClassifier":
if "scale_pos_weight" in estimator_params.keys():
param["scale_pos_weight"] = _trail_param_retrive(
trial, estimator_params, "scale_pos_weight"
)
if "booster" in estimator_params.keys():
if param["booster"] in ["gbtree", "dart"]:
# maximum depth of the tree, signifies complexity of the tree.
if "max_depth" in estimator_params.keys():
param["max_depth"] = _trail_param_retrive(
trial, estimator_params, "max_depth"
)
if "min_child_weight" in estimator_params.keys():
# minimum child weight, larger the term more conservative the tree.
param["min_child_weight"] = _trail_param_retrive(
trial, estimator_params, "min_child_weight"
)
if "eta" in estimator_params.keys():
param["eta"] = _trail_param_retrive(
trial, estimator_params, "eta"
)
if "gamma" in estimator_params.keys():
# defines how selective algorithm is.
param["gamma"] = _trail_param_retrive(
trial, estimator_params, "gamma"
)
# if "grow_policy" in estimator_params.keys():
# param["grow_policy"] = _trail_param_retrive(
# trial, estimator_params, "grow_policy"
# )
if "booster" in estimator_params.keys():
if param["booster"] == "dart":
if "sample_type" in estimator_params.keys():
param["sample_type"] = _trail_param_retrive(
trial, estimator_params, "sample_type"
)
if "normalize_type" in estimator_params.keys():
param["normalize_type"] = _trail_param_retrive(
trial, estimator_params, "normalize_type"
)
if "rate_drop" in estimator_params.keys():
param["rate_drop"] = _trail_param_retrive(
trial, estimator_params, "rate_drop"
)
if "skip_drop" in estimator_params.keys():
param["skip_drop"] = _trail_param_retrive(
trial, estimator_params, "skip_drop"
)
# Add a callback for pruning.
pruning_callback = optuna.integration.XGBoostPruningCallback(
trial, "validation-" + eval_metric
)
est = xgboost.train(
param,
dtrain,
evals=[(dvalid, "validation")],
callbacks=[pruning_callback],
)
preds = est.predict(dvalid)
pred_labels = np.rint(preds)
if "classifier" in estimator.__class__.__name__.lower():
accr = _calc_metric_for_single_output_classification(
valid_y, pred_labels, measure_of_accuracy
)
if "regressor" in estimator.__class__.__name__.lower():
accr = _calc_metric_for_single_output_regression(
valid_y, pred_labels, measure_of_accuracy
)
return accr
study = optuna.create_study(direction="maximize", sampler=sampler, pruner=pruner)
study.optimize(objective, n_trials=number_of_trials, timeout=600)
trial = study.best_trial
dtrain = xgboost.DMatrix(train_x, label=train_y)
dvalid = xgboost.DMatrix(valid_x, label=valid_y)
print(trial.params)
best_estimator = xgboost.train(
trial.params,
dtrain,
evals=[(dvalid, "validation")],
)
return best_estimator | 35cbc2458455d7153a1b17c91576c2629baf2ab3 | 3,635,232 |
def get_users_info_async(future_session: "FuturesSession", connection, name_begins,
abbreviation_begins, offset=0, limit=-1, fields=None):
"""Get information for a set of users asynchronously.
Args:
future_session: Future Session object to call MicroStrategy REST
Server asynchronously
connection(object): MicroStrategy connection object returned by
`connection.Connection()`.
name_begins(string): Characters that the user name must begin with.
abbreviation_begins(string): Characters that the user abbreviation must
begin with.
offset(int): Starting point within the collection of returned search
results. Used to control paging behavior.
limit(int): Maximum number of items returned for a single search
        request. Used to control paging behavior. Use -1 (default) for no
limit (subject to governing settings).
fields(list, optional): Comma separated top-level field whitelist. This
allows client to selectively retrieve part of the response model.
Returns:
Complete Future object.
"""
params = {
'nameBegins': name_begins,
'abbreviationBegins': abbreviation_begins,
'offset': offset,
'limit': limit,
'fields': fields
}
url = f'{connection.base_url}/api/users/'
headers = {'X-MSTR-ProjectID': None}
future = future_session.get(url=url, headers=headers, params=params)
return future | f2679de38822a12abb2e0e65d102de7876304ccd | 3,635,233 |
import os
import gzip
from six.moves import cPickle  # cPickle on Python 2, plain pickle on Python 3
def load_sparse(fname):
    """
    Load a pickled (optionally gzip-compressed) object from ``fname``.
    """
f = None
try:
if not os.path.exists(fname):
fname = fname + '.gz'
f = gzip.open(fname)
elif fname.endswith('.gz'):
f = gzip.open(fname)
else:
f = open(fname)
d = cPickle.load(f)
finally:
if f:
f.close()
return d | 98fcee3e8ebe0ee76d61e08cb6f32b2c00bc5149 | 3,635,234 |
def site_link_url(request, siteobj):
"""returns a site urls form already given keys"""
return '%s://%s%s/site/%s' % (
presettings.DYNAMIC_LINK_SCHEMA_PROTO,
request.META.get('HTTP_HOST'),
presettings.DYNAMIC_LINK_URL,
siteobj.link_key
) | c0a29c6ac0157e7ac7fae506ea9f87960c03a92e | 3,635,235 |
import argparse as ap
def get_arguments():
""" All cli arguments. """
p = ap.ArgumentParser()
p.add_argument('mode', type=str, choices=['train', 'predict'])
# files
p.add_argument('--train-file', type=str)
p.add_argument('--dev-file', type=str)
p.add_argument('--test-file', type=str)
p.add_argument('--model-path', type=str, default='checkpoints')
p.add_argument('--output-file', type=str, default='tags.txt')
p.add_argument('--delete-existing', default=True)
# Model hyperparams
p.add_argument('--model', default='standard_word_crf', type=str)
p.add_argument('--hparam-defaults', default='bi_crf_default_9', type=str)
    p.add_argument('--hparams-str', type=str,
                   help=("Update `hparams` from comma separated list of "
                         "name=value pairs"))
p.add_argument('--hparams-json', type=str,
help="Update `hparams` from parameters in JSON file")
# training hyperparams
p.add_argument('--save-checkpoints-steps', default=100)
p.add_argument('--min-epochs-before-early-stop', type=int, default=5)
p.add_argument('--early-stop-patience', type=int, default=10)
p.add_argument('--train-epochs', default=1, type=int)
p.add_argument('--shuffle-buffer-size', type=int, default=10000)
p.add_argument('--random-seed', type=int, default=42)
return p.parse_args() | e1d426856fcb7fba3e8bf56200748a7354b09797 | 3,635,236 |
import configparser
def get_headers(path='.credentials/key.conf'):
"""Get the authentication key header for all requests"""
config = configparser.ConfigParser()
config.read(path)
headers = {
'Ocp-Apim-Subscription-Key': config['default']['primary']
}
return headers | d40c1b6246efb728040adc47b6180f50aa4dc3e8 | 3,635,237 |
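The INI layout `get_headers` expects, recreated with a placeholder key (the section and option names come from the parser calls above):

import configparser, os
os.makedirs('.credentials', exist_ok=True)
cfg = configparser.ConfigParser()
cfg['default'] = {'primary': 'YOUR-SUBSCRIPTION-KEY'}
with open('.credentials/key.conf', 'w') as fh:
    cfg.write(fh)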
def Sdif(M0, dM0M1, alpha):
"""
:math:`S(\\alpha)`, as defined in the paper, computed using `M0`,
`M0 - M1`, and `alpha`.
Parameters
----------
M0 : ndarray or matrix
A symmetric indefinite matrix to be shrunk.
dM0M1 : ndarray or matrix
M0 - M1, where M1 is a positive definite target matrix.
alpha : float
A shrinking parameter.
Returns
-------
    S(alpha) : ndarray
        The convex combination :math:`(1-\\alpha) M_0 + \\alpha M_1`.
"""
return M0 - alpha * dM0M1 | 6463cb04d7dcfaad93358c7db38f4674a51654b2 | 3,635,238 |
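A numerical check of the docstring's claim that `Sdif` returns the convex combination (1-alpha) M0 + alpha M1:

import numpy as np
M0 = np.array([[2.0, 1.0], [1.0, -1.0]])   # symmetric indefinite
M1 = np.eye(2)                              # positive definite target
alpha = 0.3
assert np.allclose(Sdif(M0, M0 - M1, alpha), (1 - alpha) * M0 + alpha * M1)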
import os
def env_world_size():
"""World size for distributed training.
Is set in torch.distributed.launch as args.nproc_per_node * args.nnodes.
For example, when running on 1 node with 4 GPUs per node, the world size is 4.
see: https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py"""
if 'WORLD_SIZE' in os.environ:
return int(os.environ['WORLD_SIZE'])
return 1 | 58587f8f4462fd18e156834214385f5dfebfaa2a | 3,635,239 |
def add_entry(entries, folders, collections, session):
"""Add vault entry
Args: entries - list of dicts
folders - dict of folder objects
collections - dict of collections objects
session - bytes
Returns: None or entry (Item)
"""
folder = select_folder(folders)
colls = select_collection(collections, session, coll_list=[]) or []
if folder is False:
return None
entry = {"organizationId": next(iter(colls.values()))['organizationId'] if colls else None,
"folderId": folder['id'],
"type": 1,
"name": "",
"notes": "",
"favorite": False,
"fields": [{"name": "autotype", "value": "", "type": 0}],
"login": {"username": "",
"password": "",
"url": ""},
"collectionIds": [*colls],
"secureNote": "",
"card": "",
"identity": ""}
return edit_entry(entry, entries, folders, collections, session) | 7eab6d0b1df5c713c96a2b70dd874345faf28b3f | 3,635,240 |
import sys
import warnings
def make_wsgi_app(services_conf=None, debug=False, ignore_config_warnings=True, reloader=False):
"""
Create a MapProxyApp with the given services conf.
:param services_conf: the file name of the mapproxy.yaml configuration
:param reloader: reload mapproxy.yaml when it changed
"""
if sys.version_info[0] == 2 and sys.version_info[1] == 5:
warnings.warn('Support for Python 2.5 is deprecated since 1.7.0 and will be dropped with 1.8.0', FutureWarning)
if reloader:
make_app = lambda: make_wsgi_app(services_conf=services_conf, debug=debug,
reloader=False)
return ReloaderApp(services_conf, make_app)
try:
conf = load_configuration(mapproxy_conf=services_conf, ignore_warnings=ignore_config_warnings)
services = conf.configured_services()
except ConfigurationError as e:
log.fatal(e)
raise
config_files = conf.config_files()
app = MapProxyApp(services, conf.base_config)
if debug:
app = wrap_wsgi_debug(app, conf)
app.config_files = config_files
return app | f8c6bf1cb6a7a3fd591e04ad43faa50fb17fb3f3 | 3,635,241 |
def _get_node_by_name(graph_def: rewrite.GraphDef,
node_name: str) -> rewrite.NodeDef:
"""Return a node from a graph that matches the provided name"""
matches = [node for node in graph_def.node if node.name == node_name]
return matches[0] if len(matches) > 0 else None | 8e893b4d51a1fba861f7659ec7edaf8bd794e114 | 3,635,242 |
import requests
def shorten_link(url: str) -> tuple:
"""
Method to shorten a given url using the shrtco.de API
@Parameters
url:str url to be shortened
@Returns
(errorcode:int,result:str)
errorcode: int indicating whether operation succeeded or failed
result: on success, new url of shortened link, else error description
"""
try:
request_url = BASE_URL + url.strip()
except AttributeError:
return (1, "Invalid input, doesnt have strip() method")
response = requests.get(url=request_url).json()
if not response["ok"]:
return (1, response["error"])
else:
return (0, "http://" + response["result"]["short_link3"]) | c8acbcb1641d8344ced55e5bd820f0084fc01164 | 3,635,243 |
from typing import Dict
def get_sanitized_bot_name(dict: Dict[str, int], name: str) -> str:
"""
Cut off at 31 characters and handle duplicates.
:param dict: Holds the list of names for duplicates
:param name: The name that is being sanitized
:return: A sanitized version of the name
"""
    # This doesn't work sometimes in continue_and_spawn because it doesn't understand the names already in the match
# which may be kept if the spawn IDs match. In that case it's the caller's responsibility to figure it out upstream.
name = name[:31]
base_name = name
count = 2
while name in dict:
name = f'{base_name[:27]} ({count})' # Truncate at 27 because we can have up to '(10)' appended
count += 1
dict[name] = 1
return name | 42d432610602b15b1206f0ce1bc007fdaef6b23f | 3,635,244 |
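The duplicate handling in action:

seen = {}
print(get_sanitized_bot_name(seen, 'Bot'))  # 'Bot'
print(get_sanitized_bot_name(seen, 'Bot'))  # 'Bot (2)'
print(get_sanitized_bot_name(seen, 'Bot'))  # 'Bot (3)'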
import numpy as np
def eval_nmt_bleu(model,dataset,vectorizer,args):
"""
Evaluates the trained model on the test set using the bleu_score method
from NLTK.
Parameters
----------
model : NMTModel
Trained NMT model.
dataset : Dataset
Dataset with Source/Target sentences.
vectorizer : object
Vectorizes sentences.
args : Namespace
Simulation parameters.
Returns
-------
float
Average bleu-4 score.
bleu4 : TYPE
Array of sentence bleu-4 scores.
"""
model = model.eval().to(args.device)
sampler = NMTSamplerWithMLTM(vectorizer, model)
dataset.set_split('test')
batch_generator = generate_nmt_batches(dataset,
batch_size=args.batch_size,
device=args.device)
test_results = []
for batch_dict in batch_generator:
sampler.apply_to_batch(batch_dict)
for i in range(args.batch_size):
test_results.append(sampler.get_ith_item(i, False))
bleu4 = np.array([r['bleu-4'] for r in test_results])*100
return np.mean(bleu4),bleu4 | e26bb0ab39cf7af704a32e8ed36d7df44a799f55 | 3,635,245 |
import pytest
import yaml
from _pytest.mark.structures import Mark, MarkDecorator
def load_up_the_tests(folder):
"""reads the files from the samples directory and parametrizes the test"""
tests = []
for i in folder:
if not i.path.endswith('.yml'):
continue
with open(i, 'r') as f:
out = yaml.load(f.read(), Loader=yaml.BaseLoader)
# The format of the YAML file assumed is as follows:
# description: <string>
# tests:
# - command: <sqcmd to execute in non-modal format
# data-directory: <where the data is present>, not used yet
# marks: <space separated string of marks to mark the test>
# output: |
# <json_output>
#
# - command:
# ....
if out and 'tests' in out:
for t in out['tests']:
# We use tags to dynamically mark the parametrized test
# the marks MUST be registered in pytest.ini
markers = []
if 'marks' in t:
markers = [MarkDecorator(Mark(x, [], {}))
for x in t['marks'].split()]
if 'xfail' in t:
except_err = None
if 'raises' in t['xfail']:
except_err = globals()['__builtins__'].get(
t['xfail']['raises'], None)
if except_err:
markers += [pytest.mark.xfail(
reason=t['xfail']['reason'],
raises=except_err)]
else:
if 'reason' in t['xfail']:
markers += [pytest.mark.xfail(
reason=t['xfail']['reason'])]
else:
markers += [pytest.mark.xfail()]
if markers:
tests += [pytest.param(t, marks=markers,
id=t['command'])]
else:
tests += [pytest.param(t, id=t['command'])]
return tests | 5361f7805452471cf65385ddb1901709d69245a7 | 3,635,246 |
from .algorithms.dpll import dpll_satisfiable as dpll_satisfiable_v1
from .algorithms.dpll2 import dpll_satisfiable as dpll_satisfiable_v2
def satisfiable(expr, algorithm='dpll2', all_models=False):
"""
Check satisfiability of a propositional sentence.
Returns a model when it succeeds.
Returns {true: true} for trivially true expressions.
On setting all_models to True, if given expr is satisfiable then
returns a generator of models. However, if expr is unsatisfiable
then returns a generator containing the single element False.
Examples
========
>>> satisfiable(a & ~b)
{a: True, b: False}
>>> satisfiable(a & ~a)
False
>>> satisfiable(True)
{true: True}
>>> next(satisfiable(a & ~a, all_models=True))
False
>>> models = satisfiable((a >> b) & b, all_models=True)
>>> next(models)
{a: False, b: True}
>>> next(models)
{a: True, b: True}
>>> def use_models(models):
... for model in models:
... if model:
... # Do something with the model.
... return model
... else:
... # Given expr is unsatisfiable.
... print('UNSAT')
>>> use_models(satisfiable(a >> ~a, all_models=True))
{a: False}
>>> use_models(satisfiable(a ^ a, all_models=True))
UNSAT
"""
expr = to_cnf(expr)
    if algorithm == 'dpll':
        return dpll_satisfiable_v1(expr)
    elif algorithm == 'dpll2':
        return dpll_satisfiable_v2(expr, all_models)
else:
raise NotImplementedError | 03cfa14bfa2f7812263f7ca5be98e583e7a3136c | 3,635,247 |
def dist_create_samples(net_file, K=Inf, nproc=None, U=0.0, S=0.0, V=0.0, max_iter=Inf, T=Inf, discard=False,
variance=False,
input_vars=DEFAULT_INPUTS, output_vars=DEFAULT_OUTPUTS, dual_vars=DEFAULT_DUALS,
sampler='sample_polytope_cprnd', sampler_opts=dict(), # sampler_opts keys: Main.Symbol("arg")
pl_max=None, pf_min=0.7, pf_lagging=True, print_level=0, stat_track=False, save_while=False,
save_infeasible=False, save_path="", net_path="",
model_type='PM.QCLSPowerModel', # model_type should be a string that defines a PowerModels type
r_solver='JuMP.optimizer_with_attributes(Ipopt.Optimizer, "tol" => 1e-6)',
opf_solver='JuMP.optimizer_with_attributes(Ipopt.Optimizer, "tol" => 1e-6)'):
""" Creates an AC OPF dataset for the given PowerModels network dictionary. Generates samples until one of the given stopping criteria is met.
Takes options to determine how to sample points, what information to save, and what information is printed.
Keyword arguments:
- 'net::Dict': network information stored in a PowerModels.jl format specified dictionary
- 'K::Integer': the maximum number of samples before stopping sampling
- 'U::Float': the minimum % of unique active sets sampled in the previous 1 / U samples to continue sampling
- 'S::Float': the minimum % of saved samples in the previous 1 / L samples to continue sampling
- 'V::Float': the minimum % of feasible samples that increase the variance of the dataset in the previous 1 / L samples to continue sampling
- 'T::Integer': the maximum time for the sampler to run in seconds.
- 'max_iter::Integer': maximum number of iterations for the sampler to run for.
- 'nproc::Integer': the number of processors for the sampler to run with. Defaults to the number reported by Distributed.nprocs().
    - 'sampler::Function': the sampling function to use. This function must take arguments A and b, and can take optional arguments.
- 'sampler_opts::Dict': a dictionary of optional arguments to pass to the sampler function.
- 'pl_max::Array': the maximum active load values to use when initializing the sampling space and constraining the loads. If nothing, finds the maximum load at each bus with the given relaxed model type.
- 'pf_min::Array/Float:' the minimum power factor for all loads in the system (Number) or an array of minimum power factors for each load in the system.
- 'pf_lagging::Bool': indicating if load power factors can be only lagging (True), or both lagging or leading (False).
- 'reset_level::Integer': determines how to reset the load point to be inside the polytope before sampling. 2: Reset closer to nominal load & chebyshev center, 1: Reset closer to chebyshev center, 0: Reset at chebyshev center.
- 'model_type::Type': an abstract PowerModels type indicating the network model to use for the relaxed AC-OPF formulations (Max Load & Nearest Feasible)
- 'r_solver': an optimizer constructor used for solving the relaxed AC-OPF optimization problems.
- 'opf_solver': an optimizer constructor used to find the AC-OPF optimal solution for each sample.
- 'print_level::Integer': from 0 to 3 indicating the level of info to print to console, with 0 indicating minimum printing.
- 'stat_track::Integer': from 0 to 3 indicating the level of stats info saved during each iteration 0: No information saved, 1: Feasibility, New Certificate, Added Sample, Iteration Time, 2: Variance for all input & output variables
- 'save_while::Bool': indicates whether results and stats information is saved to a csv file during processing.
    - 'save_infeasible::Bool': indicates if infeasible samples are saved. If true saves infeasible samples in a separate file from feasible samples.
- 'save_path::String:' a string with the file path to the desired result save location.
- 'net_path::String': a string with the file path to the network file.
- 'variance::Bool': indicates if dataset variance information is tracked for each unique active set.
- 'discard::Bool': indicates if samples that do not increase the variance within a unique active set are discarded.
See 'OPF-Learn: An Open-Source Framework for Creating Representative AC Optimal Power Flow Datasets'
for more information on how the AC OPF datasets are created.
Modified from AgenerateACOPFsamples.m written by Ahmed Zamzam
"""
if nproc is None:
nproc = Main.eval("Sys.CPU_THREADS")
Main.eval('Distributed.nprocs() > 1 && Distributed.rmprocs(Distributed.workers())')
Main.addprocs(nproc - 1, exeflags="--project")
Main.eval('Distributed.@everywhere using OPFLearn')
return Main.dist_create_samples(net_file, K, U=U, S=S, V=V, max_iter=max_iter, T=T, discard=discard,
variance=variance, nproc=nproc,
input_vars=input_vars, output_vars=output_vars, dual_vars=dual_vars,
sampler=Main.eval(sampler), sampler_opts=sampler_opts,
pl_max=pl_max, pf_min=pf_min, pf_lagging=pf_lagging,
print_level=print_level, stat_track=stat_track, save_while=save_while,
save_infeasible=save_infeasible, save_path=save_path, net_path=net_path,
model_type=Main.eval(model_type), r_solver=Main.eval(r_solver),
opf_solver=Main.eval(opf_solver)) | a012c019374b45e9a657b15f602b2cbc4b9cbc31 | 3,635,248 |
import re
def get_valid_filename(s):
"""
Returns the given string converted to a string that can be used for a clean
filename. Specifically, leading and trailing spaces are removed; other
spaces are converted to underscores; slashes and colons are converted to
dashes; and anything that is not a unicode alphanumeric, dash, underscore,
or dot, is removed.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
>>> get_valid_filename("library/hello-world:latest")
'library-hello-world-latest'
Copied from https://github.com/django/django/blob/20be1918e77414837178d6bf1657068c8306d50c/django/utils/encoding.py
Distributed under BSD-3 License
""" # noqa: E501
s = s.strip().replace(' ', '_').replace('/', '-').replace(':', '-')
return re.sub(r'(?u)[^-\w.]', '', s) | 7be8b5080d79b44b167fe2b1cf03108b1a36b169 | 3,635,249 |
import re
from nltk.corpus import stopwords
def clean_text(text, remove_stopwords = True):
"""
    remove artifacts, unnecessary words, etc.
"""
## regex method - remove '\n'
cleantext = re.sub(r"\\n", " ", text)
## remove '\BA'
cleantext = re.sub(r"\\BA", " ", cleantext)
## remove '\'
cleantext = re.sub(r"\\", " ", cleantext)
## remove parenthesis
cleantext = re.sub(r'\([^)]*\)', '', cleantext)
    ## replace runs of whitespace with a single space
    cleantext = re.sub(r'\s\s+', " ", cleantext)
# remove 'b''
cleantext = re.sub(r"b'","",cleantext)
## substitute % with 'percent'
cleantext = re.sub(r"[%]", "percent",cleantext)
## substitute $ with 'USD'
cleantext = re.sub(r"[$]", "USD",cleantext)
## remove '''
cleantext = re.sub(r"[\']", "", cleantext)
## remove '-'
cleantext = re.sub(r"[-']", "", cleantext)
## remove '""'
cleantext = re.sub(r"[\"]", "", cleantext)
## remove all b"
cleantext = cleantext.strip('b"')
## remove xc2
cleantext = re.sub(r"xc2", "", cleantext)
# remove xa359m
cleantext = re.sub(r"xa359m", "", cleantext)
# remove xa35.7n
cleantext = re.sub(r"xa35.7n ", "", cleantext)
# remove xa3160m
cleantext = re.sub(r"xa3160m", "", cleantext)
# remove xa35.7bn
cleantext = re.sub(r"xa35.7bn ", "", cleantext)
# remove xa3125m
cleantext = re.sub(r"xa3125m", "", cleantext)
# remove xa375m
cleantext = re.sub(r"xa375m", "", cleantext)
# remove xa3106m
cleantext = re.sub(r"xa3106m", "", cleantext)
# remove xa31.97bn
cleantext = re.sub(r"xa31.97bn", "", cleantext)
# remove xa3250m
cleantext = re.sub(r"xa3250m", "", cleantext)
# remove xa36
cleantext = re.sub(r"xa36", "", cleantext)
# remove xa310
cleantext = re.sub(r"xa310", "", cleantext)
# remove xa34
cleantext = re.sub(r"xa34", "", cleantext)
# remove xa32.50
cleantext = re.sub(r"xa32.50", "", cleantext)
# lower case
cleantext = cleantext.lower()
if remove_stopwords:
cleantext = cleantext.split()
stops = set(stopwords.words("english"))
cleantext = [w for w in cleantext if not w in stops]
cleantext = " ".join(cleantext)
return cleantext | f7ee64e905b22d62d039e94251347aa126588f09 | 3,635,250 |
def initialise_empty_cells():
"""Initialise empty dictionary of cells for the grid."""
cells = {(x, y): False for x in range(CELL_WIDTH) for y in range(CELL_HEIGHT)}
return cells | eed3b50adefa7c5bf8dff9875ee26f23217e54e5 | 3,635,251 |
def query_left(tree, index):
"""Returns sum of values between 1-index inclusive.
Args:
tree: BIT
index: Last index to include to the sum
Returns:
Sum of values up to given index
"""
res = 0
while index:
res += tree[index]
index -= (index & -index)
return res | e293194c86ad1c53a005be290ba61ef2fff097c8 | 3,635,252 |
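A hedged demo pairing `query_left` with a hypothetical companion `update` that mirrors the same low-bit index arithmetic (1-indexed tree):

def update(tree, index, delta):
    # hypothetical point-update companion to query_left
    while index < len(tree):
        tree[index] += delta
        index += index & -index

values = [3, 2, 5, 1]
tree = [0] * (len(values) + 1)
for i, v in enumerate(values, start=1):
    update(tree, i, v)

assert query_left(tree, 3) == 10  # 3 + 2 + 5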
def deleteCategory(category_name):
""" This endpoint will show category delete confirmation by GET request
and will delet the category by POST request."""
session = DBSession()
category = session.query(Category).filter_by(name=category_name).one()
if request.method == 'POST' and login_session['username']:
# CSRF fix
if not (request.args['state'] == login_session['state']):
abort(403)
session.delete(category)
session.commit()
flash('%s Successfully Deleted' % category.name)
return redirect(url_for('showCatalogs'))
elif login_session['username'] == username:
return render_template('deleteCategory.html',
login_session=login_session,
category=category)
else:
abort(403) | cf1ef2e0363347125cc270b5c07751da5a5467f8 | 3,635,253 |
def figure_defaults():
"""Generates default figure arguments.
Returns:
dict: A dictionary of the style { "argument":"value"}
"""
plot_arguments={
"fig_width":"6.0",\
"fig_height":"6.0",\
"xcols":[],\
"xvals":"",\
"xvals_colors_list":[],\
"xvals_colors":"",\
"ycols":[],\
"yvals":"",\
"yvals_colors":"",\
"title":'',\
"title_size":STANDARD_SIZES,\
"title_size_value":"10",\
"xticklabels":'.off',\
"yticklabels":".on",\
"method":['single','complete','average', 'weighted','centroid','median','ward'],\
"method_value":"ward",\
"distance":["euclidean","minkowski","cityblock","seuclidean","sqeuclidean",\
"cosine","correlation","hamming","jaccard","chebyshev","canberra",\
"braycurtis","mahalanobis","yule","matching","dice","kulsinski","rogerstanimoto",\
"russellrao","sokalmichener","sokalsneath","wminkowski"],\
"distance_value":"euclidean",\
"n_cols_cluster":"0",\
"n_rows_cluster":"0",\
"cmap":["viridis","plasma","inferno","magma","cividis","Greys","Purples",\
"Blues","Greens","Oranges","Reds","YlOrBr","YlOrRd","OrRd","PuRd",\
"RdPu","BuPu","GnBu","PuBu","YlGnBu","PuBuGn","BuGn","YlGn",\
"binary","gist_yard","gist_gray","gray","bone","pink","spring",\
"summer","autumn","winter","cool","Wistia","hot","afmhot","gist_heat",\
"copper","PiYg","PRGn","BrBG","PuOr","RdGy","RdBu","RdYlBu","Spectral",\
"coolwarm","bwr","seismic","Pastel1","Pastel2","Paired","Accent","Dark2",\
"Set1","Set2","Set3","tab10","tab20","tab20b","tab20c","flag","prism","ocean",\
"gist_earth", "gnuplot","gnuplot2","CMRmap","cubehelix","brg","hsv",\
"gist_rainbow","rainbow","jet","nipy_spectral","gist_ncar"],\
"cmap_value":"YlOrRd",\
"vmin":"",\
"vmax":"",\
"linewidths":"0",\
"linecolor":STANDARD_COLORS,\
"linecolor_value":"white",\
"color_bar_label":"",\
"center":"",\
"row_cluster":".on",\
"col_cluster":".on",\
"robust":".on",\
"col_dendogram_ratio":"0.25",\
"row_dendogram_ratio":"0.25",\
"zscore":["none","row","columns"],\
"zscore_value":"none",\
"xaxis_font_size":"10",\
"yaxis_font_size":"10",\
"annotate":".off",\
"download_format":["png","pdf","svg"],\
"downloadf":"pdf",\
"downloadn":"heatmap",\
"session_downloadn":"MySession.heatmap",\
"inputsessionfile":"Select file..",\
"session_argumentsn":"MyArguments.heatmap",\
"inputargumentsfile":"Select file.."}
return plot_arguments | 262053b3f6d94b5290f869f56cc7d162791166ac | 3,635,254 |
import tensorflow as tf
def apply_box_deltas_graph(boxes, deltas, Size=24):
"""Applies the given deltas to the given boxes.
boxes: [N, (z1, y1, x1, z2, y2, x2)] boxes to update
deltas: [N, (dz, dy, dx)] refinements to apply
"""
# center_z, center_y, center_x are the (normalized) coordinates of the centers
center_z = 0.5 * (boxes[:, 0] + boxes[:, 3])
center_y = 0.5 * (boxes[:, 1] + boxes[:, 4])
center_x = 0.5 * (boxes[:, 2] + boxes[:, 5])
# Apply deltas
center_z += deltas[:, 0]
center_y += deltas[:, 1]
center_x += deltas[:, 2]
# Convert back to z1, y1, x1, z2, y2, x2
z1 = center_z - 0.5 * Size
y1 = center_y - 0.5 * Size
x1 = center_x - 0.5 * Size
z2 = center_z + 0.5 * Size
y2 = center_y + 0.5 * Size
x2 = center_x + 0.5 * Size
result = tf.stack([z1, y1, x1, z2, y2, x2], axis=1, name="apply_box_deltas_out")
return result | 9a9c6a8c40f53d0956533a55815e65a2fa94eaf1 | 3,635,255 |
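A minimal usage sketch for apply_box_deltas_graph; the box and delta values below are made up for illustration:
import tensorflow as tf
boxes = tf.constant([[0., 0., 0., 24., 24., 24.]])   # one 24-voxel cube at the origin
deltas = tf.constant([[2., -1., 3.]])                # shift its center by (dz, dy, dx)
print(apply_box_deltas_graph(boxes, deltas))
# -> [[ 2. -1.  3. 26. 23. 27.]] : same fixed size, recentered by the deltas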
import numpy as np
def calculate_number_of_peaks_gottschalk_80_rule(peak_to_measure, spread):
"""
Calculate number of peaks optimal for SOBP optimization
on given spread using Gottschalk 80% rule.
"""
width = peak_to_measure.width_at(val=0.80)
    n_of_optimal_peaks = int(np.ceil(spread / width))
return n_of_optimal_peaks + 1 | 2204222a3df6ebe4bebb62f5e4c53311cadaaa77 | 3,635,256 |
def maxsum(sequence):
"""Return maximum sum."""
maxsofar, maxendinghere = 0, 0
for x in sequence:
# invariant: ``maxendinghere`` and ``maxsofar`` are accurate for ``x[0..i-1]``
maxendinghere = max(maxendinghere + x, 0)
maxsofar = max(maxsofar, maxendinghere)
return maxsofar | 884d8b5dd20a0a35ff79c64bc6151b0d8ae7f5a0 | 3,635,257 |
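A worked example for maxsum, using the classic array from Programming Pearls:
print(maxsum([31, -41, 59, 26, -53, 58, 97, -93, -23, 84]))  # -> 187 (59+26-53+58+97)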
import torch
def matrix_from_angles(rot):
"""
Create a rotation matrix from a triplet of rotation angles.
Args:
      rot: a torch.Tensor of shape [..., 3], where the last dimension holds the rotation angles along x, y, and z.
    Returns:
      A torch.Tensor of shape [..., 3, 3], where the last two dimensions are the rotation matrix.
    This function mimics _euler2mat from struct2depth/project.py, for backward compatibility,
    but wraps a `from_euler` helper (defined elsewhere in this module) instead of reimplementing it.
The negation and transposition are needed to bridge the differences between the two.
"""
rank = len(rot.shape)
# Swap the two last dimensions
perm = torch.cat([torch.arange(start=0, end=rank-1), torch.tensor([rank]), torch.tensor([rank - 1])], dim=0)
return from_euler(-rot).permute(tuple(perm.numpy())) | 2108cf7d59d5f641ef7a9813f32cae7a33b00322 | 3,635,258 |
import sys
from tkinter import *  # provides Tk, Frame, Text, X, YES, and the other constants used below
def textbox(msg="", title=" ", text="", codebox=0, get_updated_text=None):
"""
Display some text in a proportional font with line wrapping at word breaks.
This function is suitable for displaying general written text.
The text parameter should be a string, or a list or tuple of lines to be
displayed in the textbox.
:param str msg: the msg to be displayed
:param str title: the window title
:param str text: what to display in the textbox
:param str codebox: if 1, act as a codebox
"""
if msg is None:
msg = ""
if title is None:
title = ""
global boxRoot, __replyButtonText, __widgetTexts, buttonsFrame
global rootWindowPosition
choices = ["OK"]
__replyButtonText = choices[0]
boxRoot = Tk()
# Quit when x button pressed
boxRoot.protocol('WM_DELETE_WINDOW', boxRoot.quit)
screen_width = boxRoot.winfo_screenwidth()
screen_height = boxRoot.winfo_screenheight()
root_width = int((screen_width * 0.8))
root_height = int((screen_height * 0.5))
root_xpos = int((screen_width * 0.1))
root_ypos = int((screen_height * 0.05))
boxRoot.title(title)
boxRoot.iconname('Dialog')
rootWindowPosition = "+0+0"
boxRoot.geometry(rootWindowPosition)
boxRoot.expand = NO
boxRoot.minsize(root_width, root_height)
rootWindowPosition = '+{0}+{1}'.format(root_xpos, root_ypos)
boxRoot.geometry(rootWindowPosition)
mainframe = Frame(master=boxRoot)
mainframe.pack(side=TOP, fill=BOTH, expand=YES)
# ---- put frames in the window -----------------------------------
# we pack the textboxFrame first, so it will expand first
textboxFrame = Frame(mainframe, borderwidth=3)
textboxFrame.pack(side=BOTTOM, fill=BOTH, expand=YES)
message_and_buttonsFrame = Frame(mainframe)
message_and_buttonsFrame.pack(side=TOP, fill=X, expand=NO)
messageFrame = Frame(message_and_buttonsFrame)
messageFrame.pack(side=LEFT, fill=X, expand=YES)
buttonsFrame = Frame(message_and_buttonsFrame)
buttonsFrame.pack(side=RIGHT, expand=NO)
# -------------------- put widgets in the frames --------------------
# put a textArea in the top frame
if codebox:
character_width = int((root_width * 0.6) / MONOSPACE_FONT_SIZE)
textArea = Text(
textboxFrame, height=25, width=character_width, padx="2m", pady="1m")
textArea.configure(wrap=NONE)
textArea.configure(font=(MONOSPACE_FONT_FAMILY, MONOSPACE_FONT_SIZE))
else:
character_width = int((root_width * 0.6) / MONOSPACE_FONT_SIZE)
textArea = Text(
textboxFrame, height=25, width=character_width, padx="2m", pady="1m"
)
textArea.configure(wrap=WORD)
textArea.configure(
font=(PROPORTIONAL_FONT_FAMILY, PROPORTIONAL_FONT_SIZE))
# some simple keybindings for scrolling
mainframe.bind("<Next>", textArea.yview_scroll(1, PAGES))
mainframe.bind("<Prior>", textArea.yview_scroll(-1, PAGES))
mainframe.bind("<Right>", textArea.xview_scroll(1, PAGES))
mainframe.bind("<Left>", textArea.xview_scroll(-1, PAGES))
mainframe.bind("<Down>", textArea.yview_scroll(1, UNITS))
mainframe.bind("<Up>", textArea.yview_scroll(-1, UNITS))
# add a vertical scrollbar to the frame
rightScrollbar = Scrollbar(
textboxFrame, orient=VERTICAL, command=textArea.yview)
textArea.configure(yscrollcommand=rightScrollbar.set)
# add a horizontal scrollbar to the frame
bottomScrollbar = Scrollbar(
textboxFrame, orient=HORIZONTAL, command=textArea.xview)
textArea.configure(xscrollcommand=bottomScrollbar.set)
# pack the textArea and the scrollbars. Note that although we must define
# the textArea first, we must pack it last, so that the bottomScrollbar will
# be located properly.
# Note that we need a bottom scrollbar only for code.
# Text will be displayed with wordwrap, so we don't need to have a horizontal
# scroll for it.
if codebox:
bottomScrollbar.pack(side=BOTTOM, fill=X)
rightScrollbar.pack(side=RIGHT, fill=Y)
textArea.pack(side=LEFT, fill=BOTH, expand=YES)
# ---------- put a msg widget in the msg frame-------------------
messageWidget = Message(
messageFrame, anchor=NW, text=msg, width=int(root_width * 0.9))
messageWidget.configure(
font=(PROPORTIONAL_FONT_FAMILY, PROPORTIONAL_FONT_SIZE))
messageWidget.pack(side=LEFT, expand=YES, fill=BOTH, padx='1m', pady='1m')
# put the buttons in the buttonsFrame
okButton = Button(
buttonsFrame, takefocus=YES, text="Update", height=1, width=6)
okButton.pack(
expand=NO, side=TOP, padx='2m', pady='1m', ipady="1m", ipadx="2m")
def __update_myself(event):
new_text = get_updated_text()
        textArea.delete('1.0', END)
textArea.insert('end', new_text, "normal")
# for the commandButton, bind activation events to the activation event
# handler
    commandButton = okButton
    handler = __update_myself
for selectionEvent in ["Return", "Button-1", "Escape"]:
commandButton.bind("<%s>" % selectionEvent, handler)
# ----------------- the action begins ------------------------------------
try:
# load the text into the textArea
if isinstance(text, str):
pass
else:
try:
text = "".join(text) # convert a list or a tuple to a string
except:
msgbox(
"Exception when trying to convert {} to text in textArea".format(type(text)))
sys.exit(16)
textArea.insert('end', text, "normal")
except:
msgbox("Exception when trying to load the textArea.")
sys.exit(16)
try:
okButton.focus_force()
except:
msgbox("Exception when trying to put focus on okButton.")
sys.exit(16)
boxRoot.mainloop()
# this line MUST go before the line that destroys boxRoot
    areaText = textArea.get('1.0', 'end-1c')
boxRoot.destroy()
return areaText | c017053c98c69c57550d4e7d73b30264c253d5f4 | 3,635,259 |
import numpy as np
from pandas import DataFrame as df  # the surrounding module appears to alias DataFrame as `df`
def get_fov_stats(mrcnn, low_confidence, discordant,
extreme, artifacts,
roi_mask= None, keep_thresh = 0.5,
fov_dims= (256,256), shift_step= 128):
"""
Gets potential FOVs along with their associated
statistics.
Args:
        * mrcnn [m, n, 4] - postprocessed output from mrcnn (4 channels)
* low_confidence, discordant, extreme, artifacts (all [m, n]) -
output from get_nuclei_for_review
* roi_mask - [m,n] where 1 indicates region inside ROI
* keep_thresh - what fraction of FOV needs to be inside ROI
Returns:
df of FOV stats
"""
print("\tGetting FOV proposal stats ...")
# Get FOV bounds for potential FOVs for review
M, N = mrcnn.shape[:2]
FOV_bounds = get_fov_bounds(M, N, fov_dims=fov_dims, shift_step=shift_step)
    # Assume the entire field is the ROI mask if not given
fov_n_pixels = fov_dims[0] * fov_dims[1]
if roi_mask is None:
roi_mask = np.ones((M, N))
FOV_stats = df(
index= np.arange(len(FOV_bounds)),
columns= [
"fovidx", "xmin", "xmax", "ymin" , "ymax",
"n_total", "predominant_label",
"ratio_predominant", "ratio_non_predominant",
"n_low_confidence", "n_discordant",
"n_extreme", "n_artifacts",
])
n_fov_proposals = len(FOV_bounds)
# fovidx = 0; fovbounds = FOV_bounds[fovidx]
for fovidx, fovbounds in enumerate(FOV_bounds):
if fovidx % 500 == 0:
print("\t\tFOV %d of %d" % (fovidx, n_fov_proposals))
# get bounds
(rmin, rmax, cmin, cmax) = fovbounds
# check that at least part of FOV is inside ROI
is_inside = roi_mask[rmin:rmax, cmin:cmax]
if np.sum(is_inside[:]) / fov_n_pixels < keep_thresh:
continue
# slice
label_fov = mrcnn[rmin:rmax, cmin:cmax, 0]
instance_fov = mrcnn[rmin:rmax, cmin:cmax, 1]
low_confidence_fov = low_confidence[rmin:rmax, cmin:cmax]
discordant_fov = discordant[rmin:rmax, cmin:cmax]
extreme_fov = extreme[rmin:rmax, cmin:cmax]
artifact_fov = artifacts[rmin:rmax, cmin:cmax]
# add location
FOV_stats.loc[fovidx, "fovidx"] = fovidx
FOV_stats.loc[fovidx, "xmin"] = int(cmin)
FOV_stats.loc[fovidx, "xmax"] = int(cmax)
FOV_stats.loc[fovidx, "ymin"] = int(rmin)
FOV_stats.loc[fovidx, "ymax"] = int(rmax)
# add stats to help choose helpful FOVs for review
unique_labels = list(np.unique(label_fov))
unique_labels.remove(0)
label_nucleus_counts = [
len(np.unique(instance_fov[label_fov == j]))
for j in unique_labels]
# handle if no relevant instances in FOV
if len(label_nucleus_counts) < 1:
continue
n_total = np.sum(label_nucleus_counts)
ratio_predominant = int(100 * np.max(label_nucleus_counts) / n_total)
FOV_stats.loc[fovidx, "n_total"] = n_total
FOV_stats.loc[fovidx, "predominant_label"] = int(unique_labels[np.argmax(label_nucleus_counts)])
FOV_stats.loc[fovidx, "ratio_predominant"] = ratio_predominant
FOV_stats.loc[fovidx, "ratio_non_predominant"] = 100 - ratio_predominant
# add stats for to help choose FOVs based on lack of
# confidence in whether nuclei are correctly classified
FOV_stats.loc[fovidx, "n_low_confidence"] = len(
np.unique(instance_fov[low_confidence_fov]))
FOV_stats.loc[fovidx, "n_discordant"] = len(
np.unique(instance_fov[discordant_fov]))
FOV_stats.loc[fovidx, "n_extreme"] = len(
np.unique(instance_fov[extreme_fov]))
FOV_stats.loc[fovidx, "n_artifacts"] = len(
np.unique(instance_fov[artifact_fov]))
return FOV_stats.dropna() | 767a101900ebdf68e8fbf3fedac8bfaa58b27c15 | 3,635,260 |
import torch
from torch.nn.functional import logsigmoid
def negative_sampling_loss(pos_dot, neg_dot, size_average=True, reduce=True):
    """
    :param pos_dot: The first tensor of SkipGram's output: (#mini_batches)
    :param neg_dot: The second tensor of SkipGram's output: (#mini_batches, #negatives)
    :param size_average:
    :param reduce:
    :return: a tensor holding the negative sampling loss
    """
loss = - (
logsigmoid(pos_dot) + torch.sum(logsigmoid(-neg_dot), dim=1)
)
if not reduce:
return loss
if size_average:
return torch.mean(loss)
return torch.sum(loss) | 18f05e138010b98ef8abaec35dd37030b08ecfcb | 3,635,261 |
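A quick shape-checking sketch for negative_sampling_loss with random scores:
import torch
pos = torch.randn(4)      # one positive dot product per example
neg = torch.randn(4, 5)   # five negative samples per example
loss = negative_sampling_loss(pos, neg)
print(loss.shape)         # torch.Size([]) - scalar mean loss by default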
import numpy as np
def _has_externally_shared_axis(ax1: "matplotlib.axes", compare_axis: "str") -> bool:
"""
Return whether an axis is externally shared.
Parameters
----------
ax1 : matplotlib.axes
Axis to query.
compare_axis : str
`"x"` or `"y"` according to whether the X-axis or Y-axis is being
compared.
Returns
-------
bool
`True` if the axis is externally shared. Otherwise `False`.
Notes
-----
If two axes with different positions are sharing an axis, they can be
referred to as *externally* sharing the common axis.
If two axes sharing an axis also have the same position, they can be
referred to as *internally* sharing the common axis (a.k.a twinning).
_handle_shared_axes() is only interested in axes externally sharing an
axis, regardless of whether either of the axes is also internally sharing
with a third axis.
"""
if compare_axis == "x":
axes = ax1.get_shared_x_axes()
elif compare_axis == "y":
axes = ax1.get_shared_y_axes()
else:
raise ValueError(
"_has_externally_shared_axis() needs 'x' or 'y' as a second parameter"
)
axes = axes.get_siblings(ax1)
# Retain ax1 and any of its siblings which aren't in the same position as it
ax1_points = ax1.get_position().get_points()
for ax2 in axes:
if not np.array_equal(ax1_points, ax2.get_position().get_points()):
return True
return False | 6f71975e62ba763e2fece42e4d3d760e12f5ddc5 | 3,635,262 |
def network_size(graph, n1, degrees_of_separation=None):
""" Determines the nodes within the range given by
a degree of separation
:param graph: Graph
:param n1: start node
:param degrees_of_separation: integer
:return: set of nodes within given range
"""
if not isinstance(graph, (BasicGraph, Graph, Graph3D)):
raise TypeError(f"Expected BasicGraph, Graph or Graph3D, not {type(graph)}")
if n1 not in graph:
raise ValueError(f"{n1} not in graph")
if degrees_of_separation is not None:
if not isinstance(degrees_of_separation, int):
raise TypeError(f"Expected degrees_of_separation to be integer, not {type(degrees_of_separation)}")
network = {n1}
q = set(graph.nodes(from_node=n1))
scan_depth = 1
while True:
if not q: # then there's no network.
break
if degrees_of_separation is not None:
if scan_depth > degrees_of_separation:
break
new_q = set()
for peer in q:
if peer in network:
continue
else:
network.add(peer)
new_peers = set(graph.nodes(from_node=peer)) - network
new_q.update(new_peers)
q = new_q
scan_depth += 1
return network | f62095abe184d818d25451b38430eaa331a71654 | 3,635,263 |
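A hedged usage sketch; it assumes the graph-theory package's Graph, which accepts from_list=[(n1, n2, distance), ...]:
g = Graph(from_list=[(1, 2, 1), (2, 3, 1), (3, 4, 1)])
print(network_size(g, 1, degrees_of_separation=1))  # {1, 2}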
def get_conserved_sequences(cur):
"""docstring for get_conserved_sequences"""
cur.execute("SELECT sureselect_probe_counts.'sureselect.seq', \
sureselect_probe_counts.cnt, \
sureselect_probe_counts.data_source, \
cons.cons \
FROM sureselect_probe_counts, cons \
WHERE sureselect_probe_counts.'sureselect.id' = cons.id \
AND data_source = 'conservation'")
return cur.fetchall() | e518d22b7ac2ba072c75a76f72114009eed6ce7c | 3,635,264 |
from typing import Any
def delete_empty_keys(data: Any):
    """Build a dictionary copy of a model, omitting fields that are None."""
    # Remove empty fields from the dict
    # https://stackoverflow.com/questions/5844672/delete-an-element-from-a-dictionary#5844700
    dic = data.dict()
    return {i: dic[i] for i in dic if dic[i] is not None} | db190b021bb00ae3870e205bc27bf28dd09e29c3 | 3,635,265
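A small sketch, assuming `data` is a pydantic v1 model (anything with a .dict() method works):
from pydantic import BaseModel
class Item(BaseModel):
    name: str
    note: str = None
print(delete_empty_keys(Item(name="widget")))  # {'name': 'widget'}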
import os
def scan_for_images(tmos_image_dir):
"""Scan for TMOS disk images"""
return_image_files = []
for image_file in os.listdir(tmos_image_dir):
filepath = "%s/%s" % (tmos_image_dir, image_file)
if os.path.isfile(filepath):
extract_dir = "%s/%s" % (tmos_image_dir,
os.path.splitext(image_file)[0])
if not os.path.exists(extract_dir):
os.makedirs(extract_dir)
arch_ext = os.path.splitext(image_file)[1]
if arch_ext in ARCHIVE_EXTS:
if ARCHIVE_EXTS[arch_ext] == 'zipfile':
extract_zip_archive(filepath, extract_dir)
if ARCHIVE_EXTS[arch_ext] == 'tarfile':
extract_tar_archive(filepath, extract_dir)
for extracted_file in os.listdir(extract_dir):
if os.path.splitext(extracted_file)[1] in IMAGE_TYPES:
image_filepath = "%s/%s" % (extract_dir, extracted_file)
if os.path.splitext(extracted_file)[1] == '.vmdk':
convert_vmdk(image_filepath,
VBOXMANAGE_CLI_PATCH_VARIANT)
return_image_files.append(image_filepath)
return return_image_files | 0cecfa07c80c75d75af0e15d9277cbb5ab3153c9 | 3,635,266 |
def find_language(article_content):
"""Given an article's xml content as string, returns the article's language"""
if article_content.Language is None:
return None
return article_content.Language.string | 4a228779992b156d01bc25501677556a5c9b7d39 | 3,635,267 |
import json
def loadDictFromFile(f):
"""
Load a DotDict from the JSON-format file *f*.
"""
return dotDict.convertToDotDictRecurse(json.load(f)) | 8b2ddb8f00675a05f129f33328e8d17d5f34a96a | 3,635,268 |
def create_problem_from_type_base(problem):
"""
Creates OptProblem from type-base problem.
Parameters
----------
problem : Object
"""
p = OptProblem()
# Init attributes
p.phi = problem.phi
p.gphi = problem.gphi
p.Hphi = problem.Hphi
p.A = problem.A
p.b = problem.b
p.f = problem.f
p.J = problem.J
p.H_combined = problem.H_combined
p.u = problem.u
p.l = problem.l
p.x = problem.x
p.P = None
p.lam = None
p.nu = None
p.mu = None
p.pi = None
p.wrapped_problem = problem
# Methods
def eval(cls, x):
cls.wrapped_problem.eval(x)
cls.phi = cls.wrapped_problem.phi
cls.gphi = cls.wrapped_problem.gphi
cls.Hphi = cls.wrapped_problem.Hphi
cls.f = cls.wrapped_problem.f
cls.J = cls.wrapped_problem.J
def combine_H(cls, coeff, ensure_psd=False):
cls.wrapped_problem.combine_H(coeff, ensure_psd)
cls.H_combined = cls.wrapped_problem.H_combined
p.eval = MethodType(eval, p)
p.combine_H = MethodType(combine_H, p)
# Return
return p | c631e3f27d49b288e85e6043a81f658f65f962e2 | 3,635,269 |
import re
def geturls(str1):
"""returns the URIs in a string"""
    URLPAT = r'https?:[\w/\.:;+\-~\%#\$?=&,()]+|www\.[\w/\.:;+\-~\%#\$?=&,()]+|' + \
             r'ftp:[\w/\.:;+\-~\%#?=&,]+'
return re.findall(URLPAT, str1) | 3d127a3c4250d7b013d9198e21cfb87f7909de8d | 3,635,270 |
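For example:
print(geturls("see https://example.com/a?b=1 and www.test.org/page"))
# -> ['https://example.com/a?b=1', 'www.test.org/page']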
import requests
def get_list(imid: str) -> requests.Response:
""" Return the requests.Response containing
the list of images for a given image-net.org
collection ID.
"""
imlist = requests.get(LIST_URL.format(imid=imid))
return imlist | da63e021e594eff6ee672e3aa234522a3d23e34d | 3,635,271 |
def minimum(x1, x2):
"""Element-wise minimum of input variables.
Args:
x1 (~chainer.Variable): Input variables to be compared.
x2 (~chainer.Variable): Input variables to be compared.
Returns:
~chainer.Variable: Output variable.
"""
return Minimum().apply((x1, x2))[0] | b511edb9c13abf3a0df5dad48d1fffcf1d96c82a | 3,635,272 |
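A hedged usage sketch, assuming Chainer is installed and `Minimum` is its element-wise min function node:
import numpy as np
from chainer import Variable
a = Variable(np.array([1., 5., 3.], dtype=np.float32))
b = Variable(np.array([2., 4., 2.], dtype=np.float32))
print(minimum(a, b).array)  # [1. 4. 2.]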
import tokenize
import sys
from typed_ast import ast3
def retype_file(src, pyi_dir, targets, *, quiet=False, hg=False):
"""Retype `src`, finding types in `pyi_dir`. Save in `targets`.
The file should remain formatted exactly as it was before, save for:
- annotations
- additional imports needed to satisfy annotations
- additional module-level names needed to satisfy annotations
Type comments in sources are normalized to type annotations.
"""
with tokenize.open(src) as src_buffer:
src_encoding = src_buffer.encoding
src_node = lib2to3_parse(src_buffer.read())
try:
with open((pyi_dir / src.name).with_suffix('.pyi')) as pyi_file:
pyi_txt = pyi_file.read()
except FileNotFoundError:
if not quiet:
print(
f'warning: .pyi file for source {src} not found in {pyi_dir}',
file=sys.stderr,
)
else:
pyi_ast = ast3.parse(pyi_txt)
assert isinstance(pyi_ast, ast3.Module)
reapply_all(pyi_ast.body, src_node)
fix_remaining_type_comments(src_node)
targets.mkdir(parents=True, exist_ok=True)
with open(targets / src.name, 'w', encoding=src_encoding) as target_file:
target_file.write(lib2to3_unparse(src_node, hg=hg))
return targets / src.name | a31dc990ef46a1d3dec3e4be6b54c5ce2e310195 | 3,635,273 |
import torch
from random import shuffle
def train(train_dataset : dict, validation_dataset : dict, batch_size : int = 16,
num_epochs : int = 5000, allow_cuda : bool = True,
use_shuffle : bool = True, save_criterion : callable = None,
stop_criterion : callable = None, save_on_finish : bool = True) -> dict:
"""
Perform training
================
Parameters
----------
train_dataset : dict[dict]
Dataset of the training. Dataset should have keys for all three models
but keys' content can be empty. Models that belong to empty dataset will
not get trained.
validation_dataset : dict[dict]
Dataset of the validation. Dataset should have keys for all three models
but keys' content can be empty. Models that belong to empty dataset will
not get trained.
batch_size : int, optional (16 if omitted)
Size of a batch.
    num_epochs : int, optional (5000 if omitted)
        Number of epochs to train. In the process the count of epochs begins
        with 1 and also includes the value of num_epochs.
allow_cuda : bool, optional (True if omitted)
        Whether to allow the use of GPU training on CUDA devices or not. If
        this option is set to True and the hardware doesn't support CUDA,
        the training will happen on the CPU.
use_shuffle : bool, optional (True if omitted)
Whether to shuffle dataset in each epoch or not.
    save_criterion : callable|NoneType, optional (None if omitted)
        Criterion function to decide whether to save the model at the end of
        the epoch or not.
    stop_criterion : callable|NoneType, optional (None if omitted)
        Criterion function to decide whether to stop the training at the end
        of the epoch or not.
    save_on_finish : bool = True
        Whether to save the model after training ends or not.
Returns
-------
dict[dict[key:list]]
The result of the training.
Raises
------
    KeyError
        When the dataset doesn't have all the keys 'model_1', 'model_2' and
        'model_3'. Keys can be empty but they must exist.
Notes
-----
This function is split into sections with one-line comments. This is to
let developers use of the code in their own approach.
"""
global Models
# Default values
Learning_rate_model_1 = 5e1
Learning_rate_model_2 = 5e1
Learning_rate_model_3 = 5e1
# Criterion functions
if save_criterion is None:
save_criterion = default_save_criterion
if stop_criterion is None:
stop_criterion = default_stop_criterion
# Initialize result
result = get_inital_wrapper()
# Check dataset's keys
for key in ['model_1', 'model_2', 'model_3']:
if key not in train_dataset:
raise KeyError('There is at least a missing key in the train ' +
'dataset.')
for key in ['model_1', 'model_2', 'model_3']:
if key not in validation_dataset:
raise KeyError('There is at least a missing key in the validation' +
'dataset.')
# Set device
if allow_cuda:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
else:
device = torch.device('cpu')
# Model_1 training
train_keys = list(train_dataset['model_1'].keys())
validation_keys = list(validation_dataset['model_1'].keys())
if len(train_keys) > 0:
consoleprint('Training of Model 1. Starts.')
# Prepare model
if Models.MODEL_1 is None:
Models.MODEL_1 = Model_1()
Models.MODEL_1.to(device)
# Prepare model helpers
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(Models.MODEL_1.parameters(),
lr=Learning_rate_model_1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)
train_losses, train_accuracies = [], []
validation_losses, validation_accuracies = [], []
train_len = len(train_dataset['model_1'])
validation_len = len(validation_dataset['model_1'])
# Prepare helper variables
for epoch in range(1, num_epochs + 1):
# Training
section_losses = []
section_corrects = 0
Models.MODEL_1.train()
if use_shuffle:
shuffle(train_keys)
for inputs, offsets, targets in batchify(train_dataset['model_1'],
train_keys,
batch_size):
inputs = inputs.to(device)
offsets = offsets.to(device)
targets = targets.to(device)
optimizer.zero_grad()
prediction = Models.MODEL_1(inputs, offsets)
loss = criterion(prediction, targets)
loss.backward()
                # Gradient clipping here reduces the risk of exploding gradients!
torch.nn.utils.clip_grad_norm_(Models.MODEL_1.parameters(), 0.1)
optimizer.step()
predicted_values = torch.nn.functional.log_softmax(prediction,
dim=1)
for row in (predicted_values == targets).tolist():
for correct in row:
if correct:
section_corrects += 1
section_losses += [loss.item() for i in range(targets.size(0))]
train_losses.append(sum(section_losses) / train_len)
train_accuracies.append(section_corrects / (train_len * 9))
consoleprint('Epoch: {}/{} - TRAIN - loss: {:.6f} - accuracy: {:.6f}'
.format(epoch, num_epochs, train_losses[-1],
train_accuracies[-1]))
# Decreasing learning rate, high start value is needed.
if epoch % 500 == 0:
scheduler.step()
# Validation
if len(validation_dataset['model_1']) > 0:
                # Validation
section_losses = []
section_corrects = 0
Models.MODEL_1.eval()
with torch.no_grad():
for inputs, offsets, targets in batchify(validation_dataset
['model_1'],
validation_keys,
batch_size):
inputs = inputs.to(device)
offsets = offsets.to(device)
targets = targets.to(device)
prediction = Models.MODEL_1(inputs, offsets)
loss = criterion(prediction, targets)
predicted_values = \
torch.nn.functional.log_softmax(prediction, dim=1)
for row in (predicted_values == targets).tolist():
for correct in row:
if correct:
section_corrects += 1
section_losses += [loss.item()
for i in range(targets.size(0))]
validation_losses.append(sum(section_losses) / validation_len)
validation_accuracies.append(section_corrects /
(validation_len * 9))
consoleprint('Epoch: {}/{} - VALIDATION - loss: {:.6f} - accuracy: {:.6f}'
.format(epoch, num_epochs, validation_losses[-1],
validation_accuracies[-1]))
# Check if save
if save_criterion(epoch, num_epochs, train_losses, validation_losses,
train_accuracies, validation_accuracies):
save_model('model_1')
# Check if stop
if stop_criterion(epoch, num_epochs, train_losses, validation_losses,
train_accuracies, validation_accuracies):
break
result['model_1']['train_losses'] = train_losses[:]
result['model_1']['train_accuracies'] = train_accuracies[:]
result['model_1']['validation_losses'] = validation_losses[:]
result['model_1']['validation_accuracies'] = validation_accuracies[:]
consoleprint('Training of Model 1. Stopped.')
# Model_2 training
train_keys = list(train_dataset['model_2'].keys())
validation_keys = list(validation_dataset['model_2'].keys())
if len(train_keys) > 0:
consoleprint('Training of Model 2. Starts.')
# Prepare model
if Models.MODEL_2 is None:
Models.MODEL_2 = Model_2()
Models.MODEL_2.to(device)
# Prepare model helpers
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(Models.MODEL_2.parameters(),
lr=Learning_rate_model_2)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)
train_losses, train_accuracies = [], []
validation_losses, validation_accuracies = [], []
train_len = len(train_dataset['model_2'])
validation_len = len(validation_dataset['model_2'])
# Prepare helper variables
Models.MODEL_1.eval()
for epoch in range(1, num_epochs + 1):
# Training
section_losses = []
section_corrects = 0
Models.MODEL_2.train()
if use_shuffle:
shuffle(train_keys)
for inputs, offsets, targets in batchify(train_dataset['model_2'],
train_keys,
batch_size):
inputs = inputs.to(device)
offsets = offsets.to(device)
targets = targets.to(device)
model_1_outputs = Models.MODEL_1(inputs, offsets)
optimizer.zero_grad()
prediction = Models.MODEL_2(inputs, offsets, model_1_outputs)
loss = criterion(prediction, targets)
loss.backward()
                # Gradient clipping here reduces the risk of exploding gradients!
torch.nn.utils.clip_grad_norm_(Models.MODEL_2.parameters(), 0.1)
optimizer.step()
for row in (prediction == targets).tolist():
for correct in row:
if correct:
section_corrects += 1
section_losses += [loss.item() for i in range(targets.size(0))]
train_losses.append(sum(section_losses) / train_len)
train_accuracies.append(section_corrects / (train_len * 18))
consoleprint('Epoch: {}/{} - TRAIN - loss: {:.6f} - accuracy: {:.6f}'
.format(epoch, num_epochs, train_losses[-1],
train_accuracies[-1]))
# Decreasing learning rate, high start value is needed.
if epoch % 500 == 0:
scheduler.step()
# Validation
if len(validation_dataset['model_2']) > 0:
                # Validation
section_losses = []
section_corrects = 0
Models.MODEL_2.eval()
with torch.no_grad():
for inputs, offsets, targets in batchify(validation_dataset
['model_2'],
validation_keys,
batch_size):
inputs = inputs.to(device)
offsets = offsets.to(device)
targets = targets.to(device)
model_1_outputs = Models.MODEL_1(inputs, offsets)
prediction = Models.MODEL_2(inputs, offsets,
model_1_outputs)
loss = criterion(prediction, targets)
for row in (prediction == targets).tolist():
for correct in row:
if correct:
section_corrects += 1
section_losses += [loss.item()
for i in range(targets.size(0))]
validation_losses.append(sum(section_losses) / validation_len)
validation_accuracies.append(section_corrects
/ (validation_len * 18))
consoleprint('Epoch: {}/{} - VALIDATION - loss: {:.6f} - accuracy: {:.6f}'
.format(epoch, num_epochs, validation_losses[-1],
validation_accuracies[-1]))
# Check if save
if save_criterion(epoch, num_epochs, train_losses, validation_losses,
train_accuracies, validation_accuracies):
save_model('model_2')
# Check if stop
if stop_criterion(epoch, num_epochs, train_losses, validation_losses,
train_accuracies, validation_accuracies):
break
result['model_2']['train_losses'] = train_losses[:]
result['model_2']['train_accuracies'] = train_accuracies[:]
result['model_2']['validation_losses'] = validation_losses[:]
result['model_2']['validation_accuracies'] = validation_accuracies[:]
consoleprint('Training of Model 2. Stopped.')
# Model_3 training
train_keys = list(train_dataset['model_3'].keys())
validation_keys = list(validation_dataset['model_3'].keys())
if len(train_keys) > 0:
consoleprint('Training of Model 3. Starts.')
# Prepare model
if Models.MODEL_3 is None:
Models.MODEL_3 = Model_3()
Models.MODEL_3.to(device)
# Prepare model helpers
criterion = torch.nn.BCELoss()
optimizer = torch.optim.SGD(Models.MODEL_3.parameters(),
lr=Learning_rate_model_3)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)
train_losses, train_accuracies = [], []
validation_losses, validation_accuracies = [], []
train_len = len(train_dataset['model_3'])
validation_len = len(validation_dataset['model_3'])
# Prepare helper variables
for epoch in range(1, num_epochs + 1):
# Training
section_losses = []
section_corrects = 0
Models.MODEL_3.train()
if use_shuffle:
shuffle(train_keys)
for inputs, offsets, targets in batchify(train_dataset['model_3'],
train_keys,
batch_size):
inputs = inputs.to(device)
offsets = offsets.to(device)
targets = targets.unsqueeze(dim=-1)
targets = targets.to(device)
optimizer.zero_grad()
prediction = Models.MODEL_3(inputs, offsets)
loss = criterion(prediction, targets)
loss.backward()
                # Gradient clipping here reduces the risk of exploding gradients!
torch.nn.utils.clip_grad_norm_(Models.MODEL_3.parameters(), 0.1)
optimizer.step()
for row in (prediction == targets).tolist():
for correct in row:
if correct:
section_corrects += 1
section_losses += [loss.item() for i in range(targets.size(0))]
train_losses.append(sum(section_losses) / train_len)
train_accuracies.append(section_corrects / train_len)
consoleprint('Epoch: {}/{} - TRAIN - loss: {:.6f} - accuracy: {:.6f}'
.format(epoch, num_epochs, train_losses[-1],
train_accuracies[-1]))
# Decreasing learning rate, high start value is needed.
if epoch % 500 == 0:
scheduler.step()
# Validation
if len(validation_dataset['model_3']) > 0:
                # Validation
section_losses = []
section_corrects = 0
Models.MODEL_3.eval()
with torch.no_grad():
for inputs, offsets, targets in batchify(validation_dataset
['model_3'],
validation_keys,
batch_size):
inputs = inputs.to(device)
offsets = offsets.to(device)
targets = targets.unsqueeze(dim=-1)
targets = targets.to(device)
prediction = Models.MODEL_3(inputs, offsets)
loss = criterion(prediction, targets)
for row in (prediction == targets).tolist():
for correct in row:
if correct:
section_corrects += 1
section_losses += [loss.item()
for i in range(targets.size(0))]
validation_losses.append(sum(section_losses) / validation_len)
validation_accuracies.append(section_corrects / validation_len)
consoleprint('Epoch: {}/{} - VALIDATION - loss: {:.6f} - accuracy: {:.6f}'
.format(epoch, num_epochs, validation_losses[-1],
validation_accuracies[-1]))
# Check if save
if save_criterion(epoch, num_epochs, train_losses, validation_losses,
train_accuracies, validation_accuracies):
save_model('model_3')
# Check if stop
if stop_criterion(epoch, num_epochs, train_losses, validation_losses,
train_accuracies, validation_accuracies):
break
result['model_3']['train_losses'] = train_losses[:]
result['model_3']['train_accuracies'] = train_accuracies[:]
result['model_3']['validation_losses'] = validation_losses[:]
result['model_3']['validation_accuracies'] = validation_accuracies[:]
consoleprint('Training of Model 3. Stopped.')
if save_on_finish:
save_model()
return result | db0b50be37fe1dd96bffa1c934d5a68d3ac5198e | 3,635,274 |
def reduce_to(n):
    """Return a processor that truncates a list to its first n items."""
    def reduce(items):
        if len(items) < n:
            return items
        else:
            return items[0:n]
return reduce | b9a1fb6091ef9801957c6cc64cab5485091d6801 | 3,635,275 |
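A quick usage sketch for reduce_to:
first3 = reduce_to(3)
print(first3([1, 2, 3, 4, 5]))  # [1, 2, 3]
print(first3([1, 2]))           # [1, 2] - short lists pass through untouched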
def render(renderer_name, value, request=None, package=None):
""" Using the renderer ``renderer_name`` (a template
or a static renderer), render the value (or set of values) present
in ``value``. Return the result of the renderer's ``__call__``
method (usually a string or Unicode).
If the ``renderer_name`` refers to a file on disk, such as when the
renderer is a template, it's usually best to supply the name as an
:term:`asset specification`
(e.g. ``packagename:path/to/template.pt``).
You may supply a relative asset spec as ``renderer_name``. If
the ``package`` argument is supplied, a relative renderer path
will be converted to an absolute asset specification by
combining the package ``package`` with the relative
asset specification ``renderer_name``. If ``package``
is ``None`` (the default), the package name of the *caller* of
this function will be used as the package.
The ``value`` provided will be supplied as the input to the
renderer. Usually, for template renderings, this should be a
dictionary. For other renderers, this will need to be whatever
sort of value the renderer expects.
The 'system' values supplied to the renderer will include a basic set of
top-level system names, such as ``request``, ``context``,
``renderer_name``, and ``view``. See :ref:`renderer_system_values` for
the full list. If :term:`renderer globals` have been specified, these
will also be used to augment the value.
Supply a ``request`` parameter in order to provide the renderer
with the most correct 'system' values (``request`` and ``context``
in particular).
"""
try:
registry = request.registry
except AttributeError:
registry = None
if package is None:
package = caller_package()
helper = RendererHelper(name=renderer_name, package=package,
registry=registry)
saved_response = None
# save the current response, preventing the renderer from affecting it
attrs = request.__dict__ if request is not None else {}
if 'response' in attrs:
saved_response = attrs['response']
del attrs['response']
result = helper.render(value, None, request=request)
# restore the original response, overwriting any changes
if saved_response is not None:
attrs['response'] = saved_response
elif 'response' in attrs:
del attrs['response']
return result | 84e172cbb476f12ad6f7e801fa5b995fa2dedc20 | 3,635,276 |
import os
import torch
from ..iotools import check_and_clean
from .iotools import save_checkpoint
def write_cnn_weights(model, source_path, target_path, split, selection="best_acc"):
"""
Write the weights to be loaded in the model and return the corresponding path.
:param model: (Module) the model which must be initialized
:param source_path: (str) path to the source task experiment
:param target_path: (str) path to the target task experiment
:param split: (int) split number to load
:param selection: (str) chooses on which criterion the source model is selected (ex: best_loss, best_acc)
:return: (str) path to the written weights ready to be loaded
"""
model_path = os.path.join(source_path, "best_model_dir", "fold_" + str(split), "CNN",
selection, "model_best.pth.tar")
results = torch.load(model_path)
model.load_state_dict(results['model'])
pretraining_path = os.path.join(target_path, 'best_model_dir', 'fold_' + str(split), 'CNN')
check_and_clean(pretraining_path)
save_checkpoint({'model': model.state_dict(),
'epoch': -1,
'path': model_path},
False, False,
pretraining_path,
filename='model_pretrained.pth.tar')
return pretraining_path | 19cba0ff31f39cca575631705cad64b060fb9746 | 3,635,277 |
def validate(number):
"""Check if the number provided is a valid NCF."""
number = compact(number)
if len(number) == 13:
if number[0] != 'E' or not isdigits(number[1:]):
raise InvalidFormat()
if number[1:3] not in _ecf_document_types:
raise InvalidComponent()
elif len(number) == 11:
if number[0] != 'B' or not isdigits(number[1:]):
raise InvalidFormat()
if number[1:3] not in _ncf_document_types:
raise InvalidComponent()
elif len(number) == 19:
if number[0] not in 'AP' or not isdigits(number[1:]):
raise InvalidFormat()
if number[9:11] not in _ncf_document_types:
raise InvalidComponent()
else:
raise InvalidLength()
return number | f9d2f738b020fc49bbecb0a6be9805dd4243121a | 3,635,278 |
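A hedged example; it assumes '01' appears in the module's _ncf_document_types table:
print(validate('B0100000005'))  # 'B0100000005' - an 11-character NCF with type code '01'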
import tensorflow as tf
def upsampling_d1_batch_normal_act_subpixel(input_tensor,
residual_tensor,
filter_size,
layer_number,
active_function=tf.nn.relu,
stride=1,
is_training=True,
padding='VALID',
tensorboard_output=False,
num_filters=None,
name=None):
""" Build a single upsampling layer
The function build a single convolution layer with intial weight and bias.
Also add the batch normalization for this single convolution layer.
Also add the active function for this single covolution layer.
Also a subpixel convolution that reorders information along one
dimension to expand the other dimensions.
Also final convolutional layer with restacking and reordering operations
is residually added to the original input to yield the upsampled waveform.
Args:
param1 (tensor): input_tensor
param2 (tensor): residual_tensor
param3 (int): filter_size
param4 (int): stride
param5 (int): layer_number
param6 (funtion): active_function
param7 (bool): is_training
param8 (int): num_filters
param9 (str): padding
param10 (bool): tensorboard_output
param11 (str): name
Returns:
tensor: representing the output of the operation
"""
# assume this layer is half the depth of the previous layer if no depth
# information is given
if num_filters is None:
num_filters = int(input_tensor.get_shape().as_list()[-1] / 2)
# Define the filter
with tf.name_scope('{}_layer_conv_weights'.format(layer_number)):
# input_tensor.get_shape().as_list()[-1] is pre_num_fitlters
w = weight_variables(
[filter_size, input_tensor.get_shape().as_list()[-1], num_filters])
if tensorboard_output:
histogram_variable_summaries(w)
# Define the bias
with tf.name_scope('{}_layer_conv_biases'.format(layer_number)):
b = bias_variables([num_filters])
if tensorboard_output:
histogram_variable_summaries(b)
# Create the single convolution laryer
with tf.name_scope('{}_layer_conv_preactivation'.format(layer_number)):
conv = tf.nn.conv1d(input_tensor, w, stride=stride, padding=padding) + b
if tensorboard_output:
histogram_variable_summaries(conv)
# Add the batch nomalization at output of conlution laryer
with tf.name_scope('{}_layer_batch_norm'.format(layer_number)) as scope:
conv_batch_norm = batch_normalization(conv, is_training, scope)
# Add the active function
with tf.name_scope('{}_layer_conv_activation'.format(layer_number)):
conv_batch_norm_act = active_function(conv_batch_norm, name=name)
if tensorboard_output:
histogram_variable_summaries(conv_batch_norm_act)
# Build a subpixel shuffling layer
with tf.name_scope('{}_layer_subpixel_reshuffle'.format(layer_number)):
subpixel_conv = subpixel_shuffling(
conv_batch_norm_act,
residual_tensor.get_shape().as_list()[-1],
name=name)
if tensorboard_output:
histogram_variable_summaries(subpixel_conv)
# In order to combined final stacking residual connections
with tf.name_scope('{}_layer_stacking'.format(layer_number)):
sliced = tf.slice(residual_tensor,
begin=[0, 0, 0],
size=[-1, subpixel_conv.get_shape().as_list()[1], -1])
# Stack number of filters (the channels)
stack_subpixel_conv = tf.concat((subpixel_conv, sliced),
axis=2,
name=name)
if tensorboard_output:
histogram_variable_summaries(stack_subpixel_conv)
return stack_subpixel_conv | 12b016e6e0e6d44bf396ccbbd9b4f9c870ea7bbf | 3,635,279 |
def search_ws(sheet, search_term, distance=20, warnings=True, origin=[0,0],
exact = False):
""" Searches through an excel sheet for a specified term.
The function searches along the bottom left to top right diagonals.
The function starts at the "origin" and only looks for values below or to
the right of it up to the number of diagonals away specified by the
"distance."
:param sheet: The worksheet to be searched.
:param search_term: The term to be searched for in the worksheet.
:param distance: The number of diagonals to look through until stopping.
:param warnings: Whether to print warning if the search_term wasn't found.
:param origin: Where to start in the excel sheet.
:param exact: Whether the cell must match the search term exactly.
"""
    final_search = ((distance+1)*distance)//2
    current_diagonal = 1
    total_columns = sheet.ncols
    total_rows = sheet.nrows
    for n in range(0, final_search):
        if ((current_diagonal+1)*current_diagonal)//2 < n+1:
            current_diagonal += 1
        i = ((current_diagonal+1)*current_diagonal)//2 - (n+1)
        j = current_diagonal - i - 1
if j + origin[1] >= total_columns:
continue
if i + origin[0] >= total_rows:
continue
        cur_cell = str(sheet.cell_value(i+origin[0], j+origin[1])).lower()
if(exact):
if str(search_term).lower() == cur_cell:
return [i+origin[0],j+origin[1]]
elif(not exact):
if str(search_term).lower() in cur_cell:
return [i+origin[0],j+origin[1]]
    # Failed to find search term:
    if warnings:
        print("Warning: Search entry not found in the specified search space.")
        print("Check sample worksheet and consider changing distance input.")
return [-1,-1] | 3228ee48960b255cf042252ca0b5ac2f69c0d259 | 3,635,280 |
import os
def readfiles(meta):
"""
Reads in the files saved in datadir and saves them into a list
Parameters
-----------
meta
metadata object
Returns
----------
meta
metadata object but adds segment_list to metadata containing the sorted data fits files
Notes:
----------
History:
Written by Sebastian Zieba December 2021
"""
meta.segment_list = []
for fname in os.listdir(str(meta.datadir)):
if fname.endswith(meta.suffix + '.fits'):
meta.segment_list.append(str(meta.datadir) +'/'+ fname)
return meta | be62cd892e25f4b5cd5a1671b734bac6893c1df9 | 3,635,281 |
import numpy as np
def obs_all_table_target_pairs_one_hot(agent_id: int, factory: Factory) -> np.ndarray:
"""One-hot encoding for each table target, NOT summed together; length: number of tables x number of nodes"""
num_nodes = len(factory.nodes)
num_tables = len(factory.tables)
table_target_pair = np.zeros(num_nodes * num_tables)
for n, t in enumerate(factory.tables):
if t.has_core():
table_target_index = [factory.nodes.index(t.core.current_target)]
table_target_pair[n * num_nodes : (n + 1) * num_nodes] = np.asarray(
one_hot_encode(num_nodes, table_target_index)
)
else:
table_target_pair[n * num_nodes : (n + 1) * num_nodes] = np.zeros(num_nodes)
return table_target_pair | dc021799fa09e0bf37dac27908996e471b23223d | 3,635,282 |
import sys, spacy
def clean_up(text):
"""
    Cleans up the given text and generates a list of
    lemmatized words for each document.
    It also corrects for unicode problems
    with Python version 2.
"""
removal=['ADV','PRON','CCONJ','PUNCT','PART','DET','ADP','SPACE']
text_out = []
if sys.version_info.major == 2:
text = unicode(''.join([i if ord(i) < 128 else ' ' for i in text]))
doc = nlp(text)
for token in doc:
if token.is_stop == False and token.is_alpha and len(token)>2 and token.pos_ not in removal:
lemma = token.lemma_
text_out.append(lemma)
return text_out | c9c40992b9bec847dd66f2ef0a36e7356cf242a4 | 3,635,283 |
import logging
def logger(name):
"""
This method is the preferred way to obtain a logger.
Example:
>>> from qiutil.logging import logger
>>> logger(__name__).debug("Starting my application...")
:Note: Python ``nosetests`` captures log messages and only
reports them on failure.
:param name: the caller's context ``__name__``
:return: the Python Logger instance
"""
# Configure on demand.
if not hasattr(logger, 'configured'):
configure(name)
return logging.getLogger(name) | bb876dbe4d7a522b807133427914c2e84ee99c4a | 3,635,284 |
import math
import numpy as np
def severe_obesity_wfl(gender, length, weight, units='metric', severity=1):
"""
Returns a boolean indicator for a zscore determining if the reading is classified as severely obese from:
https://jamanetwork.com/journals/jamapediatrics/fullarticle/2667557.
NOTE: This should only be used for children under the age of 2 as BMI values cannot be accurately recorded until 2 years of age.
#### PARAMETERS ####
parameters should either be arrays or single items
gender: 0 for male, 1 for female
length: length/height
weight: weight
units: default = 'metric'.
'metric': lengths/weights assumed to be in cm/kg respectively
'usa': lengths/weights assumed to be in in/lb respectively
severity: default = 1
1: class I severe obesity; 120% of the 95th percentile of the BMI z score
2: class II severe obesity; 140% of the 95th percentile of the BMI z score
"""
if units not in ('metric','usa'):
        raise ValueError('Invalid measurement system. Must be "metric" or "usa".')
global WHO_percentiles
severe1 = {0: WHO_percentiles[0][:,14] * 1.2,
1: WHO_percentiles[1][:,14] * 1.2}
severe2 = {0: WHO_percentiles[0][:,14] * 1.4,
1: WHO_percentiles[1][:,14] * 1.4}
if units == 'usa':
length *= 2.54 #inches to cm
weight *= 0.4535924 #pounds to kg
if type(severity) != int:
try:
severity = int(severity)
        except (TypeError, ValueError):
raise ValueError('Invalid Input for severity. Must be able to be converted to an integer of 1 or 2')
elif severity not in (1,2):
raise ValueError('Invalid input for severity. Must be 1 or 2.')
if all([type(x) in (np.ndarray,list,tuple,set) for x in (gender,length,weight)]):
gender = np.array(gender); length = np.array(length); weight = np.array(weight)
length = length.astype(float); weight = weight.astype(float)
severe = np.zeros(gender.reshape(-1,1).shape[0])
for ix in range(gender.shape[0]):
if length[ix] < np.min(WHO_percentiles['length']) or length[ix] > np.max(WHO_percentiles['length']):
continue
if math.fmod(length[ix]*10, 1) == 0:
ix_low = WHO_percentiles['length'].index(length[ix])
else:
ix_low = WHO_percentiles['length'].index(int(length[ix]*10)/10)
if severity == 1:
severe[ix] = linear_interpolation(length[ix], WHO_percentiles['length'][ix_low], WHO_percentiles['length'][ix_low+1], severe1[gender[ix]][ix_low], severe1[gender[ix]][ix_low+1])
else:
severe[ix] = linear_interpolation(length[ix], WHO_percentiles['length'][ix_low], WHO_percentiles['length'][ix_low+1], severe2[gender[ix]][ix_low], severe2[gender[ix]][ix_low+1])
return weight >= severe
else:
if length < np.min(WHO_percentiles['length']) or length > np.max(WHO_percentiles['length']):
return False
if math.fmod(length*10, 1) == 0:
ix_low = WHO_percentiles['length'].index(length)
if severity == 1:
return weight >= severe1[gender][ix_low]
else:
return weight >= severe2[gender][ix_low]
else:
ix_low = WHO_percentiles['length'].index(int(length*10)/10)
if severity == 1:
severe = linear_interpolation(length, WHO_percentiles['length'][ix_low], WHO_percentiles['length'][ix_low+1], severe1[gender][ix_low], severe1[gender][ix_low+1])
else:
severe = linear_interpolation(length, WHO_percentiles['length'][ix_low], WHO_percentiles['length'][ix_low+1], severe2[gender][ix_low], severe2[gender][ix_low+1])
return weight >= severe | c6de69ec90d278b69fb802ce8a435502b1668644 | 3,635,285 |
def calc_Flesh_Kincaid_Grade_rus_flex(n_syllabes, n_words, n_sent):
"""Метрика Flesh Kincaid Grade для русского языка с константными параметрами"""
if n_words == 0 or n_sent == 0: return 0
n = FLG_X_GRADE * (float(n_words) / n_sent) + FLG_Y_GRADE * (float(n_syllabes) / n_words) - FLG_Z_GRADE
return n | 93467f013107660f3b8ad03ac882e857434fbc45 | 3,635,286 |
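A worked example; the FLG_* constants below are the commonly cited values for the Russian adaptation and are assumed here for illustration:
FLG_X_GRADE, FLG_Y_GRADE, FLG_Z_GRADE = 0.49, 7.3, 16.59
print(calc_Flesh_Kincaid_Grade_rus_flex(n_syllabes=180, n_words=100, n_sent=8))
# 0.49*(100/8) + 7.3*(180/100) - 16.59 = approximately 2.68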
import signal
def psd(x: np.ndarray, delf: float, type_psd: list, n: float = None) -> np.ndarray:
"""Returns 2d array of PSD computed with specified method
Args:
x (np.ndarray): Values in time domain
delf (float): Sampling Rate
        type_psd (list): [x, y] x=0 psd, x=1 psd density && y=0 standard psd, y=1 welch's method
n (float): Number of Segments with Welch's method
Returns:
np.ndarray: 2D array, frequency values and computed PSD values
"""
if type_psd[0] == 0:
scale = 'spectrum'
else:
scale = 'density'
if type_psd[1] == 0:
f, P_xx = signal.periodogram(x, delf, scaling=scale)
else:
        num_points = int(len(x) // n)
f, P_xx = signal.welch(x, delf, nperseg=num_points, scaling=scale)
return [f, P_xx] | 0efeb38798ddda5d09a4e3c762f1ebbed69c50da | 3,635,287 |
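A usage sketch with a synthetic 1 kHz tone sampled at 10 kHz:
import numpy as np
fs = 10_000.0
t = np.arange(0, 1, 1/fs)
x = np.sin(2*np.pi*1000*t)
f, pxx = psd(x, fs, [1, 1], n=8)   # Welch density estimate with 8 segments
print(f[np.argmax(pxx)])           # peak at 1000.0 Hz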
def comment(request):
"""留言功能"""
if request.method == "POST":
form = CommentForm(request.POST)
blog_id = request.POST["blog_id"]
user = request.user
        if form.is_valid():
            new_comment = form.save(commit=False)
            new_comment.user = user
            new_comment.recipe = Blog.objects.filter(id=blog_id)[0]
            new_comment.save()
        return redirect('/blogs/' + blog_id)
else:
return redirect('index') | a3f8cf5beed1edf3156817aaa0e36e377256d4b1 | 3,635,288 |
import igraph as ig
import numpy as np
def calculate_height_filtration(
graph,
direction,
attribute_in='position',
attribute_out='f',
):
"""Calculate height filtration of a graph in some direction.
*Note*: This function works for *all* vector-valued attributes of
a graph, but in the following, it will be assumed that those
attributes are 3D.
Given a 3D direction vector, this function calculates a height
filtration. To this end, a predefined vertex attribute will be
evaluated. The attribute needs to contain 3D data. The result,
i.e. the dot product between the attribute and direction, will
be stored in an output attribute.
The implementation follows the description of the *Persistent
Homology Transform* [1].
Parameters
----------
graph:
Input graph. Needs to contain a 3D node attribute that can be
queried for the calculation. See `attribute_in` to change the
name of the attribute.
attribute_in:
Specifies the (vertex) attribute that contains the 3D data.
attribute_out:
Specifies the attribute name for storing the result of the
calculation. This name will pertain to *both* vertices and
edges. If the attribute already exists, the function will
overwrite it.
Returns
-------
Copy of the input graph, with vertex weights and each weights added
as attributes with the name `attribute_out`.
References
----------
[1]: Katharine Turner, Sayan Mukherjee, Doug M Boyer: "Persistent
Homology Transform for Modeling Shapes and Surfaces",
arXiv:1310.1030.
"""
assert _has_vertex_attribute(graph, attribute_in)
assert _check_dimensionality(graph, attribute_in, direction)
# Let's make a copy first because we are modifying the graph's
# attributes in place here.
graph = ig.Graph.copy(graph)
# Following the original terminology in the paper
v = direction
for vertex in graph.vs:
x = vertex[attribute_in]
r = np.dot(x, v)
vertex[attribute_out] = r
for edge in graph.es:
source, target = graph.vs[edge.source], graph.vs[edge.target]
# The original paper describes a sublevel set filtration, so it
# is sufficient to use the `max` function here.
r = max(source[attribute_out], target[attribute_out])
edge[attribute_out] = r
return graph | 79010055e4a61862267b4cb9e9f5ebc9c3d1cdca | 3,635,289 |
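A small end-to-end sketch on a 4-cycle, assuming the module's attribute checks pass for 3D positions:
g = ig.Graph.Ring(4)
g.vs['position'] = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]
g2 = calculate_height_filtration(g, direction=[1, 0, 0])
print(g2.vs['f'])  # [0.0, 1.0, 1.0, 0.0] - heights along the x direction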
def read_file(file_name, encoding='utf-8'):
"""
读文本文件
:param encoding:
:param file_name:
:return:
"""
with open(file_name, 'rb') as f:
data = f.read()
if encoding is not None:
data = data.decode(encoding)
return data | 4e4a90512727b4b40d4968930479f226dc656acb | 3,635,290 |
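For example:
text = read_file('notes.txt')                 # decoded str (utf-8 by default)
blob = read_file('image.png', encoding=None)  # raw bytes, no decoding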
import numpy as np
import nibabel as nb
from nibabel.processing import smooth_image  # assumed source of smooth_image; adjust if the module used another helper
from scipy.stats import gmean
def cbf_qei(gm, wm, csf, img, thresh=0.8):
"""
Quality evaluation index of CBF base on Sudipto Dolui work
Dolui S., Wolf R. & Nabavizadeh S., David W., Detre, J. (2017).
Automated Quality Evaluation Index for 2D ASL CBF Maps. ISMR 2017
"""
def fun1(x, xdata):
d1 = np.exp(-(x[0])*np.power(xdata, x[1]))
return(d1)
def fun2(x, xdata):
d1 = 1-np.exp(-(x[0])*np.power(xdata, x[1]))
return(d1)
x1 = [0.054, 0.9272]
x2 = [2.8478, 0.5196]
x4 = [3.0126, 2.4419]
scbf = smooth_image(nb.load(img), fwhm=5).get_fdata()
if len(scbf.shape) > 3:
scbf = scbf[:, :, :, 0]
# load prob maps
gmm = nb.load(gm).get_fdata()
wmm = nb.load(wm).get_fdata()
ccf = nb.load(csf).get_fdata()
if len(gmm.shape) > 3:
gmm = gmm[:, :, :, 0]
wmm = wmm[:, :, :, 0]
ccf = ccf[:, :, :, 0]
pbcf = 2.5*gmm+wmm # gmm is 2.5 times wm
msk = np.array((scbf != 0) & (scbf != np.nan) & (pbcf != np.nan)).astype(int)
gm1 = np.array(gmm > thresh)
wm1 = np.array(wmm > thresh)
cc1 = np.array(ccf > thresh)
r1 = np.array([0, np.corrcoef(scbf[msk == 1], pbcf[msk == 1])[1, 0]]).max()
V = ((np.sum(gm1)-1)*np.var(scbf[gm1 > 0])+(np.sum(wm1)-1)*np.var(scbf[wm1 > 0])
+ (np.sum(cc1)-1) * np.var(scbf[cc1 > 0]))/(np.sum(gm1 > 0)+np.sum(wm1 > 0)
+ np.sum(cc1 > 0)-3)
negGM = np.sum(scbf[gm1] < 0)/(np.sum(gm1))
GMCBF = np.mean(scbf[gm1])
CV = V/np.abs(GMCBF)
Q = [fun1(x1, CV), fun1(x2, negGM), fun2(x4, r1)]
return gmean(Q) | d52badc74cc01c615afa0a0a5cdab1d040d110e5 | 3,635,291 |
import sqlite3
def calendar():
"""page for all events"""
events = get_all_events(sqlite3.connect(DB_NAME).cursor())
return render_template("calendar.html", events=events) | bf6c1f12cb2261dc68389c56b8c92a4dbe879fda | 3,635,292 |
def _ui_device_family_plist_value(ctx):
"""Returns the value to use for `UIDeviceFamily` in an info.plist.
This function returns the array of value to use or None if there should be
no plist entry (currently, only macOS doesn't use UIDeviceFamily).
Args:
ctx: The Skylark context.
Returns:
A list of integers to use for the `UIDeviceFamily` in an Info.plist
or None if the key should not be added to the Info.plist.
"""
families = []
for f in _families(ctx):
number = _DEVICE_FAMILY_VALUES[f]
if number:
families.append(number)
if families:
return families
return None | 8d6669fcdaf02f1ef254dc77910f2e2e9dfa5126 | 3,635,293 |
import dask
import dask.array as da
import numpy as np
# `groupby_apply` is a local helper module assumed to be importable alongside this function
def map_amplitude_grid(
ds_ind,
data_columns,
stokes='I',
chunk_size:int=10**6,
return_index:bool=False
):
"""
    Map concurrent dask functions onto an Xarray dataset with
    pre-computed grid indices.
    Parameters
    ----------
    ds_ind : xarray.Dataset
        An xarray dataset imported from a measurement set. It must contain
coordinates U_bins and V_bins, and relevant data and position
variables.
data_columns : list
Components for Stokes terms to be used to compute amplitude. Depends
on dataset.
chunk_size : int
The chunk size for computing split-apply functions in dask, Default is
'10**6'.
    return_index : bool
        Determines the return type. If True, the function returns 2d grids of
        the per-bin indices, values, and flags instead of the median/std grids.
    Returns
    -------
    index_groups, value_groups, flag_groups : numpy.array
        When return_index is True: two-dimensional uv-grids whose cells hold
        lists of dataset indices, amplitude values, and flags for each bin.
    median_grid, std_grid : dask.delayed
        When return_index is False: delayed two-dimensional uv-grids of the
        per-bin median and standard deviation of the amplitudes.
    """
# Get dask arrays of UV-bins and visibilities from XArray dataset
dd_ubins = ds_ind.U_bins.data
dd_vbins = ds_ind.V_bins.data
dd_flgs = (ds_ind.FLAG[:,data_columns[0]].data |
ds_ind.FLAG[:,data_columns[1]].data)
if stokes=='I':
dd_vals = (np.absolute(ds_ind.DATA[:,data_columns[0]].data +
ds_ind.DATA[:,data_columns[1]].data))
elif stokes=='Q':
dd_vals = (np.absolute(ds_ind.DATA[:,data_columns[0]].data -
ds_ind.DATA[:,data_columns[1]].data))
# Combine U and V bins into one dask array
dd_bins = da.stack([dd_ubins, dd_vbins]).T
    # Apply uniform chunks to both dask arrays
dd_bins = dd_bins.rechunk([chunk_size, 2])
dd_vals = dd_vals.rechunk([chunk_size, 1])
dd_flgs = dd_flgs.rechunk([chunk_size, 1])
# Convert to delayed data structures
bin_partitions = dd_bins.to_delayed()
val_partitions = dd_vals.to_delayed()
flg_partitions = dd_flgs.to_delayed()
    # Compute indices for each bin in the grid for each chunk
group_chunks = [dask.delayed(groupby_apply.group_bin_flagval_wrap)(
part[0][0],
part[1],
part[2],
init_index=(chunk_size*kth)
) for kth, part in enumerate(zip(bin_partitions,
val_partitions,
flg_partitions)
)
]
groups = dask.delayed(groupby_apply.combine_group_flagval)(group_chunks)
    if return_index:
        # Compute the grid from above without doing the apply step
        groups = groups.compute()
        index_groups, value_groups, flag_groups = groups[0], groups[1], groups[2]
        return index_groups, value_groups, flag_groups
    else:
        # Apply the functions to the binned values without explicitly computing the indices
        value_groups = groups[1]  # delayed getitem on the combined groups
        median_grid = dask.delayed(groupby_apply.apply_to_groups)(value_groups, np.median)
        std_grid = dask.delayed(groupby_apply.apply_to_groups)(value_groups, np.std)
        return median_grid, std_grid | f363f1bc8de2eb58d5d6ecca529e0d8fca255496 | 3,635,294
import pandas as pd
def fetch_query(query, columns):
    """
    Runs the given SQL query via fetch_query_records and returns the
    result as a JSON string of records.
    Input: query: a SQL query (string)
           columns: column names for the returned records (list of strings)
    Returns: pairs: JSON string of cursor.fetchall() rows as {column: value} records
"""
# Fetch query
response = fetch_query_records(query)
# List of tuples to DF
df = pd.DataFrame(response, columns=columns)
# DF to dictionary
pairs = df.to_json(orient='records')
return pairs | 75465b0a920a19ca339c732bfe5b8ca4c356a9a5 | 3,635,295 |
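For instance, assuming a hypothetical users table:
pairs = fetch_query("SELECT id, name FROM users;", columns=["id", "name"])
# -> '[{"id":1,"name":"Ada"}, ...]'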
import logging
def fetch(key):
"""Gets snapshots referenced by the given instance template revision.
Args:
key: ndb.Key for a models.InstanceTemplateRevision entity.
Returns:
A list of snapshot URLs.
"""
itr = key.get()
if not itr:
logging.warning('InstanceTemplateRevision does not exist: %s', key)
return []
if not itr.project:
logging.warning('InstanceTemplateRevision project unspecified: %s', key)
return []
# No snapshots configured.
if not itr.snapshot_name and not itr.snapshot_labels:
return []
labels = {}
for label in itr.snapshot_labels:
# label is necessarily in this format per config.py.
    label_key, label_value = label.split(':', 1)
    labels[label_key] = label_value
api = gce.Project(itr.project)
result = api.get_snapshots(itr.snapshot_name, labels, max_results=500)
snapshot_urls = [i['selfLink'] for i in result.get('items', [])]
while result.get('nextPageToken'):
result = api.get_snapshots(
itr.snapshot_name, labels, max_results=500,
page_token=result['nextPageToken'])
snapshot_urls.extend([i['selfLink'] for i in result['items']])
return snapshot_urls | c567a0e76c602936b7fd018c8824c6d3cce0d70d | 3,635,296 |
def CreateNameToSymbolInfo(symbol_infos):
"""Create a dict {name: symbol_info, ...}.
Args:
symbol_infos: iterable of SymbolInfo instances
Returns:
a dict {name: symbol_info, ...}
If a symbol name corresponds to more than one symbol_info, the symbol_info
with the lowest offset is chosen.
"""
# TODO(lizeb,pasko): move the functionality in this method into
# check_orderfile.
symbol_infos_by_name = {}
warnings = cygprofile_utils.WarningCollector(_MAX_WARNINGS_TO_PRINT)
  for infos in GroupSymbolInfosByName(symbol_infos).values():
first_symbol_info = min(infos, key=lambda x: x.offset)
symbol_infos_by_name[first_symbol_info.name] = first_symbol_info
if len(infos) > 1:
warnings.Write('Symbol %s appears at %d offsets: %s' %
(first_symbol_info.name,
len(infos),
','.join([hex(x.offset) for x in infos])))
warnings.WriteEnd('symbols at multiple offsets.')
return symbol_infos_by_name | 6f6c0ebfdaf103126455d344c199106ee0c0b764 | 3,635,297 |
def EFI(data, period=13):
"""
Elder Force Index
EFI is an indicator that uses price and volume to assess the power behind a move or identify possible turning
points.
:param pd.DataFrame data: pandas DataFrame with open, high, low, close data
:param int period: period used for indicator calculation
:return pd.Series: with indicator data calculation results
"""
return TA.EFI(data, period=period) | ae644c82a5dc4fd304fd17f9939e427eccb47468 | 3,635,298 |
def Gdelta(GP, testfunc, firstY, delta=0.01, maxiter=10, **kwargs):
"""
given a GP, find the max and argmax of G_delta, the confidence-bounded
prediction of the max of the response surface
"""
assert testfunc.maximize
mb = MuBound(GP, delta)
_, optx = cdirect(mb.objective, testfunc.bounds, maxiter=maxiter, **kwargs)
opt = max(testfunc.f(optx), firstY)
return (opt-firstY) / (-testfunc.minimum-firstY) | 0a74a922cba87cfccc4a63e1195abe4dca8b6d9c | 3,635,299 |