content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def uiTemplate(q=1, e=1, dt="string", ex=1, ut="string"):
    """Stub documenting Maya's ``uiTemplate`` command.

    Reference:
    http://help.autodesk.com/cloudhelp/2019/ENU/Maya-Tech-Docs/CommandsPython/uiTemplate.html

    uiTemplate is undoable, queryable, and editable. It creates a new command
    template object; template objects hold default flag arguments for multiple
    UI commands. The command arguments are specified with the individual
    commands using the -defineTemplate flag and the desired flags/arguments.
    See also setUITemplate.

    Returns:
        string: The name of the uiTemplate created. In query mode, the return
        type is based on the queried flag.

    Flags:
        dt : defineTemplate [string]
            Puts the command in a mode where any other flags and arguments are
            parsed and added to the command template named in the argument.
            They are used as default arguments in subsequent invocations of
            the command when templateName is set as the current template.
        ex : exists [boolean]
            Returns whether the specified object exists; other flags ignored.
        ut : useTemplate [string]
            Forces the command to use a command template other than the
            current one.
    """
def save_image(image: Image, path: str):
    """Persist *image* to disk using PIL's ``Image.save`` method.

    Args:
        image (Image): PIL image object to write out.
        path (str): Destination path for the saved file.

    Returns:
        None
    """
    image.save(path)
def save_CSV_from_file(h5_file, h5_path='/', append='', mirror=False):
    """
    Saves the tfp, shift, and tfp_fixed datasets as CSV files next to the H5 file.

    :param h5_file: Reminder you can always type: h5_svd.file or h5_avg.file for this
    :type h5_file: H5Py file
    :param h5_path: specific folder path to search for the tfp data. Usually not needed.
    :type h5_path: str, optional
    :param append: text to append to file name
    :type append: str, optional
    :param mirror: if True, flip each array left-right before transposing
    :type mirror: bool, optional
    """
    tfp = usid.hdf_utils.find_dataset(h5_file[h5_path], 'tfp')[0][()]
    tfp_fixed = usid.hdf_utils.find_dataset(h5_file[h5_path], 'tfp_fixed')[0][()]
    shift = usid.hdf_utils.find_dataset(h5_file[h5_path], 'shift')[0][()]
    print(usid.hdf_utils.find_dataset(h5_file[h5_path], 'shift')[0].parent.name)
    # Write the CSVs into the directory containing the H5 file.
    # NOTE(review): this changes the process-wide working directory as a side
    # effect, as the original did; kept for backward compatibility.
    path = h5_file.file.filename.replace('\\', '/')
    path = '/'.join(path.split('/')[:-1]) + '/'
    os.chdir(path)

    def _transform(arr):
        # Single place for the mirror/transpose logic instead of the
        # previously duplicated if/else branches.
        return np.fliplr(arr).T if mirror else arr.T

    for label, data in (('tfp', tfp), ('shift', shift), ('tfp_fixed', tfp_fixed)):
        np.savetxt(label + '-' + append + '.csv', _transform(data), delimiter=',')
def _split_vector(expr, ranges, fill_ranges=True):
    """Extract the components of the given vector or matrix.

    Parameters
    ==========
    expr : Vector, DenseMatrix or list/tuple
        The symbolic vector to split into components.
    ranges : list/tuple
        Plot ranges; may be extended with defaults when fill_ranges is True.
    fill_ranges : bool, optional
        When True, add a default range Tuple(sym, -10, 10) for every free
        symbol not already covered by an entry in ``ranges``.

    Returns
    =======
    split_expr : tuple
        Tuple of the form (x_expr, y_expr, z_expr). If a 2D vector is
        provided, z_expr = S.Zero.
    ranges : list/tuple

    NOTE: this function is located in utils.py module (and not in vectors.py)
    in order to avoid circular import.
    """
    if isinstance(expr, Vector):
        # Convert the sympy Vector to a column matrix in its coordinate system.
        N = list(_get_coord_systems(expr))[0]
        expr = expr.to_matrix(N)
        # TODO: experimental_lambdify is not able to deal with base scalars.
        # Need to replace them both in the vector as well in the ranges.
        # Sympy's lambdify is able to deal with them. Once experimental_lambdify
        # is removed, the following code shouldn't be necessary anymore.
        bs = list(expr.atoms(BaseScalar))
        bs = sorted(bs, key=str)
        bs_dict = {b: Symbol(t) for b, t in zip(bs, ["x", "y", "z"])}
        expr = expr.subs(bs_dict)
        ranges = [r.subs(bs_dict) for r in ranges]
    # The two elif branches below apply only to matrix/list/tuple input;
    # a Vector has already been converted above and skips them.
    elif not isinstance(expr, (DenseMatrix, list, tuple, Tuple)):
        raise TypeError(
            "The provided expression must be a symbolic vector, or a "
            "symbolic matrix, or a tuple/list with 2 or 3 symbolic "
            + "elements.\nReceived type = {}".format(type(expr))
        )
    elif (len(expr) < 2) or (len(expr) > 3):
        raise ValueError(
            "This function only plots 2D or 3D vectors.\n"
            + "Received: {}. Number of elements: {}".format(expr, len(expr))
        )
    if fill_ranges:
        # Give every uncovered free symbol a default plotting range.
        ranges = list(ranges)
        fs = set().union(*[e.free_symbols for e in expr])
        if len(ranges) < len(fs):
            fs_ranges = set().union([r[0] for r in ranges])
            for s in fs:
                if s not in fs_ranges:
                    ranges.append(Tuple(s, -10, 10))
    if len(expr) == 2:
        xexpr, yexpr = expr
        zexpr = S.Zero
    else:
        xexpr, yexpr, zexpr = expr
    split_expr = xexpr, yexpr, zexpr
    return split_expr, ranges
def Transition_rep(source_State_name, target_State_name):
    """Representation of a transition.

    :param source_State_name: The sequence of "name" values of State objects
        referred to by attribute "source" in this Transition
    :type source_State_name: Array
    :param target_State_name: The sequence of "name" values of State objects
        referred to by attribute "target" in this Transition
    :type target_State_name: Array
    :return: one "source--target" line per source/target pair
    """
    pairs = zip(source_State_name, target_State_name)
    # Leading whitespace inside the literal is preserved from the original
    # output format (used for indentation by the caller).
    return [f'    {src}--{dst}' for src, dst in pairs]
def debug(dataset):
    """Debugging utility for tf.data.Dataset.

    Builds a reinitializable iterator over *dataset*, runs it in a fresh
    TF1 graph session, visualizes one element, then drops into pdb so the
    fetched element can be inspected interactively before printing it.

    NOTE(review): uses the TF1 session API; `viz` is assumed to be a helper
    defined elsewhere in this module — confirm its signature.
    """
    iterator = tf.data.Iterator.from_structure(
        dataset.output_types, dataset.output_shapes)
    next_element = iterator.get_next()
    ds_init_op = iterator.make_initializer(dataset)
    with tf.Session() as sess:
        sess.run(ds_init_op)
        viz(sess, next_element)
        # Intentional breakpoint: this function is a manual debugging aid.
        import pdb; pdb.set_trace()
        res = sess.run(next_element)
        # for i in range(len(res)):
        #     print("IoU of label with itself:")
        #     print(Gecko._iou(res[i][1], res[i][1], class_of_interest_channel=None))
        print(res)
def tifread(ifile, metaData):
    """Read a single-band raster plus its coordinate grids from a GeoTIFF.

    Parameters
    ----------
    ifile : str
        Path of the raster file to open.
    metaData : str
        "A" for area convention (coordinates at pixel centers, half-pixel
        offset) or "P" for point convention (coordinates at pixel corners).

    Returns
    -------
    X, Y : ndarray
        2-D coordinate grids computed from the geotransform.
    Z : ndarray
        Raster values of band 1.

    Raises
    ------
    ValueError
        If metaData is neither "A" nor "P" (previously this fell through to
        an undefined-variable NameError at the return statement).
    """
    dataset = gdal.Open(ifile, GA_ReadOnly)
    Nx = dataset.RasterXSize
    Ny = dataset.RasterYSize
    trans = dataset.GetGeoTransform()
    # "A" places coordinates at pixel centers (+0.5 px); "P" at pixel corners.
    if metaData == "A":
        offset = 0.5
    elif metaData == "P":
        offset = 0.0
    else:
        raise ValueError("metaData must be 'A' or 'P', got %r" % (metaData,))
    xp = np.arange(Nx)
    yp = np.arange(Ny)
    (Xp, Yp) = np.meshgrid(xp, yp)
    X = trans[0] + (Xp + offset) * trans[1] + (Yp + offset) * trans[2]  # FIXME: bottleneck!
    Y = trans[3] + (Xp + offset) * trans[4] + (Yp + offset) * trans[5]
    Z = dataset.GetRasterBand(1).ReadAsArray()
    return X, Y, Z
def search_model(trial: optuna.trial.Trial) -> List[Any]:
    """Search model structure from user-specified search space.

    Samples 8-12 backbone blocks (Conv / DWConv / InvertedResidual v2/v3)
    followed by a fixed head (1x1 Conv, GlobalAvgPool, FixedConv), returning
    the model spec list and per-block metadata.

    Returns:
        (model, module_info): list of [repeat, block_name, block_args]
        entries and a dict describing each searched block.
    """
    model = []
    # NOTE(review): n_stride is initialized here but never incremented below,
    # so the `if n_stride == 0` branch forces m_stride = 2 for EVERY layer —
    # confirm whether n_stride should be updated when a stride is applied.
    n_stride = 0
    # NOTE(review): MAX_NUM_STRIDE is defined but never used — confirm intent.
    MAX_NUM_STRIDE = 5
    UPPER_STRIDE = 2  # 5(224 example): 224, 112, 56, 28, 14, 7
    n_layers = trial.suggest_int("n_layers", 8, 12)
    stride = 1
    # NOTE(review): "imput_min" is likely a typo for "input_min"; kept as-is
    # because the name is referenced throughout the function.
    input_max = 64
    imput_min = 32
    module_info = {}
    # Also make the number of layers searchable. (translated from Korean)
    for i in range(n_layers):
        out_channel = trial.suggest_int(f"{i+1}units", imput_min, input_max)
        block = trial.suggest_categorical(
            f"m{i+1}", ["Conv", "DWConv", "InvertedResidualv2", "InvertedResidualv3"]
        )
        repeat = trial.suggest_int(f"m{i+1}/repeat", 1, 5)
        m_stride = trial.suggest_int(f"m{i+1}/stride", low=1, high=UPPER_STRIDE)
        if m_stride == 2:
            stride += 1
        if n_stride == 0:
            m_stride = 2
        if block == "Conv":
            activation = trial.suggest_categorical(
                f"m{i+1}/activation", ["ReLU", "Hardswish"]
            )
            # Conv args: [out_channel, kernel_size, stride, padding, groups, activation]
            model_args = [out_channel, 3, m_stride, None, 1, activation]
        elif block == "DWConv":
            activation = trial.suggest_categorical(
                f"m{i+1}/activation", ["ReLU", "Hardswish"]
            )
            # DWConv args: [out_channel, kernel_size, stride, padding_size, activation]
            model_args = [out_channel, 3, 1, None, activation]
        elif block == "InvertedResidualv2":
            c = trial.suggest_int(
                f"m{i+1}/v2_c", low=imput_min, high=input_max, step=16
            )
            t = trial.suggest_int(f"m{i+1}/v2_t", low=1, high=4)
            model_args = [c, t, m_stride]
        elif block == "InvertedResidualv3":
            kernel = trial.suggest_int(f"m{i+1}/kernel_size", low=3, high=5, step=2)
            t = round(
                trial.suggest_float(f"m{i+1}/v3_t", low=1.0, high=6.0, step=0.1), 1
            )
            c = trial.suggest_int(f"m{i+1}/v3_c", low=imput_min, high=input_max, step=8)
            se = trial.suggest_categorical(f"m{i+1}/v3_se", [0, 1])
            hs = trial.suggest_categorical(f"m{i+1}/v3_hs", [0, 1])
            # k t c SE HS s
            model_args = [kernel, t, c, se, hs, m_stride]
        # NOTE(review): in_features is assigned but never read — confirm
        # whether it was meant to feed the next layer's channel search.
        in_features = out_channel
        model.append([repeat, block, model_args])
        # Double the channel search ceiling every other layer, capped at 160.
        if i % 2:
            input_max *= 2
            input_max = min(input_max, 160)
        module_info[f"block{i+1}"] = {"type": block, "repeat": repeat, "stride": stride}
    # last layer
    last_dim = trial.suggest_int("last_dim", low=128, high=1024, step=128)
    # We can setup fixed structure as well
    model.append([1, "Conv", [last_dim, 1, 1]])
    model.append([1, "GlobalAvgPool", []])
    model.append([1, "FixedConv", [6, 1, 1, None, 1, None]])
    return model, module_info
def create_search_forms(name, language_code, script_code):
    """Return a list of name variants suitable for searching.

    Arguments:
    name -- string name
    language_code -- string code of language
    script_code -- string code of script
    """
    # QAZ: It would be useful if something could be done here (or wherever is
    # most appropriate) to handle the case where names are assembled without
    # spaces between the parts (eg, Chinese), since whatever part(s) come
    # after the first will not be found in a search.
    name = str(name)
    forms = [name]
    # Variant forms are only generated for Latin-script names.
    if script_code != 'Latn':
        return forms
    ascii_form = asciify_name(name)
    if ascii_form and ascii_form != name:
        forms.append(ascii_form)
    for variant in (demacronise_name(name),
                    abbreviate_name(name, language_code, script_code),
                    unpunctuate_name(name)):
        if variant != name:
            forms.append(variant)
    return forms
def iv_params(*, N_s, T_degC, I_ph_A, I_rs_1_A, n_1, I_rs_2_A, n_2, R_s_Ohm, G_p_S,
              minimize_scalar_bounded_options=minimize_scalar_bounded_options_default,
              newton_options=newton_options_default):
    """
    Compute I-V curve parameters.

    Inputs (any broadcast-compatible combination of python/numpy scalars and numpy arrays):
        Same as P_mp().

    Outputs (device-level, at each combination of broadcast inputs, return type is
    numpy.float64 for all scalar inputs):
        dict containing the outputs of FF() with the addition of:
            R_oc_Ohm   resistance at open circuit
            R_sc_Ohm   resistance at short circuit
    """
    # All three helpers share the same device parameters; build them once.
    device = dict(N_s=N_s, T_degC=T_degC, I_ph_A=I_ph_A, I_rs_1_A=I_rs_1_A, n_1=n_1,
                  I_rs_2_A=I_rs_2_A, n_2=n_2, R_s_Ohm=R_s_Ohm, G_p_S=G_p_S)
    result = FF(minimize_scalar_bounded_options=minimize_scalar_bounded_options,
                newton_options=newton_options, **device)
    result['R_oc_Ohm'] = R_oc(newton_options=newton_options, **device)['R_oc_Ohm']
    result['R_sc_Ohm'] = R_sc(newton_options=newton_options, **device)['R_sc_Ohm']
    return result
def list_class_names(dir_path):
    """
    Map every class name defined in dir_path's python files to its file path.

    Args:
        dir_path (str): absolute path of the folder.
    Returns:
        dict: mapping from the class names in all python files in the
            folder to their file path.
    """
    mapping = {}
    for py_file in glob.glob(os.path.join(dir_path, "*.py")):
        # Skip package markers and anything that is not a regular file.
        if not os.path.isfile(py_file) or py_file.endswith('__init__.py'):
            continue
        with open(py_file) as source:
            tree = ast.parse(source.read())
        for node in tree.body:
            if isinstance(node, ast.ClassDef):
                mapping[node.name] = py_file
    return mapping
def update_domain(
        uuid, name=None, disabled=None, project_id=None, user_id=None):
    """Update an existing domain.

    Fetches the domain by uuid, applies every non-None keyword argument,
    persists the record, and returns it.
    """
    domain = get_domain(uuid=uuid)
    updates = {
        'disabled': disabled,
        'name': name,
        'project_id': project_id,
        'user_id': user_id,
    }
    # Only overwrite fields the caller explicitly supplied.
    for field, value in updates.items():
        if value is not None:
            domain[field] = value
    domain.save()
    return domain
def ja_of(tree: Tree) -> str:
    """tree string in the Japanese CCGBank's format

    Args:
        tree (Tree): tree object
    Returns:
        str: tree string in Japanese CCGBank's format
    """
    def _join_fields(token, keys):
        # Collect non-placeholder ('*') token fields; '_' marks "no value".
        values = [token.get(key, '*') for key in keys]
        values = [value for value in values if value != '*']
        return '-'.join(values) if values else '_'

    def _serialize(node):
        if node.is_leaf:
            word = normalize(node.word)
            pos = _join_fields(node.token, ('pos', 'pos1', 'pos2', 'pos3'))
            inflection = _join_fields(
                node.token, ('inflectionForm', 'inflectionType'))
            return f'{{{node.cat} {word}/{word}/{pos}/{inflection}}}'
        children = ' '.join(_serialize(child) for child in node.children)
        return f'{{{node.op_symbol} {node.cat} {children}}}'

    return _serialize(tree)
def test():
    """Test MagicGrid's default text alignment rules.

    Opens a Tk window containing one row per widget type and one column per
    test value, then fills the Empty/None cells after a 5 s delay to show
    how late-added text is aligned. Blocks in the Tk mainloop until closed.
    """
    root = Tk()
    root.title("Text Alignment Rules Test")
    # Allow closing the window from the keyboard.
    for seq in "<Escape>", "<Control-w>", "<Control-q>":
        root.bind(seq, lambda event: root.destroy())
    mg = MagicGrid(root)
    mg.pack(side="top", expand=1, fill="both")
    # Format: label, value
    test_values = [
        ("Empty", ""),
        ("None", None),
        ("String", "Test"),
        ("Numeric", 8675309),
    ]
    labels = [item[0] for item in test_values]
    values = [item[1] for item in test_values]
    # Format: label, add_widget_function
    widgets = [
        ("Label", mg.add_cell),
        ("Entry", mg.add_widget_entry),
        ("Spinbox", mg.add_widget_spinbox),
        ("Checkbutton", mg.add_widget_checkbutton),
        ("Radiobutton", mg.add_widget_radiobutton),
        ("Button", mg.add_widget_button),
    ]
    # Header row identifies each type of test value
    header_cells = mg.add_header("", *labels,
                                 anchor="center",
                                 justify="center")
    for label, add_widget in widgets:
        # Label for the widget type
        mg.add_cell(label, width=12)
        for value in values:
            # Create a test widget
            widget = add_widget(value, width=20)
            # Fill Empty and None cells after a delay so we can see
            # how text added after the fact would be aligned
            if (isinstance(widget, Checkbutton)
                    or isinstance(widget, Radiobutton)):
                # Check/radio buttons carry no free text; nothing to refill.
                continue
            elif value == "":
                root.after(5000, lambda widget=widget:
                           set_widget_text(widget, "Initially Empty"))
            elif value is None:
                root.after(5000, lambda widget=widget:
                           set_widget_text(widget, "Initially None"))
        mg.end_row()
    root.mainloop()
def cut_flowlines_at_points(flowlines, joins, points, next_lineID):
    """General method for cutting flowlines at points and updating joins.

    Only new flowlines are returned; any that are not cut by points are omitted.

    Parameters
    ----------
    flowlines : GeoDataFrame
    joins : DataFrame
        flowline joins
    points : ndarray of MultiPoint or Point geometries
        expected to match to flowlines
    next_lineID : int
        id of next flowline to be created

    Returns
    -------
    (GeoDataFrame, DataFrame, ndarray)
        new flowlines, updated joins, remove_ids (original flowline IDs that
        need to be removed before merging in returned flowlines)
    """
    flowlines = flowlines.copy()
    joins = joins.copy()
    flowlines["geometry"] = cut_lines_at_multipoints(
        flowlines.geometry.values.data, points
    )
    # discard any that have only one segment; they weren't split and we don't want
    # to update them. Split the rest into parts.
    ix = pg.get_num_geometries(flowlines.geometry.values.data) > 1
    flowlines = explode(
        flowlines.loc[ix].reset_index().rename(columns={"lineID": "origLineID"})
    ).reset_index(drop=True)
    # recalculate length and sinuosity
    flowlines["length"] = pg.length(flowlines.geometry.values.data).astype("float32")
    flowlines["sinuosity"] = calculate_sinuosity(flowlines.geometry.values.data).astype(
        "float32"
    )
    # calculate new ID
    flowlines["lineID"] = (flowlines.index + next_lineID).astype("uint32")
    ### Update flowline joins
    # transform new lines to create new joins at the upstream / downstream most
    # points of the original line
    grouped_ids = flowlines.groupby("origLineID").lineID
    # the first new line per original line is the furthest upstream, so use its
    # ID as the new downstream ID for anything that had this origLineID as its downstream
    first = grouped_ids.first().rename("new_downstream_id")
    # the last new line per original line is the furthest downstream...
    last = grouped_ids.last().rename("new_upstream_id")
    # Update existing joins with the new lineIDs we created at the upstream or downstream
    # ends of segments we just created
    joins = update_joins(
        joins, first, last, downstream_col="downstream_id", upstream_col="upstream_id",
    )
    ### Create new line joins for any that weren't inserted above
    # Transform all groups of new line IDs per original lineID
    # into joins structure
    atts = (
        flowlines.groupby("origLineID")[["NHDPlusID", "loop", "HUC4"]]
        .first()
        .rename(columns={"NHDPlusID": "upstream"})
    )

    def pairs(ids):
        # upstream/downstream sides of each new join: consecutive new line IDs
        return pd.Series(zip(ids[:-1], ids[1:]))

    new_joins = (
        grouped_ids.apply(pairs)
        .apply(pd.Series)
        .reset_index()
        .rename(columns={0: "upstream_id", 1: "downstream_id"})
        .join(atts, on="origLineID")
    )
    # NHDPlusID is same for both sides
    new_joins["downstream"] = new_joins.upstream
    new_joins["type"] = "internal"
    new_joins["marine"] = False
    new_joins = new_joins[
        [
            "upstream",
            "downstream",
            "upstream_id",
            "downstream_id",
            "type",
            "loop",
            "marine",
            "HUC4",
        ]
    ]
    # FIX: DataFrame.append was deprecated and removed in pandas 2.0;
    # pd.concat with the same ignore_index/sort arguments is equivalent.
    joins = (
        pd.concat([joins, new_joins], ignore_index=True, sort=False)
        .sort_values(["downstream_id", "upstream_id"])
        .reset_index(drop=True)
    )
    remove_ids = flowlines.origLineID.unique()
    flowlines = flowlines.drop(columns=["origLineID"]).set_index("lineID")
    return flowlines, joins, remove_ids
def export(gen, directory, file_prefix='{uid}-', **kwargs):
    """
    Export a stream of documents to nxstxm_baseline.

    .. note::
        This can alternatively write data to generic buffers rather than
        creating files on disk; see ``directory`` below.

    Parameters
    ----------
    gen : generator
        Expected to yield ``(name, document)`` pairs.
    directory : string, Path or Manager.
        For basic uses, the path to the output directory as a string or Path
        (``''`` for the current working directory). Advanced applications may
        pass an instance of ``suitcase.utils.MemoryBufferManager``,
        ``suitcase.utils.MultiFileManager``, or any object implementing that
        interface to direct output to a memory buffer, network socket, or
        other writable buffer. See https://nsls-ii.github.io/suitcase.
    file_prefix : str, optional
        The first part of each generated filename. May include templates such
        as ``{proposal_id}-{sample_name}-`` populated from the RunStart
        document. Defaults to ``{uid}-``, which is guaranteed to be present
        and unique.
    **kwargs : kwargs
        Keyword arguments passed through to the underlying I/O library.

    Returns
    -------
    artifacts : dict
        dict mapping the 'labels' to lists of file names (or, in general,
        whatever resources are produced by the Manager)

    Examples
    --------
    >>> export(gen, '')
    >>> export(gen, '', '{plan_name}-{motors}-')
    >>> export(gen, '', '{time:%%Y-%%m-%%d_%%H:%%M}-')
    >>> export(gen, '/path/to/my_usb_stick')
    """
    with Serializer(directory, file_prefix, **kwargs) as serializer:
        for name_doc_pair in gen:
            serializer(*name_doc_pair)
    return serializer.artifacts
def get_test_dataset(path):
    """
    Gets a dataset that only has features.

    The file is resolved relative to the current working directory as
    ``../datasets/<path>`` and is expected to contain comma-separated
    integers, one row per line, with a trailing newline.

    :param string path: The path to the dataset file after /datasets/
    :return: list of integer feature rows
    """
    dataset_path = os.path.abspath(os.path.join(os.getcwd(), "../datasets/", path))
    with open(dataset_path, "r") as file:
        # [:-1] drops the empty string produced by the trailing newline.
        rows = file.read().split('\n')[:-1]
    # Parse each CSV row directly; the original built an intermediate list
    # and then made a pointless element-wise copy of it.
    return [[int(element) for element in row.split(',')] for row in rows]
def services():
    """
    Returns the grader-notebook list used as services in jhub

    Response: json
    example:
    ```
    {
      services: [{"name":"<course-id", "url": "http://grader-<course-id>:8888"...}],
      groups: {"formgrade-<course-id>": ["grader-<course-id>"] }
    }
    ```
    """
    services = GraderService.query.all()
    # format a json
    services_resp = []
    groups_resp = {}
    for s in services:
        # NOTE(review): api_token is included in the response payload —
        # confirm this endpoint is restricted to the hub/admin caller.
        services_resp.append({
            'name': s.name,
            'url': s.url,
            'oauth_no_confirm': s.oauth_no_confirm,
            'admin': s.admin,
            'api_token': s.api_token
        })
        # add the jhub user group
        groups_resp.update({f'formgrade-{s.course_id}': [f'grader-{s.course_id}']})
    return jsonify(services=services_resp, groups=groups_resp)
def chunks(l, k):
    """
    Take a list, l, and create k sublists.

    The first ``len(l) % k`` sublists get one extra element so sizes differ
    by at most one; concatenating the result reproduces l.
    """
    size, extra = divmod(len(l), k)
    result = []
    start = 0
    for i in range(k):
        end = start + size + (1 if i < extra else 0)
        result.append(l[start:end])
        start = end
    return result
def bfs(adj, src, dst, cache=None):
    """BFS search from source to destination. Check whether a path exists, does
    not return the actual path.

    Works on directed acyclic graphs where we assume that there is no path to
    the node itself.

    Args:
        adj: Adjacency matrix (num_nodes x num_nodes, entries 0/1).
        src: Source node index, 0-based.
        dst: Destination node index, 0-based.
        cache: 2D matrix, cache[i, j] = 1 indicates a path exists between
            nodes i and j, -1 indicates no path exists, 0 indicates unknown.
            Updated in place with the result of this search.

    Returns:
        found: True iff a path exists from source to destination.
    """
    from collections import deque  # O(1) popleft vs. O(n) list.pop(0)
    if src == dst:
        return False
    num_nodes = adj.shape[0]
    if num_nodes == 0:
        return False
    if src >= num_nodes or dst >= num_nodes:
        raise Exception("Index must be smaller than the number of nodes.")
    if num_nodes == 1:
        return False
    # parent[i] >= 0 means node i was visited, and records its BFS parent.
    parent = np.zeros([num_nodes], dtype=np.int64) - 1
    nodes_to_visit = deque([src])
    found = False
    # BFS loop.
    while nodes_to_visit:
        cur = nodes_to_visit.popleft()
        if cur == dst:
            found = True
            break
        if cache is not None:
            if cache[cur, dst] == 1:
                found = True
                break
            elif cache[cur, dst] == -1:
                # Known dead end; prune this branch.
                continue
        for nbr in range(num_nodes):
            if adj[cur, nbr] == 1 and parent[nbr] == -1:
                nodes_to_visit.append(nbr)
                parent[nbr] = cur
    if not found:
        # Every visited node provably cannot reach dst; record that.
        if cache is not None:
            for node in range(num_nodes):
                if parent[node] >= 0:
                    cache[node, dst] = -1
            cache[src, dst] = -1
        return False
    else:
        # Backtrack the found path and record positive reachability.
        if cache is not None:
            while cur != src:
                cur = parent[cur]
                cache[cur, dst] = 1
            cache[src, dst] = 1
        return True
def get_source_ast(name: str) -> _ast.Module:
    """
    Parse the file at *name* and return its AST module node.
    """
    with open(name, "r") as source_file:
        return ast.parse(source_file.read())
def fit_grain_FF_reduced(grain_id):
    """
    Perform non-linear least-square fit for the specified grain.

    Parameters
    ----------
    grain_id : int
        The grain id.

    Returns
    -------
    grain_id : int
        The grain id.
    completeness : float
        The ratio of predicted to measured (observed) Bragg reflections.
    chisq: float
        Figure of merit describing the sum of squared residuals for each Bragg
        reflection in the form (x, y, omega) normalized by the total number of
        degrees of freedom.
    grain_params : array_like
        The optimized grain parameters
        [<orientation [3]>, <centroid [3]> <inverse stretch [6]>].

    Notes
    -----
    input parameters are
    [plane_data, instrument, imgser_dict,
    tth_tol, eta_tol, ome_tol, npdiv, threshold]
    """
    # Shared state for multiprocessing workers arrives via module-level paramMP.
    grains_table = paramMP['grains_table']
    plane_data = paramMP['plane_data']
    instrument = paramMP['instrument']
    imgser_dict = paramMP['imgser_dict']
    tth_tol = paramMP['tth_tol']
    eta_tol = paramMP['eta_tol']
    ome_tol = paramMP['ome_tol']
    npdiv = paramMP['npdiv']
    refit = paramMP['refit']
    threshold = paramMP['threshold']
    eta_ranges = paramMP['eta_ranges']
    ome_period = paramMP['ome_period']
    analysis_dirname = paramMP['analysis_dirname']
    prefix = paramMP['spots_filename']
    spots_filename = None if prefix is None else prefix % grain_id
    grain = grains_table[grain_id]
    grain_params = grain[3:15]
    # Successive passes with tightening tolerances; each pass refits from the
    # previous pass's solution.
    for tols in zip(tth_tol, eta_tol, ome_tol):
        complvec, results = instrument.pull_spots(
            plane_data, grain_params,
            imgser_dict,
            tth_tol=tols[0],
            eta_tol=tols[1],
            ome_tol=tols[2],
            npdiv=npdiv, threshold=threshold,
            eta_ranges=eta_ranges,
            ome_period=ome_period,
            dirname=analysis_dirname, filename=spots_filename,
            return_spot_list=False,
            quiet=True, check_only=False, interp='nearest')
        # ======= DETERMINE VALID REFLECTIONS =======
        culled_results = dict.fromkeys(results)
        num_refl_tot = 0
        num_refl_valid = 0
        for det_key in culled_results:
            panel = instrument.detectors[det_key]
            '''
            grab panel results:
                peak_id
                hkl_id
                hkl
                sum_int
                max_int,
                pred_angs,
                meas_angs,
                meas_xy
            '''
            presults = results[det_key]
            nrefl = len(presults)
            # make data arrays
            refl_ids = np.empty(nrefl)
            max_int = np.empty(nrefl)
            for i, spot_data in enumerate(presults):
                refl_ids[i] = spot_data[0]
                max_int[i] = spot_data[4]
            valid_refl_ids = refl_ids >= 0
            # find unsaturated spots on this panel
            unsat_spots = np.ones(len(valid_refl_ids), dtype=bool)
            if panel.saturation_level is not None:
                unsat_spots[valid_refl_ids] = \
                    max_int[valid_refl_ids] < panel.saturation_level
            idx = np.logical_and(valid_refl_ids, unsat_spots)
            # if an overlap table has been written, load it and use it
            overlaps = np.zeros_like(idx, dtype=bool)
            try:
                ot = np.load(
                    os.path.join(
                        analysis_dirname, os.path.join(
                            det_key, 'overlap_table.npz'
                        )
                    )
                )
                for key in ot.keys():
                    for this_table in ot[key]:
                        these_overlaps = np.where(
                            this_table[:, 0] == grain_id)[0]
                        if len(these_overlaps) > 0:
                            mark_these = np.array(
                                this_table[these_overlaps, 1], dtype=int
                            )
                            otidx = [
                                np.where(refl_ids == mt)[0]
                                for mt in mark_these
                            ]
                            overlaps[otidx] = True
                idx = np.logical_and(idx, ~overlaps)
                # logger.info("found overlap table for '%s'", det_key)
            except(IOError, IndexError):
                # logger.info("no overlap table found for '%s'", det_key)
                pass
            # attach to proper dict entry
            # FIXME: want to avoid looping again here
            culled_results[det_key] = [presults[i] for i in np.where(idx)[0]]
            num_refl_tot += len(valid_refl_ids)
            num_refl_valid += sum(valid_refl_ids)
            pass  # now we have culled data
        # CAVEAT: completeness from pullspots only; incl saturated and overlaps
        # <JVB 2015-12-15>
        try:
            completeness = num_refl_valid / float(num_refl_tot)
        except(ZeroDivisionError):
            raise RuntimeError(
                "simulated number of relfections is 0; "
                + "check instrument config or grain parameters"
            )
        # ======= DO LEASTSQ FIT =======
        if num_refl_valid <= 12:  # not enough reflections to fit... exit
            return grain_id, completeness, np.inf, grain_params
        else:
            grain_params = fitGrain(
                grain_params, instrument, culled_results,
                plane_data.latVecOps['B'], plane_data.wavelength
            )
            # get chisq
            # TODO: do this while evaluating fit???
            chisq = objFuncFitGrain(
                grain_params[gFlag_ref], grain_params, gFlag_ref,
                instrument,
                culled_results,
                plane_data.latVecOps['B'], plane_data.wavelength,
                ome_period,
                simOnly=False, return_value_flag=2)
            pass  # end conditional on fit
        pass  # end tolerance looping
    if refit is not None:
        # first get calculated x, y, ome from previous solution
        # NOTE: this result is a dict
        xyo_det_fit_dict = objFuncFitGrain(
            grain_params[gFlag_ref], grain_params, gFlag_ref,
            instrument,
            culled_results,
            plane_data.latVecOps['B'], plane_data.wavelength,
            ome_period,
            simOnly=True, return_value_flag=2)
        # make dict to contain new culled results
        culled_results_r = dict.fromkeys(culled_results)
        num_refl_valid = 0
        for det_key in culled_results_r:
            presults = culled_results[det_key]
            if not presults:
                culled_results_r[det_key] = []
                continue
            # BUGFIX: `panel` previously leaked in from the culling loop above
            # and could point at the wrong detector here (so the wrong pixel
            # sizes were used for the refit tolerances); rebind it per det_key.
            panel = instrument.detectors[det_key]
            ims = imgser_dict[det_key]
            ome_step = sum(np.r_[-1, 1]*ims.metadata['omega'][0, :])
            xyo_det = np.atleast_2d(
                np.vstack([np.r_[x[7], x[6][-1]] for x in presults])
            )
            xyo_det_fit = xyo_det_fit_dict[det_key]
            xpix_tol = refit[0]*panel.pixel_size_col
            ypix_tol = refit[0]*panel.pixel_size_row
            fome_tol = refit[1]*ome_step
            # define difference vectors for spot fits
            x_diff = abs(xyo_det[:, 0] - xyo_det_fit['calc_xy'][:, 0])
            y_diff = abs(xyo_det[:, 1] - xyo_det_fit['calc_xy'][:, 1])
            ome_diff = np.degrees(
                xfcapi.angularDifference(xyo_det[:, 2],
                                         xyo_det_fit['calc_omes'])
            )
            # filter out reflections with centroids more than
            # a pixel and delta omega away from predicted value
            idx_new = np.logical_and(
                x_diff <= xpix_tol,
                np.logical_and(y_diff <= ypix_tol,
                               ome_diff <= fome_tol)
            )
            # attach to proper dict entry
            culled_results_r[det_key] = [
                presults[i] for i in np.where(idx_new)[0]
            ]
            num_refl_valid += sum(idx_new)
            pass
        # only execute fit if left with enough reflections
        if num_refl_valid > 12:
            grain_params = fitGrain(
                grain_params, instrument, culled_results_r,
                plane_data.latVecOps['B'], plane_data.wavelength
            )
            # get chisq
            # TODO: do this while evaluating fit???
            chisq = objFuncFitGrain(
                grain_params[gFlag_ref],
                grain_params, gFlag_ref,
                instrument,
                culled_results_r,
                plane_data.latVecOps['B'], plane_data.wavelength,
                ome_period,
                simOnly=False, return_value_flag=2)
            pass
        pass  # close refit conditional
    return grain_id, completeness, chisq, grain_params
def floor(base):
    """Return the largest integer <= base, accepting any float-convertible value."""
    value = float(base)
    return math.floor(value)
def show_mask(bot, trigger):
    """Show the topic mask for the current channel.

    Silently ignores callers below OP privilege. Requires a configured bot
    database; otherwise reports that it cannot comply.
    """
    if bot.privileges[trigger.sender][trigger.nick] < OP:
        return
    if not bot.db:
        bot.say("I'm afraid I can't do that.")
    elif trigger.sender.lower() in bot.db.preferences:
        bot.say(bot.db.preferences.get(trigger.sender.lower(), 'topic_mask'))
    else:
        # NOTE(review): this literally says "%s" to the channel — looks like a
        # missing format argument (probably meant to show a default mask);
        # confirm the intended fallback message before changing it.
        bot.say("%s")
def test_atomic_base64_binary_pattern_2_nistxml_sv_iv_atomic_base64_binary_pattern_3_1(mode, save_output, output_format):
    """
    Type atomic/base64Binary is restricted by facet pattern with value
    [a-zA-Z0-9+/]{64}.
    """
    # Generated NIST conformance case: binds the schema/instance pair and
    # checks the produced bindings round-trip under the given mode/format.
    assert_bindings(
        schema="nistData/atomic/base64Binary/Schema+Instance/NISTSchema-SV-IV-atomic-base64Binary-pattern-3.xsd",
        instance="nistData/atomic/base64Binary/Schema+Instance/NISTXML-SV-IV-atomic-base64Binary-pattern-3-1.xml",
        class_name="NistschemaSvIvAtomicBase64BinaryPattern3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_list_g_year_white_space_nistxml_sv_iv_list_g_year_white_space_1_3(mode, save_output, output_format):
    """
    Type list/gYear is restricted by facet whiteSpace with value collapse.
    """
    # Generated NIST conformance case: binds the schema/instance pair and
    # checks the produced bindings round-trip under the given mode/format.
    assert_bindings(
        schema="nistData/list/gYear/Schema+Instance/NISTSchema-SV-IV-list-gYear-whiteSpace-1.xsd",
        instance="nistData/list/gYear/Schema+Instance/NISTXML-SV-IV-list-gYear-whiteSpace-1-3.xml",
        class_name="NistschemaSvIvListGYearWhiteSpace1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def TranslateSecureTagsForFirewallPolicy(client, secure_tags):
  """Returns a list of firewall policy rule secure tags, translating namespaced tags if needed.

  Args:
    client: compute client
    secure_tags: array of secure tag values

  Returns:
    List of firewall policy rule secure tags
  """
  translated = []
  for tag in secure_tags:
    # Tags already in canonical 'tagValues/...' form pass through untouched;
    # namespaced names are resolved to their canonical tag value first.
    if not tag.startswith('tagValues/'):
      tag = tag_utils.GetTagValueFromNamespacedName(tag).name
    translated.append(client.messages.FirewallPolicyRuleSecureTag(name=tag))
  return translated
def _find_files(directory, pattern):
"""Searches a directory finding all files and dirs matching unix pattern.
Args:
directory : (str)
The directory to search in.
patterns : (str)
A unix style pattern to search for. This should be the same style
of pattern that fnmatch or glob would take, and not regex.
Returns:
[str]
A list of file and directory names matching one of the patterns is
returned. The file names are relative to the directory we were
given.
Raises:
N/A
"""
files = [item for item in os.listdir(directory) if fnmatch(item, pattern)]
files.sort(key=lambda v: v.lower())
return files | 34,227 |
def AddImportDestinationFlag(parser, folder):
  """Adds a --destination flag for a storage import command to a parser.

  Args:
    parser: argparse.ArgumentParser, the parser to which to add the flag
    folder: str, the top-level folder in the bucket into which the import
      command will write. Should not contain any slashes. For example, 'dags'.
  """
  # Build the help text up front so the Argument construction stays short.
  destination_help = """\
      An optional subdirectory under the {}/ directory in the environment's
      Cloud Storage bucket into which to import files. May contain forward
      slashes to delimit multiple levels of subdirectory nesting, but should not
      contain leading or trailing slashes. If the DESTINATION does not exist, it
      will be created.
  """.format(folder)
  destination_flag = base.Argument(
      '--destination',
      metavar='DESTINATION',
      required=False,
      help=destination_help)
  destination_flag.AddToParser(parser)
def memoize(func: Callable):
    """
    A decorator that caches a function's results keyed by its arguments.

    Repeating a call with the same arguments returns the stored result
    instead of recomputing it. More permissive than
    :func:`functools.lru_cache` in which kinds of arguments can be cached,
    at the price of a larger per-call overhead.
    """
    cache: Dict[Any, Any] = {}

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        key = _hash_args_kwargs(*args, **kwargs)
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]

    return wrapper
def mini_batch(positive_rdd, negative_rdd, num_iterations):
    """Build the mini-batch RDD (positive plus negative rows) for one iteration."""
    # Mini-batch sizing: 1 positive per 45 negatives within batch_size rows.
    pos_num = int(batch_size / 46)
    neg_num = pos_num * 45
    # Index of the window of rows that belongs to this iteration.
    i = num_iterations % int(74 / pos_num)

    def in_window(index, width):
        return i * width <= index < (i + 1) * width

    positives = positive_rdd \
        .filter(lambda x: in_window(x[1], pos_num)) \
        .map(lambda x: (x[0][0], x[0][1]))
    negatives = negative_rdd \
        .filter(lambda x: in_window(x[1], neg_num)) \
        .map(lambda x: (x[0][0], x[0][1]))
    return positives.union(negatives)
def draw_polygon(img, max_sides=8, min_len=32, min_label_len=64):
    """ Draw a polygon with a random number of corners and return the position
    of the junctions + line map.
    Parameters:
        max_sides: maximal number of sides + 1
        min_len: minimal accepted segment length; an int is taken as pixels,
                 a float <= 1. as a fraction of the smaller image dimension.
        min_label_len: minimal length for a segment to be labeled; same
                 int/float convention as min_len.
    Returns a dict with "points" (unique junction coordinates, or None) and
    "line_map" (adjacency map between junctions, or None).
    NOTE: draws the polygon into img in place and recurses until an
    acceptable polygon is produced.
    """
    num_corners = random_state.randint(3, max_sides)
    min_dim = min(img.shape[0], img.shape[1])
    rad = max(random_state.rand() * min_dim / 2, min_dim / 10)
    # Center of a circle
    x = random_state.randint(rad, img.shape[1] - rad)
    y = random_state.randint(rad, img.shape[0] - rad)
    # Convert length constrain to pixel if given float number
    if isinstance(min_len, float) and min_len <= 1.:
        min_len = int(min_dim * min_len)
    if isinstance(min_label_len, float) and min_label_len <= 1.:
        min_label_len = int(min_dim * min_label_len)
    # Sample num_corners points inside the circle
    slices = np.linspace(0, 2 * math.pi, num_corners + 1)
    angles = [slices[i] + random_state.rand() * (slices[i+1] - slices[i])
              for i in range(num_corners)]
    points = np.array(
        [[int(x + max(random_state.rand(), 0.4) * rad * math.cos(a)),
          int(y + max(random_state.rand(), 0.4) * rad * math.sin(a))]
         for a in angles])
    # Filter the points that are too close or that have an angle too flat
    norms = [np.linalg.norm(points[(i-1) % num_corners, :]
                            - points[i, :]) for i in range(num_corners)]
    mask = np.array(norms) > 0.01
    points = points[mask, :]
    num_corners = points.shape[0]
    corner_angles = [angle_between_vectors(points[(i-1) % num_corners, :] -
                                           points[i, :],
                                           points[(i+1) % num_corners, :] -
                                           points[i, :])
                     for i in range(num_corners)]
    mask = np.array(corner_angles) < (2 * math.pi / 3)
    points = points[mask, :]
    num_corners = points.shape[0]
    # Get junction pairs from points
    segments = np.zeros([0, 4])
    # Used to record all the segments no matter we are going to label it or not.
    segments_raw = np.zeros([0, 4])
    for idx in range(num_corners):
        if idx == (num_corners - 1):
            # Last side closes the polygon back to the first corner.
            p1 = points[idx]
            p2 = points[0]
        else:
            p1 = points[idx]
            p2 = points[idx + 1]
        segment = np.concatenate((p1, p2), axis=0)
        # Only record the segments longer than min_label_len
        seg_len = np.sqrt(np.sum((p1 - p2) ** 2))
        if seg_len >= min_label_len:
            segments = np.concatenate((segments, segment[None, ...]), axis=0)
        segments_raw = np.concatenate((segments_raw, segment[None, ...]),
                                      axis=0)
    # If not enough corner, just regenerate one
    if (num_corners < 3) or check_segment_len(segments_raw, min_len):
        return draw_polygon(img, max_sides, min_len, min_label_len)
    # Get junctions from segments
    junctions_all = np.concatenate((segments[:, :2], segments[:, 2:]), axis=0)
    if junctions_all.shape[0] == 0:
        junc_points = None
        line_map = None
    else:
        junc_points = np.unique(junctions_all, axis=0)
        # Get the line map
        line_map = get_line_map(junc_points, segments)
    corners = points.reshape((-1, 1, 2))
    col = get_random_color(int(np.mean(img)))
    cv.fillPoly(img, [corners], col)
    return {
        "points": junc_points,
        "line_map": line_map
    }
def order_budget_update(request, order_id):
    """
    Validate the posted budget and apply it to the given order.

    Responds with HTTP 202 carrying the updated order serialization.
    """
    budget_serializer = OrderBudgetSerializer(data=request.data)
    # raise_exception=True turns validation failures into a 400 response.
    if budget_serializer.is_valid(raise_exception=True):
        order = get_object_or_404(Order, pk=order_id)
        order.budget = budget_serializer.validated_data['budget']
        order.save()
        return Response(OrderSerializer(order).data,
                        status=status.HTTP_202_ACCEPTED)
def analyse_readability_metrics(article_text):
    """
    Use the textstat library to report multiple readability measures.
    The readability metrics analysed are:
    * The Flesch Reading Ease Score. A score from 100 (very easy to read) to 0 (very confusing).
    * The grade score using the Flesch-Kincaid Grade Formula. For example a score of 9.3 means that a ninth grader would be able to read the document.
    * The FOG index of the given text
    * The SMOG index of the given text
    * The ARI(Automated Readability Index) which outputs a number that approximates the grade level needed to comprehend the text. For example if the ARI is 6.5, then the grade level to comprehend the text is 6th to 7th grade
    * The grade level of the text using the Coleman-Liau Formula
    * The grade level using the Lisear Write Formula
    * The grade level using the New Dale-Chall Formula.
    :param article_text: The article text to operate on.
    :return: An object containing all measures, or None (implicitly) when
        article_text is the empty string.
    """
    sylls = textstat.syllable_count(article_text)
    words = textstat.lexicon_count(article_text)
    sents = textstat.sentence_count(article_text)
    # NOTE(review): for article_text == "" the function falls through and
    # implicitly returns None — callers must handle that case.
    if article_text != "":
        """
        returns the Flesch Reading Ease Score. Following table is helpful to access the ease of readability in a document.
        * 90-100 : Very Easy
        * 80-89 : Easy
        * 70-79 : Fairly Easy
        * 60-69 : Standard
        * 50-59 : Fairly Difficult
        * 30-49 : Difficult
        * 0-29 : Very Confusing
        """
        flesch = textstat.flesch_reading_ease(article_text)
        """
        returns the grade score using the Flesch-Kincaid Grade Formula.
        For example a score of 9.3 means that a ninth grader would be able to read the document.
        """
        flesch_k = textstat.flesch_kincaid_grade(article_text)
        """
        returns the FOG index of the given text.
        """
        fog = textstat.gunning_fog(article_text)
        """
        return the SMOG index of the given text.
        """
        smog = textstat.smog_index(article_text)
        """
        returns the ARI(Automated Readability Index) which outputs a number that approximates the grade level needed to comprehend the text.
        For example if the ARI is 6.5, then the grade level to comprehend the text is 6th to 7th grade
        """
        ari = textstat.automated_readability_index(article_text)
        """
        returns the grade level of the text using the Coleman-Liau Formula
        """
        coleman_l = textstat.coleman_liau_index(article_text)
        """
        returns the grade level using the Lisear Write Formula
        """
        linsear_write = textstat.linsear_write_formula(article_text)
        """
        Different from other tests, since it uses a lookup table of most commonly used 3000 english words.
        Thus it returns the grade level using the New Dale-Chall Formula.
        """
        dale_chall = textstat.dale_chall_readability_score(article_text)
        """
        Based upon all the above tests returns the best grade level under which the given text belongs to.
        """
        overall_consensus = textstat.text_standard(article_text)
        return {
            "syllable_count": sylls,
            "word_count": words,
            "sentence_count": sents,
            "flesch_reading_ease": flesch,
            "flesch_kincaid_grade": flesch_k,
            "gunning_fog": fog,
            "smog_index": smog,
            "automated_readability_index": ari,
            "coleman_liau_index": coleman_l,
            "linsear_write_formula": linsear_write,
            "dale_chall_readability_score": dale_chall,
            "overall_consensus_grade": overall_consensus
        }
def sbsplot(spec, output, show_lines, transitions, z,
            x_axis, x_col, x_min, x_max, y_axis, y_col, identifier):
    """
    Plot one page per subband spectrum into a multi-page PDF.

    spec may be a glob pattern matching spectrum files, or a single file
    that itself lists spectrum file names. For each spectrum, columns
    x_col/y_col are plotted with labels x_axis/y_axis; when show_lines is
    set, the comma-separated transitions are marked at their frequencies
    (redshifted by z). identifier is the prefix used to extract the subband
    name from each file name. Output is written to the PDF file `output`.
    """
    pdf = PdfPages(output)
    specs = glob.glob(spec)
    crrls.natural_sort(specs)
    # If only one file is passed, it probably contains a list
    if len(specs) == 1:
        specs = np.genfromtxt(specs[0], dtype=str)
        try:
            specs.shape[1]
            specs = glob.glob(spec)
        # Or a single file is to be plotted
        except IndexError:
            pass
    for s in specs:
        data = np.loadtxt(s)
        x = data[:,x_col]
        y = data[:,y_col]
        # Determine the subband name
        try:
            sb = re.findall('{0}\d+'.format(identifier), s)[0]
        except IndexError:
            print("Could not find SB number.")
            print("Will use the file name.")
            sb = s
        # Begin ploting
        fig = plt.figure(frameon=False)
        fig.suptitle(sb)
        ax = fig.add_subplot(1, 1, 1, adjustable='datalim')
        ax.step(x, y, 'k-', lw=1, where='mid')
        # Mark the transitions?
        if show_lines:
            trans = transitions.split(',')
            for o,t in enumerate(trans):
                # Determine whether the x axis is increasing or decreasing.
                if x[~np.isnan(x)][0] > x[~np.isnan(x)][1]:
                    r = -1
                else:
                    r = 1
                qns, freqs = crrls.find_lines_sb(x[~np.isnan(x)][::r], t, z)
                # Place the labels at the mean y value of the spectrum.
                ylbl = np.ma.masked_invalid(y).mean()
                for label, i, j in zip(qns, freqs, [ylbl]*len(freqs)):
                    plt.annotate(label, xy=(i, j), xytext=(-10, 15*o+5),
                                 size='x-small', textcoords='offset points',
                                 ha='right', va='bottom',
                                 bbox=dict(boxstyle='round,pad=0.5',
                                           fc='yellow', alpha=0.5),
                                 arrowprops=dict(arrowstyle='->',
                                                 connectionstyle='arc3,rad=0'))
                    #if len(qns) > 0:
                    plt.annotate(tprops[t][0], xy=(i,j), xytext=(-4,0),
                                 textcoords='offset points', size='xx-small')
                plt.plot(freqs, [ylbl]*len(freqs), marker='|', ls='none', ms=25,
                         c=tprops[t][1], mew=8, alpha=0.8)
        ax.set_xlabel(x_axis)
        ax.set_ylabel(y_axis)
        if x_max:
            ax.set_xlim(x_min, x_max)
        pdf.savefig(fig)
        plt.close(fig)
    pdf.close()
def dx(data):
    """
    Derivative by central difference.

    Interior points use the two-sided difference (data[i+1] - data[i-1],
    not divided by the step); each edge uses the one-sided difference
    between its two nearest points. Operates along the last axis.

    Parameters
    ----------
    data : ndarray
        Array of NMR data.

    Returns
    -------
    ndata : ndarray
        Derivative of NMR data.
    """
    deriv = np.empty_like(data)
    deriv[..., 0] = data[..., 1] - data[..., 0]      # leading edge
    deriv[..., -1] = data[..., -1] - data[..., -2]   # trailing edge
    deriv[..., 1:-1] = data[..., 2:] - data[..., :-2]  # central difference
    return deriv
def grad(w):
    """Gradient ("dao ham") of the mean-squared-error loss w.r.t. *w*.

    Uses the module-level design matrix ``Xbar`` and target vector ``y``.
    """
    N = Xbar.shape[0]
    return 1/N * Xbar.T.dot(Xbar.dot(w) - y)
def test_sanity_download(cfg):
    """Check that *cfg* is valid for downloading (store_path must exist)."""
    repo_root = utils.get_root() + '/'
    store_path = cfg['DOWNLOAD']['store_path']
    full_path = repo_root + store_path
    if not os.path.exists(full_path):
        utils.throw_error(print_prefix + 'cannot locate:' + full_path)
def main(inp_file, exp_file, out_file, th=5, motif_file="", motifout_file="", use_vcf=True):
    """
    If use_vcf true then:
        For a given motif annotated vcf file (already run through motifs.py),
        remove all motif matches for TFs that are
        not expressed in at least one sample above the threshold.
    Else:
        For a given vcf file read the samples,
        then filter the motif_file, only outputing to motifout_file those items
        not expressed in at least one sample above the threshold

    Args:
        -i (str): Name of sorted variant file to process.
        -o (str): Name of output file to be created.
        -e (str): Name of expression file.
        -th (float): TFs are considered expressed if they are above this threshold.
        -motif_file (str): Motif file to filter when use_vcf is False.
        -motifout_file (str): Filtered motif output when use_vcf is False.
        -use_vcf (bool): Selects between the two modes described above.
    """
    if use_vcf:
        output_f = open(out_file, "w")
        with open(inp_file) as vcf:
            line = vcf.readline().strip()
            # Skip info lines.
            while line.startswith("##"):
                print(line, file=output_f)
                line = vcf.readline().strip()
            # First non-## line is the header. Get sample names and print to output.
            samples = parse_header(line)
            print(line, file=output_f)
            print("Creating gene dictionary for expression data.")
            gene_dict = get_genes(exp_file, samples, th, True)
            if len(gene_dict) == 0:
                print("Error, no genes above threshold found in expression file.",
                      "\nTry lowering the threshold and ensure the expression fil",
                      "e has values in the range that you expect.")
                sys.exit()
            print("Filtering motif info for TFs that don't meet the expression threshold of " +
                  str(th) + ". Found " + format(len(gene_dict)) + " genes. Start processing vcf file.")
            # Rewrite each record, dropping motif matches for unexpressed TFs.
            for line in vcf:
                new_line = process_line(line, gene_dict, th)
                if new_line is not None:
                    print(new_line, file=output_f)
        output_f.close()
    else:
        # this version processes the motif file
        with open(inp_file) as vcf:
            line = vcf.readline().strip()
            # Skip info lines.
            while line.startswith("##"):
                line = vcf.readline().strip()
            # First non-## line is the header. Get sample names and print to output.
            samples = parse_header(line)
        # done with vcf file; only used to get samples
        print("Creating gene dictionary for expression data.")
        gene_dict = get_genes(exp_file, samples, th, True)
        if len(gene_dict) == 0:
            print("Error, no genes above threshold found in expression file.",
                  "\nTry lowering the threshold and ensure the expression fil",
                  "e has values in the range that you expect.")
            sys.exit()
        print("Filtering motif info for TFs that don't meet the expression threshold of " +
              str(th) + ". Found " + format(len(gene_dict)) + " genes. Start filtering motifs.")
        motif.get_filterbygene_put_motifs(motif_file, motifout_file, th, gene_dict)
    print("COMPLETE.")
def from_tensorflow(graphdef, output_nodes=None, preprocessor=None, **kwargs):
    """
    Converts a TensorFlow GraphDef to a UFF model.

    Args:
        graphdef (tensorflow.GraphDef): The TensorFlow graph to convert.
        output_nodes (list(str)): The names of the outputs of the graph. If not provided, graphsurgeon is used to automatically deduce output nodes.
        output_filename (str): The UFF file to write.
        preprocessor (str): The path to a preprocessing script that will be executed before the converter. This script should define a ``preprocess`` function which accepts a graphsurgeon DynamicGraph and modifies it in place.
        write_preprocessed (bool): If set to True, the converter will write out the preprocessed graph as well as a TensorBoard visualization. Must be used in conjunction with output_filename.
        text (bool): If set to True, the converter will also write out a human readable UFF file. Must be used in conjunction with output_filename.
        quiet (bool): If set to True, suppresses informational messages. Errors may still be printed.
        list_nodes (bool): If set to True, the converter displays a list of all nodes present in the graph.
        debug_mode (bool): If set to True, the converter prints verbose debug messages.
        return_graph_info (bool): If set to True, this function returns the graph input and output nodes in addition to the serialized UFF graph.

    Returns:
        serialized UFF MetaGraph (str)

        OR, if return_graph_info is set to True,

        serialized UFF MetaGraph (str), graph inputs (list(tensorflow.NodeDef)), graph outputs (list(tensorflow.NodeDef))
    """
    # Work on a private copy: the previous mutable default argument ([]) was
    # shared across calls, and the name normalization below would otherwise
    # mutate a caller-supplied list in place.
    output_nodes = list(output_nodes) if output_nodes else []
    quiet = False
    input_node = []
    text = False
    list_nodes = False
    output_filename = None
    write_preprocessed = False
    debug_mode = False
    return_graph_info = False
    for k, v in kwargs.items():
        if k == "quiet":
            quiet = v
        elif k == "input_node":
            input_node = v
        elif k == "text":
            text = v
        elif k == "list_nodes":
            list_nodes = v
        elif k == "output_filename":
            output_filename = v
        elif k == "write_preprocessed":
            write_preprocessed = v
        elif k == "debug_mode":
            debug_mode = v
        elif k == "return_graph_info":
            return_graph_info = v
    tf_supported_ver = "1.12.0"
    if not quiet:
        print("NOTE: UFF has been tested with TensorFlow " + str(tf_supported_ver) + ". Other versions are not guaranteed to work")
    if tf.__version__ != tf_supported_ver:
        print("WARNING: The version of TensorFlow installed on this system is not guaranteed to work with UFF.")
    try:
        import graphsurgeon as gs
    except ImportError as err:
        raise ImportError("""ERROR: Failed to import module ({})
Please make sure you have graphsurgeon installed.
For installation instructions, see:
https://docs.nvidia.com/deeplearning/sdk/tensorrt-api/#python and click on the 'TensoRT Python API' link""".format(err))
    # Create a dynamic graph so we can adjust it as needed.
    dynamic_graph = gs.DynamicGraph(graphdef)
    # Always remove assert ops.
    assert_nodes = dynamic_graph.find_nodes_by_op("Assert")
    dynamic_graph.remove(assert_nodes, remove_exclusive_dependencies=True)
    # Now, run the preprocessor, if provided.
    if preprocessor:
        import importlib, sys
        # Temporarily insert this working dir into the sys.path
        sys.path.insert(0, os.path.dirname(preprocessor))
        # Import and execute!
        pre = importlib.import_module(os.path.splitext(os.path.basename(preprocessor))[0])
        pre.preprocess(dynamic_graph)
        # Now clean up, by removing the directory from the system path.
        del sys.path[0]
    # Run process_dilated_conv() and process_softmax() so the user doesn't have to.
    gs.extras.process_dilated_conv(dynamic_graph)
    gs.extras.process_softmax(dynamic_graph)
    # Get the modified graphdef back.
    graphdef = dynamic_graph.as_graph_def()
    if write_preprocessed and output_filename:
        preprocessed_output_name = os.path.splitext(output_filename)[0] + "_preprocessed"
        dynamic_graph.write(preprocessed_output_name + ".pb")
        dynamic_graph.write_tensorboard(preprocessed_output_name)
        if not quiet:
            print("Preprocessed graph written to " + preprocessed_output_name + ".pb")
            print("TensorBoard visualization written to " + preprocessed_output_name)
    if not quiet:
        print("UFF Version " + uff.__version__)
    if debug_mode:
        _debug_print("Debug Mode is ENABLED")
    if not input_node:
        if not quiet:
            print("=== Automatically deduced input nodes ===")
            print(str(dynamic_graph.graph_inputs))
            print("=========================================\n")
    # Deduce the likely graph outputs if none are provided
    if not output_nodes:
        output_nodes = [node.name for node in dynamic_graph.graph_outputs]
        if not quiet:
            print("=== Automatically deduced output nodes ===")
            print(str(dynamic_graph.graph_outputs))
            print("==========================================\n")
    if list_nodes:
        for i, node in enumerate(graphdef.node):
            print('%i %s: "%s"' % (i + 1, node.op, node.name))
        return
    for i, name in enumerate(output_nodes):
        if debug_mode:
            _debug_print("Enumerating outputs")
        output_nodes[i] = tf2uff.convert_node_name_or_index_to_name(
            name, graphdef.node, debug_mode=debug_mode)
        if not quiet:
            print("Using output node", output_nodes[i])
    input_replacements = {}
    for i, name_data in enumerate(input_node):
        name, new_name, dtype, shape = name_data.split(',', 3)
        name = tf2uff.convert_node_name_or_index_to_name(name, graphdef.node, debug_mode=debug_mode)
        if new_name == '':
            new_name = name
        dtype = np.dtype(dtype)
        shape = [int(x) for x in shape.split(',')]
        input_replacements[name] = (new_name, dtype, shape)
        if not quiet:
            print("Using input node", name)
    if not quiet:
        print("Converting to UFF graph")
    uff_metagraph = uff.model.MetaGraph()
    tf2uff.add_custom_descriptors(uff_metagraph)
    uff_graph = tf2uff.convert_tf2uff_graph(
        graphdef,
        uff_metagraph,
        output_nodes=output_nodes,
        input_replacements=input_replacements,
        name="main",
        debug_mode=debug_mode)
    uff_metagraph_proto = uff_metagraph.to_uff()
    if not quiet:
        print('No. nodes:', len(uff_graph.nodes))
    if output_filename:
        with open(output_filename, 'wb') as f:
            f.write(uff_metagraph_proto.SerializeToString())
        if not quiet:
            print("UFF Output written to", output_filename)
        if text:  # ASK: Would you want to return the prototxt?
            if not output_filename:
                raise ValueError(
                    "Requested prototxt but did not provide file path")
            output_filename_txt = _replace_ext(output_filename, '.pbtxt')
            with open(output_filename_txt, 'w') as f:
                f.write(str(uff_metagraph.to_uff(debug=True)))
            if not quiet:
                print("UFF Text Output written to", output_filename_txt)
    # Always return the UFF graph!
    if return_graph_info:
        return uff_metagraph_proto.SerializeToString(), dynamic_graph.graph_inputs, dynamic_graph.graph_outputs
    else:
        return uff_metagraph_proto.SerializeToString()
def random_binary():
    """Return 500 random bits (0 or 1).

    Used to exercise the ``cached`` view decorator's cache-key handling.
    """
    return [random.randrange(0, 2) for _ in range(500)]
def remove_from_end(string, text_to_remove):
    """
    Remove a String from the end of a string if it exists

    Args:
        string (str): string to edit (may be None, in which case it is
            returned unchanged)
        text_to_remove (str): the text to remove

    Returns: the string with the text removed
    """
    if string is not None and text_to_remove and string.endswith(text_to_remove):
        # Slice by explicit length: string[:-len("")] would be string[:-0],
        # i.e. string[:0] == "", wrongly emptying the whole string.
        return string[:len(string) - len(text_to_remove)]
    return string
def is_3pt_shot(blob):
    """Parses the play description to determine if shot was 3pt attempt"""
    # Stub: a concrete implementation (for dict or pd.DataFrame input, per
    # the error message) is expected to be provided elsewhere.
    raise NotImplementedError("Pass a dictionary or pd.DataFrame")
def spec_to_hole(
    spec: specs.Spec, inputs: Optional[Iterable] = None, output: Optional[Any] = None
) -> "Impl":
    """Returns a default, incomplete schedule for a Spec which consume given inputs.

    If either `inputs` or `output` is None, default Tensors from the corresponding
    TensorSpecs will be constructed (using the current target).

    Raises:
        NotImplementedError: if `spec` is not one of the handled Spec subclasses.
    """
    # Import some Impls here to avoid import cycle
    # TODO: Can we move this to its own file instead?
    from .compose import ComposeHole
    from .directconv import DirectConv
    from .matmuls import MatmulHole
    from .reducesum import ReduceSum

    # Materialize default operands from the Spec when not supplied.
    if inputs is None:
        target = current_target()
        inputs = tuple(target.tensor(s) for s in spec.inputs)
    if output is None:
        output = current_target().tensor(spec.output)
    inputs = tuple(inputs)
    # Dispatch on the concrete Spec type to the matching hole Impl.
    if isinstance(spec, specs.Convolution):
        assert len(inputs) == 2, f"Expected 2 Tensor/Tile operands; got {len(inputs)}"
        return DirectConv(
            lhs=inputs[0], rhs=inputs[1], output=output, serial_only=spec.serial_only
        )
    elif isinstance(spec, specs.Matmul):
        assert len(inputs) == 2, f"Expected 2 Tensor/Tile operands; got {len(inputs)}"
        return MatmulHole(
            lhs=inputs[0], rhs=inputs[1], output=output, serial_only=spec.serial_only
        )
    elif isinstance(spec, specs.ReduceSum):
        assert len(inputs) == 1, f"Expected 1 Tensor/Tile operands; got {len(inputs)}"
        return ReduceSum(source=inputs[0], output=output, serial_only=spec.serial_only)
    elif isinstance(spec, specs.Compose):
        return ComposeHole(spec, inputs=inputs, output=output)
    else:
        raise NotImplementedError()
def get_node_number(self, node, typ) -> str:
    """Get the dotted figure number for the directive node, e.g. "2.1"."""
    node_id = node.attributes.get("ids", [])[0]
    if isinstance(self, LaTeXTranslator):
        docname = find_parent(self.builder.env, node, "section")
    else:
        docname = node.attributes.get("docname", "")
    # Latex does not have builder.fignumbers
    fignumbers = self.builder.env.toc_fignumbers.get(docname, {})
    parts = fignumbers.get(typ, {}).get(node_id, ())
    return ".".join(str(part) for part in parts)
def count_mp3_files_below(adir_path):
    """Count all mp3 files below the given dir, including subdirs.

    Args:
        adir_path (str): root directory to walk.

    Returns:
        int: number of files whose name matches ``*.mp3``.
    """
    # Sum per-directory match counts instead of materialising a full list
    # of paths just to take its length.
    return sum(
        len(fnmatch.filter(filenames, '*.mp3'))
        for _root, _dirnames, filenames in os.walk(adir_path)
    )
def get_client(client, aws_access_key_id, aws_secret_access_key, region=None):
    """Shortcut for getting an initialized instance of the boto3 client."""
    credentials = {
        'aws_access_key_id': aws_access_key_id,
        'aws_secret_access_key': aws_secret_access_key,
        'region_name': region,
    }
    return boto3.client(client, **credentials)
def main(req: func.HttpRequest) -> func.HttpResponse:
    """ main function for status/http

    Returns 200 for an empty request body, 202 when a body is present, and a
    500 jsend error payload if anything goes wrong.
    """
    logging.info('Status processed a request.')
    try:
        response = get_http_response_by_status(200)
        # A non-empty body is acknowledged with 202 (Accepted) instead.
        if req.get_body() and len(req.get_body()):
            response = get_http_response_by_status(202)
        headers = {
            "Access-Control-Allow-Origin": "*"
        }
        return func_json_response(response, headers, "message")
    # Broad catch is deliberate: this is the top-level HTTP boundary, and the
    # error is logged and reported as a 500 rather than crashing the host.
    #pylint: disable=broad-except
    except Exception as err:
        logging.error("Status HTTP error occurred: %s", traceback.format_exc())
        msg_error = f"This endpoint encountered an error. {err}"
        func_response = json.dumps(jsend.error(msg_error))
        return func.HttpResponse(func_response, status_code=500)
def _variable_map_by_name(variables):
"""
Returns Dict,representing referenced variable fields mapped by name.
Keyword Parameters:
variables -- list of 'variable_python_type' Warehouse support DTOs
>>> from pprint import pprint
>>> var1 = { 'column':'frob_hz', 'title':'Frobniz Resonance (Hz)'
... ,'python_type': 'float'
... ,'table': 'foo_fact'}
>>> list1 = [var1]
>>> pprint(_variable_map_by_name(list1))
{'frob_hz': {'column': 'frob_hz',
'python_type': 'float',
'table': 'foo_fact',
'title': 'Frobniz Resonance (Hz)'}}
"""
variable_by_field = {}
for var in variables:
field_name = var['column']
variable_by_field[field_name] = var
return variable_by_field | 34,248 |
def test_show_info_retrieve_core_info_by_ids(show_ids: List[int]):
    """Testing for :py:meth:`wwdtm.show.ShowInfoMultiple.retrieve_core_info_by_ids`

    :param show_ids: List of show IDs to test retrieving show core information
    """
    info = ShowInfoMultiple(connect_dict=get_connect_dict())
    shows = info.retrieve_core_info_by_ids(show_ids)
    assert shows, "Core information all shows could not be retrieved"
    # Every requested ID must be present and carry the core fields.
    for show_id in show_ids:
        assert show_id in shows, ("Core information could not be retrieved for "
                                  f"show ID {show_id}")
        assert "id" in shows[show_id], ("'id' was not returned with core information "
                                        f"for show ID {show_id}")
        assert "description" in shows[show_id], ("'description' was not returned with "
                                                 f"show information for show ID {show_id}")
def test_generator_setattr_typechecking():
    """
    setattr should provide type checking based on PROPERTIES definition
    """
    # Loop variable renamed from `type`, which shadowed the builtin.
    for declared_type in SAMPLES:
        mock = Mock(Resource)
        object.__setattr__(mock, 'PROPERTIES', {'key': declared_type})
        object.__setattr__(mock, '_dirty', dict())
        for sample_type, samples in SAMPLES.iteritems():
            if isinstance(sample_type, declared_type):
                for sample in samples:
                    yield setattr_typeerror, mock, 'key', sample
            else:
                for sample in samples:
                    yield setattr_eq, mock, 'key', sample
def _apply_nat_net_less_greedy_subnet():
    """By default, VirtualBox claims 10.0.2.x for itself as part of its NAT routing
    scheme. This subnet is commonly used on internal networks, making this a pretty
    damn greedy choice. We instead alter the VM to use the less greedy subnet of
    10.174.249.x which is less likely to conflict."""
    command = [
        'VBoxManage', 'modifyvm', constants.VM_MACHINE_NAME,
        '--natnet1', '10.174.249/24',
    ]
    check_and_log_output_and_error_demoted(command, quiet_on_success=True)
def true_divide(x, y):
    """Divides x / y elementwise (using Python 3 division operator semantics).

    NOTE: Prefer using the Tensor operator or tf.divide which obey Python
    division operator semantics.

    This function forces Python 3 division operator semantics where all integer
    arguments are cast to floating types first. This op is generated by normal
    `x / y` division in Python 3 and in Python 2.7 with
    `from __future__ import division`. If you want integer division that rounds
    down, use `x // y` or `tf.math.floordiv`.

    `x` and `y` must have the same numeric type. If the inputs are floating
    point, the output will have the same type. If the inputs are integral, the
    inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
    and `int64` (matching the behavior of Numpy).

    Args:
        x (np.ndarray): input tensor.
        y (np.ndarray): another tensor.

    Returns:
        `x / y` evaluated in floating point.

    Raises:
        TypeError: If `x` and `y` have different dtypes.
    """
    # Thin wrapper: numpy.true_divide already implements the semantics above.
    return np.true_divide(x, y)
class ObjectNotFoundError(NDARError):
    """S3 object not found.

    Fix: this was declared with ``def`` (making it a function that takes
    ``NDARError`` as a parameter and returns None) instead of ``class``,
    so it could never be raised or caught as an exception.
    """

    def __init__(self, object):
        # Name of the missing S3 object, used in the rendered message.
        self.object = object

    def __str__(self):
        return 'Object not found: %s' % self.object
def test_get_bool():
    """get_bool should get the bool from the environment variable, or raise an exception if it's not parseable as a bool"""
    # Parseable values come back as real booleans.
    assert envs.get_bool(name="TRUE", default=1234, description="description") is True
    assert envs.get_bool(name="FALSE", default=1234, description="description") is False
    # Every other fake environment entry must fail to parse.
    unparseable = {
        key: value
        for key, value in FAKE_ENVIRONS.items()
        if key not in ("TRUE", "FALSE")
    }
    for key, value in unparseable.items():
        with pytest.raises(envs.EnvironmentVariableParseException) as ex:
            envs.get_bool(name=key, default=1234, description="description")
        expected = "Expected value in {key}={value} to be a boolean".format(
            key=key, value=value
        )
        assert ex.value.args[0] == expected
    # Missing variables fall back to the supplied default.
    assert envs.get_bool(name="missing_true", default=True, description="description") is True
    assert envs.get_bool(name="missing_false", default=False, description="description") is False
def hello():
    """
    Say hello using a template file.
    """
    # All markup lives in templates/index.html; no view-specific context needed.
    return render_template('index.html')
def pause_sale(ctx):
    """
    Pause the token sale

    :param ctx:GetContext() used to access contract storage
    :return:bool Whether pausing the sale was successful
    """
    # Only the token owner's signature may pause the sale.
    if CheckWitness(TOKEN_OWNER):
        Put(ctx, SALE_STATUS_KEY, SALE_PAUSED)
        return True
    return False
def parse_item_hash(value):
    """
    Parses the item-hash datatype, e.g. sha-256:5b8e5ee02caedd0a6f3539b19d6b462dd2d08918764e7f476506996024f7b84a

    :param value: a string to parse
    :return: parsed value
    """
    # Already-parsed values pass through unchanged.
    if isinstance(value, ItemHash):
        return value
    if isinstance(value, str):
        return ItemHash(value)
    raise ValueError('value must be a str')
def __convert_swizzle_scale(scale, export_settings):
    """Convert a scale from Blender coordinate system to glTF coordinate system."""
    if export_settings[gltf2_blender_export_keys.YUP]:
        # Y-up export: swap the Y and Z scale components.
        components = (scale[0], scale[2], scale[1])
    else:
        components = (scale[0], scale[1], scale[2])
    return Vector(components)
def launch_plugin_flow(current, client_id, rekall_session, plugin, plugin_arg):
    """Launch the flow on the client.

    Validates the plugin and session arguments, persists a Pending flow row,
    notifies the client via firebase, and writes an audit log entry.
    Returns an empty dict on success; raises ValueError for an unknown plugin.
    """
    db = current.db
    flow_id = utils.new_flow_id()
    spec = plugins.RekallAPI(current).get(plugin)
    if not spec:
        raise ValueError("Unknown plugin")
    # Validate both plugin args and session args.
    validate_plugin_args(plugin_arg, spec)
    validate_plugin_args(rekall_session, plugins.SessionAPI(current))
    flow = agent.Flow.from_keywords(
        flow_id=flow_id,
        created_time=time.time(),
        ticket=dict(
            location=dict(
                __type__="HTTPLocation",
                base=utils.route_api('/control/ticket'),
                path_prefix=flow_id,
            )),
        actions=[
            dict(__type__="PluginAction",
                 plugin=plugin,
                 args=plugin_arg,
                 rekall_session=rekall_session,
                 collection=dict(
                     __type__="JSONCollection",
                     location=dict(
                         __type__="BlobUploader",
                         base=html.URL(
                             c="api", f="control", args=['upload'], host=True),
                         path_template=(
                             "collection/%s/{part}" % flow_id),
                     ))
                 )])
    # Optionally let the client upload files discovered by the plugin.
    if rekall_session.get("also_upload_files"):
        flow.file_upload = dict(
            __type__="FileUploadLocation",
            flow_id=flow_id,
            base=html.URL(c="api", f='control/file_upload',
                          host=True))
    db.flows.insert(
        flow_id=flow_id,
        client_id=client_id,
        status=agent.FlowStatus.from_keywords(
            timestamp=time.time(),
            client_id=client_id,
            flow_id=flow_id,
            status="Pending"),
        creator=utils.get_current_username(current),
        flow=flow,
        timestamp=flow.created_time.timestamp,
    )
    # Wake the client so it polls for the newly queued flow.
    firebase.notify_client(client_id)
    audit.log(current, "FlowLaunchPlugin", flow_id=flow_id, plugin=plugin,
              client_id=client_id)
    return {}
def deletePressed(self):
    """
    Forward a Delete keypress to the active MDI subwindow, if any.
    """
    qDebug("deletePressed()")
    QApplication.setOverrideCursor(Qt.WaitCursor)
    activeWin = self.mdiArea.activeSubWindow()
    if activeWin:
        activeWin.deletePressed()
    QApplication.restoreOverrideCursor()
def setup():
    """Initial power up routine.

    Switches on the temperature control loops and the laser, then ramps the
    heater towards its setpoint while printing readings once per second.
    Monitoring stops when the setpoint is overshot or on Ctrl-C; a summary
    of the device state is printed either way.
    """
    spdc.peltier_loop_on()
    spdc.heater_loop_on()
    spdc.save_settings()
    spdc.heater_temp_setpoint = HSETTEMP  # heater ramp
    spdc.laser_on(LCURRENT)  # laser current ramp
    print("Laser switched on with current:", spdc.laser_current, "mA")
    # Monitor temperature until the setpoint is reached.
    try:
        print("Monitoring current heater temperature...")
        print("Terminates at {}°C (Ctrl-C to stop monitoring)".format(HSETTEMP))
        while True:
            reading = spdc.heater_temp  # single read per iteration
            print(reading, "°C")
            if reading > HSETTEMP:  # terminate monitoring once overshoot
                break
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    summary = (
        "Device started up with:"
        + "\n - Laser current: {} mA".format(spdc.laser_current)
        + "\n - Heater temp: {} °C".format(spdc.heater_temp)
        + "\n - Peltier temp: {} °C".format(spdc.peltier_temp)
    )
    print(summary)
def set_gain(camera, gain, value):
    """Set the analog or digital gain of a PiCamera.

    camera: the picamera.PiCamera() instance you are configuring
    gain: either MMAL_PARAMETER_ANALOG_GAIN or MMAL_PARAMETER_DIGITAL_GAIN
    value: a numeric value that can be converted to a rational number.
    """
    valid_gains = (MMAL_PARAMETER_ANALOG_GAIN, MMAL_PARAMETER_DIGITAL_GAIN)
    if gain not in valid_gains:
        raise ValueError("The gain parameter was not valid")
    status = mmal.mmal_port_parameter_set_rational(
        camera._camera.control._port,
        gain,
        to_rational(value)
    )
    if status == 4:
        # Status 4 indicates the firmware does not know this parameter,
        # hence the hint about outdated userland libraries.
        raise exc.PiCameraMMALError(
            status,
            "Are you running the latest version of the userland libraries? Gain setting was introduced in late 2017."
        )
    if status != 0:
        raise exc.PiCameraMMALError(status)
def load_config_from_expt_dir(experiment_dir: Path, loop_config: Type[OptimizerConfig]) -> OptimizerConfig:
    """
    Locate a config file in experiment_dir or one of its subdirectories (for a per-seed config).
    Config files are now normally in seed subdirectories, as they contain seed values;
    the first per-seed config found (sorted order) is used, falling back to a config
    directly inside experiment_dir.
    """
    per_seed_configs = sorted(experiment_dir.glob(f"*/seed*/{CONFIG_FILENAME}"))
    config_file = per_seed_configs[0] if per_seed_configs else experiment_dir / CONFIG_FILENAME
    if not config_file.exists():
        raise FileNotFoundError(f"Cannot find {CONFIG_FILENAME} at or under {experiment_dir}")  # pragma: no cover
    return cast(loop_config, simple_load_config(config_file, config_class=loop_config))
def glsadf_delay(order, stage):
    """Create a zero-initialized delay buffer for glsadf.

    Parameters
    ----------
    order : int
        Order of glsadf filter coefficients
    stage : int
        -1 / gamma

    Returns
    -------
    delay : array
        Zero-filled delay buffer of the length required by glsadf.
    """
    buffer_length = _sptk.glsadf_delay_length(order, stage)
    return np.zeros(buffer_length)
def find_next(s: str) -> [int]:
    """Compute the KMP "next" (failure-function) array of a string.

    result[i] is the length of the longest proper prefix of s[:i] that is
    also a suffix of s[:i]; result[0] is -1 by convention.

    Bug fixes versus the previous version:
      * on a mismatch with cn == 0, position ``i`` (not ``i + 1``) is set
        to 0 — the old code could write out of bounds;
      * ``i`` only advances once position ``i`` has been filled, so the
        fallback step (``cn = result[cn]``) no longer skips positions;
      * the empty string returns [] instead of raising IndexError.

    input:  string
    output: the next array of the string
    """
    if not s:
        return []
    if len(s) == 1:
        return [-1]
    result = [0] * len(s)
    result[0] = -1
    result[1] = 0
    i = 2
    cn = 0  # length of the currently matched prefix
    while i < len(result):
        if s[i - 1] == s[cn]:
            cn += 1
            result[i] = cn
            i += 1
        elif cn > 0:
            # Fall back to the next shorter border candidate; keep i fixed.
            cn = result[cn]
        else:
            result[i] = 0
            i += 1
    return result
def _read_date():
    """Prompt the user for a date string (YYYY-MM-DD); default to today.

    Re-prompts until the input parses as a valid date, then returns the
    raw string the user entered.
    """
    while True:
        entered = prompt("Date", default=str(datetime.date.today()))
        try:
            datetime.datetime.strptime(entered, "%Y-%m-%d")
        except ValueError:
            continue
        return entered
def handle_pending_submission(self, request, layout=None):
    """ Renders a pending submission, takes its input and allows the
    user to turn the submission into a complete submission, once all data
    is valid.
    This view has two states, a completable state where the form values
    are displayed without a form and an edit state, where a form is rendered
    to change the values.
    Takes the following query parameters for customization::
        * ``edit`` render the view in the edit state
        * ``return-to`` the view redirects to this url once complete
        * ``title`` a custom title (required if external submission)
        * ``quiet`` no success messages are rendered if present
    Returns a dict of template variables, or a redirect when ``return-to``
    is present and the submission is completable.
    """
    collection = FormCollection(request.session)
    form = request.get_form(self.form_class, data=self.data)
    form.action = request.link(self)
    form.model = self
    # Completable state: validate the stored data; a plain GET carries no
    # CSRF token, so that particular error is ignored.
    if 'edit' not in request.GET:
        form.validate()
        if not request.POST:
            form.ignore_csrf_error()
    # Edit state: persist valid changes immediately.
    elif not form.errors:
        collection.submissions.update(self, form)
    completable = not form.errors and 'edit' not in request.GET
    if completable and 'return-to' in request.GET:
        if 'quiet' not in request.GET:
            request.success(_("Your changes were saved"))
        # the default url should actually never be called
        return request.redirect(request.url)
    if 'title' in request.GET:
        title = request.GET['title']
    else:
        title = self.form.title
    price = get_price(request, form, self)
    # retain some parameters in links (the rest throw away)
    form.action = copy_query(
        request, form.action, ('return-to', 'title', 'quiet'))
    edit_link = URL(copy_query(
        request, request.link(self), ('title', )))
    # the edit link always points to the editable state
    edit_link = edit_link.query_param('edit', '')
    edit_link = edit_link.as_string()
    return {
        'layout': layout or FormSubmissionLayout(self, request, title),
        'title': title,
        'form': form,
        'completable': completable,
        'edit_link': edit_link,
        'complete_link': request.link(self, 'complete'),
        'model': self,
        'price': price,
        # checkout_button is falsy when there is nothing to pay for
        'checkout_button': price and request.app.checkout_button(
            button_label=request.translate(_("Pay Online and Complete")),
            title=title,
            price=price,
            email=self.email or self.get_email_field_data(form),
            locale=request.locale
        )
    } | 34,267 |
def i18n_view(tpl_base_name=None, **defaults):
    """
    Renders a template whose name is suffixed with the request locale.

    Unlike the normal view decorator, the template name should not carry an
    extension. The lower-cased locale identifier is appended to the base
    template name with an underscore ('_') separator; when the request has
    no locale the base name is used as-is.
    Any additional keyword arguments are used as default template variables.

    For example::
        @i18n_view('foo')
        def render_foo():
            # Renders 'foo_en' for English locale, 'foo_fr' for French, etc.
            return
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                tpl_name = '%s_%s' % (tpl_base_name, request.locale.lower())
            except AttributeError:
                # No usable locale on the request: fall back to the base name.
                tpl_name = tpl_base_name
            tplvars = defaults.copy()
            result = func(*args, **kwargs)
            if isinstance(result, (dict, DictMixin)):
                tplvars.update(result)
                return template(tpl_name, **tplvars)
            if result is None:
                return template(tpl_name, **tplvars)
            # Non-dict, non-None results bypass templating entirely.
            return result
        return wrapper
    return decorator
def tempfile_delete(tfile):
    """Close and remove a temporary file; a no-op when ``tfile`` is falsy."""
    if not tfile:
        return
    tfile.close()
    os.unlink(tfile.name)
def git_file_list(path_patterns=()):
    """Returns: List of files in current git revision matching `path_patterns`.

    This is basically git ls-files.

    Bug fix: ``path_patterns`` defaults to a tuple, and ``list + tuple``
    raises TypeError — the patterns are now converted to a list before
    concatenation, so the function also accepts any iterable of patterns.
    """
    cmd = ['git', 'ls-files', '--exclude-standard'] + list(path_patterns)
    return exec_output_lines(cmd, False)
def _keep_it_real():
    """Remember boto3's native client factory (once) as ``boto3.real_client``."""
    already_saved = getattr(boto3, "real_client", None)
    if not already_saved:
        boto3.real_client = boto3.client
def unreserve_id():
    """
    Removes the reservation of a SCSI ID as well as the memo for the reservation
    """
    scsi_id = request.form.get("scsi_id")
    remaining_ids = get_reserved_ids()["ids"]
    remaining_ids.remove(scsi_id)
    outcome = reserve_scsi_ids(remaining_ids)
    if not outcome["status"]:
        # Reservation update failed: report both the generic and detailed error.
        flash(_(u"Failed to release the reservation for SCSI ID %(id_number)s", id_number=scsi_id))
        flash(outcome["msg"], "error")
        return redirect(url_for("index"))
    RESERVATIONS[int(scsi_id)] = ""
    flash(_(u"Released the reservation for SCSI ID %(id_number)s", id_number=scsi_id))
    return redirect(url_for("index"))
def compress_delete_outdir(outdir):
    """Compress the contents of the passed directory to .tar.gz and delete."""
    archive_path = outdir + ".tar.gz"
    logger.info("\tCompressing output from %s to %s", outdir, archive_path)
    with tarfile.open(archive_path, "w:gz") as archive:
        archive.add(outdir)
    logger.info("\tRemoving output directory %s", outdir)
    shutil.rmtree(outdir)
def iprint(
    img: np.ndarray,
    title: str = "",
    info: bool = False,
) -> None:
    """
    Display an image; optionally print a title banner and image details.

    Args:
        img (np.ndarray): Image which is displayed
        title (str, optional): Title of the image. Defaults to ''.
        info (bool, optional): If information is displayed or not. Defaults to False.
    """
    if info:
        banner = f"{ f' {title} ':-^50}"
        print(f"{banner}\n")
        print_img_info(img)
    plt.figure()
    plt.axis("off")
    plt.title(title)
    io.imshow(img)
def integral_sqrt_a2_minus_x2(x, a):
    r"""Integral of $\sqrt{a^2 - x^2}$ --- see (30) at
    http://integral-table.com.

    Evaluates ``0.5*x*sqrt(a^2 - x^2) + 0.5*a^2*arctan2(x, sqrt(a^2 - x^2))``.
    The docstring is a raw string so the ``\s`` sequence is not treated as an
    (invalid) escape, which previously triggered a DeprecationWarning.
    """
    root = np.sqrt(a**2 - x**2)
    return 0.5 * x * root + 0.5 * a**2 * np.arctan2(x, root)
def do_qc(fn, df, year):
    """Run some checks on this dataframe.

    Prints a QC report for a WEPP .cli climate file: summary precipitation
    and temperature statistics per year, then monthly/daily comparisons of
    the file's precipitation against the PRISM, IEMRE and StageIV datasets
    (fetched over HTTP from the IEM web services).

    Args:
        fn: Path to the .cli file (its name encodes lon/lat).
        df: DataFrame of the parsed .cli file, indexed by date.
        year: The year to compare against the external datasets.
    """
    (lon, lat) = fn2lonlat(fn)
    stage4 = compute_stage4(lon, lat, year)
    # Does the frame appear to have all dates?
    if len(df.index) != len(df.resample("D").mean().index):
        print("ERROR: Appears to be missing dates!")
    if open(fn).read()[-1] != "\n":
        print("ERROR: File does not end with \\n")
    # ---- per-year summary stats straight from the .cli data ----
    print("--------- Summary stats from the .cli file")
    print("YEAR | RAIN | MAXRATE | MAXACC | #DAYS | #>1RT | RAD/D")
    print(" --- | --- | --- | --- | --- | --- | ---")
    for _year, gdf in df.groupby(by=df.index.year):
        print(
            ("%s | %6.2f | %7.2f | %7.2f | %6i | %6i | %6.0f")
            % (
                _year,
                distance(gdf["pcpn"].sum(), "MM").value("IN"),
                distance(gdf["maxr"].max(), "MM").value("IN"),
                distance(gdf["pcpn"].max(), "MM").value("IN"),
                len(gdf[gdf["pcpn"] > 0].index),
                len(gdf[gdf["maxr"] > 25.4].index),
                gdf["rad"].mean(),
            )
        )
    print("---- Months with < 0.05 precipitation ----")
    gdf = df.groupby(by=[df.index.year, df.index.month])["pcpn"].sum()
    print(gdf[gdf < 1.0])
    print("----- Average high temperature -----")
    print("YEAR | Avg High F | Avg Low F | Days > 100F")
    print(" --- | --- | --- | ---")
    for _year, gdf in df.groupby(by=df.index.year):
        print(
            ("%s | %6.2f | %6.2f | %3i")
            % (
                _year,
                temperature(gdf["tmax"].mean(), "C").value("F"),
                temperature(gdf["tmin"].mean(), "C").value("F"),
                len(gdf[gdf["tmax"] > 37.7].index),
            )
        )
    # ---- monthly totals for the requested year ----
    monthly = df[df.index.year == year]["pcpn"].resample("M").sum().copy()
    monthly = pd.DataFrame(
        {"dep": distance(monthly.values, "MM").value("IN")}, index=range(1, 13)
    )
    # Get prism, for a bulk comparison
    prism = requests.get(
        (
            "http://mesonet.agron.iastate.edu/json/prism/"
            "%.2f/%.2f/%s0101-%s1231"
        )
        % (lon, lat, year, year)
    ).json()
    rows = []
    for entry in prism["data"]:
        rows.append(
            {
                "date": datetime.datetime.strptime(
                    entry["valid"][:10], "%Y-%m-%d"
                ),
                "precip": entry["precip_in"],
            }
        )
    prismdf = pd.DataFrame(rows)
    prismdf.set_index("date", inplace=True)
    monthly["prism"] = prismdf["precip"].resample("M").sum().copy().values
    # Compare daily values
    iemjson = requests.get(
        (
            "http://mesonet.agron.iastate.edu/iemre/multiday/"
            "%s-01-01/%s-12-31/%s/%s/json"
        )
        % (year, year, lat, lon)
    ).json()
    rows = []
    for entry in iemjson["data"]:
        rows.append(
            {
                "date": datetime.datetime.strptime(entry["date"], "%Y-%m-%d"),
                "precip": entry["daily_precip_in"],
            }
        )
    iemdf = pd.DataFrame(rows)
    iemdf.set_index("date", inplace=True)
    print("PRISM %s precip is: %.2f" % (year, prismdf["precip"].sum()))
    print("IEMRE sum precip is: %.2f" % (iemdf["precip"].sum(),))
    print("StageIV sum precip is: %.2f" % (stage4["precip"].sum(),))
    monthly["stage4"] = stage4["precip"].resample("M").sum().copy().values
    monthly["iemre"] = iemdf["precip"].resample("M").sum().copy().values
    monthly["prism-dep"] = monthly["prism"] - monthly["dep"]
    monthly["iemre-dep"] = monthly["iemre"] - monthly["dep"]
    print(" --------- %s Monthly Totals --------" % (year,))
    print(monthly)
    # ---- daily-level deltas between DEP and the reference datasets ----
    df.at[
        slice(datetime.date(year, 1, 1), datetime.date(year, 12, 31)),
        "stage4_precip",
    ] = stage4["precip"].values
    df["iemre_precip"] = iemdf["precip"]
    df["diff_precip"] = df["pcpn_in"] - df["iemre_precip"]
    df["diff_stage4"] = df["pcpn_in"] - df["stage4_precip"]
    print(" --- Top 5 Largest DEP > IEMRE ----")
    print(
        df[
            [
                "diff_precip",
                "pcpn_in",
                "iemre_precip",
                "stage4_precip",
                "diff_stage4",
            ]
        ]
        .sort_values(by="diff_precip", ascending=False)
        .head()
    )
    print(" --- Top 5 Largest IEMRE > DEP ----")
    print(
        df[
            [
                "diff_precip",
                "pcpn_in",
                "iemre_precip",
                "stage4_precip",
                "diff_stage4",
            ]
        ]
        .sort_values(by="diff_precip", ascending=True)
        .head()
    )
    print(" --- Top 10 Largest Stage4 > DEP ----")
    print(
        df[
            [
                "diff_precip",
                "pcpn_in",
                "iemre_precip",
                "stage4_precip",
                "diff_stage4",
            ]
        ]
        .sort_values(by="diff_stage4", ascending=True)
        .head(10)
    )
    # Emit ready-to-run commands to re-edit the worst-offending days.
    print(" vvv job listing based on the above vvv")
    for dt in df.sort_values(by="diff_stage4", ascending=True).head(10).index:
        print(
            "python daily_clifile_editor.py 0 %s %s %s"
            % (dt.year, dt.month, dt.day)
        )
    df2 = df.loc[slice(datetime.date(year, 1, 1), datetime.date(year, 1, 31))][
        ["diff_precip", "pcpn_in", "iemre_precip", "stage4_precip"]
    ].sort_values(by="diff_precip")
    print(" --- Daily values for month " "")
    print(df2) | 34,276 |
def read_length(file_obj):  # pragma: no cover
    """Read four bytes and assemble them into a little-endian 32-bit length.

    Numpy/numba-friendly alternative to struct.unpack — returns the
    unsigned interpretation of the four bytes.
    """
    raw = file_obj.read(4)
    return raw[0] | (raw[1] << 8) | (raw[2] << 16) | (raw[3] << 24)
def check_and_reorder_reads(input_files, output_folder, temp_output_files):
    """ Check if paired fastq reads are ordered and if not reorder.

    Samples the first 100 read ids of the first pair file and checks them
    against the second file; on any mismatch, both files are rewritten
    sorted by read id (minus the pair suffix) into temp files, and
    ``input_files`` is updated in place to point at the reordered files.

    Args:
        input_files: List of the two paired fastq file paths (mutated in place).
        output_folder: Folder in which reordered temp files are created.
        temp_output_files: Running list of temp files (updated via
            update_temp_output_files so obsolete temps are dropped).

    Returns:
        The (possibly updated) list of input file paths.
    """
    # read in the ids from the first pair (only check the first 100)
    ids = []
    for count, lines in zip(range(100),read_file_n_lines(input_files[0],4)):
        ids.append(get_read_id_minus_pair(lines[0]))
    mismatch=False
    for lines, pair_id in zip(read_file_n_lines(input_files[1],4), ids):
        if not get_read_id_minus_pair(lines[0]) == pair_id:
            mismatch=True
            break
    # reorder the pairs to match
    new_file_list = []
    if mismatch:
        message="Reordering read identifiers ..."
        print(message+"\n")
        logger.info(message)
        for index, infile in enumerate(input_files):
            file_out, new_file=tempfile.mkstemp(prefix="reordered_",
                suffix="_"+file_without_extension(infile), dir=output_folder)
            os.close(file_out)
            # read in all of the sequences then sort and write out
            # NOTE: this holds the whole fastq file in memory
            ids={}
            for lines in read_file_n_lines(infile,4):
                id=get_read_id_minus_pair(lines[0])
                ids[id]=lines
            with open(new_file,"w") as file_handle:
                for id in sorted(ids.keys()):
                    file_handle.write("".join(ids[id]))
            # set the input file to the reordered temp file
            input_files[index]=new_file
            new_file_list.append(new_file)
    # add the temp file to the list and remove extra that are not needed
    update_temp_output_files(temp_output_files, new_file_list, input_files)
    return input_files | 34,278 |
def acquires_lock(expires, should_fail=True, should_wait=False, resource=None, prefix=DEFAULT_PREFIX, create_id=None):
    """
    Decorator to ensure function only runs when it is unique holder of the resource.
    Any invocations of the functions before the first is done
    will raise RuntimeError.
    Locks are stored in redis with default prefix: `lock:acquires_lock`
    Arguments:
        expires(timedelta|int): Expiry time of lock, way more than expected time to run.
            Intended as a failsafe clean-up mechanism.
        should_fail(bool): Should error be raised if failed to acquire lock.
        should_wait(bool): Should this task wait for lock to be released.
        resource(str): Resource identifier, by default taken from function name.
        prefix(str): Change prefix added to redis key (the 'lock:' part will always be added)
        create_id(function): Change suffix added to redis key to lock only specific function call based on arguments.
    Example:
        You have a celery task and you want to ensure it is never
        executed concurrently:
        @shared_task
        @acquire_lock(60, resource='foo')
        def foo():
            ...
    """
    # This is just a tiny wrapper around redis_lock
    # 1) acquire lock or fail
    # 2) run function
    # 3) release lock
    def decorator(f):
        # NOTE(review): this mutates the enclosing `resource`, so reusing the
        # returned decorator on a second function would prefix it twice —
        # each acquires_lock(...) call is intended to decorate one function.
        nonlocal resource
        if resource is None:
            resource = f.__name__
        resource = '%s:%s' % (prefix, resource)
        @wraps(f)
        def wrapper(*args, **kwargs):
            lock_suffix = None
            if create_id:
                # Per-call lock key suffix derived from the call arguments.
                lock_suffix = create_id(*args, **kwargs)
            # The context manager is annoying and always blocking...
            lock = get_lock(
                resource='%s:%s' % (resource, lock_suffix) if lock_suffix else resource,
                expires=expires,
            )
            lock_acquired = False
            # Get default lock blocking mode
            # Copying to local variable so original variable would not be touched
            nonlocal should_wait
            is_blocking = should_wait
            should_execute_if_lock_fails = False
            if 'should_execute_if_lock_fails' in kwargs:
                should_execute_if_lock_fails = kwargs.pop("should_execute_if_lock_fails")
            # If decorated fn is called with should_wait kwarg
            # Override lock blocking mode
            if 'should_wait' in kwargs:
                is_blocking = kwargs.pop('should_wait')
            if is_blocking:
                logger.debug('Waiting for resource "%s"', resource)
            if not lock.acquire(blocking=is_blocking):
                if should_fail:
                    raise RuntimeError("Failed to acquire lock: %s" % resource)
                logger.warning('Failed to acquire lock: %s', resource)
                if not should_execute_if_lock_fails:
                    return False
            else:
                lock_acquired = True
            try:
                # Runs either holding the lock, or lock-less when
                # should_execute_if_lock_fails was requested.
                return f(*args, **kwargs)
            finally:
                try:
                    if lock_acquired:
                        lock.release()
                except Exception as e:
                    logger.exception('Failed to release lock: %s', str(e), exc_info=False)
        return wrapper
    return decorator | 34,279 |
def ifttt_comparator_alpha_options():
    """ Option values for alphanumeric comparators """
    errmsg = check_ifttt_service_key()
    if errmsg:
        return errmsg, 401
    options = [
        ("ignore", "ignore"),
        ("equal", "is equal to"),
        ("not_equal", "is not equal to"),
        ("cont", "contains"),
        ("not_cont", "does not contain"),
        ("equal_nc", "is equal to (ignore case)"),
        ("not_equal_nc", "is not equal to (ignore case)"),
        ("cont_nc", "contains (ignore case)"),
        ("not_cont_nc", "does not contain (ignore case)"),
        ("in", "in [json array]"),
        ("not_in", "not in [json array]"),
        ("in_nc", "in [json array] (ignore case)"),
        ("not_in_nc", "not in [json array] (ignore case)"),
    ]
    data = {"data": [{"value": value, "label": label} for value, label in options]}
    return json.dumps(data)
def flat_abs_maximum(data, preserve_sign=True):
    """
    Return the element of ``data`` with the largest magnitude.

    With ``preserve_sign=True`` (the default) the original signed value is
    returned: for [-75, -25, 0, 25, 50] the result is -75, since that value
    has the highest magnitude. With ``preserve_sign=False`` the absolute
    value is returned instead, making this a composite of abs and max.

    :param data: data array source
    :param preserve_sign: whether or not to preserve the sign of the output, default is True
    :return: largest absolute value in the data array
    """
    arr = np.asarray(data)
    magnitudes = np.abs(arr)
    position = np.unravel_index(np.argmax(magnitudes), arr.shape)
    return arr[position] if preserve_sign else magnitudes[position]
def get_doctop_vis(clustering_pipeline, media='videos'):
    """Render the pipeline's UMAP bokeh visualization in Streamlit.

    Parameters
    ----------
    clustering_pipeline : class reference
        The current modeling pipeline
    media : str, optional
        'articles' or 'videos', by default 'videos'
    """
    st.bokeh_chart(clustering_pipeline.generate_bokeh_umap(media))
def get_image_to_groundplane_homography(P):
    """Given the 3x4 camera projection matrix P, returns the homography
    mapping image plane points onto the ground plane — the matrix inverse
    of the ground-plane-to-image homography."""
    ground_to_image = get_groundplane_to_image_homography(P)
    return np.linalg.inv(ground_to_image)
def create_object_detection_edge_training(
    train_object_detection_edge_model_request: TrainImageEdgeModel,
):
    """Train an Object Detection model for Edge in AutoML GCP.

    Args:
        train_object_detection_edge_model_request (TrainImageEdgeModel): Request
            payload based on the input schema.

    Raises:
        Exception: Re-raised after logging if the controller fails.

    Returns:
        The controller's training response.
    """
    try:
        logging.info(
            f"Create Object Detection Model Router: {train_object_detection_edge_model_request}"
        )
        controller = TrainModelController()
        return controller.train_object_detection_edge_model_controller(
            request=train_object_detection_edge_model_request
        )
    except Exception as error:
        logging.error(f"{error=}")
        raise error
def download_all(links, destination):
    """Download each url in ``links`` into ``destination``."""
    for url in links:
        download_file(url, destination)
def readOneLineFileWithCommas(filepath: str) -> List[str]:
    """
    Reads a file that is one line long, separated by commas.

    Bug fix: the previous bare ``except:`` swallowed every exception
    (including KeyboardInterrupt) and discarded the cause; only I/O errors
    are caught now, and the original error is chained.

    Raises:
        Exception: if the file cannot be opened or read.
    """
    try:
        with open(filepath) as fp:
            line: str = fp.readline()
    except OSError as err:
        raise Exception(f"Failed to open {filepath}") from err
    return line.split(",")
def argparser(parser):
    """Default argument parser for regressions.

    Adds the ``--local`` flag to the given argparse parser.
    """
    # NOTE(review): `action="store_true"` combined with `default=True` means
    # the parsed value is True whether or not `--local` is passed, so the
    # flag is effectively a no-op — confirm whether the default should be
    # False before changing caller-visible behavior.
    parser.add_argument("--local",
                        action="store_true",
                        help="run regression in local mode without docker-compose down", default=True) | 34,287 |
def package_search(filters, context, limit=None, catalog=False):
    """Search packages with different filters
    Catalog param controls the base query creation. Catalog queries
    only search packages a user can deploy. Non-catalog queries searches
    packages a user can edit.
    * Admin is allowed to browse all the packages
    * Regular user is allowed to browse all packages belongs to user tenant
      and all other packages marked is_public.
      Also all packages should be enabled.
    * Use marker (inside filters param) and limit for pagination:
      The typical pattern of limit and marker is to make an initial limited
      request and then to use the ID of the last package from the response
      as the marker parameter in a subsequent limited request.

    Returns the list of matching Package model rows.
    """
    session = db_session.get_session()
    pkg = models.Package
    query = session.query(pkg)
    if catalog:
        # Only show packages one can deploy, i.e. own + public
        query = query.filter(or_(
            pkg.owner_id == context.tenant, pkg.is_public)
        )
    else:
        # Show packages one can edit.
        if not context.is_admin:
            query = query.filter(pkg.owner_id == context.tenant)
        # No else here admin can edit everything.
    if not filters.get('include_disabled', '').lower() == 'true':
        query = query.filter(pkg.enabled)
    if filters.get('owned', '').lower() == 'true':
        query = query.filter(pkg.owner_id == context.tenant)
    # ---- attribute filters ----
    if 'type' in filters.keys():
        query = query.filter(pkg.type == filters['type'].title())
    if 'category' in filters.keys():
        query = query.filter(pkg.categories.any(
            models.Category.name.in_(filters['category'])))
    if 'tag' in filters.keys():
        query = query.filter(pkg.tags.any(
            models.Tag.name.in_(filters['tag'])))
    if 'class_name' in filters.keys():
        query = query.filter(pkg.class_definitions.any(
            models.Class.name == filters['class_name']))
    if 'fqn' in filters.keys():
        query = query.filter(pkg.fully_qualified_name == filters['fqn'])
    # ---- free-text search across all mapped columns and FK collections ----
    if 'search' in filters.keys():
        fk_fields = {'categories': 'Category',
                     'tags': 'Tag',
                     'class_definitions': 'Class'}
        conditions = []
        for attr in dir(pkg):
            if attr.startswith('_'):
                continue
            if isinstance(getattr(pkg, attr),
                          attributes.InstrumentedAttribute):
                search_str = filters['search']
                # Treat commas and semicolons as word separators.
                for delim in ',;':
                    search_str = search_str.replace(delim, ' ')
                for key_word in search_str.split():
                    _word = '%{value}%'.format(value=key_word)
                    if attr in fk_fields.keys():
                        condition = getattr(pkg, attr).any(
                            getattr(models, fk_fields[attr]).name.like(_word))
                        conditions.append(condition)
                    elif isinstance(getattr(pkg, attr)
                                    .property.columns[0].type, sa.String):
                        # Only string columns are searchable with LIKE.
                        conditions.append(getattr(pkg, attr).like(_word))
        query = query.filter(or_(*conditions))
    # ---- ordering and pagination ----
    sort_keys = [SEARCH_MAPPING[sort_key] for sort_key in
                 filters.get('order_by', ['name'])]
    marker = filters.get('marker')
    sort_dir = filters.get('sort_dir')
    if marker is not None:  # set marker to real object instead of its id
        marker = _package_get(marker, session)
    query = utils.paginate_query(
        query, pkg, limit, sort_keys, marker, sort_dir)
    return query.all() | 34,288 |
def is_valid(listener_tuple):
    """
    Validate a listener tuple against AWS listener rules before we try to
    create or update it.

    While violations could be caught with boto exception handling, checking
    early lets us give nice user-facing warnings before anything is sent to
    AWS. This validity check should also exist in the frontend but must be
    enforced by the server as well.

    :param listener_tuple: (lb_port, instance_port, lb_protocol, arn)
    """
    current_app.logger.debug(listener_tuple)
    lb_port, i_port, lb_protocol, arn = listener_tuple
    current_app.logger.debug(lb_protocol)
    # Secure protocols require a certificate ARN.
    if lb_protocol.lower() in ('ssl', 'https') and not arn:
        raise InvalidListener
    return listener_tuple
def change_name(player_obj):
    """Assign a new random username to the given player object and persist it."""
    player_obj.name = get_random_username()
    player_obj.save()
def _run_wrapped(
    func,
    is_multimachine,
    master_ip,
    port,
    world_size,
    rank,
    dev,
    device_type,
    args,
    kwargs,
    backend,
    queue: mp.Queue,
    machine_ranks: list,
):
    """Init distributed process group and run wrapped function.

    Runs in a spawned worker process: joins the process group, executes
    ``func(*args, **kwargs)``, pushes ``(dev, result)`` onto ``queue`` so the
    parent can collect per-device results, synchronizes, and exits.

    Args:
        func: The user function to execute on this worker.
        is_multimachine: Whether workers span multiple machines (adds barriers).
        master_ip / port: Rendezvous address of the master process.
        world_size / rank: Total worker count and this worker's rank.
        dev: Device index assigned to this worker.
        device_type: Kind of device (validated before group init).
        args / kwargs: Arguments forwarded to ``func``.
        backend: Distributed communication backend.
        queue: Queue used to hand the result back to the parent process.
        machine_ranks: Ranks co-located on this machine.
    """
    _check_device_initialized(device_type, dev)
    init_process_group(
        master_ip=master_ip,
        port=port,
        world_size=world_size,
        rank=rank,
        device=dev,
        backend=backend,
        device_type=device_type,
    )
    # set NCCL_LAUNCH_MODE to avoid deadlock
    os.environ["NCCL_LAUNCH_MODE"] = "PARALLEL"
    _set_machine_ranks(machine_ranks)
    # Barrier so all machines enter the user function together.
    if is_multimachine:
        group_barrier()
    ret = func(*args, **kwargs)
    queue.put((dev, ret))
    full_sync()
    # Barrier again so no machine exits while peers are still communicating.
    if is_multimachine:
        group_barrier()
    _exit(0) | 34,291 |
def _jitter_boxes(gt_boxes, jitter=0.05):
"""
"""
jittered_boxes = gt_boxes.copy()
ws = jittered_boxes[:, 2] - jittered_boxes[:, 0] + 1.0
hs = jittered_boxes[:, 3] - jittered_boxes[:, 1] + 1.0
width_offset = (np.random.rand(jittered_boxes.shape[0]) - 0.5) * jitter * ws
height_offset = (np.random.rand(jittered_boxes.shape[0]) - 0.5) * jitter * hs
jittered_boxes[:, 0] += width_offset
jittered_boxes[:, 2] += width_offset
jittered_boxes[:, 1] += height_offset
jittered_boxes[:, 3] += height_offset
return jittered_boxes | 34,292 |
def last_model_path(exp_name):
    """Return the path of the latest model checkpoint in the experiment.

    The checkpoint is expected to be a ``latest.pth`` symlink inside the
    experiment's log directory; an AssertionError is raised otherwise.
    """
    link_path = os.path.join(constants.ET_LOGS, exp_name, "latest.pth")
    assert os.path.islink(link_path)
    return link_path
def lcm_gcd(a, b):
    """Finds the least common multiple of two integers.

    Uses the identity lcm(a, b) = a * b / gcd(a, b), delegating to the
    standard library's math.gcd instead of a hand-rolled GCD helper.

    Args:
        a, b: integers greater than or equal to 1
    """
    # Local import keeps this record self-contained (the surrounding file
    # has no shared import header).
    import math
    return a * b // math.gcd(a, b)
def test_ahocorasick_rs_overlapping(benchmark, test_data):
    """Benchmark ahocorasick_rs with overlapping matches enabled."""
    patterns, haystacks = test_data
    matcher = ahocorasick_rs.AhoCorasick(patterns)
    def run():
        for haystack in haystacks:
            matches = matcher.find_matches_as_strings(haystack, overlapping=True)
        return matches
    print(benchmark(run))
def merge_frames(frames):
    """
    Merge the multiple data files downloaded from the M2M system or the Gold
    Copy THREDDS server into a single xarray data set. Keep track of how many
    files fail to merge.
    :param frames: The data frames to concatenate/merge into a single data set
    :return data: The final, merged data set (sorted by deployment and time),
        or None if the frames could not be merged at all
    """
    # merge the list of processed data frames into a single data set
    nfiles = len(frames)
    nframes = nfiles
    bad_files = 0
    if nframes > 1:
        # try merging all of the frames into a single data set (some frames may be corrupted, and will be skipped)
        data, fail = _frame_merger(frames[0], frames)
        # if all of the files, except for the first one, failed that would suggest the first file is the problem.
        # try the merge again, reset the starting frame to skip the first one.
        if nframes - fail == 1:
            data, fail = _frame_merger(frames[1], frames[1:])
            nframes -= 1
        # if we still can't merge the frames, then there probably is something more fundamentally wrong, and trying
        # to account for it here is not going to be possible
        if nframes - 1 - fail == 1:
            message = f"Unable to merge the {len(frames)} files downloaded from the Gold Copy THREDDS server."
            warnings.warn(message)
            return None
        else:
            bad_files = nfiles - nframes + fail
    else:
        # there is just the one
        data = frames[0]
    if bad_files > 0:
        message = "{} of the {} downloaded files failed to merge.".format(bad_files, nfiles)
        warnings.warn(message)
    data = data.sortby(['deployment', 'time'])
    # refresh the time-coverage attributes to reflect the merged data
    data.attrs['time_coverage_start'] = ('%sZ' % data.time.min().values)
    data.attrs['time_coverage_end'] = ('%sZ' % data.time.max().values)
    data.attrs['time_coverage_resolution'] = ('P%.2fS' % (np.mean(data.time.diff('time').values).astype(float) / 1e9))
    return data | 34,296 |
def sorted_nicely(l):
    """ Sort the given iterable the way a human expects ("natural" sort).

    Numeric runs inside each string are compared as integers, so 'a2'
    sorts before 'a10'.

    Obtained from:
    https://arcpy.wordpress.com/2012/05/11/sorting-alphanumeric-strings-in-python/
    :param l: The iterable to be sorted
    :return: Sorted iterable
    """
    def _to_token(text):
        return int(text) if text.isdigit() else text

    def _natural_key(key):
        return [_to_token(chunk) for chunk in re.split('([0-9]+)', key)]

    return sorted(l, key=_natural_key)
def run_board(effects: list, audio: np.array, sample_rate: float) -> np.array:
    """Apply a chain of Pedalboard effects to the input audio.

    Args:
        effects (list): List of Pedalboard effects.
        audio (np.array): Input audio data.
        sample_rate (float): Sample rate of the audio.
    Returns:
        Output (effected) audio data
    """
    return Pedalboard(effects, sample_rate=sample_rate)(audio)
def html_escape( s ):
    """Escape the HTML-special characters '&', '<' and '>' in ``s``.

    '&' is replaced first so the entities introduced for '<' and '>'
    are not themselves re-escaped.
    """
    for char, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;')):
        s = s.replace(char, entity)
    return s
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.