| content (string, 22 to 815k chars) | id (int64, 0 to 4.91M) |
|---|---|
def _bias_scale(x, b, data_format):
"""The multiplication counter part of tf.nn.bias_add."""
if data_format == 'NHWC':
return x * b
elif data_format == 'NCHW':
# Reshape the per-channel multiplier so it broadcasts over the channel axis.
return x * tf.reshape(b, [1, -1, 1, 1])
else:
raise ValueError('invalid data_format: %s' % data_format)
| 12,500
|
def checkTableName(tables):
""" Check if table name has an underscore or not."""
bad = set()
output = []
for i in tables:
if re.search('.*_.*', i):
bad.add(i)
if bad:
output.append("These tables have underscores in the name")
for i in bad:
output.append(i)
output.append("")
else:
output.append("No malformed table names")
output.append("")
return (output, bad)
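# A minimal usage sketch (assumes checkTableName above; it relies on `re` being imported at module level).
import re  # used by checkTableName

report, bad_names = checkTableName(["users", "order_items", "products"])
assert bad_names == {"order_items"}
assert report[0] == "These tables have underscores in the name"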
| 12,501
|
def Popd():
"""Pops and returns the top of the directory stack.
"""
os.chdir(_dirstack.pop())
| 12,502
|
def global_exception_handler(loop, context) -> None:
"""Global exception handler."""
LOGGER.exception(
"Caught exception: %s", context.get("exception", context["message"])
)
loop.default_exception_handler(context)
| 12,503
|
def bluecat():
"""
Collect bluecat data
"""
bluecat_.collect()
| 12,504
|
def load_split_from_tfds_builder(builder,
batch_size,
split,
preprocess_example=None,
augment_train_example=None,
shuffle_buffer_size=None,
shuffle_seed=0,
cache=True):
"""Loads a split from a dataset using TensorFlow Datasets compatible builder.
Args:
builder: tfds.core.DatasetBuilder; A TFDS compatible dataset builder.
batch_size: int; The batch size returned by the data pipeline.
split: str; Name of the split to be loaded.
preprocess_example: function; A function that, given an example, returns the
preprocessed example. Note that preprocessing is done BEFORE caching so that
the preprocessed examples can be re-used.
augment_train_example: A function that given a train example returns the
augmented example. Note that this function is applied AFTER caching and
repeat to get true randomness.
shuffle_buffer_size: int; Size of the tf.data.dataset shuffle buffer.
shuffle_seed: int; Seed for shuffling the training data.
cache: bool; Whether to cache dataset in memory.
Returns:
A `tf.data.Dataset`, and dataset information.
"""
# Prepare map functions.
preprocess_example = preprocess_example or (lambda ex: ex)
augment_train_example = augment_train_example or (lambda ex: ex)
shuffle_buffer_size = shuffle_buffer_size or (8 * batch_size)
# Download dataset:
builder.download_and_prepare()
# Each host is responsible for a fixed subset of data.
base_split_name, host_start, host_end = get_data_range(
builder, split, jax.process_index(), jax.process_count())
data_range = tfds.core.ReadInstruction(
base_split_name, unit='abs', from_=host_start, to=host_end)
ds = builder.as_dataset(split=data_range, shuffle_files=False)
options = tf.data.Options()
options.threading.private_threadpool_size = 48
ds = ds.with_options(options)
# Applying preprocessing before `ds.cache()` to re-use it.
ds = ds.map(
preprocess_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Caching.
if cache:
ds = ds.cache()
if 'train' in split:
# First repeat then batch.
ds = ds.repeat()
# Augmentation should be done after repeat for true randomness.
ds = ds.map(
augment_train_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Shuffle after augmentation to avoid loading uncropped images into buffer:
ds = ds.shuffle(shuffle_buffer_size, seed=shuffle_seed)
ds = ds.batch(batch_size, drop_remainder=True)
else:
# First batch then repeat.
ds = ds.batch(batch_size, drop_remainder=False)
ds = ds.repeat()
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
return ds, builder.info
| 12,505
|
def load_model_weights(model, filename, verbose=1, cp_folder=""):
"""
Loads the weights of a PyTorch model. The exception handles cpu/gpu incompatibilities
Arguments:
model {torch module} -- Model to load the weights to
filename {str} -- Name of the checkpoint
Keyword Arguments:
verbose {int} -- Whether to display infos (default: {1})
cp_folder {str} -- Folder to load from (default: {''})
Returns:
torch module -- Model with loaded weights
"""
if verbose:
print(f"\n -> Loading weights from {os.path.join(cp_folder,filename)}\n")
try:
model.load_state_dict(torch.load(os.path.join(cp_folder, filename)), strict=True)
except BaseException:
model.load_state_dict(
torch.load(os.path.join(cp_folder, filename), map_location="cpu"),
strict=True,
)
return model
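# Hedged usage sketch: round-trip a checkpoint through load_model_weights.
# The Linear layer and file name here are illustrative, not from the original project.
import os
import torch
import torch.nn as nn

net = nn.Linear(10, 2)
torch.save(net.state_dict(), "checkpoint.pt")
net = load_model_weights(net, "checkpoint.pt", verbose=1, cp_folder=".")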
| 12,506
|
def analyze(tokens):
"""
表达式元素组合,形成操作树
"""
assert_non_empty(tokens)
# 数字或者操作符
token = analyze_token(tokens.pop(0))
# 如果是数字,直接放回就好了,继续求下一个,因为数字是自求解的,本身就是解
if type(token) in (int, float):
return token
# 如果是操作符,则需要组合为Exp表达式
if token in known_operators:
# 当前是操作符, 则需要检查后面有没有操作数
# 计算器的操作符后面是有操作数的
# 操作数递归组合即可
if len(tokens) == 0 or tokens.pop(0) != '(':
raise SyntaxError('expected ( after ' + token)
return Exp(token, analyze_operands(tokens))
else:
raise SyntaxError('unexpected ' + token)
| 12,507
|
def copy_dict(dic: Mapping[str, Any], depth: int = 1) -> Mapping[str, Any]:
"""Deep copy a dict
Args:
dic: The dict to be copied
depth: The depth to be deep copied
Returns:
The deep-copied dict
"""
if depth <= 1:
return dic.copy()
return {
key: copy_dict(val, depth - 1) if isinstance(val, dict) else val
for key, val in dic.items()
}
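# Hedged usage sketch: only the first `depth` levels are copied, deeper values stay shared
# (assumes `from typing import Any, Mapping` at module level for the signature above).
cfg = {"a": {"b": {"c": 1}}}
one_level = copy_dict(cfg, depth=1)
two_levels = copy_dict(cfg, depth=2)
assert one_level["a"] is cfg["a"]             # depth 1: nested dict still shared
assert two_levels["a"] is not cfg["a"]        # depth 2: first nested level copied
assert two_levels["a"]["b"] is cfg["a"]["b"]  # levels below `depth` remain shared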
| 12,508
|
def set_token(jq_mob, jq_pwd):
"""
:param jq_mob: str
mob is the mobile phone number registered when applying for JQData
:param jq_pwd: str
Password is the JoinQuant website login password; for newly registered users it defaults to the last 6 digits of the phone number
:return: None
"""
with open(file_token, 'wb') as f:
pickle.dump([jq_mob, jq_pwd], f)
| 12,509
|
def get_wrf_config(wrf_config, start_date=None, **kwargs):
"""
precedence = kwargs > wrf_config.json > constants
"""
if start_date is not None:
wrf_config['start_date'] = start_date
for key in kwargs:
wrf_config[key] = kwargs[key]
return wrf_config
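# Hedged usage sketch of the precedence rule: explicit arguments and kwargs override existing values.
wrf_config = {"start_date": "2018-01-01", "run_hours": 24}
updated = get_wrf_config(wrf_config, start_date="2018-02-01", run_hours=48, nests=3)
assert updated["start_date"] == "2018-02-01"  # explicit start_date wins
assert updated["run_hours"] == 48             # kwargs override existing keys
assert updated["nests"] == 3                  # new keys are simply added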
| 12,510
|
def pdb_to_structure(filename):
"""Import a structure object from a PDB file.
"""
try:
from Bio import PDB
except ImportError:
print("I can't import Biopython which is needed to handle PDB files.")
raise
p = PDB.PDBParser()
structure = p.get_structure("S", filename)
atoms = [np.array(atom.get_coord()) for atom in structure.get_atoms()]
return atoms
| 12,511
|
def allocation_ncsist():
"""
Real Name: Allocation NCSIST
Original Eqn: IF THEN ELSE( ShiMen Reservoir Depth>=ShiMenReservoir Operation Rule Lower Limit , 6048, IF THEN ELSE( ShiMen Reservoir Depth >=ShiMenReservoir Operation Rule Lower Severe Limit, 6048*0.9 , 6048*0.8 ) )
Units: m3
Limits: (None, None)
Type: component
Subs: None
water right 6048(m^3 per day), the same for each Ten-days; 0.07 CMSD, classified as Domestic.
"""
return if_then_else(
shimen_reservoir_depth() >= shimenreservoir_operation_rule_lower_limit(),
lambda: 6048,
lambda: if_then_else(
shimen_reservoir_depth()
>= shimenreservoir_operation_rule_lower_severe_limit(),
lambda: 6048 * 0.9,
lambda: 6048 * 0.8,
),
)
| 12,512
|
def hamming(s1, s2):
"""Return the hamming distance between 2 DNA sequences"""
return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2)) + abs(len(s1) - len(s2))
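# Hedged usage sketch: mismatching positions plus the length difference.
assert hamming("GAGCCTACTAACGGGAT", "CATCGTAATGACGGCCT") == 7
assert hamming("ACGT", "ACGTTT") == 2  # extra characters count as mismatches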
| 12,513
|
def extract_geometric_plane(polygon: Polygon, plane_triangle_indices, tri_mesh: HalfEdgeTriangulation, normal: np.ndarray):
"""Will extract geometric details from the polygon and plane of interest
Args:
polygon (Polygon): Shapely Polygon of a flat surface
plane_triangle_indices (ndarray uint64): Triangle indices of the plane in the mesh
tri_mesh (HalfEdgeTriangulation): The mesh of the environment
normal (np.ndarray): The surface normal that this plane was extracted on
Returns:
dict: point (centroid), normal, all_points, area and normal_ransac of the extracted plane
"""
# triangles:np.ndarray = np.asarray(tri_mesh.triangles)
# vertices:np.ndarray = np.asarray(tri_mesh.vertices)
# all_point_indices = triangles[plane_triangle_indices, :]
# all_point_indices = np.reshape(all_point_indices, (np.prod(all_point_indices.shape), ))
# all_point_indices = np.unique(all_point_indices)
# all_points = vertices[all_point_indices, :]
all_points = np.asarray(polygon.exterior.coords)
# centroid = np.mean(all_points, axis=0) # TODO polygon.centroid ?
normal_ransac, centroid, _ = estimate_plane(all_points)
return dict(point=centroid, normal=normal, all_points=all_points, area=polygon.area, normal_ransac=normal_ransac)
| 12,514
|
def get_org_df(pr_label_f, metadata_df, seq_len):
"""
Returns the org_df given pr_label_f, metadata_df and seq_len.
"""
org_r, org_c = torch.nonzero(pr_label_f, as_tuple=True)
org_df = cudf.DataFrame()
org_df["seq_row"] = cudf.Series(org_r)
org_df["org_seq_col"] = cudf.Series(org_c)
org_df = org_df.merge(metadata_df)
org_df = org_df.rename(columns={"seq_row": "org_seq_row"})
org_df["flat_loc_org"] = org_df["org_seq_row"] * seq_len + org_df["org_seq_col"]
### Trim overlapping and invalid predictions
flag = (org_df["org_seq_col"] >= org_df["start_index"]) & (
org_df["org_seq_col"] <= org_df["stop_index"]
)
org_df = org_df[flag]
return org_df[["org_seq_row", "org_seq_col", "input_text_index", "flat_loc_org"]]
| 12,515
|
def rotate_cube(cube, up_direction):
"""Generator function that rotates the cube clockwise around the given up_direction"""
while True:
for i in range(SIZE * 4 - 4):
c = cube.copy()
for layer in range(SIZE // 2):
# layer 0 is outermost
layer_size = SIZE * 4 - 4 - 8 * layer
layer_pos = i * layer_size // (SIZE * 4 - 4)
rotate_layer(c, up_direction, layer, layer_pos)
yield c
yield True
| 12,516
|
def binomial_proportion(nsel, ntot, coverage=0.68):
"""
Calculate a binomial proportion (e.g. efficiency of a selection) and its confidence interval.
Parameters
----------
nsel: array-like
Number of selected events.
ntot: array-like
Total number of events.
coverage: float (optional)
Requested fractional coverage of interval (default: 0.68).
Returns
-------
p: array of dtype float
Binomial fraction.
dpl: array of dtype float
Lower uncertainty delta (p - pLow).
dpu: array of dtype float
Upper uncertainty delta (pUp - p).
Examples
--------
>>> p, dpl, dpu = binomial_proportion(50,100,0.68)
>>> round(p, 3)
0.5
>>> round(dpl, 3)
0.049
>>> round(dpu, 3)
0.049
>>> abs(np.sqrt(0.5*(1.0-0.5)/100.0)-0.5*(dpl+dpu)) < 1e-3
True
Notes
-----
The confidence interval is approximate and uses the score method
of Wilson. It is based on the log-likelihood profile and can
undercover the true interval, but the coverage is on average
closer to the nominal coverage than the exact Clopper-Pearson
interval. It is impossible to achieve perfect nominal coverage
as a consequence of the discreteness of the data.
"""
from scipy.stats import norm
z = norm().ppf(0.5 + 0.5 * coverage)
z2 = z * z
p = np.asarray(nsel, dtype=float) / ntot
div = 1.0 + z2 / ntot
pm = (p + z2 / (2 * ntot))
dp = z * np.sqrt(p * (1.0 - p) / ntot + z2 / (4 * ntot * ntot))
pl = (pm - dp) / div
pu = (pm + dp) / div
return p, p - pl, pu - p
| 12,517
|
def nasnet_dual_path_scheme_ordinal(module,
x,
_):
"""
NASNet specific scheme of dual path response for an ordinal module with dual inputs/outputs in a DualPathSequential
module.
Parameters:
----------
module : nn.Module
A module.
x : Tensor
Current processed tensor.
Returns
-------
x_next : Tensor
Next processed tensor.
x : Tensor
Current processed tensor.
"""
return module(x), x
| 12,518
|
def ifttt_budget_options():
""" Option values for the budget field """
if "IFTTT-Service-Key" not in request.headers or \
request.headers["IFTTT-Service-Key"] != get_ifttt_key():
return json.dumps({"errors": [{"message": "Invalid key"}]}), 401
try:
data = get_ynab_budgets()
return json.dumps({"data": data})
except Exception:
traceback.print_exc()
return json.dumps({"data": [{"label": "ERROR retrieving YNAB data",
"value": ""}]})
| 12,519
|
def get_exp_date_stats(db_file_name, Table):
"""Caculate exp date stats of collection"""
conn = sqlite3.connect(db_file_name)
c = conn.cursor()
c.execute('''SELECT exp, count(exp) FROM {} GROUP BY exp'''.format(Table))
exp_dict = {}
results = c.fetchall()
for result in results:
exp_dict[str(result[0])] = result[1]
conn.commit()
conn.close()
return exp_dict
| 12,520
|
def create_default_identifier(node_address, token_address, target):
"""
The default message identifier value is the first 8 bytes of the sha3 of:
- Our Address
- Our target address
- The token address
- A random 8 byte number for uniqueness
"""
hash_ = sha3('{}{}{}{}'.format(
node_address,
target,
token_address,
random.randint(0, UINT64_MAX)
))
return int(hash_[0:8].encode('hex'), 16)
| 12,521
|
def set_layers_to_non_trainable(model, layers):
""" Set layers of a model to non-trainable """
layers_to_non_trainable = [model.layers[i] for i in layers]
for layer in layers_to_non_trainable:
layer.trainable = False
for layer in model.layers:
logging.debug("Layer %s is trainable: %s" %
(layer.name, layer.trainable))
return model
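# Hedged usage sketch (assumes a tf.keras model; any model exposing a .layers list of
# layers with a .trainable attribute would work the same way):
import logging
import tensorflow as tf

logging.basicConfig(level=logging.DEBUG)
model = tf.keras.Sequential([tf.keras.layers.Dense(8, input_shape=(4,)), tf.keras.layers.Dense(2)])
model = set_layers_to_non_trainable(model, layers=[0])  # freeze the first Dense layer
assert model.layers[0].trainable is False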
| 12,522
|
def print_scientific_16(value: float) -> str:
"""
Prints a value in 16-character scientific notation.
This is a sub-method and shouldn't typically be called.
.. seealso:: print_float_16 for a better method
"""
if value == 0.0:
return '%16s' % '0.'
python_value = '%16.14e' % value # -1.e-2
svalue, sexponent = python_value.strip().split('e')
exponent = int(sexponent) # removes 0s
if abs(value) < 1.:
sign = '-'
else:
sign = '+'
# the exponent will be added later...
sexp2 = str(exponent).strip('-+')
value2 = float(svalue)
# the plus 1 is for the sign
len_sexp = len(sexp2) + 1
leftover = 16 - len_sexp
if value < 0:
fmt = "%%1.%sf" % (leftover - 3)
else:
fmt = "%%1.%sf" % (leftover - 2)
svalue3 = fmt % value2
svalue4 = svalue3.strip('0')
field = "%16s" % (svalue4 + sign + sexp2)
return field
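# Hedged usage sketch: the result is a fixed-width 16-character field using the
# "mantissa + exponent sign + exponent" style of Nastran-like bulk data formats.
assert len(print_scientific_16(-0.01)) == 16
assert print_scientific_16(0.0).strip() == '0.'
print(print_scientific_16(123456.789))  # something like '1.23456789+5', right-justified to 16 chars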
| 12,523
|
def _interpolate_target(bin_edges, y_vals, idx, target):
"""Helper to identify when a function y that has been discretized hits value target.
idx is the first index where y is greater than the target
"""
if idx == 0:
y_1 = 0.
else:
y_1 = y_vals[idx - 1]
y_2 = y_vals[idx]
edge_1 = bin_edges[idx]
edge_2 = bin_edges[idx + 1]
frac = (target - y_1) / (y_2 - y_1)
x = edge_1 + frac * (edge_2 - edge_1)
return x
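# Hedged usage sketch: locate where a discretized (e.g. cumulative) curve crosses a target value.
bin_edges = [0.0, 1.0, 2.0, 3.0]
y_vals = [0.2, 0.6, 1.0]  # value of y at the end of each bin
target = 0.5
idx = next(i for i, y in enumerate(y_vals) if y > target)  # first index where y exceeds the target
x = _interpolate_target(bin_edges, y_vals, idx, target)
assert abs(x - 1.75) < 1e-9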
| 12,524
|
def process_alpha(alpha, experiments, filename):
"""
Save experiment results to CSV file
:param alpha: Value of alpha to run for
:param experiments: List of (data, dictionary) for experiments ('no_normalization', 'd_normalization', 'x_normalization', 'full_normalization')
:param filename: filename for the CSV
"""
# Run experiments
d_normalization_Z, full_normalization_Z, no_normalization_Z, x_normalization_Z = run_experiments(alpha, experiments)
# Extract statistics. TODO: Turn output and input to dictionary
d_normalization_percentile, d_normalization_score, d_normalization_std, \
full_normalization_percentile, full_normalization_score, full_normalization_std, \
no_normalization_percentile, no_normalization_score, no_normalization_std, \
x_normalization_percentile, x_normalization_score, x_normalization_std = \
calculate_statistics(d_normalization_Z, full_normalization_Z, no_normalization_Z, x_normalization_Z)
# Create histograms
histogram_fn(filename, 'no_normalization_Z', no_normalization_Z.view(-1), alpha)
histogram_fn(filename, 'd_normalization_Z', d_normalization_Z.view(-1), alpha)
histogram_fn(filename, 'x_normalization_Z', x_normalization_Z.view(-1), alpha)
histogram_fn(filename, 'full_normalization_Z', full_normalization_Z.view(-1), alpha)
# Create final row
results_row = '{},{},{},{},{},{},{},{},{},{},{},{},{}\n'.format(alpha,
no_normalization_score, no_normalization_std,
no_normalization_percentile,
d_normalization_score, d_normalization_std,
d_normalization_percentile,
x_normalization_score, x_normalization_std,
x_normalization_percentile,
full_normalization_score,
full_normalization_std,
full_normalization_percentile,
)
with open('{}.csv'.format(filename), 'a') as f:
f.write(results_row)
| 12,525
|
def reinstall_fonts_sb(fonts_path):
"""
Reinstall all fonts.
"""
print_section_header("REINSTALLING FONTS", Fore.BLUE)
# Copy every file in fonts_path to ~/Library/Fonts
for font in get_abs_path_subfiles(fonts_path):
font_lib_path = get_fonts_dir()
dest_path = os.path.join(font_lib_path, font.split("/")[-1])
copyfile(quote(font), quote(dest_path))
print_section_header("FONT REINSTALLATION COMPLETED", Fore.BLUE)
| 12,526
|
def azimuth_range_to_lat_lon(azimuths, ranges, center_lon, center_lat, geod=None):
"""Convert azimuth and range locations in a polar coordinate system to lat/lon coordinates.
Pole refers to the origin of the coordinate system.
Parameters
----------
azimuths : array_like
array of azimuths defining the grid. If not a `pint.Quantity`,
assumed to be in degrees.
ranges : array_like
array of range distances from the pole. Typically in meters.
center_lat : float
The latitude of the pole in decimal degrees
center_lon : float
The longitude of the pole in decimal degrees
geod : `pyproj.Geod` or ``None``
PyProj Geod to use for forward azimuth and distance calculations. If ``None``, use a
default spherical ellipsoid.
Returns
-------
lon, lat : 2D arrays of longitudes and latitudes corresponding to original locations
Notes
-----
Credit to Brian Blaylock for the original implementation.
"""
if geod is None:
g = Geod(ellps='sphere')
else:
g = geod
rng2d, az2d = np.meshgrid(ranges, azimuths)
lats = np.full(az2d.shape, center_lat)
lons = np.full(az2d.shape, center_lon)
lon, lat, _ = g.fwd(lons, lats, az2d, rng2d)
return lon, lat
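# Hedged usage sketch (assumes numpy and pyproj's Geod are importable, as the function above requires):
import numpy as np
from pyproj import Geod  # used inside azimuth_range_to_lat_lon

azimuths = np.array([0.0, 90.0, 180.0, 270.0])      # degrees from the pole
ranges = np.array([1_000.0, 50_000.0, 100_000.0])   # meters
lon, lat = azimuth_range_to_lat_lon(azimuths, ranges, center_lon=-97.5, center_lat=35.25)
assert lon.shape == lat.shape == (4, 3)  # one (lon, lat) pair per azimuth/range combination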
| 12,527
|
def count_path_recursive(m, n):
"""Count number of paths with the recursive method."""
def traverse(m, n, location=[1, 1]):
# return 0 if past edge
if location[0] > m or location[1] > n:
return 0
# return 1 if at end position
if location == [m, n]:
return 1
return traverse(m, n, [location[0] + 1, location[1]]) + traverse(m, n, [location[0], location[1] + 1])
return traverse(m, n)
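# Hedged usage sketch: the count of monotone lattice paths equals the binomial
# coefficient C(m + n - 2, m - 1), which these spot checks reflect.
assert count_path_recursive(2, 2) == 2
assert count_path_recursive(3, 3) == 6
assert count_path_recursive(3, 7) == 28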
| 12,528
|
def markdown(caller):
"""Renders the argument to markdown. Useful in `{% filter markdown() %} `
blocks
Args:
caller (str): Markdown source
Returns:
str: rendered HTML
"""
return render_markdown(caller)
| 12,529
|
def aggregate_threedi_results(gridadmin: str, results_3di: str, demanded_aggregations: List[Aggregation],
bbox=None, start_time: int = None, end_time: int = None, subsets=None, epsg: int = 28992,
interpolation_method: str = None, resample_point_layer: bool = False,
resolution: float = None, output_flowlines: bool = True, output_nodes: bool = True,
output_cells: bool = True, output_rasters: bool = True):
"""
# TODO: use new version of threedi_ogr that includes adding default attributes to nodes, cells and flowline layers
:param resolution:
:param interpolation_method:
:param gridadmin: path to gridadmin.h5
:param results_3di: path to results_3di.nc
:param demanded_aggregations: list of dicts containing variable, method, [threshold]
:param bbox: bounding box [min_x, min_y, max_x, max_y]
:param start_time: start of time filter (seconds since start of simulation)
:param end_time: end of time filter (seconds since start of simulation)
:param subsets:
:param epsg: epsg code to project the results to
:return: an ogr Memory DataSource with one or more Layers: node (point), cell (polygon) or flowline (linestring) with the aggregation results
:rtype: ogr.DataSource
"""
# make output datasource and layers
tgt_drv = ogr.GetDriverByName('MEMORY')
tgt_ds = tgt_drv.CreateDataSource('')
out_rasters = {}
if not (output_flowlines or output_nodes or output_cells or output_rasters):
return tgt_ds, out_rasters
if resample_point_layer and (not output_nodes):
resample_point_layer = False
# perform demanded aggregations
node_results = dict()
line_results = dict()
first_pass_nodes = True
first_pass_flowlines = True
for da in demanded_aggregations:
# It would seem more sensible to keep the instantiation of gr, the subsetting and filtering outside the loop...
# ... but for some strange reason that leads to an error if more than 2 flowline aggregations are demanded
gr = GridH5ResultAdmin(gridadmin, results_3di)
# TODO: select subset
# Spatial filtering
if bbox is None:
lines = gr.lines
nodes = gr.nodes
cells = gr.cells
else:
if bbox[0] >= bbox[2] or bbox[1] >= bbox[3]:
raise Exception('Invalid bounding box.')
lines = gr.lines.filter(line_coords__in_bbox=bbox)
if lines.count == 0:
raise Exception('No flowlines found within bounding box.')
nodes = gr.nodes.filter(coordinates__in_bbox=bbox)
cells = gr.cells.filter(
coordinates__in_bbox=bbox) # filter on cell center coordinates to have the same results for cells as for nodes
if nodes.count == 0:
raise Exception('No nodes found within bounding box.')
new_column_name = da.as_column_name()
if da.variable.short_name in AGGREGATION_VARIABLES.short_names(var_types=[VT_FLOW]):
if output_flowlines:
if first_pass_flowlines:
first_pass_flowlines = False
try:
line_results[new_column_name] = time_aggregate(nodes_or_lines=lines,
start_time=start_time,
end_time=end_time,
aggregation=da
)
except AttributeError:
warnings.warn('Demanded aggregation of variable that is not included in these 3Di results')
line_results[new_column_name] = np.full(len(line_results['id']), fill_value=None, dtype=float)
elif da.variable.short_name in AGGREGATION_VARIABLES.short_names(var_types=[VT_NODE]):
if output_nodes or output_cells or output_rasters:
if first_pass_nodes:
first_pass_nodes = False
try:
node_results[new_column_name] = time_aggregate(nodes_or_lines=nodes,
start_time=start_time,
end_time=end_time,
aggregation=da
)
except AttributeError:
warnings.warn('Demanded aggregation of variable that is not included in these 3Di results')
node_results[new_column_name] = np.full(len(node_results['id']), fill_value=None, dtype=float)
elif da.variable.short_name in AGGREGATION_VARIABLES.short_names(var_types=[VT_NODE_HYBRID]):
if output_nodes or output_cells or output_rasters:
if first_pass_nodes:
first_pass_nodes = False
try:
node_results[new_column_name] = hybrid_time_aggregate(gr=gr,
ids=nodes.id,
start_time=start_time,
end_time=end_time,
aggregation=da
)
except AttributeError:
warnings.warn('Demanded aggregation of variable that is not included in these 3Di results')
node_results[new_column_name] = np.full(len(node_results['id']), fill_value=None, dtype=float)
# translate results to GIS layers
# node and cell layers
if len(node_results) > 0:
attributes = node_results
attr_data_types = {}
for attr, vals in node_results.items():
try:
attr_data_types[attr] = NP_OGR_DTYPES[vals.dtype]
except KeyError:
attr_data_types[attr] = ogr.OFTString
if output_nodes:
threedigrid_to_ogr(threedigrid_src=nodes,
tgt_ds=tgt_ds,
attributes=attributes,
attr_data_types=attr_data_types
)
if output_cells or output_rasters or resample_point_layer:
threedigrid_to_ogr(threedigrid_src=cells,
tgt_ds=tgt_ds,
attributes=attributes,
attr_data_types=attr_data_types
)
# rasters
if output_rasters or resample_point_layer:
cell_layer = tgt_ds.GetLayerByName('cell')
if cell_layer.GetFeatureCount() > 0:
first_pass_rasters = True
if (resolution is None or resolution == 0):
resolution = gr.grid.dx[0]
column_names = []
band_nr = 0
for da in demanded_aggregations:
if da.variable.short_name in AGGREGATION_VARIABLES.short_names(var_types=[VT_NODE, VT_NODE_HYBRID]):
col = da.as_column_name()
band_nr += 1
out_rasters[col] = rasterize_cell_layer(cell_layer=cell_layer,
column_name=col,
pixel_size=resolution,
interpolation_method=interpolation_method,
pre_resample_method=da.variable.pre_resample_method)
column_names.append(col)
if first_pass_rasters:
first_pass_rasters = False
tmp_drv = gdal.GetDriverByName('MEM')
tmp_ds = tmp_drv.CreateCopy('multiband', out_rasters[col])
# create resampled nodes output target_node_layer
if resample_point_layer:
srs = osr.SpatialReference()
srs.ImportFromWkt(tmp_ds.GetProjection())
points_resampled_lyr = tgt_ds.CreateLayer('node_resampled',
srs=srs,
geom_type=ogr.wkbPoint)
field = ogr.FieldDefn(col, ogr.OFTReal)
points_resampled_lyr.CreateField(field)
else:
tmp_ds.AddBand(datatype=gdal.GDT_Float32)
src_band = out_rasters[col].GetRasterBand(1)
src_arr = src_band.ReadAsArray()
tmp_band = tmp_ds.GetRasterBand(band_nr)
tmp_band.WriteArray(src_arr)
tmp_band.SetNoDataValue(src_band.GetNoDataValue())
if resample_point_layer:
field = ogr.FieldDefn(col, ogr.OFTReal)
points_resampled_lyr.CreateField(field)
if resample_point_layer:
tmp_points_resampled = pixels_to_geoms(raster=tmp_ds,
column_names=column_names,
output_geom_type=ogr.wkbPoint,
output_layer_name='unimportant name')
tmp_points_resampled_lyr = tmp_points_resampled.GetLayer(0)
for feat in tmp_points_resampled_lyr:
points_resampled_lyr.CreateFeature(feat)
feat = None
# flowline target_node_layer
if len(line_results) > 0 and output_flowlines:
attributes = line_results
attr_data_types = {}
for attr, vals in line_results.items():
try:
attr_data_types[attr] = NP_OGR_DTYPES[vals.dtype]
except KeyError:
attr_data_types[attr] = ogr.OFTString
threedigrid_to_ogr(threedigrid_src=lines, tgt_ds=tgt_ds, attributes=attributes, attr_data_types=attr_data_types)
if not output_rasters:
out_rasters = {}
if (not output_cells) and (resample_point_layer or output_rasters):
tgt_ds.DeleteLayer('cell')
return tgt_ds, out_rasters
| 12,530
|
def color_menu(colno: int, colname: str, entry: Dict[str, Any]) -> int:
# pylint: disable=unused-argument
"""color the menu"""
if entry.get("__shadowed") is True:
return 8
if entry.get("__deprecated") is True:
return 9
return 2
| 12,531
|
def likelihood(sent, ai, domain, temperature):
"""Computes likelihood of a given sentence according the giving model."""
enc = ai._encode(sent, ai.model.word_dict)
score, _, _ = ai.model.score_sent(enc, ai.lang_h, ai.ctx_h, temperature)
return score
| 12,532
|
def simulator(
theta,
model="angle",
n_samples=1000,
delta_t=0.001, # n_trials
max_t=20,
no_noise=False,
bin_dim=None,
bin_pointwise=False,
):
"""Basic data simulator for the models included in HDDM.
:Arguments:
theta : list or numpy.array or panda.DataFrame
Parameters of the simulator. If 2d array, each row is treated as a 'trial'
and the function runs n_sample * n_trials simulations.
model: str <default='angle'>
Determines the model that will be simulated.
n_samples: int <default=1000>
Number of simulation runs (for each trial if supplied n_trials > 1)
n_trials: int <default=1>
Number of trials in a simulations run (this specifically addresses trial by trial parameterizations)
delta_t: float
Size of time steps in the simulator (conceptually measured in seconds)
max_t: float
Maximum reaction time the simulator can reach
no_noise: bool <default=False>
Turn noise off (useful mostly for plotting purposes)
bin_dim: int <default=None>
Number of bins to use (in case the simulator output is supposed to come out as a count histogram)
bin_pointwise: bool <default=False>
Whether or not to bin the output data pointwise. If true, the 'RT' part of the data specifies the
'bin-number' of a given trial instead of the 'RT' directly. You need to specify bin_dim as some number for this to work.
:Return: tuple
can be (rts, responses, metadata)
or (rt-response histogram, metadata)
or (rts binned pointwise, responses, metadata)
"""
# Useful for sbi
if type(theta) == list:
print("theta is supplied as list --> simulator assumes n_trials = 1")
theta = np.asarray(theta).astype(np.float32)
elif type(theta) == np.ndarray:
theta = theta.astype(np.float32)
elif type(theta) == pd.core.frame.DataFrame:
theta = theta[model_config[model]["params"]].values.astype(np.float32)
else:
theta = theta.numpy().astype(np.float32)
if len(theta.shape) < 2:
theta = np.expand_dims(theta, axis=0)
if theta.ndim > 1:
n_trials = theta.shape[0]
else:
n_trials = 1
# 2 choice models
if no_noise:
s = 0.0
else:
s = 1.0
if model == "test":
x = ddm_flexbound(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
t=theta[:, 3],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
boundary_params={},
boundary_fun=bf.constant,
boundary_multiplicative=True,
max_t=max_t,
)
if model == "ddm" or model == "ddm_elife" or model == "ddm_analytic":
x = ddm_flexbound(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
t=theta[:, 3],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
boundary_params={},
boundary_fun=bf.constant,
boundary_multiplicative=True,
max_t=max_t,
)
if model == "ddm_legacy" or model == "ddm_vanilla":
x = ddm(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
t=theta[:, 3],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
)
if model == "full_ddm_legacy" or model == "full_ddm_vanilla":
x = full_ddm_vanilla(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
t=theta[:, 3],
sz=theta[:, 4],
sv=theta[:, 5],
st=theta[:, 6],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
)
if model == "angle" or model == "angle2":
x = ddm_flexbound(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
t=theta[:, 3],
s=s,
boundary_fun=bf.angle,
boundary_multiplicative=False,
boundary_params={"theta": theta[:, 4]},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if (
model == "weibull_cdf"
or model == "weibull_cdf2"
or model == "weibull_cdf_ext"
or model == "weibull_cdf_concave"
or model == "weibull"
):
x = ddm_flexbound(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
t=theta[:, 3],
s=s,
boundary_fun=bf.weibull_cdf,
boundary_multiplicative=True,
boundary_params={"alpha": theta[:, 4], "beta": theta[:, 5]},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "levy":
x = levy_flexbound(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
alpha_diff=theta[:, 3],
t=theta[:, 4],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "full_ddm" or model == "full_ddm2":
x = full_ddm(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
t=theta[:, 3],
sz=theta[:, 4],
sv=theta[:, 5],
st=theta[:, 6],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "ddm_sdv":
x = ddm_sdv(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
t=theta[:, 3],
sv=theta[:, 4],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "ornstein" or model == "ornstein_uhlenbeck":
x = ornstein_uhlenbeck(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
g=theta[:, 3],
t=theta[:, 4],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
# 3 Choice models
if no_noise:
s = np.tile(np.array([0.0, 0.0, 0.0], dtype=np.float32), (n_trials, 1))
else:
s = np.tile(np.array([1.0, 1.0, 1.0], dtype=np.float32), (n_trials, 1))
if model == "race_3":
x = race_model(
v=theta[:, :3],
a=theta[:, [3]],
z=theta[:, 4:7],
t=theta[:, [7]],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "race_no_bias_3":
x = race_model(
v=theta[:, :3],
a=theta[:, [3]],
z=np.column_stack([theta[:, [4]], theta[:, [4]], theta[:, [4]]]),
t=theta[:, [5]],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "race_no_bias_angle_3":
x = race_model(
v=theta[:, :3],
a=theta[:, [3]],
z=np.column_stack([theta[:, [4]], theta[:, [4]], theta[:, [4]]]),
t=theta[:, [5]],
s=s,
boundary_fun=bf.angle,
boundary_multiplicative=False,
boundary_params={"theta": theta[:, 6]},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "lca_3":
x = lca(
v=theta[:, :3],
a=theta[:, [3]],
z=theta[:, 4:7],
g=theta[:, [7]],
b=theta[:, [8]],
t=theta[:, [9]],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "lca_no_bias_3":
x = lca(
v=theta[:, :3],
a=theta[:, [3]],
z=np.column_stack([theta[:, [4]], theta[:, [4]], theta[:, [4]]]),
g=theta[:, [5]],
b=theta[:, [6]],
t=theta[:, [7]],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "lca_no_bias_angle_3":
x = lca(
v=theta[:, :3],
a=theta[:, [3]],
z=np.column_stack([theta[:, [4]], theta[:, [4]], theta[:, [4]]]),
g=theta[:, [5]],
b=theta[:, [6]],
t=theta[:, [7]],
s=s,
boundary_fun=bf.angle,
boundary_multiplicative=False,
boundary_params={"theta": theta[:, 8]},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
# 4 Choice models
if no_noise:
s = np.tile(np.array([0.0, 0.0, 0.0, 0.0], dtype=np.float32), (n_trials, 1))
else:
s = np.tile(np.array([1.0, 1.0, 1.0, 1.0], dtype=np.float32), (n_trials, 1))
if model == "race_4":
x = race_model(
v=theta[:, :4],
a=theta[:, [4]],
z=theta[:, 5:9],
t=theta[:, [9]],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "race_no_bias_4":
x = race_model(
v=theta[:, :4],
a=theta[:, [4]],
z=np.column_stack(
[theta[:, [5]], theta[:, [5]], theta[:, [5]], theta[:, [5]]]
),
t=theta[:, [6]],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "race_no_bias_angle_4":
x = race_model(
v=theta[:, :4],
a=theta[:, [4]],
z=np.column_stack(
[theta[:, [5]], theta[:, [5]], theta[:, [5]], theta[:, [5]]]
),
t=theta[:, [6]],
s=s,
boundary_fun=bf.angle,
boundary_multiplicative=False,
boundary_params={"theta": theta[:, 7]},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "lca_4":
x = lca(
v=theta[:, :4],
a=theta[:, [4]],
z=theta[:, 5:9],
g=theta[:, [9]],
b=theta[:, [10]],
t=theta[:, [11]],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "lca_no_bias_4":
x = lca(
v=theta[:, :4],
a=theta[:, [4]],
z=np.column_stack(
[theta[:, [5]], theta[:, [5]], theta[:, [5]], theta[:, [5]]]
),
g=theta[:, [6]],
b=theta[:, [7]],
t=theta[:, [8]],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "lca_no_bias_angle_4":
x = lca(
v=theta[:, :4],
a=theta[:, [4]],
z=np.column_stack(
[theta[:, [5]], theta[:, [5]], theta[:, [5]], theta[:, [5]]]
),
g=theta[:, [6]],
b=theta[:, [7]],
t=theta[:, [8]],
s=s,
boundary_fun=bf.angle,
boundary_multiplicative=False,
boundary_params={"theta": theta[:, 9]},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
# Seq / Parallel models (4 choice)
if no_noise:
s = 0.0
else:
s = 1.0
# Precompute z_vector for no_bias models
z_vec = np.tile(np.array([0.5], dtype=np.float32), reps=n_trials)
if model == "ddm_seq2":
x = ddm_flexbound_seq2(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=theta[:, 4],
z_l_1=theta[:, 5],
z_l_2=theta[:, 6],
t=theta[:, 7],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
)
if model == "ddm_seq2_no_bias":
x = ddm_flexbound_seq2(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec,
z_l_1=z_vec,
z_l_2=z_vec,
t=theta[:, 4],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
)
if model == "ddm_seq2_angle_no_bias":
x = ddm_flexbound_seq2(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec,
z_l_1=z_vec,
z_l_2=z_vec,
t=theta[:, 4],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.angle,
boundary_multiplicative=False,
boundary_params={"theta": theta[:, 5]},
)
if model == "ddm_seq2_weibull_no_bias":
x = ddm_flexbound_seq2(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec,
z_l_1=z_vec,
z_l_2=z_vec,
t=theta[:, 4],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.weibull_cdf,
boundary_multiplicative=True,
boundary_params={"alpha": theta[:, 5], "beta": theta[:, 6]},
)
if model == "ddm_par2":
x = ddm_flexbound_par2(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=theta[:, 4],
z_l_1=theta[:, 5],
z_l_2=theta[:, 6],
t=theta[:, 7],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
)
if model == "ddm_par2_no_bias":
x = ddm_flexbound_par2(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec,
z_l_1=z_vec,
z_l_2=z_vec,
t=theta[:, 4],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
)
if model == "ddm_par2_angle_no_bias":
x = ddm_flexbound_par2(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec,
z_l_1=z_vec,
z_l_2=z_vec,
t=theta[:, 4],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.angle,
boundary_multiplicative=False,
boundary_params={"theta": theta[:, 5]},
)
if model == "ddm_par2_weibull_no_bias":
x = ddm_flexbound_par2(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec,
z_l_1=z_vec,
z_l_2=z_vec,
t=theta[:, 4],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.weibull_cdf,
boundary_multiplicative=True,
boundary_params={"alpha": theta[:, 5], "beta": theta[:, 6]},
)
if model == "ddm_mic2_adj":
x = ddm_flexbound_mic2_adj(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=theta[:, 4], # np.array([0.5], dtype = np.float32),
z_l_1=theta[:, 5], # np.array([0.5], dtype = np.float32),
z_l_2=theta[:, 6], # np.array([0.5], dtype = np.float32),
d=theta[:, 7],
t=theta[:, 8],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
)
if model == "ddm_mic2_adj_no_bias":
x = ddm_flexbound_mic2_adj(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec[:],
z_l_1=z_vec[:],
z_l_2=z_vec[:],
d=theta[:, 4],
t=theta[:, 5],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
)
if model == "ddm_mic2_adj_angle_no_bias":
x = ddm_flexbound_mic2_adj(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec,
z_l_1=z_vec,
z_l_2=z_vec,
d=theta[:, 4],
t=theta[:, 5],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.angle,
boundary_multiplicative=False,
boundary_params={"theta": theta[:, 6]},
)
if model == "ddm_mic2_adj_weibull_no_bias":
x = ddm_flexbound_mic2_adj(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec,
z_l_1=z_vec,
z_l_2=z_vec,
d=theta[:, 4],
t=theta[:, 5],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.weibull_cdf,
boundary_multiplicative=True,
boundary_params={"alpha": theta[:, 6], "beta": theta[:, 7]},
)
# Output compatibility
if n_trials == 1:
x = (np.squeeze(x[0], axis=1), np.squeeze(x[1], axis=1), x[2])
if n_trials > 1 and n_samples == 1:
x = (np.squeeze(x[0], axis=0), np.squeeze(x[1], axis=0), x[2])
x[2]["model"] = model
if bin_dim == 0 or bin_dim is None:
return x
elif bin_dim > 0 and n_trials == 1 and not bin_pointwise:
binned_out = bin_simulator_output(x, nbins=bin_dim)
return (binned_out, x[2])
elif bin_dim > 0 and n_trials == 1 and bin_pointwise:
binned_out = bin_simulator_output_pointwise(x, nbins=bin_dim)
return (
np.expand_dims(binned_out[:, 0], axis=1),
np.expand_dims(binned_out[:, 1], axis=1),
x[2],
)
elif bin_dim > 0 and n_trials > 1 and n_samples == 1 and bin_pointwise:
binned_out = bin_simulator_output_pointwise(x, nbins=bin_dim)
return (
np.expand_dims(binned_out[:, 0], axis=1),
np.expand_dims(binned_out[:, 1], axis=1),
x[2],
)
elif bin_dim > 0 and n_trials > 1 and n_samples > 1 and bin_pointwise:
return "currently n_trials > 1 and n_samples > 1 will not work together with bin_pointwise"
elif bin_dim > 0 and n_trials > 1 and not bin_pointwise:
return "currently binned outputs not implemented for multi-trial simulators"
elif bin_dim == -1:
return "invalid bin_dim"
| 12,533
|
def logical_not(x: ArrayOrScalar) -> Union[Array, bool]:
"""
Returns the element-wise logical NOT of *x*.
"""
if isinstance(x, SCALAR_CLASSES):
# https://github.com/python/mypy/issues/3186
return np.logical_not(x) # type: ignore
assert isinstance(x, Array)
from pytato.utils import with_indices_for_broadcasted_shape
return IndexLambda(with_indices_for_broadcasted_shape(prim.Variable("_in0"),
x.shape,
x.shape),
shape=x.shape,
dtype=np.dtype(np.bool_),
bindings={"_in0": x})
| 12,534
|
def _build_xyz_pow(name, pref, l, m, n, shift=2):
"""
Builds an individual row contraction line.
name = pref * xc_pow[l] * yc_pow[m] * zc_pow[n]
"""
l = l - shift
m = m - shift
n = n - shift
if (pref <= 0) or (l < 0) or (n < 0) or (m < 0):
return None
mul = " "
if pref == 1:
ret = name + " ="
else:
# Basically always an int
ret = name + " = %2.1f" % float(pref)
mul = " * "
if l > 0:
ret += mul + "xc_pow[%d]" % (l - 1)
mul = " * "
if m > 0:
ret += mul + "yc_pow[%d]" % (m - 1)
mul = " * "
if n > 0:
ret += mul + "zc_pow[%d]" % (n - 1)
mul = " * "
if mul == " ":
ret += " 1"
return ret
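# Hedged usage sketch: the helper emits one contraction line of generated code,
# or None when the shifted powers make the term vanish.
line = _build_xyz_pow("A[0]", 2.0, 3, 2, 2)  # with the default shift=2 the powers become (1, 0, 0)
assert line == "A[0] = 2.0 * xc_pow[0]"
assert _build_xyz_pow("A[1]", 0.0, 3, 3, 3) is None  # zero prefactor drops the term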
| 12,535
|
def create_decode_network(width=width, height=height, Din=Din, Dout=Dout, d_range=d_range):
"""
data flow with traffic on:
input IO ->
tag horn ->
(pre-fifo valve) ->
FIFO ->
(post-fifo valve) ->
TAT ->
AER_tx ->
neurons ->
AER_rx ->
(neuron output valve) ->
PAT ->
accumulator ->
(pre-fifo valve) ->
FIFO ->
(post-fifo valve) ->
TAT ->
tag funnel ->
output IO
"""
N = width * height
net = graph.Network("net")
min_d, max_d = d_range
decoders = np.ones((Dout, N)) * (max_d - min_d) + min_d
tap_matrix = np.zeros((N, Din))
if Din == 1:
# one synapse per 4 neurons
for x in range(0, width, 2):
for y in range(0, height, 2):
n = y * width + x
if x < width // 2:
tap_matrix[n, 0] = 1
else:
tap_matrix[n, 0] = -1
else:
print("need to implement reasonable taps for Din > 1")
assert(False)
i1 = net.create_input("i1", Din)
p1 = net.create_pool("p1", tap_matrix)
b1 = net.create_bucket("b1", Dout)
o1 = net.create_output("o1", Dout)
net.create_connection("c_i1_to_p1", i1, p1, None)
decoder_conn = net.create_connection("c_p1_to_b1", p1, b1, decoders)
net.create_connection("c_b1_to_o1", b1, o1, None)
return net
| 12,536
|
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
| 12,537
|
def pscmd(item, pid=os.getpid()):
"""Invoke ps -o %(item)s -p %(pid)d and return the result"""
pscmd = PSCMD
if item == 'sid' and os.uname()[0] == 'AIX':
pscmd = '/usr/sysv/bin/ps'
if item == 'sid' and os.uname()[0] == 'Darwin':
item = 'sess'
assert pscmd, 'ps command not found (%s), can not run test' % pscmd
if item == 'ni' and os.uname()[0] == 'SunOS':
item = 'nice'
if item == 'rssize' and os.uname()[0] in ['SunOS', 'Darwin']:
item = 'rss'
if item == 'pgrp' and os.uname()[0] in ['SunOS', 'AIX', 'Darwin']:
item = 'pgid'
cmdl = [pscmd, '-o', item, '-p', str(pid)]
if HAVE_SUBPROCESS:
val = subprocess.Popen(cmdl, stdout=subprocess.PIPE).communicate()[0]
else:
val = os.popen(' '.join(cmdl)).read()
val = val.decode()
val = val.strip().split()[-1]
if item == 'sess' and os.uname()[0] == 'Darwin':
# 'ps -o sess' on Darwin returns a hex value
val = int(val, 16)
return val
| 12,538
|
def dGcthetalnorm(w: Wilson, cthetal):
"""Normalized distribution 1D cthetal"""
return tauBp / Btaul * dGcthetal(w, cthetal)
| 12,539
|
def test_image_signatures():
"""Test image signatures."""
for method in dir(Image):
if method.startswith('_'):
continue
method_signature = signature(getattr(Image, method)).parameters
base_signature = signature(getattr(BaseImage, f'_{method}')).parameters
specific_args = {'use_box', 'only_prepare'}
cleared_signature = {
i for i in method_signature if i not in specific_args
}
assert cleared_signature == set(
base_signature,
), f'Method name {method}'
| 12,540
|
def user_deposit_address_fixture(
deploy_smart_contract_bundle_concurrently: FixtureSmartContracts,
) -> Optional[UserDepositAddress]:
""" Deploy UserDeposit and fund accounts with some balances """
services_smart_contracts = deploy_smart_contract_bundle_concurrently.services_smart_contracts
if services_smart_contracts:
return services_smart_contracts.user_deposit_proxy.address
return None
| 12,541
|
def vonNeumann(t, rho, H):
"""(quantum Liouville-)von Neumann equation"""
H = H(t)
rho = rho.reshape(H.shape)
rho_dot = -1j*(np.dot(H, rho) - np.dot(rho, H))
return rho_dot.flatten()
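# Hedged usage sketch: propagate a two-level system with scipy's solve_ivp.
# The Hamiltonian and initial state below are illustrative, not from the original module.
import numpy as np
from scipy.integrate import solve_ivp

H = lambda t: np.array([[0.0, 0.5], [0.5, 1.0]])  # time-independent test Hamiltonian
rho0 = np.array([[1.0, 0.0], [0.0, 0.0]], dtype=complex)  # pure ground state
sol = solve_ivp(vonNeumann, (0.0, 10.0), rho0.flatten(), args=(H,), rtol=1e-8, atol=1e-10)
rho_final = sol.y[:, -1].reshape(2, 2)
assert abs(np.trace(rho_final) - 1.0) < 1e-6  # unitary evolution preserves the trace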
| 12,542
|
def conjure_categories(path):
"""
Look for all pngs in the path. They are generated by quicklook.py and organised into
folders by resolution. Each resolution has a number of variables associated to it
and each variable can have a number of vertical levels and lead times associated to
it. We want to get all of these associations as a nested dictionary so that we can
use them for drop downs when selecting images.
Args:
path:
Returns:
"""
# Get a list of the resolutions (each directory in the path)
resolutions = [d.name for d in path.glob("*") if d.is_dir()]
# For each directory find a list of all unique variables using the file patterns
# from quicklook.py
lookup = dict()
for resolution in resolutions:
lookup[resolution] = dict(
varnames=[],
lead_times=[],
levels=dict(),
)
plots = [f.name for f in (path / resolution).glob("*.png")]
for plot in tqdm(plots):
# TODO: Only have altitude at the moment but will need to format the
# filenames in a more parseable way in future for more coordinates
if "altitude" in plot:
result = parse.parse(
"{name}_altitude{vertical_level:d}_T+{lead_time:02d}.png", plot
).named
if result["name"] not in lookup[resolution]["levels"].keys():
lookup[resolution]["levels"][result["name"]] = []
if (
result["vertical_level"]
not in lookup[resolution]["levels"][result["name"]]
):
lookup[resolution]["levels"][result["name"]].append(
result["vertical_level"]
)
else:
result = parse.parse("{name}_T+{lead_time:02d}.png", plot).named
if result["name"] not in lookup[resolution]["varnames"]:
lookup[resolution]["varnames"].append(result["name"])
if result["lead_time"] not in lookup[resolution]["lead_times"]:
lookup[resolution]["lead_times"].append(result["lead_time"])
return lookup
| 12,543
|
def nicer_array(a, mm_cutoff=0.3):
"""
Returns a scaled array, the scaling, and a unit prefix
Example:
nicer_array( np.array([2e-10, 3e-10]) )
Returns:
(array([200., 300.]), 1e-12, 'p')
"""
if np.isscalar(a):
x = a
elif len(a) == 1:
x = a[0]
else:
x = np.array(a)
fac, prefix = nicer_scale_prefix( x, mm_cutoff=mm_cutoff )
return a/fac, fac, prefix
| 12,544
|
def sender(cl, stanza, cb=None, args={}):
"""
Sends stanza. Writes a crashlog on error
Parameters:
cl: the xmpp.Client object
stanza: the xmpp.Node object
cb: callback function
args: callback function arguments
"""
if cb:
cl.SendAndCallForResponse(stanza, cb, args)
else:
try:
cl.send(stanza)
except Exception:
disconnectHandler()
| 12,545
|
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, number.DOMAIN, DEFAULT_CONFIG
)
| 12,546
|
async def _process_recorder_platform(
hass: HomeAssistant, domain: str, platform: Any
) -> None:
"""Process a recorder platform."""
instance: Recorder = hass.data[DATA_INSTANCE]
instance.queue_task(AddRecorderPlatformTask(domain, platform))
| 12,547
|
def build_playground():
"""
build a playground based on user's input building and algorithm type
input: userid, algorithm, target building
output: JSON response containing the userid and the id of the new playground
"""
userid, building, algo_type = request.form['userid'], request.form['building'], request.form['algo_type']
user = User.objects(userid=userid).first()
pgid = str(uuid.uuid4())
algo_instance = get_algo_instance(algo_type=algo_type, target_building=building, pgid=pgid)
algo_binaries = pickle.dumps(algo_instance, protocol=pickle.HIGHEST_PROTOCOL)
objs = RawMetadata.objects(building=building)
pg = Playground(
userid=userid,
pgid=pgid,
building=building,
algo_type=algo_type,
algo_model=algo_binaries,
playground_labeled_metadata=[]
).save()
# add playground to user's record
user.playground.append(pg)
user.save()
logger.info('build playground={} for user={}'.format(pg.pgid, user.userid))
message = {
'userid': userid,
'new_playground': pgid
}
resp = jsonify(message)
return resp
| 12,548
|
def menuItemDirective(_context, menu, for_,
action, title, description=u'', icon=None, filter=None,
permission=None, layer=IDefaultBrowserLayer, extra=None,
order=0, item_class=None):
"""Register a single menu item."""
return menuItemsDirective(_context, menu, for_, layer).menuItem(
_context, action, title, description, icon, filter,
permission, extra, order, item_class)
| 12,549
|
def getQueueStatistics():
"""
Returns a 4-tuple containing the numbers of identifiers in the
Crossref queue by status: (awaiting submission, submitted,
registered with warning, registration failed).
"""
q = ezidapp.models.CrossrefQueue.objects.values("status").\
annotate(django.db.models.Count("status"))
d = {}
for r in q: d[r["status"]] = r["status__count"]
return (d.get("U", 0), d.get("S", 0), d.get("W", 0), d.get("F", 0))
| 12,550
|
def assign_targeting_score_v2(
base,
manual_selected_objids=None,
gmm_parameters=None,
ignore_specs=False,
debug=False,
n_random=50,
seed=123,
remove_lists=None,
low_priority_objids=None,
**kwargs,
):
"""
Last updated: 05/19/2020
100 Human selection and Special targets
150 sats without AAT/MMT/PAL specs
180 low-z (z < 0.05) but ZQUALITY = 2
200 within host, r < 17.77, gri/grz cuts OR others with very low SB
300 within host, r < 20.75, high p_GMM or GMM outliers or very high priority
400 within host, r < 20.75, main targeting cuts
500 within host, r < 20.75, gri/grz cuts, low-SB, random selection of 50
600 outwith host, r < 17.77 OR very high p_GMM, low SB
700 within host, r < 20.75, gri/grz cuts, low SB
800 within host, r < 20.75, gri/grz cuts, everything else
900 outwith host, r < 20.75, gri/grz cuts
1000 everything else
1100 Not in gri/grz cut
1200 Not galaxy
1300 Not clean
1350 Removed by hand
1400 Has spec already
"""
basic_cut = (C.relaxed_targeting_cuts | C.paper1_targeting_cut) & C.is_clean2 & C.is_galaxy2 & Query("r_mag < 21")
if not ignore_specs:
basic_cut &= ~C.has_spec
base = add_cut_scores(base)
base["P_GMM"] = np.float64(0)
base["log_L_GMM"] = np.float64(0)
base["TARGETING_SCORE"] = np.int32(1000)
base["index"] = np.arange(len(base))
surveys = [col[6:] for col in base.colnames if col.startswith("OBJID_")]
if gmm_parameters is not None:
for survey in surveys:
gmm_parameters_this = gmm_parameters.get(survey)
if gmm_parameters_this is None:
continue
postfix = "_" + survey
base_this = Query(
basic_cut,
"OBJID{} != -1".format(postfix),
"REMOVE{} == 0".format(postfix),
"is_galaxy{}".format(postfix),
).filter(base)
for color in get_all_colors():
b1, b2 = color
n1 = "".join((b1, "_mag", postfix))
n2 = "".join((b2, "_mag", postfix))
if n1 not in base_this.colnames or n2 not in base_this.colnames:
continue
with np.errstate(invalid="ignore"):
base_this[color] = base_this[n1] - base_this[n2]
base_this[color + "_err"] = np.hypot(
base_this["".join((b1, "_err", postfix))],
base_this["".join((b2, "_err", postfix))],
)
bands = getattr(utils, "get_{}_bands".format(survey))() # pylint: disable=not-callable
base_this["P_GMM"] = ensure_proper_prob(
calc_gmm_satellite_probability(
base_this,
gmm_parameters_this,
bands=bands,
mag_err_postfix="_err" + postfix,
)
)
base_this["log_L_GMM"] = calc_log_likelihood(
*get_input_data(
base_this,
bands=bands,
mag_err_postfix="_err" + postfix,
),
*(gmm_parameters_this[n] for n in param_labels_nosat),
)
to_update_mask = base_this["P_GMM"] > base["P_GMM"][base_this["index"]]
if to_update_mask.any():
to_update_idx = base_this["index"][to_update_mask]
for col in ("P_GMM", "log_L_GMM"):
base[col][to_update_idx] = base_this[col][to_update_mask]
del base_this, to_update_mask
del base["index"]
bright = C.sdss_limit
exclusion_cuts = Query()
if low_priority_objids is not None:
exclusion_cuts = Query(exclusion_cuts, QueryMaker.in1d("OBJID", low_priority_objids, invert=True))
if "sdss" in surveys and ("decals" in surveys or "des" in surveys):
deep_survey = "des" if "des" in surveys else "decals"
has_good_deep = Query(
"OBJID_{} != -1".format(deep_survey),
"REMOVE_{} == 0".format(deep_survey),
)
over_subtraction = Query(
QueryMaker.equals("survey", "sdss"),
Query(has_good_deep, "r_mag_{} > 20.8".format(deep_survey)) | Query(~has_good_deep, "u_mag > r_mag + 3.5"),
)
exclusion_cuts = Query(exclusion_cuts, ~over_subtraction)
if "des" in surveys:
des_bright_stars = Query(
QueryMaker.equals("survey", "des"),
"0.7 * (r_mag + 10.2) > sb_r",
"gr < 0.6",
"r_mag < 17",
C.valid_g_mag,
C.valid_sb,
)
bright = Query(bright, ~des_bright_stars)
exclusion_cuts = Query(exclusion_cuts, ~des_bright_stars)
veryhigh_p_gmm = Query("P_GMM >= 0.95", "log_L_GMM >= -7")
high_p_gmm = Query("P_GMM >= 0.7") | Query("log_L_GMM < -7")
low_sb_cut = Query(Query("score_sb_r >= 20"), C.valid_sb)
very_low_sb_cut = Query(
"r_mag < 20.8",
(
Query(
C.high_priority_cuts,
Query("score_sb_r >= 21.25") | Query("sb_r >= 25.25"),
)
| Query(
QueryMaker.equals("survey", "des"),
Query("score_sb_r >= 21.5") | Query("sb_r >= 25.5"),
)
),
C.valid_sb,
exclusion_cuts,
)
fill_values_by_query(base, C.faint_end_limit, {"TARGETING_SCORE": 900})
fill_values_by_query(base, Query(C.sat_rcut, C.faint_end_limit), {"TARGETING_SCORE": 800})
fill_values_by_query(
base,
Query(
C.sat_rcut,
C.faint_end_limit,
C.relaxed_targeting_cuts,
exclusion_cuts,
),
{"TARGETING_SCORE": 700},
)
fill_values_by_query(
base,
(bright | Query(veryhigh_p_gmm, C.relaxed_cut_sb, exclusion_cuts)),
{"TARGETING_SCORE": 600},
)
fill_values_by_query(
base,
Query(C.sat_rcut, C.high_priority_cuts, C.faint_end_limit, exclusion_cuts),
{"TARGETING_SCORE": 400},
)
fill_values_by_query(
base,
Query("TARGETING_SCORE == 400", (high_p_gmm | low_sb_cut)),
{"TARGETING_SCORE": 300},
)
fill_values_by_query(base, Query(C.sat_rcut, (bright | very_low_sb_cut)), {"TARGETING_SCORE": 200})
need_random_selection = np.flatnonzero(
Query(basic_cut, "TARGETING_SCORE >= 700", "TARGETING_SCORE < 800").mask(base)
)
if len(need_random_selection) > n_random:
random_mask = np.zeros(len(need_random_selection), dtype=bool)
random_mask[:n_random] = True
np.random.RandomState(seed).shuffle(random_mask) # pylint: disable=no-member
need_random_selection = need_random_selection[random_mask]
base["TARGETING_SCORE"][need_random_selection] = 500
base["TARGETING_SCORE"] += (8 - np.digitize(base["score_sb_r"], np.linspace(19.25, 22, 7))) * 10 + (
9 - np.floor(base["P_GMM"] * 10).astype(np.int32)
)
fill_values_by_query(base, ~basic_cut, {"TARGETING_SCORE": 1100})
fill_values_by_query(base, ~C.is_galaxy2, {"TARGETING_SCORE": 1200})
fill_values_by_query(base, ~C.is_clean2, {"TARGETING_SCORE": 1300})
if not ignore_specs:
fill_values_by_query(base, C.has_spec, {"TARGETING_SCORE": 1400})
fill_values_by_query(
base,
Query(basic_cut, "ZQUALITY == 2", "SPEC_Z < 0.05"),
{"TARGETING_SCORE": 180},
)
fill_values_by_query(
base,
Query(
C.is_sat,
(lambda x: (x != "AAT") & (x != "MMT") & (x != "PAL"), "TELNAME"),
),
{"TARGETING_SCORE": 150},
)
if remove_lists is not None:
for survey in surveys:
if survey not in remove_lists:
continue
fill_values_by_query(
base,
Query(
C.is_clean2,
(lambda x: np.in1d(x, remove_lists[survey]), "OBJID"),
(lambda x: x == survey, "survey"),
),
{"TARGETING_SCORE": 1350},
)
if manual_selected_objids is not None:
q = Query((lambda x: np.in1d(x, manual_selected_objids), "OBJID"))
if not ignore_specs:
q &= ~C.has_spec
fill_values_by_query(base, q, {"TARGETING_SCORE": 100})
base.sort("TARGETING_SCORE")
return base
| 12,551
|
def authenticate(controller, password = None, chroot_path = None, protocolinfo_response = None):
"""
Authenticates to a control socket using the information provided by a
PROTOCOLINFO response. In practice this will often be all we need to
authenticate, raising an exception if all attempts to authenticate fail.
All exceptions are subclasses of AuthenticationFailure so, in practice,
callers should catch the types of authentication failure that they care
about, then have a :class:`~stem.connection.AuthenticationFailure` catch-all
at the end.
This can authenticate to either a :class:`~stem.control.BaseController` or
:class:`~stem.socket.ControlSocket`.
:param controller: tor controller or socket to be authenticated
:param str password: passphrase to present to the socket if it uses password
authentication (skips password auth if **None**)
:param str chroot_path: path prefix if in a chroot environment
:param stem.response.protocolinfo.ProtocolInfoResponse protocolinfo_response:
tor protocolinfo response, this is retrieved on our own if **None**
  :raises: If all attempts to authenticate fail then this will raise a
:class:`~stem.connection.AuthenticationFailure` subclass. Since this may
try multiple authentication methods it may encounter multiple exceptions.
If so then the exception this raises is prioritized as follows...
* :class:`stem.connection.IncorrectSocketType`
The controller does not speak the tor control protocol. Most often this
happened because the user confused the SocksPort or ORPort with the
ControlPort.
* :class:`stem.connection.UnrecognizedAuthMethods`
All of the authentication methods tor will accept are new and
unrecognized. Please upgrade stem and, if that doesn't work, file a
ticket on 'trac.torproject.org' and I'd be happy to add support.
* :class:`stem.connection.MissingPassword`
We were unable to authenticate but didn't attempt password authentication
because none was provided. You should prompt the user for a password and
try again via 'authenticate_password'.
* :class:`stem.connection.IncorrectPassword`
We were provided with a password but it was incorrect.
* :class:`stem.connection.IncorrectCookieSize`
    Tor allows for authentication by reading a cookie file, but that file
is the wrong size to be an authentication cookie.
* :class:`stem.connection.UnreadableCookieFile`
    Tor allows for authentication by reading a cookie file, but we can't
read that file (probably due to permissions).
* **\\***:class:`stem.connection.IncorrectCookieValue`
    Tor allows for authentication by reading a cookie file, but rejected
the contents of that file.
* **\\***:class:`stem.connection.AuthChallengeUnsupported`
Tor doesn't recognize the AUTHCHALLENGE command. This is probably a Tor
    version prior to SAFECOOKIE being implemented, but this exception shouldn't
arise because we won't attempt SAFECOOKIE auth unless Tor claims to
support it.
* **\\***:class:`stem.connection.UnrecognizedAuthChallengeMethod`
Tor couldn't recognize the AUTHCHALLENGE method Stem sent to it. This
shouldn't happen at all.
* **\\***:class:`stem.connection.InvalidClientNonce`
Tor says that the client nonce provided by Stem during the AUTHCHALLENGE
process is invalid.
* **\\***:class:`stem.connection.AuthSecurityFailure`
Nonce value provided by the server was invalid.
* **\\***:class:`stem.connection.OpenAuthRejected`
Tor says that it allows for authentication without any credentials, but
then rejected our authentication attempt.
* **\\***:class:`stem.connection.MissingAuthInfo`
Tor provided us with a PROTOCOLINFO reply that is technically valid, but
missing the information we need to authenticate.
* **\\***:class:`stem.connection.AuthenticationFailure`
There are numerous other ways that authentication could have failed
including socket failures, malformed controller responses, etc. These
mostly constitute transient failures or bugs.
**\\*** In practice it is highly unusual for this to occur, being more of a
theoretical possibility rather than something you should expect. It's fine
to treat these as errors. If you have a use case where this commonly
happens, please file a ticket on 'trac.torproject.org'.
In the future new :class:`~stem.connection.AuthenticationFailure`
subclasses may be added to allow for better error handling.
"""
if not protocolinfo_response:
try:
protocolinfo_response = get_protocolinfo(controller)
except stem.ProtocolError:
raise IncorrectSocketType('unable to use the control socket')
except stem.SocketError as exc:
raise AuthenticationFailure('socket connection failed (%s)' % exc)
auth_methods = list(protocolinfo_response.auth_methods)
auth_exceptions = []
if len(auth_methods) == 0:
raise NoAuthMethods('our PROTOCOLINFO response did not have any methods for authenticating')
# remove authentication methods that are either unknown or for which we don't
# have an input
if AuthMethod.UNKNOWN in auth_methods:
auth_methods.remove(AuthMethod.UNKNOWN)
unknown_methods = protocolinfo_response.unknown_auth_methods
plural_label = 's' if len(unknown_methods) > 1 else ''
methods_label = ', '.join(unknown_methods)
# we... er, can't do anything with only unrecognized auth types
if not auth_methods:
exc_msg = 'unrecognized authentication method%s (%s)' % (plural_label, methods_label)
auth_exceptions.append(UnrecognizedAuthMethods(exc_msg, unknown_methods))
else:
log.debug('Authenticating to a socket with unrecognized auth method%s, ignoring them: %s' % (plural_label, methods_label))
if protocolinfo_response.cookie_path is None:
for cookie_auth_method in (AuthMethod.COOKIE, AuthMethod.SAFECOOKIE):
if cookie_auth_method in auth_methods:
auth_methods.remove(cookie_auth_method)
exc_msg = 'our PROTOCOLINFO response did not have the location of our authentication cookie'
auth_exceptions.append(NoAuthCookie(exc_msg, cookie_auth_method == AuthMethod.SAFECOOKIE))
if AuthMethod.PASSWORD in auth_methods and password is None:
auth_methods.remove(AuthMethod.PASSWORD)
auth_exceptions.append(MissingPassword('no passphrase provided'))
# iterating over AuthMethods so we can try them in this order
for auth_type in (AuthMethod.NONE, AuthMethod.PASSWORD, AuthMethod.SAFECOOKIE, AuthMethod.COOKIE):
if auth_type not in auth_methods:
continue
try:
if auth_type == AuthMethod.NONE:
authenticate_none(controller, False)
elif auth_type == AuthMethod.PASSWORD:
authenticate_password(controller, password, False)
elif auth_type in (AuthMethod.COOKIE, AuthMethod.SAFECOOKIE):
cookie_path = protocolinfo_response.cookie_path
if chroot_path:
cookie_path = os.path.join(chroot_path, cookie_path.lstrip(os.path.sep))
if auth_type == AuthMethod.SAFECOOKIE:
authenticate_safecookie(controller, cookie_path, False)
else:
authenticate_cookie(controller, cookie_path, False)
if isinstance(controller, stem.control.BaseController):
controller._post_authentication()
return # success!
except OpenAuthRejected as exc:
auth_exceptions.append(exc)
except IncorrectPassword as exc:
auth_exceptions.append(exc)
except PasswordAuthRejected as exc:
# Since the PROTOCOLINFO says password auth is available we can assume
# that if PasswordAuthRejected is raised it's being raised in error.
log.debug('The authenticate_password method raised a PasswordAuthRejected when password auth should be available. Stem may need to be corrected to recognize this response: %s' % exc)
auth_exceptions.append(IncorrectPassword(str(exc)))
except AuthSecurityFailure as exc:
log.info('Tor failed to provide the nonce expected for safecookie authentication. (%s)' % exc)
auth_exceptions.append(exc)
except (InvalidClientNonce, UnrecognizedAuthChallengeMethod, AuthChallengeFailed) as exc:
auth_exceptions.append(exc)
except (IncorrectCookieSize, UnreadableCookieFile, IncorrectCookieValue) as exc:
auth_exceptions.append(exc)
except CookieAuthRejected as exc:
auth_func = 'authenticate_safecookie' if exc.is_safecookie else 'authenticate_cookie'
log.debug('The %s method raised a CookieAuthRejected when cookie auth should be available. Stem may need to be corrected to recognize this response: %s' % (auth_func, exc))
auth_exceptions.append(IncorrectCookieValue(str(exc), exc.cookie_path, exc.is_safecookie))
except stem.ControllerError as exc:
auth_exceptions.append(AuthenticationFailure(str(exc)))
# All authentication attempts failed. Raise the exception that takes priority
# according to our pydocs.
for exc_type in AUTHENTICATE_EXCEPTIONS:
for auth_exc in auth_exceptions:
if isinstance(auth_exc, exc_type):
raise auth_exc
# We really, really shouldn't get here. It means that auth_exceptions is
# either empty or contains something that isn't an AuthenticationFailure.
raise AssertionError('BUG: Authentication failed without providing a recognized exception: %s' % str(auth_exceptions))
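A minimal usage sketch for the function above, assuming a local Tor instance exposing ControlPort 9051; the port and password are placeholders, and the catch-all AuthenticationFailure handler follows the advice in the docstring.

import stem.connection
import stem.control

def example_connect(password=None):
    # Open a control connection and let authenticate() pick the right method.
    controller = stem.control.Controller.from_port(port=9051)
    try:
        stem.connection.authenticate(controller, password=password)
        print('Tor version: %s' % controller.get_info('version'))
    except stem.connection.AuthenticationFailure as exc:
        print('Unable to authenticate: %s' % exc)
    finally:
        controller.close()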
| 12,552
|
def test_sum():
"""
test sum pattern 1, 11, 10, 01, 001, 010, 100, 110, 011, 111, 0011, 0101, 0111, 1011, 1111
test sum pattern implemented with reshape:
1000, 0100, 0010, 0001, 11111
others implemented by reshape that are not tested
0011,0101,0110,1001,1010,1100
1110,1101,1011
TODO: test with broadcast
"""
for shape, pattern in [((100,3,1300),[1]),
((0,),[0]),((5,),[0]),
                           ((0,0),[0,1]),((1,0),[0,1]),((5,4),[0,1]),((33,31),[0,1]),((5,4),[1]),((5,4),[0]),#need something bigger than 32 for some opt tests.
((5,4,3),[0]),((5,4,3),[1]),((5,4,3),[0,1]),((5,4,3),[2]),((5,4,3),[1,2]),((5,4,3),[0,1,2]),
((0,0,0,0),[0,1,2,3]),
((5,4,3,20),[2,3]), ((5,4,3,2),[0,1,2,3]), ((5,4,3,2),[0,2,3]),((5,4,3,2),[1,2,3]),
((5,4,3,10,11),[1,2]),
((5,4,3,20),[2,3]), ((5,4,3,2),[0,1,2,3]), ((5,4,3,2),[0,2,3]),((5,4,3,2),[1,2,3]),
                           #test shapes bigger than 4096 on each dimension to make sure that we work correctly when we don't have enough threads/blocks in each dimension
((4100,3),[0]),((3,4101),[0]),#10
((1024,33),[0]),((33,1024),[0]),#10
((1025,33),[0]),((33,1025),[0]),#10
((4100,3),[1]),((3,4101),[1]),#01
((1024,33),[1]),((33,1024),[1]),#01
((1025,33),[1]),((33,1025),[1]),#01
((4100,3),[0,1]),((3,4101),[0,1]),#11
((1024,33),[0,1]),((33,1024),[0,1]),#01
((1025,33),[0,1]),((33,1025),[0,1]),#01
((4100,4,3),[0]),((5,4100,3),[0]),((5,4,4100),[0]),#100
((4100,4,3),[1]),((5,4100,3),[1]),((5,4,4100),[1]),#010
((4100,4,3),[2]),((5,4100,3),[2]),((5,4,4100),[2]),#001
((4100,4,3),[0,1]),((5,4100,3),[0,1]),((5,4,4100),[0,1]),#110
((4100,4,3),[1,2]),((5,4100,3),[1,2]),((5,4,4100),[1,2]),#011
#((4100,4,3),[0,2]),((5,4100,3),[0,2]),((5,4,4100),[0,2]),#101 ##not implemented
((4100,4,3),[0,1,2]),((5,4100,3),[0,1,2]),((5,4,4100),[0,1,2]),#111
((4100,4,3,2),[2,3]),((4,4100,3,2),[2,3]),((4,3,4100,2),[2,3]),((4,3,2,4100),[2,3]),#0011
((4100,4,3,2),[1,3]),((4,4100,3,2),[1,3]),((4,3,4100,2),[1,3]),((4,3,2,4100),[1,3]),#0101
((4100,4,3,2),[0,2,3]),((4,4100,3,2),[0,2,3]),((4,3,4100,2),[0,2,3]),#((4,3,2,4100),[0,2,3]),#1011
((4100,4,3,2),[1,2,3]),((4,4100,3,2),[1,2,3]),((4,3,4100,2),[1,2,3]),((4,3,2,4100),[1,2,3]),#0111
((4100,2,3,4),[0,1,2,3]),((2,4100,3,4),[0,1,2,3]),((2,3,4100,4),[0,1,2,3]),((2,3,4,4100),[0,1,2,3]),#1111
#test pattern implemented by reshape
((4100,4,3,2),[0]),((4,4100,3,2),[0]),((4,3,4100,2),[0]),((4,3,2,4100),[0]),#1000
((4100,4,3,2),[1]),((4,4100,3,2),[1]),((4,3,4100,2),[1]),((4,3,2,4100),[1]),#0100
((4100,4,3,2),[2]),((4,4100,3,2),[2]),((4,3,4100,2),[2]),((4,3,2,4100),[2]),#0010
((4100,4,3,2),[3]),((4,4100,3,2),[3]),((4,3,4100,2),[3]),((4,3,2,4100),[3]),#0001
((1100,2,3,4,5),[0,1,2,3,4]),((2,1100,3,4,5),[0,1,2,3,4]),((2,3,1100,4,5),[0,1,2,3,4]),((2,3,4,1100,5),[0,1,2,3,4]),((2,3,4,5,1100),[0,1,2,3,4]),#11111
]:
a = tensor.TensorType('float32',(False,)*len(shape))()
b = T.Sum(pattern)(a)
val = numpy.random.rand(numpy.prod(shape)).reshape(shape)
# val = numpy.ones(shape)
# val = numpy.arange(numpy.prod(shape)).reshape(shape)
val = theano._asarray(val,dtype='float32')
f = theano.function([a],b, mode=mode_with_gpu)
f2 = theano.function([a],b, mode=mode_without_gpu)
assert tcn.GpuSum in [x.op.__class__ for x in f.maker.env.toposort()]
assert T.Sum in [x.op.__class__ for x in f2.maker.env.toposort()]
if val.size==0:
assert f2(val)==f(val), ('shape', shape, 'pattern', pattern)
else:
try:
                #We raise the error threshold as we sum big matrices
                #and this causes small rounding differences with some seeds
#example in debug mode with unittests.rseed=9275
orig_rtol = theano.tensor.basic.float32_rtol
theano.tensor.basic.float32_rtol = 2e-5
assert _allclose(f2(val),f(val)), ('shape', shape, 'pattern', pattern, sum([shape[i] for i in pattern]))
finally:
theano.tensor.basic.float32_rtol = orig_rtol
#test with dimshuffle
#we shuffle the 2 outer dims.
for shape, pattern in [#((5,),[0]),
((5,4),[0,1]),((5,4),[0]),
((5,4,3),[0]),((5,4,3),[0,1]),((5,4,3),[2]),((5,4,3),[0,1,2]),
((5,4,3,2),[0,1,2,3]), ((5,4,3,2),[0,2,3])]:
a = tensor.TensorType('float32',(False,)*len(shape))()
        dim_pattern = list(range(len(shape)))
dim_pattern[0]=1
dim_pattern[1]=0
a = a.dimshuffle(dim_pattern)
b = T.Sum(pattern)(a)
val = numpy.random.rand(numpy.prod(shape)).reshape(shape)
# val = numpy.ones(shape)
# val = numpy.arange(numpy.prod(shape)).reshape(shape)
val = theano._asarray(val,dtype='float32')
f = theano.function([a],b, mode=mode_with_gpu)
f2 = theano.function([a],b, mode=mode_without_gpu)
assert tcn.GpuSum in [x.op.__class__ for x in f.maker.env.toposort()]
assert T.Sum in [x.op.__class__ for x in f2.maker.env.toposort()]
assert _allclose(f2(val),f(val)), ('shape', shape, 'pattern', pattern, sum([shape[i] for i in pattern]))
#test with broadcast
for shape, pattern in [((5,),[0]),
((5,4),[0,1]),((5,4),[0]),
((5,4,3),[0]),((5,4,3),[0,1]),((5,4,3),[2]),((5,4,3),[0,1,2]),
((5,4,3,2),[0,1,2,3]), ((5,4,3,2),[0,2,3])]:
shape = numpy.asarray(shape)*2
a = tensor.TensorType('float32',(False,)*len(shape))()
a2 = tcn.CudaNdarrayType((False,)*len(shape))()
b = T.Sum(pattern)(a)
b2 = T.Sum(pattern)(a2)
val = numpy.random.rand(numpy.prod(shape)).reshape(shape)
# val = numpy.ones(shape)
# val = numpy.arange(numpy.prod(shape)).reshape(shape)
val = theano._asarray(val,dtype='float32')
val2 = cuda.CudaNdarray(val)
if len(shape)==1:
val = val[::2]
val2 = val2[::2]
elif len(shape)==2:
val = val[::2,::2]
val2 = val2[::2,::2]
elif len(shape)==3:
val = val[::2,::2,::2]
val2 = val2[::2,::2,::2]
elif len(shape)==4:
val = val[::2,::2,::2,::2]
val2 = val2[::2,::2,::2,::2]
f = theano.function([a],b, mode=mode_without_gpu)
f2 = theano.function([a2],b2, mode=mode_with_gpu)
assert tcn.GpuSum in [x.op.__class__ for x in f2.maker.env.toposort()]
assert T.Sum in [x.op.__class__ for x in f.maker.env.toposort()]
assert _allclose(f2(val2),f(val)), ('shape', shape, 'pattern', pattern, sum([shape[i] for i in pattern]))
| 12,553
|
def test_ctm_distribution_d1(nsymbols):
"""Check normalization of CTM distributions."""
bdm = BDM(ndim=1, nsymbols=nsymbols)
total = 0
for dct in bdm._ctm.values():
for key, cmx in dct.items():
n = len(set(key))
mult = factorial(nsymbols) / factorial(nsymbols - n)
total += 2**-cmx * mult
assert total == approx(1, .01)
| 12,554
|
def naive(edges: List[Edge[T]]) -> Iterator[Matching[T]]:
"""Enumerate best matchings"""
MAX_HEAPSIZE = 100
graph = nx.Graph()
for n1, n2, w in edges:
graph.add_node(n1)
graph.add_node(n2)
graph.add_edge(n1, n2, weight=w)
if not bprt.is_bipartite(graph):
raise RuntimeError("Not bipartite")
left_set, right_set = nx.bipartite.sets(graph)
lefts = sorted(left_set)
rights = sorted(right_set)
heap = []
for m in _naive(lefts, rights, is_valid=lambda x, y: (x, y) in graph.edges):
score = sum(graph.edges[(n1, n2)]['weight'] for n1, n2 in m)
        if len(heap) < MAX_HEAPSIZE:
heapq.heappush(heap, (score, m))
else:
heapq.heappushpop(heap, (score, m))
    while heap:
score, m = heapq.heappop(heap)
yield m
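A usage sketch under the assumption that each Edge unpacks to a (left node, right node, weight) triple, as the loop above implies; the node names and weights are made up, and the `_naive` helper and networkx imports come from the original module.

edges = [
    ("task1", "alice", 3.0),
    ("task1", "bob", 1.0),
    ("task2", "alice", 2.0),
    ("task2", "bob", 4.0),
]
for matching in naive(edges):
    # Matchings are yielded in increasing total weight among the kept candidates.
    print(matching)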
| 12,555
|
def create_compressed_generator(
original_generator: CompressorArg,
compressed_cse_list: List[List[Union[List[uint64], List[Union[bytes, None, Program]]]]],
) -> BlockGenerator:
"""
Bind the generator block program template to a particular reference block,
template bytes offsets, and SpendBundle.
"""
start = original_generator.start
end = original_generator.end
program = DECOMPRESS_BLOCK.curry(
DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, Program.to(start), Program.to(end), compressed_cse_list
)
generator_arg = GeneratorArg(original_generator.block_height, original_generator.generator)
return BlockGenerator(program, [generator_arg])
| 12,556
|
def parse(path):
"""
    Return a generator that yields Quote objects.
"""
with codecs.open(path, 'r') as f:
while f.readline():
f.readline()
f.readline()
line4 = f.readline()
f.readline()
# title, author = re.findall(r'^(.*) \((.*)\)$', line1)[0]
yield Quote(text=line4.strip().decode('utf-8'))
| 12,557
|
def csv_to_db_func(file_name):
"""The function reads a file that was uploaded by the user to the server.
It creates connection to the database and that file was dumped to the database.
: param file_name : csv file uploaded by the user."""
logging.info("Welcome to csv func")
df_data = pd.read_csv(file_name, index_col=False, delimiter=',')
replacement = {'height_feet': 0.0, 'height_inches': 0.0,
'position': "missing", 'weight_pounds': 0.0}
df_data.fillna(value=replacement, inplace=True)
df_data.fillna(0, inplace=True)
try:
conn = connect(host='localhost',
database="csvfile_upload",
user='root',
password='yogesh1304')
if conn.is_connected():
cursor = conn.cursor()
cursor.execute("select database();")
record = cursor.fetchone()
logging.info("You're connected to database: %s", record)
cursor.execute('DROP TABLE IF EXISTS csvfile_data;')
logging.info("Creating table....")
cursor.execute(CREATE_TABLE_QUERY)
logging.info("Table is created....")
# loop through the data frame
for i,row in df_data.iterrows():
print(row)
cursor.execute("INSERT INTO csvfile_upload.csvfile_data VALUES {}"
.format(tuple(row)))
# the connection is not auto committed by default, so we must commit to
# save our changes
conn.commit()
except errors.DatabaseError as db_e:
logging.error('%s: %s', db_e.__class__.__name__, db_e)
except errors.Error as error_e:
logging.error('%s: %s', error_e.__class__.__name__, error_e)
| 12,558
|
def run_mcmc(meas, x, nsamples, covm=None, scales=None):
"""
Sample the likelihood space with a Markov Chain Monte Carlo.
:param meas: TemplateMeasurement
        measurement whose spectrum likelihood space is to be probed
    :param x: [float]
        parameter values where to start the chain
    :param nsamples: int
        number of samples to draw from the likelihood space
:param covm: [[float]]
covariance matrix values if sampling transformed space
:param scales: [float]
parameter scales if not sampling transformed space
:return: [float], [float], [float], pymcmc.MCMC
posterior mean, lower CI, upper CI for each parameter, and the MCMC
object used for sampling
"""
mcmc = MCMC(meas.spec.npars)
mcmc.set_values(x)
if covm is not None and scales is None:
mcmc.set_covm(covm)
elif scales is not None:
mcmc.set_scales(scales)
else:
raise ValueError("Must provide covariance OR scales")
mcmc.rescale = 2 # good starting point
mcmc.learn_scale(meas.spec.ll, 1000)
mcmc.run(meas.spec.ll, nsamples)
mean = list()
mean_down = list()
mean_up = list()
for ipar in range(meas.spec.npars):
mean.append(np.mean(mcmc.data[:, ipar]))
low, high, _, _ = npinterval.interval(mcmc.data[:, ipar], 0.6827)
mean_down.append(low-mean[-1])
mean_up.append(high-mean[-1])
return mean, mean_down, mean_up, mcmc
| 12,559
|
def unconfig_extended_acl(device,acl_name):
""" Unconfigure the extended acls
Args:
device ('obj'): device to use
acl_name ('str'): name of acl
Returns:
None
Raises:
SubCommandFailure
"""
try:
device.configure(["no ip access-list extended {acl_name}".format(acl_name=acl_name)])
except SubCommandFailure as e:
raise SubCommandFailure("Could not unconfigure extended acl. Error:\n{error}".format(error=e))
| 12,560
|
def get_vlan_list(dut, cli_type="click"):
"""
Get list of VLANs
Author : Prudvi Mangadu (prudvi.mangadu@broadcom.com)
:param dut:
:param cli_type:
:return:
"""
st.log("show vlan to get vlan list")
rv = show_vlan_config(dut, cli_type=cli_type)
vlan_list = list(set([eac['vid'] for eac in rv]))
return vlan_list
| 12,561
|
def begin_organization_creation_task(registered_id):
"""
Asynchronously create our tenant schema. Email owner when process completes.
"""
# Run the sub-routine for taking the OrganizationRegistration object
# creating our Tenant from it.
call_command('populate_organization', str(registered_id)) # foundation_public/management/commands/populate_organization.py
# Send email to the owner of the Organization letting them know we've successfully
# finished setting up their tenancy.
call_command('send_organization_ready_email', str(registered_id)) # foundation_email/management/commands/send_organization_ready_email.py
# Delete the registered organization.
PublicOrganizationRegistration.objects.get(id=registered_id).delete()
# Return nothing.
return None
| 12,562
|
def rewrite_blockwise(inputs):
"""Rewrite a stack of Blockwise expressions into a single blockwise expression
Given a set of Blockwise layers, combine them into a single layer. The provided
layers are expected to fit well together. That job is handled by
``optimize_blockwise``
Parameters
----------
inputs : List[Blockwise]
Returns
-------
blockwise: Blockwise
See Also
--------
optimize_blockwise
"""
if len(inputs) == 1:
# Fast path: if there's only one input we can just use it as-is.
return inputs[0]
inputs = {inp.output: inp for inp in inputs}
dependencies = {
inp.output: {d for d, v in inp.indices if v is not None and d in inputs}
for inp in inputs.values()
}
dependents = reverse_dict(dependencies)
new_index_iter = (
c + (str(d) if d else "") # A, B, ... A1, B1, ...
for d in itertools.count()
for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
)
[root] = [k for k, v in dependents.items() if not v]
# Our final results. These will change during fusion below
indices = list(inputs[root].indices)
new_axes = inputs[root].new_axes
concatenate = inputs[root].concatenate
dsk = dict(inputs[root].dsk)
changed = True
while changed:
changed = False
for i, (dep, ind) in enumerate(indices):
if ind is None:
continue
if dep not in inputs:
continue
changed = True
# Replace _n with dep name in existing tasks
# (inc, _0) -> (inc, 'b')
dsk = {k: subs(v, {blockwise_token(i): dep}) for k, v in dsk.items()}
# Remove current input from input indices
# [('a', 'i'), ('b', 'i')] -> [('a', 'i')]
_, current_dep_indices = indices.pop(i)
sub = {
blockwise_token(i): blockwise_token(i - 1)
for i in range(i + 1, len(indices) + 1)
}
dsk = subs(dsk, sub)
# Change new input_indices to match give index from current computation
# [('c', j')] -> [('c', 'i')]
new_indices = inputs[dep].indices
sub = dict(zip(inputs[dep].output_indices, current_dep_indices))
contracted = {
x
for _, j in new_indices
if j is not None
for x in j
if x not in inputs[dep].output_indices
}
extra = dict(zip(contracted, new_index_iter))
sub.update(extra)
new_indices = [(x, index_subs(j, sub)) for x, j in new_indices]
# Update new_axes
for k, v in inputs[dep].new_axes.items():
new_axes[sub[k]] = v
# Bump new inputs up in list
sub = {}
# Map from (id(key), inds or None) -> index in indices. Used to deduplicate indices.
index_map = {(id(k), inds): n for n, (k, inds) in enumerate(indices)}
for i, index in enumerate(new_indices):
id_key = (id(index[0]), index[1])
if id_key in index_map: # use old inputs if available
sub[blockwise_token(i)] = blockwise_token(index_map[id_key])
else:
index_map[id_key] = len(indices)
sub[blockwise_token(i)] = blockwise_token(len(indices))
indices.append(index)
new_dsk = subs(inputs[dep].dsk, sub)
# indices.extend(new_indices)
dsk.update(new_dsk)
# De-duplicate indices like [(a, ij), (b, i), (a, ij)] -> [(a, ij), (b, i)]
# Make sure that we map everything else appropriately as we remove inputs
new_indices = []
seen = {}
sub = {} # like {_0: _0, _1: _0, _2: _1}
for i, x in enumerate(indices):
if x[1] is not None and x in seen:
sub[i] = seen[x]
else:
if x[1] is not None:
seen[x] = len(new_indices)
sub[i] = len(new_indices)
new_indices.append(x)
sub = {blockwise_token(k): blockwise_token(v) for k, v in sub.items()}
dsk = {k: subs(v, sub) for k, v in dsk.items()}
indices_check = {k for k, v in indices if v is not None}
numblocks = toolz.merge([inp.numblocks for inp in inputs.values()])
numblocks = {k: v for k, v in numblocks.items() if v is None or k in indices_check}
# Update IO-dependency information
io_deps = {}
for v in inputs.values():
io_deps.update(v.io_deps)
return Blockwise(
root,
inputs[root].output_indices,
dsk,
new_indices,
numblocks=numblocks,
new_axes=new_axes,
concatenate=concatenate,
annotations=inputs[root].annotations,
io_deps=io_deps,
)
| 12,563
|
def sort_drugs(processed_data, alpha_sort, **kwargs):
"""
Sorts all drug names, as primary keys of processed data dictionary. Sorting
is governed by primary criteria of decreasing cost, then secondary criteria
of alphabetical order. Secondary criteria ignores unsafe characters if
"alpha_sort" is True; and does not ignore unsafe characters if False.
Requires sort_criteria() inner function.
Args:
processed_data (dictionary): contains all analyzed data. Primary key
is drug name (string), and primary value is tuple containing
number of prescribers (integer, index 0) and total cost (float,
index 1).
alpha_sort (boolean): if True, special characters are not considered
during sorting. If False, special characters are considered during
sorting.
safe_char (list of strings): contains all characters considered safe.
Returns:
all_drugs_sorted (list of strings): contains all drug names in
sequential list sorted by drug cost and alphanumeric name.
"""
def sort_criteria(drug):
"""
Determines mapped sorting value of cost and alphanumeric name for
all drugs, as keys of processed data dictionary. Required by
sort_drugs() outer function.
Args:
drug (string): drug name.
Returns:
(tuple): ordered and mapped sorting criteria of cost and name.
"""
# Sets first criteria of decreasing drug cost
cost_criteria = - processed_data[drug][1]
# Sets second criteria of alphanumeric drug name
name_criteria = drug.upper()
# If True, does not consider special characters in alphanumeric order
if alpha_sort:
# Iterates over all characters in drug name
for char in drug:
# If character is not in safe list, remove from name criteria
if char not in safe_char:
# Removes special characters
name_criteria = name_criteria.replace(char,"")
# Returns primary and secondary sorting criteria
return (cost_criteria, name_criteria)
# Sets safe characters for evaluation of name criteria
safe_char = kwargs['ch']
# Sorts drug names by decreasing cost then alphanumeric order
all_drugs_sorted = sorted(processed_data, key=sort_criteria)
# Returns list of sorted drug names
return all_drugs_sorted
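A small usage sketch; the drug names, costs, and the safe-character list passed via the `ch` keyword are illustrative stand-ins for whatever the caller normally supplies.

processed = {
    "AMBIEN": (2, 300.0),
    "BD*TEST": (1, 300.0),
    "LIPITOR": (5, 150.5),
}
safe = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
print(sort_drugs(processed, alpha_sort=True, ch=safe))
# ['AMBIEN', 'BD*TEST', 'LIPITOR'] -- the cost tie is broken alphabetically,
# ignoring the '*' because alpha_sort is True.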
| 12,564
|
def log_k2ex_and_get_msg(ex, prefix, topology):
""" LOG K2 exception and extracted message. Return NLS message """
LOG.exception(ex)
detail = {}
k2msg = _("None")
if isinstance(ex, K2Error) and ex.k2response:
detail['Request_headers'] = ex.k2response.reqheaders
detail['Response_headers'] = ex.k2response.headers
detail['Response_body'] = ex.k2response.body
detail['Response_status'] = ex.k2response.status
if hasattr(ex.k2response, 'k2err'):
m = ex.k2response.k2err.find('./Message')
if m is not None:
k2msg = m.text
msg = _("%(prefix)s ***K2 Operator Error***: %(ex_msg)s [K2 Error body "
"Message: %(k2msg)s]") %\
dict(prefix=prefix, ex_msg=ex, k2msg=k2msg)
LOG.error(msg)
if detail:
LOG.error(_("Error details: %s") % detail)
if topology is not None:
if 'error' in topology:
topology['error'].append(msg)
else:
topology['error'] = [msg]
return msg
| 12,565
|
def fetch_ticket(identifier):
"""Return data of ticket with given identifier as pandas dataframe."""
try:
return pd.read_csv(f'./data/tickets/{identifier}.csv')
    except Exception:
return None
| 12,566
|
def dice_loss(logits, targets, smooth=1.0):
"""
logits: (torch.float32) shape (N, C, H, W)
targets: (torch.float32) shape (N, H, W), value {0,1,...,C-1}
"""
outputs = F.softmax(logits, dim=1)
targets = torch.unsqueeze(targets, dim=1)
    targets = torch.zeros_like(logits).scatter_(dim=1, index=targets.type(torch.int64), value=1.0)
inter = outputs * targets
dice = 1 - ((2*inter.sum(dim=(2,3)) + smooth) / (outputs.sum(dim=(2,3))+targets.sum(dim=(2,3)) + smooth))
return dice.mean()
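A quick smoke test, assuming the module already imports torch and torch.nn.functional as F as the body requires; the shapes and values are arbitrary.

import torch

logits = torch.randn(2, 3, 4, 4)                  # (N, C, H, W)
targets = torch.randint(0, 3, (2, 4, 4)).float()  # (N, H, W) with class ids
loss = dice_loss(logits, targets)
print(loss.item())  # scalar in [0, 1); lower means better predicted overlap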
| 12,567
|
def deltaG_methanogenesis_early_Earth(T, pCO2, pH2, pCH4):
"""
Equation: CO2 (g) + 4H2 (g) --> CH4 (g)+ 2H2O (l)
    Assumes gas partial pressures in bar and a total pressure of 1 bar.
T must be array (even if just 1 entry)
"""
R=8.314E-3 #kJ mol^-1 K^-1
deltaG_0=deltaG_F_PSat_T_CH4_g(T)+2.0*deltaG_F_PSat_T_H2O_l(T) - (deltaG_F_PSat_T_CO2_g(T) + 4.0*deltaG_F_PSat_T_H2_g(T))# Standard free energy
Q=(pCH4*(1.0)**2.0)/(pCO2*pH2**4.0) #Reaction quotient Q
deltaG=deltaG_0+R*T*np.log(Q)
deltaG[deltaG > life_threshold_deltaG]=np.nan #if at least 10 kJ/mol not generated, bugs not known to live.
return deltaG
| 12,568
|
def optimize_on_joints(j2d,
model,
cam,
img,
prior,
try_both_orient,
body_orient,
n_betas=10,
regs=None,
conf=None,
viz=False):
"""Fit the model to the given set of joints, given the estimated camera
:param j2d: 14x2 array of CNN joints
:param model: SMPL model
:param cam: estimated camera
:param img: h x w x 3 image
:param prior: mixture of gaussians pose prior
:param try_both_orient: boolean, if True both body_orient and its flip are considered for the fit
:param body_orient: 3D vector, initialization for the body orientation
:param n_betas: number of shape coefficients considered during optimization
:param regs: regressors for capsules' axis and radius, if not None enables the interpenetration error term
:param conf: 14D vector storing the confidence values from the CNN
:param viz: boolean, if True enables visualization during optimization
:returns: a tuple containing the optimized model, its joints projected on image space, the camera translation
"""
t0 = time()
# define the mapping LSP joints -> SMPL joints
# cids are joints ids for LSP:
cids = range(12) + [13]
# joint ids for SMPL
# SMPL does not have a joint for head, instead we use a vertex for the head
# and append it later.
smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]
# the vertex id for the joint corresponding to the head
head_id = 411
# weights assigned to each joint during optimization;
# the definition of hips in SMPL and LSP is significantly different so set
# their weights to zero
base_weights = np.array(
[1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.float64)
if try_both_orient:
flipped_orient = cv2.Rodrigues(body_orient)[0].dot(
cv2.Rodrigues(np.array([0., np.pi, 0]))[0])
flipped_orient = cv2.Rodrigues(flipped_orient)[0].ravel()
orientations = [body_orient, flipped_orient]
else:
orientations = [body_orient]
if try_both_orient:
# store here the final error for both orientations,
# and pick the orientation resulting in the lowest error
errors = []
svs = []
cams = []
for o_id, orient in enumerate(orientations):
# initialize the shape to the mean shape in the SMPL training set
betas = ch.zeros(n_betas)
# initialize the pose by using the optimized body orientation and the
# pose prior
init_pose = np.hstack((orient, prior.weights.dot(prior.means)))
# instantiate the model:
# verts_decorated allows us to define how many
# shape coefficients (directions) we want to consider (here, n_betas)
sv = verts_decorated(
trans=ch.zeros(3),
pose=ch.array(init_pose),
v_template=model.v_template,
J=model.J_regressor,
betas=betas,
shapedirs=model.shapedirs[:, :, :n_betas],
weights=model.weights,
kintree_table=model.kintree_table,
bs_style=model.bs_style,
f=model.f,
bs_type=model.bs_type,
posedirs=model.posedirs)
# make the SMPL joints depend on betas
Jdirs = np.dstack([model.J_regressor.dot(model.shapedirs[:, :, i])
for i in range(len(betas))])
J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot(
model.v_template.r)
# get joint positions as a function of model pose, betas and trans
(_, A_global) = global_rigid_transformation(
sv.pose, J_onbetas, model.kintree_table, xp=ch)
Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans
# add the head joint, corresponding to a vertex...
Jtr = ch.vstack((Jtr, sv[head_id]))
# ... and add the joint id to the list
if o_id == 0:
smpl_ids.append(len(Jtr) - 1)
# update the weights using confidence values
weights = base_weights * conf[
cids] if conf is not None else base_weights
# project SMPL joints on the image plane using the estimated camera
cam.v = Jtr
# data term: distance between observed and estimated joints in 2D
obj_j2d = lambda w, sigma: (
w * weights.reshape((-1, 1)) * GMOf((j2d[cids] - cam[smpl_ids]), sigma))
# mixture of gaussians pose prior
pprior = lambda w: w * prior(sv.pose)
# joint angles pose prior, defined over a subset of pose parameters:
# 55: left elbow, 90deg bend at -np.pi/2
# 58: right elbow, 90deg bend at np.pi/2
# 12: left knee, 90deg bend at np.pi/2
# 15: right knee, 90deg bend at np.pi/2
alpha = 10
my_exp = lambda x: alpha * ch.exp(x)
obj_angle = lambda w: w * ch.concatenate([my_exp(sv.pose[55]), my_exp(-sv.pose[
58]), my_exp(-sv.pose[12]), my_exp(-sv.pose[15])])
if viz:
import matplotlib.pyplot as plt
plt.ion()
def on_step(_):
"""Create visualization."""
plt.figure(1, figsize=(10, 10))
plt.subplot(1, 2, 1)
# show optimized joints in 2D
tmp_img = img.copy()
for coord, target_coord in zip(
np.around(cam.r[smpl_ids]).astype(int),
np.around(j2d[cids]).astype(int)):
if (coord[0] < tmp_img.shape[1] and coord[0] >= 0 and
coord[1] < tmp_img.shape[0] and coord[1] >= 0):
cv2.circle(tmp_img, tuple(coord), 3, [0, 0, 255])
if (target_coord[0] < tmp_img.shape[1] and
target_coord[0] >= 0 and
target_coord[1] < tmp_img.shape[0] and
target_coord[1] >= 0):
cv2.circle(tmp_img, tuple(target_coord), 3,
[0, 255, 0])
plt.imshow(tmp_img[:, :, ::-1])
plt.draw()
plt.show()
plt.pause(1e-2)
on_step(_)
else:
on_step = None
if regs is not None:
# interpenetration term
sp = SphereCollisions(
pose=sv.pose, betas=sv.betas, model=model, regs=regs)
sp.no_hands = True
# weight configuration used in the paper, with joints + confidence values from the CNN
# (all the weights used in the code were obtained via grid search, see the paper for more details)
# the first list contains the weights for the pose priors,
# the second list contains the weights for the shape prior
opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
[1e2, 5 * 1e1, 1e1, .5 * 1e1])
# run the optimization in 4 stages, progressively decreasing the
# weights for the priors
for stage, (w, wbetas) in enumerate(opt_weights):
_LOGGER.info('stage %01d', stage)
objs = {}
objs['j2d'] = obj_j2d(1., 100)
objs['pose'] = pprior(w)
objs['pose_exp'] = obj_angle(0.317 * w)
objs['betas'] = wbetas * betas
if regs is not None:
objs['sph_coll'] = 1e3 * sp
ch.minimize(
objs,
x0=[sv.betas, sv.pose],
method='dogleg',
callback=on_step,
options={'maxiter': 100,
'e_3': .0001,
'disp': 0})
t1 = time()
_LOGGER.info('elapsed %.05f', (t1 - t0))
if try_both_orient:
errors.append((objs['j2d'].r**2).sum())
svs.append(sv)
cams.append(cam)
if try_both_orient and errors[0] > errors[1]:
choose_id = 1
else:
choose_id = 0
if viz:
plt.ioff()
return (svs[choose_id], cams[choose_id].r, cams[choose_id].t.r)
| 12,569
|
def make_axis_angle_matrix(axis, angle):
"""construct a matrix that rotates around axis by angle (in radians)"""
#[RMS] ported from WildMagic4
fCos = math.cos(angle)
fSin = math.sin(angle)
fX2 = axis[0]*axis[0]
fY2 = axis[1]*axis[1]
fZ2 = axis[2]*axis[2]
fXYM = axis[0]*axis[1]*(1-fCos)
fXZM = axis[0]*axis[2]*(1-fCos)
fYZM = axis[1]*axis[2]*(1-fCos)
fXSin = axis[0]*fSin
fYSin = axis[1]*fSin
fZSin = axis[2]*fSin
return ( fX2*(1-fCos)+fCos, fXYM-fZSin, fXZM+fYSin, fXYM+fZSin, fY2*(1-fCos)+fCos, fYZM-fXSin, fXZM-fYSin, fYZM+fXSin, fZ2*(1-fCos)+fCos )
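A quick sanity check, assuming the returned 9-tuple is a row-major 3x3 matrix acting on column vectors (the WildMagic convention the comment points to): rotating the x axis by 90 degrees about z should land on the y axis.

import math

m = make_axis_angle_matrix((0.0, 0.0, 1.0), math.pi / 2)
v = (1.0, 0.0, 0.0)
rotated = tuple(sum(m[3 * r + c] * v[c] for c in range(3)) for r in range(3))
print([round(x, 6) for x in rotated])  # [0.0, 1.0, 0.0]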
| 12,570
|
def get_agent_type_from_project_type():
""" use project type to determine agent type """
if 'METRIC' in if_config_vars['project_type']:
if if_config_vars['is_replay']:
return 'MetricFileReplay'
else:
return 'CUSTOM'
elif if_config_vars['is_replay']:
return 'LogFileReplay'
else:
return 'LogStreaming'
# INCIDENT and DEPLOYMENT don't use this
| 12,571
|
def close_server(is_rebooting = False):
"""
Close the Unity server and tell clients to react appropriately.
Set `is_rebooting` to handle cases like domain reload when Unity is expected to come back shortly.
Returns True if the server was closed by this call, False if it was already closed.
"""
global server
global clients
if server is None:
return False
# Tell all the clients to quit.
client_shutdown_async = []
clients_to_shutdown = []
with clients_lock:
for client_list in clients.values():
for c in client_list:
try:
shutdown_result = c.async_shutdown(is_rebooting)
# Give the client a half-second to tell us there was a problem.
# If they don't tell us in that time, we just ignore the problem.
shutdown_result.set_expiry(0.5)
client_shutdown_async.append(shutdown_result)
clients_to_shutdown.append(c)
except EOFError:
pass
for a in client_shutdown_async:
try:
a.wait()
a.value
except EOFError:
# The client shut down when we told it to shut down -- pretty normal.
pass
except:
print("Exception while shutting down a client: {}".format(traceback.format_exc()))
server.close()
# Process all jobs pending. Client threads might be waiting for jobs to be
# run on the main thread
while not jobs.empty():
        process_jobs()
server.thread.join()
for c in clients_to_shutdown:
c.wait_for_thread()
# Finally release the lock file.
server.lockfile.release()
server = None
clients = dict()
return True
| 12,572
|
def pip_install(path: PathType, package_name: str) -> ContextManagerFunctionReturnType[None]:
"""
Installs a package with pip located in the given path and with the given name.
This method is intended to use with `with`, so after its usage, the package will be
uninstalled.
"""
pip_main(["install", str(path)])
try:
yield
finally:
pip_main(["uninstall", "-y", package_name])
| 12,573
|
def dict_decode(node_dict: dict) -> Node:
"""Convert a dictionary to an `Entity` node (if it has a `type` item)."""
if "type" not in node_dict:
return node_dict
node_type = node_dict.pop("type")
class_ = getattr(types, node_type, None)
if class_ is None:
return node_dict
node_kwargs = {}
for key, val in node_dict.items():
if isinstance(val, dict):
val = dict_decode(val)
elif isinstance(val, list):
processed_list = []
for sub_val in val:
if isinstance(sub_val, dict):
processed_list.append(dict_decode(sub_val))
else:
processed_list.append(sub_val)
val = processed_list
node_kwargs[key] = val
return class_(**node_kwargs)
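An illustrative call, assuming the `types` module referenced above exposes node classes (here a hypothetical Person and Organization) whose constructors accept the remaining items as keyword arguments.

node = dict_decode({
    "type": "Person",
    "givenNames": ["Ada"],
    "familyNames": ["Lovelace"],
    "affiliations": [{"type": "Organization", "name": "Analytical Engines"}],
})
# Nested dicts carrying a "type" item are decoded recursively, so the
# affiliation above becomes an Organization instance inside a Person node.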
| 12,574
|
def compute_purges(snapshots, pattern, now):
"""Return the list of snapshots to purge,
given a list of snapshots, a purge pattern and a now time
"""
snapshots = sorted(snapshots)
pattern = sorted(pattern, reverse=True)
purge_list = []
max_age = pattern[0]
# Age of the snapshots in minutes.
# Example : [30, 70, 90, 150, 210, ..., 4000]
snapshots_age = []
valid_snapshots = []
for s in snapshots:
try:
snapshots_age.append(
int((now - datetime.strptime(
s.split('@')[1], DTFORMAT)).total_seconds()
)/60)
valid_snapshots.append(s)
except:
log.info("Skipping purge of %s with invalid date format", s)
continue
if not valid_snapshots:
return purge_list
# pattern = 3600:180:60
# age segments = [(3600, 180), (180, 60)]
for age_segment in [(pattern[i], pattern[i+1])
for i, p in enumerate(pattern[:-1])]:
last_timeframe = -1
for i, age in enumerate(snapshots_age):
# if the age is outside the age_segment, delete nothing.
# Only 70 and 90 are inside the age_segment (60, 180)
if age > age_segment[0] < max_age or age < age_segment[1]:
continue
# Now get the timeframe number of the snapshot.
# Ages 70 and 90 are in the same timeframe (70//60 == 90//60)
timeframe = age // age_segment[1]
# delete if we already had a snapshot in the same timeframe
# or if the snapshot is very old
if timeframe == last_timeframe or age > max_age:
purge_list.append(valid_snapshots[i])
last_timeframe = timeframe
return purge_list
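A usage sketch; the timestamp format below is assumed to match the module-level DTFORMAT constant, and snapshot names follow the '<volume>@<timestamp>' convention the parser expects.

from datetime import datetime, timedelta

now = datetime(2021, 6, 1, 12, 0)
fmt = '%Y-%m-%dT%H:%M:%S.%f'  # assumed equal to DTFORMAT
snapshots = [
    'data@' + (now - timedelta(minutes=m)).strftime(fmt)
    for m in (30, 70, 90, 150, 210, 400)
]
# Keep one snapshot per hour up to 3 hours and one per 3 hours up to 60 hours;
# with these ages only the redundant 70-minute-old snapshot gets purged.
print(compute_purges(snapshots, pattern=[3600, 180, 60], now=now))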
| 12,575
|
def design_partial_factorial(k: int, res: int) -> DataFrame:
"""
design_partial_factorial
This function helps design 2 level partial factorial experiments. These experiments are often
described using the syntax l**(k-p) where l represents the level of each factor, k represents
the total number of factors considered, and p represents a scaling factor relative to the full
factorial design.
    This function assumes that l=2. Users are not asked to set p; instead, the user sets a minimum
desired resolution for their experiment. Resolution describes the kind of aliasing incurred by
scaling down from a full to a partial factorial design. Higher resolutions have less potential
aliasing (confounding).
Resolution number is determined through the defining relation of the partial factorial design.
For the 6 factor design 2**(6-p) with factors ABCDEF, example defining relations (I) are shown
below. The resolution cannot exceed the number of factors in the experiment. So a 6 factor
experiment can be at most a resolution 6 (otherwise it would be a full factorial experiment).
* Res I: I = A
* Res II: I = AB
* Res III: I = ABC
* Res IV: I = ABCD
* Res V: I = ABCDE
* Res VI: I = ABCDEF
Practically we tend to use resolution III-, IV- and V-designs.
* Res I: Cannot distinguish between levels within main effects (not useful).
* Res II: Main effects may be aliased with other main effects (not useful).
* Res III: Main effects may be aliased with two-way interactions.
* Res IV: Two-way interactions may be aliased with each other.
* Res V: Two-way interactions may be aliased with three-way interactions.
* Res VI: Three-way interactions may be aliased with each other.
Parameters
----------
k : int
the total number of factors considered in the experiment
res : int
the desired minimum resolution of the experiment
Returns
-------
pd.DataFrame
A dataframe with the partial factorial design
Examples
--------
>>> # create partial factorial design for a 2 level 4 factor resolution III experiment
>>> design_df = design_partial_factorial(k=4, res=3)
"""
_check_int_input(k, "k")
_check_int_input(res, "res")
assert res <= k, "Resolution must be smaller than or equal to the number of factors."
# Assume l=2 and use k specified by user to solve for p in design
n = arange(res - 1, k, 1)
k_minus_p = k - 1 if res == k else n[~(_k_combo_vec(n, res) < k)][0]
logging.info("Partial Factorial Design: l=2, k={}, p={}".format(k, k - k_minus_p))
logging.info("Ratio to Full Factorial Design: {}".format(Fraction(2**k_minus_p / 2**k)))
# identify the main effects and interactions for the design
main_factors = arange(k_minus_p)
    clean = lambda x: x.replace("  ", " ").strip(" ").replace(" ", ":")
interactions = [clean(_array_to_string(main_factors))] if res == k else \
[
clean(_array_to_string(c))
for r in range(res - 1, k_minus_p)
for c in combinations(main_factors, r)
][:k - k_minus_p]
# combine main effects and interactions into a single design string (format inspired by patsy)
factors = " ".join([_array_to_string(main_factors)] + interactions)
logging.info("Design string: {}".format(factors))
main_factors = [i for i in factors.split(" ") if i and ":" not in i]
two_level_full_factorial = [[-1, 1] for _ in main_factors]
full_factorial_design = design_full_factorial(two_level_full_factorial)
interactions = [
["x" + i for i in j.split(":")]
for j in [i for i in factors.split(" ") if i and ":" in i]
]
design = "+".join(full_factorial_design.columns.tolist() + [":".join(i) for i in interactions])
partial_factorial_design = dmatrix(design, full_factorial_design, return_type='dataframe').drop(
columns=["Intercept"], axis=1)
partial_factorial_design.columns = \
["x{}".format(i) for i in range(partial_factorial_design.shape[1])]
return partial_factorial_design
| 12,576
|
def title_first_word(field: Field = None) -> Optional[str]:
"""
Returns an uppercase first word (skipping any articles) of
the title field (245 MARC tag subfield $a).
Args:
field: pymarc.Field instance
Returns:
word
"""
pass
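The body above is a stub; the following is only a sketch of what it might do, assuming pymarc's Field.get_subfields API and a small set of English articles, not the project's actual implementation.

def title_first_word_sketch(field=None):
    """Uppercase first non-article word of 245 $a (illustrative only)."""
    if field is None:
        return None
    subfields = field.get_subfields("a")
    if not subfields:
        return None
    articles = {"a", "an", "the"}
    for word in subfields[0].split():
        cleaned = word.strip(" .,:;/")
        if cleaned and cleaned.lower() not in articles:
            return cleaned.upper()
    return None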
| 12,577
|
def train(model, target_label=1, epochs=1, learning_rate=5.0):
""" Learns the patch for taget_label
Args:
model: Model to be trained (ModelContainer object)
target_label: Target label for which the patch will be trained
epochs: Number of iteration through the training set
Returns:
None. The trained patch can be accessed by model.patch()
"""
model.reset_patch()
target_ys = gen_target_ys(target_label=target_label, batch_size = BATCH_SIZE)
for i in range(epochs):
epoch_loss = model.train_step(target_ys = target_ys, scale = (0.1, 1.0),
learning_rate = learning_rate)
print("Loss after epoch %s: %s" % (i, epoch_loss))
| 12,578
|
def find_latest(message_ts: str, post_dir: Path) -> str:
"""Retrieves the latest POST request timestamp for a given message."""
latest_ts = message_ts
for postfile in os.listdir(os.fsencode(post_dir)):
if (filename := os.fsdecode(postfile)).endswith('.json'):
            request_ts = filename[:-len('.json')]
if request_ts < latest_ts:
continue
else:
with open(os.path.join(post_dir, filename), 'r') as file:
request = json.load(file)
if request['container']['message_ts'] == message_ts:
if request_ts > latest_ts : latest_ts = request_ts
else:
continue
else:
continue
return latest_ts
| 12,579
|
def command_line_code_generation(filename, language, out_path=None):
"""Starts a code generator without starting the GUI.
filename: Name of wxg file to generate code from
language: Code generator language
out_path: output file / output directory"""
from xml_parse import CodeWriter
try:
if language not in common.code_writers:
raise errors.WxgMissingCodeWriter(language)
writer = common.code_writers[language]
CodeWriter( writer=writer, input=filename, out_path=out_path )
except errors.WxgBaseException as inst:
logging.error(inst)
sys.exit(inst)
except Exception:
logging.error( _("An exception occurred while generating the code for the application.\n"
"If you think this is a wxGlade bug, please report it.") )
logging.exception(_('Internal Error'))
sys.exit(1)
sys.exit(0)
| 12,580
|
def get_trending_queries(filename):
"""Extract trends from a file."""
f = open(filename, 'r')
trend_tuples_list = []
for line in f:
trend_tuples_list.append(tuple((line.strip()).split(',')))
f.close()
return trend_tuples_list
| 12,581
|
def get_bio(x, lang='en'):
"""Get the one-sentence introduction"""
bio = x.loc[16][lang]
return bio
| 12,582
|
def create_tf_example(filename, source_id, encoded_jpeg, annotations, resize=True):
"""
This function creates a tf.train.Example in object detection api format from a Waymo data frame.
args:
- filename [str]: name of the original tfrecord file
- source_id [str]: original image source id (here: frame context name + camera name + frame index)
- encoded_jpeg [bytes]: jpeg encoded image
- annotations [protobuf object]: bboxes and classes
returns:
- tf_example [tf.Train.Example]: tf example in the objection detection api format.
"""
if not resize:
encoded_jpg_io = io.BytesIO(encoded_jpeg)
image = Image.open(encoded_jpg_io)
width, height = image.size
width_factor, height_factor = image.size
else:
image_tensor = tf.io.decode_jpeg(encoded_jpeg)
height_factor, width_factor, _ = image_tensor.shape
image_res = tf.cast(tf.image.resize(image_tensor, (640, 640)), tf.uint8)
encoded_jpeg = tf.io.encode_jpeg(image_res).numpy()
width, height = 640, 640
mapping = {1: 'vehicle', 2: 'pedestrian', 4: 'cyclist'}
image_format = b'jpg'
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
filename = filename.encode('utf8') # convert to bytes in utf8 format
source_id = source_id.encode('utf8') # convert to bytes in utf8 format
for ann in annotations:
xmin, ymin = ann.box.center_x - 0.5 * ann.box.length, ann.box.center_y - 0.5 * ann.box.width
xmax, ymax = ann.box.center_x + 0.5 * ann.box.length, ann.box.center_y + 0.5 * ann.box.width
xmins.append(xmin / width_factor)
xmaxs.append(xmax / width_factor)
ymins.append(ymin / height_factor)
ymaxs.append(ymax / height_factor)
classes.append(ann.type)
classes_text.append(mapping[ann.type].encode('utf8'))
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': int64_feature(height),
'image/width': int64_feature(width),
'image/filename': bytes_feature(filename),
'image/source_id': bytes_feature(source_id),
'image/encoded': bytes_feature(encoded_jpeg),
'image/format': bytes_feature(image_format),
'image/object/bbox/xmin': float_list_feature(xmins),
'image/object/bbox/xmax': float_list_feature(xmaxs),
'image/object/bbox/ymin': float_list_feature(ymins),
'image/object/bbox/ymax': float_list_feature(ymaxs),
'image/object/class/text': bytes_list_feature(classes_text),
'image/object/class/label': int64_list_feature(classes),
}))
return tf_example
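The feature helpers used above (int64_feature, bytes_feature, and friends) are not shown here; they presumably match the conventional TensorFlow Object Detection API definitions sketched below.

import tensorflow as tf

def int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def int64_list_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))

def bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def bytes_list_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))

def float_list_feature(value):
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))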
| 12,583
|
def format_oids(oids_parameters):
"""
Format dictionary OIDs to ``cryptography.x509.oid.NameOID`` object list
:param oids_parameters: CA Object Identifiers (OIDs).
The are typically seen in X.509 names.
Allowed keys/values:
``'country_name': str (two letters)``,
``'locality_name': str``,
``'state_or_province': str``,
``'street_address': str``,
``'organization_name': str``,
``'organization_unit_name': str``,
``'email_address': str``,
:type oids_parameters: dict, required
:return: ``cryptography.x509.oid.NameOID`` object list
:rtype: object ``cryptography.x509.oid.NameOID`` object list
"""
oids = list()
for oid in oids_parameters:
if oid in OIDS:
current_oid = oids_parameters[oid]
if type(current_oid) is not str:
raise TypeError(f"'{oid}' must be str")
if oid == "country_name":
                # country name ISO 3166-1 (alpha-2)
if not re.match(COUNTRY_REGEX, current_oid):
raise OwnCAInvalidOID(
f"'{oid}' must be ISO 3166-1 (alfa-2)"
)
else:
oids.append(
x509.NameAttribute(NameOID.COUNTRY_NAME, current_oid)
)
elif oid == "locality_name":
oids.append(
x509.NameAttribute(NameOID.LOCALITY_NAME, current_oid)
)
elif oid == "state_or_province":
oids.append(
x509.NameAttribute(
NameOID.STATE_OR_PROVINCE_NAME, current_oid
)
)
elif oid == "street_address":
oids.append(
x509.NameAttribute(NameOID.STREET_ADDRESS, current_oid)
)
elif oid == "organization_name":
oids.append(
x509.NameAttribute(NameOID.ORGANIZATION_NAME, current_oid)
)
elif oid == "organization_unit_name":
oids.append(
x509.NameAttribute(
NameOID.ORGANIZATIONAL_UNIT_NAME, current_oid
)
)
elif oid == "email_address":
oids.append(
x509.NameAttribute(NameOID.EMAIL_ADDRESS, current_oid)
)
else:
raise OwnCAInvalidOID(
f"The '{oid}' is Invalid. Allowed OIDs: {', '.join(OIDS)}."
)
return oids
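A usage sketch with placeholder values; the returned list of x509.NameAttribute objects can then be wrapped in cryptography's x509.Name.

oids = format_oids({
    "country_name": "BR",
    "state_or_province": "Minas Gerais",
    "locality_name": "Uba",
    "organization_name": "OwnCA",
    "organization_unit_name": "Security",
    "email_address": "security@example.com",
})
subject = x509.Name(oids)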
| 12,584
|
def Phases(*args):
"""Number of phases"""
# Getter
if len(args) == 0:
return lib.Generators_Get_Phases()
# Setter
Value, = args
lib.Generators_Set_Phases(Value)
| 12,585
|
def create_channel(logger: Logger,
connection: komand.connection,
team_id: str,
channel_name: str,
description: str) -> bool:
"""
Creates a channel for a given team
:param logger: (logging.logger)
:param connection: Object (komand.connection)
:param team_id: String
:param channel_name: String
:param description: String
:return: boolean
"""
create_channel_endpoint = f"https://graph.microsoft.com/beta/teams/{team_id}/channels"
    create_channel_payload = {
"description": description,
"displayName": channel_name
}
headers = connection.get_headers()
logger.info(f"Creating channel with: {create_channel_endpoint}")
    result = requests.post(create_channel_endpoint, json=create_channel_payload, headers=headers)
try:
result.raise_for_status()
except Exception as e:
raise PluginException(cause=f"Create channel {channel_name} failed.",
assistance=result.text) from e
if not result.status_code == 201:
raise PluginException(cause=f"Create channel returned an unexpected result.",
assistance=result.text)
return True
| 12,586
|
def getMoveValue(board, table, depth, move):
""" Sort criteria is as follows.
1. The move from the hash table
2. Captures as above.
3. Killers.
4. History.
5. Moves to the centre. """
# As we only return directly from transposition table if hashf == hashfEXACT
# There could be a non hashfEXACT very promising move for us to test
if table.isHashMove(depth, move):
return sys.maxsize
fcord = (move >> 6) & 63
tcord = move & 63
flag = move >> 12
arBoard = board.arBoard
fpiece = fcord if flag == DROP else arBoard[fcord]
tpiece = arBoard[tcord]
if tpiece != EMPTY:
if board.variant == ATOMICCHESS:
if kingExplode(board, move, board.color):
return MATE_VALUE
# We add some extra to ensure also bad captures will be searched early
if board.variant in ASEAN_VARIANTS:
return ASEAN_PIECE_VALUES[tpiece] - PIECE_VALUES[fpiece] + 1000
else:
return PIECE_VALUES[tpiece] - PIECE_VALUES[fpiece] + 1000
if flag in PROMOTIONS:
if board.variant in ASEAN_VARIANTS:
return ASEAN_PIECE_VALUES[flag - 3] - PAWN_VALUE + 1000
else:
return PIECE_VALUES[flag - 3] - PAWN_VALUE + 1000
if flag == DROP:
return PIECE_VALUES[tpiece] + 1000
killervalue = table.isKiller(depth, move)
if killervalue:
return 1000 + killervalue
# King tropism - a move that brings us nearer to the enemy king, is probably
# a good move
# opking = board.kings[1-board.color]
# score = distance[fpiece][fcord][opking] - distance[fpiece][tcord][opking]
if fpiece not in position_values:
# That is, fpiece == EMPTY
print(fcord, tcord)
print(board)
if board.variant in ASEAN_VARIANTS:
score = 0
else:
score = (
position_values[fpiece][board.color][tcord]
- position_values[fpiece][board.color][fcord]
)
# History heuristic
score += table.getButterfly(move)
return score
| 12,587
|
def activate_user(username):
"""Activate a user account."""
user = annotator.credentials.find_one({'username': username})
if not user['active']:
annotator.credentials.update_one(user, {'$set': {'active': True}})
flash("User {0} activated successfully".format(username), 'success')
else:
flash("User {0} is already active".format(username), 'warning')
return redirect(url_for('manage_users'))
| 12,588
|
def query_all():
"""Queries all matches in Elasticsearch, to be used further for suggesting
product names when a user is not aware of them.
"""
query_all = {
"query": {"match_all": {}},
}
return query_all
| 12,589
|
def cmd():
"""
A command-line interface for downloading wildfire perimeter and incident points data from NIFC.
Returns GeoJSON.
"""
pass
| 12,590
|
def _mesh_homogeneous_cell(cell_vect, mesh_path):
"""Generate a simple mesh for a homogeneous cell.
cell_vect: np.array 2x2 colonnes = vecteurs periodicité
"""
name = mesh_path.stem
geometry.init_geo_tools()
geometry.set_gmsh_option("Mesh.MshFileVersion", 4.1)
# Mesh.Algorithm = 6; Frontal - Delaunay for 2D meshes
geometry.set_gmsh_option("Mesh.Algorithm", 6)
geometry.set_gmsh_option("Mesh.MeshSizeMin", 0.05)
geometry.set_gmsh_option("Mesh.MeshSizeMax", 0.05)
rve = Gmsh2DRVE([], cell_vect, (1, 1), np.zeros(2), [], False, name)
rve.mesh_generate()
gmsh.model.mesh.renumberNodes()
gmsh.model.mesh.renumberElements()
gmsh.write(str(mesh_path))
mesh_path = msh_conversion(mesh_path, ".xdmf")
geometry.reset()
return mesh_path
| 12,591
|
def test_get_effective_tip_length(
mock_labware_store: MagicMock,
geometry_store: GeometryStore
) -> None:
"""It should get the effective tip length from a labware ID and pipette config."""
pipette_config: PipetteDict = cast(PipetteDict, {
"tip_overlap": {
"default": 10,
"opentrons/opentrons_96_tiprack_300ul/1": 20,
}
})
mock_labware_store.state.get_tip_length.return_value = 50
mock_labware_store.state.get_definition_uri.return_value = (
"opentrons/opentrons_96_tiprack_300ul/1"
)
length_eff = geometry_store.state.get_effective_tip_length(
labware_id="tip-rack-id",
pipette_config=pipette_config
)
assert length_eff == 30
mock_labware_store.state.get_tip_length.assert_called_with("tip-rack-id")
mock_labware_store.state.get_definition_uri.assert_called_with("tip-rack-id")
mock_labware_store.state.get_definition_uri.return_value = (
"opentrons/something_else/1"
)
default_length_eff = geometry_store.state.get_effective_tip_length(
labware_id="tip-rack-id",
pipette_config=pipette_config
)
assert default_length_eff == 40
| 12,592
|
async def async_setup_entry(hass, config_entry):
"""Initialize the sharkiq platform via config entry."""
ayla_api = get_ayla_api(
username=config_entry.data[CONF_USERNAME],
password=config_entry.data[CONF_PASSWORD],
websession=hass.helpers.aiohttp_client.async_get_clientsession(),
)
try:
if not await async_connect_or_timeout(ayla_api):
return False
except CannotConnect as exc:
raise exceptions.ConfigEntryNotReady from exc
shark_vacs = await ayla_api.async_get_devices(False)
device_names = ", ".join(d.name for d in shark_vacs)
_LOGGER.debug("Found %d Shark IQ device(s): %s", len(shark_vacs), device_names)
coordinator = SharkIqUpdateCoordinator(hass, config_entry, ayla_api, shark_vacs)
await coordinator.async_config_entry_first_refresh()
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][config_entry.entry_id] = coordinator
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
return True
| 12,593
|
def fetch_incidents(client: Client, max_incidents: int,
last_run: Dict[str, Union[Optional[int], Optional[str]]], first_fetch: Optional[int],
priority: Optional[str], activity_status: Optional[str],
progress_status: Optional[str], business_units: Optional[str], issue_types: Optional[str],
tags: Optional[str], cloud_management_status: Optional[str],
mirror_direction: Optional[str], sync_tags: Optional[List[str]],
fetch_details: Optional[bool]
) -> Tuple[Dict[str, Union[Optional[int], Optional[str]]], List[dict]]:
"""This function retrieves new alerts every interval (default is 1 minute).
This function has to implement the logic of making sure that incidents are
fetched only onces and no incidents are missed. By default it's invoked by
XSOAR every minute. It will use last_run to save the timestamp of the last
incident it processed. If last_run is not provided, it should use the
integration parameter first_fetch to determine when to start fetching
the first time. Uses "createdAfter" in the Expanse API for timestamp.
:return:
A tuple containing two elements:
next_run (``Dict[str, int]``): Contains the timestamp that will be
used in ``last_run`` on the next fetch, and the last issue id.
incidents (``List[dict]``): List of incidents that will be created in XSOAR
:rtype: ``Tuple[Dict[str, Union[Optional[int], Optional[str]]], List[dict]]``
"""
last_fetch = last_run.get('last_fetch')
if last_fetch is None:
last_fetch = cast(int, first_fetch)
else:
last_fetch = cast(int, last_fetch)
latest_created_time = last_fetch
last_issue_id = last_run.get('last_issue_id')
latest_issue_id: Optional[str] = None
incidents: List[Dict[str, Any]] = []
arg_list = argToList(priority)
if arg_list and not all(i in ISSUE_PRIORITY for i in arg_list):
raise ValueError(f'priority must include: {", ".join(ISSUE_PRIORITY)}')
_priority = ','.join(arg_list)
arg_list = argToList(progress_status)
if arg_list and not all(i in ISSUE_PROGRESS_STATUS for i in arg_list):
raise ValueError(f'progressStatus must include: {", ".join(ISSUE_PROGRESS_STATUS)}')
_progress_status = ','.join(arg_list)
arg_list = argToList(activity_status)
if arg_list and not all(i in ISSUE_ACTIVITY_STATUS for i in arg_list):
raise ValueError(f'activityStatus must include: {", ".join(ISSUE_ACTIVITY_STATUS)}')
_activity_status = ','.join(arg_list)
arg_list = argToList(cloud_management_status)
if arg_list and not all(i in CLOUD_MANAGEMENT_STATUS for i in arg_list):
raise ValueError(f'cloudManagementStatus must include: {", ".join(CLOUD_MANAGEMENT_STATUS)}')
_cloud_management_status = ','.join(arg_list)
created_after = timestamp_us_to_datestring_utc(latest_created_time, DATE_FORMAT)
r = client.get_issues(
limit=max_incidents if not last_issue_id else max_incidents + 1, # workaround to avoid unnecessary API calls
priority=_priority, business_units=business_units,
progress_status=_progress_status, activity_status=_activity_status, tags=tags,
issue_type=issue_types, cloud_management_status=_cloud_management_status,
created_after=created_after, sort='created'
)
broken = False
issues: List = []
skip = cast(str, last_issue_id)
for i in r:
if skip and not broken:
if 'id' not in i or 'created' not in i:
continue
# fix created time to make sure precision is the same to microsecond with no rounding
i['created'] = timestamp_us_to_datestring_utc(datestring_to_timestamp_us(i['created']), DATE_FORMAT)
if i['created'] != created_after:
issues.append(i)
broken = True
elif i['id'] == skip:
broken = True
else:
issues.append(i)
if len(issues) == max_incidents:
break
for issue in issues:
ml_feature_list: List[str] = []
if 'created' not in issue or 'id' not in issue:
continue
incident_created_time = datestring_to_timestamp_us(issue.get('created'))
if last_fetch:
if incident_created_time < last_fetch:
continue
incident_name = issue.get('headline') if 'headline' in issue else issue.get('id')
# Mirroring
issue['xsoar_mirroring'] = {
'mirror_direction': mirror_direction,
'mirror_id': issue.get('id'),
'mirror_instance': demisto.integrationInstance(),
'sync_tags': sync_tags
}
issue['xsoar_severity'] = convert_priority_to_xsoar_severity(issue.get('priority', 'Unknown'))
# Handle asset information
issue['assets'], ml_feature_list, _ = client.parse_asset_data(issue, fetch_details)
# add issue specific information to ml key
if (
(provider := issue.get('providers'))
and isinstance(provider, list)
and 'name' in provider[0]
):
ml_feature_list.append(provider[0].get('name'))
if (
(latest_evidence := issue.get('latestEvidence'))
and isinstance(latest_evidence, dict)
):
if (
(geolocation := latest_evidence.get('geolocation'))
and isinstance(geolocation, dict)
):
for f in ['countryCode', 'city']:
if (x := geolocation.get(f)):
ml_feature_list.append(x)
# dedup, sort and join ml feature list
issue['ml_features'] = ' '.join(sorted(list(set(ml_feature_list))))
incident = {
'name': incident_name,
'details': issue.get('helpText'),
'occurred': issue.get('created'),
'rawJSON': json.dumps(issue),
'severity': issue.get('xsoar_severity')
}
latest_issue_id = issue.get('id')
incidents.append(incident)
if incident_created_time > latest_created_time:
latest_created_time = incident_created_time
next_run = {
'last_fetch': latest_created_time,
'last_issue_id': latest_issue_id if latest_issue_id else last_issue_id}
return next_run, incidents
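
# A simplified, self-contained sketch of the dedup strategy above (not Expanse-specific):
# remember the newest "created" timestamp plus the last issue id, and skip anything already
# seen on the next poll. `poll_once` and `sample_issues` are illustrative names only.
def poll_once(issues, last_fetch, last_issue_id):
    fetched, latest_ts, latest_id = [], last_fetch, last_issue_id
    for issue in sorted(issues, key=lambda i: i["created"]):
        if issue["created"] < last_fetch or issue["id"] == last_issue_id:
            continue
        fetched.append(issue)
        latest_ts, latest_id = issue["created"], issue["id"]
    return {"last_fetch": latest_ts, "last_issue_id": latest_id}, fetched

sample_issues = [{"id": "a", "created": 1}, {"id": "b", "created": 2}]
state, new = poll_once(sample_issues, last_fetch=0, last_issue_id=None)
assert [i["id"] for i in new] == ["a", "b"] and state == {"last_fetch": 2, "last_issue_id": "b"}
state, new = poll_once(sample_issues, **state)
assert new == []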
| 12,594
|
def extractWordFeatures(x):
"""
Extract word features for a string x. Words are delimited by
whitespace characters only.
@param string x:
@return dict: feature vector representation of x.
Example: "I am what I am" --> {'I': 2, 'am': 2, 'what': 1}
"""
# BEGIN_YOUR_CODE (our solution is 4 lines of code, but don't worry if you deviate from this)
mydict = collections.defaultdict(float)
for s in x.split(' '):
if s.isalnum() and s[0:4] != "http":
mydict[s] += 1
return mydict
# END_YOUR_CODE
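
# Example usage (matches the docstring example; the solution additionally drops tokens that
# are not alphanumeric or that start with "http", as the second assert shows). Assumes
# `collections` is imported at module level, as the solution above requires.
assert extractWordFeatures("I am what I am") == {'I': 2, 'am': 2, 'what': 1}
assert extractWordFeatures("visit httpfoo later later") == {'visit': 1, 'later': 2}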
| 12,595
|
def delete_user(user_id):
"""
    Delete the user specified by user ID.
Note: Always return the appropriate response for the action requested.
"""
user = mongo_mgr.db.user.find_one({'_id': user_id})
if user:
        mongo_mgr.db.user.delete_one({'_id': user_id})
result = {'id': user_id}
else:
result = "No result."
return jsonify({'result': result})
| 12,596
|
def query_attention_one(**kwargs):
"""
    Query whether the current user follows the specified object.
:param kwargs: {'user_id': user_id, 'object_id': object_id}
:return: 0 or 1
"""
session = None
try:
session = get_session()
results = session.query(func.count('*')).filter(and_(Attention.OPEN_ID == kwargs['user_id'],
Attention.OBJECT_ID == kwargs['object_id'])).scalar()
        # Commit to persist to the database
session.commit()
        logging.info('OK : attention.py--->query_attention_one() succeeded')
return str(results)
except Exception as e:
        logging.critical('Error : attention.py--->query_attention_one() failed: {}'.format(e))
return RESULT_ERROR
    finally:
        if session is not None:
            session.close()
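
# A self-contained sketch of the same count-with-filter pattern, using an in-memory SQLite
# database; `Follow` is a stand-in for the Attention model assumed above (not the real model).
from sqlalchemy import Column, String, and_, create_engine, func
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Follow(Base):
    __tablename__ = 'follow'
    OPEN_ID = Column(String, primary_key=True)
    OBJECT_ID = Column(String, primary_key=True)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(Follow(OPEN_ID='u1', OBJECT_ID='o1'))
    session.commit()
    count = session.query(func.count('*')).filter(
        and_(Follow.OPEN_ID == 'u1', Follow.OBJECT_ID == 'o1')).scalar()
    assert count == 1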
| 12,597
|
def _dict_empty_map_helper(values, empty, delim, av_separator, v_delimiter,
parser):
"""
A helper to consolidate logic between singleton and non-singleton mapping.
Args:
values: The value to parse.
empty: The empty representation for this value in CoNLL-U format.
delim: The delimiter between components of the value.
av_separator: The separator between attribute and value in each
component.
v_delimiter: The delimiter between values for the same attribute.
parser: The parser of the value from the attribute value pair.
Returns:
An empty dict if the value is empty and otherwise a parsed equivalent.
Raises:
ParseError: If the dict format was unable to parsed. This error will be
raised by the provided parser.
"""
if values == empty:
return {}
d = {}
for el in values.split(delim):
parts = el.split(av_separator, 1)
if len(parts) == 1 or (len(parts) == 2 and parts[1] == ''):
k = parts[0]
v = None
elif len(parts) == 2:
k, v = parts
parsed = parser(v, v_delimiter)
d[k] = parsed
return d
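
# Example usage, assuming a simple parser that splits multi-values on the value delimiter
# (the call mirrors how a CoNLL-U FEATS column such as "Case=Nom|Number=Sing" would be parsed;
# the helper itself is format-agnostic):
def parse_values(v, v_delimiter):
    return None if v is None else set(v.split(v_delimiter))

feats = _dict_empty_map_helper('Case=Nom|Number=Sing', '_', '|', '=', ',', parse_values)
assert feats == {'Case': {'Nom'}, 'Number': {'Sing'}}
assert _dict_empty_map_helper('_', '_', '|', '=', ',', parse_values) == {}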
| 12,598
|
def get_full_json(msa, component, sessionkey, pretty=False, human=False):
"""
Form text in JSON with storage component data.
:param msa: MSA DNS name and IP address.
:type msa: tuple
:param sessionkey: Session key.
:type sessionkey: str
:param pretty: Print in pretty format
:type pretty: int
:param component: Name of storage component.
:type component: str
:param human: Expand result dict keys in human readable format
:type: bool
:return: JSON with all found data.
:rtype: str
"""
# Forming URL
msa_conn = msa[1] if VERIFY_SSL else msa[0]
url = '{strg}/api/show/{comp}'.format(strg=msa_conn, comp=component)
# Making request to API
resp_return_code, resp_description, xml = query_xmlapi(url, sessionkey)
if resp_return_code != '0':
raise SystemExit('ERROR: {rc} : {rd}'.format(rc=resp_return_code, rd=resp_description))
# Processing XML
all_components = {}
if component == 'disks':
for PROP in xml.findall("./OBJECT[@name='drive']"):
# Processing main properties
disk_location = PROP.find("./PROPERTY[@name='location']").text
disk_health_num = PROP.find("./PROPERTY[@name='health-numeric']").text
disk_full_data = {
"h": disk_health_num
}
# Processing advanced properties
disk_ext = dict()
disk_ext['t'] = PROP.find("./PROPERTY[@name='temperature-numeric']")
disk_ext['ts'] = PROP.find("./PROPERTY[@name='temperature-status-numeric']")
disk_ext['cj'] = PROP.find("./PROPERTY[@name='job-running-numeric']")
disk_ext['poh'] = PROP.find("./PROPERTY[@name='power-on-hours']")
for prop, value in disk_ext.items():
if value is not None:
disk_full_data[prop] = value.text
all_components[disk_location] = disk_full_data
elif component == 'vdisks':
for PROP in xml.findall("./OBJECT[@name='virtual-disk']"):
vdisk_name = PROP.find("./PROPERTY[@name='name']").text
vdisk_health_num = PROP.find("./PROPERTY[@name='health-numeric']").text
vdisk_status_num = PROP.find("./PROPERTY[@name='status-numeric']").text
vdisk_owner_num = PROP.find("./PROPERTY[@name='owner-numeric']").text
vdisk_owner_pref_num = PROP.find("./PROPERTY[@name='preferred-owner-numeric']").text
vdisk_full_data = {
"h": vdisk_health_num,
"s": vdisk_status_num,
"ow": vdisk_owner_num,
"owp": vdisk_owner_pref_num
}
all_components[vdisk_name] = vdisk_full_data
elif component == 'pools':
for PROP in xml.findall("./OBJECT[@name='pools']"):
pool_sn = PROP.find("./PROPERTY[@name='serial-number']").text
pool_health_num = PROP.find("./PROPERTY[@name='health-numeric']").text
pool_owner_num = PROP.find("./PROPERTY[@name='owner-numeric']").text
pool_owner_pref_num = PROP.find("./PROPERTY[@name='preferred-owner-numeric']").text
pool_full_data = {
"h": pool_health_num,
"ow": pool_owner_num,
"owp": pool_owner_pref_num
}
all_components[pool_sn] = pool_full_data
elif component == 'disk-groups':
for PROP in xml.findall("./OBJECT[@name='disk-group']"):
            dg_sn = PROP.find("./PROPERTY[@name='serial-number']").text
dg_health_num = PROP.find("./PROPERTY[@name='health-numeric']").text
dg_status_num = PROP.find("./PROPERTY[@name='status-numeric']").text
dg_owner_num = PROP.find("./PROPERTY[@name='owner-numeric']").text
dg_owner_pref_num = PROP.find("./PROPERTY[@name='preferred-owner-numeric']").text
dg_curr_job_num = PROP.find("./PROPERTY[@name='current-job-numeric']").text
dg_curr_job_pct = PROP.find("./PROPERTY[@name='current-job-completion']").text
            # current-job-completion returns None when no job is running, so replace None with zero
if dg_curr_job_pct is None:
dg_curr_job_pct = '0'
dg_full_data = {
"h": dg_health_num,
"s": dg_status_num,
"ow": dg_owner_num,
"owp": dg_owner_pref_num,
"cj": dg_curr_job_num,
"cjp": dg_curr_job_pct.rstrip('%')
}
all_components[dg_sn] = dg_full_data
elif component == 'volumes':
for PROP in xml.findall("./OBJECT[@name='volume']"):
vol_sn = PROP.find("./PROPERTY[@name='serial-number']").text
vol_health_num = PROP.find("./PROPERTY[@name='health-numeric']").text
vol_owner_num = PROP.find("./PROPERTY[@name='owner-numeric']").text
vol_owner_pref_num = PROP.find("./PROPERTY[@name='preferred-owner-numeric']").text
vol_full_data = {
"h": vol_health_num,
"ow": vol_owner_num,
"owp": vol_owner_pref_num
}
all_components[vol_sn] = vol_full_data
elif component == 'controllers':
for PROP in xml.findall("./OBJECT[@name='controllers']"):
# Processing main controller properties
ctrl_id = PROP.find("./PROPERTY[@name='controller-id']").text
ctrl_sc_fw = PROP.find("./PROPERTY[@name='sc-fw']").text
ctrl_health_num = PROP.find("./PROPERTY[@name='health-numeric']").text
ctrl_status_num = PROP.find("./PROPERTY[@name='status-numeric']").text
ctrl_rd_status_num = PROP.find("./PROPERTY[@name='redundancy-status-numeric']").text
# Get controller statistics
url = '{strg}/api/show/{comp}/{ctrl}'.format(strg=msa_conn, comp='controller-statistics', ctrl=ctrl_id)
# Making request to API
stats_ret_code, stats_descr, stats_xml = query_xmlapi(url, sessionkey)
if stats_ret_code != '0':
raise SystemExit('ERROR: {} : {}'.format(stats_ret_code, stats_descr))
            # TODO: not sure this is the best approach; it costs one extra query to the XML API
ctrl_cpu_load = stats_xml.find("./OBJECT[@name='controller-statistics']/PROPERTY[@name='cpu-load']").text
ctrl_iops = stats_xml.find("./OBJECT[@name='controller-statistics']/PROPERTY[@name='iops']").text
# Making full controller dict
ctrl_full_data = {
"h": ctrl_health_num,
"s": ctrl_status_num,
"rs": ctrl_rd_status_num,
"cpu": ctrl_cpu_load,
"io": ctrl_iops,
"fw": ctrl_sc_fw
}
# Processing advanced controller properties
ctrl_ext = dict()
ctrl_ext['fh'] = PROP.find("./OBJECT[@basetype='compact-flash']/PROPERTY[@name='health-numeric']")
ctrl_ext['fs'] = PROP.find("./OBJECT[@basetype='compact-flash']/PROPERTY[@name='status-numeric']")
for prop, value in ctrl_ext.items():
if value is not None:
ctrl_full_data[prop] = value.text
all_components[ctrl_id] = ctrl_full_data
elif component == 'enclosures':
for PROP in xml.findall("./OBJECT[@name='enclosures']"):
# Processing main enclosure properties
encl_id = PROP.find("./PROPERTY[@name='enclosure-id']").text
encl_health_num = PROP.find("./PROPERTY[@name='health-numeric']").text
encl_status_num = PROP.find("./PROPERTY[@name='status-numeric']").text
# Making full enclosure dict
encl_full_data = {
"h": encl_health_num,
"s": encl_status_num
}
all_components[encl_id] = encl_full_data
elif component == 'power-supplies':
# Getting info about all power supplies
for PS in xml.findall("./OBJECT[@name='power-supplies']"):
# Processing main power supplies properties
ps_id = PS.find("./PROPERTY[@name='durable-id']").text
ps_name = PS.find("./PROPERTY[@name='name']").text
# Exclude voltage regulators
if ps_name.lower().find('voltage regulator') == -1:
ps_health_num = PS.find("./PROPERTY[@name='health-numeric']").text
ps_status_num = PS.find("./PROPERTY[@name='status-numeric']").text
ps_dc12v = PS.find("./PROPERTY[@name='dc12v']").text
ps_dc5v = PS.find("./PROPERTY[@name='dc5v']").text
ps_dc33v = PS.find("./PROPERTY[@name='dc33v']").text
ps_dc12i = PS.find("./PROPERTY[@name='dc12i']").text
ps_dc5i = PS.find("./PROPERTY[@name='dc5i']").text
ps_full_data = {
"h": ps_health_num,
"s": ps_status_num,
"12v": ps_dc12v,
"5v": ps_dc5v,
"33v": ps_dc33v,
"12i": ps_dc12i,
"5i": ps_dc5i
}
# Processing advanced power supplies properties
ps_ext = dict()
ps_ext['t'] = PS.find("./PROPERTY[@name='dctemp']")
for prop, value in ps_ext.items():
if value is not None:
ps_full_data[prop] = value.text
all_components[ps_id] = ps_full_data
elif component == 'fans':
# Getting info about all fans
for FAN in xml.findall("./OBJECT[@name='fan-details']"):
# Processing main fan properties
            fan_id = FAN.find("./PROPERTY[@name='durable-id']").text
            fan_health_num = FAN.find("./PROPERTY[@name='health-numeric']").text
            fan_status_num = FAN.find("./PROPERTY[@name='status-numeric']").text
            fan_speed = FAN.find("./PROPERTY[@name='speed']").text
fan_full_data = {
"h": fan_health_num,
"s": fan_status_num,
"sp": fan_speed
}
all_components[fan_id] = fan_full_data
elif component == 'ports':
for FC in xml.findall("./OBJECT[@name='ports']"):
# Processing main ports properties
port_name = FC.find("./PROPERTY[@name='port']").text
port_health_num = FC.find("./PROPERTY[@name='health-numeric']").text
port_full_data = {
"h": port_health_num
}
# Processing advanced ports properties
port_ext = dict()
port_ext['ps'] = FC.find("./PROPERTY[@name='status-numeric']")
for prop, value in port_ext.items():
if value is not None:
port_full_data[prop] = value.text
# SFP Status
            # Older models (before the 1050/2050 API) expose no numeric property for sfp-status, so map the text value manually
sfp_status_map = {"Not compatible": '0', "Incorrect protocol": '1', "Not present": '2', "OK": '3'}
sfp_status_char = FC.find("./OBJECT[@name='port-details']/PROPERTY[@name='sfp-status']")
sfp_status_num = FC.find("./OBJECT[@name='port-details']/PROPERTY[@name='sfp-status-numeric']")
if sfp_status_num is not None:
port_full_data['ss'] = sfp_status_num.text
else:
if sfp_status_char is not None:
port_full_data['ss'] = sfp_status_map[sfp_status_char.text]
all_components[port_name] = port_full_data
# Transform dict keys to human readable format if '--human' argument is given
if human:
all_components = expand_dict(all_components)
return json.dumps(all_components, separators=(',', ':'), indent=pretty)
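
# A self-contained sketch of the XML-to-dict pattern used above, fed a tiny hand-written
# document instead of a live MSA API response (element/property names mimic the real API,
# the values are made up):
import json
import xml.etree.ElementTree as ET

sample = ET.fromstring(
    "<RESPONSE>"
    "<OBJECT name='enclosures'>"
    "<PROPERTY name='enclosure-id'>1</PROPERTY>"
    "<PROPERTY name='health-numeric'>0</PROPERTY>"
    "<PROPERTY name='status-numeric'>1</PROPERTY>"
    "</OBJECT>"
    "</RESPONSE>"
)
enclosures = {}
for obj in sample.findall("./OBJECT[@name='enclosures']"):
    encl_id = obj.find("./PROPERTY[@name='enclosure-id']").text
    enclosures[encl_id] = {
        "h": obj.find("./PROPERTY[@name='health-numeric']").text,
        "s": obj.find("./PROPERTY[@name='status-numeric']").text,
    }
assert json.dumps(enclosures, separators=(',', ':')) == '{"1":{"h":"0","s":"1"}}'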
| 12,599
|